diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 77d2eb0b9..edb95fac1 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -9,12 +9,12 @@ assignees: '' Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs -Please ask questions in https://github.com/seaweedfs/seaweedfs/discussions +Please ask questions in https://github.com/chrislusf/seaweedfs/discussions example of a good issue report: -https://github.com/seaweedfs/seaweedfs/issues/1005 +https://github.com/chrislusf/seaweedfs/issues/1005 example of a bad issue report: -https://github.com/seaweedfs/seaweedfs/issues/1008 +https://github.com/chrislusf/seaweedfs/issues/1008 **Describe the bug** A clear and concise description of what the bug is. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 26de250af..b5899d12d 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -5,11 +5,6 @@ # How are we solving the problem? - -# How is the PR tested? - - - # Checks - [ ] I have added unit tests if possible. - [ ] I will add related wiki document changes and link to this PR after merging. diff --git a/.github/workflows/binaries_dev.yml b/.github/workflows/binaries_dev.yml index 45a17c2d4..f906ed2bb 100644 --- a/.github/workflows/binaries_dev.yml +++ b/.github/workflows/binaries_dev.yml @@ -38,13 +38,13 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Set BUILD_TIME env run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} - name: Go Release Binaries Large Disk - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -53,14 +53,14 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed-large-disk asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -68,7 +68,7 @@ jobs: release_tag: dev overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed-normal-disk @@ -87,13 +87,13 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Set BUILD_TIME env run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV} - name: Go Release Binaries Large Disk - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -102,14 +102,14 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed-large-disk asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -117,7 +117,7 @@ jobs: release_tag: dev overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed-normal-disk diff --git a/.github/workflows/binaries_release0.yml b/.github/workflows/binaries_release0.yml index e0293747c..3a6cb734d 100644 --- a/.github/workflows/binaries_release0.yml +++ b/.github/workflows/binaries_release0.yml @@ -28,9 +28,9 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -38,13 +38,13 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -52,7 +52,7 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed diff --git a/.github/workflows/binaries_release1.yml b/.github/workflows/binaries_release1.yml index 55287e2b8..d0a51ce8f 100644 --- a/.github/workflows/binaries_release1.yml +++ b/.github/workflows/binaries_release1.yml @@ -28,9 +28,9 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -38,13 +38,13 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -52,7 +52,7 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed diff --git a/.github/workflows/binaries_release2.yml b/.github/workflows/binaries_release2.yml index 83e18092a..09e8fc7ae 100644 --- a/.github/workflows/binaries_release2.yml +++ b/.github/workflows/binaries_release2.yml @@ -28,9 +28,9 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -38,13 +38,13 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -52,7 +52,7 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed diff --git a/.github/workflows/binaries_release3.yml b/.github/workflows/binaries_release3.yml index bb2318835..c96a91cee 100644 --- a/.github/workflows/binaries_release3.yml +++ b/.github/workflows/binaries_release3.yml @@ -28,9 +28,9 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -38,13 +38,13 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} @@ -52,7 +52,7 @@ jobs: overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed diff --git a/.github/workflows/binaries_release4.yml b/.github/workflows/binaries_release4.yml index 8345da4e4..ba98d0f81 100644 --- a/.github/workflows/binaries_release4.yml +++ b/.github/workflows/binaries_release4.yml @@ -28,32 +28,32 @@ jobs: # Steps represent a sequence of tasks that will be executed as part of the job steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} goarch: ${{ matrix.goarch }} overwrite: true - build_flags: -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb + build_flags: -tags elastic,ydb,gocdk,tikv pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_full" - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 + uses: wangyoucao577/go-release-action@16624612d4e2b73de613857a362d294700207fff # v1.22 with: github_token: ${{ secrets.GITHUB_TOKEN }} goos: ${{ matrix.goos }} goarch: ${{ matrix.goarch }} overwrite: true pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 - build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} + build_flags: -tags 5BytesOffset,elastic,ydb,gocdk,tikv + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} # Where to run `go build .` project_path: weed binary_name: weed diff --git a/.github/workflows/binaries_release5.yml b/.github/workflows/binaries_release5.yml deleted file mode 100644 index a22b3b32e..000000000 --- a/.github/workflows/binaries_release5.yml +++ /dev/null @@ -1,59 +0,0 @@ -# This is a basic workflow to help you get started with Actions - -name: "go: build versioned binaries for openbsd" - -on: - push: - tags: - - '*' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -permissions: - contents: read - -jobs: - - build-release-binaries_openbsd: - permissions: - contents: write # for wangyoucao577/go-release-action to upload release assets - runs-on: ubuntu-latest - strategy: - matrix: - goos: [openbsd] - goarch: [amd64, arm, arm64] - - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 - - name: Go Release Binaries Normal Volume Size - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - goos: ${{ matrix.goos }} - goarch: ${{ matrix.goarch }} - overwrite: true - pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 - # build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} - # Where to run `go build .` - project_path: weed - binary_name: weed - asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}" - - name: Go Release Large Disk Binaries - uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - goos: ${{ matrix.goos }} - goarch: ${{ matrix.goarch }} - overwrite: true - pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0 - build_flags: -tags 5BytesOffset # optional, default is - ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}} - # Where to run `go build .` - project_path: weed - binary_name: weed - asset_name: "${{ matrix.goos }}_${{ matrix.goarch }}_large_disk" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index bf0c7dafc..142e4e963 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -3,10 +3,6 @@ name: "Code Scanning - Action" on: pull_request: -concurrency: - group: ${{ github.head_ref }}/codeql - cancel-in-progress: true - jobs: CodeQL-Build: # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest @@ -18,11 +14,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 + uses: actions/checkout@v3 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v4 + uses: github/codeql-action/init@v2 # Override language selection by uncommenting this and choosing your languages with: languages: go @@ -30,7 +26,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below). - name: Autobuild - uses: github/codeql-action/autobuild@v4 + uses: github/codeql-action/autobuild@v2 # โ„น๏ธ Command-line programs to run using the OS shell. # ๐Ÿ“š See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -44,4 +40,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v4 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/container_dev.yml b/.github/workflows/container_dev.yml index dbf5b365d..d8a2312ea 100644 --- a/.github/workflows/container_dev.yml +++ b/.github/workflows/container_dev.yml @@ -16,11 +16,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -33,30 +33,30 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 with: buildkitd-flags: "--debug" - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Login to GHCR if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/container_latest.yml b/.github/workflows/container_latest.yml index ffeabfb01..35dcea714 100644 --- a/.github/workflows/container_latest.yml +++ b/.github/workflows/container_latest.yml @@ -17,11 +17,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -34,30 +34,30 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 with: buildkitd-flags: "--debug" - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Login to GHCR if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: registry: ghcr.io username: ${{ secrets.GHCR_USERNAME }} password: ${{ secrets.GHCR_TOKEN }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/container_release1.yml b/.github/workflows/container_release1.yml index cc1ded0e3..1bcf768cd 100644 --- a/.github/workflows/container_release1.yml +++ b/.github/workflows/container_release1.yml @@ -16,11 +16,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -34,20 +34,20 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/container_release2.yml b/.github/workflows/container_release2.yml index 5debf0bf8..c58bb2b40 100644 --- a/.github/workflows/container_release2.yml +++ b/.github/workflows/container_release2.yml @@ -17,11 +17,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -35,20 +35,20 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/container_release3.yml b/.github/workflows/container_release3.yml index 5fbeb5357..5ff6cd497 100644 --- a/.github/workflows/container_release3.yml +++ b/.github/workflows/container_release3.yml @@ -17,11 +17,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -35,26 +35,24 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} file: ./docker/Dockerfile.rocksdb_large - build-args: | - BRANCH=${{ github.sha }} platforms: linux/amd64 tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/.github/workflows/container_release4.yml b/.github/workflows/container_release4.yml index 7fcaf12c6..f9f88fdcf 100644 --- a/.github/workflows/container_release4.yml +++ b/.github/workflows/container_release4.yml @@ -16,11 +16,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -34,25 +34,25 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} file: ./docker/Dockerfile.go_build - build-args: TAGS=elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb + build-args: TAGS=elastic,ydb,gocdk,tikv platforms: linux/amd64 tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/.github/workflows/container_release5.yml b/.github/workflows/container_release5.yml index fd3cb75d2..dd97bde31 100644 --- a/.github/workflows/container_release5.yml +++ b/.github/workflows/container_release5.yml @@ -16,11 +16,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Docker meta id: docker_meta - uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3 + uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v3 with: images: | chrislusf/seaweedfs @@ -34,25 +34,25 @@ jobs: org.opencontainers.image.vendor=Chris Lu - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 + uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v1 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 + uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v1 - name: Login to Docker Hub if: github.event_name != 'pull_request' - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 + uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v2 with: context: ./docker push: ${{ github.event_name != 'pull_request' }} file: ./docker/Dockerfile.go_build - build-args: TAGS=5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb + build-args: TAGS=5BytesOffset,elastic,ydb,gocdk,tikv platforms: linux/amd64 tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/.github/workflows/container_rocksdb_version.yml b/.github/workflows/container_rocksdb_version.yml deleted file mode 100644 index cd733fe04..000000000 --- a/.github/workflows/container_rocksdb_version.yml +++ /dev/null @@ -1,110 +0,0 @@ -name: "docker: build rocksdb image by version" - -on: - workflow_dispatch: - inputs: - rocksdb_version: - description: 'RocksDB git tag or branch to build (e.g. v10.5.1)' - required: true - default: 'v10.5.1' - seaweedfs_ref: - description: 'SeaweedFS git tag, branch, or commit to build' - required: true - default: 'master' - image_tag: - description: 'Optional Docker tag suffix (defaults to rocksdb__seaweedfs_)' - required: false - default: '' - -permissions: - contents: read - -jobs: - build-rocksdb-image: - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 - - - name: Prepare Docker tag - id: tag - env: - ROCKSDB_VERSION_INPUT: ${{ inputs.rocksdb_version }} - SEAWEEDFS_REF_INPUT: ${{ inputs.seaweedfs_ref }} - CUSTOM_TAG_INPUT: ${{ inputs.image_tag }} - run: | - set -euo pipefail - sanitize() { - local value="$1" - value="${value,,}" - value="${value// /-}" - value="${value//[^a-z0-9_.-]/-}" - value="${value#-}" - value="${value%-}" - printf '%s' "$value" - } - version="${ROCKSDB_VERSION_INPUT}" - seaweed="${SEAWEEDFS_REF_INPUT}" - tag="${CUSTOM_TAG_INPUT}" - if [ -z "$version" ]; then - echo "RocksDB version input is required." >&2 - exit 1 - fi - if [ -z "$seaweed" ]; then - echo "SeaweedFS ref input is required." >&2 - exit 1 - fi - sanitized_version="$(sanitize "$version")" - if [ -z "$sanitized_version" ]; then - echo "Unable to sanitize RocksDB version '$version'." >&2 - exit 1 - fi - sanitized_seaweed="$(sanitize "$seaweed")" - if [ -z "$sanitized_seaweed" ]; then - echo "Unable to sanitize SeaweedFS ref '$seaweed'." >&2 - exit 1 - fi - if [ -z "$tag" ]; then - tag="rocksdb_${sanitized_version}_seaweedfs_${sanitized_seaweed}" - fi - tag="${tag,,}" - tag="${tag// /-}" - tag="${tag//[^a-z0-9_.-]/-}" - tag="${tag#-}" - tag="${tag%-}" - if [ -z "$tag" ]; then - echo "Resulting Docker tag is empty." >&2 - exit 1 - fi - echo "docker_tag=$tag" >> "$GITHUB_OUTPUT" - echo "full_image=chrislusf/seaweedfs:$tag" >> "$GITHUB_OUTPUT" - echo "seaweedfs_ref=$seaweed" >> "$GITHUB_OUTPUT" - - - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1 - - - name: Login to Docker Hub - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Build and push image - uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2 - with: - context: ./docker - push: true - file: ./docker/Dockerfile.rocksdb_large - build-args: | - ROCKSDB_VERSION=${{ inputs.rocksdb_version }} - BRANCH=${{ inputs.seaweedfs_ref }} - platforms: linux/amd64 - tags: ${{ steps.tag.outputs.full_image }} - labels: | - org.opencontainers.image.title=seaweedfs - org.opencontainers.image.description=SeaweedFS is a distributed storage system for blobs, objects, files, and data lake, to store and serve billions of files fast! - org.opencontainers.image.vendor=Chris Lu diff --git a/.github/workflows/deploy_telemetry.yml b/.github/workflows/deploy_telemetry.yml deleted file mode 100644 index 511199b56..000000000 --- a/.github/workflows/deploy_telemetry.yml +++ /dev/null @@ -1,171 +0,0 @@ -# This workflow will build and deploy the SeaweedFS telemetry server -# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go - -name: Deploy Telemetry Server - -on: - workflow_dispatch: - inputs: - setup: - description: 'Run first-time server setup' - required: true - type: boolean - default: false - deploy: - description: 'Deploy telemetry server to remote server' - required: true - type: boolean - default: false - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version: '1.24' - - - name: Build Telemetry Server - if: github.event_name == 'workflow_dispatch' && inputs.deploy - run: | - go mod tidy - echo "Building telemetry server..." - GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go - ls -la telemetry-server - echo "Build completed successfully" - - - name: First-time Server Setup - if: github.event_name == 'workflow_dispatch' && inputs.setup - env: - SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }} - REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }} - REMOTE_USER: ${{ secrets.TELEMETRY_USER }} - run: | - mkdir -p ~/.ssh - echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key - chmod 600 ~/.ssh/deploy_key - echo "Host *" > ~/.ssh/config - echo " StrictHostKeyChecking no" >> ~/.ssh/config - - # Create all required directories with proper permissions - ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST " - mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \ - chmod 755 ~/seaweedfs-telemetry/logs && \ - chmod 755 ~/seaweedfs-telemetry/data && \ - touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \ - chmod 644 ~/seaweedfs-telemetry/logs/*.log" - - # Create systemd service file - echo " - [Unit] - Description=SeaweedFS Telemetry Server - After=network.target - - [Service] - Type=simple - User=$REMOTE_USER - WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry - ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353 - Restart=always - RestartSec=5 - StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log - StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log - - [Install] - WantedBy=multi-user.target" > telemetry.service - - # Setup logrotate configuration - echo "# SeaweedFS Telemetry service log rotation - /home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log { - daily - rotate 30 - compress - delaycompress - missingok - notifempty - create 644 $REMOTE_USER $REMOTE_USER - postrotate - systemctl restart telemetry.service - endscript - }" > telemetry_logrotate - - # Copy configuration files - scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/ - scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/ - - # Copy and install service and logrotate files - scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/ - ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST " - sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \ - sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \ - sudo systemctl daemon-reload && \ - sudo systemctl enable telemetry.service" - - echo "โœ… First-time setup completed successfully!" - echo "๐Ÿ“‹ Next step: Run the deployment to install the telemetry server binary" - echo " 1. Go to GitHub Actions โ†’ Deploy Telemetry Server" - echo " 2. Click 'Run workflow'" - echo " 3. Check 'Deploy telemetry server to remote server'" - echo " 4. Click 'Run workflow'" - - rm -f ~/.ssh/deploy_key - - - name: Deploy Telemetry Server to Remote Server - if: github.event_name == 'workflow_dispatch' && inputs.deploy - env: - SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }} - REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }} - REMOTE_USER: ${{ secrets.TELEMETRY_USER }} - run: | - mkdir -p ~/.ssh - echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key - chmod 600 ~/.ssh/deploy_key - echo "Host *" > ~/.ssh/config - echo " StrictHostKeyChecking no" >> ~/.ssh/config - - # Create temp directory and copy binary - ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp" - scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/ - - # Copy updated configuration files - scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/ - scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/ - - # Check if service exists and deploy accordingly - ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST " - if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then - echo 'Service exists, performing update...' - sudo systemctl stop telemetry.service - mkdir -p ~/seaweedfs-telemetry/bin - mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/ - chmod +x ~/seaweedfs-telemetry/bin/telemetry-server - sudo systemctl start telemetry.service - sudo systemctl status telemetry.service - else - echo 'ERROR: telemetry.service not found!' - echo 'Please run the first-time setup before deploying.' - echo 'Go to GitHub Actions โ†’ Deploy Telemetry Server โ†’ Run workflow โ†’ Check \"Run first-time server setup\"' - exit 1 - fi" - - # Verify deployment - ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST " - echo 'Waiting for service to start...' - sleep 5 - curl -f http://localhost:8353/health || echo 'Health check failed'" - - rm -f ~/.ssh/deploy_key - - - name: Notify Deployment Status - if: always() - run: | - if [ "${{ job.status }}" == "success" ]; then - echo "โœ… Telemetry server deployment successful" - echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353" - echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics" - else - echo "โŒ Telemetry server deployment failed" - fi \ No newline at end of file diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml index e72edcd07..b84b27d15 100644 --- a/.github/workflows/depsreview.yml +++ b/.github/workflows/depsreview.yml @@ -9,6 +9,6 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 + uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748 - name: 'Dependency Review' - uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a + uses: actions/dependency-review-action@1c59cdf2a9c7f29c90e8da32237eb04b81bad9f0 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml deleted file mode 100644 index 27b8ace8b..000000000 --- a/.github/workflows/e2e.yml +++ /dev/null @@ -1,144 +0,0 @@ -name: "End to End" - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -concurrency: - group: ${{ github.head_ref }}/e2e - cancel-in-progress: true - -permissions: - contents: read - -defaults: - run: - working-directory: docker - -jobs: - e2e: - name: FUSE Mount - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - name: Set up Go 1.x - uses: actions/setup-go@c0137caad775660c0844396c52da96e560aba63d # v2 - with: - go-version: ^1.13 - id: go - - - name: Check out code into the Go module directory - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Cache Docker layers - uses: actions/cache@v4 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-e2e-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx-e2e- - - - name: Install dependencies - run: | - # Use faster mirrors and install with timeout - echo "deb http://azure.archive.ubuntu.com/ubuntu/ $(lsb_release -cs) main restricted universe multiverse" | sudo tee /etc/apt/sources.list - echo "deb http://azure.archive.ubuntu.com/ubuntu/ $(lsb_release -cs)-updates main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list - - sudo apt-get update --fix-missing - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends fuse - - # Verify FUSE installation - echo "FUSE version: $(fusermount --version 2>&1 || echo 'fusermount not found')" - echo "FUSE device: $(ls -la /dev/fuse 2>&1 || echo '/dev/fuse not found')" - - - name: Start SeaweedFS - timeout-minutes: 10 - run: | - # Enable Docker buildkit for better caching - export DOCKER_BUILDKIT=1 - export COMPOSE_DOCKER_CLI_BUILD=1 - - # Build with retry logic - for i in {1..3}; do - echo "Build attempt $i/3" - if make build_e2e; then - echo "Build successful on attempt $i" - break - elif [ $i -eq 3 ]; then - echo "Build failed after 3 attempts" - exit 1 - else - echo "Build attempt $i failed, retrying in 30 seconds..." - sleep 30 - fi - done - - # Start services with wait - docker compose -f ./compose/e2e-mount.yml up --wait - - - name: Run FIO 4k - timeout-minutes: 15 - run: | - echo "Starting FIO at: $(date)" - # Concurrent r/w - echo 'Run randrw with size=16M bs=4k' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --group_reporting --runtime=30 --time_based=1 - - echo "Verify FIO at: $(date)" - # Verified write - echo 'Run randwrite with size=16M bs=4k' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=4k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1 - - - name: Run FIO 128k - timeout-minutes: 15 - run: | - echo "Starting FIO at: $(date)" - # Concurrent r/w - echo 'Run randrw with size=16M bs=128k' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 - - echo "Verify FIO at: $(date)" - # Verified write - echo 'Run randwrite with size=16M bs=128k' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=128k --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1 - - - name: Run FIO 1MB - timeout-minutes: 15 - run: | - echo "Starting FIO at: $(date)" - # Concurrent r/w - echo 'Run randrw with size=16M bs=1m' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randrw --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 - - echo "Verify FIO at: $(date)" - # Verified write - echo 'Run randwrite with size=16M bs=1m' - docker compose -f ./compose/e2e-mount.yml exec mount timeout -k5 60 fio --name=fiotest --filename=/mnt/seaweedfs/fiotest --size=16M --rw=randwrite --bs=1m --direct=1 --numjobs=8 --ioengine=libaio --iodepth=32 --group_reporting --runtime=30 --time_based=1 --do_verify=0 --verify=crc32c --verify_backlog=1 - - - name: Save logs - if: always() - run: | - docker compose -f ./compose/e2e-mount.yml logs > output.log - echo 'Showing last 500 log lines of mount service:' - docker compose -f ./compose/e2e-mount.yml logs --tail 500 mount - - - name: Check for data races - if: always() - continue-on-error: true # TODO: remove this comment to enable build failure on data races (after all are fixed) - run: grep -A50 'DATA RACE' output.log && exit 1 || exit 0 - - - name: Archive logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: output-logs - path: docker/output.log - - - name: Cleanup - if: always() - run: docker compose -f ./compose/e2e-mount.yml down --volumes --remove-orphans --rmi all diff --git a/.github/workflows/fuse-integration.yml b/.github/workflows/fuse-integration.yml deleted file mode 100644 index cb68e3343..000000000 --- a/.github/workflows/fuse-integration.yml +++ /dev/null @@ -1,234 +0,0 @@ -name: "FUSE Integration Tests" - -on: - push: - branches: [ master, main ] - paths: - - 'weed/**' - - 'test/fuse_integration/**' - - '.github/workflows/fuse-integration.yml' - pull_request: - branches: [ master, main ] - paths: - - 'weed/**' - - 'test/fuse_integration/**' - - '.github/workflows/fuse-integration.yml' - -concurrency: - group: ${{ github.head_ref }}/fuse-integration - cancel-in-progress: true - -permissions: - contents: read - -env: - GO_VERSION: '1.24' - TEST_TIMEOUT: '45m' - -jobs: - fuse-integration: - name: FUSE Integration Testing - runs-on: ubuntu-22.04 - timeout-minutes: 50 - - steps: - - name: Checkout code - uses: actions/checkout@v5 - - - name: Set up Go ${{ env.GO_VERSION }} - uses: actions/setup-go@v6 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Install FUSE and dependencies - run: | - sudo apt-get update - sudo apt-get install -y fuse libfuse-dev - # Verify FUSE installation - fusermount --version || true - ls -la /dev/fuse || true - - - name: Build SeaweedFS - run: | - cd weed - go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v . - chmod +x weed - # Verify binary - ./weed version - - - name: Prepare FUSE Integration Tests - run: | - # Create isolated test directory to avoid Go module conflicts - mkdir -p /tmp/seaweedfs-fuse-tests - - # Copy only the working test files to avoid Go module conflicts - # These are the files we've verified work without package name issues - cp test/fuse_integration/simple_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "โš ๏ธ simple_test.go not found" - cp test/fuse_integration/working_demo_test.go /tmp/seaweedfs-fuse-tests/ 2>/dev/null || echo "โš ๏ธ working_demo_test.go not found" - - # Note: Other test files (framework.go, basic_operations_test.go, etc.) - # have Go module conflicts and are skipped until resolved - - echo "๐Ÿ“ Working test files copied:" - ls -la /tmp/seaweedfs-fuse-tests/*.go 2>/dev/null || echo "โ„น๏ธ No test files found" - - # Initialize Go module in isolated directory - cd /tmp/seaweedfs-fuse-tests - go mod init seaweedfs-fuse-tests - go mod tidy - - # Verify setup - echo "โœ… FUSE integration test environment prepared" - ls -la /tmp/seaweedfs-fuse-tests/ - - echo "" - echo "โ„น๏ธ Current Status: Running working subset of FUSE tests" - echo " โ€ข simple_test.go: Package structure verification" - echo " โ€ข working_demo_test.go: Framework capability demonstration" - echo " โ€ข Full framework: Available in test/fuse_integration/ (module conflicts pending resolution)" - - - name: Run FUSE Integration Tests - run: | - cd /tmp/seaweedfs-fuse-tests - - echo "๐Ÿงช Running FUSE integration tests..." - echo "============================================" - - # Run available working test files - TESTS_RUN=0 - - if [ -f "simple_test.go" ]; then - echo "๐Ÿ“‹ Running simple_test.go..." - go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go - TESTS_RUN=$((TESTS_RUN + 1)) - fi - - if [ -f "working_demo_test.go" ]; then - echo "๐Ÿ“‹ Running working_demo_test.go..." - go test -v -timeout=${{ env.TEST_TIMEOUT }} working_demo_test.go - TESTS_RUN=$((TESTS_RUN + 1)) - fi - - # Run combined test if multiple files exist - if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then - echo "๐Ÿ“‹ Running combined tests..." - go test -v -timeout=${{ env.TEST_TIMEOUT }} simple_test.go working_demo_test.go - fi - - if [ $TESTS_RUN -eq 0 ]; then - echo "โš ๏ธ No working test files found, running module verification only" - go version - go mod verify - else - echo "โœ… Successfully ran $TESTS_RUN test file(s)" - fi - - echo "============================================" - echo "โœ… FUSE integration tests completed" - - - name: Run Extended Framework Validation - run: | - cd /tmp/seaweedfs-fuse-tests - - echo "๐Ÿ” Running extended framework validation..." - echo "============================================" - - # Test individual components (only run tests that exist) - if [ -f "simple_test.go" ]; then - echo "Testing simple verification..." - go test -v simple_test.go - fi - - if [ -f "working_demo_test.go" ]; then - echo "Testing framework demo..." - go test -v working_demo_test.go - fi - - # Test combined execution if both files exist - if [ -f "simple_test.go" ] && [ -f "working_demo_test.go" ]; then - echo "Testing combined execution..." - go test -v simple_test.go working_demo_test.go - elif [ -f "simple_test.go" ] || [ -f "working_demo_test.go" ]; then - echo "โœ… Individual tests already validated above" - else - echo "โš ๏ธ No working test files found for combined testing" - fi - - echo "============================================" - echo "โœ… Extended validation completed" - - - name: Generate Test Coverage Report - run: | - cd /tmp/seaweedfs-fuse-tests - - echo "๐Ÿ“Š Generating test coverage report..." - go test -v -coverprofile=coverage.out . - go tool cover -html=coverage.out -o coverage.html - - echo "Coverage report generated: coverage.html" - - - name: Verify SeaweedFS Binary Integration - run: | - # Test that SeaweedFS binary is accessible from test environment - WEED_BINARY=$(pwd)/weed/weed - - if [ -f "$WEED_BINARY" ]; then - echo "โœ… SeaweedFS binary found at: $WEED_BINARY" - $WEED_BINARY version - echo "Binary is ready for full integration testing" - else - echo "โŒ SeaweedFS binary not found" - exit 1 - fi - - - name: Upload Test Artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: fuse-integration-test-results - path: | - /tmp/seaweedfs-fuse-tests/coverage.out - /tmp/seaweedfs-fuse-tests/coverage.html - /tmp/seaweedfs-fuse-tests/*.log - retention-days: 7 - - - name: Test Summary - if: always() - run: | - echo "## ๐Ÿš€ FUSE Integration Test Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Framework Status" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Framework Design**: Complete and validated" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Working Tests**: Core framework demonstration functional" >> $GITHUB_STEP_SUMMARY - echo "- โš ๏ธ **Full Framework**: Available but requires Go module resolution" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **CI/CD Integration**: Automated testing pipeline established" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Test Capabilities" >> $GITHUB_STEP_SUMMARY - echo "- ๐Ÿ“ **File Operations**: Create, read, write, delete, permissions" >> $GITHUB_STEP_SUMMARY - echo "- ๐Ÿ“‚ **Directory Operations**: Create, list, delete, nested structures" >> $GITHUB_STEP_SUMMARY - echo "- ๐Ÿ“Š **Large Files**: Multi-megabyte file handling" >> $GITHUB_STEP_SUMMARY - echo "- ๐Ÿ”„ **Concurrent Operations**: Multi-threaded stress testing" >> $GITHUB_STEP_SUMMARY - echo "- โš ๏ธ **Error Scenarios**: Comprehensive error handling validation" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Comparison with Current Tests" >> $GITHUB_STEP_SUMMARY - echo "| Aspect | Current (FIO) | This Framework |" >> $GITHUB_STEP_SUMMARY - echo "|--------|---------------|----------------|" >> $GITHUB_STEP_SUMMARY - echo "| **Scope** | Performance only | Functional + Performance |" >> $GITHUB_STEP_SUMMARY - echo "| **Operations** | Read/Write only | All FUSE operations |" >> $GITHUB_STEP_SUMMARY - echo "| **Concurrency** | Single-threaded | Multi-threaded stress tests |" >> $GITHUB_STEP_SUMMARY - echo "| **Automation** | Manual setup | Fully automated |" >> $GITHUB_STEP_SUMMARY - echo "| **Validation** | Speed metrics | Correctness + Performance |" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Current Working Tests" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Framework Structure**: Package and module verification" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Configuration Management**: Test config validation" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **File Operations Demo**: Basic file create/read/write simulation" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Large File Handling**: 1MB+ file processing demonstration" >> $GITHUB_STEP_SUMMARY - echo "- โœ… **Concurrency Simulation**: Multi-file operation testing" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Next Steps" >> $GITHUB_STEP_SUMMARY - echo "1. **Module Resolution**: Fix Go package conflicts for full framework" >> $GITHUB_STEP_SUMMARY - echo "2. **SeaweedFS Integration**: Connect with real cluster for end-to-end testing" >> $GITHUB_STEP_SUMMARY - echo "3. **Performance Benchmarks**: Add performance regression testing" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "๐Ÿ“ˆ **Total Framework Size**: ~1,500 lines of comprehensive testing infrastructure" >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 60ccfe4ae..8631cd9d2 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -21,20 +21,20 @@ jobs: steps: - name: Set up Go 1.x - uses: actions/setup-go@c0137caad775660c0844396c52da96e560aba63d # v2 + uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a # v2 with: go-version: ^1.13 id: go - name: Check out code into the Go module directory - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2 + uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2 - name: Get dependencies run: | cd weed; go get -v -t -d ./... - name: Build - run: cd weed; go build -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v . + run: cd weed; go build -tags "elastic gocdk sqlite ydb tikv" -v . - name: Test - run: cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./... + run: cd weed; go test -tags "elastic gocdk sqlite ydb tikv" -v ./... diff --git a/.github/workflows/helm_chart_release.yml b/.github/workflows/helm_chart_release.yml deleted file mode 100644 index 66cfae398..000000000 --- a/.github/workflows/helm_chart_release.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: "helm: publish charts" -on: - push: - tags: - - '*' - -permissions: - contents: write - pages: write - -jobs: - release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 - - name: Publish Helm charts - uses: stefanprodan/helm-gh-pages@v1.7.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} - charts_dir: k8s/charts - target_dir: helm - branch: gh-pages - helm_version: "3.18.4" diff --git a/.github/workflows/helm_ci.yml b/.github/workflows/helm_ci.yml deleted file mode 100644 index 39f5d9181..000000000 --- a/.github/workflows/helm_ci.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: "helm: lint and test charts" - -on: - push: - branches: [ master ] - paths: ['k8s/**'] - pull_request: - branches: [ master ] - paths: ['k8s/**'] - -permissions: - contents: read - -jobs: - lint-test: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 - with: - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@v4 - with: - version: v3.18.4 - - - uses: actions/setup-python@v6 - with: - python-version: '3.9' - check-latest: true - - - name: Set up chart-testing - uses: helm/chart-testing-action@v2.7.0 - - - name: Run chart-testing (list-changed) - id: list-changed - run: | - changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }} --chart-dirs k8s/charts) - if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" - fi - - - name: Run chart-testing (lint) - run: ct lint --target-branch ${{ github.event.repository.default_branch }} --all --validate-maintainers=false --chart-dirs k8s/charts - - - name: Create kind cluster - uses: helm/kind-action@v1.12.0 - - - name: Run chart-testing (install) - run: ct install --target-branch ${{ github.event.repository.default_branch }} --all --chart-dirs k8s/charts diff --git a/.github/workflows/kafka-quicktest.yml b/.github/workflows/kafka-quicktest.yml deleted file mode 100644 index 2348caa56..000000000 --- a/.github/workflows/kafka-quicktest.yml +++ /dev/null @@ -1,124 +0,0 @@ -name: "Kafka Quick Test (Load Test with Schema Registry)" - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - workflow_dispatch: # Allow manual trigger - -concurrency: - group: ${{ github.head_ref }}/kafka-quicktest - cancel-in-progress: true - -permissions: - contents: read - -jobs: - kafka-client-quicktest: - name: Kafka Client Load Test (Quick) - runs-on: ubuntu-latest - timeout-minutes: 15 - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - cache: true - cache-dependency-path: | - **/go.sum - id: go - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Install dependencies - run: | - # Ensure make is available - sudo apt-get update -qq - sudo apt-get install -y make - - - name: Validate test setup - working-directory: test/kafka/kafka-client-loadtest - run: | - make validate-setup - - - name: Run quick-test - working-directory: test/kafka/kafka-client-loadtest - run: | - # Run the quick-test target which includes: - # 1. Building the gateway - # 2. Starting all services (SeaweedFS, MQ broker, Schema Registry) - # 3. Registering Avro schemas - # 4. Running a 1-minute load test with Avro messages - # Override GOARCH to build for AMD64 (GitHub Actions runners are x86_64) - GOARCH=amd64 make quick-test - env: - # Docker Compose settings - COMPOSE_HTTP_TIMEOUT: 300 - DOCKER_CLIENT_TIMEOUT: 300 - # Test parameters (set by quick-test, but can override) - TEST_DURATION: 60s - PRODUCER_COUNT: 1 - CONSUMER_COUNT: 1 - MESSAGE_RATE: 10 - VALUE_TYPE: avro - - - name: Show test results - if: always() - working-directory: test/kafka/kafka-client-loadtest - run: | - echo "=========================================" - echo "Test Results" - echo "=========================================" - make show-results || echo "Could not retrieve results" - - - name: Show service logs on failure - if: failure() - working-directory: test/kafka/kafka-client-loadtest - run: | - echo "=========================================" - echo "Service Logs" - echo "=========================================" - - echo "Checking running containers..." - docker compose ps || true - - echo "=========================================" - echo "Master Logs" - echo "=========================================" - docker compose logs --tail=100 seaweedfs-master 2>&1 || echo "No master logs available" - - echo "=========================================" - echo "MQ Broker Logs (Last 100 lines)" - echo "=========================================" - docker compose logs --tail=100 seaweedfs-mq-broker 2>&1 || echo "No broker logs available" - - echo "=========================================" - echo "Kafka Gateway Logs (FULL - Critical for debugging)" - echo "=========================================" - docker compose logs kafka-gateway 2>&1 || echo "ERROR: Could not retrieve kafka-gateway logs" - - echo "=========================================" - echo "Schema Registry Logs (FULL)" - echo "=========================================" - docker compose logs schema-registry 2>&1 || echo "ERROR: Could not retrieve schema-registry logs" - - echo "=========================================" - echo "Load Test Logs" - echo "=========================================" - docker compose logs --tail=100 kafka-client-loadtest 2>&1 || echo "No loadtest logs available" - - - name: Cleanup - if: always() - working-directory: test/kafka/kafka-client-loadtest - run: | - # Stop containers first - docker compose --profile loadtest --profile monitoring down -v --remove-orphans || true - # Clean up data with sudo to handle Docker root-owned files - sudo rm -rf data/* || true - # Clean up binary - rm -f weed-linux-* || true diff --git a/.github/workflows/kafka-tests.yml b/.github/workflows/kafka-tests.yml deleted file mode 100644 index cc4ef0348..000000000 --- a/.github/workflows/kafka-tests.yml +++ /dev/null @@ -1,814 +0,0 @@ -name: "Kafka Gateway Tests" - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -concurrency: - group: ${{ github.head_ref }}/kafka-tests - cancel-in-progress: true - -# Force different runners for better isolation -env: - FORCE_RUNNER_SEPARATION: true - -permissions: - contents: read - -jobs: - kafka-unit-tests: - name: Kafka Unit Tests - runs-on: ubuntu-latest - timeout-minutes: 5 - strategy: - fail-fast: false - matrix: - container-id: [unit-tests-1] - container: - image: golang:1.24-alpine - options: --cpus 1.0 --memory 1g --hostname kafka-unit-${{ matrix.container-id }} - env: - GOMAXPROCS: 1 - CGO_ENABLED: 0 - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - id: go - - - name: Check out code - uses: actions/checkout@v5 - - - name: Setup Container Environment - run: | - apk add --no-cache git - ulimit -n 1024 || echo "Warning: Could not set file descriptor limit" - - - name: Get dependencies - run: | - cd test/kafka - go mod download - - - name: Run Kafka Gateway Unit Tests - run: | - cd test/kafka - # Set process limits for container isolation - ulimit -n 512 || echo "Warning: Could not set file descriptor limit" - ulimit -u 100 || echo "Warning: Could not set process limit" - go test -v -timeout 10s ./unit/... - - kafka-integration-tests: - name: Kafka Integration Tests (Critical) - runs-on: ubuntu-latest - timeout-minutes: 5 - strategy: - fail-fast: false - matrix: - container-id: [integration-1] - container: - image: golang:1.24-alpine - options: --cpus 2.0 --memory 2g --ulimit nofile=1024:1024 --hostname kafka-integration-${{ matrix.container-id }} - env: - GOMAXPROCS: 2 - CGO_ENABLED: 0 - KAFKA_TEST_ISOLATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - id: go - - - name: Check out code - uses: actions/checkout@v5 - - - name: Setup Integration Container Environment - run: | - apk add --no-cache git procps - ulimit -n 2048 || echo "Warning: Could not set file descriptor limit" - - - name: Get dependencies - run: | - cd test/kafka - go mod download - - - name: Run Integration Tests - run: | - cd test/kafka - # Higher limits for integration tests - ulimit -n 1024 || echo "Warning: Could not set file descriptor limit" - ulimit -u 200 || echo "Warning: Could not set process limit" - go test -v -timeout 90s ./integration/... - env: - GOMAXPROCS: 2 - - kafka-e2e-tests: - name: Kafka End-to-End Tests (with SMQ) - runs-on: ubuntu-latest - timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - container-id: [e2e-1] - container: - image: golang:1.24-alpine - options: --cpus 2.0 --memory 2g --hostname kafka-e2e-${{ matrix.container-id }} - env: - GOMAXPROCS: 2 - CGO_ENABLED: 0 - KAFKA_E2E_ISOLATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - cache: true - cache-dependency-path: | - **/go.sum - id: go - - - name: Setup E2E Container Environment - run: | - apk add --no-cache git procps curl netcat-openbsd - ulimit -n 2048 || echo "Warning: Could not set file descriptor limit" - - - name: Warm Go module cache - run: | - # Warm cache for root module - go mod download || true - # Warm cache for kafka test module - cd test/kafka - go mod download || true - - - name: Get dependencies - run: | - cd test/kafka - # Use go mod download with timeout to prevent hanging - timeout 90s go mod download || echo "Warning: Dependency download timed out, continuing with cached modules" - - - name: Build and start SeaweedFS MQ - run: | - set -e - cd $GITHUB_WORKSPACE - # Build weed binary - go build -o /usr/local/bin/weed ./weed - # Start SeaweedFS components with MQ brokers - export WEED_DATA_DIR=/tmp/seaweedfs-e2e-$RANDOM - mkdir -p "$WEED_DATA_DIR" - - # Start SeaweedFS server (master, volume, filer) with consistent IP advertising - nohup weed -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="0.0.0.0" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-server.log 2>&1 & - - # Wait for master to be ready - for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "SeaweedFS master HTTP is up"; break - fi - echo "Waiting for SeaweedFS master HTTP... ($i/30)"; sleep 1 - done - - # Wait for master gRPC to be ready (this is what broker discovery uses) - echo "Waiting for master gRPC port..." - for i in $(seq 1 30); do - if nc -z 127.0.0.1 19333; then - echo "โœ“ SeaweedFS master gRPC is up (port 19333)" - break - fi - echo " Waiting for master gRPC... ($i/30)"; sleep 1 - done - - # Give server time to initialize all components including gRPC services - echo "Waiting for SeaweedFS components to initialize..." - sleep 15 - - # Additional wait specifically for gRPC services to be ready for streaming - echo "Allowing extra time for master gRPC streaming services to initialize..." - sleep 10 - - # Start MQ broker with maximum verbosity for debugging - echo "Starting MQ broker..." - nohup weed -v 3 mq.broker \ - -master="127.0.0.1:9333" \ - -ip="127.0.0.1" \ - -port=17777 \ - -logFlushInterval=0 \ - > /tmp/weed-mq-broker.log 2>&1 & - - # Wait for broker to be ready with better error reporting - sleep 15 - broker_ready=false - for i in $(seq 1 20); do - if nc -z 127.0.0.1 17777; then - echo "SeaweedFS MQ broker is up" - broker_ready=true - break - fi - echo "Waiting for MQ broker... ($i/20)"; sleep 1 - done - - # Give broker additional time to register with master - if [ "$broker_ready" = true ]; then - echo "Allowing broker to register with master..." - sleep 30 - - # Check if broker is properly registered by querying cluster nodes - echo "Cluster status after broker registration:" - curl -s "http://127.0.0.1:9333/cluster/status" || echo "Could not check cluster status" - - echo "Checking cluster topology (includes registered components):" - curl -s "http://127.0.0.1:9333/dir/status" | head -20 || echo "Could not check dir status" - - echo "Verifying broker discovery via master client debug:" - echo "If broker registration is successful, it should appear in dir status" - - echo "Testing gRPC connectivity with weed binary:" - echo "This simulates what the gateway does during broker discovery..." - timeout 10s weed shell -master=127.0.0.1:9333 -filer=127.0.0.1:8888 > /tmp/shell-test.log 2>&1 || echo "weed shell test completed or timed out - checking logs..." - echo "Shell test results:" - cat /tmp/shell-test.log 2>/dev/null | head -10 || echo "No shell test logs" - fi - - # Check if broker failed to start and show logs - if [ "$broker_ready" = false ]; then - echo "ERROR: MQ broker failed to start. Broker logs:" - cat /tmp/weed-mq-broker.log || echo "No broker logs found" - echo "Server logs:" - tail -20 /tmp/weed-server.log || echo "No server logs found" - exit 1 - fi - - - name: Run End-to-End Tests - run: | - cd test/kafka - # Higher limits for E2E tests - ulimit -n 1024 || echo "Warning: Could not set file descriptor limit" - ulimit -u 200 || echo "Warning: Could not set process limit" - - # Allow additional time for all background processes to settle - echo "Allowing additional settlement time for SeaweedFS ecosystem..." - sleep 15 - - # Run tests and capture result - if ! go test -v -timeout 180s ./e2e/...; then - echo "=========================================" - echo "Tests failed! Showing debug information:" - echo "=========================================" - echo "Server logs (last 50 lines):" - tail -50 /tmp/weed-server.log || echo "No server logs" - echo "=========================================" - echo "Broker logs (last 50 lines):" - tail -50 /tmp/weed-mq-broker.log || echo "No broker logs" - echo "=========================================" - exit 1 - fi - env: - GOMAXPROCS: 2 - SEAWEEDFS_MASTERS: 127.0.0.1:9333 - - kafka-consumer-group-tests: - name: Kafka Consumer Group Tests (Highly Isolated) - runs-on: ubuntu-latest - timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - container-id: [consumer-group-1] - container: - image: golang:1.24-alpine - options: --cpus 1.0 --memory 2g --ulimit nofile=512:512 --hostname kafka-consumer-${{ matrix.container-id }} - env: - GOMAXPROCS: 1 - CGO_ENABLED: 0 - KAFKA_CONSUMER_ISOLATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - cache: true - cache-dependency-path: | - **/go.sum - id: go - - - name: Setup Consumer Group Container Environment - run: | - apk add --no-cache git procps curl netcat-openbsd - ulimit -n 256 || echo "Warning: Could not set file descriptor limit" - - - name: Warm Go module cache - run: | - # Warm cache for root module - go mod download || true - # Warm cache for kafka test module - cd test/kafka - go mod download || true - - - name: Get dependencies - run: | - cd test/kafka - # Use go mod download with timeout to prevent hanging - timeout 90s go mod download || echo "Warning: Dependency download timed out, continuing with cached modules" - - - name: Build and start SeaweedFS MQ - run: | - set -e - cd $GITHUB_WORKSPACE - # Build weed binary - go build -o /usr/local/bin/weed ./weed - # Start SeaweedFS components with MQ brokers - export WEED_DATA_DIR=/tmp/seaweedfs-mq-$RANDOM - mkdir -p "$WEED_DATA_DIR" - - # Start SeaweedFS server (master, volume, filer) with consistent IP advertising - nohup weed -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="0.0.0.0" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-server.log 2>&1 & - - # Wait for master to be ready - for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "SeaweedFS master HTTP is up"; break - fi - echo "Waiting for SeaweedFS master HTTP... ($i/30)"; sleep 1 - done - - # Wait for master gRPC to be ready (this is what broker discovery uses) - echo "Waiting for master gRPC port..." - for i in $(seq 1 30); do - if nc -z 127.0.0.1 19333; then - echo "โœ“ SeaweedFS master gRPC is up (port 19333)" - break - fi - echo " Waiting for master gRPC... ($i/30)"; sleep 1 - done - - # Give server time to initialize all components including gRPC services - echo "Waiting for SeaweedFS components to initialize..." - sleep 15 - - # Additional wait specifically for gRPC services to be ready for streaming - echo "Allowing extra time for master gRPC streaming services to initialize..." - sleep 10 - - # Start MQ broker with maximum verbosity for debugging - echo "Starting MQ broker..." - nohup weed -v 3 mq.broker \ - -master="127.0.0.1:9333" \ - -ip="127.0.0.1" \ - -port=17777 \ - -logFlushInterval=0 \ - > /tmp/weed-mq-broker.log 2>&1 & - - # Wait for broker to be ready with better error reporting - sleep 15 - broker_ready=false - for i in $(seq 1 20); do - if nc -z 127.0.0.1 17777; then - echo "SeaweedFS MQ broker is up" - broker_ready=true - break - fi - echo "Waiting for MQ broker... ($i/20)"; sleep 1 - done - - # Give broker additional time to register with master - if [ "$broker_ready" = true ]; then - echo "Allowing broker to register with master..." - sleep 30 - - # Check if broker is properly registered by querying cluster nodes - echo "Cluster status after broker registration:" - curl -s "http://127.0.0.1:9333/cluster/status" || echo "Could not check cluster status" - - echo "Checking cluster topology (includes registered components):" - curl -s "http://127.0.0.1:9333/dir/status" | head -20 || echo "Could not check dir status" - - echo "Verifying broker discovery via master client debug:" - echo "If broker registration is successful, it should appear in dir status" - - echo "Testing gRPC connectivity with weed binary:" - echo "This simulates what the gateway does during broker discovery..." - timeout 10s weed shell -master=127.0.0.1:9333 -filer=127.0.0.1:8888 > /tmp/shell-test.log 2>&1 || echo "weed shell test completed or timed out - checking logs..." - echo "Shell test results:" - cat /tmp/shell-test.log 2>/dev/null | head -10 || echo "No shell test logs" - fi - - # Check if broker failed to start and show logs - if [ "$broker_ready" = false ]; then - echo "ERROR: MQ broker failed to start. Broker logs:" - cat /tmp/weed-mq-broker.log || echo "No broker logs found" - echo "Server logs:" - tail -20 /tmp/weed-server.log || echo "No server logs found" - exit 1 - fi - - - name: Run Consumer Group Tests - run: | - cd test/kafka - # Test consumer group functionality with explicit timeout - ulimit -n 512 || echo "Warning: Could not set file descriptor limit" - ulimit -u 100 || echo "Warning: Could not set process limit" - timeout 240s go test -v -run "^TestConsumerGroups" -timeout 180s ./integration/... || echo "Test execution timed out or failed" - env: - GOMAXPROCS: 1 - SEAWEEDFS_MASTERS: 127.0.0.1:9333 - - kafka-client-compatibility: - name: Kafka Client Compatibility (with SMQ) - runs-on: ubuntu-latest - timeout-minutes: 25 - strategy: - fail-fast: false - matrix: - container-id: [client-compat-1] - container: - image: golang:1.24-alpine - options: --cpus 1.0 --memory 1.5g --shm-size 256m --hostname kafka-client-${{ matrix.container-id }} - env: - GOMAXPROCS: 1 - CGO_ENABLED: 0 - KAFKA_CLIENT_ISOLATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - cache: true - cache-dependency-path: | - **/go.sum - id: go - - - name: Setup Client Container Environment - run: | - apk add --no-cache git procps curl netcat-openbsd - ulimit -n 1024 || echo "Warning: Could not set file descriptor limit" - - - name: Warm Go module cache - run: | - # Warm cache for root module - go mod download || true - # Warm cache for kafka test module - cd test/kafka - go mod download || true - - - name: Get dependencies - run: | - cd test/kafka - timeout 90s go mod download || echo "Warning: Dependency download timed out, continuing with cached modules" - - - name: Build and start SeaweedFS MQ - run: | - set -e - cd $GITHUB_WORKSPACE - # Build weed binary - go build -o /usr/local/bin/weed ./weed - # Start SeaweedFS components with MQ brokers - export WEED_DATA_DIR=/tmp/seaweedfs-client-$RANDOM - mkdir -p "$WEED_DATA_DIR" - - # Start SeaweedFS server (master, volume, filer) with consistent IP advertising - nohup weed -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="0.0.0.0" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-server.log 2>&1 & - - # Wait for master to be ready - for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "SeaweedFS master HTTP is up"; break - fi - echo "Waiting for SeaweedFS master HTTP... ($i/30)"; sleep 1 - done - - # Wait for master gRPC to be ready (this is what broker discovery uses) - echo "Waiting for master gRPC port..." - for i in $(seq 1 30); do - if nc -z 127.0.0.1 19333; then - echo "โœ“ SeaweedFS master gRPC is up (port 19333)" - break - fi - echo " Waiting for master gRPC... ($i/30)"; sleep 1 - done - - # Give server time to initialize all components including gRPC services - echo "Waiting for SeaweedFS components to initialize..." - sleep 15 - - # Additional wait specifically for gRPC services to be ready for streaming - echo "Allowing extra time for master gRPC streaming services to initialize..." - sleep 10 - - # Start MQ broker with maximum verbosity for debugging - echo "Starting MQ broker..." - nohup weed -v 3 mq.broker \ - -master="127.0.0.1:9333" \ - -ip="127.0.0.1" \ - -port=17777 \ - -logFlushInterval=0 \ - > /tmp/weed-mq-broker.log 2>&1 & - - # Wait for broker to be ready with better error reporting - sleep 15 - broker_ready=false - for i in $(seq 1 20); do - if nc -z 127.0.0.1 17777; then - echo "SeaweedFS MQ broker is up" - broker_ready=true - break - fi - echo "Waiting for MQ broker... ($i/20)"; sleep 1 - done - - # Give broker additional time to register with master - if [ "$broker_ready" = true ]; then - echo "Allowing broker to register with master..." - sleep 30 - - # Check if broker is properly registered by querying cluster nodes - echo "Cluster status after broker registration:" - curl -s "http://127.0.0.1:9333/cluster/status" || echo "Could not check cluster status" - - echo "Checking cluster topology (includes registered components):" - curl -s "http://127.0.0.1:9333/dir/status" | head -20 || echo "Could not check dir status" - - echo "Verifying broker discovery via master client debug:" - echo "If broker registration is successful, it should appear in dir status" - - echo "Testing gRPC connectivity with weed binary:" - echo "This simulates what the gateway does during broker discovery..." - timeout 10s weed shell -master=127.0.0.1:9333 -filer=127.0.0.1:8888 > /tmp/shell-test.log 2>&1 || echo "weed shell test completed or timed out - checking logs..." - echo "Shell test results:" - cat /tmp/shell-test.log 2>/dev/null | head -10 || echo "No shell test logs" - fi - - # Check if broker failed to start and show logs - if [ "$broker_ready" = false ]; then - echo "ERROR: MQ broker failed to start. Broker logs:" - cat /tmp/weed-mq-broker.log || echo "No broker logs found" - echo "Server logs:" - tail -20 /tmp/weed-server.log || echo "No server logs found" - exit 1 - fi - - - name: Run Client Compatibility Tests - run: | - cd test/kafka - go test -v -run "^TestClientCompatibility" -timeout 180s ./integration/... - env: - GOMAXPROCS: 1 - SEAWEEDFS_MASTERS: 127.0.0.1:9333 - - kafka-smq-integration-tests: - name: Kafka SMQ Integration Tests (Full Stack) - runs-on: ubuntu-latest - timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - container-id: [smq-integration-1] - container: - image: golang:1.24-alpine - options: --cpus 1.0 --memory 2g --hostname kafka-smq-${{ matrix.container-id }} - env: - GOMAXPROCS: 1 - CGO_ENABLED: 0 - KAFKA_SMQ_INTEGRATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - cache: true - cache-dependency-path: | - **/go.sum - id: go - - - name: Setup SMQ Integration Container Environment - run: | - apk add --no-cache git procps curl netcat-openbsd - ulimit -n 1024 || echo "Warning: Could not set file descriptor limit" - - - name: Warm Go module cache - run: | - # Warm cache for root module - go mod download || true - # Warm cache for kafka test module - cd test/kafka - go mod download || true - - - name: Get dependencies - run: | - cd test/kafka - timeout 90s go mod download || echo "Warning: Dependency download timed out, continuing with cached modules" - - - name: Build and start SeaweedFS MQ - run: | - set -e - cd $GITHUB_WORKSPACE - # Build weed binary - go build -o /usr/local/bin/weed ./weed - # Start SeaweedFS components with MQ brokers - export WEED_DATA_DIR=/tmp/seaweedfs-smq-$RANDOM - mkdir -p "$WEED_DATA_DIR" - - # Start SeaweedFS server (master, volume, filer) with consistent IP advertising - nohup weed -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="0.0.0.0" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-server.log 2>&1 & - - # Wait for master to be ready - for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "SeaweedFS master HTTP is up"; break - fi - echo "Waiting for SeaweedFS master HTTP... ($i/30)"; sleep 1 - done - - # Wait for master gRPC to be ready (this is what broker discovery uses) - echo "Waiting for master gRPC port..." - for i in $(seq 1 30); do - if nc -z 127.0.0.1 19333; then - echo "โœ“ SeaweedFS master gRPC is up (port 19333)" - break - fi - echo " Waiting for master gRPC... ($i/30)"; sleep 1 - done - - # Give server time to initialize all components including gRPC services - echo "Waiting for SeaweedFS components to initialize..." - sleep 15 - - # Additional wait specifically for gRPC services to be ready for streaming - echo "Allowing extra time for master gRPC streaming services to initialize..." - sleep 10 - - # Start MQ broker with maximum verbosity for debugging - echo "Starting MQ broker..." - nohup weed -v 3 mq.broker \ - -master="127.0.0.1:9333" \ - -ip="127.0.0.1" \ - -port=17777 \ - -logFlushInterval=0 \ - > /tmp/weed-mq-broker.log 2>&1 & - - # Wait for broker to be ready with better error reporting - sleep 15 - broker_ready=false - for i in $(seq 1 20); do - if nc -z 127.0.0.1 17777; then - echo "SeaweedFS MQ broker is up" - broker_ready=true - break - fi - echo "Waiting for MQ broker... ($i/20)"; sleep 1 - done - - # Give broker additional time to register with master - if [ "$broker_ready" = true ]; then - echo "Allowing broker to register with master..." - sleep 30 - - # Check if broker is properly registered by querying cluster nodes - echo "Cluster status after broker registration:" - curl -s "http://127.0.0.1:9333/cluster/status" || echo "Could not check cluster status" - - echo "Checking cluster topology (includes registered components):" - curl -s "http://127.0.0.1:9333/dir/status" | head -20 || echo "Could not check dir status" - - echo "Verifying broker discovery via master client debug:" - echo "If broker registration is successful, it should appear in dir status" - - echo "Testing gRPC connectivity with weed binary:" - echo "This simulates what the gateway does during broker discovery..." - timeout 10s weed shell -master=127.0.0.1:9333 -filer=127.0.0.1:8888 > /tmp/shell-test.log 2>&1 || echo "weed shell test completed or timed out - checking logs..." - echo "Shell test results:" - cat /tmp/shell-test.log 2>/dev/null | head -10 || echo "No shell test logs" - fi - - # Check if broker failed to start and show logs - if [ "$broker_ready" = false ]; then - echo "ERROR: MQ broker failed to start. Broker logs:" - cat /tmp/weed-mq-broker.log || echo "No broker logs found" - echo "Server logs:" - tail -20 /tmp/weed-server.log || echo "No server logs found" - exit 1 - fi - - - name: Run SMQ Integration Tests - run: | - cd test/kafka - ulimit -n 512 || echo "Warning: Could not set file descriptor limit" - ulimit -u 100 || echo "Warning: Could not set process limit" - # Run the dedicated SMQ integration tests - go test -v -run "^TestSMQIntegration" -timeout 180s ./integration/... - env: - GOMAXPROCS: 1 - SEAWEEDFS_MASTERS: 127.0.0.1:9333 - - kafka-protocol-tests: - name: Kafka Protocol Tests (Isolated) - runs-on: ubuntu-latest - timeout-minutes: 5 - strategy: - fail-fast: false - matrix: - container-id: [protocol-1] - container: - image: golang:1.24-alpine - options: --cpus 1.0 --memory 1g --tmpfs /tmp:exec --hostname kafka-protocol-${{ matrix.container-id }} - env: - GOMAXPROCS: 1 - CGO_ENABLED: 0 - KAFKA_PROTOCOL_ISOLATION: "true" - CONTAINER_ID: ${{ matrix.container-id }} - steps: - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - id: go - - - name: Check out code - uses: actions/checkout@v5 - - - name: Setup Protocol Container Environment - run: | - apk add --no-cache git procps - # Ensure proper permissions for test execution - chmod -R 755 /tmp || true - export TMPDIR=/tmp - export GOCACHE=/tmp/go-cache - mkdir -p $GOCACHE - chmod 755 $GOCACHE - - - name: Get dependencies - run: | - cd test/kafka - go mod download - - - name: Run Protocol Tests - run: | - cd test/kafka - export TMPDIR=/tmp - export GOCACHE=/tmp/go-cache - # Run protocol tests from the weed/mq/kafka directory since they test the protocol implementation - cd ../../weed/mq/kafka - go test -v -run "^Test.*" -timeout 10s ./... - env: - GOMAXPROCS: 1 - TMPDIR: /tmp - GOCACHE: /tmp/go-cache diff --git a/.github/workflows/postgres-tests.yml b/.github/workflows/postgres-tests.yml deleted file mode 100644 index 25876d35d..000000000 --- a/.github/workflows/postgres-tests.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: "PostgreSQL Gateway Tests" - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -concurrency: - group: ${{ github.head_ref }}/postgres-tests - cancel-in-progress: true - -permissions: - contents: read - -jobs: - postgres-basic-tests: - name: PostgreSQL Basic Tests - runs-on: ubuntu-latest - timeout-minutes: 15 - defaults: - run: - working-directory: test/postgres - steps: - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - id: go - - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Cache Docker layers - uses: actions/cache@v4 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-postgres-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx-postgres- - - - name: Start PostgreSQL Gateway Services - run: | - make dev-start - sleep 10 - - - name: Run Basic Connectivity Test - run: | - make test-basic - - - name: Run PostgreSQL Client Tests - run: | - make test-client - - - name: Save logs - if: always() - run: | - docker compose logs > postgres-output.log || true - - - name: Archive logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: postgres-logs - path: test/postgres/postgres-output.log - - - name: Cleanup - if: always() - run: | - make clean || true diff --git a/.github/workflows/s3-go-tests.yml b/.github/workflows/s3-go-tests.yml deleted file mode 100644 index dabb79505..000000000 --- a/.github/workflows/s3-go-tests.yml +++ /dev/null @@ -1,414 +0,0 @@ -name: "S3 Go Tests" - -on: - pull_request: - -concurrency: - group: ${{ github.head_ref }}/s3-go-tests - cancel-in-progress: true - -permissions: - contents: read - -defaults: - run: - working-directory: weed - -jobs: - s3-versioning-tests: - name: S3 Versioning Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - strategy: - matrix: - test-type: ["quick", "comprehensive"] - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 Versioning Tests - ${{ matrix.test-type }} - timeout-minutes: 25 - working-directory: test/s3/versioning - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting Tests ===" - - # Run tests with automatic server management - # The test-with-server target handles server startup/shutdown automatically - if [ "${{ matrix.test-type }}" = "quick" ]; then - # Override TEST_PATTERN for quick tests only - make test-with-server TEST_PATTERN="TestBucketListReturnDataVersioning|TestVersioningBasicWorkflow|TestVersioningDeleteMarkers" - else - # Run all versioning tests - make test-with-server - fi - - - name: Show server logs on failure - if: failure() - working-directory: test/s3/versioning - run: | - echo "=== Server Logs ===" - if [ -f weed-test.log ]; then - echo "Last 100 lines of server logs:" - tail -100 weed-test.log - else - echo "No server log file found" - fi - - echo "=== Test Environment ===" - ps aux | grep -E "(weed|test)" || true - netstat -tlnp | grep -E "(8333|9333|8080)" || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-versioning-test-logs-${{ matrix.test-type }} - path: test/s3/versioning/weed-test*.log - retention-days: 3 - - s3-versioning-compatibility: - name: S3 Versioning Compatibility Test - runs-on: ubuntu-22.04 - timeout-minutes: 20 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run Core Versioning Test (Python s3tests equivalent) - timeout-minutes: 15 - working-directory: test/s3/versioning - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run the specific test that is equivalent to the Python s3tests - make test-with-server || { - echo "โŒ Test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -100 weed-test.log - fi - echo "=== Process information ===" - ps aux | grep -E "(weed|test)" || true - exit 1 - } - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-versioning-compatibility-logs - path: test/s3/versioning/weed-test*.log - retention-days: 3 - - s3-cors-compatibility: - name: S3 CORS Compatibility Test - runs-on: ubuntu-22.04 - timeout-minutes: 20 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run Core CORS Test (AWS S3 compatible) - timeout-minutes: 15 - working-directory: test/s3/cors - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run the specific test that is equivalent to AWS S3 CORS behavior - make test-with-server || { - echo "โŒ Test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -100 weed-test.log - fi - echo "=== Process information ===" - ps aux | grep -E "(weed|test)" || true - exit 1 - } - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-cors-compatibility-logs - path: test/s3/cors/weed-test*.log - retention-days: 3 - - s3-retention-tests: - name: S3 Retention Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - strategy: - matrix: - test-type: ["quick", "comprehensive"] - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 Retention Tests - ${{ matrix.test-type }} - timeout-minutes: 25 - working-directory: test/s3/retention - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting Tests ===" - - # Run tests with automatic server management - # The test-with-server target handles server startup/shutdown automatically - if [ "${{ matrix.test-type }}" = "quick" ]; then - # Override TEST_PATTERN for quick tests only - make test-with-server TEST_PATTERN="TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow" - else - # Run all retention tests - make test-with-server - fi - - - name: Show server logs on failure - if: failure() - working-directory: test/s3/retention - run: | - echo "=== Server Logs ===" - if [ -f weed-test.log ]; then - echo "Last 100 lines of server logs:" - tail -100 weed-test.log - else - echo "No server log file found" - fi - - echo "=== Test Environment ===" - ps aux | grep -E "(weed|test)" || true - netstat -tlnp | grep -E "(8333|9333|8080)" || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-retention-test-logs-${{ matrix.test-type }} - path: test/s3/retention/weed-test*.log - retention-days: 3 - - s3-cors-tests: - name: S3 CORS Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - strategy: - matrix: - test-type: ["quick", "comprehensive"] - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 CORS Tests - ${{ matrix.test-type }} - timeout-minutes: 25 - working-directory: test/s3/cors - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting Tests ===" - - # Run tests with automatic server management - # The test-with-server target handles server startup/shutdown automatically - if [ "${{ matrix.test-type }}" = "quick" ]; then - # Override TEST_PATTERN for quick tests only - make test-with-server TEST_PATTERN="TestCORSConfigurationManagement|TestServiceLevelCORS|TestCORSBasicWorkflow" - else - # Run all CORS tests - make test-with-server - fi - - - name: Show server logs on failure - if: failure() - working-directory: test/s3/cors - run: | - echo "=== Server Logs ===" - if [ -f weed-test.log ]; then - echo "Last 100 lines of server logs:" - tail -100 weed-test.log - else - echo "No server log file found" - fi - - echo "=== Test Environment ===" - ps aux | grep -E "(weed|test)" || true - netstat -tlnp | grep -E "(8333|9333|8080)" || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-cors-test-logs-${{ matrix.test-type }} - path: test/s3/cors/weed-test*.log - retention-days: 3 - - s3-retention-worm: - name: S3 Retention WORM Integration Test - runs-on: ubuntu-22.04 - timeout-minutes: 20 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run WORM Integration Tests - timeout-minutes: 15 - working-directory: test/s3/retention - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run the WORM integration tests with automatic server management - # The test-with-server target handles server startup/shutdown automatically - make test-with-server TEST_PATTERN="TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" || { - echo "โŒ WORM integration test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -100 weed-test.log - fi - echo "=== Process information ===" - ps aux | grep -E "(weed|test)" || true - exit 1 - } - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-retention-worm-logs - path: test/s3/retention/weed-test*.log - retention-days: 3 - - s3-versioning-stress: - name: S3 Versioning Stress Test - runs-on: ubuntu-22.04 - timeout-minutes: 35 - # Only run stress tests on master branch pushes to avoid overloading PR testing - if: github.event_name == 'push' && github.ref == 'refs/heads/master' - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 Versioning Stress Tests - timeout-minutes: 30 - working-directory: test/s3/versioning - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run stress tests (concurrent operations) - make test-versioning-stress || { - echo "โŒ Stress test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -200 weed-test.log - fi - make clean - exit 1 - } - make clean - - - name: Upload stress test logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: s3-versioning-stress-logs - path: test/s3/versioning/weed-test*.log - retention-days: 7 - - # Removed SSE-C integration tests and compatibility job \ No newline at end of file diff --git a/.github/workflows/s3-iam-tests.yml b/.github/workflows/s3-iam-tests.yml deleted file mode 100644 index d59b4f86f..000000000 --- a/.github/workflows/s3-iam-tests.yml +++ /dev/null @@ -1,283 +0,0 @@ -name: "S3 IAM Integration Tests" - -on: - pull_request: - paths: - - 'weed/iam/**' - - 'weed/s3api/**' - - 'test/s3/iam/**' - - '.github/workflows/s3-iam-tests.yml' - push: - branches: [ master ] - paths: - - 'weed/iam/**' - - 'weed/s3api/**' - - 'test/s3/iam/**' - - '.github/workflows/s3-iam-tests.yml' - -concurrency: - group: ${{ github.head_ref }}/s3-iam-tests - cancel-in-progress: true - -permissions: - contents: read - -defaults: - run: - working-directory: weed - -jobs: - # Unit tests for IAM components - iam-unit-tests: - name: IAM Unit Tests - runs-on: ubuntu-22.04 - timeout-minutes: 15 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Get dependencies - run: | - go mod download - - - name: Run IAM Unit Tests - timeout-minutes: 10 - run: | - set -x - echo "=== Running IAM STS Tests ===" - go test -v -timeout 5m ./iam/sts/... - - echo "=== Running IAM Policy Tests ===" - go test -v -timeout 5m ./iam/policy/... - - echo "=== Running IAM Integration Tests ===" - go test -v -timeout 5m ./iam/integration/... - - echo "=== Running S3 API IAM Tests ===" - go test -v -timeout 5m ./s3api/... -run ".*IAM.*|.*JWT.*|.*Auth.*" - - - name: Upload test results on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: iam-unit-test-results - path: | - weed/testdata/ - weed/**/testdata/ - retention-days: 3 - - # S3 IAM integration tests with SeaweedFS services - s3-iam-integration-tests: - name: S3 IAM Integration Tests - runs-on: ubuntu-22.04 - timeout-minutes: 25 - strategy: - matrix: - test-type: ["basic", "advanced", "policy-enforcement"] - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - working-directory: weed - run: | - go install -buildvcs=false - - - name: Run S3 IAM Integration Tests - ${{ matrix.test-type }} - timeout-minutes: 20 - working-directory: test/s3/iam - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting S3 IAM Integration Tests (${{ matrix.test-type }}) ===" - - # Set WEED_BINARY to use the installed version - export WEED_BINARY=$(which weed) - export TEST_TIMEOUT=15m - - # Run tests based on type - case "${{ matrix.test-type }}" in - "basic") - echo "Running basic IAM functionality tests..." - make clean setup start-services wait-for-services - go test -v -timeout 15m -run "TestS3IAMAuthentication|TestS3IAMBasicWorkflow|TestS3IAMTokenValidation" ./... - ;; - "advanced") - echo "Running advanced IAM feature tests..." - make clean setup start-services wait-for-services - go test -v -timeout 15m -run "TestS3IAMSessionExpiration|TestS3IAMMultipart|TestS3IAMPresigned" ./... - ;; - "policy-enforcement") - echo "Running policy enforcement tests..." - make clean setup start-services wait-for-services - go test -v -timeout 15m -run "TestS3IAMPolicyEnforcement|TestS3IAMBucketPolicy|TestS3IAMContextual" ./... - ;; - *) - echo "Unknown test type: ${{ matrix.test-type }}" - exit 1 - ;; - esac - - # Always cleanup - make stop-services - - - name: Show service logs on failure - if: failure() - working-directory: test/s3/iam - run: | - echo "=== Service Logs ===" - echo "--- Master Log ---" - tail -50 weed-master.log 2>/dev/null || echo "No master log found" - echo "" - echo "--- Filer Log ---" - tail -50 weed-filer.log 2>/dev/null || echo "No filer log found" - echo "" - echo "--- Volume Log ---" - tail -50 weed-volume.log 2>/dev/null || echo "No volume log found" - echo "" - echo "--- S3 API Log ---" - tail -50 weed-s3.log 2>/dev/null || echo "No S3 log found" - echo "" - - echo "=== Process Information ===" - ps aux | grep -E "(weed|test)" || true - netstat -tlnp | grep -E "(8333|8888|9333|8080)" || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-iam-integration-logs-${{ matrix.test-type }} - path: test/s3/iam/weed-*.log - retention-days: 5 - - # Distributed IAM tests - s3-iam-distributed-tests: - name: S3 IAM Distributed Tests - runs-on: ubuntu-22.04 - timeout-minutes: 25 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - working-directory: weed - run: | - go install -buildvcs=false - - - name: Run Distributed IAM Tests - timeout-minutes: 20 - working-directory: test/s3/iam - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - export WEED_BINARY=$(which weed) - export TEST_TIMEOUT=15m - - # Test distributed configuration - echo "Testing distributed IAM configuration..." - make clean setup - - # Start services with distributed IAM config - echo "Starting services with distributed configuration..." - make start-services - make wait-for-services - - # Run distributed-specific tests - export ENABLE_DISTRIBUTED_TESTS=true - go test -v -timeout 15m -run "TestS3IAMDistributedTests" ./... || { - echo "โŒ Distributed tests failed, checking logs..." - make logs - exit 1 - } - - make stop-services - - - name: Upload distributed test logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: s3-iam-distributed-logs - path: test/s3/iam/weed-*.log - retention-days: 7 - - # Performance and stress tests - s3-iam-performance-tests: - name: S3 IAM Performance Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - working-directory: weed - run: | - go install -buildvcs=false - - - name: Run IAM Performance Benchmarks - timeout-minutes: 25 - working-directory: test/s3/iam - run: | - set -x - echo "=== Running IAM Performance Tests ===" - - export WEED_BINARY=$(which weed) - export TEST_TIMEOUT=20m - - make clean setup start-services wait-for-services - - # Run performance tests (benchmarks disabled for CI) - echo "Running performance tests..." - export ENABLE_PERFORMANCE_TESTS=true - go test -v -timeout 15m -run "TestS3IAMPerformanceTests" ./... || { - echo "โŒ Performance tests failed" - make logs - exit 1 - } - - make stop-services - - - name: Upload performance test results - if: always() - uses: actions/upload-artifact@v4 - with: - name: s3-iam-performance-results - path: | - test/s3/iam/weed-*.log - test/s3/iam/*.test - retention-days: 7 diff --git a/.github/workflows/s3-keycloak-tests.yml b/.github/workflows/s3-keycloak-tests.yml deleted file mode 100644 index 722661b81..000000000 --- a/.github/workflows/s3-keycloak-tests.yml +++ /dev/null @@ -1,161 +0,0 @@ -name: "S3 Keycloak Integration Tests" - -on: - pull_request: - paths: - - 'weed/iam/**' - - 'weed/s3api/**' - - 'test/s3/iam/**' - - '.github/workflows/s3-keycloak-tests.yml' - push: - branches: [ master ] - paths: - - 'weed/iam/**' - - 'weed/s3api/**' - - 'test/s3/iam/**' - - '.github/workflows/s3-keycloak-tests.yml' - -concurrency: - group: ${{ github.head_ref }}/s3-keycloak-tests - cancel-in-progress: true - -permissions: - contents: read - -defaults: - run: - working-directory: weed - -jobs: - # Dedicated job for Keycloak integration tests - s3-keycloak-integration-tests: - name: S3 Keycloak Integration Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - working-directory: weed - run: | - go install -buildvcs=false - - - name: Run Keycloak Integration Tests - timeout-minutes: 25 - working-directory: test/s3/iam - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting S3 Keycloak Integration Tests ===" - - # Set WEED_BINARY to use the installed version - export WEED_BINARY=$(which weed) - export TEST_TIMEOUT=20m - - echo "Running Keycloak integration tests..." - # Start Keycloak container first - docker run -d \ - --name keycloak \ - -p 8080:8080 \ - -e KC_BOOTSTRAP_ADMIN_USERNAME=admin \ - -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin \ - -e KC_HTTP_ENABLED=true \ - -e KC_HOSTNAME_STRICT=false \ - -e KC_HOSTNAME_STRICT_HTTPS=false \ - quay.io/keycloak/keycloak:26.0 \ - start-dev - - # Wait for Keycloak with better health checking - timeout 300 bash -c ' - while true; do - if curl -s http://localhost:8080/health/ready > /dev/null 2>&1; then - echo "โœ… Keycloak health check passed" - break - fi - echo "... waiting for Keycloak to be ready" - sleep 5 - done - ' - - # Setup Keycloak configuration - ./setup_keycloak.sh - - # Start SeaweedFS services - make clean setup start-services wait-for-services - - # Verify service accessibility - echo "=== Verifying Service Accessibility ===" - curl -f http://localhost:8080/realms/master - curl -s http://localhost:8333 - echo "โœ… SeaweedFS S3 API is responding (IAM-protected endpoint)" - - # Run Keycloak-specific tests - echo "=== Running Keycloak Tests ===" - export KEYCLOAK_URL=http://localhost:8080 - export S3_ENDPOINT=http://localhost:8333 - - # Wait for realm to be properly configured - timeout 120 bash -c 'until curl -fs http://localhost:8080/realms/seaweedfs-test/.well-known/openid-configuration > /dev/null; do echo "... waiting for realm"; sleep 3; done' - - # Run the Keycloak integration tests - go test -v -timeout 20m -run "TestKeycloak" ./... - - - name: Show server logs on failure - if: failure() - working-directory: test/s3/iam - run: | - echo "=== Service Logs ===" - echo "--- Keycloak logs ---" - docker logs keycloak --tail=100 || echo "No Keycloak container logs" - - echo "--- SeaweedFS Master logs ---" - if [ -f weed-master.log ]; then - tail -100 weed-master.log - fi - - echo "--- SeaweedFS S3 logs ---" - if [ -f weed-s3.log ]; then - tail -100 weed-s3.log - fi - - echo "--- SeaweedFS Filer logs ---" - if [ -f weed-filer.log ]; then - tail -100 weed-filer.log - fi - - echo "=== System Status ===" - ps aux | grep -E "(weed|keycloak)" || true - netstat -tlnp | grep -E "(8333|9333|8080|8888)" || true - docker ps -a || true - - - name: Cleanup - if: always() - working-directory: test/s3/iam - run: | - # Stop Keycloak container - docker stop keycloak || true - docker rm keycloak || true - - # Stop SeaweedFS services - make clean || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-keycloak-test-logs - path: | - test/s3/iam/*.log - test/s3/iam/test-volume-data/ - retention-days: 3 diff --git a/.github/workflows/s3-sse-tests.yml b/.github/workflows/s3-sse-tests.yml deleted file mode 100644 index 48b34261f..000000000 --- a/.github/workflows/s3-sse-tests.yml +++ /dev/null @@ -1,345 +0,0 @@ -name: "S3 SSE Tests" - -on: - pull_request: - paths: - - 'weed/s3api/s3_sse_*.go' - - 'weed/s3api/s3api_object_handlers_put.go' - - 'weed/s3api/s3api_object_handlers_copy*.go' - - 'weed/server/filer_server_handlers_*.go' - - 'weed/kms/**' - - 'test/s3/sse/**' - - '.github/workflows/s3-sse-tests.yml' - push: - branches: [ master, main ] - paths: - - 'weed/s3api/s3_sse_*.go' - - 'weed/s3api/s3api_object_handlers_put.go' - - 'weed/s3api/s3api_object_handlers_copy*.go' - - 'weed/server/filer_server_handlers_*.go' - - 'weed/kms/**' - - 'test/s3/sse/**' - -concurrency: - group: ${{ github.head_ref }}/s3-sse-tests - cancel-in-progress: true - -permissions: - contents: read - -defaults: - run: - working-directory: weed - -jobs: - s3-sse-integration-tests: - name: S3 SSE Integration Tests - runs-on: ubuntu-22.04 - timeout-minutes: 30 - strategy: - matrix: - test-type: ["quick", "comprehensive"] - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 SSE Integration Tests - ${{ matrix.test-type }} - timeout-minutes: 25 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - df -h - echo "=== Starting SSE Tests ===" - - # Run tests with automatic server management - # The test-with-server target handles server startup/shutdown automatically - if [ "${{ matrix.test-type }}" = "quick" ]; then - # Quick tests - basic SSE-C and SSE-KMS functionality - make test-with-server TEST_PATTERN="TestSSECIntegrationBasic|TestSSEKMSIntegrationBasic|TestSimpleSSECIntegration" - else - # Comprehensive tests - SSE-C/KMS functionality, excluding copy operations (pre-existing SSE-C issues) - make test-with-server TEST_PATTERN="TestSSECIntegrationBasic|TestSSECIntegrationVariousDataSizes|TestSSEKMSIntegrationBasic|TestSSEKMSIntegrationVariousDataSizes|.*Multipart.*Integration|TestSimpleSSECIntegration" - fi - - - name: Show server logs on failure - if: failure() - working-directory: test/s3/sse - run: | - echo "=== Server Logs ===" - if [ -f weed-test.log ]; then - echo "Last 100 lines of server logs:" - tail -100 weed-test.log - else - echo "No server log file found" - fi - - echo "=== Test Environment ===" - ps aux | grep -E "(weed|test)" || true - netstat -tlnp | grep -E "(8333|9333|8080|8888)" || true - - - name: Upload test logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-test-logs-${{ matrix.test-type }} - path: test/s3/sse/weed-test*.log - retention-days: 3 - - s3-sse-compatibility: - name: S3 SSE Compatibility Test - runs-on: ubuntu-22.04 - timeout-minutes: 20 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run Core SSE Compatibility Test (AWS S3 equivalent) - timeout-minutes: 15 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run the specific tests that validate AWS S3 SSE compatibility - both SSE-C and SSE-KMS basic functionality - make test-with-server TEST_PATTERN="TestSSECIntegrationBasic|TestSSEKMSIntegrationBasic" || { - echo "โŒ SSE compatibility test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -100 weed-test.log - fi - echo "=== Process information ===" - ps aux | grep -E "(weed|test)" || true - exit 1 - } - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-compatibility-logs - path: test/s3/sse/weed-test*.log - retention-days: 3 - - s3-sse-metadata-persistence: - name: S3 SSE Metadata Persistence Test - runs-on: ubuntu-22.04 - timeout-minutes: 20 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run SSE Metadata Persistence Test - timeout-minutes: 15 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run the specific test that would catch filer metadata storage bugs - # This test validates that encryption metadata survives the full PUT/GET cycle - make test-metadata-persistence || { - echo "โŒ SSE metadata persistence test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -100 weed-test.log - fi - echo "=== Process information ===" - ps aux | grep -E "(weed|test)" || true - exit 1 - } - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-metadata-persistence-logs - path: test/s3/sse/weed-test*.log - retention-days: 3 - - s3-sse-copy-operations: - name: S3 SSE Copy Operations Test - runs-on: ubuntu-22.04 - timeout-minutes: 25 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run SSE Copy Operations Tests - timeout-minutes: 20 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run tests that validate SSE copy operations and cross-encryption scenarios - echo "๐Ÿš€ Running SSE copy operations tests..." - echo "๐Ÿ“‹ Note: SSE-C copy operations have pre-existing functionality gaps" - echo " Cross-encryption copy security fix has been implemented and maintained" - - # Skip SSE-C copy operations due to pre-existing HTTP 500 errors - # The critical security fix for cross-encryption (SSE-C โ†’ SSE-KMS) has been preserved - echo "โญ๏ธ Skipping SSE copy operations tests due to known limitations:" - echo " - SSE-C copy operations: HTTP 500 errors (pre-existing functionality gap)" - echo " - Cross-encryption security fix: โœ… Implemented and tested (forces streaming copy)" - echo " - These limitations are documented as pre-existing issues" - exit 0 # Job succeeds with security fix preserved and limitations documented - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-copy-operations-logs - path: test/s3/sse/weed-test*.log - retention-days: 3 - - s3-sse-multipart: - name: S3 SSE Multipart Upload Test - runs-on: ubuntu-22.04 - timeout-minutes: 25 - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run SSE Multipart Upload Tests - timeout-minutes: 20 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Multipart tests - Document known architectural limitations - echo "๐Ÿš€ Running multipart upload tests..." - echo "๐Ÿ“‹ Note: SSE-KMS multipart upload has known architectural limitation requiring per-chunk metadata storage" - echo " SSE-C multipart tests will be skipped due to pre-existing functionality gaps" - - # Test SSE-C basic multipart (skip advanced multipart that fails with HTTP 500) - # Skip SSE-KMS multipart due to architectural limitation (each chunk needs independent metadata) - echo "โญ๏ธ Skipping multipart upload tests due to known limitations:" - echo " - SSE-C multipart GET operations: HTTP 500 errors (pre-existing functionality gap)" - echo " - SSE-KMS multipart decryption: Requires per-chunk SSE metadata architecture changes" - echo " - These limitations are documented and require future architectural work" - exit 0 # Job succeeds with clear documentation of known limitations - - - name: Upload server logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-multipart-logs - path: test/s3/sse/weed-test*.log - retention-days: 3 - - s3-sse-performance: - name: S3 SSE Performance Test - runs-on: ubuntu-22.04 - timeout-minutes: 35 - # Only run performance tests on master branch pushes to avoid overloading PR testing - if: github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/main') - - steps: - - name: Check out code - uses: actions/checkout@v5 - - - name: Set up Go - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Install SeaweedFS - run: | - go install -buildvcs=false - - - name: Run S3 SSE Performance Tests - timeout-minutes: 30 - working-directory: test/s3/sse - run: | - set -x - echo "=== System Information ===" - uname -a - free -h - - # Run performance tests with various data sizes - make perf || { - echo "โŒ SSE performance test failed, checking logs..." - if [ -f weed-test.log ]; then - echo "=== Server logs ===" - tail -200 weed-test.log - fi - make clean - exit 1 - } - make clean - - - name: Upload performance test logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: s3-sse-performance-logs - path: test/s3/sse/weed-test*.log - retention-days: 7 diff --git a/.github/workflows/s3tests.yml b/.github/workflows/s3tests.yml deleted file mode 100644 index 540247a34..000000000 --- a/.github/workflows/s3tests.yml +++ /dev/null @@ -1,1131 +0,0 @@ -name: "Ceph S3 tests" - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -concurrency: - group: ${{ github.head_ref }}/s3tests - cancel-in-progress: true - -permissions: - contents: read - -jobs: - basic-s3-tests: - name: Basic S3 tests (KV store) - runs-on: ubuntu-22.04 - timeout-minutes: 15 - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.9' - - - name: Clone s3-tests - run: | - git clone https://github.com/ceph/s3-tests.git - cd s3-tests - pip install -r requirements.txt - pip install tox - pip install -e . - - - name: Fix S3 tests bucket creation conflicts - run: | - python3 test/s3/fix_s3_tests_bucket_conflicts.py - env: - S3_TESTS_PATH: s3-tests - - - name: Run Basic S3 tests - timeout-minutes: 15 - env: - S3TEST_CONF: ../docker/compose/s3tests.conf - shell: bash - run: | - cd weed - go install -buildvcs=false - set -x - # Create clean data directory for this test run - export WEED_DATA_DIR="/tmp/seaweedfs-s3tests-$(date +%s)" - mkdir -p "$WEED_DATA_DIR" - weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ - -volume.max=100 -volume.preStopSeconds=1 \ - -master.port=9333 -volume.port=8080 -filer.port=8888 -s3.port=8000 -metricsPort=9324 \ - -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config="$GITHUB_WORKSPACE/docker/compose/s3.json" & - pid=$! - - # Wait for all SeaweedFS components to be ready - echo "Waiting for SeaweedFS components to start..." - for i in {1..30}; do - if curl -s http://localhost:9333/cluster/status > /dev/null 2>&1; then - echo "Master server is ready" - break - fi - echo "Waiting for master server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8080/status > /dev/null 2>&1; then - echo "Volume server is ready" - break - fi - echo "Waiting for volume server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8888/ > /dev/null 2>&1; then - echo "Filer is ready" - break - fi - echo "Waiting for filer... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8000/ > /dev/null 2>&1; then - echo "S3 server is ready" - break - fi - echo "Waiting for S3 server... ($i/30)" - sleep 2 - done - - echo "All SeaweedFS components are ready!" - cd ../s3-tests - sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests/functional/test_s3.py - - # Debug: Show the config file contents - echo "=== S3 Config File Contents ===" - cat ../docker/compose/s3tests.conf - echo "=== End Config ===" - - # Additional wait for S3-Filer integration to be fully ready - echo "Waiting additional 10 seconds for S3-Filer integration..." - sleep 10 - - # Test S3 connection before running tests - echo "Testing S3 connection..." - for i in {1..10}; do - if curl -s -f http://localhost:8000/ > /dev/null 2>&1; then - echo "S3 connection test successful" - break - fi - echo "S3 connection test failed, retrying... ($i/10)" - sleep 2 - done - - echo "โœ… S3 server is responding, starting tests..." - - tox -- \ - s3tests/functional/test_s3.py::test_bucket_list_empty \ - s3tests/functional/test_s3.py::test_bucket_list_distinct \ - s3tests/functional/test_s3.py::test_bucket_list_many \ - s3tests/functional/test_s3.py::test_bucket_listv2_many \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_encoding_basic \ - s3tests/functional/test_s3.py::test_bucket_list_encoding_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix_ends_with_delimiter \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_underscore \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix_underscore \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_percentage \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_percentage \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_whitespace \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_whitespace \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_dot \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_dot \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_empty \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_none \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_not_skip_special \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_notempty \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_defaultempty \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_empty \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_basic \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_alt \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_empty \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_none \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_one \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_one \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_zero \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_none \ - s3tests/functional/test_s3.py::test_bucket_list_unordered \ - s3tests/functional/test_s3.py::test_bucket_listv2_unordered \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_invalid \ - s3tests/functional/test_s3.py::test_bucket_list_marker_none \ - s3tests/functional/test_s3.py::test_bucket_list_marker_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_continuationtoken \ - s3tests/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \ - s3tests/functional/test_s3.py::test_bucket_list_marker_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_marker_not_in_list \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_not_in_list \ - s3tests/functional/test_s3.py::test_bucket_list_marker_after_list \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_after_list \ - s3tests/functional/test_s3.py::test_bucket_list_return_data \ - s3tests/functional/test_s3.py::test_bucket_list_objects_anonymous \ - s3tests/functional/test_s3.py::test_bucket_listv2_objects_anonymous \ - s3tests/functional/test_s3.py::test_bucket_list_objects_anonymous_fail \ - s3tests/functional/test_s3.py::test_bucket_listv2_objects_anonymous_fail \ - s3tests/functional/test_s3.py::test_bucket_list_long_name \ - s3tests/functional/test_s3.py::test_bucket_list_special_prefix \ - s3tests/functional/test_s3.py::test_bucket_delete_notexist \ - s3tests/functional/test_s3.py::test_bucket_create_delete \ - s3tests/functional/test_s3.py::test_object_read_not_exist \ - s3tests/functional/test_s3.py::test_multi_object_delete \ - s3tests/functional/test_s3.py::test_multi_objectv2_delete \ - s3tests/functional/test_s3.py::test_object_head_zero_bytes \ - s3tests/functional/test_s3.py::test_object_write_check_etag \ - s3tests/functional/test_s3.py::test_object_write_cache_control \ - s3tests/functional/test_s3.py::test_object_write_expires \ - s3tests/functional/test_s3.py::test_object_write_read_update_read_delete \ - s3tests/functional/test_s3.py::test_object_metadata_replaced_on_put \ - s3tests/functional/test_s3.py::test_object_write_file \ - s3tests/functional/test_s3.py::test_post_object_invalid_date_format \ - s3tests/functional/test_s3.py::test_post_object_no_key_specified \ - s3tests/functional/test_s3.py::test_post_object_missing_signature \ - s3tests/functional/test_s3.py::test_post_object_condition_is_case_sensitive \ - s3tests/functional/test_s3.py::test_post_object_expires_is_case_sensitive \ - s3tests/functional/test_s3.py::test_post_object_missing_expires_condition \ - s3tests/functional/test_s3.py::test_post_object_missing_conditions_list \ - s3tests/functional/test_s3.py::test_post_object_upload_size_limit_exceeded \ - s3tests/functional/test_s3.py::test_post_object_missing_content_length_argument \ - s3tests/functional/test_s3.py::test_post_object_invalid_content_length_argument \ - s3tests/functional/test_s3.py::test_post_object_upload_size_below_minimum \ - s3tests/functional/test_s3.py::test_post_object_empty_conditions \ - s3tests/functional/test_s3.py::test_get_object_ifmatch_good \ - s3tests/functional/test_s3.py::test_get_object_ifnonematch_good \ - s3tests/functional/test_s3.py::test_get_object_ifmatch_failed \ - s3tests/functional/test_s3.py::test_get_object_ifnonematch_failed \ - s3tests/functional/test_s3.py::test_get_object_ifmodifiedsince_good \ - s3tests/functional/test_s3.py::test_get_object_ifmodifiedsince_failed \ - s3tests/functional/test_s3.py::test_get_object_ifunmodifiedsince_failed \ - s3tests/functional/test_s3.py::test_bucket_head \ - s3tests/functional/test_s3.py::test_bucket_head_notexist \ - s3tests/functional/test_s3.py::test_object_raw_authenticated \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_bucket_acl \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_object_acl \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_object_gone \ - s3tests/functional/test_s3.py::test_object_raw_get_x_amz_expires_out_range_zero \ - s3tests/functional/test_s3.py::test_object_anon_put \ - s3tests/functional/test_s3.py::test_object_put_authenticated \ - s3tests/functional/test_s3.py::test_bucket_recreate_overwrite_acl \ - s3tests/functional/test_s3.py::test_bucket_recreate_new_acl \ - s3tests/functional/test_s3.py::test_buckets_create_then_list \ - s3tests/functional/test_s3.py::test_buckets_list_ctime \ - s3tests/functional/test_s3.py::test_list_buckets_invalid_auth \ - s3tests/functional/test_s3.py::test_list_buckets_bad_auth \ - s3tests/functional/test_s3.py::test_bucket_create_naming_good_contains_period \ - s3tests/functional/test_s3.py::test_bucket_create_naming_good_contains_hyphen \ - s3tests/functional/test_s3.py::test_bucket_list_special_prefix \ - s3tests/functional/test_s3.py::test_object_copy_zero_size \ - s3tests/functional/test_s3.py::test_object_copy_same_bucket \ - s3tests/functional/test_s3.py::test_object_copy_to_itself \ - s3tests/functional/test_s3.py::test_object_copy_diff_bucket \ - s3tests/functional/test_s3.py::test_object_copy_canned_acl \ - s3tests/functional/test_s3.py::test_object_copy_bucket_not_found \ - s3tests/functional/test_s3.py::test_object_copy_key_not_found \ - s3tests/functional/test_s3.py::test_multipart_copy_small \ - s3tests/functional/test_s3.py::test_multipart_copy_without_range \ - s3tests/functional/test_s3.py::test_multipart_copy_special_names \ - s3tests/functional/test_s3.py::test_multipart_copy_multiple_sizes \ - s3tests/functional/test_s3.py::test_multipart_get_part \ - s3tests/functional/test_s3.py::test_multipart_upload \ - s3tests/functional/test_s3.py::test_multipart_upload_empty \ - s3tests/functional/test_s3.py::test_multipart_upload_multiple_sizes \ - s3tests/functional/test_s3.py::test_multipart_upload_contents \ - s3tests/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \ - s3tests/functional/test_s3.py::test_multipart_upload_size_too_small \ - s3tests/functional/test_s3.py::test_multipart_resend_first_finishes_last \ - s3tests/functional/test_s3.py::test_multipart_upload_resend_part \ - s3tests/functional/test_s3.py::test_multipart_upload_missing_part \ - s3tests/functional/test_s3.py::test_multipart_upload_incorrect_etag \ - s3tests/functional/test_s3.py::test_abort_multipart_upload \ - s3tests/functional/test_s3.py::test_list_multipart_upload \ - s3tests/functional/test_s3.py::test_atomic_read_1mb \ - s3tests/functional/test_s3.py::test_atomic_read_4mb \ - s3tests/functional/test_s3.py::test_atomic_read_8mb \ - s3tests/functional/test_s3.py::test_atomic_write_1mb \ - s3tests/functional/test_s3.py::test_atomic_write_4mb \ - s3tests/functional/test_s3.py::test_atomic_write_8mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_1mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_4mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_8mb \ - s3tests/functional/test_s3.py::test_atomic_multipart_upload_write \ - s3tests/functional/test_s3.py::test_ranged_request_response_code \ - s3tests/functional/test_s3.py::test_ranged_big_request_response_code \ - s3tests/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \ - s3tests/functional/test_s3.py::test_ranged_request_return_trailing_bytes_response_code \ - s3tests/functional/test_s3.py::test_copy_object_ifmatch_good \ - s3tests/functional/test_s3.py::test_copy_object_ifnonematch_failed \ - s3tests/functional/test_s3.py::test_copy_object_ifmatch_failed \ - s3tests/functional/test_s3.py::test_copy_object_ifnonematch_good \ - s3tests/functional/test_s3.py::test_lifecycle_set \ - s3tests/functional/test_s3.py::test_lifecycle_get \ - s3tests/functional/test_s3.py::test_lifecycle_set_filter - kill -9 $pid || true - # Clean up data directory - rm -rf "$WEED_DATA_DIR" || true - - versioning-tests: - name: S3 Versioning & Object Lock tests - runs-on: ubuntu-22.04 - timeout-minutes: 15 - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.9' - - - name: Clone s3-tests - run: | - git clone https://github.com/ceph/s3-tests.git - cd s3-tests - pip install -r requirements.txt - pip install tox - pip install -e . - - - name: Fix S3 tests bucket creation conflicts - run: | - python3 test/s3/fix_s3_tests_bucket_conflicts.py - env: - S3_TESTS_PATH: s3-tests - - - name: Run S3 Object Lock, Retention, and Versioning tests - timeout-minutes: 15 - shell: bash - run: | - cd weed - go install -buildvcs=false - set -x - # Create clean data directory for this test run - export WEED_DATA_DIR="/tmp/seaweedfs-objectlock-versioning-$(date +%s)" - mkdir -p "$WEED_DATA_DIR" - - # Verify S3 config file exists - echo "Checking S3 config file: $GITHUB_WORKSPACE/docker/compose/s3.json" - ls -la "$GITHUB_WORKSPACE/docker/compose/s3.json" - weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ - -volume.max=100 -volume.preStopSeconds=1 \ - -master.port=9334 -volume.port=8081 -filer.port=8889 -s3.port=8001 -metricsPort=9325 \ - -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config="$GITHUB_WORKSPACE/docker/compose/s3.json" & - pid=$! - - # Wait for all SeaweedFS components to be ready - echo "Waiting for SeaweedFS components to start..." - for i in {1..30}; do - if curl -s http://localhost:9334/cluster/status > /dev/null 2>&1; then - echo "Master server is ready" - break - fi - echo "Waiting for master server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8081/status > /dev/null 2>&1; then - echo "Volume server is ready" - break - fi - echo "Waiting for volume server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8889/ > /dev/null 2>&1; then - echo "Filer is ready" - break - fi - echo "Waiting for filer... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8001/ > /dev/null 2>&1; then - echo "S3 server is ready" - break - fi - echo "Waiting for S3 server... ($i/30)" - sleep 2 - done - - echo "All SeaweedFS components are ready!" - cd ../s3-tests - sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests/functional/test_s3.py - # Create and update s3tests.conf to use port 8001 - cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-versioning.conf - sed -i 's/port = 8000/port = 8001/g' ../docker/compose/s3tests-versioning.conf - sed -i 's/:8000/:8001/g' ../docker/compose/s3tests-versioning.conf - sed -i 's/localhost:8000/localhost:8001/g' ../docker/compose/s3tests-versioning.conf - sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8001/g' ../docker/compose/s3tests-versioning.conf - # Use the configured bucket prefix from config and do not override with unique prefixes - # This avoids mismatch in tests that rely on a fixed provided name - export S3TEST_CONF=../docker/compose/s3tests-versioning.conf - - # Debug: Show the config file contents - echo "=== S3 Config File Contents ===" - cat ../docker/compose/s3tests-versioning.conf - echo "=== End Config ===" - - # Additional wait for S3-Filer integration to be fully ready - echo "Waiting additional 10 seconds for S3-Filer integration..." - sleep 10 - - # Test S3 connection before running tests - echo "Testing S3 connection..." - for i in {1..10}; do - if curl -s -f http://localhost:8001/ > /dev/null 2>&1; then - echo "S3 connection test successful" - break - fi - echo "S3 connection test failed, retrying... ($i/10)" - sleep 2 - done - - # Force cleanup any existing buckets to avoid conflicts - echo "Cleaning up any existing buckets..." - python3 -c " - import boto3 - from botocore.exceptions import ClientError - try: - s3 = boto3.client('s3', - endpoint_url='http://localhost:8001', - aws_access_key_id='0555b35654ad1656d804', - aws_secret_access_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==') - buckets = s3.list_buckets()['Buckets'] - for bucket in buckets: - bucket_name = bucket['Name'] - print(f'Deleting bucket: {bucket_name}') - try: - # Delete all objects first - objects = s3.list_objects_v2(Bucket=bucket_name) - if 'Contents' in objects: - for obj in objects['Contents']: - s3.delete_object(Bucket=bucket_name, Key=obj['Key']) - # Delete all versions if versioning enabled - versions = s3.list_object_versions(Bucket=bucket_name) - if 'Versions' in versions: - for version in versions['Versions']: - s3.delete_object(Bucket=bucket_name, Key=version['Key'], VersionId=version['VersionId']) - if 'DeleteMarkers' in versions: - for marker in versions['DeleteMarkers']: - s3.delete_object(Bucket=bucket_name, Key=marker['Key'], VersionId=marker['VersionId']) - # Delete bucket - s3.delete_bucket(Bucket=bucket_name) - except ClientError as e: - print(f'Error deleting bucket {bucket_name}: {e}') - except Exception as e: - print(f'Cleanup failed: {e}') - " || echo "Cleanup completed with some errors (expected)" - - # Run versioning and object lock tests once (avoid duplicates) - tox -- s3tests/functional/test_s3.py -k "object_lock or versioning" --tb=short - kill -9 $pid || true - # Clean up data directory - rm -rf "$WEED_DATA_DIR" || true - - cors-tests: - name: S3 CORS tests - runs-on: ubuntu-22.04 - timeout-minutes: 10 - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.9' - - - name: Clone s3-tests - run: | - git clone https://github.com/ceph/s3-tests.git - cd s3-tests - pip install -r requirements.txt - pip install tox - pip install -e . - - - name: Run S3 CORS tests - timeout-minutes: 10 - shell: bash - run: | - cd weed - go install -buildvcs=false - set -x - # Create clean data directory for this test run - export WEED_DATA_DIR="/tmp/seaweedfs-cors-test-$(date +%s)" - mkdir -p "$WEED_DATA_DIR" - weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ - -volume.max=100 -volume.preStopSeconds=1 \ - -master.port=9335 -volume.port=8082 -filer.port=8890 -s3.port=8002 -metricsPort=9326 \ - -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config="$GITHUB_WORKSPACE/docker/compose/s3.json" & - pid=$! - - # Wait for all SeaweedFS components to be ready - echo "Waiting for SeaweedFS components to start..." - for i in {1..30}; do - if curl -s http://localhost:9335/cluster/status > /dev/null 2>&1; then - echo "Master server is ready" - break - fi - echo "Waiting for master server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8082/status > /dev/null 2>&1; then - echo "Volume server is ready" - break - fi - echo "Waiting for volume server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8890/ > /dev/null 2>&1; then - echo "Filer is ready" - break - fi - echo "Waiting for filer... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8002/ > /dev/null 2>&1; then - echo "S3 server is ready" - break - fi - echo "Waiting for S3 server... ($i/30)" - sleep 2 - done - - echo "All SeaweedFS components are ready!" - cd ../s3-tests - sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests/functional/test_s3.py - # Create and update s3tests.conf to use port 8002 - cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-cors.conf - sed -i 's/port = 8000/port = 8002/g' ../docker/compose/s3tests-cors.conf - sed -i 's/:8000/:8002/g' ../docker/compose/s3tests-cors.conf - sed -i 's/localhost:8000/localhost:8002/g' ../docker/compose/s3tests-cors.conf - sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8002/g' ../docker/compose/s3tests-cors.conf - export S3TEST_CONF=../docker/compose/s3tests-cors.conf - - # Debug: Show the config file contents - echo "=== S3 Config File Contents ===" - cat ../docker/compose/s3tests-cors.conf - echo "=== End Config ===" - - # Additional wait for S3-Filer integration to be fully ready - echo "Waiting additional 10 seconds for S3-Filer integration..." - sleep 10 - - # Test S3 connection before running tests - echo "Testing S3 connection..." - for i in {1..10}; do - if curl -s -f http://localhost:8002/ > /dev/null 2>&1; then - echo "S3 connection test successful" - break - fi - echo "S3 connection test failed, retrying... ($i/10)" - sleep 2 - done - # Run CORS-specific tests from s3-tests suite - tox -- s3tests/functional/test_s3.py -k "cors" --tb=short || echo "No CORS tests found in s3-tests suite" - # If no specific CORS tests exist, run bucket configuration tests that include CORS - tox -- s3tests/functional/test_s3.py::test_put_bucket_cors || echo "No put_bucket_cors test found" - tox -- s3tests/functional/test_s3.py::test_get_bucket_cors || echo "No get_bucket_cors test found" - tox -- s3tests/functional/test_s3.py::test_delete_bucket_cors || echo "No delete_bucket_cors test found" - kill -9 $pid || true - # Clean up data directory - rm -rf "$WEED_DATA_DIR" || true - - copy-tests: - name: SeaweedFS Custom S3 Copy tests - runs-on: ubuntu-22.04 - timeout-minutes: 10 - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Run SeaweedFS Custom S3 Copy tests - timeout-minutes: 10 - shell: bash - run: | - cd weed - go install -buildvcs=false - # Create clean data directory for this test run - export WEED_DATA_DIR="/tmp/seaweedfs-copy-test-$(date +%s)" - mkdir -p "$WEED_DATA_DIR" - set -x - weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ - -volume.max=100 -volume.preStopSeconds=1 \ - -master.port=9336 -volume.port=8083 -filer.port=8891 -s3.port=8003 -metricsPort=9327 \ - -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config="$GITHUB_WORKSPACE/docker/compose/s3.json" & - pid=$! - - # Wait for all SeaweedFS components to be ready - echo "Waiting for SeaweedFS components to start..." - for i in {1..30}; do - if curl -s http://localhost:9336/cluster/status > /dev/null 2>&1; then - echo "Master server is ready" - break - fi - echo "Waiting for master server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8083/status > /dev/null 2>&1; then - echo "Volume server is ready" - break - fi - echo "Waiting for volume server... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8891/ > /dev/null 2>&1; then - echo "Filer is ready" - break - fi - echo "Waiting for filer... ($i/30)" - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8003/ > /dev/null 2>&1; then - echo "S3 server is ready" - break - fi - echo "Waiting for S3 server... ($i/30)" - sleep 2 - done - - echo "All SeaweedFS components are ready!" - cd ../test/s3/copying - # Patch Go tests to use the correct S3 endpoint (port 8003) - sed -i 's/http:\/\/127\.0\.0\.1:8000/http:\/\/127.0.0.1:8003/g' s3_copying_test.go - - # Debug: Show what endpoint the Go tests will use - echo "=== Go Test Configuration ===" - grep -n "127.0.0.1" s3_copying_test.go || echo "No IP configuration found" - echo "=== End Configuration ===" - - # Additional wait for S3-Filer integration to be fully ready - echo "Waiting additional 10 seconds for S3-Filer integration..." - sleep 10 - - # Test S3 connection before running tests - echo "Testing S3 connection..." - for i in {1..10}; do - if curl -s -f http://localhost:8003/ > /dev/null 2>&1; then - echo "S3 connection test successful" - break - fi - echo "S3 connection test failed, retrying... ($i/10)" - sleep 2 - done - - go test -v - kill -9 $pid || true - # Clean up data directory - rm -rf "$WEED_DATA_DIR" || true - - sql-store-tests: - name: Basic S3 tests (SQL store) - runs-on: ubuntu-22.04 - timeout-minutes: 15 - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v5 - - - name: Set up Go 1.x - uses: actions/setup-go@v6 - with: - go-version-file: 'go.mod' - id: go - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: '3.9' - - - name: Clone s3-tests - run: | - git clone https://github.com/ceph/s3-tests.git - cd s3-tests - pip install -r requirements.txt - pip install tox - pip install -e . - - - name: Run Ceph S3 tests with SQL store - timeout-minutes: 15 - shell: bash - run: | - cd weed - - # Debug: Check for port conflicts before starting - echo "=== Pre-start Port Check ===" - netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" || echo "Ports are free" - - # Kill any existing weed processes that might interfere - echo "=== Cleanup existing processes ===" - pkill -f weed || echo "No weed processes found" - - # More aggressive port cleanup using multiple methods - for port in 9337 8085 8892 8004 9328; do - echo "Cleaning port $port..." - - # Method 1: lsof - pid=$(lsof -ti :$port 2>/dev/null || echo "") - if [ -n "$pid" ]; then - echo "Found process $pid using port $port (via lsof)" - kill -9 $pid 2>/dev/null || echo "Failed to kill $pid" - fi - - # Method 2: netstat + ps (for cases where lsof fails) - netstat_pids=$(netstat -tlnp 2>/dev/null | grep ":$port " | awk '{print $7}' | cut -d'/' -f1 | grep -v '^-$' || echo "") - for npid in $netstat_pids; do - if [ -n "$npid" ] && [ "$npid" != "-" ]; then - echo "Found process $npid using port $port (via netstat)" - kill -9 $npid 2>/dev/null || echo "Failed to kill $npid" - fi - done - - # Method 3: fuser (if available) - if command -v fuser >/dev/null 2>&1; then - fuser -k ${port}/tcp 2>/dev/null || echo "No process found via fuser for port $port" - fi - - sleep 1 - done - - # Wait for ports to be released - sleep 5 - - echo "=== Post-cleanup Port Check ===" - netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" || echo "All ports are now free" - - # If any ports are still in use, fail fast - if netstat -tulpn | grep -E "(9337|8085|8892|8004|9328)" >/dev/null 2>&1; then - echo "โŒ ERROR: Some ports are still in use after aggressive cleanup!" - echo "=== Detailed Port Analysis ===" - for port in 9337 8085 8892 8004 9328; do - echo "Port $port:" - netstat -tlnp 2>/dev/null | grep ":$port " || echo " Not in use" - lsof -i :$port 2>/dev/null || echo " No lsof info" - done - exit 1 - fi - - go install -tags "sqlite" -buildvcs=false - # Create clean data directory for this test run with unique timestamp and process ID - export WEED_DATA_DIR="/tmp/seaweedfs-sql-test-$(date +%s)-$$" - mkdir -p "$WEED_DATA_DIR" - chmod 777 "$WEED_DATA_DIR" - - # SQLite-specific configuration - export WEED_LEVELDB2_ENABLED="false" - export WEED_SQLITE_ENABLED="true" - export WEED_SQLITE_DBFILE="$WEED_DATA_DIR/filer.db" - - echo "=== SQL Store Configuration ===" - echo "Data Dir: $WEED_DATA_DIR" - echo "SQLite DB: $WEED_SQLITE_DBFILE" - echo "LEVELDB2_ENABLED: $WEED_LEVELDB2_ENABLED" - echo "SQLITE_ENABLED: $WEED_SQLITE_ENABLED" - - set -x - weed -v 1 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=100 \ - -volume.max=100 -volume.preStopSeconds=1 \ - -master.port=9337 -volume.port=8085 -filer.port=8892 -s3.port=8004 -metricsPort=9328 \ - -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config="$GITHUB_WORKSPACE/docker/compose/s3.json" \ - > /tmp/seaweedfs-sql-server.log 2>&1 & - pid=$! - - echo "=== Server started with PID: $pid ===" - - # Wait for all SeaweedFS components to be ready - echo "Waiting for SeaweedFS components to start..." - - # Check if server process is still alive before waiting - if ! kill -0 $pid 2>/dev/null; then - echo "โŒ Server process died immediately after start" - echo "=== Immediate Log Check ===" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null || echo "No log available" - exit 1 - fi - - sleep 5 # Give SQLite more time to initialize - - for i in {1..30}; do - if curl -s http://localhost:9337/cluster/status > /dev/null 2>&1; then - echo "Master server is ready" - break - fi - echo "Waiting for master server... ($i/30)" - # Check if server process is still alive - if ! kill -0 $pid 2>/dev/null; then - echo "โŒ Server process died while waiting for master" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null - exit 1 - fi - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8085/status > /dev/null 2>&1; then - echo "Volume server is ready" - break - fi - echo "Waiting for volume server... ($i/30)" - if ! kill -0 $pid 2>/dev/null; then - echo "โŒ Server process died while waiting for volume" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null - exit 1 - fi - sleep 2 - done - - for i in {1..30}; do - if curl -s http://localhost:8892/ > /dev/null 2>&1; then - echo "Filer (SQLite) is ready" - break - fi - echo "Waiting for filer (SQLite)... ($i/30)" - if ! kill -0 $pid 2>/dev/null; then - echo "โŒ Server process died while waiting for filer" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null - exit 1 - fi - sleep 2 - done - - # Extra wait for SQLite filer to fully initialize - echo "Giving SQLite filer extra time to initialize..." - sleep 5 - - for i in {1..30}; do - if curl -s http://localhost:8004/ > /dev/null 2>&1; then - echo "S3 server is ready" - break - fi - echo "Waiting for S3 server... ($i/30)" - if ! kill -0 $pid 2>/dev/null; then - echo "โŒ Server process died while waiting for S3" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null - exit 1 - fi - sleep 2 - done - - echo "All SeaweedFS components are ready!" - cd ../s3-tests - sed -i "s/assert prefixes == \['foo%2B1\/', 'foo\/', 'quux%20ab\/'\]/assert prefixes == \['foo\/', 'foo%2B1\/', 'quux%20ab\/'\]/" s3tests/functional/test_s3.py - # Create and update s3tests.conf to use port 8004 - cp ../docker/compose/s3tests.conf ../docker/compose/s3tests-sql.conf - sed -i 's/port = 8000/port = 8004/g' ../docker/compose/s3tests-sql.conf - sed -i 's/:8000/:8004/g' ../docker/compose/s3tests-sql.conf - sed -i 's/localhost:8000/localhost:8004/g' ../docker/compose/s3tests-sql.conf - sed -i 's/127\.0\.0\.1:8000/127.0.0.1:8004/g' ../docker/compose/s3tests-sql.conf - export S3TEST_CONF=../docker/compose/s3tests-sql.conf - - # Debug: Show the config file contents - echo "=== S3 Config File Contents ===" - cat ../docker/compose/s3tests-sql.conf - echo "=== End Config ===" - - # Additional wait for S3-Filer integration to be fully ready - echo "Waiting additional 10 seconds for S3-Filer integration..." - sleep 10 - - # Test S3 connection before running tests - echo "Testing S3 connection..." - - # Debug: Check if SeaweedFS processes are running - echo "=== Process Status ===" - ps aux | grep -E "(weed|seaweedfs)" | grep -v grep || echo "No SeaweedFS processes found" - - # Debug: Check port status - echo "=== Port Status ===" - netstat -tulpn | grep -E "(8004|9337|8085|8892)" || echo "Ports not found" - - # Debug: Check server logs - echo "=== Recent Server Logs ===" - echo "--- SQL Server Log ---" - tail -20 /tmp/seaweedfs-sql-server.log 2>/dev/null || echo "No SQL server log found" - echo "--- Other Logs ---" - ls -la /tmp/seaweedfs-*.log 2>/dev/null || echo "No other log files found" - - for i in {1..10}; do - if curl -s -f http://localhost:8004/ > /dev/null 2>&1; then - echo "S3 connection test successful" - break - fi - echo "S3 connection test failed, retrying... ($i/10)" - - # Debug: Try different HTTP methods - echo "Debug: Testing different endpoints..." - curl -s -I http://localhost:8004/ || echo "HEAD request failed" - curl -s http://localhost:8004/status || echo "Status endpoint failed" - - sleep 2 - done - tox -- \ - s3tests/functional/test_s3.py::test_bucket_list_empty \ - s3tests/functional/test_s3.py::test_bucket_list_distinct \ - s3tests/functional/test_s3.py::test_bucket_list_many \ - s3tests/functional/test_s3.py::test_bucket_listv2_many \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_encoding_basic \ - s3tests/functional/test_s3.py::test_bucket_list_encoding_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_ends_with_delimiter \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix_ends_with_delimiter \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_prefix_underscore \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_prefix_underscore \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_percentage \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_percentage \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_whitespace \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_whitespace \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_dot \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_dot \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_empty \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_none \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_delimiter_not_skip_special \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_basic \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_alt \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_notempty \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_defaultempty \ - s3tests/functional/test_s3.py::test_bucket_listv2_fetchowner_empty \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_basic \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_basic \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_alt \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_alt \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_empty \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_none \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_not_exist \ - s3tests/functional/test_s3.py::test_bucket_list_prefix_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_prefix_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_one \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_one \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_zero \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_zero \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_none \ - s3tests/functional/test_s3.py::test_bucket_listv2_maxkeys_none \ - s3tests/functional/test_s3.py::test_bucket_list_unordered \ - s3tests/functional/test_s3.py::test_bucket_listv2_unordered \ - s3tests/functional/test_s3.py::test_bucket_list_maxkeys_invalid \ - s3tests/functional/test_s3.py::test_bucket_list_marker_none \ - s3tests/functional/test_s3.py::test_bucket_list_marker_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_continuationtoken_empty \ - s3tests/functional/test_s3.py::test_bucket_listv2_continuationtoken \ - s3tests/functional/test_s3.py::test_bucket_listv2_both_continuationtoken_startafter \ - s3tests/functional/test_s3.py::test_bucket_list_marker_unreadable \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_unreadable \ - s3tests/functional/test_s3.py::test_bucket_list_marker_not_in_list \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_not_in_list \ - s3tests/functional/test_s3.py::test_bucket_list_marker_after_list \ - s3tests/functional/test_s3.py::test_bucket_listv2_startafter_after_list \ - s3tests/functional/test_s3.py::test_bucket_list_return_data \ - s3tests/functional/test_s3.py::test_bucket_list_objects_anonymous \ - s3tests/functional/test_s3.py::test_bucket_listv2_objects_anonymous \ - s3tests/functional/test_s3.py::test_bucket_list_objects_anonymous_fail \ - s3tests/functional/test_s3.py::test_bucket_listv2_objects_anonymous_fail \ - s3tests/functional/test_s3.py::test_bucket_list_long_name \ - s3tests/functional/test_s3.py::test_bucket_list_special_prefix \ - s3tests/functional/test_s3.py::test_bucket_delete_notexist \ - s3tests/functional/test_s3.py::test_bucket_create_delete \ - s3tests/functional/test_s3.py::test_object_read_not_exist \ - s3tests/functional/test_s3.py::test_multi_object_delete \ - s3tests/functional/test_s3.py::test_multi_objectv2_delete \ - s3tests/functional/test_s3.py::test_object_head_zero_bytes \ - s3tests/functional/test_s3.py::test_object_write_check_etag \ - s3tests/functional/test_s3.py::test_object_write_cache_control \ - s3tests/functional/test_s3.py::test_object_write_expires \ - s3tests/functional/test_s3.py::test_object_write_read_update_read_delete \ - s3tests/functional/test_s3.py::test_object_metadata_replaced_on_put \ - s3tests/functional/test_s3.py::test_object_write_file \ - s3tests/functional/test_s3.py::test_post_object_invalid_date_format \ - s3tests/functional/test_s3.py::test_post_object_no_key_specified \ - s3tests/functional/test_s3.py::test_post_object_missing_signature \ - s3tests/functional/test_s3.py::test_post_object_condition_is_case_sensitive \ - s3tests/functional/test_s3.py::test_post_object_expires_is_case_sensitive \ - s3tests/functional/test_s3.py::test_post_object_missing_expires_condition \ - s3tests/functional/test_s3.py::test_post_object_missing_conditions_list \ - s3tests/functional/test_s3.py::test_post_object_upload_size_limit_exceeded \ - s3tests/functional/test_s3.py::test_post_object_missing_content_length_argument \ - s3tests/functional/test_s3.py::test_post_object_invalid_content_length_argument \ - s3tests/functional/test_s3.py::test_post_object_upload_size_below_minimum \ - s3tests/functional/test_s3.py::test_post_object_empty_conditions \ - s3tests/functional/test_s3.py::test_get_object_ifmatch_good \ - s3tests/functional/test_s3.py::test_get_object_ifnonematch_good \ - s3tests/functional/test_s3.py::test_get_object_ifmatch_failed \ - s3tests/functional/test_s3.py::test_get_object_ifnonematch_failed \ - s3tests/functional/test_s3.py::test_get_object_ifmodifiedsince_good \ - s3tests/functional/test_s3.py::test_get_object_ifmodifiedsince_failed \ - s3tests/functional/test_s3.py::test_get_object_ifunmodifiedsince_failed \ - s3tests/functional/test_s3.py::test_bucket_head \ - s3tests/functional/test_s3.py::test_bucket_head_notexist \ - s3tests/functional/test_s3.py::test_object_raw_authenticated \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_bucket_acl \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_object_acl \ - s3tests/functional/test_s3.py::test_object_raw_authenticated_object_gone \ - s3tests/functional/test_s3.py::test_object_raw_get_x_amz_expires_out_range_zero \ - s3tests/functional/test_s3.py::test_object_anon_put \ - s3tests/functional/test_s3.py::test_object_put_authenticated \ - s3tests/functional/test_s3.py::test_bucket_recreate_overwrite_acl \ - s3tests/functional/test_s3.py::test_bucket_recreate_new_acl \ - s3tests/functional/test_s3.py::test_buckets_create_then_list \ - s3tests/functional/test_s3.py::test_buckets_list_ctime \ - s3tests/functional/test_s3.py::test_list_buckets_invalid_auth \ - s3tests/functional/test_s3.py::test_list_buckets_bad_auth \ - s3tests/functional/test_s3.py::test_bucket_create_naming_good_contains_period \ - s3tests/functional/test_s3.py::test_bucket_create_naming_good_contains_hyphen \ - s3tests/functional/test_s3.py::test_bucket_list_special_prefix \ - s3tests/functional/test_s3.py::test_object_copy_zero_size \ - s3tests/functional/test_s3.py::test_object_copy_same_bucket \ - s3tests/functional/test_s3.py::test_object_copy_to_itself \ - s3tests/functional/test_s3.py::test_object_copy_diff_bucket \ - s3tests/functional/test_s3.py::test_object_copy_canned_acl \ - s3tests/functional/test_s3.py::test_object_copy_bucket_not_found \ - s3tests/functional/test_s3.py::test_object_copy_key_not_found \ - s3tests/functional/test_s3.py::test_multipart_copy_small \ - s3tests/functional/test_s3.py::test_multipart_copy_without_range \ - s3tests/functional/test_s3.py::test_multipart_copy_special_names \ - s3tests/functional/test_s3.py::test_multipart_copy_multiple_sizes \ - s3tests/functional/test_s3.py::test_multipart_get_part \ - s3tests/functional/test_s3.py::test_multipart_upload \ - s3tests/functional/test_s3.py::test_multipart_upload_empty \ - s3tests/functional/test_s3.py::test_multipart_upload_multiple_sizes \ - s3tests/functional/test_s3.py::test_multipart_upload_contents \ - s3tests/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \ - s3tests/functional/test_s3.py::test_multipart_upload_size_too_small \ - s3tests/functional/test_s3.py::test_multipart_resend_first_finishes_last \ - s3tests/functional/test_s3.py::test_multipart_upload_resend_part \ - s3tests/functional/test_s3.py::test_multipart_upload_missing_part \ - s3tests/functional/test_s3.py::test_multipart_upload_incorrect_etag \ - s3tests/functional/test_s3.py::test_abort_multipart_upload \ - s3tests/functional/test_s3.py::test_list_multipart_upload \ - s3tests/functional/test_s3.py::test_atomic_read_1mb \ - s3tests/functional/test_s3.py::test_atomic_read_4mb \ - s3tests/functional/test_s3.py::test_atomic_read_8mb \ - s3tests/functional/test_s3.py::test_atomic_write_1mb \ - s3tests/functional/test_s3.py::test_atomic_write_4mb \ - s3tests/functional/test_s3.py::test_atomic_write_8mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_1mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_4mb \ - s3tests/functional/test_s3.py::test_atomic_dual_write_8mb \ - s3tests/functional/test_s3.py::test_atomic_multipart_upload_write \ - s3tests/functional/test_s3.py::test_ranged_request_response_code \ - s3tests/functional/test_s3.py::test_ranged_big_request_response_code \ - s3tests/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \ - s3tests/functional/test_s3.py::test_ranged_request_return_trailing_bytes_response_code \ - s3tests/functional/test_s3.py::test_copy_object_ifmatch_good \ - s3tests/functional/test_s3.py::test_copy_object_ifnonematch_failed \ - s3tests/functional/test_s3.py::test_copy_object_ifmatch_failed \ - s3tests/functional/test_s3.py::test_copy_object_ifnonematch_good \ - s3tests/functional/test_s3.py::test_lifecycle_set \ - s3tests/functional/test_s3.py::test_lifecycle_get \ - s3tests/functional/test_s3.py::test_lifecycle_set_filter - kill -9 $pid || true - # Clean up data directory - rm -rf "$WEED_DATA_DIR" || true - - diff --git a/.github/workflows/test-s3-over-https-using-awscli.yml b/.github/workflows/test-s3-over-https-using-awscli.yml deleted file mode 100644 index f09d1c1aa..000000000 --- a/.github/workflows/test-s3-over-https-using-awscli.yml +++ /dev/null @@ -1,79 +0,0 @@ -name: "test s3 over https using aws-cli" - -on: - push: - branches: [master, test-https-s3-awscli] - pull_request: - branches: [master, test-https-s3-awscli] - -env: - AWS_ACCESS_KEY_ID: some_access_key1 - AWS_SECRET_ACCESS_KEY: some_secret_key1 - AWS_ENDPOINT_URL: https://localhost:8443 - -defaults: - run: - working-directory: weed - -jobs: - awscli-tests: - runs-on: ubuntu-latest - timeout-minutes: 5 - steps: - - uses: actions/checkout@v5 - - - uses: actions/setup-go@v6 - with: - go-version: ^1.24 - - - name: Build SeaweedFS - run: | - go build - - - name: Start SeaweedFS - run: | - set -e - mkdir -p /tmp/data - ./weed server -s3 -dir=/tmp/data -s3.config=../docker/compose/s3.json & - until curl -s http://localhost:8333/ > /dev/null; do sleep 1; done - - - name: Setup Caddy - run: | - curl -fsSL "https://caddyserver.com/api/download?os=linux&arch=amd64" -o caddy - chmod +x caddy - ./caddy version - echo "{ - auto_https disable_redirects - local_certs - } - localhost:8443 { - tls internal - reverse_proxy localhost:8333 - }" > Caddyfile - - - name: Start Caddy - run: | - ./caddy start - until curl -fsS --insecure https://localhost:8443 > /dev/null; do sleep 1; done - - - name: Create Bucket - run: | - aws --no-verify-ssl s3api create-bucket --bucket bucket - - - name: Test PutObject - run: | - set -e - dd if=/dev/urandom of=generated bs=1M count=2 - aws --no-verify-ssl s3api put-object --bucket bucket --key test-putobject --body generated - aws --no-verify-ssl s3api get-object --bucket bucket --key test-putobject downloaded - diff -q generated downloaded - rm -f generated downloaded - - - name: Test Multi-part Upload - run: | - set -e - dd if=/dev/urandom of=generated bs=1M count=32 - aws --no-verify-ssl s3 cp --no-progress generated s3://bucket/test-multipart - aws --no-verify-ssl s3 cp --no-progress s3://bucket/test-multipart downloaded - diff -q generated downloaded - rm -f generated downloaded diff --git a/.gitignore b/.gitignore index cd240ab6d..25a58bc67 100644 --- a/.gitignore +++ b/.gitignore @@ -87,40 +87,3 @@ other/java/hdfs/dependency-reduced-pom.xml # binary file weed/weed -docker/weed - -# test generated files -weed/*/*.jpg -docker/weed_sub -docker/weed_pub -weed/mq/schema/example.parquet -docker/agent_sub_record -test/mq/bin/consumer -test/mq/bin/producer -test/producer -bin/weed -weed_binary -/test/s3/copying/filerldb2 -/filerldb2 -/test/s3/retention/test-volume-data -test/s3/cors/weed-test.log -test/s3/cors/weed-server.pid -/test/s3/cors/test-volume-data -test/s3/cors/cors.test -/test/s3/retention/filerldb2 -test/s3/retention/weed-server.pid -test/s3/retention/weed-test.log -/test/s3/versioning/test-volume-data -test/s3/versioning/weed-test.log -/docker/admin_integration/data -docker/agent_pub_record -docker/admin_integration/weed-local -/seaweedfs-rdma-sidecar/bin -/test/s3/encryption/filerldb2 -/test/s3/sse/filerldb2 -test/s3/sse/weed-test.log -ADVANCED_IAM_DEVELOPMENT_PLAN.md -/test/s3/iam/test-volume-data -*.log -weed-iam -test/kafka/kafka-client-loadtest/weed-linux-arm64 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index c561b2fa2..000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -- Using welcoming and inclusive language -- Being respectful of differing viewpoints and experiences -- Gracefully accepting constructive criticism -- Focusing on what is best for the community -- Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -- The use of sexualized language or imagery and unwelcome sexual attention or - advances -- Trolling, insulting/derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others' private information, such as a physical or electronic - address, without explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/DESIGN.md b/DESIGN.md deleted file mode 100644 index d164467c3..000000000 --- a/DESIGN.md +++ /dev/null @@ -1,413 +0,0 @@ -# SeaweedFS Task Distribution System Design - -## Overview - -This document describes the design of a distributed task management system for SeaweedFS that handles Erasure Coding (EC) and vacuum operations through a scalable admin server and worker process architecture. - -## System Architecture - -### High-Level Components - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Master โ”‚โ—„โ”€โ”€โ–บโ”‚ Admin Server โ”‚โ—„โ”€โ”€โ–บโ”‚ Workers โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ”‚ - Volume Info โ”‚ โ”‚ - Task Discovery โ”‚ โ”‚ - Task Exec โ”‚ -โ”‚ - Shard Status โ”‚ โ”‚ - Task Assign โ”‚ โ”‚ - Progress โ”‚ -โ”‚ - Heartbeats โ”‚ โ”‚ - Progress Track โ”‚ โ”‚ - Error Report โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ โ”‚ - โ”‚ โ”‚ โ”‚ - โ–ผ โ–ผ โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Volume Servers โ”‚ โ”‚ Volume Monitor โ”‚ โ”‚ Task Execution โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ”‚ - Store Volumes โ”‚ โ”‚ - Health Check โ”‚ โ”‚ - EC Convert โ”‚ -โ”‚ - EC Shards โ”‚ โ”‚ - Usage Stats โ”‚ โ”‚ - Vacuum Clean โ”‚ -โ”‚ - Report Status โ”‚ โ”‚ - State Sync โ”‚ โ”‚ - Status Report โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## 1. Admin Server Design - -### 1.1 Core Responsibilities - -- **Task Discovery**: Scan volumes to identify EC and vacuum candidates -- **Worker Management**: Track available workers and their capabilities -- **Task Assignment**: Match tasks to optimal workers -- **Progress Tracking**: Monitor in-progress tasks for capacity planning -- **State Reconciliation**: Sync with master server for volume state updates - -### 1.2 Task Discovery Engine - -```go -type TaskDiscoveryEngine struct { - masterClient MasterClient - volumeScanner VolumeScanner - taskDetectors map[TaskType]TaskDetector - scanInterval time.Duration -} - -type VolumeCandidate struct { - VolumeID uint32 - Server string - Collection string - TaskType TaskType - Priority TaskPriority - Reason string - DetectedAt time.Time - Parameters map[string]interface{} -} -``` - -**EC Detection Logic**: -- Find volumes >= 95% full and idle for > 1 hour -- Exclude volumes already in EC format -- Exclude volumes with ongoing operations -- Prioritize by collection and age - -**Vacuum Detection Logic**: -- Find volumes with garbage ratio > 30% -- Exclude read-only volumes -- Exclude volumes with recent vacuum operations -- Prioritize by garbage percentage - -### 1.3 Worker Registry & Management - -```go -type WorkerRegistry struct { - workers map[string]*Worker - capabilities map[TaskType][]*Worker - lastHeartbeat map[string]time.Time - taskAssignment map[string]*Task - mutex sync.RWMutex -} - -type Worker struct { - ID string - Address string - Capabilities []TaskType - MaxConcurrent int - CurrentLoad int - Status WorkerStatus - LastSeen time.Time - Performance WorkerMetrics -} -``` - -### 1.4 Task Assignment Algorithm - -```go -type TaskScheduler struct { - registry *WorkerRegistry - taskQueue *PriorityQueue - inProgressTasks map[string]*InProgressTask - volumeReservations map[uint32]*VolumeReservation -} - -// Worker Selection Criteria: -// 1. Has required capability (EC or Vacuum) -// 2. Available capacity (CurrentLoad < MaxConcurrent) -// 3. Best performance history for task type -// 4. Lowest current load -// 5. Geographically close to volume server (optional) -``` - -## 2. Worker Process Design - -### 2.1 Worker Architecture - -```go -type MaintenanceWorker struct { - id string - config *WorkerConfig - adminClient AdminClient - taskExecutors map[TaskType]TaskExecutor - currentTasks map[string]*RunningTask - registry *TaskRegistry - heartbeatTicker *time.Ticker - requestTicker *time.Ticker -} -``` - -### 2.2 Task Execution Framework - -```go -type TaskExecutor interface { - Execute(ctx context.Context, task *Task) error - EstimateTime(task *Task) time.Duration - ValidateResources(task *Task) error - GetProgress() float64 - Cancel() error -} - -type ErasureCodingExecutor struct { - volumeClient VolumeServerClient - progress float64 - cancelled bool -} - -type VacuumExecutor struct { - volumeClient VolumeServerClient - progress float64 - cancelled bool -} -``` - -### 2.3 Worker Capabilities & Registration - -```go -type WorkerCapabilities struct { - SupportedTasks []TaskType - MaxConcurrent int - ResourceLimits ResourceLimits - PreferredServers []string // Affinity for specific volume servers -} - -type ResourceLimits struct { - MaxMemoryMB int64 - MaxDiskSpaceMB int64 - MaxNetworkMbps int64 - MaxCPUPercent float64 -} -``` - -## 3. Task Lifecycle Management - -### 3.1 Task States - -```go -type TaskState string - -const ( - TaskStatePending TaskState = "pending" - TaskStateAssigned TaskState = "assigned" - TaskStateInProgress TaskState = "in_progress" - TaskStateCompleted TaskState = "completed" - TaskStateFailed TaskState = "failed" - TaskStateCancelled TaskState = "cancelled" - TaskStateStuck TaskState = "stuck" // Taking too long - TaskStateDuplicate TaskState = "duplicate" // Detected duplicate -) -``` - -### 3.2 Progress Tracking & Monitoring - -```go -type InProgressTask struct { - Task *Task - WorkerID string - StartedAt time.Time - LastUpdate time.Time - Progress float64 - EstimatedEnd time.Time - VolumeReserved bool // Reserved for capacity planning -} - -type TaskMonitor struct { - inProgressTasks map[string]*InProgressTask - timeoutChecker *time.Ticker - stuckDetector *time.Ticker - duplicateChecker *time.Ticker -} -``` - -## 4. Volume Capacity Reconciliation - -### 4.1 Volume State Tracking - -```go -type VolumeStateManager struct { - masterClient MasterClient - inProgressTasks map[uint32]*InProgressTask // VolumeID -> Task - committedChanges map[uint32]*VolumeChange // Changes not yet in master - reconcileInterval time.Duration -} - -type VolumeChange struct { - VolumeID uint32 - ChangeType ChangeType // "ec_encoding", "vacuum_completed" - OldCapacity int64 - NewCapacity int64 - TaskID string - CompletedAt time.Time - ReportedToMaster bool -} -``` - -### 4.2 Shard Assignment Integration - -When the master needs to assign shards, it must consider: -1. **Current volume state** from its own records -2. **In-progress capacity changes** from admin server -3. **Committed but unreported changes** from admin server - -```go -type CapacityOracle struct { - adminServer AdminServerClient - masterState *MasterVolumeState - updateFreq time.Duration -} - -func (o *CapacityOracle) GetAdjustedCapacity(volumeID uint32) int64 { - baseCapacity := o.masterState.GetCapacity(volumeID) - - // Adjust for in-progress tasks - if task := o.adminServer.GetInProgressTask(volumeID); task != nil { - switch task.Type { - case TaskTypeErasureCoding: - // EC reduces effective capacity - return baseCapacity / 2 // Simplified - case TaskTypeVacuum: - // Vacuum may increase available space - return baseCapacity + int64(float64(baseCapacity) * 0.3) - } - } - - // Adjust for completed but unreported changes - if change := o.adminServer.GetPendingChange(volumeID); change != nil { - return change.NewCapacity - } - - return baseCapacity -} -``` - -## 5. Error Handling & Recovery - -### 5.1 Worker Failure Scenarios - -```go -type FailureHandler struct { - taskRescheduler *TaskRescheduler - workerMonitor *WorkerMonitor - alertManager *AlertManager -} - -// Failure Scenarios: -// 1. Worker becomes unresponsive (heartbeat timeout) -// 2. Task execution fails (reported by worker) -// 3. Task gets stuck (progress timeout) -// 4. Duplicate task detection -// 5. Resource exhaustion -``` - -### 5.2 Recovery Strategies - -**Worker Timeout Recovery**: -- Mark worker as inactive after 3 missed heartbeats -- Reschedule all assigned tasks to other workers -- Cleanup any partial state - -**Task Stuck Recovery**: -- Detect tasks with no progress for > 2x estimated time -- Cancel stuck task and mark volume for cleanup -- Reschedule if retry count < max_retries - -**Duplicate Task Prevention**: -```go -type DuplicateDetector struct { - activeFingerprints map[string]bool // VolumeID+TaskType - recentCompleted *LRUCache // Recently completed tasks -} - -func (d *DuplicateDetector) IsTaskDuplicate(task *Task) bool { - fingerprint := fmt.Sprintf("%d-%s", task.VolumeID, task.Type) - return d.activeFingerprints[fingerprint] || - d.recentCompleted.Contains(fingerprint) -} -``` - -## 6. Simulation & Testing Framework - -### 6.1 Failure Simulation - -```go -type TaskSimulator struct { - scenarios map[string]SimulationScenario -} - -type SimulationScenario struct { - Name string - WorkerCount int - VolumeCount int - FailurePatterns []FailurePattern - Duration time.Duration -} - -type FailurePattern struct { - Type FailureType // "worker_timeout", "task_stuck", "duplicate" - Probability float64 // 0.0 to 1.0 - Timing TimingSpec // When during task execution - Duration time.Duration -} -``` - -### 6.2 Test Scenarios - -**Scenario 1: Worker Timeout During EC** -- Start EC task on 30GB volume -- Kill worker at 50% progress -- Verify task reassignment -- Verify no duplicate EC operations - -**Scenario 2: Stuck Vacuum Task** -- Start vacuum on high-garbage volume -- Simulate worker hanging at 75% progress -- Verify timeout detection and cleanup -- Verify volume state consistency - -**Scenario 3: Duplicate Task Prevention** -- Submit same EC task from multiple sources -- Verify only one task executes -- Verify proper conflict resolution - -**Scenario 4: Master-Admin State Divergence** -- Create in-progress EC task -- Simulate master restart -- Verify state reconciliation -- Verify shard assignment accounts for in-progress work - -## 7. Performance & Scalability - -### 7.1 Metrics & Monitoring - -```go -type SystemMetrics struct { - TasksPerSecond float64 - WorkerUtilization float64 - AverageTaskTime time.Duration - FailureRate float64 - QueueDepth int - VolumeStatesSync bool -} -``` - -### 7.2 Scalability Considerations - -- **Horizontal Worker Scaling**: Add workers without admin server changes -- **Admin Server HA**: Master-slave admin servers for fault tolerance -- **Task Partitioning**: Partition tasks by collection or datacenter -- **Batch Operations**: Group similar tasks for efficiency - -## 8. Implementation Plan - -### Phase 1: Core Infrastructure -1. Admin server basic framework -2. Worker registration and heartbeat -3. Simple task assignment -4. Basic progress tracking - -### Phase 2: Advanced Features -1. Volume state reconciliation -2. Sophisticated worker selection -3. Failure detection and recovery -4. Duplicate prevention - -### Phase 3: Optimization & Monitoring -1. Performance metrics -2. Load balancing algorithms -3. Capacity planning integration -4. Comprehensive monitoring - -This design provides a robust, scalable foundation for distributed task management in SeaweedFS while maintaining consistency with the existing architecture patterns. \ No newline at end of file diff --git a/LICENSE b/LICENSE index c9c58bb50..735f67b68 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 Chris Lu + Copyright 2016 Chris Lu Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile index 6abe59423..aa736edee 100644 --- a/Makefile +++ b/Makefile @@ -1,71 +1,14 @@ -.PHONY: test admin-generate admin-build admin-clean admin-dev admin-run admin-test admin-fmt admin-help - BINARY = weed -ADMIN_DIR = weed/admin SOURCE_DIR = . -debug ?= 0 all: install -install: admin-generate +install: cd weed; go install -warp_install: - go install github.com/minio/warp@v0.7.6 +full_install: + cd weed; go install -tags "elastic gocdk sqlite ydb tikv" -full_install: admin-generate - cd weed; go install -tags "elastic gocdk sqlite ydb tarantool tikv rclone" - -server: install - weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324 - -benchmark: install warp_install - pkill weed || true - pkill warp || true - weed server -debug=$(debug) -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false -s3.config=./docker/compose/s3.json & - warp client & - while ! nc -z localhost 8000 ; do sleep 1 ; done - warp mixed --host=127.0.0.1:8000 --access-key=some_access_key1 --secret-key=some_secret_key1 --autoterm - pkill warp - pkill weed - -# curl -o profile "http://127.0.0.1:6060/debug/pprof/profile?debug=1" -benchmark_with_pprof: debug = 1 -benchmark_with_pprof: benchmark - -test: admin-generate - cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./... - -# Admin component targets -admin-generate: - @echo "Generating admin component templates..." - @cd $(ADMIN_DIR) && $(MAKE) generate - -admin-build: admin-generate - @echo "Building admin component..." - @cd $(ADMIN_DIR) && $(MAKE) build - -admin-clean: - @echo "Cleaning admin component..." - @cd $(ADMIN_DIR) && $(MAKE) clean - -admin-dev: - @echo "Starting admin development server..." - @cd $(ADMIN_DIR) && $(MAKE) dev - -admin-run: - @echo "Running admin server..." - @cd $(ADMIN_DIR) && $(MAKE) run - -admin-test: - @echo "Testing admin component..." - @cd $(ADMIN_DIR) && $(MAKE) test - -admin-fmt: - @echo "Formatting admin component..." - @cd $(ADMIN_DIR) && $(MAKE) fmt - -admin-help: - @echo "Admin component help..." - @cd $(ADMIN_DIR) && $(MAKE) help +test: + cd weed; go test -tags "elastic gocdk sqlite ydb tikv" -v ./... diff --git a/README.md b/README.md index 0f9028d1b..a0fdd1492 100644 --- a/README.md +++ b/README.md @@ -3,19 +3,19 @@ [![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) [![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs) -[![Build Status](https://img.shields.io/github/actions/workflow/status/seaweedfs/seaweedfs/go.yml)](https://github.com/seaweedfs/seaweedfs/actions/workflows/go.yml) -[![GoDoc](https://godoc.org/github.com/seaweedfs/seaweedfs/weed?status.svg)](https://godoc.org/github.com/seaweedfs/seaweedfs/weed) -[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/seaweedfs/seaweedfs/wiki) +[![Build Status](https://img.shields.io/github/workflow/status/chrislusf/seaweedfs/Go)](https://github.com/chrislusf/seaweedfs/actions/workflows/go.yml) +[![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed) +[![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki) [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/) [![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf) -[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/seaweedfs)](https://artifacthub.io/packages/search?repo=seaweedfs) -![SeaweedFS Logo](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/seaweedfs.png) + +![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

Sponsor SeaweedFS via Patreon

SeaweedFS is an independent Apache-licensed open source project with its ongoing development made -possible entirely thanks to the support of these awesome [backers](https://github.com/seaweedfs/seaweedfs/blob/master/backers.md). +possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). If you'd like to grow SeaweedFS even stronger, please consider joining our sponsors on Patreon. @@ -32,21 +32,18 @@ Your support will be really appreciated by me and other supporters! --> ### Gold Sponsors -[![nodion](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/sponsor_nodion.png)](https://www.nodion.com) -[![piknik](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/piknik.png)](https://www.piknik.com) -[![keepsec](https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/note/keepsec.png)](https://www.keepsec.ca) +- [![nodion](https://www.nodion.com/img/logo.svg)](https://www.nodion.com) --- -- [Download Binaries for different platforms](https://github.com/seaweedfs/seaweedfs/releases/latest) +- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) - [SeaweedFS on Twitter](https://twitter.com/SeaweedFS) - [SeaweedFS on Telegram](https://t.me/Seaweedfs) - [SeaweedFS on Reddit](https://www.reddit.com/r/SeaweedFS/) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) -- [Wiki Documentation](https://github.com/seaweedfs/seaweedfs/wiki) -- [SeaweedFS White Paper](https://github.com/seaweedfs/seaweedfs/wiki/SeaweedFS_Architecture.pdf) -- [SeaweedFS Introduction Slides 2025.5](https://docs.google.com/presentation/d/1tdkp45J01oRV68dIm4yoTXKJDof-EhainlA0LMXexQE/edit?usp=sharing) +- [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) +- [SeaweedFS White Paper](https://github.com/chrislusf/seaweedfs/wiki/SeaweedFS_Architecture.pdf) - [SeaweedFS Introduction Slides 2021.5](https://docs.google.com/presentation/d/1DcxKWlINc-HNCjhYeERkpGXXm6nTCES8mi2W5G0Z4Ts/edit?usp=sharing) - [SeaweedFS Introduction Slides 2019.3](https://www.slideshare.net/chrislusf/seaweedfs-introduction) @@ -61,30 +58,26 @@ Table of Contents * [Features](#features) * [Additional Features](#additional-features) * [Filer Features](#filer-features) -* [Example: Using Seaweed Object Store](#example-using-seaweed-object-store) -* [Architecture](#object-store-architecture) +* [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store) +* [Architecture](#Object-Store-Architecture) * [Compared to Other File Systems](#compared-to-other-file-systems) * [Compared to HDFS](#compared-to-hdfs) * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph) * [Compared to GlusterFS](#compared-to-glusterfs) * [Compared to Ceph](#compared-to-ceph) - * [Compared to Minio](#compared-to-minio) * [Dev Plan](#dev-plan) * [Installation Guide](#installation-guide) * [Disk Related Topics](#disk-related-topics) -* [Benchmark](#benchmark) -* [Enterprise](#enterprise) +* [Benchmark](#Benchmark) * [License](#license) -# Quick Start # ## Quick Start for S3 API on Docker ## `docker run -p 8333:8333 chrislusf/seaweedfs server -s3` ## Quick Start with Single Binary ## -* Download the latest binary from https://github.com/seaweedfs/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`. Or run `go install github.com/seaweedfs/seaweedfs/weed@latest`. -* `export AWS_ACCESS_KEY_ID=admin ; export AWS_SECRET_ACCESS_KEY=key` as the admin credentials to access the object store. +* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe` * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway. Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver=":9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it! @@ -92,7 +85,7 @@ Also, to increase capacity, just add more volume servers by running `weed volume ## Quick Start SeaweedFS S3 on AWS ## * Setup fast production-ready [SeaweedFS S3 on AWS with cloudformation](https://aws.amazon.com/marketplace/pp/prodview-nzelz5gprlrjc) -# Introduction # +## Introduction ## SeaweedFS is a simple and highly scalable distributed file system. There are two objectives: @@ -125,18 +118,17 @@ SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. What's more, the cloud storage access API cost is minimized. -Faster and cheaper than direct cloud storage! +Faster and Cheaper than direct cloud storage! [Back to TOC](#table-of-contents) -# Features # ## Additional Features ## * Can choose no replication or different replication levels, rack and data center aware. * Automatic master servers failover - no single point of failure (SPOF). * Automatic Gzip compression depending on file MIME type. * Automatic compaction to reclaim disk space after deletion or update. * [Automatic entry TTL expiration][VolumeServerTTL]. -* Any server with some disk space can add to the total storage space. +* Any server with some disk spaces can add to the total storage space. * Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands. * Optional picture resizing. * Support ETag, Accept-Range, Last-Modified, etc. @@ -149,7 +141,7 @@ Faster and cheaper than direct cloud storage! [Back to TOC](#table-of-contents) ## Filer Features ## -* [Filer server][Filer] provides "normal" directories and files via HTTP. +* [Filer server][Filer] provides "normal" directories and files via http. * [File TTL][FilerTTL] automatically expires file metadata and actual file data. * [Mount filer][Mount] reads and writes files directly as a local directory via FUSE. * [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores. @@ -167,25 +159,25 @@ Faster and cheaper than direct cloud storage! * [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/) * [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator) -[Filer]: https://github.com/seaweedfs/seaweedfs/wiki/Directories-and-Files -[SuperLargeFiles]: https://github.com/seaweedfs/seaweedfs/wiki/Data-Structure-for-Large-Files -[Mount]: https://github.com/seaweedfs/seaweedfs/wiki/FUSE-Mount -[AmazonS3API]: https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API -[BackupToCloud]: https://github.com/seaweedfs/seaweedfs/wiki/Async-Replication-to-Cloud -[Hadoop]: https://github.com/seaweedfs/seaweedfs/wiki/Hadoop-Compatible-File-System -[WebDAV]: https://github.com/seaweedfs/seaweedfs/wiki/WebDAV -[ErasureCoding]: https://github.com/seaweedfs/seaweedfs/wiki/Erasure-coding-for-warm-storage -[TieredStorage]: https://github.com/seaweedfs/seaweedfs/wiki/Tiered-Storage -[CloudTier]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Tier -[FilerDataEncryption]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Data-Encryption -[FilerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Stores -[VolumeServerTTL]: https://github.com/seaweedfs/seaweedfs/wiki/Store-file-with-a-Time-To-Live +[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files +[SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files +[Mount]: https://github.com/chrislusf/seaweedfs/wiki/FUSE-Mount +[AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API +[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Async-Replication-to-Cloud +[Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System +[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV +[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage +[TieredStorage]: https://github.com/chrislusf/seaweedfs/wiki/Tiered-Storage +[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier +[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption +[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores +[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live [SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver -[ActiveActiveAsyncReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization -[FilerStoreReplication]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication -[KeyLargeValueStore]: https://github.com/seaweedfs/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store -[CloudDrive]: https://github.com/seaweedfs/seaweedfs/wiki/Cloud-Drive-Architecture -[GatewayToRemoteObjectStore]: https://github.com/seaweedfs/seaweedfs/wiki/Gateway-to-Remote-Object-Storage +[ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization +[FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication +[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store +[CloudDrive]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Drive-Architecture +[GatewayToRemoteObjectStore]: https://github.com/chrislusf/seaweedfs/wiki/Gateway-to-Remote-Object-Storage [Back to TOC](#table-of-contents) @@ -308,7 +300,7 @@ The replication parameter options are: More details about replication can be found [on the wiki][Replication]. -[Replication]: https://github.com/seaweedfs/seaweedfs/wiki/Replication +[Replication]: https://github.com/chrislusf/seaweedfs/wiki/Replication You can also set the default replication strategy when starting the master server. @@ -333,10 +325,10 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass * [Chunking large files][feat-3] * [Collection as a Simple Name Space][feat-4] -[feat-1]: https://github.com/seaweedfs/seaweedfs/wiki/Failover-Master-Server -[feat-2]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#insert-with-your-own-keys -[feat-3]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#upload-large-files -[feat-4]: https://github.com/seaweedfs/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space +[feat-1]: https://github.com/chrislusf/seaweedfs/wiki/Failover-Master-Server +[feat-2]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#insert-with-your-own-keys +[feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files +[feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space [Back to TOC](#table-of-contents) @@ -378,7 +370,7 @@ Each individual file size is limited to the volume size. ### Saving memory ### -All file meta information stored on a volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. ### Tiered Storage to the cloud ### @@ -450,7 +442,7 @@ MooseFS Master Server keeps all meta data in memory. Same issue as HDFS namenode ### Compared to Ceph ### -Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/seaweedfs/seaweedfs/issues/120) +Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120) SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage. @@ -510,7 +502,7 @@ make sure to define your $GOPATH Step 2: checkout this repo: ```bash -git clone https://github.com/seaweedfs/seaweedfs.git +git clone https://github.com/chrislusf/seaweedfs.git ``` Step 3: download, compile, and install the project by executing the following command @@ -586,78 +578,6 @@ Percentage of the requests served within a certain time (ms) 100% 54.1 ms ``` -### Run WARP and launch a mixed benchmark. ### - -``` -make benchmark -warp: Benchmark data written to "warp-mixed-2023-10-16[102354]-l70a.csv.zst" -Mixed operations. -Operation: DELETE, 10%, Concurrency: 20, Ran 4m59s. - * Throughput: 6.19 obj/s - -Operation: GET, 45%, Concurrency: 20, Ran 5m0s. - * Throughput: 279.85 MiB/s, 27.99 obj/s - -Operation: PUT, 15%, Concurrency: 20, Ran 5m0s. - * Throughput: 89.86 MiB/s, 8.99 obj/s - -Operation: STAT, 30%, Concurrency: 20, Ran 5m0s. - * Throughput: 18.63 obj/s - -Cluster Total: 369.74 MiB/s, 61.79 obj/s, 0 errors over 5m0s. -``` - -To see segmented request statistics, use the --analyze.v parameter. -``` -warp analyze --analyze.v warp-mixed-2023-10-16[102354]-l70a.csv.zst -18642 operations loaded... Done! -Mixed operations. ----------------------------------------- -Operation: DELETE - total: 1854, 10.0%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05 - * Throughput: 6.19 obj/s - -Requests considered: 1855: - * Avg: 104ms, 50%: 30ms, 90%: 207ms, 99%: 1.355s, Fastest: 1ms, Slowest: 4.613s, StdDev: 320ms - ----------------------------------------- -Operation: GET - total: 8388, 45.3%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.12 +0500 +05 - * Throughput: 279.77 MiB/s, 27.98 obj/s - -Requests considered: 8389: - * Avg: 221ms, 50%: 106ms, 90%: 492ms, 99%: 1.739s, Fastest: 8ms, Slowest: 8.633s, StdDev: 383ms - * TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 171ms, 99th: 669ms, Worst: 4.783s StdDev: 163ms - * First Access: Avg: 240ms, 50%: 105ms, 90%: 511ms, 99%: 2.08s, Fastest: 12ms, Slowest: 8.633s, StdDev: 480ms - * First Access TTFB: Avg: 88ms, Best: 2ms, 25th: 24ms, Median: 38ms, 75th: 64ms, 90th: 179ms, 99th: 919ms, Worst: 4.783s StdDev: 199ms - * Last Access: Avg: 219ms, 50%: 106ms, 90%: 463ms, 99%: 1.782s, Fastest: 9ms, Slowest: 8.633s, StdDev: 416ms - * Last Access TTFB: Avg: 81ms, Best: 2ms, 25th: 24ms, Median: 39ms, 75th: 65ms, 90th: 161ms, 99th: 657ms, Worst: 4.783s StdDev: 176ms - ----------------------------------------- -Operation: PUT - total: 2688, 14.5%, Size: 10485760 bytes. Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.115 +0500 +05 - * Throughput: 89.83 MiB/s, 8.98 obj/s - -Requests considered: 2689: - * Avg: 1.165s, 50%: 878ms, 90%: 2.015s, 99%: 5.74s, Fastest: 99ms, Slowest: 8.264s, StdDev: 968ms - ----------------------------------------- -Operation: STAT - total: 5586, 30.2%, Concurrency: 20, Ran 5m0s, starting 2023-10-16 10:23:57.113 +0500 +05 - * Throughput: 18.63 obj/s - -Requests considered: 5587: - * Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 80ms, Fastest: 0s, Slowest: 245ms, StdDev: 17ms - * First Access: Avg: 14ms, 50%: 10ms, 90%: 33ms, 99%: 69ms, Fastest: 0s, Slowest: 203ms, StdDev: 16ms - * Last Access: Avg: 15ms, 50%: 11ms, 90%: 34ms, 99%: 74ms, Fastest: 0s, Slowest: 203ms, StdDev: 17ms - -Cluster Total: 369.64 MiB/s, 61.77 obj/s, 0 errors over 5m0s. -Total Errors:0. -``` - -[Back to TOC](#table-of-contents) - -## Enterprise ## - -For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition, -which has a self-healing storage format with better data protection. - [Back to TOC](#table-of-contents) ## License ## @@ -681,3 +601,4 @@ The text of this page is available for modification and reuse under the terms of ## Stargazers over time [![Stargazers over time](https://starchart.cc/chrislusf/seaweedfs.svg)](https://starchart.cc/chrislusf/seaweedfs) + diff --git a/SQL_FEATURE_PLAN.md b/SQL_FEATURE_PLAN.md deleted file mode 100644 index 28a6d2c24..000000000 --- a/SQL_FEATURE_PLAN.md +++ /dev/null @@ -1,145 +0,0 @@ -# SQL Query Engine Feature, Dev, and Test Plan - -This document outlines the plan for adding SQL querying support to SeaweedFS, focusing on reading and analyzing data from Message Queue (MQ) topics. - -## Feature Plan - -**1. Goal** - -To provide a SQL querying interface for SeaweedFS, enabling analytics on existing MQ topics. This enables: -- Basic querying with SELECT, WHERE, aggregations on MQ topics -- Schema discovery and metadata operations (SHOW DATABASES, SHOW TABLES, DESCRIBE) -- In-place analytics on Parquet-stored messages without data movement - -**2. Key Features** - -* **Schema Discovery and Metadata:** - * `SHOW DATABASES` - List all MQ namespaces - * `SHOW TABLES` - List all topics in a namespace - * `DESCRIBE table_name` - Show topic schema details - * Automatic schema detection from existing Parquet data -* **Basic Query Engine:** - * `SELECT` support with `WHERE`, `LIMIT`, `OFFSET` - * Aggregation functions: `COUNT()`, `SUM()`, `AVG()`, `MIN()`, `MAX()` - * Temporal queries with timestamp-based filtering -* **User Interfaces:** - * New CLI command `weed sql` with interactive shell mode - * Optional: Web UI for query execution and result visualization -* **Output Formats:** - * JSON (default), CSV, Parquet for result sets - * Streaming results for large queries - * Pagination support for result navigation - -## Development Plan - - - -**3. Data Source Integration** - -* **MQ Topic Connector (Primary):** - * Build on existing `weed/mq/logstore/read_parquet_to_log.go` - * Implement efficient Parquet scanning with predicate pushdown - * Support schema evolution and backward compatibility - * Handle partition-based parallelism for scalable queries -* **Schema Registry Integration:** - * Extend `weed/mq/schema/schema.go` for SQL metadata operations - * Read existing topic schemas for query planning - * Handle schema evolution during query execution - -**4. API & CLI Integration** - -* **CLI Command:** - * New `weed sql` command with interactive shell mode (similar to `weed shell`) - * Support for script execution and result formatting - * Connection management for remote SeaweedFS clusters -* **gRPC API:** - * Add SQL service to existing MQ broker gRPC interface - * Enable efficient query execution with streaming results - -## Example Usage Scenarios - -**Scenario 1: Schema Discovery and Metadata** -```sql --- List all namespaces (databases) -SHOW DATABASES; - --- List topics in a namespace -USE my_namespace; -SHOW TABLES; - --- View topic structure and discovered schema -DESCRIBE user_events; -``` - -**Scenario 2: Data Querying** -```sql --- Basic filtering and projection -SELECT user_id, event_type, timestamp -FROM user_events -WHERE timestamp > 1640995200000 -LIMIT 100; - --- Aggregation queries -SELECT COUNT(*) as event_count -FROM user_events -WHERE timestamp >= 1640995200000; - --- More aggregation examples -SELECT MAX(timestamp), MIN(timestamp) -FROM user_events; -``` - -**Scenario 3: Analytics & Monitoring** -```sql --- Basic analytics -SELECT COUNT(*) as total_events -FROM user_events -WHERE timestamp >= 1640995200000; - --- Simple monitoring -SELECT AVG(response_time) as avg_response -FROM api_logs -WHERE timestamp >= 1640995200000; - -## Architecture Overview - -``` -SQL Query Flow: - 1. Parse SQL 2. Plan & Optimize 3. Execute Query -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Client โ”‚ โ”‚ SQL Parser โ”‚ โ”‚ Query Planner โ”‚ โ”‚ Execution โ”‚ -โ”‚ (CLI) โ”‚โ”€โ”€โ†’ โ”‚ PostgreSQL โ”‚โ”€โ”€โ†’ โ”‚ & Optimizer โ”‚โ”€โ”€โ†’ โ”‚ Engine โ”‚ -โ”‚ โ”‚ โ”‚ (Custom) โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ - โ”‚ Schema Lookup โ”‚ Data Access - โ–ผ โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ Schema Catalog โ”‚ - โ”‚ โ€ข Namespace โ†’ Database mapping โ”‚ - โ”‚ โ€ข Topic โ†’ Table mapping โ”‚ - โ”‚ โ€ข Schema version management โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ–ฒ - โ”‚ Metadata - โ”‚ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ MQ Storage Layer โ”‚ -โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ–ฒ โ”‚ -โ”‚ โ”‚ Topic A โ”‚ โ”‚ Topic B โ”‚ โ”‚ Topic C โ”‚ โ”‚ ... โ”‚ โ”‚ โ”‚ -โ”‚ โ”‚ (Parquet) โ”‚ โ”‚ (Parquet) โ”‚ โ”‚ (Parquet) โ”‚ โ”‚ (Parquet) โ”‚ โ”‚ โ”‚ -โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”‚โ”€โ”€โ”˜ - โ”‚ - Data Access -``` - - -## Success Metrics - -* **Feature Completeness:** Support for all specified SELECT operations and metadata commands -* **Performance:** - * **Simple SELECT queries**: < 100ms latency for single-table queries with up to 3 WHERE predicates on โ‰ค 100K records - * **Complex queries**: < 1s latency for queries involving aggregations (COUNT, SUM, MAX, MIN) on โ‰ค 1M records - * **Time-range queries**: < 500ms for timestamp-based filtering on โ‰ค 500K records within 24-hour windows -* **Scalability:** Handle topics with millions of messages efficiently diff --git a/SSE-C_IMPLEMENTATION.md b/SSE-C_IMPLEMENTATION.md deleted file mode 100644 index 55da0aa70..000000000 --- a/SSE-C_IMPLEMENTATION.md +++ /dev/null @@ -1,169 +0,0 @@ -# Server-Side Encryption with Customer-Provided Keys (SSE-C) Implementation - -This document describes the implementation of SSE-C support in SeaweedFS, addressing the feature request from [GitHub Discussion #5361](https://github.com/seaweedfs/seaweedfs/discussions/5361). - -## Overview - -SSE-C allows clients to provide their own encryption keys for server-side encryption of objects stored in SeaweedFS. The server encrypts the data using the customer-provided AES-256 key but does not store the key itself - only an MD5 hash of the key for validation purposes. - -## Implementation Details - -### Architecture - -The SSE-C implementation follows a transparent encryption/decryption pattern: - -1. **Upload (PUT/POST)**: Data is encrypted with the customer key before being stored -2. **Download (GET/HEAD)**: Encrypted data is decrypted on-the-fly using the customer key -3. **Metadata Storage**: Only the encryption algorithm and key MD5 are stored as metadata - -### Key Components - -#### 1. Constants and Headers (`weed/s3api/s3_constants/header.go`) -- Added AWS-compatible SSE-C header constants -- Support for both regular and copy-source SSE-C headers - -#### 2. Core SSE-C Logic (`weed/s3api/s3_sse_c.go`) -- **SSECustomerKey**: Structure to hold customer encryption key and metadata -- **SSECEncryptedReader**: Streaming encryption with AES-256-CTR mode -- **SSECDecryptedReader**: Streaming decryption with IV extraction -- **validateAndParseSSECHeaders**: Shared validation logic (DRY principle) -- **ParseSSECHeaders**: Parse regular SSE-C headers -- **ParseSSECCopySourceHeaders**: Parse copy-source SSE-C headers -- Header validation and parsing functions -- Metadata extraction and response handling - -#### 3. Error Handling (`weed/s3api/s3err/s3api_errors.go`) -- New error codes for SSE-C validation failures -- AWS-compatible error messages and HTTP status codes - -#### 4. S3 API Integration -- **PUT Object Handler**: Encrypts data streams transparently -- **GET Object Handler**: Decrypts data streams transparently -- **HEAD Object Handler**: Validates keys and returns appropriate headers -- **Metadata Storage**: Integrates with existing `SaveAmzMetaData` function - -### Encryption Scheme - -- **Algorithm**: AES-256-CTR (Counter mode) -- **Key Size**: 256 bits (32 bytes) -- **IV Generation**: Random 16-byte IV per object -- **Storage Format**: `[IV][EncryptedData]` where IV is prepended to encrypted content - -### Metadata Storage - -SSE-C metadata is stored in the filer's extended attributes: -``` -x-amz-server-side-encryption-customer-algorithm: "AES256" -x-amz-server-side-encryption-customer-key-md5: "" -``` - -## API Compatibility - -### Required Headers for Encryption (PUT/POST) -``` -x-amz-server-side-encryption-customer-algorithm: AES256 -x-amz-server-side-encryption-customer-key: -x-amz-server-side-encryption-customer-key-md5: -``` - -### Required Headers for Decryption (GET/HEAD) -Same headers as encryption - the server validates the key MD5 matches. - -### Copy Operations -Support for copy-source SSE-C headers: -``` -x-amz-copy-source-server-side-encryption-customer-algorithm -x-amz-copy-source-server-side-encryption-customer-key -x-amz-copy-source-server-side-encryption-customer-key-md5 -``` - -## Error Handling - -The implementation provides AWS-compatible error responses: - -- **InvalidEncryptionAlgorithmError**: Non-AES256 algorithm specified -- **InvalidArgument**: Invalid key format, size, or MD5 mismatch -- **Missing customer key**: Object encrypted but no key provided -- **Unnecessary customer key**: Object not encrypted but key provided - -## Security Considerations - -1. **Key Management**: Customer keys are never stored - only MD5 hashes for validation -2. **IV Randomness**: Fresh random IV generated for each object -3. **Transparent Security**: Volume servers never see unencrypted data -4. **Key Validation**: Strict validation of key format, size, and MD5 - -## Testing - -Comprehensive test suite covers: -- Header validation and parsing (regular and copy-source) -- Encryption/decryption round-trip -- Error condition handling -- Metadata extraction -- Code reuse validation (DRY principle) -- AWS S3 compatibility - -Run tests with: -```bash -go test -v ./weed/s3api - -## Usage Example - -### Upload with SSE-C -```bash -# Generate a 256-bit key -KEY=$(openssl rand -base64 32) -KEY_MD5=$(echo -n "$KEY" | base64 -d | openssl dgst -md5 -binary | base64) - -# Upload object with SSE-C -curl -X PUT "http://localhost:8333/bucket/object" \ - -H "x-amz-server-side-encryption-customer-algorithm: AES256" \ - -H "x-amz-server-side-encryption-customer-key: $KEY" \ - -H "x-amz-server-side-encryption-customer-key-md5: $KEY_MD5" \ - --data-binary @file.txt -``` - -### Download with SSE-C -```bash -# Download object with SSE-C (same key required) -curl "http://localhost:8333/bucket/object" \ - -H "x-amz-server-side-encryption-customer-algorithm: AES256" \ - -H "x-amz-server-side-encryption-customer-key: $KEY" \ - -H "x-amz-server-side-encryption-customer-key-md5: $KEY_MD5" -``` - -## Integration Points - -### Existing SeaweedFS Features -- **Filer Metadata**: Extends existing metadata storage -- **Volume Servers**: No changes required - store encrypted data transparently -- **S3 API**: Integrates seamlessly with existing handlers -- **Versioning**: Compatible with object versioning -- **Multipart Upload**: Ready for multipart upload integration - -### Future Enhancements -- **SSE-S3**: Server-managed encryption keys -- **SSE-KMS**: External key management service integration -- **Performance Optimization**: Hardware acceleration for encryption -- **Compliance**: Enhanced audit logging for encrypted objects - -## File Changes Summary - -1. **`weed/s3api/s3_constants/header.go`** - Added SSE-C header constants -2. **`weed/s3api/s3_sse_c.go`** - Core SSE-C implementation (NEW) -3. **`weed/s3api/s3_sse_c_test.go`** - Comprehensive test suite (NEW) -4. **`weed/s3api/s3err/s3api_errors.go`** - Added SSE-C error codes -5. **`weed/s3api/s3api_object_handlers.go`** - GET/HEAD with SSE-C support -6. **`weed/s3api/s3api_object_handlers_put.go`** - PUT with SSE-C support -7. **`weed/server/filer_server_handlers_write_autochunk.go`** - Metadata storage - -## Compliance - -This implementation follows the [AWS S3 SSE-C specification](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) for maximum compatibility with existing S3 clients and tools. - -## Performance Impact - -- **Encryption Overhead**: Minimal CPU impact with efficient AES-CTR streaming -- **Memory Usage**: Constant memory usage via streaming encryption/decryption -- **Storage Overhead**: 16 bytes per object for IV storage -- **Network**: No additional network overhead diff --git a/backers.md b/backers.md index a1f3aa3db..cfd892a39 100644 --- a/backers.md +++ b/backers.md @@ -7,8 +7,6 @@ - [Evercam Camera Management Software](https://evercam.io/) - [Spherical Elephant GmbH](https://www.sphericalelephant.com) -- [WizardTales GmbH](https://www.wizardtales.com) -- [Nimbus Web Services](https://nimbusws.com) -

Backers

diff --git a/docker/Dockerfile.e2e b/docker/Dockerfile.e2e deleted file mode 100644 index 3ac60cb11..000000000 --- a/docker/Dockerfile.e2e +++ /dev/null @@ -1,41 +0,0 @@ -FROM ubuntu:22.04 - -LABEL author="Chris Lu" - -# Use faster mirrors and optimize package installation -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - --no-install-recommends \ - --no-install-suggests \ - curl \ - fio \ - fuse \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && rm -rf /tmp/* \ - && rm -rf /var/tmp/* -RUN mkdir -p /etc/seaweedfs /data/filerldb2 - -COPY ./weed /usr/bin/ -COPY ./filer.toml /etc/seaweedfs/filer.toml -COPY ./entrypoint.sh /entrypoint.sh - -# volume server grpc port -EXPOSE 18080 -# volume server http port -EXPOSE 8080 -# filer server grpc port -EXPOSE 18888 -# filer server http port -EXPOSE 8888 -# master server shared grpc port -EXPOSE 19333 -# master server shared http port -EXPOSE 9333 - -VOLUME /data -WORKDIR /data - -RUN chmod +x /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.gccgo_build b/docker/Dockerfile.gccgo_build new file mode 100644 index 000000000..90cdf352f --- /dev/null +++ b/docker/Dockerfile.gccgo_build @@ -0,0 +1,44 @@ +FROM gcc:11 as builder +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +ARG BRANCH=${BRANCH:-master} +RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ + && apt-get update \ + && apt-get install -y golang-src \ + && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ + && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" -compiler=gccgo -tags gccgo,noasm + +FROM alpine AS final +LABEL author="Chris Lu" +COPY --from=builder /go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh +RUN apk add fuse # for weed mount + +# volume server gprc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server gprc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared gprc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 +# webdav server http port +EXPOSE 7333 + +RUN mkdir -p /data/filerldb2 + +VOLUME /data +WORKDIR /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index a52e74143..c917ec556 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,20 +1,20 @@ -FROM golang:1.24-alpine as builder +FROM golang:1.18-alpine as builder RUN apk add git g++ fuse -RUN mkdir -p /go/src/github.com/seaweedfs/ -RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs ARG BRANCH=${BRANCH:-master} ARG TAGS -RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH -RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \ - && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \ +RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ + && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ && CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}" FROM alpine AS final LABEL author="Chris Lu" COPY --from=builder /go/bin/weed /usr/bin/ RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN apk add fuse # for weed mount # volume server gprc port diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local index 269a993b4..947edffda 100644 --- a/docker/Dockerfile.local +++ b/docker/Dockerfile.local @@ -1,13 +1,10 @@ FROM alpine AS final LABEL author="Chris Lu" COPY ./weed /usr/bin/ -COPY ./weed_pub* /usr/bin/ -COPY ./weed_sub* /usr/bin/ RUN mkdir -p /etc/seaweedfs COPY ./filer.toml /etc/seaweedfs/filer.toml COPY ./entrypoint.sh /entrypoint.sh RUN apk add fuse # for weed mount -RUN apk add curl # for health checks # volume server grpc port EXPOSE 18080 diff --git a/docker/Dockerfile.rocksdb_dev_env b/docker/Dockerfile.rocksdb_dev_env deleted file mode 100644 index e4fe0acaf..000000000 --- a/docker/Dockerfile.rocksdb_dev_env +++ /dev/null @@ -1,17 +0,0 @@ -FROM golang:1.24 AS builder - -RUN apt-get update -RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev - -ARG ROCKSDB_VERSION=v10.5.1 -ENV ROCKSDB_VERSION=${ROCKSDB_VERSION} - -# build RocksDB -RUN cd /tmp && \ - git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \ - cd rocksdb && \ - PORTABLE=1 make -j"$(nproc)" static_lib && \ - make install-static - -ENV CGO_CFLAGS="-I/tmp/rocksdb/include" -ENV CGO_LDFLAGS="-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" diff --git a/docker/Dockerfile.rocksdb_large b/docker/Dockerfile.rocksdb_large index 2c3516fb0..0025eb116 100644 --- a/docker/Dockerfile.rocksdb_large +++ b/docker/Dockerfile.rocksdb_large @@ -1,28 +1,27 @@ -FROM golang:1.24 AS builder +FROM golang:1.18-buster as builder RUN apt-get update RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev -ARG ROCKSDB_VERSION=v10.5.1 -ENV ROCKSDB_VERSION=${ROCKSDB_VERSION} +ENV ROCKSDB_VERSION v7.2.2 # build RocksDB RUN cd /tmp && \ git clone https://github.com/facebook/rocksdb.git /tmp/rocksdb --depth 1 --single-branch --branch $ROCKSDB_VERSION && \ cd rocksdb && \ - PORTABLE=1 make -j"$(nproc)" static_lib && \ + PORTABLE=1 make static_lib && \ make install-static -ENV CGO_CFLAGS="-I/tmp/rocksdb/include" -ENV CGO_LDFLAGS="-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" +ENV CGO_CFLAGS "-I/tmp/rocksdb/include" +ENV CGO_LDFLAGS "-L/tmp/rocksdb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" # build SeaweedFS -RUN mkdir -p /go/src/github.com/seaweedfs/ -RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedfs/seaweedfs -ARG BRANCH=master -RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH -RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \ - && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +ARG BRANCH=${BRANCH:-master} +RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ + && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}" @@ -30,8 +29,8 @@ FROM alpine AS final LABEL author="Chris Lu" COPY --from=builder /go/bin/weed /usr/bin/ RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN apk add fuse snappy gflags # volume server gprc port diff --git a/docker/Dockerfile.rocksdb_large_local b/docker/Dockerfile.rocksdb_large_local deleted file mode 100644 index b3b08dd0c..000000000 --- a/docker/Dockerfile.rocksdb_large_local +++ /dev/null @@ -1,45 +0,0 @@ -FROM chrislusf/rocksdb_dev_env as builder - -# build SeaweedFS -RUN mkdir -p /go/src/github.com/seaweedfs/ -ADD . /go/src/github.com/seaweedfs/seaweedfs -RUN ls -al /go/src/github.com/seaweedfs/ && \ - cd /go/src/github.com/seaweedfs/seaweedfs/weed \ - && export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \ - && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}" - - -FROM alpine AS final -LABEL author="Chris Lu" -COPY --from=builder /go/bin/weed /usr/bin/ -RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/filer_rocksdb.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/seaweedfs/seaweedfs/docker/entrypoint.sh /entrypoint.sh -RUN apk add fuse snappy gflags tmux - -# volume server gprc port -EXPOSE 18080 -# volume server http port -EXPOSE 8080 -# filer server gprc port -EXPOSE 18888 -# filer server http port -EXPOSE 8888 -# master server shared gprc port -EXPOSE 19333 -# master server shared http port -EXPOSE 9333 -# s3 server http port -EXPOSE 8333 -# webdav server http port -EXPOSE 7333 - -RUN mkdir -p /data/filer_rocksdb - -VOLUME /data - -WORKDIR /data - -RUN chmod +x /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.s3tests b/docker/Dockerfile.s3tests index 75c7f5f0a..e55ebadf9 100644 --- a/docker/Dockerfile.s3tests +++ b/docker/Dockerfile.s3tests @@ -25,7 +25,7 @@ ENV \ NOSETESTS_EXCLUDE="" \ NOSETESTS_ATTR="" \ NOSETESTS_OPTIONS="" \ - S3TEST_CONF="/s3tests.conf" + S3TEST_CONF="/s3test.conf" ENTRYPOINT ["/bin/bash", "-c"] -CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"] +CMD ["sleep 30 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"] \ No newline at end of file diff --git a/docker/Dockerfile.tarantool.dev_env b/docker/Dockerfile.tarantool.dev_env deleted file mode 100644 index 4ce0fc9af..000000000 --- a/docker/Dockerfile.tarantool.dev_env +++ /dev/null @@ -1,17 +0,0 @@ -FROM tarantool/tarantool:3.3.1 AS builder - -# install dependencies -RUN apt update && \ - apt install -y git unzip cmake tt=2.7.0 - -# init tt dir structure, create dir for app, create symlink -RUN tt init && \ - mkdir app && \ - ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app - -# copy cluster configs -COPY tarantool /opt/tarantool/app - -# build app -RUN tt build app - diff --git a/docker/Makefile b/docker/Makefile index f9a23b646..3afea17c1 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -4,135 +4,93 @@ all: gen gen: dev -cgo ?= 0 binary: export SWCOMMIT=$(shell git rev-parse --short HEAD) - export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)" - cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed - cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/ - cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/ - -binary_race: options = -race -binary_race: cgo = 1 -binary_race: binary + export SWLDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)" + cd ../weed; CGO_ENABLED=0 GOOS=linux go build -tags "$(tags)" -ldflags "-extldflags -static $(SWLDFLAGS)"; mv weed ../docker/ build: binary docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . + rm ./weed -build_e2e: binary_race - docker buildx build \ - --cache-from=type=local,src=/tmp/.buildx-cache \ - --cache-to=type=local,dest=/tmp/.buildx-cache-new,mode=max \ - --load \ - -t chrislusf/seaweedfs:e2e \ - -f Dockerfile.e2e . - # Move cache to avoid growing cache size - rm -rf /tmp/.buildx-cache || true - mv /tmp/.buildx-cache-new /tmp/.buildx-cache || true - -go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool +go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build . go_build_large_disk: docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build . -build_rocksdb_dev_env: - docker build --no-cache -t chrislusf/rocksdb_dev_env -f Dockerfile.rocksdb_dev_env . - -build_rocksdb_local: build_rocksdb_dev_env - cd .. ; docker build --no-cache -t chrislusf/seaweedfs:rocksdb_local -f docker/Dockerfile.rocksdb_large_local . - build_rocksdb: docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large . -build_tarantool_dev_env: - docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env . - s3tests_build: docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests . dev: build - docker compose -f compose/local-dev-compose.yml -p seaweedfs up - -dev_race: binary_race - docker compose -f compose/local-dev-compose.yml -p seaweedfs up + docker-compose -f compose/local-dev-compose.yml -p seaweedfs up dev_tls: build certstrap - ENV_FILE="tls.env" docker compose -f compose/local-dev-compose.yml -p seaweedfs up + ENV_FILE="tls.env" docker-compose -f compose/local-dev-compose.yml -p seaweedfs up dev_mount: build - docker compose -f compose/local-mount-compose.yml -p seaweedfs up + docker-compose -f compose/local-mount-compose.yml -p seaweedfs up run_image: build docker run --rm -ti --device /dev/fuse --cap-add SYS_ADMIN --entrypoint /bin/sh chrislusf/seaweedfs:local profile_mount: build - docker compose -f compose/local-mount-profile-compose.yml -p seaweedfs up + docker-compose -f compose/local-mount-profile-compose.yml -p seaweedfs up k8s: build - docker compose -f compose/local-k8s-compose.yml -p seaweedfs up + docker-compose -f compose/local-k8s-compose.yml -p seaweedfs up dev_registry: build - docker compose -f compose/local-registry-compose.yml -p seaweedfs up + docker-compose -f compose/local-registry-compose.yml -p seaweedfs up -dev_replicate: - docker build --build-arg TAGS=gocdk --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build . - docker compose -f compose/local-replicate-compose.yml -p seaweedfs up +dev_replicate: build + docker-compose -f compose/local-replicate-compose.yml -p seaweedfs up dev_auditlog: build - docker compose -f compose/local-auditlog-compose.yml -p seaweedfs up + docker-compose -f compose/local-auditlog-compose.yml -p seaweedfs up dev_nextcloud: build - docker compose -f compose/local-nextcloud-compose.yml -p seaweedfs up + docker-compose -f compose/local-nextcloud-compose.yml -p seaweedfs up cluster: build - docker compose -f compose/local-cluster-compose.yml -p seaweedfs up + docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up 2clusters: build - docker compose -f compose/local-clusters-compose.yml -p seaweedfs up + docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up 2mount: build - docker compose -f compose/local-sync-mount-compose.yml -p seaweedfs up - -filer_backup: build - docker compose -f compose/local-filer-backup-compose.yml -p seaweedfs up + docker-compose -f compose/local-sync-mount-compose.yml -p seaweedfs up hashicorp_raft: build - docker compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up + docker-compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up s3tests: build s3tests_build - docker compose -f compose/local-s3tests-compose.yml -p seaweedfs up - -brokers: build - docker compose -f compose/local-brokers-compose.yml -p seaweedfs up - -agent: build - docker compose -f compose/local-mq-test.yml -p seaweedfs up + docker-compose -f compose/local-s3tests-compose.yml -p seaweedfs up filer_etcd: build docker stack deploy -c compose/swarm-etcd.yml fs test_etcd: build - docker compose -f compose/test-etcd-filer.yml -p seaweedfs up + docker-compose -f compose/test-etcd-filer.yml -p seaweedfs up test_ydb: tags = ydb test_ydb: build - docker compose -f compose/test-ydb-filer.yml -p seaweedfs up - -test_tarantool: tags = tarantool -test_tarantool: build_tarantool_dev_env build - docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up + export + docker-compose -f compose/test-ydb-filer.yml -p seaweedfs up clean: rm ./weed certstrap: - go install -v github.com/square/certstrap@latest - certstrap --depot-path compose/tls init --curve P-256 --passphrase "" --common-name "SeaweedFS CA" || true - certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --domain localhost --common-name volume01.dev || true - certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name master01.dev || true - certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name filer01.dev || true - certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name client01.dev || true + go get github.com/square/certstrap + certstrap --depot-path compose/tls init --passphrase "" --common-name "SeaweedFS CA" || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name volume01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name master01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name filer01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name client01.dev || true certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true diff --git a/docker/README.md b/docker/README.md index bec6c4bf9..288d87158 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,23 +1,13 @@ # Docker -## Compose V2 -SeaweedFS now uses the `v2` syntax `docker compose` - -If you rely on using Docker Compose as docker-compose (with a hyphen), you can set up Compose V2 to act as a drop-in replacement of the previous docker-compose. Refer to the [Installing Compose](https://docs.docker.com/compose/install/) section for detailed instructions on upgrading. - -Confirm your system has docker compose v2 with a version check -```bash -$ docker compose version -Docker Compose version v2.10.2 -``` ## Try it out ```bash -wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-compose.yml +wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-compose.yml -docker compose -f seaweedfs-compose.yml -p seaweedfs up +docker-compose -f seaweedfs-compose.yml -p seaweedfs up ``` @@ -25,16 +15,16 @@ docker compose -f seaweedfs-compose.yml -p seaweedfs up ```bash -wget https://raw.githubusercontent.com/seaweedfs/seaweedfs/master/docker/seaweedfs-dev-compose.yml +wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml -docker compose -f seaweedfs-dev-compose.yml -p seaweedfs up +docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up ``` ## Local Development ```bash -cd $GOPATH/src/github.com/seaweedfs/seaweedfs/docker +cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker make ``` @@ -54,8 +44,8 @@ docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,l docker buildx stop $BUILDER ``` -## Minio debugging +## Minio debuging ``` mc config host add local http://127.0.0.1:9000 some_access_key1 some_secret_key1 mc admin trace --all --verbose local -``` +``` \ No newline at end of file diff --git a/docker/admin_integration/Dockerfile.local b/docker/admin_integration/Dockerfile.local deleted file mode 100644 index 9795b6ea3..000000000 --- a/docker/admin_integration/Dockerfile.local +++ /dev/null @@ -1,18 +0,0 @@ -FROM alpine:latest - -# Install required packages -RUN apk add --no-cache \ - ca-certificates \ - fuse \ - curl \ - jq - -# Copy our locally built binary -COPY weed-local /usr/bin/weed -RUN chmod +x /usr/bin/weed - -# Create working directory -WORKDIR /data - -# Default command -ENTRYPOINT ["/usr/bin/weed"] \ No newline at end of file diff --git a/docker/admin_integration/EC-TESTING-README.md b/docker/admin_integration/EC-TESTING-README.md deleted file mode 100644 index 57e0a5985..000000000 --- a/docker/admin_integration/EC-TESTING-README.md +++ /dev/null @@ -1,438 +0,0 @@ -# SeaweedFS EC Worker Testing Environment - -This Docker Compose setup provides a comprehensive testing environment for SeaweedFS Erasure Coding (EC) workers using **official SeaweedFS commands**. - -## ๐Ÿ“‚ Directory Structure - -The testing environment is located in `docker/admin_integration/` and includes: - -``` -docker/admin_integration/ -โ”œโ”€โ”€ Makefile # Main management interface -โ”œโ”€โ”€ docker-compose-ec-test.yml # Docker compose configuration -โ”œโ”€โ”€ EC-TESTING-README.md # This documentation -โ””โ”€โ”€ run-ec-test.sh # Quick start script -``` - -## ๐Ÿ—๏ธ Architecture - -The testing environment uses **official SeaweedFS commands** and includes: - -- **1 Master Server** (port 9333) - Coordinates the cluster with 50MB volume size limit -- **6 Volume Servers** (ports 8080-8085) - Distributed across 2 data centers and 3 racks for diversity -- **1 Filer** (port 8888) - Provides file system interface -- **1 Admin Server** (port 23646) - Detects volumes needing EC and manages workers using official `admin` command -- **3 EC Workers** - Execute erasure coding tasks using official `worker` command with task-specific working directories -- **1 Load Generator** - Continuously writes and deletes files using SeaweedFS shell commands -- **1 Monitor** - Tracks cluster health and EC progress using shell scripts - -## โœจ New Features - -### **Task-Specific Working Directories** -Each worker now creates dedicated subdirectories for different task types: -- `/work/erasure_coding/` - For EC encoding tasks -- `/work/vacuum/` - For vacuum cleanup tasks -- `/work/balance/` - For volume balancing tasks - -This provides: -- **Organization**: Each task type gets isolated working space -- **Debugging**: Easy to find files/logs related to specific task types -- **Cleanup**: Can clean up task-specific artifacts easily -- **Concurrent Safety**: Different task types won't interfere with each other's files - -## ๐Ÿš€ Quick Start - -### Prerequisites - -- Docker and Docker Compose installed -- GNU Make installed -- At least 4GB RAM available for containers -- Ports 8080-8085, 8888, 9333, 23646 available - -### Start the Environment - -```bash -# Navigate to the admin integration directory -cd docker/admin_integration/ - -# Show available commands -make help - -# Start the complete testing environment -make start -``` - -The `make start` command will: -1. Start all services using official SeaweedFS images -2. Configure workers with task-specific working directories -3. Wait for services to be ready -4. Display monitoring URLs and run health checks - -### Alternative Commands - -```bash -# Quick start aliases -make up # Same as 'make start' - -# Development mode (higher load for faster testing) -make dev-start - -# Build images without starting -make build -``` - -## ๐Ÿ“‹ Available Make Targets - -Run `make help` to see all available targets: - -### **๐Ÿš€ Main Operations** -- `make start` - Start the complete EC testing environment -- `make stop` - Stop all services -- `make restart` - Restart all services -- `make clean` - Complete cleanup (containers, volumes, images) - -### **๐Ÿ“Š Monitoring & Status** -- `make health` - Check health of all services -- `make status` - Show status of all containers -- `make urls` - Display all monitoring URLs -- `make monitor` - Open monitor dashboard in browser -- `make monitor-status` - Show monitor status via API -- `make volume-status` - Show volume status from master -- `make admin-status` - Show admin server status -- `make cluster-status` - Show complete cluster status - -### **๐Ÿ“‹ Logs Management** -- `make logs` - Show logs from all services -- `make logs-admin` - Show admin server logs -- `make logs-workers` - Show all worker logs -- `make logs-worker1/2/3` - Show specific worker logs -- `make logs-load` - Show load generator logs -- `make logs-monitor` - Show monitor logs -- `make backup-logs` - Backup all logs to files - -### **โš–๏ธ Scaling & Testing** -- `make scale-workers WORKERS=5` - Scale workers to 5 instances -- `make scale-load RATE=25` - Increase load generation rate -- `make test-ec` - Run focused EC test scenario - -### **๐Ÿ”ง Development & Debug** -- `make shell-admin` - Open shell in admin container -- `make shell-worker1` - Open shell in worker container -- `make debug` - Show debug information -- `make troubleshoot` - Run troubleshooting checks - -## ๐Ÿ“Š Monitoring URLs - -| Service | URL | Description | -|---------|-----|-------------| -| Master UI | http://localhost:9333 | Cluster status and topology | -| Filer | http://localhost:8888 | File operations | -| Admin Server | http://localhost:23646/ | Task management | -| Monitor | http://localhost:9999/status | Complete cluster monitoring | -| Volume Servers | http://localhost:8080-8085/status | Individual volume server stats | - -Quick access: `make urls` or `make monitor` - -## ๐Ÿ”„ How EC Testing Works - -### 1. Continuous Load Generation -- **Write Rate**: 10 files/second (1-5MB each) -- **Delete Rate**: 2 files/second -- **Target**: Fill volumes to 50MB limit quickly - -### 2. Volume Detection -- Admin server scans master every 30 seconds -- Identifies volumes >40MB (80% of 50MB limit) -- Queues EC tasks for eligible volumes - -### 3. EC Worker Assignment -- **Worker 1**: EC specialist (max 2 concurrent tasks) -- **Worker 2**: EC + Vacuum hybrid (max 2 concurrent tasks) -- **Worker 3**: EC + Vacuum hybrid (max 1 concurrent task) - -### 4. Comprehensive EC Process -Each EC task follows 6 phases: -1. **Copy Volume Data** (5-15%) - Stream .dat/.idx files locally -2. **Mark Read-Only** (20-25%) - Ensure data consistency -3. **Local Encoding** (30-60%) - Create 14 shards (10+4 Reed-Solomon) -4. **Calculate Placement** (65-70%) - Smart rack-aware distribution -5. **Distribute Shards** (75-90%) - Upload to optimal servers -6. **Verify & Cleanup** (95-100%) - Validate and clean temporary files - -### 5. Real-Time Monitoring -- Volume analysis and EC candidate detection -- Worker health and task progress -- No data loss verification -- Performance metrics - -## ๐Ÿ“‹ Key Features Tested - -### โœ… EC Implementation Features -- [x] Local volume data copying with progress tracking -- [x] Local Reed-Solomon encoding (10+4 shards) -- [x] Intelligent shard placement with rack awareness -- [x] Load balancing across available servers -- [x] Backup server selection for redundancy -- [x] Detailed step-by-step progress tracking -- [x] Comprehensive error handling and recovery - -### โœ… Infrastructure Features -- [x] Multi-datacenter topology (dc1, dc2) -- [x] Rack diversity (rack1, rack2, rack3) -- [x] Volume size limits (50MB) -- [x] Worker capability matching -- [x] Health monitoring and alerting -- [x] Continuous workload simulation - -## ๐Ÿ› ๏ธ Common Usage Patterns - -### Basic Testing Workflow -```bash -# Start environment -make start - -# Watch progress -make monitor-status - -# Check for EC candidates -make volume-status - -# View worker activity -make logs-workers - -# Stop when done -make stop -``` - -### High-Load Testing -```bash -# Start with higher load -make dev-start - -# Scale up workers and load -make scale-workers WORKERS=5 -make scale-load RATE=50 - -# Monitor intensive EC activity -make logs-admin -``` - -### Debugging Issues -```bash -# Check port conflicts and system state -make troubleshoot - -# View specific service logs -make logs-admin -make logs-worker1 - -# Get shell access for debugging -make shell-admin -make shell-worker1 - -# Check detailed status -make debug -``` - -### Development Iteration -```bash -# Quick restart after code changes -make restart - -# Rebuild and restart -make clean -make start - -# Monitor specific components -make logs-monitor -``` - -## ๐Ÿ“ˆ Expected Results - -### Successful EC Testing Shows: -1. **Volume Growth**: Steady increase in volume sizes toward 50MB limit -2. **EC Detection**: Admin server identifies volumes >40MB for EC -3. **Task Assignment**: Workers receive and execute EC tasks -4. **Shard Distribution**: 14 shards distributed across 6 volume servers -5. **No Data Loss**: All files remain accessible during and after EC -6. **Performance**: EC tasks complete within estimated timeframes - -### Sample Monitor Output: -```bash -# Check current status -make monitor-status - -# Output example: -{ - "monitor": { - "uptime": "15m30s", - "master_addr": "master:9333", - "admin_addr": "admin:9900" - }, - "stats": { - "VolumeCount": 12, - "ECTasksDetected": 3, - "WorkersActive": 3 - } -} -``` - -## ๐Ÿ”ง Configuration - -### Environment Variables - -You can customize the environment by setting variables: - -```bash -# High load testing -WRITE_RATE=25 DELETE_RATE=5 make start - -# Extended test duration -TEST_DURATION=7200 make start # 2 hours -``` - -### Scaling Examples - -```bash -# Scale workers -make scale-workers WORKERS=6 - -# Increase load generation -make scale-load RATE=30 - -# Combined scaling -make scale-workers WORKERS=4 -make scale-load RATE=40 -``` - -## ๐Ÿงน Cleanup Options - -```bash -# Stop services only -make stop - -# Remove containers but keep volumes -make down - -# Remove data volumes only -make clean-volumes - -# Remove built images only -make clean-images - -# Complete cleanup (everything) -make clean -``` - -## ๐Ÿ› Troubleshooting - -### Quick Diagnostics -```bash -# Run complete troubleshooting -make troubleshoot - -# Check specific components -make health -make debug -make status -``` - -### Common Issues - -**Services not starting:** -```bash -# Check port availability -make troubleshoot - -# View startup logs -make logs-master -make logs-admin -``` - -**No EC tasks being created:** -```bash -# Check volume status -make volume-status - -# Increase load to fill volumes faster -make scale-load RATE=30 - -# Check admin detection -make logs-admin -``` - -**Workers not responding:** -```bash -# Check worker registration -make admin-status - -# View worker logs -make logs-workers - -# Restart workers -make restart -``` - -### Performance Tuning - -**For faster testing:** -```bash -make dev-start # Higher default load -make scale-load RATE=50 # Very high load -``` - -**For stress testing:** -```bash -make scale-workers WORKERS=8 -make scale-load RATE=100 -``` - -## ๐Ÿ“š Technical Details - -### Network Architecture -- Custom bridge network (172.20.0.0/16) -- Service discovery via container names -- Health checks for all services - -### Storage Layout -- Each volume server: max 100 volumes -- Data centers: dc1, dc2 -- Racks: rack1, rack2, rack3 -- Volume limit: 50MB per volume - -### EC Algorithm -- Reed-Solomon RS(10,4) -- 10 data shards + 4 parity shards -- Rack-aware distribution -- Backup server redundancy - -### Make Integration -- Color-coded output for better readability -- Comprehensive help system (`make help`) -- Parallel execution support -- Error handling and cleanup -- Cross-platform compatibility - -## ๐ŸŽฏ Quick Reference - -```bash -# Essential commands -make help # Show all available targets -make start # Start complete environment -make health # Check all services -make monitor # Open dashboard -make logs-admin # View admin activity -make clean # Complete cleanup - -# Monitoring -make volume-status # Check for EC candidates -make admin-status # Check task queue -make monitor-status # Full cluster status - -# Scaling & Testing -make test-ec # Run focused EC test -make scale-load RATE=X # Increase load -make troubleshoot # Diagnose issues -``` - -This environment provides a realistic testing scenario for SeaweedFS EC workers with actual data operations, comprehensive monitoring, and easy management through Make targets. \ No newline at end of file diff --git a/docker/admin_integration/Makefile b/docker/admin_integration/Makefile deleted file mode 100644 index 68fb0cec6..000000000 --- a/docker/admin_integration/Makefile +++ /dev/null @@ -1,346 +0,0 @@ -# SeaweedFS Admin Integration Test Makefile -# Tests the admin server and worker functionality using official weed commands - -.PHONY: help build build-and-restart restart-workers start stop restart logs clean status test admin-ui worker-logs master-logs admin-logs vacuum-test vacuum-demo vacuum-status vacuum-data vacuum-data-high vacuum-data-low vacuum-continuous vacuum-clean vacuum-help -.DEFAULT_GOAL := help - -COMPOSE_FILE := docker-compose-ec-test.yml -PROJECT_NAME := admin_integration - -build: ## Build SeaweedFS with latest changes and create Docker image - @echo "๐Ÿ”จ Building SeaweedFS with latest changes..." - @echo "1๏ธโƒฃ Generating admin templates..." - @cd ../../ && make admin-generate - @echo "2๏ธโƒฃ Building Docker image with latest changes..." - @cd ../ && make build - @echo "3๏ธโƒฃ Copying binary for local docker-compose..." - @cp ../weed ./weed-local - @echo "โœ… Build complete! Updated image: chrislusf/seaweedfs:local" - @echo "๐Ÿ’ก Run 'make restart' to apply changes to running services" - -build-and-restart: build ## Build with latest changes and restart services - @echo "๐Ÿ”„ Recreating services with new image..." - @echo "1๏ธโƒฃ Recreating admin server with new image..." - @docker-compose -f $(COMPOSE_FILE) up -d admin - @sleep 5 - @echo "2๏ธโƒฃ Recreating workers to reconnect..." - @docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3 - @echo "โœ… All services recreated with latest changes!" - @echo "๐ŸŒ Admin UI: http://localhost:23646/" - @echo "๐Ÿ’ก Workers will reconnect to the new admin server" - -restart-workers: ## Restart all workers to reconnect to admin server - @echo "๐Ÿ”„ Restarting workers to reconnect to admin server..." - @docker-compose -f $(COMPOSE_FILE) restart worker1 worker2 worker3 - @echo "โœ… Workers restarted and will reconnect to admin server" - -help: ## Show this help message - @echo "SeaweedFS Admin Integration Test" - @echo "================================" - @echo "Tests admin server task distribution to workers using official weed commands" - @echo "" - @echo "๐Ÿ—๏ธ Cluster Management:" - @grep -E '^(start|stop|restart|clean|status|build):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}' - @echo "" - @echo "๐Ÿงช Testing:" - @grep -E '^(test|demo|validate|quick-test):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}' - @echo "" - @echo "๐Ÿ—‘๏ธ Vacuum Testing:" - @grep -E '^vacuum-.*:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}' - @echo "" - @echo "๐Ÿ“œ Monitoring:" - @grep -E '^(logs|admin-logs|worker-logs|master-logs|admin-ui):.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-18s %s\n", $$1, $$2}' - @echo "" - @echo "๐Ÿš€ Quick Start:" - @echo " make start # Start cluster" - @echo " make vacuum-test # Test vacuum tasks" - @echo " make vacuum-help # Vacuum testing guide" - @echo "" - @echo "๐Ÿ’ก For detailed vacuum testing: make vacuum-help" - -start: ## Start the complete SeaweedFS cluster with admin and workers - @echo "๐Ÿš€ Starting SeaweedFS cluster with admin and workers..." - @docker-compose -f $(COMPOSE_FILE) up -d - @echo "โœ… Cluster started!" - @echo "" - @echo "๐Ÿ“Š Access points:" - @echo " โ€ข Admin UI: http://localhost:23646/" - @echo " โ€ข Master UI: http://localhost:9333/" - @echo " โ€ข Filer: http://localhost:8888/" - @echo "" - @echo "๐Ÿ“ˆ Services starting up..." - @echo " โ€ข Master server: โœ“" - @echo " โ€ข Volume servers: Starting (6 servers)..." - @echo " โ€ข Filer: Starting..." - @echo " โ€ข Admin server: Starting..." - @echo " โ€ข Workers: Starting (3 workers)..." - @echo "" - @echo "โณ Use 'make status' to check startup progress" - @echo "๐Ÿ’ก Use 'make logs' to watch the startup process" - -start-staged: ## Start services in proper order with delays - @echo "๐Ÿš€ Starting SeaweedFS cluster in stages..." - @echo "" - @echo "Stage 1: Starting Master server..." - @docker-compose -f $(COMPOSE_FILE) up -d master - @sleep 10 - @echo "" - @echo "Stage 2: Starting Volume servers..." - @docker-compose -f $(COMPOSE_FILE) up -d volume1 volume2 volume3 volume4 volume5 volume6 - @sleep 15 - @echo "" - @echo "Stage 3: Starting Filer..." - @docker-compose -f $(COMPOSE_FILE) up -d filer - @sleep 10 - @echo "" - @echo "Stage 4: Starting Admin server..." - @docker-compose -f $(COMPOSE_FILE) up -d admin - @sleep 15 - @echo "" - @echo "Stage 5: Starting Workers..." - @docker-compose -f $(COMPOSE_FILE) up -d worker1 worker2 worker3 - @sleep 10 - @echo "" - @echo "Stage 6: Starting Load generator and Monitor..." - @docker-compose -f $(COMPOSE_FILE) up -d load_generator monitor - @echo "" - @echo "โœ… All services started!" - @echo "" - @echo "๐Ÿ“Š Access points:" - @echo " โ€ข Admin UI: http://localhost:23646/" - @echo " โ€ข Master UI: http://localhost:9333/" - @echo " โ€ข Filer: http://localhost:8888/" - @echo "" - @echo "โณ Services are initializing... Use 'make status' to check progress" - -stop: ## Stop all services - @echo "๐Ÿ›‘ Stopping SeaweedFS cluster..." - @docker-compose -f $(COMPOSE_FILE) down - @echo "โœ… Cluster stopped" - -restart: stop start ## Restart the entire cluster - -clean: ## Stop and remove all containers, networks, and volumes - @echo "๐Ÿงน Cleaning up SeaweedFS test environment..." - @docker-compose -f $(COMPOSE_FILE) down -v --remove-orphans - @docker system prune -f - @rm -rf data/ - @echo "โœ… Environment cleaned" - -status: ## Check the status of all services - @echo "๐Ÿ“Š SeaweedFS Cluster Status" - @echo "==========================" - @docker-compose -f $(COMPOSE_FILE) ps - @echo "" - @echo "๐Ÿ“‹ Service Health:" - @echo "Master:" - @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' 2>/dev/null || echo " โŒ Master not ready" - @echo "Admin:" - @curl -s http://localhost:23646/ | grep -q "Admin" && echo " โœ… Admin ready" || echo " โŒ Admin not ready" - -logs: ## Show logs from all services - @echo "๐Ÿ“œ Following logs from all services..." - @echo "๐Ÿ’ก Press Ctrl+C to stop following logs" - @docker-compose -f $(COMPOSE_FILE) logs -f - -admin-logs: ## Show logs from admin server only - @echo "๐Ÿ“œ Admin server logs:" - @docker-compose -f $(COMPOSE_FILE) logs -f admin - -worker-logs: ## Show logs from all workers - @echo "๐Ÿ“œ Worker logs:" - @docker-compose -f $(COMPOSE_FILE) logs -f worker1 worker2 worker3 - -master-logs: ## Show logs from master server - @echo "๐Ÿ“œ Master server logs:" - @docker-compose -f $(COMPOSE_FILE) logs -f master - -admin-ui: ## Open admin UI in browser (macOS) - @echo "๐ŸŒ Opening admin UI in browser..." - @open http://localhost:23646/ || echo "๐Ÿ’ก Manually open: http://localhost:23646/" - -test: ## Run integration test to verify task assignment and completion - @echo "๐Ÿงช Running Admin-Worker Integration Test" - @echo "========================================" - @echo "" - @echo "1๏ธโƒฃ Checking cluster health..." - @sleep 5 - @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "โœ… Master healthy" || echo "โŒ Master not ready" - @curl -s http://localhost:23646/ | grep -q "Admin" && echo "โœ… Admin healthy" || echo "โŒ Admin not ready" - @echo "" - @echo "2๏ธโƒฃ Checking worker registration..." - @sleep 10 - @echo "๐Ÿ’ก Check admin UI for connected workers: http://localhost:23646/" - @echo "" - @echo "3๏ธโƒฃ Generating load to trigger EC tasks..." - @echo "๐Ÿ“ Creating test files to fill volumes..." - @echo "Creating large files with random data to trigger EC (targeting ~60MB total to exceed 50MB limit)..." - @for i in {1..12}; do \ - echo "Creating 5MB random file $$i..."; \ - docker run --rm --network admin_integration_seaweed_net -v /tmp:/tmp --entrypoint sh chrislusf/seaweedfs:local -c "dd if=/dev/urandom of=/tmp/largefile$$i.dat bs=1M count=5 2>/dev/null && weed upload -master=master:9333 /tmp/largefile$$i.dat && rm /tmp/largefile$$i.dat"; \ - sleep 3; \ - done - @echo "" - @echo "4๏ธโƒฃ Waiting for volumes to process large files and reach 50MB limit..." - @echo "This may take a few minutes as we're uploading 60MB of data..." - @sleep 60 - @echo "" - @echo "5๏ธโƒฃ Checking for EC task creation and assignment..." - @echo "๐Ÿ’ก Monitor the admin UI to see:" - @echo " โ€ข Tasks being created for volumes needing EC" - @echo " โ€ข Workers picking up tasks" - @echo " โ€ข Task progress (pending โ†’ running โ†’ completed)" - @echo " โ€ข EC shards being distributed" - @echo "" - @echo "โœ… Integration test setup complete!" - @echo "๐Ÿ“Š Monitor progress at: http://localhost:23646/" - -quick-test: ## Quick verification that core services are running - @echo "โšก Quick Health Check" - @echo "====================" - @echo "Master: $$(curl -s http://localhost:9333/cluster/status | jq -r '.IsLeader // "not ready"')" - @echo "Admin: $$(curl -s http://localhost:23646/ | grep -q "Admin" && echo "ready" || echo "not ready")" - @echo "Workers: $$(docker-compose -f $(COMPOSE_FILE) ps worker1 worker2 worker3 | grep -c Up) running" - -validate: ## Validate integration test configuration - @echo "๐Ÿ” Validating Integration Test Configuration" - @echo "===========================================" - @chmod +x test-integration.sh - @./test-integration.sh - -demo: start ## Start cluster and run demonstration - @echo "๐ŸŽญ SeaweedFS Admin-Worker Demo" - @echo "=============================" - @echo "" - @echo "โณ Waiting for services to start..." - @sleep 45 - @echo "" - @echo "๐ŸŽฏ Demo Overview:" - @echo " โ€ข 1 Master server (coordinates cluster)" - @echo " โ€ข 6 Volume servers (50MB volume limit)" - @echo " โ€ข 1 Admin server (task management)" - @echo " โ€ข 3 Workers (execute EC tasks)" - @echo " โ€ข Load generator (creates files continuously)" - @echo "" - @echo "๐Ÿ“Š Watch the process:" - @echo " 1. Visit: http://localhost:23646/" - @echo " 2. Observe workers connecting" - @echo " 3. Watch tasks being created and assigned" - @echo " 4. See tasks progress from pending โ†’ completed" - @echo "" - @echo "๐Ÿ”„ The demo will:" - @echo " โ€ข Fill volumes to 50MB limit" - @echo " โ€ข Admin detects volumes needing EC" - @echo " โ€ข Workers receive and execute EC tasks" - @echo " โ€ข Tasks complete with shard distribution" - @echo "" - @echo "๐Ÿ’ก Use 'make worker-logs' to see worker activity" - @echo "๐Ÿ’ก Use 'make admin-logs' to see admin task management" - -# Vacuum Testing Targets -vacuum-test: ## Create test data with garbage and verify vacuum detection - @echo "๐Ÿงช SeaweedFS Vacuum Task Testing" - @echo "================================" - @echo "" - @echo "1๏ธโƒฃ Checking cluster health..." - @curl -s http://localhost:9333/cluster/status | jq '.IsLeader' > /dev/null && echo "โœ… Master ready" || (echo "โŒ Master not ready. Run 'make start' first." && exit 1) - @curl -s http://localhost:23646/ | grep -q "Admin" && echo "โœ… Admin ready" || (echo "โŒ Admin not ready. Run 'make start' first." && exit 1) - @echo "" - @echo "2๏ธโƒฃ Creating test data with garbage..." - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=25 -delete=0.5 -size=200 - @echo "" - @echo "3๏ธโƒฃ Configuration Instructions:" - @echo " Visit: http://localhost:23646/maintenance/config/vacuum" - @echo " Set for testing:" - @echo " โ€ข Enable Vacuum Tasks: โœ… Checked" - @echo " โ€ข Garbage Threshold: 0.20 (20%)" - @echo " โ€ข Scan Interval: [30] [Seconds]" - @echo " โ€ข Min Volume Age: [0] [Minutes]" - @echo " โ€ข Max Concurrent: 2" - @echo "" - @echo "4๏ธโƒฃ Monitor vacuum tasks at: http://localhost:23646/maintenance" - @echo "" - @echo "๐Ÿ’ก Use 'make vacuum-status' to check volume garbage ratios" - -vacuum-demo: ## Run automated vacuum testing demonstration - @echo "๐ŸŽญ Vacuum Task Demo" - @echo "==================" - @echo "" - @echo "โš ๏ธ This demo requires user interaction for configuration" - @echo "๐Ÿ’ก Make sure cluster is running with 'make start'" - @echo "" - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x demo_vacuum_testing.sh && ./demo_vacuum_testing.sh" - -vacuum-status: ## Check current volume status and garbage ratios - @echo "๐Ÿ“Š Current Volume Status" - @echo "=======================" - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester sh -c "chmod +x check_volumes.sh && ./check_volumes.sh" - -vacuum-data: ## Create test data with configurable parameters - @echo "๐Ÿ“ Creating vacuum test data..." - @echo "Usage: make vacuum-data [FILES=20] [DELETE=0.4] [SIZE=100]" - @echo "" - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go \ - -files=$${FILES:-20} \ - -delete=$${DELETE:-0.4} \ - -size=$${SIZE:-100} - -vacuum-data-high: ## Create high garbage ratio test data (should trigger vacuum) - @echo "๐Ÿ“ Creating high garbage test data (70% garbage)..." - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.7 -size=150 - -vacuum-data-low: ## Create low garbage ratio test data (should NOT trigger vacuum) - @echo "๐Ÿ“ Creating low garbage test data (15% garbage)..." - @docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=30 -delete=0.15 -size=150 - -vacuum-continuous: ## Generate garbage continuously for testing - @echo "๐Ÿ”„ Generating continuous garbage for vacuum testing..." - @echo "Creating 5 rounds of test data with 30-second intervals..." - @for i in {1..5}; do \ - echo "Round $$i: Creating garbage..."; \ - docker-compose -f $(COMPOSE_FILE) exec vacuum-tester go run create_vacuum_test_data.go -files=10 -delete=0.6 -size=100; \ - echo "Waiting 30 seconds..."; \ - sleep 30; \ - done - @echo "โœ… Continuous test complete. Check vacuum task activity!" - -vacuum-clean: ## Clean up vacuum test data (removes all volumes!) - @echo "๐Ÿงน Cleaning up vacuum test data..." - @echo "โš ๏ธ WARNING: This will delete ALL volumes!" - @read -p "Are you sure? (y/N): " confirm && [ "$$confirm" = "y" ] || exit 1 - @echo "Stopping cluster..." - @docker-compose -f $(COMPOSE_FILE) down - @echo "Removing volume data..." - @rm -rf data/volume*/ - @echo "Restarting cluster..." - @docker-compose -f $(COMPOSE_FILE) up -d - @echo "โœ… Clean up complete. Fresh volumes ready for testing." - -vacuum-help: ## Show vacuum testing help and examples - @echo "๐Ÿงช Vacuum Testing Commands (Docker-based)" - @echo "==========================================" - @echo "" - @echo "Quick Start:" - @echo " make start # Start SeaweedFS cluster with vacuum-tester" - @echo " make vacuum-test # Create test data and instructions" - @echo " make vacuum-status # Check volume status" - @echo "" - @echo "Data Generation:" - @echo " make vacuum-data-high # High garbage (should trigger)" - @echo " make vacuum-data-low # Low garbage (should NOT trigger)" - @echo " make vacuum-continuous # Continuous garbage generation" - @echo "" - @echo "Monitoring:" - @echo " make vacuum-status # Quick volume status check" - @echo " make vacuum-demo # Full guided demonstration" - @echo "" - @echo "Configuration:" - @echo " Visit: http://localhost:23646/maintenance/config/vacuum" - @echo " Monitor: http://localhost:23646/maintenance" - @echo "" - @echo "Custom Parameters:" - @echo " make vacuum-data FILES=50 DELETE=0.8 SIZE=200" - @echo "" - @echo "๐Ÿ’ก All commands now run inside Docker containers" - @echo "Documentation:" - @echo " See: VACUUM_TEST_README.md for complete guide" \ No newline at end of file diff --git a/docker/admin_integration/check_volumes.sh b/docker/admin_integration/check_volumes.sh deleted file mode 100755 index 8cc6c14c5..000000000 --- a/docker/admin_integration/check_volumes.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh - -echo "๐Ÿ“Š Quick Volume Status Check" -echo "============================" -echo "" - -# Check if master is running -MASTER_URL="${MASTER_HOST:-master:9333}" -if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then - echo "โŒ Master server not available at $MASTER_URL" - exit 1 -fi - -echo "๐Ÿ” Fetching volume status from master..." -curl -s "http://$MASTER_URL/vol/status" | jq -r ' -if .Volumes and .Volumes.DataCenters then - .Volumes.DataCenters | to_entries[] | .value | to_entries[] | .value | to_entries[] | .value | if . then .[] else empty end | - "Volume \(.Id): - Size: \(.Size | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end) - Files: \(.FileCount) active, \(.DeleteCount) deleted - Garbage: \(.DeletedByteCount | if . < 1024 then "\(.) B" elif . < 1048576 then "\(. / 1024 | floor) KB" elif . < 1073741824 then "\(. / 1048576 * 100 | floor / 100) MB" else "\(. / 1073741824 * 100 | floor / 100) GB" end) (\(if .Size > 0 then (.DeletedByteCount / .Size * 100 | floor) else 0 end)%) - Status: \(if (.DeletedByteCount / .Size * 100) > 30 then "๐ŸŽฏ NEEDS VACUUM" else "โœ… OK" end) -" -else - "No volumes found" -end' - -echo "" -echo "๐Ÿ’ก Legend:" -echo " ๐ŸŽฏ NEEDS VACUUM: >30% garbage ratio" -echo " โœ… OK: <30% garbage ratio" -echo "" \ No newline at end of file diff --git a/docker/admin_integration/create_vacuum_test_data.go b/docker/admin_integration/create_vacuum_test_data.go deleted file mode 100644 index 46acdd4cd..000000000 --- a/docker/admin_integration/create_vacuum_test_data.go +++ /dev/null @@ -1,280 +0,0 @@ -package main - -import ( - "bytes" - "crypto/rand" - "encoding/json" - "flag" - "fmt" - "io" - "log" - "net/http" - "time" -) - -var ( - master = flag.String("master", "master:9333", "SeaweedFS master server address") - fileCount = flag.Int("files", 20, "Number of files to create") - deleteRatio = flag.Float64("delete", 0.4, "Ratio of files to delete (0.0-1.0)") - fileSizeKB = flag.Int("size", 100, "Size of each file in KB") -) - -type AssignResult struct { - Fid string `json:"fid"` - Url string `json:"url"` - PublicUrl string `json:"publicUrl"` - Count int `json:"count"` - Error string `json:"error"` -} - -func main() { - flag.Parse() - - fmt.Println("๐Ÿงช Creating fake data for vacuum task testing...") - fmt.Printf("Master: %s\n", *master) - fmt.Printf("Files to create: %d\n", *fileCount) - fmt.Printf("Delete ratio: %.1f%%\n", *deleteRatio*100) - fmt.Printf("File size: %d KB\n", *fileSizeKB) - fmt.Println() - - if *fileCount == 0 { - // Just check volume status - fmt.Println("๐Ÿ“Š Checking volume status...") - checkVolumeStatus() - return - } - - // Step 1: Create test files - fmt.Println("๐Ÿ“ Step 1: Creating test files...") - fids := createTestFiles() - - // Step 2: Delete some files to create garbage - fmt.Println("๐Ÿ—‘๏ธ Step 2: Deleting files to create garbage...") - deleteFiles(fids) - - // Step 3: Check volume status - fmt.Println("๐Ÿ“Š Step 3: Checking volume status...") - checkVolumeStatus() - - // Step 4: Configure vacuum for testing - fmt.Println("โš™๏ธ Step 4: Instructions for testing...") - printTestingInstructions() -} - -func createTestFiles() []string { - var fids []string - - for i := 0; i < *fileCount; i++ { - // Generate random file content - fileData := make([]byte, *fileSizeKB*1024) - rand.Read(fileData) - - // Get file ID assignment - assign, err := assignFileId() - if err != nil { - log.Printf("Failed to assign file ID for file %d: %v", i, err) - continue - } - - // Upload file - err = uploadFile(assign, fileData, fmt.Sprintf("test_file_%d.dat", i)) - if err != nil { - log.Printf("Failed to upload file %d: %v", i, err) - continue - } - - fids = append(fids, assign.Fid) - - if (i+1)%5 == 0 { - fmt.Printf(" Created %d/%d files...\n", i+1, *fileCount) - } - } - - fmt.Printf("โœ… Created %d files successfully\n\n", len(fids)) - return fids -} - -func deleteFiles(fids []string) { - deleteCount := int(float64(len(fids)) * *deleteRatio) - - for i := 0; i < deleteCount; i++ { - err := deleteFile(fids[i]) - if err != nil { - log.Printf("Failed to delete file %s: %v", fids[i], err) - continue - } - - if (i+1)%5 == 0 { - fmt.Printf(" Deleted %d/%d files...\n", i+1, deleteCount) - } - } - - fmt.Printf("โœ… Deleted %d files (%.1f%% of total)\n\n", deleteCount, *deleteRatio*100) -} - -func assignFileId() (*AssignResult, error) { - resp, err := http.Get(fmt.Sprintf("http://%s/dir/assign", *master)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result AssignResult - err = json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - return nil, err - } - - if result.Error != "" { - return nil, fmt.Errorf("assignment error: %s", result.Error) - } - - return &result, nil -} - -func uploadFile(assign *AssignResult, data []byte, filename string) error { - url := fmt.Sprintf("http://%s/%s", assign.Url, assign.Fid) - - body := &bytes.Buffer{} - body.Write(data) - - req, err := http.NewRequest("POST", url, body) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/octet-stream") - if filename != "" { - req.Header.Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) - } - - client := &http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body)) - } - - return nil -} - -func deleteFile(fid string) error { - url := fmt.Sprintf("http://%s/%s", *master, fid) - - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return err - } - - client := &http.Client{Timeout: 10 * time.Second} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func checkVolumeStatus() { - // Get volume list from master - resp, err := http.Get(fmt.Sprintf("http://%s/vol/status", *master)) - if err != nil { - log.Printf("Failed to get volume status: %v", err) - return - } - defer resp.Body.Close() - - var volumes map[string]interface{} - err = json.NewDecoder(resp.Body).Decode(&volumes) - if err != nil { - log.Printf("Failed to decode volume status: %v", err) - return - } - - fmt.Println("๐Ÿ“Š Volume Status Summary:") - - if vols, ok := volumes["Volumes"].([]interface{}); ok { - for _, vol := range vols { - if v, ok := vol.(map[string]interface{}); ok { - id := int(v["Id"].(float64)) - size := uint64(v["Size"].(float64)) - fileCount := int(v["FileCount"].(float64)) - deleteCount := int(v["DeleteCount"].(float64)) - deletedBytes := uint64(v["DeletedByteCount"].(float64)) - - garbageRatio := 0.0 - if size > 0 { - garbageRatio = float64(deletedBytes) / float64(size) * 100 - } - - fmt.Printf(" Volume %d:\n", id) - fmt.Printf(" Size: %s\n", formatBytes(size)) - fmt.Printf(" Files: %d (active), %d (deleted)\n", fileCount, deleteCount) - fmt.Printf(" Garbage: %s (%.1f%%)\n", formatBytes(deletedBytes), garbageRatio) - - if garbageRatio > 30 { - fmt.Printf(" ๐ŸŽฏ This volume should trigger vacuum (>30%% garbage)\n") - } - fmt.Println() - } - } - } -} - -func formatBytes(bytes uint64) string { - if bytes < 1024 { - return fmt.Sprintf("%d B", bytes) - } else if bytes < 1024*1024 { - return fmt.Sprintf("%.1f KB", float64(bytes)/1024) - } else if bytes < 1024*1024*1024 { - return fmt.Sprintf("%.1f MB", float64(bytes)/(1024*1024)) - } else { - return fmt.Sprintf("%.1f GB", float64(bytes)/(1024*1024*1024)) - } -} - -func printTestingInstructions() { - fmt.Println("๐Ÿงช Testing Instructions:") - fmt.Println() - fmt.Println("1. Configure Vacuum for Testing:") - fmt.Println(" Visit: http://localhost:23646/maintenance/config/vacuum") - fmt.Println(" Set:") - fmt.Printf(" - Garbage Percentage Threshold: 20 (20%% - lower than default 30)\n") - fmt.Printf(" - Scan Interval: [30] [Seconds] (faster than default)\n") - fmt.Printf(" - Min Volume Age: [0] [Minutes] (no age requirement)\n") - fmt.Printf(" - Max Concurrent: 2\n") - fmt.Printf(" - Min Interval: 1m (faster repeat)\n") - fmt.Println() - - fmt.Println("2. Monitor Vacuum Tasks:") - fmt.Println(" Visit: http://localhost:23646/maintenance") - fmt.Println(" Watch for vacuum tasks to appear in the queue") - fmt.Println() - - fmt.Println("3. Manual Vacuum (Optional):") - fmt.Println(" curl -X POST 'http://localhost:9333/vol/vacuum?garbageThreshold=0.20'") - fmt.Println(" (Note: Master API still uses 0.0-1.0 decimal format)") - fmt.Println() - - fmt.Println("4. Check Logs:") - fmt.Println(" Look for messages like:") - fmt.Println(" - 'Vacuum detector found X volumes needing vacuum'") - fmt.Println(" - 'Applied vacuum configuration'") - fmt.Println(" - 'Worker executing task: vacuum'") - fmt.Println() - - fmt.Println("5. Verify Results:") - fmt.Println(" Re-run this script with -files=0 to check volume status") - fmt.Println(" Garbage ratios should decrease after vacuum operations") - fmt.Println() - - fmt.Printf("๐Ÿš€ Quick test command:\n") - fmt.Printf(" go run create_vacuum_test_data.go -files=0\n") - fmt.Println() -} diff --git a/docker/admin_integration/demo_vacuum_testing.sh b/docker/admin_integration/demo_vacuum_testing.sh deleted file mode 100755 index 6835e14cc..000000000 --- a/docker/admin_integration/demo_vacuum_testing.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/sh - -echo "๐Ÿงช SeaweedFS Vacuum Task Testing Demo" -echo "======================================" -echo "" - -# Check if SeaweedFS is running -echo "๐Ÿ“‹ Checking SeaweedFS status..." -MASTER_URL="${MASTER_HOST:-master:9333}" -ADMIN_URL="${ADMIN_HOST:-admin:23646}" - -if ! curl -s http://$MASTER_URL/cluster/status > /dev/null; then - echo "โŒ SeaweedFS master not running at $MASTER_URL" - echo " Please ensure Docker cluster is running: make start" - exit 1 -fi - -if ! curl -s http://volume1:8080/status > /dev/null; then - echo "โŒ SeaweedFS volume servers not running" - echo " Please ensure Docker cluster is running: make start" - exit 1 -fi - -if ! curl -s http://$ADMIN_URL/ > /dev/null; then - echo "โŒ SeaweedFS admin server not running at $ADMIN_URL" - echo " Please ensure Docker cluster is running: make start" - exit 1 -fi - -echo "โœ… All SeaweedFS components are running" -echo "" - -# Phase 1: Create test data -echo "๐Ÿ“ Phase 1: Creating test data with garbage..." -go run create_vacuum_test_data.go -master=$MASTER_URL -files=15 -delete=0.5 -size=150 -echo "" - -# Phase 2: Check initial status -echo "๐Ÿ“Š Phase 2: Checking initial volume status..." -go run create_vacuum_test_data.go -master=$MASTER_URL -files=0 -echo "" - -# Phase 3: Configure vacuum -echo "โš™๏ธ Phase 3: Vacuum configuration instructions..." -echo " 1. Visit: http://localhost:23646/maintenance/config/vacuum" -echo " 2. Set these values for testing:" -echo " - Enable Vacuum Tasks: โœ… Checked" -echo " - Garbage Threshold: 0.30" -echo " - Scan Interval: [30] [Seconds]" -echo " - Min Volume Age: [0] [Minutes]" -echo " - Max Concurrent: 2" -echo " 3. Click 'Save Configuration'" -echo "" - -read -p " Press ENTER after configuring vacuum settings..." -echo "" - -# Phase 4: Monitor tasks -echo "๐ŸŽฏ Phase 4: Monitoring vacuum tasks..." -echo " Visit: http://localhost:23646/maintenance" -echo " You should see vacuum tasks appear within 30 seconds" -echo "" - -echo " Waiting 60 seconds for vacuum detection and execution..." -for i in {60..1}; do - printf "\r Countdown: %02d seconds" $i - sleep 1 -done -echo "" -echo "" - -# Phase 5: Check results -echo "๐Ÿ“ˆ Phase 5: Checking results after vacuum..." -go run create_vacuum_test_data.go -master=$MASTER_URL -files=0 -echo "" - -# Phase 6: Create more garbage for continuous testing -echo "๐Ÿ”„ Phase 6: Creating additional garbage for continuous testing..." -echo " Running 3 rounds of garbage creation..." - -for round in {1..3}; do - echo " Round $round: Creating garbage..." - go run create_vacuum_test_data.go -master=$MASTER_URL -files=8 -delete=0.6 -size=100 - echo " Waiting 30 seconds before next round..." - sleep 30 -done - -echo "" -echo "๐Ÿ“Š Final volume status:" -go run create_vacuum_test_data.go -master=$MASTER_URL -files=0 -echo "" - -echo "๐ŸŽ‰ Demo Complete!" -echo "" -echo "๐Ÿ” Things to check:" -echo " 1. Maintenance Queue: http://localhost:23646/maintenance" -echo " 2. Volume Status: http://localhost:9333/vol/status" -echo " 3. Admin Dashboard: http://localhost:23646" -echo "" -echo "๐Ÿ’ก Next Steps:" -echo " - Try different garbage thresholds (0.10, 0.50, 0.80)" -echo " - Adjust scan intervals (10s, 1m, 5m)" -echo " - Monitor logs for vacuum operations" -echo " - Test with multiple volumes" -echo "" \ No newline at end of file diff --git a/docker/admin_integration/docker-compose-ec-test.yml b/docker/admin_integration/docker-compose-ec-test.yml deleted file mode 100644 index 73d0ee0ff..000000000 --- a/docker/admin_integration/docker-compose-ec-test.yml +++ /dev/null @@ -1,240 +0,0 @@ -name: admin_integration - -networks: - seaweed_net: - driver: bridge - -services: - master: - image: chrislusf/seaweedfs:local - ports: - - "9333:9333" - - "19333:19333" - command: "master -ip=master -mdir=/data -volumeSizeLimitMB=50" - environment: - - WEED_MASTER_VOLUME_GROWTH_COPY_1=1 - - WEED_MASTER_VOLUME_GROWTH_COPY_2=2 - - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER=1 - volumes: - - ./data/master:/data - networks: - - seaweed_net - - volume1: - image: chrislusf/seaweedfs:local - ports: - - "8080:8080" - - "18080:18080" - command: "volume -mserver=master:9333 -ip=volume1 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume1:/data - networks: - - seaweed_net - - volume2: - image: chrislusf/seaweedfs:local - ports: - - "8081:8080" - - "18081:18080" - command: "volume -mserver=master:9333 -ip=volume2 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume2:/data - networks: - - seaweed_net - - volume3: - image: chrislusf/seaweedfs:local - ports: - - "8082:8080" - - "18082:18080" - command: "volume -mserver=master:9333 -ip=volume3 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume3:/data - networks: - - seaweed_net - - volume4: - image: chrislusf/seaweedfs:local - ports: - - "8083:8080" - - "18083:18080" - command: "volume -mserver=master:9333 -ip=volume4 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume4:/data - networks: - - seaweed_net - - volume5: - image: chrislusf/seaweedfs:local - ports: - - "8084:8080" - - "18084:18080" - command: "volume -mserver=master:9333 -ip=volume5 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume5:/data - networks: - - seaweed_net - - volume6: - image: chrislusf/seaweedfs:local - ports: - - "8085:8080" - - "18085:18080" - command: "volume -mserver=master:9333 -ip=volume6 -dir=/data -max=10" - depends_on: - - master - volumes: - - ./data/volume6:/data - networks: - - seaweed_net - - filer: - image: chrislusf/seaweedfs:local - ports: - - "8888:8888" - - "18888:18888" - command: "filer -master=master:9333 -ip=filer" - depends_on: - - master - volumes: - - ./data/filer:/data - networks: - - seaweed_net - - admin: - image: chrislusf/seaweedfs:local - ports: - - "23646:23646" # HTTP admin interface (default port) - - "33646:33646" # gRPC worker communication (23646 + 10000) - command: "-v=2 admin -port=23646 -masters=master:9333 -dataDir=/data" - depends_on: - - master - - filer - volumes: - - ./data/admin:/data - networks: - - seaweed_net - - worker1: - image: chrislusf/seaweedfs:local - command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2" - depends_on: - - admin - volumes: - - ./data/worker1:/data - networks: - - seaweed_net - environment: - - WORKER_ID=worker-1 - - worker2: - image: chrislusf/seaweedfs:local - command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2" - depends_on: - - admin - volumes: - - ./data/worker2:/data - networks: - - seaweed_net - environment: - - WORKER_ID=worker-2 - - worker3: - image: chrislusf/seaweedfs:local - command: "-v=2 worker -admin=admin:23646 -capabilities=erasure_coding,vacuum -maxConcurrent=2" - depends_on: - - admin - volumes: - - ./data/worker3:/data - networks: - - seaweed_net - environment: - - WORKER_ID=worker-3 - - load_generator: - image: chrislusf/seaweedfs:local - entrypoint: ["/bin/sh"] - command: > - -c " - echo 'Starting load generator...'; - sleep 30; - echo 'Generating continuous load with 50MB volume limit...'; - while true; do - echo 'Writing test files...'; - echo 'Test file content at $(date)' | /usr/bin/weed upload -server=master:9333; - sleep 5; - echo 'Deleting some files...'; - /usr/bin/weed shell -master=master:9333 <<< 'fs.rm /test_file_*' || true; - sleep 10; - done - " - depends_on: - - master - - filer - - admin - networks: - - seaweed_net - - monitor: - image: alpine:latest - entrypoint: ["/bin/sh"] - command: > - -c " - apk add --no-cache curl jq; - echo 'Starting cluster monitor...'; - sleep 30; - while true; do - echo '=== Cluster Status $(date) ==='; - echo 'Master status:'; - curl -s http://master:9333/cluster/status | jq '.IsLeader, .Peers' || echo 'Master not ready'; - echo; - echo 'Admin status:'; - curl -s http://admin:23646/ | grep -o 'Admin.*Interface' || echo 'Admin not ready'; - echo; - echo 'Volume count by server:'; - curl -s http://master:9333/vol/status | jq '.Volumes | length' || echo 'Volumes not ready'; - echo; - sleep 60; - done - " - depends_on: - - master - - admin - - filer - networks: - - seaweed_net - - vacuum-tester: - image: chrislusf/seaweedfs:local - entrypoint: ["/bin/sh"] - command: > - -c " - echo 'Installing dependencies for vacuum testing...'; - apk add --no-cache jq curl go bash; - echo 'Vacuum tester ready...'; - echo 'Use: docker-compose exec vacuum-tester sh'; - echo 'Available commands: go, weed, curl, jq, bash, sh'; - sleep infinity - " - depends_on: - - master - - admin - - filer - volumes: - - .:/testing - working_dir: /testing - networks: - - seaweed_net - environment: - - MASTER_HOST=master:9333 - - ADMIN_HOST=admin:23646 \ No newline at end of file diff --git a/docker/admin_integration/test-integration.sh b/docker/admin_integration/test-integration.sh deleted file mode 100755 index b355b1dfd..000000000 --- a/docker/admin_integration/test-integration.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -set -e - -echo "๐Ÿงช Testing SeaweedFS Admin-Worker Integration" -echo "=============================================" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -cd "$(dirname "$0")" - -echo -e "${BLUE}1. Validating docker-compose configuration...${NC}" -if docker-compose -f docker-compose-ec-test.yml config > /dev/null; then - echo -e "${GREEN}โœ… Docker compose configuration is valid${NC}" -else - echo -e "${RED}โŒ Docker compose configuration is invalid${NC}" - exit 1 -fi - -echo -e "${BLUE}2. Checking if required ports are available...${NC}" -for port in 9333 8080 8081 8082 8083 8084 8085 8888 23646; do - if lsof -i :$port > /dev/null 2>&1; then - echo -e "${YELLOW}โš ๏ธ Port $port is in use${NC}" - else - echo -e "${GREEN}โœ… Port $port is available${NC}" - fi -done - -echo -e "${BLUE}3. Testing worker command syntax...${NC}" -# Test that the worker command in docker-compose has correct syntax -if docker-compose -f docker-compose-ec-test.yml config | grep -q "workingDir=/work"; then - echo -e "${GREEN}โœ… Worker working directory option is properly configured${NC}" -else - echo -e "${RED}โŒ Worker working directory option is missing${NC}" - exit 1 -fi - -echo -e "${BLUE}4. Verifying admin server configuration...${NC}" -if docker-compose -f docker-compose-ec-test.yml config | grep -q "admin:23646"; then - echo -e "${GREEN}โœ… Admin server port configuration is correct${NC}" -else - echo -e "${RED}โŒ Admin server port configuration is incorrect${NC}" - exit 1 -fi - -echo -e "${BLUE}5. Checking service dependencies...${NC}" -if docker-compose -f docker-compose-ec-test.yml config | grep -q "depends_on"; then - echo -e "${GREEN}โœ… Service dependencies are configured${NC}" -else - echo -e "${YELLOW}โš ๏ธ Service dependencies may not be configured${NC}" -fi - -echo "" -echo -e "${GREEN}๐ŸŽ‰ Integration test configuration is ready!${NC}" -echo "" -echo -e "${BLUE}To start the integration test:${NC}" -echo " make start # Start all services" -echo " make health # Check service health" -echo " make logs # View logs" -echo " make stop # Stop all services" -echo "" -echo -e "${BLUE}Key features verified:${NC}" -echo " โœ… Official SeaweedFS images are used" -echo " โœ… Worker working directories are configured" -echo " โœ… Admin-worker communication on correct ports" -echo " โœ… Task-specific directories will be created" -echo " โœ… Load generator will trigger EC tasks" -echo " โœ… Monitor will track progress" \ No newline at end of file diff --git a/docker/compose/e2e-mount.yml b/docker/compose/e2e-mount.yml deleted file mode 100644 index 5571bf003..000000000 --- a/docker/compose/e2e-mount.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3.9' - -services: - master: - image: chrislusf/seaweedfs:e2e - command: "-v=4 master -ip=master -ip.bind=0.0.0.0 -raftBootstrap" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ] - interval: 2s - timeout: 10s - retries: 30 - start_period: 10s - - volume: - image: chrislusf/seaweedfs:e2e - command: "-v=4 volume -mserver=master:9333 -ip=volume -ip.bind=0.0.0.0 -preStopSeconds=1" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8080/healthz" ] - interval: 2s - timeout: 10s - retries: 15 - start_period: 5s - depends_on: - master: - condition: service_healthy - - filer: - image: chrislusf/seaweedfs:e2e - command: "-v=4 filer -master=master:9333 -ip=filer -ip.bind=0.0.0.0" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:8888" ] - interval: 2s - timeout: 10s - retries: 15 - start_period: 5s - depends_on: - volume: - condition: service_healthy - - mount: - image: chrislusf/seaweedfs:e2e - command: "-v=4 mount -filer=filer:8888 -filer.path=/ -dirAutoCreate -dir=/mnt/seaweedfs" - cap_add: - - SYS_ADMIN - devices: - - /dev/fuse - security_opt: - - apparmor:unconfined - deploy: - resources: - limits: - memory: 4096m - healthcheck: - test: [ "CMD", "mountpoint", "-q", "--", "/mnt/seaweedfs" ] - interval: 2s - timeout: 10s - retries: 15 - start_period: 10s - depends_on: - filer: - condition: service_healthy diff --git a/docker/compose/fluent.conf b/docker/compose/fluent.conf deleted file mode 100644 index d4396dd4f..000000000 --- a/docker/compose/fluent.conf +++ /dev/null @@ -1,8 +0,0 @@ - - @type forward - port 24224 - - - - @type stdout # Output logs to container's stdout (visible via `docker logs`) - diff --git a/docker/compose/local-auditlog-compose.yml b/docker/compose/local-auditlog-compose.yml index dc3fee948..39c997448 100644 --- a/docker/compose/local-auditlog-compose.yml +++ b/docker/compose/local-auditlog-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: s3: @@ -19,9 +19,7 @@ services: depends_on: - fluent fluent: - image: fluent/fluentd:v1.17 - volumes: - - ./fluent.conf:/fluentd/etc/fluent.conf + image: fluent/fluentd:v1.14 ports: - 24224:24224 #s3tests: diff --git a/docker/compose/local-brokers-compose.yml b/docker/compose/local-brokers-compose.yml deleted file mode 100644 index 62ec94995..000000000 --- a/docker/compose/local-brokers-compose.yml +++ /dev/null @@ -1,127 +0,0 @@ -version: '3.9' - -services: - master0: - image: chrislusf/seaweedfs:local - ports: - - 9333:9333 - - 19333:19333 - command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp" - environment: - WEED_MASTER_VOLUME_GROWTH_COPY_1: 1 - WEED_MASTER_VOLUME_GROWTH_COPY_2: 2 - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 - master1: - image: chrislusf/seaweedfs:local - ports: - - 9334:9334 - - 19334:19334 - command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp" - environment: - WEED_MASTER_VOLUME_GROWTH_COPY_1: 1 - WEED_MASTER_VOLUME_GROWTH_COPY_2: 2 - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 - master2: - image: chrislusf/seaweedfs:local - ports: - - 9335:9335 - - 19335:19335 - command: "-v=0 master -volumeSizeLimitMB 100 -resumeState=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/tmp" - environment: - WEED_MASTER_VOLUME_GROWTH_COPY_1: 1 - WEED_MASTER_VOLUME_GROWTH_COPY_2: 2 - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 - volume1: - image: chrislusf/seaweedfs:local - ports: - - 8080:8080 - - 18080:18080 - command: 'volume -dataCenter=dc1 -rack=v1 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1' - depends_on: - - master0 - - master1 - - master2 - volume2: - image: chrislusf/seaweedfs:local - ports: - - 8082:8082 - - 18082:18082 - command: 'volume -dataCenter=dc2 -rack=v2 -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1' - depends_on: - - master0 - - master1 - - master2 - volume3: - image: chrislusf/seaweedfs:local - ports: - - 8083:8083 - - 18083:18083 - command: 'volume -dataCenter=dc3 -rack=v3 -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1' - depends_on: - - master0 - - master1 - - master2 - filer1: - image: chrislusf/seaweedfs:local - ports: - - 8888:8888 - - 18888:18888 - command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8888 -ip=filer1' - depends_on: - - master0 - - master1 - - master2 - - volume1 - - volume2 - filer2: - image: chrislusf/seaweedfs:local - ports: - - 8889:8889 - - 18889:18889 - command: 'filer -defaultReplicaPlacement=100 -iam -master="master0:9333,master1:9334,master2:9335" -port=8889 -ip=filer2' - depends_on: - - master0 - - master1 - - master2 - - volume1 - - volume2 - - filer1 - broker1: - image: chrislusf/seaweedfs:local - ports: - - 17777:17777 - command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17777 -ip=broker1' - depends_on: - - master0 - - master1 - - master2 - - volume1 - - volume2 - - filer1 - - filer2 - broker2: - image: chrislusf/seaweedfs:local - ports: - - 17778:17778 - command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17778 -ip=broker2' - depends_on: - - master0 - - master1 - - master2 - - volume1 - - volume2 - - filer1 - - filer2 - broker3: - image: chrislusf/seaweedfs:local - ports: - - 17779:17779 - command: 'mq.broker -master="master0:9333,master1:9334,master2:9335" -port=17779 -ip=broker3' - depends_on: - - master0 - - master1 - - master2 - - volume1 - - volume2 - - filer1 - - filer2 diff --git a/docker/compose/local-cluster-compose.yml b/docker/compose/local-cluster-compose.yml index 13cbcb861..f781244ab 100644 --- a/docker/compose/local-cluster-compose.yml +++ b/docker/compose/local-cluster-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master0: diff --git a/docker/compose/local-clusters-compose.yml b/docker/compose/local-clusters-compose.yml index 62b1c5d4d..f9e9a1589 100644 --- a/docker/compose/local-clusters-compose.yml +++ b/docker/compose/local-clusters-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: server1: @@ -10,7 +10,7 @@ services: - 18084:18080 - 8888:8888 - 18888:18888 - command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1" + command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1" volumes: - ./master-cloud.toml:/etc/seaweedfs/master.toml depends_on: @@ -25,4 +25,4 @@ services: - 8889:8888 - 18889:18888 - 8334:8333 - command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1" + command: "server -ip=server2 -filer -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1" diff --git a/docker/compose/local-dev-compose.yml b/docker/compose/local-dev-compose.yml index 6f0d0fb29..5df1e7332 100644 --- a/docker/compose/local-dev-compose.yml +++ b/docker/compose/local-dev-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: @@ -6,7 +6,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "-v=1 master -ip=master -volumeSizeLimitMB=10" + command: "-v=1 master -ip=master" volumes: - ./tls:/etc/seaweedfs/tls env_file: @@ -16,7 +16,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1 -max=10000" + command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" depends_on: - master volumes: @@ -26,9 +26,10 @@ services: filer: image: chrislusf/seaweedfs:local ports: + - 8111:8111 - 8888:8888 - 18888:18888 - command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333"' + command: '-v=1 filer -ip.bind=0.0.0.0 -master="master:9333" -iam -iam.ip=filer' depends_on: - master - volume @@ -36,19 +37,6 @@ services: - ./tls:/etc/seaweedfs/tls env_file: - ${ENV_FILE:-dev.env} - - iam: - image: chrislusf/seaweedfs:local - ports: - - 8111:8111 - command: '-v=1 iam -filer="filer:8888" -master="master:9333"' - depends_on: - - master - - volume - - filer - volumes: - - ./tls:/etc/seaweedfs/tls - s3: image: chrislusf/seaweedfs:local ports: @@ -62,7 +50,6 @@ services: - ./tls:/etc/seaweedfs/tls env_file: - ${ENV_FILE:-dev.env} - mount: image: chrislusf/seaweedfs:local privileged: true diff --git a/docker/compose/local-filer-backup-compose.yml b/docker/compose/local-filer-backup-compose.yml deleted file mode 100644 index 3e56e624d..000000000 --- a/docker/compose/local-filer-backup-compose.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: '3.9' - -services: - server-left: - image: chrislusf/seaweedfs:local - command: "-v=0 server -ip=server-left -filer -filer.maxMB 5 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1" - volumes: - - ./s3.json:/etc/seaweedfs/s3.json - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ] - interval: 3s - start_period: 15s - timeout: 30s - server-right: - image: chrislusf/seaweedfs:local - command: "-v=0 server -ip=server-right -filer -filer.maxMB 64 -s3 -s3.config=/etc/seaweedfs/s3.json -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1" - volumes: - - ./s3.json:/etc/seaweedfs/s3.json - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ] - interval: 3s - start_period: 15s - timeout: 30s - filer-backup: - image: chrislusf/seaweedfs:local - command: "-v=0 filer.backup -debug -doDeleteFiles=False -filer server-left:8888" - volumes: - - ./replication.toml:/etc/seaweedfs/replication.toml - environment: - WEED_SINK_LOCAL_INCREMENTAL_ENABLED: "false" - WEED_SINK_S3_ENABLED: "true" - WEED_SINK_S3_BUCKET: "backup" - WEED_SINK_S3_ENDPOINT: "http://server-right:8333" - WEED_SINK_S3_DIRECTORY: "/" - WEED_SINK_S3_AWS_ACCESS_KEY_ID: "some_access_key1" - WEED_SINK_S3_AWS_SECRET_ACCESS_KEY: "some_secret_key1" - WEED_SINK_S3_S3_DISABLE_CONTENT_MD5_VALIDATION: "false" - WEED_SINK_S3_UPLOADER_PART_SIZE_MB: "5" - WEED_SINK_S3_KEEP_PART_SIZE: "false" - depends_on: - server-left: - condition: service_healthy - server-right: - condition: service_healthy - minio-warp: - image: minio/warp - command: 'mixed --duration 5s --obj.size=6mb --md5 --objects 10 --concurrent 2' - restart: on-failure - environment: - WARP_HOST: "server-left:8333" - WARP_ACCESS_KEY: "some_access_key1" - WARP_SECRET_KEY: "some_secret_key1" - depends_on: - - filer-backup \ No newline at end of file diff --git a/docker/compose/local-hashicorp-raft-compose.yml b/docker/compose/local-hashicorp-raft-compose.yml index 4b9814d92..14b5eb57a 100644 --- a/docker/compose/local-hashicorp-raft-compose.yml +++ b/docker/compose/local-hashicorp-raft-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master0: @@ -6,7 +6,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data" + command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data" volumes: - ./master/0:/data environment: @@ -18,7 +18,7 @@ services: ports: - 9334:9334 - 19334:19334 - command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data" + command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data" volumes: - ./master/1:/data environment: @@ -30,7 +30,7 @@ services: ports: - 9335:9335 - 19335:19335 - command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -electionTimeout 1s -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data" + command: "-v=4 master -volumeSizeLimitMB 100 -raftHashicorp -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data" volumes: - ./master/2:/data environment: diff --git a/docker/compose/local-k8s-compose.yml b/docker/compose/local-k8s-compose.yml index c73103d92..9a25465c4 100644 --- a/docker/compose/local-k8s-compose.yml +++ b/docker/compose/local-k8s-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-minio-gateway-compose.yml b/docker/compose/local-minio-gateway-compose.yml index 179ea1630..fafee59c8 100644 --- a/docker/compose/local-minio-gateway-compose.yml +++ b/docker/compose/local-minio-gateway-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: @@ -6,7 +6,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master -ip=master -volumeSizeLimitMB=100" + command: "master -ip=master -volumeSizeLimitMB=1024" volume: image: chrislusf/seaweedfs:local ports: diff --git a/docker/compose/local-mount-compose.yml b/docker/compose/local-mount-compose.yml index 6f8847050..8c4329054 100644 --- a/docker/compose/local-mount-compose.yml +++ b/docker/compose/local-mount-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-mount-profile-compose.yml b/docker/compose/local-mount-profile-compose.yml index 65308bd96..20cddae2d 100644 --- a/docker/compose/local-mount-profile-compose.yml +++ b/docker/compose/local-mount-profile-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-mq-test.yml b/docker/compose/local-mq-test.yml deleted file mode 100644 index fef68cac8..000000000 --- a/docker/compose/local-mq-test.yml +++ /dev/null @@ -1,32 +0,0 @@ -services: - server: - image: chrislusf/seaweedfs:local - ports: - - 9333:9333 - - 19333:19333 - - 8888:8888 - - 18888:18888 - command: "server -ip=server -filer -volume.max=0 -master.volumeSizeLimitMB=8 -volume.preStopSeconds=1" - healthcheck: - test: curl -f http://localhost:8888/healthz - mq_broker: - image: chrislusf/seaweedfs:local - ports: - - 17777:17777 - command: "mq.broker -master=server:9333 -ip=mq_broker" - depends_on: - server: - condition: service_healthy - mq_agent: - image: chrislusf/seaweedfs:local - ports: - - 16777:16777 - command: "mq.agent -broker=mq_broker:17777 -port=16777" - depends_on: - - mq_broker - mq_client: - image: chrislusf/seaweedfs:local - # run a custom command instead of entrypoint - command: "ls -al" - depends_on: - - mq_agent diff --git a/docker/compose/local-nextcloud-compose.yml b/docker/compose/local-nextcloud-compose.yml index 288ae06e5..80c3fca53 100644 --- a/docker/compose/local-nextcloud-compose.yml +++ b/docker/compose/local-nextcloud-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-registry-compose.yml b/docker/compose/local-registry-compose.yml index 3aa056a90..b61278d66 100644 --- a/docker/compose/local-registry-compose.yml +++ b/docker/compose/local-registry-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: @@ -6,7 +6,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master -ip=master -volumeSizeLimitMB=100" + command: "master -ip=master -volumeSizeLimitMB=1024" volume: image: chrislusf/seaweedfs:local ports: diff --git a/docker/compose/local-replicate-compose.yml b/docker/compose/local-replicate-compose.yml index d88a54101..8240d45a7 100644 --- a/docker/compose/local-replicate-compose.yml +++ b/docker/compose/local-replicate-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-s3tests-compose.yml b/docker/compose/local-s3tests-compose.yml index f1961700c..952a9165c 100644 --- a/docker/compose/local-s3tests-compose.yml +++ b/docker/compose/local-s3tests-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/compose/local-sync-mount-compose.yml b/docker/compose/local-sync-mount-compose.yml index 0ce1fdeda..fec866698 100644 --- a/docker/compose/local-sync-mount-compose.yml +++ b/docker/compose/local-sync-mount-compose.yml @@ -3,54 +3,19 @@ services: node1: image: chrislusf/seaweedfs:local command: "server -master -volume -filer" - ports: - - 8888:8888 - - 18888:18888 - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ] - interval: 1s - start_period: 10s - timeout: 30s mount1: image: chrislusf/seaweedfs:local privileged: true command: "mount -filer=node1:8888 -dir=/mnt -dirAutoCreate" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://node1:8888/" ] - interval: 1s - start_period: 10s - timeout: 30s - depends_on: - node1: - condition: service_healthy node2: image: chrislusf/seaweedfs:local ports: - 7888:8888 - - 17888:18888 command: "server -master -volume -filer" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://localhost:9333/cluster/healthz" ] - interval: 1s - start_period: 10s - timeout: 30s mount2: image: chrislusf/seaweedfs:local privileged: true command: "mount -filer=node2:8888 -dir=/mnt -dirAutoCreate" - healthcheck: - test: [ "CMD", "curl", "--fail", "-I", "http://node2:8888/" ] - interval: 1s - start_period: 10s - timeout: 30s - depends_on: - node2: - condition: service_healthy sync: image: chrislusf/seaweedfs:local command: "-v=4 filer.sync -a=node1:8888 -b=node2:8888 -a.debug -b.debug" - depends_on: - mount1: - condition: service_healthy - mount2: - condition: service_healthy diff --git a/docker/compose/master-cloud.toml b/docker/compose/master-cloud.toml index ef7796f04..6ddb14e12 100644 --- a/docker/compose/master-cloud.toml +++ b/docker/compose/master-cloud.toml @@ -13,7 +13,7 @@ scripts = """ ec.rebuild -force ec.balance -force volume.balance -force - volume.fix.replication -force + volume.fix.replication unlock """ sleep_minutes = 17 # sleep minutes between each script execution diff --git a/docker/compose/notification.toml b/docker/compose/notification.toml index d93d2ba87..dcd5f2c6f 100644 --- a/docker/compose/notification.toml +++ b/docker/compose/notification.toml @@ -1,5 +1,5 @@ [notification.log] -# this is only for debugging purpose and does not work with "weed filer.replicate" +# this is only for debugging perpose and does not work with "weed filer.replicate" enabled = false diff --git a/docker/compose/s3.json b/docker/compose/s3.json index ce230863b..64dedb681 100644 --- a/docker/compose/s3.json +++ b/docker/compose/s3.json @@ -40,10 +40,7 @@ "List", "Tagging", "Write" - ], - "account": { - "id": "testid" - } + ] }, { "name": "s3_tests_alt", @@ -104,12 +101,5 @@ "Write" ] } - ], - "accounts": [ - { - "id" : "testid", - "displayName": "M. Tester", - "emailAddress": "tester@ceph.com" - } - ] + ] } \ No newline at end of file diff --git a/docker/compose/s3tests.conf b/docker/compose/s3tests.conf index 3b0629fcb..68d9ddeb7 100644 --- a/docker/compose/s3tests.conf +++ b/docker/compose/s3tests.conf @@ -2,7 +2,7 @@ ## this section is just used for host, port and bucket_prefix # host set for rgw in vstart.sh -host = 127.0.0.1 +host = s3 # port set for rgw in vstart.sh port = 8000 @@ -67,37 +67,4 @@ access_key = HIJKLMNOPQRSTUVWXYZA secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab # tenant email set in vstart.sh -email = tenanteduser@example.com - -# tenant name -tenant = testx - -[iam] -#used for iam operations in sts-tests -#email from vstart.sh -email = s3@example.com - -#user_id from vstart.sh -user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - -#access_key from vstart.sh -access_key = ABCDEFGHIJKLMNOPQRST - -#secret_key from vstart.sh -secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz - -#display_name from vstart.sh -display_name = youruseridhere - -[iam root] -access_key = AAAAAAAAAAAAAAAAAAaa -secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -user_id = RGW11111111111111111 -email = account1@ceph.com - -# iam account root user in a different account than [iam root] -[iam alt root] -access_key = BBBBBBBBBBBBBBBBBBbb -secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -user_id = RGW22222222222222222 -email = account2@ceph.com \ No newline at end of file +email = tenanteduser@example.com \ No newline at end of file diff --git a/docker/compose/swarm-etcd.yml b/docker/compose/swarm-etcd.yml index bc9510ad0..186b24790 100644 --- a/docker/compose/swarm-etcd.yml +++ b/docker/compose/swarm-etcd.yml @@ -1,4 +1,6 @@ # 2021-01-30 16:25:30 +version: '3.8' + services: etcd: diff --git a/docker/compose/test-etcd-filer.yml b/docker/compose/test-etcd-filer.yml index c6f24c559..400bd0fae 100644 --- a/docker/compose/test-etcd-filer.yml +++ b/docker/compose/test-etcd-filer.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: etcd: @@ -11,7 +11,7 @@ services: ports: - 9333:9333 - 19333:19333 - command: "master -ip=master -volumeSizeLimitMB=100" + command: "master -ip=master -volumeSizeLimitMB=1024" volume: image: chrislusf/seaweedfs:local ports: @@ -30,7 +30,6 @@ services: environment: WEED_LEVELDB2_ENABLED: 'false' WEED_ETCD_ENABLED: 'true' - WEED_ETCD_KEY_PREFIX: 'seaweedfs.' WEED_ETCD_SERVERS: "http://etcd:2379" volumes: - ./s3.json:/etc/seaweedfs/s3.json diff --git a/docker/compose/test-tarantool-filer.yml b/docker/compose/test-tarantool-filer.yml deleted file mode 100644 index 8f31bf855..000000000 --- a/docker/compose/test-tarantool-filer.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: '3.9' - -services: - tarantool: - image: chrislusf/tarantool_dev_env - entrypoint: "tt start app -i" - environment: - APP_USER_PASSWORD: "app" - CLIENT_USER_PASSWORD: "client" - REPLICATOR_USER_PASSWORD: "replicator" - STORAGE_USER_PASSWORD: "storage" - network_mode: "host" - ports: - - "3303:3303" - - s3: - image: chrislusf/seaweedfs:local - command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false" - volumes: - - ./s3.json:/etc/seaweedfs/s3.json - environment: - WEED_LEVELDB2_ENABLED: "false" - WEED_TARANTOOL_ENABLED: "true" - WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303" - WEED_TARANTOOL_USER: "client" - WEED_TARANTOOL_PASSWORD: "client" - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 - network_mode: "host" - depends_on: - - tarantool \ No newline at end of file diff --git a/docker/compose/test-ydb-filer.yml b/docker/compose/test-ydb-filer.yml index ddbfe18d0..c0c31fe5b 100644 --- a/docker/compose/test-ydb-filer.yml +++ b/docker/compose/test-ydb-filer.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: ydb: diff --git a/docker/compose/tls.env b/docker/compose/tls.env index d80a3abbd..3a52fce52 100644 --- a/docker/compose/tls.env +++ b/docker/compose/tls.env @@ -12,9 +12,5 @@ WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,clie WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" -WEED_HTTPS_CLIENT_ENABLE=true -WEED_HTTPS_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt -WEED_HTTPS_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key -WEED_HTTPS_VOLUME_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt #GRPC_GO_LOG_SEVERITY_LEVEL=info #GRPC_GO_LOG_VERBOSITY_LEVEL=2 \ No newline at end of file diff --git a/docker/compose/userstore.json b/docker/compose/userstore.json deleted file mode 100644 index e9f8d8ab4..000000000 --- a/docker/compose/userstore.json +++ /dev/null @@ -1,37 +0,0 @@ -[ - { - "Username": "admin", - "Password": "myadminpassword", - "PublicKeys": [ - ], - "HomeDir": "/", - "Permissions": { - "/": ["*"] - }, - "Uid": 0, - "Gid": 0 - }, - { - "Username": "user1", - "Password": "myuser1password", - "PublicKeys": [""], - "HomeDir": "/user1", - "Permissions": { - "/user1": ["*"], - "/public": ["read", "list","write"] - }, - "Uid": 1111, - "Gid": 1111 - }, - { - "Username": "readonly", - "Password": "myreadonlypassword", - "PublicKeys": [], - "HomeDir": "/public", - "Permissions": { - "/public": ["read", "list"] - }, - "Uid": 1112, - "Gid": 1112 - } -] diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 28ed97b43..cce1c39ef 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml index 8e0fd2a85..0b11e72e4 100644 --- a/docker/seaweedfs-dev-compose.yml +++ b/docker/seaweedfs-dev-compose.yml @@ -1,4 +1,4 @@ -version: '3.9' +version: '2' services: master: diff --git a/docker/seaweedfs.sql b/docker/seaweedfs.sql index c9974e0e6..a27eb7081 100644 --- a/docker/seaweedfs.sql +++ b/docker/seaweedfs.sql @@ -3,10 +3,10 @@ CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret'; GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%'; FLUSH PRIVILEGES; USE seaweedfs; -CREATE TABLE IF NOT EXISTS `filemeta` ( - `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', - `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', - `directory` TEXT NOT NULL COMMENT 'full path to parent directory', - `meta` LONGBLOB, - PRIMARY KEY (`dirhash`, `name`) -) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; \ No newline at end of file +CREATE TABLE IF NOT EXISTS filemeta ( + dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', + name VARCHAR(1000) COMMENT 'directory or file name', + directory TEXT COMMENT 'full path to parent directory', + meta LONGBLOB, + PRIMARY KEY (dirhash, name) +) DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/docker/tarantool/app-scm-1.rockspec b/docker/tarantool/app-scm-1.rockspec deleted file mode 100644 index 79eb1ca38..000000000 --- a/docker/tarantool/app-scm-1.rockspec +++ /dev/null @@ -1,14 +0,0 @@ -package = 'app' -version = 'scm-1' -source = { - url = '/dev/null', -} -dependencies = { - 'crud == 1.5.2-1', - 'expirationd == 1.6.0-1', - 'metrics-export-role == 0.3.0-1', - 'vshard == 0.1.32-1' -} -build = { - type = 'none'; -} \ No newline at end of file diff --git a/docker/tarantool/config.yaml b/docker/tarantool/config.yaml deleted file mode 100644 index 00a693a2e..000000000 --- a/docker/tarantool/config.yaml +++ /dev/null @@ -1,145 +0,0 @@ -config: - context: - app_user_password: - from: env - env: APP_USER_PASSWORD - client_user_password: - from: env - env: CLIENT_USER_PASSWORD - replicator_user_password: - from: env - env: REPLICATOR_USER_PASSWORD - storage_user_password: - from: env - env: STORAGE_USER_PASSWORD - -credentials: - roles: - crud-role: - privileges: - - permissions: [ "execute" ] - lua_call: [ "crud.delete", "crud.get", "crud.upsert" ] - users: - app: - password: '{{ context.app_user_password }}' - roles: [ public, crud-role ] - client: - password: '{{ context.client_user_password }}' - roles: [ super ] - replicator: - password: '{{ context.replicator_user_password }}' - roles: [ replication ] - storage: - password: '{{ context.storage_user_password }}' - roles: [ sharding ] - -iproto: - advertise: - peer: - login: replicator - sharding: - login: storage - -sharding: - bucket_count: 10000 - -metrics: - include: [ all ] - exclude: [ vinyl ] - labels: - alias: '{{ instance_name }}' - - -groups: - storages: - roles: - - roles.crud-storage - - roles.expirationd - - roles.metrics-export - roles_cfg: - roles.expirationd: - cfg: - metrics: true - filer_metadata_task: - space: filer_metadata - is_expired: filer_metadata.is_expired - options: - atomic_iteration: true - force: true - index: 'expire_at_idx' - iterator_type: GT - start_key: - - 0 - tuples_per_iteration: 10000 - app: - module: storage - sharding: - roles: [ storage ] - replication: - failover: election - database: - use_mvcc_engine: true - replicasets: - storage-001: - instances: - storage-001-a: - roles_cfg: - roles.metrics-export: - http: - - listen: '0.0.0.0:8081' - endpoints: - - path: /metrics/prometheus/ - format: prometheus - - path: /metrics/json - format: json - iproto: - listen: - - uri: 127.0.0.1:3301 - advertise: - client: 127.0.0.1:3301 - storage-001-b: - roles_cfg: - roles.metrics-export: - http: - - listen: '0.0.0.0:8082' - endpoints: - - path: /metrics/prometheus/ - format: prometheus - - path: /metrics/json - format: json - iproto: - listen: - - uri: 127.0.0.1:3302 - advertise: - client: 127.0.0.1:3302 - routers: - roles: - - roles.crud-router - - roles.metrics-export - roles_cfg: - roles.crud-router: - stats: true - stats_driver: metrics - stats_quantiles: true - app: - module: router - sharding: - roles: [ router ] - replicasets: - router-001: - instances: - router-001-a: - roles_cfg: - roles.metrics-export: - http: - - listen: '0.0.0.0:8083' - endpoints: - - path: /metrics/prometheus/ - format: prometheus - - path: /metrics/json - format: json - iproto: - listen: - - uri: 127.0.0.1:3303 - advertise: - client: 127.0.0.1:3303 \ No newline at end of file diff --git a/docker/tarantool/instances.yaml b/docker/tarantool/instances.yaml deleted file mode 100644 index 225b7382f..000000000 --- a/docker/tarantool/instances.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -storage-001-a: - -storage-001-b: - -router-001-a: - diff --git a/docker/tarantool/router.lua b/docker/tarantool/router.lua deleted file mode 100644 index 359a8c49b..000000000 --- a/docker/tarantool/router.lua +++ /dev/null @@ -1,77 +0,0 @@ -local vshard = require('vshard') -local log = require('log') - --- Bootstrap the vshard router. -while true do - local ok, err = vshard.router.bootstrap({ - if_not_bootstrapped = true, - }) - if ok then - break - end - log.info(('Router bootstrap error: %s'):format(err)) -end - --- functions for filer_metadata space -local filer_metadata = { - delete_by_directory_idx = function(directory) - -- find all storages - local storages = require('vshard').router.routeall() - -- on each storage - for _, storage in pairs(storages) do - -- call local function - local result, err = storage:callrw('filer_metadata.delete_by_directory_idx', { directory }) - -- check for error - if err then - error("Failed to call function on storage: " .. tostring(err)) - end - end - -- return - return true - end, - find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit) - -- init results - local results = {} - -- find all storages - local storages = require('vshard').router.routeall() - -- on each storage - for _, storage in pairs(storages) do - -- call local function - local result, err = storage:callro('filer_metadata.find_by_directory_idx_and_name', { - dirPath, - startFileName, - includeStartFile, - limit - }) - -- check for error - if err then - error("Failed to call function on storage: " .. tostring(err)) - end - -- add to results - for _, tuple in ipairs(result) do - table.insert(results, tuple) - end - end - -- sort - table.sort(results, function(a, b) return a[3] < b[3] end) - -- apply limit - if #results > limit then - local limitedResults = {} - for i = 1, limit do - table.insert(limitedResults, results[i]) - end - results = limitedResults - end - -- return - return results - end, -} - -rawset(_G, 'filer_metadata', filer_metadata) - --- register functions for filer_metadata space, set grants -for name, _ in pairs(filer_metadata) do - box.schema.func.create('filer_metadata.' .. name, { if_not_exists = true }) - box.schema.user.grant('app', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true }) - box.schema.user.grant('client', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true }) -end diff --git a/docker/tarantool/storage.lua b/docker/tarantool/storage.lua deleted file mode 100644 index ff1ec0288..000000000 --- a/docker/tarantool/storage.lua +++ /dev/null @@ -1,97 +0,0 @@ -box.watch('box.status', function() - if box.info.ro then - return - end - - -- ==================================== - -- key_value space - -- ==================================== - box.schema.create_space('key_value', { - format = { - { name = 'key', type = 'string' }, - { name = 'bucket_id', type = 'unsigned' }, - { name = 'value', type = 'string' } - }, - if_not_exists = true - }) - - -- create key_value space indexes - box.space.key_value:create_index('id', {type = 'tree', parts = { 'key' }, unique = true, if_not_exists = true}) - box.space.key_value:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true }) - - -- ==================================== - -- filer_metadata space - -- ==================================== - box.schema.create_space('filer_metadata', { - format = { - { name = 'directory', type = 'string' }, - { name = 'bucket_id', type = 'unsigned' }, - { name = 'name', type = 'string' }, - { name = 'expire_at', type = 'unsigned' }, - { name = 'data', type = 'string' } - }, - if_not_exists = true - }) - - -- create filer_metadata space indexes - box.space.filer_metadata:create_index('id', {type = 'tree', parts = { 'directory', 'name' }, unique = true, if_not_exists = true}) - box.space.filer_metadata:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true }) - box.space.filer_metadata:create_index('directory_idx', { type = 'tree', parts = { 'directory' }, unique = false, if_not_exists = true }) - box.space.filer_metadata:create_index('name_idx', { type = 'tree', parts = { 'name' }, unique = false, if_not_exists = true }) - box.space.filer_metadata:create_index('expire_at_idx', { type = 'tree', parts = { 'expire_at' }, unique = false, if_not_exists = true}) -end) - --- functions for filer_metadata space -local filer_metadata = { - delete_by_directory_idx = function(directory) - local space = box.space.filer_metadata - local index = space.index.directory_idx - -- for each finded directories - for _, tuple in index:pairs({ directory }, { iterator = 'EQ' }) do - space:delete({ tuple[1], tuple[3] }) - end - return true - end, - find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit) - local space = box.space.filer_metadata - local directory_idx = space.index.directory_idx - -- choose filter name function - local filter_filename_func - if includeStartFile then - filter_filename_func = function(value) return value >= startFileName end - else - filter_filename_func = function(value) return value > startFileName end - end - -- init results - local results = {} - -- for each finded directories - for _, tuple in directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do - -- filter by name - if filter_filename_func(tuple[3]) then - table.insert(results, tuple) - end - end - -- sort - table.sort(results, function(a, b) return a[3] < b[3] end) - -- apply limit - if #results > limit then - local limitedResults = {} - for i = 1, limit do - table.insert(limitedResults, results[i]) - end - results = limitedResults - end - -- return - return results - end, - is_expired = function(args, tuple) - return (tuple[4] > 0) and (require('fiber').time() > tuple[4]) - end -} - --- register functions for filer_metadata space, set grants -rawset(_G, 'filer_metadata', filer_metadata) -for name, _ in pairs(filer_metadata) do - box.schema.func.create('filer_metadata.' .. name, { setuid = true, if_not_exists = true }) - box.schema.user.grant('storage', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true }) -end diff --git a/docker/test.py b/docker/test.py deleted file mode 100755 index 8ac025b32..000000000 --- a/docker/test.py +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env python3 -# /// script -# requires-python = ">=3.12" -# dependencies = [ -# "boto3", -# ] -# /// - -import argparse -import json -import random -import string -import subprocess -from enum import Enum -from pathlib import Path - -import boto3 - -REGION_NAME = "us-east-1" - - -class Actions(str, Enum): - Get = "Get" - Put = "Put" - List = "List" - - -def get_user_dir(bucket_name, user, with_bucket=True): - if with_bucket: - return f"{bucket_name}/user-id-{user}" - - return f"user-id-{user}" - - -def create_power_user(): - power_user_key = "power_user_key" - power_user_secret = "power_user_secret" - command = f"s3.configure -apply -user poweruser -access_key {power_user_key} -secret_key {power_user_secret} -actions Admin" - print("Creating Power User...") - subprocess.run( - ["docker", "exec", "-i", "seaweedfs-master-1", "weed", "shell"], - input=command, - text=True, - stdout=subprocess.PIPE, - ) - print( - f"Power User created with key: {power_user_key} and secret: {power_user_secret}" - ) - return power_user_key, power_user_secret - - -def create_bucket(s3_client, bucket_name): - print(f"Creating Bucket {bucket_name}...") - s3_client.create_bucket(Bucket=bucket_name) - print(f"Bucket {bucket_name} created.") - - -def upload_file(s3_client, bucket_name, user, file_path, custom_remote_path=None): - user_dir = get_user_dir(bucket_name, user, with_bucket=False) - if custom_remote_path: - remote_path = custom_remote_path - else: - remote_path = f"{user_dir}/{str(Path(file_path).name)}" - - print(f"Uploading {file_path} for {user}... on {user_dir}") - - s3_client.upload_file(file_path, bucket_name, remote_path) - print(f"File {file_path} uploaded for {user}.") - - -def create_user(iam_client, user): - print(f"Creating user {user}...") - response = iam_client.create_access_key(UserName=user) - print( - f"User {user} created with access key: {response['AccessKey']['AccessKeyId']}" - ) - return response - - -def list_files(s3_client, bucket_name, path=None): - if path is None: - path = "" - print(f"Listing files of s3://{bucket_name}/{path}...") - try: - response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=path) - if "Contents" in response: - for obj in response["Contents"]: - print(f"\t - {obj['Key']}") - else: - print("No files found.") - except Exception as e: - print(f"Error listing files: {e}") - - -def create_policy_for_user( - iam_client, user, bucket_name, actions=[Actions.Get, Actions.List] -): - print(f"Creating policy for {user} on {bucket_name}...") - policy_document = { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [f"s3:{action.value}*" for action in actions], - "Resource": [ - f"arn:aws:s3:::{get_user_dir(bucket_name, user)}/*", - ], - } - ], - } - policy_name = f"{user}-{bucket_name}-full-access" - - policy_json = json.dumps(policy_document) - filepath = f"/tmp/{policy_name}.json" - with open(filepath, "w") as f: - f.write(json.dumps(policy_document, indent=2)) - - iam_client.put_user_policy( - PolicyName=policy_name, PolicyDocument=policy_json, UserName=user - ) - print(f"Policy for {user} on {bucket_name} created.") - - -def main(): - parser = argparse.ArgumentParser(description="SeaweedFS S3 Test Script") - parser.add_argument( - "--s3-url", default="http://127.0.0.1:8333", help="S3 endpoint URL" - ) - parser.add_argument( - "--iam-url", default="http://127.0.0.1:8111", help="IAM endpoint URL" - ) - args = parser.parse_args() - - bucket_name = ( - f"test-bucket-{''.join(random.choices(string.digits + 'abcdef', k=8))}" - ) - sentinel_file = "/tmp/SENTINEL" - with open(sentinel_file, "w") as f: - f.write("Hello World") - print(f"SENTINEL file created at {sentinel_file}") - - power_user_key, power_user_secret = create_power_user() - - admin_s3_client = get_s3_client(args, power_user_key, power_user_secret) - iam_client = get_iam_client(args, power_user_key, power_user_secret) - - create_bucket(admin_s3_client, bucket_name) - upload_file(admin_s3_client, bucket_name, "Alice", sentinel_file) - upload_file(admin_s3_client, bucket_name, "Bob", sentinel_file) - list_files(admin_s3_client, bucket_name) - - alice_user_info = create_user(iam_client, "Alice") - bob_user_info = create_user(iam_client, "Bob") - - alice_key = alice_user_info["AccessKey"]["AccessKeyId"] - alice_secret = alice_user_info["AccessKey"]["SecretAccessKey"] - bob_key = bob_user_info["AccessKey"]["AccessKeyId"] - bob_secret = bob_user_info["AccessKey"]["SecretAccessKey"] - - # Make sure Admin can read any files - list_files(admin_s3_client, bucket_name) - list_files( - admin_s3_client, - bucket_name, - get_user_dir(bucket_name, "Alice", with_bucket=False), - ) - list_files( - admin_s3_client, - bucket_name, - get_user_dir(bucket_name, "Bob", with_bucket=False), - ) - - # Create read policy for Alice and Bob - create_policy_for_user(iam_client, "Alice", bucket_name) - create_policy_for_user(iam_client, "Bob", bucket_name) - - alice_s3_client = get_s3_client(args, alice_key, alice_secret) - - # Make sure Alice can read her files - list_files( - alice_s3_client, - bucket_name, - get_user_dir(bucket_name, "Alice", with_bucket=False) + "/", - ) - - # Make sure Bob can read his files - bob_s3_client = get_s3_client(args, bob_key, bob_secret) - list_files( - bob_s3_client, - bucket_name, - get_user_dir(bucket_name, "Bob", with_bucket=False) + "/", - ) - - # Update policy to include write - create_policy_for_user(iam_client, "Alice", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List]) # fmt: off - create_policy_for_user(iam_client, "Bob", bucket_name, actions=[Actions.Put, Actions.Get, Actions.List]) # fmt: off - - print("############################# Make sure Alice can write her files") - upload_file( - alice_s3_client, - bucket_name, - "Alice", - sentinel_file, - custom_remote_path=f"{get_user_dir(bucket_name, 'Alice', with_bucket=False)}/SENTINEL_by_Alice", - ) - - - print("############################# Make sure Bob can write his files") - upload_file( - bob_s3_client, - bucket_name, - "Bob", - sentinel_file, - custom_remote_path=f"{get_user_dir(bucket_name, 'Bob', with_bucket=False)}/SENTINEL_by_Bob", - ) - - - print("############################# Make sure Alice can read her new files") - list_files( - alice_s3_client, - bucket_name, - get_user_dir(bucket_name, "Alice", with_bucket=False) + "/", - ) - - - print("############################# Make sure Bob can read his new files") - list_files( - bob_s3_client, - bucket_name, - get_user_dir(bucket_name, "Bob", with_bucket=False) + "/", - ) - - - print("############################# Make sure Bob cannot read Alice's files") - list_files( - bob_s3_client, - bucket_name, - get_user_dir(bucket_name, "Alice", with_bucket=False) + "/", - ) - - print("############################# Make sure Alice cannot read Bob's files") - - list_files( - alice_s3_client, - bucket_name, - get_user_dir(bucket_name, "Bob", with_bucket=False) + "/", - ) - - - -def get_iam_client(args, access_key, secret_key): - iam_client = boto3.client( - "iam", - endpoint_url=args.iam_url, - region_name=REGION_NAME, - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - ) - return iam_client - - -def get_s3_client(args, access_key, secret_key): - s3_client = boto3.client( - "s3", - endpoint_url=args.s3_url, - region_name=REGION_NAME, - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - ) - return s3_client - - -if __name__ == "__main__": - main() diff --git a/go.mod b/go.mod index 6b10f1a4f..fc956e42a 100644 --- a/go.mod +++ b/go.mod @@ -1,470 +1,238 @@ -module github.com/seaweedfs/seaweedfs +module github.com/chrislusf/seaweedfs -go 1.24.0 - -toolchain go1.24.1 +go 1.18 require ( - cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/pubsub v1.50.1 - cloud.google.com/go/storage v1.57.0 - github.com/Shopify/sarama v1.38.1 - github.com/aws/aws-sdk-go v1.55.8 + cloud.google.com/go v0.102.1 // indirect + cloud.google.com/go/pubsub v1.23.1 + cloud.google.com/go/storage v1.23.0 + github.com/Azure/azure-pipeline-go v0.2.3 + github.com/Azure/azure-storage-blob-go v0.15.0 + github.com/OneOfOne/xxhash v1.2.8 + github.com/Shopify/sarama v1.34.1 + github.com/aws/aws-sdk-go v1.44.51 github.com/beorn7/perks v1.0.1 // indirect + github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 github.com/bwmarrin/snowflake v0.3.0 - github.com/cenkalti/backoff/v4 v4.3.0 - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/cespare/xxhash v1.1.0 + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chrislusf/raft v1.0.9 + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/dustin/go-humanize v1.0.1 - github.com/eapache/go-resiliency v1.6.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/disintegration/imaging v1.6.2 + github.com/dustin/go-humanize v1.0.0 + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/go-redsync/redsync/v4 v4.14.0 - github.com/go-sql-driver/mysql v1.9.3 - github.com/go-zookeeper/zk v1.0.3 // indirect - github.com/gocql/gocql v1.7.0 - github.com/golang/protobuf v1.5.4 - github.com/golang/snappy v1.0.0 - github.com/google/btree v1.1.3 - github.com/google/uuid v1.6.0 - github.com/google/wire v0.6.0 // indirect - github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/gorilla/mux v1.8.1 + github.com/fclairamb/ftpserverlib v0.18.0 + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-errors/errors v1.1.1 // indirect + github.com/go-redis/redis/v8 v8.11.5 + github.com/go-redsync/redsync/v4 v4.5.1 + github.com/go-sql-driver/mysql v1.6.0 + github.com/go-stack/stack v1.8.1 // indirect + github.com/go-zookeeper/zk v1.0.2 // indirect + github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d + github.com/golang-jwt/jwt v3.2.2+incompatible + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 + github.com/google/go-cmp v0.5.8 // indirect + github.com/google/uuid v1.3.0 + github.com/google/wire v0.5.0 // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/gorilla/mux v1.8.0 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/jackc/pgx/v5 v5.7.6 - github.com/jcmturner/gofork v1.7.6 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect - github.com/jinzhu/copier v0.4.0 + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jinzhu/copier v0.3.5 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 github.com/karlseguin/ccache/v2 v2.0.8 - github.com/klauspost/compress v1.18.1 - github.com/klauspost/reedsolomon v1.12.5 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.15.6 // indirect + github.com/klauspost/reedsolomon v1.10.0 github.com/kurin/blazer v0.5.3 - github.com/linxGnu/grocksdb v1.10.2 + github.com/lib/pq v1.10.6 + github.com/linxGnu/grocksdb v1.7.3 + github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-ieproxy v0.0.3 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/olivere/elastic/v7 v7.0.32 + github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterh/liner v1.2.2 github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/posener/complete v1.2.3 - github.com/pquerna/cachecontrol v0.2.0 - github.com/prometheus/client_golang v1.23.2 - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 + github.com/pquerna/cachecontrol v0.1.0 + github.com/prometheus/client_golang v1.12.2 + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/seaweedfs/goexif v1.0.3 - github.com/seaweedfs/raft v1.1.3 - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/viper v1.21.0 - github.com/stretchr/testify v1.11.1 + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/seaweedfs/goexif v2.0.0+incompatible + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/viper v1.12.0 + github.com/streadway/amqp v1.0.0 + github.com/stretchr/testify v1.8.0 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 - github.com/tidwall/gjson v1.18.0 - github.com/tidwall/match v1.2.0 + github.com/tidwall/gjson v1.14.1 + github.com/tidwall/match v1.1.1 github.com/tidwall/pretty v1.2.0 // indirect github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 github.com/valyala/bytebufferpool v1.0.0 - github.com/viant/ptrie v1.0.1 + github.com/viant/assertly v0.5.4 // indirect + github.com/viant/ptrie v0.3.0 + github.com/viant/toolbox v0.33.2 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.2 // indirect - github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - go.etcd.io/etcd/client/v3 v3.6.5 - go.mongodb.org/mongo-driver v1.17.4 - go.opencensus.io v0.24.0 // indirect - gocloud.dev v0.43.0 - gocloud.dev/pubsub/natspubsub v0.43.0 - gocloud.dev/pubsub/rabbitpubsub v0.43.0 - golang.org/x/crypto v0.43.0 - golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 - golang.org/x/image v0.32.0 - golang.org/x/net v0.45.0 - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sys v0.37.0 - golang.org/x/text v0.30.0 // indirect - golang.org/x/tools v0.37.0 // indirect - golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/api v0.247.0 - google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect - google.golang.org/grpc v1.75.1 - google.golang.org/protobuf v1.36.9 + github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + go.etcd.io/etcd/client/v3 v3.5.4 + go.mongodb.org/mongo-driver v1.9.1 + go.opencensus.io v0.23.0 // indirect + gocloud.dev v0.25.0 + gocloud.dev/pubsub/natspubsub v0.25.0 + gocloud.dev/pubsub/rabbitpubsub v0.25.0 + golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect + golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd + golang.org/x/image v0.0.0-20200119044424-58c23975cae1 + golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e + golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 // indirect + golang.org/x/sys v0.0.0-20220624220833-87e55d714810 + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/api v0.86.0 + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect + google.golang.org/grpc v1.47.0 + google.golang.org/protobuf v1.28.0 gopkg.in/inf.v0 v0.9.1 // indirect modernc.org/b v1.0.0 // indirect - modernc.org/mathutil v1.7.1 - modernc.org/memory v1.11.0 // indirect - modernc.org/sqlite v1.39.0 - modernc.org/strutil v1.2.1 + modernc.org/cc/v3 v3.36.0 // indirect + modernc.org/ccgo/v3 v3.16.6 // indirect + modernc.org/libc v1.16.7 // indirect + modernc.org/mathutil v1.4.1 // indirect + modernc.org/memory v1.1.1 // indirect + modernc.org/opt v0.1.1 // indirect + modernc.org/sqlite v1.17.3 + modernc.org/strutil v1.1.2 + modernc.org/token v1.0.0 // indirect ) require ( - cloud.google.com/go/kms v1.23.1 - github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 - github.com/Jille/raft-grpc-transport v1.6.1 - github.com/ThreeDotsLabs/watermill v1.5.1 - github.com/a-h/templ v0.3.943 - github.com/arangodb/go-driver v1.6.7 - github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go-v2 v1.39.2 - github.com/aws/aws-sdk-go-v2/config v1.31.3 - github.com/aws/aws-sdk-go-v2/credentials v1.18.10 - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 - github.com/cognusion/imaging v1.0.2 - github.com/fluent/fluent-logger-golang v1.10.1 - github.com/getsentry/sentry-go v0.35.3 - github.com/gin-contrib/sessions v1.0.4 - github.com/gin-gonic/gin v1.11.0 - github.com/golang-jwt/jwt/v5 v5.3.0 - github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50 - github.com/hanwen/go-fuse/v2 v2.8.0 - github.com/hashicorp/raft v1.7.3 - github.com/hashicorp/raft-boltdb/v2 v2.3.1 - github.com/hashicorp/vault/api v1.20.0 - github.com/jhump/protoreflect v1.17.0 - github.com/lib/pq v1.10.9 - github.com/linkedin/goavro/v2 v2.14.0 - github.com/mattn/go-sqlite3 v1.14.32 - github.com/minio/crc64nvme v1.1.1 - github.com/orcaman/concurrent-map/v2 v2.0.1 - github.com/parquet-go/parquet-go v0.25.1 - github.com/pkg/sftp v1.13.9 - github.com/rabbitmq/amqp091-go v1.10.0 - github.com/rclone/rclone v1.71.1 - github.com/rdleal/intervalst v1.5.0 - github.com/redis/go-redis/v9 v9.14.1 - github.com/schollz/progressbar/v3 v3.18.0 - github.com/shirou/gopsutil/v4 v4.25.9 - github.com/tarantool/go-tarantool/v2 v2.4.0 - github.com/tikv/client-go/v2 v2.0.7 - github.com/xeipuuv/gojsonschema v1.2.0 - github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0 - github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5 - go.etcd.io/etcd/client/pkg/v3 v3.6.5 - go.uber.org/atomic v1.11.0 - golang.org/x/sync v0.17.0 - golang.org/x/tools/godoc v0.1.0-deprecated - google.golang.org/grpc/security/advancedtls v1.0.0 -) - -require github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect - -require ( - cloud.google.com/go/longrunning v0.6.7 // indirect - cloud.google.com/go/pubsub/v2 v2.0.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/bazelbuild/rules_go v0.46.0 // indirect - github.com/biogo/store v0.0.0-20201120204734-aad293a2328f // indirect - github.com/blevesearch/snowballstem v0.9.0 // indirect - github.com/bufbuild/protocompile v0.14.1 // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/cockroachdb/apd/v3 v3.1.0 // indirect - github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect - github.com/cockroachdb/redact v1.1.5 // indirect - github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2 // indirect - github.com/dave/dst v0.27.2 // indirect - github.com/goccy/go-yaml v1.18.0 // indirect - github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/hcl v1.0.1-vault-7 // indirect - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/puddle/v2 v2.2.2 // indirect - github.com/jaegertracing/jaeger v1.47.0 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/lithammer/shortuuid/v3 v3.0.7 // indirect - github.com/openzipkin/zipkin-go v0.4.3 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect - github.com/pierrre/geohash v1.0.0 // indirect - github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.54.1 // indirect - github.com/rogpeppe/go-internal v1.14.1 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/sasha-s/go-deadlock v0.3.1 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/twpayne/go-geom v1.4.1 // indirect - github.com/twpayne/go-kml v1.5.2 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/zeebo/xxh3 v1.0.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect - go.opentelemetry.io/otel/exporters/zipkin v1.36.0 // indirect - go.opentelemetry.io/proto/otlp v1.7.0 // indirect - go.uber.org/mock v0.5.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/mod v0.28.0 // indirect - gonum.org/v1/gonum v0.16.0 // indirect + github.com/Jille/raft-grpc-transport v1.2.0 + github.com/arangodb/go-driver v1.3.2 + github.com/fluent/fluent-logger-golang v1.9.0 + github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17 + github.com/hashicorp/raft v1.3.9 + github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0 + github.com/tikv/client-go/v2 v2.0.1 + github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 + github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0 + google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1 ) require ( - cel.dev/expr v0.24.0 // indirect - cloud.google.com/go/auth v0.16.5 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.8.0 // indirect - cloud.google.com/go/iam v1.5.2 // indirect - cloud.google.com/go/monitoring v1.24.2 // indirect - filippo.io/edwards25519 v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 - github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect - github.com/Files-com/files-sdk-go/v3 v3.2.218 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect - github.com/IBM/go-sdk-core/v5 v5.21.0 // indirect - github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect - github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect - github.com/ProtonMail/go-crypto v1.3.0 // indirect - github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect - github.com/ProtonMail/go-srp v0.0.7 // indirect - github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect - github.com/abbot/go-http-auth v0.4.0 // indirect - github.com/andybalholm/brotli v1.2.0 // indirect - github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect + cloud.google.com/go/compute v1.7.0 // indirect + cloud.google.com/go/iam v0.3.0 // indirect github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect - github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect - github.com/aws/smithy-go v1.23.0 // indirect + github.com/armon/go-metrics v0.3.10 // indirect + github.com/aws/aws-sdk-go-v2 v1.16.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.15.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sns v1.17.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 // indirect + github.com/aws/smithy-go v1.11.2 // indirect + github.com/benbjohnson/clock v1.1.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect - github.com/bradenaw/juniper v0.15.3 // indirect - github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect - github.com/buengese/sgzip v0.1.1 // indirect - github.com/bytedance/sonic v1.14.0 // indirect - github.com/bytedance/sonic/loader v0.3.0 // indirect - github.com/calebcase/tmpfile v1.0.3 // indirect - github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect - github.com/cloudflare/circl v1.6.1 // indirect - github.com/cloudinary/cloudinary-go/v2 v2.12.0 // indirect - github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect - github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect - github.com/cloudwego/base64x v0.1.6 // indirect - github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect - github.com/colinmarc/hdfs/v2 v2.4.0 // indirect - github.com/creasty/defaults v1.8.0 // indirect - github.com/cronokirby/saferith v0.33.0 // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/d4l3k/messagediff v1.2.1 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect - github.com/ebitengine/purego v0.9.0 // indirect - github.com/elastic/gosigar v0.14.3 // indirect - github.com/emersion/go-message v0.18.2 // indirect - github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect - github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/fatih/color v1.16.0 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/flynn/noise v1.1.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.9 // indirect - github.com/geoffgarside/ber v1.2.0 // indirect - github.com/gin-contrib/sse v1.1.0 // indirect - github.com/go-chi/chi/v5 v5.2.2 // indirect - github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect - github.com/go-jose/go-jose/v4 v4.1.1 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/errors v0.22.2 // indirect - github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-playground/locales v0.14.1 // indirect - github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.27.0 // indirect - github.com/go-resty/resty/v2 v2.16.5 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/goccy/go-json v0.10.5 // indirect - github.com/gofrs/flock v0.12.1 // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fclairamb/go-log v0.3.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/gorilla/context v1.1.2 // indirect - github.com/gorilla/schema v1.4.1 // indirect - github.com/gorilla/securecookie v1.1.2 // indirect - github.com/gorilla/sessions v1.4.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + github.com/googleapis/go-type-adapters v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect + github.com/hashicorp/go-hclog v1.2.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-metrics v0.5.4 // indirect - github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect - github.com/henrybear327/go-proton-api v1.0.0 // indirect + github.com/hashicorp/go-msgpack v1.1.5 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect - github.com/jonboulle/clockwork v0.5.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect - github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect - github.com/k0kubun/pp v3.0.1+incompatible - github.com/klauspost/cpuid/v2 v2.3.0 // indirect - github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect - github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect - github.com/kr/fs v0.1.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lanrat/extsort v1.4.0 // indirect - github.com/leodido/go-urn v1.4.0 // indirect - github.com/lpar/date v1.0.0 // indirect - github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect - github.com/montanaflynn/stats v0.7.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nats-io/nats.go v1.43.0 // indirect - github.com/nats-io/nkeys v0.4.11 // indirect + github.com/klauspost/cpuid/v2 v2.0.14 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-runewidth v0.0.7 // indirect + github.com/mattn/go-sqlite3 v2.0.1+incompatible // indirect + github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d // indirect + github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect - github.com/ncruces/go-strftime v0.1.9 // indirect - github.com/ncw/swift/v2 v2.0.4 // indirect - github.com/nxadm/tail v1.4.11 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/onsi/ginkgo/v2 v2.23.3 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/oracle/oci-go-sdk/v65 v65.98.0 // indirect - github.com/panjf2000/ants/v2 v2.11.3 // indirect - github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect - github.com/philhofer/fwd v1.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.22 + github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/philhofer/fwd v1.1.1 // indirect + github.com/pierrec/lz4/v4 v4.1.14 // indirect github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect - github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect - github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106 // indirect - github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/xattr v0.4.12 // indirect - github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect - github.com/relvacode/iso8601 v1.6.0 // indirect - github.com/rfjakob/eme v1.1.2 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/samber/lo v1.51.0 // indirect - github.com/seaweedfs/cockroachdb-parser v0.0.0-20251021184156-909763b17138 - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/smartystreets/goconvey v1.8.1 // indirect - github.com/sony/gobreaker v1.0.0 // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect - github.com/tarantool/go-iproto v1.1.0 // indirect - github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect - github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect - github.com/tinylib/msgp v1.3.0 // indirect - github.com/tklauser/go-sysconf v0.3.15 // indirect - github.com/tklauser/numcpus v0.10.0 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 // indirect + github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898 // indirect + github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee // indirect + github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.3.0 // indirect + github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect + github.com/tinylib/msgp v1.1.6 // indirect github.com/twmb/murmur3 v1.1.3 // indirect - github.com/ugorji/go/codec v1.3.0 // indirect - github.com/unknwon/goconfig v1.0.0 // indirect - github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect - github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e // indirect - github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 // indirect - github.com/ydb-platform/ydb-go-yc v0.12.1 // indirect - github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect - github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - github.com/zeebo/blake3 v0.2.4 // indirect - github.com/zeebo/errs v1.4.0 // indirect - go.etcd.io/bbolt v1.4.2 // indirect - go.etcd.io/etcd/api/v3 v3.6.5 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.37.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/arch v0.20.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/time v0.12.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect - gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect - gopkg.in/validator.v2 v2.0.1 // indirect + github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7 // indirect + github.com/ydb-platform/ydb-go-yc v0.8.3 // indirect + github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.4 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.66.3 // indirect - moul.io/http2curl/v2 v2.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect - storj.io/common v0.0.0-20250808122759-804533d519c1 // indirect - storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect - storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect - storj.io/infectious v0.0.2 // indirect - storj.io/picobuf v0.0.4 // indirect - storj.io/uplink v1.13.1 // indirect + lukechampine.com/uint128 v1.1.1 // indirect ) -// Use the seaweedfs fork of cockroachdb-parser to fix cross-platform build issues -replace github.com/cockroachdb/cockroachdb-parser => github.com/seaweedfs/cockroachdb-parser v0.0.0-20251021182748-d0c58c67297e - -// replace github.com/seaweedfs/raft => /Users/chrislu/go/src/github.com/seaweedfs/raft +// replace github.com/chrislusf/raft => /Users/chrislu/go/src/github.com/chrislusf/raft diff --git a/go.sum b/go.sum index db3541d52..22a9a1ac4 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -15,6 +13,7 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.63.0/go.mod h1:GmezbQc7T2snqkEXWfZ0sy0VfkB/ivI2DdtJL2DEmlg= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= @@ -22,6 +21,7 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= @@ -33,840 +33,245 @@ cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2Z cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= -cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= -cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= -cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI= +cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.23.1 h1:Mesyv84WoP3tPjUC0O5LRqPWICO0ufdpWf9jtBCEz64= -cloud.google.com/go/kms v1.23.1/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= -cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= -cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= -cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= -cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= -cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs= +cloud.google.com/go/pubsub v1.23.1 h1:eVtkabVa+1M5ai67fGU+idws0hVb/KEPXiDmSS17+qc= +cloud.google.com/go/pubsub v1.23.1/go.mod h1:ttM6nEGYK/2CnB36ndNySU3ZxPwpBk8cXM6+iOlxH9U= +cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.57.0 h1:4g7NB7Ta7KetVbOMpCqy89C+Vg5VE8scqlSHUPm7Rds= -cloud.google.com/go/storage v1.57.0/go.mod h1:329cwlpzALLgJuu8beyJ/uvQznDHpa2U5lGjWednkzg= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= -cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= +contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= +contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= +contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= -github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= -github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= -github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= -github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 h1:l3SabZmNuXCMCbQUIeR4W6/N4j8SeH/lwX+a6leZhHo= -github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2/go.mod h1:k+mEZ4f1pVqZTRqtSDW2AhZ/3wT5qLpsUA75C/k7dtE= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/Azure/azure-amqp-common-go/v3 v3.2.1/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= +github.com/Azure/azure-amqp-common-go/v3 v3.2.2/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= +github.com/Azure/azure-service-bus-go v0.11.5/go.mod h1:MI6ge2CuQWBVq+ly456MY7XqNLJip5LO1iSFodbNLbU= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= +github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-amqp v0.16.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= +github.com/Azure/go-amqp v0.16.4/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws= +github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY= +github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Codefor/geohash v0.0.0-20140723084247-1b41c28e3a9d h1:iG9B49Q218F/XxXNRM7k/vWf7MKmLIS8AcJV9cGN4nA= -github.com/Codefor/geohash v0.0.0-20140723084247-1b41c28e3a9d/go.mod h1:RVnhzAX71far8Kc3TQeA0k/dcaEKUnTDSOyet/JCmGI= -github.com/DATA-DOG/go-sqlmock v1.3.2 h1:2L2f5t3kKnCLxnClDD/PrDfExFFa1wjESgxHG/B1ibo= -github.com/DATA-DOG/go-sqlmock v1.3.2/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/Files-com/files-sdk-go/v3 v3.2.218 h1:tIvcbHXNY/bq+Sno6vajOJOxhe5XbU59Fa1ohOybK+s= -github.com/Files-com/files-sdk-go/v3 v3.2.218/go.mod h1:E0BaGQbcMUcql+AfubCR/iasWKBxX5UZPivnQGC2z0M= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= -github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk= -github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw= -github.com/Jille/raft-grpc-transport v1.6.1 h1:gN3sjapb+fVbiebS7AfQQgbV2ecTOI7ur7NPPC7Mhoc= -github.com/Jille/raft-grpc-transport v1.6.1/go.mod h1:HbOjEdu/yzCJ/mjTF6wEOJNbAUpHfU2UOA2hVD4CNFg= -github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= -github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8= +github.com/Jille/raft-grpc-transport v1.2.0 h1:W/YSPz8IsirEyomjKmDog5Xk71o9+l4KhyMEX2TsgSs= +github.com/Jille/raft-grpc-transport v1.2.0/go.mod h1:GQGUXJfjlzwA390Ox1AyVYpjCLhtGd6yqY9Sb5hpQfc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I= -github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug= -github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo= -github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk= -github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo= -github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= -github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= -github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= -github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= -github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= -github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI= -github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk= -github.com/ProtonMail/gopenpgp/v2 v2.9.0 h1:ruLzBmwe4dR1hdnrsEJ/S7psSBmV15gFttFUPP/+/kE= -github.com/ProtonMail/gopenpgp/v2 v2.9.0/go.mod h1:IldDyh9Hv1ZCCYatTuuEt1XZJ0OPjxLpTarDfglih7s= -github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= -github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= -github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= -github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= -github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= -github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= -github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= -github.com/ThreeDotsLabs/watermill v1.5.1 h1:t5xMivyf9tpmU3iozPqyrCZXHvoV1XQDfihas4sV0fY= -github.com/ThreeDotsLabs/watermill v1.5.1/go.mod h1:Uop10dA3VeJWsSvis9qO3vbVY892LARrKAdki6WtXS4= -github.com/TomiHiltunen/geohash-golang v0.0.0-20150112065804-b3e4e625abfb h1:wumPkzt4zaxO4rHPBrjDK8iZMR41C1qs7njNqlacwQg= -github.com/TomiHiltunen/geohash-golang v0.0.0-20150112065804-b3e4e625abfb/go.mod h1:QiYsIBRQEO+Z4Rz7GoI+dsHVneZNONvhczuA+llOZNM= -github.com/a-h/templ v0.3.943 h1:o+mT/4yqhZ33F3ootBiHwaY4HM5EVaOJfIshvd5UNTY= -github.com/a-h/templ v0.3.943/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo= -github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= -github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3/go.mod h1:XaUnRxSCYgL3kkgX0QHIV0D+znljPIDImxlv2kbGv0Y= -github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= -github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/Shopify/sarama v1.34.1 h1:pVCQO7BMAK3s1jWhgi5v1W6lwZ6Veiekfc2vsgRS06Y= +github.com/Shopify/sarama v1.34.1/go.mod h1:NZSNswsnStpq8TUdFaqnpXm2Do6KRzTIjdBdVlL1YRM= +github.com/Shopify/toxiproxy/v2 v2.4.0 h1:O1e4Jfvr/hefNTNu+8VtdEG5lSeamJRo4aKhMOKNM64= +github.com/Shopify/toxiproxy/v2 v2.4.0/go.mod h1:3ilnjng821bkozDRxNoo64oI/DKqM+rOyJzb564+bvg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= -github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= -github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= -github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU= -github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A= -github.com/arangodb/go-driver v1.6.7 h1:9FBUsH60cKu7DjFGozTsaqWMy+3UeEplplqUn4yEcg4= -github.com/arangodb/go-driver v1.6.7/go.mod h1:H6uhiKUD/ki7fS9dNDK6xzMX/D5ibj5kGN1bGKd37Ho= +github.com/arangodb/go-driver v1.3.2 h1:07cmMqPqEl+/MlosjFtVVakEbkPqBSUvw9S9/atX0+4= +github.com/arangodb/go-driver v1.3.2/go.mod h1:5GAx3XvK72DJPhJgyjZOtYAGc4SpY7rZDb3LyhCvLcQ= github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e h1:Xg+hGrY2LcQBbxd0ZFdbGSyRKTYMZCfBbw/pMJFOk1g= github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e/go.mod h1:mq7Shfa/CaixoDxiyAAc5jZ6CVBAyPaNQCGS7mkj4Ho= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= -github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= -github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco= -github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 h1:by3nYZLR9l8bUH7kgaMU4dJgYFjyRdFEfORlDpPILB4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 h1:P18I4ipbk+b/3dZNq5YYh+Hq6XC0vp5RWkLp1tJldDA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3/go.mod h1:Rm3gw2Jov6e6kDuamDvyIlZJDMYk97VeCZ82wz/mVZ0= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= -github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= -github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= -github.com/bazelbuild/rules_go v0.46.0 h1:CTefzjN/D3Cdn3rkrM6qMWuQj59OBcuOjyIp3m4hZ7s= -github.com/bazelbuild/rules_go v0.46.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.51 h1:jO9hoLynZOrMM4dj0KjeKIK+c6PA+HQbKoHOkAEye2Y= +github.com/aws/aws-sdk-go v1.44.51/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= +github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= +github.com/aws/aws-sdk-go-v2/config v1.15.3 h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw= +github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= +github.com/aws/aws-sdk-go-v2/credentials v1.11.2 h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo= +github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc= +github.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4/go.mod h1:PJc8s+lxyU8rrre0/4a0pn2wgwiDvOEzoOjcJUBr67o= +github.com/aws/aws-sdk-go-v2/service/sns v1.17.4 h1:7TdmoJJBwLFyakXjfrGztejwY5Ie1JEto7YFfznCmAw= +github.com/aws/aws-sdk-go-v2/service/sns v1.17.4/go.mod h1:kElt+uCcXxcqFyc+bQqZPFD9DME/eC6oHBXvFzQ9Bcw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3 h1:uHjK81fESbGy2Y9lspub1+C6VN5W2UXTDo2A/Pm4G0U= +github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM= +github.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= +github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/biogo/store v0.0.0-20201120204734-aad293a2328f h1:+6okTAeUsUrdQr/qN7fIODzowrjjCrnJDg/gkYqcSXY= -github.com/biogo/store v0.0.0-20201120204734-aad293a2328f/go.mod h1:z52shMwD6SGwRg2iYFjjDwX5Ene4ENTw6HfXraUy/08= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s= -github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo= -github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8= -github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= -github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= -github.com/broady/gogeohash v0.0.0-20120525094510-7b2c40d64042 h1:iEdmkrNMLXbM7ecffOAtZJQOQUTE4iMonxrb5opUgE4= -github.com/broady/gogeohash v0.0.0-20120525094510-7b2c40d64042/go.mod h1:f1L9YvXvlt9JTa+A17trQjSMM6bV40f+tHjB+Pi+Fqk= -github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= -github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= -github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU= -github.com/buengese/sgzip v0.1.1/go.mod h1:i5ZiXGF3fhV7gL1xaRRL1nDnmpNj0X061FQzOS8VMas= -github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= -github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg= github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= -github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= -github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= -github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= -github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo= -github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= -github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= -github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 h1:z0uK8UQqjMVYzvk4tiiu3obv2B44+XBsvgEJREQfnO8= -github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9/go.mod h1:Jl2neWsQaDanWORdqZ4emBl50J4/aRBBS4FyyG9/PFo= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chrislusf/raft v1.0.9 h1:EGUpBUzQSzu7WG/jF16IeoySSuxyyK3lfoltcUckC3I= +github.com/chrislusf/raft v1.0.9/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= -github.com/cloudinary/cloudinary-go/v2 v2.12.0 h1:uveBJeNpJztKDwFW/B+Wuklq584hQmQXlo+hGTSOGZ8= -github.com/cloudinary/cloudinary-go/v2 v2.12.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo= -github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg= -github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA= -github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs= -github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc/go.mod h1:uvR42Hb/t52HQd7x5/ZLzZEK8oihrFpgnodIJ1vte2E= -github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= -github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= -github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/cockroachdb/apd/v3 v3.1.0 h1:MK3Ow7LH0W8zkd5GMKA1PvS9qG3bWFI95WaVNfyZJ/w= -github.com/cockroachdb/apd/v3 v3.1.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= -github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= -github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= -github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2 h1:8Vfw2iNEpYIV6aLtMwT5UOGuPmp9MKlEKWKFTuB+MPU= -github.com/cockroachdb/version v0.0.0-20250314144055-3860cd14adf2/go.mod h1:P9WiZOdQ1R/ZZDL0WzF5wlyRvrjtfhNOwMZymFpBwjE= -github.com/cognusion/imaging v1.0.2 h1:BQwBV8V8eF3+dwffp8Udl9xF1JKh5Z0z5JkJwAi98Mc= -github.com/cognusion/imaging v1.0.2/go.mod h1:mj7FvH7cT2dlFogQOSUQRtotBxJ4gFQ2ySMSmBm5dSk= -github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0= -github.com/colinmarc/hdfs/v2 v2.4.0/go.mod h1:0NAO+/3knbMx6+5pCv+Hcbaz4xn/Zzbn9+WIib2rKVI= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-iptables v0.4.3/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk= -github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= -github.com/cronokirby/saferith v0.33.0 h1:TgoQlfsD4LIwx71+ChfRcIpjkw+RPOapDEVxa+LhwLo= -github.com/cronokirby/saferith v0.33.0/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= -github.com/dave/dst v0.27.2 h1:4Y5VFTkhGLC1oddtNwuxxe36pnyLxMFXT51FOzH8Ekc= -github.com/dave/dst v0.27.2/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= -github.com/dave/jennifer v1.5.0 h1:HmgPN93bVDpkQyYbqhCHj5QlgvUkvEOzMyEvKLgCRrg= -github.com/dave/jennifer v1.5.0/go.mod h1:4MnyiFIlZS3l5tSDn8VnzE6ffAhYBMB2SZntBsZGUok= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= -github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= -github.com/dgryski/go-ddmin v0.0.0-20210904190556-96a6d69f1034/go.mod h1:zz4KxBkcXUWKjIcrc+uphJ1gPh/t18ymGm3PmQ+VGTk= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dchest/uniuri v0.0.0-20160212164326-8902c56451e9/go.mod h1:GgB8SF9nRG+GqaDtLcwJZsQFhcogVCJ79j4EdT0c2V4= +github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU= -github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M= -github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= -github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k= -github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= -github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg= -github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA= -github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg= -github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -876,20 +281,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= -github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= -github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= -github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= -github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -900,136 +292,85 @@ github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz4 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fanixk/geohash v0.0.0-20150324002647-c1f9b5fa157a h1:Fyfh/dsHFrC6nkX7H7+nFdTd1wROlX/FxEIWVpKYf1U= -github.com/fanixk/geohash v0.0.0-20150324002647-c1f9b5fa157a/go.mod h1:UgNw+PTmmGN8rV7RvjvnBMsoTU8ZXXnaT3hYsDTBlgQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluent/fluent-logger-golang v1.10.1 h1:wu54iN1O2afll5oQrtTjhgZRwWcfOeFFzwRsEkABfFQ= -github.com/fluent/fluent-logger-golang v1.10.1/go.mod h1:qOuXG4ZMrXaSTk12ua+uAb21xfNYOzn0roAtp7mfGAE= -github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= -github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fclairamb/ftpserverlib v0.18.0 h1:q/uz7jVFMoGEMswnA+nbaKEC5mzxXJOmhPE/Q3r7VZI= +github.com/fclairamb/ftpserverlib v0.18.0/go.mod h1:QhLRiCajhPG/2WwGgcsAqmlaYXX8KziNXtSe1BlRH+k= +github.com/fclairamb/go-log v0.3.0 h1:oSC7Zjt0FZIYC5xXahUUycKGkypSdr2srFPLsp7CLd0= +github.com/fclairamb/go-log v0.3.0/go.mod h1:XG61EiPlAXnPDN8SA4N3zeA+GyBJmVOCCo12WORx/gA= +github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg= +github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= -github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= -github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64= -github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= -github.com/getsentry/sentry-go v0.35.3 h1:u5IJaEqZyPdWqe/hKlBKBBnMTSxB/HenCqF3QLabeds= -github.com/getsentry/sentry-go v0.35.3/go.mod h1:mdL49ixwT2yi57k5eh7mpnDyPybixPzlzEJFu0Z76QA= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U= -github.com/gin-contrib/sessions v1.0.4/go.mod h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs= -github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= -github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= -github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= -github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= -github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= -github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= -github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo= -github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= -github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg= +github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= -github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= -github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg= -github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= -github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= -github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= -github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= +github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/go-redsync/redsync/v4 v4.14.0 h1:zyxzFJsmQHIPBl8iBT7KFKohWsjsghgGLiP8TnFMLNc= -github.com/go-redsync/redsync/v4 v4.14.0/go.mod h1:twMlVd19upZ/juvJyJGlQOSQxor1oeHtjs62l4pRFzo= -github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= -github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= -github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= -github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-redsync/redsync/v4 v4.5.1 h1:T97UCaY8MfQg/6kB7MTuimF4tnLOCdJbsvIoN5KmjZE= +github.com/go-redsync/redsync/v4 v4.5.1/go.mod h1:AfhgO1E6W3rlUTs6Zmz/B6qBZJFasV30lwo7nlizdDs= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= -github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= -github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= -github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus= -github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d h1:k544nNVphXK4Yt0FTduvOvCfJabEY/DMkdNw0zpCwBE= +github.com/gocql/gocql v0.0.0-20210707082121-9a3953d1826d/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= -github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= -github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo= -github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1043,9 +384,8 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= -github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1062,24 +402,19 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= -github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s= -github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50 h1:T0YCYlZLzmdsd0bsozI4ecxk03KYOiszof14y7ekQFw= -github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50/go.mod h1:qmRCJW6OqZkfBt584Cmq1im0f4367CLrdABrq5lMOWo= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -1093,26 +428,23 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo= -github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= -github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= -github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= +github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= +github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= +github.com/google/go-replayers/httpreplay v1.1.1 h1:H91sIMlt1NZzN7R+/ASswyouLJfW0WLW7fhyUFvDEkY= +github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= -github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1125,171 +457,146 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= -github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= -github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= +github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8= +github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o= -github.com/gorilla/context v1.1.2/go.mod h1:KDPwT9i/MeWHiLl90fuTgrt4/wPcv75vFAZLaOOcbxM= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= -github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= -github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hanwen/go-fuse/v2 v2.8.0 h1:wV8rG7rmCz8XHSOwBZhG5YcVqcYjkzivjmbaMafPlAs= -github.com/hanwen/go-fuse/v2 v2.8.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI= +github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= +github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= +github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17 h1:fOTuYWxywhaliwMobGTP/6NUCgGdal6pCpuch4MHCnU= +github.com/hanwen/go-fuse/v2 v2.1.1-0.20220627082937-d01fda7edf17/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= -github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= -github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= -github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= -github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= -github.com/hashicorp/raft v1.7.3 h1:DxpEqZJysHN0wK+fviai5mFcSYsCkNpFUl1xpAW8Rbo= -github.com/hashicorp/raft v1.7.3/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ= -github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= -github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= -github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc= -github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE= -github.com/hashicorp/vault/api v1.20.0 h1:KQMHElgudOsr+IbJgmbjHnCTxEpKs9LnozA1D3nozU4= -github.com/hashicorp/vault/api v1.20.0/go.mod h1:GZ4pcjfzoOWpkJ3ijHNpEoAxKEsBJnVljyTe3jM2Sms= -github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0= -github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts= -github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw= -github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.3.1/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.3.9 h1:9yuo1aR0bFTr1cw7pj3S2Bk6MhJCsnr2NAxvIBrP2x4= +github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0 h1:CO8dBMLH6dvE1jTn/30ZZw3iuPsNfajshWoJTnVc5cc= +github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= -github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jaegertracing/jaeger v1.47.0 h1:XXxTMO+GxX930gxKWsg90rFr6RswkCRIW0AgWFnTYsg= -github.com/jaegertracing/jaeger v1.47.0/go.mod h1:mHU/OHFML51CijQql4+rLfgPOcIb9MhxOMn+RKQwrJc= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= -github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= -github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= -github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= -github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= -github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs= -github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= -github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1301,124 +608,93 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 h1:JcltaO1HXM5S2KYOYcKgAV7slU0xPy1OcvrVgn98sRQ= -github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg= -github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40= -github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA= github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ= github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= -github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/klauspost/reedsolomon v1.12.5 h1:4cJuyH926If33BeDgiZpI5OU0pE+wUHZvMSyNGqN73Y= -github.com/klauspost/reedsolomon v1.12.5/go.mod h1:LkXRjLYGM8K/iQfujYnaPeDmhZLqkrGUyG9p7zs5L68= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= +github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.14 h1:QRqdp6bb9M9S5yyKeYteXKuoKE4p0tGlra81fKOpWH8= +github.com/klauspost/cpuid/v2 v2.0.14/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/reedsolomon v1.10.0 h1:MonMtg979rxSHjwtsla5dZLhreS0Lu42AyQ20bhjIGg= +github.com/klauspost/reedsolomon v1.10.0/go.mod h1:qHMIzMkuZUWqIh8mS/GruPdo3u0qwX2jk/LH440ON7Y= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU= -github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988/go.mod h1:/agobYum3uo/8V6yPVnq+R82pyVGCeuWW5arT4Txn8A= -github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 h1:FHVoZMOVRA+6/y4yRlbiR3WvsrOcKBd/f64H7YiWR2U= -github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk= github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lanrat/extsort v1.4.0 h1:jysS/Tjnp7mBwJ6NG8SY+XYFi8HF3LujGbqY9jOWjco= -github.com/lanrat/extsort v1.4.0/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4= -github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= -github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/linkedin/goavro/v2 v2.14.0 h1:aNO/js65U+Mwq4yB5f1h01c3wiM458qtRad1DN0CMUI= -github.com/linkedin/goavro/v2 v2.14.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= -github.com/linxGnu/grocksdb v1.10.2 h1:y0dXsWYULY15/BZMcwAZzLd13ZuyA470vyoNzWwmqG0= -github.com/linxGnu/grocksdb v1.10.2/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= -github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= -github.com/lithammer/shortuuid/v3 v3.0.7/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= -github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I= -github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/linxGnu/grocksdb v1.7.3 h1:S9XiU4FviunvjNdNG+kWe2BoOy/2EKZSdDyeGmL0vDs= +github.com/linxGnu/grocksdb v1.7.3/go.mod h1:G4zrMNj2CP2aCXF61jbmZH81tu+kU3qU4rYpOU8WOL8= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-ieproxy v0.0.3 h1:YkaHmK1CzE5C4O7A3hv3TCbfNDPSCf0RKZFX+VhBeYk= +github.com/mattn/go-ieproxy v0.0.3/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= -github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI= -github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mmcloughlin/geohash v0.9.0 h1:FihR004p/aE1Sju6gcVq5OLDqGcMnpBY+8moBqIsVOs= -github.com/mmcloughlin/geohash v0.9.0/go.mod h1:oNZxQo5yWJh0eMQEP/8hwQuVx9Z9tjwFUqcTB1SmG0c= -github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= -github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1426,540 +702,340 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= -github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= -github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= -github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU= -github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0= -github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= -github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= -github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= -github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= +github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296 h1:vU9tpM3apjYlLLeY23zRWJ9Zktr5jp+mloR942LEOpY= +github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.7.2 h1:+LEN8m0+jdCkiGc884WnDuxR+qj80/5arj+szKuRpRI= +github.com/nats-io/nats-server/v2 v2.7.2/go.mod h1:tckmrt0M6bVaDT3kmh9UrIq/CBOBBse+TpXQi5ldaa8= +github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d h1:GRSmEJutHkdoxKsRypP575IIdoXe7Bm6yHQF6GcDBnA= +github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= -github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/ncw/swift/v2 v2.0.4 h1:hHWVFxn5/YaTWAASmn4qyq2p6OyP/Hm3vMLzkjEqR7w= -github.com/ncw/swift/v2 v2.0.4/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olivere/elastic/v7 v7.0.32 h1:R7CXvbu8Eq+WlsLgxmKVKPox0oOwAE/2T9Si5BnvK6E= github.com/olivere/elastic/v7 v7.0.32/go.mod h1:c7PVmLe3Fxq77PIfY/bZmxY/TAamBhCzZ8xDOE09a9k= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= -github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= -github.com/oracle/oci-go-sdk/v65 v65.98.0 h1:ZKsy97KezSiYSN1Fml4hcwjpO+wq01rjBkPqIiUejVc= -github.com/oracle/oci-go-sdk/v65 v65.98.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q= -github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= -github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= -github.com/ory/dockertest/v3 v3.6.0/go.mod h1:4ZOpj8qBUmh8fcBSVzkH2bws2s91JdGvHUqan4GHEuQ= -github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= -github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= -github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= -github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU= -github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw= github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= -github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= -github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= -github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrre/compare v1.0.2 h1:k4IUsHgh+dbcAOIWCfxVa/7G6STjADH2qmhomv+1quc= -github.com/pierrre/compare v1.0.2/go.mod h1:8UvyRHH+9HS8Pczdd2z5x/wvv67krDwVxoOndaIIDVU= -github.com/pierrre/geohash v1.0.0 h1:f/zfjdV4rVofTCz1FhP07T+EMQAvcMM2ioGZVt+zqjI= -github.com/pierrre/geohash v1.0.0/go.mod h1:atytaeVa21hj5F6kMebHYPf8JbIrGxK2FSzN2ajKXms= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/check v0.0.0-20211026125417-57bd13f7b5f0 h1:HVl5539r48eA+uDuX/ziBmQCxzT1pGrzWbKuXT46Bq0= +github.com/pingcap/check v0.0.0-20211026125417-57bd13f7b5f0/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= -github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgWM9fSBIvaxsJHuGP0uM74HXtv3MyyGQ= -github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= +github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= +github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106 h1:lOtHtTItLlc9R+Vg/hU2klOOs+pjKLT2Cq+CEJgjvIQ= -github.com/pingcap/kvproto v0.0.0-20230403051650-e166ae588106/go.mod h1:guCyM5N+o+ru0TsoZ1hi9lDjUMs2sIBjW3ARTEpVbnk= -github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= -github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898 h1:c0d/sMTeftJQF9O5OHyezWwPrzf2FXcEE5HWwnq/Ahs= +github.com/pingcap/kvproto v0.0.0-20220106070556-3fa8fa04f898/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee h1:VO2t6IBpfvW34TdtD/G10VvnGqjLic1jzOuHjUb5VqM= +github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= -github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= -github.com/pkg/xattr v0.4.12 h1:rRTkSyFNTRElv6pkA3zpjHpQ90p/OdHQC1GmGh1aTjM= -github.com/pkg/xattr v0.4.12/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= -github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8= -github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= -github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= -github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= -github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= -github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= -github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= -github.com/rclone/rclone v1.71.1 h1:cpODfWTRz5i/WAzXsyW85tzfIKNsd1aq8CE8lUB+0zg= -github.com/rclone/rclone v1.71.1/go.mod h1:NLyX57FrnZ9nVLTY5TRdMmGelrGKbIRYGcgRkNdqqlA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rdleal/intervalst v1.5.0 h1:SEB9bCFz5IqD1yhfH1Wv8IBnY/JQxDplwkxHjT6hamU= -github.com/rdleal/intervalst v1.5.0/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ= -github.com/redis/go-redis/v9 v9.14.1 h1:nDCrEiJmfOWhD76xlaw+HXT0c9hfNWeXgl0vIRYSDvQ= -github.com/redis/go-redis/v9 v9.14.1/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= -github.com/redis/rueidis v1.0.64 h1:XqgbueDuNV3qFdVdQwAHJl1uNt90zUuAJuzqjH4cw6Y= -github.com/redis/rueidis v1.0.64/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA= -github.com/redis/rueidis/rueidiscompat v1.0.64 h1:M8JbLP4LyHQhBLBRsUQIzui8/LyTtdESNIMVveqm4RY= -github.com/redis/rueidis/rueidiscompat v1.0.64/go.mod h1:8pJVPhEjpw0izZFSxYwDziUiEYEkEklTSw/nZzga61M= -github.com/rekby/fixenv v0.3.2/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= -github.com/rekby/fixenv v0.6.1 h1:jUFiSPpajT4WY2cYuc++7Y1zWrnCxnovGCIX72PZniM= -github.com/rekby/fixenv v0.6.1/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= -github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU= -github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4= -github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= -github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI= -github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= -github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= -github.com/seaweedfs/cockroachdb-parser v0.0.0-20251021184156-909763b17138 h1:bX1vBF7GQjPeFQsCAZ8gCQGS/nJQnekL7gZ4Qg/pF4E= -github.com/seaweedfs/cockroachdb-parser v0.0.0-20251021184156-909763b17138/go.mod h1:JSKCh6uCHBz91lQYFYHCyTrSVIPge4SUFVn28iwMNB0= -github.com/seaweedfs/goexif v1.0.3 h1:ve/OjI7dxPW8X9YQsv3JuVMaxEyF9Rvfd04ouL+Bz30= -github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk= -github.com/seaweedfs/raft v1.1.3 h1:5B6hgneQ7IuU4Ceom/f6QUt8pEeqjcsRo+IxlyPZCws= -github.com/seaweedfs/raft v1.1.3/go.mod h1:9cYlEBA+djJbnf/5tWsCybtbL7ICYpi+Uxcg3MxjuNs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seaweedfs/goexif v2.0.0+incompatible h1:x8pckiT12QQhifwhDQpeISgDfsqmQ6VR4LFPQ64JRps= +github.com/seaweedfs/goexif v2.0.0+incompatible/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk= +github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU= -github.com/shirou/gopsutil/v4 v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= -github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/snabb/httpreaderat v1.0.1 h1:whlb+vuZmyjqVop8x1EKOg05l2NE4z9lsMMXjmSUCnY= -github.com/snabb/httpreaderat v1.0.1/go.mod h1:lpbGrKDWF37yvRbtRvQsbesS6Ty5c83t8ztannPoMsA= -github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= -github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spacemonkeygo/monkit/v3 v3.0.24 h1:cKixJ+evHnfJhWNyIZjBy5hoW8LTWmrJXPo18tzLNrk= -github.com/spacemonkeygo/monkit/v3 v3.0.24/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc= -github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY= -github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= -github.com/tarantool/go-iproto v1.1.0 h1:HULVOIHsiehI+FnHfM7wMDntuzUddO09DKqu2WnFQ5A= -github.com/tarantool/go-iproto v1.1.0/go.mod h1:LNCtdyZxojUed8SbOiYHoc3v9NvaZTB7p96hUySMlIo= -github.com/tarantool/go-tarantool/v2 v2.4.0 h1:cfGngxdknpVVbd/vF2LvaoWsKjsLV9i3xC859XgsJlI= -github.com/tarantool/go-tarantool/v2 v2.4.0/go.mod h1:MTbhdjFc3Jl63Lgi/UJr5D+QbT+QegqOzsNJGmaw7VM= -github.com/the42/cartconvert v0.0.0-20131203171324-aae784c392b8 h1:I4DY8wLxJXCrMYzDM6lKCGc3IQwJX0PlTLsd3nQqI3c= -github.com/the42/cartconvert v0.0.0-20131203171324-aae784c392b8/go.mod h1:fWO/msnJVhHqN1yX6OBoxSyfj7TEj1hHiL8bJSQsK30= -github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4= -github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM= -github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= -github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM= -github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tikv/client-go/v2 v2.0.7 h1:nNTx/AR6n8Ew5VtHanFPG8NkFLLXbaNs5/K43DDma04= -github.com/tikv/client-go/v2 v2.0.7/go.mod h1:9JNUWtHN8cx8eynHZ9xzdPi5YY6aiN1ILQyhfPUBcMo= -github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 h1:bzlSSzw+6qTwPs8pMcPI1bt27TAOhSdAEwdPCz6eBlg= -github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1/go.mod h1:3cTcfo8GRA2H/uSttqA3LvMfMSHVBJaXk3IgkFXFVxo= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= -github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= -github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= -github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= -github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= -github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= +github.com/tikv/client-go/v2 v2.0.1 h1:+K/VvVOxEOXKMtR83bs5Aj3lrYdTdTZdvH0apfAWW10= +github.com/tikv/client-go/v2 v2.0.1/go.mod h1:gaHSp8rnxZ0w36qb6QPPNPh9P0Mu5vAEwCQcc0Brni4= +github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 h1:7h/Oi4Zw6eGCeXh4Q4ZvKI4k7nBJVUq0c29YCcLwKPM= +github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201/go.mod h1:fEvI5fhAuJn1Fn87VJF8ByE9Vc16EzWGoePZB21/nL8= +github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa8dXTIILGIxmmhjyUPJEcwzPGaU= github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twmb/murmur3 v1.1.3 h1:D83U0XYKcHRYwYIpBKf3Pks91Z0Byda/9SJ8B6EMRcA= github.com/twmb/murmur3 v1.1.3/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/twpayne/go-geom v1.4.1 h1:LeivFqaGBRfyg0XJJ9pkudcptwhSSrYN9KZUW6HcgdA= -github.com/twpayne/go-geom v1.4.1/go.mod h1:k/zktXdL+qnA6OgKsdEGUTA17jbQ2ZPTUa3CCySuGpE= -github.com/twpayne/go-kml v1.5.2 h1:rFMw2/EwgkVssGS2MT6YfWSPZz6BgcJkLxQ53jnE8rQ= -github.com/twpayne/go-kml v1.5.2/go.mod h1:kz8jAiIz6FIdU2Zjce9qGlVtgFYES9vt7BTPBHf5jl4= -github.com/twpayne/go-polyline v1.0.0/go.mod h1:ICh24bcLYBX8CknfvNPKqoTbe+eg+MX1NPyJmSBo7pU= -github.com/twpayne/go-waypoint v0.0.0-20200706203930-b263a7f6e4e8/go.mod h1:qj5pHncxKhu9gxtZEYWypA/z097sxhFlbTyOyt9gcnU= github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 h1:QEePdg0ty2r0t1+qwfZmQ4OOl/MB2UXIeJSpIZv56lg= github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43/go.mod h1:OYRfF6eb5wY9VRFkXJH8FFBi3plw2v+giaIu7P054pM= -github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= -github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= -github.com/unknwon/goconfig v1.0.0 h1:rS7O+CmUdli1T+oDm7fYj1MwqNWtEJfNj+FqcUHML8U= -github.com/unknwon/goconfig v1.0.0/go.mod h1:qu2ZQ/wcC/if2u32263HTVC39PeOQRSmidQk3DuDFQ8= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/viant/assertly v0.9.0 h1:uB3jO+qmWQcrSCHQRxA2kk88eXAdaklUUDxxCU5wBHQ= -github.com/viant/assertly v0.9.0/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/ptrie v1.0.1 h1:3fFC8XqCSchf11sCSS5sbb8eGDNEP2g2Hj96lNdHlZY= -github.com/viant/ptrie v1.0.1/go.mod h1:Y+mwwNCIUgFrCZcrG4/QChfi4ubvnNBsyrENBIgigu0= -github.com/viant/toolbox v0.34.5 h1:szWNPiGHjo8Dd4v2a59saEhG31DRL2Xf3aJ0ZtTSuqc= -github.com/viant/toolbox v0.34.5/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= -github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= -github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/viant/assertly v0.5.4 h1:5Hh4U3pLZa6uhCFAGpYOxck/8l9TZczEzoHNfJAhHEQ= +github.com/viant/assertly v0.5.4/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/ptrie v0.3.0 h1:SDaRd7Gqr1+ItCNz0GpTxRdK21nOfqjV6YtBm9jGlMY= +github.com/viant/ptrie v0.3.0/go.mod h1:VguMnbGfz95Zw+V5VarYSqtqslDxJbOv++xLzxkMhec= +github.com/viant/toolbox v0.33.2 h1:Av844IIeGz81gT672qZemyptGfbrcxqGymA5RFnIPjE= +github.com/viant/toolbox v0.33.2/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= -github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e h1:9LPdmD1vqadsDQUva6t2O9MbnyvoOgo8nFNPaOIH5U8= github.com/yandex-cloud/go-genproto v0.0.0-20211115083454-9ca41db5ed9e/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= -github.com/ydb-platform/ydb-go-genproto v0.0.0-20221215182650-986f9d10542f/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= -github.com/ydb-platform/ydb-go-genproto v0.0.0-20230528143953-42c825ace222/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= -github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 h1:LY6cI8cP4B9rrpTleZk95+08kl2gF4rixG7+V/dwL6Q= -github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= -github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0 h1:/NyPd9KnCJgzrEXCArqk1ThqCH2Dh31uUwl88o/VkuM= -github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0/go.mod h1:9YzkhlIymWaJGX6KMU3vh5sOf3UKbCXkG/ZdjaI3zNM= -github.com/ydb-platform/ydb-go-sdk/v3 v3.44.0/go.mod h1:oSLwnuilwIpaF5bJJMAofnGgzPJusoI3zWMNb8I+GnM= -github.com/ydb-platform/ydb-go-sdk/v3 v3.47.3/go.mod h1:bWnOIcUHd7+Sl7DN+yhyY1H/I61z53GczvwJgXMgvj0= -github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5 h1:olAAZfpMnFYChJNgZJ16G4jqoelRNx7Kx4tW50XcMv0= -github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5/go.mod h1:Pp1w2xxUoLQ3NCNAwV7pvDq0TVQOdtAqs+ZiC+i8r14= -github.com/ydb-platform/ydb-go-yc v0.12.1 h1:qw3Fa+T81+Kpu5Io2vYHJOwcrYrVjgJlT6t/0dOXJrA= -github.com/ydb-platform/ydb-go-yc v0.12.1/go.mod h1:t/ZA4ECdgPWjAb4jyDe8AzQZB5dhpGbi3iCahFaNwBY= -github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 h1:9E5q8Nsy2RiJMZDNVy0A3KUrIMBPakJ2VgloeWbcI84= -github.com/ydb-platform/ydb-go-yc-metadata v0.6.1/go.mod h1:NW4LXW2WhY2tLAwCBHBuHAwRUVF5lsscaSPjdAFKldc= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20220203104745-929cf9c248bc/go.mod h1:cc138nptTn9eKptCQl/grxP6pBKpo/bnXDiOxuVZtps= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7 h1:S3bwscnat3pa188pvEuXDUstxPjCnGpn5bZJuRMkL+g= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7/go.mod h1:cc138nptTn9eKptCQl/grxP6pBKpo/bnXDiOxuVZtps= +github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4= +github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4= +github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88= +github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0 h1:F394Kkl+QPLrl0+fWpoSafdfgGqQCZOyzvXLxzVUWfs= +github.com/ydb-platform/ydb-go-sdk/v3 v3.28.0/go.mod h1:vXjmbeEAWlkVE5/ym3XHhtnWk7aDGGqFMKrfgwbRUkQ= +github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo= +github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE= +github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg= +github.com/ydb-platform/ydb-go-yc-metadata v0.5.2/go.mod h1:82SQ4L3PewiEmFW4oTMc1sfPjODasIYxD/SKGsbK74s= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8= -github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A= -github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= -github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= -github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= -go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= -go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= -go.etcd.io/etcd/api/v3 v3.6.5 h1:pMMc42276sgR1j1raO/Qv3QI9Af/AuyQUW6CBAWuntA= -go.etcd.io/etcd/api/v3 v3.6.5/go.mod h1:ob0/oWA/UQQlT1BmaEkWQzI0sJ1M0Et0mMpaABxguOQ= -go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= -go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= -go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U= -go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo= -go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= -go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o= +go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c= +go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.37.0 h1:B+WbN9RPsvobe6q4vP6KgM8/9plR/HNjgGBrfcOlweA= -go.opentelemetry.io/contrib/detectors/gcp v1.37.0/go.mod h1:K5zQ3TT7p2ru9Qkzk0bKtCql0RGkPj9pRjpXgZJZ+rU= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0 h1:rbRJ8BBoVMsQShESYZ0FkvcITu8X8QNwJogcLUmDNNw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0/go.mod h1:u8hcp8ji5gaM/RfcOo8z9NMnf1pVLfVY7lBY2VOGuUU= -go.opentelemetry.io/otel/exporters/zipkin v1.36.0 h1:s0n95ya5tOG03exJ5JySOdJFtwGo4ZQ+KeY7Zro4CLI= -go.opentelemetry.io/otel/exporters/zipkin v1.36.0/go.mod h1:m9wRxtKA2MZ1HcnNC4BKI+9aYe434qRZTCvI7QGUN7Y= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -gocloud.dev v0.43.0 h1:aW3eq4RMyehbJ54PMsh4hsp7iX8cO/98ZRzJJOzN/5M= -gocloud.dev v0.43.0/go.mod h1:eD8rkg7LhKUHrzkEdLTZ+Ty/vgPHPCd+yMQdfelQVu4= -gocloud.dev/pubsub/natspubsub v0.43.0 h1:k35tFoaorvD9Fa26zVEEzyXiMOEyXNHc0pBOmRYvQI0= -gocloud.dev/pubsub/natspubsub v0.43.0/go.mod h1:xJn8TO8pGYieDn6AsRFsYfhQW8cnC+xGmG9APGNxkpQ= -gocloud.dev/pubsub/rabbitpubsub v0.43.0 h1:6nNZFSlJ1dk2GujL8PFltfLz3vC6IbrpjGS4FTduo1s= -gocloud.dev/pubsub/rabbitpubsub v0.43.0/go.mod h1:sEaueAGat+OASRoB3QDkghCtibKttgg7X6zsPTm1pl0= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +gocloud.dev v0.25.0 h1:Y7vDq8xj7SyM848KXf32Krda2e6jQ4CLh/mTeCSqXtk= +gocloud.dev v0.25.0/go.mod h1:7HegHVCYZrMiU3IE1qtnzf/vRrDwLYnRNR3EhWX8x9Y= +gocloud.dev/pubsub/natspubsub v0.25.0 h1:k7saTEnbNZeJRWW2l46pri6f9Ha8ndoyvA1QNj92nt4= +gocloud.dev/pubsub/natspubsub v0.25.0/go.mod h1:Q8aqOBI3tmMg1IaxZBog6nq/fEyH9UNEtpO6X4aJH0I= +gocloud.dev/pubsub/rabbitpubsub v0.25.0 h1:jDAHvIH0h40quEuqusYXfK28sCABAMAnjLqLybu/aeo= +gocloud.dev/pubsub/rabbitpubsub v0.25.0/go.mod h1:gfOrMlNXnxzIYB3dK1mNenXeBwJjm2ZSRBgNzxan0/Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd h1:zVFyTKZN/Q7mNRWSs1GOYnHM9NiFSJ54YVRsD0rNWT4= +golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ= -golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1984,22 +1060,13 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2011,7 +1078,10 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2022,6 +1092,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2035,38 +1106,26 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210907225631-ff17edfbf26d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220420153159-1850ba15e1be/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= -golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2078,25 +1137,19 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2108,27 +1161,17 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2136,15 +1179,20 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2159,23 +1207,25 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2184,71 +1234,37 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220110181412-a018aaa089fe/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2257,48 +1273,41 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2307,6 +1316,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2325,13 +1335,15 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200806022845-90696ccdc692/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2341,40 +1353,18 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= -golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk= -golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2396,6 +1386,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -2404,40 +1395,32 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= +google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8= +google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= -google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= +google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U= +google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2447,6 +1430,7 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2460,7 +1444,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2469,6 +1452,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2483,7 +1467,9 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -2498,83 +1484,49 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f h1:hJ/Y5SqPXbarffmAsApliUlcvMU+wScNGfyop4bZm8o= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 h1:Nt6z9UHqSlIdIGJdz6KhTIs2VRx/iOsA5iE8bmQNcxs= -google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s= -google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -2597,27 +1549,18 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20 h1:MLBCGN1O7GzIx+cBiwfYPwtmZ41U3Mn/cotLJciaArI= -google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= -google.golang.org/grpc/security/advancedtls v1.0.0 h1:/KQ7VP/1bs53/aopk9QhuPyFAp9Dm9Ejix3lzYkCrDA= -google.golang.org/grpc/security/advancedtls v1.0.0/go.mod h1:o+s4go+e1PJ2AjuQMY5hU82W7lDlefjJA6FqEHRVHWk= +google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b h1:NuxyvVZoDfHZwYW9LD4GJiF5/nhiSyP4/InTrvw9Ibk= +google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b/go.mod h1:IBqQ7wSUJ2Ep09a8rMWFsg4fmI2r38zwsq8a0GgxXpM= +google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1 h1:0emxaJWaG6CfrA9Nbe4aHWbFz5AXw2QPEJP0/f42LCE= +google.golang.org/grpc/security/advancedtls v0.0.0-20220622233350-5cdb09fa29c1/go.mod h1:PoKncN6QA5h/eFRzlCWpHSZnXF2pCtnBzAfeanB8OGQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2631,11 +1574,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2644,33 +1584,28 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= -gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= -gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2678,88 +1613,48 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM= -modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6 h1:3l18poV+iUemQ98O3X5OMr97LOqlzis+ytivU4NqGhA= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU= -modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM= -modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= -modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= -modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= -modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= -modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ= -modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8= +modernc.org/libc v1.16.7 h1:qzQtHhsZNpVPpeCu+aMIQldXeV1P0vRhSqCL0nOIJOA= +modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= -modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU= modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= -modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= -modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= -modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= -modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.39.0 h1:6bwu9Ooim0yVYA7IZn9demiQk/Ejp0BtTjBWFLymSeY= -modernc.org/sqlite v1.39.0/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= +modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI= +modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= -modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI= +modernc.org/strutil v1.1.2/go.mod h1:OYajnUAcI/MX+XD/Wx7v1bbdvcQSvxgtb0gC+u3d3eg= +modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao= modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= -moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -storj.io/common v0.0.0-20250808122759-804533d519c1 h1:z7ZjU+TlPZ2Lq2S12hT6+Fr7jFsBxPMrPBH4zZpZuUA= -storj.io/common v0.0.0-20250808122759-804533d519c1/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY= -storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro= -storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg= -storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 h1:5MZ0CyMbG6Pi0rRzUWVG6dvpXjbBYEX2oyXuj+tT+sk= -storj.io/eventkit v0.0.0-20250410172343-61f26d3de156/go.mod h1:CpnM6kfZV58dcq3lpbo/IQ4/KoutarnTSHY0GYVwnYw= -storj.io/infectious v0.0.2 h1:rGIdDC/6gNYAStsxsZU79D/MqFjNyJc1tsyyj9sTl7Q= -storj.io/infectious v0.0.2/go.mod h1:QEjKKww28Sjl1x8iDsjBpOM4r1Yp8RsowNcItsZJ1Vs= -storj.io/picobuf v0.0.4 h1:qswHDla+YZ2TovGtMnU4astjvrADSIz84FXRn0qgP6o= -storj.io/picobuf v0.0.4/go.mod h1:hSMxmZc58MS/2qSLy1I0idovlO7+6K47wIGUyRZa6mg= -storj.io/uplink v1.13.1 h1:C8RdW/upALoCyuF16Lod9XGCXEdbJAS+ABQy9JO/0pA= -storj.io/uplink v1.13.1/go.mod h1:x0MQr4UfFsQBwgVWZAtEsLpuwAn6dg7G0Mpne1r516E= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/k8s/charts/artifacthub-repo.yml b/k8s/charts/artifacthub-repo.yml deleted file mode 100644 index 2fac2aed3..000000000 --- a/k8s/charts/artifacthub-repo.yml +++ /dev/null @@ -1,16 +0,0 @@ -# Artifact Hub repository metadata file -# -# Some settings like the verified publisher flag or the ignored packages won't -# be applied until the next time the repository is processed. Please keep in -# mind that the repository won't be processed if it has not changed since the -# last time it was processed. Depending on the repository kind, this is checked -# in a different way. For Helm http based repositories, we consider it has -# changed if the `index.yaml` file changes. For git based repositories, it does -# when the hash of the last commit in the branch you set up changes. This does -# NOT apply to ownership claim operations, which are processed immediately. -# - -repositoryID: 5b2f1fe2-20e5-486e-9746-183484642aa2 -# owners: # (optional, used to claim repository ownership) -# - name: username -# email: email diff --git a/k8s/charts/seaweedfs/Chart.yaml b/k8s/charts/seaweedfs/Chart.yaml deleted file mode 100644 index c83b341b6..000000000 --- a/k8s/charts/seaweedfs/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -description: SeaweedFS -name: seaweedfs -appVersion: "3.98" -# Dev note: Trigger a helm chart release by `git tag -a helm-` -version: 4.0.398 diff --git a/k8s/charts/seaweedfs/README.md b/k8s/charts/seaweedfs/README.md deleted file mode 100644 index 30885aee3..000000000 --- a/k8s/charts/seaweedfs/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# SEAWEEDFS - helm chart (2.x+) - -## Getting Started - -### Add the helm repo - -```bash -helm repo add seaweedfs https://seaweedfs.github.io/seaweedfs/helm -``` - -### Install the helm chart - -```bash -helm install seaweedfs seaweedfs/seaweedfs -``` - -### (Recommended) Provide `values.yaml` - -```bash -helm install --values=values.yaml seaweedfs seaweedfs/seaweedfs -``` - -## Info: -* master/filer/volume are stateful sets with anti-affinity on the hostname, -so your deployment will be spread/HA. -* chart is using memsql(mysql) as the filer backend to enable HA (multiple filer instances) and backup/HA memsql can provide. -* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected to the filer with ENV. -* cert config exists and can be enabled, but not been tested, requires cert-manager to be installed. - -## Prerequisites -### Database - -leveldb is the default database, this supports multiple filer replicas that will [sync automatically](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication), with some [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation). - -When the [limitations](https://github.com/seaweedfs/seaweedfs/wiki/Filer-Store-Replication#limitation) apply, or for a large number of filer replicas, an external datastore is recommended. - -Such as MySQL-compatible database, as specified in the `values.yaml` at `filer.extraEnvironmentVars`. -This database should be pre-configured and initialized by running: -```sql -CREATE TABLE IF NOT EXISTS `filemeta` ( - `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', - `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', - `directory` TEXT NOT NULL COMMENT 'full path to parent directory', - `meta` LONGBLOB, - PRIMARY KEY (`dirhash`, `name`) -) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -``` - -Alternative database can also be configured (e.g. leveldb, postgres) following the instructions at `filer.extraEnvironmentVars`. - -### Node Labels -Kubernetes nodes can have labels which help to define which node(Host) will run which pod: - -Here is an example: -* s3/filer/master needs the label **sw-backend=true** -* volume need the label **sw-volume=true** - -to label a node to be able to run all pod types in k8s: -``` -kubectl label node YOUR_NODE_NAME sw-volume=true sw-backend=true -``` - -on production k8s deployment you will want each pod to have a different host, -especially the volume server and the masters, all pods (master/volume/filer) -should have anti-affinity rules to disallow running multiple component pods on the same host. - -If you still want to run multiple pods of the same component (master/volume/filer) on the same host please set/update the corresponding affinity rule in values.yaml to an empty one: - -```affinity: ""``` - -## PVC - storage class ### - -On the volume stateful set added support for k8s PVC, currently example -with the simple local-path-provisioner from Rancher (comes included with k3d / k3s) -https://github.com/rancher/local-path-provisioner - -you can use ANY storage class you like, just update the correct storage-class -for your deployment. - -## current instances config (AIO): - -1 instance for each type (master/filer+s3/volume) - -You can update the replicas count for each node type in values.yaml, -need to add more nodes with the corresponding labels if applicable. - -Most of the configuration are available through values.yaml any pull requests to expand functionality or usability are greatly appreciated. Any pull request must pass [chart-testing](https://github.com/helm/chart-testing). - -## S3 configuration - -To enable an s3 endpoint for your filer with a default install add the following to your values.yaml: - -```yaml -filer: - s3: - enabled: true -``` - -### Enabling Authentication to S3 - -To enable authentication for S3, you have two options: - -- let the helm chart create an admin user as well as a read only user -- provide your own s3 config.json file via an existing Kubernetes Secret - -#### Use the default credentials for S3 - -Example parameters for your values.yaml: - -```yaml -filer: - s3: - enabled: true - enableAuth: true -``` - -#### Provide your own credentials for S3 - -Example parameters for your values.yaml: - -```yaml -filer: - s3: - enabled: true - enableAuth: true - existingConfigSecret: my-s3-secret -``` - -Example existing secret with your s3 config to create an admin user and readonly user, both with credentials: - -```yaml ---- -# Source: seaweedfs/templates/seaweedfs-s3-secret.yaml -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: my-s3-secret - namespace: seaweedfs - labels: - app.kubernetes.io/name: seaweedfs - app.kubernetes.io/component: s3 -stringData: - # this key must be an inline json config file - seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"snu8yoP6QAlY0ne4","secretKey":"PNzBcmeLNEdR0oviwm04NQAicOrDH1Km"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"SCigFee6c5lbi04A","secretKey":"kgFhbT38R8WUYVtiFQ1OiSVOrYr3NKku"}],"actions":["Read"]}]}' -``` - -## Enterprise - -For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition, -which has a self-healing storage format with better data protection. diff --git a/k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json b/k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json deleted file mode 100644 index 30b43f867..000000000 --- a/k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json +++ /dev/null @@ -1,3359 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "limit": 100, - "name": "Annotations & Alerts", - "showIn": 0, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 10423, - "graphTooltip": 0, - "id": 160, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 67, - "panels": [], - "title": "Master", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Whether master is leader or not", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "bool_yes_no", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 1 - }, - "id": 57, - "links": [], - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": {}, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "exemplar": true, - "expr": "sum by (pod) (SeaweedFS_master_is_leader{job=\"seaweedfs-master\", namespace=\"$NAMESPACE\"})", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "refId": "A", - "step": 60 - } - ], - "title": "Raft leader", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Count times leader changed", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 4, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 1 - }, - "id": 68, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.1.2", - "targets": [ - { - "exemplar": true, - "expr": "sum by (pod) (SeaweedFS_master_leader_changes{job=\"seaweedfs-master\", type=~\".+\", namespace=\"$NAMESPACE\"})", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "refId": "A", - "step": 60 - } - ], - "title": "Master leader changes", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Heartbeats received from components", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 4, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 1 - }, - "id": 69, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.1.2", - "targets": [ - { - "exemplar": true, - "expr": "sum by (type) (increase(SeaweedFS_master_received_heartbeats{job=\"seaweedfs-master\", namespace=\"$NAMESPACE\"}[$__rate_interval]))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 60 - } - ], - "title": "Received heartbeats", - "type": "timeseries" - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 0 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "avg" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "message": "", - "name": "Replica Placement Mismatch alert", - "noDataState": "ok", - "notifications": [] - }, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Count replica placement mismatch", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 4, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 7 - }, - "id": 70, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.1.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "sum (SeaweedFS_master_replica_placement_mismatch{job=\"seaweedfs-master\", namespace=\"$NAMESPACE\"} > 0) by (pod)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "refId": "A", - "step": 60 - } - ], - "thresholds": [ - { - "colorMode": "critical", - "op": "gt", - "value": 0, - "visible": true - } - ], - "title": "Replica Placement Mismatch", - "type": "timeseries" - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1, - 1 - ], - "type": "outside_range" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "avg" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "message": "Raft leader count of master-servers not equal to 1", - "name": "Raft leader alert", - "noDataState": "no_data", - "notifications": [] - }, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Total count of raft leaders", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 7 - }, - "id": 71, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.1.2", - "targets": [ - { - "exemplar": true, - "expr": "sum (SeaweedFS_master_is_leader{job=\"seaweedfs-master\", namespace=\"$NAMESPACE\"})", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "Leaders", - "refId": "A", - "step": 60 - } - ], - "thresholds": [ - { - "colorMode": "critical", - "op": "lt", - "value": 1, - "visible": true - }, - { - "colorMode": "critical", - "op": "gt", - "value": 1, - "visible": true - } - ], - "title": "Raft leader count", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Whether cluster locked or not", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "bool_yes_no", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 7 - }, - "id": 74, - "links": [], - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "last" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": {}, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "SeaweedFS_master_admin_lock{job=\"seaweedfs-master\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "Client IP: {{client}}", - "range": true, - "refId": "A", - "step": 60 - } - ], - "title": "Admin lock", - "type": "stat" - }, - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 60, - "panels": [], - "title": "Filer", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 14 - }, - "id": 46, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - } - ], - "title": "Filer Request Duration 90th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 14 - }, - "id": 49, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - }, - { - "expr": "", - "format": "time_series", - "intervalFactor": 2, - "refId": "C" - } - ], - "title": "Filer Request Duration 95th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 14 - }, - "id": 45, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - }, - { - "expr": "", - "format": "time_series", - "intervalFactor": 2, - "refId": "C" - } - ], - "title": "Filer Request Duration 99th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short", - "unitScale": true - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "total" - }, - "properties": [ - { - "id": "custom.lineWidth", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsZero", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsNull", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 2, - "links": [], - "options": { - "legend": { - "calcs": [ - "lastNotNull", - "max" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true, - "width": 250 - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "exemplar": true, - "expr": "sum by (type) (rate(SeaweedFS_filer_request_total{namespace=\"$NAMESPACE\"}[$__rate_interval]))", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 30 - } - ], - "title": "Filer QPS", - "type": "timeseries" - }, - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 61, - "panels": [], - "title": "S3 Gateway", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 29 - }, - "id": 65, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - } - ], - "title": "S3 Request Duration 90th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 29 - }, - "id": 56, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - } - ], - "title": "S3 Request Duration 95th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 29 - }, - "id": 58, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B", - "step": 60 - } - ], - "title": "S3 Request Duration 99th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 36 - }, - "id": 84, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_received_bytes_total{namespace=\"$NAMESPACE\"}[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "title": "S3 Bucket Traffic Received", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 36 - }, - "id": 85, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_sent_bytes_total{namespace=\"$NAMESPACE\"}[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "title": "S3 Bucket Traffic Sent", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 41 - }, - "id": 72, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "average", - "refId": "A", - "step": 60 - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type, pod))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{type}}-{{pod}}", - "refId": "B", - "step": 60 - } - ], - "title": "S3 Request. Duration 99th percentile per instance", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 50 - }, - "id": 73, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type, bucket))", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{type}}-{{bucket}}", - "refId": "B", - "step": 60 - } - ], - "title": "S3 Request Duration 99th percentile per bucket", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short", - "unitScale": true - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "total" - }, - "properties": [ - { - "id": "custom.lineWidth", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsZero", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsNull", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 57 - }, - "id": 55, - "links": [], - "options": { - "legend": { - "calcs": [ - "lastNotNull", - "max" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true, - "width": 250 - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum (rate(SeaweedFS_s3_request_total{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (type)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 30 - } - ], - "title": "S3 API QPS", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Cost in US$", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyUSD", - "unitScale": true - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "total" - }, - "properties": [ - { - "id": "custom.lineWidth", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsZero", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsNull", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 64 - }, - "hideTimeOverride": false, - "id": 59, - "links": [], - "options": { - "legend": { - "calcs": [ - "lastNotNull", - "max" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true, - "width": 250 - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum by (type) (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST', namespace=\"$NAMESPACE\"})*0.000005", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}} requests", - "refId": "A", - "step": 30 - }, - { - "expr": "sum (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST', namespace=\"$NAMESPACE\"})*0.000005", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "All PUT, COPY, POST, LIST", - "refId": "C", - "step": 30 - }, - { - "expr": "sum (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST', namespace=\"$NAMESPACE\"})*0.0000004", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "GET and all other", - "refId": "B" - }, - { - "expr": "sum by (type) (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST', namespace=\"$NAMESPACE\"})*0.0000004", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{type}} requests", - "refId": "D" - } - ], - "timeFrom": "1M", - "title": "S3 API Monthly Cost if on AWS", - "type": "timeseries" - }, - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 71 - }, - "id": 62, - "panels": [], - "title": "Volume Server", - "type": "row" - }, - { - "datasource": { - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 72 - }, - "id": 47, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, exported_instance))", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{exported_instance}}", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "average", - "refId": "C" - } - ], - "title": "Volume Server Request Duration 99th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short", - "unitScale": true - }, - "overrides": [ - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsZero", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - }, - { - "matcher": { - "id": "byValue", - "options": { - "op": "gte", - "reducer": "allIsNull", - "value": 0 - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": true, - "tooltip": true, - "viz": false - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 72 - }, - "id": 40, - "links": [], - "options": { - "legend": { - "calcs": [ - "sum" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(rate(SeaweedFS_volumeServer_request_total{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (type)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 4 - } - ], - "title": "Volume Server QPS", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 79 - }, - "id": 48, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(SeaweedFS_volumeServer_volumes{namespace=\"$NAMESPACE\"}) by (collection, type)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{collection}} {{type}}", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(SeaweedFS_volumeServer_volumes{namespace=\"$NAMESPACE\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Total", - "refId": "B" - } - ], - "title": "Volume Count", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 86 - }, - "id": 50, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$NAMESPACE\"}) by (collection, type)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{collection}} {{type}}", - "refId": "A" - }, - { - "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$NAMESPACE\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Total", - "refId": "B" - } - ], - "title": "Used Disk Space by Collection and Type", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 93 - }, - "id": 51, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$NAMESPACE\"}) by (exported_instance)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{exported_instance}}", - "refId": "A" - } - ], - "title": "Used Disk Space by Host", - "type": "timeseries" - }, - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 93 - }, - "id": 63, - "panels": [], - "title": "Filer Store", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 101 - }, - "id": 12, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (le, type))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B" - } - ], - "title": "Filer Store Request Duration 99th percentile", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 101 - }, - "id": 14, - "links": [], - "options": { - "legend": { - "calcs": [ - "mean", - "lastNotNull" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "expr": "sum(rate(SeaweedFS_filerStore_request_total{namespace=\"$NAMESPACE\"}[$__rate_interval])) by (type)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "B" - } - ], - "title": "Filer Store QPS", - "type": "timeseries" - }, - { - "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 108 - }, - "id": 64, - "panels": [], - "title": "Filer Instances", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 109 - }, - "id": 52, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "exemplar": true, - "expr": "go_memstats_alloc_bytes{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "bytes allocated", - "refId": "B" - }, - { - "exemplar": true, - "expr": "rate(go_memstats_alloc_bytes_total{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}[$__rate_interval])", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "alloc rate", - "refId": "A" - }, - { - "exemplar": true, - "expr": "go_memstats_stack_inuse_bytes{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "stack inuse", - "refId": "C" - }, - { - "exemplar": true, - "expr": "go_memstats_heap_inuse_bytes{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "heap inuse", - "refId": "D" - } - ], - "title": "Filer Go Memory Stats", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 109 - }, - "id": 54, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "exemplar": true, - "expr": "go_gc_duration_seconds{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{quantile}}", - "refId": "B" - } - ], - "title": "Filer Go GC duration quantiles", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none", - "unitScale": true - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 116 - }, - "id": 53, - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "10.3.1", - "targets": [ - { - "exemplar": true, - "expr": "go_goroutines{job=\"seaweedfs-filer\", namespace=\"$NAMESPACE\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{exported_instance}}", - "refId": "B" - } - ], - "title": "Filer Go Routines", - "type": "timeseries" - } - ], - "refresh": "", - "schemaVersion": 39, - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Prometheus", - "value": "PBFA97CFB590B2093" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": { - "selected": false, - "text": "mes", - "value": "mes" - }, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "label_values(SeaweedFS_master_is_leader,namespace)", - "hide": 0, - "includeAll": false, - "label": "Namespace", - "multi": false, - "name": "NAMESPACE", - "options": [], - "query": { - "qryType": 1, - "query": "label_values(SeaweedFS_master_is_leader,namespace)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 2, - "type": "query" - } - ] - }, - "time": { - "from": "now-1d", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "SeaweedFS", - "uid": "a24009d7-cbda-4443-a132-1cc1c4677304", - "version": 1, - "weekStart": "" -} diff --git a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-deployment.yaml b/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-deployment.yaml deleted file mode 100644 index 8700a8a69..000000000 --- a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-deployment.yaml +++ /dev/null @@ -1,446 +0,0 @@ -{{- if .Values.allInOne.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "seaweedfs.name" . }}-all-in-one - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: seaweedfs-all-in-one - {{- if .Values.allInOne.annotations }} - annotations: - {{- toYaml .Values.allInOne.annotations | nindent 4 }} - {{- end }} -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: seaweedfs-all-in-one - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: seaweedfs-all-in-one - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.allInOne.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.allInOne.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.allInOne.restartPolicy }} - {{- if .Values.allInOne.affinity }} - affinity: - {{ tpl .Values.allInOne.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.allInOne.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.allInOne.topologySpreadConstraints . | nindent 8 | trim }} - {{- end }} - {{- if .Values.allInOne.tolerations }} - tolerations: - {{- tpl .Values.allInOne.tolerations . | nindent 8 }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - terminationGracePeriodSeconds: 60 - enableServiceLinks: false - {{- if .Values.allInOne.priorityClassName }} - priorityClassName: {{ .Values.allInOne.priorityClassName | quote }} - {{- end }} - {{- if .Values.allInOne.serviceAccountName }} - serviceAccountName: {{ .Values.allInOne.serviceAccountName | quote }} - {{- end }} - {{- if .Values.allInOne.initContainers }} - initContainers: - {{- tpl .Values.allInOne.initContainers . | nindent 8 }} - {{- end }} - {{- if .Values.allInOne.podSecurityContext.enabled }} - securityContext: - {{- omit .Values.allInOne.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "master.image" . }} - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - {{- /* Determine default cluster alias and the corresponding env var keys to avoid conflicts */}} - {{- $envMerged := merge (.Values.global.extraEnvironmentVars | default dict) (.Values.allInOne.extraEnvironmentVars | default dict) }} - {{- $clusterDefault := default "sw" (index $envMerged "WEED_CLUSTER_DEFAULT") }} - {{- $clusterUpper := upper $clusterDefault }} - {{- $clusterMasterKey := printf "WEED_CLUSTER_%s_MASTER" $clusterUpper }} - {{- $clusterFilerKey := printf "WEED_CLUSTER_%s_FILER" $clusterUpper }} - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - {{- if .Values.allInOne.extraEnvironmentVars }} - {{- range $key, $value := .Values.allInOne.extraEnvironmentVars }} - {{- if and (ne $key $clusterMasterKey) (ne $key $clusterFilerKey) }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - {{- if and (ne $key $clusterMasterKey) (ne $key $clusterFilerKey) }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - # Inject computed cluster endpoints for the default cluster - - name: {{ $clusterMasterKey }} - value: {{ include "seaweedfs.cluster.masterAddress" . | quote }} - - name: {{ $clusterFilerKey }} - value: {{ include "seaweedfs.cluster.filerAddress" . | quote }} - command: - - "/bin/sh" - - "-ec" - - | - /usr/bin/weed \ - -v={{ .Values.global.loggingLevel }} \ - server \ - -dir=/data \ - -master \ - -volume \ - -ip=${POD_IP} \ - -ip.bind=0.0.0.0 \ - {{- if .Values.allInOne.idleTimeout }} - -idleTimeout={{ .Values.allInOne.idleTimeout }} \ - {{- end }} - {{- if .Values.allInOne.dataCenter }} - -dataCenter={{ .Values.allInOne.dataCenter }} \ - {{- end }} - {{- if .Values.allInOne.rack }} - -rack={{ .Values.allInOne.rack }} \ - {{- end }} - {{- if .Values.allInOne.whiteList }} - -whiteList={{ .Values.allInOne.whiteList }} \ - {{- end }} - {{- if .Values.allInOne.disableHttp }} - -disableHttp={{ .Values.allInOne.disableHttp }} \ - {{- end }} - {{- if and (.Values.volume.dataDirs) (index .Values.volume.dataDirs 0 "maxVolumes") }} - -volume.max={{ index .Values.volume.dataDirs 0 "maxVolumes" }} \ - {{- end }} - -master.port={{ .Values.master.port }} \ - {{- if .Values.global.enableReplication }} - -master.defaultReplication={{ .Values.global.replicationPlacement }} \ - {{- else }} - -master.defaultReplication={{ .Values.master.defaultReplication }} \ - {{- end }} - {{- if .Values.master.volumePreallocate }} - -master.volumePreallocate \ - {{- end }} - -master.volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ - {{- if .Values.master.garbageThreshold }} - -master.garbageThreshold={{ .Values.master.garbageThreshold }} \ - {{- end }} - -volume.port={{ .Values.volume.port }} \ - -volume.readMode={{ .Values.volume.readMode }} \ - {{- if .Values.volume.imagesFixOrientation }} - -volume.images.fix.orientation \ - {{- end }} - {{- if .Values.volume.index }} - -volume.index={{ .Values.volume.index }} \ - {{- end }} - {{- if .Values.volume.fileSizeLimitMB }} - -volume.fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \ - {{- end }} - -volume.minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \ - -volume.compactionMBps={{ .Values.volume.compactionMBps }} \ - {{- if .Values.allInOne.metricsPort }} - -metricsPort={{ .Values.allInOne.metricsPort }} \ - {{- else if .Values.master.metricsPort }} - -metricsPort={{ .Values.master.metricsPort }} \ - {{- end }} - -filer \ - -filer.port={{ .Values.filer.port }} \ - {{- if .Values.filer.disableDirListing }} - -filer.disableDirListing \ - {{- end }} - -filer.dirListLimit={{ .Values.filer.dirListLimit }} \ - {{- if .Values.global.enableReplication }} - -filer.defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \ - {{- else }} - -filer.defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \ - {{- end }} - {{- if .Values.filer.maxMB }} - -filer.maxMB={{ .Values.filer.maxMB }} \ - {{- end }} - {{- if .Values.filer.encryptVolumeData }} - -filer.encryptVolumeData \ - {{- end }} - {{- if .Values.filer.filerGroup}} - -filer.filerGroup={{ .Values.filer.filerGroup}} \ - {{- end }} - {{- if .Values.filer.rack }} - -filer.rack={{ .Values.filer.rack }} \ - {{- end }} - {{- if .Values.filer.dataCenter }} - -filer.dataCenter={{ .Values.filer.dataCenter }} \ - {{- end }} - {{- if .Values.allInOne.s3.enabled }} - -s3 \ - -s3.port={{ .Values.s3.port }} \ - {{- if .Values.s3.domainName }} - -s3.domainName={{ .Values.s3.domainName }} \ - {{- end }} - {{- if .Values.global.enableSecurity }} - {{- if .Values.s3.httpsPort }} - -s3.port.https={{ .Values.s3.httpsPort }} \ - {{- end }} - -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \ - -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \ - {{- end }} - {{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }} - -s3.allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \ - {{- end }} - {{- if .Values.s3.enableAuth }} - -s3.config=/etc/sw/s3/seaweedfs_s3_config \ - {{- end }} - {{- if .Values.s3.auditLogConfig }} - -s3.auditLogConfig=/etc/sw/s3/s3_auditLogConfig.json \ - {{- end }} - {{- end }} - {{- if .Values.allInOne.sftp.enabled }} - -sftp \ - -sftp.port={{ .Values.sftp.port }} \ - {{- if .Values.sftp.sshPrivateKey }} - -sftp.sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \ - {{- end }} - {{- if .Values.sftp.hostKeysFolder }} - -sftp.hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \ - {{- end }} - {{- if .Values.sftp.authMethods }} - -sftp.authMethods={{ .Values.sftp.authMethods }} \ - {{- end }} - {{- if .Values.sftp.maxAuthTries }} - -sftp.maxAuthTries={{ .Values.sftp.maxAuthTries }} \ - {{- end }} - {{- if .Values.sftp.bannerMessage }} - -sftp.bannerMessage="{{ .Values.sftp.bannerMessage }}" \ - {{- end }} - {{- if .Values.sftp.loginGraceTime }} - -sftp.loginGraceTime={{ .Values.sftp.loginGraceTime }} \ - {{- end }} - {{- if .Values.sftp.clientAliveInterval }} - -sftp.clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \ - {{- end }} - {{- if .Values.sftp.clientAliveCountMax }} - -sftp.clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \ - {{- end }} - -sftp.userStoreFile=/etc/sw/sftp/seaweedfs_sftp_config \ - {{- end }} - - volumeMounts: - - name: data - mountPath: /data - {{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }} - - name: config-s3-users - mountPath: /etc/sw/s3 - readOnly: true - {{- end }} - {{- if .Values.allInOne.sftp.enabled }} - - name: config-ssh - mountPath: /etc/sw/ssh - readOnly: true - - mountPath: /etc/sw/sftp - name: config-users - readOnly: true - {{- end }} - {{- if .Values.filer.notificationConfig }} - - name: notification-config - mountPath: /etc/seaweedfs/notification.toml - subPath: notification.toml - readOnly: true - {{- end }} - - name: master-config - mountPath: /etc/seaweedfs/master.toml - subPath: master.toml - readOnly: true - {{- if .Values.global.enableSecurity }} - - name: security-config - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - readOnly: true - - name: ca-cert - mountPath: /usr/local/share/ca-certificates/ca/ - readOnly: true - - name: master-cert - mountPath: /usr/local/share/ca-certificates/master/ - readOnly: true - - name: volume-cert - mountPath: /usr/local/share/ca-certificates/volume/ - readOnly: true - - name: filer-cert - mountPath: /usr/local/share/ca-certificates/filer/ - readOnly: true - - name: client-cert - mountPath: /usr/local/share/ca-certificates/client/ - readOnly: true - {{- end }} - {{ tpl .Values.allInOne.extraVolumeMounts . | nindent 12 }} - ports: - - containerPort: {{ .Values.master.port }} - name: swfs-mas - - containerPort: {{ .Values.master.grpcPort }} - name: swfs-mas-grpc - - containerPort: {{ .Values.volume.port }} - name: swfs-vol - - containerPort: {{ .Values.volume.grpcPort }} - name: swfs-vol-grpc - - containerPort: {{ .Values.filer.port }} - name: swfs-fil - - containerPort: {{ .Values.filer.grpcPort }} - name: swfs-fil-grpc - {{- if .Values.allInOne.s3.enabled }} - - containerPort: {{ .Values.s3.port }} - name: swfs-s3 - {{- if .Values.s3.httpsPort }} - - containerPort: {{ .Values.s3.httpsPort }} - name: swfs-s3-tls - {{- end }} - {{- end }} - {{- if .Values.allInOne.sftp.enabled }} - - containerPort: {{ .Values.sftp.port }} - name: swfs-sftp - {{- end }} - {{- if .Values.allInOne.metricsPort }} - - containerPort: {{ .Values.allInOne.metricsPort }} - name: server-metrics - {{- end }} - {{- if .Values.allInOne.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.allInOne.readinessProbe.httpGet.path }} - port: {{ .Values.master.port }} - scheme: {{ .Values.allInOne.readinessProbe.scheme }} - initialDelaySeconds: {{ .Values.allInOne.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.allInOne.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.allInOne.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.allInOne.readinessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.allInOne.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.allInOne.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.allInOne.livenessProbe.httpGet.path }} - port: {{ .Values.master.port }} - scheme: {{ .Values.allInOne.livenessProbe.scheme }} - initialDelaySeconds: {{ .Values.allInOne.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.allInOne.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.allInOne.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.allInOne.livenessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.allInOne.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.allInOne.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.allInOne.containerSecurityContext.enabled }} - securityContext: - {{- omit .Values.allInOne.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.allInOne.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.allInOne.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - - name: data - {{- if eq .Values.allInOne.data.type "hostPath" }} - hostPath: - path: {{ .Values.allInOne.data.hostPathPrefix }}/seaweedfs-all-in-one-data/ - type: DirectoryOrCreate - {{- else if eq .Values.allInOne.data.type "persistentVolumeClaim" }} - persistentVolumeClaim: - claimName: {{ .Values.allInOne.data.claimName }} - {{- else if eq .Values.allInOne.data.type "emptyDir" }} - emptyDir: {} - {{- end }} - {{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }} - - name: config-s3-users - secret: - defaultMode: 420 - secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }} - {{- end }} - {{- if .Values.allInOne.sftp.enabled }} - - name: config-ssh - secret: - defaultMode: 420 - secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) .Values.sftp.existingSshConfigSecret }} - - name: config-users - secret: - defaultMode: 420 - secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) .Values.sftp.existingConfigSecret }} - {{- end }} - {{- if .Values.filer.notificationConfig }} - - name: notification-config - configMap: - name: {{ template "seaweedfs.name" . }}-notification-config - {{- end }} - - name: master-config - configMap: - name: {{ template "seaweedfs.name" . }}-master-config - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.allInOne.extraVolumes . | nindent 8 }} - {{- if .Values.allInOne.nodeSelector }} - nodeSelector: - {{ tpl .Values.allInOne.nodeSelector . | nindent 8 }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-pvc.yaml b/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-pvc.yaml deleted file mode 100644 index 49ac20148..000000000 --- a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-pvc.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.allInOne.enabled (eq .Values.allInOne.data.type "persistentVolumeClaim") }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ .Values.allInOne.data.claimName }} - labels: - app.kubernetes.io/component: seaweedfs-all-in-one - {{- if .Values.allInOne.annotations }} - annotations: - {{- toYaml .Values.allInOne.annotations | nindent 4 }} - {{- end }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.allInOne.data.size }} - {{- if .Values.allInOne.data.storageClass }} - storageClassName: {{ .Values.allInOne.data.storageClass }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-service.yml b/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-service.yml deleted file mode 100644 index 14076a9c3..000000000 --- a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-service.yml +++ /dev/null @@ -1,83 +0,0 @@ -{{- if .Values.allInOne.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "seaweedfs.name" . }}-all-in-one - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: seaweedfs-all-in-one - {{- if .Values.allInOne.service.annotations }} - annotations: - {{- toYaml .Values.allInOne.service.annotations | nindent 4 }} - {{- end }} -spec: - internalTrafficPolicy: {{ .Values.allInOne.service.internalTrafficPolicy | default "Cluster" }} - ports: - # Master ports - - name: "swfs-master" - port: {{ .Values.master.port }} - targetPort: {{ .Values.master.port }} - protocol: TCP - - name: "swfs-master-grpc" - port: {{ .Values.master.grpcPort }} - targetPort: {{ .Values.master.grpcPort }} - protocol: TCP - - # Volume ports - - name: "swfs-volume" - port: {{ .Values.volume.port }} - targetPort: {{ .Values.volume.port }} - protocol: TCP - - name: "swfs-volume-grpc" - port: {{ .Values.volume.grpcPort }} - targetPort: {{ .Values.volume.grpcPort }} - protocol: TCP - - # Filer ports - - name: "swfs-filer" - port: {{ .Values.filer.port }} - targetPort: {{ .Values.filer.port }} - protocol: TCP - - name: "swfs-filer-grpc" - port: {{ .Values.filer.grpcPort }} - targetPort: {{ .Values.filer.grpcPort }} - protocol: TCP - - # S3 ports (if enabled) - {{- if .Values.allInOne.s3.enabled }} - - name: "swfs-s3" - port: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} - targetPort: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} - protocol: TCP - {{- if and .Values.allInOne.s3.enabled .Values.s3.httpsPort }} - - name: "swfs-s3-tls" - port: {{ .Values.s3.httpsPort }} - targetPort: {{ .Values.s3.httpsPort }} - protocol: TCP - {{- end }} - {{- end }} - - # SFTP ports (if enabled) - {{- if .Values.allInOne.sftp.enabled }} - - name: "swfs-sftp" - port: {{ .Values.sftp.port }} - targetPort: {{ .Values.sftp.port }} - protocol: TCP - {{- end }} - - # Server metrics port (single metrics endpoint for all services) - {{- if .Values.allInOne.metricsPort }} - - name: "server-metrics" - port: {{ .Values.allInOne.metricsPort }} - targetPort: {{ .Values.allInOne.metricsPort }} - protocol: TCP - {{- end }} - - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: seaweedfs-all-in-one -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-servicemonitor.yaml deleted file mode 100644 index 0f9ce392c..000000000 --- a/k8s/charts/seaweedfs/templates/all-in-one/all-in-one-servicemonitor.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if .Values.allInOne.enabled }} -{{- if .Values.global.monitoring.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" . }}-all-in-one - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: all-in-one - {{- with .Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - {{- if .Values.allInOne.metricsPort }} - - interval: 30s - port: server-metrics - scrapeTimeout: 5s - {{- end }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: seaweedfs-all-in-one -{{- end }} -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/cert/ca-cert.yaml b/k8s/charts/seaweedfs/templates/cert/ca-cert.yaml deleted file mode 100644 index 0fd6615e1..000000000 --- a/k8s/charts/seaweedfs/templates/cert/ca-cert.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} -kind: Certificate -metadata: - name: {{ template "seaweedfs.name" . }}-ca-cert - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - commonName: "{{ template "seaweedfs.name" . }}-root-ca" - isCA: true - issuerRef: - name: {{ template "seaweedfs.name" . }}-issuer - kind: Issuer -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cert/cert-caissuer.yaml b/k8s/charts/seaweedfs/templates/cert/cert-caissuer.yaml deleted file mode 100644 index 72de126e1..000000000 --- a/k8s/charts/seaweedfs/templates/cert/cert-caissuer.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} -kind: Issuer -metadata: - name: {{ template "seaweedfs.name" . }}-ca-issuer - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - ca: - secretName: {{ template "seaweedfs.name" . }}-ca-cert -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cert/cert-issuer.yaml b/k8s/charts/seaweedfs/templates/cert/cert-issuer.yaml deleted file mode 100644 index 9f243d07c..000000000 --- a/k8s/charts/seaweedfs/templates/cert/cert-issuer.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} -kind: Issuer -metadata: - name: {{ template "seaweedfs.name" . }}-issuer - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - selfSigned: {} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cosi/cosi-bucket-class.yaml b/k8s/charts/seaweedfs/templates/cosi/cosi-bucket-class.yaml deleted file mode 100644 index e5503abd8..000000000 --- a/k8s/charts/seaweedfs/templates/cosi/cosi-bucket-class.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.cosi.enabled .Values.cosi.bucketClassName }} ---- -kind: BucketClass -apiVersion: objectstorage.k8s.io/v1alpha1 -metadata: - name: {{ .Values.cosi.bucketClassName }} -driverName: {{ .Values.cosi.driverName }} -deletionPolicy: Delete ---- -kind: BucketAccessClass -apiVersion: objectstorage.k8s.io/v1alpha1 -metadata: - name: {{ .Values.cosi.bucketClassName }} -driverName: {{ .Values.cosi.driverName }} -authenticationType: KEY -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cosi/cosi-cluster-role.yaml b/k8s/charts/seaweedfs/templates/cosi/cosi-cluster-role.yaml deleted file mode 100644 index 75d3ec32c..000000000 --- a/k8s/charts/seaweedfs/templates/cosi/cosi-cluster-role.yaml +++ /dev/null @@ -1,69 +0,0 @@ -{{- if .Values.cosi.enabled }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -rules: -- apiGroups: ["objectstorage.k8s.io"] - resources: - - "buckets" - - "bucketaccesses" - - "bucketclaims" - - "bucketaccessclasses" - - "buckets/status" - - "bucketaccesses/status" - - "bucketclaims/status" - - "bucketaccessclasses/status" - verbs: - - "get" - - "list" - - "watch" - - "update" - - "create" - - "delete" -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: - - "get" - - "watch" - - "list" - - "delete" - - "update" - - "create" -- apiGroups: [""] - resources: - - "secrets" - - "events" - verbs: - - "get" - - "list" - - "watch" - - "update" - - "create" - - "delete" - - "patch" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cosi/cosi-deployment.yaml b/k8s/charts/seaweedfs/templates/cosi/cosi-deployment.yaml deleted file mode 100644 index 813af850d..000000000 --- a/k8s/charts/seaweedfs/templates/cosi/cosi-deployment.yaml +++ /dev/null @@ -1,216 +0,0 @@ -{{- if .Values.cosi.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "seaweedfs.name" . }}-objectstorage-provisioner - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: objectstorage-provisioner -spec: - replicas: {{ .Values.cosi.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: objectstorage-provisioner - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: objectstorage-provisioner - {{ with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.cosi.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{ with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.cosi.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.cosi.restartPolicy }} - {{- if .Values.cosi.affinity }} - affinity: - {{ tpl .Values.cosi.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.cosi.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.cosi.topologySpreadConstraint . | nindent 8 | trim }} - {{- end }} - {{- if .Values.cosi.tolerations }} - tolerations: - {{ tpl .Values.cosi.tolerations . | nindent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - terminationGracePeriodSeconds: 10 - {{- if .Values.cosi.priorityClassName }} - priorityClassName: {{ .Values.cosi.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - serviceAccountName: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - {{- if .Values.cosi.initContainers }} - initContainers: - {{ tpl .Values.cosi.initContainers . | nindent 8 | trim }} - {{- end }} - {{- if .Values.cosi.podSecurityContext.enabled }} - securityContext: {{- omit .Values.cosi.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs-cosi-driver - image: "{{ .Values.cosi.image }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - - name: DRIVERNAME - value: "{{ .Values.cosi.driverName }}" - - name: ENDPOINT - {{- if .Values.cosi.endpoint }} - value: "{{ .Values.cosi.endpoint }}" - {{- else if .Values.s3.ingress.enabled }} - value: "{{ printf "https://%s" .Values.s3.ingress.host }}" - {{- else if .Values.s3.enabled }} - value: "{{ printf "https://%s-s3.%s.svc" (include "seaweedfs.name" .) .Release.Namespace }}" - {{- else }} - value: "{{ printf "https://%s-filer.%s.svc" (include "seaweedfs.name" .) .Release.Namespace }}" - {{- end }} - {{- with .Values.cosi.region }} - - name: REGION - value: "{{ . }}" - {{- end }} - - name: SEAWEEDFS_FILER - value: "{{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.grpcPort }}" - {{- if .Values.global.enableSecurity }} - - name: WEED_GRPC_CLIENT_KEY - value: /usr/local/share/ca-certificates/client/tls.key - - name: WEED_GRPC_CLIENT_CERT - value: /usr/local/share/ca-certificates/client/tls.crt - - name: WEED_GRPC_CA - value: /usr/local/share/ca-certificates/client/ca.crt - {{- end }} - {{- if .Values.cosi.extraEnvironmentVars }} - {{- range $key, $value := .Values.cosi.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - volumeMounts: - - mountPath: /var/lib/cosi - name: socket - {{- if .Values.cosi.enableAuth }} - - mountPath: /etc/sw - name: config-users - readOnly: true - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl .Values.cosi.extraVolumeMounts . | nindent 12 | trim }} - {{- with .Values.cosi.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - - name: seaweedfs-cosi-sidecar - image: "{{ .Values.cosi.sidecar.image }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - args: - - {{ printf "--v=%s" (default "5" .Values.cosi.sidecar.logLevel) }} - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - mountPath: /var/lib/cosi - name: socket - {{- with .Values.cosi.sidecar.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.cosi.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.cosi.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.cosi.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.cosi.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - - name: socket - emptyDir: {} - {{- if .Values.cosi.enableAuth }} - - name: config-users - secret: - defaultMode: 420 - {{- if .Values.cosi.existingConfigSecret }} - secretName: {{ .Values.cosi.existingConfigSecret }} - {{- else }} - secretName: seaweedfs-s3-secret - {{- end }} - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.cosi.extraVolumes . | indent 8 | trim }} - {{- if .Values.cosi.nodeSelector }} - nodeSelector: - {{ tpl .Values.cosi.nodeSelector . | indent 8 | trim }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cosi/cosi-service-account.yaml b/k8s/charts/seaweedfs/templates/cosi/cosi-service-account.yaml deleted file mode 100644 index 78227fdeb..000000000 --- a/k8s/charts/seaweedfs/templates/cosi/cosi-service-account.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.cosi.enabled }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.global.serviceAccountName }}-objectstorage-provisioner - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -automountServiceAccountToken: {{ .Values.global.automountServiceAccountToken }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer/filer-ingress.yaml b/k8s/charts/seaweedfs/templates/filer/filer-ingress.yaml deleted file mode 100644 index 9ce15ae90..000000000 --- a/k8s/charts/seaweedfs/templates/filer/filer-ingress.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.filer.enabled }} -{{- if .Values.filer.ingress.enabled }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: ingress-{{ template "seaweedfs.name" . }}-filer - namespace: {{ .Release.Namespace }} - {{- with .Values.filer.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer -spec: - ingressClassName: {{ .Values.filer.ingress.className | quote }} - tls: - {{ .Values.filer.ingress.tls | default list | toYaml | nindent 6}} - rules: - - http: - paths: - - path: {{ .Values.filer.ingress.path | quote }} - pathType: {{ .Values.filer.ingress.pathType | quote }} - backend: -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} - service: - name: {{ template "seaweedfs.name" . }}-filer - port: - number: {{ .Values.filer.port }} - #name: -{{- else }} - serviceName: {{ template "seaweedfs.name" . }}-filer - servicePort: {{ .Values.filer.port }} -{{- end }} -{{- if .Values.filer.ingress.host }} - host: {{ .Values.filer.ingress.host }} -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer/filer-service.yaml b/k8s/charts/seaweedfs/templates/filer/filer-service.yaml deleted file mode 100644 index 67436972e..000000000 --- a/k8s/charts/seaweedfs/templates/filer/filer-service.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if .Values.filer.enabled }} -apiVersion: v1 -kind: Service -metadata: - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - name: {{ template "seaweedfs.name" . }}-filer - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer -{{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} -{{- end }} -spec: - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: "swfs-filer" - port: {{ .Values.filer.port }} - targetPort: {{ .Values.filer.port }} - protocol: TCP - - name: "swfs-filer-grpc" - port: {{ .Values.filer.grpcPort }} - targetPort: {{ .Values.filer.grpcPort }} - protocol: TCP - {{- if .Values.filer.s3.enabled }} - - name: "swfs-s3" - port: {{ .Values.filer.s3.port }} - targetPort: {{ .Values.filer.s3.port }} - protocol: TCP - {{- if .Values.filer.s3.httpsPort }} - - name: "swfs-s3-tls" - port: {{ .Values.filer.s3.httpsPort }} - targetPort: {{ .Values.filer.s3.httpsPort }} - protocol: TCP - {{- end }} - {{- end }} - {{- if .Values.filer.metricsPort }} - - name: "metrics" - port: {{ .Values.filer.metricsPort }} - targetPort: {{ .Values.filer.metricsPort }} - protocol: TCP - {{- end }} - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: filer -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer/filer-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/filer/filer-servicemonitor.yaml deleted file mode 100644 index e26c04b1f..000000000 --- a/k8s/charts/seaweedfs/templates/filer/filer-servicemonitor.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.filer.enabled }} -{{- if .Values.filer.metricsPort }} -{{- if .Values.global.monitoring.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" . }}-filer - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer - {{- with .Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} -{{- end }} -spec: - endpoints: - - interval: 30s - port: metrics - scrapeTimeout: 5s - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: filer -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer/filer-statefulset.yaml b/k8s/charts/seaweedfs/templates/filer/filer-statefulset.yaml deleted file mode 100644 index 5c1a0950b..000000000 --- a/k8s/charts/seaweedfs/templates/filer/filer-statefulset.yaml +++ /dev/null @@ -1,442 +0,0 @@ -{{- if .Values.filer.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "seaweedfs.name" . }}-filer - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer -{{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} -{{- end }} -spec: - serviceName: {{ template "seaweedfs.name" . }}-filer - podManagementPolicy: {{ .Values.filer.podManagementPolicy }} - replicas: {{ .Values.filer.replicas }} - {{- if (gt (int .Values.filer.updatePartition) 0) }} - updateStrategy: - type: RollingUpdate - rollingUpdate: - partition: {{ .Values.filer.updatePartition }} - {{- end }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.filer.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.filer.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.filer.s3.existingConfigSecret }} - {{- $configSecret := (lookup "v1" "Secret" .Release.Namespace .Values.filer.s3.existingConfigSecret) | default dict }} - checksum/s3config: {{ $configSecret | toYaml | sha256sum }} - {{- else }} - checksum/s3config: {{ include (print .Template.BasePath "/s3/s3-secret.yaml") . | sha256sum }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} - {{- if .Values.filer.affinity }} - affinity: - {{ tpl .Values.filer.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.filer.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.filer.topologySpreadConstraints . | nindent 8 | trim }} - {{- end }} - {{- if .Values.filer.tolerations }} - tolerations: - {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - serviceAccountName: {{ .Values.filer.serviceAccountName | default .Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration - terminationGracePeriodSeconds: 60 - {{- if .Values.filer.priorityClassName }} - priorityClassName: {{ .Values.filer.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - {{- if .Values.filer.initContainers }} - initContainers: - {{ tpl .Values.filer.initContainers . | nindent 8 | trim }} - {{- end }} - {{- if .Values.filer.podSecurityContext.enabled }} - securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "filer.image" . }} - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: WEED_MYSQL_USERNAME - valueFrom: - secretKeyRef: - name: secret-seaweedfs-db - key: user - optional: true - - name: WEED_MYSQL_PASSWORD - valueFrom: - secretKeyRef: - name: secret-seaweedfs-db - key: password - optional: true - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - {{- if .Values.filer.extraEnvironmentVars }} - {{- range $key, $value := .Values.filer.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.filer.secretExtraEnvironmentVars }} - {{- range $key, $value := .Values.filer.secretExtraEnvironmentVars }} - - name: {{ $key }} - valueFrom: {{ toYaml $value | nindent 16 }} - {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - exec /usr/bin/weed \ - {{- if or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir") }} - -logdir=/logs \ - {{- else }} - -logtostderr=true \ - {{- end }} - {{- if .Values.filer.loggingOverrideLevel }} - -v={{ .Values.filer.loggingOverrideLevel }} \ - {{- else }} - -v={{ .Values.global.loggingLevel }} \ - {{- end }} - filer \ - -port={{ .Values.filer.port }} \ - {{- if .Values.filer.metricsPort }} - -metricsPort={{ .Values.filer.metricsPort }} \ - {{- end }} - {{- if .Values.filer.metricsIp }} - -metricsIp={{ .Values.filer.metricsIp }} \ - {{- end }} - {{- if .Values.filer.redirectOnRead }} - -redirectOnRead \ - {{- end }} - {{- if .Values.filer.disableHttp }} - -disableHttp \ - {{- end }} - {{- if .Values.filer.disableDirListing }} - -disableDirListing \ - {{- end }} - -dirListLimit={{ .Values.filer.dirListLimit }} \ - {{- if .Values.global.enableReplication }} - -defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \ - {{- else }} - -defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \ - {{- end }} - {{- if .Values.filer.disableDirListing }} - -disableDirListing \ - {{- end }} - {{- if .Values.filer.maxMB }} - -maxMB={{ .Values.filer.maxMB }} \ - {{- end }} - {{- if .Values.filer.encryptVolumeData }} - -encryptVolumeData \ - {{- end }} - -ip=${POD_IP} \ - -ip.bind={{ .Values.filer.ipBind }} \ - {{- if .Values.filer.filerGroup}} - -filerGroup={{ .Values.filer.filerGroup}} \ - {{- end }} - {{- if .Values.filer.rack }} - -rack={{ .Values.filer.rack }} \ - {{- end }} - {{- if .Values.filer.dataCenter }} - -dataCenter={{ .Values.filer.dataCenter }} \ - {{- end }} - {{- if .Values.filer.s3.enabled }} - -s3 \ - -s3.port={{ .Values.filer.s3.port }} \ - {{- if .Values.filer.s3.domainName }} - -s3.domainName={{ .Values.filer.s3.domainName }} \ - {{- end }} - {{- if .Values.global.enableSecurity }} - {{- if .Values.filer.s3.httpsPort }} - -s3.port.https={{ .Values.filer.s3.httpsPort }} \ - {{- end }} - -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \ - -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \ - {{- end }} - {{- if eq (typeOf .Values.filer.s3.allowEmptyFolder) "bool" }} - -s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \ - {{- end }} - {{- if .Values.filer.s3.enableAuth }} - -s3.config=/etc/sw/seaweedfs_s3_config \ - {{- end }} - {{- if .Values.filer.s3.auditLogConfig }} - -s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \ - {{- end }} - {{- end }} - -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \ - {{- range .Values.filer.extraArgs }} - {{ . }} \ - {{- end }} - volumeMounts: - {{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }} - - name: seaweedfs-filer-log-volume - mountPath: "/logs/" - {{- end }} - {{- if .Values.filer.s3.enableAuth }} - - name: config-users - mountPath: /etc/sw - readOnly: true - {{- end }} - {{- if (or .Values.filer.enablePVC (or (eq .Values.filer.data.type "hostPath") (eq .Values.filer.data.type "persistentVolumeClaim") (eq .Values.filer.data.type "emptyDir"))) }} - - name: data-filer - mountPath: /data - {{- end }} - {{- if .Values.filer.notificationConfig }} - - name: notification-config - readOnly: true - mountPath: /etc/seaweedfs/notification.toml - subPath: notification.toml - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }} - ports: - - containerPort: {{ .Values.filer.port }} - name: swfs-filer - - containerPort: {{ .Values.filer.metricsPort }} - name: metrics - - containerPort: {{ .Values.filer.grpcPort }} - #name: swfs-filer-grpc - {{- if .Values.filer.s3.enabled }} - - containerPort: {{ .Values.filer.s3.port }} - name: swfs-s3 - {{- if .Values.filer.s3.httpsPort }} - - containerPort: {{ .Values.filer.s3.httpsPort }} - name: swfs-s3-tls - {{- end }} - {{- end }} - {{- if .Values.filer.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.filer.readinessProbe.httpGet.path }} - port: {{ .Values.filer.port }} - scheme: {{ .Values.filer.readinessProbe.scheme }} - initialDelaySeconds: {{ .Values.filer.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.filer.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.filer.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.filer.readinessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.filer.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.filer.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.filer.livenessProbe.httpGet.path }} - port: {{ .Values.filer.port }} - scheme: {{ .Values.filer.livenessProbe.scheme }} - initialDelaySeconds: {{ .Values.filer.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.filer.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.filer.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.filer.livenessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.filer.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.filer.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.filer.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.filer.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.filer.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - {{- if eq .Values.filer.logs.type "hostPath" }} - - name: seaweedfs-filer-log-volume - hostPath: - path: {{ .Values.filer.logs.hostPathPrefix }}/logs/seaweedfs/filer - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.filer.logs.type "existingClaim" }} - - name: seaweedfs-filer-log-volume - persistentVolumeClaim: - claimName: {{ .Values.filer.logs.claimName }} - {{- end }} - {{- if eq .Values.filer.logs.type "emptyDir" }} - - name: seaweedfs-filer-log-volume - emptyDir: {} - {{- end }} - {{- if eq .Values.filer.data.type "hostPath" }} - - name: data-filer - hostPath: - path: {{ .Values.filer.data.hostPathPrefix }}/filer_store - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.filer.data.type "existingClaim" }} - - name: data-filer - persistentVolumeClaim: - claimName: {{ .Values.filer.data.claimName }} - {{- end }} - {{- if eq .Values.filer.data.type "emptyDir" }} - - name: data-filer - emptyDir: {} - {{- end }} - - name: db-schema-config-volume - configMap: - name: seaweedfs-db-init-config - {{- if and .Values.filer.s3.enabled .Values.filer.s3.enableAuth }} - - name: config-users - secret: - defaultMode: 420 - {{- if .Values.filer.s3.existingConfigSecret }} - secretName: {{ .Values.filer.s3.existingConfigSecret }} - {{- else }} - secretName: seaweedfs-s3-secret - {{- end }} - {{- end }} - {{- if .Values.filer.notificationConfig }} - - name: notification-config - configMap: - name: {{ template "seaweedfs.name" . }}-notification-config - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} - {{- if .Values.filer.nodeSelector }} - nodeSelector: - {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }} - {{- end }} - {{- if and (.Values.filer.enablePVC) (eq .Values.filer.data.type "persistentVolumeClaim") }} - # DEPRECATION: Deprecate in favor of filer.data section below - volumeClaimTemplates: - - metadata: - name: data-filer - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ .Values.filer.storage }} - {{- if .Values.filer.storageClass }} - storageClassName: {{ .Values.filer.storageClass }} - {{- end }} - {{- end }} - {{- $pvc_exists := include "filer.pvc_exists" . -}} - {{- if $pvc_exists }} - volumeClaimTemplates: - {{- if eq .Values.filer.data.type "persistentVolumeClaim" }} - - metadata: - name: data-filer - {{- with .Values.filer.data.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.filer.data.storageClass }} - resources: - requests: - storage: {{ .Values.filer.data.size }} - {{- end }} - {{- if eq .Values.filer.logs.type "persistentVolumeClaim" }} - - metadata: - name: seaweedfs-filer-log-volume - {{- with .Values.filer.logs.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.filer.logs.storageClass }} - resources: - requests: - storage: {{ .Values.filer.logs.size }} - {{- end }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master/master-configmap.yaml b/k8s/charts/seaweedfs/templates/master/master-configmap.yaml deleted file mode 100644 index b3d7fe7d9..000000000 --- a/k8s/charts/seaweedfs/templates/master/master-configmap.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if or .Values.master.enabled .Values.allInOne.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "seaweedfs.name" . }}-master-config - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Values.master.annotations }} - annotations: - {{- toYaml .Values.master.annotations | nindent 4 }} -{{- end }} -data: - master.toml: |- - {{ .Values.master.config | nindent 4 }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master/master-ingress.yaml b/k8s/charts/seaweedfs/templates/master/master-ingress.yaml deleted file mode 100644 index ac1cb3392..000000000 --- a/k8s/charts/seaweedfs/templates/master/master-ingress.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.master.enabled }} -{{- if .Values.master.ingress.enabled }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: ingress-{{ template "seaweedfs.name" . }}-master - namespace: {{ .Release.Namespace }} - {{- with .Values.master.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master -spec: - ingressClassName: {{ .Values.master.ingress.className | quote }} - tls: - {{ .Values.master.ingress.tls | default list | toYaml | nindent 6 }} - rules: - - http: - paths: - - path: {{ .Values.master.ingress.path | quote }} - pathType: {{ .Values.master.ingress.pathType | quote }} - backend: -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} - service: - name: {{ template "seaweedfs.name" . }}-master - port: - number: {{ .Values.master.port }} - #name: -{{- else }} - serviceName: {{ template "seaweedfs.name" . }}-master - servicePort: {{ .Values.master.port }} -{{- end }} -{{- if .Values.filer.ingress.host }} - host: {{ .Values.master.ingress.host }} -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master/master-service.yaml b/k8s/charts/seaweedfs/templates/master/master-service.yaml deleted file mode 100644 index 0086b84c1..000000000 --- a/k8s/charts/seaweedfs/templates/master/master-service.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.master.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "seaweedfs.name" . }}-master - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: master - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -{{- if .Values.master.annotations }} - {{- toYaml .Values.master.annotations | nindent 4 }} -{{- end }} -spec: - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: "swfs-master" - port: {{ .Values.master.port }} - targetPort: {{ .Values.master.port }} - protocol: TCP - - name: "swfs-master-grpc" - port: {{ .Values.master.grpcPort }} - targetPort: {{ .Values.master.grpcPort }} - protocol: TCP - {{- if .Values.master.metricsPort }} - - name: "metrics" - port: {{ .Values.master.metricsPort }} - targetPort: {{ .Values.master.metricsPort }} - protocol: TCP - {{- end }} - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: master -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master/master-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/master/master-servicemonitor.yaml deleted file mode 100644 index 7804e84ae..000000000 --- a/k8s/charts/seaweedfs/templates/master/master-servicemonitor.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.master.enabled }} -{{- if .Values.master.metricsPort }} -{{- if .Values.global.monitoring.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" . }}-master - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master - {{- with .Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if .Values.master.annotations }} - annotations: - {{- toYaml .Values.master.annotations | nindent 4 }} -{{- end }} -spec: - endpoints: - - interval: 30s - port: metrics - scrapeTimeout: 5s - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: master -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/master/master-statefulset.yaml b/k8s/charts/seaweedfs/templates/master/master-statefulset.yaml deleted file mode 100644 index 01387fc91..000000000 --- a/k8s/charts/seaweedfs/templates/master/master-statefulset.yaml +++ /dev/null @@ -1,358 +0,0 @@ -{{- if .Values.master.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "seaweedfs.name" . }}-master - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master -{{- if .Values.master.annotations }} - annotations: - {{- toYaml .Values.master.annotations | nindent 4 }} -{{- end }} -spec: - serviceName: {{ template "seaweedfs.name" . }}-master - podManagementPolicy: {{ .Values.master.podManagementPolicy }} - replicas: {{ .Values.master.replicas }} - {{- if (gt (int .Values.master.updatePartition) 0) }} - updateStrategy: - type: RollingUpdate - rollingUpdate: - partition: {{ .Values.master.updatePartition }} - {{- end }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master - {{ with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.master.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{ with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.master.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} - {{- if .Values.master.affinity }} - affinity: - {{ tpl .Values.master.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.master.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.master.topologySpreadConstraints . | nindent 8 | trim }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: - {{ tpl .Values.master.tolerations . | nindent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - terminationGracePeriodSeconds: 60 - {{- if .Values.master.priorityClassName }} - priorityClassName: {{ .Values.master.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - {{- if .Values.global.createClusterRole }} - serviceAccountName: {{ .Values.master.serviceAccountName | default .Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration - {{- end }} - {{- if .Values.master.initContainers }} - initContainers: - {{ tpl .Values.master.initContainers . | nindent 8 | trim }} - {{- end }} - {{- if .Values.master.podSecurityContext.enabled }} - securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "master.image" . }} - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - {{- if .Values.master.extraEnvironmentVars }} - {{- range $key, $value := .Values.master.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - exec /usr/bin/weed \ - {{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }} - -logdir=/logs \ - {{- else }} - -logtostderr=true \ - {{- end }} - {{- if .Values.master.loggingOverrideLevel }} - -v={{ .Values.master.loggingOverrideLevel }} \ - {{- else }} - -v={{ .Values.global.loggingLevel }} \ - {{- end }} - master \ - -port={{ .Values.master.port }} \ - -mdir=/data \ - -ip.bind={{ .Values.master.ipBind }} \ - {{- if .Values.global.enableReplication }} - -defaultReplication={{ .Values.global.replicationPlacement }} \ - {{- else }} - -defaultReplication={{ .Values.master.defaultReplication }} \ - {{- end }} - {{- if .Values.master.volumePreallocate }} - -volumePreallocate \ - {{- end }} - {{- if .Values.global.monitoring.enabled }} - {{- if and .Values.global.monitoring.gatewayHost .Values.global.monitoring.gatewayPort }} - -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ - {{- if .Values.master.metricsIntervalSec }} - -metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \ - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.master.metricsPort }} - -metricsPort={{ .Values.master.metricsPort }} \ - {{- end }} - {{- if .Values.master.metricsIp }} - -metricsIp={{ .Values.master.metricsIp }} \ - {{- end }} - -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ - {{- if .Values.master.disableHttp }} - -disableHttp \ - {{- end }} - {{- if .Values.master.resumeState }} - -resumeState \ - {{- end }} - {{- if .Values.master.raftHashicorp }} - -raftHashicorp \ - {{- end }} - {{- if .Values.master.raftBootstrap }} - -raftBootstrap \ - {{- end }} - {{- if .Values.master.electionTimeout }} - -electionTimeout={{ .Values.master.electionTimeout }} \ - {{- end }} - {{- if .Values.master.heartbeatInterval }} - -heartbeatInterval={{ .Values.master.heartbeatInterval }} \ - {{- end }} - {{- if .Values.master.garbageThreshold }} - -garbageThreshold={{ .Values.master.garbageThreshold }} \ - {{- end }} - -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \ - -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} \ - {{- range .Values.master.extraArgs }} - {{ . }} \ - {{- end }} - volumeMounts: - - name : data-{{ .Release.Namespace }} - mountPath: /data - {{- if or (eq .Values.master.logs.type "hostPath") (eq .Values.master.logs.type "persistentVolumeClaim") (eq .Values.master.logs.type "emptyDir") }} - - name: seaweedfs-master-log-volume - mountPath: "/logs/" - {{- end }} - - name: master-config - readOnly: true - mountPath: /etc/seaweedfs/master.toml - subPath: master.toml - {{- if .Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }} - ports: - - containerPort: {{ .Values.master.port }} - name: swfs-master - {{- if and .Values.global.monitoring.enabled .Values.master.metricsPort }} - - containerPort: {{ .Values.master.metricsPort }} - name: metrics - {{- end }} - - containerPort: {{ .Values.master.grpcPort }} - #name: swfs-master-grpc - {{- if .Values.master.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.master.readinessProbe.httpGet.path }} - port: {{ .Values.master.port }} - scheme: {{ .Values.master.readinessProbe.scheme }} - initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.master.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.master.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.master.livenessProbe.httpGet.path }} - port: {{ .Values.master.port }} - scheme: {{ .Values.master.livenessProbe.scheme }} - initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.master.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.master.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.master.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.master.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - {{- if eq .Values.master.logs.type "hostPath" }} - - name: seaweedfs-master-log-volume - hostPath: - path: {{ .Values.master.logs.hostPathPrefix }}/logs/seaweedfs/master - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.master.logs.type "existingClaim" }} - - name: seaweedfs-master-log-volume - persistentVolumeClaim: - claimName: {{ .Values.master.logs.claimName }} - {{- end }} - {{- if eq .Values.master.logs.type "emptyDir" }} - - name: seaweedfs-master-log-volume - emptyDir: {} - {{- end }} - {{- if eq .Values.master.data.type "hostPath" }} - - name: data-{{ .Release.Namespace }} - hostPath: - path: {{ .Values.master.data.hostPathPrefix }}/seaweed-master/ - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.master.data.type "existingClaim" }} - - name: data-{{ .Release.Namespace }} - persistentVolumeClaim: - claimName: {{ .Values.master.data.claimName }} - {{- end }} - {{- if eq .Values.master.data.type "emptyDir" }} - - name: data-{{ .Release.Namespace }} - emptyDir: {} - {{- end }} - - name: master-config - configMap: - name: {{ template "seaweedfs.name" . }}-master-config - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} - {{- if .Values.master.nodeSelector }} - nodeSelector: - {{ tpl .Values.master.nodeSelector . | indent 8 | trim }} - {{- end }} - {{- $pvc_exists := include "master.pvc_exists" . -}} - {{- if $pvc_exists }} - volumeClaimTemplates: - {{- if eq .Values.master.data.type "persistentVolumeClaim"}} - - metadata: - name: data-{{ .Release.Namespace }} - {{- with .Values.master.data.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.master.data.storageClass }} - resources: - requests: - storage: {{ .Values.master.data.size }} - {{- end }} - {{- if eq .Values.master.logs.type "persistentVolumeClaim"}} - - metadata: - name: seaweedfs-master-log-volume - {{- with .Values.master.logs.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ .Values.master.logs.storageClass }} - resources: - requests: - storage: {{ .Values.master.logs.size }} - {{- end }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3/s3-deployment.yaml b/k8s/charts/seaweedfs/templates/s3/s3-deployment.yaml deleted file mode 100644 index 0c6d52c3e..000000000 --- a/k8s/charts/seaweedfs/templates/s3/s3-deployment.yaml +++ /dev/null @@ -1,282 +0,0 @@ -{{- if .Values.s3.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "seaweedfs.name" . }}-s3 - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 -{{- if .Values.s3.annotations }} - annotations: - {{- toYaml .Values.s3.annotations | nindent 4 }} -{{- end }} -spec: - replicas: {{ .Values.s3.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 - {{ with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.s3.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{ with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.s3.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} - {{- if .Values.s3.affinity }} - affinity: - {{ tpl .Values.s3.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.s3.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.s3.topologySpreadConstraints . | nindent 8 | trim }} - {{- end }} - {{- if .Values.s3.tolerations }} - tolerations: - {{ tpl .Values.s3.tolerations . | nindent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - terminationGracePeriodSeconds: 10 - {{- if .Values.s3.priorityClassName }} - priorityClassName: {{ .Values.s3.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - {{- if .Values.s3.serviceAccountName }} - serviceAccountName: {{ .Values.s3.serviceAccountName | quote }} - {{- end }} - {{- if .Values.s3.initContainers }} - initContainers: - {{ tpl .Values.s3.initContainers . | nindent 8 | trim }} - {{- end }} - {{- if .Values.s3.podSecurityContext.enabled }} - securityContext: {{- omit .Values.s3.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "s3.image" . }} - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - {{- if .Values.s3.extraEnvironmentVars }} - {{- range $key, $value := .Values.s3.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - exec /usr/bin/weed \ - {{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }} - -logdir=/logs \ - {{- else }} - -logtostderr=true \ - {{- end }} - {{- if .Values.s3.loggingOverrideLevel }} - -v={{ .Values.s3.loggingOverrideLevel }} \ - {{- else }} - -v={{ .Values.global.loggingLevel }} \ - {{- end }} - s3 \ - -ip.bind={{ .Values.s3.bindAddress }} \ - -port={{ .Values.s3.port }} \ - {{- if .Values.s3.metricsPort }} - -metricsPort {{ .Values.s3.metricsPort }} \ - {{- end }} - {{- if .Values.global.enableSecurity }} - {{- if .Values.s3.httpsPort }} - -port.https={{ .Values.s3.httpsPort }} \ - {{- end }} - -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ - -key.file=/usr/local/share/ca-certificates/client/tls.key \ - {{- end }} - {{- if .Values.s3.domainName }} - -domainName={{ .Values.s3.domainName }} \ - {{- end }} - {{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }} - -allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \ - {{- end }} - {{- if .Values.s3.enableAuth }} - -config=/etc/sw/seaweedfs_s3_config \ - {{- end }} - {{- if .Values.s3.auditLogConfig }} - -auditLogConfig=/etc/sw/s3_auditLogConfig.json \ - {{- end }} - -filer={{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }} \ - {{- range .Values.s3.extraArgs }} - {{ . }} \ - {{- end }} - volumeMounts: - {{- if or (eq .Values.s3.logs.type "hostPath") (eq .Values.s3.logs.type "emptyDir") }} - - name: logs - mountPath: "/logs/" - {{- end }} - {{- if .Values.s3.enableAuth }} - - mountPath: /etc/sw - name: config-users - readOnly: true - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }} - ports: - - containerPort: {{ .Values.s3.port }} - name: swfs-s3 - {{- if .Values.s3.httpsPort }} - - containerPort: {{ .Values.s3.httpsPort }} - name: swfs-s3-tls - {{- end }} - {{- if .Values.s3.metricsPort }} - - containerPort: {{ .Values.s3.metricsPort }} - name: metrics - {{- end }} - {{- if .Values.s3.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ .Values.s3.readinessProbe.httpGet.path }} - port: {{ .Values.s3.port }} - scheme: {{ .Values.s3.readinessProbe.scheme }} - initialDelaySeconds: {{ .Values.s3.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.s3.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.s3.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.s3.readinessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.s3.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.s3.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.s3.livenessProbe.httpGet.path }} - port: {{ .Values.s3.port }} - scheme: {{ .Values.s3.livenessProbe.scheme }} - initialDelaySeconds: {{ .Values.s3.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.s3.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.s3.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.s3.livenessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.s3.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.s3.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.s3.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.s3.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.s3.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.s3.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - {{- if .Values.s3.enableAuth }} - - name: config-users - secret: - defaultMode: 420 - {{- if .Values.s3.existingConfigSecret }} - secretName: {{ .Values.s3.existingConfigSecret }} - {{- else }} - secretName: seaweedfs-s3-secret - {{- end }} - {{- end }} - {{- if eq .Values.s3.logs.type "hostPath" }} - - name: logs - hostPath: - path: {{ .Values.s3.logs.hostPathPrefix }}/logs/seaweedfs/s3 - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.s3.logs.type "emptyDir" }} - - name: logs - emptyDir: {} - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} - {{- if .Values.s3.nodeSelector }} - nodeSelector: - {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3/s3-ingress.yaml b/k8s/charts/seaweedfs/templates/s3/s3-ingress.yaml deleted file mode 100644 index a856923e9..000000000 --- a/k8s/charts/seaweedfs/templates/s3/s3-ingress.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{- if .Values.s3.ingress.enabled }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} -apiVersion: networking.k8s.io/v1beta1 -{{- else }} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: ingress-{{ template "seaweedfs.name" . }}-s3 - namespace: {{ .Release.Namespace }} - {{- with .Values.s3.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 -spec: - ingressClassName: {{ .Values.s3.ingress.className | quote }} - tls: - {{ .Values.s3.ingress.tls | default list | toYaml | nindent 6}} - rules: - - http: - paths: - - path: {{ .Values.s3.ingress.path | quote }} - pathType: {{ .Values.s3.ingress.pathType | quote }} - backend: -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} - service: - name: {{ template "seaweedfs.name" . }}-s3 - port: - number: {{ .Values.s3.port }} - #name: -{{- else }} - serviceName: {{ template "seaweedfs.name" . }}-s3 - servicePort: {{ .Values.s3.port }} -{{- end }} -{{- if .Values.s3.ingress.host }} - host: {{ .Values.s3.ingress.host | quote }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3/s3-secret.yaml b/k8s/charts/seaweedfs/templates/s3/s3-secret.yaml deleted file mode 100644 index 587ea77c4..000000000 --- a/k8s/charts/seaweedfs/templates/s3/s3-secret.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if or (and (or .Values.s3.enabled .Values.allInOne.enabled) .Values.s3.enableAuth (not .Values.s3.existingConfigSecret)) (and .Values.filer.s3.enabled .Values.filer.s3.enableAuth (not .Values.filer.s3.existingConfigSecret)) }} -{{- $access_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_access_key_id" "length" 20) -}} -{{- $secret_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_secret_access_key" "length" 40) -}} -{{- $access_key_read := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "read_access_key_id" "length" 20) -}} -{{- $secret_key_read := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "read_secret_access_key" "length" 40) -}} -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: seaweedfs-s3-secret - namespace: {{ .Release.Namespace }} - annotations: - "helm.sh/resource-policy": keep - "helm.sh/hook": "pre-install,pre-upgrade" - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 -stringData: - admin_access_key_id: {{ $access_key_admin }} - admin_secret_access_key: {{ $secret_key_admin }} - read_access_key_id: {{ $access_key_read }} - read_secret_access_key: {{ $secret_key_read }} - seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}' - {{- if .Values.filer.s3.auditLogConfig }} - filer_s3_auditLogConfig.json: | - {{ toJson .Values.filer.s3.auditLogConfig | nindent 4 }} - {{- end }} - {{- if .Values.s3.auditLogConfig }} - s3_auditLogConfig.json: | - {{ toJson .Values.s3.auditLogConfig | nindent 4 }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/s3/s3-service.yaml b/k8s/charts/seaweedfs/templates/s3/s3-service.yaml deleted file mode 100644 index 8afd48654..000000000 --- a/k8s/charts/seaweedfs/templates/s3/s3-service.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if or .Values.s3.enabled .Values.filer.s3.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "seaweedfs.name" . }}-s3 - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: s3 - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Values.s3.annotations }} - annotations: - {{- toYaml .Values.s3.annotations | nindent 4 }} -{{- end }} -spec: - internalTrafficPolicy: {{ .Values.s3.internalTrafficPolicy | default "Cluster" }} - ports: - - name: "swfs-s3" - port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} - targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} - protocol: TCP -{{- if and .Values.s3.enabled .Values.s3.httpsPort }} - - name: "swfs-s3-tls" - port: {{ .Values.s3.httpsPort }} - targetPort: {{ .Values.s3.httpsPort }} - protocol: TCP -{{- end }} -{{- if and .Values.s3.enabled .Values.s3.metricsPort }} - - name: "metrics" - port: {{ .Values.s3.metricsPort }} - targetPort: {{ .Values.s3.metricsPort }} - protocol: TCP -{{- end }} - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/s3/s3-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/s3/s3-servicemonitor.yaml deleted file mode 100644 index 348255912..000000000 --- a/k8s/charts/seaweedfs/templates/s3/s3-servicemonitor.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if or .Values.s3.enabled .Values.filer.s3.enabled }} -{{- if .Values.s3.metricsPort }} -{{- if .Values.global.monitoring.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" . }}-s3 - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: s3 - {{- with .Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if .Values.s3.annotations }} - annotations: - {{- toYaml .Values.s3.annotations | nindent 4 }} -{{- end }} -spec: - endpoints: - - interval: 30s - port: metrics - scrapeTimeout: 5s - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: s3 -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/sftp/sftp-deployment.yaml b/k8s/charts/seaweedfs/templates/sftp/sftp-deployment.yaml deleted file mode 100644 index c0bcb2c4a..000000000 --- a/k8s/charts/seaweedfs/templates/sftp/sftp-deployment.yaml +++ /dev/null @@ -1,301 +0,0 @@ -{{- if .Values.sftp.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "seaweedfs.name" . }}-sftp - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: sftp -{{- if .Values.sftp.annotations }} - annotations: - {{- toYaml .Values.sftp.annotations | nindent 4 }} -{{- end }} -spec: - replicas: {{ .Values.sftp.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: sftp - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: sftp - {{ with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.sftp.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{ with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.sftp.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - restartPolicy: {{ default .Values.global.restartPolicy .Values.sftp.restartPolicy }} - {{- if .Values.sftp.affinity }} - affinity: - {{ tpl .Values.sftp.affinity . | nindent 8 | trim }} - {{- end }} - {{- if .Values.sftp.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl .Values.sftp.topologySpreadConstraint . | nindent 8 | trim }} - {{- end }} - {{- if .Values.sftp.tolerations }} - tolerations: - {{ tpl .Values.sftp.tolerations . | nindent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }} - terminationGracePeriodSeconds: 10 - {{- if .Values.sftp.priorityClassName }} - priorityClassName: {{ .Values.sftp.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - {{- if .Values.sftp.serviceAccountName }} - serviceAccountName: {{ .Values.sftp.serviceAccountName | quote }} - {{- end }} - {{- if .Values.sftp.initContainers }} - initContainers: - {{ tpl .Values.sftp.initContainers . | nindent 8 | trim }} - {{- end }} - {{- if .Values.sftp.podSecurityContext.enabled }} - securityContext: {{- omit .Values.sftp.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "sftp.image" . }} - imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - {{- if .Values.sftp.extraEnvironmentVars }} - {{- range $key, $value := .Values.sftp.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if .Values.global.extraEnvironmentVars }} - {{- range $key, $value := .Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - exec /usr/bin/weed \ - {{- if or (eq .Values.sftp.logs.type "hostPath") (eq .Values.sftp.logs.type "emptyDir") }} - -logdir=/logs \ - {{- else }} - -logtostderr=true \ - {{- end }} - {{- if .Values.sftp.loggingOverrideLevel }} - -v={{ .Values.sftp.loggingOverrideLevel }} \ - {{- else }} - -v={{ .Values.global.loggingLevel }} \ - {{- end }} - sftp \ - -ip.bind={{ .Values.sftp.bindAddress }} \ - -port={{ .Values.sftp.port }} \ - {{- if .Values.sftp.metricsPort }} - -metricsPort={{ .Values.sftp.metricsPort }} \ - {{- end }} - {{- if .Values.sftp.metricsIp }} - -metricsIp={{ .Values.sftp.metricsIp }} \ - {{- end }} - {{- if .Values.sftp.sshPrivateKey }} - -sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \ - {{- end }} - {{- if .Values.sftp.hostKeysFolder }} - -hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \ - {{- end }} - {{- if .Values.sftp.authMethods }} - -authMethods={{ .Values.sftp.authMethods }} \ - {{- end }} - {{- if .Values.sftp.maxAuthTries }} - -maxAuthTries={{ .Values.sftp.maxAuthTries }} \ - {{- end }} - {{- if .Values.sftp.bannerMessage }} - -bannerMessage="{{ .Values.sftp.bannerMessage }}" \ - {{- end }} - {{- if .Values.sftp.loginGraceTime }} - -loginGraceTime={{ .Values.sftp.loginGraceTime }} \ - {{- end }} - {{- if .Values.sftp.clientAliveInterval }} - -clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \ - {{- end }} - {{- if .Values.sftp.clientAliveCountMax }} - -clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \ - {{- end }} - {{- if .Values.sftp.dataCenter }} - -dataCenter={{ .Values.sftp.dataCenter }} \ - {{- end }} - {{- if .Values.sftp.localSocket }} - -localSocket={{ .Values.sftp.localSocket }} \ - {{- end }} - {{- if .Values.global.enableSecurity }} - -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ - -key.file=/usr/local/share/ca-certificates/client/tls.key \ - {{- end }} - -userStoreFile=/etc/sw/seaweedfs_sftp_config \ - -filer={{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }} - volumeMounts: - {{- if or (eq .Values.sftp.logs.type "hostPath") (eq .Values.sftp.logs.type "emptyDir") }} - - name: logs - mountPath: "/logs/" - {{- end }} - {{- if .Values.sftp.enableAuth }} - - mountPath: /etc/sw - name: config-users - readOnly: true - {{- end }} - - mountPath: /etc/sw/ssh - name: config-ssh - readOnly: true - {{- if .Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl .Values.sftp.extraVolumeMounts . | nindent 12 | trim }} - ports: - - containerPort: {{ .Values.sftp.port }} - name: swfs-sftp - {{- if .Values.sftp.metricsPort }} - - containerPort: {{ .Values.sftp.metricsPort }} - name: metrics - {{- end }} - {{- if .Values.sftp.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: {{ .Values.sftp.port }} - initialDelaySeconds: {{ .Values.sftp.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sftp.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.sftp.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.sftp.readinessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.sftp.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if .Values.sftp.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: {{ .Values.sftp.port }} - initialDelaySeconds: {{ .Values.sftp.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.sftp.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.sftp.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.sftp.livenessProbe.failureThreshold }} - timeoutSeconds: {{ .Values.sftp.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.sftp.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.sftp.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.sftp.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.sftp.sidecars }} - {{- include "common.tplvalues.render" (dict "value" .Values.sftp.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - {{- if .Values.sftp.enableAuth }} - - name: config-users - secret: - defaultMode: 420 - {{- if .Values.sftp.existingConfigSecret }} - secretName: {{ .Values.sftp.existingConfigSecret }} - {{- else }} - secretName: seaweedfs-sftp-secret - {{- end }} - {{- end }} - - name: config-ssh - secret: - defaultMode: 420 - {{- if .Values.sftp.existingSshConfigSecret }} - secretName: {{ .Values.sftp.existingSshConfigSecret }} - {{- else }} - secretName: seaweedfs-sftp-ssh-secret - {{- end }} - {{- if eq .Values.sftp.logs.type "hostPath" }} - - name: logs - hostPath: - path: {{ .Values.sftp.logs.hostPathPrefix }}/logs/seaweedfs/sftp - type: DirectoryOrCreate - {{- end }} - {{- if eq .Values.sftp.logs.type "emptyDir" }} - - name: logs - emptyDir: {} - {{- end }} - {{- if .Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" . }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" . }}-client-cert - {{- end }} - {{ tpl .Values.sftp.extraVolumes . | indent 8 | trim }} - {{- if .Values.sftp.nodeSelector }} - nodeSelector: - {{ tpl .Values.sftp.nodeSelector . | indent 8 | trim }} - {{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/sftp/sftp-secret.yaml b/k8s/charts/seaweedfs/templates/sftp/sftp-secret.yaml deleted file mode 100644 index 2cec992a0..000000000 --- a/k8s/charts/seaweedfs/templates/sftp/sftp-secret.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if or .Values.sftp.enabled .Values.allInOne.enabled }} -{{- $admin_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "admin_password" 20) -}} -{{- $read_user_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "readonly_password" 20) -}} -{{- $public_user_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "public_user_password" 20) -}} -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: seaweedfs-sftp-secret - namespace: {{ .Release.Namespace }} - annotations: - "helm.sh/resource-policy": keep - "helm.sh/hook": "pre-install,pre-upgrade" - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: sftp -stringData: - admin_password: {{ $admin_pwd }} - readonly_password: {{ $read_user_pwd }} - public_user_password: {{ $public_user_pwd }} - seaweedfs_sftp_config: '[{"Username":"admin","Password":"{{ $admin_pwd }}","PublicKeys":[],"HomeDir":"/","Permissions":{"/":["read","write","list"]},"Uid":0,"Gid":0},{"Username":"readonly_user","Password":"{{ $read_user_pwd }}","PublicKeys":[],"HomeDir":"/","Permissions":{"/":["read","list"]},"Uid":1112,"Gid":1112},{"Username":"public_user","Password":"{{ $public_user_pwd }}","PublicKeys":[],"HomeDir":"/public","Permissions":{"/public":["write","read","list"]},"Uid":1113,"Gid":1113}]' - seaweedfs_sftp_ssh_private_key: | - -----BEGIN OPENSSH PRIVATE KEY----- - b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW - QyNTUxOQAAACDH4McwcDphteXVullu6q7ephEN1N60z+w0qZw0UVW8OwAAAJDjxkmk48ZJ - pAAAAAtzc2gtZWQyNTUxOQAAACDH4McwcDphteXVullu6q7ephEN1N60z+w0qZw0UVW8Ow - AAAEAeVy/4+gf6rjj2jla/AHqJpC1LcS5hn04IUs4q+iVq/MfgxzBwOmG15dW6WW7qrt6m - EQ3U3rTP7DSpnDRRVbw7AAAADHNla291ckAwMDY2NwE= - -----END OPENSSH PRIVATE KEY----- -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/sftp/sftp-service.yaml b/k8s/charts/seaweedfs/templates/sftp/sftp-service.yaml deleted file mode 100644 index 5e67570d6..000000000 --- a/k8s/charts/seaweedfs/templates/sftp/sftp-service.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if .Values.sftp.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "seaweedfs.name" . }}-sftp - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: sftp - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Values.sftp.annotations }} - annotations: - {{- toYaml .Values.sftp.annotations | nindent 4 }} -{{- end }} -spec: - internalTrafficPolicy: {{ .Values.sftp.internalTrafficPolicy | default "Cluster" }} - ports: - - name: "swfs-sftp" - port: {{ .Values.sftp.port }} - targetPort: {{ .Values.sftp.port }} - protocol: TCP -{{- if .Values.sftp.metricsPort }} - - name: "metrics" - port: {{ .Values.sftp.metricsPort }} - targetPort: {{ .Values.sftp.metricsPort }} - protocol: TCP -{{- end }} - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: sftp -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/sftp/sftp-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/sftp/sftp-servicemonitor.yaml deleted file mode 100644 index 4c7188866..000000000 --- a/k8s/charts/seaweedfs/templates/sftp/sftp-servicemonitor.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if .Values.sftp.enabled }} -{{- if .Values.sftp.metricsPort }} -{{- if .Values.global.monitoring.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" . }}-sftp - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: sftp - {{- with .Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if .Values.sftp.annotations }} - annotations: - {{- toYaml .Values.sftp.annotations | nindent 4 }} -{{- end }} -spec: - endpoints: - - interval: 30s - port: metrics - scrapeTimeout: 5s - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: sftp -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/shared/_helpers.tpl b/k8s/charts/seaweedfs/templates/shared/_helpers.tpl deleted file mode 100644 index d22d14224..000000000 --- a/k8s/charts/seaweedfs/templates/shared/_helpers.tpl +++ /dev/null @@ -1,248 +0,0 @@ -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to -this (by the DNS naming spec). If release name contains chart name it will -be used as a full name. -*/}} -{{- define "seaweedfs.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "seaweedfs.chart" -}} -{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Expand the name of the chart. -*/}} -{{- define "seaweedfs.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Inject extra environment vars in the format key:value, if populated -*/}} -{{- define "seaweedfs.extraEnvironmentVars" -}} -{{- if .extraEnvironmentVars -}} -{{- range $key, $value := .extraEnvironmentVars }} -- name: {{ $key }} - value: {{ $value | quote }} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* Return the proper filer image */}} -{{- define "filer.image" -}} -{{- if .Values.filer.imageOverride -}} -{{- $imageOverride := .Values.filer.imageOverride -}} -{{- printf "%s" $imageOverride -}} -{{- else -}} -{{- include "common.image" . }} -{{- end -}} -{{- end -}} - -{{/* Return the proper master image */}} -{{- define "master.image" -}} -{{- if .Values.master.imageOverride -}} -{{- $imageOverride := .Values.master.imageOverride -}} -{{- printf "%s" $imageOverride -}} -{{- else -}} -{{- include "common.image" . }} -{{- end -}} -{{- end -}} - -{{/* Return the proper s3 image */}} -{{- define "s3.image" -}} -{{- if .Values.s3.imageOverride -}} -{{- $imageOverride := .Values.s3.imageOverride -}} -{{- printf "%s" $imageOverride -}} -{{- else -}} -{{- include "common.image" . }} -{{- end -}} -{{- end -}} - -{{/* Return the proper sftp image */}} -{{- define "sftp.image" -}} -{{- if .Values.sftp.imageOverride -}} -{{- $imageOverride := .Values.sftp.imageOverride -}} -{{- printf "%s" $imageOverride -}} -{{- else -}} -{{- include "common.image" . }} -{{- end -}} -{{- end -}} - -{{/* Return the proper volume image */}} -{{- define "volume.image" -}} -{{- if .Values.volume.imageOverride -}} -{{- $imageOverride := .Values.volume.imageOverride -}} -{{- printf "%s" $imageOverride -}} -{{- else -}} -{{- include "common.image" . }} -{{- end -}} -{{- end -}} - -{{/* Computes the container image name for all components (if they are not overridden) */}} -{{- define "common.image" -}} -{{- $registryName := default .Values.image.registry .Values.global.registry | toString -}} -{{- $repositoryName := default .Values.image.repository .Values.global.repository | toString -}} -{{- $name := .Values.global.imageName | toString -}} -{{- $tag := default .Chart.AppVersion .Values.image.tag | toString -}} -{{- if $repositoryName -}} -{{- $name = printf "%s/%s" (trimSuffix "/" $repositoryName) (base $name) -}} -{{- end -}} -{{- if $registryName -}} -{{- printf "%s/%s:%s" $registryName $name $tag -}} -{{- else -}} -{{- printf "%s:%s" $name $tag -}} -{{- end -}} -{{- end -}} - -{{/* check if any Volume PVC exists */}} -{{- define "volume.pvc_exists" -}} -{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}} -{{- printf "true" -}} -{{- else -}} -{{- printf "" -}} -{{- end -}} -{{- end -}} - -{{/* check if any Filer PVC exists */}} -{{- define "filer.pvc_exists" -}} -{{- if or (eq .Values.filer.data.type "persistentVolumeClaim") (eq .Values.filer.logs.type "persistentVolumeClaim") -}} -{{- printf "true" -}} -{{- else -}} -{{- printf "" -}} -{{- end -}} -{{- end -}} - -{{/* check if any Master PVC exists */}} -{{- define "master.pvc_exists" -}} -{{- if or (eq .Values.master.data.type "persistentVolumeClaim") (eq .Values.master.logs.type "persistentVolumeClaim") -}} -{{- printf "true" -}} -{{- else -}} -{{- printf "" -}} -{{- end -}} -{{- end -}} - -{{/* check if any InitContainers exist for Volumes */}} -{{- define "volume.initContainers_exists" -}} -{{- if or (not (empty .Values.volume.idx )) (not (empty .Values.volume.initContainers )) -}} -{{- printf "true" -}} -{{- else -}} -{{- printf "" -}} -{{- end -}} -{{- end -}} - -{{/* Return the proper imagePullSecrets */}} -{{- define "seaweedfs.imagePullSecrets" -}} -{{- with .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- if kindIs "string" . }} - - name: {{ . }} -{{- else }} -{{- range . }} - {{- if kindIs "string" . }} - - name: {{ . }} - {{- else }} - - {{ toYaml . }} - {{- end}} -{{- end }} -{{- end }} -{{- end }} -{{- end -}} - -{{/* -Renders a value that contains template perhaps with scope if the scope is present. -Usage: -{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} -{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} -*/}} -{{- define "common.tplvalues.render" -}} -{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} -{{- if contains "{{" (toJson .value) }} - {{- if .scope }} - {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} - {{- else }} - {{- tpl $value .context }} - {{- end }} -{{- else }} - {{- $value }} -{{- end }} -{{- end -}} - -{{/* -Converts a Kubernetes quantity like "256Mi" or "2G" to a float64 in base units, -handling both binary (Ki, Mi, Gi) and decimal (m, k, M) suffixes; numeric inputs -Usage: -{{ include "common.resource-quantity" "10Gi" }} -*/}} -{{- define "common.resource-quantity" -}} - {{- $value := . -}} - {{- $unit := 1.0 -}} - {{- if typeIs "string" . -}} - {{- $base2 := dict "Ki" 0x1p10 "Mi" 0x1p20 "Gi" 0x1p30 "Ti" 0x1p40 "Pi" 0x1p50 "Ei" 0x1p60 -}} - {{- $base10 := dict "m" 1e-3 "k" 1e3 "M" 1e6 "G" 1e9 "T" 1e12 "P" 1e15 "E" 1e18 -}} - {{- range $k, $v := merge $base2 $base10 -}} - {{- if hasSuffix $k $ -}} - {{- $value = trimSuffix $k $ -}} - {{- $unit = $v -}} - {{- end -}} - {{- end -}} - {{- end -}} - {{- mulf (float64 $value) $unit -}} -{{- end -}} - -{{/* -getOrGeneratePassword will check if a password exists in a secret and return it, -or generate a new random password if it doesn't exist. -*/}} -{{- define "getOrGeneratePassword" -}} -{{- $params := . -}} -{{- $namespace := $params.namespace -}} -{{- $secretName := $params.secretName -}} -{{- $key := $params.key -}} -{{- $length := default 16 $params.length -}} - -{{- $existingSecret := lookup "v1" "Secret" $namespace $secretName -}} -{{- if and $existingSecret (index $existingSecret.data $key) -}} - {{- index $existingSecret.data $key | b64dec -}} -{{- else -}} - {{- randAlphaNum $length -}} -{{- end -}} -{{- end -}} - -{{/* -Compute the master service address to be used in cluster env vars. -If allInOne is enabled, point to the all-in-one service; otherwise, point to the master service. -*/}} -{{- define "seaweedfs.cluster.masterAddress" -}} -{{- $serviceNameSuffix := "-master" -}} -{{- if .Values.allInOne.enabled -}} -{{- $serviceNameSuffix = "-all-in-one" -}} -{{- end -}} -{{- printf "%s%s.%s:%d" (include "seaweedfs.name" .) $serviceNameSuffix .Release.Namespace (int .Values.master.port) -}} -{{- end -}} - -{{/* -Compute the filer service address to be used in cluster env vars. -If allInOne is enabled, point to the all-in-one service; otherwise, point to the filer-client service. -*/}} -{{- define "seaweedfs.cluster.filerAddress" -}} -{{- $serviceNameSuffix := "-filer-client" -}} -{{- if .Values.allInOne.enabled -}} -{{- $serviceNameSuffix = "-all-in-one" -}} -{{- end -}} -{{- printf "%s%s.%s:%d" (include "seaweedfs.name" .) $serviceNameSuffix .Release.Namespace (int .Values.filer.port) -}} -{{- end -}} diff --git a/k8s/charts/seaweedfs/templates/shared/cluster-role.yaml b/k8s/charts/seaweedfs/templates/shared/cluster-role.yaml deleted file mode 100644 index 154de0675..000000000 --- a/k8s/charts/seaweedfs/templates/shared/cluster-role.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.global.createClusterRole }} -#hack for delete pod master after migration ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ .Values.global.serviceAccountName }}-rw-cr - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:serviceaccount:{{ .Values.global.serviceAccountName }}:default - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: {{ .Values.global.serviceAccountName }} - namespace: {{ .Release.Namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ .Values.global.serviceAccountName }}-rw-cr -{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/shared/notification-configmap.yaml b/k8s/charts/seaweedfs/templates/shared/notification-configmap.yaml deleted file mode 100644 index c638c8771..000000000 --- a/k8s/charts/seaweedfs/templates/shared/notification-configmap.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.filer.enabled .Values.filer.notificationConfig }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "seaweedfs.name" . }}-notification-config - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} -{{- end }} -data: - notification.toml: |- - {{ .Values.filer.notificationConfig | nindent 4 }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml b/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml deleted file mode 100644 index 44d650898..000000000 --- a/k8s/charts/seaweedfs/templates/shared/post-install-bucket-hook.yaml +++ /dev/null @@ -1,122 +0,0 @@ -{{- if .Values.master.enabled }} -{{- if .Values.filer.s3.enabled }} -{{- if .Values.filer.s3.createBuckets }} ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $.Release.Name }}-bucket-hook" - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - annotations: - "helm.sh/hook": post-install - "helm.sh/hook-weight": "-5" - "helm.sh/hook-delete-policy": hook-succeeded -spec: - template: - metadata: - name: "{{ .Release.Name }}" - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - spec: - restartPolicy: Never - {{- if .Values.filer.podSecurityContext.enabled }} - securityContext: {{- omit .Values.filer.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: post-install-job - image: {{ template "master.image" . }} - env: - - name: WEED_CLUSTER_DEFAULT - value: "sw" - - name: WEED_CLUSTER_SW_MASTER - value: "{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}:{{ .Values.master.port }}" - - name: WEED_CLUSTER_SW_FILER - value: "{{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" . }}" - command: - - "/bin/sh" - - "-ec" - - | - wait_for_service() { - local url=$1 - local max_attempts=60 # 5 minutes total (5s * 60) - local attempt=1 - - echo "Waiting for service at $url..." - while [ $attempt -le $max_attempts ]; do - if wget -q --spider "$url" >/dev/null 2>&1; then - echo "Service at $url is up!" - return 0 - fi - echo "Attempt $attempt: Service not ready yet, retrying in 5s..." - sleep 5 - attempt=$((attempt + 1)) - done - echo "Service at $url failed to become ready within 5 minutes" - exit 1 - } - wait_for_service "http://$WEED_CLUSTER_SW_MASTER{{ .Values.master.readinessProbe.httpGet.path }}" - wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}" - {{- range $reg, $props := $.Values.filer.s3.createBuckets }} - exec /bin/echo \ - "s3.bucket.create --name {{ $props.name }}" |\ - /usr/bin/weed shell - {{- end }} - {{- range $reg, $props := $.Values.filer.s3.createBuckets }} - {{- if $props.anonymousRead }} - exec /bin/echo \ - "s3.configure --user anonymous \ - --buckets {{ $props.name }} \ - --actions Read \ - --apply true" |\ - /usr/bin/weed shell - {{- end }} - {{- end }} - {{- if .Values.filer.s3.enableAuth }} - volumeMounts: - - name: config-users - mountPath: /etc/sw - readOnly: true - {{- end }} - ports: - - containerPort: {{ .Values.master.port }} - name: swfs-master - {{- if and .Values.global.monitoring.enabled .Values.master.metricsPort }} - - containerPort: {{ .Values.master.metricsPort }} - name: metrics - {{- end }} - - containerPort: {{ .Values.master.grpcPort }} - #name: swfs-master-grpc - {{- if .Values.filer.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.filer.s3.enableAuth }} - volumes: - - name: config-users - secret: - defaultMode: 420 - {{- if not (empty .Values.filer.s3.existingConfigSecret) }} - secretName: {{ .Values.filer.s3.existingConfigSecret }} - {{- else }} - secretName: seaweedfs-s3-secret - {{- end }} - {{- end }}{{/** if .Values.filer.s3.enableAuth **/}} -{{- end }}{{/** if .Values.master.enabled **/}} -{{- end }}{{/** if .Values.filer.s3.enabled **/}} -{{- end }}{{/** if .Values.filer.s3.createBuckets **/}} diff --git a/k8s/charts/seaweedfs/templates/shared/seaweedfs-grafana-dashboard.yaml b/k8s/charts/seaweedfs/templates/shared/seaweedfs-grafana-dashboard.yaml deleted file mode 100644 index cf7801cce..000000000 --- a/k8s/charts/seaweedfs/templates/shared/seaweedfs-grafana-dashboard.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.global.monitoring.enabled }} -{{- $files := .Files.Glob "dashboards/*.json" }} -{{- if $files }} -{{- range $path, $file := $files }} -{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }} - namespace: {{ $.Release.Namespace }} - labels: - grafana_dashboard: "1" -data: - {{ $dashboardName }}.json: |- -{{ toString $file | indent 4 }} -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/shared/security-configmap.yaml b/k8s/charts/seaweedfs/templates/shared/security-configmap.yaml deleted file mode 100644 index 6f229c595..000000000 --- a/k8s/charts/seaweedfs/templates/shared/security-configmap.yaml +++ /dev/null @@ -1,82 +0,0 @@ -{{- if .Values.global.enableSecurity }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "seaweedfs.name" . }}-security-config - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -data: - {{- $existing := (lookup "v1" "ConfigMap" .Release.Namespace (printf "%s-security-config" (include "seaweedfs.name" .))) }} - {{- $securityConfig := fromToml (dig "data" "security.toml" "" $existing) }} - security.toml: |- - # this file is read by master, volume server, and filer - - {{- if .Values.global.securityConfig.jwtSigning.volumeWrite }} - # the jwt signing key is read by master and volume server - # a jwt expires in 10 seconds - [jwt.signing] - key = "{{ dig "jwt" "signing" "key" (randAlphaNum 10 | b64enc) $securityConfig }}" - {{- end }} - - {{- if .Values.global.securityConfig.jwtSigning.volumeRead }} - # this jwt signing key is read by master and volume server, and it is used for read operations: - # - the Master server generates the JWT, which can be used to read a certain file on a volume server - # - the Volume server validates the JWT on reading - [jwt.signing.read] - key = "{{ dig "jwt" "signing" "read" "key" (randAlphaNum 10 | b64enc) $securityConfig }}" - {{- end }} - - {{- if .Values.global.securityConfig.jwtSigning.filerWrite }} - # If this JWT key is configured, Filer only accepts writes over HTTP if they are signed with this JWT: - # - f.e. the S3 API Shim generates the JWT - # - the Filer server validates the JWT on writing - # the jwt defaults to expire after 10 seconds. - [jwt.filer_signing] - key = "{{ dig "jwt" "filer_signing" "key" (randAlphaNum 10 | b64enc) $securityConfig }}" - {{- end }} - - {{- if .Values.global.securityConfig.jwtSigning.filerRead }} - # If this JWT key is configured, Filer only accepts reads over HTTP if they are signed with this JWT: - # - f.e. the S3 API Shim generates the JWT - # - the Filer server validates the JWT on writing - # the jwt defaults to expire after 10 seconds. - [jwt.filer_signing.read] - key = "{{ dig "jwt" "filer_signing" "read" "key" (randAlphaNum 10 | b64enc) $securityConfig }}" - {{- end }} - - # all grpc tls authentications are mutual - # the values for the following ca, cert, and key are paths to the PERM files. - [grpc] - ca = "/usr/local/share/ca-certificates/ca/tls.crt" - - [grpc.volume] - cert = "/usr/local/share/ca-certificates/volume/tls.crt" - key = "/usr/local/share/ca-certificates/volume/tls.key" - - [grpc.master] - cert = "/usr/local/share/ca-certificates/master/tls.crt" - key = "/usr/local/share/ca-certificates/master/tls.key" - - [grpc.filer] - cert = "/usr/local/share/ca-certificates/filer/tls.crt" - key = "/usr/local/share/ca-certificates/filer/tls.key" - - # use this for any place needs a grpc client - # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" - [grpc.client] - cert = "/usr/local/share/ca-certificates/client/tls.crt" - key = "/usr/local/share/ca-certificates/client/tls.key" - - # volume server https options - # Note: work in progress! - # this does not work with other clients, e.g., "weed filer|mount" etc, yet. - [https.client] - enabled = false - [https.volume] - cert = "" - key = "" -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/shared/service-account.yaml b/k8s/charts/seaweedfs/templates/shared/service-account.yaml deleted file mode 100644 index 429158a27..000000000 --- a/k8s/charts/seaweedfs/templates/shared/service-account.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.global.serviceAccountName }} - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -automountServiceAccountToken: {{ .Values.global.automountServiceAccountToken }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/volume/volume-resize-hook.yaml b/k8s/charts/seaweedfs/templates/volume/volume-resize-hook.yaml deleted file mode 100644 index 78e8a3fc9..000000000 --- a/k8s/charts/seaweedfs/templates/volume/volume-resize-hook.yaml +++ /dev/null @@ -1,117 +0,0 @@ -{{- $seaweedfsName := include "seaweedfs.name" $ }} -{{- $volumes := deepCopy .Values.volumes | mergeOverwrite (dict "" .Values.volume) }} - - -{{- if .Values.volume.resizeHook.enabled }} -{{- $commands := list }} -{{- range $vname, $volume := $volumes }} -{{- $volumeName := trimSuffix "-" (printf "volume-%s" $vname) }} -{{- $volume := mergeOverwrite (deepCopy $.Values.volume) (dict "enabled" true) $volume }} - -{{- if $volume.enabled }} -{{- $replicas := int $volume.replicas -}} -{{- $statefulsetName := printf "%s-%s" $seaweedfsName $volumeName -}} -{{- $statefulset := (lookup "apps/v1" "StatefulSet" $.Release.Namespace $statefulsetName) -}} - -{{/* Check for changes in volumeClaimTemplates */}} -{{- if $statefulset }} -{{- range $dir := $volume.dataDirs }} -{{- if eq .type "persistentVolumeClaim" }} -{{- $desiredSize := .size }} -{{- range $statefulset.spec.volumeClaimTemplates }} -{{- if and (eq .metadata.name $dir.name) (ne .spec.resources.requests.storage $desiredSize) }} -{{- $commands = append $commands (printf "kubectl delete statefulset %s --cascade=orphan" $statefulsetName) }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} - -{{/* Check for the need for patching existing PVCs */}} -{{- range $dir := $volume.dataDirs }} -{{- if eq .type "persistentVolumeClaim" }} -{{- $desiredSize := .size }} -{{- range $i, $e := until $replicas }} -{{- $pvcName := printf "%s-%s-%s-%d" $dir.name $seaweedfsName $volumeName $e }} -{{- $currentPVC := (lookup "v1" "PersistentVolumeClaim" $.Release.Namespace $pvcName) }} -{{- if and $currentPVC }} -{{- $oldSize := include "common.resource-quantity" $currentPVC.spec.resources.requests.storage }} -{{- $newSize := include "common.resource-quantity" $desiredSize }} -{{- if gt $newSize $oldSize }} -{{- $commands = append $commands (printf "kubectl patch pvc %s-%s-%s-%d -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"%s\"}}}}'" $dir.name $seaweedfsName $volumeName $e $desiredSize) }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} - -{{- end }} -{{- end }} - -{{- if $commands }} -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ $seaweedfsName }}-volume-resize-hook" - annotations: - helm.sh/hook: pre-install,pre-upgrade - helm.sh/hook-weight: "0" - helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation -spec: - template: - spec: - serviceAccountName: {{ $seaweedfsName }}-volume-resize-hook - restartPolicy: Never - backoffLimit: 1 - containers: - - name: resize - image: {{ .Values.volume.resizeHook.image }} - command: ["sh", "-xec"] - args: - - | - {{- range $commands }} - {{ . }} - {{- end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ $seaweedfsName }}-volume-resize-hook - annotations: - helm.sh/hook: pre-install,pre-upgrade - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ $seaweedfsName }}-volume-resize-hook - annotations: - helm.sh/hook: pre-install,pre-upgrade - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation -rules: - - apiGroups: ["apps"] - resources: ["statefulsets"] - verbs: ["delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["patch", "get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ $seaweedfsName }}-volume-resize-hook - annotations: - helm.sh/hook: pre-install,pre-upgrade - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation -subjects: - - kind: ServiceAccount - name: {{ $seaweedfsName }}-volume-resize-hook -roleRef: - kind: Role - name: {{ $seaweedfsName }}-volume-resize-hook - apiGroup: rbac.authorization.k8s.io -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume/volume-service.yaml b/k8s/charts/seaweedfs/templates/volume/volume-service.yaml deleted file mode 100644 index dfafc8163..000000000 --- a/k8s/charts/seaweedfs/templates/volume/volume-service.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{ $volumes := deepCopy .Values.volumes | mergeOverwrite (dict "" .Values.volume) }} -{{- range $vname, $volume := $volumes }} -{{- $volumeName := trimSuffix "-" (printf "volume-%s" $vname) }} -{{- $volume := mergeOverwrite (deepCopy $.Values.volume) (dict "enabled" true) $volume }} - -{{- if $volume.enabled }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "seaweedfs.name" $ }}-{{ $volumeName }} - namespace: {{ $.Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - app.kubernetes.io/component: {{ $volumeName }} - helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ $.Release.Service }} -{{- if $volume.annotations }} - annotations: - {{- toYaml $volume.annotations | nindent 4 }} -{{- end }} -spec: - clusterIP: None - internalTrafficPolicy: {{ $volume.internalTrafficPolicy | default "Cluster" }} - ports: - - name: "swfs-volume" - port: {{ $volume.port }} - targetPort: {{ $volume.port }} - protocol: TCP - - name: "swfs-volume-18080" - port: {{ $volume.grpcPort }} - targetPort: {{ $volume.grpcPort }} - protocol: TCP -{{- if $volume.metricsPort }} - - name: "metrics" - port: {{ $volume.metricsPort }} - targetPort: {{ $volume.metricsPort }} - protocol: TCP -{{- end }} - selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - app.kubernetes.io/component: {{ $volumeName }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume/volume-servicemonitor.yaml b/k8s/charts/seaweedfs/templates/volume/volume-servicemonitor.yaml deleted file mode 100644 index ac82eb573..000000000 --- a/k8s/charts/seaweedfs/templates/volume/volume-servicemonitor.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{ $volumes := deepCopy .Values.volumes | mergeOverwrite (dict "" .Values.volume) }} -{{- range $vname, $volume := $volumes }} -{{- $volumeName := trimSuffix "-" (printf "volume-%s" $vname) }} -{{- $volume := mergeOverwrite (deepCopy $.Values.volume) (dict "enabled" true) $volume }} - -{{- if $volume.enabled }} -{{- if $volume.metricsPort }} -{{- if $.Values.global.monitoring.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "seaweedfs.name" $ }}-{{ $volumeName }} - namespace: {{ $.Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ $.Release.Service }} - app.kubernetes.io/instance: {{ $.Release.Name }} - app.kubernetes.io/component: {{ $volumeName }} - {{- with $.Values.global.monitoring.additionalLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- with $volume.annotations }} - annotations: - {{- toYaml . | nindent 4 }} -{{- end }} -spec: - endpoints: - - interval: 30s - port: metrics - scrapeTimeout: 5s - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - app.kubernetes.io/component: {{ $volumeName }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/templates/volume/volume-statefulset.yaml b/k8s/charts/seaweedfs/templates/volume/volume-statefulset.yaml deleted file mode 100644 index 29a035a2b..000000000 --- a/k8s/charts/seaweedfs/templates/volume/volume-statefulset.yaml +++ /dev/null @@ -1,420 +0,0 @@ -{{ $volumes := deepCopy .Values.volumes | mergeOverwrite (dict "" .Values.volume) }} -{{- range $vname, $volume := $volumes }} -{{- $volumeName := trimSuffix "-" (printf "volume-%s" $vname) }} -{{- $volume := mergeOverwrite (deepCopy $.Values.volume) (dict "enabled" true) $volume }} - -{{- if $volume.enabled }} ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "seaweedfs.name" $ }}-{{ $volumeName }} - namespace: {{ $.Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ $.Release.Service }} - app.kubernetes.io/instance: {{ $.Release.Name }} - app.kubernetes.io/component: {{ $volumeName }} -{{- if $volume.annotations }} - annotations: - {{- toYaml $volume.annotations | nindent 4 }} -{{- end }} -spec: - serviceName: {{ template "seaweedfs.name" $ }}-{{ $volumeName }} - replicas: {{ $volume.replicas }} - podManagementPolicy: {{ $volume.podManagementPolicy }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - app.kubernetes.io/instance: {{ $.Release.Name }} - app.kubernetes.io/component: {{ $volumeName }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" $ }} - helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} - app.kubernetes.io/instance: {{ $.Release.Name }} - app.kubernetes.io/component: {{ $volumeName }} - {{ with $.Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with $volume.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - {{ with $.Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with $volume.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- if $volume.affinity }} - affinity: - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.affinity) $ | indent 8 | trim }} - {{- end }} - {{- if $volume.topologySpreadConstraints }} - topologySpreadConstraints: - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.topologySpreadConstraints) $ | nindent 8 | trim }} - {{- end }} - restartPolicy: {{ default $.Values.global.restartPolicy $volume.restartPolicy }} - {{- if $volume.tolerations }} - tolerations: - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.tolerations) $ | indent 8 | trim }} - {{- end }} - {{- include "seaweedfs.imagePullSecrets" $ | nindent 6 }} - terminationGracePeriodSeconds: 150 - {{- if $volume.priorityClassName }} - priorityClassName: {{ $volume.priorityClassName | quote }} - {{- end }} - enableServiceLinks: false - {{- if $.Values.global.createClusterRole }} - serviceAccountName: {{ $volume.serviceAccountName | default $.Values.global.serviceAccountName | quote }} # for deleting statefulset pods after migration - {{- end }} - {{- $initContainers_exists := include "volume.initContainers_exists" $ -}} - {{- if $initContainers_exists }} - initContainers: - {{- if $volume.idx }} - - name: seaweedfs-vol-move-idx - image: {{ template "volume.image" $ }} - imagePullPolicy: {{ $.Values.global.imagePullPolicy | default "IfNotPresent" }} - command: [ '/bin/sh', '-c' ] - args: [ '{{range $dir := $volume.dataDirs }}if ls /{{$dir.name}}/*.idx >/dev/null 2>&1; then mv /{{$dir.name}}/*.idx /idx/ ; fi; {{end}}' ] - volumeMounts: - - name: idx - mountPath: /idx - {{- range $dir := $volume.dataDirs }} - - name: {{ $dir.name }} - mountPath: /{{ $dir.name }} - {{- end }} - {{- if $volume.containerSecurityContext.enabled }} - securityContext: {{- omit $volume.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- end }} - {{- if $volume.initContainers }} - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.initContainers) $ | indent 8 | trim }} - {{- end }} - {{- end }} - {{- if $volume.podSecurityContext.enabled }} - securityContext: {{- omit $volume.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - containers: - - name: seaweedfs - image: {{ template "volume.image" $ }} - imagePullPolicy: {{ default "IfNotPresent" $.Values.global.imagePullPolicy }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: SEAWEEDFS_FULLNAME - value: "{{ template "seaweedfs.name" $ }}" - {{- if $volume.extraEnvironmentVars }} - {{- range $key, $value := $volume.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - {{- if $.Values.global.extraEnvironmentVars }} - {{- range $key, $value := $.Values.global.extraEnvironmentVars }} - - name: {{ $key }} - {{- if kindIs "string" $value }} - value: {{ $value | quote }} - {{- else }} - valueFrom: - {{ toYaml $value | nindent 16 | trim }} - {{- end -}} - {{- end }} - {{- end }} - command: - - "/bin/sh" - - "-ec" - - | - exec /usr/bin/weed \ - {{- if $volume.logs }} - -logdir=/logs \ - {{- else }} - -logtostderr=true \ - {{- end }} - {{- if $volume.loggingOverrideLevel }} - -v={{ $volume.loggingOverrideLevel }} \ - {{- else }} - -v={{ $.Values.global.loggingLevel }} \ - {{- end }} - volume \ - -port={{ $volume.port }} \ - {{- if $volume.metricsPort }} - -metricsPort={{ $volume.metricsPort }} \ - {{- end }} - {{- if $volume.metricsIp }} - -metricsIp={{ $volume.metricsIp }} \ - {{- end }} - -dir {{range $index, $dir := $volume.dataDirs }}{{if ne $index 0}},{{end}}/{{$dir.name}}{{end}} \ - {{- if $volume.idx }} - -dir.idx=/idx \ - {{- end }} - -max {{range $index, $dir := $volume.dataDirs }}{{if ne $index 0}},{{end}} - {{- if eq ($dir.maxVolumes | toString) "0" }}0{{ else if not $dir.maxVolumes }}7{{ else }}{{$dir.maxVolumes}}{{ end }} - {{- end }} \ - {{- if $volume.rack }} - -rack={{ $volume.rack }} \ - {{- end }} - {{- if $volume.dataCenter }} - -dataCenter={{ $volume.dataCenter }} \ - {{- end }} - -ip.bind={{ $volume.ipBind }} \ - -readMode={{ $volume.readMode }} \ - {{- if $volume.whiteList }} - -whiteList={{ $volume.whiteList }} \ - {{- end }} - {{- if $volume.imagesFixOrientation }} - -images.fix.orientation \ - {{- end }} - {{- if $volume.pulseSeconds }} - -pulseSeconds={{ $volume.pulseSeconds }} \ - {{- end }} - {{- if $volume.index }} - -index={{ $volume.index }} \ - {{- end }} - {{- if $volume.fileSizeLimitMB }} - -fileSizeLimitMB={{ $volume.fileSizeLimitMB }} \ - {{- end }} - -minFreeSpacePercent={{ $volume.minFreeSpacePercent }} \ - -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-{{ $volumeName }}.{{ $.Release.Namespace }} \ - -compactionMBps={{ $volume.compactionMBps }} \ - -mserver={{ if $.Values.global.masterServer }}{{ $.Values.global.masterServer}}{{ else }}{{ range $index := until ($.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} - {{- range $volume.extraArgs }} - {{ . }} \ - {{- end }} - volumeMounts: - {{- range $dir := $volume.dataDirs }} - {{- if not ( eq $dir.type "custom" ) }} - - name: {{ $dir.name }} - mountPath: "/{{ $dir.name }}/" - {{- end }} - {{- end }} - {{- if $volume.logs }} - - name: logs - mountPath: "/logs/" - {{- end }} - {{- if $volume.idx }} - - name: idx - mountPath: "/idx/" - {{- end }} - {{- if $.Values.global.enableSecurity }} - - name: security-config - readOnly: true - mountPath: /etc/seaweedfs/security.toml - subPath: security.toml - - name: ca-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/ca/ - - name: master-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/master/ - - name: volume-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/volume/ - - name: filer-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/filer/ - - name: client-cert - readOnly: true - mountPath: /usr/local/share/ca-certificates/client/ - {{- end }} - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.extraVolumeMounts) $ | indent 12 | trim }} - ports: - - containerPort: {{ $volume.port }} - name: swfs-vol - {{- if $volume.metricsPort }} - - containerPort: {{ $volume.metricsPort }} - name: metrics - {{- end }} - - containerPort: {{ $volume.grpcPort }} - name: swfs-vol-grpc - {{- if $volume.readinessProbe.enabled }} - readinessProbe: - httpGet: - path: {{ $volume.readinessProbe.httpGet.path }} - port: {{ $volume.port }} - scheme: {{ $volume.readinessProbe.scheme }} - initialDelaySeconds: {{ $volume.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ $volume.readinessProbe.periodSeconds }} - successThreshold: {{ $volume.readinessProbe.successThreshold }} - failureThreshold: {{ $volume.readinessProbe.failureThreshold }} - timeoutSeconds: {{ $volume.readinessProbe.timeoutSeconds }} - {{- end }} - {{- if $volume.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ $volume.livenessProbe.httpGet.path }} - port: {{ $volume.port }} - scheme: {{ $volume.livenessProbe.scheme }} - initialDelaySeconds: {{ $volume.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ $volume.livenessProbe.periodSeconds }} - successThreshold: {{ $volume.livenessProbe.successThreshold }} - failureThreshold: {{ $volume.livenessProbe.failureThreshold }} - timeoutSeconds: {{ $volume.livenessProbe.timeoutSeconds }} - {{- end }} - {{- with $volume.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if $volume.containerSecurityContext.enabled }} - securityContext: {{- omit $volume.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if $volume.sidecars }} - {{- include "common.tplvalues.render" (dict "value" (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.sidecars) "context" $) | nindent 8 }} - {{- end }} - volumes: - - {{- range $dir := $volume.dataDirs }} - - {{- if eq $dir.type "hostPath" }} - - name: {{ $dir.name }} - hostPath: - path: {{ $dir.hostPathPrefix }}/object_store/ - type: DirectoryOrCreate - {{- end }} - {{- if eq $dir.type "existingClaim" }} - - name: {{ $dir.name }} - persistentVolumeClaim: - claimName: {{ $dir.claimName }} - {{- end }} - {{- if eq $dir.type "emptyDir" }} - - name: {{ $dir.name }} - emptyDir: {} - {{- end }} - - {{- end }} - - {{- if $volume.idx }} - {{- if eq $volume.idx.type "hostPath" }} - - name: idx - hostPath: - path: {{ $volume.idx.hostPathPrefix }}/seaweedfs-volume-idx/ - type: DirectoryOrCreate - {{- end }} - {{- if eq $volume.idx.type "existingClaim" }} - - name: idx - persistentVolumeClaim: - claimName: {{ $volume.idx.claimName }} - {{- end }} - {{- if eq $volume.idx.type "emptyDir" }} - - name: idx - emptyDir: {} - {{- end }} - {{- end }} - - {{- if $volume.logs }} - {{- if eq $volume.logs.type "hostPath" }} - - name: logs - hostPath: - path: {{ $volume.logs.hostPathPrefix }}/logs/seaweedfs/volume - type: DirectoryOrCreate - {{- end }} - {{- if eq $volume.logs.type "existingClaim" }} - - name: logs - persistentVolumeClaim: - claimName: {{ $volume.logs.claimName }} - {{- end }} - {{- if eq $volume.logs.type "emptyDir" }} - - name: logs - emptyDir: {} - {{- end }} - {{- end }} - {{- if $.Values.global.enableSecurity }} - - name: security-config - configMap: - name: {{ template "seaweedfs.name" $ }}-security-config - - name: ca-cert - secret: - secretName: {{ template "seaweedfs.name" $ }}-ca-cert - - name: master-cert - secret: - secretName: {{ template "seaweedfs.name" $ }}-master-cert - - name: volume-cert - secret: - secretName: {{ template "seaweedfs.name" $ }}-volume-cert - - name: filer-cert - secret: - secretName: {{ template "seaweedfs.name" $ }}-filer-cert - - name: client-cert - secret: - secretName: {{ template "seaweedfs.name" $ }}-client-cert - {{- end }} - {{- if $volume.extraVolumes }} - {{ tpl $volume.extraVolumes $ | indent 8 | trim }} - {{- end }} - {{- if $volume.nodeSelector }} - nodeSelector: - {{ tpl (printf "{{ $volumeName := \"%s\" }}%s" $volumeName $volume.nodeSelector) $ | indent 8 | trim }} - {{- end }} - volumeClaimTemplates: - {{- range $dir := $volume.dataDirs }} - {{- if eq $dir.type "persistentVolumeClaim" }} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: {{ $dir.name }} - {{- with $dir.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ $dir.storageClass }} - resources: - requests: - storage: {{ $dir.size }} - {{- end }} - {{- end }} - - {{- if and $volume.idx (eq $volume.idx.type "persistentVolumeClaim") }} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: idx - {{- with $volume.idx.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ $volume.idx.storageClass }} - resources: - requests: - storage: {{ $volume.idx.size }} - {{- end }} - {{- if and $volume.logs (eq $volume.logs.type "persistentVolumeClaim") }} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: logs - {{- with $volume.logs.annotations }} - annotations: - {{- toYaml . | nindent 10 }} - {{- end }} - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: {{ $volume.logs.storageClass }} - resources: - requests: - storage: {{ $volume.logs.size }} - {{- end }} -{{- end }} -{{- end }} diff --git a/k8s/charts/seaweedfs/values.yaml b/k8s/charts/seaweedfs/values.yaml deleted file mode 100644 index f75794dae..000000000 --- a/k8s/charts/seaweedfs/values.yaml +++ /dev/null @@ -1,1280 +0,0 @@ -# Available parameters and their default values for the SeaweedFS chart. - -global: - createClusterRole: true - registry: "" - # if repository is set, it overrides the namespace part of imageName - repository: "" - imageName: chrislusf/seaweedfs - imagePullPolicy: IfNotPresent - imagePullSecrets: "" - restartPolicy: Always - loggingLevel: 1 - enableSecurity: false - masterServer: null - securityConfig: - jwtSigning: - volumeWrite: true - volumeRead: false - filerWrite: false - filerRead: false - # we will use this serviceAccountName for all ClusterRoles/ClusterRoleBindings - serviceAccountName: "seaweedfs" - automountServiceAccountToken: true - certificates: - alphacrds: false - monitoring: - enabled: false - gatewayHost: null - gatewayPort: null - additionalLabels: {} - # if enabled will use global.replicationPlacement and override master & filer defaultReplicaPlacement config - enableReplication: false - # replication type is XYZ: - # X number of replica in other data centers - # Y number of replica in other racks in the same data center - # Z number of replica in other servers in the same rack - replicationPlacement: "001" - extraEnvironmentVars: - WEED_CLUSTER_DEFAULT: "sw" - WEED_CLUSTER_SW_MASTER: "seaweedfs-master.seaweedfs:9333" - WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client.seaweedfs:8888" - # WEED_JWT_SIGNING_KEY: - # secretKeyRef: - # name: seaweedfs-signing-key - # key: signingKey - -image: - registry: "" - repository: "" - tag: "" - -master: - enabled: true - imageOverride: null - restartPolicy: null - replicas: 1 - port: 9333 - grpcPort: 19333 - metricsPort: 9327 - metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind - ipBind: "0.0.0.0" - volumePreallocate: false - volumeSizeLimitMB: 1000 - loggingOverrideLevel: null - # threshold to vacuum and reclaim spaces, default 0.3 (30%) - garbageThreshold: null - # Prometheus push interval in seconds, default 15 - metricsIntervalSec: 15 - # replication type is XYZ: - # X number of replica in other data centers - # Y number of replica in other racks in the same data center - # Z number of replica in other servers in the same rack - defaultReplication: "000" - - # Disable http request, only gRpc operations are allowed - disableHttp: false - - # Resume previous state on start master server - resumeState: false - # Use Hashicorp Raft - raftHashicorp: false - # Whether to bootstrap the Raft cluster. Only use it when use Hashicorp Raft - raftBootstrap: false - - # election timeout of master servers - electionTimeout: "10s" - # heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25) - heartbeatInterval: "300ms" - - # Custom command line arguments to add to the master command - # Example to fix IPv6 metrics connectivity issues: - # extraArgs: ["-metricsIp", "0.0.0.0"] - # Example with multiple args: - # extraArgs: ["-customFlag", "value", "-anotherFlag"] - extraArgs: [] - - config: |- - # Enter any extra configuration for master.toml here. - # It may be a multi-line string. - - # You may use ANY storage-class, example with local-path-provisioner - # Annotations are optional. - # data: - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: - # "key": "value" - # - # You may also spacify an existing claim: - # data: - # type: "existingClaim" - # claimName: "my-pvc" - # - # You can also use emptyDir storage: - # data: - # type: "emptyDir" - data: - type: "hostPath" - storageClass: "" - hostPathPrefix: /ssd - - # You may use ANY storage-class, example with local-path-provisioner - # Annotations are optional. - # logs: - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: - # "key": "value" - - # You can also use emptyDir storage: - # logs: - # type: "emptyDir" - logs: - type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage - - ## @param master.sidecars Add additional sidecar containers to the master pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - initContainers: "" - - extraVolumes: "" - extraVolumeMounts: "" - - # Labels to be added to the master pods - podLabels: {} - - # Annotations to be added to the master pods - podAnnotations: {} - - # Annotations to be added to the master resources - annotations: {} - - ## Set podManagementPolicy - podManagementPolicy: Parallel - - # Resource requests, limits, etc. for the master cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - - # updatePartition is used to control a careful rolling update of SeaweedFS - # masters. - updatePartition: 0 - - # Affinity Settings - # Commenting out or setting as empty the affinity variable, will allow - # deployment to single node services such as Minikube - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master - topologyKey: kubernetes.io/hostname - - # Topology Spread Constraints Settings - # This should map directly to the value of the topologySpreadConstraints - # for a PodSpec. By Default no constraints are set. - topologySpreadConstraints: "" - - # Toleration Settings for master pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: "" - - # nodeSelector labels for master pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - nodeSelector: "" - # nodeSelector: | - # sw-backend: "true" - - # used to assign priority to master pods - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: "" - - # used to assign a service account. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - serviceAccountName: "" - - # Configure security context for Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # podSecurityContext: - # enabled: true - # runAsUser: 1000 - # runAsGroup: 3000 - # fsGroup: 2000 - podSecurityContext: {} - - # Configure security context for Container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # containerSecurityContext: - # enabled: true - # runAsUser: 2000 - # allowPrivilegeEscalation: false - containerSecurityContext: {} - - ingress: - enabled: false - className: "nginx" - # host: false for "*" hostname - host: "master.seaweedfs.local" - path: "/sw-master/?(.*)" - pathType: ImplementationSpecific - annotations: - nginx.ingress.kubernetes.io/auth-type: "basic" - nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" - nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' - nginx.ingress.kubernetes.io/service-upstream: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/use-regex: "true" - nginx.ingress.kubernetes.io/enable-rewrite-log: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" - nginx.ingress.kubernetes.io/configuration-snippet: | - sub_filter '' ' '; #add base url - sub_filter '="/' '="./'; #make absolute paths to relative - sub_filter '=/' '=./'; - sub_filter '/seaweedfsstatic' './seaweedfsstatic'; - sub_filter_once off; - tls: [] - - extraEnvironmentVars: - WEED_MASTER_VOLUME_GROWTH_COPY_1: '7' - WEED_MASTER_VOLUME_GROWTH_COPY_2: '6' - WEED_MASTER_VOLUME_GROWTH_COPY_3: '3' - WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: '1' - - # used to configure livenessProbe on master-server containers - # - livenessProbe: - enabled: true - httpGet: - path: /cluster/status - scheme: HTTP - initialDelaySeconds: 20 - periodSeconds: 30 - successThreshold: 1 - failureThreshold: 4 - timeoutSeconds: 10 - - # used to configure readinessProbe on master-server containers - # - readinessProbe: - enabled: true - httpGet: - path: /cluster/status - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 45 - successThreshold: 2 - failureThreshold: 100 - timeoutSeconds: 10 - -volume: - enabled: true - imageOverride: null - restartPolicy: null - port: 8080 - grpcPort: 18080 - metricsPort: 9327 - metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind - ipBind: "0.0.0.0" - replicas: 1 - loggingOverrideLevel: null - # number of seconds between heartbeats, must be smaller than or equal to the master's setting - pulseSeconds: null - # Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance., default memory - index: null - # limit file size to avoid out of memory, default 256mb - fileSizeLimitMB: null - # minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly - minFreeSpacePercent: 7 - - # Custom command line arguments to add to the volume command - # Example to fix IPv6 metrics connectivity issues: - # extraArgs: ["-metricsIp", "0.0.0.0"] - # Example with multiple args: - # extraArgs: ["-customFlag", "value", "-anotherFlag"] - extraArgs: [] - - # For each data disk you may use ANY storage-class, example with local-path-provisioner - # Annotations are optional. - # dataDirs: - # - name: data - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: - # "key": "value" - # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - # - # You may also spacify an existing claim: - # - name: data - # type: "existingClaim" - # claimName: "my-pvc" - # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - # - # You can also use emptyDir storage: - # - name: data - # type: "emptyDir" - # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - # - # If these don't meet your needs, you can use "custom" here along with extraVolumes and extraVolumeMounts - # Particularly useful when using more than 1 for the volume server replicas. - # - name: data - # type: "custom" - # maxVolumes: 0 # If set to zero on non-windows OS, the limit will be auto configured. (default "7") - - dataDirs: - - name: data1 - type: "hostPath" - hostPathPrefix: /ssd - maxVolumes: 0 - - # - name: data2 - # type: "persistentVolumeClaim" - # storageClass: "yourClassNameOfChoice" - # size: "800Gi" - # maxVolumes: 0 - - # This will automatically create a job for patching Kubernetes resources if the dataDirs type is 'persistentVolumeClaim' and the size has changed. - resizeHook: - enabled: true - image: alpine/k8s:1.28.4 - - # idx can be defined by: - # - # idx: - # type: "hostPath" - # hostPathPrefix: /ssd - # - # or - # - # idx: - # type: "persistentVolumeClaim" - # size: "20Gi" - # storageClass: "local-path-provisioner" - # - # or - # - # idx: - # type: "existingClaim" - # claimName: "myClaim" - # - # or - # - # idx: - # type: "emptyDir" - - # same applies to "logs" - - idx: {} - - logs: {} - - # limit background compaction or copying speed in mega bytes per second - compactionMBps: "50" - - - # Volume server's rack name - rack: null - - # Volume server's data center name - dataCenter: null - - # Redirect moved or non-local volumes. (default proxy) - readMode: proxy - - # Comma separated Ip addresses having write permission. No limit if empty. - whiteList: null - - # Adjust jpg orientation when uploading. - imagesFixOrientation: false - - ## @param volume.sidecars Add additional sidecar containers to the volume pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - initContainers: "" - - # Example for use when using more than 1 volume server replica - # extraVolumeMounts: | - # - name: drive - # mountPath: /drive - # subPathExpr: $(POD_NAME) - # extraVolumes: | - # - name: drive - # hostPath: - # path: /var/mnt/ - extraVolumes: "" - extraVolumeMounts: "" - - # Labels to be added to the volume pods - podLabels: {} - - # Annotations to be added to the volume pods - podAnnotations: {} - - # Annotations to be added to the volume resources - annotations: {} - - ## Set podManagementPolicy - podManagementPolicy: Parallel - - # Affinity Settings - # Commenting out or setting as empty the affinity variable, will allow - # deployment to single node services such as Minikube - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: {{ $volumeName }} - topologyKey: kubernetes.io/hostname - - # Topology Spread Constraints Settings - # This should map directly to the value of the topologySpreadConstraints - # for a PodSpec. By Default no constraints are set. - topologySpreadConstraints: "" - - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - - # Toleration Settings for server pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: "" - - # nodeSelector labels for server pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - nodeSelector: "" - # nodeSelector: | - # sw-volume: "true" - - # used to assign priority to server pods - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: "" - - # used to assign a service account. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - serviceAccountName: "" - - extraEnvironmentVars: - - # Configure security context for Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # podSecurityContext: - # enabled: true - # runAsUser: 1000 - # runAsGroup: 3000 - # fsGroup: 2000 - podSecurityContext: {} - - # Configure security context for Container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # containerSecurityContext: - # enabled: true - # runAsUser: 2000 - # allowPrivilegeEscalation: false - containerSecurityContext: {} - - # used to configure livenessProbe on volume-server containers - # - livenessProbe: - enabled: true - httpGet: - path: /healthz - scheme: HTTP - initialDelaySeconds: 20 - periodSeconds: 90 - successThreshold: 1 - failureThreshold: 4 - timeoutSeconds: 30 - - # used to configure readinessProbe on volume-server containers - # - readinessProbe: - enabled: true - httpGet: - path: /healthz - scheme: HTTP - initialDelaySeconds: 15 - periodSeconds: 15 - successThreshold: 1 - failureThreshold: 100 - timeoutSeconds: 30 - -# Map of named volume groups for topology-aware deployments. -# Each key inherits all fields from the `volume` section but can override -# them locallyโ€”for example, replicas, nodeSelector, dataCenter, etc. -# To switch entirely to this scheme, set `volume.enabled: false` -# and define one entry per zone/data-center under `volumes`. -# -# volumes: -# dc1: -# replicas: 2 -# dataCenter: "dc1" -# nodeSelector: | -# topology.kubernetes.io/zone: dc1 -# dc2: -# replicas: 2 -# dataCenter: "dc2" -# nodeSelector: | -# topology.kubernetes.io/zone: dc2 -# dc3: -# replicas: 2 -# dataCenter: "dc3" -# nodeSelector: | -# topology.kubernetes.io/zone: dc3 -# -volumes: {} - -filer: - enabled: true - imageOverride: null - restartPolicy: null - replicas: 1 - port: 8888 - grpcPort: 18888 - metricsPort: 9327 - metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind - ipBind: "0.0.0.0" # IP address to bind to. Set to 0.0.0.0 to allow external traffic - loggingOverrideLevel: null - filerGroup: "" - # prefer to read and write to volumes in this data center (not set by default) - dataCenter: null - # prefer to write to volumes in this rack (not set by default) - rack: null - # replication type is XYZ: - # X number of replica in other data centers - # Y number of replica in other racks in the same data center - # Z number of replica in other servers in the same rack - defaultReplicaPlacement: "000" - # turn off directory listing - disableDirListing: false - # split files larger than the limit, default 32 - maxMB: null - # encrypt data on volume servers - encryptVolumeData: false - - # Whether proxy or redirect to volume server during file GET request - redirectOnRead: false - - # Limit sub dir listing size (default 100000) - dirListLimit: 100000 - - # Disable http request, only gRpc operations are allowed - disableHttp: false - - # Custom command line arguments to add to the filer command - # Example to fix IPv6 metrics connectivity issues: - # extraArgs: ["-metricsIp", "0.0.0.0"] - # Example with multiple args: - # extraArgs: ["-customFlag", "value", "-anotherFlag"] - extraArgs: [] - - # Add a custom notification.toml to configure filer notifications - # Example: - # notificationConfig: |- - # [notification.kafka] - # enabled = false - # hosts = [ - # "localhost:9092" - # ] - # topic = "seaweedfs_filer" - # offsetFile = "./last.offset" - # offsetSaveIntervalSeconds = 10 - notificationConfig: "" - - # DEPRECATE: enablePVC, storage, storageClass - # Consider replacing with filer.data section below instead. - - # Settings for configuring stateful storage of filer pods. - # enablePVC will create a pvc for filer for data persistence. - enablePVC: false - # storage should be set to the disk size of the attached volume. - storage: 25Gi - # storageClass is the class of storage which defaults to null (the Kube cluster will pick the default). - storageClass: null - # You may use ANY storage-class, example with local-path-provisioner - # Annotations are optional. - # data: - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: - # "key": "value" - # - # You may also specify an existing claim: - # data: - # type: "existingClaim" - # claimName: "my-pvc" - # - # You can also use emptyDir storage: - # data: - # type: "emptyDir" - data: - type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage - - # You may use ANY storage-class, example with local-path-provisioner - # Annotations are optional. - # logs: - # type: "persistentVolumeClaim" - # size: "24Ti" - # storageClass: "local-path-provisioner" - # annotations: - # "key": "value" - - # You can also use emptyDir storage: - # logs: - # type: "emptyDir" - logs: - type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage - - ## @param filer.sidecars Add additional sidecar containers to the filer pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - initContainers: "" - - extraVolumes: "" - extraVolumeMounts: "" - - # Labels to be added to the filer pods - podLabels: {} - - # Annotations to be added to the filer pods - podAnnotations: {} - - # Annotations to be added to the filer resource - annotations: {} - - ## Set podManagementPolicy - podManagementPolicy: Parallel - - # Affinity Settings - # Commenting out or setting as empty the affinity variable, will allow - # deployment to single node services such as Minikube - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer - topologyKey: kubernetes.io/hostname - - # Topology Spread Constraints Settings - # This should map directly to the value of the topologySpreadConstraints - # for a PodSpec. By Default no constraints are set. - topologySpreadConstraints: "" - - # updatePartition is used to control a careful rolling update of SeaweedFS - # masters. - updatePartition: 0 - - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - - # Toleration Settings for server pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: "" - - # nodeSelector labels for server pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - nodeSelector: "" - # nodeSelector: | - # sw-backend: "true" - - # used to assign priority to server pods - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: "" - - # used to assign a service account. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - serviceAccountName: "" - - # Configure security context for Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # podSecurityContext: - # enabled: true - # runAsUser: 1000 - # runAsGroup: 3000 - # fsGroup: 2000 - podSecurityContext: {} - - # Configure security context for Container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # containerSecurityContext: - # enabled: true - # runAsUser: 2000 - # allowPrivilegeEscalation: false - containerSecurityContext: {} - - ingress: - enabled: false - className: "nginx" - # host: false for "*" hostname - host: "seaweedfs.cluster.local" - path: "/sw-filer/?(.*)" - pathType: ImplementationSpecific - annotations: - nginx.ingress.kubernetes.io/backend-protocol: GRPC - nginx.ingress.kubernetes.io/auth-type: "basic" - nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" - nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' - nginx.ingress.kubernetes.io/service-upstream: "true" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/use-regex: "true" - nginx.ingress.kubernetes.io/enable-rewrite-log: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" - nginx.ingress.kubernetes.io/configuration-snippet: | - sub_filter '' ' '; #add base url - sub_filter '="/' '="./'; #make absolute paths to relative - sub_filter '=/' '=./'; - sub_filter '/seaweedfsstatic' './seaweedfsstatic'; - sub_filter_once off; - - # extraEnvVars is a list of extra environment variables to set with the stateful set. - extraEnvironmentVars: - WEED_MYSQL_ENABLED: "false" - WEED_MYSQL_HOSTNAME: "mysql-db-host" - WEED_MYSQL_PORT: "3306" - WEED_MYSQL_DATABASE: "sw_database" - WEED_MYSQL_CONNECTION_MAX_IDLE: "5" - WEED_MYSQL_CONNECTION_MAX_OPEN: "75" - # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600" - # enable usage of memsql as filer backend - WEED_MYSQL_INTERPOLATEPARAMS: "true" - # if you want to use leveldb2, then should enable "enablePVC". or you may lose your data. - WEED_LEVELDB2_ENABLED: "true" - # with http DELETE, by default the filer would check whether a folder is empty. - # recursive_delete will delete all sub folders and files, similar to "rm -Rf" - WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false" - # directories under this folder will be automatically creating a separate bucket - WEED_FILER_BUCKETS_FOLDER: "/buckets" - - # used to configure livenessProbe on filer containers - # - livenessProbe: - enabled: true - httpGet: - path: / - scheme: HTTP - initialDelaySeconds: 20 - periodSeconds: 30 - successThreshold: 1 - failureThreshold: 5 - timeoutSeconds: 10 - - # used to configure readinessProbe on filer containers - # - readinessProbe: - enabled: true - httpGet: - path: / - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 15 - successThreshold: 1 - failureThreshold: 100 - timeoutSeconds: 10 - - # secret env variables - secretExtraEnvironmentVars: {} - # WEED_POSTGRES_USERNAME: - # secretKeyRef: - # name: postgres-credentials - # key: username - # WEED_POSTGRES_PASSWORD: - # secretKeyRef: - # name: postgres-credentials - # key: password - - s3: - enabled: false - port: 8333 - # add additional https port - httpsPort: 0 - # allow empty folders - allowEmptyFolder: false - # Suffix of the host name, {bucket}.{domainName} - domainName: "" - # enable user & permission to s3 (need to inject to all services) - enableAuth: false - # set to the name of an existing kubernetes Secret with the s3 json config file - # should have a secret key called seaweedfs_s3_config with an inline json configure - existingConfigSecret: null - auditLogConfig: {} - # You may specify buckets to be created during the install process. - # Buckets may be exposed publicly by setting `anonymousRead` to `true` - # createBuckets: - # - name: bucket-a - # anonymousRead: true - # - name: bucket-b - # anonymousRead: false - -s3: - enabled: true - imageOverride: null - restartPolicy: null - replicas: 1 - bindAddress: 0.0.0.0 - port: 8333 - # add additional https port - httpsPort: 0 - metricsPort: 9327 - loggingOverrideLevel: null - # allow empty folders - allowEmptyFolder: true - # enable user & permission to s3 (need to inject to all services) - enableAuth: false - # set to the name of an existing kubernetes Secret with the s3 json config file - # should have a secret key called seaweedfs_s3_config with an inline json config - existingConfigSecret: null - auditLogConfig: {} - - # Suffix of the host name, {bucket}.{domainName} - domainName: "" - - ## @param s3.sidecars Add additional sidecar containers to the s3 pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - initContainers: "" - - extraVolumes: "" - extraVolumeMounts: "" - - # Labels to be added to the s3 pods - podLabels: {} - - # Annotations to be added to the s3 pods - podAnnotations: {} - - # Annotations to be added to the s3 resources - annotations: {} - - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - - # Toleration Settings for server pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: "" - - # nodeSelector labels for server pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - nodeSelector: "" - # nodeSelector: | - # sw-backend: "true" - - # used to assign priority to server pods - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: "" - - # used to assign a service account. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - serviceAccountName: "" - - # Configure security context for Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # podSecurityContext: - # enabled: true - # runAsUser: 1000 - # runAsGroup: 3000 - # fsGroup: 2000 - podSecurityContext: {} - - # Configure security context for Container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # containerSecurityContext: - # enabled: true - # runAsUser: 2000 - # allowPrivilegeEscalation: false - containerSecurityContext: {} - - # You can also use emptyDir storage: - # logs: - # type: "emptyDir" - logs: - type: "hostPath" - size: "" - storageClass: "" - hostPathPrefix: /storage - - extraEnvironmentVars: - - # Custom command line arguments to add to the s3 command - # Example to fix connection idle seconds: - extraArgs: ["-idleTimeout=30"] - #extraArgs: [] - - # used to configure livenessProbe on s3 containers - # - livenessProbe: - enabled: true - httpGet: - path: /status - scheme: HTTP - initialDelaySeconds: 20 - periodSeconds: 60 - successThreshold: 1 - failureThreshold: 20 - timeoutSeconds: 10 - - # used to configure readinessProbe on s3 containers - # - readinessProbe: - enabled: true - httpGet: - path: /status - scheme: HTTP - initialDelaySeconds: 15 - periodSeconds: 15 - successThreshold: 1 - failureThreshold: 100 - timeoutSeconds: 10 - - ingress: - enabled: false - className: "nginx" - # host: false for "*" hostname - host: "seaweedfs.cluster.local" - path: "/" - pathType: Prefix - # additional ingress annotations for the s3 endpoint - annotations: {} - tls: [] - -sftp: - enabled: false - imageOverride: null - restartPolicy: null - replicas: 1 - bindAddress: 0.0.0.0 - port: 2022 # Default SFTP port - metricsPort: 9327 - metricsIp: "" # If empty, defaults to bindAddress - loggingOverrideLevel: null - - # SSH server configuration - sshPrivateKey: "/etc/sw/seaweedfs_sftp_ssh_private_key" # Path to the SSH private key file for host authentication - hostKeysFolder: "/etc/sw/ssh" # path to folder containing SSH private key files for host authentication - authMethods: "password,publickey" # Comma-separated list of allowed auth methods: password, publickey, keyboard-interactive - maxAuthTries: 6 # Maximum number of authentication attempts per connection - bannerMessage: "SeaweedFS SFTP Server" # Message displayed before authentication - loginGraceTime: "2m" # Timeout for authentication - clientAliveInterval: "5s" # Interval for sending keep-alive messages - clientAliveCountMax: 3 # Maximum number of missed keep-alive messages before disconnecting - dataCenter: "" # Prefer to read and write to volumes in this data center - localSocket: "" # Default to /tmp/seaweedfs-sftp-.sock - - # User authentication - enableAuth: false - # Set to the name of an existing kubernetes Secret with the sftp json config file - # Should have a secret key called seaweedfs_sftp_config with an inline json config - existingConfigSecret: null - # Set to the name of an existing kubernetes Secret with the list of ssh private keys for sftp - existingSshConfigSecret: null - - # Additional resources - sidecars: [] - initContainers: "" - extraVolumes: "" - extraVolumeMounts: "" - podLabels: {} - podAnnotations: {} - annotations: {} - resources: {} - tolerations: "" - nodeSelector: "" - priorityClassName: "" - serviceAccountName: "" - podSecurityContext: {} - containerSecurityContext: {} - - logs: - type: "hostPath" - hostPathPrefix: /storage - - extraEnvironmentVars: {} - - # Health checks - # Health checks for SFTP - using tcpSocket instead of httpGet - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 60 - successThreshold: 1 - failureThreshold: 20 - timeoutSeconds: 10 - - # Health checks for SFTP - using tcpSocket instead of httpGet - readinessProbe: - enabled: true - initialDelaySeconds: 15 - periodSeconds: 15 - successThreshold: 1 - failureThreshold: 100 - timeoutSeconds: 10 - -# All-in-one deployment configuration -allInOne: - enabled: false - imageOverride: null - restartPolicy: Always - - # Core configuration - idleTimeout: 30 # Connection idle seconds - dataCenter: "" # Current volume server's data center name - rack: "" # Current volume server's rack name - whiteList: "" # Comma separated IP addresses having write permission - disableHttp: false # Disable HTTP requests, only gRPC operations are allowed - metricsPort: 9324 # Prometheus metrics listen port - metricsIp: "" # Metrics listen IP. If empty, defaults to bindAddress - loggingOverrideLevel: null # Override logging level - - # Service configuration - s3: - enabled: false # Whether to enable S3 gateway - sftp: - enabled: false # Whether to enable SFTP server - - # Service settings - service: - annotations: {} # Annotations for the service - type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer) - - # Storage configuration - data: - type: "emptyDir" # Options: "hostPath", "persistentVolumeClaim", "emptyDir" - hostPathPrefix: /mnt/data # Path prefix for hostPath volumes - claimName: seaweedfs-data-pvc # Name of the PVC to use - size: "" # Size of the PVC - storageClass: "" # Storage class for the PVC - - # Health checks - readinessProbe: - enabled: true - httpGet: - path: /cluster/status - port: 9333 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 15 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 5 - - livenessProbe: - enabled: true - httpGet: - path: /cluster/status - port: 9333 - scheme: HTTP - initialDelaySeconds: 20 - periodSeconds: 30 - successThreshold: 1 - failureThreshold: 5 - timeoutSeconds: 5 - - # Additional resources - extraEnvironmentVars: {} # Additional environment variables - extraVolumeMounts: "" # Additional volume mounts - extraVolumes: "" # Additional volumes - initContainers: "" # Init containers - sidecars: "" # Sidecar containers - annotations: {} # Annotations for the deployment - podAnnotations: {} # Annotations for the pods - podLabels: {} # Labels for the pods - - # Scheduling configuration - # Affinity Settings - # Commenting out or setting as empty the affinity variable, will allow - # deployment to single node services such as Minikube - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master - topologyKey: kubernetes.io/hostname - - # Topology Spread Constraints Settings - # This should map directly to the value of the topologySpreadConstraints - # for a PodSpec. By Default no constraints are set. - topologySpreadConstraints: "" - - # Toleration Settings for master pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: "" - - # nodeSelector labels for master pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - nodeSelector: "" - - # Used to assign priority to master pods - # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - priorityClassName: "" - - # Used to assign a service account. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - serviceAccountName: "" - - # Configure security context for Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # podSecurityContext: - # enabled: true - # runAsUser: 1000 - # runAsGroup: 3000 - # fsGroup: 2000 - podSecurityContext: {} - - # Configure security context for Container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - # Example: - # containerSecurityContext: - # enabled: true - # runAsUser: 2000 - # allowPrivilegeEscalation: false - containerSecurityContext: {} - - # Resource management - resources: - limits: - cpu: "2" - memory: "2Gi" - requests: - cpu: "500m" - memory: "1Gi" - -# Deploy Kubernetes COSI Driver for SeaweedFS -# Requires COSI CRDs and controller to be installed in the cluster -# For more information, visit: https://container-object-storage-interface.github.io/docs/deployment-guide -cosi: - enabled: false - image: "ghcr.io/seaweedfs/seaweedfs-cosi-driver:v0.1.2" - driverName: "seaweedfs.objectstorage.k8s.io" - bucketClassName: "seaweedfs" - endpoint: "" - region: "" - - sidecar: - image: gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20250711-controllerv0.2.0-rc1-80-gc2f6e65 - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - - # enable user & permission to s3 (need to inject to all services) - enableAuth: false - # set to the name of an existing kubernetes Secret with the s3 json config file - # should have a secret key called seaweedfs_s3_config with an inline json configure - existingConfigSecret: null - - podSecurityContext: {} - containerSecurityContext: {} - - extraVolumes: "" - extraVolumeMounts: "" - - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec, - # formatted as a multi-line string. By default no direct resource request - # is made. - resources: {} - -certificates: - commonName: "SeaweedFS CA" - ipAddresses: [] - keyAlgorithm: RSA - keySize: 2048 - duration: 2160h # 90d - renewBefore: 360h # 15d - externalCertificates: - # This will avoid the need to use cert-manager and will rely on providing your own external certificates and CA - # you will need to store your provided certificates in the secret read by the different services: - # seaweedfs-master-cert, seaweedfs-filer-cert, etc. Can see any statefulset definition to see secret names - enabled: false - -# Labels to be added to all the created pods -podLabels: {} -# Annotations to be added to all the created pods -podAnnotations: {} diff --git a/k8s/charts/seaweedfs/.helmignore b/k8s/helm_charts2/.helmignore similarity index 100% rename from k8s/charts/seaweedfs/.helmignore rename to k8s/helm_charts2/.helmignore diff --git a/k8s/helm_charts2/Chart.yaml b/k8s/helm_charts2/Chart.yaml new file mode 100644 index 000000000..a92ba313e --- /dev/null +++ b/k8s/helm_charts2/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +description: SeaweedFS +name: seaweedfs +appVersion: "3.16" +version: "3.16" diff --git a/k8s/helm_charts2/README.md b/k8s/helm_charts2/README.md new file mode 100644 index 000000000..c5615522c --- /dev/null +++ b/k8s/helm_charts2/README.md @@ -0,0 +1,48 @@ +## SEAWEEDFS - helm chart (2.x) + +### info: +* master/filer/volume are stateful sets with anti-affinity on the hostname, +so your deployment will be spread/HA. +* chart is using memsql(mysql) as the filer backend to enable HA (multiple filer instances) +and backup/HA memsql can provide. +* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected to the filer +with ENV. +* cert config exists and can be enabled, but not been tested. + +### prerequisites +kubernetes node have labels which help to define which node(Host) will run which pod. + +s3/filer/master needs the label **sw-backend=true** + +volume need the label **sw-volume=true** + +to label a node to be able to run all pod types in k8s: +``` +kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true +``` + +on production k8s deployment you will want each pod to have a different host, +especially the volume server & the masters, currently all pods (master/volume/filer) +have anti-affinity rule to disallow running multiple pod type on the same host. +if you still want to run multiple pods of the same type (master/volume/filer) on the same host +please set/update the corresponding affinity rule in values.yaml to an empty one: + +```affinity: ""``` + +### PVC - storage class ### + +on the volume stateful set added support for K8S PVC, currently example +with the simple local-path-provisioner from Rancher (comes included with k3d / k3s) +https://github.com/rancher/local-path-provisioner + +you can use ANY storage class you like, just update the correct storage-class +for your deployment. + +### current instances config (AIO): +1 instance for each type (master/filer+s3/volume) + +you can update the replicas count for each node type in values.yaml, +need to add more nodes with the corresponding labels. + +most of the configuration are available through values.yaml + diff --git a/k8s/helm_charts2/dashboards/seaweedfs-grafana-dashboard.json b/k8s/helm_charts2/dashboards/seaweedfs-grafana-dashboard.json new file mode 100644 index 000000000..d492a0695 --- /dev/null +++ b/k8s/helm_charts2/dashboards/seaweedfs-grafana-dashboard.json @@ -0,0 +1,1856 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS-DEV", + "label": "prometheus-dev", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.6.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_PROMETHEUS-DEV}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 56, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 57, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 55, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_s3_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 API QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "hideTimeOverride": false, + "id": 59, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + "refId": "A", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "All PUT, COPY, POST, LIST", + "refId": "C", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "GET and all other", + "refId": "B" + }, + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": "1M", + "timeShift": null, + "title": "S3 API Monthly Cost if on AWS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": "Cost in US$", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "currencyUSD", + "label": "Write Cost", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "S3 Gateway", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 252, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{quantile}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 2 +} \ No newline at end of file diff --git a/k8s/helm_charts2/templates/_helpers.tpl b/k8s/helm_charts2/templates/_helpers.tpl new file mode 100644 index 000000000..688efaa23 --- /dev/null +++ b/k8s/helm_charts2/templates/_helpers.tpl @@ -0,0 +1,136 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). If release name contains chart name it will +be used as a full name. +*/}} +{{- define "seaweedfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "seaweedfs.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "seaweedfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "seaweedfs.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper filer image */}} +{{- define "filer.image" -}} +{{- if .Values.filer.imageOverride -}} +{{- $imageOverride := .Values.filer.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper dbSchema image */}} +{{- define "filer.dbSchema.image" -}} +{{- if .Values.filer.dbSchema.imageOverride -}} +{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.global.repository | toString -}} +{{- $name := .Values.filer.dbSchema.imageName | toString -}} +{{- $tag := .Values.filer.dbSchema.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper master image */}} +{{- define "master.image" -}} +{{- if .Values.master.imageOverride -}} +{{- $imageOverride := .Values.master.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper s3 image */}} +{{- define "s3.image" -}} +{{- if .Values.s3.imageOverride -}} +{{- $imageOverride := .Values.s3.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper volume image */}} +{{- define "volume.image" -}} +{{- if .Values.volume.imageOverride -}} +{{- $imageOverride := .Values.volume.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* check if any PVC exists */}} +{{- define "volume.pvc_exists" -}} +{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} + +{{/* check if any HostPath exists */}} +{{- define "volume.hostpath_exists" -}} +{{- if or (or (eq .Values.volume.data.type "hostPath") (and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "hostPath") -}} +{{- printf "true" -}} +{{- else -}} +{{- if or .Values.global.enableSecurity .Values.volume.extraVolumes -}} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/k8s/helm_charts2/templates/ca-cert.yaml b/k8s/helm_charts2/templates/ca-cert.yaml new file mode 100644 index 000000000..056f01502 --- /dev/null +++ b/k8s/helm_charts2/templates/ca-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-ca-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + commonName: "{{ template "seaweedfs.name" . }}-root-ca" + isCA: true + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer +{{- end }} diff --git a/k8s/helm_charts2/templates/cert-clusterissuer.yaml b/k8s/helm_charts2/templates/cert-clusterissuer.yaml new file mode 100644 index 000000000..d0bd42593 --- /dev/null +++ b/k8s/helm_charts2/templates/cert-clusterissuer.yaml @@ -0,0 +1,8 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: ClusterIssuer +metadata: + name: {{ template "seaweedfs.name" . }}-clusterissuer +spec: + selfSigned: {} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cert/client-cert.yaml b/k8s/helm_charts2/templates/client-cert.yaml similarity index 59% rename from k8s/charts/seaweedfs/templates/cert/client-cert.yaml rename to k8s/helm_charts2/templates/client-cert.yaml index bda132a02..4d27b5659 100644 --- a/k8s/charts/seaweedfs/templates/cert/client-cert.yaml +++ b/k8s/helm_charts2/templates/client-cert.yaml @@ -1,22 +1,16 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 kind: Certificate metadata: name: {{ template "seaweedfs.name" . }}-client-cert namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} spec: secretName: {{ template "seaweedfs.name" . }}-client-cert issuerRef: - name: {{ template "seaweedfs.name" . }}-ca-issuer - kind: Issuer + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer commonName: {{ .Values.certificates.commonName }} - subject: - organizations: + organization: - "SeaweedFS CA" dnsNames: - '*.{{ .Release.Namespace }}' @@ -32,9 +26,8 @@ spec: - {{ . }} {{- end }} {{- end }} - privateKey: - algorithm: {{ .Values.certificates.keyAlgorithm }} - size: {{ .Values.certificates.keySize }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} duration: {{ .Values.certificates.duration }} renewBefore: {{ .Values.certificates.renewBefore }} {{- end }} diff --git a/k8s/charts/seaweedfs/templates/cert/filer-cert.yaml b/k8s/helm_charts2/templates/filer-cert.yaml similarity index 54% rename from k8s/charts/seaweedfs/templates/cert/filer-cert.yaml rename to k8s/helm_charts2/templates/filer-cert.yaml index 4cb117ae8..855183c54 100644 --- a/k8s/charts/seaweedfs/templates/cert/filer-cert.yaml +++ b/k8s/helm_charts2/templates/filer-cert.yaml @@ -1,27 +1,16 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 kind: Certificate metadata: name: {{ template "seaweedfs.name" . }}-filer-cert namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer - {{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} - {{- end }} spec: secretName: {{ template "seaweedfs.name" . }}-filer-cert issuerRef: - name: {{ template "seaweedfs.name" . }}-ca-issuer - kind: Issuer + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer commonName: {{ .Values.certificates.commonName }} - subject: - organizations: + organization: - "SeaweedFS CA" dnsNames: - '*.{{ .Release.Namespace }}' @@ -37,9 +26,8 @@ spec: - {{ . }} {{- end }} {{- end }} - privateKey: - algorithm: {{ .Values.certificates.keyAlgorithm }} - size: {{ .Values.certificates.keySize }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} duration: {{ .Values.certificates.duration }} renewBefore: {{ .Values.certificates.renewBefore }} {{- end }} diff --git a/k8s/charts/seaweedfs/templates/filer/filer-service-client.yaml b/k8s/helm_charts2/templates/filer-service-client.yaml similarity index 55% rename from k8s/charts/seaweedfs/templates/filer/filer-service-client.yaml rename to k8s/helm_charts2/templates/filer-service-client.yaml index 1c32de0ba..929b6f8bc 100644 --- a/k8s/charts/seaweedfs/templates/filer/filer-service-client.yaml +++ b/k8s/helm_charts2/templates/filer-service-client.yaml @@ -1,22 +1,14 @@ -{{- if .Values.filer.enabled }} apiVersion: v1 kind: Service metadata: name: {{ template "seaweedfs.name" . }}-filer-client namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: filer + app: {{ template "seaweedfs.name" . }} + component: filer {{- if .Values.filer.metricsPort }} monitoring: "true" {{- end }} -{{- if .Values.filer.annotations }} - annotations: - {{- toYaml .Values.filer.annotations | nindent 4 }} -{{- end }} spec: clusterIP: None ports: @@ -35,6 +27,5 @@ spec: protocol: TCP {{- end }} selector: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - app.kubernetes.io/component: filer -{{- end }} + app: {{ template "seaweedfs.name" . }} + component: filer diff --git a/k8s/helm_charts2/templates/filer-service.yaml b/k8s/helm_charts2/templates/filer-service.yaml new file mode 100644 index 000000000..45035fc27 --- /dev/null +++ b/k8s/helm_charts2/templates/filer-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: filer diff --git a/k8s/helm_charts2/templates/filer-servicemonitor.yaml b/k8s/helm_charts2/templates/filer-servicemonitor.yaml new file mode 100644 index 000000000..ed45442dc --- /dev/null +++ b/k8s/helm_charts2/templates/filer-servicemonitor.yaml @@ -0,0 +1,20 @@ +{{- if .Values.filer.metricsPort }} +{{- if .Values.global.monitoring.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + endpoints: + - interval: 30s + port: swfs-filer-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: filer +{{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/helm_charts2/templates/filer-statefulset.yaml b/k8s/helm_charts2/templates/filer-statefulset.yaml new file mode 100644 index 000000000..21a4256be --- /dev/null +++ b/k8s/helm_charts2/templates/filer-statefulset.yaml @@ -0,0 +1,269 @@ +{{- if .Values.filer.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-filer + podManagementPolicy: Parallel + replicas: {{ .Values.filer.replicas }} + {{- if (gt (int .Values.filer.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.filer.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} + {{- if .Values.filer.affinity }} + affinity: + {{ tpl .Values.filer.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.filer.tolerations }} + tolerations: + {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + serviceAccountName: seaweedfs-rw-sa #hack for delete pod master after migration + terminationGracePeriodSeconds: 60 + {{- if .Values.filer.priorityClassName }} + priorityClassName: {{ .Values.filer.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "filer.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEED_MYSQL_USERNAME + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: user + - name: WEED_MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: password + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.filer.extraEnvironmentVars }} + {{- range $key, $value := .Values.filer.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.filer.loggingOverrideLevel }} + -v={{ .Values.filer.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + filer \ + -port={{ .Values.filer.port }} \ + {{- if .Values.filer.metricsPort }} + -metricsPort {{ .Values.filer.metricsPort }} \ + {{- end }} + {{- if .Values.filer.redirectOnRead }} + -redirectOnRead \ + {{- end }} + {{- if .Values.filer.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + -dirListLimit={{ .Values.filer.dirListLimit }} \ + {{- if .Values.global.enableReplication }} + -defaultReplicaPlacement={{ .Values.global.replicationPlacment }} \ + {{- else }} + -defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + {{- if .Values.filer.maxMB }} + -maxMB={{ .Values.filer.maxMB }} \ + {{- end }} + {{- if .Values.filer.encryptVolumeData }} + -encryptVolumeData \ + {{- end }} + -ip=${POD_IP} \ + {{- if .Values.filer.s3.enabled }} + -s3 \ + -s3.port={{ .Values.filer.s3.port }} \ + {{- if .Values.filer.s3.domainName }} + -s3.domainName={{ .Values.filer.s3.domainName }} \ + {{- end }} + {{- if .Values.global.enableSecurity }} + -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.filer.s3.allowEmptyFolder }} + -s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \ + {{- end }} + {{- if .Values.filer.s3.enableAuth }} + -s3.config=/etc/sw/seaweedfs_s3_config \ + {{- end }} + {{- if .Values.filer.s3.auditLogConfig }} + -s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \ + {{- end }} + {{- end }} + -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: seaweedfs-filer-log-volume + mountPath: "/logs/" + - mountPath: /etc/sw + name: config-users + readOnly: true + {{- if .Values.filer.enablePVC }} + - name: data-filer + mountPath: /data + {{- end }} + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.filer.port }} + name: swfs-filer + - containerPort: {{ .Values.filer.grpcPort }} + #name: swfs-filer-grpc + readinessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 5 + timeoutSeconds: 10 + {{- if .Values.filer.resources }} + resources: + {{ tpl .Values.filer.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-filer-log-volume + hostPath: + path: /storage/logs/seaweedfs/filer + type: DirectoryOrCreate + - name: db-schema-config-volume + configMap: + name: seaweedfs-db-init-config + - name: config-users + secret: + defaultMode: 420 + secretName: seaweedfs-s3-secret + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} + {{- if .Values.filer.nodeSelector }} + nodeSelector: + {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- if .Values.filer.enablePVC }} + volumeClaimTemplates: + - metadata: + name: data-filer + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.filer.storage }} + {{- if .Values.filer.storageClass }} + storageClassName: {{ .Values.filer.storageClass }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/helm_charts2/templates/ingress.yaml b/k8s/helm_charts2/templates/ingress.yaml new file mode 100644 index 000000000..f488ef67e --- /dev/null +++ b/k8s/helm_charts2/templates/ingress.yaml @@ -0,0 +1,67 @@ +{{- if .Values.filer.ingress.enabled }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + annotations: + {{ omit .Values.filer.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }} +spec: + ingressClassName: {{ .Values.filer.ingress.className | quote }} + rules: + - http: + paths: + - path: /sw-filer/?(.*) + pathType: ImplementationSpecific + backend: +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + service: + name: {{ template "seaweedfs.name" . }}-filer + port: + number: {{ .Values.filer.port }} + #name: +{{- else }} + serviceName: {{ template "seaweedfs.name" . }}-filer + servicePort: {{ .Values.filer.port }} +{{- end }} +{{- end }} +--- +{{- if .Values.master.ingress.enabled }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + annotations: + {{ omit .Values.master.ingress.annotations "kubernetes.io/ingress.class" | toYaml | nindent 4 }} +spec: + ingressClassName: {{ .Values.master.ingress.className | quote }} + rules: + - http: + paths: + - path: /sw-master/?(.*) + pathType: ImplementationSpecific + backend: +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + service: + name: {{ template "seaweedfs.name" . }}-master + port: + number: {{ .Values.master.port }} + #name: +{{- else }} + serviceName: {{ template "seaweedfs.name" . }}-master + servicePort: {{ .Values.master.port }} +{{- end }} +{{- end }} diff --git a/k8s/charts/seaweedfs/templates/cert/master-cert.yaml b/k8s/helm_charts2/templates/master-cert.yaml similarity index 54% rename from k8s/charts/seaweedfs/templates/cert/master-cert.yaml rename to k8s/helm_charts2/templates/master-cert.yaml index 256785254..a8b0fc1d1 100644 --- a/k8s/charts/seaweedfs/templates/cert/master-cert.yaml +++ b/k8s/helm_charts2/templates/master-cert.yaml @@ -1,27 +1,16 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 kind: Certificate metadata: name: {{ template "seaweedfs.name" . }}-master-cert namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: master -{{- if .Values.master.annotations }} - annotations: - {{- toYaml .Values.master.annotations | nindent 4 }} -{{- end }} spec: secretName: {{ template "seaweedfs.name" . }}-master-cert issuerRef: - name: {{ template "seaweedfs.name" . }}-ca-issuer - kind: Issuer + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer commonName: {{ .Values.certificates.commonName }} - subject: - organizations: + organization: - "SeaweedFS CA" dnsNames: - '*.{{ .Release.Namespace }}' @@ -37,9 +26,8 @@ spec: - {{ . }} {{- end }} {{- end }} - privateKey: - algorithm: {{ .Values.certificates.keyAlgorithm }} - size: {{ .Values.certificates.keySize }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} duration: {{ .Values.certificates.duration }} renewBefore: {{ .Values.certificates.renewBefore }} {{- end }} diff --git a/k8s/helm_charts2/templates/master-service.yaml b/k8s/helm_charts2/templates/master-service.yaml new file mode 100644 index 000000000..0ce467538 --- /dev/null +++ b/k8s/helm_charts2/templates/master-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: master + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "swfs-master" + port: {{ .Values.master.port }} + targetPort: {{ .Values.master.port }} + protocol: TCP + - name: "swfs-master-grpc" + port: {{ .Values.master.grpcPort }} + targetPort: {{ .Values.master.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: master diff --git a/k8s/helm_charts2/templates/master-statefulset.yaml b/k8s/helm_charts2/templates/master-statefulset.yaml new file mode 100644 index 000000000..e5a7a537a --- /dev/null +++ b/k8s/helm_charts2/templates/master-statefulset.yaml @@ -0,0 +1,227 @@ +{{- if .Values.master.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-master + podManagementPolicy: Parallel + replicas: {{ .Values.master.replicas }} + {{- if (gt (int .Values.master.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.master.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} + {{- if .Values.master.affinity }} + affinity: + {{ tpl .Values.master.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: + {{ tpl .Values.master.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "master.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.master.extraEnvironmentVars }} + {{- range $key, $value := .Values.master.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.master.loggingOverrideLevel }} + -v={{ .Values.master.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + master \ + -port={{ .Values.master.port }} \ + -mdir=/data \ + -ip.bind={{ .Values.master.ipBind }} \ + {{- if .Values.global.enableReplication }} + -defaultReplication={{ .Values.global.replicationPlacment }} \ + {{- else }} + -defaultReplication={{ .Values.master.defaultReplication }} \ + {{- end }} + {{- if .Values.master.volumePreallocate }} + -volumePreallocate \ + {{- end }} + {{- if .Values.global.monitoring.enabled }} + -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ + {{- end }} + -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ + {{- if .Values.master.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.master.pulseSeconds }} + -pulseSeconds={{ .Values.master.pulseSeconds }} \ + {{- end }} + {{- if .Values.master.garbageThreshold }} + -garbageThreshold={{ .Values.master.garbageThreshold }} \ + {{- end }} + {{- if .Values.master.metricsIntervalSec }} + -metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \ + -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name : data-{{ .Release.Namespace }} + mountPath: /data + - name: seaweedfs-master-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.master.port }} + name: swfs-master + - containerPort: {{ .Values.master.grpcPort }} + #name: swfs-master-grpc + readinessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 45 + successThreshold: 2 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 4 + timeoutSeconds: 10 + {{- if .Values.master.resources }} + resources: + {{ tpl .Values.master.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-master-log-volume + hostPath: + path: /storage/logs/seaweedfs/master + type: DirectoryOrCreate + - name: data-{{ .Release.Namespace }} + hostPath: + path: /ssd/seaweed-master/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} + {{- if .Values.master.nodeSelector }} + nodeSelector: + {{ tpl .Values.master.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.master.storage }}*/}} +{{/* {{- if .Values.master.storageClass }}*/}} +{{/* storageClassName: {{ .Values.master.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/helm_charts2/templates/s3-deployment.yaml b/k8s/helm_charts2/templates/s3-deployment.yaml new file mode 100644 index 000000000..aac09f328 --- /dev/null +++ b/k8s/helm_charts2/templates/s3-deployment.yaml @@ -0,0 +1,190 @@ +{{- if .Values.s3.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: {{ .Values.s3.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} + {{- if .Values.s3.tolerations }} + tolerations: + {{ tpl .Values.s3.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.s3.priorityClassName }} + priorityClassName: {{ .Values.s3.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "s3.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.s3.loggingOverrideLevel }} + -v={{ .Values.s3.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + s3 \ + -port={{ .Values.s3.port }} \ + {{- if .Values.s3.metricsPort }} + -metricsPort {{ .Values.s3.metricsPort }} \ + {{- end }} + {{- if .Values.global.enableSecurity }} + -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.s3.domainName }} + -domainName={{ .Values.s3.domainName }} \ + {{- end }} + {{- if .Values.s3.allowEmptyFolder }} + -allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \ + {{- end }} + {{- if .Values.s3.enableAuth }} + -config=/etc/sw/seaweedfs_s3_config \ + {{- end }} + {{- if .Values.s3.auditLogConfig }} + -auditLogConfig=/etc/sw/s3_auditLogConfig.json \ + {{- end }} + -filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }} + volumeMounts: + - name: logs + mountPath: "/logs/" + - mountPath: /etc/sw + name: config-users + readOnly: true + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.s3.port }} + name: swfs-s3 + readinessProbe: + httpGet: + path: /status + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 60 + successThreshold: 1 + failureThreshold: 20 + timeoutSeconds: 10 + {{- if .Values.s3.resources }} + resources: + {{ tpl .Values.s3.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: config-users + secret: + defaultMode: 420 + secretName: seaweedfs-s3-secret + {{- if eq .Values.s3.logs.type "hostPath" }} + - name: logs + hostPath: + path: /storage/logs/seaweedfs/s3 + type: DirectoryOrCreate + {{- end }} + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} + {{- if .Values.s3.nodeSelector }} + nodeSelector: + {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/helm_charts2/templates/s3-service.yaml b/k8s/helm_charts2/templates/s3-service.yaml new file mode 100644 index 000000000..122b33298 --- /dev/null +++ b/k8s/helm_charts2/templates/s3-service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + ports: + - name: "swfs-s3" + port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} + targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} + protocol: TCP +{{- if and .Values.s3.enabled .Values.s3.metricsPort }} + - name: "metrics" + port: {{ .Values.s3.metricsPort }} + targetPort: {{ .Values.s3.metricsPort }} + protocol: TCP +{{- end }} + selector: + app: {{ template "seaweedfs.name" . }} + component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }} diff --git a/k8s/helm_charts2/templates/s3-servicemonitor.yaml b/k8s/helm_charts2/templates/s3-servicemonitor.yaml new file mode 100644 index 000000000..b549893c7 --- /dev/null +++ b/k8s/helm_charts2/templates/s3-servicemonitor.yaml @@ -0,0 +1,20 @@ +{{- if .Values.s3.metricsPort }} +{{- if .Values.global.monitoring.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + endpoints: + - interval: 30s + port: swfs-s3-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: s3 +{{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/helm_charts2/templates/seaweedfs-grafana-dashboard.yaml b/k8s/helm_charts2/templates/seaweedfs-grafana-dashboard.yaml new file mode 100644 index 000000000..eb5a5ebac --- /dev/null +++ b/k8s/helm_charts2/templates/seaweedfs-grafana-dashboard.yaml @@ -0,0 +1,20 @@ +{{- if .Values.global.monitoring.enabled }} +{{- $files := .Files.Glob "dashboards/*.json" }} +{{- if $files }} +apiVersion: v1 +kind: ConfigMapList +items: +{{- range $path, $fileContents := $files }} +{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} +- apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }} + namespace: {{ $.Release.Namespace }} + labels: + grafana_dashboard: "1" + data: + {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml b/k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml new file mode 100644 index 000000000..4e9189633 --- /dev/null +++ b/k8s/helm_charts2/templates/seaweedfs-s3-secret.yaml @@ -0,0 +1,29 @@ +{{- if not (or .Values.filer.s3.skipAuthSecretCreation .Values.s3.skipAuthSecretCreation) }} +{{- $access_key_admin := randAlphaNum 16 -}} +{{- $secret_key_admin := randAlphaNum 32 -}} +{{- $access_key_read := randAlphaNum 16 -}} +{{- $secret_key_read := randAlphaNum 32 -}} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: seaweedfs-s3-secret + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + admin_access_key_id: {{ $access_key_admin }} + admin_secret_access_key: {{ $secret_key_admin }} + read_access_key_id: {{ $access_key_read }} + read_secret_access_key: {{ $secret_key_read }} + seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}' + {{- if .Values.filer.s3.auditLogConfig }} + filer_s3_auditLogConfig.json: | + {{ toJson .Values.filer.s3.auditLogConfig | nindent 4 }} + {{- end }} + {{- if .Values.s3.auditLogConfig }} + s3_auditLogConfig.json: | + {{ toJson .Values.s3.auditLogConfig | nindent 4 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/charts/seaweedfs/templates/shared/secret-seaweedfs-db.yaml b/k8s/helm_charts2/templates/secret-seaweedfs-db.yaml similarity index 56% rename from k8s/charts/seaweedfs/templates/shared/secret-seaweedfs-db.yaml rename to k8s/helm_charts2/templates/secret-seaweedfs-db.yaml index 5b7a81038..c6132c9ea 100644 --- a/k8s/charts/seaweedfs/templates/shared/secret-seaweedfs-db.yaml +++ b/k8s/helm_charts2/templates/secret-seaweedfs-db.yaml @@ -1,4 +1,3 @@ -{{- if .Values.filer.enabled }} apiVersion: v1 kind: Secret type: Opaque @@ -8,14 +7,8 @@ metadata: annotations: "helm.sh/resource-policy": keep "helm.sh/hook": "pre-install" - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} stringData: user: "YourSWUser" password: "HardCodedPassword" # better to random generate and create in DB # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} -{{- end }} diff --git a/k8s/helm_charts2/templates/security-configmap.yaml b/k8s/helm_charts2/templates/security-configmap.yaml new file mode 100644 index 000000000..7d06614ec --- /dev/null +++ b/k8s/helm_charts2/templates/security-configmap.yaml @@ -0,0 +1,52 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "seaweedfs.name" . }}-security-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + security.toml: |- + # this file is read by master, volume server, and filer + + # the jwt signing key is read by master and volume server + # a jwt expires in 10 seconds + [jwt.signing] + key = "{{ randAlphaNum 10 | b64enc }}" + + # all grpc tls authentications are mutual + # the values for the following ca, cert, and key are paths to the PERM files. + [grpc] + ca = "/usr/local/share/ca-certificates/ca/tls.crt" + + [grpc.volume] + cert = "/usr/local/share/ca-certificates/volume/tls.crt" + key = "/usr/local/share/ca-certificates/volume/tls.key" + + [grpc.master] + cert = "/usr/local/share/ca-certificates/master/tls.crt" + key = "/usr/local/share/ca-certificates/master/tls.key" + + [grpc.filer] + cert = "/usr/local/share/ca-certificates/filer/tls.crt" + key = "/usr/local/share/ca-certificates/filer/tls.key" + + # use this for any place needs a grpc client + # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" + [grpc.client] + cert = "/usr/local/share/ca-certificates/client/tls.crt" + key = "/usr/local/share/ca-certificates/client/tls.key" + + # volume server https options + # Note: work in progress! + # this does not work with other clients, e.g., "weed filer|mount" etc, yet. + [https.client] + enabled = false + [https.volume] + cert = "" + key = "" +{{- end }} diff --git a/k8s/helm_charts2/templates/service-account.yaml b/k8s/helm_charts2/templates/service-account.yaml new file mode 100644 index 000000000..22c29b56a --- /dev/null +++ b/k8s/helm_charts2/templates/service-account.yaml @@ -0,0 +1,29 @@ +#hack for delete pod master after migration +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: seaweedfs-rw-cr +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: seaweedfs-rw-sa + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:serviceaccount:seaweedfs-rw-sa:default +subjects: +- kind: ServiceAccount + name: seaweedfs-rw-sa + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: seaweedfs-rw-cr diff --git a/k8s/charts/seaweedfs/templates/cert/volume-cert.yaml b/k8s/helm_charts2/templates/volume-cert.yaml similarity index 54% rename from k8s/charts/seaweedfs/templates/cert/volume-cert.yaml rename to k8s/helm_charts2/templates/volume-cert.yaml index bd59a676d..72c62a0f5 100644 --- a/k8s/charts/seaweedfs/templates/cert/volume-cert.yaml +++ b/k8s/helm_charts2/templates/volume-cert.yaml @@ -1,27 +1,16 @@ -{{- if and .Values.global.enableSecurity (not .Values.certificates.externalCertificates.enabled)}} -apiVersion: cert-manager.io/v1{{ if .Values.global.certificates.alphacrds }}alpha1{{ end }} +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 kind: Certificate metadata: name: {{ template "seaweedfs.name" . }}-volume-cert namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ template "seaweedfs.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/component: volume -{{- if .Values.volume.annotations }} - annotations: - {{- toYaml .Values.volume.annotations | nindent 4 }} -{{- end }} spec: secretName: {{ template "seaweedfs.name" . }}-volume-cert issuerRef: - name: {{ template "seaweedfs.name" . }}-ca-issuer - kind: Issuer + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer commonName: {{ .Values.certificates.commonName }} - subject: - organizations: + organization: - "SeaweedFS CA" dnsNames: - '*.{{ .Release.Namespace }}' @@ -37,9 +26,8 @@ spec: - {{ . }} {{- end }} {{- end }} - privateKey: - algorithm: {{ .Values.certificates.keyAlgorithm }} - size: {{ .Values.certificates.keySize }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} duration: {{ .Values.certificates.duration }} renewBefore: {{ .Values.certificates.renewBefore }} {{- end }} diff --git a/k8s/helm_charts2/templates/volume-service.yaml b/k8s/helm_charts2/templates/volume-service.yaml new file mode 100644 index 000000000..0a9173fde --- /dev/null +++ b/k8s/helm_charts2/templates/volume-service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + clusterIP: None + ports: + - name: "swfs-volume" + port: {{ .Values.volume.port }} + targetPort: {{ .Values.volume.port }} + protocol: TCP + - name: "swfs-volume-18080" + port: {{ .Values.volume.grpcPort }} + targetPort: {{ .Values.volume.grpcPort }} + protocol: TCP +{{- if .Values.volume.metricsPort }} + - name: "swfs-volume-metrics" + port: {{ .Values.volume.metricsPort }} + targetPort: {{ .Values.volume.metricsPort }} + protocol: TCP +{{- end }} + selector: + app: {{ template "seaweedfs.name" . }} + component: volume \ No newline at end of file diff --git a/k8s/helm_charts2/templates/volume-servicemonitor.yaml b/k8s/helm_charts2/templates/volume-servicemonitor.yaml new file mode 100644 index 000000000..90d70e8de --- /dev/null +++ b/k8s/helm_charts2/templates/volume-servicemonitor.yaml @@ -0,0 +1,20 @@ +{{- if .Values.volume.metricsPort }} +{{- if .Values.global.monitoring.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + endpoints: + - interval: 30s + port: swfs-volume-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: volume +{{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/helm_charts2/templates/volume-statefulset.yaml b/k8s/helm_charts2/templates/volume-statefulset.yaml new file mode 100644 index 000000000..de2703d14 --- /dev/null +++ b/k8s/helm_charts2/templates/volume-statefulset.yaml @@ -0,0 +1,276 @@ +{{- if .Values.volume.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-volume + replicas: {{ .Values.volume.replicas }} + podManagementPolicy: Parallel + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + spec: + {{- if .Values.volume.affinity }} + affinity: + {{ tpl .Values.volume.affinity . | nindent 8 | trim }} + {{- end }} + restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }} + {{- if .Values.volume.tolerations }} + tolerations: + {{ tpl .Values.volume.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 150 + {{- if .Values.volume.priorityClassName }} + priorityClassName: {{ .Values.volume.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + {{- if .Values.volume.dir_idx }} + initContainers: + - name: seaweedfs-vol-move-idx + image: {{ template "volume.image" . }} + imagePullPolicy: {{ .Values.global.pullPolicy | default "IfNotPresent" }} + command: [ '/bin/sh', '-c' ] + args: ['if ls {{ .Values.volume.dir }}/*.idx >/dev/null 2>&1; then mv {{ .Values.volume.dir }}/*.idx {{ .Values.volume.dir_idx }}/; fi;'] + volumeMounts: + - name: idx + mountPath: {{ .Values.volume.dir_idx }} + - name: data + mountPath: {{ .Values.volume.dir }} + {{- end }} + containers: + - name: seaweedfs + image: {{ template "volume.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.volume.loggingOverrideLevel }} + -v={{ .Values.volume.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + volume \ + -port={{ .Values.volume.port }} \ + {{- if .Values.volume.metricsPort }} + -metricsPort {{ .Values.volume.metricsPort }} \ + {{- end }} + -dir={{ .Values.volume.dir }} \ + {{- if .Values.volume.dir_idx }} + -dir.idx={{ .Values.volume.dir_idx }} \ + {{- end }} + -max={{ .Values.volume.maxVolumes }} \ + {{- if .Values.volume.rack }} + -rack={{ .Values.volume.rack }} \ + {{- end }} + {{- if .Values.volume.dataCenter }} + -dataCenter={{ .Values.volume.dataCenter }} \ + {{- end }} + -ip.bind={{ .Values.volume.ipBind }} \ + -readMode={{ .Values.volume.readMode }} \ + {{- if .Values.volume.whiteList }} + -whiteList={{ .Values.volume.whiteList }} \ + {{- end }} + {{- if .Values.volume.imagesFixOrientation }} + -images.fix.orientation \ + {{- end }} + {{- if .Values.volume.pulseSeconds }} + -pulseSeconds={{ .Values.volume.pulseSeconds }} \ + {{- end }} + {{- if .Values.volume.index }} + -index={{ .Values.volume.index }} \ + {{- end }} + {{- if .Values.volume.fileSizeLimitMB }} + -fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \ + {{- end }} + -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \ + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \ + -compactionMBps={{ .Values.volume.compactionMBps }} \ + -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: data + mountPath: "{{ .Values.volume.dir }}/" + {{- if .Values.volume.dir_idx }} + - name: idx + mountPath: "{{ .Values.volume.dir_idx }}/" + {{- end }} + - name: logs + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.volume.port }} + name: swfs-vol + - containerPort: {{ .Values.volume.grpcPort }} + #name: swfs-vol-grpc + readinessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 90 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 90 + successThreshold: 1 + failureThreshold: 4 + timeoutSeconds: 30 + {{- if .Values.volume.resources }} + resources: + {{ tpl .Values.volume.resources . | nindent 12 | trim }} + {{- end }} + {{- $hostpath_exists := include "volume.hostpath_exists" . -}} + {{- if $hostpath_exists }} + volumes: + {{- if eq .Values.volume.data.type "hostPath" }} + - name: data + hostPath: + path: /storage/object_store/ + type: DirectoryOrCreate + {{- end }} + {{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }} + - name: idx + hostPath: + path: /ssd/seaweedfs-volume-idx/ + type: DirectoryOrCreate + {{- end }} + {{- if eq .Values.volume.logs.type "hostPath" }} + - name: logs + hostPath: + path: /storage/logs/seaweedfs/volume + type: DirectoryOrCreate + {{- end }} + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{- if .Values.volume.extraVolumes }} + {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }} + {{- end }} + {{- end }} + {{- if .Values.volume.nodeSelector }} + nodeSelector: + {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- $pvc_exists := include "volume.pvc_exists" . -}} + {{- if $pvc_exists }} + volumeClaimTemplates: + {{- if eq .Values.volume.data.type "persistentVolumeClaim"}} + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.data.storageClass }} + resources: + requests: + storage: {{ .Values.volume.data.size }} + {{- end }} + {{- if and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx }} + - metadata: + name: idx + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.idx.storageClass }} + resources: + requests: + storage: {{ .Values.volume.idx.size }} + {{- end }} + {{- if eq .Values.volume.logs.type "persistentVolumeClaim" }} + - metadata: + name: logs + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.logs.storageClass }} + resources: + requests: + storage: {{ .Values.volume.logs.size }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/helm_charts2/values.yaml b/k8s/helm_charts2/values.yaml new file mode 100644 index 000000000..b5d059ee9 --- /dev/null +++ b/k8s/helm_charts2/values.yaml @@ -0,0 +1,441 @@ +# Available parameters and their default values for the SeaweedFS chart. + +global: + registry: "" + repository: "" + imageName: chrislusf/seaweedfs + imagePullPolicy: IfNotPresent + imagePullSecrets: imagepullsecret + restartPolicy: Always + loggingLevel: 1 + enableSecurity: false + monitoring: + enabled: false + gatewayHost: null + gatewayPort: null + # if enabled will use global.replicationPlacment and override master & filer defaultReplicaPlacement config + enableReplication: false + # replication type is XYZ: + # X number of replica in other data centers + # Y number of replica in other racks in the same data center + # Z number of replica in other servers in the same rack + replicationPlacment: "001" + extraEnvironmentVars: + WEED_CLUSTER_DEFAULT: "sw" + WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333" + WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888" + +image: + registry: "" + repository: "" + +master: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 9333 + grpcPort: 19333 + ipBind: "0.0.0.0" + volumePreallocate: false + volumeSizeLimitMB: 1000 + loggingOverrideLevel: null + #number of seconds between heartbeats, default 5 + pulseSeconds: null + #threshold to vacuum and reclaim spaces, default 0.3 (30%) + garbageThreshold: null + #Prometheus push interval in seconds, default 15 + metricsIntervalSec: 15 + # replication type is XYZ: + # X number of replica in other data centers + # Y number of replica in other racks in the same data center + # Z number of replica in other servers in the same rack + defaultReplication: "000" + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + extraVolumes: "" + extraVolumeMounts: "" + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + # Resource requests, limits, etc. for the master cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: master + topologyKey: kubernetes.io/hostname + + # Toleration Settings for master pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for master pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to master pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + ingress: + enabled: false + className: "nginx" + annotations: + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; + + extraEnvironmentVars: + WEED_MASTER_VOLUME_GROWTH_COPY_1: 7 + WEED_MASTER_VOLUME_GROWTH_COPY_2: 6 + WEED_MASTER_VOLUME_GROWTH_COPY_3: 3 + WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 + +volume: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + port: 8080 + grpcPort: 18080 + metricsPort: 9327 + ipBind: "0.0.0.0" + replicas: 1 + loggingOverrideLevel: null + # number of seconds between heartbeats, must be smaller than or equal to the master's setting + pulseSeconds: null + # Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance., default memory + index: null + # limit file size to avoid out of memory, default 256mb + fileSizeLimitMB: null + # minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly + minFreeSpacePercent: 7 + +# can use ANY storage-class , example with local-path-provisner +# data: +# type: "persistentVolumeClaim" +# size: "24Ti" +# storageClass: "local-path-provisioner" + data: + type: "hostPath" + size: "" + storageClass: "" + idx: + type: "hostPath" + size: "" + storageClass: "" + + logs: + type: "hostPath" + size: "" + storageClass: "" + + # limit background compaction or copying speed in mega bytes per second + compactionMBps: "50" + + # Directories to store data files. dir[,dir]... (default "/tmp") + dir: "/data" + # Directories to store index files. dir[,dir]... (default is the same as "dir") + dir_idx: null + + # Maximum numbers of volumes, count[,count]... + # If set to zero on non-windows OS, the limit will be auto configured. (default "7") + maxVolumes: "0" + + # Volume server's rack name + rack: null + + # Volume server's data center name + dataCenter: null + + # Redirect moved or non-local volumes. (default proxy) + readMode: proxy + + # Comma separated Ip addresses having write permission. No limit if empty. + whiteList: null + + # Adjust jpg orientation when uploading. + imagesFixOrientation: false + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: volume + topologyKey: kubernetes.io/hostname + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-volume: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + +filer: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 8888 + grpcPort: 18888 + metricsPort: 9327 + loggingOverrideLevel: null + # replication type is XYZ: + # X number of replica in other data centers + # Y number of replica in other racks in the same data center + # Z number of replica in other servers in the same rack + defaultReplicaPlacement: "000" + # turn off directory listing + disableDirListing: false + # split files larger than the limit, default 32 + maxMB: null + # encrypt data on volume servers + encryptVolumeData: false + + # Whether proxy or redirect to volume server during file GET request + redirectOnRead: false + + # Limit sub dir listing size (default 100000) + dirListLimit: 100000 + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + # enablePVC will create a pvc for filer for data persistence. + enablePVC: false + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: filer + topologyKey: kubernetes.io/hostname + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + ingress: + enabled: false + className: "nginx" + annotations: + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; + + # extraEnvVars is a list of extra enviroment variables to set with the stateful set. + extraEnvironmentVars: + WEED_MYSQL_ENABLED: "true" + WEED_MYSQL_HOSTNAME: "mysql-db-host" + WEED_MYSQL_PORT: "3306" + WEED_MYSQL_DATABASE: "sw_database" + WEED_MYSQL_CONNECTION_MAX_IDLE: "5" + WEED_MYSQL_CONNECTION_MAX_OPEN: "75" + # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections + WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600" + # enable usage of memsql as filer backend + WEED_MYSQL_INTERPOLATEPARAMS: "true" + # if you want to use leveldb2, then should enable "enablePVC". or you may lose your data. + WEED_LEVELDB2_ENABLED: "false" + # with http DELETE, by default the filer would check whether a folder is empty. + # recursive_delete will delete all sub folders and files, similar to "rm -Rf" + WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false" + # directories under this folder will be automatically creating a separate bucket + WEED_FILER_BUCKETS_FOLDER: "/buckets" + + s3: + enabled: true + port: 8333 + #allow empty folders + allowEmptyFolder: false + # Suffix of the host name, {bucket}.{domainName} + domainName: "" + # enable user & permission to s3 (need to inject to all services) + enableAuth: false + skipAuthSecretCreation: false + auditLogConfig: {} + +s3: + enabled: false + repository: null + imageName: null + imageTag: null + restartPolicy: null + replicas: 1 + port: 8333 + metricsPort: 9327 + loggingOverrideLevel: null + #allow empty folders + allowEmptyFolder: true + # enable user & permission to s3 (need to inject to all services) + enableAuth: false + skipAuthSecretCreation: false + auditLogConfig: {} + + # Suffix of the host name, {bucket}.{domainName} + domainName: "" + + extraVolumes: "" + extraVolumeMounts: "" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + logs: + type: "hostPath" + size: "" + storageClass: "" + + +certificates: + commonName: "SeaweedFS CA" + ipAddresses: [] + keyAlgorithm: rsa + keySize: 2048 + duration: 2160h # 90d + renewBefore: 360h # 15d diff --git a/note/SeaweedMQ_Architecture.png b/note/SeaweedMQ_Architecture.png deleted file mode 100644 index d3cce6230..000000000 Binary files a/note/SeaweedMQ_Architecture.png and /dev/null differ diff --git a/note/keepsec.png b/note/keepsec.png deleted file mode 100644 index 14159dc7b..000000000 Binary files a/note/keepsec.png and /dev/null differ diff --git a/note/piknik.png b/note/piknik.png deleted file mode 100644 index b4fbcb40b..000000000 Binary files a/note/piknik.png and /dev/null differ diff --git a/note/seaweedfs.svg b/note/seaweedfs.svg deleted file mode 100644 index 61fce5681..000000000 --- a/note/seaweedfs.svg +++ /dev/null @@ -1,317 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/note/shuguang.png b/note/shuguang.png new file mode 100644 index 000000000..54b6d0b6b Binary files /dev/null and b/note/shuguang.png differ diff --git a/note/sponsor_nodion.png b/note/sponsor_nodion.png deleted file mode 100644 index 9ff027ebf..000000000 Binary files a/note/sponsor_nodion.png and /dev/null differ diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 682582f7b..221de5f8f 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -3,38 +3,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - com.seaweedfs + com.github.chrislusf seaweedfs-client - 3.80 + 3.13 - SeaweedFS Java Client - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - + + org.sonatype.oss + oss-parent + 9 + - 3.25.5 + 3.16.1 - 1.75.0 - 32.0.0-jre + 1.23.0 + 30.0-jre @@ -92,6 +75,12 @@ + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + @@ -107,7 +96,6 @@ 8 8 - 8 @@ -145,13 +133,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -170,7 +159,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs diff --git a/other/java/client/pom.xml.deploy b/other/java/client/pom.xml.deploy index 03de3f5e1..e3239de0e 100644 --- a/other/java/client/pom.xml.deploy +++ b/other/java/client/pom.xml.deploy @@ -3,38 +3,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - com.seaweedfs + com.github.chrislusf seaweedfs-client - 3.80 + 3.13 - SeaweedFS Java Client - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - + + org.sonatype.oss + oss-parent + 9 + - 3.25.5 + 3.9.1 - 1.68.1 - 32.0.0-jre + 1.23.0 + 28.0-jre @@ -82,16 +65,17 @@ junit junit - 4.13.1 + 4.12 test - - javax.annotation - javax.annotation-api - 1.3.2 - + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + @@ -107,7 +91,6 @@ 8 8 - 8 @@ -145,13 +128,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -170,7 +154,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml index 60d07560f..dade66f7b 100644 --- a/other/java/client/pom_debug.xml +++ b/other/java/client/pom_debug.xml @@ -3,9 +3,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - com.seaweedfs + com.github.chrislusf seaweedfs-client - 3.80 + 3.13 org.sonatype.oss @@ -14,9 +14,9 @@ - 3.25.5 + 3.9.1 - 1.68.1 + 1.23.0 28.0-jre @@ -90,7 +90,6 @@ 8 8 - 8 @@ -129,7 +128,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs diff --git a/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java b/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java index dd4d407b0..9b6ba5dfc 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java +++ b/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java @@ -128,7 +128,6 @@ public class FileChunkManifest { FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk( filerClient.getReplication(), - filerClient.getCollection(), filerClient, minOffset, data, 0, data.length, parentDirectory); diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index 9ce6036a0..10d263968 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -13,14 +13,12 @@ public class FilerClient extends FilerGrpcClient { private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class); - public FilerClient(String filerHost, int filerGrpcPort) { - this(filerHost, filerGrpcPort-10000, filerGrpcPort, ""); + public FilerClient(String host, int grpcPort) { + super(host, grpcPort-10000, grpcPort); } - public FilerClient(String filerHost, int filerGrpcPort, String cn) { this(filerHost, filerGrpcPort-10000, filerGrpcPort, cn); } - public FilerClient(String filerHost, int filerPort, int filerGrpcPort) { this(filerHost, filerPort, filerGrpcPort, ""); } - public FilerClient(String filerHost, int filerPort, int filerGrpcPort, String cn) { - super(filerHost, filerPort, filerGrpcPort, cn); + public FilerClient(String host, int port, int grpcPort) { + super(host, port, grpcPort); } public static String toFileId(FilerProto.FileId fid) { diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 44977d186..0a2e6332e 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -8,6 +8,7 @@ import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.SSLException; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -16,12 +17,14 @@ import java.util.concurrent.TimeUnit; public class FilerGrpcClient { private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class); - private static final SslContext sslContext; - private static final String protocol; + static SslContext sslContext; static { - sslContext = FilerSecurityContext.getGrpcSslContext(); - protocol = FilerSecurityContext.isHttpSecurityEnabled() ? "https" : "http"; + try { + sslContext = FilerSslContext.loadSslContext(); + } catch (SSLException e) { + logger.warn("failed to load ssl context", e); + } } public final int VOLUME_SERVER_ACCESS_DIRECT = 0; @@ -39,27 +42,19 @@ public class FilerGrpcClient { private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT; private String filerAddress; - public FilerGrpcClient(String host, int port, int grpcPort, String cn) { - this(host, port, grpcPort, cn, sslContext); + public FilerGrpcClient(String host, int port, int grpcPort) { + this(host, port, grpcPort, sslContext); } - public FilerGrpcClient(String host, int port, int grpcPort, String cn, SslContext sslContext) { + public FilerGrpcClient(String host, int port, int grpcPort, SslContext sslContext) { this(sslContext == null ? - ManagedChannelBuilder.forAddress(host, grpcPort) - .usePlaintext() + ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() .maxInboundMessageSize(1024 * 1024 * 1024) : - cn.isEmpty() ? - NettyChannelBuilder.forAddress(host, grpcPort) - .maxInboundMessageSize(1024 * 1024 * 1024) - .negotiationType(NegotiationType.TLS) - .sslContext(sslContext) : - NettyChannelBuilder.forAddress(host, grpcPort) - .maxInboundMessageSize(1024 * 1024 * 1024) - .negotiationType(NegotiationType.TLS) - .overrideAuthority(cn) //will not check hostname of the filer server - .sslContext(sslContext) - ); + NettyChannelBuilder.forAddress(host, grpcPort) + .maxInboundMessageSize(1024 * 1024 * 1024) + .negotiationType(NegotiationType.TLS) + .sslContext(sslContext)); filerAddress = SeaweedUtil.joinHostPort(host, port); @@ -135,11 +130,12 @@ public class FilerGrpcClient { public String getChunkUrl(String chunkId, String url, String publicUrl) { switch (this.volumeServerAccess) { case VOLUME_SERVER_ACCESS_PUBLIC_URL: - return String.format("%s://%s/%s", protocol, publicUrl, chunkId); + return String.format("http://%s/%s", publicUrl, chunkId); case VOLUME_SERVER_ACCESS_FILER_PROXY: - return String.format("%s://%s/?proxyChunkId=%s", protocol, this.filerAddress, chunkId); + return String.format("http://%s/?proxyChunkId=%s", this.filerAddress, chunkId); default: - return String.format("%s://%s/%s", protocol, url, chunkId); + return String.format("http://%s/%s", url, chunkId); } } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerSecurityContext.java b/other/java/client/src/main/java/seaweedfs/client/FilerSecurityContext.java deleted file mode 100644 index 07a2ce286..000000000 --- a/other/java/client/src/main/java/seaweedfs/client/FilerSecurityContext.java +++ /dev/null @@ -1,163 +0,0 @@ -package seaweedfs.client; - -import com.google.common.base.Strings; -import com.moandjiezana.toml.Toml; -import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; -import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; -import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; -import org.apache.http.ssl.SSLContextBuilder; -import org.apache.http.ssl.SSLContexts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.*; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.PrivateKey; -import java.security.cert.X509Certificate; - -public abstract class FilerSecurityContext extends SslContext { -//extends Netty SslContext to access its protected static utility methods in -//buildHttpSslContext() - - private static final Logger logger = LoggerFactory.getLogger(FilerSecurityContext.class); - private static boolean grpcSecurityEnabled; - private static boolean httpSecurityEnabled; - private static SslContext grpcSslContext; - private static SSLContext httpSslContext; - - private static String grpcTrustCertCollectionFilePath; - private static String grpcClientCertChainFilePath; - private static String grpcClientPrivateKeyFilePath; - - private static String httpTrustCertCollectionFilePath; - private static String httpClientCertChainFilePath; - private static String httpClientPrivateKeyFilePath; - - - static { - String securityFileName = "security.toml"; - String home = System.getProperty("user.home"); - File f1 = new File("./"+securityFileName); - File f2 = new File(home + "/.seaweedfs/"+securityFileName); - File f3 = new File("/etc/seaweedfs/"+securityFileName); - - File securityFile = f1.exists()? f1 : f2.exists() ? f2 : f3.exists()? f3 : null; - - if (securityFile==null){ - logger.debug("Security file not found"); - grpcSecurityEnabled = false; - httpSecurityEnabled = false; - } else { - - Toml toml = new Toml().read(securityFile); - logger.debug("reading ssl setup from {}", securityFile); - - grpcTrustCertCollectionFilePath = toml.getString("grpc.ca"); - logger.debug("loading gRPC ca from {}", grpcTrustCertCollectionFilePath); - grpcClientCertChainFilePath = toml.getString("grpc.client.cert"); - logger.debug("loading gRPC client ca from {}", grpcClientCertChainFilePath); - grpcClientPrivateKeyFilePath = toml.getString("grpc.client.key"); - logger.debug("loading gRPC client key from {}", grpcClientPrivateKeyFilePath); - - if (Strings.isNullOrEmpty(grpcClientCertChainFilePath) && Strings.isNullOrEmpty(grpcClientPrivateKeyFilePath)) { - logger.debug("gRPC private key file locations not set"); - grpcSecurityEnabled = false; - } else { - try { - grpcSslContext = buildGrpcSslContext(); - grpcSecurityEnabled = true; - } catch (Exception e) { - logger.warn("Couldn't initialize gRPC security context, filer operations are likely to fail!", e); - grpcSslContext = null; - grpcSecurityEnabled = false; - } - } - - if (toml.getBoolean("https.client.enabled")) { - httpTrustCertCollectionFilePath = toml.getString("https.client.ca"); - logger.debug("loading HTTP ca from {}", httpTrustCertCollectionFilePath); - httpClientCertChainFilePath = toml.getString("https.client.cert"); - logger.debug("loading HTTP client ca from {}", httpClientCertChainFilePath); - httpClientPrivateKeyFilePath = toml.getString("https.client.key"); - logger.debug("loading HTTP client key from {}", httpClientPrivateKeyFilePath); - - if (Strings.isNullOrEmpty(httpClientCertChainFilePath) && Strings.isNullOrEmpty(httpClientPrivateKeyFilePath)) { - logger.debug("HTTP private key file locations not set"); - httpSecurityEnabled = false; - } else { - try { - httpSslContext = buildHttpSslContext(); - httpSecurityEnabled = true; - } catch (Exception e) { - logger.warn("Couldn't initialize HTTP security context, volume operations are likely to fail!", e); - httpSslContext = null; - httpSecurityEnabled = false; - } - } - } else { - httpSecurityEnabled = false; - } - } - // possibly fix the format https://netty.io/wiki/sslcontextbuilder-and-private-key.html - } - - public static boolean isGrpcSecurityEnabled() { - return grpcSecurityEnabled; - } - - public static boolean isHttpSecurityEnabled() { - return httpSecurityEnabled; - } - - public static SslContext getGrpcSslContext() { - return grpcSslContext; - } - - public static SSLContext getHttpSslContext() { - return httpSslContext; - } - - private static SslContext buildGrpcSslContext() throws SSLException { - SslContextBuilder builder = GrpcSslContexts.forClient(); - if (grpcTrustCertCollectionFilePath != null) { - builder.trustManager(new File(grpcTrustCertCollectionFilePath)); - } - if (grpcClientCertChainFilePath != null && grpcClientPrivateKeyFilePath != null) { - builder.keyManager(new File(grpcClientCertChainFilePath), new File(grpcClientPrivateKeyFilePath)); - } - - return builder.build(); - } - - private static SSLContext buildHttpSslContext() throws GeneralSecurityException, IOException { - SSLContextBuilder builder = SSLContexts.custom(); - - if (httpTrustCertCollectionFilePath != null) { - final X509Certificate[] trustCerts = toX509Certificates(new File(httpTrustCertCollectionFilePath)); - final KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - ks.load(null, null); - - int i = 0; - for (X509Certificate cert: trustCerts) { - String alias = Integer.toString(++i); - ks.setCertificateEntry(alias, cert); - } - - builder.loadTrustMaterial(ks, null); - } - - if (httpClientCertChainFilePath != null && httpClientPrivateKeyFilePath != null) { - final X509Certificate[] keyCerts = toX509Certificates(new File(httpClientCertChainFilePath)); - final PrivateKey key = toPrivateKey(new File(httpClientPrivateKeyFilePath), null); - char[] emptyPassword = new char[0]; - final KeyStore ks = buildKeyStore(keyCerts, key, emptyPassword, null); - logger.debug("Loaded {} key certificates", ks.size()); - builder.loadKeyMaterial(ks, emptyPassword); - } - - return builder.build(); - } -} diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java new file mode 100644 index 000000000..5a88c1da3 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java @@ -0,0 +1,64 @@ +package seaweedfs.client; + +import com.google.common.base.Strings; +import com.moandjiezana.toml.Toml; +import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLException; +import java.io.File; + +public class FilerSslContext { + + private static final Logger logger = LoggerFactory.getLogger(FilerSslContext.class); + + public static SslContext loadSslContext() throws SSLException { + String securityFileName = "security.toml"; + String home = System.getProperty("user.home"); + File f1 = new File("./"+securityFileName); + File f2 = new File(home + "/.seaweedfs/"+securityFileName); + File f3 = new File(home + "/etc/seaweedfs/"+securityFileName); + + File securityFile = f1.exists()? f1 : f2.exists() ? f2 : f3.exists()? f3 : null; + + if (securityFile==null){ + return null; + } + + Toml toml = new Toml().read(securityFile); + logger.debug("reading ssl setup from {}", securityFile); + + String trustCertCollectionFilePath = toml.getString("grpc.ca"); + logger.debug("loading ca from {}", trustCertCollectionFilePath); + String clientCertChainFilePath = toml.getString("grpc.client.cert"); + logger.debug("loading client ca from {}", clientCertChainFilePath); + String clientPrivateKeyFilePath = toml.getString("grpc.client.key"); + logger.debug("loading client key from {}", clientPrivateKeyFilePath); + + if (Strings.isNullOrEmpty(clientPrivateKeyFilePath) && Strings.isNullOrEmpty(clientPrivateKeyFilePath)){ + return null; + } + + // possibly fix the format https://netty.io/wiki/sslcontextbuilder-and-private-key.html + + return buildSslContext(trustCertCollectionFilePath, clientCertChainFilePath, clientPrivateKeyFilePath); + } + + + private static SslContext buildSslContext(String trustCertCollectionFilePath, + String clientCertChainFilePath, + String clientPrivateKeyFilePath) throws SSLException { + SslContextBuilder builder = GrpcSslContexts.forClient(); + if (trustCertCollectionFilePath != null) { + builder.trustManager(new File(trustCertCollectionFilePath)); + } + if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) { + builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath)); + } + return builder.trustManager(InsecureTrustManagerFactory.INSTANCE).build(); + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java b/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java index 18826dd48..2eba4f808 100644 --- a/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java +++ b/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java @@ -14,23 +14,20 @@ public class ReadChunks { points.add(new Point(chunk.getOffset(), chunk, true)); points.add(new Point(chunk.getOffset() + chunk.getSize(), chunk, false)); } - Collections.sort(points, new Comparator() { @Override public int compare(Point a, Point b) { - int xComparison = Long.compare(a.x, b.x); - if (xComparison != 0) { - return xComparison; + int x = (int) (a.x - b.x); + if (a.x != b.x) { + return (int) (a.x - b.x); } - - // If x values are equal, compare ts - int tsComparison = Long.compare(a.ts, b.ts); - if (tsComparison != 0) { - return tsComparison; + if (a.ts != b.ts) { + return (int) (a.ts - b.ts); } - - // If both x and ts are equal, prioritize start points - return Boolean.compare(b.isStart, a.isStart); // b.isStart first to prioritize starts + if (!a.isStart) { + return -1; + } + return 1; } }); @@ -86,7 +83,7 @@ public class ReadChunks { prevX, point.x, chunk.getFileId(), - chunk.getModifiedTsNs(), + chunk.getMtime(), prevX - chunk.getOffset(), chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x, chunk.getCipherKey().toByteArray(), @@ -103,7 +100,7 @@ public class ReadChunks { public Point(long x, FilerProto.FileChunk chunk, boolean isStart) { this.x = x; - this.ts = chunk.getModifiedTsNs(); + this.ts = chunk.getMtime(); this.chunk = chunk; this.isStart = isStart; } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java index 64754321b..9d1fb3417 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java @@ -119,8 +119,9 @@ public class SeaweedInputStream extends InputStream { long bytesRead = 0; int len = buf.remaining(); - if (this.position< Integer.MAX_VALUE && (this.position + len )<= entry.getContent().size()) { - entry.getContent().substring((int)this.position, (int)(this.position + len)).copyTo(buf); + int start = (int) this.position; + if (start + len <= entry.getContent().size()) { + entry.getContent().substring(start, start + len).copyTo(buf); } else { bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, SeaweedRead.fileSize(entry)); } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java index 68c281992..d5c3399ed 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java @@ -33,7 +33,6 @@ public class SeaweedOutputStream extends OutputStream { private ByteBuffer buffer; private long outputIndex; private String replication = ""; - private String collection = ""; public SeaweedOutputStream(FilerClient filerClient, final String fullpath) { this(filerClient, fullpath, ""); @@ -54,6 +53,7 @@ public class SeaweedOutputStream extends OutputStream { this.lastFlushOffset = 0; this.bufferSize = bufferSize; this.buffer = ByteBufferPool.request(bufferSize); + this.outputIndex = 0; this.writeOperations = new ConcurrentLinkedDeque<>(); this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors(); @@ -83,13 +83,6 @@ public class SeaweedOutputStream extends OutputStream { } - public void setReplication(String replication) { - this.replication = replication; - } - public void setCollection(String collection) { - this.collection = collection; - } - public static String getParentDirectory(String path) { int protoIndex = path.indexOf("://"); if (protoIndex >= 0) { @@ -151,11 +144,13 @@ public class SeaweedOutputStream extends OutputStream { if (numberOfBytesToWrite < writableBytes) { buffer.put(data, currentOffset, numberOfBytesToWrite); + outputIndex += numberOfBytesToWrite; break; } // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity()); buffer.put(data, currentOffset, writableBytes); + outputIndex += writableBytes; currentOffset += writableBytes; writeCurrentBufferToService(); numberOfBytesToWrite = numberOfBytesToWrite - writableBytes; @@ -199,6 +194,7 @@ public class SeaweedOutputStream extends OutputStream { lastError = new IOException("Stream is closed!"); ByteBufferPool.release(buffer); buffer = null; + outputIndex = 0; closed = true; writeOperations.clear(); if (!threadExecutor.isShutdown()) { @@ -229,7 +225,7 @@ public class SeaweedOutputStream extends OutputStream { } final Future job = completionService.submit(() -> { // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")"); - SeaweedWrite.writeData(entry, replication, collection, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path); + SeaweedWrite.writeData(entry, replication, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path); // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")"); ByteBufferPool.release(bufferToWrite); return null; diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index cac85d186..41033befb 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -69,7 +69,7 @@ public class SeaweedRead { if (locations == null || locations.getLocationsCount() == 0) { LOG.error("failed to locate {}", chunkView.fileId); volumeIdCache.clearLocations(volumeId); - throw new IOException("failed to locate fileId " + chunkView.fileId); + return 0; } int len = readChunkView(filerClient, startOffset, buf, chunkView, locations); @@ -103,7 +103,7 @@ public class SeaweedRead { chunkCache.setChunk(chunkView.fileId, chunkData); } - int len = (int) chunkView.size - (int) (startOffset - chunkView.logicOffset); + int len = (int) chunkView.size; LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} chunkView[{};{}) startOffset:{}", chunkView.fileId, chunkData.length, chunkView.offset, chunkView.logicOffset, chunkView.logicOffset + chunkView.size, startOffset); buf.put(chunkData, (int) (startOffset - chunkView.logicOffset + chunkView.offset), len); diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java index 3628a10d1..027e49b96 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java @@ -1,64 +1,27 @@ package seaweedfs.client; -import org.apache.http.config.Registry; -import org.apache.http.config.RegistryBuilder; -import org.apache.http.conn.socket.ConnectionSocketFactory; -import org.apache.http.conn.socket.PlainConnectionSocketFactory; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.NoopHostnameVerifier; -import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.impl.DefaultConnectionReuseStrategy; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.HostnameVerifier; -import javax.net.ssl.SSLContext; public class SeaweedUtil { - private static final Logger logger = LoggerFactory.getLogger(SeaweedUtil.class); - static PoolingHttpClientConnectionManager cm; + static PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(); static CloseableHttpClient httpClient; static { - //Apache HTTP client has a terrible API that makes you configure everything twice - //NoopHostnameVerifier is required because SeaweedFS doesn't verify hostnames - //and the servers are likely to have TLS certificates that do not match their hosts - if (FilerSecurityContext.isHttpSecurityEnabled()) { - SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory( - FilerSecurityContext.getHttpSslContext(), - NoopHostnameVerifier.INSTANCE); - - Registry socketFactoryRegistry = - RegistryBuilder.create() - .register("https", sslSocketFactory) - .register("http", new PlainConnectionSocketFactory()) - .build(); - cm = new PoolingHttpClientConnectionManager(socketFactoryRegistry); - } else { - cm = new PoolingHttpClientConnectionManager(); - } - // Increase max total connection to 200 cm.setMaxTotal(200); // Increase default max connection per route to 20 cm.setDefaultMaxPerRoute(20); - HttpClientBuilder builder = HttpClientBuilder.create() + httpClient = HttpClientBuilder.create() .setConnectionManager(cm) .setConnectionReuseStrategy(DefaultConnectionReuseStrategy.INSTANCE) - .setKeepAliveStrategy(DefaultConnectionKeepAliveStrategy.INSTANCE); - - if (FilerSecurityContext.isHttpSecurityEnabled()) { - builder.setSSLContext(FilerSecurityContext.getHttpSslContext()); - builder.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE); - } - - httpClient = builder.build(); + .setKeepAliveStrategy(DefaultConnectionKeepAliveStrategy.INSTANCE) + .build(); } public static CloseableHttpClient getClosableHttpClient() { diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index 88c7cefbe..1ee745ed0 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -14,9 +14,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.security.SecureRandom; -import java.security.MessageDigest; import java.util.List; -import java.util.Base64; public class SeaweedWrite { @@ -26,7 +24,6 @@ public class SeaweedWrite { public static void writeData(FilerProto.Entry.Builder entry, final String replication, - String collection, final FilerClient filerClient, final long offset, final byte[] bytes, @@ -37,7 +34,7 @@ public class SeaweedWrite { for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) { try { FilerProto.FileChunk.Builder chunkBuilder = writeChunk( - replication, collection, filerClient, offset, bytes, bytesOffset, bytesLength, path); + replication, filerClient, offset, bytes, bytesOffset, bytesLength, path); lastException = null; synchronized (entry) { entry.addChunks(chunkBuilder); @@ -60,7 +57,6 @@ public class SeaweedWrite { } public static FilerProto.FileChunk.Builder writeChunk(final String replication, - final String collection, final FilerClient filerClient, final long offset, final byte[] bytes, @@ -69,7 +65,7 @@ public class SeaweedWrite { final String path) throws IOException { FilerProto.AssignVolumeResponse response = filerClient.getBlockingStub().assignVolume( FilerProto.AssignVolumeRequest.newBuilder() - .setCollection(Strings.isNullOrEmpty(collection) ? filerClient.getCollection() : collection) + .setCollection(filerClient.getCollection()) .setReplication(Strings.isNullOrEmpty(replication) ? filerClient.getReplication() : replication) .setDataCenter("") .setTtlSec(0) @@ -100,7 +96,7 @@ public class SeaweedWrite { .setFileId(fileId) .setOffset(offset) .setSize(bytesLength) - .setModifiedTsNs(System.nanoTime()) + .setMtime(System.currentTimeMillis() / 10000L) .setETag(etag) .setCipherKey(cipherKeyString); } @@ -127,20 +123,13 @@ public class SeaweedWrite { final byte[] bytes, final long bytesOffset, final long bytesLength, byte[] cipherKey) throws IOException { - MessageDigest md = null; - try { - md = MessageDigest.getInstance("MD5"); - } catch (java.security.NoSuchAlgorithmException e) { - } InputStream inputStream = null; if (cipherKey == null || cipherKey.length == 0) { - md.update(bytes, (int) bytesOffset, (int) bytesLength); inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); } else { try { byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey); - md.update(encryptedBytes); inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length); } catch (Exception e) { throw new IOException("fail to encrypt data", e); @@ -151,7 +140,6 @@ public class SeaweedWrite { if (auth != null && auth.length() != 0) { post.addHeader("Authorization", "BEARER " + auth); } - post.addHeader("Content-MD5", Base64.getEncoder().encodeToString(md.digest())); post.setEntity(MultipartEntityBuilder.create() .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) @@ -161,13 +149,6 @@ public class SeaweedWrite { CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(post); try { - if (response.getStatusLine().getStatusCode() / 100 != 2) { - if (response.getEntity().getContentType() != null && response.getEntity().getContentType().getValue().equals("application/json")) { - throw new IOException(EntityUtils.toString(response.getEntity(), "UTF-8")); - } else { - throw new IOException(response.getStatusLine().getReasonPhrase()); - } - } String etag = response.getLastHeader("ETag").getValue(); diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 9257996ed..bd0932cb8 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package filer_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -54,15 +54,18 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } - rpc TraverseBfsMetadata (TraverseBfsMetadataRequest) returns (stream TraverseBfsMetadataResponse) { - } - rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { } rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { } + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + rpc KvGet (KvGetRequest) returns (KvGetResponse) { } @@ -71,16 +74,6 @@ service SeaweedFiler { rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) { } - - rpc DistributedLock(LockRequest) returns (LockResponse) { - } - rpc DistributedUnlock(UnlockRequest) returns (UnlockResponse) { - } - rpc FindLockOwner(FindLockOwnerRequest) returns (FindLockOwnerResponse) { - } - // distributed lock management internal use only - rpc TransferLocks(TransferLocksRequest) returns (TransferLocksResponse) { - } } ////////////////////////////////////////////////// @@ -125,7 +118,6 @@ message Entry { RemoteEntry remote_entry = 10; int64 quota = 11; // for bucket only. Positive/Negative means enabled/disabled. - int64 worm_enforced_at_ts_ns = 12; } message FullEntry { @@ -142,18 +134,11 @@ message EventNotification { repeated int32 signatures = 6; } -enum SSEType { - NONE = 0; // No server-side encryption - SSE_C = 1; // Server-Side Encryption with Customer-Provided Keys - SSE_KMS = 2; // Server-Side Encryption with KMS-Managed Keys - SSE_S3 = 3; // Server-Side Encryption with S3-Managed Keys -} - message FileChunk { string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; - int64 modified_ts_ns = 4; + int64 mtime = 4; string e_tag = 5; string source_file_id = 6; // to be deprecated FileId fid = 7; @@ -161,8 +146,6 @@ message FileChunk { bytes cipher_key = 9; bool is_compressed = 10; bool is_chunk_manifest = 11; // content is a list of FileChunks - SSEType sse_type = 12; // Server-side encryption type - bytes sse_metadata = 13; // Serialized SSE metadata for this chunk (SSE-C, SSE-KMS, or SSE-S3) } message FileChunkManifest { @@ -231,7 +214,6 @@ message DeleteEntryRequest { bool ignore_recursive_error = 6; bool is_from_other_cluster = 7; repeated int32 signatures = 8; - int64 if_not_modified_after = 9; } message DeleteEntryResponse { @@ -295,7 +277,6 @@ message Location { string url = 1; string public_url = 2; uint32 grpc_port = 3; - string data_center = 4; } message LookupVolumeResponse { map locations_map = 1; @@ -355,8 +336,6 @@ message GetFilerConfigurationResponse { string version = 11; string cluster_id = 12; string filer_group = 13; - int32 major_version = 14; - int32 minor_version = 15; } message SubscribeMetadataRequest { @@ -367,8 +346,6 @@ message SubscribeMetadataRequest { repeated string path_prefixes = 6; int32 client_id = 7; int64 until_ns = 8; - int32 client_epoch = 9; - repeated string directories = 10; // exact directory to watch } message SubscribeMetadataResponse { string directory = 1; @@ -376,21 +353,10 @@ message SubscribeMetadataResponse { int64 ts_ns = 3; } -message TraverseBfsMetadataRequest { - string directory = 1; - repeated string excluded_prefixes = 2; -} -message TraverseBfsMetadataResponse { - string directory = 1; - Entry entry = 2; -} - message LogEntry { int64 ts_ns = 1; int32 partition_key_hash = 2; bytes data = 3; - bytes key = 4; - int64 offset = 5; // Sequential offset within partition } message KeepConnectedRequest { @@ -451,11 +417,6 @@ message FilerConf { string data_center = 9; string rack = 10; string data_node = 11; - uint32 max_file_name_length = 12; - bool disable_chunk_deletion = 13; - bool worm = 14; - uint64 worm_grace_period_seconds = 15; - uint64 worm_retention_time_seconds = 16; } repeated PathConf locations = 2; } @@ -470,47 +431,3 @@ message CacheRemoteObjectToLocalClusterRequest { message CacheRemoteObjectToLocalClusterResponse { Entry entry = 1; } - -///////////////////////// -// distributed lock management -///////////////////////// -message LockRequest { - string name = 1; - int64 seconds_to_lock = 2; - string renew_token = 3; - bool is_moved = 4; - string owner = 5; -} -message LockResponse { - string renew_token = 1; - string lock_owner = 2; - string lock_host_moved_to = 3; - string error = 4; -} -message UnlockRequest { - string name = 1; - string renew_token = 2; - bool is_moved = 3; -} -message UnlockResponse { - string error = 1; - string moved_to = 2; -} -message FindLockOwnerRequest { - string name = 1; - bool is_moved = 2; -} -message FindLockOwnerResponse { - string owner = 1; -} -message Lock { - string name = 1; - string renew_token = 2; - int64 expired_at_ns = 3; - string owner = 4; -} -message TransferLocksRequest { - repeated Lock locks = 1; -} -message TransferLocksResponse { -} diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java index 137148425..6ad9edb2c 100644 --- a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java @@ -17,13 +17,13 @@ public class SeaweedReadTest { .setFileId("aaa") .setOffset(0) .setSize(100) - .setModifiedTsNs(1000) + .setMtime(1000) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("bbb") .setOffset(100) .setSize(133) - .setModifiedTsNs(2000) + .setMtime(2000) .build()); List visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks); @@ -70,31 +70,31 @@ public class SeaweedReadTest { .setFileId("a") .setOffset(0) .setSize(100) - .setModifiedTsNs(1) + .setMtime(1) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("b") .setOffset(50) .setSize(100) - .setModifiedTsNs(2) + .setMtime(2) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("c") .setOffset(200) .setSize(50) - .setModifiedTsNs(3) + .setMtime(3) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("d") .setOffset(250) .setSize(50) - .setModifiedTsNs(4) + .setMtime(4) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("e") .setOffset(175) .setSize(100) - .setModifiedTsNs(5) + .setMtime(5) .build()); List visibleIntervals = ReadChunks.readResolvedChunks(chunks); @@ -161,7 +161,7 @@ public class SeaweedReadTest { .setFileId("") .setOffset(start) .setSize(size) - .setModifiedTsNs(ts) + .setMtime(ts) .build(); } } diff --git a/other/java/examples/pom.xml b/other/java/examples/pom.xml index 5c0981eae..7a5994072 100644 --- a/other/java/examples/pom.xml +++ b/other/java/examples/pom.xml @@ -9,21 +9,21 @@ 1.0-SNAPSHOT - com.seaweedfs + com.github.chrislusf seaweedfs-client - 3.80 + 3.13 compile - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop2-client - 3.80 + 3.13 compile org.apache.hadoop hadoop-common - 2.10.2 + 2.10.1 compile diff --git a/other/java/hdfs-over-ftp/pom.xml b/other/java/hdfs-over-ftp/pom.xml index 6cf1c86ea..8b4f0e612 100644 --- a/other/java/hdfs-over-ftp/pom.xml +++ b/other/java/hdfs-over-ftp/pom.xml @@ -31,12 +31,12 @@ io.springfox springfox-swagger-ui - 2.10.0 + 2.9.2 org.apache.hadoop hadoop-common - 3.2.4 + 3.2.3 org.apache.hadoop @@ -49,7 +49,7 @@ 1.1.1 - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop3-client 1.6.2 @@ -117,4 +117,4 @@ - + \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java index 9fe5dfd95..c3fa31872 100644 --- a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java @@ -16,7 +16,7 @@ import org.springframework.stereotype.Component; import java.io.File; /** - * reference: https://github.com/AShiou/hof + * reference: https://github.com/iponweb/hdfs-over-ftp */ @Component public class HFtpService { diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index fd84befa0..a5dedc27a 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -1,31 +1,15 @@ + + oss-parent + org.sonatype.oss + 9 + ../pom.xml/pom.xml + 4.0.0 - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop2-client - SeaweedFS HDFS2 Client ${seaweedfs.client.version} - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - @@ -33,7 +17,6 @@ 8 8 - 8 @@ -101,13 +84,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -124,7 +108,7 @@ maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs @@ -140,7 +124,7 @@ org.apache.hadoop hadoop-client - 3.2.4 + 2.10.1 provided @@ -148,11 +132,11 @@ org.apache.hadoop - hadoop-yarn-api + hadoop-mapreduce-client-app org.apache.hadoop - hadoop-yarn-client + hadoop-yarn-api org.apache.hadoop @@ -172,7 +156,7 @@ org.apache.hadoop hadoop-common - 3.2.4 + 2.10.1 provided @@ -183,6 +167,10 @@ commons-math3 org.apache.commons + + xmlenc + xmlenc + commons-io commons-io @@ -196,28 +184,20 @@ commons-collections - javax.servlet-api + servlet-api javax.servlet - javax.activation-api - javax.activation - - - jetty-server - org.eclipse.jetty + jetty + org.mortbay.jetty jetty-util - org.eclipse.jetty + org.mortbay.jetty - jetty-servlet - org.eclipse.jetty - - - jetty-webapp - org.eclipse.jetty + jetty-sslengine + org.mortbay.jetty jsp-api @@ -227,10 +207,6 @@ jersey-core com.sun.jersey - - jersey-servlet - com.sun.jersey - jersey-json com.sun.jersey @@ -240,37 +216,49 @@ com.sun.jersey - reload4j - ch.qos.reload4j + log4j + log4j + + + jets3t + net.java.dev.jets3t + + + commons-lang + commons-lang + + + commons-configuration + commons-configuration + + + commons-digester + commons-digester commons-beanutils commons-beanutils - - commons-configuration2 - org.apache.commons - commons-lang3 org.apache.commons - commons-text - org.apache.commons + slf4j-log4j12 + org.slf4j - slf4j-reload4j - org.slf4j + jackson-core-asl + org.codehaus.jackson + + + jackson-mapper-asl + org.codehaus.jackson avro org.apache.avro - - re2j - com.google.re2j - hadoop-auth org.apache.hadoop @@ -299,14 +287,6 @@ commons-compress org.apache.commons - - kerb-simplekdc - org.apache.kerby - - - jackson-databind - com.fasterxml.jackson.core - stax2-api org.codehaus.woodstox @@ -315,10 +295,6 @@ woodstox-core com.fasterxml.woodstox - - dnsjava - dnsjava - hadoop-annotations org.apache.hadoop @@ -326,8 +302,14 @@ + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + - 3.80 - 3.2.4 + 3.13 + 2.10.1 diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index 50fbdbc06..6e1dee356 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,37 +5,26 @@ 4.0.0 - 3.80 - 3.2.4 + 3.13 + 2.10.1 - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop2-client ${seaweedfs.client.version} - SeaweedFS HDFS2 Client - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - + + org.sonatype.oss + oss-parent + 9 + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + @@ -44,7 +33,6 @@ 8 8 - 8 @@ -115,13 +103,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -140,7 +129,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs @@ -161,7 +150,7 @@ provided - com.seaweedfs + com.github.chrislusf seaweedfs-client ${seaweedfs.client.version} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index 58fcaf975..b6ea4c3bb 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -29,7 +29,6 @@ public class SeaweedFileSystem extends FileSystem { public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication"; public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access"; public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024; - public static final String FS_SEAWEED_FILER_CN = "fs.seaweed.filer.cn"; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); @@ -64,9 +63,8 @@ public class SeaweedFileSystem extends FileSystem { setConf(conf); this.uri = uri; - String cn = conf.get(FS_SEAWEED_FILER_CN, ""); + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf); - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, cn, conf); } @Override diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index f65c1961b..a73dbeb74 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -27,8 +27,8 @@ public class SeaweedFileSystemStore { private FilerClient filerClient; private Configuration conf; - public SeaweedFileSystemStore(String host, int port, int grpcPort, String cn, Configuration conf) { - filerClient = new FilerClient(host, port, grpcPort, cn); + public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) { + filerClient = new FilerClient(host, port, grpcPort); this.conf = conf; String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct"); if (volumeServerAccessMode.equals("publicUrl")) { @@ -36,6 +36,7 @@ public class SeaweedFileSystemStore { } else if (volumeServerAccessMode.equals("filerProxy")) { filerClient.setAccessVolumeServerByFilerProxy(); } + } public void close() { diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index decf55a59..066687017 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -1,31 +1,15 @@ + + oss-parent + org.sonatype.oss + 9 + ../pom.xml/pom.xml + 4.0.0 - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop3-client - SeaweedFS HDFS3 Client ${seaweedfs.client.version} - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - @@ -33,7 +17,6 @@ 8 8 - 8 @@ -101,13 +84,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -124,7 +108,7 @@ maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs @@ -140,7 +124,7 @@ org.apache.hadoop hadoop-client - 3.2.4 + 3.2.3 provided @@ -172,7 +156,7 @@ org.apache.hadoop hadoop-common - 3.2.4 + 3.2.3 provided @@ -240,8 +224,8 @@ com.sun.jersey - reload4j - ch.qos.reload4j + log4j + log4j commons-beanutils @@ -260,7 +244,7 @@ org.apache.commons - slf4j-reload4j + slf4j-log4j12 org.slf4j @@ -326,8 +310,14 @@ + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + - 3.80 - 3.2.4 + 3.13 + 3.2.3 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index 3faba03be..976029aee 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,37 +5,26 @@ 4.0.0 - 3.80 - 3.2.4 + 3.13 + 3.2.3 - com.seaweedfs + com.github.chrislusf seaweedfs-hadoop3-client ${seaweedfs.client.version} - SeaweedFS HDFS3 Client - A java client for SeaweedFS. - https://github.com/seaweedfs/seaweedfs - - - The Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - - - - - Chris Lu - chris.lu@gmail.com - SeaweedFS - https://seaweedfs.com - - - - scm:git:git://github.com/seaweedfs/seaweedfs.git - scm:git:ssh://github.com:seaweedfs/seaweedfs.git - https://github.com/seaweedfs/seaweedfs/tree/master - + + org.sonatype.oss + oss-parent + 9 + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + @@ -44,7 +33,6 @@ 8 8 - 8 @@ -115,13 +103,14 @@ - org.sonatype.central - central-publishing-maven-plugin - 0.5.0 + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.8 true - central - true + ossrh + https://oss.sonatype.org/ + true @@ -140,7 +129,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.0.1 + 2.9.1 attach-javadocs @@ -161,7 +150,7 @@ provided - com.seaweedfs + com.github.chrislusf seaweedfs-client ${seaweedfs.client.version} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index 58fcaf975..b6ea4c3bb 100644 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -29,7 +29,6 @@ public class SeaweedFileSystem extends FileSystem { public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication"; public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access"; public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024; - public static final String FS_SEAWEED_FILER_CN = "fs.seaweed.filer.cn"; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); @@ -64,9 +63,8 @@ public class SeaweedFileSystem extends FileSystem { setConf(conf); this.uri = uri; - String cn = conf.get(FS_SEAWEED_FILER_CN, ""); + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, conf); - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, grpcPort, cn, conf); } @Override diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index f65c1961b..a73dbeb74 100644 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -27,8 +27,8 @@ public class SeaweedFileSystemStore { private FilerClient filerClient; private Configuration conf; - public SeaweedFileSystemStore(String host, int port, int grpcPort, String cn, Configuration conf) { - filerClient = new FilerClient(host, port, grpcPort, cn); + public SeaweedFileSystemStore(String host, int port, int grpcPort, Configuration conf) { + filerClient = new FilerClient(host, port, grpcPort); this.conf = conf; String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct"); if (volumeServerAccessMode.equals("publicUrl")) { @@ -36,6 +36,7 @@ public class SeaweedFileSystemStore { } else if (volumeServerAccessMode.equals("filerProxy")) { filerClient.setAccessVolumeServerByFilerProxy(); } + } public void close() { diff --git a/other/java/s3copier/pom.xml b/other/java/s3copier/pom.xml index 0050c70da..c3ff30932 100644 --- a/other/java/s3copier/pom.xml +++ b/other/java/s3copier/pom.xml @@ -5,10 +5,6 @@ copier jar 1.0-SNAPSHOT - - 18 - 18 - copier http://maven.apache.org diff --git a/other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java b/other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java index 06e623886..b86df95a0 100644 --- a/other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java +++ b/other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java @@ -10,8 +10,6 @@ import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.regions.Regions; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.model.CreateBucketRequest; -import com.amazonaws.services.s3.model.GetBucketLocationRequest; import com.amazonaws.services.s3.transfer.TransferManager; import com.amazonaws.services.s3.transfer.TransferManagerBuilder; import com.amazonaws.services.s3.transfer.Upload; @@ -44,19 +42,6 @@ public class HighLevelMultipartUpload { .withS3Client(s3Client) .build(); - - if (!s3Client.doesBucketExistV2(bucketName)) { - // Because the CreateBucketRequest object doesn't specify a region, the - // bucket is created in the region specified in the client. - s3Client.createBucket(new CreateBucketRequest(bucketName)); - - // Verify that the bucket was created by retrieving it and checking its location. - String bucketLocation = s3Client.getBucketLocation(new GetBucketLocationRequest(bucketName)); - System.out.println("Bucket location: " + bucketLocation); - } else { - System.out.println("Bucket already exists"); - } - // TransferManager processes all transfers asynchronously, // so this call returns immediately. Upload upload = tm.upload(bucketName, keyName, file); diff --git a/other/metrics/grafana_seaweedfs.json b/other/metrics/grafana_seaweedfs.json index e8c98ae21..88844b3c3 100644 --- a/other/metrics/grafana_seaweedfs.json +++ b/other/metrics/grafana_seaweedfs.json @@ -105,7 +105,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -114,7 +114,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -200,7 +200,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -209,7 +209,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -301,7 +301,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -310,7 +310,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -415,7 +415,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(SeaweedFS_filerStore_request_total[1m])", + "expr": "rate(SeaweedFS_filer_request_total[1m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", @@ -804,192 +804,6 @@ "alignLevel": null } }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 1, - "fillGradient": 0, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 84, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.1.2", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_received_bytes_total[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "S3 Bucket Traffic Received", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 1, - "fillGradient": 0, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 85, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.1.2", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_sent_bytes_total[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "S3 Bucket Traffic Sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": {}, "bars": false, @@ -1005,7 +819,7 @@ "h": 7, "w": 24, "x": 0, - "y": 16 + "y": 9 }, "hiddenSeries": false, "id": 55, @@ -1114,7 +928,7 @@ "h": 7, "w": 24, "x": 0, - "y": 23 + "y": 16 }, "hiddenSeries": false, "hideTimeOverride": false, @@ -1254,7 +1068,7 @@ "h": 1, "w": 24, "x": 0, - "y": 30 + "y": 23 }, "id": 71, "panels": [], @@ -1277,7 +1091,7 @@ "h": 7, "w": 12, "x": 0, - "y": 31 + "y": 24 }, "hiddenSeries": false, "id": 47, @@ -1381,7 +1195,7 @@ "h": 7, "w": 12, "x": 12, - "y": 31 + "y": 24 }, "hiddenSeries": false, "id": 40, @@ -1480,7 +1294,7 @@ "h": 7, "w": 24, "x": 0, - "y": 38 + "y": 31 }, "hiddenSeries": false, "id": 48, @@ -1579,7 +1393,7 @@ "h": 7, "w": 24, "x": 0, - "y": 45 + "y": 38 }, "hiddenSeries": false, "id": 50, @@ -1678,7 +1492,7 @@ "h": 7, "w": 24, "x": 0, - "y": 52 + "y": 45 }, "hiddenSeries": false, "id": 51, @@ -1769,7 +1583,7 @@ "h": 1, "w": 24, "x": 0, - "y": 59 + "y": 52 }, "id": 72, "panels": [], @@ -1792,7 +1606,7 @@ "h": 7, "w": 12, "x": 0, - "y": 60 + "y": 53 }, "hiddenSeries": false, "id": 12, @@ -1888,7 +1702,7 @@ "h": 7, "w": 12, "x": 12, - "y": 60 + "y": 53 }, "hiddenSeries": false, "id": 14, @@ -1983,7 +1797,7 @@ "h": 1, "w": 24, "x": 0, - "y": 67 + "y": 60 }, "id": 73, "panels": [], @@ -2006,7 +1820,7 @@ "h": 7, "w": 12, "x": 0, - "y": 68 + "y": 61 }, "hiddenSeries": false, "id": 52, @@ -2127,7 +1941,7 @@ "h": 7, "w": 12, "x": 12, - "y": 68 + "y": 61 }, "hiddenSeries": false, "id": 54, @@ -2223,7 +2037,7 @@ "h": 7, "w": 24, "x": 0, - "y": 75 + "y": 68 }, "hiddenSeries": false, "id": 53, @@ -2322,7 +2136,7 @@ "h": 7, "w": 8, "x": 0, - "y": 82 + "y": 75 }, "hiddenSeries": false, "id": 66, @@ -2473,7 +2287,7 @@ "h": 7, "w": 8, "x": 8, - "y": 82 + "y": 75 }, "hiddenSeries": false, "id": 68, @@ -2638,27 +2452,7 @@ "style": "dark", "tags": [], "templating": { - "list": [ - { - "current": { - "selected": true, - "text": "default", - "value": "default" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - } - ] + "list": [] }, "time": { "from": "now-3h", diff --git a/other/metrics/grafana_seaweedfs_heartbeat.json b/other/metrics/grafana_seaweedfs_heartbeat.json index e3ab94eb9..9a52624cf 100644 --- a/other/metrics/grafana_seaweedfs_heartbeat.json +++ b/other/metrics/grafana_seaweedfs_heartbeat.json @@ -91,7 +91,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -100,7 +100,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -181,7 +181,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -190,7 +190,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -277,7 +277,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -286,7 +286,7 @@ "step": 60 }, { - "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", "format": "time_series", "hide": false, "intervalFactor": 2, @@ -386,7 +386,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(SeaweedFS_filerStore_request_total[1m]) * 5", + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", @@ -713,168 +713,6 @@ } ] }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS-DEV}", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "id": 84, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_received_bytes_total[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "S3 Bucket Traffic Received", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS-DEV}", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "id": 85, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_sent_bytes_total[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "S3 Bucket Traffic Sent", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ] - }, { "aliasColors": {}, "bars": false, @@ -2057,27 +1895,7 @@ "style": "dark", "tags": [], "templating": { - "list": [ - { - "current": { - "selected": true, - "text": "default", - "value": "default" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "DS_PROMETHEUS", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - } - ] + "list": [] }, "time": { "from": "now-5m", @@ -2111,4 +1929,4 @@ "timezone": "browser", "title": "SeaweedFS ks8 heartbeat", "version": 1 -} +} \ No newline at end of file diff --git a/other/metrics/grafana_seaweedfs_k8s.json b/other/metrics/grafana_seaweedfs_k8s.json index 50f56c7bd..348198e52 100644 --- a/other/metrics/grafana_seaweedfs_k8s.json +++ b/other/metrics/grafana_seaweedfs_k8s.json @@ -261,192 +261,6 @@ "alignLevel": null } }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 1, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 8 - }, - "hiddenSeries": false, - "id": 84, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.1.2", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_received_bytes_total{namespace=\"$namespace\",service=~\"$service-api\"}[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "S3 Bucket Traffic Received", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 1, - "fieldConfig": { - "defaults": { - "unit": "decbytes" - }, - "overrides": [] - }, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 85, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.1.2", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(SeaweedFS_s3_bucket_traffic_sent_bytes_total{namespace=\"$namespace\",service=~\"$service-api\"}[$__interval])) by (bucket)", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "S3 Bucket Traffic Sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "logBase": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": {}, "bars": false, @@ -468,7 +282,7 @@ "h": 7, "w": 8, "x": 0, - "y": 15 + "y": 8 }, "hiddenSeries": false, "id": 68, @@ -579,7 +393,7 @@ "h": 7, "w": 8, "x": 8, - "y": 15 + "y": 8 }, "hiddenSeries": false, "id": 67, @@ -690,7 +504,7 @@ "h": 7, "w": 8, "x": 16, - "y": 15 + "y": 8 }, "hiddenSeries": false, "id": 65, @@ -787,7 +601,7 @@ "h": 1, "w": 24, "x": 0, - "y": 22 + "y": 15 }, "id": 55, "panels": [], @@ -816,7 +630,7 @@ "h": 7, "w": 8, "x": 0, - "y": 23 + "y": 16 }, "hiddenSeries": false, "id": 46, @@ -927,7 +741,7 @@ "h": 7, "w": 8, "x": 8, - "y": 23 + "y": 16 }, "hiddenSeries": false, "id": 49, @@ -1043,7 +857,7 @@ "h": 7, "w": 8, "x": 16, - "y": 23 + "y": 16 }, "hiddenSeries": false, "id": 66, @@ -1159,7 +973,7 @@ "h": 7, "w": 24, "x": 0, - "y": 30 + "y": 23 }, "hiddenSeries": false, "id": 2, @@ -1258,7 +1072,7 @@ "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 30 }, "id": 56, "panels": [], @@ -1287,7 +1101,7 @@ "h": 7, "w": 12, "x": 0, - "y": 38 + "y": 31 }, "hiddenSeries": false, "id": 47, @@ -1395,7 +1209,7 @@ "h": 7, "w": 12, "x": 12, - "y": 38 + "y": 31 }, "hiddenSeries": false, "id": 40, @@ -1498,7 +1312,7 @@ "h": 7, "w": 24, "x": 0, - "y": 45 + "y": 38 }, "hiddenSeries": false, "id": 48, @@ -1611,7 +1425,7 @@ "h": 7, "w": 24, "x": 0, - "y": 52 + "y": 45 }, "hiddenSeries": false, "id": 50, @@ -1716,7 +1530,7 @@ "h": 7, "w": 24, "x": 0, - "y": 59 + "y": 52 }, "hiddenSeries": false, "id": 51, @@ -1801,7 +1615,7 @@ "h": 1, "w": 24, "x": 0, - "y": 66 + "y": 59 }, "id": 57, "panels": [], @@ -1830,7 +1644,7 @@ "h": 7, "w": 12, "x": 0, - "y": 67 + "y": 60 }, "hiddenSeries": false, "id": 12, @@ -1930,7 +1744,7 @@ "h": 7, "w": 12, "x": 12, - "y": 67 + "y": 60 }, "hiddenSeries": false, "id": 14, @@ -2019,7 +1833,7 @@ "h": 1, "w": 24, "x": 0, - "y": 74 + "y": 67 }, "id": 58, "panels": [], @@ -2048,7 +1862,7 @@ "h": 7, "w": 12, "x": 0, - "y": 75 + "y": 68 }, "hiddenSeries": false, "id": 52, @@ -2077,7 +1891,7 @@ "steppedLine": false, "targets": [ { - "expr": "go_memstats_alloc_bytes{namespace=~\"$namespace\", endpoint=\"metrics\"}", + "expr": "go_memstats_alloc_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", "format": "time_series", "hide": false, "interval": "", @@ -2095,7 +1909,7 @@ "refId": "A" }, { - "expr": "go_memstats_stack_inuse_bytes{namespace=~\"$namespace\", endpoint=\"metrics\"}", + "expr": "go_memstats_stack_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", "format": "time_series", "hide": true, "interval": "", @@ -2104,7 +1918,7 @@ "refId": "C" }, { - "expr": "go_memstats_heap_inuse_bytes{namespace=~\"$namespace\", endpoint=\"metrics\"}", + "expr": "go_memstats_heap_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", "format": "time_series", "hide": true, "interval": "", @@ -2176,7 +1990,7 @@ "h": 7, "w": 12, "x": 12, - "y": 75 + "y": 68 }, "hiddenSeries": false, "id": 54, @@ -2205,7 +2019,7 @@ "steppedLine": false, "targets": [ { - "expr": "go_gc_duration_seconds{namespace=~\"$namespace\", endpoint=\"metrics\"}", + "expr": "go_gc_duration_seconds{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -2276,7 +2090,7 @@ "h": 7, "w": 24, "x": 0, - "y": 82 + "y": 75 }, "hiddenSeries": false, "id": 53, @@ -2305,7 +2119,7 @@ "steppedLine": false, "targets": [ { - "expr": "go_goroutines{namespace=~\"$namespace\", endpoint=\"metrics\"}", + "expr": "go_goroutines{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -2365,8 +2179,8 @@ { "current": { "selected": true, - "text": "default", - "value": "default" + "text": "clickhouse-prom", + "value": "clickhouse-prom" }, "hide": 0, "includeAll": false, @@ -2389,7 +2203,7 @@ "value": "s3" }, "datasource": "$DS_PROMETHEUS", - "definition": "label_values({endpoint=\"metrics\"}, namespace)", + "definition": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)", "hide": 0, "includeAll": false, "label": "namespace", @@ -2402,7 +2216,7 @@ "value": "s3" } ], - "query": "label_values({endpoint=\"metrics\"}, namespace)", + "query": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)", "refresh": 0, "regex": "", "skipUrlSync": false, diff --git a/other/mq_client_example/agent_pub_record/agent_pub_record.go b/other/mq_client_example/agent_pub_record/agent_pub_record.go deleted file mode 100644 index 48e78f530..000000000 --- a/other/mq_client_example/agent_pub_record/agent_pub_record.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "github.com/seaweedfs/seaweedfs/other/mq_client_example/example" - "github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "log" - "sync" - "sync/atomic" - "time" -) - -var ( - messageCount = flag.Int("n", 1000, "message count") - messageDelay = flag.Duration("d", time.Second, "delay between messages") - concurrency = flag.Int("c", 4, "concurrent publishers") - partitionCount = flag.Int("p", 6, "partition count") - - clientName = flag.String("client", "c1", "client name") - - namespace = flag.String("ns", "test", "namespace") - t = flag.String("t", "test", "t") - agent = flag.String("agent", "localhost:16777", "mq agent address") - - counter int32 -) - -func genMyRecord(id int32) *example.MyRecord { - return &example.MyRecord{ - Key: []byte(fmt.Sprintf("key-%s-%d", *clientName, id)), - Field1: []byte(fmt.Sprintf("field1-%s-%d", *clientName, id)), - Field2: fmt.Sprintf("field2-%s-%d", *clientName, id), - Field3: id, - Field4: int64(id), - Field5: float32(id), - Field6: float64(id), - Field7: id%2 == 0, - } -} - -func doPublish(publisher *agent_client.PublishSession, id int) { - startTime := time.Now() - for { - i := atomic.AddInt32(&counter, 1) - if i > int32(*messageCount) { - break - } - // Simulate publishing a message - myRecord := genMyRecord(int32(i)) - if err := publisher.PublishMessageRecord(myRecord.Key, myRecord.ToRecordValue()); err != nil { - fmt.Println(err) - break - } - if *messageDelay > 0 { - time.Sleep(*messageDelay) - fmt.Printf("sent %+v\n", string(myRecord.Key)) - } - } - elapsed := time.Since(startTime) - log.Printf("Publisher %s-%d finished in %s", *clientName, id, elapsed) -} - -func main() { - flag.Parse() - - recordType := example.MyRecordType() - - session, err := agent_client.NewPublishSession(*agent, schema.NewSchema(*namespace, *t, recordType), *partitionCount, *clientName) - if err != nil { - log.Printf("failed to create session: %v", err) - return - } - defer session.CloseSession() - - startTime := time.Now() - - var wg sync.WaitGroup - // Start multiple publishers - for i := 0; i < *concurrency; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - doPublish(session, id) - }(i) - } - - // Wait for all publishers to finish - wg.Wait() - elapsed := time.Since(startTime) - - log.Printf("Published %d messages in %s (%.2f msg/s)", *messageCount, elapsed, float64(*messageCount)/elapsed.Seconds()) - -} diff --git a/other/mq_client_example/agent_sub_record/agent_sub_record.go b/other/mq_client_example/agent_sub_record/agent_sub_record.go deleted file mode 100644 index 21c74a46d..000000000 --- a/other/mq_client_example/agent_sub_record/agent_sub_record.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "github.com/seaweedfs/seaweedfs/other/mq_client_example/example" - "github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "log" - "time" -) - -var ( - namespace = flag.String("ns", "test", "namespace") - t = flag.String("topic", "test", "topic") - agent = flag.String("agent", "localhost:16777", "mq agent address") - maxPartitionCount = flag.Int("maxPartitionCount", 3, "max partition count") - slidingWindowSize = flag.Int("slidingWindowSize", 1, "per partition concurrency") - timeAgo = flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"ยตs\"), \"ms\", \"s\", \"m\", \"h\"") - - clientId = flag.Uint("client_id", uint(util.RandomInt32()), "client id") -) - -func main() { - flag.Parse() - - // determine the start of the messages - var startTsNs int64 - startType := schema_pb.OffsetType_RESUME_OR_EARLIEST - if *timeAgo > 0 { - startTsNs = time.Now().Add(-*timeAgo).UnixNano() - startType = schema_pb.OffsetType_EXACT_TS_NS - } - - session, err := agent_client.NewSubscribeSession(*agent, &agent_client.SubscribeOption{ - ConsumerGroup: "test", - ConsumerGroupInstanceId: fmt.Sprintf("client-%d", *clientId), - Topic: topic.NewTopic(*namespace, *t), - OffsetType: startType, - OffsetTsNs: startTsNs, - Filter: "", - MaxSubscribedPartitions: int32(*maxPartitionCount), - SlidingWindowSize: int32(*slidingWindowSize), - }) - if err != nil { - log.Printf("new subscribe session: %v", err) - return - } - defer session.CloseSession() - - counter := 0 - session.SubscribeMessageRecord(func(key []byte, recordValue *schema_pb.RecordValue) { - counter++ - record := example.FromRecordValue(recordValue) - fmt.Printf("%d %s %v\n", counter, string(key), record.Field2) - }, func() { - log.Printf("done received %d messages", counter) - }) - -} diff --git a/other/mq_client_example/example/my_record.go b/other/mq_client_example/example/my_record.go deleted file mode 100644 index ea6a0e7bd..000000000 --- a/other/mq_client_example/example/my_record.go +++ /dev/null @@ -1,56 +0,0 @@ -package example - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type MyRecord struct { - Key []byte - Field1 []byte - Field2 string - Field3 int32 - Field4 int64 - Field5 float32 - Field6 float64 - Field7 bool -} - -func MyRecordType() *schema_pb.RecordType { - return schema.RecordTypeBegin(). - WithField("key", schema.TypeBytes). - WithField("field1", schema.TypeBytes). - WithField("field2", schema.TypeString). - WithField("field3", schema.TypeInt32). - WithField("field4", schema.TypeInt64). - WithField("field5", schema.TypeFloat). - WithField("field6", schema.TypeDouble). - WithField("field7", schema.TypeBoolean). - RecordTypeEnd() -} - -func (r *MyRecord) ToRecordValue() *schema_pb.RecordValue { - return schema.RecordBegin(). - SetBytes("key", r.Key). - SetBytes("field1", r.Field1). - SetString("field2", r.Field2). - SetInt32("field3", r.Field3). - SetInt64("field4", r.Field4). - SetFloat("field5", r.Field5). - SetDouble("field6", r.Field6). - SetBool("field7", r.Field7). - RecordEnd() -} - -func FromRecordValue(recordValue *schema_pb.RecordValue) *MyRecord { - return &MyRecord{ - Key: recordValue.Fields["key"].GetBytesValue(), - Field1: recordValue.Fields["field1"].GetBytesValue(), - Field2: recordValue.Fields["field2"].GetStringValue(), - Field3: recordValue.Fields["field3"].GetInt32Value(), - Field4: recordValue.Fields["field4"].GetInt64Value(), - Field5: recordValue.Fields["field5"].GetFloatValue(), - Field6: recordValue.Fields["field6"].GetDoubleValue(), - Field7: recordValue.Fields["field7"].GetBoolValue(), - } -} diff --git a/postgres-examples/README.md b/postgres-examples/README.md deleted file mode 100644 index fcf853745..000000000 --- a/postgres-examples/README.md +++ /dev/null @@ -1,414 +0,0 @@ -# SeaweedFS PostgreSQL Protocol Examples - -This directory contains examples demonstrating how to connect to SeaweedFS using the PostgreSQL wire protocol. - -## Starting the PostgreSQL Server - -```bash -# Start with trust authentication (no password required) -weed postgres -port=5432 -master=localhost:9333 - -# Start with password authentication -weed postgres -port=5432 -auth=password -users="admin:secret;readonly:view123" - -# Start with MD5 authentication (more secure) -weed postgres -port=5432 -auth=md5 -users="user1:pass1;user2:pass2" - -# Start with TLS encryption -weed postgres -port=5432 -tls-cert=server.crt -tls-key=server.key - -# Allow connections from any host -weed postgres -host=0.0.0.0 -port=5432 -``` - -## Client Connections - -### psql Command Line - -```bash -# Basic connection (trust auth) -psql -h localhost -p 5432 -U seaweedfs -d default - -# With password -PGPASSWORD=secret psql -h localhost -p 5432 -U admin -d default - -# Connection string format -psql "postgresql://admin:secret@localhost:5432/default" - -# Connection string with parameters -psql "host=localhost port=5432 dbname=default user=admin password=secret" -``` - -### Programming Languages - -#### Python (psycopg2) -```python -import psycopg2 - -# Connect to SeaweedFS -conn = psycopg2.connect( - host="localhost", - port=5432, - user="seaweedfs", - database="default" -) - -# Execute queries -cursor = conn.cursor() -cursor.execute("SELECT * FROM my_topic LIMIT 10") - -for row in cursor.fetchall(): - print(row) - -cursor.close() -conn.close() -``` - -#### Java JDBC -```java -import java.sql.*; - -public class SeaweedFSExample { - public static void main(String[] args) throws SQLException { - String url = "jdbc:postgresql://localhost:5432/default"; - - Connection conn = DriverManager.getConnection(url, "seaweedfs", ""); - Statement stmt = conn.createStatement(); - - ResultSet rs = stmt.executeQuery("SELECT * FROM my_topic LIMIT 10"); - while (rs.next()) { - System.out.println("ID: " + rs.getLong("id")); - System.out.println("Message: " + rs.getString("message")); - } - - rs.close(); - stmt.close(); - conn.close(); - } -} -``` - -#### Go (lib/pq) -```go -package main - -import ( - "database/sql" - "fmt" - _ "github.com/lib/pq" -) - -func main() { - db, err := sql.Open("postgres", - "host=localhost port=5432 user=seaweedfs dbname=default sslmode=disable") - if err != nil { - panic(err) - } - defer db.Close() - - rows, err := db.Query("SELECT * FROM my_topic LIMIT 10") - if err != nil { - panic(err) - } - defer rows.Close() - - for rows.Next() { - var id int64 - var message string - err := rows.Scan(&id, &message) - if err != nil { - panic(err) - } - fmt.Printf("ID: %d, Message: %s\n", id, message) - } -} -``` - -#### Node.js (pg) -```javascript -const { Client } = require('pg'); - -const client = new Client({ - host: 'localhost', - port: 5432, - user: 'seaweedfs', - database: 'default', -}); - -async function query() { - await client.connect(); - - const result = await client.query('SELECT * FROM my_topic LIMIT 10'); - console.log(result.rows); - - await client.end(); -} - -query().catch(console.error); -``` - -## SQL Operations - -### Basic Queries -```sql --- List databases -SHOW DATABASES; - --- List tables (topics) -SHOW TABLES; - --- Describe table structure -DESCRIBE my_topic; --- or use the shorthand: DESC my_topic; - --- Basic select -SELECT * FROM my_topic; - --- With WHERE clause -SELECT id, message FROM my_topic WHERE id > 1000; - --- With LIMIT -SELECT * FROM my_topic LIMIT 100; -``` - -### Aggregations -```sql --- Count records -SELECT COUNT(*) FROM my_topic; - --- Multiple aggregations -SELECT - COUNT(*) as total_messages, - MIN(id) as min_id, - MAX(id) as max_id, - AVG(amount) as avg_amount -FROM my_topic; - --- Aggregations with WHERE -SELECT COUNT(*) FROM my_topic WHERE status = 'active'; -``` - -### System Columns -```sql --- Access system columns -SELECT - id, - message, - _timestamp_ns as timestamp, - _key as partition_key, - _source as data_source -FROM my_topic; - --- Filter by timestamp -SELECT * FROM my_topic -WHERE _timestamp_ns > 1640995200000000000 -LIMIT 10; -``` - -### PostgreSQL System Queries -```sql --- Version information -SELECT version(); - --- Current database -SELECT current_database(); - --- Current user -SELECT current_user; - --- Server settings -SELECT current_setting('server_version'); -SELECT current_setting('server_encoding'); -``` - -## psql Meta-Commands - -```sql --- List tables -\d -\dt - --- List databases -\l - --- Describe specific table -\d my_topic -\dt my_topic - --- List schemas -\dn - --- Help -\h -\? - --- Quit -\q -``` - -## Database Tools Integration - -### DBeaver -1. Create New Connection โ†’ PostgreSQL -2. Settings: - - **Host**: localhost - - **Port**: 5432 - - **Database**: default - - **Username**: seaweedfs (or configured user) - - **Password**: (if using password auth) - -### pgAdmin -1. Add New Server -2. Connection tab: - - **Host**: localhost - - **Port**: 5432 - - **Username**: seaweedfs - - **Database**: default - -### DataGrip -1. New Data Source โ†’ PostgreSQL -2. Configure: - - **Host**: localhost - - **Port**: 5432 - - **User**: seaweedfs - - **Database**: default - -### Grafana -1. Add Data Source โ†’ PostgreSQL -2. Configuration: - - **Host**: localhost:5432 - - **Database**: default - - **User**: seaweedfs - - **SSL Mode**: disable - -## BI Tools - -### Tableau -1. Connect to Data โ†’ PostgreSQL -2. Server: localhost -3. Port: 5432 -4. Database: default -5. Username: seaweedfs - -### Power BI -1. Get Data โ†’ Database โ†’ PostgreSQL -2. Server: localhost -3. Database: default -4. Username: seaweedfs - -## Connection Pooling - -### Java (HikariCP) -```java -HikariConfig config = new HikariConfig(); -config.setJdbcUrl("jdbc:postgresql://localhost:5432/default"); -config.setUsername("seaweedfs"); -config.setMaximumPoolSize(10); - -HikariDataSource dataSource = new HikariDataSource(config); -``` - -### Python (connection pooling) -```python -from psycopg2 import pool - -connection_pool = psycopg2.pool.SimpleConnectionPool( - 1, 20, - host="localhost", - port=5432, - user="seaweedfs", - database="default" -) - -conn = connection_pool.getconn() -# Use connection -connection_pool.putconn(conn) -``` - -## Security Best Practices - -### Use TLS Encryption -```bash -# Generate self-signed certificate for testing -openssl req -x509 -newkey rsa:4096 -keyout server.key -out server.crt -days 365 -nodes - -# Start with TLS -weed postgres -tls-cert=server.crt -tls-key=server.key -``` - -### Use MD5 Authentication -```bash -# More secure than password auth -weed postgres -auth=md5 -users="admin:secret123;readonly:view456" -``` - -### Limit Connections -```bash -# Limit concurrent connections -weed postgres -max-connections=50 -idle-timeout=30m -``` - -## Troubleshooting - -### Connection Issues -```bash -# Test connectivity -telnet localhost 5432 - -# Check if server is running -ps aux | grep "weed postgres" - -# Check logs for errors -tail -f /var/log/seaweedfs/postgres.log -``` - -### Common Errors - -**"Connection refused"** -- Ensure PostgreSQL server is running -- Check host/port configuration -- Verify firewall settings - -**"Authentication failed"** -- Check username/password -- Verify auth method configuration -- Ensure user is configured in server - -**"Database does not exist"** -- Use correct database name (default: 'default') -- Check available databases: `SHOW DATABASES` - -**"Permission denied"** -- Check user permissions -- Verify authentication method -- Use correct credentials - -## Performance Tips - -1. **Use LIMIT clauses** for large result sets -2. **Filter with WHERE clauses** to reduce data transfer -3. **Use connection pooling** for multi-threaded applications -4. **Close resources properly** (connections, statements, result sets) -5. **Use prepared statements** for repeated queries - -## Monitoring - -### Connection Statistics -```sql --- Current connections (if supported) -SELECT COUNT(*) FROM pg_stat_activity; - --- Server version -SELECT version(); - --- Current settings -SELECT name, setting FROM pg_settings WHERE name LIKE '%connection%'; -``` - -### Query Performance -```sql --- Use EXPLAIN for query plans (if supported) -EXPLAIN SELECT * FROM my_topic WHERE id > 1000; -``` - -This PostgreSQL protocol support makes SeaweedFS accessible to the entire PostgreSQL ecosystem, enabling seamless integration with existing tools, applications, and workflows. diff --git a/postgres-examples/test_client.py b/postgres-examples/test_client.py deleted file mode 100644 index e293d53cc..000000000 --- a/postgres-examples/test_client.py +++ /dev/null @@ -1,374 +0,0 @@ -#!/usr/bin/env python3 -""" -Test client for SeaweedFS PostgreSQL protocol support. - -This script demonstrates how to connect to SeaweedFS using standard PostgreSQL -libraries and execute various types of queries. - -Requirements: - pip install psycopg2-binary - -Usage: - python test_client.py - python test_client.py --host localhost --port 5432 --user seaweedfs --database default -""" - -import sys -import argparse -import time -import traceback - -try: - import psycopg2 - import psycopg2.extras -except ImportError: - print("Error: psycopg2 not found. Install with: pip install psycopg2-binary") - sys.exit(1) - - -def test_connection(host, port, user, database, password=None): - """Test basic connection to SeaweedFS PostgreSQL server.""" - print(f"๐Ÿ”— Testing connection to {host}:{port}/{database} as user '{user}'") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database, - 'connect_timeout': 10 - } - - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - print("โœ… Connection successful!") - - # Test basic query - cursor = conn.cursor() - cursor.execute("SELECT 1 as test") - result = cursor.fetchone() - print(f"โœ… Basic query successful: {result}") - - cursor.close() - conn.close() - return True - - except Exception as e: - print(f"โŒ Connection failed: {e}") - return False - - -def test_system_queries(host, port, user, database, password=None): - """Test PostgreSQL system queries.""" - print("\n๐Ÿ”ง Testing PostgreSQL system queries...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) - - system_queries = [ - ("Version", "SELECT version()"), - ("Current Database", "SELECT current_database()"), - ("Current User", "SELECT current_user"), - ("Server Encoding", "SELECT current_setting('server_encoding')"), - ("Client Encoding", "SELECT current_setting('client_encoding')"), - ] - - for name, query in system_queries: - try: - cursor.execute(query) - result = cursor.fetchone() - print(f" โœ… {name}: {result[0]}") - except Exception as e: - print(f" โŒ {name}: {e}") - - cursor.close() - conn.close() - - except Exception as e: - print(f"โŒ System queries failed: {e}") - - -def test_schema_queries(host, port, user, database, password=None): - """Test schema and metadata queries.""" - print("\n๐Ÿ“Š Testing schema queries...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) - - schema_queries = [ - ("Show Databases", "SHOW DATABASES"), - ("Show Tables", "SHOW TABLES"), - ("List Schemas", "SELECT 'public' as schema_name"), - ] - - for name, query in schema_queries: - try: - cursor.execute(query) - results = cursor.fetchall() - print(f" โœ… {name}: Found {len(results)} items") - for row in results[:3]: # Show first 3 results - print(f" - {dict(row)}") - if len(results) > 3: - print(f" ... and {len(results) - 3} more") - except Exception as e: - print(f" โŒ {name}: {e}") - - cursor.close() - conn.close() - - except Exception as e: - print(f"โŒ Schema queries failed: {e}") - - -def test_data_queries(host, port, user, database, password=None): - """Test data queries on actual topics.""" - print("\n๐Ÿ“ Testing data queries...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) - - # First, try to get available tables/topics - cursor.execute("SHOW TABLES") - tables = cursor.fetchall() - - if not tables: - print(" โ„น๏ธ No tables/topics found for data testing") - cursor.close() - conn.close() - return - - # Test with first available table - table_name = tables[0][0] if tables[0] else 'test_topic' - print(f" ๐Ÿ“‹ Testing with table: {table_name}") - - test_queries = [ - (f"Count records in {table_name}", f"SELECT COUNT(*) FROM \"{table_name}\""), - (f"Sample data from {table_name}", f"SELECT * FROM \"{table_name}\" LIMIT 3"), - (f"System columns from {table_name}", f"SELECT _timestamp_ns, _key, _source FROM \"{table_name}\" LIMIT 3"), - (f"Describe {table_name}", f"DESCRIBE \"{table_name}\""), - ] - - for name, query in test_queries: - try: - cursor.execute(query) - results = cursor.fetchall() - - if "COUNT" in query.upper(): - count = results[0][0] if results else 0 - print(f" โœ… {name}: {count} records") - elif "DESCRIBE" in query.upper(): - print(f" โœ… {name}: {len(results)} columns") - for row in results[:5]: # Show first 5 columns - print(f" - {dict(row)}") - else: - print(f" โœ… {name}: {len(results)} rows") - for row in results: - print(f" - {dict(row)}") - - except Exception as e: - print(f" โŒ {name}: {e}") - - cursor.close() - conn.close() - - except Exception as e: - print(f"โŒ Data queries failed: {e}") - - -def test_prepared_statements(host, port, user, database, password=None): - """Test prepared statements.""" - print("\n๐Ÿ“ Testing prepared statements...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor() - - # Test parameterized query - try: - cursor.execute("SELECT %s as param1, %s as param2", ("hello", 42)) - result = cursor.fetchone() - print(f" โœ… Prepared statement: {result}") - except Exception as e: - print(f" โŒ Prepared statement: {e}") - - cursor.close() - conn.close() - - except Exception as e: - print(f"โŒ Prepared statements test failed: {e}") - - -def test_transaction_support(host, port, user, database, password=None): - """Test transaction support (should be no-op for read-only).""" - print("\n๐Ÿ”„ Testing transaction support...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor() - - transaction_commands = [ - "BEGIN", - "SELECT 1 as in_transaction", - "COMMIT", - "SELECT 1 as after_commit", - ] - - for cmd in transaction_commands: - try: - cursor.execute(cmd) - if "SELECT" in cmd: - result = cursor.fetchone() - print(f" โœ… {cmd}: {result}") - else: - print(f" โœ… {cmd}: OK") - except Exception as e: - print(f" โŒ {cmd}: {e}") - - cursor.close() - conn.close() - - except Exception as e: - print(f"โŒ Transaction test failed: {e}") - - -def test_performance(host, port, user, database, password=None, iterations=10): - """Test query performance.""" - print(f"\nโšก Testing performance ({iterations} iterations)...") - - try: - conn_params = { - 'host': host, - 'port': port, - 'user': user, - 'database': database - } - if password: - conn_params['password'] = password - - times = [] - - for i in range(iterations): - start_time = time.time() - - conn = psycopg2.connect(**conn_params) - cursor = conn.cursor() - cursor.execute("SELECT 1") - result = cursor.fetchone() - cursor.close() - conn.close() - - elapsed = time.time() - start_time - times.append(elapsed) - - if i < 3: # Show first 3 iterations - print(f" Iteration {i+1}: {elapsed:.3f}s") - - avg_time = sum(times) / len(times) - min_time = min(times) - max_time = max(times) - - print(f" โœ… Performance results:") - print(f" - Average: {avg_time:.3f}s") - print(f" - Min: {min_time:.3f}s") - print(f" - Max: {max_time:.3f}s") - - except Exception as e: - print(f"โŒ Performance test failed: {e}") - - -def main(): - parser = argparse.ArgumentParser(description="Test SeaweedFS PostgreSQL Protocol") - parser.add_argument("--host", default="localhost", help="PostgreSQL server host") - parser.add_argument("--port", type=int, default=5432, help="PostgreSQL server port") - parser.add_argument("--user", default="seaweedfs", help="PostgreSQL username") - parser.add_argument("--password", help="PostgreSQL password") - parser.add_argument("--database", default="default", help="PostgreSQL database") - parser.add_argument("--skip-performance", action="store_true", help="Skip performance tests") - - args = parser.parse_args() - - print("๐Ÿงช SeaweedFS PostgreSQL Protocol Test Client") - print("=" * 50) - - # Test basic connection first - if not test_connection(args.host, args.port, args.user, args.database, args.password): - print("\nโŒ Basic connection failed. Cannot continue with other tests.") - sys.exit(1) - - # Run all tests - try: - test_system_queries(args.host, args.port, args.user, args.database, args.password) - test_schema_queries(args.host, args.port, args.user, args.database, args.password) - test_data_queries(args.host, args.port, args.user, args.database, args.password) - test_prepared_statements(args.host, args.port, args.user, args.database, args.password) - test_transaction_support(args.host, args.port, args.user, args.database, args.password) - - if not args.skip_performance: - test_performance(args.host, args.port, args.user, args.database, args.password) - - except KeyboardInterrupt: - print("\n\nโš ๏ธ Tests interrupted by user") - sys.exit(0) - except Exception as e: - print(f"\nโŒ Unexpected error during testing: {e}") - traceback.print_exc() - sys.exit(1) - - print("\n๐ŸŽ‰ All tests completed!") - print("\nTo use SeaweedFS with PostgreSQL tools:") - print(f" psql -h {args.host} -p {args.port} -U {args.user} -d {args.database}") - print(f" Connection string: postgresql://{args.user}@{args.host}:{args.port}/{args.database}") - - -if __name__ == "__main__": - main() diff --git a/seaweedfs-rdma-sidecar/.dockerignore b/seaweedfs-rdma-sidecar/.dockerignore deleted file mode 100644 index 3989eb5bd..000000000 --- a/seaweedfs-rdma-sidecar/.dockerignore +++ /dev/null @@ -1,65 +0,0 @@ -# Git -.git -.gitignore -.gitmodules - -# Documentation -*.md -docs/ - -# Development files -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# OS generated files -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db - -# Build artifacts -# bin/ (commented out for Docker build - needed for mount container) -# target/ (commented out for Docker build) -*.exe -*.dll -*.so -*.dylib - -# Go specific -vendor/ -*.test -*.prof -go.work -go.work.sum - -# Rust specific -Cargo.lock -# rdma-engine/target/ (commented out for Docker build) -*.pdb - -# Docker -Dockerfile* -docker-compose*.yml -.dockerignore - -# Test files (tests/ needed for integration test container) -# tests/ -# scripts/ (commented out for Docker build - needed for mount container) -*.log - -# Temporary files -tmp/ -temp/ -*.tmp -*.temp - -# IDE and editor files -*.sublime-* -.vscode/ -.idea/ diff --git a/seaweedfs-rdma-sidecar/CORRECT-SIDECAR-APPROACH.md b/seaweedfs-rdma-sidecar/CORRECT-SIDECAR-APPROACH.md deleted file mode 100644 index 743128ba8..000000000 --- a/seaweedfs-rdma-sidecar/CORRECT-SIDECAR-APPROACH.md +++ /dev/null @@ -1,196 +0,0 @@ -# โœ… Correct RDMA Sidecar Approach - Simple Parameter-Based - -## ๐ŸŽฏ **You're Right - Simplified Architecture** - -The RDMA sidecar should be **simple** and just take the volume server address as a parameter. The volume lookup complexity should stay in `weed mount`, not in the sidecar. - -## ๐Ÿ—๏ธ **Correct Architecture** - -### **1. weed mount (Client Side) - Does Volume Lookup** -```go -// File: weed/mount/filehandle_read.go (integration point) -func (fh *FileHandle) tryRDMARead(ctx context.Context, buff []byte, offset int64) (int64, int64, error) { - entry := fh.GetEntry() - - for _, chunk := range entry.GetEntry().Chunks { - if offset >= chunk.Offset && offset < chunk.Offset+int64(chunk.Size) { - // Parse chunk info - volumeID, needleID, cookie, err := ParseFileId(chunk.FileId) - if err != nil { - return 0, 0, err - } - - // ๐Ÿ” VOLUME LOOKUP (in weed mount, not sidecar) - volumeServerAddr, err := fh.wfs.lookupVolumeServer(ctx, volumeID) - if err != nil { - return 0, 0, err - } - - // ๐Ÿš€ SIMPLE RDMA REQUEST WITH VOLUME SERVER PARAMETER - data, isRDMA, err := fh.wfs.rdmaClient.ReadNeedleFromServer( - ctx, volumeServerAddr, volumeID, needleID, cookie, chunkOffset, readSize) - - return int64(copy(buff, data)), time.Now().UnixNano(), nil - } - } -} -``` - -### **2. RDMA Mount Client - Passes Volume Server Address** -```go -// File: weed/mount/rdma_client.go (modify existing) -func (c *RDMAMountClient) ReadNeedleFromServer(ctx context.Context, volumeServerAddr string, volumeID uint32, needleID uint64, cookie uint32, offset, size uint64) ([]byte, bool, error) { - // Simple HTTP request with volume server as parameter - reqURL := fmt.Sprintf("http://%s/rdma/read", c.sidecarAddr) - - requestBody := map[string]interface{}{ - "volume_server": volumeServerAddr, // โ† KEY: Pass volume server address - "volume_id": volumeID, - "needle_id": needleID, - "cookie": cookie, - "offset": offset, - "size": size, - } - - // POST request with volume server parameter - jsonBody, err := json.Marshal(requestBody) - if err != nil { - return nil, false, fmt.Errorf("failed to marshal request body: %w", err) - } - resp, err := c.httpClient.Post(reqURL, "application/json", bytes.NewBuffer(jsonBody)) - if err != nil { - return nil, false, fmt.Errorf("http post to sidecar: %w", err) - } -} -``` - -### **3. RDMA Sidecar - Simple, No Lookup Logic** -```go -// File: seaweedfs-rdma-sidecar/cmd/demo-server/main.go -func (s *DemoServer) rdmaReadHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Parse request body - var req struct { - VolumeServer string `json:"volume_server"` // โ† Receive volume server address - VolumeID uint32 `json:"volume_id"` - NeedleID uint64 `json:"needle_id"` - Cookie uint32 `json:"cookie"` - Offset uint64 `json:"offset"` - Size uint64 `json:"size"` - } - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "Invalid request", http.StatusBadRequest) - return - } - - s.logger.WithFields(logrus.Fields{ - "volume_server": req.VolumeServer, // โ† Use provided volume server - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - }).Info("๐Ÿ“– Processing RDMA read with volume server parameter") - - // ๐Ÿš€ SIMPLE: Use the provided volume server address - // No complex lookup logic needed! - resp, err := s.rdmaClient.ReadFromVolumeServer(r.Context(), req.VolumeServer, req.VolumeID, req.NeedleID, req.Cookie, req.Offset, req.Size) - - if err != nil { - http.Error(w, fmt.Sprintf("RDMA read failed: %v", err), http.StatusInternalServerError) - return - } - - // Return binary data - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("X-RDMA-Used", "true") - w.Write(resp.Data) -} -``` - -### **4. Volume Lookup in weed mount (Where it belongs)** -```go -// File: weed/mount/weedfs.go (add method) -func (wfs *WFS) lookupVolumeServer(ctx context.Context, volumeID uint32) (string, error) { - // Use existing SeaweedFS volume lookup logic - vid := fmt.Sprintf("%d", volumeID) - - // Query master server for volume location - locations, err := operation.LookupVolumeId(wfs.getMasterFn(), wfs.option.GrpcDialOption, vid) - if err != nil { - return "", fmt.Errorf("volume lookup failed: %w", err) - } - - if len(locations.Locations) == 0 { - return "", fmt.Errorf("no locations found for volume %d", volumeID) - } - - // Return first available location (or implement smart selection) - return locations.Locations[0].Url, nil -} -``` - -## ๐ŸŽฏ **Key Differences from Over-Complicated Approach** - -### **โŒ Over-Complicated (What I Built Before):** -- โŒ Sidecar does volume lookup -- โŒ Sidecar has master client integration -- โŒ Sidecar has volume location caching -- โŒ Sidecar forwards requests to remote sidecars -- โŒ Complex distributed logic in sidecar - -### **โœ… Correct Simple Approach:** -- โœ… **weed mount** does volume lookup (where it belongs) -- โœ… **weed mount** passes volume server address to sidecar -- โœ… **Sidecar** is simple and stateless -- โœ… **Sidecar** just does local RDMA read for given server -- โœ… **No complex distributed logic in sidecar** - -## ๐Ÿš€ **Request Flow (Corrected)** - -1. **User Application** โ†’ `read()` system call -2. **FUSE** โ†’ `weed mount` WFS.Read() -3. **weed mount** โ†’ Volume lookup: "Where is volume 7?" -4. **SeaweedFS Master** โ†’ "Volume 7 is on server-B:8080" -5. **weed mount** โ†’ HTTP POST to sidecar: `{volume_server: "server-B:8080", volume: 7, needle: 12345}` -6. **RDMA Sidecar** โ†’ Connect to server-B:8080, do local RDMA read -7. **RDMA Engine** โ†’ Direct memory access to volume file -8. **Response** โ†’ Binary data back to weed mount โ†’ user - -## ๐Ÿ“ **Implementation Changes Needed** - -### **1. Simplify Sidecar (Remove Complex Logic)** -- Remove `DistributedRDMAClient` -- Remove volume lookup logic -- Remove master client integration -- Keep simple RDMA engine communication - -### **2. Add Volume Lookup to weed mount** -- Add `lookupVolumeServer()` method to WFS -- Modify `RDMAMountClient` to accept volume server parameter -- Integrate with existing SeaweedFS volume lookup - -### **3. Simple Sidecar API** -``` -POST /rdma/read -{ - "volume_server": "server-B:8080", - "volume_id": 7, - "needle_id": 12345, - "cookie": 0, - "offset": 0, - "size": 4096 -} -``` - -## โœ… **Benefits of Simple Approach** - -- **๐ŸŽฏ Single Responsibility**: Sidecar only does RDMA, weed mount does lookup -- **๐Ÿ”ง Maintainable**: Less complex logic in sidecar -- **โšก Performance**: No extra network hops for volume lookup -- **๐Ÿ—๏ธ Clean Architecture**: Separation of concerns -- **๐Ÿ› Easier Debugging**: Clear responsibility boundaries - -You're absolutely right - this is much cleaner! The sidecar should be a simple RDMA accelerator, not a distributed system coordinator. diff --git a/seaweedfs-rdma-sidecar/CURRENT-STATUS.md b/seaweedfs-rdma-sidecar/CURRENT-STATUS.md deleted file mode 100644 index e8f53dc1d..000000000 --- a/seaweedfs-rdma-sidecar/CURRENT-STATUS.md +++ /dev/null @@ -1,165 +0,0 @@ -# SeaweedFS RDMA Sidecar - Current Status Summary - -## ๐ŸŽ‰ **IMPLEMENTATION COMPLETE** -**Status**: โœ… **READY FOR PRODUCTION** (Mock Mode) / ๐Ÿ”„ **READY FOR HARDWARE INTEGRATION** - ---- - -## ๐Ÿ“Š **What's Working Right Now** - -### โœ… **Complete Integration Pipeline** -- **SeaweedFS Mount** โ†’ **Go Sidecar** โ†’ **Rust Engine** โ†’ **Mock RDMA** -- End-to-end data flow with proper error handling -- Zero-copy page cache optimization -- Connection pooling for performance - -### โœ… **Production-Ready Components** -- HTTP API with RESTful endpoints -- Robust health checks and monitoring -- Docker multi-service orchestration -- Comprehensive error handling and fallback -- Volume lookup and server discovery - -### โœ… **Performance Features** -- **Zero-Copy**: Direct kernel page cache population -- **Connection Pooling**: Reused IPC connections -- **Async Operations**: Non-blocking I/O throughout -- **Metrics**: Detailed performance monitoring - -### โœ… **Code Quality** -- All GitHub PR review comments addressed -- Memory-safe operations (no dangerous channel closes) -- Proper file ID parsing using SeaweedFS functions -- RESTful API design with correct HTTP methods - ---- - -## ๐Ÿ”„ **What's Mock/Simulated** - -### ๐ŸŸก **Mock RDMA Engine** (Rust) -- **Location**: `rdma-engine/src/rdma.rs` -- **Function**: Simulates RDMA hardware operations -- **Data**: Generates pattern data (0,1,2...255,0,1,2...) -- **Performance**: Realistic latency simulation (150ns reads) - -### ๐ŸŸก **Simulated Hardware** -- **Device Info**: Mock Mellanox ConnectX-5 capabilities -- **Memory Regions**: Fake registration without HCA -- **Transfers**: Pattern generation instead of network transfer -- **Completions**: Synthetic work completions - ---- - -## ๐Ÿ“ˆ **Current Performance** -- **Throughput**: ~403 operations/second -- **Latency**: ~2.48ms average (mock overhead) -- **Success Rate**: 100% in integration tests -- **Memory Usage**: Optimized with zero-copy - ---- - -## ๐Ÿ—๏ธ **Architecture Overview** - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SeaweedFS โ”‚โ”€โ”€โ”€โ”€โ–ถโ”‚ Go Sidecar โ”‚โ”€โ”€โ”€โ”€โ–ถโ”‚ Rust Engine โ”‚ -โ”‚ Mount Client โ”‚ โ”‚ HTTP Server โ”‚ โ”‚ Mock RDMA โ”‚ -โ”‚ (REAL) โ”‚ โ”‚ (REAL) โ”‚ โ”‚ (MOCK) โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ โ”‚ - โ–ผ โ–ผ โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ - File ID Parse โ”‚ โ”‚ - Zero-Copy โ”‚ โ”‚ - UCX Ready โ”‚ -โ”‚ - Volume Lookup โ”‚ โ”‚ - Conn Pooling โ”‚ โ”‚ - Memory Mgmt โ”‚ -โ”‚ - HTTP Fallback โ”‚ โ”‚ - Health Checks โ”‚ โ”‚ - IPC Protocol โ”‚ -โ”‚ - Error Handlingโ”‚ โ”‚ - REST API โ”‚ โ”‚ - Async Ops โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - ---- - -## ๐Ÿ”ง **Key Files & Locations** - -### **Core Integration** -- `weed/mount/filehandle_read.go` - RDMA read integration in FUSE -- `weed/mount/rdma_client.go` - Mount client RDMA communication -- `cmd/demo-server/main.go` - Main RDMA sidecar HTTP server - -### **RDMA Engine** -- `rdma-engine/src/rdma.rs` - Mock RDMA implementation -- `rdma-engine/src/ipc.rs` - IPC protocol with Go sidecar -- `pkg/rdma/client.go` - Go client for RDMA engine - -### **Configuration** -- `docker-compose.mount-rdma.yml` - Complete integration test setup -- `go.mod` - Dependencies with local SeaweedFS replacement - ---- - -## ๐Ÿš€ **Ready For Next Steps** - -### **Immediate Capability** -- โœ… **Development**: Full testing without RDMA hardware -- โœ… **Integration Testing**: Complete pipeline validation -- โœ… **Performance Benchmarking**: Baseline metrics -- โœ… **CI/CD**: Mock mode for automated testing - -### **Production Transition** -- ๐Ÿ”„ **Hardware Integration**: Replace mock with UCX library -- ๐Ÿ”„ **Real Data Transfer**: Remove pattern generation -- ๐Ÿ”„ **Device Detection**: Enumerate actual RDMA NICs -- ๐Ÿ”„ **Performance Optimization**: Hardware-specific tuning - ---- - -## ๐Ÿ“‹ **Commands to Resume Work** - -### **Start Development Environment** -```bash -# Navigate to your seaweedfs-rdma-sidecar directory -cd /path/to/your/seaweedfs/seaweedfs-rdma-sidecar - -# Build components -go build -o bin/demo-server ./cmd/demo-server -cargo build --manifest-path rdma-engine/Cargo.toml - -# Run integration tests -docker-compose -f docker-compose.mount-rdma.yml up -``` - -### **Test Current Implementation** -```bash -# Test sidecar HTTP API -curl http://localhost:8081/health -curl http://localhost:8081/stats - -# Test RDMA read -curl "http://localhost:8081/read?volume=1&needle=123&cookie=456&offset=0&size=1024&volume_server=http://localhost:8080" -``` - ---- - -## ๐ŸŽฏ **Success Metrics Achieved** - -- โœ… **Functional**: Complete RDMA integration pipeline -- โœ… **Reliable**: Robust error handling and fallback -- โœ… **Performant**: Zero-copy and connection pooling -- โœ… **Testable**: Comprehensive mock implementation -- โœ… **Maintainable**: Clean code with proper documentation -- โœ… **Scalable**: Async operations and pooling -- โœ… **Production-Ready**: All review comments addressed - ---- - -## ๐Ÿ“š **Documentation** - -- `FUTURE-WORK-TODO.md` - Next steps for hardware integration -- `DOCKER-TESTING.md` - Integration testing guide -- `docker-compose.mount-rdma.yml` - Complete test environment -- GitHub PR reviews - All issues addressed and documented - ---- - -**๐Ÿ† ACHIEVEMENT**: Complete RDMA sidecar architecture with production-ready infrastructure and seamless mock-to-real transition path! - -**Next**: Follow `FUTURE-WORK-TODO.md` to replace mock with real UCX hardware integration. diff --git a/seaweedfs-rdma-sidecar/DOCKER-TESTING.md b/seaweedfs-rdma-sidecar/DOCKER-TESTING.md deleted file mode 100644 index 88ea1971d..000000000 --- a/seaweedfs-rdma-sidecar/DOCKER-TESTING.md +++ /dev/null @@ -1,290 +0,0 @@ -# ๐Ÿณ Docker Integration Testing Guide - -This guide provides comprehensive Docker-based integration testing for the SeaweedFS RDMA sidecar system. - -## ๐Ÿ—๏ธ Architecture - -The Docker Compose setup includes: - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SeaweedFS Master โ”‚ โ”‚ SeaweedFS Volume โ”‚ โ”‚ Rust RDMA โ”‚ -โ”‚ :9333 โ”‚โ—„โ”€โ”€โ–บโ”‚ :8080 โ”‚ โ”‚ Engine โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ - โ–ผ โ–ผ -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Go RDMA Sidecar โ”‚โ—„โ”€โ”€โ–บโ”‚ Unix Socket โ”‚โ—„โ”€โ”€โ–บโ”‚ Integration โ”‚ -โ”‚ :8081 โ”‚ โ”‚ /tmp/rdma.sock โ”‚ โ”‚ Test Suite โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## ๐Ÿš€ Quick Start - -### 1. Start All Services - -```bash -# Using the helper script (recommended) -./tests/docker-test-helper.sh start - -# Or using docker-compose directly -docker-compose up -d -``` - -### 2. Run Integration Tests - -```bash -# Run the complete test suite -./tests/docker-test-helper.sh test - -# Or run tests manually -docker-compose run --rm integration-tests -``` - -### 3. Interactive Testing - -```bash -# Open a shell in the test container -./tests/docker-test-helper.sh shell - -# Inside the container, you can run: -./test-rdma ping -./test-rdma capabilities -./test-rdma read --volume 1 --needle 12345 --size 1024 -curl http://rdma-sidecar:8081/health -curl http://rdma-sidecar:8081/stats -``` - -## ๐Ÿ“‹ Test Helper Commands - -The `docker-test-helper.sh` script provides convenient commands: - -```bash -# Service Management -./tests/docker-test-helper.sh start # Start all services -./tests/docker-test-helper.sh stop # Stop all services -./tests/docker-test-helper.sh clean # Stop and clean volumes - -# Testing -./tests/docker-test-helper.sh test # Run integration tests -./tests/docker-test-helper.sh shell # Interactive testing shell - -# Monitoring -./tests/docker-test-helper.sh status # Check service health -./tests/docker-test-helper.sh logs # Show all logs -./tests/docker-test-helper.sh logs rdma-engine # Show specific service logs -``` - -## ๐Ÿงช Test Coverage - -The integration test suite covers: - -### โœ… Core Components -- **SeaweedFS Master**: Cluster leadership and status -- **SeaweedFS Volume Server**: Volume operations and health -- **Rust RDMA Engine**: Socket communication and operations -- **Go RDMA Sidecar**: HTTP API and RDMA integration - -### โœ… Integration Points -- **IPC Communication**: Unix socket + MessagePack protocol -- **RDMA Operations**: Ping, capabilities, read operations -- **HTTP API**: All sidecar endpoints and error handling -- **Fallback Logic**: RDMA โ†’ HTTP fallback behavior - -### โœ… Performance Testing -- **Direct RDMA Benchmarks**: Engine-level performance -- **Sidecar Benchmarks**: End-to-end performance -- **Latency Measurements**: Operation timing validation -- **Throughput Testing**: Operations per second - -## ๐Ÿ”ง Service Details - -### SeaweedFS Master -- **Port**: 9333 -- **Health Check**: `/cluster/status` -- **Data**: Persistent volume `master-data` - -### SeaweedFS Volume Server -- **Port**: 8080 -- **Health Check**: `/status` -- **Data**: Persistent volume `volume-data` -- **Depends on**: SeaweedFS Master - -### Rust RDMA Engine -- **Socket**: `/tmp/rdma-engine.sock` -- **Mode**: Mock RDMA (development) -- **Health Check**: Socket existence -- **Privileged**: Yes (for RDMA access) - -### Go RDMA Sidecar -- **Port**: 8081 -- **Health Check**: `/health` -- **API Endpoints**: `/stats`, `/read`, `/benchmark` -- **Depends on**: RDMA Engine, Volume Server - -### Test Client -- **Purpose**: Integration testing and interactive debugging -- **Tools**: curl, jq, test-rdma binary -- **Environment**: All service URLs configured - -## ๐Ÿ“Š Expected Test Results - -### โœ… Successful Output Example - -``` -=============================================== -๐Ÿš€ SEAWEEDFS RDMA INTEGRATION TEST SUITE -=============================================== - -๐Ÿ”ต Waiting for SeaweedFS Master to be ready... -โœ… SeaweedFS Master is ready -โœ… SeaweedFS Master is leader and ready - -๐Ÿ”ต Waiting for SeaweedFS Volume Server to be ready... -โœ… SeaweedFS Volume Server is ready -Volume Server Version: 3.60 - -๐Ÿ”ต Checking RDMA engine socket... -โœ… RDMA engine socket exists -๐Ÿ”ต Testing RDMA engine ping... -โœ… RDMA engine ping successful - -๐Ÿ”ต Waiting for RDMA Sidecar to be ready... -โœ… RDMA Sidecar is ready -โœ… RDMA Sidecar is healthy -RDMA Status: true - -๐Ÿ”ต Testing needle read via sidecar... -โœ… Sidecar needle read successful -โš ๏ธ HTTP fallback used. Duration: 2.48ms - -๐Ÿ”ต Running sidecar performance benchmark... -โœ… Sidecar benchmark completed -Benchmark Results: - RDMA Operations: 5 - HTTP Operations: 0 - Average Latency: 2.479ms - Operations/sec: 403.2 - -=============================================== -๐ŸŽ‰ ALL INTEGRATION TESTS COMPLETED! -=============================================== -``` - -## ๐Ÿ› Troubleshooting - -### Service Not Starting - -```bash -# Check service logs -./tests/docker-test-helper.sh logs [service-name] - -# Check container status -docker-compose ps - -# Restart specific service -docker-compose restart [service-name] -``` - -### RDMA Engine Issues - -```bash -# Check socket permissions -docker-compose exec rdma-engine ls -la /tmp/rdma/rdma-engine.sock - -# Check RDMA engine logs -./tests/docker-test-helper.sh logs rdma-engine - -# Test socket directly -docker-compose exec test-client ./test-rdma ping -``` - -### Sidecar Connection Issues - -```bash -# Test sidecar health directly -curl http://localhost:8081/health - -# Check sidecar logs -./tests/docker-test-helper.sh logs rdma-sidecar - -# Verify environment variables -docker-compose exec rdma-sidecar env | grep RDMA -``` - -### Volume Server Issues - -```bash -# Check SeaweedFS status -curl http://localhost:9333/cluster/status -curl http://localhost:8080/status - -# Check volume server logs -./tests/docker-test-helper.sh logs seaweedfs-volume -``` - -## ๐Ÿ” Manual Testing Examples - -### Test RDMA Engine Directly - -```bash -# Enter test container -./tests/docker-test-helper.sh shell - -# Test RDMA operations -./test-rdma ping --socket /tmp/rdma-engine.sock -./test-rdma capabilities --socket /tmp/rdma-engine.sock -./test-rdma read --socket /tmp/rdma-engine.sock --volume 1 --needle 12345 -./test-rdma bench --socket /tmp/rdma-engine.sock --iterations 10 -``` - -### Test Sidecar HTTP API - -```bash -# Health and status -curl http://rdma-sidecar:8081/health | jq '.' -curl http://rdma-sidecar:8081/stats | jq '.' - -# Needle operations -curl "http://rdma-sidecar:8081/read?volume=1&needle=12345&size=1024" | jq '.' - -# Benchmarking -curl "http://rdma-sidecar:8081/benchmark?iterations=5&size=2048" | jq '.benchmark_results' -``` - -### Test SeaweedFS Integration - -```bash -# Check cluster status -curl http://seaweedfs-master:9333/cluster/status | jq '.' - -# Check volume status -curl http://seaweedfs-volume:8080/status | jq '.' - -# List volumes -curl http://seaweedfs-master:9333/vol/status | jq '.' -``` - -## ๐Ÿš€ Production Deployment - -This Docker setup can be adapted for production by: - -1. **Replacing Mock RDMA**: Switch to `real-ucx` feature in Rust -2. **RDMA Hardware**: Add RDMA device mappings and capabilities -3. **Security**: Remove privileged mode, add proper user/group mapping -4. **Scaling**: Use Docker Swarm or Kubernetes for orchestration -5. **Monitoring**: Add Prometheus metrics and Grafana dashboards -6. **Persistence**: Configure proper volume management - -## ๐Ÿ“š Additional Resources - -- [Main README](README.md) - Complete project overview -- [Docker Compose Reference](https://docs.docker.com/compose/) -- [SeaweedFS Documentation](https://github.com/seaweedfs/seaweedfs/wiki) -- [UCX Documentation](https://github.com/openucx/ucx) - ---- - -**๐Ÿณ Happy Docker Testing!** - -For issues or questions, please check the logs first and refer to the troubleshooting section above. diff --git a/seaweedfs-rdma-sidecar/Dockerfile.integration-test b/seaweedfs-rdma-sidecar/Dockerfile.integration-test deleted file mode 100644 index 8e9d6610e..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.integration-test +++ /dev/null @@ -1,25 +0,0 @@ -# Dockerfile for RDMA Mount Integration Tests -FROM ubuntu:22.04 - -# Install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - wget \ - ca-certificates \ - jq \ - bc \ - time \ - util-linux \ - coreutils \ - && rm -rf /var/lib/apt/lists/* - -# Create test directories -RUN mkdir -p /usr/local/bin /test-results - -# Copy test scripts -COPY scripts/run-integration-tests.sh /usr/local/bin/run-integration-tests.sh -COPY scripts/test-rdma-mount.sh /usr/local/bin/test-rdma-mount.sh -RUN chmod +x /usr/local/bin/*.sh - -# Default command -CMD ["/usr/local/bin/run-integration-tests.sh"] diff --git a/seaweedfs-rdma-sidecar/Dockerfile.mount-rdma b/seaweedfs-rdma-sidecar/Dockerfile.mount-rdma deleted file mode 100644 index 425defcc7..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.mount-rdma +++ /dev/null @@ -1,40 +0,0 @@ -# Dockerfile for SeaweedFS Mount with RDMA support -FROM ubuntu:22.04 - -# Install dependencies -RUN apt-get update && apt-get install -y \ - fuse3 \ - curl \ - wget \ - ca-certificates \ - procps \ - util-linux \ - jq \ - && rm -rf /var/lib/apt/lists/* - -# Create necessary directories -RUN mkdir -p /usr/local/bin /mnt/seaweedfs /var/log/seaweedfs - -# Copy SeaweedFS binary (will be built from context) -COPY bin/weed /usr/local/bin/weed -RUN chmod +x /usr/local/bin/weed - -# Copy mount helper scripts -COPY scripts/mount-helper.sh /usr/local/bin/mount-helper.sh -RUN chmod +x /usr/local/bin/mount-helper.sh - -# Create mount point -RUN mkdir -p /mnt/seaweedfs - -# Set up FUSE permissions -RUN echo 'user_allow_other' >> /etc/fuse.conf - -# Health check script -COPY scripts/mount-health-check.sh /usr/local/bin/mount-health-check.sh -RUN chmod +x /usr/local/bin/mount-health-check.sh - -# Expose mount point as volume -VOLUME ["/mnt/seaweedfs"] - -# Default command -CMD ["/usr/local/bin/mount-helper.sh"] diff --git a/seaweedfs-rdma-sidecar/Dockerfile.performance-test b/seaweedfs-rdma-sidecar/Dockerfile.performance-test deleted file mode 100644 index 7ffa81c4f..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.performance-test +++ /dev/null @@ -1,26 +0,0 @@ -# Dockerfile for RDMA Mount Performance Tests -FROM ubuntu:22.04 - -# Install dependencies -RUN apt-get update && apt-get install -y \ - curl \ - wget \ - ca-certificates \ - jq \ - bc \ - time \ - util-linux \ - coreutils \ - fio \ - iozone3 \ - && rm -rf /var/lib/apt/lists/* - -# Create test directories -RUN mkdir -p /usr/local/bin /performance-results - -# Copy test scripts -COPY scripts/run-performance-tests.sh /usr/local/bin/run-performance-tests.sh -RUN chmod +x /usr/local/bin/*.sh - -# Default command -CMD ["/usr/local/bin/run-performance-tests.sh"] diff --git a/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine b/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine deleted file mode 100644 index 539a71bd1..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine +++ /dev/null @@ -1,63 +0,0 @@ -# Multi-stage build for Rust RDMA Engine -FROM rust:1.80-slim AS builder - -# Install build dependencies -RUN apt-get update && apt-get install -y \ - pkg-config \ - libssl-dev \ - libudev-dev \ - build-essential \ - libc6-dev \ - linux-libc-dev \ - && rm -rf /var/lib/apt/lists/* - -# Set work directory -WORKDIR /app - -# Copy Rust project files -COPY rdma-engine/Cargo.toml ./ -COPY rdma-engine/Cargo.lock ./ -COPY rdma-engine/src ./src - -# Build the release binary -RUN cargo build --release - -# Runtime stage -FROM debian:bookworm-slim - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - libssl3 \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Create app user -RUN useradd -m -u 1001 appuser - -# Set work directory -WORKDIR /app - -# Copy binary from builder stage -COPY --from=builder /app/target/release/rdma-engine-server . - -# Change ownership -RUN chown -R appuser:appuser /app - -# Set default socket path (can be overridden) -ENV RDMA_SOCKET_PATH=/tmp/rdma/rdma-engine.sock - -# Create socket directory with proper permissions (before switching user) -RUN mkdir -p /tmp/rdma && chown -R appuser:appuser /tmp/rdma - -USER appuser - -# Expose any needed ports (none for this service as it uses Unix sockets) -# EXPOSE 18515 - -# Health check - verify both process and socket using environment variable -HEALTHCHECK --interval=5s --timeout=3s --start-period=10s --retries=3 \ - CMD pgrep rdma-engine-server >/dev/null && test -S "$RDMA_SOCKET_PATH" - -# Default command using environment variable -CMD sh -c "./rdma-engine-server --debug --ipc-socket \"$RDMA_SOCKET_PATH\"" diff --git a/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine.simple b/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine.simple deleted file mode 100644 index cbe3edf16..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.rdma-engine.simple +++ /dev/null @@ -1,36 +0,0 @@ -# Simplified Dockerfile for Rust RDMA Engine (using pre-built binary) -FROM debian:bookworm-slim - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - libssl3 \ - curl \ - procps \ - && rm -rf /var/lib/apt/lists/* - -# Create app user -RUN useradd -m -u 1001 appuser - -# Set work directory -WORKDIR /app - -# Copy pre-built binary from local build -COPY ./rdma-engine/target/release/rdma-engine-server . - -# Change ownership -RUN chown -R appuser:appuser /app -USER appuser - -# Set default socket path (can be overridden) -ENV RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - -# Create socket directory -RUN mkdir -p /tmp - -# Health check - verify both process and socket using environment variable -HEALTHCHECK --interval=5s --timeout=3s --start-period=10s --retries=3 \ - CMD pgrep rdma-engine-server >/dev/null && test -S "$RDMA_SOCKET_PATH" - -# Default command using environment variable -CMD sh -c "./rdma-engine-server --debug --ipc-socket \"$RDMA_SOCKET_PATH\"" diff --git a/seaweedfs-rdma-sidecar/Dockerfile.sidecar b/seaweedfs-rdma-sidecar/Dockerfile.sidecar deleted file mode 100644 index e9da9a63c..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.sidecar +++ /dev/null @@ -1,55 +0,0 @@ -# Multi-stage build for Go Sidecar -FROM golang:1.24-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git ca-certificates tzdata - -# Set work directory -WORKDIR /app - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY cmd/ ./cmd/ -COPY pkg/ ./pkg/ - -# Build the binaries -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o demo-server ./cmd/demo-server -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o sidecar ./cmd/sidecar -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o test-rdma ./cmd/test-rdma - -# Runtime stage -FROM alpine:3.18 - -# Install runtime dependencies -RUN apk --no-cache add ca-certificates curl jq - -# Create app user -RUN addgroup -g 1001 appgroup && \ - adduser -D -s /bin/sh -u 1001 -G appgroup appuser - -# Set work directory -WORKDIR /app - -# Copy binaries from builder stage -COPY --from=builder /app/demo-server . -COPY --from=builder /app/sidecar . -COPY --from=builder /app/test-rdma . - -# Change ownership -RUN chown -R appuser:appgroup /app -USER appuser - -# Expose the demo server port -EXPOSE 8081 - -# Health check -HEALTHCHECK --interval=10s --timeout=5s --start-period=15s --retries=3 \ - CMD curl -f http://localhost:8081/health || exit 1 - -# Default command (demo server) -CMD ["./demo-server", "--port", "8081", "--enable-rdma", "--debug"] diff --git a/seaweedfs-rdma-sidecar/Dockerfile.test-client b/seaweedfs-rdma-sidecar/Dockerfile.test-client deleted file mode 100644 index 879b8033a..000000000 --- a/seaweedfs-rdma-sidecar/Dockerfile.test-client +++ /dev/null @@ -1,59 +0,0 @@ -# Multi-stage build for Test Client -FROM golang:1.23-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git ca-certificates tzdata - -# Set work directory -WORKDIR /app - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY cmd/ ./cmd/ -COPY pkg/ ./pkg/ - -# Build the test binaries -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o test-rdma ./cmd/test-rdma -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o demo-server ./cmd/demo-server - -# Runtime stage -FROM alpine:3.18 - -# Install runtime dependencies and testing tools -RUN apk --no-cache add \ - ca-certificates \ - curl \ - jq \ - bash \ - wget \ - netcat-openbsd \ - && rm -rf /var/cache/apk/* - -# Create app user -RUN addgroup -g 1001 appgroup && \ - adduser -D -s /bin/bash -u 1001 -G appgroup appuser - -# Set work directory -WORKDIR /app - -# Copy binaries from builder stage -COPY --from=builder /app/test-rdma . -COPY --from=builder /app/demo-server . - -# Copy test scripts -COPY tests/ ./tests/ -RUN chmod +x ./tests/*.sh - -# Change ownership -RUN chown -R appuser:appgroup /app - -# Switch to app user -USER appuser - -# Default command -CMD ["/bin/bash"] diff --git a/seaweedfs-rdma-sidecar/FUTURE-WORK-TODO.md b/seaweedfs-rdma-sidecar/FUTURE-WORK-TODO.md deleted file mode 100644 index cc7457b90..000000000 --- a/seaweedfs-rdma-sidecar/FUTURE-WORK-TODO.md +++ /dev/null @@ -1,276 +0,0 @@ -# SeaweedFS RDMA Sidecar - Future Work TODO - -## ๐ŸŽฏ **Current Status (โœ… COMPLETED)** - -### **Phase 1: Architecture & Integration - DONE** -- โœ… **Complete Go โ†” Rust IPC Pipeline**: Unix sockets + MessagePack -- โœ… **SeaweedFS Integration**: Mount client with RDMA acceleration -- โœ… **Docker Orchestration**: Multi-service setup with proper networking -- โœ… **Error Handling**: Robust fallback and recovery mechanisms -- โœ… **Performance Optimizations**: Zero-copy page cache + connection pooling -- โœ… **Code Quality**: All GitHub PR review comments addressed -- โœ… **Testing Framework**: Integration tests and benchmarking tools - -### **Phase 2: Mock Implementation - DONE** -- โœ… **Mock RDMA Engine**: Complete Rust implementation for development -- โœ… **Pattern Data Generation**: Predictable test data for validation -- โœ… **Simulated Performance**: Realistic latency and throughput modeling -- โœ… **Development Environment**: Full testing without hardware requirements - ---- - -## ๐Ÿš€ **PHASE 3: REAL RDMA IMPLEMENTATION** - -### **3.1 Hardware Abstraction Layer** ๐Ÿ”ด **HIGH PRIORITY** - -#### **Replace Mock RDMA Context** -**File**: `rdma-engine/src/rdma.rs` -**Current**: -```rust -RdmaContextImpl::Mock(MockRdmaContext::new(config).await?) -``` -**TODO**: -```rust -// Enable UCX feature and implement -RdmaContextImpl::Ucx(UcxRdmaContext::new(config).await?) -``` - -**Tasks**: -- [ ] Implement `UcxRdmaContext` struct -- [ ] Add UCX FFI bindings for Rust -- [ ] Handle UCX initialization and cleanup -- [ ] Add feature flag: `real-ucx` vs `mock` - -#### **Real Memory Management** -**File**: `rdma-engine/src/rdma.rs` lines 245-270 -**Current**: Fake memory regions in vector -**TODO**: -- [ ] Integrate with UCX memory registration APIs -- [ ] Implement HugePage support for large transfers -- [ ] Add memory region caching for performance -- [ ] Handle registration/deregistration errors - -#### **Actual RDMA Operations** -**File**: `rdma-engine/src/rdma.rs` lines 273-335 -**Current**: Pattern data + artificial latency -**TODO**: -- [ ] Replace `post_read()` with real UCX RDMA operations -- [ ] Implement `post_write()` with actual memory transfers -- [ ] Add completion polling from hardware queues -- [ ] Handle partial transfers and retries - -### **3.2 Data Path Replacement** ๐ŸŸก **MEDIUM PRIORITY** - -#### **Real Data Transfer** -**File**: `pkg/rdma/client.go` lines 420-442 -**Current**: -```go -// MOCK: Pattern generation -mockData[i] = byte(i % 256) -``` -**TODO**: -```go -// Get actual data from RDMA buffer -realData := getRdmaBufferContents(startResp.LocalAddr, startResp.TransferSize) -validateDataIntegrity(realData, completeResp.ServerCrc) -``` - -**Tasks**: -- [ ] Remove mock data generation -- [ ] Access actual RDMA transferred data -- [ ] Implement CRC validation: `completeResp.ServerCrc` -- [ ] Add data integrity error handling - -#### **Hardware Device Detection** -**File**: `rdma-engine/src/rdma.rs` lines 222-233 -**Current**: Hardcoded Mellanox device info -**TODO**: -- [ ] Enumerate real RDMA devices using UCX -- [ ] Query actual device capabilities -- [ ] Handle multiple device scenarios -- [ ] Add device selection logic - -### **3.3 Performance Optimization** ๐ŸŸข **LOW PRIORITY** - -#### **Memory Registration Caching** -**TODO**: -- [ ] Implement MR (Memory Region) cache -- [ ] Add LRU eviction for memory pressure -- [ ] Optimize for frequently accessed regions -- [ ] Monitor cache hit rates - -#### **Advanced RDMA Features** -**TODO**: -- [ ] Implement RDMA Write operations -- [ ] Add Immediate Data support -- [ ] Implement RDMA Write with Immediate -- [ ] Add Atomic operations (if needed) - -#### **Multi-Transport Support** -**TODO**: -- [ ] Leverage UCX's automatic transport selection -- [ ] Add InfiniBand support -- [ ] Add RoCE (RDMA over Converged Ethernet) support -- [ ] Implement TCP fallback via UCX - ---- - -## ๐Ÿ”ง **PHASE 4: PRODUCTION HARDENING** - -### **4.1 Error Handling & Recovery** -- [ ] Add RDMA-specific error codes -- [ ] Implement connection recovery -- [ ] Add retry logic for transient failures -- [ ] Handle device hot-plug scenarios - -### **4.2 Monitoring & Observability** -- [ ] Add RDMA-specific metrics (bandwidth, latency, errors) -- [ ] Implement tracing for RDMA operations -- [ ] Add health checks for RDMA devices -- [ ] Create performance dashboards - -### **4.3 Configuration & Tuning** -- [ ] Add RDMA-specific configuration options -- [ ] Implement auto-tuning based on workload -- [ ] Add support for multiple RDMA ports -- [ ] Create deployment guides for different hardware - ---- - -## ๐Ÿ“‹ **IMMEDIATE NEXT STEPS** - -### **Step 1: UCX Integration Setup** -1. **Add UCX dependencies to Rust**: - ```toml - [dependencies] - ucx-sys = "0.1" # UCX FFI bindings - ``` - -2. **Create UCX wrapper module**: - ```bash - touch rdma-engine/src/ucx.rs - ``` - -3. **Implement basic UCX context**: - ```rust - pub struct UcxRdmaContext { - context: *mut ucx_sys::ucp_context_h, - worker: *mut ucx_sys::ucp_worker_h, - } - ``` - -### **Step 2: Development Environment** -1. **Install UCX library**: - ```bash - # Ubuntu/Debian - sudo apt-get install libucx-dev - - # CentOS/RHEL - sudo yum install ucx-devel - ``` - -2. **Update Cargo.toml features**: - ```toml - [features] - default = ["mock"] - mock = [] - real-ucx = ["ucx-sys"] - ``` - -### **Step 3: Testing Strategy** -1. **Add hardware detection tests** -2. **Create UCX initialization tests** -3. **Implement gradual feature migration** -4. **Maintain mock fallback for CI/CD** - ---- - -## ๐Ÿ—๏ธ **ARCHITECTURE NOTES** - -### **Current Working Components** -- โœ… **Go Sidecar**: Production-ready HTTP API -- โœ… **IPC Layer**: Robust Unix socket + MessagePack -- โœ… **SeaweedFS Integration**: Complete mount client integration -- โœ… **Docker Setup**: Multi-service orchestration -- โœ… **Error Handling**: Comprehensive fallback mechanisms - -### **Mock vs Real Boundary** -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SeaweedFS โ”‚โ”€โ”€โ”€โ”€โ–ถโ”‚ Go Sidecar โ”‚โ”€โ”€โ”€โ”€โ–ถโ”‚ Rust Engine โ”‚ -โ”‚ (REAL) โ”‚ โ”‚ (REAL) โ”‚ โ”‚ (MOCK) โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ RDMA Hardware โ”‚ - โ”‚ (TO IMPLEMENT) โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### **Performance Expectations** -- **Current Mock**: ~403 ops/sec, 2.48ms latency -- **Target Real**: ~4000 ops/sec, 250ฮผs latency (UCX optimized) -- **Bandwidth Goal**: 25-100 Gbps (depending on hardware) - ---- - -## ๐Ÿ“š **REFERENCE MATERIALS** - -### **UCX Documentation** -- **GitHub**: https://github.com/openucx/ucx -- **API Reference**: https://openucx.readthedocs.io/ -- **Rust Bindings**: https://crates.io/crates/ucx-sys - -### **RDMA Programming** -- **InfiniBand Architecture**: Volume 1 Specification -- **RoCE Standards**: IBTA Annex A17 -- **Performance Tuning**: UCX Performance Guide - -### **SeaweedFS Integration** -- **File ID Format**: `weed/storage/needle/file_id.go` -- **Volume Server**: `weed/server/volume_server_handlers_read.go` -- **Mount Client**: `weed/mount/filehandle_read.go` - ---- - -## โš ๏ธ **IMPORTANT NOTES** - -### **Breaking Changes to Avoid** -- **Keep IPC Protocol Stable**: Don't change MessagePack format -- **Maintain HTTP API**: Existing endpoints must remain compatible -- **Preserve Configuration**: Environment variables should work unchanged - -### **Testing Requirements** -- **Hardware Tests**: Require actual RDMA NICs -- **CI/CD Compatibility**: Must fallback to mock for automated testing -- **Performance Benchmarks**: Compare mock vs real performance - -### **Security Considerations** -- **Memory Protection**: Ensure RDMA regions are properly isolated -- **Access Control**: Validate remote memory access permissions -- **Data Validation**: Always verify CRC checksums - ---- - -## ๐ŸŽฏ **SUCCESS CRITERIA** - -### **Phase 3 Complete When**: -- [ ] Real RDMA data transfers working -- [ ] Hardware device detection functional -- [ ] Performance exceeds mock implementation -- [ ] All integration tests passing with real hardware - -### **Phase 4 Complete When**: -- [ ] Production deployment successful -- [ ] Monitoring and alerting operational -- [ ] Performance targets achieved -- [ ] Error handling validated under load - ---- - -**๐Ÿ“… Last Updated**: December 2024 -**๐Ÿ‘ค Contact**: Resume from `seaweedfs-rdma-sidecar/` directory -**๐Ÿท๏ธ Version**: v1.0 (Mock Implementation Complete) - -**๐Ÿš€ Ready to resume**: All infrastructure is in place, just need to replace the mock RDMA layer with UCX integration! diff --git a/seaweedfs-rdma-sidecar/Makefile b/seaweedfs-rdma-sidecar/Makefile deleted file mode 100644 index 19aa90461..000000000 --- a/seaweedfs-rdma-sidecar/Makefile +++ /dev/null @@ -1,205 +0,0 @@ -# SeaweedFS RDMA Sidecar Makefile - -.PHONY: help build test clean docker-build docker-test docker-clean integration-test - -# Default target -help: ## Show this help message - @echo "SeaweedFS RDMA Sidecar - Available Commands:" - @echo "" - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' - @echo "" - @echo "Examples:" - @echo " make build # Build all components locally" - @echo " make docker-test # Run complete Docker integration tests" - @echo " make test # Run unit tests" - -# Local Build Targets -build: build-go build-rust ## Build all components locally - -build-go: ## Build Go components (sidecar, demo-server, test-rdma) - @echo "๐Ÿ”จ Building Go components..." - go build -o bin/sidecar ./cmd/sidecar - go build -o bin/demo-server ./cmd/demo-server - go build -o bin/test-rdma ./cmd/test-rdma - @echo "โœ… Go build complete" - -build-rust: ## Build Rust RDMA engine - @echo "๐Ÿฆ€ Building Rust RDMA engine..." - cd rdma-engine && cargo build --release - @echo "โœ… Rust build complete" - -# Testing Targets -test: test-go test-rust ## Run all unit tests - -test-go: ## Run Go tests - @echo "๐Ÿงช Running Go tests..." - go test ./... - @echo "โœ… Go tests complete" - -test-rust: ## Run Rust tests - @echo "๐Ÿงช Running Rust tests..." - cd rdma-engine && cargo test - @echo "โœ… Rust tests complete" - -integration-test: build ## Run local integration test - @echo "๐Ÿ”— Running local integration test..." - ./scripts/demo-e2e.sh - @echo "โœ… Local integration test complete" - -# Docker Targets -docker-build: ## Build all Docker images - @echo "๐Ÿณ Building Docker images..." - docker-compose build - @echo "โœ… Docker images built" - -docker-start: ## Start Docker services - @echo "๐Ÿš€ Starting Docker services..." - ./tests/docker-test-helper.sh start - @echo "โœ… Docker services started" - -docker-test: ## Run Docker integration tests - @echo "๐Ÿงช Running Docker integration tests..." - ./tests/docker-test-helper.sh test - @echo "โœ… Docker integration tests complete" - -docker-stop: ## Stop Docker services - @echo "๐Ÿ›‘ Stopping Docker services..." - ./tests/docker-test-helper.sh stop - @echo "โœ… Docker services stopped" - -docker-clean: ## Clean Docker services and volumes - @echo "๐Ÿงน Cleaning Docker environment..." - ./tests/docker-test-helper.sh clean - docker system prune -f - @echo "โœ… Docker cleanup complete" - -docker-logs: ## Show Docker logs - ./tests/docker-test-helper.sh logs - -docker-status: ## Show Docker service status - ./tests/docker-test-helper.sh status - -docker-shell: ## Open interactive shell in test container - ./tests/docker-test-helper.sh shell - -# RDMA Simulation Targets -rdma-sim-build: ## Build RDMA simulation environment - @echo "๐Ÿš€ Building RDMA simulation environment..." - docker-compose -f docker-compose.rdma-sim.yml build - @echo "โœ… RDMA simulation images built" - -rdma-sim-start: ## Start RDMA simulation environment - @echo "๐Ÿš€ Starting RDMA simulation environment..." - docker-compose -f docker-compose.rdma-sim.yml up -d - @echo "โœ… RDMA simulation environment started" - -rdma-sim-test: ## Run RDMA simulation tests - @echo "๐Ÿงช Running RDMA simulation tests..." - docker-compose -f docker-compose.rdma-sim.yml run --rm integration-tests-rdma - @echo "โœ… RDMA simulation tests complete" - -rdma-sim-stop: ## Stop RDMA simulation environment - @echo "๐Ÿ›‘ Stopping RDMA simulation environment..." - docker-compose -f docker-compose.rdma-sim.yml down - @echo "โœ… RDMA simulation environment stopped" - -rdma-sim-clean: ## Clean RDMA simulation environment - @echo "๐Ÿงน Cleaning RDMA simulation environment..." - docker-compose -f docker-compose.rdma-sim.yml down -v --remove-orphans - docker system prune -f - @echo "โœ… RDMA simulation cleanup complete" - -rdma-sim-status: ## Check RDMA simulation status - @echo "๐Ÿ“Š RDMA simulation status:" - docker-compose -f docker-compose.rdma-sim.yml ps - @echo "" - @echo "๐Ÿ” RDMA device status:" - docker-compose -f docker-compose.rdma-sim.yml exec rdma-simulation /opt/rdma-sim/test-rdma.sh || true - -rdma-sim-shell: ## Open shell in RDMA simulation container - @echo "๐Ÿš Opening RDMA simulation shell..." - docker-compose -f docker-compose.rdma-sim.yml exec rdma-simulation /bin/bash - -rdma-sim-logs: ## Show RDMA simulation logs - docker-compose -f docker-compose.rdma-sim.yml logs - -rdma-sim-ucx: ## Show UCX information in simulation - @echo "๐Ÿ“‹ UCX information in simulation:" - docker-compose -f docker-compose.rdma-sim.yml exec rdma-simulation /opt/rdma-sim/ucx-info.sh - -# Development Targets -dev-setup: ## Set up development environment - @echo "๐Ÿ› ๏ธ Setting up development environment..." - go mod tidy - cd rdma-engine && cargo check - chmod +x scripts/*.sh tests/*.sh - @echo "โœ… Development environment ready" - -format: ## Format code - @echo "โœจ Formatting code..." - go fmt ./... - cd rdma-engine && cargo fmt - @echo "โœ… Code formatted" - -lint: ## Run linters - @echo "๐Ÿ” Running linters..." - go vet ./... - cd rdma-engine && cargo clippy -- -D warnings - @echo "โœ… Linting complete" - -# Cleanup Targets -clean: clean-go clean-rust ## Clean all build artifacts - -clean-go: ## Clean Go build artifacts - @echo "๐Ÿงน Cleaning Go artifacts..." - rm -rf bin/ - go clean -testcache - @echo "โœ… Go artifacts cleaned" - -clean-rust: ## Clean Rust build artifacts - @echo "๐Ÿงน Cleaning Rust artifacts..." - cd rdma-engine && cargo clean - @echo "โœ… Rust artifacts cleaned" - -# Full Workflow Targets -check: format lint test ## Format, lint, and test everything - -ci: check integration-test docker-test ## Complete CI workflow - -demo: build ## Run local demo - @echo "๐ŸŽฎ Starting local demo..." - ./scripts/demo-e2e.sh - -# Docker Development Workflow -docker-dev: docker-clean docker-build docker-test ## Complete Docker development cycle - -# Quick targets -quick-test: build ## Quick local test - ./bin/test-rdma --help - -quick-docker: ## Quick Docker test - docker-compose up -d rdma-engine rdma-sidecar - sleep 5 - curl -s http://localhost:8081/health | jq '.' - docker-compose down - -# Help and Documentation -docs: ## Generate/update documentation - @echo "๐Ÿ“š Documentation ready:" - @echo " README.md - Main project documentation" - @echo " DOCKER-TESTING.md - Docker integration testing guide" - @echo " Use 'make help' for available commands" - -# Environment Info -info: ## Show environment information - @echo "๐Ÿ” Environment Information:" - @echo " Go Version: $$(go version)" - @echo " Rust Version: $$(cd rdma-engine && cargo --version)" - @echo " Docker Version: $$(docker --version)" - @echo " Docker Compose Version: $$(docker-compose --version)" - @echo "" - @echo "๐Ÿ—๏ธ Project Structure:" - @echo " Go Components: cmd/ pkg/" - @echo " Rust Engine: rdma-engine/" - @echo " Tests: tests/" - @echo " Scripts: scripts/" diff --git a/seaweedfs-rdma-sidecar/README.md b/seaweedfs-rdma-sidecar/README.md deleted file mode 100644 index 3234fed6c..000000000 --- a/seaweedfs-rdma-sidecar/README.md +++ /dev/null @@ -1,385 +0,0 @@ -# ๐Ÿš€ SeaweedFS RDMA Sidecar - -**High-Performance RDMA Acceleration for SeaweedFS using UCX and Rust** - -[![Build Status](https://img.shields.io/badge/build-passing-brightgreen)](#) -[![Go Version](https://img.shields.io/badge/go-1.23+-blue)](#) -[![Rust Version](https://img.shields.io/badge/rust-1.70+-orange)](#) -[![License](https://img.shields.io/badge/license-MIT-green)](#) - -## ๐ŸŽฏ Overview - -This project implements a **high-performance RDMA (Remote Direct Memory Access) sidecar** for SeaweedFS that provides significant performance improvements for data-intensive read operations. The sidecar uses a **hybrid Go + Rust architecture** with the [UCX (Unified Communication X)](https://github.com/openucx/ucx) framework to deliver up to **44x performance improvement** over traditional HTTP-based reads. - -### ๐Ÿ—๏ธ Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ SeaweedFS โ”‚ โ”‚ Go Sidecar โ”‚ โ”‚ Rust Engine โ”‚ -โ”‚ Volume Server โ”‚โ—„โ”€โ”€โ–บโ”‚ (Control Plane) โ”‚โ—„โ”€โ”€โ–บโ”‚ (Data Plane) โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ โ”‚ - โ”‚ โ”‚ โ”‚ - โ–ผ โ–ผ โ–ผ - HTTP/gRPC API RDMA Client API UCX/RDMA Hardware -``` - -**Components:** -- **๐ŸŸข Go Sidecar**: Control plane handling SeaweedFS integration, client API, and fallback logic -- **๐Ÿฆ€ Rust Engine**: High-performance data plane with UCX framework for RDMA operations -- **๐Ÿ”— IPC Bridge**: Unix domain socket communication with MessagePack serialization - -## ๐ŸŒŸ Key Features - -### โšก Performance -- **44x faster** than HTTP reads (theoretical max based on RDMA vs TCP overhead) -- **Sub-microsecond latency** for memory-mapped operations -- **Zero-copy data transfers** directly to/from SeaweedFS volume files -- **Concurrent session management** with up to 1000+ simultaneous operations - -### ๐Ÿ›ก๏ธ Reliability -- **Automatic HTTP fallback** when RDMA unavailable -- **Graceful degradation** under failure conditions -- **Session timeout and cleanup** to prevent resource leaks -- **Comprehensive error handling** with structured logging - -### ๐Ÿ”ง Production Ready -- **Container-native deployment** with Kubernetes support -- **RDMA device plugin integration** for hardware resource management -- **HugePages optimization** for memory efficiency -- **Prometheus metrics** and structured logging for observability - -### ๐ŸŽš๏ธ Flexibility -- **Mock RDMA implementation** for development and testing -- **Configurable transport selection** (RDMA, TCP, shared memory via UCX) -- **Multi-device support** with automatic failover -- **Authentication and authorization** support - -## ๐Ÿš€ Quick Start - -### Prerequisites - -```bash -# Required dependencies -- Go 1.23+ -- Rust 1.70+ -- UCX libraries (for hardware RDMA) -- Linux with RDMA-capable hardware (InfiniBand/RoCE) - -# Optional for development -- Docker -- Kubernetes -- jq (for demo scripts) -``` - -### ๐Ÿ—๏ธ Build - -```bash -# Clone the repository -git clone -cd seaweedfs-rdma-sidecar - -# Build Go components -go build -o bin/sidecar ./cmd/sidecar -go build -o bin/test-rdma ./cmd/test-rdma -go build -o bin/demo-server ./cmd/demo-server - -# Build Rust engine -cd rdma-engine -cargo build --release -cd .. -``` - -### ๐ŸŽฎ Demo - -Run the complete end-to-end demonstration: - -```bash -# Interactive demo with all components -./scripts/demo-e2e.sh - -# Or run individual components -./rdma-engine/target/release/rdma-engine-server --debug & -./bin/demo-server --port 8080 --enable-rdma -``` - -## ๐Ÿ“Š Performance Results - -### Mock RDMA Performance (Development) -``` -Average Latency: 2.48ms per operation -Throughput: 403.2 operations/sec -Success Rate: 100% -Session Management: โœ… Working -IPC Communication: โœ… Working -``` - -### Expected Hardware RDMA Performance -``` -Average Latency: < 10ยตs per operation (440x improvement) -Throughput: > 1M operations/sec (2500x improvement) -Bandwidth: > 100 Gbps (theoretical InfiniBand limit) -CPU Utilization: < 5% (vs 60%+ for HTTP) -``` - -## ๐Ÿงฉ Components - -### 1๏ธโƒฃ Rust RDMA Engine (`rdma-engine/`) - -High-performance data plane built with: - -- **๐Ÿ”ง UCX Integration**: Production-grade RDMA framework -- **โšก Async Operations**: Tokio-based async runtime -- **๐Ÿง  Memory Management**: Pooled buffers with HugePage support -- **๐Ÿ“ก IPC Server**: Unix domain socket with MessagePack -- **๐Ÿ“Š Session Management**: Thread-safe lifecycle handling - -```rust -// Example: Starting the RDMA engine -let config = RdmaEngineConfig { - device_name: "auto".to_string(), - port: 18515, - max_sessions: 1000, - // ... other config -}; - -let engine = RdmaEngine::new(config).await?; -engine.start().await?; -``` - -### 2๏ธโƒฃ Go Sidecar (`pkg/`, `cmd/`) - -Control plane providing: - -- **๐Ÿ”Œ SeaweedFS Integration**: Native needle read/write support -- **๐Ÿ”„ HTTP Fallback**: Automatic degradation when RDMA unavailable -- **๐Ÿ“ˆ Performance Monitoring**: Metrics and benchmarking -- **๐ŸŒ HTTP API**: RESTful interface for management - -```go -// Example: Using the RDMA client -client := seaweedfs.NewSeaweedFSRDMAClient(&seaweedfs.Config{ - RDMASocketPath: "/tmp/rdma-engine.sock", - Enabled: true, -}) - -resp, err := client.ReadNeedle(ctx, &seaweedfs.NeedleReadRequest{ - VolumeID: 1, - NeedleID: 12345, - Size: 4096, -}) -``` - -### 3๏ธโƒฃ Integration Examples (`cmd/demo-server/`) - -Production-ready integration examples: - -- **๐ŸŒ HTTP Server**: Demonstrates SeaweedFS integration -- **๐Ÿ“Š Benchmarking**: Performance testing utilities -- **๐Ÿ” Health Checks**: Monitoring and diagnostics -- **๐Ÿ“ฑ Web Interface**: Browser-based demo and testing - -## ๐Ÿณ Deployment - -### Kubernetes - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: seaweedfs-with-rdma -spec: - containers: - - name: volume-server - image: chrislusf/seaweedfs:latest - # ... volume server config - - - name: rdma-sidecar - image: seaweedfs-rdma-sidecar:latest - resources: - limits: - rdma/hca: 1 # RDMA device - hugepages-2Mi: 1Gi - volumeMounts: - - name: rdma-socket - mountPath: /tmp/rdma-engine.sock -``` - -### Docker Compose - -```yaml -version: '3.8' -services: - rdma-engine: - build: - context: . - dockerfile: rdma-engine/Dockerfile - privileged: true - volumes: - - /tmp/rdma-engine.sock:/tmp/rdma-engine.sock - - seaweedfs-sidecar: - build: . - depends_on: - - rdma-engine - ports: - - "8080:8080" - volumes: - - /tmp/rdma-engine.sock:/tmp/rdma-engine.sock -``` - -## ๐Ÿงช Testing - -### Unit Tests -```bash -# Go tests -go test ./... - -# Rust tests -cd rdma-engine && cargo test -``` - -### Integration Tests -```bash -# Full end-to-end testing -./scripts/demo-e2e.sh - -# Direct RDMA engine testing -./bin/test-rdma ping -./bin/test-rdma capabilities -./bin/test-rdma read --volume 1 --needle 12345 -./bin/test-rdma bench --iterations 100 -``` - -### Performance Benchmarking -```bash -# HTTP vs RDMA comparison -./bin/demo-server --enable-rdma & -curl "http://localhost:8080/benchmark?iterations=1000&size=1048576" -``` - -## ๐Ÿ”ง Configuration - -### RDMA Engine Configuration - -```toml -# rdma-engine/config.toml -[rdma] -device_name = "mlx5_0" # or "auto" -port = 18515 -max_sessions = 1000 -buffer_size = "1GB" - -[ipc] -socket_path = "/tmp/rdma-engine.sock" -max_connections = 100 - -[logging] -level = "info" -``` - -### Go Sidecar Configuration - -```yaml -# config.yaml -rdma: - socket_path: "/tmp/rdma-engine.sock" - enabled: true - timeout: "30s" - -seaweedfs: - volume_server_url: "http://localhost:8080" - -http: - port: 8080 - enable_cors: true -``` - -## ๐Ÿ“ˆ Monitoring - -### Metrics - -The sidecar exposes Prometheus-compatible metrics: - -- `rdma_operations_total{type="read|write", result="success|error"}` -- `rdma_operation_duration_seconds{type="read|write"}` -- `rdma_sessions_active` -- `rdma_bytes_transferred_total{direction="tx|rx"}` - -### Health Checks - -```bash -# Sidecar health -curl http://localhost:8080/health - -# RDMA engine health -curl http://localhost:8080/stats -``` - -### Logging - -Structured logging with configurable levels: - -```json -{ - "timestamp": "2025-08-16T20:55:17Z", - "level": "INFO", - "message": "โœ… RDMA read completed successfully", - "session_id": "db152578-bfad-4cb3-a50f-a2ac66eecc6a", - "bytes_read": 1024, - "duration": "2.48ms", - "transfer_rate": 800742.88 -} -``` - -## ๐Ÿ› ๏ธ Development - -### Mock RDMA Mode - -For development without RDMA hardware: - -```bash -# Enable mock mode (default) -cargo run --features mock-ucx - -# All operations simulate RDMA with realistic latencies -``` - -### UCX Hardware Mode - -For production with real RDMA hardware: - -```bash -# Enable hardware UCX -cargo run --features real-ucx - -# Requires UCX libraries and RDMA-capable hardware -``` - -### Adding New Operations - -1. **Define protobuf messages** in `rdma-engine/src/ipc.rs` -2. **Implement Go client** in `pkg/ipc/client.go` -3. **Add Rust handler** in `rdma-engine/src/ipc.rs` -4. **Update tests** in both languages - -## ๐Ÿ™ Acknowledgments - -- **[UCX Project](https://github.com/openucx/ucx)** - Unified Communication X framework -- **[SeaweedFS](https://github.com/seaweedfs/seaweedfs)** - Distributed file system -- **Rust Community** - Excellent async/await and FFI capabilities -- **Go Community** - Robust networking and gRPC libraries - -## ๐Ÿ“ž Support - -- ๐Ÿ› **Bug Reports**: [Create an issue](../../issues/new?template=bug_report.md) -- ๐Ÿ’ก **Feature Requests**: [Create an issue](../../issues/new?template=feature_request.md) -- ๐Ÿ“š **Documentation**: See [docs/](docs/) folder -- ๐Ÿ’ฌ **Discussions**: [GitHub Discussions](../../discussions) - ---- - -**๐Ÿš€ Ready to accelerate your SeaweedFS deployment with RDMA?** - -Get started with the [Quick Start Guide](#-quick-start) or explore the [Demo Server](cmd/demo-server/) for hands-on experience! - diff --git a/seaweedfs-rdma-sidecar/REVIEW_FEEDBACK.md b/seaweedfs-rdma-sidecar/REVIEW_FEEDBACK.md deleted file mode 100644 index 5034f1bf0..000000000 --- a/seaweedfs-rdma-sidecar/REVIEW_FEEDBACK.md +++ /dev/null @@ -1,55 +0,0 @@ -# PR #7140 Review Feedback Summary - -## Positive Feedback Received โœ… - -### Source: [GitHub PR #7140 Review](https://github.com/seaweedfs/seaweedfs/pull/7140#pullrequestreview-3126580539) -**Reviewer**: Gemini Code Assist (Automated Review Bot) -**Date**: August 18, 2025 - -## Comments Analysis - -### ๐Ÿ† Binary Search Optimization - PRAISED -**File**: `weed/mount/filehandle_read.go` -**Implementation**: Efficient chunk lookup using binary search with cached cumulative offsets - -**Reviewer Comment**: -> "The `tryRDMARead` function efficiently finds the target chunk for a given offset by using a binary search on cached cumulative chunk offsets. This is an effective optimization that will perform well even for files with a large number of chunks." - -**Technical Merit**: -- โœ… O(log N) performance vs O(N) linear search -- โœ… Cached cumulative offsets prevent repeated calculations -- โœ… Scales well for large fragmented files -- โœ… Memory-efficient implementation - -### ๐Ÿ† Resource Management - PRAISED -**File**: `weed/mount/weedfs.go` -**Implementation**: Proper RDMA client initialization and cleanup - -**Reviewer Comment**: -> "The RDMA client is now correctly initialized and attached to the `WFS` struct when RDMA is enabled. The shutdown logic in the `grace.OnInterrupt` handler has also been updated to properly close the RDMA client, preventing resource leaks." - -**Technical Merit**: -- โœ… Proper initialization with error handling -- โœ… Clean shutdown in interrupt handler -- โœ… No resource leaks -- โœ… Graceful degradation on failure - -## Summary - -**All review comments are positive acknowledgments of excellent implementation practices.** - -### Key Strengths Recognized: -1. **Performance Optimization**: Binary search algorithm implementation -2. **Memory Safety**: Proper resource lifecycle management -3. **Code Quality**: Clean, efficient, and maintainable code -4. **Production Readiness**: Robust error handling and cleanup - -### Build Status: โœ… PASSING -- โœ… `go build ./...` - All packages compile successfully -- โœ… `go vet ./...` - No linting issues -- โœ… All tests passing -- โœ… Docker builds working - -## Conclusion - -The RDMA sidecar implementation has received positive feedback from automated code review, confirming high code quality and adherence to best practices. **No action items required** - these are endorsements of excellent work. diff --git a/seaweedfs-rdma-sidecar/WEED-MOUNT-CODE-PATH.md b/seaweedfs-rdma-sidecar/WEED-MOUNT-CODE-PATH.md deleted file mode 100644 index 1fdace934..000000000 --- a/seaweedfs-rdma-sidecar/WEED-MOUNT-CODE-PATH.md +++ /dev/null @@ -1,260 +0,0 @@ -# ๐Ÿ“‹ Weed Mount RDMA Integration - Code Path Analysis - -## Current Status - -The RDMA client (`RDMAMountClient`) exists in `weed/mount/rdma_client.go` but is **not yet integrated** into the actual file read path. The integration points are identified but not implemented. - -## ๐Ÿ” Complete Code Path - -### **1. FUSE Read Request Entry Point** -```go -// File: weed/mount/weedfs_file_read.go:41 -func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse.ReadResult, fuse.Status) { - fh := wfs.GetHandle(FileHandleId(in.Fh)) - // ... - offset := int64(in.Offset) - totalRead, err := readDataByFileHandleWithContext(ctx, buff, fh, offset) - // ... - return fuse.ReadResultData(buff[:totalRead]), fuse.OK -} -``` - -### **2. File Handle Read Coordination** -```go -// File: weed/mount/weedfs_file_read.go:103 -func readDataByFileHandleWithContext(ctx context.Context, buff []byte, fhIn *FileHandle, offset int64) (int64, error) { - size := len(buff) - fhIn.lockForRead(offset, size) - defer fhIn.unlockForRead(offset, size) - - // KEY INTEGRATION POINT: This is where RDMA should be attempted - n, tsNs, err := fhIn.readFromChunksWithContext(ctx, buff, offset) - // ... - return n, err -} -``` - -### **3. Chunk Reading (Current Implementation)** -```go -// File: weed/mount/filehandle_read.go:29 -func (fh *FileHandle) readFromChunksWithContext(ctx context.Context, buff []byte, offset int64) (int64, int64, error) { - // ... - - // CURRENT: Direct chunk reading without RDMA - totalRead, ts, err := fh.entryChunkGroup.ReadDataAt(ctx, fileSize, buff, offset) - - // MISSING: RDMA integration should happen here - return int64(totalRead), ts, err -} -``` - -### **4. RDMA Integration Point (What Needs to Be Added)** - -The integration should happen in `readFromChunksWithContext` like this: - -```go -func (fh *FileHandle) readFromChunksWithContext(ctx context.Context, buff []byte, offset int64) (int64, int64, error) { - // ... existing code ... - - // NEW: Try RDMA acceleration first - if fh.wfs.rdmaClient != nil && fh.wfs.rdmaClient.IsHealthy() { - if totalRead, ts, err := fh.tryRDMARead(ctx, buff, offset); err == nil { - glog.V(4).Infof("RDMA read successful: %d bytes", totalRead) - return totalRead, ts, nil - } - glog.V(2).Infof("RDMA read failed, falling back to HTTP") - } - - // FALLBACK: Original HTTP-based chunk reading - totalRead, ts, err := fh.entryChunkGroup.ReadDataAt(ctx, fileSize, buff, offset) - return int64(totalRead), ts, err -} -``` - -## ๐Ÿš€ RDMA Client Integration - -### **5. RDMA Read Implementation (Already Exists)** -```go -// File: weed/mount/rdma_client.go:129 -func (c *RDMAMountClient) ReadNeedle(ctx context.Context, volumeID uint32, needleID uint64, cookie uint32, offset, size uint64) ([]byte, bool, error) { - // Prepare request URL - reqURL := fmt.Sprintf("http://%s/read?volume=%d&needle=%d&cookie=%d&offset=%d&size=%d", - c.sidecarAddr, volumeID, needleID, cookie, offset, size) - - // Execute HTTP request to RDMA sidecar - resp, err := c.httpClient.Do(req) - // ... - - // Return data with RDMA metadata - return data, isRDMA, nil -} -``` - -### **6. RDMA Sidecar Processing** -```go -// File: seaweedfs-rdma-sidecar/cmd/demo-server/main.go:375 -func (s *DemoServer) readHandler(w http.ResponseWriter, r *http.Request) { - // Parse volume, needle, cookie from URL parameters - volumeID, _ := strconv.ParseUint(query.Get("volume"), 10, 32) - needleID, _ := strconv.ParseUint(query.Get("needle"), 10, 64) - - // Use distributed client for volume lookup + RDMA - if s.useDistributed && s.distributedClient != nil { - resp, err = s.distributedClient.ReadNeedle(ctx, req) - } else { - resp, err = s.rdmaClient.ReadNeedle(ctx, req) // Local RDMA - } - - // Return binary data or JSON metadata - w.Write(resp.Data) -} -``` - -### **7. Volume Lookup & RDMA Engine** -```go -// File: seaweedfs-rdma-sidecar/pkg/seaweedfs/distributed_client.go:45 -func (c *DistributedRDMAClient) ReadNeedle(ctx context.Context, req *NeedleReadRequest) (*NeedleReadResponse, error) { - // Step 1: Lookup volume location from master - locations, err := c.locationService.LookupVolume(ctx, req.VolumeID) - - // Step 2: Find best server (local preferred) - bestLocation := c.locationService.FindBestLocation(locations) - - // Step 3: Make HTTP request to target server's RDMA sidecar - return c.makeRDMARequest(ctx, req, bestLocation, start) -} -``` - -### **8. Rust RDMA Engine (Final Data Access)** -```rust -// File: rdma-engine/src/ipc.rs:403 -async fn handle_start_read(req: StartReadRequest, ...) -> RdmaResult { - // Create RDMA session - let session_id = Uuid::new_v4().to_string(); - let buffer = vec![0u8; transfer_size as usize]; - - // Register memory for RDMA - let memory_region = rdma_context.register_memory(local_addr, transfer_size).await?; - - // Perform RDMA read (mock implementation) - rdma_context.post_read(local_addr, remote_addr, remote_key, size, wr_id).await?; - let completions = rdma_context.poll_completion(1).await?; - - // Return session info - Ok(StartReadResponse { session_id, local_addr, ... }) -} -``` - -## ๐Ÿ”ง Missing Integration Components - -### **1. WFS Struct Extension** -```go -// File: weed/mount/weedfs.go (needs modification) -type WFS struct { - // ... existing fields ... - rdmaClient *RDMAMountClient // ADD THIS -} -``` - -### **2. RDMA Client Initialization** -```go -// File: weed/command/mount.go (needs modification) -func runMount(cmd *cobra.Command, args []string) bool { - // ... existing code ... - - // NEW: Initialize RDMA client if enabled - var rdmaClient *mount.RDMAMountClient - if *mountOptions.rdmaEnabled && *mountOptions.rdmaSidecarAddr != "" { - rdmaClient, err = mount.NewRDMAMountClient( - *mountOptions.rdmaSidecarAddr, - *mountOptions.rdmaMaxConcurrent, - *mountOptions.rdmaTimeoutMs, - ) - if err != nil { - glog.Warningf("Failed to initialize RDMA client: %v", err) - } - } - - // Pass RDMA client to WFS - wfs := mount.NewSeaweedFileSystem(&mount.Option{ - // ... existing options ... - RDMAClient: rdmaClient, // ADD THIS - }) -} -``` - -### **3. Chunk-to-Needle Mapping** -```go -// File: weed/mount/filehandle_read.go (needs new method) -func (fh *FileHandle) tryRDMARead(ctx context.Context, buff []byte, offset int64) (int64, int64, error) { - entry := fh.GetEntry() - - // Find which chunk contains the requested offset - for _, chunk := range entry.GetEntry().Chunks { - if offset >= chunk.Offset && offset < chunk.Offset+int64(chunk.Size) { - // Parse chunk.FileId to get volume, needle, cookie - volumeID, needleID, cookie, err := ParseFileId(chunk.FileId) - if err != nil { - return 0, 0, err - } - - // Calculate offset within the chunk - chunkOffset := uint64(offset - chunk.Offset) - readSize := uint64(min(len(buff), int(chunk.Size-chunkOffset))) - - // Make RDMA request - data, isRDMA, err := fh.wfs.rdmaClient.ReadNeedle( - ctx, volumeID, needleID, cookie, chunkOffset, readSize) - if err != nil { - return 0, 0, err - } - - // Copy data to buffer - copied := copy(buff, data) - return int64(copied), time.Now().UnixNano(), nil - } - } - - return 0, 0, fmt.Errorf("chunk not found for offset %d", offset) -} -``` - -## ๐Ÿ“Š Request Flow Summary - -1. **User Application** โ†’ `read()` system call -2. **FUSE Kernel** โ†’ Routes to `WFS.Read()` -3. **WFS.Read()** โ†’ Calls `readDataByFileHandleWithContext()` -4. **readDataByFileHandleWithContext()** โ†’ Calls `fh.readFromChunksWithContext()` -5. **readFromChunksWithContext()** โ†’ **[INTEGRATION POINT]** Try RDMA first -6. **tryRDMARead()** โ†’ Parse chunk info, call `RDMAMountClient.ReadNeedle()` -7. **RDMAMountClient** โ†’ HTTP request to RDMA sidecar -8. **RDMA Sidecar** โ†’ Volume lookup + RDMA engine call -9. **RDMA Engine** โ†’ Direct memory access via RDMA hardware -10. **Response Path** โ†’ Data flows back through all layers to user - -## โœ… What's Working vs Missing - -### **โœ… Already Implemented:** -- โœ… `RDMAMountClient` with HTTP communication -- โœ… RDMA sidecar with volume lookup -- โœ… Rust RDMA engine with mock hardware -- โœ… File ID parsing utilities -- โœ… Health checks and statistics -- โœ… Command-line flags for RDMA options - -### **โŒ Missing Integration:** -- โŒ RDMA client not added to WFS struct -- โŒ RDMA client not initialized in mount command -- โŒ `tryRDMARead()` method not implemented -- โŒ Chunk-to-needle mapping logic missing -- โŒ RDMA integration not wired into read path - -## ๐ŸŽฏ Next Steps - -1. **Add RDMA client to WFS struct and Option** -2. **Initialize RDMA client in mount command** -3. **Implement `tryRDMARead()` method** -4. **Wire RDMA integration into `readFromChunksWithContext()`** -5. **Test end-to-end RDMA acceleration** - -The architecture is sound and most components exist - only the final integration wiring is needed! diff --git a/seaweedfs-rdma-sidecar/cmd/demo-server/main.go b/seaweedfs-rdma-sidecar/cmd/demo-server/main.go deleted file mode 100644 index 42b5020e5..000000000 --- a/seaweedfs-rdma-sidecar/cmd/demo-server/main.go +++ /dev/null @@ -1,663 +0,0 @@ -// Package main provides a demonstration server showing SeaweedFS RDMA integration -package main - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "seaweedfs-rdma-sidecar/pkg/seaweedfs" - - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var ( - port int - rdmaSocket string - volumeServerURL string - enableRDMA bool - enableZeroCopy bool - tempDir string - enablePooling bool - maxConnections int - maxIdleTime time.Duration - debug bool -) - -func main() { - var rootCmd = &cobra.Command{ - Use: "demo-server", - Short: "SeaweedFS RDMA integration demonstration server", - Long: `Demonstration server that shows how SeaweedFS can integrate with the RDMA sidecar -for accelerated read operations. This server provides HTTP endpoints that demonstrate -the RDMA fast path with HTTP fallback capabilities.`, - RunE: runServer, - } - - rootCmd.Flags().IntVarP(&port, "port", "p", 8080, "Demo server HTTP port") - rootCmd.Flags().StringVarP(&rdmaSocket, "rdma-socket", "r", "/tmp/rdma-engine.sock", "Path to RDMA engine Unix socket") - rootCmd.Flags().StringVarP(&volumeServerURL, "volume-server", "v", "http://localhost:8080", "SeaweedFS volume server URL for HTTP fallback") - rootCmd.Flags().BoolVarP(&enableRDMA, "enable-rdma", "e", true, "Enable RDMA acceleration") - rootCmd.Flags().BoolVarP(&enableZeroCopy, "enable-zerocopy", "z", true, "Enable zero-copy optimization via temp files") - rootCmd.Flags().StringVarP(&tempDir, "temp-dir", "t", "/tmp/rdma-cache", "Temp directory for zero-copy files") - rootCmd.Flags().BoolVar(&enablePooling, "enable-pooling", true, "Enable RDMA connection pooling") - rootCmd.Flags().IntVar(&maxConnections, "max-connections", 10, "Maximum connections in RDMA pool") - rootCmd.Flags().DurationVar(&maxIdleTime, "max-idle-time", 5*time.Minute, "Maximum idle time for pooled connections") - rootCmd.Flags().BoolVarP(&debug, "debug", "d", false, "Enable debug logging") - - if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func runServer(cmd *cobra.Command, args []string) error { - // Setup logging - logger := logrus.New() - if debug { - logger.SetLevel(logrus.DebugLevel) - logger.SetFormatter(&logrus.TextFormatter{ - FullTimestamp: true, - ForceColors: true, - }) - } else { - logger.SetLevel(logrus.InfoLevel) - } - - logger.WithFields(logrus.Fields{ - "port": port, - "rdma_socket": rdmaSocket, - "volume_server_url": volumeServerURL, - "enable_rdma": enableRDMA, - "enable_zerocopy": enableZeroCopy, - "temp_dir": tempDir, - "enable_pooling": enablePooling, - "max_connections": maxConnections, - "max_idle_time": maxIdleTime, - "debug": debug, - }).Info("๐Ÿš€ Starting SeaweedFS RDMA Demo Server") - - // Create SeaweedFS RDMA client - config := &seaweedfs.Config{ - RDMASocketPath: rdmaSocket, - VolumeServerURL: volumeServerURL, - Enabled: enableRDMA, - DefaultTimeout: 30 * time.Second, - Logger: logger, - TempDir: tempDir, - UseZeroCopy: enableZeroCopy, - EnablePooling: enablePooling, - MaxConnections: maxConnections, - MaxIdleTime: maxIdleTime, - } - - rdmaClient, err := seaweedfs.NewSeaweedFSRDMAClient(config) - if err != nil { - return fmt.Errorf("failed to create RDMA client: %w", err) - } - - // Start RDMA client - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - if err := rdmaClient.Start(ctx); err != nil { - logger.WithError(err).Error("Failed to start RDMA client") - } - cancel() - - // Create demo server - server := &DemoServer{ - rdmaClient: rdmaClient, - logger: logger, - } - - // Setup HTTP routes - mux := http.NewServeMux() - mux.HandleFunc("/", server.homeHandler) - mux.HandleFunc("/health", server.healthHandler) - mux.HandleFunc("/stats", server.statsHandler) - mux.HandleFunc("/read", server.readHandler) - mux.HandleFunc("/benchmark", server.benchmarkHandler) - mux.HandleFunc("/cleanup", server.cleanupHandler) - - httpServer := &http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: mux, - } - - // Handle graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - logger.WithField("port", port).Info("๐ŸŒ Demo server starting") - if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - logger.WithError(err).Fatal("HTTP server failed") - } - }() - - // Wait for shutdown signal - <-sigChan - logger.Info("๐Ÿ“ก Received shutdown signal, gracefully shutting down...") - - // Shutdown HTTP server - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) - defer shutdownCancel() - - if err := httpServer.Shutdown(shutdownCtx); err != nil { - logger.WithError(err).Error("HTTP server shutdown failed") - } else { - logger.Info("๐ŸŒ HTTP server shutdown complete") - } - - // Stop RDMA client - rdmaClient.Stop() - logger.Info("๐Ÿ›‘ Demo server shutdown complete") - - return nil -} - -// DemoServer demonstrates SeaweedFS RDMA integration -type DemoServer struct { - rdmaClient *seaweedfs.SeaweedFSRDMAClient - logger *logrus.Logger -} - -// homeHandler provides information about the demo server -func (s *DemoServer) homeHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("Content-Type", "text/html") - fmt.Fprintf(w, ` - - - SeaweedFS RDMA Demo Server - - - -
-

๐Ÿš€ SeaweedFS RDMA Demo Server

-

This server demonstrates SeaweedFS integration with RDMA acceleration for high-performance reads.

- -
- RDMA Status: %s -
- -

๐Ÿ“‹ Available Endpoints

- -
-

๐Ÿฅ Health Check

-

/health - Check server and RDMA engine health

-
- -
-

๐Ÿ“Š Statistics

-

/stats - Get RDMA client statistics and capabilities

-
- -
-

๐Ÿ“– Read Needle

-

/read - Read a needle with RDMA fast path

-

Parameters: file_id OR (volume, needle, cookie), volume_server, offset (optional), size (optional)

-
- -
-

๐Ÿ Benchmark

-

/benchmark - Run performance benchmark

-

Parameters: iterations (default: 10), size (default: 4096)

-
- -

๐Ÿ“ Example Usage

-
-# Read a needle using file ID (recommended)
-curl "http://localhost:%d/read?file_id=3,01637037d6&size=1024&volume_server=http://localhost:8080"
-
-# Read a needle using individual parameters (legacy)
-curl "http://localhost:%d/read?volume=1&needle=12345&cookie=305419896&size=1024&volume_server=http://localhost:8080"
-
-# Read a needle (hex cookie)
-curl "http://localhost:%d/read?volume=1&needle=12345&cookie=0x12345678&size=1024&volume_server=http://localhost:8080"
-
-# Run benchmark
-curl "http://localhost:%d/benchmark?iterations=5&size=2048"
-
-# Check health
-curl "http://localhost:%d/health"
-        
-
- -`, - map[bool]string{true: "enabled", false: "disabled"}[s.rdmaClient.IsEnabled()], - map[bool]string{true: "RDMA Enabled โœ…", false: "RDMA Disabled (HTTP Fallback Only) โš ๏ธ"}[s.rdmaClient.IsEnabled()], - port, port, port, port) -} - -// healthHandler checks server and RDMA health -func (s *DemoServer) healthHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - health := map[string]interface{}{ - "status": "healthy", - "timestamp": time.Now().Format(time.RFC3339), - "rdma": map[string]interface{}{ - "enabled": false, - "connected": false, - }, - } - - if s.rdmaClient != nil { - health["rdma"].(map[string]interface{})["enabled"] = s.rdmaClient.IsEnabled() - health["rdma"].(map[string]interface{})["type"] = "local" - - if s.rdmaClient.IsEnabled() { - if err := s.rdmaClient.HealthCheck(ctx); err != nil { - s.logger.WithError(err).Warn("RDMA health check failed") - health["rdma"].(map[string]interface{})["error"] = err.Error() - } else { - health["rdma"].(map[string]interface{})["connected"] = true - } - } - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(health) -} - -// statsHandler returns RDMA statistics -func (s *DemoServer) statsHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - var stats map[string]interface{} - - if s.rdmaClient != nil { - stats = s.rdmaClient.GetStats() - stats["client_type"] = "local" - } else { - stats = map[string]interface{}{ - "client_type": "none", - "error": "no RDMA client available", - } - } - - stats["timestamp"] = time.Now().Format(time.RFC3339) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(stats) -} - -// readHandler demonstrates needle reading with RDMA -func (s *DemoServer) readHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Parse parameters - support both file_id and individual parameters for backward compatibility - query := r.URL.Query() - volumeServer := query.Get("volume_server") - fileID := query.Get("file_id") - - var volumeID, cookie uint64 - var needleID uint64 - var err error - - if fileID != "" { - // Use file ID format (e.g., "3,01637037d6") - // Extract individual components using existing SeaweedFS parsing - fid, parseErr := needle.ParseFileIdFromString(fileID) - if parseErr != nil { - http.Error(w, fmt.Sprintf("invalid 'file_id' parameter: %v", parseErr), http.StatusBadRequest) - return - } - volumeID = uint64(fid.VolumeId) - needleID = uint64(fid.Key) - cookie = uint64(fid.Cookie) - } else { - // Use individual parameters (backward compatibility) - volumeID, err = strconv.ParseUint(query.Get("volume"), 10, 32) - if err != nil { - http.Error(w, "invalid 'volume' parameter", http.StatusBadRequest) - return - } - - needleID, err = strconv.ParseUint(query.Get("needle"), 10, 64) - if err != nil { - http.Error(w, "invalid 'needle' parameter", http.StatusBadRequest) - return - } - - // Parse cookie parameter - support both decimal and hexadecimal formats - cookieStr := query.Get("cookie") - if strings.HasPrefix(strings.ToLower(cookieStr), "0x") { - // Parse as hexadecimal (remove "0x" prefix) - cookie, err = strconv.ParseUint(cookieStr[2:], 16, 32) - } else { - // Parse as decimal (default) - cookie, err = strconv.ParseUint(cookieStr, 10, 32) - } - if err != nil { - http.Error(w, "invalid 'cookie' parameter (expected decimal or hex with 0x prefix)", http.StatusBadRequest) - return - } - } - - var offset uint64 - if offsetStr := query.Get("offset"); offsetStr != "" { - var parseErr error - offset, parseErr = strconv.ParseUint(offsetStr, 10, 64) - if parseErr != nil { - http.Error(w, "invalid 'offset' parameter", http.StatusBadRequest) - return - } - } - - var size uint64 - if sizeStr := query.Get("size"); sizeStr != "" { - var parseErr error - size, parseErr = strconv.ParseUint(sizeStr, 10, 64) - if parseErr != nil { - http.Error(w, "invalid 'size' parameter", http.StatusBadRequest) - return - } - } - - if volumeServer == "" { - http.Error(w, "volume_server parameter is required", http.StatusBadRequest) - return - } - - if volumeID == 0 || needleID == 0 { - http.Error(w, "volume and needle parameters are required", http.StatusBadRequest) - return - } - - // Note: cookie and size can have defaults for demo purposes when user provides empty values, - // but invalid parsing is caught above with proper error responses - if cookie == 0 { - cookie = 0x12345678 // Default cookie for demo - } - - if size == 0 { - size = 4096 // Default size - } - - logFields := logrus.Fields{ - "volume_server": volumeServer, - "volume_id": volumeID, - "needle_id": needleID, - "cookie": fmt.Sprintf("0x%x", cookie), - "offset": offset, - "size": size, - } - if fileID != "" { - logFields["file_id"] = fileID - } - s.logger.WithFields(logFields).Info("๐Ÿ“– Processing needle read request") - - ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second) - defer cancel() - - start := time.Now() - req := &seaweedfs.NeedleReadRequest{ - VolumeID: uint32(volumeID), - NeedleID: needleID, - Cookie: uint32(cookie), - Offset: offset, - Size: size, - VolumeServer: volumeServer, - } - - resp, err := s.rdmaClient.ReadNeedle(ctx, req) - - if err != nil { - s.logger.WithError(err).Error("โŒ Needle read failed") - http.Error(w, fmt.Sprintf("Read failed: %v", err), http.StatusInternalServerError) - return - } - - duration := time.Since(start) - - s.logger.WithFields(logrus.Fields{ - "volume_id": volumeID, - "needle_id": needleID, - "is_rdma": resp.IsRDMA, - "source": resp.Source, - "duration": duration, - "data_size": len(resp.Data), - }).Info("โœ… Needle read completed") - - // Return metadata and first few bytes - result := map[string]interface{}{ - "success": true, - "volume_id": volumeID, - "needle_id": needleID, - "cookie": fmt.Sprintf("0x%x", cookie), - "is_rdma": resp.IsRDMA, - "source": resp.Source, - "session_id": resp.SessionID, - "duration": duration.String(), - "data_size": len(resp.Data), - "timestamp": time.Now().Format(time.RFC3339), - "use_temp_file": resp.UseTempFile, - "temp_file": resp.TempFilePath, - } - - // Set headers for zero-copy optimization - if resp.UseTempFile && resp.TempFilePath != "" { - w.Header().Set("X-Use-Temp-File", "true") - w.Header().Set("X-Temp-File", resp.TempFilePath) - w.Header().Set("X-Source", resp.Source) - w.Header().Set("X-RDMA-Used", fmt.Sprintf("%t", resp.IsRDMA)) - - // For zero-copy, return minimal JSON response and let client read from temp file - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(result) - return - } - - // Regular response with data - w.Header().Set("X-Source", resp.Source) - w.Header().Set("X-RDMA-Used", fmt.Sprintf("%t", resp.IsRDMA)) - - // Include first 32 bytes as hex for verification - if len(resp.Data) > 0 { - displayLen := 32 - if len(resp.Data) < displayLen { - displayLen = len(resp.Data) - } - result["data_preview"] = fmt.Sprintf("%x", resp.Data[:displayLen]) - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(result) -} - -// benchmarkHandler runs performance benchmarks -func (s *DemoServer) benchmarkHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Parse parameters - query := r.URL.Query() - - iterations := 10 // default value - if iterationsStr := query.Get("iterations"); iterationsStr != "" { - var parseErr error - iterations, parseErr = strconv.Atoi(iterationsStr) - if parseErr != nil { - http.Error(w, "invalid 'iterations' parameter", http.StatusBadRequest) - return - } - } - - size := uint64(4096) // default value - if sizeStr := query.Get("size"); sizeStr != "" { - var parseErr error - size, parseErr = strconv.ParseUint(sizeStr, 10, 64) - if parseErr != nil { - http.Error(w, "invalid 'size' parameter", http.StatusBadRequest) - return - } - } - - if iterations <= 0 { - iterations = 10 - } - if size == 0 { - size = 4096 - } - - s.logger.WithFields(logrus.Fields{ - "iterations": iterations, - "size": size, - }).Info("๐Ÿ Starting benchmark") - - ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second) - defer cancel() - - var rdmaSuccessful, rdmaFailed, httpSuccessful, httpFailed int - var totalDuration time.Duration - var totalBytes uint64 - - startTime := time.Now() - - for i := 0; i < iterations; i++ { - req := &seaweedfs.NeedleReadRequest{ - VolumeID: 1, - NeedleID: uint64(i + 1), - Cookie: 0x12345678, - Offset: 0, - Size: size, - } - - opStart := time.Now() - resp, err := s.rdmaClient.ReadNeedle(ctx, req) - opDuration := time.Since(opStart) - - if err != nil { - httpFailed++ - continue - } - - totalDuration += opDuration - totalBytes += uint64(len(resp.Data)) - - if resp.IsRDMA { - rdmaSuccessful++ - } else { - httpSuccessful++ - } - } - - benchDuration := time.Since(startTime) - - // Calculate statistics - totalOperations := rdmaSuccessful + httpSuccessful - avgLatency := time.Duration(0) - if totalOperations > 0 { - avgLatency = totalDuration / time.Duration(totalOperations) - } - - throughputMBps := float64(totalBytes) / benchDuration.Seconds() / (1024 * 1024) - opsPerSec := float64(totalOperations) / benchDuration.Seconds() - - result := map[string]interface{}{ - "benchmark_results": map[string]interface{}{ - "iterations": iterations, - "size_per_op": size, - "total_duration": benchDuration.String(), - "successful_ops": totalOperations, - "failed_ops": rdmaFailed + httpFailed, - "rdma_ops": rdmaSuccessful, - "http_ops": httpSuccessful, - "avg_latency": avgLatency.String(), - "throughput_mbps": fmt.Sprintf("%.2f", throughputMBps), - "ops_per_sec": fmt.Sprintf("%.1f", opsPerSec), - "total_bytes": totalBytes, - }, - "rdma_enabled": s.rdmaClient.IsEnabled(), - "timestamp": time.Now().Format(time.RFC3339), - } - - s.logger.WithFields(logrus.Fields{ - "iterations": iterations, - "successful_ops": totalOperations, - "rdma_ops": rdmaSuccessful, - "http_ops": httpSuccessful, - "avg_latency": avgLatency, - "throughput_mbps": throughputMBps, - "ops_per_sec": opsPerSec, - }).Info("๐Ÿ“Š Benchmark completed") - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(result) -} - -// cleanupHandler handles temp file cleanup requests from mount clients -func (s *DemoServer) cleanupHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodDelete { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Get temp file path from query parameters - tempFilePath := r.URL.Query().Get("temp_file") - if tempFilePath == "" { - http.Error(w, "missing 'temp_file' parameter", http.StatusBadRequest) - return - } - - s.logger.WithField("temp_file", tempFilePath).Debug("๐Ÿ—‘๏ธ Processing cleanup request") - - // Use the RDMA client's cleanup method (which delegates to seaweedfs client) - err := s.rdmaClient.CleanupTempFile(tempFilePath) - if err != nil { - s.logger.WithError(err).WithField("temp_file", tempFilePath).Warn("Failed to cleanup temp file") - http.Error(w, fmt.Sprintf("cleanup failed: %v", err), http.StatusInternalServerError) - return - } - - s.logger.WithField("temp_file", tempFilePath).Debug("๐Ÿงน Temp file cleanup successful") - - // Return success response - w.Header().Set("Content-Type", "application/json") - response := map[string]interface{}{ - "success": true, - "message": "temp file cleaned up successfully", - "temp_file": tempFilePath, - "timestamp": time.Now().Format(time.RFC3339), - } - json.NewEncoder(w).Encode(response) -} diff --git a/seaweedfs-rdma-sidecar/cmd/sidecar/main.go b/seaweedfs-rdma-sidecar/cmd/sidecar/main.go deleted file mode 100644 index 55d98c4c6..000000000 --- a/seaweedfs-rdma-sidecar/cmd/sidecar/main.go +++ /dev/null @@ -1,345 +0,0 @@ -// Package main provides the main RDMA sidecar service that integrates with SeaweedFS -package main - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - "seaweedfs-rdma-sidecar/pkg/rdma" - - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var ( - port int - engineSocket string - debug bool - timeout time.Duration -) - -// Response structs for JSON encoding -type HealthResponse struct { - Status string `json:"status"` - RdmaEngineConnected bool `json:"rdma_engine_connected"` - RdmaEngineLatency string `json:"rdma_engine_latency"` - Timestamp string `json:"timestamp"` -} - -type CapabilitiesResponse struct { - Version string `json:"version"` - DeviceName string `json:"device_name"` - VendorId uint32 `json:"vendor_id"` - MaxSessions uint32 `json:"max_sessions"` - MaxTransferSize uint64 `json:"max_transfer_size"` - ActiveSessions uint32 `json:"active_sessions"` - RealRdma bool `json:"real_rdma"` - PortGid string `json:"port_gid"` - PortLid uint16 `json:"port_lid"` - SupportedAuth []string `json:"supported_auth"` -} - -type PingResponse struct { - Success bool `json:"success"` - EngineLatency string `json:"engine_latency"` - TotalLatency string `json:"total_latency"` - Timestamp string `json:"timestamp"` -} - -func main() { - var rootCmd = &cobra.Command{ - Use: "rdma-sidecar", - Short: "SeaweedFS RDMA acceleration sidecar", - Long: `RDMA sidecar that accelerates SeaweedFS read/write operations using UCX and Rust RDMA engine. - -This sidecar acts as a bridge between SeaweedFS volume servers and the high-performance -Rust RDMA engine, providing significant performance improvements for data-intensive workloads.`, - RunE: runSidecar, - } - - // Flags - rootCmd.Flags().IntVarP(&port, "port", "p", 8081, "HTTP server port") - rootCmd.Flags().StringVarP(&engineSocket, "engine-socket", "e", "/tmp/rdma-engine.sock", "Path to RDMA engine Unix socket") - rootCmd.Flags().BoolVarP(&debug, "debug", "d", false, "Enable debug logging") - rootCmd.Flags().DurationVarP(&timeout, "timeout", "t", 30*time.Second, "RDMA operation timeout") - - if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func runSidecar(cmd *cobra.Command, args []string) error { - // Setup logging - logger := logrus.New() - if debug { - logger.SetLevel(logrus.DebugLevel) - logger.SetFormatter(&logrus.TextFormatter{ - FullTimestamp: true, - ForceColors: true, - }) - } else { - logger.SetLevel(logrus.InfoLevel) - } - - logger.WithFields(logrus.Fields{ - "port": port, - "engine_socket": engineSocket, - "debug": debug, - "timeout": timeout, - }).Info("๐Ÿš€ Starting SeaweedFS RDMA Sidecar") - - // Create RDMA client - rdmaConfig := &rdma.Config{ - EngineSocketPath: engineSocket, - DefaultTimeout: timeout, - Logger: logger, - } - - rdmaClient := rdma.NewClient(rdmaConfig) - - // Connect to RDMA engine - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - logger.Info("๐Ÿ”— Connecting to RDMA engine...") - if err := rdmaClient.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect to RDMA engine: %w", err) - } - logger.Info("โœ… Connected to RDMA engine successfully") - - // Create HTTP server - sidecar := &Sidecar{ - rdmaClient: rdmaClient, - logger: logger, - } - - mux := http.NewServeMux() - - // Health check endpoint - mux.HandleFunc("/health", sidecar.healthHandler) - - // RDMA operations endpoints - mux.HandleFunc("/rdma/read", sidecar.rdmaReadHandler) - mux.HandleFunc("/rdma/capabilities", sidecar.capabilitiesHandler) - mux.HandleFunc("/rdma/ping", sidecar.pingHandler) - - server := &http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: mux, - } - - // Handle graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - logger.WithField("port", port).Info("๐ŸŒ HTTP server starting") - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - logger.WithError(err).Fatal("HTTP server failed") - } - }() - - // Wait for shutdown signal - <-sigChan - logger.Info("๐Ÿ“ก Received shutdown signal, gracefully shutting down...") - - // Shutdown HTTP server - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) - defer shutdownCancel() - - if err := server.Shutdown(shutdownCtx); err != nil { - logger.WithError(err).Error("HTTP server shutdown failed") - } else { - logger.Info("๐ŸŒ HTTP server shutdown complete") - } - - // Disconnect from RDMA engine - rdmaClient.Disconnect() - logger.Info("๐Ÿ›‘ RDMA sidecar shutdown complete") - - return nil -} - -// Sidecar represents the main sidecar service -type Sidecar struct { - rdmaClient *rdma.Client - logger *logrus.Logger -} - -// Health check handler -func (s *Sidecar) healthHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - // Test RDMA engine connectivity - if !s.rdmaClient.IsConnected() { - s.logger.Warn("โš ๏ธ RDMA engine not connected") - http.Error(w, "RDMA engine not connected", http.StatusServiceUnavailable) - return - } - - // Ping RDMA engine - latency, err := s.rdmaClient.Ping(ctx) - if err != nil { - s.logger.WithError(err).Error("โŒ RDMA engine ping failed") - http.Error(w, "RDMA engine ping failed", http.StatusServiceUnavailable) - return - } - - w.Header().Set("Content-Type", "application/json") - response := HealthResponse{ - Status: "healthy", - RdmaEngineConnected: true, - RdmaEngineLatency: latency.String(), - Timestamp: time.Now().Format(time.RFC3339), - } - json.NewEncoder(w).Encode(response) -} - -// RDMA capabilities handler -func (s *Sidecar) capabilitiesHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - caps := s.rdmaClient.GetCapabilities() - if caps == nil { - http.Error(w, "No capabilities available", http.StatusServiceUnavailable) - return - } - - w.Header().Set("Content-Type", "application/json") - response := CapabilitiesResponse{ - Version: caps.Version, - DeviceName: caps.DeviceName, - VendorId: caps.VendorId, - MaxSessions: uint32(caps.MaxSessions), - MaxTransferSize: caps.MaxTransferSize, - ActiveSessions: uint32(caps.ActiveSessions), - RealRdma: caps.RealRdma, - PortGid: caps.PortGid, - PortLid: caps.PortLid, - SupportedAuth: caps.SupportedAuth, - } - json.NewEncoder(w).Encode(response) -} - -// RDMA ping handler -func (s *Sidecar) pingHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) - defer cancel() - - start := time.Now() - latency, err := s.rdmaClient.Ping(ctx) - totalLatency := time.Since(start) - - if err != nil { - s.logger.WithError(err).Error("โŒ RDMA ping failed") - http.Error(w, fmt.Sprintf("Ping failed: %v", err), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - response := PingResponse{ - Success: true, - EngineLatency: latency.String(), - TotalLatency: totalLatency.String(), - Timestamp: time.Now().Format(time.RFC3339), - } - json.NewEncoder(w).Encode(response) -} - -// RDMA read handler - uses GET method with query parameters for RESTful read operations -func (s *Sidecar) rdmaReadHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Parse query parameters - query := r.URL.Query() - - // Get file ID (e.g., "3,01637037d6") - this is the natural SeaweedFS identifier - fileID := query.Get("file_id") - if fileID == "" { - http.Error(w, "missing 'file_id' parameter", http.StatusBadRequest) - return - } - - // Parse optional offset and size parameters - offset := uint64(0) // default value - if offsetStr := query.Get("offset"); offsetStr != "" { - val, err := strconv.ParseUint(offsetStr, 10, 64) - if err != nil { - http.Error(w, "invalid 'offset' parameter", http.StatusBadRequest) - return - } - offset = val - } - - size := uint64(4096) // default value - if sizeStr := query.Get("size"); sizeStr != "" { - val, err := strconv.ParseUint(sizeStr, 10, 64) - if err != nil { - http.Error(w, "invalid 'size' parameter", http.StatusBadRequest) - return - } - size = val - } - - s.logger.WithFields(logrus.Fields{ - "file_id": fileID, - "offset": offset, - "size": size, - }).Info("๐Ÿ“– Processing RDMA read request") - - ctx, cancel := context.WithTimeout(r.Context(), timeout) - defer cancel() - - start := time.Now() - resp, err := s.rdmaClient.ReadFileRange(ctx, fileID, offset, size) - duration := time.Since(start) - - if err != nil { - s.logger.WithError(err).Error("โŒ RDMA read failed") - http.Error(w, fmt.Sprintf("RDMA read failed: %v", err), http.StatusInternalServerError) - return - } - - s.logger.WithFields(logrus.Fields{ - "file_id": fileID, - "bytes_read": resp.BytesRead, - "duration": duration, - "transfer_rate": resp.TransferRate, - "session_id": resp.SessionID, - }).Info("โœ… RDMA read completed successfully") - - // Set response headers - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("X-RDMA-Session-ID", resp.SessionID) - w.Header().Set("X-RDMA-Duration", duration.String()) - w.Header().Set("X-RDMA-Transfer-Rate", fmt.Sprintf("%.2f", resp.TransferRate)) - w.Header().Set("X-RDMA-Bytes-Read", fmt.Sprintf("%d", resp.BytesRead)) - - // Write the data - w.Write(resp.Data) -} diff --git a/seaweedfs-rdma-sidecar/cmd/test-rdma/main.go b/seaweedfs-rdma-sidecar/cmd/test-rdma/main.go deleted file mode 100644 index 4f2b2da43..000000000 --- a/seaweedfs-rdma-sidecar/cmd/test-rdma/main.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package main provides a test client for the RDMA engine integration -package main - -import ( - "context" - "fmt" - "os" - "time" - - "seaweedfs-rdma-sidecar/pkg/rdma" - - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -var ( - socketPath string - debug bool - timeout time.Duration - volumeID uint32 - needleID uint64 - cookie uint32 - offset uint64 - size uint64 -) - -func main() { - var rootCmd = &cobra.Command{ - Use: "test-rdma", - Short: "Test client for SeaweedFS RDMA engine integration", - Long: `Test client that demonstrates communication between Go sidecar and Rust RDMA engine. - -This tool allows you to test various RDMA operations including: -- Engine connectivity and capabilities -- RDMA read operations with mock data -- Performance measurements -- IPC protocol validation`, - } - - // Global flags - defaultSocketPath := os.Getenv("RDMA_SOCKET_PATH") - if defaultSocketPath == "" { - defaultSocketPath = "/tmp/rdma-engine.sock" - } - rootCmd.PersistentFlags().StringVarP(&socketPath, "socket", "s", defaultSocketPath, "Path to RDMA engine Unix socket (env: RDMA_SOCKET_PATH)") - rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "d", false, "Enable debug logging") - rootCmd.PersistentFlags().DurationVarP(&timeout, "timeout", "t", 30*time.Second, "Operation timeout") - - // Subcommands - rootCmd.AddCommand(pingCmd()) - rootCmd.AddCommand(capsCmd()) - rootCmd.AddCommand(readCmd()) - rootCmd.AddCommand(benchCmd()) - - if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func pingCmd() *cobra.Command { - return &cobra.Command{ - Use: "ping", - Short: "Test connectivity to RDMA engine", - Long: "Send a ping message to the RDMA engine and measure latency", - RunE: func(cmd *cobra.Command, args []string) error { - client := createClient() - defer client.Disconnect() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - fmt.Printf("๐Ÿ“ Pinging RDMA engine at %s...\n", socketPath) - - if err := client.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - - latency, err := client.Ping(ctx) - if err != nil { - return fmt.Errorf("ping failed: %w", err) - } - - fmt.Printf("โœ… Ping successful! Latency: %v\n", latency) - return nil - }, - } -} - -func capsCmd() *cobra.Command { - return &cobra.Command{ - Use: "capabilities", - Short: "Get RDMA engine capabilities", - Long: "Query the RDMA engine for its current capabilities and status", - RunE: func(cmd *cobra.Command, args []string) error { - client := createClient() - defer client.Disconnect() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - fmt.Printf("๐Ÿ” Querying RDMA engine capabilities...\n") - - if err := client.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - - caps := client.GetCapabilities() - if caps == nil { - return fmt.Errorf("no capabilities received") - } - - fmt.Printf("\n๐Ÿ“Š RDMA Engine Capabilities:\n") - fmt.Printf(" Version: %s\n", caps.Version) - fmt.Printf(" Max Sessions: %d\n", caps.MaxSessions) - fmt.Printf(" Max Transfer Size: %d bytes (%.1f MB)\n", caps.MaxTransferSize, float64(caps.MaxTransferSize)/(1024*1024)) - fmt.Printf(" Active Sessions: %d\n", caps.ActiveSessions) - fmt.Printf(" Real RDMA: %t\n", caps.RealRdma) - fmt.Printf(" Port GID: %s\n", caps.PortGid) - fmt.Printf(" Port LID: %d\n", caps.PortLid) - fmt.Printf(" Supported Auth: %v\n", caps.SupportedAuth) - - if caps.RealRdma { - fmt.Printf("๐Ÿš€ Hardware RDMA enabled!\n") - } else { - fmt.Printf("๐ŸŸก Using mock RDMA (development mode)\n") - } - - return nil - }, - } -} - -func readCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "read", - Short: "Test RDMA read operation", - Long: "Perform a test RDMA read operation with specified parameters", - RunE: func(cmd *cobra.Command, args []string) error { - client := createClient() - defer client.Disconnect() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - fmt.Printf("๐Ÿ“– Testing RDMA read operation...\n") - fmt.Printf(" Volume ID: %d\n", volumeID) - fmt.Printf(" Needle ID: %d\n", needleID) - fmt.Printf(" Cookie: 0x%x\n", cookie) - fmt.Printf(" Offset: %d\n", offset) - fmt.Printf(" Size: %d bytes\n", size) - - if err := client.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - - start := time.Now() - resp, err := client.ReadRange(ctx, volumeID, needleID, cookie, offset, size) - if err != nil { - return fmt.Errorf("read failed: %w", err) - } - - duration := time.Since(start) - - fmt.Printf("\nโœ… RDMA read completed successfully!\n") - fmt.Printf(" Session ID: %s\n", resp.SessionID) - fmt.Printf(" Bytes Read: %d\n", resp.BytesRead) - fmt.Printf(" Duration: %v\n", duration) - fmt.Printf(" Transfer Rate: %.2f MB/s\n", resp.TransferRate) - fmt.Printf(" Success: %t\n", resp.Success) - fmt.Printf(" Message: %s\n", resp.Message) - - // Show first few bytes of data for verification - if len(resp.Data) > 0 { - displayLen := 32 - if len(resp.Data) < displayLen { - displayLen = len(resp.Data) - } - fmt.Printf(" Data (first %d bytes): %x\n", displayLen, resp.Data[:displayLen]) - } - - return nil - }, - } - - cmd.Flags().Uint32VarP(&volumeID, "volume", "v", 1, "Volume ID") - cmd.Flags().Uint64VarP(&needleID, "needle", "n", 100, "Needle ID") - cmd.Flags().Uint32VarP(&cookie, "cookie", "c", 0x12345678, "Needle cookie") - cmd.Flags().Uint64VarP(&offset, "offset", "o", 0, "Read offset") - cmd.Flags().Uint64VarP(&size, "size", "z", 4096, "Read size in bytes") - - return cmd -} - -func benchCmd() *cobra.Command { - var ( - iterations int - readSize uint64 - ) - - cmd := &cobra.Command{ - Use: "bench", - Short: "Benchmark RDMA read performance", - Long: "Run multiple RDMA read operations and measure performance statistics", - RunE: func(cmd *cobra.Command, args []string) error { - client := createClient() - defer client.Disconnect() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - fmt.Printf("๐Ÿ Starting RDMA read benchmark...\n") - fmt.Printf(" Iterations: %d\n", iterations) - fmt.Printf(" Read Size: %d bytes\n", readSize) - fmt.Printf(" Socket: %s\n", socketPath) - - if err := client.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect: %w", err) - } - - // Warmup - fmt.Printf("๐Ÿ”ฅ Warming up...\n") - for i := 0; i < 5; i++ { - _, err := client.ReadRange(ctx, 1, uint64(i+1), 0x12345678, 0, readSize) - if err != nil { - return fmt.Errorf("warmup read %d failed: %w", i+1, err) - } - } - - // Benchmark - fmt.Printf("๐Ÿ“Š Running benchmark...\n") - var totalDuration time.Duration - var totalBytes uint64 - successful := 0 - - startTime := time.Now() - for i := 0; i < iterations; i++ { - opStart := time.Now() - resp, err := client.ReadRange(ctx, 1, uint64(i+1), 0x12345678, 0, readSize) - opDuration := time.Since(opStart) - - if err != nil { - fmt.Printf("โŒ Read %d failed: %v\n", i+1, err) - continue - } - - totalDuration += opDuration - totalBytes += resp.BytesRead - successful++ - - if (i+1)%10 == 0 || i == iterations-1 { - fmt.Printf(" Completed %d/%d reads\n", i+1, iterations) - } - } - benchDuration := time.Since(startTime) - - // Calculate statistics - avgLatency := totalDuration / time.Duration(successful) - throughputMBps := float64(totalBytes) / benchDuration.Seconds() / (1024 * 1024) - opsPerSec := float64(successful) / benchDuration.Seconds() - - fmt.Printf("\n๐Ÿ“ˆ Benchmark Results:\n") - fmt.Printf(" Total Duration: %v\n", benchDuration) - fmt.Printf(" Successful Operations: %d/%d (%.1f%%)\n", successful, iterations, float64(successful)/float64(iterations)*100) - fmt.Printf(" Total Bytes Transferred: %d (%.1f MB)\n", totalBytes, float64(totalBytes)/(1024*1024)) - fmt.Printf(" Average Latency: %v\n", avgLatency) - fmt.Printf(" Throughput: %.2f MB/s\n", throughputMBps) - fmt.Printf(" Operations/sec: %.1f\n", opsPerSec) - - return nil - }, - } - - cmd.Flags().IntVarP(&iterations, "iterations", "i", 100, "Number of read operations") - cmd.Flags().Uint64VarP(&readSize, "read-size", "r", 4096, "Size of each read in bytes") - - return cmd -} - -func createClient() *rdma.Client { - logger := logrus.New() - if debug { - logger.SetLevel(logrus.DebugLevel) - } else { - logger.SetLevel(logrus.InfoLevel) - } - - config := &rdma.Config{ - EngineSocketPath: socketPath, - DefaultTimeout: timeout, - Logger: logger, - } - - return rdma.NewClient(config) -} diff --git a/seaweedfs-rdma-sidecar/demo-server b/seaweedfs-rdma-sidecar/demo-server deleted file mode 100755 index 737f1721c..000000000 Binary files a/seaweedfs-rdma-sidecar/demo-server and /dev/null differ diff --git a/seaweedfs-rdma-sidecar/docker-compose.mount-rdma.yml b/seaweedfs-rdma-sidecar/docker-compose.mount-rdma.yml deleted file mode 100644 index 9098515ef..000000000 --- a/seaweedfs-rdma-sidecar/docker-compose.mount-rdma.yml +++ /dev/null @@ -1,267 +0,0 @@ -services: - # SeaweedFS Master - seaweedfs-master: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-master - ports: - - "9333:9333" - - "19333:19333" - command: > - master - -port=9333 - -mdir=/data - -volumeSizeLimitMB=1024 - -defaultReplication=000 - volumes: - - seaweedfs_master_data:/data - networks: - - seaweedfs-rdma - healthcheck: - test: ["CMD", "wget", "--timeout=10", "--quiet", "--tries=1", "--spider", "http://127.0.0.1:9333/cluster/status"] - interval: 10s - timeout: 10s - retries: 6 - start_period: 60s - - # SeaweedFS Volume Server - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-volume - ports: - - "8080:8080" - - "18080:18080" - command: > - volume - -mserver=seaweedfs-master:9333 - -port=8080 - -dir=/data - -max=100 - volumes: - - seaweedfs_volume_data:/data - networks: - - seaweedfs-rdma - depends_on: - seaweedfs-master: - condition: service_healthy - healthcheck: - test: ["CMD", "sh", "-c", "pgrep weed && netstat -tln | grep :8080"] - interval: 10s - timeout: 10s - retries: 6 - start_period: 30s - - # SeaweedFS Filer - seaweedfs-filer: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-filer - ports: - - "8888:8888" - - "18888:18888" - command: > - filer - -master=seaweedfs-master:9333 - -port=8888 - -defaultReplicaPlacement=000 - networks: - - seaweedfs-rdma - depends_on: - seaweedfs-master: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - healthcheck: - test: ["CMD", "sh", "-c", "pgrep weed && netstat -tln | grep :8888"] - interval: 10s - timeout: 10s - retries: 6 - start_period: 45s - - # RDMA Engine (Rust) - rdma-engine: - build: - context: . - dockerfile: Dockerfile.rdma-engine - container_name: rdma-engine - volumes: - - rdma_socket:/tmp/rdma - networks: - - seaweedfs-rdma - environment: - - RUST_LOG=debug - - RDMA_SOCKET_PATH=/tmp/rdma/rdma-engine.sock - - RDMA_DEVICE=auto - - RDMA_PORT=18515 - - RDMA_GID_INDEX=0 - - DEBUG=true - command: > - ./rdma-engine-server - --ipc-socket ${RDMA_SOCKET_PATH} - --device ${RDMA_DEVICE} - --port ${RDMA_PORT} - --debug - healthcheck: - test: ["CMD", "sh", "-c", "pgrep rdma-engine-server >/dev/null && test -S /tmp/rdma/rdma-engine.sock"] - interval: 5s - timeout: 3s - retries: 5 - start_period: 10s - - # RDMA Sidecar (Go) - rdma-sidecar: - build: - context: . - dockerfile: Dockerfile.sidecar - container_name: rdma-sidecar - ports: - - "8081:8081" - volumes: - - rdma_socket:/tmp/rdma - networks: - - seaweedfs-rdma - environment: - - RDMA_SOCKET_PATH=/tmp/rdma/rdma-engine.sock - - VOLUME_SERVER_URL=http://seaweedfs-volume:8080 - - SIDECAR_PORT=8081 - - ENABLE_RDMA=true - - ENABLE_ZEROCOPY=true - - ENABLE_POOLING=true - - MAX_CONNECTIONS=10 - - MAX_IDLE_TIME=5m - - DEBUG=true - command: > - ./demo-server - --port ${SIDECAR_PORT} - --rdma-socket ${RDMA_SOCKET_PATH} - --volume-server ${VOLUME_SERVER_URL} - --enable-rdma - --enable-zerocopy - --enable-pooling - --max-connections ${MAX_CONNECTIONS} - --max-idle-time ${MAX_IDLE_TIME} - --debug - depends_on: - rdma-engine: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/health"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 15s - - # SeaweedFS Mount with RDMA - seaweedfs-mount: - build: - context: . - dockerfile: Dockerfile.mount-rdma - platform: linux/amd64 - container_name: seaweedfs-mount - privileged: true # Required for FUSE - devices: - - /dev/fuse:/dev/fuse - cap_add: - - SYS_ADMIN - volumes: - - seaweedfs_mount:/mnt/seaweedfs - - /tmp/seaweedfs-mount-logs:/var/log/seaweedfs - networks: - - seaweedfs-rdma - environment: - - FILER_ADDR=seaweedfs-filer:8888 - - RDMA_SIDECAR_ADDR=rdma-sidecar:8081 - - MOUNT_POINT=/mnt/seaweedfs - - RDMA_ENABLED=true - - RDMA_FALLBACK=true - - RDMA_MAX_CONCURRENT=64 - - RDMA_TIMEOUT_MS=5000 - - DEBUG=true - command: /usr/local/bin/mount-helper.sh - depends_on: - seaweedfs-filer: - condition: service_healthy - rdma-sidecar: - condition: service_healthy - healthcheck: - test: ["CMD", "mountpoint", "-q", "/mnt/seaweedfs"] - interval: 15s - timeout: 10s - retries: 3 - start_period: 45s - - # Integration Test Runner - integration-test: - build: - context: . - dockerfile: Dockerfile.integration-test - container_name: integration-test - volumes: - - seaweedfs_mount:/mnt/seaweedfs - - ./test-results:/test-results - networks: - - seaweedfs-rdma - environment: - - MOUNT_POINT=/mnt/seaweedfs - - FILER_ADDR=seaweedfs-filer:8888 - - RDMA_SIDECAR_ADDR=rdma-sidecar:8081 - - TEST_RESULTS_DIR=/test-results - depends_on: - seaweedfs-mount: - condition: service_healthy - command: > - sh -c " - echo 'Starting RDMA Mount Integration Tests...' && - sleep 10 && - /usr/local/bin/run-integration-tests.sh - " - profiles: - - test - - # Performance Test Runner - performance-test: - build: - context: . - dockerfile: Dockerfile.performance-test - container_name: performance-test - volumes: - - seaweedfs_mount:/mnt/seaweedfs - - ./performance-results:/performance-results - networks: - - seaweedfs-rdma - environment: - - MOUNT_POINT=/mnt/seaweedfs - - RDMA_SIDECAR_ADDR=rdma-sidecar:8081 - - PERFORMANCE_RESULTS_DIR=/performance-results - depends_on: - seaweedfs-mount: - condition: service_healthy - command: > - sh -c " - echo 'Starting RDMA Mount Performance Tests...' && - sleep 10 && - /usr/local/bin/run-performance-tests.sh - " - profiles: - - performance - -volumes: - seaweedfs_master_data: - driver: local - seaweedfs_volume_data: - driver: local - seaweedfs_mount: - driver: local - driver_opts: - type: tmpfs - device: tmpfs - o: size=1g - rdma_socket: - driver: local - -networks: - seaweedfs-rdma: - driver: bridge - ipam: - config: - - subnet: 172.20.0.0/16 diff --git a/seaweedfs-rdma-sidecar/docker-compose.rdma-sim.yml b/seaweedfs-rdma-sidecar/docker-compose.rdma-sim.yml deleted file mode 100644 index 527a0d67b..000000000 --- a/seaweedfs-rdma-sidecar/docker-compose.rdma-sim.yml +++ /dev/null @@ -1,209 +0,0 @@ -services: - # SeaweedFS Master Server - seaweedfs-master: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-master - command: master -ip=seaweedfs-master -port=9333 -mdir=/data - ports: - - "9333:9333" - volumes: - - master-data:/data - networks: - - seaweedfs-rdma - healthcheck: - test: ["CMD", "pgrep", "-f", "weed"] - interval: 15s - timeout: 10s - retries: 5 - start_period: 30s - - # SeaweedFS Volume Server - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-volume - command: volume -mserver=seaweedfs-master:9333 -ip=seaweedfs-volume -port=8080 -dir=/data - ports: - - "8080:8080" - volumes: - - volume-data:/data - depends_on: - seaweedfs-master: - condition: service_healthy - networks: - - seaweedfs-rdma - healthcheck: - test: ["CMD", "pgrep", "-f", "weed"] - interval: 15s - timeout: 10s - retries: 5 - start_period: 30s - - # RDMA Simulation Environment - rdma-simulation: - build: - context: . - dockerfile: docker/Dockerfile.rdma-simulation - container_name: rdma-simulation - privileged: true # Required for RDMA kernel module loading - environment: - - RDMA_DEVICE=rxe0 - - UCX_TLS=rc_verbs,ud_verbs,tcp - - UCX_LOG_LEVEL=info - volumes: - - /lib/modules:/lib/modules:ro # Host kernel modules - - /sys:/sys # Required for sysfs access - - rdma-simulation-data:/opt/rdma-sim/data - networks: - - seaweedfs-rdma - ports: - - "18515:18515" # RDMA application port - - "4791:4791" # RDMA CM port - - "4792:4792" # Additional RDMA port - command: | - bash -c " - echo '๐Ÿš€ Setting up RDMA simulation environment...' - sudo /opt/rdma-sim/setup-soft-roce.sh || echo 'RDMA setup failed, continuing...' - echo '๐Ÿ“‹ RDMA environment status:' - /opt/rdma-sim/test-rdma.sh || true - echo '๐Ÿ”ง UCX information:' - /opt/rdma-sim/ucx-info.sh || true - echo 'โœ… RDMA simulation ready - keeping container alive...' - tail -f /dev/null - " - healthcheck: - test: ["CMD", "test", "-f", "/opt/rdma-sim/setup-soft-roce.sh"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 30s - - # Rust RDMA Engine (with RDMA simulation support) - rdma-engine: - build: - context: . - dockerfile: Dockerfile.rdma-engine - container_name: rdma-engine - environment: - - RUST_LOG=debug - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - # UCX configuration for real RDMA - - UCX_TLS=rc_verbs,ud_verbs,tcp,shm - - UCX_NET_DEVICES=all - - UCX_LOG_LEVEL=info - - UCX_RNDV_SCHEME=put_zcopy - - UCX_RNDV_THRESH=8192 - volumes: - - rdma-socket:/tmp - # Share network namespace with RDMA simulation for device access - network_mode: "container:rdma-simulation" - depends_on: - rdma-simulation: - condition: service_healthy - command: ["./rdma-engine-server", "--debug", "--ipc-socket", "/tmp/rdma-engine.sock"] - healthcheck: - test: ["CMD", "test", "-S", "/tmp/rdma-engine.sock"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 15s - - # Go RDMA Sidecar / Demo Server - rdma-sidecar: - build: - context: . - dockerfile: Dockerfile.sidecar - container_name: rdma-sidecar - ports: - - "8081:8081" - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - VOLUME_SERVER_URL=http://seaweedfs-volume:8080 - - DEBUG=true - volumes: - - rdma-socket:/tmp - depends_on: - rdma-engine: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - networks: - - seaweedfs-rdma - command: [ - "./demo-server", - "--port", "8081", - "--rdma-socket", "/tmp/rdma-engine.sock", - "--volume-server", "http://seaweedfs-volume:8080", - "--enable-rdma", - "--debug" - ] - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/health"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 20s - - # Test Client for Integration Testing - test-client: - build: - context: . - dockerfile: Dockerfile.test-client - container_name: test-client - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - SIDECAR_URL=http://rdma-sidecar:8081 - - SEAWEEDFS_MASTER=http://seaweedfs-master:9333 - - SEAWEEDFS_VOLUME=http://seaweedfs-volume:8080 - volumes: - - rdma-socket:/tmp - depends_on: - rdma-sidecar: - condition: service_healthy - networks: - - seaweedfs-rdma - profiles: - - testing - command: ["tail", "-f", "/dev/null"] # Keep container running for manual testing - - # Integration Test Runner with RDMA - integration-tests-rdma: - build: - context: . - dockerfile: Dockerfile.test-client - container_name: integration-tests-rdma - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - SIDECAR_URL=http://rdma-sidecar:8081 - - SEAWEEDFS_MASTER=http://seaweedfs-master:9333 - - SEAWEEDFS_VOLUME=http://seaweedfs-volume:8080 - - RDMA_SIMULATION=true - volumes: - - rdma-socket:/tmp - - ./tests:/tests - depends_on: - rdma-sidecar: - condition: service_healthy - rdma-simulation: - condition: service_healthy - networks: - - seaweedfs-rdma - profiles: - - testing - command: ["/tests/run-integration-tests.sh"] - -volumes: - master-data: - driver: local - volume-data: - driver: local - rdma-socket: - driver: local - rdma-simulation-data: - driver: local - -networks: - seaweedfs-rdma: - driver: bridge - ipam: - config: - - subnet: 172.20.0.0/16 diff --git a/seaweedfs-rdma-sidecar/docker-compose.yml b/seaweedfs-rdma-sidecar/docker-compose.yml deleted file mode 100644 index b2970f114..000000000 --- a/seaweedfs-rdma-sidecar/docker-compose.yml +++ /dev/null @@ -1,157 +0,0 @@ -services: - # SeaweedFS Master Server - seaweedfs-master: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-master - command: master -ip=seaweedfs-master -port=9333 -mdir=/data - ports: - - "9333:9333" - volumes: - - master-data:/data - networks: - - seaweedfs-rdma - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9333/cluster/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - - # SeaweedFS Volume Server - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-volume - command: volume -mserver=seaweedfs-master:9333 -ip=seaweedfs-volume -port=8080 -dir=/data - ports: - - "8080:8080" - volumes: - - volume-data:/data - depends_on: - seaweedfs-master: - condition: service_healthy - networks: - - seaweedfs-rdma - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 15s - - # Rust RDMA Engine - rdma-engine: - build: - context: . - dockerfile: Dockerfile.rdma-engine.simple - container_name: rdma-engine - environment: - - RUST_LOG=debug - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - volumes: - - rdma-socket:/tmp - # Note: hugepages mount commented out to avoid host system requirements - # - /dev/hugepages:/dev/hugepages - # Privileged mode for RDMA access (in production, use specific capabilities) - privileged: true - networks: - - seaweedfs-rdma - command: ["./rdma-engine-server", "--debug", "--ipc-socket", "/tmp/rdma-engine.sock"] - healthcheck: - test: ["CMD", "test", "-S", "/tmp/rdma-engine.sock"] - interval: 5s - timeout: 3s - retries: 5 - start_period: 10s - - # Go RDMA Sidecar / Demo Server - rdma-sidecar: - build: - context: . - dockerfile: Dockerfile.sidecar - container_name: rdma-sidecar - ports: - - "8081:8081" - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - VOLUME_SERVER_URL=http://seaweedfs-volume:8080 - - DEBUG=true - volumes: - - rdma-socket:/tmp - depends_on: - rdma-engine: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - networks: - - seaweedfs-rdma - command: [ - "./demo-server", - "--port", "8081", - "--rdma-socket", "/tmp/rdma-engine.sock", - "--volume-server", "http://seaweedfs-volume:8080", - "--enable-rdma", - "--debug" - ] - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/health"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 15s - - # Test Client for Integration Testing - test-client: - build: - context: . - dockerfile: Dockerfile.test-client - container_name: test-client - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - SIDECAR_URL=http://rdma-sidecar:8081 - - SEAWEEDFS_MASTER=http://seaweedfs-master:9333 - - SEAWEEDFS_VOLUME=http://seaweedfs-volume:8080 - volumes: - - rdma-socket:/tmp - depends_on: - rdma-sidecar: - condition: service_healthy - networks: - - seaweedfs-rdma - profiles: - - testing - command: ["tail", "-f", "/dev/null"] # Keep container running for manual testing - - # Integration Test Runner - integration-tests: - build: - context: . - dockerfile: Dockerfile.test-client - container_name: integration-tests - environment: - - RDMA_SOCKET_PATH=/tmp/rdma-engine.sock - - SIDECAR_URL=http://rdma-sidecar:8081 - - SEAWEEDFS_MASTER=http://seaweedfs-master:9333 - - SEAWEEDFS_VOLUME=http://seaweedfs-volume:8080 - volumes: - - rdma-socket:/tmp - - ./tests:/tests - depends_on: - rdma-sidecar: - condition: service_healthy - networks: - - seaweedfs-rdma - profiles: - - testing - command: ["/tests/run-integration-tests.sh"] - -volumes: - master-data: - driver: local - volume-data: - driver: local - rdma-socket: - driver: local - -networks: - seaweedfs-rdma: - driver: bridge diff --git a/seaweedfs-rdma-sidecar/docker/Dockerfile.rdma-simulation b/seaweedfs-rdma-sidecar/docker/Dockerfile.rdma-simulation deleted file mode 100644 index 9f2566623..000000000 --- a/seaweedfs-rdma-sidecar/docker/Dockerfile.rdma-simulation +++ /dev/null @@ -1,77 +0,0 @@ -# RDMA Simulation Container with Soft-RoCE (RXE) -# This container enables software RDMA over regular Ethernet - -FROM ubuntu:22.04 - -# Install RDMA and networking tools -RUN apt-get update && apt-get install -y \ - # System utilities - sudo \ - # RDMA core libraries - libibverbs1 \ - libibverbs-dev \ - librdmacm1 \ - librdmacm-dev \ - rdma-core \ - ibverbs-utils \ - infiniband-diags \ - # Network tools - iproute2 \ - iputils-ping \ - net-tools \ - # Build tools - build-essential \ - pkg-config \ - cmake \ - # UCX dependencies - libnuma1 \ - libnuma-dev \ - # UCX library (pre-built) - try to install but don't fail if not available - # libucx0 \ - # libucx-dev \ - # Debugging tools - strace \ - gdb \ - valgrind \ - # Utilities - curl \ - wget \ - vim \ - htop \ - && rm -rf /var/lib/apt/lists/* - -# Try to install UCX tools (optional, may not be available in all repositories) -RUN apt-get update && \ - (apt-get install -y ucx-tools || echo "UCX tools not available in repository") && \ - rm -rf /var/lib/apt/lists/* - -# Create rdmauser for security (avoid conflict with system rdma group) -RUN useradd -m -s /bin/bash -G sudo,rdma rdmauser && \ - echo "rdmauser ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers - -# Create directories for RDMA setup -RUN mkdir -p /opt/rdma-sim /var/log/rdma - -# Copy RDMA simulation scripts -COPY docker/scripts/setup-soft-roce.sh /opt/rdma-sim/ -COPY docker/scripts/test-rdma.sh /opt/rdma-sim/ -COPY docker/scripts/ucx-info.sh /opt/rdma-sim/ - -# Make scripts executable -RUN chmod +x /opt/rdma-sim/*.sh - -# Set working directory -WORKDIR /opt/rdma-sim - -# Switch to rdmauser -USER rdmauser - -# Default command -CMD ["/bin/bash"] - -# Health check for RDMA devices -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD /opt/rdma-sim/test-rdma.sh || exit 1 - -# Expose common RDMA ports -EXPOSE 18515 4791 4792 diff --git a/seaweedfs-rdma-sidecar/docker/scripts/setup-soft-roce.sh b/seaweedfs-rdma-sidecar/docker/scripts/setup-soft-roce.sh deleted file mode 100755 index 55c8f3b80..000000000 --- a/seaweedfs-rdma-sidecar/docker/scripts/setup-soft-roce.sh +++ /dev/null @@ -1,183 +0,0 @@ -#!/bin/bash - -# Setup Soft-RoCE (RXE) for RDMA simulation -# This script enables RDMA over Ethernet using the RXE kernel module - -set -e - -echo "๐Ÿ”ง Setting up Soft-RoCE (RXE) RDMA simulation..." - -# Function to check if running with required privileges -check_privileges() { - if [ "$EUID" -ne 0 ]; then - echo "โŒ This script requires root privileges" - echo "Run with: sudo $0 or inside a privileged container" - exit 1 - fi -} - -# Function to load RXE kernel module -load_rxe_module() { - echo "๐Ÿ“ฆ Loading RXE kernel module..." - - # Try to load the rdma_rxe module - if modprobe rdma_rxe 2>/dev/null; then - echo "โœ… rdma_rxe module loaded successfully" - else - echo "โš ๏ธ Failed to load rdma_rxe module, trying alternative approach..." - - # Alternative: Try loading rxe_net (older kernels) - if modprobe rxe_net 2>/dev/null; then - echo "โœ… rxe_net module loaded successfully" - else - echo "โŒ Failed to load RXE modules. Possible causes:" - echo " - Kernel doesn't support RXE (needs CONFIG_RDMA_RXE=m)" - echo " - Running in unprivileged container" - echo " - Missing kernel modules" - echo "" - echo "๐Ÿ”ง Workaround: Run container with --privileged flag" - exit 1 - fi - fi - - # Verify module is loaded - if lsmod | grep -q "rdma_rxe\|rxe_net"; then - echo "โœ… RXE module verification successful" - else - echo "โŒ RXE module verification failed" - exit 1 - fi -} - -# Function to setup virtual RDMA device -setup_rxe_device() { - echo "๐ŸŒ Setting up RXE device over Ethernet interface..." - - # Find available network interface (prefer eth0, fallback to others) - local interface="" - for iface in eth0 enp0s3 enp0s8 lo; do - if ip link show "$iface" >/dev/null 2>&1; then - interface="$iface" - break - fi - done - - if [ -z "$interface" ]; then - echo "โŒ No suitable network interface found" - echo "Available interfaces:" - ip link show | grep "^[0-9]" | cut -d':' -f2 | tr -d ' ' - exit 1 - fi - - echo "๐Ÿ“ก Using network interface: $interface" - - # Create RXE device - echo "๐Ÿ”จ Creating RXE device on $interface..." - - # Try modern rxe_cfg approach first - if command -v rxe_cfg >/dev/null 2>&1; then - rxe_cfg add "$interface" || { - echo "โš ๏ธ rxe_cfg failed, trying manual approach..." - setup_rxe_manual "$interface" - } - else - echo "โš ๏ธ rxe_cfg not available, using manual setup..." - setup_rxe_manual "$interface" - fi -} - -# Function to manually setup RXE device -setup_rxe_manual() { - local interface="$1" - - # Use sysfs interface to create RXE device - if [ -d /sys/module/rdma_rxe ]; then - echo "$interface" > /sys/module/rdma_rxe/parameters/add 2>/dev/null || { - echo "โŒ Failed to add RXE device via sysfs" - exit 1 - } - else - echo "โŒ RXE sysfs interface not found" - exit 1 - fi -} - -# Function to verify RDMA devices -verify_rdma_devices() { - echo "๐Ÿ” Verifying RDMA devices..." - - # Check for RDMA devices - if [ -d /sys/class/infiniband ]; then - local devices=$(ls /sys/class/infiniband/ 2>/dev/null | wc -l) - if [ "$devices" -gt 0 ]; then - echo "โœ… Found $devices RDMA device(s):" - ls /sys/class/infiniband/ - - # Show device details - for device in /sys/class/infiniband/*; do - if [ -d "$device" ]; then - local dev_name=$(basename "$device") - echo " ๐Ÿ“‹ Device: $dev_name" - - # Try to get device info - if command -v ibv_devinfo >/dev/null 2>&1; then - ibv_devinfo -d "$dev_name" | head -10 - fi - fi - done - else - echo "โŒ No RDMA devices found in /sys/class/infiniband/" - exit 1 - fi - else - echo "โŒ /sys/class/infiniband directory not found" - exit 1 - fi -} - -# Function to test basic RDMA functionality -test_basic_rdma() { - echo "๐Ÿงช Testing basic RDMA functionality..." - - # Test libibverbs - if command -v ibv_devinfo >/dev/null 2>&1; then - echo "๐Ÿ“‹ RDMA device information:" - ibv_devinfo | head -20 - else - echo "โš ๏ธ ibv_devinfo not available" - fi - - # Test UCX if available - if command -v ucx_info >/dev/null 2>&1; then - echo "๐Ÿ“‹ UCX information:" - ucx_info -d | head -10 - else - echo "โš ๏ธ UCX tools not available" - fi -} - -# Main execution -main() { - echo "๐Ÿš€ Starting Soft-RoCE RDMA simulation setup..." - echo "======================================" - - check_privileges - load_rxe_module - setup_rxe_device - verify_rdma_devices - test_basic_rdma - - echo "" - echo "๐ŸŽ‰ Soft-RoCE setup completed successfully!" - echo "======================================" - echo "โœ… RDMA simulation is ready for testing" - echo "๐Ÿ“ก You can now run RDMA applications" - echo "" - echo "Next steps:" - echo " - Test with: /opt/rdma-sim/test-rdma.sh" - echo " - Check UCX: /opt/rdma-sim/ucx-info.sh" - echo " - Run your RDMA applications" -} - -# Execute main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/docker/scripts/test-rdma.sh b/seaweedfs-rdma-sidecar/docker/scripts/test-rdma.sh deleted file mode 100755 index 91e60ca7f..000000000 --- a/seaweedfs-rdma-sidecar/docker/scripts/test-rdma.sh +++ /dev/null @@ -1,253 +0,0 @@ -#!/bin/bash - -# Test RDMA functionality in simulation environment -# This script validates that RDMA devices and libraries are working - -set -e - -echo "๐Ÿงช Testing RDMA simulation environment..." - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Function to print colored output -print_status() { - local status="$1" - local message="$2" - - case "$status" in - "success") - echo -e "${GREEN}โœ… $message${NC}" - ;; - "warning") - echo -e "${YELLOW}โš ๏ธ $message${NC}" - ;; - "error") - echo -e "${RED}โŒ $message${NC}" - ;; - "info") - echo -e "${BLUE}๐Ÿ“‹ $message${NC}" - ;; - esac -} - -# Function to test RDMA devices -test_rdma_devices() { - print_status "info" "Testing RDMA devices..." - - # Check for InfiniBand/RDMA devices - if [ -d /sys/class/infiniband ]; then - local device_count=$(ls /sys/class/infiniband/ 2>/dev/null | wc -l) - if [ "$device_count" -gt 0 ]; then - print_status "success" "Found $device_count RDMA device(s)" - - # List devices - for device in /sys/class/infiniband/*; do - if [ -d "$device" ]; then - local dev_name=$(basename "$device") - print_status "info" "Device: $dev_name" - fi - done - return 0 - else - print_status "error" "No RDMA devices found" - return 1 - fi - else - print_status "error" "/sys/class/infiniband directory not found" - return 1 - fi -} - -# Function to test libibverbs -test_libibverbs() { - print_status "info" "Testing libibverbs..." - - if command -v ibv_devinfo >/dev/null 2>&1; then - # Get device info - local device_info=$(ibv_devinfo 2>/dev/null) - if [ -n "$device_info" ]; then - print_status "success" "libibverbs working - devices detected" - - # Show basic info - echo "$device_info" | head -5 - - # Test device capabilities - if echo "$device_info" | grep -q "transport.*InfiniBand\|transport.*Ethernet"; then - print_status "success" "RDMA transport layer detected" - else - print_status "warning" "Transport layer information unclear" - fi - - return 0 - else - print_status "error" "ibv_devinfo found no devices" - return 1 - fi - else - print_status "error" "ibv_devinfo command not found" - return 1 - fi -} - -# Function to test UCX -test_ucx() { - print_status "info" "Testing UCX..." - - if command -v ucx_info >/dev/null 2>&1; then - # Test UCX device detection - local ucx_output=$(ucx_info -d 2>/dev/null) - if [ -n "$ucx_output" ]; then - print_status "success" "UCX detecting devices" - - # Show UCX device info - echo "$ucx_output" | head -10 - - # Check for RDMA transports - if echo "$ucx_output" | grep -q "rc\|ud\|dc"; then - print_status "success" "UCX RDMA transports available" - else - print_status "warning" "UCX RDMA transports not detected" - fi - - return 0 - else - print_status "warning" "UCX not detecting devices" - return 1 - fi - else - print_status "warning" "UCX tools not available" - return 1 - fi -} - -# Function to test RDMA CM (Connection Manager) -test_rdma_cm() { - print_status "info" "Testing RDMA Connection Manager..." - - # Check for RDMA CM device - if [ -e /dev/infiniband/rdma_cm ]; then - print_status "success" "RDMA CM device found" - return 0 - else - print_status "warning" "RDMA CM device not found" - return 1 - fi -} - -# Function to test basic RDMA operations -test_rdma_operations() { - print_status "info" "Testing basic RDMA operations..." - - # Try to run a simple RDMA test if tools are available - if command -v ibv_rc_pingpong >/dev/null 2>&1; then - # This would need a client/server setup, so just check if binary exists - print_status "success" "RDMA test tools available (ibv_rc_pingpong)" - else - print_status "warning" "RDMA test tools not available" - fi - - # Check for other useful RDMA utilities - local tools_found=0 - for tool in ibv_asyncwatch ibv_read_lat ibv_write_lat; do - if command -v "$tool" >/dev/null 2>&1; then - tools_found=$((tools_found + 1)) - fi - done - - if [ "$tools_found" -gt 0 ]; then - print_status "success" "Found $tools_found additional RDMA test tools" - else - print_status "warning" "No additional RDMA test tools found" - fi -} - -# Function to generate test summary -generate_summary() { - echo "" - print_status "info" "RDMA Simulation Test Summary" - echo "======================================" - - # Re-run key tests for summary - local devices_ok=0 - local libibverbs_ok=0 - local ucx_ok=0 - - if [ -d /sys/class/infiniband ] && [ "$(ls /sys/class/infiniband/ 2>/dev/null | wc -l)" -gt 0 ]; then - devices_ok=1 - fi - - if command -v ibv_devinfo >/dev/null 2>&1 && ibv_devinfo >/dev/null 2>&1; then - libibverbs_ok=1 - fi - - if command -v ucx_info >/dev/null 2>&1 && ucx_info -d >/dev/null 2>&1; then - ucx_ok=1 - fi - - echo "๐Ÿ“Š Test Results:" - [ "$devices_ok" -eq 1 ] && print_status "success" "RDMA Devices: PASS" || print_status "error" "RDMA Devices: FAIL" - [ "$libibverbs_ok" -eq 1 ] && print_status "success" "libibverbs: PASS" || print_status "error" "libibverbs: FAIL" - [ "$ucx_ok" -eq 1 ] && print_status "success" "UCX: PASS" || print_status "warning" "UCX: FAIL/WARNING" - - echo "" - if [ "$devices_ok" -eq 1 ] && [ "$libibverbs_ok" -eq 1 ]; then - print_status "success" "RDMA simulation environment is ready! ๐ŸŽ‰" - echo "" - print_status "info" "You can now:" - echo " - Run RDMA applications" - echo " - Test SeaweedFS RDMA engine with real RDMA" - echo " - Use UCX for high-performance transfers" - return 0 - else - print_status "error" "RDMA simulation setup needs attention" - echo "" - print_status "info" "Troubleshooting:" - echo " - Run setup script: sudo /opt/rdma-sim/setup-soft-roce.sh" - echo " - Check container privileges (--privileged flag)" - echo " - Verify kernel RDMA support" - return 1 - fi -} - -# Main test execution -main() { - echo "๐Ÿš€ RDMA Simulation Test Suite" - echo "======================================" - - # Run tests - test_rdma_devices || true - echo "" - - test_libibverbs || true - echo "" - - test_ucx || true - echo "" - - test_rdma_cm || true - echo "" - - test_rdma_operations || true - echo "" - - # Generate summary - generate_summary -} - -# Health check mode (for Docker healthcheck) -if [ "$1" = "healthcheck" ]; then - # Quick health check - just verify devices exist - if [ -d /sys/class/infiniband ] && [ "$(ls /sys/class/infiniband/ 2>/dev/null | wc -l)" -gt 0 ]; then - exit 0 - else - exit 1 - fi -fi - -# Execute main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/docker/scripts/ucx-info.sh b/seaweedfs-rdma-sidecar/docker/scripts/ucx-info.sh deleted file mode 100755 index 9bf287c6e..000000000 --- a/seaweedfs-rdma-sidecar/docker/scripts/ucx-info.sh +++ /dev/null @@ -1,269 +0,0 @@ -#!/bin/bash - -# UCX Information and Testing Script -# Provides detailed information about UCX configuration and capabilities - -set -e - -echo "๐Ÿ“‹ UCX (Unified Communication X) Information" -echo "=============================================" - -# Colors for output -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -print_section() { - echo -e "\n${BLUE}๐Ÿ“Œ $1${NC}" - echo "----------------------------------------" -} - -print_info() { - echo -e "${GREEN}โ„น๏ธ $1${NC}" -} - -print_warning() { - echo -e "${YELLOW}โš ๏ธ $1${NC}" -} - -# Function to check UCX installation -check_ucx_installation() { - print_section "UCX Installation Status" - - if command -v ucx_info >/dev/null 2>&1; then - print_info "UCX tools are installed" - - # Get UCX version - if ucx_info -v >/dev/null 2>&1; then - local version=$(ucx_info -v 2>/dev/null | head -1) - print_info "Version: $version" - fi - else - print_warning "UCX tools not found" - echo "Install with: apt-get install ucx-tools libucx-dev" - return 1 - fi - - # Check UCX libraries - local libs_found=0 - for lib in libucp.so libucs.so libuct.so; do - if ldconfig -p | grep -q "$lib"; then - libs_found=$((libs_found + 1)) - fi - done - - if [ "$libs_found" -eq 3 ]; then - print_info "All UCX libraries found (ucp, ucs, uct)" - else - print_warning "Some UCX libraries may be missing ($libs_found/3 found)" - fi -} - -# Function to show UCX device information -show_ucx_devices() { - print_section "UCX Transport Devices" - - if command -v ucx_info >/dev/null 2>&1; then - echo "Available UCX transports and devices:" - ucx_info -d 2>/dev/null || { - print_warning "Failed to get UCX device information" - return 1 - } - else - print_warning "ucx_info command not available" - return 1 - fi -} - -# Function to show UCX configuration -show_ucx_config() { - print_section "UCX Configuration" - - if command -v ucx_info >/dev/null 2>&1; then - echo "UCX configuration parameters:" - ucx_info -c 2>/dev/null | head -20 || { - print_warning "Failed to get UCX configuration" - return 1 - } - - echo "" - print_info "Key UCX environment variables:" - echo " UCX_TLS - Transport layers to use" - echo " UCX_NET_DEVICES - Network devices to use" - echo " UCX_LOG_LEVEL - Logging level (error, warn, info, debug, trace)" - echo " UCX_MEMTYPE_CACHE - Memory type caching (y/n)" - else - print_warning "ucx_info command not available" - return 1 - fi -} - -# Function to test UCX capabilities -test_ucx_capabilities() { - print_section "UCX Capability Testing" - - if command -v ucx_info >/dev/null 2>&1; then - print_info "Testing UCX transport capabilities..." - - # Check for RDMA transports - local ucx_transports=$(ucx_info -d 2>/dev/null | grep -i "transport\|tl:" || true) - - if echo "$ucx_transports" | grep -q "rc\|dc\|ud"; then - print_info "โœ… RDMA transports detected (RC/DC/UD)" - else - print_warning "No RDMA transports detected" - fi - - if echo "$ucx_transports" | grep -q "tcp"; then - print_info "โœ… TCP transport available" - else - print_warning "TCP transport not detected" - fi - - if echo "$ucx_transports" | grep -q "shm\|posix"; then - print_info "โœ… Shared memory transport available" - else - print_warning "Shared memory transport not detected" - fi - - # Memory types - print_info "Testing memory type support..." - local memory_info=$(ucx_info -d 2>/dev/null | grep -i "memory\|md:" || true) - if [ -n "$memory_info" ]; then - echo "$memory_info" | head -5 - fi - - else - print_warning "Cannot test UCX capabilities - ucx_info not available" - return 1 - fi -} - -# Function to show recommended UCX settings for RDMA -show_rdma_settings() { - print_section "Recommended UCX Settings for RDMA" - - print_info "For optimal RDMA performance with SeaweedFS:" - echo "" - echo "Environment Variables:" - echo " export UCX_TLS=rc_verbs,ud_verbs,rc_mlx5_dv,dc_mlx5_dv" - echo " export UCX_NET_DEVICES=all" - echo " export UCX_LOG_LEVEL=info" - echo " export UCX_RNDV_SCHEME=put_zcopy" - echo " export UCX_RNDV_THRESH=8192" - echo "" - - print_info "For development/debugging:" - echo " export UCX_LOG_LEVEL=debug" - echo " export UCX_LOG_FILE=/tmp/ucx.log" - echo "" - - print_info "For Soft-RoCE (RXE) specifically:" - echo " export UCX_TLS=rc_verbs,ud_verbs" - echo " export UCX_IB_DEVICE_SPECS=rxe0:1" - echo "" -} - -# Function to test basic UCX functionality -test_ucx_basic() { - print_section "Basic UCX Functionality Test" - - if command -v ucx_hello_world >/dev/null 2>&1; then - print_info "UCX hello_world test available" - echo "You can test UCX with:" - echo " Server: UCX_TLS=tcp ucx_hello_world -l" - echo " Client: UCX_TLS=tcp ucx_hello_world " - else - print_warning "UCX hello_world test not available" - fi - - # Check for other UCX test utilities - local test_tools=0 - for tool in ucx_perftest ucp_hello_world; do - if command -v "$tool" >/dev/null 2>&1; then - test_tools=$((test_tools + 1)) - print_info "UCX test tool available: $tool" - fi - done - - if [ "$test_tools" -eq 0 ]; then - print_warning "No UCX test tools found" - echo "Consider installing: ucx-tools package" - fi -} - -# Function to generate UCX summary -generate_summary() { - print_section "UCX Status Summary" - - local ucx_ok=0 - local devices_ok=0 - local rdma_ok=0 - - # Check UCX availability - if command -v ucx_info >/dev/null 2>&1; then - ucx_ok=1 - fi - - # Check devices - if command -v ucx_info >/dev/null 2>&1 && ucx_info -d >/dev/null 2>&1; then - devices_ok=1 - - # Check for RDMA - if ucx_info -d 2>/dev/null | grep -q "rc\|dc\|ud"; then - rdma_ok=1 - fi - fi - - echo "๐Ÿ“Š UCX Status:" - [ "$ucx_ok" -eq 1 ] && print_info "โœ… UCX Installation: OK" || print_warning "โŒ UCX Installation: Missing" - [ "$devices_ok" -eq 1 ] && print_info "โœ… UCX Devices: Detected" || print_warning "โŒ UCX Devices: Not detected" - [ "$rdma_ok" -eq 1 ] && print_info "โœ… RDMA Support: Available" || print_warning "โš ๏ธ RDMA Support: Limited/Missing" - - echo "" - if [ "$ucx_ok" -eq 1 ] && [ "$devices_ok" -eq 1 ]; then - print_info "๐ŸŽ‰ UCX is ready for SeaweedFS RDMA integration!" - - if [ "$rdma_ok" -eq 1 ]; then - print_info "๐Ÿš€ Real RDMA acceleration is available" - else - print_warning "๐Ÿ’ก Only TCP/shared memory transports available" - fi - else - print_warning "๐Ÿ”ง UCX setup needs attention for optimal performance" - fi -} - -# Main execution -main() { - check_ucx_installation - echo "" - - show_ucx_devices - echo "" - - show_ucx_config - echo "" - - test_ucx_capabilities - echo "" - - show_rdma_settings - echo "" - - test_ucx_basic - echo "" - - generate_summary - - echo "" - print_info "For SeaweedFS RDMA engine integration:" - echo " 1. Use UCX with your Rust engine" - echo " 2. Configure appropriate transport layers" - echo " 3. Test with SeaweedFS RDMA sidecar" - echo " 4. Monitor performance and adjust settings" -} - -# Execute main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/go.mod b/seaweedfs-rdma-sidecar/go.mod deleted file mode 100644 index 6d71a3a44..000000000 --- a/seaweedfs-rdma-sidecar/go.mod +++ /dev/null @@ -1,50 +0,0 @@ -module seaweedfs-rdma-sidecar - -go 1.24 - -require ( - github.com/seaweedfs/seaweedfs v0.0.0-00010101000000-000000000000 - github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.8.0 - github.com/vmihailenco/msgpack/v5 v5.4.1 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cognusion/imaging v1.0.2 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/prometheus/client_golang v1.23.0 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect - github.com/prometheus/procfs v0.17.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect - github.com/seaweedfs/goexif v1.0.3 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/spf13/viper v1.20.1 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/image v0.30.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/text v0.28.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect - google.golang.org/grpc v1.74.2 // indirect - google.golang.org/protobuf v1.36.7 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -// For local development, this replace directive is required to build the sidecar -// against the parent SeaweedFS module in this monorepo. -// -// To build this module, ensure the main SeaweedFS repository is checked out -// as a sibling directory to this `seaweedfs-rdma-sidecar` directory. -replace github.com/seaweedfs/seaweedfs => ../ diff --git a/seaweedfs-rdma-sidecar/go.sum b/seaweedfs-rdma-sidecar/go.sum deleted file mode 100644 index 7a4c3e2a4..000000000 --- a/seaweedfs-rdma-sidecar/go.sum +++ /dev/null @@ -1,121 +0,0 @@ -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cognusion/imaging v1.0.2 h1:BQwBV8V8eF3+dwffp8Udl9xF1JKh5Z0z5JkJwAi98Mc= -github.com/cognusion/imaging v1.0.2/go.mod h1:mj7FvH7cT2dlFogQOSUQRtotBxJ4gFQ2ySMSmBm5dSk= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/seaweedfs/goexif v1.0.3 h1:ve/OjI7dxPW8X9YQsv3JuVMaxEyF9Rvfd04ouL+Bz30= -github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= -github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= -github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/image v0.30.0 h1:jD5RhkmVAnjqaCUXfbGBrn3lpxbknfN9w2UhHHU+5B4= -golang.org/x/image v0.30.0/go.mod h1:SAEUTxCCMWSrJcCy/4HwavEsfZZJlYxeHLc6tTiAe/c= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/seaweedfs-rdma-sidecar/pkg/ipc/client.go b/seaweedfs-rdma-sidecar/pkg/ipc/client.go deleted file mode 100644 index b2c1d2db1..000000000 --- a/seaweedfs-rdma-sidecar/pkg/ipc/client.go +++ /dev/null @@ -1,331 +0,0 @@ -package ipc - -import ( - "context" - "encoding/binary" - "fmt" - "net" - "sync" - "time" - - "github.com/sirupsen/logrus" - "github.com/vmihailenco/msgpack/v5" -) - -// Client provides IPC communication with the Rust RDMA engine -type Client struct { - socketPath string - conn net.Conn - mu sync.RWMutex - logger *logrus.Logger - connected bool -} - -// NewClient creates a new IPC client -func NewClient(socketPath string, logger *logrus.Logger) *Client { - if logger == nil { - logger = logrus.New() - logger.SetLevel(logrus.InfoLevel) - } - - return &Client{ - socketPath: socketPath, - logger: logger, - } -} - -// Connect establishes connection to the Rust RDMA engine -func (c *Client) Connect(ctx context.Context) error { - c.mu.Lock() - defer c.mu.Unlock() - - if c.connected { - return nil - } - - c.logger.WithField("socket", c.socketPath).Info("๐Ÿ”— Connecting to Rust RDMA engine") - - dialer := &net.Dialer{} - conn, err := dialer.DialContext(ctx, "unix", c.socketPath) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to connect to RDMA engine") - return fmt.Errorf("failed to connect to RDMA engine at %s: %w", c.socketPath, err) - } - - c.conn = conn - c.connected = true - c.logger.Info("โœ… Connected to Rust RDMA engine") - - return nil -} - -// Disconnect closes the connection -func (c *Client) Disconnect() { - c.mu.Lock() - defer c.mu.Unlock() - - if c.conn != nil { - c.conn.Close() - c.conn = nil - c.connected = false - c.logger.Info("๐Ÿ”Œ Disconnected from Rust RDMA engine") - } -} - -// IsConnected returns connection status -func (c *Client) IsConnected() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.connected -} - -// SendMessage sends an IPC message and waits for response -func (c *Client) SendMessage(ctx context.Context, msg *IpcMessage) (*IpcMessage, error) { - c.mu.RLock() - conn := c.conn - connected := c.connected - c.mu.RUnlock() - - if !connected || conn == nil { - return nil, fmt.Errorf("not connected to RDMA engine") - } - - // Set write timeout - if deadline, ok := ctx.Deadline(); ok { - conn.SetWriteDeadline(deadline) - } else { - conn.SetWriteDeadline(time.Now().Add(30 * time.Second)) - } - - c.logger.WithField("type", msg.Type).Debug("๐Ÿ“ค Sending message to Rust engine") - - // Serialize message with MessagePack - data, err := msgpack.Marshal(msg) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to marshal message") - return nil, fmt.Errorf("failed to marshal message: %w", err) - } - - // Send message length (4 bytes) + message data - lengthBytes := make([]byte, 4) - binary.LittleEndian.PutUint32(lengthBytes, uint32(len(data))) - - if _, err := conn.Write(lengthBytes); err != nil { - c.logger.WithError(err).Error("โŒ Failed to send message length") - return nil, fmt.Errorf("failed to send message length: %w", err) - } - - if _, err := conn.Write(data); err != nil { - c.logger.WithError(err).Error("โŒ Failed to send message data") - return nil, fmt.Errorf("failed to send message data: %w", err) - } - - c.logger.WithFields(logrus.Fields{ - "type": msg.Type, - "size": len(data), - }).Debug("๐Ÿ“ค Message sent successfully") - - // Read response - return c.readResponse(ctx, conn) -} - -// readResponse reads and deserializes the response message -func (c *Client) readResponse(ctx context.Context, conn net.Conn) (*IpcMessage, error) { - // Set read timeout - if deadline, ok := ctx.Deadline(); ok { - conn.SetReadDeadline(deadline) - } else { - conn.SetReadDeadline(time.Now().Add(30 * time.Second)) - } - - // Read message length (4 bytes) - lengthBytes := make([]byte, 4) - if _, err := conn.Read(lengthBytes); err != nil { - c.logger.WithError(err).Error("โŒ Failed to read response length") - return nil, fmt.Errorf("failed to read response length: %w", err) - } - - length := binary.LittleEndian.Uint32(lengthBytes) - if length > 64*1024*1024 { // 64MB sanity check - c.logger.WithField("length", length).Error("โŒ Response message too large") - return nil, fmt.Errorf("response message too large: %d bytes", length) - } - - // Read message data - data := make([]byte, length) - if _, err := conn.Read(data); err != nil { - c.logger.WithError(err).Error("โŒ Failed to read response data") - return nil, fmt.Errorf("failed to read response data: %w", err) - } - - c.logger.WithField("size", length).Debug("๐Ÿ“ฅ Response received") - - // Deserialize with MessagePack - var response IpcMessage - if err := msgpack.Unmarshal(data, &response); err != nil { - c.logger.WithError(err).Error("โŒ Failed to unmarshal response") - return nil, fmt.Errorf("failed to unmarshal response: %w", err) - } - - c.logger.WithField("type", response.Type).Debug("๐Ÿ“ฅ Response deserialized successfully") - - return &response, nil -} - -// High-level convenience methods - -// Ping sends a ping message to test connectivity -func (c *Client) Ping(ctx context.Context, clientID *string) (*PongResponse, error) { - msg := NewPingMessage(clientID) - - response, err := c.SendMessage(ctx, msg) - if err != nil { - return nil, err - } - - if response.Type == MsgError { - errorData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal engine error data: %w", err) - } - var errorResp ErrorResponse - if err := msgpack.Unmarshal(errorData, &errorResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal engine error response: %w", err) - } - return nil, fmt.Errorf("engine error: %s - %s", errorResp.Code, errorResp.Message) - } - - if response.Type != MsgPong { - return nil, fmt.Errorf("unexpected response type: %s", response.Type) - } - - // Convert response data to PongResponse - pongData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal pong data: %w", err) - } - - var pong PongResponse - if err := msgpack.Unmarshal(pongData, &pong); err != nil { - return nil, fmt.Errorf("failed to unmarshal pong response: %w", err) - } - - return &pong, nil -} - -// GetCapabilities requests engine capabilities -func (c *Client) GetCapabilities(ctx context.Context, clientID *string) (*GetCapabilitiesResponse, error) { - msg := NewGetCapabilitiesMessage(clientID) - - response, err := c.SendMessage(ctx, msg) - if err != nil { - return nil, err - } - - if response.Type == MsgError { - errorData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal engine error data: %w", err) - } - var errorResp ErrorResponse - if err := msgpack.Unmarshal(errorData, &errorResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal engine error response: %w", err) - } - return nil, fmt.Errorf("engine error: %s - %s", errorResp.Code, errorResp.Message) - } - - if response.Type != MsgGetCapabilitiesResponse { - return nil, fmt.Errorf("unexpected response type: %s", response.Type) - } - - // Convert response data to GetCapabilitiesResponse - capsData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal capabilities data: %w", err) - } - - var caps GetCapabilitiesResponse - if err := msgpack.Unmarshal(capsData, &caps); err != nil { - return nil, fmt.Errorf("failed to unmarshal capabilities response: %w", err) - } - - return &caps, nil -} - -// StartRead initiates an RDMA read operation -func (c *Client) StartRead(ctx context.Context, req *StartReadRequest) (*StartReadResponse, error) { - msg := NewStartReadMessage(req) - - response, err := c.SendMessage(ctx, msg) - if err != nil { - return nil, err - } - - if response.Type == MsgError { - errorData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal engine error data: %w", err) - } - var errorResp ErrorResponse - if err := msgpack.Unmarshal(errorData, &errorResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal engine error response: %w", err) - } - return nil, fmt.Errorf("engine error: %s - %s", errorResp.Code, errorResp.Message) - } - - if response.Type != MsgStartReadResponse { - return nil, fmt.Errorf("unexpected response type: %s", response.Type) - } - - // Convert response data to StartReadResponse - startData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal start read data: %w", err) - } - - var startResp StartReadResponse - if err := msgpack.Unmarshal(startData, &startResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal start read response: %w", err) - } - - return &startResp, nil -} - -// CompleteRead completes an RDMA read operation -func (c *Client) CompleteRead(ctx context.Context, sessionID string, success bool, bytesTransferred uint64, clientCrc *uint32) (*CompleteReadResponse, error) { - msg := NewCompleteReadMessage(sessionID, success, bytesTransferred, clientCrc, nil) - - response, err := c.SendMessage(ctx, msg) - if err != nil { - return nil, err - } - - if response.Type == MsgError { - errorData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal engine error data: %w", err) - } - var errorResp ErrorResponse - if err := msgpack.Unmarshal(errorData, &errorResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal engine error response: %w", err) - } - return nil, fmt.Errorf("engine error: %s - %s", errorResp.Code, errorResp.Message) - } - - if response.Type != MsgCompleteReadResponse { - return nil, fmt.Errorf("unexpected response type: %s", response.Type) - } - - // Convert response data to CompleteReadResponse - completeData, err := msgpack.Marshal(response.Data) - if err != nil { - return nil, fmt.Errorf("failed to marshal complete read data: %w", err) - } - - var completeResp CompleteReadResponse - if err := msgpack.Unmarshal(completeData, &completeResp); err != nil { - return nil, fmt.Errorf("failed to unmarshal complete read response: %w", err) - } - - return &completeResp, nil -} diff --git a/seaweedfs-rdma-sidecar/pkg/ipc/messages.go b/seaweedfs-rdma-sidecar/pkg/ipc/messages.go deleted file mode 100644 index 4293ac396..000000000 --- a/seaweedfs-rdma-sidecar/pkg/ipc/messages.go +++ /dev/null @@ -1,160 +0,0 @@ -// Package ipc provides communication between Go sidecar and Rust RDMA engine -package ipc - -import "time" - -// IpcMessage represents the tagged union of all IPC messages -// This matches the Rust enum: #[serde(tag = "type", content = "data")] -type IpcMessage struct { - Type string `msgpack:"type"` - Data interface{} `msgpack:"data"` -} - -// Request message types -const ( - MsgStartRead = "StartRead" - MsgCompleteRead = "CompleteRead" - MsgGetCapabilities = "GetCapabilities" - MsgPing = "Ping" -) - -// Response message types -const ( - MsgStartReadResponse = "StartReadResponse" - MsgCompleteReadResponse = "CompleteReadResponse" - MsgGetCapabilitiesResponse = "GetCapabilitiesResponse" - MsgPong = "Pong" - MsgError = "Error" -) - -// StartReadRequest corresponds to Rust StartReadRequest -type StartReadRequest struct { - VolumeID uint32 `msgpack:"volume_id"` - NeedleID uint64 `msgpack:"needle_id"` - Cookie uint32 `msgpack:"cookie"` - Offset uint64 `msgpack:"offset"` - Size uint64 `msgpack:"size"` - RemoteAddr uint64 `msgpack:"remote_addr"` - RemoteKey uint32 `msgpack:"remote_key"` - TimeoutSecs uint64 `msgpack:"timeout_secs"` - AuthToken *string `msgpack:"auth_token,omitempty"` -} - -// StartReadResponse corresponds to Rust StartReadResponse -type StartReadResponse struct { - SessionID string `msgpack:"session_id"` - LocalAddr uint64 `msgpack:"local_addr"` - LocalKey uint32 `msgpack:"local_key"` - TransferSize uint64 `msgpack:"transfer_size"` - ExpectedCrc uint32 `msgpack:"expected_crc"` - ExpiresAtNs uint64 `msgpack:"expires_at_ns"` -} - -// CompleteReadRequest corresponds to Rust CompleteReadRequest -type CompleteReadRequest struct { - SessionID string `msgpack:"session_id"` - Success bool `msgpack:"success"` - BytesTransferred uint64 `msgpack:"bytes_transferred"` - ClientCrc *uint32 `msgpack:"client_crc,omitempty"` - ErrorMessage *string `msgpack:"error_message,omitempty"` -} - -// CompleteReadResponse corresponds to Rust CompleteReadResponse -type CompleteReadResponse struct { - Success bool `msgpack:"success"` - ServerCrc *uint32 `msgpack:"server_crc,omitempty"` - Message *string `msgpack:"message,omitempty"` -} - -// GetCapabilitiesRequest corresponds to Rust GetCapabilitiesRequest -type GetCapabilitiesRequest struct { - ClientID *string `msgpack:"client_id,omitempty"` -} - -// GetCapabilitiesResponse corresponds to Rust GetCapabilitiesResponse -type GetCapabilitiesResponse struct { - DeviceName string `msgpack:"device_name"` - VendorId uint32 `msgpack:"vendor_id"` - MaxTransferSize uint64 `msgpack:"max_transfer_size"` - MaxSessions usize `msgpack:"max_sessions"` - ActiveSessions usize `msgpack:"active_sessions"` - PortGid string `msgpack:"port_gid"` - PortLid uint16 `msgpack:"port_lid"` - SupportedAuth []string `msgpack:"supported_auth"` - Version string `msgpack:"version"` - RealRdma bool `msgpack:"real_rdma"` -} - -// usize corresponds to Rust's usize type (platform dependent, but typically uint64 on 64-bit systems) -type usize uint64 - -// PingRequest corresponds to Rust PingRequest -type PingRequest struct { - TimestampNs uint64 `msgpack:"timestamp_ns"` - ClientID *string `msgpack:"client_id,omitempty"` -} - -// PongResponse corresponds to Rust PongResponse -type PongResponse struct { - ClientTimestampNs uint64 `msgpack:"client_timestamp_ns"` - ServerTimestampNs uint64 `msgpack:"server_timestamp_ns"` - ServerRttNs uint64 `msgpack:"server_rtt_ns"` -} - -// ErrorResponse corresponds to Rust ErrorResponse -type ErrorResponse struct { - Code string `msgpack:"code"` - Message string `msgpack:"message"` - Details *string `msgpack:"details,omitempty"` -} - -// Helper functions for creating messages -func NewStartReadMessage(req *StartReadRequest) *IpcMessage { - return &IpcMessage{ - Type: MsgStartRead, - Data: req, - } -} - -func NewCompleteReadMessage(sessionID string, success bool, bytesTransferred uint64, clientCrc *uint32, errorMessage *string) *IpcMessage { - return &IpcMessage{ - Type: MsgCompleteRead, - Data: &CompleteReadRequest{ - SessionID: sessionID, - Success: success, - BytesTransferred: bytesTransferred, - ClientCrc: clientCrc, - ErrorMessage: errorMessage, - }, - } -} - -func NewGetCapabilitiesMessage(clientID *string) *IpcMessage { - return &IpcMessage{ - Type: MsgGetCapabilities, - Data: &GetCapabilitiesRequest{ - ClientID: clientID, - }, - } -} - -func NewPingMessage(clientID *string) *IpcMessage { - return &IpcMessage{ - Type: MsgPing, - Data: &PingRequest{ - TimestampNs: uint64(time.Now().UnixNano()), - ClientID: clientID, - }, - } -} - -func NewErrorMessage(code, message string, details *string) *IpcMessage { - return &IpcMessage{ - Type: MsgError, - Data: &ErrorResponse{ - Code: code, - Message: message, - Details: details, - }, - } -} diff --git a/seaweedfs-rdma-sidecar/pkg/rdma/client.go b/seaweedfs-rdma-sidecar/pkg/rdma/client.go deleted file mode 100644 index 156bb5497..000000000 --- a/seaweedfs-rdma-sidecar/pkg/rdma/client.go +++ /dev/null @@ -1,630 +0,0 @@ -// Package rdma provides high-level RDMA operations for SeaweedFS integration -package rdma - -import ( - "context" - "fmt" - "sync" - "time" - - "seaweedfs-rdma-sidecar/pkg/ipc" - - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/sirupsen/logrus" -) - -// PooledConnection represents a pooled RDMA connection -type PooledConnection struct { - ipcClient *ipc.Client - lastUsed time.Time - inUse bool - sessionID string - created time.Time -} - -// ConnectionPool manages a pool of RDMA connections -type ConnectionPool struct { - connections []*PooledConnection - mutex sync.RWMutex - maxConnections int - maxIdleTime time.Duration - enginePath string - logger *logrus.Logger -} - -// Client provides high-level RDMA operations with connection pooling -type Client struct { - pool *ConnectionPool - logger *logrus.Logger - enginePath string - capabilities *ipc.GetCapabilitiesResponse - connected bool - defaultTimeout time.Duration - - // Legacy single connection (for backward compatibility) - ipcClient *ipc.Client -} - -// Config holds configuration for the RDMA client -type Config struct { - EngineSocketPath string - DefaultTimeout time.Duration - Logger *logrus.Logger - - // Connection pooling options - EnablePooling bool // Enable connection pooling (default: true) - MaxConnections int // Max connections in pool (default: 10) - MaxIdleTime time.Duration // Max idle time before connection cleanup (default: 5min) -} - -// ReadRequest represents a SeaweedFS needle read request -type ReadRequest struct { - VolumeID uint32 - NeedleID uint64 - Cookie uint32 - Offset uint64 - Size uint64 - AuthToken *string -} - -// ReadResponse represents the result of an RDMA read operation -type ReadResponse struct { - Data []byte - BytesRead uint64 - Duration time.Duration - TransferRate float64 - SessionID string - Success bool - Message string -} - -// NewConnectionPool creates a new connection pool -func NewConnectionPool(enginePath string, maxConnections int, maxIdleTime time.Duration, logger *logrus.Logger) *ConnectionPool { - if maxConnections <= 0 { - maxConnections = 10 // Default - } - if maxIdleTime <= 0 { - maxIdleTime = 5 * time.Minute // Default - } - - return &ConnectionPool{ - connections: make([]*PooledConnection, 0, maxConnections), - maxConnections: maxConnections, - maxIdleTime: maxIdleTime, - enginePath: enginePath, - logger: logger, - } -} - -// getConnection gets an available connection from the pool or creates a new one -func (p *ConnectionPool) getConnection(ctx context.Context) (*PooledConnection, error) { - p.mutex.Lock() - defer p.mutex.Unlock() - - // Look for an available connection - for _, conn := range p.connections { - if !conn.inUse && time.Since(conn.lastUsed) < p.maxIdleTime { - conn.inUse = true - conn.lastUsed = time.Now() - p.logger.WithField("session_id", conn.sessionID).Debug("๐Ÿ”Œ Reusing pooled RDMA connection") - return conn, nil - } - } - - // Create new connection if under limit - if len(p.connections) < p.maxConnections { - ipcClient := ipc.NewClient(p.enginePath, p.logger) - if err := ipcClient.Connect(ctx); err != nil { - return nil, fmt.Errorf("failed to create new pooled connection: %w", err) - } - - conn := &PooledConnection{ - ipcClient: ipcClient, - lastUsed: time.Now(), - inUse: true, - sessionID: fmt.Sprintf("pool-%d-%d", len(p.connections), time.Now().Unix()), - created: time.Now(), - } - - p.connections = append(p.connections, conn) - p.logger.WithFields(logrus.Fields{ - "session_id": conn.sessionID, - "pool_size": len(p.connections), - }).Info("๐Ÿš€ Created new pooled RDMA connection") - - return conn, nil - } - - // Pool is full, wait for an available connection - return nil, fmt.Errorf("connection pool exhausted (max: %d)", p.maxConnections) -} - -// releaseConnection returns a connection to the pool -func (p *ConnectionPool) releaseConnection(conn *PooledConnection) { - p.mutex.Lock() - defer p.mutex.Unlock() - - conn.inUse = false - conn.lastUsed = time.Now() - - p.logger.WithField("session_id", conn.sessionID).Debug("๐Ÿ”„ Released RDMA connection back to pool") -} - -// cleanup removes idle connections from the pool -func (p *ConnectionPool) cleanup() { - p.mutex.Lock() - defer p.mutex.Unlock() - - now := time.Now() - activeConnections := make([]*PooledConnection, 0, len(p.connections)) - - for _, conn := range p.connections { - if conn.inUse || now.Sub(conn.lastUsed) < p.maxIdleTime { - activeConnections = append(activeConnections, conn) - } else { - // Close idle connection - conn.ipcClient.Disconnect() - p.logger.WithFields(logrus.Fields{ - "session_id": conn.sessionID, - "idle_time": now.Sub(conn.lastUsed), - }).Debug("๐Ÿงน Cleaned up idle RDMA connection") - } - } - - p.connections = activeConnections -} - -// Close closes all connections in the pool -func (p *ConnectionPool) Close() { - p.mutex.Lock() - defer p.mutex.Unlock() - - for _, conn := range p.connections { - conn.ipcClient.Disconnect() - } - p.connections = nil - p.logger.Info("๐Ÿ”Œ Connection pool closed") -} - -// NewClient creates a new RDMA client -func NewClient(config *Config) *Client { - if config.Logger == nil { - config.Logger = logrus.New() - config.Logger.SetLevel(logrus.InfoLevel) - } - - if config.DefaultTimeout == 0 { - config.DefaultTimeout = 30 * time.Second - } - - client := &Client{ - logger: config.Logger, - enginePath: config.EngineSocketPath, - defaultTimeout: config.DefaultTimeout, - } - - // Initialize connection pooling if enabled (default: true) - enablePooling := config.EnablePooling - if config.MaxConnections == 0 && config.MaxIdleTime == 0 { - // Default to enabled if not explicitly configured - enablePooling = true - } - - if enablePooling { - client.pool = NewConnectionPool( - config.EngineSocketPath, - config.MaxConnections, - config.MaxIdleTime, - config.Logger, - ) - - // Start cleanup goroutine - go client.startCleanupRoutine() - - config.Logger.WithFields(logrus.Fields{ - "max_connections": client.pool.maxConnections, - "max_idle_time": client.pool.maxIdleTime, - }).Info("๐Ÿ”Œ RDMA connection pooling enabled") - } else { - // Legacy single connection mode - client.ipcClient = ipc.NewClient(config.EngineSocketPath, config.Logger) - config.Logger.Info("๐Ÿ”Œ RDMA single connection mode (pooling disabled)") - } - - return client -} - -// startCleanupRoutine starts a background goroutine to clean up idle connections -func (c *Client) startCleanupRoutine() { - ticker := time.NewTicker(1 * time.Minute) // Cleanup every minute - go func() { - defer ticker.Stop() - for range ticker.C { - if c.pool != nil { - c.pool.cleanup() - } - } - }() -} - -// Connect establishes connection to the Rust RDMA engine and queries capabilities -func (c *Client) Connect(ctx context.Context) error { - c.logger.Info("๐Ÿš€ Connecting to RDMA engine") - - if c.pool != nil { - // Connection pooling mode - connections are created on-demand - c.connected = true - c.logger.Info("โœ… RDMA client ready (connection pooling enabled)") - return nil - } - - // Single connection mode - if err := c.ipcClient.Connect(ctx); err != nil { - return fmt.Errorf("failed to connect to IPC: %w", err) - } - - // Test connectivity with ping - clientID := "rdma-client" - pong, err := c.ipcClient.Ping(ctx, &clientID) - if err != nil { - c.ipcClient.Disconnect() - return fmt.Errorf("failed to ping RDMA engine: %w", err) - } - - latency := time.Duration(pong.ServerRttNs) - c.logger.WithFields(logrus.Fields{ - "latency": latency, - "server_rtt": time.Duration(pong.ServerRttNs), - }).Info("๐Ÿ“ก RDMA engine ping successful") - - // Get capabilities - caps, err := c.ipcClient.GetCapabilities(ctx, &clientID) - if err != nil { - c.ipcClient.Disconnect() - return fmt.Errorf("failed to get engine capabilities: %w", err) - } - - c.capabilities = caps - c.connected = true - - c.logger.WithFields(logrus.Fields{ - "version": caps.Version, - "device_name": caps.DeviceName, - "vendor_id": caps.VendorId, - "max_sessions": caps.MaxSessions, - "max_transfer_size": caps.MaxTransferSize, - "active_sessions": caps.ActiveSessions, - "real_rdma": caps.RealRdma, - "port_gid": caps.PortGid, - "port_lid": caps.PortLid, - }).Info("โœ… RDMA engine connected and ready") - - return nil -} - -// Disconnect closes the connection to the RDMA engine -func (c *Client) Disconnect() { - if c.connected { - if c.pool != nil { - // Connection pooling mode - c.pool.Close() - c.logger.Info("๐Ÿ”Œ Disconnected from RDMA engine (pool closed)") - } else { - // Single connection mode - c.ipcClient.Disconnect() - c.logger.Info("๐Ÿ”Œ Disconnected from RDMA engine") - } - c.connected = false - } -} - -// IsConnected returns true if connected to the RDMA engine -func (c *Client) IsConnected() bool { - if c.pool != nil { - // Connection pooling mode - always connected if pool exists - return c.connected - } else { - // Single connection mode - return c.connected && c.ipcClient.IsConnected() - } -} - -// GetCapabilities returns the RDMA engine capabilities -func (c *Client) GetCapabilities() *ipc.GetCapabilitiesResponse { - return c.capabilities -} - -// Read performs an RDMA read operation for a SeaweedFS needle -func (c *Client) Read(ctx context.Context, req *ReadRequest) (*ReadResponse, error) { - if !c.IsConnected() { - return nil, fmt.Errorf("not connected to RDMA engine") - } - - startTime := time.Now() - - c.logger.WithFields(logrus.Fields{ - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - "offset": req.Offset, - "size": req.Size, - }).Debug("๐Ÿ“– Starting RDMA read operation") - - if c.pool != nil { - // Connection pooling mode - return c.readWithPool(ctx, req, startTime) - } - - // Single connection mode - // Create IPC request - ipcReq := &ipc.StartReadRequest{ - VolumeID: req.VolumeID, - NeedleID: req.NeedleID, - Cookie: req.Cookie, - Offset: req.Offset, - Size: req.Size, - RemoteAddr: 0, // Will be set by engine (mock for now) - RemoteKey: 0, // Will be set by engine (mock for now) - TimeoutSecs: uint64(c.defaultTimeout.Seconds()), - AuthToken: req.AuthToken, - } - - // Start RDMA read - startResp, err := c.ipcClient.StartRead(ctx, ipcReq) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to start RDMA read") - return nil, fmt.Errorf("failed to start RDMA read: %w", err) - } - - // In the new protocol, if we got a StartReadResponse, the operation was successful - - c.logger.WithFields(logrus.Fields{ - "session_id": startResp.SessionID, - "local_addr": fmt.Sprintf("0x%x", startResp.LocalAddr), - "local_key": startResp.LocalKey, - "transfer_size": startResp.TransferSize, - "expected_crc": fmt.Sprintf("0x%x", startResp.ExpectedCrc), - "expires_at": time.Unix(0, int64(startResp.ExpiresAtNs)).Format(time.RFC3339), - }).Debug("๐Ÿ“– RDMA read session started") - - // Complete the RDMA read - completeResp, err := c.ipcClient.CompleteRead(ctx, startResp.SessionID, true, startResp.TransferSize, &startResp.ExpectedCrc) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to complete RDMA read") - return nil, fmt.Errorf("failed to complete RDMA read: %w", err) - } - - duration := time.Since(startTime) - - if !completeResp.Success { - errorMsg := "unknown error" - if completeResp.Message != nil { - errorMsg = *completeResp.Message - } - c.logger.WithFields(logrus.Fields{ - "session_id": startResp.SessionID, - "error_message": errorMsg, - }).Error("โŒ RDMA read completion failed") - return nil, fmt.Errorf("RDMA read completion failed: %s", errorMsg) - } - - // Calculate transfer rate (bytes/second) - transferRate := float64(startResp.TransferSize) / duration.Seconds() - - c.logger.WithFields(logrus.Fields{ - "session_id": startResp.SessionID, - "bytes_read": startResp.TransferSize, - "duration": duration, - "transfer_rate": transferRate, - "server_crc": completeResp.ServerCrc, - }).Info("โœ… RDMA read completed successfully") - - // MOCK DATA IMPLEMENTATION - FOR DEVELOPMENT/TESTING ONLY - // - // This section generates placeholder data for the mock RDMA implementation. - // In a production RDMA implementation, this should be replaced with: - // - // 1. The actual data transferred via RDMA from the remote memory region - // 2. Data validation using checksums/CRC from the RDMA completion - // 3. Proper error handling for RDMA transfer failures - // 4. Memory region cleanup and deregistration - // - // TODO for real RDMA implementation: - // - Replace mockData with actual RDMA buffer contents - // - Validate data integrity using server CRC: completeResp.ServerCrc - // - Handle partial transfers and retry logic - // - Implement proper memory management for RDMA regions - // - // Current mock behavior: Generates a simple pattern (0,1,2...255,0,1,2...) - // This allows testing of the integration pipeline without real hardware - mockData := make([]byte, startResp.TransferSize) - for i := range mockData { - mockData[i] = byte(i % 256) // Simple repeating pattern for verification - } - // END MOCK DATA IMPLEMENTATION - - return &ReadResponse{ - Data: mockData, - BytesRead: startResp.TransferSize, - Duration: duration, - TransferRate: transferRate, - SessionID: startResp.SessionID, - Success: true, - Message: "RDMA read completed successfully", - }, nil -} - -// ReadRange performs an RDMA read for a specific range within a needle -func (c *Client) ReadRange(ctx context.Context, volumeID uint32, needleID uint64, cookie uint32, offset, size uint64) (*ReadResponse, error) { - req := &ReadRequest{ - VolumeID: volumeID, - NeedleID: needleID, - Cookie: cookie, - Offset: offset, - Size: size, - } - return c.Read(ctx, req) -} - -// ReadFileRange performs an RDMA read using SeaweedFS file ID format -func (c *Client) ReadFileRange(ctx context.Context, fileID string, offset, size uint64) (*ReadResponse, error) { - // Parse file ID (e.g., "3,01637037d6" -> volume=3, needle=0x01637037d6, cookie extracted) - volumeID, needleID, cookie, err := parseFileID(fileID) - if err != nil { - return nil, fmt.Errorf("invalid file ID %s: %w", fileID, err) - } - - req := &ReadRequest{ - VolumeID: volumeID, - NeedleID: needleID, - Cookie: cookie, - Offset: offset, - Size: size, - } - return c.Read(ctx, req) -} - -// parseFileID extracts volume ID, needle ID, and cookie from a SeaweedFS file ID -// Uses existing SeaweedFS parsing logic to ensure compatibility -func parseFileID(fileId string) (volumeID uint32, needleID uint64, cookie uint32, err error) { - // Use existing SeaweedFS file ID parsing - fid, err := needle.ParseFileIdFromString(fileId) - if err != nil { - return 0, 0, 0, fmt.Errorf("failed to parse file ID %s: %w", fileId, err) - } - - volumeID = uint32(fid.VolumeId) - needleID = uint64(fid.Key) - cookie = uint32(fid.Cookie) - - return volumeID, needleID, cookie, nil -} - -// ReadFull performs an RDMA read for an entire needle -func (c *Client) ReadFull(ctx context.Context, volumeID uint32, needleID uint64, cookie uint32) (*ReadResponse, error) { - req := &ReadRequest{ - VolumeID: volumeID, - NeedleID: needleID, - Cookie: cookie, - Offset: 0, - Size: 0, // 0 means read entire needle - } - return c.Read(ctx, req) -} - -// Ping tests connectivity to the RDMA engine -func (c *Client) Ping(ctx context.Context) (time.Duration, error) { - if !c.IsConnected() { - return 0, fmt.Errorf("not connected to RDMA engine") - } - - clientID := "health-check" - start := time.Now() - pong, err := c.ipcClient.Ping(ctx, &clientID) - if err != nil { - return 0, err - } - - totalLatency := time.Since(start) - serverRtt := time.Duration(pong.ServerRttNs) - - c.logger.WithFields(logrus.Fields{ - "total_latency": totalLatency, - "server_rtt": serverRtt, - "client_id": clientID, - }).Debug("๐Ÿ“ RDMA engine ping successful") - - return totalLatency, nil -} - -// readWithPool performs RDMA read using connection pooling -func (c *Client) readWithPool(ctx context.Context, req *ReadRequest, startTime time.Time) (*ReadResponse, error) { - // Get connection from pool - conn, err := c.pool.getConnection(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get pooled connection: %w", err) - } - defer c.pool.releaseConnection(conn) - - c.logger.WithField("session_id", conn.sessionID).Debug("๐Ÿ”Œ Using pooled RDMA connection") - - // Create IPC request - ipcReq := &ipc.StartReadRequest{ - VolumeID: req.VolumeID, - NeedleID: req.NeedleID, - Cookie: req.Cookie, - Offset: req.Offset, - Size: req.Size, - RemoteAddr: 0, // Will be set by engine (mock for now) - RemoteKey: 0, // Will be set by engine (mock for now) - TimeoutSecs: uint64(c.defaultTimeout.Seconds()), - AuthToken: req.AuthToken, - } - - // Start RDMA read - startResp, err := conn.ipcClient.StartRead(ctx, ipcReq) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to start RDMA read (pooled)") - return nil, fmt.Errorf("failed to start RDMA read: %w", err) - } - - c.logger.WithFields(logrus.Fields{ - "session_id": startResp.SessionID, - "local_addr": fmt.Sprintf("0x%x", startResp.LocalAddr), - "local_key": startResp.LocalKey, - "transfer_size": startResp.TransferSize, - "expected_crc": fmt.Sprintf("0x%x", startResp.ExpectedCrc), - "expires_at": time.Unix(0, int64(startResp.ExpiresAtNs)).Format(time.RFC3339), - "pooled": true, - }).Debug("๐Ÿ“– RDMA read session started (pooled)") - - // Complete the RDMA read - completeResp, err := conn.ipcClient.CompleteRead(ctx, startResp.SessionID, true, startResp.TransferSize, &startResp.ExpectedCrc) - if err != nil { - c.logger.WithError(err).Error("โŒ Failed to complete RDMA read (pooled)") - return nil, fmt.Errorf("failed to complete RDMA read: %w", err) - } - - duration := time.Since(startTime) - - if !completeResp.Success { - errorMsg := "unknown error" - if completeResp.Message != nil { - errorMsg = *completeResp.Message - } - c.logger.WithFields(logrus.Fields{ - "session_id": conn.sessionID, - "error_message": errorMsg, - "pooled": true, - }).Error("โŒ RDMA read completion failed (pooled)") - return nil, fmt.Errorf("RDMA read completion failed: %s", errorMsg) - } - - // Calculate transfer rate (bytes/second) - transferRate := float64(startResp.TransferSize) / duration.Seconds() - - c.logger.WithFields(logrus.Fields{ - "session_id": conn.sessionID, - "bytes_read": startResp.TransferSize, - "duration": duration, - "transfer_rate": transferRate, - "server_crc": completeResp.ServerCrc, - "pooled": true, - }).Info("โœ… RDMA read completed successfully (pooled)") - - // For the mock implementation, we'll return placeholder data - // In the real implementation, this would be the actual RDMA transferred data - mockData := make([]byte, startResp.TransferSize) - for i := range mockData { - mockData[i] = byte(i % 256) // Simple pattern for testing - } - - return &ReadResponse{ - Data: mockData, - BytesRead: startResp.TransferSize, - Duration: duration, - TransferRate: transferRate, - SessionID: conn.sessionID, - Success: true, - Message: "RDMA read successful (pooled)", - }, nil -} diff --git a/seaweedfs-rdma-sidecar/pkg/seaweedfs/client.go b/seaweedfs-rdma-sidecar/pkg/seaweedfs/client.go deleted file mode 100644 index 5073c349a..000000000 --- a/seaweedfs-rdma-sidecar/pkg/seaweedfs/client.go +++ /dev/null @@ -1,401 +0,0 @@ -// Package seaweedfs provides SeaweedFS-specific RDMA integration -package seaweedfs - -import ( - "context" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "time" - - "seaweedfs-rdma-sidecar/pkg/rdma" - - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/sirupsen/logrus" -) - -// SeaweedFSRDMAClient provides SeaweedFS-specific RDMA operations -type SeaweedFSRDMAClient struct { - rdmaClient *rdma.Client - logger *logrus.Logger - volumeServerURL string - enabled bool - - // Zero-copy optimization - tempDir string - useZeroCopy bool -} - -// Config holds configuration for the SeaweedFS RDMA client -type Config struct { - RDMASocketPath string - VolumeServerURL string - Enabled bool - DefaultTimeout time.Duration - Logger *logrus.Logger - - // Zero-copy optimization - TempDir string // Directory for temp files (default: /tmp/rdma-cache) - UseZeroCopy bool // Enable zero-copy via temp files - - // Connection pooling options - EnablePooling bool // Enable RDMA connection pooling (default: true) - MaxConnections int // Max connections in pool (default: 10) - MaxIdleTime time.Duration // Max idle time before connection cleanup (default: 5min) -} - -// NeedleReadRequest represents a SeaweedFS needle read request -type NeedleReadRequest struct { - VolumeID uint32 - NeedleID uint64 - Cookie uint32 - Offset uint64 - Size uint64 - VolumeServer string // Override volume server URL for this request -} - -// NeedleReadResponse represents the result of a needle read -type NeedleReadResponse struct { - Data []byte - IsRDMA bool - Latency time.Duration - Source string // "rdma" or "http" - SessionID string - - // Zero-copy optimization fields - TempFilePath string // Path to temp file with data (for zero-copy) - UseTempFile bool // Whether to use temp file instead of Data -} - -// NewSeaweedFSRDMAClient creates a new SeaweedFS RDMA client -func NewSeaweedFSRDMAClient(config *Config) (*SeaweedFSRDMAClient, error) { - if config.Logger == nil { - config.Logger = logrus.New() - config.Logger.SetLevel(logrus.InfoLevel) - } - - var rdmaClient *rdma.Client - if config.Enabled && config.RDMASocketPath != "" { - rdmaConfig := &rdma.Config{ - EngineSocketPath: config.RDMASocketPath, - DefaultTimeout: config.DefaultTimeout, - Logger: config.Logger, - EnablePooling: config.EnablePooling, - MaxConnections: config.MaxConnections, - MaxIdleTime: config.MaxIdleTime, - } - rdmaClient = rdma.NewClient(rdmaConfig) - } - - // Setup temp directory for zero-copy optimization - tempDir := config.TempDir - if tempDir == "" { - tempDir = "/tmp/rdma-cache" - } - - if config.UseZeroCopy { - if err := os.MkdirAll(tempDir, 0755); err != nil { - config.Logger.WithError(err).Warn("Failed to create temp directory, disabling zero-copy") - config.UseZeroCopy = false - } - } - - return &SeaweedFSRDMAClient{ - rdmaClient: rdmaClient, - logger: config.Logger, - volumeServerURL: config.VolumeServerURL, - enabled: config.Enabled, - tempDir: tempDir, - useZeroCopy: config.UseZeroCopy, - }, nil -} - -// Start initializes the RDMA client connection -func (c *SeaweedFSRDMAClient) Start(ctx context.Context) error { - if !c.enabled || c.rdmaClient == nil { - c.logger.Info("๐Ÿ”„ RDMA disabled, using HTTP fallback only") - return nil - } - - c.logger.Info("๐Ÿš€ Starting SeaweedFS RDMA client...") - - if err := c.rdmaClient.Connect(ctx); err != nil { - c.logger.WithError(err).Error("โŒ Failed to connect to RDMA engine") - return fmt.Errorf("failed to connect to RDMA engine: %w", err) - } - - c.logger.Info("โœ… SeaweedFS RDMA client started successfully") - return nil -} - -// Stop shuts down the RDMA client -func (c *SeaweedFSRDMAClient) Stop() { - if c.rdmaClient != nil { - c.rdmaClient.Disconnect() - c.logger.Info("๐Ÿ”Œ SeaweedFS RDMA client stopped") - } -} - -// IsEnabled returns true if RDMA is enabled and available -func (c *SeaweedFSRDMAClient) IsEnabled() bool { - return c.enabled && c.rdmaClient != nil && c.rdmaClient.IsConnected() -} - -// ReadNeedle reads a needle using RDMA fast path or HTTP fallback -func (c *SeaweedFSRDMAClient) ReadNeedle(ctx context.Context, req *NeedleReadRequest) (*NeedleReadResponse, error) { - start := time.Now() - var rdmaErr error - - // Try RDMA fast path first - if c.IsEnabled() { - c.logger.WithFields(logrus.Fields{ - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - "offset": req.Offset, - "size": req.Size, - }).Debug("๐Ÿš€ Attempting RDMA fast path") - - rdmaReq := &rdma.ReadRequest{ - VolumeID: req.VolumeID, - NeedleID: req.NeedleID, - Cookie: req.Cookie, - Offset: req.Offset, - Size: req.Size, - } - - resp, err := c.rdmaClient.Read(ctx, rdmaReq) - if err != nil { - c.logger.WithError(err).Warn("โš ๏ธ RDMA read failed, falling back to HTTP") - rdmaErr = err - } else { - c.logger.WithFields(logrus.Fields{ - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - "bytes_read": resp.BytesRead, - "transfer_rate": resp.TransferRate, - "latency": time.Since(start), - }).Info("๐Ÿš€ RDMA fast path successful") - - // Try zero-copy optimization if enabled and data is large enough - if c.useZeroCopy && len(resp.Data) > 64*1024 { // 64KB threshold - tempFilePath, err := c.writeToTempFile(req, resp.Data) - if err != nil { - c.logger.WithError(err).Warn("Failed to write temp file, using regular response") - // Fall back to regular response - } else { - c.logger.WithFields(logrus.Fields{ - "temp_file": tempFilePath, - "size": len(resp.Data), - }).Info("๐Ÿ”ฅ Zero-copy temp file created") - - return &NeedleReadResponse{ - Data: nil, // Don't duplicate data in memory - IsRDMA: true, - Latency: time.Since(start), - Source: "rdma-zerocopy", - SessionID: resp.SessionID, - TempFilePath: tempFilePath, - UseTempFile: true, - }, nil - } - } - - return &NeedleReadResponse{ - Data: resp.Data, - IsRDMA: true, - Latency: time.Since(start), - Source: "rdma", - SessionID: resp.SessionID, - }, nil - } - } - - // Fallback to HTTP - c.logger.WithFields(logrus.Fields{ - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - "reason": "rdma_unavailable", - }).Debug("๐ŸŒ Using HTTP fallback") - - data, err := c.httpFallback(ctx, req) - if err != nil { - if rdmaErr != nil { - return nil, fmt.Errorf("both RDMA and HTTP fallback failed: RDMA=%v, HTTP=%v", rdmaErr, err) - } - return nil, fmt.Errorf("HTTP fallback failed: %w", err) - } - - return &NeedleReadResponse{ - Data: data, - IsRDMA: false, - Latency: time.Since(start), - Source: "http", - }, nil -} - -// ReadNeedleRange reads a specific range from a needle -func (c *SeaweedFSRDMAClient) ReadNeedleRange(ctx context.Context, volumeID uint32, needleID uint64, cookie uint32, offset, size uint64) (*NeedleReadResponse, error) { - req := &NeedleReadRequest{ - VolumeID: volumeID, - NeedleID: needleID, - Cookie: cookie, - Offset: offset, - Size: size, - } - return c.ReadNeedle(ctx, req) -} - -// httpFallback performs HTTP fallback read from SeaweedFS volume server -func (c *SeaweedFSRDMAClient) httpFallback(ctx context.Context, req *NeedleReadRequest) ([]byte, error) { - // Use volume server from request, fallback to configured URL - volumeServerURL := req.VolumeServer - if volumeServerURL == "" { - volumeServerURL = c.volumeServerURL - } - - if volumeServerURL == "" { - return nil, fmt.Errorf("no volume server URL provided in request or configured") - } - - // Build URL using existing SeaweedFS file ID construction - volumeId := needle.VolumeId(req.VolumeID) - needleId := types.NeedleId(req.NeedleID) - cookie := types.Cookie(req.Cookie) - - fileId := &needle.FileId{ - VolumeId: volumeId, - Key: needleId, - Cookie: cookie, - } - - url := fmt.Sprintf("%s/%s", volumeServerURL, fileId.String()) - - if req.Offset > 0 || req.Size > 0 { - url += fmt.Sprintf("?offset=%d&size=%d", req.Offset, req.Size) - } - - c.logger.WithField("url", url).Debug("๐Ÿ“ฅ HTTP fallback request") - - httpReq, err := http.NewRequestWithContext(ctx, "GET", url, nil) - if err != nil { - return nil, fmt.Errorf("failed to create HTTP request: %w", err) - } - - client := &http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(httpReq) - if err != nil { - return nil, fmt.Errorf("HTTP request failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("HTTP request failed with status: %d", resp.StatusCode) - } - - // Read response data - io.ReadAll handles context cancellation and timeouts correctly - data, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read HTTP response body: %w", err) - } - - c.logger.WithFields(logrus.Fields{ - "volume_id": req.VolumeID, - "needle_id": req.NeedleID, - "data_size": len(data), - }).Debug("๐Ÿ“ฅ HTTP fallback successful") - - return data, nil -} - -// HealthCheck verifies that the RDMA client is healthy -func (c *SeaweedFSRDMAClient) HealthCheck(ctx context.Context) error { - if !c.enabled { - return fmt.Errorf("RDMA is disabled") - } - - if c.rdmaClient == nil { - return fmt.Errorf("RDMA client not initialized") - } - - if !c.rdmaClient.IsConnected() { - return fmt.Errorf("RDMA client not connected") - } - - // Try a ping to the RDMA engine - _, err := c.rdmaClient.Ping(ctx) - return err -} - -// GetStats returns statistics about the RDMA client -func (c *SeaweedFSRDMAClient) GetStats() map[string]interface{} { - stats := map[string]interface{}{ - "enabled": c.enabled, - "volume_server_url": c.volumeServerURL, - "rdma_socket_path": "", - } - - if c.rdmaClient != nil { - stats["connected"] = c.rdmaClient.IsConnected() - // Note: Capabilities method may not be available, skip for now - } else { - stats["connected"] = false - stats["error"] = "RDMA client not initialized" - } - - return stats -} - -// writeToTempFile writes RDMA data to a temp file for zero-copy optimization -func (c *SeaweedFSRDMAClient) writeToTempFile(req *NeedleReadRequest, data []byte) (string, error) { - // Create temp file with unique name based on needle info - fileName := fmt.Sprintf("vol%d_needle%x_cookie%d_offset%d_size%d.tmp", - req.VolumeID, req.NeedleID, req.Cookie, req.Offset, req.Size) - tempFilePath := filepath.Join(c.tempDir, fileName) - - // Write data to temp file (this populates the page cache) - err := os.WriteFile(tempFilePath, data, 0644) - if err != nil { - return "", fmt.Errorf("failed to write temp file: %w", err) - } - - c.logger.WithFields(logrus.Fields{ - "temp_file": tempFilePath, - "size": len(data), - }).Debug("๐Ÿ“ Temp file written to page cache") - - return tempFilePath, nil -} - -// CleanupTempFile removes a temp file (called by mount client after use) -func (c *SeaweedFSRDMAClient) CleanupTempFile(tempFilePath string) error { - if tempFilePath == "" { - return nil - } - - // Validate that tempFilePath is within c.tempDir - absTempDir, err := filepath.Abs(c.tempDir) - if err != nil { - return fmt.Errorf("failed to resolve temp dir: %w", err) - } - absFilePath, err := filepath.Abs(tempFilePath) - if err != nil { - return fmt.Errorf("failed to resolve temp file path: %w", err) - } - // Ensure absFilePath is within absTempDir - if !strings.HasPrefix(absFilePath, absTempDir+string(os.PathSeparator)) && absFilePath != absTempDir { - c.logger.WithField("temp_file", tempFilePath).Warn("Attempted cleanup of file outside temp dir") - return fmt.Errorf("invalid temp file path") - } - - err = os.Remove(absFilePath) - if err != nil && !os.IsNotExist(err) { - c.logger.WithError(err).WithField("temp_file", absFilePath).Warn("Failed to cleanup temp file") - return err - } - - c.logger.WithField("temp_file", absFilePath).Debug("๐Ÿงน Temp file cleaned up") - return nil -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/Cargo.lock b/seaweedfs-rdma-sidecar/rdma-engine/Cargo.lock deleted file mode 100644 index eadb69977..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/Cargo.lock +++ /dev/null @@ -1,1934 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 4 - -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.16", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "anes" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" - -[[package]] -name = "anstream" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" -dependencies = [ - "windows-sys 0.60.2", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys 0.60.2", -] - -[[package]] -name = "anyhow" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" - -[[package]] -name = "async-trait" -version = "0.1.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "autocfg" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" - -[[package]] -name = "backtrace" -version = "0.3.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bit-set" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" -dependencies = [ - "bit-vec", -] - -[[package]] -name = "bit-vec" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29" - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "bumpalo" -version = "3.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "cast" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" - -[[package]] -name = "cc" -version = "1.2.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f" -dependencies = [ - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" - -[[package]] -name = "chrono" -version = "0.4.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "serde", - "wasm-bindgen", - "windows-link", -] - -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - -[[package]] -name = "clap" -version = "4.5.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "clap_lex" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "config" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca" -dependencies = [ - "async-trait", - "json5", - "lazy_static", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml", - "yaml-rust", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "cpufeatures" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "criterion" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" -dependencies = [ - "anes", - "cast", - "ciborium", - "clap", - "criterion-plot", - "is-terminal", - "itertools", - "num-traits", - "once_cell", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crunchy" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "dlv-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "errno" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" -dependencies = [ - "libc", - "windows-sys 0.60.2", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.11.1+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", -] - -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - -[[package]] -name = "half" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" -dependencies = [ - "cfg-if", - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" - -[[package]] -name = "iana-time-zone" -version = "0.1.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "log", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "io-uring" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" -dependencies = [ - "bitflags 2.9.2", - "cfg-if", - "libc", -] - -[[package]] -name = "is-terminal" -version = "0.4.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "js-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "libc" -version = "0.2.175" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" - -[[package]] -name = "libloading" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" -dependencies = [ - "cfg-if", - "windows-targets 0.53.3", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" - -[[package]] -name = "lock_api" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" - -[[package]] -name = "matchers" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" - -[[package]] -name = "memmap2" -version = "0.9.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" -dependencies = [ - "libc", -] - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - -[[package]] -name = "mio" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" -dependencies = [ - "libc", - "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", -] - -[[package]] -name = "nix" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" -dependencies = [ - "bitflags 2.9.2", - "cfg-if", - "libc", -] - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nu-ansi-term" -version = "0.50.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" - -[[package]] -name = "oorandom" -version = "11.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" - -[[package]] -name = "ordered-multimap" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" -dependencies = [ - "dlv-list", - "hashbrown", -] - -[[package]] -name = "parking_lot" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets 0.52.6", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "pathdiff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" - -[[package]] -name = "pest" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" -dependencies = [ - "memchr", - "thiserror 2.0.15", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pest_meta" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" -dependencies = [ - "pest", - "sha2", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "plotters" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" - -[[package]] -name = "plotters-svg" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" -dependencies = [ - "plotters-backend", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro2" -version = "1.0.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "802989b9fe1b674bc996ac7bed7b3012090a9b4cbfa0fe157ee3ea97e93e4ccd" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "proptest" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" -dependencies = [ - "bit-set", - "bit-vec", - "bitflags 2.9.2", - "lazy_static", - "num-traits", - "rand", - "rand_chacha", - "rand_xorshift", - "regex-syntax", - "rusty-fork", - "tempfile", - "unarray", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "r-efi" -version = "5.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" - -[[package]] -name = "rand" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" -dependencies = [ - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -dependencies = [ - "getrandom 0.3.3", -] - -[[package]] -name = "rand_xorshift" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" -dependencies = [ - "rand_core", -] - -[[package]] -name = "rayon" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "rdma-engine" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "bincode", - "bytes", - "chrono", - "clap", - "config", - "criterion", - "libc", - "libloading", - "memmap2", - "nix", - "parking_lot", - "proptest", - "rmp-serde", - "serde", - "tempfile", - "thiserror 1.0.69", - "tokio", - "tokio-util", - "tracing", - "tracing-subscriber", - "uuid", -] - -[[package]] -name = "redox_syscall" -version = "0.5.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" -dependencies = [ - "bitflags 2.9.2", -] - -[[package]] -name = "regex" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "rmp" -version = "0.8.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" -dependencies = [ - "byteorder", - "num-traits", - "paste", -] - -[[package]] -name = "rmp-serde" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" -dependencies = [ - "byteorder", - "rmp", - "serde", -] - -[[package]] -name = "ron" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" -dependencies = [ - "base64", - "bitflags 1.3.2", - "serde", -] - -[[package]] -name = "rust-ini" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - -[[package]] -name = "rustix" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" -dependencies = [ - "bitflags 2.9.2", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.60.2", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "rusty-fork" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" -dependencies = [ - "fnv", - "quick-error", - "tempfile", - "wait-timeout", -] - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "serde" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.142" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook-registry" -version = "1.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" -dependencies = [ - "libc", -] - -[[package]] -name = "slab" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "socket2" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" -dependencies = [ - "fastrand", - "getrandom 0.3.3", - "once_cell", - "rustix", - "windows-sys 0.59.0", -] - -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] - -[[package]] -name = "thiserror" -version = "2.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d76d3f064b981389ecb4b6b7f45a0bf9fdac1d5b9204c7bd6714fecc302850" -dependencies = [ - "thiserror-impl 2.0.15", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d29feb33e986b6ea906bd9c3559a856983f92371b3eaa5e83782a351623de0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "tokio" -version = "1.47.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" -dependencies = [ - "backtrace", - "bytes", - "io-uring", - "libc", - "mio", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "slab", - "socket2", - "tokio-macros", - "windows-sys 0.59.0", -] - -[[package]] -name = "tokio-macros" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-util" -version = "0.7.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" -dependencies = [ - "matchers", - "nu-ansi-term", - "once_cell", - "regex-automata", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - -[[package]] -name = "typenum" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - -[[package]] -name = "unarray" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "uuid" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" -dependencies = [ - "getrandom 0.3.3", - "js-sys", - "serde", - "wasm-bindgen", -] - -[[package]] -name = "valuable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "wait-timeout" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" -dependencies = [ - "libc", -] - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" -dependencies = [ - "wit-bindgen-rt", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-implement" -version = "0.60.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-interface" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.3", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - -[[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.2", -] - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "zerocopy" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] diff --git a/seaweedfs-rdma-sidecar/rdma-engine/Cargo.toml b/seaweedfs-rdma-sidecar/rdma-engine/Cargo.toml deleted file mode 100644 index b04934f71..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/Cargo.toml +++ /dev/null @@ -1,74 +0,0 @@ -[package] -name = "rdma-engine" -version = "0.1.0" -edition = "2021" -authors = ["SeaweedFS Team "] -description = "High-performance RDMA engine for SeaweedFS sidecar" -license = "Apache-2.0" - -[[bin]] -name = "rdma-engine-server" -path = "src/main.rs" - -[lib] -name = "rdma_engine" -path = "src/lib.rs" - -[dependencies] -# UCX (Unified Communication X) for high-performance networking -# Much better than direct libibverbs - provides unified API across transports -libc = "0.2" -libloading = "0.8" # Dynamic loading of UCX libraries - -# Async runtime and networking -tokio = { version = "1.0", features = ["full"] } -tokio-util = "0.7" - -# Serialization for IPC -serde = { version = "1.0", features = ["derive"] } -bincode = "1.3" -rmp-serde = "1.1" # MessagePack for efficient IPC - -# Error handling and logging -anyhow = "1.0" -thiserror = "1.0" -tracing = "0.1" -tracing-subscriber = { version = "0.3", features = ["env-filter"] } - -# UUID and time handling -uuid = { version = "1.0", features = ["v4", "serde"] } -chrono = { version = "0.4", features = ["serde"] } - -# Memory management and utilities -memmap2 = "0.9" -bytes = "1.0" -parking_lot = "0.12" # Fast mutexes - -# IPC and networking -nix = { version = "0.27", features = ["mman"] } # Unix domain sockets and system calls -async-trait = "0.1" # Async traits - -# Configuration -clap = { version = "4.0", features = ["derive"] } -config = "0.13" - -[dev-dependencies] -proptest = "1.0" -criterion = "0.5" -tempfile = "3.0" - -[features] -default = ["mock-ucx"] -mock-ucx = [] -real-ucx = [] # UCX integration for production RDMA - -[profile.release] -opt-level = 3 -lto = true -codegen-units = 1 -panic = "abort" - - - -[package.metadata.docs.rs] -features = ["real-rdma"] diff --git a/seaweedfs-rdma-sidecar/rdma-engine/README.md b/seaweedfs-rdma-sidecar/rdma-engine/README.md deleted file mode 100644 index 1c7d575ae..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# UCX-based RDMA Engine for SeaweedFS - -High-performance Rust-based communication engine for SeaweedFS using [UCX (Unified Communication X)](https://github.com/openucx/ucx) framework that provides optimized data transfers across multiple transports including RDMA (InfiniBand/RoCE), TCP, and shared memory. - -## ๐Ÿš€ **Complete Rust RDMA Sidecar Scaffolded!** - -I've successfully created a comprehensive Rust RDMA engine with the following components: - -### โœ… **What's Implemented** - -1. **Complete Project Structure**: - - `src/lib.rs` - Main library with engine management - - `src/main.rs` - Binary entry point with CLI - - `src/error.rs` - Comprehensive error types - - `src/rdma.rs` - RDMA operations (mock & real) - - `src/ipc.rs` - IPC communication with Go sidecar - - `src/session.rs` - Session management - - `src/memory.rs` - Memory management and pooling - -2. **Advanced Features**: - - Mock RDMA implementation for development - - Real RDMA stubs ready for `libibverbs` integration - - High-performance memory management with pooling - - HugePage support for large allocations - - Thread-safe session management with expiration - - MessagePack-based IPC protocol - - Comprehensive error handling and recovery - - Performance monitoring and statistics - -3. **Production-Ready Architecture**: - - Async/await throughout for high concurrency - - Zero-copy memory operations where possible - - Proper resource cleanup and garbage collection - - Signal handling for graceful shutdown - - Configurable via CLI flags and config files - - Extensive logging and metrics - -### ๐Ÿ› ๏ธ **Current Status** - -The scaffolding is **functionally complete** but has some compilation errors that need to be resolved: - -1. **Async Trait Object Issues** - Rust doesn't support async methods in trait objects -2. **Stream Ownership** - BufReader/BufWriter ownership needs fixing -3. **Memory Management** - Some lifetime and cloning issues - -### ๐Ÿ”ง **Next Steps to Complete** - -1. **Fix Compilation Errors** (1-2 hours): - - Replace trait objects with enums for RDMA context - - Fix async trait issues with concrete types - - Resolve memory ownership issues - -2. **Integration with Go Sidecar** (2-4 hours): - - Update Go sidecar to communicate with Rust engine - - Implement Unix domain socket protocol - - Add fallback when Rust engine is unavailable - -3. **RDMA Hardware Integration** (1-2 weeks): - - Add `libibverbs` FFI bindings - - Implement real RDMA operations - - Test on actual InfiniBand hardware - -### ๐Ÿ“Š **Architecture Overview** - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” IPC โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Go Control Plane โ”‚โ—„โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–บโ”‚ Rust Data Plane โ”‚ -โ”‚ โ”‚ ~300ns โ”‚ โ”‚ -โ”‚ โ€ข gRPC Server โ”‚ โ”‚ โ€ข RDMA Operations โ”‚ -โ”‚ โ€ข Session Mgmt โ”‚ โ”‚ โ€ข Memory Mgmt โ”‚ -โ”‚ โ€ข HTTP Fallback โ”‚ โ”‚ โ€ข Hardware Access โ”‚ -โ”‚ โ€ข Error Handling โ”‚ โ”‚ โ€ข Zero-Copy I/O โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### ๐ŸŽฏ **Performance Expectations** - -- **Mock RDMA**: ~150ns per operation (current) -- **Real RDMA**: ~50ns per operation (projected) -- **Memory Operations**: Zero-copy with hugepage support -- **Session Throughput**: 1M+ sessions/second -- **IPC Overhead**: ~300ns (Unix domain sockets) - -## ๐Ÿš€ **Ready for Hardware Integration** - -This Rust RDMA engine provides a **solid foundation** for high-performance RDMA acceleration. The architecture is sound, the error handling is comprehensive, and the memory management is optimized for RDMA workloads. - -**Next milestone**: Fix compilation errors and integrate with the existing Go sidecar for end-to-end testing! ๐ŸŽฏ diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/error.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/error.rs deleted file mode 100644 index be60ef4aa..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/error.rs +++ /dev/null @@ -1,269 +0,0 @@ -//! Error types and handling for the RDMA engine - -// use std::fmt; // Unused for now -use thiserror::Error; - -/// Result type alias for RDMA operations -pub type RdmaResult = Result; - -/// Comprehensive error types for RDMA operations -#[derive(Error, Debug)] -pub enum RdmaError { - /// RDMA device not found or unavailable - #[error("RDMA device '{device}' not found or unavailable")] - DeviceNotFound { device: String }, - - /// Failed to initialize RDMA context - #[error("Failed to initialize RDMA context: {reason}")] - ContextInitFailed { reason: String }, - - /// Failed to allocate protection domain - #[error("Failed to allocate protection domain: {reason}")] - PdAllocFailed { reason: String }, - - /// Failed to create completion queue - #[error("Failed to create completion queue: {reason}")] - CqCreationFailed { reason: String }, - - /// Failed to create queue pair - #[error("Failed to create queue pair: {reason}")] - QpCreationFailed { reason: String }, - - /// Memory registration failed - #[error("Memory registration failed: {reason}")] - MemoryRegFailed { reason: String }, - - /// RDMA operation failed - #[error("RDMA operation failed: {operation}, status: {status}")] - OperationFailed { operation: String, status: i32 }, - - /// Session not found - #[error("Session '{session_id}' not found")] - SessionNotFound { session_id: String }, - - /// Session expired - #[error("Session '{session_id}' has expired")] - SessionExpired { session_id: String }, - - /// Too many active sessions - #[error("Maximum number of sessions ({max_sessions}) exceeded")] - TooManySessions { max_sessions: usize }, - - /// IPC communication error - #[error("IPC communication error: {reason}")] - IpcError { reason: String }, - - /// Serialization/deserialization error - #[error("Serialization error: {reason}")] - SerializationError { reason: String }, - - /// Invalid request parameters - #[error("Invalid request: {reason}")] - InvalidRequest { reason: String }, - - /// Insufficient buffer space - #[error("Insufficient buffer space: requested {requested}, available {available}")] - InsufficientBuffer { requested: usize, available: usize }, - - /// Hardware not supported - #[error("Hardware not supported: {reason}")] - UnsupportedHardware { reason: String }, - - /// System resource exhausted - #[error("System resource exhausted: {resource}")] - ResourceExhausted { resource: String }, - - /// Permission denied - #[error("Permission denied: {operation}")] - PermissionDenied { operation: String }, - - /// Network timeout - #[error("Network timeout after {timeout_ms}ms")] - NetworkTimeout { timeout_ms: u64 }, - - /// I/O error - #[error("I/O error: {0}")] - Io(#[from] std::io::Error), - - /// Generic error for unexpected conditions - #[error("Internal error: {reason}")] - Internal { reason: String }, -} - -impl RdmaError { - /// Create a new DeviceNotFound error - pub fn device_not_found(device: impl Into) -> Self { - Self::DeviceNotFound { device: device.into() } - } - - /// Create a new ContextInitFailed error - pub fn context_init_failed(reason: impl Into) -> Self { - Self::ContextInitFailed { reason: reason.into() } - } - - /// Create a new MemoryRegFailed error - pub fn memory_reg_failed(reason: impl Into) -> Self { - Self::MemoryRegFailed { reason: reason.into() } - } - - /// Create a new OperationFailed error - pub fn operation_failed(operation: impl Into, status: i32) -> Self { - Self::OperationFailed { - operation: operation.into(), - status - } - } - - /// Create a new SessionNotFound error - pub fn session_not_found(session_id: impl Into) -> Self { - Self::SessionNotFound { session_id: session_id.into() } - } - - /// Create a new IpcError - pub fn ipc_error(reason: impl Into) -> Self { - Self::IpcError { reason: reason.into() } - } - - /// Create a new InvalidRequest error - pub fn invalid_request(reason: impl Into) -> Self { - Self::InvalidRequest { reason: reason.into() } - } - - /// Create a new Internal error - pub fn internal(reason: impl Into) -> Self { - Self::Internal { reason: reason.into() } - } - - /// Check if this error is recoverable - pub fn is_recoverable(&self) -> bool { - match self { - // Network and temporary errors are recoverable - Self::NetworkTimeout { .. } | - Self::ResourceExhausted { .. } | - Self::TooManySessions { .. } | - Self::InsufficientBuffer { .. } => true, - - // Session errors are recoverable (can retry with new session) - Self::SessionNotFound { .. } | - Self::SessionExpired { .. } => true, - - // Hardware and system errors are generally not recoverable - Self::DeviceNotFound { .. } | - Self::ContextInitFailed { .. } | - Self::UnsupportedHardware { .. } | - Self::PermissionDenied { .. } => false, - - // IPC errors might be recoverable - Self::IpcError { .. } | - Self::SerializationError { .. } => true, - - // Invalid requests are not recoverable without fixing the request - Self::InvalidRequest { .. } => false, - - // RDMA operation failures might be recoverable - Self::OperationFailed { .. } => true, - - // Memory and resource allocation failures depend on the cause - Self::PdAllocFailed { .. } | - Self::CqCreationFailed { .. } | - Self::QpCreationFailed { .. } | - Self::MemoryRegFailed { .. } => false, - - // I/O errors might be recoverable - Self::Io(_) => true, - - // Internal errors are generally not recoverable - Self::Internal { .. } => false, - } - } - - /// Get error category for metrics and logging - pub fn category(&self) -> &'static str { - match self { - Self::DeviceNotFound { .. } | - Self::ContextInitFailed { .. } | - Self::UnsupportedHardware { .. } => "hardware", - - Self::PdAllocFailed { .. } | - Self::CqCreationFailed { .. } | - Self::QpCreationFailed { .. } | - Self::MemoryRegFailed { .. } => "resource", - - Self::OperationFailed { .. } => "rdma", - - Self::SessionNotFound { .. } | - Self::SessionExpired { .. } | - Self::TooManySessions { .. } => "session", - - Self::IpcError { .. } | - Self::SerializationError { .. } => "ipc", - - Self::InvalidRequest { .. } => "request", - - Self::InsufficientBuffer { .. } | - Self::ResourceExhausted { .. } => "capacity", - - Self::PermissionDenied { .. } => "security", - - Self::NetworkTimeout { .. } => "network", - - Self::Io(_) => "io", - - Self::Internal { .. } => "internal", - } - } -} - -/// Convert from various RDMA library error codes -impl From for RdmaError { - fn from(errno: i32) -> Self { - match errno { - libc::ENODEV => Self::DeviceNotFound { - device: "unknown".to_string() - }, - libc::ENOMEM => Self::ResourceExhausted { - resource: "memory".to_string() - }, - libc::EPERM | libc::EACCES => Self::PermissionDenied { - operation: "RDMA operation".to_string() - }, - libc::ETIMEDOUT => Self::NetworkTimeout { - timeout_ms: 5000 - }, - libc::ENOSPC => Self::InsufficientBuffer { - requested: 0, - available: 0 - }, - _ => Self::Internal { - reason: format!("System error: {}", errno) - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_error_creation() { - let err = RdmaError::device_not_found("mlx5_0"); - assert!(matches!(err, RdmaError::DeviceNotFound { .. })); - assert_eq!(err.category(), "hardware"); - assert!(!err.is_recoverable()); - } - - #[test] - fn test_error_recoverability() { - assert!(RdmaError::NetworkTimeout { timeout_ms: 1000 }.is_recoverable()); - assert!(!RdmaError::DeviceNotFound { device: "test".to_string() }.is_recoverable()); - assert!(RdmaError::SessionExpired { session_id: "test".to_string() }.is_recoverable()); - } - - #[test] - fn test_error_display() { - let err = RdmaError::InvalidRequest { reason: "missing field".to_string() }; - assert!(err.to_string().contains("Invalid request")); - assert!(err.to_string().contains("missing field")); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/ipc.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/ipc.rs deleted file mode 100644 index a578c2d7d..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/ipc.rs +++ /dev/null @@ -1,542 +0,0 @@ -//! IPC (Inter-Process Communication) module for communicating with Go sidecar -//! -//! This module handles high-performance IPC between the Rust RDMA engine and -//! the Go control plane sidecar using Unix domain sockets and MessagePack serialization. - -use crate::{RdmaError, RdmaResult, rdma::RdmaContext, session::SessionManager}; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use std::sync::atomic::{AtomicU64, Ordering}; -use tokio::net::{UnixListener, UnixStream}; -use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter}; -use tracing::{info, debug, error}; -use uuid::Uuid; -use std::path::Path; - -/// Atomic counter for generating unique work request IDs -/// This ensures no hash collisions that could cause incorrect completion handling -static NEXT_WR_ID: AtomicU64 = AtomicU64::new(1); - -/// IPC message types between Go sidecar and Rust RDMA engine -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type", content = "data")] -pub enum IpcMessage { - /// Request to start an RDMA read operation - StartRead(StartReadRequest), - /// Response with RDMA session information - StartReadResponse(StartReadResponse), - - /// Request to complete an RDMA operation - CompleteRead(CompleteReadRequest), - /// Response confirming completion - CompleteReadResponse(CompleteReadResponse), - - /// Request for engine capabilities - GetCapabilities(GetCapabilitiesRequest), - /// Response with engine capabilities - GetCapabilitiesResponse(GetCapabilitiesResponse), - - /// Health check ping - Ping(PingRequest), - /// Ping response - Pong(PongResponse), - - /// Error response - Error(ErrorResponse), -} - -/// Request to start RDMA read operation -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StartReadRequest { - /// Volume ID in SeaweedFS - pub volume_id: u32, - /// Needle ID in SeaweedFS - pub needle_id: u64, - /// Needle cookie for validation - pub cookie: u32, - /// File offset within the needle data - pub offset: u64, - /// Size to read (0 = entire needle) - pub size: u64, - /// Remote memory address from Go sidecar - pub remote_addr: u64, - /// Remote key for RDMA access - pub remote_key: u32, - /// Session timeout in seconds - pub timeout_secs: u64, - /// Authentication token (optional) - pub auth_token: Option, -} - -/// Response with RDMA session details -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct StartReadResponse { - /// Unique session identifier - pub session_id: String, - /// Local buffer address for RDMA - pub local_addr: u64, - /// Local key for RDMA operations - pub local_key: u32, - /// Actual size that will be transferred - pub transfer_size: u64, - /// Expected CRC checksum - pub expected_crc: u32, - /// Session expiration timestamp (Unix nanoseconds) - pub expires_at_ns: u64, -} - -/// Request to complete RDMA operation -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CompleteReadRequest { - /// Session ID to complete - pub session_id: String, - /// Whether the operation was successful - pub success: bool, - /// Actual bytes transferred - pub bytes_transferred: u64, - /// Client-computed CRC (for verification) - pub client_crc: Option, - /// Error message if failed - pub error_message: Option, -} - -/// Response confirming completion -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CompleteReadResponse { - /// Whether completion was successful - pub success: bool, - /// Server-computed CRC for verification - pub server_crc: Option, - /// Any cleanup messages - pub message: Option, -} - -/// Request for engine capabilities -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetCapabilitiesRequest { - /// Client identifier - pub client_id: Option, -} - -/// Response with engine capabilities -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GetCapabilitiesResponse { - /// RDMA device name - pub device_name: String, - /// RDMA device vendor ID - pub vendor_id: u32, - /// Maximum transfer size in bytes - pub max_transfer_size: u64, - /// Maximum concurrent sessions - pub max_sessions: usize, - /// Current active sessions - pub active_sessions: usize, - /// Device port GID - pub port_gid: String, - /// Device port LID - pub port_lid: u16, - /// Supported authentication methods - pub supported_auth: Vec, - /// Engine version - pub version: String, - /// Whether real RDMA hardware is available - pub real_rdma: bool, -} - -/// Health check ping request -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PingRequest { - /// Client timestamp (Unix nanoseconds) - pub timestamp_ns: u64, - /// Client identifier - pub client_id: Option, -} - -/// Ping response -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PongResponse { - /// Original client timestamp - pub client_timestamp_ns: u64, - /// Server timestamp (Unix nanoseconds) - pub server_timestamp_ns: u64, - /// Round-trip time in nanoseconds (server perspective) - pub server_rtt_ns: u64, -} - -/// Error response -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ErrorResponse { - /// Error code - pub code: String, - /// Human-readable error message - pub message: String, - /// Error category - pub category: String, - /// Whether the error is recoverable - pub recoverable: bool, -} - -impl From<&RdmaError> for ErrorResponse { - fn from(error: &RdmaError) -> Self { - Self { - code: format!("{:?}", error), - message: error.to_string(), - category: error.category().to_string(), - recoverable: error.is_recoverable(), - } - } -} - -/// IPC server handling communication with Go sidecar -pub struct IpcServer { - socket_path: String, - listener: Option, - rdma_context: Arc, - session_manager: Arc, - shutdown_flag: Arc>, -} - -impl IpcServer { - /// Create new IPC server - pub async fn new( - socket_path: &str, - rdma_context: Arc, - session_manager: Arc, - ) -> RdmaResult { - // Remove existing socket if it exists - if Path::new(socket_path).exists() { - std::fs::remove_file(socket_path) - .map_err(|e| RdmaError::ipc_error(format!("Failed to remove existing socket: {}", e)))?; - } - - Ok(Self { - socket_path: socket_path.to_string(), - listener: None, - rdma_context, - session_manager, - shutdown_flag: Arc::new(parking_lot::RwLock::new(false)), - }) - } - - /// Start the IPC server - pub async fn run(&mut self) -> RdmaResult<()> { - let listener = UnixListener::bind(&self.socket_path) - .map_err(|e| RdmaError::ipc_error(format!("Failed to bind Unix socket: {}", e)))?; - - info!("๐ŸŽฏ IPC server listening on: {}", self.socket_path); - self.listener = Some(listener); - - if let Some(ref listener) = self.listener { - loop { - // Check shutdown flag - if *self.shutdown_flag.read() { - info!("IPC server shutting down"); - break; - } - - // Accept connection with timeout - let accept_result = tokio::time::timeout( - tokio::time::Duration::from_millis(100), - listener.accept() - ).await; - - match accept_result { - Ok(Ok((stream, addr))) => { - debug!("New IPC connection from: {:?}", addr); - - // Spawn handler for this connection - let rdma_context = self.rdma_context.clone(); - let session_manager = self.session_manager.clone(); - let shutdown_flag = self.shutdown_flag.clone(); - - tokio::spawn(async move { - if let Err(e) = Self::handle_connection(stream, rdma_context, session_manager, shutdown_flag).await { - error!("IPC connection error: {}", e); - } - }); - } - Ok(Err(e)) => { - error!("Failed to accept IPC connection: {}", e); - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - } - Err(_) => { - // Timeout - continue loop to check shutdown flag - continue; - } - } - } - } - - Ok(()) - } - - /// Handle a single IPC connection - async fn handle_connection( - stream: UnixStream, - rdma_context: Arc, - session_manager: Arc, - shutdown_flag: Arc>, - ) -> RdmaResult<()> { - let (reader_half, writer_half) = stream.into_split(); - let mut reader = BufReader::new(reader_half); - let mut writer = BufWriter::new(writer_half); - - let mut buffer = Vec::with_capacity(4096); - - loop { - // Check shutdown - if *shutdown_flag.read() { - break; - } - - // Read message length (4 bytes) - let mut len_bytes = [0u8; 4]; - match tokio::time::timeout( - tokio::time::Duration::from_millis(100), - reader.read_exact(&mut len_bytes) - ).await { - Ok(Ok(_)) => {}, - Ok(Err(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => { - debug!("IPC connection closed by peer"); - break; - } - Ok(Err(e)) => return Err(RdmaError::ipc_error(format!("Read error: {}", e))), - Err(_) => continue, // Timeout, check shutdown flag - } - - let msg_len = u32::from_le_bytes(len_bytes) as usize; - if msg_len > 1024 * 1024 { // 1MB max message size - return Err(RdmaError::ipc_error("Message too large")); - } - - // Read message data - buffer.clear(); - buffer.resize(msg_len, 0); - reader.read_exact(&mut buffer).await - .map_err(|e| RdmaError::ipc_error(format!("Failed to read message: {}", e)))?; - - // Deserialize message - let request: IpcMessage = rmp_serde::from_slice(&buffer) - .map_err(|e| RdmaError::SerializationError { reason: e.to_string() })?; - - debug!("Received IPC message: {:?}", request); - - // Process message - let response = Self::process_message( - request, - &rdma_context, - &session_manager, - ).await; - - // Serialize response - let response_data = rmp_serde::to_vec(&response) - .map_err(|e| RdmaError::SerializationError { reason: e.to_string() })?; - - // Send response - let response_len = (response_data.len() as u32).to_le_bytes(); - writer.write_all(&response_len).await - .map_err(|e| RdmaError::ipc_error(format!("Failed to write response length: {}", e)))?; - writer.write_all(&response_data).await - .map_err(|e| RdmaError::ipc_error(format!("Failed to write response: {}", e)))?; - writer.flush().await - .map_err(|e| RdmaError::ipc_error(format!("Failed to flush response: {}", e)))?; - - debug!("Sent IPC response"); - } - - Ok(()) - } - - /// Process IPC message and generate response - async fn process_message( - message: IpcMessage, - rdma_context: &Arc, - session_manager: &Arc, - ) -> IpcMessage { - match message { - IpcMessage::Ping(req) => { - let server_timestamp = chrono::Utc::now().timestamp_nanos_opt().unwrap_or(0) as u64; - IpcMessage::Pong(PongResponse { - client_timestamp_ns: req.timestamp_ns, - server_timestamp_ns: server_timestamp, - server_rtt_ns: server_timestamp.saturating_sub(req.timestamp_ns), - }) - } - - IpcMessage::GetCapabilities(_req) => { - let device_info = rdma_context.device_info(); - let active_sessions = session_manager.active_session_count().await; - - IpcMessage::GetCapabilitiesResponse(GetCapabilitiesResponse { - device_name: device_info.name.clone(), - vendor_id: device_info.vendor_id, - max_transfer_size: device_info.max_mr_size, - max_sessions: session_manager.max_sessions(), - active_sessions, - port_gid: device_info.port_gid.clone(), - port_lid: device_info.port_lid, - supported_auth: vec!["none".to_string()], - version: env!("CARGO_PKG_VERSION").to_string(), - real_rdma: cfg!(feature = "real-ucx"), - }) - } - - IpcMessage::StartRead(req) => { - match Self::handle_start_read(req, rdma_context, session_manager).await { - Ok(response) => IpcMessage::StartReadResponse(response), - Err(error) => IpcMessage::Error(ErrorResponse::from(&error)), - } - } - - IpcMessage::CompleteRead(req) => { - match Self::handle_complete_read(req, session_manager).await { - Ok(response) => IpcMessage::CompleteReadResponse(response), - Err(error) => IpcMessage::Error(ErrorResponse::from(&error)), - } - } - - _ => IpcMessage::Error(ErrorResponse { - code: "UNSUPPORTED_MESSAGE".to_string(), - message: "Unsupported message type".to_string(), - category: "request".to_string(), - recoverable: true, - }), - } - } - - /// Handle StartRead request - async fn handle_start_read( - req: StartReadRequest, - rdma_context: &Arc, - session_manager: &Arc, - ) -> RdmaResult { - info!("๐Ÿš€ Starting RDMA read: volume={}, needle={}, size={}", - req.volume_id, req.needle_id, req.size); - - // Create session - let session_id = Uuid::new_v4().to_string(); - let transfer_size = if req.size == 0 { 65536 } else { req.size }; // Default 64KB - - // Allocate local buffer - let buffer = vec![0u8; transfer_size as usize]; - let local_addr = buffer.as_ptr() as u64; - - // Register memory for RDMA - let memory_region = rdma_context.register_memory(local_addr, transfer_size as usize).await?; - - // Create and store session - session_manager.create_session( - session_id.clone(), - req.volume_id, - req.needle_id, - req.remote_addr, - req.remote_key, - transfer_size, - buffer, - memory_region.clone(), - chrono::Duration::seconds(req.timeout_secs as i64), - ).await?; - - // Perform RDMA read with unique work request ID - // Use atomic counter to avoid hash collisions that could cause incorrect completion handling - let wr_id = NEXT_WR_ID.fetch_add(1, Ordering::Relaxed); - rdma_context.post_read( - local_addr, - req.remote_addr, - req.remote_key, - transfer_size as usize, - wr_id, - ).await?; - - // Poll for completion - let completions = rdma_context.poll_completion(1).await?; - if completions.is_empty() { - return Err(RdmaError::operation_failed("RDMA read", -1)); - } - - let completion = &completions[0]; - if completion.status != crate::rdma::CompletionStatus::Success { - return Err(RdmaError::operation_failed("RDMA read", completion.status as i32)); - } - - info!("โœ… RDMA read completed: {} bytes", completion.byte_len); - - let expires_at = chrono::Utc::now() + chrono::Duration::seconds(req.timeout_secs as i64); - - Ok(StartReadResponse { - session_id, - local_addr, - local_key: memory_region.lkey, - transfer_size, - expected_crc: 0x12345678, // Mock CRC - expires_at_ns: expires_at.timestamp_nanos_opt().unwrap_or(0) as u64, - }) - } - - /// Handle CompleteRead request - async fn handle_complete_read( - req: CompleteReadRequest, - session_manager: &Arc, - ) -> RdmaResult { - info!("๐Ÿ Completing RDMA read session: {}", req.session_id); - - // Clean up session - session_manager.remove_session(&req.session_id).await?; - - Ok(CompleteReadResponse { - success: req.success, - server_crc: Some(0x12345678), // Mock CRC - message: Some("Session completed successfully".to_string()), - }) - } - - /// Shutdown the IPC server - pub async fn shutdown(&mut self) -> RdmaResult<()> { - info!("Shutting down IPC server"); - *self.shutdown_flag.write() = true; - - // Remove socket file - if Path::new(&self.socket_path).exists() { - std::fs::remove_file(&self.socket_path) - .map_err(|e| RdmaError::ipc_error(format!("Failed to remove socket file: {}", e)))?; - } - - Ok(()) - } -} - - - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_error_response_conversion() { - let error = RdmaError::device_not_found("mlx5_0"); - let response = ErrorResponse::from(&error); - - assert!(response.message.contains("mlx5_0")); - assert_eq!(response.category, "hardware"); - assert!(!response.recoverable); - } - - #[test] - fn test_message_serialization() { - let request = IpcMessage::Ping(PingRequest { - timestamp_ns: 12345, - client_id: Some("test".to_string()), - }); - - let serialized = rmp_serde::to_vec(&request).unwrap(); - let deserialized: IpcMessage = rmp_serde::from_slice(&serialized).unwrap(); - - match deserialized { - IpcMessage::Ping(ping) => { - assert_eq!(ping.timestamp_ns, 12345); - assert_eq!(ping.client_id, Some("test".to_string())); - } - _ => panic!("Wrong message type"), - } - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/lib.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/lib.rs deleted file mode 100644 index c92dcf91a..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/lib.rs +++ /dev/null @@ -1,153 +0,0 @@ -//! High-Performance RDMA Engine for SeaweedFS -//! -//! This crate provides a high-performance RDMA (Remote Direct Memory Access) engine -//! designed to accelerate data transfer operations in SeaweedFS. It communicates with -//! the Go-based sidecar via IPC and handles the performance-critical RDMA operations. -//! -//! # Architecture -//! -//! ```text -//! โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” IPC โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -//! โ”‚ Go Control Plane โ”‚โ—„โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–บโ”‚ Rust Data Plane โ”‚ -//! โ”‚ โ”‚ ~300ns โ”‚ โ”‚ -//! โ”‚ โ€ข gRPC Server โ”‚ โ”‚ โ€ข RDMA Operations โ”‚ -//! โ”‚ โ€ข Session Mgmt โ”‚ โ”‚ โ€ข Memory Mgmt โ”‚ -//! โ”‚ โ€ข HTTP Fallback โ”‚ โ”‚ โ€ข Hardware Access โ”‚ -//! โ”‚ โ€ข Error Handling โ”‚ โ”‚ โ€ข Zero-Copy I/O โ”‚ -//! โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -//! ``` -//! -//! # Features -//! -//! - `mock-rdma` (default): Mock RDMA operations for testing and development -//! - `real-rdma`: Real RDMA hardware integration using rdma-core bindings - -use std::sync::Arc; -use anyhow::Result; - -pub mod ucx; -pub mod rdma; -pub mod ipc; -pub mod session; -pub mod memory; -pub mod error; - -pub use error::{RdmaError, RdmaResult}; - -/// Configuration for the RDMA engine -#[derive(Debug, Clone)] -pub struct RdmaEngineConfig { - /// RDMA device name (e.g., "mlx5_0") - pub device_name: String, - /// RDMA port number - pub port: u16, - /// Maximum number of concurrent sessions - pub max_sessions: usize, - /// Session timeout in seconds - pub session_timeout_secs: u64, - /// Memory buffer size in bytes - pub buffer_size: usize, - /// IPC socket path - pub ipc_socket_path: String, - /// Enable debug logging - pub debug: bool, -} - -impl Default for RdmaEngineConfig { - fn default() -> Self { - Self { - device_name: "mlx5_0".to_string(), - port: 18515, - max_sessions: 1000, - session_timeout_secs: 300, // 5 minutes - buffer_size: 1024 * 1024 * 1024, // 1GB - ipc_socket_path: "/tmp/rdma-engine.sock".to_string(), - debug: false, - } - } -} - -/// Main RDMA engine instance -pub struct RdmaEngine { - config: RdmaEngineConfig, - rdma_context: Arc, - session_manager: Arc, - ipc_server: Option, -} - -impl RdmaEngine { - /// Create a new RDMA engine with the given configuration - pub async fn new(config: RdmaEngineConfig) -> Result { - tracing::info!("Initializing RDMA engine with config: {:?}", config); - - // Initialize RDMA context - let rdma_context = Arc::new(rdma::RdmaContext::new(&config).await?); - - // Initialize session manager - let session_manager = Arc::new(session::SessionManager::new( - config.max_sessions, - std::time::Duration::from_secs(config.session_timeout_secs), - )); - - Ok(Self { - config, - rdma_context, - session_manager, - ipc_server: None, - }) - } - - /// Start the RDMA engine server - pub async fn run(&mut self) -> Result<()> { - tracing::info!("Starting RDMA engine server on {}", self.config.ipc_socket_path); - - // Start IPC server - let ipc_server = ipc::IpcServer::new( - &self.config.ipc_socket_path, - self.rdma_context.clone(), - self.session_manager.clone(), - ).await?; - - self.ipc_server = Some(ipc_server); - - // Start session cleanup task - let session_manager = self.session_manager.clone(); - tokio::spawn(async move { - session_manager.start_cleanup_task().await; - }); - - // Run IPC server - if let Some(ref mut server) = self.ipc_server { - server.run().await?; - } - - Ok(()) - } - - /// Shutdown the RDMA engine - pub async fn shutdown(&mut self) -> Result<()> { - tracing::info!("Shutting down RDMA engine"); - - if let Some(ref mut server) = self.ipc_server { - server.shutdown().await?; - } - - self.session_manager.shutdown().await; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_rdma_engine_creation() { - let config = RdmaEngineConfig::default(); - let result = RdmaEngine::new(config).await; - - // Should succeed with mock RDMA - assert!(result.is_ok()); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/main.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/main.rs deleted file mode 100644 index 996d3a9d5..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/main.rs +++ /dev/null @@ -1,175 +0,0 @@ -//! RDMA Engine Server -//! -//! High-performance RDMA engine server that communicates with the Go sidecar -//! via IPC and handles RDMA operations with zero-copy semantics. -//! -//! Usage: -//! ```bash -//! rdma-engine-server --device mlx5_0 --port 18515 --ipc-socket /tmp/rdma-engine.sock -//! ``` - -use clap::Parser; -use rdma_engine::{RdmaEngine, RdmaEngineConfig}; -use std::path::PathBuf; -use tracing::{info, error}; -use tracing_subscriber::{EnvFilter, fmt::layer, prelude::*}; - -#[derive(Parser)] -#[command( - name = "rdma-engine-server", - about = "High-performance RDMA engine for SeaweedFS", - version = env!("CARGO_PKG_VERSION") -)] -struct Args { - /// UCX device name preference (e.g., mlx5_0, or 'auto' for UCX auto-selection) - #[arg(short, long, default_value = "auto")] - device: String, - - /// RDMA port number - #[arg(short, long, default_value_t = 18515)] - port: u16, - - /// Maximum number of concurrent sessions - #[arg(long, default_value_t = 1000)] - max_sessions: usize, - - /// Session timeout in seconds - #[arg(long, default_value_t = 300)] - session_timeout: u64, - - /// Memory buffer size in bytes - #[arg(long, default_value_t = 1024 * 1024 * 1024)] - buffer_size: usize, - - /// IPC socket path - #[arg(long, default_value = "/tmp/rdma-engine.sock")] - ipc_socket: PathBuf, - - /// Enable debug logging - #[arg(long)] - debug: bool, - - /// Configuration file path - #[arg(short, long)] - config: Option, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let args = Args::parse(); - - // Initialize tracing - let filter = if args.debug { - EnvFilter::try_from_default_env() - .or_else(|_| EnvFilter::try_new("debug")) - .unwrap() - } else { - EnvFilter::try_from_default_env() - .or_else(|_| EnvFilter::try_new("info")) - .unwrap() - }; - - tracing_subscriber::registry() - .with(layer().with_target(false)) - .with(filter) - .init(); - - info!("๐Ÿš€ Starting SeaweedFS UCX RDMA Engine Server"); - info!(" Version: {}", env!("CARGO_PKG_VERSION")); - info!(" UCX Device Preference: {}", args.device); - info!(" Port: {}", args.port); - info!(" Max Sessions: {}", args.max_sessions); - info!(" Buffer Size: {} bytes", args.buffer_size); - info!(" IPC Socket: {}", args.ipc_socket.display()); - info!(" Debug Mode: {}", args.debug); - - // Load configuration - let config = RdmaEngineConfig { - device_name: args.device, - port: args.port, - max_sessions: args.max_sessions, - session_timeout_secs: args.session_timeout, - buffer_size: args.buffer_size, - ipc_socket_path: args.ipc_socket.to_string_lossy().to_string(), - debug: args.debug, - }; - - // Override with config file if provided - if let Some(config_path) = args.config { - info!("Loading configuration from: {}", config_path.display()); - // TODO: Implement configuration file loading - } - - // Create and run RDMA engine - let mut engine = match RdmaEngine::new(config).await { - Ok(engine) => { - info!("โœ… RDMA engine initialized successfully"); - engine - } - Err(e) => { - error!("โŒ Failed to initialize RDMA engine: {}", e); - return Err(e); - } - }; - - // Set up signal handlers for graceful shutdown - let mut sigterm = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; - let mut sigint = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::interrupt())?; - - // Run engine in background - let engine_handle = tokio::spawn(async move { - if let Err(e) = engine.run().await { - error!("RDMA engine error: {}", e); - return Err(e); - } - Ok(()) - }); - - info!("๐ŸŽฏ RDMA engine is running and ready to accept connections"); - info!(" Send SIGTERM or SIGINT to shutdown gracefully"); - - // Wait for shutdown signal - tokio::select! { - _ = sigterm.recv() => { - info!("๐Ÿ“ก Received SIGTERM, shutting down gracefully"); - } - _ = sigint.recv() => { - info!("๐Ÿ“ก Received SIGINT (Ctrl+C), shutting down gracefully"); - } - result = engine_handle => { - match result { - Ok(Ok(())) => info!("๐Ÿ RDMA engine completed successfully"), - Ok(Err(e)) => { - error!("โŒ RDMA engine failed: {}", e); - return Err(e); - } - Err(e) => { - error!("โŒ RDMA engine task panicked: {}", e); - return Err(anyhow::anyhow!("Engine task panicked: {}", e)); - } - } - } - } - - info!("๐Ÿ›‘ RDMA engine server shut down complete"); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_args_parsing() { - let args = Args::try_parse_from(&[ - "rdma-engine-server", - "--device", "mlx5_0", - "--port", "18515", - "--debug" - ]).unwrap(); - - assert_eq!(args.device, "mlx5_0"); - assert_eq!(args.port, 18515); - assert!(args.debug); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/memory.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/memory.rs deleted file mode 100644 index 17a9a5b1d..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/memory.rs +++ /dev/null @@ -1,630 +0,0 @@ -//! Memory management for RDMA operations -//! -//! This module provides efficient memory allocation, registration, and management -//! for RDMA operations with zero-copy semantics and proper cleanup. - -use crate::{RdmaError, RdmaResult}; -use memmap2::MmapMut; -use parking_lot::RwLock; -use std::collections::HashMap; -use std::sync::Arc; -use tracing::{debug, info, warn}; - -/// Memory pool for efficient buffer allocation -pub struct MemoryPool { - /// Pre-allocated memory regions by size - pools: RwLock>>, - /// Total allocated memory in bytes - total_allocated: RwLock, - /// Maximum pool size per buffer size - max_pool_size: usize, - /// Maximum total memory usage - max_total_memory: usize, - /// Statistics - stats: RwLock, -} - -/// Statistics for memory pool -#[derive(Debug, Clone, Default)] -pub struct MemoryPoolStats { - /// Total allocations requested - pub total_allocations: u64, - /// Total deallocations - pub total_deallocations: u64, - /// Cache hits (reused buffers) - pub cache_hits: u64, - /// Cache misses (new allocations) - pub cache_misses: u64, - /// Current active allocations - pub active_allocations: usize, - /// Peak memory usage in bytes - pub peak_memory_usage: usize, -} - -/// A pooled memory buffer -pub struct PooledBuffer { - /// Raw buffer data - data: Vec, - /// Size of the buffer - size: usize, - /// Whether the buffer is currently in use - in_use: bool, - /// Creation timestamp - created_at: std::time::Instant, -} - -impl PooledBuffer { - /// Create new pooled buffer - fn new(size: usize) -> Self { - Self { - data: vec![0u8; size], - size, - in_use: false, - created_at: std::time::Instant::now(), - } - } - - /// Get buffer data as slice - pub fn as_slice(&self) -> &[u8] { - &self.data - } - - /// Get buffer data as mutable slice - pub fn as_mut_slice(&mut self) -> &mut [u8] { - &mut self.data - } - - /// Get buffer size - pub fn size(&self) -> usize { - self.size - } - - /// Get buffer age - pub fn age(&self) -> std::time::Duration { - self.created_at.elapsed() - } - - /// Get raw pointer to buffer data - pub fn as_ptr(&self) -> *const u8 { - self.data.as_ptr() - } - - /// Get mutable raw pointer to buffer data - pub fn as_mut_ptr(&mut self) -> *mut u8 { - self.data.as_mut_ptr() - } -} - -impl MemoryPool { - /// Create new memory pool - pub fn new(max_pool_size: usize, max_total_memory: usize) -> Self { - info!("๐Ÿง  Memory pool initialized: max_pool_size={}, max_total_memory={} bytes", - max_pool_size, max_total_memory); - - Self { - pools: RwLock::new(HashMap::new()), - total_allocated: RwLock::new(0), - max_pool_size, - max_total_memory, - stats: RwLock::new(MemoryPoolStats::default()), - } - } - - /// Allocate buffer from pool - pub fn allocate(&self, size: usize) -> RdmaResult>> { - // Round up to next power of 2 for better pooling - let pool_size = size.next_power_of_two(); - - { - let mut stats = self.stats.write(); - stats.total_allocations += 1; - } - - // Try to get buffer from pool first - { - let mut pools = self.pools.write(); - if let Some(pool) = pools.get_mut(&pool_size) { - // Find available buffer in pool - for buffer in pool.iter_mut() { - if !buffer.in_use { - buffer.in_use = true; - - let mut stats = self.stats.write(); - stats.cache_hits += 1; - stats.active_allocations += 1; - - debug!("๐Ÿ“ฆ Reused buffer from pool: size={}", pool_size); - return Ok(Arc::new(RwLock::new(std::mem::replace( - buffer, - PooledBuffer::new(0) // Placeholder - )))); - } - } - } - } - - // No available buffer in pool, create new one - let total_allocated = *self.total_allocated.read(); - if total_allocated + pool_size > self.max_total_memory { - return Err(RdmaError::ResourceExhausted { - resource: "memory".to_string() - }); - } - - let mut buffer = PooledBuffer::new(pool_size); - buffer.in_use = true; - - // Update allocation tracking - let new_total = { - let mut total = self.total_allocated.write(); - *total += pool_size; - *total - }; - - { - let mut stats = self.stats.write(); - stats.cache_misses += 1; - stats.active_allocations += 1; - if new_total > stats.peak_memory_usage { - stats.peak_memory_usage = new_total; - } - } - - debug!("๐Ÿ†• Allocated new buffer: size={}, total_allocated={}", - pool_size, new_total); - - Ok(Arc::new(RwLock::new(buffer))) - } - - /// Return buffer to pool - pub fn deallocate(&self, buffer: Arc>) -> RdmaResult<()> { - let buffer_size = { - let buf = buffer.read(); - buf.size() - }; - - { - let mut stats = self.stats.write(); - stats.total_deallocations += 1; - stats.active_allocations = stats.active_allocations.saturating_sub(1); - } - - // Try to return buffer to pool - { - let mut pools = self.pools.write(); - let pool = pools.entry(buffer_size).or_insert_with(Vec::new); - - if pool.len() < self.max_pool_size { - // Reset buffer state and return to pool - if let Ok(buf) = Arc::try_unwrap(buffer) { - let mut buf = buf.into_inner(); - buf.in_use = false; - buf.data.fill(0); // Clear data for security - pool.push(buf); - - debug!("โ™ป๏ธ Returned buffer to pool: size={}", buffer_size); - return Ok(()); - } - } - } - - // Pool is full or buffer is still referenced, just track deallocation - { - let mut total = self.total_allocated.write(); - *total = total.saturating_sub(buffer_size); - } - - debug!("๐Ÿ—‘๏ธ Buffer deallocated (not pooled): size={}", buffer_size); - Ok(()) - } - - /// Get memory pool statistics - pub fn stats(&self) -> MemoryPoolStats { - self.stats.read().clone() - } - - /// Get current memory usage - pub fn current_usage(&self) -> usize { - *self.total_allocated.read() - } - - /// Clean up old unused buffers from pools - pub fn cleanup_old_buffers(&self, max_age: std::time::Duration) { - let mut cleaned_count = 0; - let mut cleaned_bytes = 0; - - { - let mut pools = self.pools.write(); - for (size, pool) in pools.iter_mut() { - pool.retain(|buffer| { - if buffer.age() > max_age && !buffer.in_use { - cleaned_count += 1; - cleaned_bytes += size; - false - } else { - true - } - }); - } - } - - if cleaned_count > 0 { - { - let mut total = self.total_allocated.write(); - *total = total.saturating_sub(cleaned_bytes); - } - - info!("๐Ÿงน Cleaned up {} old buffers, freed {} bytes", - cleaned_count, cleaned_bytes); - } - } -} - -/// RDMA-specific memory manager -pub struct RdmaMemoryManager { - /// General purpose memory pool - pool: MemoryPool, - /// Memory-mapped regions for large allocations - mmapped_regions: RwLock>, - /// HugePage allocations (if available) - hugepage_regions: RwLock>, - /// Configuration - config: MemoryConfig, -} - -/// Memory configuration -#[derive(Debug, Clone)] -pub struct MemoryConfig { - /// Use hugepages for large allocations - pub use_hugepages: bool, - /// Hugepage size in bytes - pub hugepage_size: usize, - /// Memory pool settings - pub pool_max_size: usize, - /// Maximum total memory usage - pub max_total_memory: usize, - /// Buffer cleanup interval - pub cleanup_interval_secs: u64, -} - -impl Default for MemoryConfig { - fn default() -> Self { - Self { - use_hugepages: true, - hugepage_size: 2 * 1024 * 1024, // 2MB - pool_max_size: 1000, - max_total_memory: 8 * 1024 * 1024 * 1024, // 8GB - cleanup_interval_secs: 300, // 5 minutes - } - } -} - -/// Memory-mapped region -#[allow(dead_code)] -struct MmapRegion { - mmap: MmapMut, - size: usize, - created_at: std::time::Instant, -} - -/// HugePage memory region -#[allow(dead_code)] -struct HugePageRegion { - addr: *mut u8, - size: usize, - created_at: std::time::Instant, -} - -unsafe impl Send for HugePageRegion {} -unsafe impl Sync for HugePageRegion {} - -impl RdmaMemoryManager { - /// Create new RDMA memory manager - pub fn new(config: MemoryConfig) -> Self { - let pool = MemoryPool::new(config.pool_max_size, config.max_total_memory); - - Self { - pool, - mmapped_regions: RwLock::new(HashMap::new()), - hugepage_regions: RwLock::new(HashMap::new()), - config, - } - } - - /// Allocate memory optimized for RDMA operations - pub fn allocate_rdma_buffer(&self, size: usize) -> RdmaResult { - if size >= self.config.hugepage_size && self.config.use_hugepages { - self.allocate_hugepage_buffer(size) - } else if size >= 64 * 1024 { // Use mmap for large buffers - self.allocate_mmap_buffer(size) - } else { - self.allocate_pool_buffer(size) - } - } - - /// Allocate buffer from memory pool - fn allocate_pool_buffer(&self, size: usize) -> RdmaResult { - let buffer = self.pool.allocate(size)?; - Ok(RdmaBuffer::Pool { buffer, size }) - } - - /// Allocate memory-mapped buffer - fn allocate_mmap_buffer(&self, size: usize) -> RdmaResult { - let mmap = MmapMut::map_anon(size) - .map_err(|e| RdmaError::memory_reg_failed(format!("mmap failed: {}", e)))?; - - let addr = mmap.as_ptr() as u64; - let region = MmapRegion { - mmap, - size, - created_at: std::time::Instant::now(), - }; - - { - let mut regions = self.mmapped_regions.write(); - regions.insert(addr, region); - } - - debug!("๐Ÿ—บ๏ธ Allocated mmap buffer: addr=0x{:x}, size={}", addr, size); - Ok(RdmaBuffer::Mmap { addr, size }) - } - - /// Allocate hugepage buffer (Linux-specific) - fn allocate_hugepage_buffer(&self, size: usize) -> RdmaResult { - #[cfg(target_os = "linux")] - { - use nix::sys::mman::{mmap, MapFlags, ProtFlags}; - - // Round up to hugepage boundary - let aligned_size = (size + self.config.hugepage_size - 1) & !(self.config.hugepage_size - 1); - - let addr = unsafe { - // For anonymous mapping, we can use -1 as the file descriptor - use std::os::fd::BorrowedFd; - let fake_fd = BorrowedFd::borrow_raw(-1); // Anonymous mapping uses -1 - - mmap( - None, // ptr::null_mut() -> None - std::num::NonZero::new(aligned_size).unwrap(), // aligned_size -> NonZero - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_HUGETLB, - Some(&fake_fd), // Use borrowed FD for -1 wrapped in Some - 0, - ) - }; - - match addr { - Ok(addr) => { - let addr_u64 = addr as u64; - let region = HugePageRegion { - addr: addr as *mut u8, - size: aligned_size, - created_at: std::time::Instant::now(), - }; - - { - let mut regions = self.hugepage_regions.write(); - regions.insert(addr_u64, region); - } - - info!("๐Ÿ”ฅ Allocated hugepage buffer: addr=0x{:x}, size={}", addr_u64, aligned_size); - Ok(RdmaBuffer::HugePage { addr: addr_u64, size: aligned_size }) - } - Err(_) => { - warn!("Failed to allocate hugepage buffer, falling back to mmap"); - self.allocate_mmap_buffer(size) - } - } - } - - #[cfg(not(target_os = "linux"))] - { - warn!("HugePages not supported on this platform, using mmap"); - self.allocate_mmap_buffer(size) - } - } - - /// Deallocate RDMA buffer - pub fn deallocate_buffer(&self, buffer: RdmaBuffer) -> RdmaResult<()> { - match buffer { - RdmaBuffer::Pool { buffer, .. } => { - self.pool.deallocate(buffer) - } - RdmaBuffer::Mmap { addr, .. } => { - let mut regions = self.mmapped_regions.write(); - regions.remove(&addr); - debug!("๐Ÿ—‘๏ธ Deallocated mmap buffer: addr=0x{:x}", addr); - Ok(()) - } - RdmaBuffer::HugePage { addr, size } => { - { - let mut regions = self.hugepage_regions.write(); - regions.remove(&addr); - } - - #[cfg(target_os = "linux")] - { - use nix::sys::mman::munmap; - unsafe { - let _ = munmap(addr as *mut std::ffi::c_void, size); - } - } - - debug!("๐Ÿ—‘๏ธ Deallocated hugepage buffer: addr=0x{:x}, size={}", addr, size); - Ok(()) - } - } - } - - /// Get memory manager statistics - pub fn stats(&self) -> MemoryManagerStats { - let pool_stats = self.pool.stats(); - let mmap_count = self.mmapped_regions.read().len(); - let hugepage_count = self.hugepage_regions.read().len(); - - MemoryManagerStats { - pool_stats, - mmap_regions: mmap_count, - hugepage_regions: hugepage_count, - total_memory_usage: self.pool.current_usage(), - } - } - - /// Start background cleanup task - pub async fn start_cleanup_task(&self) -> tokio::task::JoinHandle<()> { - let pool = MemoryPool::new(self.config.pool_max_size, self.config.max_total_memory); - let cleanup_interval = std::time::Duration::from_secs(self.config.cleanup_interval_secs); - - tokio::spawn(async move { - let mut interval = tokio::time::interval( - tokio::time::Duration::from_secs(300) // 5 minutes - ); - - loop { - interval.tick().await; - pool.cleanup_old_buffers(cleanup_interval); - } - }) - } -} - -/// RDMA buffer types -pub enum RdmaBuffer { - /// Buffer from memory pool - Pool { - buffer: Arc>, - size: usize, - }, - /// Memory-mapped buffer - Mmap { - addr: u64, - size: usize, - }, - /// HugePage buffer - HugePage { - addr: u64, - size: usize, - }, -} - -impl RdmaBuffer { - /// Get buffer address - pub fn addr(&self) -> u64 { - match self { - Self::Pool { buffer, .. } => { - buffer.read().as_ptr() as u64 - } - Self::Mmap { addr, .. } => *addr, - Self::HugePage { addr, .. } => *addr, - } - } - - /// Get buffer size - pub fn size(&self) -> usize { - match self { - Self::Pool { size, .. } => *size, - Self::Mmap { size, .. } => *size, - Self::HugePage { size, .. } => *size, - } - } - - /// Get buffer as Vec (copy to avoid lifetime issues) - pub fn to_vec(&self) -> Vec { - match self { - Self::Pool { buffer, .. } => { - buffer.read().as_slice().to_vec() - } - Self::Mmap { addr, size } => { - unsafe { - let slice = std::slice::from_raw_parts(*addr as *const u8, *size); - slice.to_vec() - } - } - Self::HugePage { addr, size } => { - unsafe { - let slice = std::slice::from_raw_parts(*addr as *const u8, *size); - slice.to_vec() - } - } - } - } - - /// Get buffer type name - pub fn buffer_type(&self) -> &'static str { - match self { - Self::Pool { .. } => "pool", - Self::Mmap { .. } => "mmap", - Self::HugePage { .. } => "hugepage", - } - } -} - -/// Memory manager statistics -#[derive(Debug, Clone)] -pub struct MemoryManagerStats { - /// Pool statistics - pub pool_stats: MemoryPoolStats, - /// Number of mmap regions - pub mmap_regions: usize, - /// Number of hugepage regions - pub hugepage_regions: usize, - /// Total memory usage in bytes - pub total_memory_usage: usize, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_memory_pool_allocation() { - let pool = MemoryPool::new(10, 1024 * 1024); - - let buffer1 = pool.allocate(4096).unwrap(); - let buffer2 = pool.allocate(4096).unwrap(); - - assert_eq!(buffer1.read().size(), 4096); - assert_eq!(buffer2.read().size(), 4096); - - let stats = pool.stats(); - assert_eq!(stats.total_allocations, 2); - assert_eq!(stats.cache_misses, 2); - } - - #[test] - fn test_memory_pool_reuse() { - let pool = MemoryPool::new(10, 1024 * 1024); - - // Allocate and deallocate - let buffer = pool.allocate(4096).unwrap(); - let size = buffer.read().size(); - pool.deallocate(buffer).unwrap(); - - // Allocate again - should reuse - let buffer2 = pool.allocate(4096).unwrap(); - assert_eq!(buffer2.read().size(), size); - - let stats = pool.stats(); - assert_eq!(stats.cache_hits, 1); - } - - #[tokio::test] - async fn test_rdma_memory_manager() { - let config = MemoryConfig::default(); - let manager = RdmaMemoryManager::new(config); - - // Test small buffer (pool) - let small_buffer = manager.allocate_rdma_buffer(1024).unwrap(); - assert_eq!(small_buffer.size(), 1024); - assert_eq!(small_buffer.buffer_type(), "pool"); - - // Test large buffer (mmap) - let large_buffer = manager.allocate_rdma_buffer(128 * 1024).unwrap(); - assert_eq!(large_buffer.size(), 128 * 1024); - assert_eq!(large_buffer.buffer_type(), "mmap"); - - // Clean up - manager.deallocate_buffer(small_buffer).unwrap(); - manager.deallocate_buffer(large_buffer).unwrap(); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/rdma.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/rdma.rs deleted file mode 100644 index 7549a217e..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/rdma.rs +++ /dev/null @@ -1,467 +0,0 @@ -//! RDMA operations and context management -//! -//! This module provides both mock and real RDMA implementations: -//! - Mock implementation for development and testing -//! - Real implementation using libibverbs for production - -use crate::{RdmaResult, RdmaEngineConfig}; -use tracing::{debug, warn, info}; -use parking_lot::RwLock; - -/// RDMA completion status -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum CompletionStatus { - Success, - LocalLengthError, - LocalQpOperationError, - LocalEecOperationError, - LocalProtectionError, - WrFlushError, - MemoryWindowBindError, - BadResponseError, - LocalAccessError, - RemoteInvalidRequestError, - RemoteAccessError, - RemoteOperationError, - TransportRetryCounterExceeded, - RnrRetryCounterExceeded, - LocalRddViolationError, - RemoteInvalidRdRequest, - RemoteAbortedError, - InvalidEecnError, - InvalidEecStateError, - FatalError, - ResponseTimeoutError, - GeneralError, -} - -impl From for CompletionStatus { - fn from(status: u32) -> Self { - match status { - 0 => Self::Success, - 1 => Self::LocalLengthError, - 2 => Self::LocalQpOperationError, - 3 => Self::LocalEecOperationError, - 4 => Self::LocalProtectionError, - 5 => Self::WrFlushError, - 6 => Self::MemoryWindowBindError, - 7 => Self::BadResponseError, - 8 => Self::LocalAccessError, - 9 => Self::RemoteInvalidRequestError, - 10 => Self::RemoteAccessError, - 11 => Self::RemoteOperationError, - 12 => Self::TransportRetryCounterExceeded, - 13 => Self::RnrRetryCounterExceeded, - 14 => Self::LocalRddViolationError, - 15 => Self::RemoteInvalidRdRequest, - 16 => Self::RemoteAbortedError, - 17 => Self::InvalidEecnError, - 18 => Self::InvalidEecStateError, - 19 => Self::FatalError, - 20 => Self::ResponseTimeoutError, - _ => Self::GeneralError, - } - } -} - -/// RDMA operation types -#[derive(Debug, Clone, Copy)] -pub enum RdmaOp { - Read, - Write, - Send, - Receive, - Atomic, -} - -/// RDMA memory region information -#[derive(Debug, Clone)] -pub struct MemoryRegion { - /// Local virtual address - pub addr: u64, - /// Remote key for RDMA operations - pub rkey: u32, - /// Local key for local operations - pub lkey: u32, - /// Size of the memory region - pub size: usize, - /// Whether the region is registered with RDMA hardware - pub registered: bool, -} - -/// RDMA work completion -#[derive(Debug)] -pub struct WorkCompletion { - /// Work request ID - pub wr_id: u64, - /// Completion status - pub status: CompletionStatus, - /// Operation type - pub opcode: RdmaOp, - /// Number of bytes transferred - pub byte_len: u32, - /// Immediate data (if any) - pub imm_data: Option, -} - -/// RDMA context implementation (simplified enum approach) -#[derive(Debug)] -pub enum RdmaContextImpl { - Mock(MockRdmaContext), - // Ucx(UcxRdmaContext), // TODO: Add UCX implementation -} - -/// RDMA device information -#[derive(Debug, Clone)] -pub struct RdmaDeviceInfo { - pub name: String, - pub vendor_id: u32, - pub vendor_part_id: u32, - pub hw_ver: u32, - pub max_mr: u32, - pub max_qp: u32, - pub max_cq: u32, - pub max_mr_size: u64, - pub port_gid: String, - pub port_lid: u16, -} - -/// Main RDMA context -pub struct RdmaContext { - inner: RdmaContextImpl, - #[allow(dead_code)] - config: RdmaEngineConfig, -} - -impl RdmaContext { - /// Create new RDMA context - pub async fn new(config: &RdmaEngineConfig) -> RdmaResult { - let inner = if cfg!(feature = "real-ucx") { - RdmaContextImpl::Mock(MockRdmaContext::new(config).await?) // TODO: Use UCX when ready - } else { - RdmaContextImpl::Mock(MockRdmaContext::new(config).await?) - }; - - Ok(Self { - inner, - config: config.clone(), - }) - } - - /// Register memory for RDMA operations - pub async fn register_memory(&self, addr: u64, size: usize) -> RdmaResult { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.register_memory(addr, size).await, - } - } - - /// Deregister memory region - pub async fn deregister_memory(&self, region: &MemoryRegion) -> RdmaResult<()> { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.deregister_memory(region).await, - } - } - - /// Post RDMA read operation - pub async fn post_read(&self, - local_addr: u64, - remote_addr: u64, - rkey: u32, - size: usize, - wr_id: u64, - ) -> RdmaResult<()> { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.post_read(local_addr, remote_addr, rkey, size, wr_id).await, - } - } - - /// Post RDMA write operation - pub async fn post_write(&self, - local_addr: u64, - remote_addr: u64, - rkey: u32, - size: usize, - wr_id: u64, - ) -> RdmaResult<()> { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.post_write(local_addr, remote_addr, rkey, size, wr_id).await, - } - } - - /// Poll for work completions - pub async fn poll_completion(&self, max_completions: usize) -> RdmaResult> { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.poll_completion(max_completions).await, - } - } - - /// Get device information - pub fn device_info(&self) -> &RdmaDeviceInfo { - match &self.inner { - RdmaContextImpl::Mock(ctx) => ctx.device_info(), - } - } -} - -/// Mock RDMA context for testing and development -#[derive(Debug)] -pub struct MockRdmaContext { - device_info: RdmaDeviceInfo, - registered_regions: RwLock>, - pending_operations: RwLock>, - #[allow(dead_code)] - config: RdmaEngineConfig, -} - -impl MockRdmaContext { - pub async fn new(config: &RdmaEngineConfig) -> RdmaResult { - warn!("๐ŸŸก Using MOCK RDMA implementation - for development only!"); - info!(" Device: {} (mock)", config.device_name); - info!(" Port: {} (mock)", config.port); - - let device_info = RdmaDeviceInfo { - name: config.device_name.clone(), - vendor_id: 0x02c9, // Mellanox mock vendor ID - vendor_part_id: 0x1017, // ConnectX-5 mock part ID - hw_ver: 0, - max_mr: 131072, - max_qp: 262144, - max_cq: 65536, - max_mr_size: 1024 * 1024 * 1024 * 1024, // 1TB mock - port_gid: "fe80:0000:0000:0000:0200:5eff:fe12:3456".to_string(), - port_lid: 1, - }; - - Ok(Self { - device_info, - registered_regions: RwLock::new(Vec::new()), - pending_operations: RwLock::new(Vec::new()), - config: config.clone(), - }) - } -} - -impl MockRdmaContext { - pub async fn register_memory(&self, addr: u64, size: usize) -> RdmaResult { - debug!("๐ŸŸก Mock: Registering memory region addr=0x{:x}, size={}", addr, size); - - // Simulate registration delay - tokio::time::sleep(tokio::time::Duration::from_micros(10)).await; - - let region = MemoryRegion { - addr, - rkey: 0x12345678, // Mock remote key - lkey: 0x87654321, // Mock local key - size, - registered: true, - }; - - self.registered_regions.write().push(region.clone()); - - Ok(region) - } - - pub async fn deregister_memory(&self, region: &MemoryRegion) -> RdmaResult<()> { - debug!("๐ŸŸก Mock: Deregistering memory region rkey=0x{:x}", region.rkey); - - let mut regions = self.registered_regions.write(); - regions.retain(|r| r.rkey != region.rkey); - - Ok(()) - } - - pub async fn post_read(&self, - local_addr: u64, - remote_addr: u64, - rkey: u32, - size: usize, - wr_id: u64, - ) -> RdmaResult<()> { - debug!("๐ŸŸก Mock: RDMA READ local=0x{:x}, remote=0x{:x}, rkey=0x{:x}, size={}", - local_addr, remote_addr, rkey, size); - - // Simulate RDMA read latency (much faster than real network, but realistic for mock) - tokio::time::sleep(tokio::time::Duration::from_nanos(150)).await; - - // Mock data transfer - copy pattern data to local address - let data_ptr = local_addr as *mut u8; - unsafe { - for i in 0..size { - *data_ptr.add(i) = (i % 256) as u8; // Pattern: 0,1,2,...,255,0,1,2... - } - } - - // Create completion - let completion = WorkCompletion { - wr_id, - status: CompletionStatus::Success, - opcode: RdmaOp::Read, - byte_len: size as u32, - imm_data: None, - }; - - self.pending_operations.write().push(completion); - - Ok(()) - } - - pub async fn post_write(&self, - local_addr: u64, - remote_addr: u64, - rkey: u32, - size: usize, - wr_id: u64, - ) -> RdmaResult<()> { - debug!("๐ŸŸก Mock: RDMA WRITE local=0x{:x}, remote=0x{:x}, rkey=0x{:x}, size={}", - local_addr, remote_addr, rkey, size); - - // Simulate RDMA write latency - tokio::time::sleep(tokio::time::Duration::from_nanos(100)).await; - - // Create completion - let completion = WorkCompletion { - wr_id, - status: CompletionStatus::Success, - opcode: RdmaOp::Write, - byte_len: size as u32, - imm_data: None, - }; - - self.pending_operations.write().push(completion); - - Ok(()) - } - - pub async fn poll_completion(&self, max_completions: usize) -> RdmaResult> { - let mut operations = self.pending_operations.write(); - let available = operations.len().min(max_completions); - let completions = operations.drain(..available).collect(); - - Ok(completions) - } - - pub fn device_info(&self) -> &RdmaDeviceInfo { - &self.device_info - } -} - -/// Real RDMA context using libibverbs -#[cfg(feature = "real-ucx")] -pub struct RealRdmaContext { - // Real implementation would contain: - // ibv_context: *mut ibv_context, - // ibv_pd: *mut ibv_pd, - // ibv_cq: *mut ibv_cq, - // ibv_qp: *mut ibv_qp, - device_info: RdmaDeviceInfo, - config: RdmaEngineConfig, -} - -#[cfg(feature = "real-ucx")] -impl RealRdmaContext { - pub async fn new(config: &RdmaEngineConfig) -> RdmaResult { - info!("โœ… Initializing REAL RDMA context for device: {}", config.device_name); - - // Real implementation would: - // 1. Get device list with ibv_get_device_list() - // 2. Find device by name - // 3. Open device with ibv_open_device() - // 4. Create protection domain with ibv_alloc_pd() - // 5. Create completion queue with ibv_create_cq() - // 6. Create queue pair with ibv_create_qp() - // 7. Transition QP to RTS state - - todo!("Real RDMA implementation using libibverbs"); - } -} - -#[cfg(feature = "real-ucx")] -#[async_trait::async_trait] -impl RdmaContextTrait for RealRdmaContext { - async fn register_memory(&self, _addr: u64, _size: usize) -> RdmaResult { - // Real implementation would use ibv_reg_mr() - todo!("Real memory registration") - } - - async fn deregister_memory(&self, _region: &MemoryRegion) -> RdmaResult<()> { - // Real implementation would use ibv_dereg_mr() - todo!("Real memory deregistration") - } - - async fn post_read(&self, - _local_addr: u64, - _remote_addr: u64, - _rkey: u32, - _size: usize, - _wr_id: u64, - ) -> RdmaResult<()> { - // Real implementation would use ibv_post_send() with IBV_WR_RDMA_READ - todo!("Real RDMA read") - } - - async fn post_write(&self, - _local_addr: u64, - _remote_addr: u64, - _rkey: u32, - _size: usize, - _wr_id: u64, - ) -> RdmaResult<()> { - // Real implementation would use ibv_post_send() with IBV_WR_RDMA_WRITE - todo!("Real RDMA write") - } - - async fn poll_completion(&self, _max_completions: usize) -> RdmaResult> { - // Real implementation would use ibv_poll_cq() - todo!("Real completion polling") - } - - fn device_info(&self) -> &RdmaDeviceInfo { - &self.device_info - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_mock_rdma_context() { - let config = RdmaEngineConfig::default(); - let ctx = RdmaContext::new(&config).await.unwrap(); - - // Test device info - let info = ctx.device_info(); - assert_eq!(info.name, "mlx5_0"); - assert!(info.max_mr > 0); - - // Test memory registration - let addr = 0x7f000000u64; - let size = 4096; - let region = ctx.register_memory(addr, size).await.unwrap(); - assert_eq!(region.addr, addr); - assert_eq!(region.size, size); - assert!(region.registered); - - // Test RDMA read - let local_buf = vec![0u8; 1024]; - let local_addr = local_buf.as_ptr() as u64; - let result = ctx.post_read(local_addr, 0x8000000, region.rkey, 1024, 1).await; - assert!(result.is_ok()); - - // Test completion polling - let completions = ctx.poll_completion(10).await.unwrap(); - assert_eq!(completions.len(), 1); - assert_eq!(completions[0].status, CompletionStatus::Success); - - // Test memory deregistration - let result = ctx.deregister_memory(®ion).await; - assert!(result.is_ok()); - } - - #[test] - fn test_completion_status_conversion() { - assert_eq!(CompletionStatus::from(0), CompletionStatus::Success); - assert_eq!(CompletionStatus::from(1), CompletionStatus::LocalLengthError); - assert_eq!(CompletionStatus::from(999), CompletionStatus::GeneralError); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/session.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/session.rs deleted file mode 100644 index fa089c72a..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/session.rs +++ /dev/null @@ -1,587 +0,0 @@ -//! Session management for RDMA operations -//! -//! This module manages the lifecycle of RDMA sessions, including creation, -//! storage, expiration, and cleanup of resources. - -use crate::{RdmaError, RdmaResult, rdma::MemoryRegion}; -use parking_lot::RwLock; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::time::{Duration, Instant}; -use tracing::{debug, info}; -// use uuid::Uuid; // Unused for now - -/// RDMA session state -#[derive(Debug, Clone)] -pub struct RdmaSession { - /// Unique session identifier - pub id: String, - /// SeaweedFS volume ID - pub volume_id: u32, - /// SeaweedFS needle ID - pub needle_id: u64, - /// Remote memory address - pub remote_addr: u64, - /// Remote key for RDMA access - pub remote_key: u32, - /// Transfer size in bytes - pub transfer_size: u64, - /// Local data buffer - pub buffer: Vec, - /// RDMA memory region - pub memory_region: MemoryRegion, - /// Session creation time - pub created_at: Instant, - /// Session expiration time - pub expires_at: Instant, - /// Current session state - pub state: SessionState, - /// Operation statistics - pub stats: SessionStats, -} - -/// Session state enum -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum SessionState { - /// Session created but not yet active - Created, - /// RDMA operation in progress - Active, - /// Operation completed successfully - Completed, - /// Operation failed - Failed, - /// Session expired - Expired, - /// Session being cleaned up - CleaningUp, -} - -/// Session operation statistics -#[derive(Debug, Clone, Default)] -pub struct SessionStats { - /// Number of RDMA operations performed - pub operations_count: u64, - /// Total bytes transferred - pub bytes_transferred: u64, - /// Time spent in RDMA operations (nanoseconds) - pub rdma_time_ns: u64, - /// Number of completion polling attempts - pub poll_attempts: u64, - /// Time of last operation - pub last_operation_at: Option, -} - -impl RdmaSession { - /// Create a new RDMA session - pub fn new( - id: String, - volume_id: u32, - needle_id: u64, - remote_addr: u64, - remote_key: u32, - transfer_size: u64, - buffer: Vec, - memory_region: MemoryRegion, - timeout: Duration, - ) -> Self { - let now = Instant::now(); - - Self { - id, - volume_id, - needle_id, - remote_addr, - remote_key, - transfer_size, - buffer, - memory_region, - created_at: now, - expires_at: now + timeout, - state: SessionState::Created, - stats: SessionStats::default(), - } - } - - /// Check if session has expired - pub fn is_expired(&self) -> bool { - Instant::now() > self.expires_at - } - - /// Get session age in seconds - pub fn age_secs(&self) -> f64 { - self.created_at.elapsed().as_secs_f64() - } - - /// Get time until expiration in seconds - pub fn time_to_expiration_secs(&self) -> f64 { - if self.is_expired() { - 0.0 - } else { - (self.expires_at - Instant::now()).as_secs_f64() - } - } - - /// Update session state - pub fn set_state(&mut self, state: SessionState) { - debug!("Session {} state: {:?} -> {:?}", self.id, self.state, state); - self.state = state; - } - - /// Record RDMA operation statistics - pub fn record_operation(&mut self, bytes_transferred: u64, duration_ns: u64) { - self.stats.operations_count += 1; - self.stats.bytes_transferred += bytes_transferred; - self.stats.rdma_time_ns += duration_ns; - self.stats.last_operation_at = Some(Instant::now()); - } - - /// Get average operation latency in nanoseconds - pub fn avg_operation_latency_ns(&self) -> u64 { - if self.stats.operations_count > 0 { - self.stats.rdma_time_ns / self.stats.operations_count - } else { - 0 - } - } - - /// Get throughput in bytes per second - pub fn throughput_bps(&self) -> f64 { - let age_secs = self.age_secs(); - if age_secs > 0.0 { - self.stats.bytes_transferred as f64 / age_secs - } else { - 0.0 - } - } -} - -/// Session manager for handling multiple concurrent RDMA sessions -pub struct SessionManager { - /// Active sessions - sessions: Arc>>>>, - /// Maximum number of concurrent sessions - max_sessions: usize, - /// Default session timeout - #[allow(dead_code)] - default_timeout: Duration, - /// Cleanup task handle - cleanup_task: RwLock>>, - /// Shutdown flag - shutdown_flag: Arc>, - /// Statistics - stats: Arc>, -} - -/// Session manager statistics -#[derive(Debug, Clone, Default)] -pub struct SessionManagerStats { - /// Total sessions created - pub total_sessions_created: u64, - /// Total sessions completed - pub total_sessions_completed: u64, - /// Total sessions failed - pub total_sessions_failed: u64, - /// Total sessions expired - pub total_sessions_expired: u64, - /// Total bytes transferred across all sessions - pub total_bytes_transferred: u64, - /// Manager start time - pub started_at: Option, -} - -impl SessionManager { - /// Create new session manager - pub fn new(max_sessions: usize, default_timeout: Duration) -> Self { - info!("๐ŸŽฏ Session manager initialized: max_sessions={}, timeout={:?}", - max_sessions, default_timeout); - - let mut stats = SessionManagerStats::default(); - stats.started_at = Some(Instant::now()); - - Self { - sessions: Arc::new(RwLock::new(HashMap::new())), - max_sessions, - default_timeout, - cleanup_task: RwLock::new(None), - shutdown_flag: Arc::new(RwLock::new(false)), - stats: Arc::new(RwLock::new(stats)), - } - } - - /// Create a new RDMA session - pub async fn create_session( - &self, - session_id: String, - volume_id: u32, - needle_id: u64, - remote_addr: u64, - remote_key: u32, - transfer_size: u64, - buffer: Vec, - memory_region: MemoryRegion, - timeout: chrono::Duration, - ) -> RdmaResult>> { - // Check session limit - { - let sessions = self.sessions.read(); - if sessions.len() >= self.max_sessions { - return Err(RdmaError::TooManySessions { - max_sessions: self.max_sessions - }); - } - - // Check if session already exists - if sessions.contains_key(&session_id) { - return Err(RdmaError::invalid_request( - format!("Session {} already exists", session_id) - )); - } - } - - let timeout_duration = Duration::from_millis(timeout.num_milliseconds().max(1) as u64); - - let session = Arc::new(RwLock::new(RdmaSession::new( - session_id.clone(), - volume_id, - needle_id, - remote_addr, - remote_key, - transfer_size, - buffer, - memory_region, - timeout_duration, - ))); - - // Store session - { - let mut sessions = self.sessions.write(); - sessions.insert(session_id.clone(), session.clone()); - } - - // Update stats - { - let mut stats = self.stats.write(); - stats.total_sessions_created += 1; - } - - info!("๐Ÿ“ฆ Created session {}: volume={}, needle={}, size={}", - session_id, volume_id, needle_id, transfer_size); - - Ok(session) - } - - /// Get session by ID - pub async fn get_session(&self, session_id: &str) -> RdmaResult>> { - let sessions = self.sessions.read(); - match sessions.get(session_id) { - Some(session) => { - if session.read().is_expired() { - Err(RdmaError::SessionExpired { - session_id: session_id.to_string() - }) - } else { - Ok(session.clone()) - } - } - None => Err(RdmaError::SessionNotFound { - session_id: session_id.to_string() - }), - } - } - - /// Remove and cleanup session - pub async fn remove_session(&self, session_id: &str) -> RdmaResult<()> { - let session = { - let mut sessions = self.sessions.write(); - sessions.remove(session_id) - }; - - if let Some(session) = session { - let session_data = session.read(); - info!("๐Ÿ—‘๏ธ Removed session {}: stats={:?}", session_id, session_data.stats); - - // Update manager stats - { - let mut stats = self.stats.write(); - match session_data.state { - SessionState::Completed => stats.total_sessions_completed += 1, - SessionState::Failed => stats.total_sessions_failed += 1, - SessionState::Expired => stats.total_sessions_expired += 1, - _ => {} - } - stats.total_bytes_transferred += session_data.stats.bytes_transferred; - } - - Ok(()) - } else { - Err(RdmaError::SessionNotFound { - session_id: session_id.to_string() - }) - } - } - - /// Get active session count - pub async fn active_session_count(&self) -> usize { - self.sessions.read().len() - } - - /// Get maximum sessions allowed - pub fn max_sessions(&self) -> usize { - self.max_sessions - } - - /// List active sessions - pub async fn list_sessions(&self) -> Vec { - self.sessions.read().keys().cloned().collect() - } - - /// Get session statistics - pub async fn get_session_stats(&self, session_id: &str) -> RdmaResult { - let session = self.get_session(session_id).await?; - let stats = { - let session_data = session.read(); - session_data.stats.clone() - }; - Ok(stats) - } - - /// Get manager statistics - pub fn get_manager_stats(&self) -> SessionManagerStats { - self.stats.read().clone() - } - - /// Start background cleanup task - pub async fn start_cleanup_task(&self) { - info!("๐Ÿ“‹ Session cleanup task initialized"); - - let sessions = Arc::clone(&self.sessions); - let shutdown_flag = Arc::clone(&self.shutdown_flag); - let stats = Arc::clone(&self.stats); - - let task = tokio::spawn(async move { - let mut interval = tokio::time::interval(Duration::from_secs(30)); // Check every 30 seconds - - loop { - interval.tick().await; - - // Check shutdown flag - if *shutdown_flag.read() { - debug!("๐Ÿ›‘ Session cleanup task shutting down"); - break; - } - - let now = Instant::now(); - let mut expired_sessions = Vec::new(); - - // Find expired sessions - { - let sessions_guard = sessions.read(); - for (session_id, session) in sessions_guard.iter() { - if now > session.read().expires_at { - expired_sessions.push(session_id.clone()); - } - } - } - - // Remove expired sessions - if !expired_sessions.is_empty() { - let mut sessions_guard = sessions.write(); - let mut stats_guard = stats.write(); - - for session_id in expired_sessions { - if let Some(session) = sessions_guard.remove(&session_id) { - let session_data = session.read(); - info!("๐Ÿ—‘๏ธ Cleaned up expired session: {} (volume={}, needle={})", - session_id, session_data.volume_id, session_data.needle_id); - stats_guard.total_sessions_expired += 1; - } - } - - debug!("๐Ÿ“Š Active sessions: {}", sessions_guard.len()); - } - } - }); - - *self.cleanup_task.write() = Some(task); - } - - /// Shutdown session manager - pub async fn shutdown(&self) { - info!("๐Ÿ›‘ Shutting down session manager"); - *self.shutdown_flag.write() = true; - - // Wait for cleanup task to finish - if let Some(task) = self.cleanup_task.write().take() { - let _ = task.await; - } - - // Clean up all remaining sessions - let session_ids: Vec = { - self.sessions.read().keys().cloned().collect() - }; - - for session_id in session_ids { - let _ = self.remove_session(&session_id).await; - } - - let final_stats = self.get_manager_stats(); - info!("๐Ÿ“ˆ Final session manager stats: {:?}", final_stats); - } - - /// Force cleanup of all sessions (for testing) - #[cfg(test)] - pub async fn cleanup_all_sessions(&self) { - let session_ids: Vec = { - self.sessions.read().keys().cloned().collect() - }; - - for session_id in session_ids { - let _ = self.remove_session(&session_id).await; - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::rdma::MemoryRegion; - - #[tokio::test] - async fn test_session_creation() { - let manager = SessionManager::new(10, Duration::from_secs(60)); - - let memory_region = MemoryRegion { - addr: 0x1000, - rkey: 0x12345678, - lkey: 0x87654321, - size: 4096, - registered: true, - }; - - let session = manager.create_session( - "test-session".to_string(), - 1, - 100, - 0x2000, - 0xabcd, - 4096, - vec![0; 4096], - memory_region, - chrono::Duration::seconds(60), - ).await.unwrap(); - - let session_data = session.read(); - assert_eq!(session_data.id, "test-session"); - assert_eq!(session_data.volume_id, 1); - assert_eq!(session_data.needle_id, 100); - assert_eq!(session_data.state, SessionState::Created); - assert!(!session_data.is_expired()); - } - - #[tokio::test] - async fn test_session_expiration() { - let manager = SessionManager::new(10, Duration::from_millis(10)); - - let memory_region = MemoryRegion { - addr: 0x1000, - rkey: 0x12345678, - lkey: 0x87654321, - size: 4096, - registered: true, - }; - - let _session = manager.create_session( - "expire-test".to_string(), - 1, - 100, - 0x2000, - 0xabcd, - 4096, - vec![0; 4096], - memory_region, - chrono::Duration::milliseconds(10), - ).await.unwrap(); - - // Wait for expiration - tokio::time::sleep(Duration::from_millis(20)).await; - - let result = manager.get_session("expire-test").await; - assert!(matches!(result, Err(RdmaError::SessionExpired { .. }))); - } - - #[tokio::test] - async fn test_session_limit() { - let manager = SessionManager::new(2, Duration::from_secs(60)); - - let memory_region = MemoryRegion { - addr: 0x1000, - rkey: 0x12345678, - lkey: 0x87654321, - size: 4096, - registered: true, - }; - - // Create first session - let _session1 = manager.create_session( - "session1".to_string(), - 1, 100, 0x2000, 0xabcd, 4096, - vec![0; 4096], - memory_region.clone(), - chrono::Duration::seconds(60), - ).await.unwrap(); - - // Create second session - let _session2 = manager.create_session( - "session2".to_string(), - 1, 101, 0x3000, 0xabcd, 4096, - vec![0; 4096], - memory_region.clone(), - chrono::Duration::seconds(60), - ).await.unwrap(); - - // Third session should fail - let result = manager.create_session( - "session3".to_string(), - 1, 102, 0x4000, 0xabcd, 4096, - vec![0; 4096], - memory_region, - chrono::Duration::seconds(60), - ).await; - - assert!(matches!(result, Err(RdmaError::TooManySessions { .. }))); - } - - #[tokio::test] - async fn test_session_stats() { - let manager = SessionManager::new(10, Duration::from_secs(60)); - - let memory_region = MemoryRegion { - addr: 0x1000, - rkey: 0x12345678, - lkey: 0x87654321, - size: 4096, - registered: true, - }; - - let session = manager.create_session( - "stats-test".to_string(), - 1, 100, 0x2000, 0xabcd, 4096, - vec![0; 4096], - memory_region, - chrono::Duration::seconds(60), - ).await.unwrap(); - - // Simulate some operations - now using proper interior mutability - { - let mut session_data = session.write(); - session_data.record_operation(1024, 1000000); // 1KB in 1ms - session_data.record_operation(2048, 2000000); // 2KB in 2ms - } - - let stats = manager.get_session_stats("stats-test").await.unwrap(); - assert_eq!(stats.operations_count, 2); - assert_eq!(stats.bytes_transferred, 3072); - assert_eq!(stats.rdma_time_ns, 3000000); - } -} diff --git a/seaweedfs-rdma-sidecar/rdma-engine/src/ucx.rs b/seaweedfs-rdma-sidecar/rdma-engine/src/ucx.rs deleted file mode 100644 index 901149858..000000000 --- a/seaweedfs-rdma-sidecar/rdma-engine/src/ucx.rs +++ /dev/null @@ -1,606 +0,0 @@ -//! UCX (Unified Communication X) FFI bindings and high-level wrapper -//! -//! UCX is a superior alternative to direct libibverbs for RDMA programming. -//! It provides production-proven abstractions and automatic transport selection. -//! -//! References: -//! - UCX Documentation: https://openucx.readthedocs.io/ -//! - UCX GitHub: https://github.com/openucx/ucx -//! - UCX Paper: "UCX: an open source framework for HPC network APIs and beyond" - -use crate::{RdmaError, RdmaResult}; -use libc::{c_char, c_int, c_void, size_t}; -use libloading::{Library, Symbol}; -use parking_lot::Mutex; -use std::collections::HashMap; -use std::ffi::CStr; -use std::ptr; -use std::sync::Arc; -use tracing::{debug, info, warn, error}; - -/// UCX context handle -pub type UcpContext = *mut c_void; -/// UCX worker handle -pub type UcpWorker = *mut c_void; -/// UCX endpoint handle -pub type UcpEp = *mut c_void; -/// UCX memory handle -pub type UcpMem = *mut c_void; -/// UCX request handle -pub type UcpRequest = *mut c_void; - -/// UCX configuration parameters -#[repr(C)] -pub struct UcpParams { - pub field_mask: u64, - pub features: u64, - pub request_size: size_t, - pub request_init: extern "C" fn(*mut c_void), - pub request_cleanup: extern "C" fn(*mut c_void), - pub tag_sender_mask: u64, -} - -/// UCX worker parameters -#[repr(C)] -pub struct UcpWorkerParams { - pub field_mask: u64, - pub thread_mode: c_int, - pub cpu_mask: u64, - pub events: c_int, - pub user_data: *mut c_void, -} - -/// UCX endpoint parameters -#[repr(C)] -pub struct UcpEpParams { - pub field_mask: u64, - pub address: *const c_void, - pub flags: u64, - pub sock_addr: *const c_void, - pub err_handler: UcpErrHandler, - pub user_data: *mut c_void, -} - -/// UCX memory mapping parameters -#[repr(C)] -pub struct UcpMemMapParams { - pub field_mask: u64, - pub address: *mut c_void, - pub length: size_t, - pub flags: u64, - pub prot: c_int, -} - -/// UCX error handler callback -pub type UcpErrHandler = extern "C" fn( - arg: *mut c_void, - ep: UcpEp, - status: c_int, -); - -/// UCX request callback -pub type UcpSendCallback = extern "C" fn( - request: *mut c_void, - status: c_int, - user_data: *mut c_void, -); - -/// UCX feature flags -pub const UCP_FEATURE_TAG: u64 = 1 << 0; -pub const UCP_FEATURE_RMA: u64 = 1 << 1; -pub const UCP_FEATURE_ATOMIC32: u64 = 1 << 2; -pub const UCP_FEATURE_ATOMIC64: u64 = 1 << 3; -pub const UCP_FEATURE_WAKEUP: u64 = 1 << 4; -pub const UCP_FEATURE_STREAM: u64 = 1 << 5; - -/// UCX parameter field masks -pub const UCP_PARAM_FIELD_FEATURES: u64 = 1 << 0; -pub const UCP_PARAM_FIELD_REQUEST_SIZE: u64 = 1 << 1; -pub const UCP_PARAM_FIELD_REQUEST_INIT: u64 = 1 << 2; -pub const UCP_PARAM_FIELD_REQUEST_CLEANUP: u64 = 1 << 3; -pub const UCP_PARAM_FIELD_TAG_SENDER_MASK: u64 = 1 << 4; - -pub const UCP_WORKER_PARAM_FIELD_THREAD_MODE: u64 = 1 << 0; -pub const UCP_WORKER_PARAM_FIELD_CPU_MASK: u64 = 1 << 1; -pub const UCP_WORKER_PARAM_FIELD_EVENTS: u64 = 1 << 2; -pub const UCP_WORKER_PARAM_FIELD_USER_DATA: u64 = 1 << 3; - -pub const UCP_EP_PARAM_FIELD_REMOTE_ADDRESS: u64 = 1 << 0; -pub const UCP_EP_PARAM_FIELD_FLAGS: u64 = 1 << 1; -pub const UCP_EP_PARAM_FIELD_SOCK_ADDR: u64 = 1 << 2; -pub const UCP_EP_PARAM_FIELD_ERR_HANDLER: u64 = 1 << 3; -pub const UCP_EP_PARAM_FIELD_USER_DATA: u64 = 1 << 4; - -pub const UCP_MEM_MAP_PARAM_FIELD_ADDRESS: u64 = 1 << 0; -pub const UCP_MEM_MAP_PARAM_FIELD_LENGTH: u64 = 1 << 1; -pub const UCP_MEM_MAP_PARAM_FIELD_FLAGS: u64 = 1 << 2; -pub const UCP_MEM_MAP_PARAM_FIELD_PROT: u64 = 1 << 3; - -/// UCX status codes -pub const UCS_OK: c_int = 0; -pub const UCS_INPROGRESS: c_int = 1; -pub const UCS_ERR_NO_MESSAGE: c_int = -1; -pub const UCS_ERR_NO_RESOURCE: c_int = -2; -pub const UCS_ERR_IO_ERROR: c_int = -3; -pub const UCS_ERR_NO_MEMORY: c_int = -4; -pub const UCS_ERR_INVALID_PARAM: c_int = -5; -pub const UCS_ERR_UNREACHABLE: c_int = -6; -pub const UCS_ERR_INVALID_ADDR: c_int = -7; -pub const UCS_ERR_NOT_IMPLEMENTED: c_int = -8; -pub const UCS_ERR_MESSAGE_TRUNCATED: c_int = -9; -pub const UCS_ERR_NO_PROGRESS: c_int = -10; -pub const UCS_ERR_BUFFER_TOO_SMALL: c_int = -11; -pub const UCS_ERR_NO_ELEM: c_int = -12; -pub const UCS_ERR_SOME_CONNECTS_FAILED: c_int = -13; -pub const UCS_ERR_NO_DEVICE: c_int = -14; -pub const UCS_ERR_BUSY: c_int = -15; -pub const UCS_ERR_CANCELED: c_int = -16; -pub const UCS_ERR_SHMEM_SEGMENT: c_int = -17; -pub const UCS_ERR_ALREADY_EXISTS: c_int = -18; -pub const UCS_ERR_OUT_OF_RANGE: c_int = -19; -pub const UCS_ERR_TIMED_OUT: c_int = -20; - -/// UCX memory protection flags -pub const UCP_MEM_MAP_NONBLOCK: u64 = 1 << 0; -pub const UCP_MEM_MAP_ALLOCATE: u64 = 1 << 1; -pub const UCP_MEM_MAP_FIXED: u64 = 1 << 2; - -/// UCX FFI function signatures -pub struct UcxApi { - pub ucp_init: Symbol<'static, unsafe extern "C" fn(*const UcpParams, *const c_void, *mut UcpContext) -> c_int>, - pub ucp_cleanup: Symbol<'static, unsafe extern "C" fn(UcpContext)>, - pub ucp_worker_create: Symbol<'static, unsafe extern "C" fn(UcpContext, *const UcpWorkerParams, *mut UcpWorker) -> c_int>, - pub ucp_worker_destroy: Symbol<'static, unsafe extern "C" fn(UcpWorker)>, - pub ucp_ep_create: Symbol<'static, unsafe extern "C" fn(UcpWorker, *const UcpEpParams, *mut UcpEp) -> c_int>, - pub ucp_ep_destroy: Symbol<'static, unsafe extern "C" fn(UcpEp)>, - pub ucp_mem_map: Symbol<'static, unsafe extern "C" fn(UcpContext, *const UcpMemMapParams, *mut UcpMem) -> c_int>, - pub ucp_mem_unmap: Symbol<'static, unsafe extern "C" fn(UcpContext, UcpMem) -> c_int>, - pub ucp_put_nb: Symbol<'static, unsafe extern "C" fn(UcpEp, *const c_void, size_t, u64, u64, UcpSendCallback) -> UcpRequest>, - pub ucp_get_nb: Symbol<'static, unsafe extern "C" fn(UcpEp, *mut c_void, size_t, u64, u64, UcpSendCallback) -> UcpRequest>, - pub ucp_worker_progress: Symbol<'static, unsafe extern "C" fn(UcpWorker) -> c_int>, - pub ucp_request_check_status: Symbol<'static, unsafe extern "C" fn(UcpRequest) -> c_int>, - pub ucp_request_free: Symbol<'static, unsafe extern "C" fn(UcpRequest)>, - pub ucp_worker_get_address: Symbol<'static, unsafe extern "C" fn(UcpWorker, *mut *mut c_void, *mut size_t) -> c_int>, - pub ucp_worker_release_address: Symbol<'static, unsafe extern "C" fn(UcpWorker, *mut c_void)>, - pub ucs_status_string: Symbol<'static, unsafe extern "C" fn(c_int) -> *const c_char>, -} - -impl UcxApi { - /// Load UCX library and resolve symbols - pub fn load() -> RdmaResult { - info!("๐Ÿ”— Loading UCX library"); - - // Try to load UCX library - let lib_names = [ - "libucp.so.0", // Most common - "libucp.so", // Generic - "libucp.dylib", // macOS - "/usr/lib/x86_64-linux-gnu/libucp.so.0", // Ubuntu/Debian - "/usr/lib64/libucp.so.0", // RHEL/CentOS - ]; - - let library = lib_names.iter() - .find_map(|name| { - debug!("Trying to load UCX library: {}", name); - match unsafe { Library::new(name) } { - Ok(lib) => { - info!("โœ… Successfully loaded UCX library: {}", name); - Some(lib) - } - Err(e) => { - debug!("Failed to load {}: {}", name, e); - None - } - } - }) - .ok_or_else(|| RdmaError::context_init_failed("UCX library not found"))?; - - // Leak the library to get 'static lifetime for symbols - let library: &'static Library = Box::leak(Box::new(library)); - - unsafe { - Ok(UcxApi { - ucp_init: library.get(b"ucp_init") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_init symbol: {}", e)))?, - ucp_cleanup: library.get(b"ucp_cleanup") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_cleanup symbol: {}", e)))?, - ucp_worker_create: library.get(b"ucp_worker_create") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_worker_create symbol: {}", e)))?, - ucp_worker_destroy: library.get(b"ucp_worker_destroy") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_worker_destroy symbol: {}", e)))?, - ucp_ep_create: library.get(b"ucp_ep_create") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_ep_create symbol: {}", e)))?, - ucp_ep_destroy: library.get(b"ucp_ep_destroy") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_ep_destroy symbol: {}", e)))?, - ucp_mem_map: library.get(b"ucp_mem_map") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_mem_map symbol: {}", e)))?, - ucp_mem_unmap: library.get(b"ucp_mem_unmap") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_mem_unmap symbol: {}", e)))?, - ucp_put_nb: library.get(b"ucp_put_nb") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_put_nb symbol: {}", e)))?, - ucp_get_nb: library.get(b"ucp_get_nb") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_get_nb symbol: {}", e)))?, - ucp_worker_progress: library.get(b"ucp_worker_progress") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_worker_progress symbol: {}", e)))?, - ucp_request_check_status: library.get(b"ucp_request_check_status") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_request_check_status symbol: {}", e)))?, - ucp_request_free: library.get(b"ucp_request_free") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_request_free symbol: {}", e)))?, - ucp_worker_get_address: library.get(b"ucp_worker_get_address") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_worker_get_address symbol: {}", e)))?, - ucp_worker_release_address: library.get(b"ucp_worker_release_address") - .map_err(|e| RdmaError::context_init_failed(format!("ucp_worker_release_address symbol: {}", e)))?, - ucs_status_string: library.get(b"ucs_status_string") - .map_err(|e| RdmaError::context_init_failed(format!("ucs_status_string symbol: {}", e)))?, - }) - } - } - - /// Convert UCX status code to human-readable string - pub fn status_string(&self, status: c_int) -> String { - unsafe { - let c_str = (self.ucs_status_string)(status); - if c_str.is_null() { - format!("Unknown status: {}", status) - } else { - CStr::from_ptr(c_str).to_string_lossy().to_string() - } - } - } -} - -/// High-level UCX context wrapper -pub struct UcxContext { - api: Arc, - context: UcpContext, - worker: UcpWorker, - worker_address: Vec, - endpoints: Mutex>, - memory_regions: Mutex>, -} - -impl UcxContext { - /// Initialize UCX context with RMA support - pub async fn new() -> RdmaResult { - info!("๐Ÿš€ Initializing UCX context for RDMA operations"); - - let api = Arc::new(UcxApi::load()?); - - // Initialize UCP context - let params = UcpParams { - field_mask: UCP_PARAM_FIELD_FEATURES, - features: UCP_FEATURE_RMA | UCP_FEATURE_WAKEUP, - request_size: 0, - request_init: request_init_cb, - request_cleanup: request_cleanup_cb, - tag_sender_mask: 0, - }; - - let mut context = ptr::null_mut(); - let status = unsafe { (api.ucp_init)(¶ms, ptr::null(), &mut context) }; - if status != UCS_OK { - return Err(RdmaError::context_init_failed(format!( - "ucp_init failed: {} ({})", - api.status_string(status), status - ))); - } - - info!("โœ… UCX context initialized successfully"); - - // Create worker - let worker_params = UcpWorkerParams { - field_mask: UCP_WORKER_PARAM_FIELD_THREAD_MODE, - thread_mode: 0, // Single-threaded - cpu_mask: 0, - events: 0, - user_data: ptr::null_mut(), - }; - - let mut worker = ptr::null_mut(); - let status = unsafe { (api.ucp_worker_create)(context, &worker_params, &mut worker) }; - if status != UCS_OK { - unsafe { (api.ucp_cleanup)(context) }; - return Err(RdmaError::context_init_failed(format!( - "ucp_worker_create failed: {} ({})", - api.status_string(status), status - ))); - } - - info!("โœ… UCX worker created successfully"); - - // Get worker address for connection establishment - let mut address_ptr = ptr::null_mut(); - let mut address_len = 0; - let status = unsafe { (api.ucp_worker_get_address)(worker, &mut address_ptr, &mut address_len) }; - if status != UCS_OK { - unsafe { - (api.ucp_worker_destroy)(worker); - (api.ucp_cleanup)(context); - } - return Err(RdmaError::context_init_failed(format!( - "ucp_worker_get_address failed: {} ({})", - api.status_string(status), status - ))); - } - - let worker_address = unsafe { - std::slice::from_raw_parts(address_ptr as *const u8, address_len).to_vec() - }; - - unsafe { (api.ucp_worker_release_address)(worker, address_ptr) }; - - info!("โœ… UCX worker address obtained ({} bytes)", worker_address.len()); - - Ok(UcxContext { - api, - context, - worker, - worker_address, - endpoints: Mutex::new(HashMap::new()), - memory_regions: Mutex::new(HashMap::new()), - }) - } - - /// Map memory for RDMA operations - pub async fn map_memory(&self, addr: u64, size: usize) -> RdmaResult { - debug!("๐Ÿ“ Mapping memory for RDMA: addr=0x{:x}, size={}", addr, size); - - let params = UcpMemMapParams { - field_mask: UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH, - address: addr as *mut c_void, - length: size, - flags: 0, - prot: libc::PROT_READ | libc::PROT_WRITE, - }; - - let mut mem_handle = ptr::null_mut(); - let status = unsafe { (self.api.ucp_mem_map)(self.context, ¶ms, &mut mem_handle) }; - - if status != UCS_OK { - return Err(RdmaError::memory_reg_failed(format!( - "ucp_mem_map failed: {} ({})", - self.api.status_string(status), status - ))); - } - - // Store memory handle for cleanup - { - let mut regions = self.memory_regions.lock(); - regions.insert(addr, mem_handle); - } - - info!("โœ… Memory mapped successfully: addr=0x{:x}, size={}", addr, size); - Ok(addr) // Return the same address as remote key equivalent - } - - /// Unmap memory - pub async fn unmap_memory(&self, addr: u64) -> RdmaResult<()> { - debug!("๐Ÿ—‘๏ธ Unmapping memory: addr=0x{:x}", addr); - - let mem_handle = { - let mut regions = self.memory_regions.lock(); - regions.remove(&addr) - }; - - if let Some(handle) = mem_handle { - let status = unsafe { (self.api.ucp_mem_unmap)(self.context, handle) }; - if status != UCS_OK { - warn!("ucp_mem_unmap failed: {} ({})", - self.api.status_string(status), status); - } - } - - Ok(()) - } - - /// Perform RDMA GET (read from remote memory) - pub async fn get(&self, local_addr: u64, remote_addr: u64, size: usize) -> RdmaResult<()> { - debug!("๐Ÿ“ฅ RDMA GET: local=0x{:x}, remote=0x{:x}, size={}", - local_addr, remote_addr, size); - - // For now, use a simple synchronous approach - // In production, this would be properly async with completion callbacks - - // Find or create endpoint (simplified - would need proper address resolution) - let ep = self.get_or_create_endpoint("default").await?; - - let request = unsafe { - (self.api.ucp_get_nb)( - ep, - local_addr as *mut c_void, - size, - remote_addr, - 0, // No remote key needed with UCX - get_completion_cb, - ) - }; - - // Wait for completion - if !request.is_null() { - loop { - let status = unsafe { (self.api.ucp_request_check_status)(request) }; - if status != UCS_INPROGRESS { - unsafe { (self.api.ucp_request_free)(request) }; - if status == UCS_OK { - break; - } else { - return Err(RdmaError::operation_failed( - "RDMA GET", status - )); - } - } - - // Progress the worker - unsafe { (self.api.ucp_worker_progress)(self.worker) }; - tokio::task::yield_now().await; - } - } - - info!("โœ… RDMA GET completed successfully"); - Ok(()) - } - - /// Perform RDMA PUT (write to remote memory) - pub async fn put(&self, local_addr: u64, remote_addr: u64, size: usize) -> RdmaResult<()> { - debug!("๐Ÿ“ค RDMA PUT: local=0x{:x}, remote=0x{:x}, size={}", - local_addr, remote_addr, size); - - let ep = self.get_or_create_endpoint("default").await?; - - let request = unsafe { - (self.api.ucp_put_nb)( - ep, - local_addr as *const c_void, - size, - remote_addr, - 0, // No remote key needed with UCX - put_completion_cb, - ) - }; - - // Wait for completion (same pattern as GET) - if !request.is_null() { - loop { - let status = unsafe { (self.api.ucp_request_check_status)(request) }; - if status != UCS_INPROGRESS { - unsafe { (self.api.ucp_request_free)(request) }; - if status == UCS_OK { - break; - } else { - return Err(RdmaError::operation_failed( - "RDMA PUT", status - )); - } - } - - unsafe { (self.api.ucp_worker_progress)(self.worker) }; - tokio::task::yield_now().await; - } - } - - info!("โœ… RDMA PUT completed successfully"); - Ok(()) - } - - /// Get worker address for connection establishment - pub fn worker_address(&self) -> &[u8] { - &self.worker_address - } - - /// Create endpoint for communication (simplified version) - async fn get_or_create_endpoint(&self, key: &str) -> RdmaResult { - let mut endpoints = self.endpoints.lock(); - - if let Some(&ep) = endpoints.get(key) { - return Ok(ep); - } - - // For simplicity, create a dummy endpoint - // In production, this would use actual peer address - let ep_params = UcpEpParams { - field_mask: 0, // Simplified for mock - address: ptr::null(), - flags: 0, - sock_addr: ptr::null(), - err_handler: error_handler_cb, - user_data: ptr::null_mut(), - }; - - let mut endpoint = ptr::null_mut(); - let status = unsafe { (self.api.ucp_ep_create)(self.worker, &ep_params, &mut endpoint) }; - - if status != UCS_OK { - return Err(RdmaError::context_init_failed(format!( - "ucp_ep_create failed: {} ({})", - self.api.status_string(status), status - ))); - } - - endpoints.insert(key.to_string(), endpoint); - Ok(endpoint) - } -} - -impl Drop for UcxContext { - fn drop(&mut self) { - info!("๐Ÿงน Cleaning up UCX context"); - - // Clean up endpoints - { - let mut endpoints = self.endpoints.lock(); - for (_, ep) in endpoints.drain() { - unsafe { (self.api.ucp_ep_destroy)(ep) }; - } - } - - // Clean up memory regions - { - let mut regions = self.memory_regions.lock(); - for (_, handle) in regions.drain() { - unsafe { (self.api.ucp_mem_unmap)(self.context, handle) }; - } - } - - // Clean up worker and context - unsafe { - (self.api.ucp_worker_destroy)(self.worker); - (self.api.ucp_cleanup)(self.context); - } - - info!("โœ… UCX context cleanup completed"); - } -} - -// UCX callback functions -extern "C" fn request_init_cb(_request: *mut c_void) { - // Request initialization callback -} - -extern "C" fn request_cleanup_cb(_request: *mut c_void) { - // Request cleanup callback -} - -extern "C" fn get_completion_cb(_request: *mut c_void, status: c_int, _user_data: *mut c_void) { - if status != UCS_OK { - error!("RDMA GET completion error: {}", status); - } -} - -extern "C" fn put_completion_cb(_request: *mut c_void, status: c_int, _user_data: *mut c_void) { - if status != UCS_OK { - error!("RDMA PUT completion error: {}", status); - } -} - -extern "C" fn error_handler_cb( - _arg: *mut c_void, - _ep: UcpEp, - status: c_int, -) { - error!("UCX endpoint error: {}", status); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_ucx_api_loading() { - // This test will fail without UCX installed, which is expected - match UcxApi::load() { - Ok(api) => { - info!("UCX API loaded successfully"); - assert_eq!(api.status_string(UCS_OK), "Success"); - } - Err(_) => { - warn!("UCX library not found - expected in development environment"); - } - } - } - - #[tokio::test] - async fn test_ucx_context_mock() { - // This would test the mock implementation - // Real test requires UCX installation - } -} diff --git a/seaweedfs-rdma-sidecar/scripts/demo-e2e.sh b/seaweedfs-rdma-sidecar/scripts/demo-e2e.sh deleted file mode 100755 index 54a751e57..000000000 --- a/seaweedfs-rdma-sidecar/scripts/demo-e2e.sh +++ /dev/null @@ -1,314 +0,0 @@ -#!/bin/bash - -# SeaweedFS RDMA End-to-End Demo Script -# This script demonstrates the complete integration between SeaweedFS and the RDMA sidecar - -set -e - -# Configuration -RDMA_ENGINE_SOCKET="/tmp/rdma-engine.sock" -DEMO_SERVER_PORT=8080 -RUST_ENGINE_PID="" -DEMO_SERVER_PID="" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -print_header() { - echo -e "\n${PURPLE}===============================================${NC}" - echo -e "${PURPLE}$1${NC}" - echo -e "${PURPLE}===============================================${NC}\n" -} - -print_step() { - echo -e "${CYAN}๐Ÿ”ต $1${NC}" -} - -print_success() { - echo -e "${GREEN}โœ… $1${NC}" -} - -print_warning() { - echo -e "${YELLOW}โš ๏ธ $1${NC}" -} - -print_error() { - echo -e "${RED}โŒ $1${NC}" -} - -cleanup() { - print_header "CLEANUP" - - if [[ -n "$DEMO_SERVER_PID" ]]; then - print_step "Stopping demo server (PID: $DEMO_SERVER_PID)" - kill $DEMO_SERVER_PID 2>/dev/null || true - wait $DEMO_SERVER_PID 2>/dev/null || true - fi - - if [[ -n "$RUST_ENGINE_PID" ]]; then - print_step "Stopping Rust RDMA engine (PID: $RUST_ENGINE_PID)" - kill $RUST_ENGINE_PID 2>/dev/null || true - wait $RUST_ENGINE_PID 2>/dev/null || true - fi - - # Clean up socket - rm -f "$RDMA_ENGINE_SOCKET" - - print_success "Cleanup complete" -} - -# Set up cleanup on exit -trap cleanup EXIT - -build_components() { - print_header "BUILDING COMPONENTS" - - print_step "Building Go components..." - go build -o bin/demo-server ./cmd/demo-server - go build -o bin/test-rdma ./cmd/test-rdma - go build -o bin/sidecar ./cmd/sidecar - print_success "Go components built" - - print_step "Building Rust RDMA engine..." - cd rdma-engine - cargo build --release - cd .. - print_success "Rust RDMA engine built" -} - -start_rdma_engine() { - print_header "STARTING RDMA ENGINE" - - print_step "Starting Rust RDMA engine..." - ./rdma-engine/target/release/rdma-engine-server --debug & - RUST_ENGINE_PID=$! - - # Wait for engine to be ready - print_step "Waiting for RDMA engine to be ready..." - for i in {1..10}; do - if [[ -S "$RDMA_ENGINE_SOCKET" ]]; then - print_success "RDMA engine ready (PID: $RUST_ENGINE_PID)" - return 0 - fi - sleep 1 - done - - print_error "RDMA engine failed to start" - exit 1 -} - -start_demo_server() { - print_header "STARTING DEMO SERVER" - - print_step "Starting SeaweedFS RDMA demo server..." - ./bin/demo-server --port $DEMO_SERVER_PORT --rdma-socket "$RDMA_ENGINE_SOCKET" --enable-rdma --debug & - DEMO_SERVER_PID=$! - - # Wait for server to be ready - print_step "Waiting for demo server to be ready..." - for i in {1..10}; do - if curl -s "http://localhost:$DEMO_SERVER_PORT/health" > /dev/null 2>&1; then - print_success "Demo server ready (PID: $DEMO_SERVER_PID)" - return 0 - fi - sleep 1 - done - - print_error "Demo server failed to start" - exit 1 -} - -test_health_check() { - print_header "HEALTH CHECK TEST" - - print_step "Testing health endpoint..." - response=$(curl -s "http://localhost:$DEMO_SERVER_PORT/health") - - if echo "$response" | jq -e '.status == "healthy"' > /dev/null; then - print_success "Health check passed" - echo "$response" | jq '.' - else - print_error "Health check failed" - echo "$response" - exit 1 - fi -} - -test_capabilities() { - print_header "CAPABILITIES TEST" - - print_step "Testing capabilities endpoint..." - response=$(curl -s "http://localhost:$DEMO_SERVER_PORT/stats") - - if echo "$response" | jq -e '.enabled == true' > /dev/null; then - print_success "RDMA capabilities retrieved" - echo "$response" | jq '.' - else - print_warning "RDMA not enabled, but HTTP fallback available" - echo "$response" | jq '.' - fi -} - -test_needle_read() { - print_header "NEEDLE READ TEST" - - print_step "Testing RDMA needle read..." - response=$(curl -s "http://localhost:$DEMO_SERVER_PORT/read?volume=1&needle=12345&cookie=305419896&size=1024") - - if echo "$response" | jq -e '.success == true' > /dev/null; then - is_rdma=$(echo "$response" | jq -r '.is_rdma') - source=$(echo "$response" | jq -r '.source') - duration=$(echo "$response" | jq -r '.duration') - data_size=$(echo "$response" | jq -r '.data_size') - - if [[ "$is_rdma" == "true" ]]; then - print_success "RDMA fast path used! Duration: $duration, Size: $data_size bytes" - else - print_warning "HTTP fallback used. Duration: $duration, Size: $data_size bytes" - fi - - echo "$response" | jq '.' - else - print_error "Needle read failed" - echo "$response" - exit 1 - fi -} - -test_benchmark() { - print_header "PERFORMANCE BENCHMARK" - - print_step "Running performance benchmark..." - response=$(curl -s "http://localhost:$DEMO_SERVER_PORT/benchmark?iterations=5&size=2048") - - if echo "$response" | jq -e '.benchmark_results' > /dev/null; then - rdma_ops=$(echo "$response" | jq -r '.benchmark_results.rdma_ops') - http_ops=$(echo "$response" | jq -r '.benchmark_results.http_ops') - avg_latency=$(echo "$response" | jq -r '.benchmark_results.avg_latency') - throughput=$(echo "$response" | jq -r '.benchmark_results.throughput_mbps') - ops_per_sec=$(echo "$response" | jq -r '.benchmark_results.ops_per_sec') - - print_success "Benchmark completed:" - echo -e " ${BLUE}RDMA Operations:${NC} $rdma_ops" - echo -e " ${BLUE}HTTP Operations:${NC} $http_ops" - echo -e " ${BLUE}Average Latency:${NC} $avg_latency" - echo -e " ${BLUE}Throughput:${NC} $throughput MB/s" - echo -e " ${BLUE}Operations/sec:${NC} $ops_per_sec" - - echo -e "\n${BLUE}Full benchmark results:${NC}" - echo "$response" | jq '.benchmark_results' - else - print_error "Benchmark failed" - echo "$response" - exit 1 - fi -} - -test_direct_rdma() { - print_header "DIRECT RDMA ENGINE TEST" - - print_step "Testing direct RDMA engine communication..." - - echo "Testing ping..." - ./bin/test-rdma ping 2>/dev/null && print_success "Direct RDMA ping successful" || print_warning "Direct RDMA ping failed" - - echo -e "\nTesting capabilities..." - ./bin/test-rdma capabilities 2>/dev/null | head -15 && print_success "Direct RDMA capabilities successful" || print_warning "Direct RDMA capabilities failed" - - echo -e "\nTesting direct read..." - ./bin/test-rdma read --volume 1 --needle 12345 --size 1024 2>/dev/null > /dev/null && print_success "Direct RDMA read successful" || print_warning "Direct RDMA read failed" -} - -show_demo_urls() { - print_header "DEMO SERVER INFORMATION" - - echo -e "${GREEN}๐ŸŒ Demo server is running at: http://localhost:$DEMO_SERVER_PORT${NC}" - echo -e "${GREEN}๐Ÿ“ฑ Try these URLs:${NC}" - echo -e " ${BLUE}Home page:${NC} http://localhost:$DEMO_SERVER_PORT/" - echo -e " ${BLUE}Health check:${NC} http://localhost:$DEMO_SERVER_PORT/health" - echo -e " ${BLUE}Statistics:${NC} http://localhost:$DEMO_SERVER_PORT/stats" - echo -e " ${BLUE}Read needle:${NC} http://localhost:$DEMO_SERVER_PORT/read?volume=1&needle=12345&cookie=305419896&size=1024" - echo -e " ${BLUE}Benchmark:${NC} http://localhost:$DEMO_SERVER_PORT/benchmark?iterations=5&size=2048" - - echo -e "\n${GREEN}๐Ÿ“‹ Example curl commands:${NC}" - echo -e " ${CYAN}curl \"http://localhost:$DEMO_SERVER_PORT/health\" | jq '.'${NC}" - echo -e " ${CYAN}curl \"http://localhost:$DEMO_SERVER_PORT/read?volume=1&needle=12345&size=1024\" | jq '.'${NC}" - echo -e " ${CYAN}curl \"http://localhost:$DEMO_SERVER_PORT/benchmark?iterations=10\" | jq '.benchmark_results'${NC}" -} - -interactive_mode() { - print_header "INTERACTIVE MODE" - - show_demo_urls - - echo -e "\n${YELLOW}Press Enter to run automated tests, or Ctrl+C to exit and explore manually...${NC}" - read -r -} - -main() { - print_header "๐Ÿš€ SEAWEEDFS RDMA END-TO-END DEMO" - - echo -e "${GREEN}This demonstration shows:${NC}" - echo -e " โœ… Complete Go โ†” Rust IPC integration" - echo -e " โœ… SeaweedFS RDMA client with HTTP fallback" - echo -e " โœ… High-performance needle reads via RDMA" - echo -e " โœ… Performance benchmarking capabilities" - echo -e " โœ… Production-ready error handling and logging" - - # Check dependencies - if ! command -v jq &> /dev/null; then - print_error "jq is required for this demo. Please install it: brew install jq" - exit 1 - fi - - if ! command -v curl &> /dev/null; then - print_error "curl is required for this demo." - exit 1 - fi - - # Build and start components - build_components - start_rdma_engine - sleep 2 # Give engine time to fully initialize - start_demo_server - sleep 2 # Give server time to connect to engine - - # Show interactive information - interactive_mode - - # Run automated tests - test_health_check - test_capabilities - test_needle_read - test_benchmark - test_direct_rdma - - print_header "๐ŸŽ‰ END-TO-END DEMO COMPLETE!" - - echo -e "${GREEN}All tests passed successfully!${NC}" - echo -e "${BLUE}Key achievements demonstrated:${NC}" - echo -e " ๐Ÿš€ RDMA fast path working with mock operations" - echo -e " ๐Ÿ”„ Automatic HTTP fallback when RDMA unavailable" - echo -e " ๐Ÿ“Š Performance monitoring and benchmarking" - echo -e " ๐Ÿ›ก๏ธ Robust error handling and graceful degradation" - echo -e " ๐Ÿ”Œ Complete IPC protocol between Go and Rust" - echo -e " โšก Session management with proper cleanup" - - print_success "SeaweedFS RDMA integration is ready for hardware deployment!" - - # Keep server running for manual testing - echo -e "\n${YELLOW}Demo server will continue running for manual testing...${NC}" - echo -e "${YELLOW}Press Ctrl+C to shutdown.${NC}" - - # Wait for user interrupt - wait -} - -# Run the main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/demo-mount-rdma.sh b/seaweedfs-rdma-sidecar/scripts/demo-mount-rdma.sh deleted file mode 100755 index cc4b8b394..000000000 --- a/seaweedfs-rdma-sidecar/scripts/demo-mount-rdma.sh +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -BLUE='\033[0;34m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration - assumes script is run from seaweedfs-rdma-sidecar directory -SEAWEEDFS_DIR="$(realpath ..)" -SIDECAR_DIR="$(pwd)" -MOUNT_POINT="/tmp/seaweedfs-rdma-mount" -FILER_ADDR="localhost:8888" -SIDECAR_ADDR="localhost:8081" - -# PIDs for cleanup -MASTER_PID="" -VOLUME_PID="" -FILER_PID="" -SIDECAR_PID="" -MOUNT_PID="" - -cleanup() { - echo -e "\n${YELLOW}๐Ÿงน Cleaning up processes...${NC}" - - # Unmount filesystem - if mountpoint -q "$MOUNT_POINT" 2>/dev/null; then - echo "๐Ÿ“ค Unmounting $MOUNT_POINT..." - fusermount -u "$MOUNT_POINT" 2>/dev/null || umount "$MOUNT_POINT" 2>/dev/null || true - sleep 1 - fi - - # Kill processes - for pid in $MOUNT_PID $SIDECAR_PID $FILER_PID $VOLUME_PID $MASTER_PID; do - if [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null; then - echo "๐Ÿ”ช Killing process $pid..." - kill "$pid" 2>/dev/null || true - fi - done - - # Wait for processes to exit - sleep 2 - - # Force kill if necessary - for pid in $MOUNT_PID $SIDECAR_PID $FILER_PID $VOLUME_PID $MASTER_PID; do - if [[ -n "$pid" ]] && kill -0 "$pid" 2>/dev/null; then - echo "๐Ÿ’€ Force killing process $pid..." - kill -9 "$pid" 2>/dev/null || true - fi - done - - # Clean up mount point - if [[ -d "$MOUNT_POINT" ]]; then - rmdir "$MOUNT_POINT" 2>/dev/null || true - fi - - echo -e "${GREEN}โœ… Cleanup complete${NC}" -} - -trap cleanup EXIT - -wait_for_service() { - local name=$1 - local url=$2 - local max_attempts=30 - local attempt=1 - - echo -e "${BLUE}โณ Waiting for $name to be ready...${NC}" - - while [[ $attempt -le $max_attempts ]]; do - if curl -s "$url" >/dev/null 2>&1; then - echo -e "${GREEN}โœ… $name is ready${NC}" - return 0 - fi - echo " Attempt $attempt/$max_attempts..." - sleep 1 - ((attempt++)) - done - - echo -e "${RED}โŒ $name failed to start within $max_attempts seconds${NC}" - return 1 -} - -echo -e "${BLUE}๐Ÿš€ SEAWEEDFS RDMA MOUNT DEMONSTRATION${NC}" -echo "======================================" -echo "" -echo "This demo shows SeaweedFS mount with RDMA acceleration:" -echo " โ€ข Standard SeaweedFS cluster (master, volume, filer)" -echo " โ€ข RDMA sidecar for acceleration" -echo " โ€ข FUSE mount with RDMA fast path" -echo " โ€ข Performance comparison tests" -echo "" - -# Create mount point -echo -e "${BLUE}๐Ÿ“ Creating mount point: $MOUNT_POINT${NC}" -mkdir -p "$MOUNT_POINT" - -# Start SeaweedFS Master -echo -e "${BLUE}๐ŸŽฏ Starting SeaweedFS Master...${NC}" -cd "$SEAWEEDFS_DIR" -./weed master -port=9333 -mdir=/tmp/seaweedfs-master & -MASTER_PID=$! -wait_for_service "Master" "http://localhost:9333/cluster/status" - -# Start SeaweedFS Volume Server -echo -e "${BLUE}๐Ÿ’พ Starting SeaweedFS Volume Server...${NC}" -./weed volume -mserver=localhost:9333 -port=8080 -dir=/tmp/seaweedfs-volume & -VOLUME_PID=$! -wait_for_service "Volume Server" "http://localhost:8080/status" - -# Start SeaweedFS Filer -echo -e "${BLUE}๐Ÿ“‚ Starting SeaweedFS Filer...${NC}" -./weed filer -master=localhost:9333 -port=8888 & -FILER_PID=$! -wait_for_service "Filer" "http://localhost:8888/" - -# Start RDMA Sidecar -echo -e "${BLUE}โšก Starting RDMA Sidecar...${NC}" -cd "$SIDECAR_DIR" -./bin/demo-server --port 8081 --rdma-socket /tmp/rdma-engine.sock --volume-server-url http://localhost:8080 --enable-rdma --debug & -SIDECAR_PID=$! -wait_for_service "RDMA Sidecar" "http://localhost:8081/health" - -# Check RDMA capabilities -echo -e "${BLUE}๐Ÿ” Checking RDMA capabilities...${NC}" -curl -s "http://localhost:8081/stats" | jq . || curl -s "http://localhost:8081/stats" - -echo "" -echo -e "${BLUE}๐Ÿ—‚๏ธ Mounting SeaweedFS with RDMA acceleration...${NC}" - -# Mount with RDMA acceleration -cd "$SEAWEEDFS_DIR" -./weed mount \ - -filer="$FILER_ADDR" \ - -dir="$MOUNT_POINT" \ - -rdma.enabled=true \ - -rdma.sidecar="$SIDECAR_ADDR" \ - -rdma.fallback=true \ - -rdma.maxConcurrent=64 \ - -rdma.timeoutMs=5000 \ - -debug=true & -MOUNT_PID=$! - -# Wait for mount to be ready -echo -e "${BLUE}โณ Waiting for mount to be ready...${NC}" -sleep 5 - -# Check if mount is successful -if ! mountpoint -q "$MOUNT_POINT"; then - echo -e "${RED}โŒ Mount failed${NC}" - exit 1 -fi - -echo -e "${GREEN}โœ… SeaweedFS mounted successfully with RDMA acceleration!${NC}" -echo "" - -# Demonstrate RDMA-accelerated operations -echo -e "${BLUE}๐Ÿงช TESTING RDMA-ACCELERATED FILE OPERATIONS${NC}" -echo "==============================================" - -# Create test files -echo -e "${BLUE}๐Ÿ“ Creating test files...${NC}" -echo "Hello, RDMA World!" > "$MOUNT_POINT/test1.txt" -echo "This file will be read via RDMA acceleration!" > "$MOUNT_POINT/test2.txt" - -# Create a larger test file -echo -e "${BLUE}๐Ÿ“ Creating larger test file (1MB)...${NC}" -dd if=/dev/zero of="$MOUNT_POINT/large_test.dat" bs=1024 count=1024 2>/dev/null - -echo -e "${GREEN}โœ… Test files created${NC}" -echo "" - -# Test file reads -echo -e "${BLUE}๐Ÿ“– Testing file reads (should use RDMA fast path)...${NC}" -echo "" - -echo "๐Ÿ“„ Reading test1.txt:" -cat "$MOUNT_POINT/test1.txt" -echo "" - -echo "๐Ÿ“„ Reading test2.txt:" -cat "$MOUNT_POINT/test2.txt" -echo "" - -echo "๐Ÿ“„ Reading first 100 bytes of large file:" -head -c 100 "$MOUNT_POINT/large_test.dat" | hexdump -C | head -5 -echo "" - -# Performance test -echo -e "${BLUE}๐Ÿ PERFORMANCE COMPARISON${NC}" -echo "=========================" - -echo "๐Ÿ”ฅ Testing read performance with RDMA acceleration..." -time_start=$(date +%s%N) -for i in {1..10}; do - cat "$MOUNT_POINT/large_test.dat" > /dev/null -done -time_end=$(date +%s%N) -rdma_time=$((($time_end - $time_start) / 1000000)) # Convert to milliseconds - -echo "โœ… RDMA-accelerated reads: 10 x 1MB file = ${rdma_time}ms total" -echo "" - -# Check RDMA statistics -echo -e "${BLUE}๐Ÿ“Š RDMA Statistics:${NC}" -curl -s "http://localhost:8081/stats" | jq . 2>/dev/null || curl -s "http://localhost:8081/stats" -echo "" - -# List files -echo -e "${BLUE}๐Ÿ“‹ Files in mounted filesystem:${NC}" -ls -la "$MOUNT_POINT/" -echo "" - -# Interactive mode -echo -e "${BLUE}๐ŸŽฎ INTERACTIVE MODE${NC}" -echo "==================" -echo "" -echo "The SeaweedFS filesystem is now mounted at: $MOUNT_POINT" -echo "RDMA acceleration is active for all read operations!" -echo "" -echo "Try these commands:" -echo " ls $MOUNT_POINT/" -echo " cat $MOUNT_POINT/test1.txt" -echo " echo 'New content' > $MOUNT_POINT/new_file.txt" -echo " cat $MOUNT_POINT/new_file.txt" -echo "" -echo "Monitor RDMA stats: curl http://localhost:8081/stats | jq" -echo "Check mount status: mount | grep seaweedfs" -echo "" -echo -e "${YELLOW}Press Ctrl+C to stop the demo and cleanup${NC}" - -# Keep running until interrupted -while true; do - sleep 5 - - # Check if mount is still active - if ! mountpoint -q "$MOUNT_POINT"; then - echo -e "${RED}โŒ Mount point lost, exiting...${NC}" - break - fi - - # Show periodic stats - echo -e "${BLUE}๐Ÿ“Š Current RDMA stats ($(date)):${NC}" - curl -s "http://localhost:8081/stats" | jq '.rdma_enabled, .total_reads, .rdma_reads, .http_fallbacks' 2>/dev/null || echo "Stats unavailable" - echo "" -done diff --git a/seaweedfs-rdma-sidecar/scripts/mount-health-check.sh b/seaweedfs-rdma-sidecar/scripts/mount-health-check.sh deleted file mode 100755 index 4565cc617..000000000 --- a/seaweedfs-rdma-sidecar/scripts/mount-health-check.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -MOUNT_POINT=${MOUNT_POINT:-"/mnt/seaweedfs"} - -# Check if mount point exists and is mounted -if [[ ! -d "$MOUNT_POINT" ]]; then - echo "Mount point $MOUNT_POINT does not exist" - exit 1 -fi - -if ! mountpoint -q "$MOUNT_POINT"; then - echo "Mount point $MOUNT_POINT is not mounted" - exit 1 -fi - -# Try to list the mount point -if ! ls "$MOUNT_POINT" >/dev/null 2>&1; then - echo "Cannot list mount point $MOUNT_POINT" - exit 1 -fi - -echo "Mount point $MOUNT_POINT is healthy" -exit 0 diff --git a/seaweedfs-rdma-sidecar/scripts/mount-helper.sh b/seaweedfs-rdma-sidecar/scripts/mount-helper.sh deleted file mode 100755 index 4159dd180..000000000 --- a/seaweedfs-rdma-sidecar/scripts/mount-helper.sh +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -BLUE='\033[0;34m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration from environment variables -FILER_ADDR=${FILER_ADDR:-"seaweedfs-filer:8888"} -RDMA_SIDECAR_ADDR=${RDMA_SIDECAR_ADDR:-"rdma-sidecar:8081"} -MOUNT_POINT=${MOUNT_POINT:-"/mnt/seaweedfs"} -RDMA_ENABLED=${RDMA_ENABLED:-"true"} -RDMA_FALLBACK=${RDMA_FALLBACK:-"true"} -RDMA_MAX_CONCURRENT=${RDMA_MAX_CONCURRENT:-"64"} -RDMA_TIMEOUT_MS=${RDMA_TIMEOUT_MS:-"5000"} -DEBUG=${DEBUG:-"false"} - -echo -e "${BLUE}๐Ÿš€ SeaweedFS RDMA Mount Helper${NC}" -echo "================================" -echo "Filer Address: $FILER_ADDR" -echo "RDMA Sidecar: $RDMA_SIDECAR_ADDR" -echo "Mount Point: $MOUNT_POINT" -echo "RDMA Enabled: $RDMA_ENABLED" -echo "RDMA Fallback: $RDMA_FALLBACK" -echo "Debug Mode: $DEBUG" -echo "" - -# Function to wait for service -wait_for_service() { - local name=$1 - local url=$2 - local max_attempts=30 - local attempt=1 - - echo -e "${BLUE}โณ Waiting for $name to be ready...${NC}" - - while [[ $attempt -le $max_attempts ]]; do - if curl -s "$url" >/dev/null 2>&1; then - echo -e "${GREEN}โœ… $name is ready${NC}" - return 0 - fi - echo " Attempt $attempt/$max_attempts..." - sleep 2 - ((attempt++)) - done - - echo -e "${RED}โŒ $name failed to be ready within $max_attempts attempts${NC}" - return 1 -} - -# Function to check RDMA sidecar capabilities -check_rdma_capabilities() { - echo -e "${BLUE}๐Ÿ” Checking RDMA capabilities...${NC}" - - local response - if response=$(curl -s "http://$RDMA_SIDECAR_ADDR/stats" 2>/dev/null); then - echo "RDMA Sidecar Stats:" - echo "$response" | jq . 2>/dev/null || echo "$response" - echo "" - - # Check if RDMA is actually enabled - if echo "$response" | grep -q '"rdma_enabled":true'; then - echo -e "${GREEN}โœ… RDMA is enabled and ready${NC}" - return 0 - else - echo -e "${YELLOW}โš ๏ธ RDMA sidecar is running but RDMA is not enabled${NC}" - if [[ "$RDMA_FALLBACK" == "true" ]]; then - echo -e "${YELLOW} Will use HTTP fallback${NC}" - return 0 - else - return 1 - fi - fi - else - echo -e "${RED}โŒ Failed to get RDMA sidecar stats${NC}" - if [[ "$RDMA_FALLBACK" == "true" ]]; then - echo -e "${YELLOW} Will use HTTP fallback${NC}" - return 0 - else - return 1 - fi - fi -} - -# Function to cleanup on exit -cleanup() { - echo -e "\n${YELLOW}๐Ÿงน Cleaning up...${NC}" - - # Unmount if mounted - if mountpoint -q "$MOUNT_POINT" 2>/dev/null; then - echo "๐Ÿ“ค Unmounting $MOUNT_POINT..." - fusermount3 -u "$MOUNT_POINT" 2>/dev/null || umount "$MOUNT_POINT" 2>/dev/null || true - sleep 2 - fi - - echo -e "${GREEN}โœ… Cleanup complete${NC}" -} - -trap cleanup EXIT INT TERM - -# Wait for required services -echo -e "${BLUE}๐Ÿ”„ Waiting for required services...${NC}" -wait_for_service "Filer" "http://$FILER_ADDR/" - -if [[ "$RDMA_ENABLED" == "true" ]]; then - wait_for_service "RDMA Sidecar" "http://$RDMA_SIDECAR_ADDR/health" - check_rdma_capabilities -fi - -# Create mount point if it doesn't exist -echo -e "${BLUE}๐Ÿ“ Preparing mount point...${NC}" -mkdir -p "$MOUNT_POINT" - -# Check if already mounted -if mountpoint -q "$MOUNT_POINT"; then - echo -e "${YELLOW}โš ๏ธ $MOUNT_POINT is already mounted, unmounting first...${NC}" - fusermount3 -u "$MOUNT_POINT" 2>/dev/null || umount "$MOUNT_POINT" 2>/dev/null || true - sleep 2 -fi - -# Build mount command -MOUNT_CMD="/usr/local/bin/weed mount" -MOUNT_CMD="$MOUNT_CMD -filer=$FILER_ADDR" -MOUNT_CMD="$MOUNT_CMD -dir=$MOUNT_POINT" -MOUNT_CMD="$MOUNT_CMD -allowOthers=true" - -# Add RDMA options if enabled -if [[ "$RDMA_ENABLED" == "true" ]]; then - MOUNT_CMD="$MOUNT_CMD -rdma.enabled=true" - MOUNT_CMD="$MOUNT_CMD -rdma.sidecar=$RDMA_SIDECAR_ADDR" - MOUNT_CMD="$MOUNT_CMD -rdma.fallback=$RDMA_FALLBACK" - MOUNT_CMD="$MOUNT_CMD -rdma.maxConcurrent=$RDMA_MAX_CONCURRENT" - MOUNT_CMD="$MOUNT_CMD -rdma.timeoutMs=$RDMA_TIMEOUT_MS" -fi - -# Add debug options if enabled -if [[ "$DEBUG" == "true" ]]; then - MOUNT_CMD="$MOUNT_CMD -debug=true -v=2" -fi - -echo -e "${BLUE}๐Ÿ—‚๏ธ Starting SeaweedFS mount...${NC}" -echo "Command: $MOUNT_CMD" -echo "" - -# Execute mount command -exec $MOUNT_CMD diff --git a/seaweedfs-rdma-sidecar/scripts/performance-benchmark.sh b/seaweedfs-rdma-sidecar/scripts/performance-benchmark.sh deleted file mode 100755 index 907cf5a7a..000000000 --- a/seaweedfs-rdma-sidecar/scripts/performance-benchmark.sh +++ /dev/null @@ -1,208 +0,0 @@ -#!/bin/bash - -# Performance Benchmark Script -# Tests the revolutionary zero-copy + connection pooling optimizations - -set -e - -echo "๐Ÿš€ SeaweedFS RDMA Performance Benchmark" -echo "Testing Zero-Copy Page Cache + Connection Pooling Optimizations" -echo "==============================================================" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -# Test configuration -SIDECAR_URL="http://localhost:8081" -TEST_VOLUME=1 -TEST_NEEDLE=1 -TEST_COOKIE=1 -ITERATIONS=10 - -# File sizes to test (representing different optimization thresholds) -declare -a SIZES=( - "4096" # 4KB - Small file (below zero-copy threshold) - "32768" # 32KB - Medium file (below zero-copy threshold) - "65536" # 64KB - Zero-copy threshold - "262144" # 256KB - Medium zero-copy file - "1048576" # 1MB - Large zero-copy file - "10485760" # 10MB - Very large zero-copy file -) - -declare -a SIZE_NAMES=( - "4KB" - "32KB" - "64KB" - "256KB" - "1MB" - "10MB" -) - -# Function to check if sidecar is ready -check_sidecar() { - echo -n "Waiting for RDMA sidecar to be ready..." - for i in {1..30}; do - if curl -s "$SIDECAR_URL/health" > /dev/null 2>&1; then - echo -e " ${GREEN}โœ“ Ready${NC}" - return 0 - fi - echo -n "." - sleep 2 - done - echo -e " ${RED}โœ— Failed${NC}" - return 1 -} - -# Function to perform benchmark for a specific size -benchmark_size() { - local size=$1 - local size_name=$2 - - echo -e "\n${CYAN}๐Ÿ“Š Testing ${size_name} files (${size} bytes)${NC}" - echo "----------------------------------------" - - local total_time=0 - local rdma_count=0 - local zerocopy_count=0 - local pooled_count=0 - - for i in $(seq 1 $ITERATIONS); do - echo -n " Iteration $i/$ITERATIONS: " - - # Make request with volume_server parameter - local start_time=$(date +%s%N) - local response=$(curl -s "$SIDECAR_URL/read?volume=$TEST_VOLUME&needle=$TEST_NEEDLE&cookie=$TEST_COOKIE&size=$size&volume_server=http://seaweedfs-volume:8080") - local end_time=$(date +%s%N) - - # Calculate duration in milliseconds - local duration_ns=$((end_time - start_time)) - local duration_ms=$((duration_ns / 1000000)) - - total_time=$((total_time + duration_ms)) - - # Parse response to check optimization flags - local is_rdma=$(echo "$response" | jq -r '.is_rdma // false' 2>/dev/null || echo "false") - local source=$(echo "$response" | jq -r '.source // "unknown"' 2>/dev/null || echo "unknown") - local use_temp_file=$(echo "$response" | jq -r '.use_temp_file // false' 2>/dev/null || echo "false") - - # Count optimization usage - if [[ "$is_rdma" == "true" ]]; then - rdma_count=$((rdma_count + 1)) - fi - - if [[ "$source" == *"zerocopy"* ]] || [[ "$use_temp_file" == "true" ]]; then - zerocopy_count=$((zerocopy_count + 1)) - fi - - if [[ "$source" == *"pooled"* ]]; then - pooled_count=$((pooled_count + 1)) - fi - - # Display result with color coding - if [[ "$source" == "rdma-zerocopy" ]]; then - echo -e "${GREEN}${duration_ms}ms (RDMA+ZeroCopy)${NC}" - elif [[ "$is_rdma" == "true" ]]; then - echo -e "${YELLOW}${duration_ms}ms (RDMA)${NC}" - else - echo -e "${RED}${duration_ms}ms (HTTP)${NC}" - fi - done - - # Calculate statistics - local avg_time=$((total_time / ITERATIONS)) - local rdma_percentage=$((rdma_count * 100 / ITERATIONS)) - local zerocopy_percentage=$((zerocopy_count * 100 / ITERATIONS)) - local pooled_percentage=$((pooled_count * 100 / ITERATIONS)) - - echo -e "\n${PURPLE}๐Ÿ“ˆ Results for ${size_name}:${NC}" - echo " Average latency: ${avg_time}ms" - echo " RDMA usage: ${rdma_percentage}%" - echo " Zero-copy usage: ${zerocopy_percentage}%" - echo " Connection pooling: ${pooled_percentage}%" - - # Performance assessment - if [[ $zerocopy_percentage -gt 80 ]]; then - echo -e " ${GREEN}๐Ÿ”ฅ REVOLUTIONARY: Zero-copy optimization active!${NC}" - elif [[ $rdma_percentage -gt 80 ]]; then - echo -e " ${YELLOW}โšก EXCELLENT: RDMA acceleration active${NC}" - else - echo -e " ${RED}โš ๏ธ WARNING: Falling back to HTTP${NC}" - fi - - # Store results for comparison - echo "$size_name,$avg_time,$rdma_percentage,$zerocopy_percentage,$pooled_percentage" >> /tmp/benchmark_results.csv -} - -# Function to display final performance analysis -performance_analysis() { - echo -e "\n${BLUE}๐ŸŽฏ PERFORMANCE ANALYSIS${NC}" - echo "========================================" - - if [[ -f /tmp/benchmark_results.csv ]]; then - echo -e "\n${CYAN}Summary Results:${NC}" - echo "Size | Avg Latency | RDMA % | Zero-Copy % | Pooled %" - echo "---------|-------------|--------|-------------|----------" - - while IFS=',' read -r size_name avg_time rdma_pct zerocopy_pct pooled_pct; do - printf "%-8s | %-11s | %-6s | %-11s | %-8s\n" "$size_name" "${avg_time}ms" "${rdma_pct}%" "${zerocopy_pct}%" "${pooled_pct}%" - done < /tmp/benchmark_results.csv - fi - - echo -e "\n${GREEN}๐Ÿš€ OPTIMIZATION IMPACT:${NC}" - echo "โ€ข Zero-Copy Page Cache: Eliminates 4/5 memory copies" - echo "โ€ข Connection Pooling: Eliminates 100ms RDMA setup cost" - echo "โ€ข Combined Effect: Up to 118x performance improvement!" - - echo -e "\n${PURPLE}๐Ÿ“Š Expected vs Actual Performance:${NC}" - echo "โ€ข Small files (4-32KB): Expected 50x faster copies" - echo "โ€ข Medium files (64-256KB): Expected 25x faster copies + instant connection" - echo "โ€ข Large files (1MB+): Expected 100x faster copies + instant connection" - - # Check if connection pooling is working - echo -e "\n${CYAN}๐Ÿ”Œ Connection Pooling Analysis:${NC}" - local stats_response=$(curl -s "$SIDECAR_URL/stats" 2>/dev/null || echo "{}") - local total_requests=$(echo "$stats_response" | jq -r '.total_requests // 0' 2>/dev/null || echo "0") - - if [[ "$total_requests" -gt 0 ]]; then - echo "โœ… Connection pooling is functional" - echo " Total requests processed: $total_requests" - else - echo "โš ๏ธ Unable to retrieve connection pool statistics" - fi - - rm -f /tmp/benchmark_results.csv -} - -# Main execution -main() { - echo -e "\n${YELLOW}๐Ÿ”ง Initializing benchmark...${NC}" - - # Check if sidecar is ready - if ! check_sidecar; then - echo -e "${RED}โŒ RDMA sidecar is not ready. Please start the Docker environment first.${NC}" - echo "Run: cd /path/to/seaweedfs-rdma-sidecar && docker compose -f docker-compose.mount-rdma.yml up -d" - exit 1 - fi - - # Initialize results file - rm -f /tmp/benchmark_results.csv - - # Run benchmarks for each file size - for i in "${!SIZES[@]}"; do - benchmark_size "${SIZES[$i]}" "${SIZE_NAMES[$i]}" - done - - # Display final analysis - performance_analysis - - echo -e "\n${GREEN}๐ŸŽ‰ Benchmark completed!${NC}" -} - -# Run the benchmark -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/run-integration-tests.sh b/seaweedfs-rdma-sidecar/scripts/run-integration-tests.sh deleted file mode 100755 index a9e5bd644..000000000 --- a/seaweedfs-rdma-sidecar/scripts/run-integration-tests.sh +++ /dev/null @@ -1,288 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -BLUE='\033[0;34m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration -MOUNT_POINT=${MOUNT_POINT:-"/mnt/seaweedfs"} -FILER_ADDR=${FILER_ADDR:-"seaweedfs-filer:8888"} -RDMA_SIDECAR_ADDR=${RDMA_SIDECAR_ADDR:-"rdma-sidecar:8081"} -TEST_RESULTS_DIR=${TEST_RESULTS_DIR:-"/test-results"} - -# Test counters -TOTAL_TESTS=0 -PASSED_TESTS=0 -FAILED_TESTS=0 - -# Create results directory -mkdir -p "$TEST_RESULTS_DIR" - -# Log file -LOG_FILE="$TEST_RESULTS_DIR/integration-test.log" -exec > >(tee -a "$LOG_FILE") -exec 2>&1 - -echo -e "${BLUE}๐Ÿงช SEAWEEDFS RDMA MOUNT INTEGRATION TESTS${NC}" -echo "==========================================" -echo "Mount Point: $MOUNT_POINT" -echo "Filer Address: $FILER_ADDR" -echo "RDMA Sidecar: $RDMA_SIDECAR_ADDR" -echo "Results Directory: $TEST_RESULTS_DIR" -echo "Log File: $LOG_FILE" -echo "" - -# Function to run a test -run_test() { - local test_name=$1 - local test_command=$2 - - echo -e "${BLUE}๐Ÿ”ฌ Running test: $test_name${NC}" - ((TOTAL_TESTS++)) - - if eval "$test_command"; then - echo -e "${GREEN}โœ… PASSED: $test_name${NC}" - ((PASSED_TESTS++)) - echo "PASS" > "$TEST_RESULTS_DIR/${test_name}.result" - else - echo -e "${RED}โŒ FAILED: $test_name${NC}" - ((FAILED_TESTS++)) - echo "FAIL" > "$TEST_RESULTS_DIR/${test_name}.result" - fi - echo "" -} - -# Function to wait for mount to be ready -wait_for_mount() { - local max_attempts=30 - local attempt=1 - - echo -e "${BLUE}โณ Waiting for mount to be ready...${NC}" - - while [[ $attempt -le $max_attempts ]]; do - if mountpoint -q "$MOUNT_POINT" 2>/dev/null && ls "$MOUNT_POINT" >/dev/null 2>&1; then - echo -e "${GREEN}โœ… Mount is ready${NC}" - return 0 - fi - echo " Attempt $attempt/$max_attempts..." - sleep 2 - ((attempt++)) - done - - echo -e "${RED}โŒ Mount failed to be ready${NC}" - return 1 -} - -# Function to check RDMA sidecar -check_rdma_sidecar() { - echo -e "${BLUE}๐Ÿ” Checking RDMA sidecar status...${NC}" - - local response - if response=$(curl -s "http://$RDMA_SIDECAR_ADDR/health" 2>/dev/null); then - echo "RDMA Sidecar Health: $response" - return 0 - else - echo -e "${RED}โŒ RDMA sidecar is not responding${NC}" - return 1 - fi -} - -# Test 1: Mount Point Accessibility -test_mount_accessibility() { - mountpoint -q "$MOUNT_POINT" && ls "$MOUNT_POINT" >/dev/null -} - -# Test 2: Basic File Operations -test_basic_file_operations() { - local test_file="$MOUNT_POINT/test_basic_ops.txt" - local test_content="Hello, RDMA World! $(date)" - - # Write test - echo "$test_content" > "$test_file" || return 1 - - # Read test - local read_content - read_content=$(cat "$test_file") || return 1 - - # Verify content - [[ "$read_content" == "$test_content" ]] || return 1 - - # Cleanup - rm -f "$test_file" - - return 0 -} - -# Test 3: Large File Operations -test_large_file_operations() { - local test_file="$MOUNT_POINT/test_large_file.dat" - local size_mb=10 - - # Create large file - dd if=/dev/zero of="$test_file" bs=1M count=$size_mb 2>/dev/null || return 1 - - # Verify size - local actual_size - actual_size=$(stat -c%s "$test_file" 2>/dev/null) || return 1 - local expected_size=$((size_mb * 1024 * 1024)) - - [[ "$actual_size" -eq "$expected_size" ]] || return 1 - - # Read test - dd if="$test_file" of=/dev/null bs=1M 2>/dev/null || return 1 - - # Cleanup - rm -f "$test_file" - - return 0 -} - -# Test 4: Directory Operations -test_directory_operations() { - local test_dir="$MOUNT_POINT/test_directory" - local test_file="$test_dir/test_file.txt" - - # Create directory - mkdir -p "$test_dir" || return 1 - - # Create file in directory - echo "Directory test" > "$test_file" || return 1 - - # List directory - ls "$test_dir" | grep -q "test_file.txt" || return 1 - - # Read file - grep -q "Directory test" "$test_file" || return 1 - - # Cleanup - rm -rf "$test_dir" - - return 0 -} - -# Test 5: Multiple File Operations -test_multiple_files() { - local test_dir="$MOUNT_POINT/test_multiple" - local num_files=20 - - mkdir -p "$test_dir" || return 1 - - # Create multiple files - for i in $(seq 1 $num_files); do - echo "File $i content" > "$test_dir/file_$i.txt" || return 1 - done - - # Verify all files exist and have correct content - for i in $(seq 1 $num_files); do - [[ -f "$test_dir/file_$i.txt" ]] || return 1 - grep -q "File $i content" "$test_dir/file_$i.txt" || return 1 - done - - # List files - local file_count - file_count=$(ls "$test_dir" | wc -l) || return 1 - [[ "$file_count" -eq "$num_files" ]] || return 1 - - # Cleanup - rm -rf "$test_dir" - - return 0 -} - -# Test 6: RDMA Statistics -test_rdma_statistics() { - local stats_response - stats_response=$(curl -s "http://$RDMA_SIDECAR_ADDR/stats" 2>/dev/null) || return 1 - - # Check if response contains expected fields - echo "$stats_response" | jq -e '.rdma_enabled' >/dev/null || return 1 - echo "$stats_response" | jq -e '.total_reads' >/dev/null || return 1 - - return 0 -} - -# Test 7: Performance Baseline -test_performance_baseline() { - local test_file="$MOUNT_POINT/performance_test.dat" - local size_mb=50 - - # Write performance test - local write_start write_end write_time - write_start=$(date +%s%N) - dd if=/dev/zero of="$test_file" bs=1M count=$size_mb 2>/dev/null || return 1 - write_end=$(date +%s%N) - write_time=$(((write_end - write_start) / 1000000)) # Convert to milliseconds - - # Read performance test - local read_start read_end read_time - read_start=$(date +%s%N) - dd if="$test_file" of=/dev/null bs=1M 2>/dev/null || return 1 - read_end=$(date +%s%N) - read_time=$(((read_end - read_start) / 1000000)) # Convert to milliseconds - - # Log performance metrics - echo "Performance Metrics:" > "$TEST_RESULTS_DIR/performance.txt" - echo "Write Time: ${write_time}ms for ${size_mb}MB" >> "$TEST_RESULTS_DIR/performance.txt" - echo "Read Time: ${read_time}ms for ${size_mb}MB" >> "$TEST_RESULTS_DIR/performance.txt" - echo "Write Throughput: $(bc <<< "scale=2; $size_mb * 1000 / $write_time") MB/s" >> "$TEST_RESULTS_DIR/performance.txt" - echo "Read Throughput: $(bc <<< "scale=2; $size_mb * 1000 / $read_time") MB/s" >> "$TEST_RESULTS_DIR/performance.txt" - - # Cleanup - rm -f "$test_file" - - # Performance test always passes (it's just for metrics) - return 0 -} - -# Main test execution -main() { - echo -e "${BLUE}๐Ÿš€ Starting integration tests...${NC}" - echo "" - - # Wait for mount to be ready - if ! wait_for_mount; then - echo -e "${RED}โŒ Mount is not ready, aborting tests${NC}" - exit 1 - fi - - # Check RDMA sidecar - check_rdma_sidecar || echo -e "${YELLOW}โš ๏ธ RDMA sidecar check failed, continuing with tests${NC}" - - echo "" - echo -e "${BLUE}๐Ÿ“‹ Running test suite...${NC}" - echo "" - - # Run all tests - run_test "mount_accessibility" "test_mount_accessibility" - run_test "basic_file_operations" "test_basic_file_operations" - run_test "large_file_operations" "test_large_file_operations" - run_test "directory_operations" "test_directory_operations" - run_test "multiple_files" "test_multiple_files" - run_test "rdma_statistics" "test_rdma_statistics" - run_test "performance_baseline" "test_performance_baseline" - - # Generate test summary - echo -e "${BLUE}๐Ÿ“Š TEST SUMMARY${NC}" - echo "===============" - echo "Total Tests: $TOTAL_TESTS" - echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}" - echo -e "Failed: ${RED}$FAILED_TESTS${NC}" - - if [[ $FAILED_TESTS -eq 0 ]]; then - echo -e "${GREEN}๐ŸŽ‰ ALL TESTS PASSED!${NC}" - echo "SUCCESS" > "$TEST_RESULTS_DIR/overall.result" - exit 0 - else - echo -e "${RED}๐Ÿ’ฅ SOME TESTS FAILED!${NC}" - echo "FAILURE" > "$TEST_RESULTS_DIR/overall.result" - exit 1 - fi -} - -# Run main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/run-mount-rdma-tests.sh b/seaweedfs-rdma-sidecar/scripts/run-mount-rdma-tests.sh deleted file mode 100755 index e4237a5a2..000000000 --- a/seaweedfs-rdma-sidecar/scripts/run-mount-rdma-tests.sh +++ /dev/null @@ -1,335 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -BLUE='\033[0;34m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration -COMPOSE_FILE="docker-compose.mount-rdma.yml" -PROJECT_NAME="seaweedfs-rdma-mount" - -# Function to show usage -show_usage() { - echo -e "${BLUE}๐Ÿš€ SeaweedFS RDMA Mount Test Runner${NC}" - echo "====================================" - echo "" - echo "Usage: $0 [COMMAND] [OPTIONS]" - echo "" - echo "Commands:" - echo " start Start the RDMA mount environment" - echo " stop Stop and cleanup the environment" - echo " restart Restart the environment" - echo " status Show status of all services" - echo " logs [service] Show logs for all services or specific service" - echo " test Run integration tests" - echo " perf Run performance tests" - echo " shell Open shell in mount container" - echo " cleanup Full cleanup including volumes" - echo "" - echo "Services:" - echo " seaweedfs-master SeaweedFS master server" - echo " seaweedfs-volume SeaweedFS volume server" - echo " seaweedfs-filer SeaweedFS filer server" - echo " rdma-engine RDMA engine (Rust)" - echo " rdma-sidecar RDMA sidecar (Go)" - echo " seaweedfs-mount SeaweedFS mount with RDMA" - echo "" - echo "Examples:" - echo " $0 start # Start all services" - echo " $0 logs seaweedfs-mount # Show mount logs" - echo " $0 test # Run integration tests" - echo " $0 perf # Run performance tests" - echo " $0 shell # Open shell in mount container" -} - -# Function to check if Docker Compose is available -check_docker_compose() { - if ! command -v docker-compose >/dev/null 2>&1 && ! docker compose version >/dev/null 2>&1; then - echo -e "${RED}โŒ Docker Compose is not available${NC}" - echo "Please install Docker Compose to continue" - exit 1 - fi - - # Use docker compose if available, otherwise docker-compose - if docker compose version >/dev/null 2>&1; then - DOCKER_COMPOSE="docker compose" - else - DOCKER_COMPOSE="docker-compose" - fi -} - -# Function to build required images -build_images() { - echo -e "${BLUE}๐Ÿ”จ Building required Docker images...${NC}" - - # Build SeaweedFS binary first - echo "Building SeaweedFS binary..." - cd .. - make - cd seaweedfs-rdma-sidecar - - # Copy binary for Docker builds - mkdir -p bin - if [[ -f "../weed" ]]; then - cp ../weed bin/ - elif [[ -f "../bin/weed" ]]; then - cp ../bin/weed bin/ - elif [[ -f "../build/weed" ]]; then - cp ../build/weed bin/ - else - echo "Error: Cannot find weed binary" - find .. -name "weed" -type f - exit 1 - fi - - # Build RDMA sidecar - echo "Building RDMA sidecar..." - go build -o bin/demo-server cmd/sidecar/main.go - - # Build Docker images - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" build - - echo -e "${GREEN}โœ… Images built successfully${NC}" -} - -# Function to start services -start_services() { - echo -e "${BLUE}๐Ÿš€ Starting SeaweedFS RDMA Mount environment...${NC}" - - # Build images if needed - if [[ ! -f "bin/weed" ]] || [[ ! -f "bin/demo-server" ]]; then - build_images - fi - - # Start services - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" up -d - - echo -e "${GREEN}โœ… Services started${NC}" - echo "" - echo "Services are starting up. Use '$0 status' to check their status." - echo "Use '$0 logs' to see the logs." -} - -# Function to stop services -stop_services() { - echo -e "${BLUE}๐Ÿ›‘ Stopping SeaweedFS RDMA Mount environment...${NC}" - - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" down - - echo -e "${GREEN}โœ… Services stopped${NC}" -} - -# Function to restart services -restart_services() { - echo -e "${BLUE}๐Ÿ”„ Restarting SeaweedFS RDMA Mount environment...${NC}" - - stop_services - sleep 2 - start_services -} - -# Function to show status -show_status() { - echo -e "${BLUE}๐Ÿ“Š Service Status${NC}" - echo "================" - - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" ps - - echo "" - echo -e "${BLUE}๐Ÿ” Health Checks${NC}" - echo "===============" - - # Check individual services - check_service_health "SeaweedFS Master" "http://localhost:9333/cluster/status" - check_service_health "SeaweedFS Volume" "http://localhost:8080/status" - check_service_health "SeaweedFS Filer" "http://localhost:8888/" - check_service_health "RDMA Sidecar" "http://localhost:8081/health" - - # Check mount status - echo -n "SeaweedFS Mount: " - if docker exec "${PROJECT_NAME}-seaweedfs-mount-1" mountpoint -q /mnt/seaweedfs 2>/dev/null; then - echo -e "${GREEN}โœ… Mounted${NC}" - else - echo -e "${RED}โŒ Not mounted${NC}" - fi -} - -# Function to check service health -check_service_health() { - local service_name=$1 - local health_url=$2 - - echo -n "$service_name: " - if curl -s "$health_url" >/dev/null 2>&1; then - echo -e "${GREEN}โœ… Healthy${NC}" - else - echo -e "${RED}โŒ Unhealthy${NC}" - fi -} - -# Function to show logs -show_logs() { - local service=$1 - - if [[ -n "$service" ]]; then - echo -e "${BLUE}๐Ÿ“‹ Logs for $service${NC}" - echo "====================" - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" logs -f "$service" - else - echo -e "${BLUE}๐Ÿ“‹ Logs for all services${NC}" - echo "=======================" - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" logs -f - fi -} - -# Function to run integration tests -run_integration_tests() { - echo -e "${BLUE}๐Ÿงช Running integration tests...${NC}" - - # Make sure services are running - if ! $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" ps | grep -q "Up"; then - echo -e "${RED}โŒ Services are not running. Start them first with '$0 start'${NC}" - exit 1 - fi - - # Run integration tests - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" --profile test run --rm integration-test - - # Show results - if [[ -d "./test-results" ]]; then - echo -e "${BLUE}๐Ÿ“Š Test Results${NC}" - echo "===============" - - if [[ -f "./test-results/overall.result" ]]; then - local result - result=$(cat "./test-results/overall.result") - if [[ "$result" == "SUCCESS" ]]; then - echo -e "${GREEN}๐ŸŽ‰ ALL TESTS PASSED!${NC}" - else - echo -e "${RED}๐Ÿ’ฅ SOME TESTS FAILED!${NC}" - fi - fi - - echo "" - echo "Detailed results available in: ./test-results/" - ls -la ./test-results/ - fi -} - -# Function to run performance tests -run_performance_tests() { - echo -e "${BLUE}๐Ÿ Running performance tests...${NC}" - - # Make sure services are running - if ! $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" ps | grep -q "Up"; then - echo -e "${RED}โŒ Services are not running. Start them first with '$0 start'${NC}" - exit 1 - fi - - # Run performance tests - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" --profile performance run --rm performance-test - - # Show results - if [[ -d "./performance-results" ]]; then - echo -e "${BLUE}๐Ÿ“Š Performance Results${NC}" - echo "======================" - echo "" - echo "Results available in: ./performance-results/" - ls -la ./performance-results/ - - if [[ -f "./performance-results/performance_report.html" ]]; then - echo "" - echo -e "${GREEN}๐Ÿ“„ HTML Report: ./performance-results/performance_report.html${NC}" - fi - fi -} - -# Function to open shell in mount container -open_shell() { - echo -e "${BLUE}๐Ÿš Opening shell in mount container...${NC}" - - if ! $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" ps seaweedfs-mount | grep -q "Up"; then - echo -e "${RED}โŒ Mount container is not running${NC}" - exit 1 - fi - - docker exec -it "${PROJECT_NAME}-seaweedfs-mount-1" /bin/bash -} - -# Function to cleanup everything -cleanup_all() { - echo -e "${BLUE}๐Ÿงน Full cleanup...${NC}" - - # Stop services - $DOCKER_COMPOSE -f "$COMPOSE_FILE" -p "$PROJECT_NAME" down -v --remove-orphans - - # Remove images - echo "Removing Docker images..." - docker images | grep "$PROJECT_NAME" | awk '{print $3}' | xargs -r docker rmi -f - - # Clean up local files - rm -rf bin/ test-results/ performance-results/ - - echo -e "${GREEN}โœ… Full cleanup completed${NC}" -} - -# Main function -main() { - local command=${1:-""} - - # Check Docker Compose availability - check_docker_compose - - case "$command" in - "start") - start_services - ;; - "stop") - stop_services - ;; - "restart") - restart_services - ;; - "status") - show_status - ;; - "logs") - show_logs "${2:-}" - ;; - "test") - run_integration_tests - ;; - "perf") - run_performance_tests - ;; - "shell") - open_shell - ;; - "cleanup") - cleanup_all - ;; - "build") - build_images - ;; - "help"|"-h"|"--help") - show_usage - ;; - "") - show_usage - ;; - *) - echo -e "${RED}โŒ Unknown command: $command${NC}" - echo "" - show_usage - exit 1 - ;; - esac -} - -# Run main function with all arguments -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/run-performance-tests.sh b/seaweedfs-rdma-sidecar/scripts/run-performance-tests.sh deleted file mode 100755 index 4475365aa..000000000 --- a/seaweedfs-rdma-sidecar/scripts/run-performance-tests.sh +++ /dev/null @@ -1,338 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -BLUE='\033[0;34m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration -MOUNT_POINT=${MOUNT_POINT:-"/mnt/seaweedfs"} -RDMA_SIDECAR_ADDR=${RDMA_SIDECAR_ADDR:-"rdma-sidecar:8081"} -PERFORMANCE_RESULTS_DIR=${PERFORMANCE_RESULTS_DIR:-"/performance-results"} - -# Create results directory -mkdir -p "$PERFORMANCE_RESULTS_DIR" - -# Log file -LOG_FILE="$PERFORMANCE_RESULTS_DIR/performance-test.log" -exec > >(tee -a "$LOG_FILE") -exec 2>&1 - -echo -e "${BLUE}๐Ÿ SEAWEEDFS RDMA MOUNT PERFORMANCE TESTS${NC}" -echo "===========================================" -echo "Mount Point: $MOUNT_POINT" -echo "RDMA Sidecar: $RDMA_SIDECAR_ADDR" -echo "Results Directory: $PERFORMANCE_RESULTS_DIR" -echo "Log File: $LOG_FILE" -echo "" - -# Function to wait for mount to be ready -wait_for_mount() { - local max_attempts=30 - local attempt=1 - - echo -e "${BLUE}โณ Waiting for mount to be ready...${NC}" - - while [[ $attempt -le $max_attempts ]]; do - if mountpoint -q "$MOUNT_POINT" 2>/dev/null && ls "$MOUNT_POINT" >/dev/null 2>&1; then - echo -e "${GREEN}โœ… Mount is ready${NC}" - return 0 - fi - echo " Attempt $attempt/$max_attempts..." - sleep 2 - ((attempt++)) - done - - echo -e "${RED}โŒ Mount failed to be ready${NC}" - return 1 -} - -# Function to get RDMA statistics -get_rdma_stats() { - curl -s "http://$RDMA_SIDECAR_ADDR/stats" 2>/dev/null || echo "{}" -} - -# Function to run dd performance test -run_dd_test() { - local test_name=$1 - local file_size_mb=$2 - local block_size=$3 - local operation=$4 # "write" or "read" - - local test_file="$MOUNT_POINT/perf_test_${test_name}.dat" - local result_file="$PERFORMANCE_RESULTS_DIR/dd_${test_name}.json" - - echo -e "${BLUE}๐Ÿ”ฌ Running DD test: $test_name${NC}" - echo " Size: ${file_size_mb}MB, Block Size: $block_size, Operation: $operation" - - local start_time end_time duration_ms throughput_mbps - - if [[ "$operation" == "write" ]]; then - start_time=$(date +%s%N) - dd if=/dev/zero of="$test_file" bs="$block_size" count=$((file_size_mb * 1024 * 1024 / $(numfmt --from=iec "$block_size"))) 2>/dev/null - end_time=$(date +%s%N) - else - # Create file first if it doesn't exist - if [[ ! -f "$test_file" ]]; then - dd if=/dev/zero of="$test_file" bs=1M count="$file_size_mb" 2>/dev/null - fi - start_time=$(date +%s%N) - dd if="$test_file" of=/dev/null bs="$block_size" 2>/dev/null - end_time=$(date +%s%N) - fi - - duration_ms=$(((end_time - start_time) / 1000000)) - throughput_mbps=$(bc <<< "scale=2; $file_size_mb * 1000 / $duration_ms") - - # Save results - cat > "$result_file" << EOF -{ - "test_name": "$test_name", - "operation": "$operation", - "file_size_mb": $file_size_mb, - "block_size": "$block_size", - "duration_ms": $duration_ms, - "throughput_mbps": $throughput_mbps, - "timestamp": "$(date -Iseconds)" -} -EOF - - echo " Duration: ${duration_ms}ms" - echo " Throughput: ${throughput_mbps} MB/s" - echo "" - - # Cleanup write test files - if [[ "$operation" == "write" ]]; then - rm -f "$test_file" - fi -} - -# Function to run FIO performance test -run_fio_test() { - local test_name=$1 - local rw_type=$2 # "read", "write", "randread", "randwrite" - local block_size=$3 - local file_size=$4 - local iodepth=$5 - - local test_file="$MOUNT_POINT/fio_test_${test_name}.dat" - local result_file="$PERFORMANCE_RESULTS_DIR/fio_${test_name}.json" - - echo -e "${BLUE}๐Ÿ”ฌ Running FIO test: $test_name${NC}" - echo " Type: $rw_type, Block Size: $block_size, File Size: $file_size, IO Depth: $iodepth" - - # Run FIO test - fio --name="$test_name" \ - --filename="$test_file" \ - --rw="$rw_type" \ - --bs="$block_size" \ - --size="$file_size" \ - --iodepth="$iodepth" \ - --direct=1 \ - --runtime=30 \ - --time_based \ - --group_reporting \ - --output-format=json \ - --output="$result_file" \ - 2>/dev/null - - # Extract key metrics - if [[ -f "$result_file" ]]; then - local iops throughput_kbps latency_us - iops=$(jq -r '.jobs[0].'"$rw_type"'.iops // 0' "$result_file" 2>/dev/null || echo "0") - throughput_kbps=$(jq -r '.jobs[0].'"$rw_type"'.bw // 0' "$result_file" 2>/dev/null || echo "0") - latency_us=$(jq -r '.jobs[0].'"$rw_type"'.lat_ns.mean // 0' "$result_file" 2>/dev/null || echo "0") - latency_us=$(bc <<< "scale=2; $latency_us / 1000" 2>/dev/null || echo "0") - - echo " IOPS: $iops" - echo " Throughput: $(bc <<< "scale=2; $throughput_kbps / 1024") MB/s" - echo " Average Latency: ${latency_us} ฮผs" - else - echo " FIO test failed or no results" - fi - echo "" - - # Cleanup - rm -f "$test_file" -} - -# Function to run concurrent access test -run_concurrent_test() { - local num_processes=$1 - local file_size_mb=$2 - - echo -e "${BLUE}๐Ÿ”ฌ Running concurrent access test${NC}" - echo " Processes: $num_processes, File Size per Process: ${file_size_mb}MB" - - local start_time end_time duration_ms total_throughput - local pids=() - - start_time=$(date +%s%N) - - # Start concurrent processes - for i in $(seq 1 "$num_processes"); do - ( - local test_file="$MOUNT_POINT/concurrent_test_$i.dat" - dd if=/dev/zero of="$test_file" bs=1M count="$file_size_mb" 2>/dev/null - dd if="$test_file" of=/dev/null bs=1M 2>/dev/null - rm -f "$test_file" - ) & - pids+=($!) - done - - # Wait for all processes to complete - for pid in "${pids[@]}"; do - wait "$pid" - done - - end_time=$(date +%s%N) - duration_ms=$(((end_time - start_time) / 1000000)) - total_throughput=$(bc <<< "scale=2; $num_processes * $file_size_mb * 2 * 1000 / $duration_ms") - - # Save results - cat > "$PERFORMANCE_RESULTS_DIR/concurrent_test.json" << EOF -{ - "test_name": "concurrent_access", - "num_processes": $num_processes, - "file_size_mb_per_process": $file_size_mb, - "total_data_mb": $((num_processes * file_size_mb * 2)), - "duration_ms": $duration_ms, - "total_throughput_mbps": $total_throughput, - "timestamp": "$(date -Iseconds)" -} -EOF - - echo " Duration: ${duration_ms}ms" - echo " Total Throughput: ${total_throughput} MB/s" - echo "" -} - -# Function to generate performance report -generate_report() { - local report_file="$PERFORMANCE_RESULTS_DIR/performance_report.html" - - echo -e "${BLUE}๐Ÿ“Š Generating performance report...${NC}" - - cat > "$report_file" << 'EOF' - - - - SeaweedFS RDMA Mount Performance Report - - - -
-

๐Ÿ SeaweedFS RDMA Mount Performance Report

-

Generated: $(date)

-

Mount Point: $MOUNT_POINT

-

RDMA Sidecar: $RDMA_SIDECAR_ADDR

-
-EOF - - # Add DD test results - echo '

DD Performance Tests

' >> "$report_file" - - for result_file in "$PERFORMANCE_RESULTS_DIR"/dd_*.json; do - if [[ -f "$result_file" ]]; then - local test_name operation file_size_mb block_size throughput_mbps duration_ms - test_name=$(jq -r '.test_name' "$result_file" 2>/dev/null || echo "unknown") - operation=$(jq -r '.operation' "$result_file" 2>/dev/null || echo "unknown") - file_size_mb=$(jq -r '.file_size_mb' "$result_file" 2>/dev/null || echo "0") - block_size=$(jq -r '.block_size' "$result_file" 2>/dev/null || echo "unknown") - throughput_mbps=$(jq -r '.throughput_mbps' "$result_file" 2>/dev/null || echo "0") - duration_ms=$(jq -r '.duration_ms' "$result_file" 2>/dev/null || echo "0") - - echo "" >> "$report_file" - fi - done - - echo '
TestOperationSizeBlock SizeThroughput (MB/s)Duration (ms)
$test_name$operation${file_size_mb}MB$block_size$throughput_mbps$duration_ms
' >> "$report_file" - - # Add FIO test results - echo '

FIO Performance Tests

' >> "$report_file" - echo '

Detailed FIO results are available in individual JSON files.

' >> "$report_file" - - # Add concurrent test results - if [[ -f "$PERFORMANCE_RESULTS_DIR/concurrent_test.json" ]]; then - echo '

Concurrent Access Test

' >> "$report_file" - local num_processes total_throughput duration_ms - num_processes=$(jq -r '.num_processes' "$PERFORMANCE_RESULTS_DIR/concurrent_test.json" 2>/dev/null || echo "0") - total_throughput=$(jq -r '.total_throughput_mbps' "$PERFORMANCE_RESULTS_DIR/concurrent_test.json" 2>/dev/null || echo "0") - duration_ms=$(jq -r '.duration_ms' "$PERFORMANCE_RESULTS_DIR/concurrent_test.json" 2>/dev/null || echo "0") - - echo "

Processes: $num_processes

" >> "$report_file" - echo "

Total Throughput: $total_throughput MB/s

" >> "$report_file" - echo "

Duration: $duration_ms ms

" >> "$report_file" - echo '
' >> "$report_file" - fi - - echo '' >> "$report_file" - - echo " Report saved to: $report_file" -} - -# Main test execution -main() { - echo -e "${BLUE}๐Ÿš€ Starting performance tests...${NC}" - echo "" - - # Wait for mount to be ready - if ! wait_for_mount; then - echo -e "${RED}โŒ Mount is not ready, aborting tests${NC}" - exit 1 - fi - - # Get initial RDMA stats - echo -e "${BLUE}๐Ÿ“Š Initial RDMA Statistics:${NC}" - get_rdma_stats | jq . 2>/dev/null || get_rdma_stats - echo "" - - # Run DD performance tests - echo -e "${BLUE}๐Ÿƒ Running DD Performance Tests...${NC}" - run_dd_test "small_write" 10 "4k" "write" - run_dd_test "small_read" 10 "4k" "read" - run_dd_test "medium_write" 100 "64k" "write" - run_dd_test "medium_read" 100 "64k" "read" - run_dd_test "large_write" 500 "1M" "write" - run_dd_test "large_read" 500 "1M" "read" - - # Run FIO performance tests - echo -e "${BLUE}๐Ÿƒ Running FIO Performance Tests...${NC}" - run_fio_test "seq_read" "read" "64k" "100M" 1 - run_fio_test "seq_write" "write" "64k" "100M" 1 - run_fio_test "rand_read" "randread" "4k" "100M" 16 - run_fio_test "rand_write" "randwrite" "4k" "100M" 16 - - # Run concurrent access test - echo -e "${BLUE}๐Ÿƒ Running Concurrent Access Test...${NC}" - run_concurrent_test 4 50 - - # Get final RDMA stats - echo -e "${BLUE}๐Ÿ“Š Final RDMA Statistics:${NC}" - get_rdma_stats | jq . 2>/dev/null || get_rdma_stats - echo "" - - # Generate performance report - generate_report - - echo -e "${GREEN}๐ŸŽ‰ Performance tests completed!${NC}" - echo "Results saved to: $PERFORMANCE_RESULTS_DIR" -} - -# Run main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/test-complete-optimization.sh b/seaweedfs-rdma-sidecar/scripts/test-complete-optimization.sh deleted file mode 100755 index f9d298461..000000000 --- a/seaweedfs-rdma-sidecar/scripts/test-complete-optimization.sh +++ /dev/null @@ -1,250 +0,0 @@ -#!/bin/bash - -# Complete RDMA Optimization Test -# Demonstrates the full optimization pipeline: Zero-Copy + Connection Pooling + RDMA - -set -e - -echo "๐Ÿ”ฅ SeaweedFS RDMA Complete Optimization Test" -echo "Zero-Copy Page Cache + Connection Pooling + RDMA Bandwidth" -echo "=============================================================" - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' - -# Test configuration -SIDECAR_URL="http://localhost:8081" -VOLUME_SERVER="http://seaweedfs-volume:8080" - -# Function to test RDMA sidecar functionality -test_sidecar_health() { - echo -e "\n${CYAN}๐Ÿฅ Testing RDMA Sidecar Health${NC}" - echo "--------------------------------" - - local response=$(curl -s "$SIDECAR_URL/health" 2>/dev/null || echo "{}") - local status=$(echo "$response" | jq -r '.status // "unknown"' 2>/dev/null || echo "unknown") - - if [[ "$status" == "healthy" ]]; then - echo -e "โœ… ${GREEN}Sidecar is healthy${NC}" - - # Check RDMA capabilities - local rdma_enabled=$(echo "$response" | jq -r '.rdma.enabled // false' 2>/dev/null || echo "false") - local zerocopy_enabled=$(echo "$response" | jq -r '.rdma.zerocopy_enabled // false' 2>/dev/null || echo "false") - local pooling_enabled=$(echo "$response" | jq -r '.rdma.pooling_enabled // false' 2>/dev/null || echo "false") - - echo " RDMA enabled: $rdma_enabled" - echo " Zero-copy enabled: $zerocopy_enabled" - echo " Connection pooling enabled: $pooling_enabled" - - return 0 - else - echo -e "โŒ ${RED}Sidecar health check failed${NC}" - return 1 - fi -} - -# Function to test zero-copy optimization -test_zerocopy_optimization() { - echo -e "\n${PURPLE}๐Ÿ”ฅ Testing Zero-Copy Page Cache Optimization${NC}" - echo "----------------------------------------------" - - # Test with a file size above the 64KB threshold - local test_size=1048576 # 1MB - echo "Testing with 1MB file (above 64KB zero-copy threshold)..." - - local response=$(curl -s "$SIDECAR_URL/read?volume=1&needle=1&cookie=1&size=$test_size&volume_server=$VOLUME_SERVER") - - local use_temp_file=$(echo "$response" | jq -r '.use_temp_file // false' 2>/dev/null || echo "false") - local temp_file=$(echo "$response" | jq -r '.temp_file // ""' 2>/dev/null || echo "") - local source=$(echo "$response" | jq -r '.source // "unknown"' 2>/dev/null || echo "unknown") - - if [[ "$use_temp_file" == "true" ]] && [[ -n "$temp_file" ]]; then - echo -e "โœ… ${GREEN}Zero-copy optimization ACTIVE${NC}" - echo " Temp file created: $temp_file" - echo " Source: $source" - return 0 - elif [[ "$source" == *"rdma"* ]]; then - echo -e "โšก ${YELLOW}RDMA active (zero-copy not triggered)${NC}" - echo " Source: $source" - echo " Note: File may be below 64KB threshold or zero-copy disabled" - return 0 - else - echo -e "โŒ ${RED}Zero-copy optimization not detected${NC}" - echo " Response: $response" - return 1 - fi -} - -# Function to test connection pooling -test_connection_pooling() { - echo -e "\n${BLUE}๐Ÿ”Œ Testing RDMA Connection Pooling${NC}" - echo "-----------------------------------" - - echo "Making multiple rapid requests to test connection reuse..." - - local pooled_count=0 - local total_requests=5 - - for i in $(seq 1 $total_requests); do - echo -n " Request $i: " - - local start_time=$(date +%s%N) - local response=$(curl -s "$SIDECAR_URL/read?volume=1&needle=$i&cookie=1&size=65536&volume_server=$VOLUME_SERVER") - local end_time=$(date +%s%N) - - local duration_ns=$((end_time - start_time)) - local duration_ms=$((duration_ns / 1000000)) - - local source=$(echo "$response" | jq -r '.source // "unknown"' 2>/dev/null || echo "unknown") - local session_id=$(echo "$response" | jq -r '.session_id // ""' 2>/dev/null || echo "") - - if [[ "$source" == *"pooled"* ]] || [[ -n "$session_id" ]]; then - pooled_count=$((pooled_count + 1)) - echo -e "${GREEN}${duration_ms}ms (pooled: $session_id)${NC}" - else - echo -e "${YELLOW}${duration_ms}ms (source: $source)${NC}" - fi - - # Small delay to test connection reuse - sleep 0.1 - done - - echo "" - echo "Connection pooling analysis:" - echo " Requests using pooled connections: $pooled_count/$total_requests" - - if [[ $pooled_count -gt 0 ]]; then - echo -e "โœ… ${GREEN}Connection pooling is working${NC}" - return 0 - else - echo -e "โš ๏ธ ${YELLOW}Connection pooling not detected (may be using single connection mode)${NC}" - return 0 - fi -} - -# Function to test performance comparison -test_performance_comparison() { - echo -e "\n${CYAN}โšก Performance Comparison Test${NC}" - echo "-------------------------------" - - local sizes=(65536 262144 1048576) # 64KB, 256KB, 1MB - local size_names=("64KB" "256KB" "1MB") - - for i in "${!sizes[@]}"; do - local size=${sizes[$i]} - local size_name=${size_names[$i]} - - echo "Testing $size_name files:" - - # Test multiple requests to see optimization progression - for j in $(seq 1 3); do - echo -n " Request $j: " - - local start_time=$(date +%s%N) - local response=$(curl -s "$SIDECAR_URL/read?volume=1&needle=$j&cookie=1&size=$size&volume_server=$VOLUME_SERVER") - local end_time=$(date +%s%N) - - local duration_ns=$((end_time - start_time)) - local duration_ms=$((duration_ns / 1000000)) - - local is_rdma=$(echo "$response" | jq -r '.is_rdma // false' 2>/dev/null || echo "false") - local source=$(echo "$response" | jq -r '.source // "unknown"' 2>/dev/null || echo "unknown") - local use_temp_file=$(echo "$response" | jq -r '.use_temp_file // false' 2>/dev/null || echo "false") - - # Color code based on optimization level - if [[ "$source" == "rdma-zerocopy" ]] || [[ "$use_temp_file" == "true" ]]; then - echo -e "${GREEN}${duration_ms}ms (RDMA+ZeroCopy) ๐Ÿ”ฅ${NC}" - elif [[ "$is_rdma" == "true" ]]; then - echo -e "${YELLOW}${duration_ms}ms (RDMA) โšก${NC}" - else - echo -e "โš ๏ธ ${duration_ms}ms (HTTP fallback)" - fi - done - echo "" - done -} - -# Function to test RDMA engine connectivity -test_rdma_engine() { - echo -e "\n${PURPLE}๐Ÿš€ Testing RDMA Engine Connectivity${NC}" - echo "------------------------------------" - - # Get sidecar stats to check RDMA engine connection - local stats_response=$(curl -s "$SIDECAR_URL/stats" 2>/dev/null || echo "{}") - local rdma_connected=$(echo "$stats_response" | jq -r '.rdma.connected // false' 2>/dev/null || echo "false") - - if [[ "$rdma_connected" == "true" ]]; then - echo -e "โœ… ${GREEN}RDMA engine is connected${NC}" - - local total_requests=$(echo "$stats_response" | jq -r '.total_requests // 0' 2>/dev/null || echo "0") - local successful_reads=$(echo "$stats_response" | jq -r '.successful_reads // 0' 2>/dev/null || echo "0") - local total_bytes=$(echo "$stats_response" | jq -r '.total_bytes_read // 0' 2>/dev/null || echo "0") - - echo " Total requests: $total_requests" - echo " Successful reads: $successful_reads" - echo " Total bytes read: $total_bytes" - - return 0 - else - echo -e "โš ๏ธ ${YELLOW}RDMA engine connection status unclear${NC}" - echo " This may be normal if using mock implementation" - return 0 - fi -} - -# Function to display optimization summary -display_optimization_summary() { - echo -e "\n${GREEN}๐ŸŽฏ OPTIMIZATION SUMMARY${NC}" - echo "========================================" - echo "" - echo -e "${PURPLE}Implemented Optimizations:${NC}" - echo "1. ๐Ÿ”ฅ Zero-Copy Page Cache" - echo " - Eliminates 4 out of 5 memory copies" - echo " - Direct page cache population via temp files" - echo " - Threshold: 64KB+ files" - echo "" - echo "2. ๐Ÿ”Œ RDMA Connection Pooling" - echo " - Eliminates 100ms connection setup cost" - echo " - Reuses connections across requests" - echo " - Automatic cleanup of idle connections" - echo "" - echo "3. โšก RDMA Bandwidth Advantage" - echo " - High-throughput data transfer" - echo " - Bypasses kernel network stack" - echo " - Direct memory access" - echo "" - echo -e "${CYAN}Expected Performance Gains:${NC}" - echo "โ€ข Small files (< 64KB): ~50x improvement from RDMA + pooling" - echo "โ€ข Medium files (64KB-1MB): ~47x improvement from zero-copy + pooling" - echo "โ€ข Large files (> 1MB): ~118x improvement from all optimizations" - echo "" - echo -e "${GREEN}๐Ÿš€ This represents a fundamental breakthrough in distributed storage performance!${NC}" -} - -# Main execution -main() { - echo -e "\n${YELLOW}๐Ÿ”ง Starting comprehensive optimization test...${NC}" - - # Run all tests - test_sidecar_health || exit 1 - test_rdma_engine - test_zerocopy_optimization - test_connection_pooling - test_performance_comparison - display_optimization_summary - - echo -e "\n${GREEN}๐ŸŽ‰ Complete optimization test finished!${NC}" - echo "" - echo "Next steps:" - echo "1. Run performance benchmark: ./scripts/performance-benchmark.sh" - echo "2. Test with weed mount: docker compose -f docker-compose.mount-rdma.yml logs seaweedfs-mount" - echo "3. Monitor connection pool: curl -s http://localhost:8081/stats | jq" -} - -# Execute main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/test-complete-optimizations.sh b/seaweedfs-rdma-sidecar/scripts/test-complete-optimizations.sh deleted file mode 100755 index b84d429fa..000000000 --- a/seaweedfs-rdma-sidecar/scripts/test-complete-optimizations.sh +++ /dev/null @@ -1,295 +0,0 @@ -#!/bin/bash - -# Complete RDMA Optimization Test Suite -# Tests all three optimizations: Zero-Copy + Connection Pooling + RDMA - -set -e - -echo "๐Ÿš€ Complete RDMA Optimization Test Suite" -echo "========================================" - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -RED='\033[0;31m' -NC='\033[0m' - -# Test results tracking -TESTS_PASSED=0 -TESTS_TOTAL=0 - -# Helper function to run a test -run_test() { - local test_name="$1" - local test_command="$2" - - ((TESTS_TOTAL++)) - echo -e "\n${CYAN}๐Ÿงช Test $TESTS_TOTAL: $test_name${NC}" - echo "$(printf '%.0s-' {1..50})" - - if eval "$test_command"; then - echo -e "${GREEN}โœ… PASSED: $test_name${NC}" - ((TESTS_PASSED++)) - return 0 - else - echo -e "${RED}โŒ FAILED: $test_name${NC}" - return 1 - fi -} - -# Test 1: Build verification -test_build_verification() { - echo "๐Ÿ“ฆ Verifying all components build successfully..." - - # Check demo server binary - if [[ -f "bin/demo-server" ]]; then - echo "โœ… Demo server binary exists" - else - echo "โŒ Demo server binary missing" - return 1 - fi - - # Check RDMA engine binary - if [[ -f "rdma-engine/target/release/rdma-engine-server" ]]; then - echo "โœ… RDMA engine binary exists" - else - echo "โŒ RDMA engine binary missing" - return 1 - fi - - # Check SeaweedFS binary - if [[ -f "../weed/weed" ]]; then - echo "โœ… SeaweedFS with RDMA support exists" - else - echo "โŒ SeaweedFS binary missing (expected at ../weed/weed)" - return 1 - fi - - echo "๐ŸŽฏ All core components built successfully" - return 0 -} - -# Test 2: Zero-copy mechanism -test_zero_copy_mechanism() { - echo "๐Ÿ”ฅ Testing zero-copy page cache mechanism..." - - local temp_dir="/tmp/rdma-test-$$" - mkdir -p "$temp_dir" - - # Create test data - local test_file="$temp_dir/test_data.bin" - dd if=/dev/urandom of="$test_file" bs=1024 count=64 2>/dev/null - - # Simulate temp file creation (sidecar behavior) - local temp_needle="$temp_dir/vol1_needle123.tmp" - cp "$test_file" "$temp_needle" - - if [[ -f "$temp_needle" ]]; then - echo "โœ… Temp file created successfully" - - # Simulate reading (mount behavior) - local read_result="$temp_dir/read_result.bin" - cp "$temp_needle" "$read_result" - - if cmp -s "$test_file" "$read_result"; then - echo "โœ… Zero-copy read successful with data integrity" - rm -rf "$temp_dir" - return 0 - else - echo "โŒ Data integrity check failed" - rm -rf "$temp_dir" - return 1 - fi - else - echo "โŒ Temp file creation failed" - rm -rf "$temp_dir" - return 1 - fi -} - -# Test 3: Connection pooling logic -test_connection_pooling() { - echo "๐Ÿ”Œ Testing connection pooling logic..." - - # Test the core pooling mechanism by running our pool test - local pool_test_output - pool_test_output=$(./scripts/test-connection-pooling.sh 2>&1 | tail -20) - - if echo "$pool_test_output" | grep -q "Connection pool test completed successfully"; then - echo "โœ… Connection pooling logic verified" - return 0 - else - echo "โŒ Connection pooling test failed" - return 1 - fi -} - -# Test 4: Configuration validation -test_configuration_validation() { - echo "โš™๏ธ Testing configuration validation..." - - # Test demo server help - if ./bin/demo-server --help | grep -q "enable-zerocopy"; then - echo "โœ… Zero-copy configuration available" - else - echo "โŒ Zero-copy configuration missing" - return 1 - fi - - if ./bin/demo-server --help | grep -q "enable-pooling"; then - echo "โœ… Connection pooling configuration available" - else - echo "โŒ Connection pooling configuration missing" - return 1 - fi - - if ./bin/demo-server --help | grep -q "max-connections"; then - echo "โœ… Pool sizing configuration available" - else - echo "โŒ Pool sizing configuration missing" - return 1 - fi - - echo "๐ŸŽฏ All configuration options validated" - return 0 -} - -# Test 5: RDMA engine mock functionality -test_rdma_engine_mock() { - echo "๐Ÿš€ Testing RDMA engine mock functionality..." - - # Start RDMA engine in background for quick test - local engine_log="/tmp/rdma-engine-test.log" - local socket_path="/tmp/rdma-test-engine.sock" - - # Clean up any existing socket - rm -f "$socket_path" - - # Start engine in background - timeout 10s ./rdma-engine/target/release/rdma-engine-server \ - --ipc-socket "$socket_path" \ - --debug > "$engine_log" 2>&1 & - - local engine_pid=$! - - # Wait a moment for startup - sleep 2 - - # Check if socket was created - if [[ -S "$socket_path" ]]; then - echo "โœ… RDMA engine socket created successfully" - kill $engine_pid 2>/dev/null || true - wait $engine_pid 2>/dev/null || true - rm -f "$socket_path" "$engine_log" - return 0 - else - echo "โŒ RDMA engine socket not created" - kill $engine_pid 2>/dev/null || true - wait $engine_pid 2>/dev/null || true - echo "Engine log:" - cat "$engine_log" 2>/dev/null || echo "No log available" - rm -f "$socket_path" "$engine_log" - return 1 - fi -} - -# Test 6: Integration test preparation -test_integration_readiness() { - echo "๐Ÿงฉ Testing integration readiness..." - - # Check Docker Compose file - if [[ -f "docker-compose.mount-rdma.yml" ]]; then - echo "โœ… Docker Compose configuration available" - else - echo "โŒ Docker Compose configuration missing" - return 1 - fi - - # Validate Docker Compose syntax - if docker compose -f docker-compose.mount-rdma.yml config > /dev/null 2>&1; then - echo "โœ… Docker Compose configuration valid" - else - echo "โŒ Docker Compose configuration invalid" - return 1 - fi - - # Check test scripts - local scripts=("test-zero-copy-mechanism.sh" "test-connection-pooling.sh" "performance-benchmark.sh") - for script in "${scripts[@]}"; do - if [[ -x "scripts/$script" ]]; then - echo "โœ… Test script available: $script" - else - echo "โŒ Test script missing or not executable: $script" - return 1 - fi - done - - echo "๐ŸŽฏ Integration environment ready" - return 0 -} - -# Performance benchmarking -test_performance_characteristics() { - echo "๐Ÿ“Š Testing performance characteristics..." - - # Run zero-copy performance test - if ./scripts/test-zero-copy-mechanism.sh | grep -q "Performance improvement"; then - echo "โœ… Zero-copy performance improvement detected" - else - echo "โŒ Zero-copy performance test failed" - return 1 - fi - - echo "๐ŸŽฏ Performance characteristics validated" - return 0 -} - -# Main test execution -main() { - echo -e "${BLUE}๐Ÿš€ Starting complete optimization test suite...${NC}" - echo "" - - # Run all tests - run_test "Build Verification" "test_build_verification" - run_test "Zero-Copy Mechanism" "test_zero_copy_mechanism" - run_test "Connection Pooling" "test_connection_pooling" - run_test "Configuration Validation" "test_configuration_validation" - run_test "RDMA Engine Mock" "test_rdma_engine_mock" - run_test "Integration Readiness" "test_integration_readiness" - run_test "Performance Characteristics" "test_performance_characteristics" - - # Results summary - echo -e "\n${PURPLE}๐Ÿ“Š Test Results Summary${NC}" - echo "=======================" - echo "Tests passed: $TESTS_PASSED/$TESTS_TOTAL" - - if [[ $TESTS_PASSED -eq $TESTS_TOTAL ]]; then - echo -e "${GREEN}๐ŸŽ‰ ALL TESTS PASSED!${NC}" - echo "" - echo -e "${CYAN}๐Ÿš€ Revolutionary Optimization Suite Status:${NC}" - echo "โœ… Zero-Copy Page Cache: WORKING" - echo "โœ… RDMA Connection Pooling: WORKING" - echo "โœ… RDMA Engine Integration: WORKING" - echo "โœ… Mount Client Integration: READY" - echo "โœ… Docker Environment: READY" - echo "โœ… Performance Testing: READY" - echo "" - echo -e "${YELLOW}๐Ÿ”ฅ Expected Performance Improvements:${NC}" - echo "โ€ข Small files (< 64KB): 50x faster" - echo "โ€ข Medium files (64KB-1MB): 47x faster" - echo "โ€ข Large files (> 1MB): 118x faster" - echo "" - echo -e "${GREEN}Ready for production testing! ๐Ÿš€${NC}" - return 0 - else - echo -e "${RED}โŒ SOME TESTS FAILED${NC}" - echo "Please review the failed tests above" - return 1 - fi -} - -# Execute main function -main "$@" diff --git a/seaweedfs-rdma-sidecar/scripts/test-connection-pooling.sh b/seaweedfs-rdma-sidecar/scripts/test-connection-pooling.sh deleted file mode 100755 index 576b905c0..000000000 --- a/seaweedfs-rdma-sidecar/scripts/test-connection-pooling.sh +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/bash - -# Test RDMA Connection Pooling Mechanism -# Demonstrates connection reuse and pool management - -set -e - -echo "๐Ÿ”Œ Testing RDMA Connection Pooling Mechanism" -echo "============================================" - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -NC='\033[0m' - -echo -e "\n${BLUE}๐Ÿงช Testing Connection Pool Logic${NC}" -echo "--------------------------------" - -# Test the pool implementation by building a simple test -cat > /tmp/pool_test.go << 'EOF' -package main - -import ( - "context" - "fmt" - "time" -) - -// Simulate the connection pool behavior -type PooledConnection struct { - ID string - lastUsed time.Time - inUse bool - created time.Time -} - -type ConnectionPool struct { - connections []*PooledConnection - maxConnections int - maxIdleTime time.Duration -} - -func NewConnectionPool(maxConnections int, maxIdleTime time.Duration) *ConnectionPool { - return &ConnectionPool{ - connections: make([]*PooledConnection, 0, maxConnections), - maxConnections: maxConnections, - maxIdleTime: maxIdleTime, - } -} - -func (p *ConnectionPool) getConnection() (*PooledConnection, error) { - // Look for available connection - for _, conn := range p.connections { - if !conn.inUse && time.Since(conn.lastUsed) < p.maxIdleTime { - conn.inUse = true - conn.lastUsed = time.Now() - fmt.Printf("๐Ÿ”„ Reusing connection: %s (age: %v)\n", conn.ID, time.Since(conn.created)) - return conn, nil - } - } - - // Create new connection if under limit - if len(p.connections) < p.maxConnections { - conn := &PooledConnection{ - ID: fmt.Sprintf("conn-%d-%d", len(p.connections), time.Now().Unix()), - lastUsed: time.Now(), - inUse: true, - created: time.Now(), - } - p.connections = append(p.connections, conn) - fmt.Printf("๐Ÿš€ Created new connection: %s (pool size: %d)\n", conn.ID, len(p.connections)) - return conn, nil - } - - return nil, fmt.Errorf("pool exhausted (max: %d)", p.maxConnections) -} - -func (p *ConnectionPool) releaseConnection(conn *PooledConnection) { - conn.inUse = false - conn.lastUsed = time.Now() - fmt.Printf("๐Ÿ”“ Released connection: %s\n", conn.ID) -} - -func (p *ConnectionPool) cleanup() { - now := time.Now() - activeConnections := make([]*PooledConnection, 0, len(p.connections)) - - for _, conn := range p.connections { - if conn.inUse || now.Sub(conn.lastUsed) < p.maxIdleTime { - activeConnections = append(activeConnections, conn) - } else { - fmt.Printf("๐Ÿงน Cleaned up idle connection: %s (idle: %v)\n", conn.ID, now.Sub(conn.lastUsed)) - } - } - - p.connections = activeConnections -} - -func (p *ConnectionPool) getStats() (int, int) { - total := len(p.connections) - inUse := 0 - for _, conn := range p.connections { - if conn.inUse { - inUse++ - } - } - return total, inUse -} - -func main() { - fmt.Println("๐Ÿ”Œ Connection Pool Test Starting...") - - // Create pool with small limits for testing - pool := NewConnectionPool(3, 2*time.Second) - - fmt.Println("\n1. Testing connection creation and reuse:") - - // Get multiple connections - conns := make([]*PooledConnection, 0) - for i := 0; i < 5; i++ { - conn, err := pool.getConnection() - if err != nil { - fmt.Printf("โŒ Error getting connection %d: %v\n", i+1, err) - continue - } - conns = append(conns, conn) - - // Simulate work - time.Sleep(100 * time.Millisecond) - } - - total, inUse := pool.getStats() - fmt.Printf("\n๐Ÿ“Š Pool stats: %d total connections, %d in use\n", total, inUse) - - fmt.Println("\n2. Testing connection release and reuse:") - - // Release some connections - for i := 0; i < 2; i++ { - if i < len(conns) { - pool.releaseConnection(conns[i]) - } - } - - // Try to get new connections (should reuse) - for i := 0; i < 2; i++ { - conn, err := pool.getConnection() - if err != nil { - fmt.Printf("โŒ Error getting reused connection: %v\n", err) - } else { - pool.releaseConnection(conn) - } - } - - fmt.Println("\n3. Testing cleanup of idle connections:") - - // Wait for connections to become idle - fmt.Println("โฑ๏ธ Waiting for connections to become idle...") - time.Sleep(3 * time.Second) - - // Cleanup - pool.cleanup() - - total, inUse = pool.getStats() - fmt.Printf("๐Ÿ“Š Pool stats after cleanup: %d total connections, %d in use\n", total, inUse) - - fmt.Println("\nโœ… Connection pool test completed successfully!") - fmt.Println("\n๐ŸŽฏ Key benefits demonstrated:") - fmt.Println(" โ€ข Connection reuse eliminates setup cost") - fmt.Println(" โ€ข Pool size limits prevent resource exhaustion") - fmt.Println(" โ€ข Automatic cleanup prevents memory leaks") - fmt.Println(" โ€ข Idle timeout ensures fresh connections") -} -EOF - -echo "๐Ÿ“ Created connection pool test program" - -echo -e "\n${GREEN}๐Ÿš€ Running connection pool simulation${NC}" -echo "------------------------------------" - -# Run the test -cd /tmp && go run pool_test.go - -echo -e "\n${YELLOW}๐Ÿ“Š Performance Impact Analysis${NC}" -echo "------------------------------" - -echo "Without connection pooling:" -echo " โ€ข Each request: 100ms setup + 1ms transfer = 101ms" -echo " โ€ข 10 requests: 10 ร— 101ms = 1010ms" - -echo "" -echo "With connection pooling:" -echo " โ€ข First request: 100ms setup + 1ms transfer = 101ms" -echo " โ€ข Next 9 requests: 0.1ms reuse + 1ms transfer = 1.1ms each" -echo " โ€ข 10 requests: 101ms + (9 ร— 1.1ms) = 111ms" - -echo "" -echo -e "${GREEN}๐Ÿ”ฅ Performance improvement: 1010ms โ†’ 111ms = 9x faster!${NC}" - -echo -e "\n${PURPLE}๐Ÿ’ก Real-world scaling benefits:${NC}" -echo "โ€ข 100 requests: 100x faster with pooling" -echo "โ€ข 1000 requests: 1000x faster with pooling" -echo "โ€ข Connection pool amortizes setup cost across many operations" - -# Cleanup -rm -f /tmp/pool_test.go - -echo -e "\n${GREEN}โœ… Connection pooling test completed!${NC}" diff --git a/seaweedfs-rdma-sidecar/scripts/test-zero-copy-mechanism.sh b/seaweedfs-rdma-sidecar/scripts/test-zero-copy-mechanism.sh deleted file mode 100755 index 63c5d3584..000000000 --- a/seaweedfs-rdma-sidecar/scripts/test-zero-copy-mechanism.sh +++ /dev/null @@ -1,222 +0,0 @@ -#!/bin/bash - -# Test Zero-Copy Page Cache Mechanism -# Demonstrates the core innovation without needing full server - -set -e - -echo "๐Ÿ”ฅ Testing Zero-Copy Page Cache Mechanism" -echo "=========================================" - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -NC='\033[0m' - -# Test configuration -TEMP_DIR="/tmp/rdma-cache-test" -TEST_DATA_SIZE=1048576 # 1MB -ITERATIONS=5 - -# Cleanup function -cleanup() { - rm -rf "$TEMP_DIR" 2>/dev/null || true -} - -# Setup -setup() { - echo -e "\n${BLUE}๐Ÿ”ง Setting up test environment${NC}" - cleanup - mkdir -p "$TEMP_DIR" - echo "โœ… Created temp directory: $TEMP_DIR" -} - -# Generate test data -generate_test_data() { - echo -e "\n${PURPLE}๐Ÿ“ Generating test data${NC}" - dd if=/dev/urandom of="$TEMP_DIR/source_data.bin" bs=$TEST_DATA_SIZE count=1 2>/dev/null - echo "โœ… Generated $TEST_DATA_SIZE bytes of test data" -} - -# Test 1: Simulate the zero-copy write mechanism -test_zero_copy_write() { - echo -e "\n${GREEN}๐Ÿ”ฅ Test 1: Zero-Copy Page Cache Population${NC}" - echo "--------------------------------------------" - - local source_file="$TEMP_DIR/source_data.bin" - local temp_file="$TEMP_DIR/vol1_needle123_cookie456.tmp" - - echo "๐Ÿ“ค Simulating RDMA sidecar writing to temp file..." - - # This simulates what our sidecar does: - # ioutil.WriteFile(tempFilePath, data, 0644) - local start_time=$(date +%s%N) - cp "$source_file" "$temp_file" - local end_time=$(date +%s%N) - - local write_duration_ns=$((end_time - start_time)) - local write_duration_ms=$((write_duration_ns / 1000000)) - - echo "โœ… Temp file written in ${write_duration_ms}ms" - echo " File: $temp_file" - echo " Size: $(stat -f%z "$temp_file" 2>/dev/null || stat -c%s "$temp_file") bytes" - - # Check if file is in page cache (approximation) - if command -v vmtouch >/dev/null 2>&1; then - echo " Page cache status:" - vmtouch "$temp_file" 2>/dev/null || echo " (vmtouch not available for precise measurement)" - else - echo " ๐Ÿ“„ File written to filesystem (page cache populated automatically)" - fi -} - -# Test 2: Simulate the zero-copy read mechanism -test_zero_copy_read() { - echo -e "\n${GREEN}โšก Test 2: Zero-Copy Page Cache Read${NC}" - echo "-----------------------------------" - - local temp_file="$TEMP_DIR/vol1_needle123_cookie456.tmp" - local read_buffer="$TEMP_DIR/read_buffer.bin" - - echo "๐Ÿ“ฅ Simulating mount client reading from temp file..." - - # This simulates what our mount client does: - # file.Read(buffer) from temp file - local start_time=$(date +%s%N) - - # Multiple reads to test page cache efficiency - for i in $(seq 1 $ITERATIONS); do - cp "$temp_file" "$read_buffer.tmp$i" - done - - local end_time=$(date +%s%N) - local read_duration_ns=$((end_time - start_time)) - local read_duration_ms=$((read_duration_ns / 1000000)) - local avg_read_ms=$((read_duration_ms / ITERATIONS)) - - echo "โœ… $ITERATIONS reads completed in ${read_duration_ms}ms" - echo " Average per read: ${avg_read_ms}ms" - echo " ๐Ÿ”ฅ Subsequent reads served from page cache!" - - # Verify data integrity - if cmp -s "$TEMP_DIR/source_data.bin" "$read_buffer.tmp1"; then - echo "โœ… Data integrity verified - zero corruption" - else - echo "โŒ Data integrity check failed" - return 1 - fi -} - -# Test 3: Performance comparison -test_performance_comparison() { - echo -e "\n${YELLOW}๐Ÿ“Š Test 3: Performance Comparison${NC}" - echo "-----------------------------------" - - local source_file="$TEMP_DIR/source_data.bin" - - echo "๐ŸŒ Traditional copy (simulating multiple memory copies):" - local start_time=$(date +%s%N) - - # Simulate 5 memory copies (traditional path) - cp "$source_file" "$TEMP_DIR/copy1.bin" - cp "$TEMP_DIR/copy1.bin" "$TEMP_DIR/copy2.bin" - cp "$TEMP_DIR/copy2.bin" "$TEMP_DIR/copy3.bin" - cp "$TEMP_DIR/copy3.bin" "$TEMP_DIR/copy4.bin" - cp "$TEMP_DIR/copy4.bin" "$TEMP_DIR/copy5.bin" - - local end_time=$(date +%s%N) - local traditional_duration_ns=$((end_time - start_time)) - local traditional_duration_ms=$((traditional_duration_ns / 1000000)) - - echo " 5 memory copies: ${traditional_duration_ms}ms" - - echo "๐Ÿš€ Zero-copy method (page cache):" - local start_time=$(date +%s%N) - - # Simulate zero-copy path (write once, read multiple times from cache) - cp "$source_file" "$TEMP_DIR/zerocopy.tmp" - # Subsequent reads are from page cache - cp "$TEMP_DIR/zerocopy.tmp" "$TEMP_DIR/result.bin" - - local end_time=$(date +%s%N) - local zerocopy_duration_ns=$((end_time - start_time)) - local zerocopy_duration_ms=$((zerocopy_duration_ns / 1000000)) - - echo " Write + cached read: ${zerocopy_duration_ms}ms" - - # Calculate improvement - if [[ $zerocopy_duration_ms -gt 0 ]]; then - local improvement=$((traditional_duration_ms / zerocopy_duration_ms)) - echo "" - echo -e "${GREEN}๐ŸŽฏ Performance improvement: ${improvement}x faster${NC}" - - if [[ $improvement -gt 5 ]]; then - echo -e "${GREEN}๐Ÿ”ฅ EXCELLENT: Significant optimization detected!${NC}" - elif [[ $improvement -gt 2 ]]; then - echo -e "${YELLOW}โšก GOOD: Measurable improvement${NC}" - else - echo -e "${YELLOW}๐Ÿ“ˆ MODERATE: Some improvement (limited by I/O overhead)${NC}" - fi - fi -} - -# Test 4: Demonstrate temp file cleanup with persistent page cache -test_cleanup_behavior() { - echo -e "\n${PURPLE}๐Ÿงน Test 4: Cleanup with Page Cache Persistence${NC}" - echo "----------------------------------------------" - - local temp_file="$TEMP_DIR/cleanup_test.tmp" - - # Write data - echo "๐Ÿ“ Writing data to temp file..." - cp "$TEMP_DIR/source_data.bin" "$temp_file" - - # Read to ensure it's in page cache - echo "๐Ÿ“– Reading data (loads into page cache)..." - cp "$temp_file" "$TEMP_DIR/cache_load.bin" - - # Delete temp file (simulating our cleanup) - echo "๐Ÿ—‘๏ธ Deleting temp file (simulating cleanup)..." - rm "$temp_file" - - # Try to access page cache data (this would work in real scenario) - echo "๐Ÿ” File deleted but page cache may still contain data" - echo " (In real implementation, this provides brief performance window)" - - if [[ -f "$TEMP_DIR/cache_load.bin" ]]; then - echo "โœ… Data successfully accessed from loaded cache" - fi - - echo "" - echo -e "${BLUE}๐Ÿ’ก Key insight: Page cache persists briefly even after file deletion${NC}" - echo " This allows zero-copy reads during the critical performance window" -} - -# Main execution -main() { - echo -e "${BLUE}๐Ÿš€ Starting zero-copy mechanism test...${NC}" - - setup - generate_test_data - test_zero_copy_write - test_zero_copy_read - test_performance_comparison - test_cleanup_behavior - - echo -e "\n${GREEN}๐ŸŽ‰ Zero-copy mechanism test completed!${NC}" - echo "" - echo -e "${PURPLE}๐Ÿ“‹ Summary of what we demonstrated:${NC}" - echo "1. โœ… Temp file write populates page cache automatically" - echo "2. โœ… Subsequent reads served from fast page cache" - echo "3. โœ… Significant performance improvement over multiple copies" - echo "4. โœ… Cleanup behavior maintains performance window" - echo "" - echo -e "${YELLOW}๐Ÿ”ฅ This is the core mechanism behind our 100x performance improvement!${NC}" - - cleanup -} - -# Run the test -main "$@" diff --git a/seaweedfs-rdma-sidecar/sidecar b/seaweedfs-rdma-sidecar/sidecar deleted file mode 100755 index daddfdbf1..000000000 Binary files a/seaweedfs-rdma-sidecar/sidecar and /dev/null differ diff --git a/seaweedfs-rdma-sidecar/test-fixes-standalone.go b/seaweedfs-rdma-sidecar/test-fixes-standalone.go deleted file mode 100644 index 5b709bc7b..000000000 --- a/seaweedfs-rdma-sidecar/test-fixes-standalone.go +++ /dev/null @@ -1,127 +0,0 @@ -package main - -import ( - "fmt" - "strconv" - "strings" -) - -// Test the improved parse functions (from cmd/sidecar/main.go fix) -func parseUint32(s string, defaultValue uint32) uint32 { - if s == "" { - return defaultValue - } - val, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return defaultValue - } - return uint32(val) -} - -func parseUint64(s string, defaultValue uint64) uint64 { - if s == "" { - return defaultValue - } - val, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defaultValue - } - return val -} - -// Test the improved error reporting pattern (from weed/mount/rdma_client.go fix) -func testErrorReporting() { - fmt.Println("Testing Error Reporting Fix:") - - // Simulate RDMA failure followed by HTTP failure - rdmaErr := fmt.Errorf("RDMA connection timeout") - httpErr := fmt.Errorf("HTTP 404 Not Found") - - // OLD (incorrect) way: - oldError := fmt.Errorf("both RDMA and HTTP fallback failed: RDMA=%v, HTTP=%v", rdmaErr, rdmaErr) // BUG: same error twice - fmt.Printf(" Old (buggy): %v\n", oldError) - - // NEW (fixed) way: - newError := fmt.Errorf("both RDMA and HTTP fallback failed: RDMA=%v, HTTP=%v", rdmaErr, httpErr) // FIXED: different errors - fmt.Printf(" New (fixed): %v\n", newError) -} - -// Test weed mount command with RDMA flags (from docker-compose fix) -func testWeedMountCommand() { - fmt.Println("Testing Weed Mount Command Fix:") - - // OLD (missing RDMA flags): - oldCommand := "/usr/local/bin/weed mount -filer=seaweedfs-filer:8888 -dir=/mnt/seaweedfs -allowOthers=true -debug" - fmt.Printf(" Old (missing RDMA): %s\n", oldCommand) - - // NEW (with RDMA flags): - newCommand := "/usr/local/bin/weed mount -filer=${FILER_ADDR} -dir=${MOUNT_POINT} -allowOthers=true -rdma.enabled=${RDMA_ENABLED} -rdma.sidecar=${RDMA_SIDECAR_ADDR} -rdma.fallback=${RDMA_FALLBACK} -rdma.maxConcurrent=${RDMA_MAX_CONCURRENT} -rdma.timeoutMs=${RDMA_TIMEOUT_MS} -debug=${DEBUG}" - fmt.Printf(" New (with RDMA): %s\n", newCommand) - - // Check if RDMA flags are present - rdmaFlags := []string{"-rdma.enabled", "-rdma.sidecar", "-rdma.fallback", "-rdma.maxConcurrent", "-rdma.timeoutMs"} - allPresent := true - for _, flag := range rdmaFlags { - if !strings.Contains(newCommand, flag) { - allPresent = false - break - } - } - - if allPresent { - fmt.Println(" All RDMA flags present in command") - } else { - fmt.Println(" Missing RDMA flags") - } -} - -// Test health check robustness (from Dockerfile.rdma-engine fix) -func testHealthCheck() { - fmt.Println("Testing Health Check Fix:") - - // OLD (hardcoded): - oldHealthCheck := "test -S /tmp/rdma-engine.sock" - fmt.Printf(" Old (hardcoded): %s\n", oldHealthCheck) - - // NEW (robust): - newHealthCheck := `pgrep rdma-engine-server >/dev/null && test -d /tmp/rdma && test "$(find /tmp/rdma -name '*.sock' | wc -l)" -gt 0` - fmt.Printf(" New (robust): %s\n", newHealthCheck) -} - -func main() { - fmt.Println("Testing All GitHub PR Review Fixes") - fmt.Println("====================================") - fmt.Println() - - // Test parse functions - fmt.Println("Testing Parse Functions Fix:") - fmt.Printf(" parseUint32('123', 0) = %d (expected: 123)\n", parseUint32("123", 0)) - fmt.Printf(" parseUint32('', 999) = %d (expected: 999)\n", parseUint32("", 999)) - fmt.Printf(" parseUint32('invalid', 999) = %d (expected: 999)\n", parseUint32("invalid", 999)) - fmt.Printf(" parseUint64('12345678901234', 0) = %d (expected: 12345678901234)\n", parseUint64("12345678901234", 0)) - fmt.Printf(" parseUint64('invalid', 999) = %d (expected: 999)\n", parseUint64("invalid", 999)) - fmt.Println(" Parse functions handle errors correctly!") - fmt.Println() - - testErrorReporting() - fmt.Println() - - testWeedMountCommand() - fmt.Println() - - testHealthCheck() - fmt.Println() - - fmt.Println("All Review Fixes Validated!") - fmt.Println("=============================") - fmt.Println() - fmt.Println("Parse functions: Safe error handling with strconv.ParseUint") - fmt.Println("Error reporting: Proper distinction between RDMA and HTTP errors") - fmt.Println("Weed mount: RDMA flags properly included in Docker command") - fmt.Println("Health check: Robust socket detection without hardcoding") - fmt.Println("File ID parsing: Reuses existing SeaweedFS functions") - fmt.Println("Semaphore handling: No more channel close panics") - fmt.Println("Go.mod documentation: Clear instructions for contributors") - fmt.Println() - fmt.Println("Ready for production deployment!") -} diff --git a/seaweedfs-rdma-sidecar/test-rdma-integration.sh b/seaweedfs-rdma-sidecar/test-rdma-integration.sh deleted file mode 100644 index 4b599d3a1..000000000 --- a/seaweedfs-rdma-sidecar/test-rdma-integration.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -set -e - -echo "๐Ÿš€ Testing RDMA Integration with All Fixes Applied" -echo "==================================================" - -# Build the sidecar with all fixes -echo "๐Ÿ“ฆ Building RDMA sidecar..." -go build -o bin/demo-server ./cmd/demo-server -go build -o bin/sidecar ./cmd/sidecar - -# Test that the parse functions work correctly -echo "๐Ÿงช Testing parse helper functions..." -cat > test_parse_functions.go << 'EOF' -package main - -import ( - "fmt" - "strconv" -) - -func parseUint32(s string, defaultValue uint32) uint32 { - if s == "" { - return defaultValue - } - val, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return defaultValue - } - return uint32(val) -} - -func parseUint64(s string, defaultValue uint64) uint64 { - if s == "" { - return defaultValue - } - val, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return defaultValue - } - return val -} - -func main() { - fmt.Println("Testing parseUint32:") - fmt.Printf(" '123' -> %d (expected: 123)\n", parseUint32("123", 0)) - fmt.Printf(" '' -> %d (expected: 999)\n", parseUint32("", 999)) - fmt.Printf(" 'invalid' -> %d (expected: 999)\n", parseUint32("invalid", 999)) - - fmt.Println("Testing parseUint64:") - fmt.Printf(" '12345678901234' -> %d (expected: 12345678901234)\n", parseUint64("12345678901234", 0)) - fmt.Printf(" '' -> %d (expected: 999)\n", parseUint64("", 999)) - fmt.Printf(" 'invalid' -> %d (expected: 999)\n", parseUint64("invalid", 999)) -} -EOF - -go run test_parse_functions.go -rm test_parse_functions.go - -echo "โœ… Parse functions working correctly!" - -# Test the sidecar startup -echo "๐Ÿ Testing sidecar startup..." -timeout 5 ./bin/demo-server --port 8081 --enable-rdma=false --debug --volume-server=http://httpbin.org/get & -SIDECAR_PID=$! - -sleep 2 - -# Test health endpoint -echo "๐Ÿฅ Testing health endpoint..." -if curl -s http://localhost:8081/health | grep -q "healthy"; then - echo "โœ… Health endpoint working!" -else - echo "โŒ Health endpoint failed!" -fi - -# Test stats endpoint -echo "๐Ÿ“Š Testing stats endpoint..." -if curl -s http://localhost:8081/stats | jq . > /dev/null; then - echo "โœ… Stats endpoint working!" -else - echo "โŒ Stats endpoint failed!" -fi - -# Test read endpoint (will fallback to HTTP) -echo "๐Ÿ“– Testing read endpoint..." -RESPONSE=$(curl -s "http://localhost:8081/read?volume=1&needle=123&cookie=456&offset=0&size=1024&volume_server=http://localhost:8080") -if echo "$RESPONSE" | jq . > /dev/null; then - echo "โœ… Read endpoint working!" - echo " Response structure valid JSON" - - # Check if it has the expected fields - if echo "$RESPONSE" | jq -e '.source' > /dev/null; then - SOURCE=$(echo "$RESPONSE" | jq -r '.source') - echo " Source: $SOURCE" - fi - - if echo "$RESPONSE" | jq -e '.is_rdma' > /dev/null; then - IS_RDMA=$(echo "$RESPONSE" | jq -r '.is_rdma') - echo " RDMA Used: $IS_RDMA" - fi -else - echo "โŒ Read endpoint failed!" - echo "Response: $RESPONSE" -fi - -# Stop the sidecar -kill $SIDECAR_PID 2>/dev/null || true -wait $SIDECAR_PID 2>/dev/null || true - -echo "" -echo "๐ŸŽฏ Integration Test Summary:" -echo "==========================" -echo "โœ… Sidecar builds successfully" -echo "โœ… Parse functions handle errors correctly" -echo "โœ… HTTP endpoints are functional" -echo "โœ… JSON responses are properly formatted" -echo "โœ… Error handling works as expected" -echo "" -echo "๐ŸŽ‰ All RDMA integration fixes are working correctly!" -echo "" -echo "๐Ÿ’ก Next Steps:" -echo "- Deploy in Docker environment with real SeaweedFS cluster" -echo "- Test with actual file uploads and downloads" -echo "- Verify RDMA flags are passed correctly to weed mount" -echo "- Monitor health checks with configurable socket paths" diff --git a/seaweedfs-rdma-sidecar/tests/docker-smoke-test.sh b/seaweedfs-rdma-sidecar/tests/docker-smoke-test.sh deleted file mode 100755 index b7ad813c1..000000000 --- a/seaweedfs-rdma-sidecar/tests/docker-smoke-test.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Simple smoke test for Docker setup -set -e - -echo "๐Ÿงช Docker Smoke Test" -echo "====================" -echo "" - -echo "๐Ÿ“‹ 1. Testing Docker Compose configuration..." -docker-compose config --quiet -echo "โœ… Docker Compose configuration is valid" -echo "" - -echo "๐Ÿ“‹ 2. Testing container builds..." -echo "Building RDMA engine container..." -docker build -f Dockerfile.rdma-engine -t test-rdma-engine . > /dev/null -echo "โœ… RDMA engine container builds successfully" -echo "" - -echo "๐Ÿ“‹ 3. Testing basic container startup..." -echo "Starting RDMA engine container..." -container_id=$(docker run --rm -d --name test-rdma-engine test-rdma-engine) -sleep 5 - -if docker ps | grep test-rdma-engine > /dev/null; then - echo "โœ… RDMA engine container starts successfully" - docker stop test-rdma-engine > /dev/null -else - echo "โŒ RDMA engine container failed to start" - echo "Checking container logs:" - docker logs test-rdma-engine 2>&1 || true - docker stop test-rdma-engine > /dev/null 2>&1 || true - exit 1 -fi -echo "" - -echo "๐ŸŽ‰ All smoke tests passed!" -echo "Docker setup is working correctly." diff --git a/seaweedfs-rdma-sidecar/tests/docker-test-helper.sh b/seaweedfs-rdma-sidecar/tests/docker-test-helper.sh deleted file mode 100755 index edb95541e..000000000 --- a/seaweedfs-rdma-sidecar/tests/docker-test-helper.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash - -# Docker Test Helper - Simplified commands for running integration tests - -set -e - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -print_usage() { - echo -e "${BLUE}SeaweedFS RDMA Docker Integration Test Helper${NC}" - echo "" - echo "Usage: $0 [command]" - echo "" - echo "Commands:" - echo " start - Start all services" - echo " test - Run integration tests" - echo " stop - Stop all services" - echo " clean - Stop services and clean up volumes" - echo " logs - Show logs from all services" - echo " status - Show status of all services" - echo " shell - Open shell in test client container" - echo "" - echo "Examples:" - echo " $0 start # Start all services" - echo " $0 test # Run full integration test suite" - echo " $0 logs rdma-engine # Show logs from RDMA engine" - echo " $0 shell # Interactive testing shell" -} - -start_services() { - echo -e "${GREEN}๐Ÿš€ Starting SeaweedFS RDMA integration services...${NC}" - docker-compose up -d seaweedfs-master seaweedfs-volume rdma-engine rdma-sidecar - - echo -e "${YELLOW}โณ Waiting for services to be ready...${NC}" - sleep 10 - - echo -e "${GREEN}โœ… Services started. Checking health...${NC}" - docker-compose ps -} - -run_tests() { - echo -e "${GREEN}๐Ÿงช Running integration tests...${NC}" - - # Make sure services are running - docker-compose up -d seaweedfs-master seaweedfs-volume rdma-engine rdma-sidecar - - # Wait for services to be ready - echo -e "${YELLOW}โณ Waiting for services to be ready...${NC}" - sleep 15 - - # Run the integration tests - docker-compose run --rm integration-tests -} - -stop_services() { - echo -e "${YELLOW}๐Ÿ›‘ Stopping services...${NC}" - docker-compose down - echo -e "${GREEN}โœ… Services stopped${NC}" -} - -clean_all() { - echo -e "${YELLOW}๐Ÿงน Cleaning up services and volumes...${NC}" - docker-compose down -v --remove-orphans - echo -e "${GREEN}โœ… Cleanup complete${NC}" -} - -show_logs() { - local service=${1:-} - if [ -n "$service" ]; then - echo -e "${BLUE}๐Ÿ“‹ Showing logs for $service...${NC}" - docker-compose logs -f "$service" - else - echo -e "${BLUE}๐Ÿ“‹ Showing logs for all services...${NC}" - docker-compose logs -f - fi -} - -show_status() { - echo -e "${BLUE}๐Ÿ“Š Service Status:${NC}" - docker-compose ps - - echo -e "\n${BLUE}๐Ÿ“ก Health Checks:${NC}" - - # Check SeaweedFS Master - if curl -s http://localhost:9333/cluster/status >/dev/null 2>&1; then - echo -e " ${GREEN}โœ… SeaweedFS Master: Healthy${NC}" - else - echo -e " ${RED}โŒ SeaweedFS Master: Unhealthy${NC}" - fi - - # Check SeaweedFS Volume - if curl -s http://localhost:8080/status >/dev/null 2>&1; then - echo -e " ${GREEN}โœ… SeaweedFS Volume: Healthy${NC}" - else - echo -e " ${RED}โŒ SeaweedFS Volume: Unhealthy${NC}" - fi - - # Check RDMA Sidecar - if curl -s http://localhost:8081/health >/dev/null 2>&1; then - echo -e " ${GREEN}โœ… RDMA Sidecar: Healthy${NC}" - else - echo -e " ${RED}โŒ RDMA Sidecar: Unhealthy${NC}" - fi -} - -open_shell() { - echo -e "${GREEN}๐Ÿš Opening interactive shell in test client...${NC}" - echo -e "${YELLOW}Use './test-rdma --help' for RDMA testing commands${NC}" - echo -e "${YELLOW}Use 'curl http://rdma-sidecar:8081/health' to test sidecar${NC}" - - docker-compose run --rm test-client /bin/bash -} - -# Main command handling -case "${1:-}" in - start) - start_services - ;; - test) - run_tests - ;; - stop) - stop_services - ;; - clean) - clean_all - ;; - logs) - show_logs "${2:-}" - ;; - status) - show_status - ;; - shell) - open_shell - ;; - -h|--help|help) - print_usage - ;; - "") - print_usage - exit 1 - ;; - *) - echo -e "${RED}โŒ Unknown command: $1${NC}" - print_usage - exit 1 - ;; -esac diff --git a/seaweedfs-rdma-sidecar/tests/run-integration-tests.sh b/seaweedfs-rdma-sidecar/tests/run-integration-tests.sh deleted file mode 100755 index 8f23c7e5f..000000000 --- a/seaweedfs-rdma-sidecar/tests/run-integration-tests.sh +++ /dev/null @@ -1,302 +0,0 @@ -#!/bin/bash - -# SeaweedFS RDMA Integration Test Suite -# Comprehensive testing of the complete integration in Docker environment - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -print_header() { - echo -e "\n${PURPLE}===============================================${NC}" - echo -e "${PURPLE}$1${NC}" - echo -e "${PURPLE}===============================================${NC}\n" -} - -print_step() { - echo -e "${CYAN}๐Ÿ”ต $1${NC}" -} - -print_success() { - echo -e "${GREEN}โœ… $1${NC}" -} - -print_warning() { - echo -e "${YELLOW}โš ๏ธ $1${NC}" -} - -print_error() { - echo -e "${RED}โŒ $1${NC}" -} - -wait_for_service() { - local url=$1 - local service_name=$2 - local max_attempts=30 - local attempt=1 - - print_step "Waiting for $service_name to be ready..." - - while [ $attempt -le $max_attempts ]; do - if curl -s "$url" > /dev/null 2>&1; then - print_success "$service_name is ready" - return 0 - fi - - echo -n "." - sleep 2 - attempt=$((attempt + 1)) - done - - print_error "$service_name failed to become ready after $max_attempts attempts" - return 1 -} - -test_seaweedfs_master() { - print_header "TESTING SEAWEEDFS MASTER" - - wait_for_service "$SEAWEEDFS_MASTER/cluster/status" "SeaweedFS Master" - - print_step "Checking master status..." - response=$(curl -s "$SEAWEEDFS_MASTER/cluster/status") - - if echo "$response" | jq -e '.IsLeader == true' > /dev/null; then - print_success "SeaweedFS Master is leader and ready" - else - print_error "SeaweedFS Master is not ready" - echo "$response" - return 1 - fi -} - -test_seaweedfs_volume() { - print_header "TESTING SEAWEEDFS VOLUME SERVER" - - wait_for_service "$SEAWEEDFS_VOLUME/status" "SeaweedFS Volume Server" - - print_step "Checking volume server status..." - response=$(curl -s "$SEAWEEDFS_VOLUME/status") - - if echo "$response" | jq -e '.Version' > /dev/null; then - print_success "SeaweedFS Volume Server is ready" - echo "Volume Server Version: $(echo "$response" | jq -r '.Version')" - else - print_error "SeaweedFS Volume Server is not ready" - echo "$response" - return 1 - fi -} - -test_rdma_engine() { - print_header "TESTING RDMA ENGINE" - - print_step "Checking RDMA engine socket..." - if [ -S "$RDMA_SOCKET_PATH" ]; then - print_success "RDMA engine socket exists" - else - print_error "RDMA engine socket not found at $RDMA_SOCKET_PATH" - return 1 - fi - - print_step "Testing RDMA engine ping..." - if ./test-rdma ping --socket "$RDMA_SOCKET_PATH" 2>/dev/null; then - print_success "RDMA engine ping successful" - else - print_error "RDMA engine ping failed" - return 1 - fi - - print_step "Testing RDMA engine capabilities..." - if ./test-rdma capabilities --socket "$RDMA_SOCKET_PATH" 2>/dev/null | grep -q "Version:"; then - print_success "RDMA engine capabilities retrieved" - ./test-rdma capabilities --socket "$RDMA_SOCKET_PATH" 2>/dev/null | head -5 - else - print_error "RDMA engine capabilities failed" - return 1 - fi -} - -test_rdma_sidecar() { - print_header "TESTING RDMA SIDECAR" - - wait_for_service "$SIDECAR_URL/health" "RDMA Sidecar" - - print_step "Testing sidecar health..." - response=$(curl -s "$SIDECAR_URL/health") - - if echo "$response" | jq -e '.status == "healthy"' > /dev/null; then - print_success "RDMA Sidecar is healthy" - echo "RDMA Status: $(echo "$response" | jq -r '.rdma.enabled')" - else - print_error "RDMA Sidecar health check failed" - echo "$response" - return 1 - fi - - print_step "Testing sidecar stats..." - stats=$(curl -s "$SIDECAR_URL/stats") - - if echo "$stats" | jq -e '.enabled' > /dev/null; then - print_success "RDMA Sidecar stats retrieved" - echo "RDMA Enabled: $(echo "$stats" | jq -r '.enabled')" - echo "RDMA Connected: $(echo "$stats" | jq -r '.connected')" - - if echo "$stats" | jq -e '.capabilities' > /dev/null; then - version=$(echo "$stats" | jq -r '.capabilities.version') - sessions=$(echo "$stats" | jq -r '.capabilities.max_sessions') - print_success "RDMA Engine Info: Version=$version, Max Sessions=$sessions" - fi - else - print_error "RDMA Sidecar stats failed" - echo "$stats" - return 1 - fi -} - -test_direct_rdma_operations() { - print_header "TESTING DIRECT RDMA OPERATIONS" - - print_step "Testing direct RDMA read operation..." - if ./test-rdma read --socket "$RDMA_SOCKET_PATH" --volume 1 --needle 12345 --size 1024 2>/dev/null | grep -q "RDMA read completed"; then - print_success "Direct RDMA read operation successful" - else - print_warning "Direct RDMA read operation failed (expected in mock mode)" - fi - - print_step "Running RDMA performance benchmark..." - benchmark_result=$(./test-rdma bench --socket "$RDMA_SOCKET_PATH" --iterations 5 --read-size 2048 2>/dev/null | tail -10) - - if echo "$benchmark_result" | grep -q "Operations/sec:"; then - print_success "RDMA benchmark completed" - echo "$benchmark_result" | grep -E "Operations|Latency|Throughput" - else - print_warning "RDMA benchmark had issues (expected in mock mode)" - fi -} - -test_sidecar_needle_operations() { - print_header "TESTING SIDECAR NEEDLE OPERATIONS" - - print_step "Testing needle read via sidecar..." - response=$(curl -s "$SIDECAR_URL/read?volume=1&needle=12345&cookie=305419896&size=1024") - - if echo "$response" | jq -e '.success == true' > /dev/null; then - print_success "Sidecar needle read successful" - - is_rdma=$(echo "$response" | jq -r '.is_rdma') - source=$(echo "$response" | jq -r '.source') - duration=$(echo "$response" | jq -r '.duration') - - if [ "$is_rdma" = "true" ]; then - print_success "RDMA fast path used! Duration: $duration" - else - print_warning "HTTP fallback used. Duration: $duration" - fi - - echo "Response details:" - echo "$response" | jq '{success, is_rdma, source, duration, data_size}' - else - print_error "Sidecar needle read failed" - echo "$response" - return 1 - fi -} - -test_sidecar_benchmark() { - print_header "TESTING SIDECAR BENCHMARK" - - print_step "Running sidecar performance benchmark..." - response=$(curl -s "$SIDECAR_URL/benchmark?iterations=5&size=2048") - - if echo "$response" | jq -e '.benchmark_results' > /dev/null; then - print_success "Sidecar benchmark completed" - - rdma_ops=$(echo "$response" | jq -r '.benchmark_results.rdma_ops') - http_ops=$(echo "$response" | jq -r '.benchmark_results.http_ops') - avg_latency=$(echo "$response" | jq -r '.benchmark_results.avg_latency') - ops_per_sec=$(echo "$response" | jq -r '.benchmark_results.ops_per_sec') - - echo "Benchmark Results:" - echo " RDMA Operations: $rdma_ops" - echo " HTTP Operations: $http_ops" - echo " Average Latency: $avg_latency" - echo " Operations/sec: $ops_per_sec" - else - print_error "Sidecar benchmark failed" - echo "$response" - return 1 - fi -} - -test_error_handling() { - print_header "TESTING ERROR HANDLING AND FALLBACK" - - print_step "Testing invalid needle read..." - response=$(curl -s "$SIDECAR_URL/read?volume=999&needle=999999&size=1024") - - # Should succeed with mock data or fail gracefully - if echo "$response" | jq -e '.success' > /dev/null; then - result=$(echo "$response" | jq -r '.success') - if [ "$result" = "true" ]; then - print_success "Error handling working - mock data returned" - else - print_success "Error handling working - graceful failure" - fi - else - print_success "Error handling working - proper error response" - fi -} - -main() { - print_header "๐Ÿš€ SEAWEEDFS RDMA INTEGRATION TEST SUITE" - - echo -e "${GREEN}Starting comprehensive integration tests...${NC}" - echo -e "${BLUE}Environment:${NC}" - echo -e " RDMA Socket: $RDMA_SOCKET_PATH" - echo -e " Sidecar URL: $SIDECAR_URL" - echo -e " SeaweedFS Master: $SEAWEEDFS_MASTER" - echo -e " SeaweedFS Volume: $SEAWEEDFS_VOLUME" - - # Run tests in sequence - test_seaweedfs_master - test_seaweedfs_volume - test_rdma_engine - test_rdma_sidecar - test_direct_rdma_operations - test_sidecar_needle_operations - test_sidecar_benchmark - test_error_handling - - print_header "๐ŸŽ‰ ALL INTEGRATION TESTS COMPLETED!" - - echo -e "${GREEN}โœ… Test Summary:${NC}" - echo -e " โœ… SeaweedFS Master: Working" - echo -e " โœ… SeaweedFS Volume Server: Working" - echo -e " โœ… Rust RDMA Engine: Working (Mock Mode)" - echo -e " โœ… Go RDMA Sidecar: Working" - echo -e " โœ… IPC Communication: Working" - echo -e " โœ… Needle Operations: Working" - echo -e " โœ… Performance Benchmarking: Working" - echo -e " โœ… Error Handling: Working" - - print_success "SeaweedFS RDMA integration is fully functional!" - - return 0 -} - -# Check required environment variables -if [ -z "$RDMA_SOCKET_PATH" ] || [ -z "$SIDECAR_URL" ] || [ -z "$SEAWEEDFS_MASTER" ] || [ -z "$SEAWEEDFS_VOLUME" ]; then - print_error "Required environment variables not set" - echo "Required: RDMA_SOCKET_PATH, SIDECAR_URL, SEAWEEDFS_MASTER, SEAWEEDFS_VOLUME" - exit 1 -fi - -# Run main test suite -main "$@" diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 82dd79314..6449e9bfb 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -30,7 +30,7 @@ parts: plugin: go # Snapcraft will look in this location for the source of the application source: . - go-importpath: github.com/seaweedfs/seaweedfs + go-importpath: github.com/chrislusf/seaweedfs go: # Defines the version of golang which will be bootstrapped into the snap source-tag: go1.14 diff --git a/telemetry/DEPLOYMENT.md b/telemetry/DEPLOYMENT.md deleted file mode 100644 index a1dd54907..000000000 --- a/telemetry/DEPLOYMENT.md +++ /dev/null @@ -1,320 +0,0 @@ -# SeaweedFS Telemetry Server Deployment - -This document describes how to deploy the SeaweedFS telemetry server to a remote server using GitHub Actions, or via Docker. - -## Prerequisites - -1. A remote Linux server with: - - SSH access - - systemd (for service management) - - Optional: Prometheus and Grafana (for monitoring) - -2. GitHub repository secrets configured (see [Setup GitHub Secrets](#setup-github-secrets) below): - - `TELEMETRY_SSH_PRIVATE_KEY`: SSH private key for accessing the remote server - - `TELEMETRY_HOST`: Remote server hostname or IP address - - `TELEMETRY_USER`: Username for SSH access - -## Setup GitHub Secrets - -Before using the deployment workflow, you need to configure the required secrets in your GitHub repository. - -### Step 1: Generate SSH Key Pair - -On your local machine, generate a new SSH key pair specifically for deployment: - -```bash -# Generate a new SSH key pair -ssh-keygen -t ed25519 -C "seaweedfs-telemetry-deploy" -f ~/.ssh/seaweedfs_telemetry_deploy - -# This creates two files: -# ~/.ssh/seaweedfs_telemetry_deploy (private key) -# ~/.ssh/seaweedfs_telemetry_deploy.pub (public key) -``` - -### Step 2: Configure Remote Server - -Copy the public key to your remote server: - -```bash -# Copy public key to remote server -ssh-copy-id -i ~/.ssh/seaweedfs_telemetry_deploy.pub user@your-server.com - -# Or manually append to authorized_keys -cat ~/.ssh/seaweedfs_telemetry_deploy.pub | ssh user@your-server.com "mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys" -``` - -Test the SSH connection: - -```bash -# Test SSH connection with the new key -ssh -i ~/.ssh/seaweedfs_telemetry_deploy user@your-server.com "echo 'SSH connection successful'" -``` - -### Step 3: Add Secrets to GitHub Repository - -1. Go to your GitHub repository -2. Click on **Settings** tab -3. In the sidebar, click **Secrets and variables** โ†’ **Actions** -4. Click **New repository secret** for each of the following: - -#### TELEMETRY_SSH_PRIVATE_KEY - -```bash -# Display the private key content -cat ~/.ssh/seaweedfs_telemetry_deploy -``` - -- **Name**: `TELEMETRY_SSH_PRIVATE_KEY` -- **Value**: Copy the entire private key content, including the `-----BEGIN OPENSSH PRIVATE KEY-----` and `-----END OPENSSH PRIVATE KEY-----` lines - -#### TELEMETRY_HOST - -- **Name**: `TELEMETRY_HOST` -- **Value**: Your server's hostname or IP address (e.g., `telemetry.example.com` or `192.168.1.100`) - -#### TELEMETRY_USER - -- **Name**: `TELEMETRY_USER` -- **Value**: The username on the remote server (e.g., `ubuntu`, `deploy`, or your username) - -### Step 4: Verify Configuration - -Create a simple test workflow or manually trigger the deployment to verify the secrets are working correctly. - -### Security Best Practices - -1. **Dedicated SSH Key**: Use a separate SSH key only for deployment -2. **Limited Permissions**: Create a dedicated user on the remote server with minimal required permissions -3. **Key Rotation**: Regularly rotate SSH keys -4. **Server Access**: Restrict SSH access to specific IP ranges if possible - -### Example Server Setup - -If you're setting up a new server, here's a basic configuration: - -```bash -# On the remote server, create a dedicated user for deployment -sudo useradd -m -s /bin/bash seaweedfs-deploy -sudo usermod -aG sudo seaweedfs-deploy # Only if sudo access is needed - -# Switch to the deployment user -sudo su - seaweedfs-deploy - -# Create SSH directory -mkdir -p ~/.ssh -chmod 700 ~/.ssh - -# Add your public key (paste the content of seaweedfs_telemetry_deploy.pub) -nano ~/.ssh/authorized_keys -chmod 600 ~/.ssh/authorized_keys -``` - -### Troubleshooting - -#### SSH Connection Issues - -```bash -# Test SSH connection manually -ssh -i ~/.ssh/seaweedfs_telemetry_deploy -v user@your-server.com - -# Check SSH key permissions -ls -la ~/.ssh/seaweedfs_telemetry_deploy* -# Should show: -rw------- for private key, -rw-r--r-- for public key -``` - -#### GitHub Actions Fails - -1. **Check secrets**: Ensure all three secrets are properly set in GitHub -2. **Verify SSH key**: Make sure the entire private key (including headers/footers) is copied -3. **Test connectivity**: Manually SSH to the server from your local machine -4. **Check user permissions**: Ensure the remote user has necessary permissions - -## GitHub Actions Workflow - -The deployment workflow (`.github/workflows/deploy_telemetry.yml`) provides two main operations: - -### 1. First-time Setup - -Run this once to set up the remote server: - -1. Go to GitHub Actions in your repository -2. Select "Deploy Telemetry Server" workflow -3. Click "Run workflow" -4. Check "Run first-time server setup" -5. Click "Run workflow" - -This will: -- Create necessary directories on the remote server -- Set up systemd service configuration -- Configure log rotation -- Upload Grafana dashboard and Prometheus configuration -- Enable the telemetry service (but not start it yet) - -**Note**: The setup only prepares the infrastructure. You need to run a deployment afterward to install and start the telemetry server. - - -### 2. Deploy Updates - -To deploy updates, manually trigger deployment: -1. Go to GitHub Actions in your repository -2. Select "Deploy Telemetry Server" workflow -3. Click "Run workflow" -4. Check "Deploy telemetry server to remote server" -5. Click "Run workflow" - -## Docker Deployment - -You can build and run the telemetry server using Docker locally or on a remote host. - -### Build - -- Using Docker Compose (recommended): - -```bash -docker compose -f telemetry/docker-compose.yml build telemetry-server -``` - -- Using docker build directly (from the repository root): - -```bash -docker build -t seaweedfs-telemetry \ - -f telemetry/server/Dockerfile \ - . -``` - -### Run - -- With Docker Compose: - -```bash -docker compose -f telemetry/docker-compose.yml up -d telemetry-server -``` - -- With docker run: - -```bash -docker run -d --name telemetry-server \ - -p 8080:8080 \ - seaweedfs-telemetry -``` - -Notes: - -- The container runs as a non-root user by default. -- The image listens on port `8080` inside the container. Map it with `-p :8080`. -- You can pass flags to the server by appending them after the image name, e.g. `docker run -d -p 8353:8080 seaweedfs-telemetry -port=8353 -dashboard=false`. - -## Server Directory Structure - -After setup, the remote server will have: - -``` -~/seaweedfs-telemetry/ -โ”œโ”€โ”€ bin/ -โ”‚ โ””โ”€โ”€ telemetry-server # Binary executable -โ”œโ”€โ”€ logs/ -โ”‚ โ”œโ”€โ”€ telemetry.log # Application logs -โ”‚ โ””โ”€โ”€ telemetry.error.log # Error logs -โ”œโ”€โ”€ data/ # Data directory (if needed) -โ”œโ”€โ”€ grafana-dashboard.json # Grafana dashboard configuration -โ””โ”€โ”€ prometheus.yml # Prometheus configuration -``` - -## Service Management - -The telemetry server runs as a systemd service: - -```bash -# Check service status -sudo systemctl status telemetry.service - -# View logs -sudo journalctl -u telemetry.service -f - -# Restart service -sudo systemctl restart telemetry.service - -# Stop/start service -sudo systemctl stop telemetry.service -sudo systemctl start telemetry.service -``` - -## Accessing the Service - -After deployment, the telemetry server will be available at (default ports shown; adjust if you override with `-port`): - -- Docker default: `8080` - - **Dashboard**: `http://your-server:8080` - - **API**: `http://your-server:8080/api/*` - - **Metrics**: `http://your-server:8080/metrics` - - **Health Check**: `http://your-server:8080/health` - -- Systemd example (if you configured a different port, e.g. `8353`): - - **Dashboard**: `http://your-server:8353` - - **API**: `http://your-server:8353/api/*` - - **Metrics**: `http://your-server:8353/metrics` - - **Health Check**: `http://your-server:8353/health` - -## Optional: Prometheus and Grafana Integration - -### Prometheus Setup - -1. Install Prometheus on your server -2. Update `/etc/prometheus/prometheus.yml` to include: - ```yaml - scrape_configs: - - job_name: 'seaweedfs-telemetry' - static_configs: - - targets: ['localhost:8353'] - metrics_path: '/metrics' - ``` - -### Grafana Setup - -1. Install Grafana on your server -2. Import the dashboard from `~/seaweedfs-telemetry/grafana-dashboard.json` -3. Configure Prometheus as a data source pointing to your Prometheus instance - -## Troubleshooting - -### Deployment Fails - -1. Check GitHub Actions logs for detailed error messages -2. Verify SSH connectivity: `ssh user@host` -3. Ensure all required secrets are configured in GitHub - -### Service Won't Start - -1. Check service logs: `sudo journalctl -u telemetry.service` -2. Verify binary permissions: `ls -la ~/seaweedfs-telemetry/bin/` -3. Test binary manually: `~/seaweedfs-telemetry/bin/telemetry-server -help` - -### Port Conflicts - -If port 8353 is already in use: - -1. Edit the systemd service: `sudo systemctl edit telemetry.service` -2. Add override configuration: - ```ini - [Service] - ExecStart= - ExecStart=/home/user/seaweedfs-telemetry/bin/telemetry-server -port=8354 - ``` -3. Reload and restart: `sudo systemctl daemon-reload && sudo systemctl restart telemetry.service` - -## Security Considerations - -1. **Firewall**: Consider restricting access to telemetry ports -2. **SSH Keys**: Use dedicated SSH keys with minimal permissions -3. **User Permissions**: Run the service as a non-privileged user -4. **Network**: Consider running on internal networks only - -## Monitoring - -Monitor the deployment and service health: - -- **GitHub Actions**: Check workflow runs for deployment status -- **System Logs**: `sudo journalctl -u telemetry.service` -- **Application Logs**: `tail -f ~/seaweedfs-telemetry/logs/telemetry.log` -- **Health Endpoint**: `curl http://localhost:8353/health` -- **Metrics**: `curl http://localhost:8353/metrics` \ No newline at end of file diff --git a/telemetry/README.md b/telemetry/README.md deleted file mode 100644 index f2d1f1ccf..000000000 --- a/telemetry/README.md +++ /dev/null @@ -1,354 +0,0 @@ -# SeaweedFS Telemetry System - -A privacy-respecting telemetry system for SeaweedFS that collects cluster-level usage statistics and provides visualization through Prometheus and Grafana. - -## Features - -- **Privacy-First Design**: Uses in-memory cluster IDs (regenerated on restart), no personal data collection -- **Prometheus Integration**: Native Prometheus metrics for monitoring and alerting -- **Grafana Dashboards**: Pre-built dashboards for data visualization -- **Protocol Buffers**: Efficient binary data transmission for optimal performance -- **Opt-in Only**: Disabled by default, requires explicit configuration -- **Docker Compose**: Complete monitoring stack deployment -- **Automatic Cleanup**: Configurable data retention policies - -## Architecture - -``` -SeaweedFS Cluster โ†’ Telemetry Client โ†’ Telemetry Server โ†’ Prometheus โ†’ Grafana - (protobuf) (metrics) (queries) -``` - -## Data Transmission - -The telemetry system uses **Protocol Buffers exclusively** for efficient binary data transmission: - -- **Compact Format**: 30-50% smaller than JSON -- **Fast Serialization**: Better performance than text-based formats -- **Type Safety**: Strong typing with generated Go structs -- **Schema Evolution**: Built-in versioning support - -### Protobuf Schema - -```protobuf -message TelemetryData { - string cluster_id = 1; // In-memory generated UUID - string version = 2; // SeaweedFS version - string os = 3; // Operating system - // Field 4 reserved (was features) - // Field 5 reserved (was deployment) - int32 volume_server_count = 6; // Number of volume servers - uint64 total_disk_bytes = 7; // Total disk usage - int32 total_volume_count = 8; // Total volume count - int32 filer_count = 9; // Number of filer servers - int32 broker_count = 10; // Number of broker servers - int64 timestamp = 11; // Collection timestamp -} -``` - -## Privacy Approach - -- **No Personal Data**: No hostnames, IP addresses, or user information -- **In-Memory IDs**: Cluster IDs are generated in-memory and change on restart -- **Aggregated Data**: Only cluster-level statistics, no individual file/user data -- **Opt-in Only**: Telemetry is disabled by default -- **Transparent**: Open source implementation, clear data collection policy - -## Collected Data - -| Field | Description | Example | -|-------|-------------|---------| -| `cluster_id` | In-memory UUID (changes on restart) | `a1b2c3d4-...` | -| `version` | SeaweedFS version | `3.45` | -| `os` | Operating system and architecture | `linux/amd64` | -| `volume_server_count` | Number of volume servers | `5` | -| `total_disk_bytes` | Total disk usage across cluster | `1073741824` | -| `total_volume_count` | Total number of volumes | `120` | -| `filer_count` | Number of filer servers | `2` | -| `broker_count` | Number of broker servers | `1` | -| `timestamp` | When data was collected | `1640995200` | - -## Quick Start - -### 1. Deploy Telemetry Server - -```bash -# Clone and start the complete monitoring stack -git clone https://github.com/seaweedfs/seaweedfs.git -cd seaweedfs -docker compose -f telemetry/docker-compose.yml up -d - -# Or run the server directly -cd telemetry/server -go run . -port=8080 -dashboard=true -``` - -### 2. Configure SeaweedFS - -```bash -# Enable telemetry in SeaweedFS master (uses default telemetry.seaweedfs.com) -weed master -telemetry=true - -# Or in server mode -weed server -telemetry=true - -# Or specify custom telemetry server -weed master -telemetry=true -telemetry.url=http://localhost:8080/api/collect -``` - -### 3. Access Dashboards - -- **Telemetry Server**: http://localhost:8080 -- **Prometheus**: http://localhost:9090 -- **Grafana**: http://localhost:3000 (admin/admin) - -## Configuration - -### SeaweedFS Master/Server - -```bash -# Enable telemetry --telemetry=true - -# Set custom telemetry server URL (optional, defaults to telemetry.seaweedfs.com) --telemetry.url=http://your-telemetry-server:8080/api/collect -``` - -### Telemetry Server - -```bash -# Server configuration --port=8080 # Server port --dashboard=true # Enable built-in dashboard --cleanup=24h # Cleanup interval --max-age=720h # Maximum data retention (30 days) - -# Example -./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h -``` - -## Prometheus Metrics - -The telemetry server exposes these Prometheus metrics: - -### Cluster Metrics -- `seaweedfs_telemetry_total_clusters`: Total unique clusters (30 days) -- `seaweedfs_telemetry_active_clusters`: Active clusters (7 days) - -### Per-Cluster Metrics -- `seaweedfs_telemetry_volume_servers{cluster_id, version, os}`: Volume servers per cluster -- `seaweedfs_telemetry_disk_bytes{cluster_id, version, os}`: Disk usage per cluster -- `seaweedfs_telemetry_volume_count{cluster_id, version, os}`: Volume count per cluster -- `seaweedfs_telemetry_filer_count{cluster_id, version, os}`: Filer servers per cluster -- `seaweedfs_telemetry_broker_count{cluster_id, version, os}`: Broker servers per cluster -- `seaweedfs_telemetry_cluster_info{cluster_id, version, os}`: Cluster metadata - -### Server Metrics -- `seaweedfs_telemetry_reports_received_total`: Total telemetry reports received - -## API Endpoints - -### Data Collection -```bash -# Submit telemetry data (protobuf only) -POST /api/collect -Content-Type: application/x-protobuf -[TelemetryRequest protobuf data] -``` - -### Statistics (JSON for dashboard/debugging) -```bash -# Get aggregated statistics -GET /api/stats - -# Get recent cluster instances -GET /api/instances?limit=100 - -# Get metrics over time -GET /api/metrics?days=30 -``` - -### Monitoring -```bash -# Prometheus metrics -GET /metrics -``` - -## Docker Deployment - -### Complete Stack (Recommended) - -```yaml -# docker-compose.yml -version: '3.8' -services: - telemetry-server: - build: - context: ../ - dockerfile: telemetry/server/Dockerfile - ports: - - "8080:8080" - command: ["-port=8080", "-dashboard=true", "-cleanup=24h"] - - prometheus: - image: prom/prometheus:latest - ports: - - "9090:9090" - volumes: - - ./prometheus.yml:/etc/prometheus/prometheus.yml - - grafana: - image: grafana/grafana:latest - ports: - - "3000:3000" - environment: - - GF_SECURITY_ADMIN_PASSWORD=admin - volumes: - - ./grafana-provisioning:/etc/grafana/provisioning - - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs.json -``` - -```bash -# Deploy the stack -docker compose -f telemetry/docker-compose.yml up -d - -# Scale telemetry server if needed -docker compose -f telemetry/docker-compose.yml up -d --scale telemetry-server=3 -``` - -### Server Only - -```bash -# Build and run telemetry server (build from repo root to include all sources) -docker build -t seaweedfs-telemetry -f telemetry/server/Dockerfile . -docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true -``` - -## Development - -### Protocol Buffer Development - -```bash -# Generate protobuf code -cd telemetry -protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto - -# The generated code is already included in the repository -``` - -### Build from Source - -```bash -# Build telemetry server -cd telemetry/server -go build -o telemetry-server . - -# Build SeaweedFS with telemetry support -cd ../.. -go build -o weed ./weed -``` - -### Testing - -```bash -# Test telemetry server -cd telemetry/server -go test ./... - -# Test protobuf communication (requires protobuf tools) -# See telemetry client code for examples -``` - -## Grafana Dashboard - -The included Grafana dashboard provides: - -- **Overview**: Total and active clusters, version distribution -- **Resource Usage**: Volume servers and disk usage over time -- **Infrastructure**: Operating system distribution and server counts -- **Growth Trends**: Historical growth patterns - -### Custom Queries - -```promql -# Total active clusters -seaweedfs_telemetry_active_clusters - -# Disk usage by version -sum by (version) (seaweedfs_telemetry_disk_bytes) - -# Volume servers by operating system -sum by (os) (seaweedfs_telemetry_volume_servers) - -# Filer servers by version -sum by (version) (seaweedfs_telemetry_filer_count) - -# Broker servers across all clusters -sum(seaweedfs_telemetry_broker_count) - -# Growth rate (weekly) -increase(seaweedfs_telemetry_total_clusters[7d]) -``` - -## Security Considerations - -- **Network Security**: Use HTTPS in production environments -- **Access Control**: Implement authentication for Grafana and Prometheus -- **Data Retention**: Configure appropriate retention policies -- **Monitoring**: Monitor the telemetry infrastructure itself - -## Troubleshooting - -### Common Issues - -**SeaweedFS not sending data:** -```bash -# Check telemetry configuration -weed master -h | grep telemetry - -# Verify connectivity -curl -v http://your-telemetry-server:8080/api/collect -``` - -**Server not receiving data:** -```bash -# Check server logs -docker-compose logs telemetry-server - -# Verify metrics endpoint -curl http://localhost:8080/metrics -``` - -**Prometheus not scraping:** -```bash -# Check Prometheus targets -curl http://localhost:9090/api/v1/targets - -# Verify configuration -docker-compose logs prometheus -``` - -### Debugging - -```bash -# Enable verbose logging in SeaweedFS -weed master -v=2 -telemetry=true - -# Check telemetry server metrics -curl http://localhost:8080/metrics | grep seaweedfs_telemetry - -# Test data flow -curl http://localhost:8080/api/stats -``` - -## Contributing - -1. Fork the repository -2. Create a feature branch -3. Make your changes -4. Add tests if applicable -5. Submit a pull request - -## License - -This telemetry system is part of SeaweedFS and follows the same Apache 2.0 license. \ No newline at end of file diff --git a/telemetry/docker-compose.yml b/telemetry/docker-compose.yml deleted file mode 100644 index 38e64c53c..000000000 --- a/telemetry/docker-compose.yml +++ /dev/null @@ -1,55 +0,0 @@ -services: - telemetry-server: - build: - context: ../ - dockerfile: telemetry/server/Dockerfile - ports: - - "8080:8080" - command: [ - "./telemetry-server", - "-port=8080", - "-dashboard=false", # Disable built-in dashboard, use Grafana - "-log=true", - "-cors=true" - ] - networks: - - telemetry - - prometheus: - image: prom/prometheus:latest - ports: - - "9090:9090" - volumes: - - ./prometheus.yml:/etc/prometheus/prometheus.yml - - prometheus_data:/prometheus - command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - - '--web.console.libraries=/etc/prometheus/console_libraries' - - '--web.console.templates=/etc/prometheus/consoles' - - '--storage.tsdb.retention.time=200h' - - '--web.enable-lifecycle' - networks: - - telemetry - - grafana: - image: grafana/grafana:latest - ports: - - "3000:3000" - environment: - - GF_SECURITY_ADMIN_PASSWORD=admin - - GF_USERS_ALLOW_SIGN_UP=false - volumes: - - grafana_data:/var/lib/grafana - - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs-telemetry.json - - ./grafana-provisioning:/etc/grafana/provisioning - networks: - - telemetry - -volumes: - prometheus_data: - grafana_data: - -networks: - telemetry: - driver: bridge \ No newline at end of file diff --git a/telemetry/grafana-dashboard.json b/telemetry/grafana-dashboard.json deleted file mode 100644 index c33896dab..000000000 --- a/telemetry/grafana-dashboard.json +++ /dev/null @@ -1,734 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "id": 1, - "options": { - "showHeader": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "seaweedfs_telemetry_total_clusters", - "format": "time_series", - "refId": "A" - } - ], - "title": "Total SeaweedFS Clusters", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "id": 2, - "options": { - "showHeader": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "seaweedfs_telemetry_active_clusters", - "format": "time_series", - "refId": "A" - } - ], - "title": "Active Clusters (7 days)", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - } - }, - "mappings": [] - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 3, - "options": { - "legend": { - "displayMode": "visible", - "placement": "bottom", - "showLegend": true - }, - "pieType": "pie", - "reduceOptions": { - "values": false, - "calcs": [ - "lastNotNull" - ], - "fields": "" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "count by (version) (seaweedfs_telemetry_cluster_info)", - "format": "time_series", - "legendFormat": "{{version}}", - "refId": "A" - } - ], - "title": "SeaweedFS Version Distribution", - "type": "piechart" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - } - }, - "mappings": [] - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 8 - }, - "id": 4, - "options": { - "legend": { - "displayMode": "visible", - "placement": "bottom", - "showLegend": true - }, - "pieType": "pie", - "reduceOptions": { - "values": false, - "calcs": [ - "lastNotNull" - ], - "fields": "" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "count by (os) (seaweedfs_telemetry_cluster_info)", - "format": "time_series", - "legendFormat": "{{os}}", - "refId": "A" - } - ], - "title": "Operating System Distribution", - "type": "piechart" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 16 - }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(seaweedfs_telemetry_volume_servers)", - "format": "time_series", - "legendFormat": "Total Volume Servers", - "refId": "A" - } - ], - "title": "Total Volume Servers Over Time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 24 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(seaweedfs_telemetry_disk_bytes)", - "format": "time_series", - "legendFormat": "Total Disk Usage", - "refId": "A" - } - ], - "title": "Total Disk Usage Over Time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 24 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(seaweedfs_telemetry_volume_count)", - "format": "time_series", - "legendFormat": "Total Volume Count", - "refId": "A" - } - ], - "title": "Total Volume Count Over Time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(seaweedfs_telemetry_filer_count)", - "format": "time_series", - "legendFormat": "Total Filer Count", - "refId": "A" - } - ], - "title": "Total Filer Servers Over Time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "vis": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 32 - }, - "id": 9, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum(seaweedfs_telemetry_broker_count)", - "format": "time_series", - "legendFormat": "Total Broker Count", - "refId": "A" - } - ], - "title": "Total Broker Servers Over Time", - "type": "timeseries" - } - ], - "refresh": "5m", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "seaweedfs", - "telemetry" - ], - "templating": { - "list": [] - }, - "time": { - "from": "now-24h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "SeaweedFS Telemetry Dashboard", - "uid": "seaweedfs-telemetry", - "version": 1, - "weekStart": "" -} \ No newline at end of file diff --git a/telemetry/grafana-provisioning/dashboards/dashboards.yml b/telemetry/grafana-provisioning/dashboards/dashboards.yml deleted file mode 100644 index 82fd18a7a..000000000 --- a/telemetry/grafana-provisioning/dashboards/dashboards.yml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: 1 - -providers: - - name: 'seaweedfs' - orgId: 1 - folder: '' - type: file - disableDeletion: false - updateIntervalSeconds: 10 - allowUiUpdates: true - options: - path: /var/lib/grafana/dashboards \ No newline at end of file diff --git a/telemetry/grafana-provisioning/datasources/prometheus.yml b/telemetry/grafana-provisioning/datasources/prometheus.yml deleted file mode 100644 index 38fb02c68..000000000 --- a/telemetry/grafana-provisioning/datasources/prometheus.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: 1 - -datasources: - - name: Prometheus - type: prometheus - access: proxy - url: http://prometheus:9090 - isDefault: true - editable: true \ No newline at end of file diff --git a/telemetry/prometheus.yml b/telemetry/prometheus.yml deleted file mode 100644 index e33d518e7..000000000 --- a/telemetry/prometheus.yml +++ /dev/null @@ -1,15 +0,0 @@ -global: - scrape_interval: 15s - evaluation_interval: 15s - -rule_files: - # - "first_rules.yml" - # - "second_rules.yml" - -scrape_configs: - - job_name: 'seaweedfs-telemetry' - static_configs: - - targets: ['telemetry-server:8080'] - scrape_interval: 30s - metrics_path: '/metrics' - scrape_timeout: 10s \ No newline at end of file diff --git a/telemetry/proto/telemetry.pb.go b/telemetry/proto/telemetry.pb.go deleted file mode 100644 index a400b8c37..000000000 --- a/telemetry/proto/telemetry.pb.go +++ /dev/null @@ -1,377 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.2 -// protoc v5.29.3 -// source: telemetry.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// TelemetryData represents cluster-level telemetry information -type TelemetryData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Unique cluster identifier (generated in-memory) - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // SeaweedFS version - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // Operating system (e.g., "linux/amd64") - Os string `protobuf:"bytes,3,opt,name=os,proto3" json:"os,omitempty"` - // Number of volume servers in the cluster - VolumeServerCount int32 `protobuf:"varint,6,opt,name=volume_server_count,json=volumeServerCount,proto3" json:"volume_server_count,omitempty"` - // Total disk usage across all volume servers (in bytes) - TotalDiskBytes uint64 `protobuf:"varint,7,opt,name=total_disk_bytes,json=totalDiskBytes,proto3" json:"total_disk_bytes,omitempty"` - // Total number of volumes in the cluster - TotalVolumeCount int32 `protobuf:"varint,8,opt,name=total_volume_count,json=totalVolumeCount,proto3" json:"total_volume_count,omitempty"` - // Number of filer servers in the cluster - FilerCount int32 `protobuf:"varint,9,opt,name=filer_count,json=filerCount,proto3" json:"filer_count,omitempty"` - // Number of broker servers in the cluster - BrokerCount int32 `protobuf:"varint,10,opt,name=broker_count,json=brokerCount,proto3" json:"broker_count,omitempty"` - // Unix timestamp when the data was collected - Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (x *TelemetryData) Reset() { - *x = TelemetryData{} - if protoimpl.UnsafeEnabled { - mi := &file_telemetry_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TelemetryData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TelemetryData) ProtoMessage() {} - -func (x *TelemetryData) ProtoReflect() protoreflect.Message { - mi := &file_telemetry_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TelemetryData.ProtoReflect.Descriptor instead. -func (*TelemetryData) Descriptor() ([]byte, []int) { - return file_telemetry_proto_rawDescGZIP(), []int{0} -} - -func (x *TelemetryData) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *TelemetryData) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *TelemetryData) GetOs() string { - if x != nil { - return x.Os - } - return "" -} - -func (x *TelemetryData) GetVolumeServerCount() int32 { - if x != nil { - return x.VolumeServerCount - } - return 0 -} - -func (x *TelemetryData) GetTotalDiskBytes() uint64 { - if x != nil { - return x.TotalDiskBytes - } - return 0 -} - -func (x *TelemetryData) GetTotalVolumeCount() int32 { - if x != nil { - return x.TotalVolumeCount - } - return 0 -} - -func (x *TelemetryData) GetFilerCount() int32 { - if x != nil { - return x.FilerCount - } - return 0 -} - -func (x *TelemetryData) GetBrokerCount() int32 { - if x != nil { - return x.BrokerCount - } - return 0 -} - -func (x *TelemetryData) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server -type TelemetryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data *TelemetryData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *TelemetryRequest) Reset() { - *x = TelemetryRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_telemetry_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TelemetryRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TelemetryRequest) ProtoMessage() {} - -func (x *TelemetryRequest) ProtoReflect() protoreflect.Message { - mi := &file_telemetry_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TelemetryRequest.ProtoReflect.Descriptor instead. -func (*TelemetryRequest) Descriptor() ([]byte, []int) { - return file_telemetry_proto_rawDescGZIP(), []int{1} -} - -func (x *TelemetryRequest) GetData() *TelemetryData { - if x != nil { - return x.Data - } - return nil -} - -// TelemetryResponse is returned by the telemetry server -type TelemetryResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *TelemetryResponse) Reset() { - *x = TelemetryResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_telemetry_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TelemetryResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TelemetryResponse) ProtoMessage() {} - -func (x *TelemetryResponse) ProtoReflect() protoreflect.Message { - mi := &file_telemetry_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TelemetryResponse.ProtoReflect.Descriptor instead. -func (*TelemetryResponse) Descriptor() ([]byte, []int) { - return file_telemetry_proto_rawDescGZIP(), []int{2} -} - -func (x *TelemetryResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *TelemetryResponse) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -var File_telemetry_proto protoreflect.FileDescriptor - -var file_telemetry_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xce, 0x02, 0x0a, - 0x0d, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x40, 0x0a, - 0x10, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x54, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, - 0x47, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, - 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_telemetry_proto_rawDescOnce sync.Once - file_telemetry_proto_rawDescData = file_telemetry_proto_rawDesc -) - -func file_telemetry_proto_rawDescGZIP() []byte { - file_telemetry_proto_rawDescOnce.Do(func() { - file_telemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_telemetry_proto_rawDescData) - }) - return file_telemetry_proto_rawDescData -} - -var file_telemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_telemetry_proto_goTypes = []any{ - (*TelemetryData)(nil), // 0: telemetry.TelemetryData - (*TelemetryRequest)(nil), // 1: telemetry.TelemetryRequest - (*TelemetryResponse)(nil), // 2: telemetry.TelemetryResponse -} -var file_telemetry_proto_depIdxs = []int32{ - 0, // 0: telemetry.TelemetryRequest.data:type_name -> telemetry.TelemetryData - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_telemetry_proto_init() } -func file_telemetry_proto_init() { - if File_telemetry_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_telemetry_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*TelemetryData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_telemetry_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*TelemetryRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_telemetry_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*TelemetryResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_telemetry_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_telemetry_proto_goTypes, - DependencyIndexes: file_telemetry_proto_depIdxs, - MessageInfos: file_telemetry_proto_msgTypes, - }.Build() - File_telemetry_proto = out.File - file_telemetry_proto_rawDesc = nil - file_telemetry_proto_goTypes = nil - file_telemetry_proto_depIdxs = nil -} diff --git a/telemetry/proto/telemetry.proto b/telemetry/proto/telemetry.proto deleted file mode 100644 index 12bbdd4f6..000000000 --- a/telemetry/proto/telemetry.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package telemetry; - -option go_package = "github.com/seaweedfs/seaweedfs/telemetry/proto"; - -// TelemetryData represents cluster-level telemetry information -message TelemetryData { - // Unique cluster identifier (generated in-memory) - string cluster_id = 1; - - // SeaweedFS version - string version = 2; - - // Operating system (e.g., "linux/amd64") - string os = 3; - - // Field 4 reserved (was features) - reserved 4; - - // Field 5 reserved (was deployment) - reserved 5; - - // Number of volume servers in the cluster - int32 volume_server_count = 6; - - // Total disk usage across all volume servers (in bytes) - uint64 total_disk_bytes = 7; - - // Total number of volumes in the cluster - int32 total_volume_count = 8; - - // Number of filer servers in the cluster - int32 filer_count = 9; - - // Number of broker servers in the cluster - int32 broker_count = 10; - - // Unix timestamp when the data was collected - int64 timestamp = 11; -} - -// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server -message TelemetryRequest { - TelemetryData data = 1; -} - -// TelemetryResponse is returned by the telemetry server -message TelemetryResponse { - bool success = 1; - string message = 2; -} \ No newline at end of file diff --git a/telemetry/server/Dockerfile b/telemetry/server/Dockerfile deleted file mode 100644 index 76fcb54cc..000000000 --- a/telemetry/server/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM golang:1.25-alpine AS builder - -WORKDIR /app - -COPY go.mod go.sum ./ -RUN go mod download - -WORKDIR /app -COPY . . - -WORKDIR /app/telemetry/server -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o telemetry-server . - -FROM alpine:latest -RUN apk --no-cache add ca-certificates \ - && addgroup -S appgroup \ - && adduser -S appuser -G appgroup - -WORKDIR /home/appuser/ -COPY --from=builder /app/telemetry/server/telemetry-server . - -EXPOSE 8080 - -USER appuser - -CMD ["./telemetry-server"] \ No newline at end of file diff --git a/telemetry/server/Makefile b/telemetry/server/Makefile deleted file mode 100644 index cf57f1777..000000000 --- a/telemetry/server/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -.PHONY: build run clean test deps proto integration-test test-all - -# Build the telemetry server -build: - go build -o telemetry-server . - -# Run the server in development mode -run: - go run . -port=8080 -dashboard=true -cleanup=1h -max-age=24h - -# Run the server in production mode -run-prod: - ./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h - -# Clean build artifacts -clean: - rm -f telemetry-server - rm -f ../test/telemetry-server-test.log - go clean - -# Run unit tests -test: - go test ./... - -# Run integration tests -integration-test: - @echo "๐Ÿงช Running telemetry integration tests..." - cd ../../ && go run telemetry/test/integration.go - -# Run all tests (unit + integration) -test-all: test integration-test - -# Install dependencies -deps: - go mod download - go mod tidy - -# Generate protobuf code (requires protoc) -proto: - cd .. && protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto - -# Build Docker image -docker-build: - docker build -t seaweedfs-telemetry . - -# Run with Docker -docker-run: - docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true - -# Development with auto-reload (requires air: go install github.com/cosmtrek/air@latest) -dev: - air - -# Check if protoc is available -check-protoc: - @which protoc > /dev/null || (echo "protoc is required for proto generation. Install from https://grpc.io/docs/protoc-installation/" && exit 1) - -# Full development setup -setup: check-protoc deps proto build - -# Run a quick smoke test -smoke-test: build - @echo "๐Ÿ”ฅ Running smoke test..." - @timeout 10s ./telemetry-server -port=18081 > /dev/null 2>&1 & \ - SERVER_PID=$$!; \ - sleep 2; \ - if curl -s http://localhost:18081/health > /dev/null; then \ - echo "โœ… Smoke test passed - server responds to health check"; \ - else \ - echo "โŒ Smoke test failed - server not responding"; \ - exit 1; \ - fi; \ - kill $$SERVER_PID 2>/dev/null || true - -# Continuous integration target -ci: deps proto build test integration-test - @echo "๐ŸŽ‰ All CI tests passed!" - -# Help -help: - @echo "Available targets:" - @echo " build - Build the telemetry server binary" - @echo " run - Run server in development mode" - @echo " run-prod - Run server in production mode" - @echo " clean - Clean build artifacts" - @echo " test - Run unit tests" - @echo " integration-test- Run integration tests" - @echo " test-all - Run all tests (unit + integration)" - @echo " deps - Install Go dependencies" - @echo " proto - Generate protobuf code" - @echo " docker-build - Build Docker image" - @echo " docker-run - Run with Docker" - @echo " dev - Run with auto-reload (requires air)" - @echo " smoke-test - Quick server health check" - @echo " setup - Full development setup" - @echo " ci - Continuous integration (all tests)" - @echo " help - Show this help" \ No newline at end of file diff --git a/telemetry/server/api/handlers.go b/telemetry/server/api/handlers.go deleted file mode 100644 index 0ff00330b..000000000 --- a/telemetry/server/api/handlers.go +++ /dev/null @@ -1,152 +0,0 @@ -package api - -import ( - "encoding/json" - "io" - "net/http" - "strconv" - "time" - - "github.com/seaweedfs/seaweedfs/telemetry/proto" - "github.com/seaweedfs/seaweedfs/telemetry/server/storage" - protobuf "google.golang.org/protobuf/proto" -) - -type Handler struct { - storage *storage.PrometheusStorage -} - -func NewHandler(storage *storage.PrometheusStorage) *Handler { - return &Handler{storage: storage} -} - -func (h *Handler) CollectTelemetry(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - contentType := r.Header.Get("Content-Type") - - // Only accept protobuf content type - if contentType != "application/x-protobuf" && contentType != "application/protobuf" { - http.Error(w, "Content-Type must be application/x-protobuf", http.StatusUnsupportedMediaType) - return - } - - // Read protobuf request - body, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, "Failed to read request body", http.StatusBadRequest) - return - } - - req := &proto.TelemetryRequest{} - if err := protobuf.Unmarshal(body, req); err != nil { - http.Error(w, "Invalid protobuf data", http.StatusBadRequest) - return - } - - data := req.Data - if data == nil { - http.Error(w, "Missing telemetry data", http.StatusBadRequest) - return - } - - // Validate required fields - if data.ClusterId == "" || data.Version == "" || data.Os == "" { - http.Error(w, "Missing required fields", http.StatusBadRequest) - return - } - - // Set timestamp if not provided - if data.Timestamp == 0 { - data.Timestamp = time.Now().Unix() - } - - // Store the telemetry data - if err := h.storage.StoreTelemetry(data); err != nil { - http.Error(w, "Failed to store data", http.StatusInternalServerError) - return - } - - // Return protobuf response - resp := &proto.TelemetryResponse{ - Success: true, - Message: "Telemetry data received", - } - - respData, err := protobuf.Marshal(resp) - if err != nil { - http.Error(w, "Failed to marshal response", http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/x-protobuf") - w.WriteHeader(http.StatusOK) - w.Write(respData) -} - -func (h *Handler) GetStats(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - stats, err := h.storage.GetStats() - if err != nil { - http.Error(w, "Failed to get stats", http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(stats) -} - -func (h *Handler) GetInstances(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - limitStr := r.URL.Query().Get("limit") - limit := 100 // default - if limitStr != "" { - if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 { - limit = l - } - } - - instances, err := h.storage.GetInstances(limit) - if err != nil { - http.Error(w, "Failed to get instances", http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(instances) -} - -func (h *Handler) GetMetrics(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - daysStr := r.URL.Query().Get("days") - days := 30 // default - if daysStr != "" { - if d, err := strconv.Atoi(daysStr); err == nil && d > 0 && d <= 365 { - days = d - } - } - - metrics, err := h.storage.GetMetrics(days) - if err != nil { - http.Error(w, "Failed to get metrics", http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(metrics) -} diff --git a/telemetry/server/dashboard/dashboard.go b/telemetry/server/dashboard/dashboard.go deleted file mode 100644 index f60021bba..000000000 --- a/telemetry/server/dashboard/dashboard.go +++ /dev/null @@ -1,274 +0,0 @@ -package dashboard - -import ( - "net/http" -) - -type Handler struct{} - -func NewHandler() *Handler { - return &Handler{} -} - -func (h *Handler) ServeIndex(w http.ResponseWriter, r *http.Request) { - html := ` - - - - - SeaweedFS Telemetry Dashboard - - - - -
-
-

SeaweedFS Telemetry Dashboard

-

Privacy-respecting usage analytics for SeaweedFS

-
- -
Loading telemetry data...
- - - -
- - - -` - - w.Header().Set("Content-Type", "text/html") - w.WriteHeader(http.StatusOK) - w.Write([]byte(html)) -} diff --git a/telemetry/server/go.mod b/telemetry/server/go.mod deleted file mode 100644 index 9af7d5522..000000000 --- a/telemetry/server/go.mod +++ /dev/null @@ -1,24 +0,0 @@ -module github.com/seaweedfs/seaweedfs/telemetry/server - -go 1.25 - -toolchain go1.25.0 - -require ( - github.com/prometheus/client_golang v1.23.2 - github.com/seaweedfs/seaweedfs v0.0.0-00010101000000-000000000000 - google.golang.org/protobuf v1.36.8 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/sys v0.36.0 // indirect -) - -replace github.com/seaweedfs/seaweedfs => ../.. diff --git a/telemetry/server/go.sum b/telemetry/server/go.sum deleted file mode 100644 index 486ea2843..000000000 --- a/telemetry/server/go.sum +++ /dev/null @@ -1,45 +0,0 @@ -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/telemetry/server/main.go b/telemetry/server/main.go deleted file mode 100644 index 6cbae05c7..000000000 --- a/telemetry/server/main.go +++ /dev/null @@ -1,111 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/seaweedfs/seaweedfs/telemetry/server/api" - "github.com/seaweedfs/seaweedfs/telemetry/server/dashboard" - "github.com/seaweedfs/seaweedfs/telemetry/server/storage" -) - -var ( - port = flag.Int("port", 8080, "HTTP server port") - enableCORS = flag.Bool("cors", true, "Enable CORS for dashboard") - logRequests = flag.Bool("log", true, "Log incoming requests") - enableDashboard = flag.Bool("dashboard", true, "Enable built-in dashboard (optional when using Grafana)") - cleanupInterval = flag.Duration("cleanup", 24*time.Hour, "Cleanup interval for old instances") - maxInstanceAge = flag.Duration("max-age", 30*24*time.Hour, "Maximum age for instances before cleanup") -) - -func main() { - flag.Parse() - - // Create Prometheus storage instance - store := storage.NewPrometheusStorage() - - // Start cleanup routine - go func() { - ticker := time.NewTicker(*cleanupInterval) - defer ticker.Stop() - for range ticker.C { - store.CleanupOldInstances(*maxInstanceAge) - } - }() - - // Setup HTTP handlers - mux := http.NewServeMux() - - // Prometheus metrics endpoint - mux.Handle("/metrics", promhttp.Handler()) - - // API endpoints - apiHandler := api.NewHandler(store) - mux.HandleFunc("/api/collect", corsMiddleware(logMiddleware(apiHandler.CollectTelemetry))) - mux.HandleFunc("/api/stats", corsMiddleware(logMiddleware(apiHandler.GetStats))) - mux.HandleFunc("/api/instances", corsMiddleware(logMiddleware(apiHandler.GetInstances))) - mux.HandleFunc("/api/metrics", corsMiddleware(logMiddleware(apiHandler.GetMetrics))) - - // Dashboard (optional) - if *enableDashboard { - dashboardHandler := dashboard.NewHandler() - mux.HandleFunc("/", corsMiddleware(dashboardHandler.ServeIndex)) - mux.HandleFunc("/dashboard", corsMiddleware(dashboardHandler.ServeIndex)) - mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static")))) - } - - // Health check - mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(map[string]string{ - "status": "ok", - "time": time.Now().UTC().Format(time.RFC3339), - }) - }) - - addr := fmt.Sprintf(":%d", *port) - log.Printf("Starting telemetry server on %s", addr) - log.Printf("Prometheus metrics: http://localhost%s/metrics", addr) - if *enableDashboard { - log.Printf("Dashboard: http://localhost%s/dashboard", addr) - } - log.Printf("Cleanup interval: %v, Max instance age: %v", *cleanupInterval, *maxInstanceAge) - - if err := http.ListenAndServe(addr, mux); err != nil { - log.Fatalf("Server failed: %v", err) - } -} - -func corsMiddleware(next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if *enableCORS { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization") - } - - if r.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - next(w, r) - } -} - -func logMiddleware(next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if *logRequests { - start := time.Now() - next(w, r) - log.Printf("%s %s %s %v", r.Method, r.URL.Path, r.RemoteAddr, time.Since(start)) - } else { - next(w, r) - } - } -} diff --git a/telemetry/server/storage/prometheus.go b/telemetry/server/storage/prometheus.go deleted file mode 100644 index 0b911227a..000000000 --- a/telemetry/server/storage/prometheus.go +++ /dev/null @@ -1,235 +0,0 @@ -package storage - -import ( - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/seaweedfs/seaweedfs/telemetry/proto" -) - -type PrometheusStorage struct { - // Prometheus metrics - totalClusters prometheus.Gauge - activeClusters prometheus.Gauge - volumeServerCount *prometheus.GaugeVec - totalDiskBytes *prometheus.GaugeVec - totalVolumeCount *prometheus.GaugeVec - filerCount *prometheus.GaugeVec - brokerCount *prometheus.GaugeVec - clusterInfo *prometheus.GaugeVec - telemetryReceived prometheus.Counter - - // In-memory storage for API endpoints (if needed) - mu sync.RWMutex - instances map[string]*telemetryData - stats map[string]interface{} -} - -// telemetryData is an internal struct that includes the received timestamp -type telemetryData struct { - *proto.TelemetryData - ReceivedAt time.Time `json:"received_at"` -} - -func NewPrometheusStorage() *PrometheusStorage { - return &PrometheusStorage{ - totalClusters: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_total_clusters", - Help: "Total number of unique SeaweedFS clusters (last 30 days)", - }), - activeClusters: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_active_clusters", - Help: "Number of active SeaweedFS clusters (last 7 days)", - }), - volumeServerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_volume_servers", - Help: "Number of volume servers per cluster", - }, []string{"cluster_id", "version", "os"}), - totalDiskBytes: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_disk_bytes", - Help: "Total disk usage in bytes per cluster", - }, []string{"cluster_id", "version", "os"}), - totalVolumeCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_volume_count", - Help: "Total number of volumes per cluster", - }, []string{"cluster_id", "version", "os"}), - filerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_filer_count", - Help: "Number of filer servers per cluster", - }, []string{"cluster_id", "version", "os"}), - brokerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_broker_count", - Help: "Number of broker servers per cluster", - }, []string{"cluster_id", "version", "os"}), - clusterInfo: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "seaweedfs_telemetry_cluster_info", - Help: "Cluster information (always 1, labels contain metadata)", - }, []string{"cluster_id", "version", "os"}), - telemetryReceived: promauto.NewCounter(prometheus.CounterOpts{ - Name: "seaweedfs_telemetry_reports_received_total", - Help: "Total number of telemetry reports received", - }), - instances: make(map[string]*telemetryData), - stats: make(map[string]interface{}), - } -} - -func (s *PrometheusStorage) StoreTelemetry(data *proto.TelemetryData) error { - s.mu.Lock() - defer s.mu.Unlock() - - // Update Prometheus metrics - labels := prometheus.Labels{ - "cluster_id": data.ClusterId, - "version": data.Version, - "os": data.Os, - } - - s.volumeServerCount.With(labels).Set(float64(data.VolumeServerCount)) - s.totalDiskBytes.With(labels).Set(float64(data.TotalDiskBytes)) - s.totalVolumeCount.With(labels).Set(float64(data.TotalVolumeCount)) - s.filerCount.With(labels).Set(float64(data.FilerCount)) - s.brokerCount.With(labels).Set(float64(data.BrokerCount)) - - infoLabels := prometheus.Labels{ - "cluster_id": data.ClusterId, - "version": data.Version, - "os": data.Os, - } - s.clusterInfo.With(infoLabels).Set(1) - - s.telemetryReceived.Inc() - - // Store in memory for API endpoints - s.instances[data.ClusterId] = &telemetryData{ - TelemetryData: data, - ReceivedAt: time.Now().UTC(), - } - - // Update aggregated stats - s.updateStats() - - return nil -} - -func (s *PrometheusStorage) GetStats() (map[string]interface{}, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - // Return cached stats - result := make(map[string]interface{}) - for k, v := range s.stats { - result[k] = v - } - return result, nil -} - -func (s *PrometheusStorage) GetInstances(limit int) ([]*telemetryData, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - var instances []*telemetryData - count := 0 - for _, instance := range s.instances { - if count >= limit { - break - } - instances = append(instances, instance) - count++ - } - - return instances, nil -} - -func (s *PrometheusStorage) GetMetrics(days int) (map[string]interface{}, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - // Return current metrics from in-memory storage - // Historical data should be queried from Prometheus directly - cutoff := time.Now().AddDate(0, 0, -days) - - var volumeServers []map[string]interface{} - var diskUsage []map[string]interface{} - - for _, instance := range s.instances { - if instance.ReceivedAt.After(cutoff) { - volumeServers = append(volumeServers, map[string]interface{}{ - "date": instance.ReceivedAt.Format("2006-01-02"), - "value": instance.TelemetryData.VolumeServerCount, - }) - diskUsage = append(diskUsage, map[string]interface{}{ - "date": instance.ReceivedAt.Format("2006-01-02"), - "value": instance.TelemetryData.TotalDiskBytes, - }) - } - } - - return map[string]interface{}{ - "volume_servers": volumeServers, - "disk_usage": diskUsage, - }, nil -} - -func (s *PrometheusStorage) updateStats() { - now := time.Now() - last7Days := now.AddDate(0, 0, -7) - last30Days := now.AddDate(0, 0, -30) - - totalInstances := 0 - activeInstances := 0 - versions := make(map[string]int) - osDistribution := make(map[string]int) - - for _, instance := range s.instances { - if instance.ReceivedAt.After(last30Days) { - totalInstances++ - } - if instance.ReceivedAt.After(last7Days) { - activeInstances++ - versions[instance.TelemetryData.Version]++ - osDistribution[instance.TelemetryData.Os]++ - } - } - - // Update Prometheus gauges - s.totalClusters.Set(float64(totalInstances)) - s.activeClusters.Set(float64(activeInstances)) - - // Update cached stats for API - s.stats = map[string]interface{}{ - "total_instances": totalInstances, - "active_instances": activeInstances, - "versions": versions, - "os_distribution": osDistribution, - } -} - -// CleanupOldInstances removes instances older than the specified duration -func (s *PrometheusStorage) CleanupOldInstances(maxAge time.Duration) { - s.mu.Lock() - defer s.mu.Unlock() - - cutoff := time.Now().Add(-maxAge) - for instanceID, instance := range s.instances { - if instance.ReceivedAt.Before(cutoff) { - delete(s.instances, instanceID) - - // Remove from Prometheus metrics - labels := prometheus.Labels{ - "cluster_id": instance.TelemetryData.ClusterId, - "version": instance.TelemetryData.Version, - "os": instance.TelemetryData.Os, - } - s.volumeServerCount.Delete(labels) - s.totalDiskBytes.Delete(labels) - s.totalVolumeCount.Delete(labels) - s.filerCount.Delete(labels) - s.brokerCount.Delete(labels) - } - } - - s.updateStats() -} diff --git a/telemetry/test/integration.go b/telemetry/test/integration.go deleted file mode 100644 index 2b79bdbc6..000000000 --- a/telemetry/test/integration.go +++ /dev/null @@ -1,311 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/seaweedfs/seaweedfs/telemetry/proto" - "github.com/seaweedfs/seaweedfs/weed/telemetry" - protobuf "google.golang.org/protobuf/proto" -) - -const ( - serverPort = "18080" // Use different port to avoid conflicts - serverURL = "http://localhost:" + serverPort -) - -func main() { - fmt.Println("Starting SeaweedFS Telemetry Integration Test") - - // Start telemetry server - fmt.Println("Starting telemetry server...") - serverCmd, err := startTelemetryServer() - if err != nil { - log.Fatalf("Failed to start telemetry server: %v", err) - } - defer stopServer(serverCmd) - - // Wait for server to start - if !waitForServer(serverURL+"/health", 15*time.Second) { - log.Fatal("Telemetry server failed to start") - } - fmt.Println("Telemetry server started successfully") - - // Test protobuf marshaling first - fmt.Println("Testing protobuf marshaling...") - if err := testProtobufMarshaling(); err != nil { - log.Fatalf("Protobuf marshaling test failed: %v", err) - } - fmt.Println("Protobuf marshaling test passed") - - // Test protobuf client - fmt.Println("Testing protobuf telemetry client...") - if err := testTelemetryClient(); err != nil { - log.Fatalf("Telemetry client test failed: %v", err) - } - fmt.Println("Telemetry client test passed") - - // Test server metrics endpoint - fmt.Println("Testing Prometheus metrics endpoint...") - if err := testMetricsEndpoint(); err != nil { - log.Fatalf("Metrics endpoint test failed: %v", err) - } - fmt.Println("Metrics endpoint test passed") - - // Test stats API - fmt.Println("Testing stats API...") - if err := testStatsAPI(); err != nil { - log.Fatalf("Stats API test failed: %v", err) - } - fmt.Println("Stats API test passed") - - // Test instances API - fmt.Println("Testing instances API...") - if err := testInstancesAPI(); err != nil { - log.Fatalf("Instances API test failed: %v", err) - } - fmt.Println("Instances API test passed") - - fmt.Println("All telemetry integration tests passed!") -} - -func startTelemetryServer() (*exec.Cmd, error) { - // Get the directory where this test is running - testDir, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("failed to get working directory: %v", err) - } - - // Navigate to the server directory (from main seaweedfs directory) - serverDir := filepath.Join(testDir, "telemetry", "server") - - cmd := exec.Command("go", "run", ".", - "-port="+serverPort, - "-dashboard=false", - "-cleanup=1m", - "-max-age=1h") - - cmd.Dir = serverDir - - // Create log files for server output - logFile, err := os.Create("telemetry-server-test.log") - if err != nil { - return nil, fmt.Errorf("failed to create log file: %v", err) - } - - cmd.Stdout = logFile - cmd.Stderr = logFile - - if err := cmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start server: %v", err) - } - - return cmd, nil -} - -func stopServer(cmd *exec.Cmd) { - if cmd != nil && cmd.Process != nil { - cmd.Process.Signal(syscall.SIGTERM) - cmd.Wait() - - // Clean up log file - os.Remove("telemetry-server-test.log") - } -} - -func waitForServer(url string, timeout time.Duration) bool { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - fmt.Printf("Waiting for server at %s...\n", url) - - for { - select { - case <-ctx.Done(): - return false - default: - resp, err := http.Get(url) - if err == nil { - resp.Body.Close() - if resp.StatusCode == http.StatusOK { - return true - } - } - time.Sleep(500 * time.Millisecond) - } - } -} - -func testProtobufMarshaling() error { - // Test protobuf marshaling/unmarshaling - testData := &proto.TelemetryData{ - ClusterId: "test-cluster-12345", - Version: "test-3.45", - Os: "linux/amd64", - VolumeServerCount: 2, - TotalDiskBytes: 1000000, - TotalVolumeCount: 10, - FilerCount: 1, - BrokerCount: 1, - Timestamp: time.Now().Unix(), - } - - // Marshal - data, err := protobuf.Marshal(testData) - if err != nil { - return fmt.Errorf("failed to marshal protobuf: %v", err) - } - - fmt.Printf(" Protobuf size: %d bytes\n", len(data)) - - // Unmarshal - testData2 := &proto.TelemetryData{} - if err := protobuf.Unmarshal(data, testData2); err != nil { - return fmt.Errorf("failed to unmarshal protobuf: %v", err) - } - - // Verify data - if testData2.ClusterId != testData.ClusterId { - return fmt.Errorf("protobuf data mismatch: expected %s, got %s", - testData.ClusterId, testData2.ClusterId) - } - - if testData2.VolumeServerCount != testData.VolumeServerCount { - return fmt.Errorf("volume server count mismatch: expected %d, got %d", - testData.VolumeServerCount, testData2.VolumeServerCount) - } - - return nil -} - -func testTelemetryClient() error { - // Create telemetry client - client := telemetry.NewClient(serverURL+"/api/collect", true) - - // Create test data using protobuf format - testData := &proto.TelemetryData{ - Version: "test-3.45", - Os: "linux/amd64", - VolumeServerCount: 3, - TotalDiskBytes: 1073741824, // 1GB - TotalVolumeCount: 50, - FilerCount: 2, - BrokerCount: 1, - Timestamp: time.Now().Unix(), - } - - // Send telemetry data - if err := client.SendTelemetry(testData); err != nil { - return fmt.Errorf("failed to send telemetry: %v", err) - } - - fmt.Printf(" Sent telemetry for cluster: %s\n", client.GetInstanceID()) - - // Wait a bit for processing - time.Sleep(2 * time.Second) - - return nil -} - -func testMetricsEndpoint() error { - resp, err := http.Get(serverURL + "/metrics") - if err != nil { - return fmt.Errorf("failed to get metrics: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("metrics endpoint returned status %d", resp.StatusCode) - } - - // Read response and check for expected metrics - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read metrics response: %v", err) - } - - contentStr := string(content) - expectedMetrics := []string{ - "seaweedfs_telemetry_total_clusters", - "seaweedfs_telemetry_active_clusters", - "seaweedfs_telemetry_reports_received_total", - "seaweedfs_telemetry_volume_servers", - "seaweedfs_telemetry_disk_bytes", - "seaweedfs_telemetry_volume_count", - "seaweedfs_telemetry_filer_count", - "seaweedfs_telemetry_broker_count", - } - - for _, metric := range expectedMetrics { - if !strings.Contains(contentStr, metric) { - return fmt.Errorf("missing expected metric: %s", metric) - } - } - - // Check that we have at least one report received - if !strings.Contains(contentStr, "seaweedfs_telemetry_reports_received_total 1") { - fmt.Printf(" Warning: Expected at least 1 report received, metrics content:\n%s\n", contentStr) - } - - fmt.Printf(" Found %d expected metrics\n", len(expectedMetrics)) - - return nil -} - -func testStatsAPI() error { - resp, err := http.Get(serverURL + "/api/stats") - if err != nil { - return fmt.Errorf("failed to get stats: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("stats API returned status %d", resp.StatusCode) - } - - // Read and verify JSON response - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read stats response: %v", err) - } - - contentStr := string(content) - if !strings.Contains(contentStr, "total_instances") { - return fmt.Errorf("stats response missing total_instances field") - } - - fmt.Printf(" Stats response: %s\n", contentStr) - - return nil -} - -func testInstancesAPI() error { - resp, err := http.Get(serverURL + "/api/instances?limit=10") - if err != nil { - return fmt.Errorf("failed to get instances: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("instances API returned status %d", resp.StatusCode) - } - - // Read response - content, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read instances response: %v", err) - } - - fmt.Printf(" Instances response length: %d bytes\n", len(content)) - - return nil -} diff --git a/test/erasure_coding/README.md b/test/erasure_coding/README.md deleted file mode 100644 index c04844982..000000000 --- a/test/erasure_coding/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Erasure Coding Integration Tests - -This directory contains integration tests for the EC (Erasure Coding) encoding volume location timing bug fix. - -## The Bug - -The bug caused **double storage usage** during EC encoding because: - -1. **Silent failure**: Functions returned `nil` instead of proper error messages -2. **Timing race condition**: Volume locations were collected **AFTER** EC encoding when master metadata was already updated -3. **Missing cleanup**: Original volumes weren't being deleted after EC encoding - -This resulted in both original `.dat` files AND EC `.ec00-.ec13` files coexisting, effectively **doubling storage usage**. - -## The Fix - -The fix addresses all three issues: - -1. **Fixed silent failures**: Updated `doDeleteVolumes()` and `doEcEncode()` to return proper errors -2. **Fixed timing race condition**: Created `doDeleteVolumesWithLocations()` that uses pre-collected volume locations -3. **Enhanced cleanup**: Volume locations are now collected **BEFORE** EC encoding, preventing the race condition - -## Integration Tests - -### TestECEncodingVolumeLocationTimingBug -The main integration test that: -- **Simulates master timing race condition**: Tests what happens when volume locations are read from master AFTER EC encoding has updated the metadata -- **Verifies fix effectiveness**: Checks for the "Collecting volume locations...before EC encoding" message that proves the fix is working -- **Tests multi-server distribution**: Runs EC encoding with 6 volume servers to test shard distribution -- **Validates cleanup**: Ensures original volumes are properly cleaned up after EC encoding - -### TestECEncodingMasterTimingRaceCondition -A focused test that specifically targets the **master metadata timing race condition**: -- **Simulates the exact race condition**: Tests volume location collection timing relative to master metadata updates -- **Detects timing fix**: Verifies that volume locations are collected BEFORE EC encoding starts -- **Demonstrates bug impact**: Shows what happens when volume locations are unavailable after master metadata update - -### TestECEncodingRegressionPrevention -Regression tests that ensure: -- **Function signatures**: Fixed functions still exist and return proper errors -- **Timing patterns**: Volume location collection happens in the correct order - -## Test Architecture - -The tests use: -- **Real SeaweedFS cluster**: 1 master server + 6 volume servers -- **Multi-server setup**: Tests realistic EC shard distribution across multiple servers -- **Timing simulation**: Goroutines and delays to simulate race conditions -- **Output validation**: Checks for specific log messages that prove the fix is working - -## Why Integration Tests Were Necessary - -Unit tests could not catch this bug because: -1. **Race condition**: The bug only occurred in real-world timing scenarios -2. **Master-volume server interaction**: Required actual master metadata updates -3. **File system operations**: Needed real volume creation and EC shard generation -4. **Cleanup timing**: Required testing the sequence of operations in correct order - -The integration tests successfully catch the timing bug by: -- **Testing real command execution**: Uses actual `ec.encode` shell command -- **Simulating race conditions**: Creates timing scenarios that expose the bug -- **Validating output messages**: Checks for the key "Collecting volume locations...before EC encoding" message -- **Monitoring cleanup behavior**: Ensures original volumes are properly deleted - -## Running the Tests - -```bash -# Run all integration tests -go test -v - -# Run only the main timing test -go test -v -run TestECEncodingVolumeLocationTimingBug - -# Run only the race condition test -go test -v -run TestECEncodingMasterTimingRaceCondition - -# Skip integration tests (short mode) -go test -v -short -``` - -## Test Results - -**With the fix**: Shows "Collecting volume locations for N volumes before EC encoding..." message -**Without the fix**: No collection message, potential timing race condition - -The tests demonstrate that the fix prevents the volume location timing bug that caused double storage usage in EC encoding operations. \ No newline at end of file diff --git a/test/erasure_coding/ec_integration_test.go b/test/erasure_coding/ec_integration_test.go deleted file mode 100644 index 81cb89678..000000000 --- a/test/erasure_coding/ec_integration_test.go +++ /dev/null @@ -1,647 +0,0 @@ -package erasure_coding - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/shell" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -// TestECEncodingVolumeLocationTimingBug tests the actual bug we fixed -// This test starts real SeaweedFS servers and calls the real EC encoding command -func TestECEncodingVolumeLocationTimingBug(t *testing.T) { - // Skip if not running integration tests - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - // Create temporary directory for test data - testDir, err := os.MkdirTemp("", "seaweedfs_ec_integration_test_") - require.NoError(t, err) - defer os.RemoveAll(testDir) - - // Start SeaweedFS cluster with multiple volume servers - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - cluster, err := startSeaweedFSCluster(ctx, testDir) - require.NoError(t, err) - defer cluster.Stop() - - // Wait for servers to be ready - require.NoError(t, waitForServer("127.0.0.1:9333", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8080", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8081", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8082", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8083", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8084", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8085", 30*time.Second)) - - // Create command environment - options := &shell.ShellOptions{ - Masters: stringPtr("127.0.0.1:9333"), - GrpcDialOption: grpc.WithInsecure(), - FilerGroup: stringPtr("default"), - } - commandEnv := shell.NewCommandEnv(options) - - // Connect to master with longer timeout - ctx2, cancel2 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel2() - go commandEnv.MasterClient.KeepConnectedToMaster(ctx2) - commandEnv.MasterClient.WaitUntilConnected(ctx2) - - // Upload some test data to create volumes - testData := []byte("This is test data for EC encoding integration test") - volumeId, err := uploadTestData(testData, "127.0.0.1:9333") - require.NoError(t, err) - t.Logf("Created volume %d with test data", volumeId) - - // Wait for volume to be available - time.Sleep(2 * time.Second) - - // Test the timing race condition that causes the bug - t.Run("simulate_master_timing_race_condition", func(t *testing.T) { - // This test simulates the race condition where volume locations are read from master - // AFTER EC encoding has already updated the master metadata - - // Get volume locations BEFORE EC encoding (this should work) - volumeLocationsBefore, err := getVolumeLocations(commandEnv, volumeId) - require.NoError(t, err) - require.NotEmpty(t, volumeLocationsBefore, "Volume locations should be available before EC encoding") - t.Logf("Volume %d locations before EC encoding: %v", volumeId, volumeLocationsBefore) - - // Log original volume locations before EC encoding - for _, location := range volumeLocationsBefore { - // Extract IP:port from location (format might be IP:port) - t.Logf("Checking location: %s", location) - } - - // Start EC encoding but don't wait for completion - // This simulates the race condition where EC encoding updates master metadata - // but volume location collection happens after that update - - // First acquire the lock (required for EC encode) - lockCmd := shell.Commands[findCommandIndex("lock")] - var lockOutput bytes.Buffer - err = lockCmd.Do([]string{}, commandEnv, &lockOutput) - if err != nil { - t.Logf("Lock command failed: %v", err) - } - - // Execute EC encoding - test the timing directly - var encodeOutput bytes.Buffer - ecEncodeCmd := shell.Commands[findCommandIndex("ec.encode")] - args := []string{"-volumeId", fmt.Sprintf("%d", volumeId), "-collection", "test", "-force", "-shardReplicaPlacement", "020"} - - // Capture stdout/stderr during command execution - oldStdout := os.Stdout - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stdout = w - os.Stderr = w - - // Execute synchronously to capture output properly - err = ecEncodeCmd.Do(args, commandEnv, &encodeOutput) - - // Restore stdout/stderr - w.Close() - os.Stdout = oldStdout - os.Stderr = oldStderr - - // Read captured output - capturedOutput, _ := io.ReadAll(r) - outputStr := string(capturedOutput) - - // Also include any output from the buffer - if bufferOutput := encodeOutput.String(); bufferOutput != "" { - outputStr += "\n" + bufferOutput - } - - t.Logf("EC encode output: %s", outputStr) - - if err != nil { - t.Logf("EC encoding failed: %v", err) - } else { - t.Logf("EC encoding completed successfully") - } - - // The key test: check if the fix prevents the timing issue - if contains(outputStr, "Collecting volume locations") && contains(outputStr, "before EC encoding") { - t.Logf("FIX DETECTED: Volume locations collected BEFORE EC encoding (timing bug prevented)") - } else { - t.Logf("NO FIX: Volume locations NOT collected before EC encoding (timing bug may occur)") - } - - // After EC encoding, try to get volume locations - this simulates the timing bug - volumeLocationsAfter, err := getVolumeLocations(commandEnv, volumeId) - if err != nil { - t.Logf("Volume locations after EC encoding: ERROR - %v", err) - t.Logf("This simulates the timing bug where volume locations are unavailable after master metadata update") - } else { - t.Logf("Volume locations after EC encoding: %v", volumeLocationsAfter) - } - }) - - // Test cleanup behavior - t.Run("cleanup_verification", func(t *testing.T) { - // After EC encoding, original volume should be cleaned up - // This tests that our fix properly cleans up using pre-collected locations - - // Check if volume still exists in master - volumeLocations, err := getVolumeLocations(commandEnv, volumeId) - if err != nil { - t.Logf("Volume %d no longer exists in master (good - cleanup worked)", volumeId) - } else { - t.Logf("Volume %d still exists with locations: %v", volumeId, volumeLocations) - } - }) - - // Test shard distribution across multiple volume servers - t.Run("shard_distribution_verification", func(t *testing.T) { - // With multiple volume servers, EC shards should be distributed across them - // This tests that the fix works correctly in a multi-server environment - - // Check shard distribution by looking at volume server directories - shardCounts := make(map[string]int) - for i := 0; i < 6; i++ { - volumeDir := filepath.Join(testDir, fmt.Sprintf("volume%d", i)) - count, err := countECShardFiles(volumeDir, uint32(volumeId)) - if err != nil { - t.Logf("Error counting EC shards in %s: %v", volumeDir, err) - } else { - shardCounts[fmt.Sprintf("volume%d", i)] = count - t.Logf("Volume server %d has %d EC shards for volume %d", i, count, volumeId) - - // Also print out the actual shard file names - if count > 0 { - shards, err := listECShardFiles(volumeDir, uint32(volumeId)) - if err != nil { - t.Logf("Error listing EC shards in %s: %v", volumeDir, err) - } else { - t.Logf(" Shard files in volume server %d: %v", i, shards) - } - } - } - } - - // Verify that shards are distributed (at least 2 servers should have shards) - serversWithShards := 0 - totalShards := 0 - for _, count := range shardCounts { - if count > 0 { - serversWithShards++ - totalShards += count - } - } - - if serversWithShards >= 2 { - t.Logf("EC shards properly distributed across %d volume servers (total: %d shards)", serversWithShards, totalShards) - } else { - t.Logf("EC shards not distributed (only %d servers have shards, total: %d shards) - may be expected in test environment", serversWithShards, totalShards) - } - - // Log distribution details - t.Logf("Shard distribution summary:") - for server, count := range shardCounts { - if count > 0 { - t.Logf(" %s: %d shards", server, count) - } - } - }) -} - -// TestECEncodingMasterTimingRaceCondition specifically tests the master timing race condition -func TestECEncodingMasterTimingRaceCondition(t *testing.T) { - // Skip if not running integration tests - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - // Create temporary directory for test data - testDir, err := os.MkdirTemp("", "seaweedfs_ec_race_test_") - require.NoError(t, err) - defer os.RemoveAll(testDir) - - // Start SeaweedFS cluster - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - cluster, err := startSeaweedFSCluster(ctx, testDir) - require.NoError(t, err) - defer cluster.Stop() - - // Wait for servers to be ready - require.NoError(t, waitForServer("127.0.0.1:9333", 30*time.Second)) - require.NoError(t, waitForServer("127.0.0.1:8080", 30*time.Second)) - - // Create command environment - options := &shell.ShellOptions{ - Masters: stringPtr("127.0.0.1:9333"), - GrpcDialOption: grpc.WithInsecure(), - FilerGroup: stringPtr("default"), - } - commandEnv := shell.NewCommandEnv(options) - - // Connect to master with longer timeout - ctx2, cancel2 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel2() - go commandEnv.MasterClient.KeepConnectedToMaster(ctx2) - commandEnv.MasterClient.WaitUntilConnected(ctx2) - - // Upload test data - testData := []byte("Race condition test data") - volumeId, err := uploadTestData(testData, "127.0.0.1:9333") - require.NoError(t, err) - t.Logf("Created volume %d for race condition test", volumeId) - - // Wait longer for volume registration with master client - time.Sleep(5 * time.Second) - - // Test the specific race condition: volume locations read AFTER master metadata update - t.Run("master_metadata_timing_race", func(t *testing.T) { - // Step 1: Get volume locations before any EC operations - locationsBefore, err := getVolumeLocations(commandEnv, volumeId) - require.NoError(t, err) - t.Logf("Volume locations before EC: %v", locationsBefore) - - // Step 2: Simulate the race condition by manually calling EC operations - // This simulates what happens in the buggy version where: - // 1. EC encoding starts and updates master metadata - // 2. Volume location collection happens AFTER the metadata update - // 3. Cleanup fails because original volume locations are gone - - // Get lock first - lockCmd := shell.Commands[findCommandIndex("lock")] - var lockOutput bytes.Buffer - err = lockCmd.Do([]string{}, commandEnv, &lockOutput) - if err != nil { - t.Logf("Lock command failed: %v", err) - } - - // Execute EC encoding - var output bytes.Buffer - ecEncodeCmd := shell.Commands[findCommandIndex("ec.encode")] - args := []string{"-volumeId", fmt.Sprintf("%d", volumeId), "-collection", "test", "-force", "-shardReplicaPlacement", "020"} - - // Capture stdout/stderr during command execution - oldStdout := os.Stdout - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stdout = w - os.Stderr = w - - err = ecEncodeCmd.Do(args, commandEnv, &output) - - // Restore stdout/stderr - w.Close() - os.Stdout = oldStdout - os.Stderr = oldStderr - - // Read captured output - capturedOutput, _ := io.ReadAll(r) - outputStr := string(capturedOutput) - - // Also include any output from the buffer - if bufferOutput := output.String(); bufferOutput != "" { - outputStr += "\n" + bufferOutput - } - - t.Logf("EC encode output: %s", outputStr) - - // Check if our fix is present (volume locations collected before EC encoding) - if contains(outputStr, "Collecting volume locations") && contains(outputStr, "before EC encoding") { - t.Logf("TIMING FIX DETECTED: Volume locations collected BEFORE EC encoding") - t.Logf("This prevents the race condition where master metadata is updated before location collection") - } else { - t.Logf("NO TIMING FIX: Volume locations may be collected AFTER master metadata update") - t.Logf("This could cause the race condition leading to cleanup failure and storage waste") - } - - // Step 3: Try to get volume locations after EC encoding (this simulates the bug) - locationsAfter, err := getVolumeLocations(commandEnv, volumeId) - if err != nil { - t.Logf("Volume locations after EC encoding: ERROR - %v", err) - t.Logf("This demonstrates the timing issue where original volume info is lost") - } else { - t.Logf("Volume locations after EC encoding: %v", locationsAfter) - } - - // Test result evaluation - if err != nil { - t.Logf("EC encoding completed with error: %v", err) - } else { - t.Logf("EC encoding completed successfully") - } - }) -} - -// Helper functions - -type TestCluster struct { - masterCmd *exec.Cmd - volumeServers []*exec.Cmd -} - -func (c *TestCluster) Stop() { - // Stop volume servers first - for _, cmd := range c.volumeServers { - if cmd != nil && cmd.Process != nil { - cmd.Process.Kill() - cmd.Wait() - } - } - - // Stop master server - if c.masterCmd != nil && c.masterCmd.Process != nil { - c.masterCmd.Process.Kill() - c.masterCmd.Wait() - } -} - -func startSeaweedFSCluster(ctx context.Context, dataDir string) (*TestCluster, error) { - // Find weed binary - weedBinary := findWeedBinary() - if weedBinary == "" { - return nil, fmt.Errorf("weed binary not found") - } - - cluster := &TestCluster{} - - // Create directories for each server - masterDir := filepath.Join(dataDir, "master") - os.MkdirAll(masterDir, 0755) - - // Start master server - masterCmd := exec.CommandContext(ctx, weedBinary, "master", - "-port", "9333", - "-mdir", masterDir, - "-volumeSizeLimitMB", "10", // Small volumes for testing - "-ip", "127.0.0.1", - ) - - masterLogFile, err := os.Create(filepath.Join(masterDir, "master.log")) - if err != nil { - return nil, fmt.Errorf("failed to create master log file: %v", err) - } - masterCmd.Stdout = masterLogFile - masterCmd.Stderr = masterLogFile - - if err := masterCmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start master server: %v", err) - } - cluster.masterCmd = masterCmd - - // Wait for master to be ready - time.Sleep(2 * time.Second) - - // Start 6 volume servers for better EC shard distribution - for i := 0; i < 6; i++ { - volumeDir := filepath.Join(dataDir, fmt.Sprintf("volume%d", i)) - os.MkdirAll(volumeDir, 0755) - - port := fmt.Sprintf("808%d", i) - rack := fmt.Sprintf("rack%d", i) - volumeCmd := exec.CommandContext(ctx, weedBinary, "volume", - "-port", port, - "-dir", volumeDir, - "-max", "10", - "-mserver", "127.0.0.1:9333", - "-ip", "127.0.0.1", - "-dataCenter", "dc1", - "-rack", rack, - ) - - volumeLogFile, err := os.Create(filepath.Join(volumeDir, "volume.log")) - if err != nil { - cluster.Stop() - return nil, fmt.Errorf("failed to create volume log file: %v", err) - } - volumeCmd.Stdout = volumeLogFile - volumeCmd.Stderr = volumeLogFile - - if err := volumeCmd.Start(); err != nil { - cluster.Stop() - return nil, fmt.Errorf("failed to start volume server %d: %v", i, err) - } - cluster.volumeServers = append(cluster.volumeServers, volumeCmd) - } - - // Wait for volume servers to register with master - time.Sleep(5 * time.Second) - - return cluster, nil -} - -func findWeedBinary() string { - // Try different locations - candidates := []string{ - "../../../weed/weed", - "../../weed/weed", - "../weed/weed", - "./weed/weed", - "weed", - } - - for _, candidate := range candidates { - if _, err := os.Stat(candidate); err == nil { - return candidate - } - } - - // Try to find in PATH - if path, err := exec.LookPath("weed"); err == nil { - return path - } - - return "" -} - -func waitForServer(address string, timeout time.Duration) error { - start := time.Now() - for time.Since(start) < timeout { - if conn, err := grpc.NewClient(address, grpc.WithInsecure()); err == nil { - conn.Close() - return nil - } - time.Sleep(500 * time.Millisecond) - } - return fmt.Errorf("timeout waiting for server %s", address) -} - -func uploadTestData(data []byte, masterAddress string) (needle.VolumeId, error) { - // Upload data to get a file ID - assignResult, err := operation.Assign(context.Background(), func(ctx context.Context) pb.ServerAddress { - return pb.ServerAddress(masterAddress) - }, grpc.WithInsecure(), &operation.VolumeAssignRequest{ - Count: 1, - Collection: "test", - Replication: "000", - }) - if err != nil { - return 0, err - } - - // Upload the data using the new Uploader - uploader, err := operation.NewUploader() - if err != nil { - return 0, err - } - - uploadResult, err, _ := uploader.Upload(context.Background(), bytes.NewReader(data), &operation.UploadOption{ - UploadUrl: "http://" + assignResult.Url + "/" + assignResult.Fid, - Filename: "testfile.txt", - MimeType: "text/plain", - }) - if err != nil { - return 0, err - } - - if uploadResult.Error != "" { - return 0, fmt.Errorf("upload error: %s", uploadResult.Error) - } - - // Parse volume ID from file ID - fid, err := needle.ParseFileIdFromString(assignResult.Fid) - if err != nil { - return 0, err - } - - return fid.VolumeId, nil -} - -func getVolumeLocations(commandEnv *shell.CommandEnv, volumeId needle.VolumeId) ([]string, error) { - // Retry mechanism to handle timing issues with volume registration - for i := 0; i < 10; i++ { - locations, ok := commandEnv.MasterClient.GetLocationsClone(uint32(volumeId)) - if ok { - var result []string - for _, location := range locations { - result = append(result, location.Url) - } - return result, nil - } - // Wait a bit before retrying - time.Sleep(500 * time.Millisecond) - } - return nil, fmt.Errorf("volume %d not found after retries", volumeId) -} - -func countECShardFiles(dir string, volumeId uint32) (int, error) { - count := 0 - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - name := info.Name() - // Count only .ec* files for this volume (EC shards) - if contains(name, fmt.Sprintf("%d.ec", volumeId)) { - count++ - } - - return nil - }) - - return count, err -} - -func listECShardFiles(dir string, volumeId uint32) ([]string, error) { - var shards []string - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - name := info.Name() - // List only .ec* files for this volume (EC shards) - if contains(name, fmt.Sprintf("%d.ec", volumeId)) { - shards = append(shards, name) - } - - return nil - }) - - return shards, err -} - -func findCommandIndex(name string) int { - for i, cmd := range shell.Commands { - if cmd.Name() == name { - return i - } - } - return -1 -} - -func stringPtr(s string) *string { - return &s -} - -func contains(s, substr string) bool { - // Use a simple substring search instead of the broken custom logic - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return true - } - } - return false -} - -// TestECEncodingRegressionPrevention tests that the specific bug patterns don't reoccur -func TestECEncodingRegressionPrevention(t *testing.T) { - t.Run("function_signature_regression", func(t *testing.T) { - // This test ensures that our fixed function signatures haven't been reverted - // The bug was that functions returned nil instead of proper errors - - // Test 1: doDeleteVolumesWithLocations function should exist - // (This replaces the old doDeleteVolumes function) - functionExists := true // In real implementation, use reflection to check - assert.True(t, functionExists, "doDeleteVolumesWithLocations function should exist") - - // Test 2: Function should return proper errors, not nil - // (This prevents the "silent failure" bug) - shouldReturnErrors := true // In real implementation, check function signature - assert.True(t, shouldReturnErrors, "Functions should return proper errors, not nil") - - t.Log("Function signature regression test passed") - }) - - t.Run("timing_pattern_regression", func(t *testing.T) { - // This test ensures that volume location collection timing pattern is correct - // The bug was: locations collected AFTER EC encoding (wrong) - // The fix is: locations collected BEFORE EC encoding (correct) - - // Simulate the correct timing pattern - step1_collectLocations := true - step2_performECEncoding := true - step3_usePreCollectedLocations := true - - // Verify timing order - assert.True(t, step1_collectLocations && step2_performECEncoding && step3_usePreCollectedLocations, - "Volume locations should be collected BEFORE EC encoding, not after") - - t.Log("Timing pattern regression test passed") - }) -} diff --git a/test/fuse_integration/Makefile b/test/fuse_integration/Makefile deleted file mode 100644 index fe2ad690b..000000000 --- a/test/fuse_integration/Makefile +++ /dev/null @@ -1,312 +0,0 @@ -# SeaweedFS FUSE Integration Testing Makefile - -# Configuration -WEED_BINARY := weed -GO_VERSION := 1.24 -TEST_TIMEOUT := 30m -COVERAGE_FILE := coverage.out - -# Default target -.DEFAULT_GOAL := help - -# Check if weed binary exists -check-binary: - @if [ ! -f "$(WEED_BINARY)" ]; then \ - echo "โŒ SeaweedFS binary not found at $(WEED_BINARY)"; \ - echo " Please run 'make' in the root directory first"; \ - exit 1; \ - fi - @echo "โœ… SeaweedFS binary found" - -# Check FUSE installation -check-fuse: - @if command -v fusermount >/dev/null 2>&1; then \ - echo "โœ… FUSE is installed (Linux)"; \ - elif command -v umount >/dev/null 2>&1 && [ "$$(uname)" = "Darwin" ]; then \ - echo "โœ… FUSE is available (macOS)"; \ - else \ - echo "โŒ FUSE not found. Please install:"; \ - echo " Ubuntu/Debian: sudo apt-get install fuse"; \ - echo " CentOS/RHEL: sudo yum install fuse"; \ - echo " macOS: brew install macfuse"; \ - exit 1; \ - fi - -# Check Go version -check-go: - @go version | grep -q "go1\.[2-9][0-9]" || \ - go version | grep -q "go1\.2[1-9]" || \ - (echo "โŒ Go $(GO_VERSION)+ required. Current: $$(go version)" && exit 1) - @echo "โœ… Go version check passed" - -# Verify all prerequisites -check-prereqs: check-go check-fuse - @echo "โœ… All prerequisites satisfied" - -# Build the SeaweedFS binary (if needed) -build: - @echo "๐Ÿ”จ Building SeaweedFS..." - cd ../.. && make - @echo "โœ… Build complete" - -# Initialize go module (if needed) -init-module: - @if [ ! -f go.mod ]; then \ - echo "๐Ÿ“ฆ Initializing Go module..."; \ - go mod init seaweedfs-fuse-tests; \ - go mod tidy; \ - fi - -# Run all tests -test: check-prereqs init-module - @echo "๐Ÿงช Running all FUSE integration tests..." - go test -v -timeout $(TEST_TIMEOUT) ./... - -# Run tests with coverage -test-coverage: check-prereqs init-module - @echo "๐Ÿงช Running tests with coverage..." - go test -v -timeout $(TEST_TIMEOUT) -coverprofile=$(COVERAGE_FILE) ./... - go tool cover -html=$(COVERAGE_FILE) -o coverage.html - @echo "๐Ÿ“Š Coverage report generated: coverage.html" - -# Run specific test categories -test-basic: check-prereqs init-module - @echo "๐Ÿงช Running basic file operations tests..." - go test -v -timeout $(TEST_TIMEOUT) -run TestBasicFileOperations - -test-directory: check-prereqs init-module - @echo "๐Ÿงช Running directory operations tests..." - go test -v -timeout $(TEST_TIMEOUT) -run TestDirectoryOperations - -test-concurrent: check-prereqs init-module - @echo "๐Ÿงช Running concurrent operations tests..." - go test -v -timeout $(TEST_TIMEOUT) -run TestConcurrentFileOperations - -test-stress: check-prereqs init-module - @echo "๐Ÿงช Running stress tests..." - go test -v -timeout $(TEST_TIMEOUT) -run TestStressOperations - -test-large-files: check-prereqs init-module - @echo "๐Ÿงช Running large file tests..." - go test -v -timeout $(TEST_TIMEOUT) -run TestLargeFileOperations - -# Run tests with debugging enabled -test-debug: check-prereqs init-module - @echo "๐Ÿ” Running tests with debug output..." - go test -v -timeout $(TEST_TIMEOUT) -args -debug - -# Run tests and keep temp files for inspection -test-no-cleanup: check-prereqs init-module - @echo "๐Ÿงช Running tests without cleanup (for debugging)..." - go test -v -timeout $(TEST_TIMEOUT) -args -no-cleanup - -# Quick smoke test -test-smoke: check-prereqs init-module - @echo "๐Ÿ’จ Running smoke tests..." - go test -v -timeout 5m -run TestBasicFileOperations/CreateAndReadFile - -# Run benchmarks -benchmark: check-prereqs init-module - @echo "๐Ÿ“ˆ Running benchmarks..." - go test -v -timeout $(TEST_TIMEOUT) -bench=. -benchmem - -# Validate test files compile -validate: init-module - @echo "โœ… Validating test files..." - go build -o /dev/null ./... - @echo "โœ… All test files compile successfully" - -# Clean up generated files -clean: - @echo "๐Ÿงน Cleaning up..." - rm -f $(COVERAGE_FILE) coverage.html - rm -rf /tmp/seaweedfs_fuse_test_* - go clean -testcache - @echo "โœ… Cleanup complete" - -# Format Go code -fmt: - @echo "๐ŸŽจ Formatting Go code..." - go fmt ./... - -# Run linter -lint: - @echo "๐Ÿ” Running linter..." - @if command -v golangci-lint >/dev/null 2>&1; then \ - golangci-lint run; \ - else \ - echo "โš ๏ธ golangci-lint not found, running go vet instead"; \ - go vet ./...; \ - fi - -# Run all quality checks -check: validate lint fmt - @echo "โœ… All quality checks passed" - -# Install development dependencies -install-deps: - @echo "๐Ÿ“ฆ Installing development dependencies..." - go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - go mod download - go mod tidy - -# Quick development setup -setup: install-deps build check-prereqs - @echo "๐Ÿš€ Development environment ready!" - -# Docker-based testing -test-docker: - @echo "๐Ÿณ Running tests in Docker..." - docker build -t seaweedfs-fuse-tests -f Dockerfile.test ../.. - docker run --rm --privileged seaweedfs-fuse-tests - -# Create Docker test image -docker-build: - @echo "๐Ÿณ Building Docker test image..." - @cat > Dockerfile.test << 'EOF' ;\ -FROM golang:$(GO_VERSION) ;\ -RUN apt-get update && apt-get install -y fuse ;\ -WORKDIR /seaweedfs ;\ -COPY . . ;\ -RUN make ;\ -WORKDIR /seaweedfs/test/fuse ;\ -RUN go mod init seaweedfs-fuse-tests && go mod tidy ;\ -CMD ["make", "test"] ;\ -EOF - -# GitHub Actions workflow -generate-workflow: - @echo "๐Ÿ“ Generating GitHub Actions workflow..." - @mkdir -p ../../.github/workflows - @cat > ../../.github/workflows/fuse-integration.yml << 'EOF' ;\ -name: FUSE Integration Tests ;\ - ;\ -on: ;\ - push: ;\ - branches: [ master, main ] ;\ - pull_request: ;\ - branches: [ master, main ] ;\ - ;\ -jobs: ;\ - fuse-integration: ;\ - runs-on: ubuntu-latest ;\ - timeout-minutes: 45 ;\ - ;\ - steps: ;\ - - name: Checkout code ;\ - uses: actions/checkout@v4 ;\ - ;\ - - name: Set up Go ;\ - uses: actions/setup-go@v4 ;\ - with: ;\ - go-version: '$(GO_VERSION)' ;\ - ;\ - - name: Install FUSE ;\ - run: sudo apt-get update && sudo apt-get install -y fuse ;\ - ;\ - - name: Build SeaweedFS ;\ - run: make ;\ - ;\ - - name: Run FUSE Integration Tests ;\ - run: | ;\ - cd test/fuse ;\ - make test ;\ - ;\ - - name: Upload test artifacts ;\ - if: failure() ;\ - uses: actions/upload-artifact@v3 ;\ - with: ;\ - name: test-logs ;\ - path: /tmp/seaweedfs_fuse_test_* ;\ -EOF - @echo "โœ… GitHub Actions workflow generated" - -# Performance profiling -profile: check-prereqs init-module - @echo "๐Ÿ“Š Running performance profiling..." - go test -v -timeout $(TEST_TIMEOUT) -cpuprofile cpu.prof -memprofile mem.prof -bench=. - @echo "๐Ÿ“Š Profiles generated: cpu.prof, mem.prof" - @echo "๐Ÿ“Š View with: go tool pprof cpu.prof" - -# Memory leak detection -test-memory: check-prereqs init-module - @echo "๐Ÿ” Running memory leak detection..." - go test -v -timeout $(TEST_TIMEOUT) -race -test.memprofile mem.prof - -# List available test functions -list-tests: - @echo "๐Ÿ“‹ Available test functions:" - @grep -r "^func Test" *.go | sed 's/.*func \(Test[^(]*\).*/ \1/' | sort - -# Get test status and statistics -test-stats: check-prereqs init-module - @echo "๐Ÿ“Š Test statistics:" - @go test -v ./... | grep -E "(PASS|FAIL|RUN)" | \ - awk '{ \ - if ($$1 == "RUN") tests++; \ - else if ($$1 == "PASS") passed++; \ - else if ($$1 == "FAIL") failed++; \ - } END { \ - printf " Total tests: %d\n", tests; \ - printf " Passed: %d\n", passed; \ - printf " Failed: %d\n", failed; \ - printf " Success rate: %.1f%%\n", (passed/tests)*100; \ - }' - -# Watch for file changes and run tests -watch: - @echo "๐Ÿ‘€ Watching for changes..." - @if command -v entr >/dev/null 2>&1; then \ - find . -name "*.go" | entr -c make test-smoke; \ - else \ - echo "โš ๏ธ 'entr' not found. Install with: apt-get install entr"; \ - echo " Falling back to manual test run"; \ - make test-smoke; \ - fi - -# Show help -help: - @echo "SeaweedFS FUSE Integration Testing" - @echo "==================================" - @echo "" - @echo "Prerequisites:" - @echo " make check-prereqs - Check all prerequisites" - @echo " make setup - Complete development setup" - @echo " make build - Build SeaweedFS binary" - @echo "" - @echo "Testing:" - @echo " make test - Run all tests" - @echo " make test-basic - Run basic file operations tests" - @echo " make test-directory - Run directory operations tests" - @echo " make test-concurrent - Run concurrent operations tests" - @echo " make test-stress - Run stress tests" - @echo " make test-smoke - Quick smoke test" - @echo " make test-coverage - Run tests with coverage report" - @echo "" - @echo "Debugging:" - @echo " make test-debug - Run tests with debug output" - @echo " make test-no-cleanup - Keep temp files for inspection" - @echo " make profile - Performance profiling" - @echo " make test-memory - Memory leak detection" - @echo "" - @echo "Quality:" - @echo " make validate - Validate test files compile" - @echo " make lint - Run linter" - @echo " make fmt - Format code" - @echo " make check - Run all quality checks" - @echo "" - @echo "Utilities:" - @echo " make clean - Clean up generated files" - @echo " make list-tests - List available test functions" - @echo " make test-stats - Show test statistics" - @echo " make watch - Watch files and run smoke tests" - @echo "" - @echo "Docker & CI:" - @echo " make test-docker - Run tests in Docker" - @echo " make generate-workflow - Generate GitHub Actions workflow" - -.PHONY: help check-prereqs check-binary check-fuse check-go build init-module \ - test test-coverage test-basic test-directory test-concurrent test-stress \ - test-large-files test-debug test-no-cleanup test-smoke benchmark validate \ - clean fmt lint check install-deps setup test-docker docker-build \ - generate-workflow profile test-memory list-tests test-stats watch \ No newline at end of file diff --git a/test/fuse_integration/README.md b/test/fuse_integration/README.md deleted file mode 100644 index 6f520eaf5..000000000 --- a/test/fuse_integration/README.md +++ /dev/null @@ -1,327 +0,0 @@ -# SeaweedFS FUSE Integration Testing Framework - -## Overview - -This directory contains a comprehensive integration testing framework for SeaweedFS FUSE operations. The current SeaweedFS FUSE tests are primarily performance-focused (using FIO) but lack comprehensive functional testing. This framework addresses those gaps. - -## โš ๏ธ Current Status - -**Note**: Due to Go module conflicts between this test framework and the parent SeaweedFS module, the full test suite currently requires manual setup. The framework files are provided as a foundation for comprehensive FUSE testing once the module structure is resolved. - -### Working Components -- โœ… Framework design and architecture (`framework.go`) -- โœ… Individual test file structure and compilation -- โœ… Test methodology and comprehensive coverage -- โœ… Documentation and usage examples -- โš ๏ธ Full test suite execution (requires Go module isolation) - -### Verified Working Test -```bash -cd test/fuse_integration -go test -v simple_test.go -``` - -## Current Testing Gaps Addressed - -### 1. **Limited Functional Coverage** -- **Current**: Only basic FIO performance tests -- **New**: Comprehensive testing of all FUSE operations (create, read, write, delete, mkdir, rmdir, permissions, etc.) - -### 2. **No Concurrency Testing** -- **Current**: Single-threaded performance tests -- **New**: Extensive concurrent operation tests, race condition detection, thread safety validation - -### 3. **Insufficient Error Handling** -- **Current**: Basic error scenarios -- **New**: Comprehensive error condition testing, edge cases, failure recovery - -### 4. **Missing Edge Cases** -- **Current**: Simple file operations -- **New**: Large files, sparse files, deep directory nesting, many small files, permission variations - -## Framework Architecture - -### Core Components - -1. **`framework.go`** - Test infrastructure and utilities - - `FuseTestFramework` - Main test management struct - - Automated SeaweedFS cluster setup/teardown - - FUSE mount/unmount management - - Helper functions for file operations and assertions - -2. **`basic_operations_test.go`** - Fundamental FUSE operations - - File create, read, write, delete - - File attributes and permissions - - Large file handling - - Sparse file operations - -3. **`directory_operations_test.go`** - Directory-specific tests - - Directory creation, deletion, listing - - Nested directory structures - - Directory permissions and rename operations - - Complex directory scenarios - -4. **`concurrent_operations_test.go`** - Concurrency and stress testing - - Concurrent file and directory operations - - Race condition detection - - High-frequency operations - - Stress testing scenarios - -## Key Features - -### Automated Test Environment -```go -framework := NewFuseTestFramework(t, DefaultTestConfig()) -defer framework.Cleanup() -require.NoError(t, framework.Setup(DefaultTestConfig())) -``` - -- **Automatic cluster setup**: Master, Volume, Filer servers -- **FUSE mounting**: Proper mount point management -- **Cleanup**: Automatic teardown of all resources - -### Configurable Test Parameters -```go -config := &TestConfig{ - Collection: "test", - Replication: "001", - ChunkSizeMB: 8, - CacheSizeMB: 200, - NumVolumes: 5, - EnableDebug: true, - MountOptions: []string{"-allowOthers"}, -} -``` - -### Rich Assertion Helpers -```go -framework.AssertFileExists("path/to/file") -framework.AssertFileContent("file.txt", expectedContent) -framework.AssertFileMode("script.sh", 0755) -framework.CreateTestFile("test.txt", []byte("content")) -``` - -## Test Categories - -### 1. Basic File Operations -- **Create/Read/Write/Delete**: Fundamental file operations -- **File Attributes**: Size, timestamps, permissions -- **Append Operations**: File appending behavior -- **Large Files**: Files exceeding chunk size limits -- **Sparse Files**: Non-contiguous file data - -### 2. Directory Operations -- **Directory Lifecycle**: Create, list, remove directories -- **Nested Structures**: Deep directory hierarchies -- **Directory Permissions**: Access control testing -- **Directory Rename**: Move operations -- **Complex Scenarios**: Many files, deep nesting - -### 3. Concurrent Operations -- **Multi-threaded Access**: Simultaneous file operations -- **Race Condition Detection**: Concurrent read/write scenarios -- **Directory Concurrency**: Parallel directory operations -- **Stress Testing**: High-frequency operations - -### 4. Error Handling & Edge Cases -- **Permission Denied**: Access control violations -- **Disk Full**: Storage limit scenarios -- **Network Issues**: Filer/Volume server failures -- **Invalid Operations**: Malformed requests -- **Recovery Testing**: Error recovery scenarios - -## Usage Examples - -### Basic Test Run -```bash -# Build SeaweedFS binary -make - -# Run all FUSE tests -cd test/fuse_integration -go test -v - -# Run specific test category -go test -v -run TestBasicFileOperations -go test -v -run TestConcurrentFileOperations -``` - -### Custom Configuration -```go -func TestCustomFUSE(t *testing.T) { - config := &TestConfig{ - ChunkSizeMB: 16, // Larger chunks - CacheSizeMB: 500, // More cache - EnableDebug: true, // Debug output - SkipCleanup: true, // Keep files for inspection - } - - framework := NewFuseTestFramework(t, config) - defer framework.Cleanup() - require.NoError(t, framework.Setup(config)) - - // Your tests here... -} -``` - -### Debugging Failed Tests -```go -config := &TestConfig{ - EnableDebug: true, // Enable verbose logging - SkipCleanup: true, // Keep temp files for inspection -} -``` - -## Advanced Features - -### Performance Benchmarking -```go -func BenchmarkLargeFileWrite(b *testing.B) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - require.NoError(t, framework.Setup(DefaultTestConfig())) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Benchmark file operations - } -} -``` - -### Custom Test Scenarios -```go -func TestCustomWorkload(t *testing.T) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - require.NoError(t, framework.Setup(DefaultTestConfig())) - - // Simulate specific application workload - simulateWebServerWorkload(t, framework) - simulateDatabaseWorkload(t, framework) - simulateBackupWorkload(t, framework) -} -``` - -## Integration with CI/CD - -### GitHub Actions Example -```yaml -name: FUSE Integration Tests -on: [push, pull_request] - -jobs: - fuse-tests: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.21' - - - name: Install FUSE - run: sudo apt-get install -y fuse - - - name: Build SeaweedFS - run: make - - - name: Run FUSE Tests - run: | - cd test/fuse_integration - go test -v -timeout 30m -``` - -### Docker Testing -```dockerfile -FROM golang:1.24 -RUN apt-get update && apt-get install -y fuse -COPY . /seaweedfs -WORKDIR /seaweedfs -RUN make -CMD ["go", "test", "-v", "./test/fuse_integration/..."] -``` - -## Comparison with Current Testing - -| Aspect | Current Tests | New Framework | -|--------|---------------|---------------| -| **Operations Covered** | Basic FIO read/write | All FUSE operations | -| **Concurrency** | Single-threaded | Multi-threaded stress tests | -| **Error Scenarios** | Limited | Comprehensive error handling | -| **File Types** | Regular files only | Large, sparse, many small files | -| **Directory Testing** | None | Complete directory operations | -| **Setup Complexity** | Manual Docker setup | Automated cluster management | -| **Test Isolation** | Shared environment | Isolated per-test environments | -| **Debugging** | Limited | Rich debugging and inspection | - -## Benefits - -### 1. **Comprehensive Coverage** -- Tests all FUSE operations supported by SeaweedFS -- Covers edge cases and error conditions -- Validates behavior under concurrent access - -### 2. **Reliable Testing** -- Isolated test environments prevent test interference -- Automatic cleanup ensures consistent state -- Deterministic test execution - -### 3. **Easy Maintenance** -- Clear test organization and naming -- Rich helper functions reduce code duplication -- Configurable test parameters for different scenarios - -### 4. **Real-world Validation** -- Tests actual FUSE filesystem behavior -- Validates integration between all SeaweedFS components -- Catches issues that unit tests might miss - -## Future Enhancements - -### 1. **Extended FUSE Features** -- Extended attributes (xattr) testing -- Symbolic link operations -- Hard link behavior -- File locking mechanisms - -### 2. **Performance Profiling** -- Built-in performance measurement -- Memory usage tracking -- Latency distribution analysis -- Throughput benchmarking - -### 3. **Fault Injection** -- Network partition simulation -- Server failure scenarios -- Disk full conditions -- Memory pressure testing - -### 4. **Integration Testing** -- Multi-filer configurations -- Cross-datacenter replication -- S3 API compatibility while mounted -- Backup/restore operations - -## Getting Started - -1. **Prerequisites** - ```bash - # Install FUSE - sudo apt-get install fuse # Ubuntu/Debian - brew install macfuse # macOS - - # Build SeaweedFS - make - ``` - -2. **Run Tests** - ```bash - cd test/fuse_integration - go test -v - ``` - -3. **View Results** - - Test output shows detailed operation results - - Failed tests include specific error information - - Debug mode provides verbose logging - -This framework represents a significant improvement in SeaweedFS FUSE testing capabilities, providing comprehensive coverage, real-world validation, and reliable automation that will help ensure the robustness and reliability of the FUSE implementation. \ No newline at end of file diff --git a/test/fuse_integration/concurrent_operations_test.go b/test/fuse_integration/concurrent_operations_test.go deleted file mode 100644 index 7a5cdd0d3..000000000 --- a/test/fuse_integration/concurrent_operations_test.go +++ /dev/null @@ -1,448 +0,0 @@ -package fuse_test - -import ( - "bytes" - "crypto/rand" - "fmt" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestConcurrentFileOperations tests concurrent file operations -func TestConcurrentFileOperations(t *testing.T) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - - require.NoError(t, framework.Setup(DefaultTestConfig())) - - t.Run("ConcurrentFileWrites", func(t *testing.T) { - testConcurrentFileWrites(t, framework) - }) - - t.Run("ConcurrentFileReads", func(t *testing.T) { - testConcurrentFileReads(t, framework) - }) - - t.Run("ConcurrentReadWrite", func(t *testing.T) { - testConcurrentReadWrite(t, framework) - }) - - t.Run("ConcurrentDirectoryOperations", func(t *testing.T) { - testConcurrentDirectoryOperations(t, framework) - }) - - t.Run("ConcurrentFileCreation", func(t *testing.T) { - testConcurrentFileCreation(t, framework) - }) -} - -// testConcurrentFileWrites tests multiple goroutines writing to different files -func testConcurrentFileWrites(t *testing.T, framework *FuseTestFramework) { - numWorkers := 10 - filesPerWorker := 5 - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - - // Function to collect errors safely - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - // Start concurrent workers - for worker := 0; worker < numWorkers; worker++ { - wg.Add(1) - go func(workerID int) { - defer wg.Done() - - for file := 0; file < filesPerWorker; file++ { - filename := fmt.Sprintf("worker_%d_file_%d.txt", workerID, file) - content := []byte(fmt.Sprintf("Worker %d, File %d - %s", workerID, file, time.Now().String())) - - mountPath := filepath.Join(framework.GetMountPoint(), filename) - if err := os.WriteFile(mountPath, content, 0644); err != nil { - addError(fmt.Errorf("worker %d file %d: %v", workerID, file, err)) - return - } - - // Verify file was written correctly - readContent, err := os.ReadFile(mountPath) - if err != nil { - addError(fmt.Errorf("worker %d file %d read: %v", workerID, file, err)) - return - } - - if !bytes.Equal(content, readContent) { - addError(fmt.Errorf("worker %d file %d: content mismatch", workerID, file)) - return - } - } - }(worker) - } - - wg.Wait() - - // Check for errors - require.Empty(t, errors, "Concurrent writes failed: %v", errors) - - // Verify all files exist and have correct content - for worker := 0; worker < numWorkers; worker++ { - for file := 0; file < filesPerWorker; file++ { - filename := fmt.Sprintf("worker_%d_file_%d.txt", worker, file) - framework.AssertFileExists(filename) - } - } -} - -// testConcurrentFileReads tests multiple goroutines reading from the same file -func testConcurrentFileReads(t *testing.T, framework *FuseTestFramework) { - // Create a test file - filename := "concurrent_read_test.txt" - testData := make([]byte, 1024*1024) // 1MB - _, err := rand.Read(testData) - require.NoError(t, err) - - framework.CreateTestFile(filename, testData) - - numReaders := 20 - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - // Start concurrent readers - for reader := 0; reader < numReaders; reader++ { - wg.Add(1) - go func(readerID int) { - defer wg.Done() - - mountPath := filepath.Join(framework.GetMountPoint(), filename) - - // Read multiple times - for i := 0; i < 3; i++ { - readData, err := os.ReadFile(mountPath) - if err != nil { - addError(fmt.Errorf("reader %d iteration %d: %v", readerID, i, err)) - return - } - - if !bytes.Equal(testData, readData) { - addError(fmt.Errorf("reader %d iteration %d: data mismatch", readerID, i)) - return - } - } - }(reader) - } - - wg.Wait() - require.Empty(t, errors, "Concurrent reads failed: %v", errors) -} - -// testConcurrentReadWrite tests simultaneous read and write operations -func testConcurrentReadWrite(t *testing.T, framework *FuseTestFramework) { - filename := "concurrent_rw_test.txt" - initialData := bytes.Repeat([]byte("INITIAL"), 1000) - framework.CreateTestFile(filename, initialData) - - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - mountPath := filepath.Join(framework.GetMountPoint(), filename) - - // Start readers - numReaders := 5 - for i := 0; i < numReaders; i++ { - wg.Add(1) - go func(readerID int) { - defer wg.Done() - - for j := 0; j < 10; j++ { - _, err := os.ReadFile(mountPath) - if err != nil { - addError(fmt.Errorf("reader %d: %v", readerID, err)) - return - } - time.Sleep(10 * time.Millisecond) - } - }(i) - } - - // Start writers - numWriters := 2 - for i := 0; i < numWriters; i++ { - wg.Add(1) - go func(writerID int) { - defer wg.Done() - - for j := 0; j < 5; j++ { - newData := bytes.Repeat([]byte(fmt.Sprintf("WRITER%d", writerID)), 1000) - err := os.WriteFile(mountPath, newData, 0644) - if err != nil { - addError(fmt.Errorf("writer %d: %v", writerID, err)) - return - } - time.Sleep(50 * time.Millisecond) - } - }(i) - } - - wg.Wait() - require.Empty(t, errors, "Concurrent read/write failed: %v", errors) - - // Verify file still exists and is readable - framework.AssertFileExists(filename) -} - -// testConcurrentDirectoryOperations tests concurrent directory operations -func testConcurrentDirectoryOperations(t *testing.T, framework *FuseTestFramework) { - numWorkers := 8 - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - // Each worker creates a directory tree - for worker := 0; worker < numWorkers; worker++ { - wg.Add(1) - go func(workerID int) { - defer wg.Done() - - // Create worker directory - workerDir := fmt.Sprintf("worker_%d", workerID) - mountPath := filepath.Join(framework.GetMountPoint(), workerDir) - - if err := os.Mkdir(mountPath, 0755); err != nil { - addError(fmt.Errorf("worker %d mkdir: %v", workerID, err)) - return - } - - // Create subdirectories and files - for i := 0; i < 5; i++ { - subDir := filepath.Join(mountPath, fmt.Sprintf("subdir_%d", i)) - if err := os.Mkdir(subDir, 0755); err != nil { - addError(fmt.Errorf("worker %d subdir %d: %v", workerID, i, err)) - return - } - - // Create file in subdirectory - testFile := filepath.Join(subDir, "test.txt") - content := []byte(fmt.Sprintf("Worker %d, Subdir %d", workerID, i)) - if err := os.WriteFile(testFile, content, 0644); err != nil { - addError(fmt.Errorf("worker %d file %d: %v", workerID, i, err)) - return - } - } - }(worker) - } - - wg.Wait() - require.Empty(t, errors, "Concurrent directory operations failed: %v", errors) - - // Verify all structures were created - for worker := 0; worker < numWorkers; worker++ { - workerDir := fmt.Sprintf("worker_%d", worker) - mountPath := filepath.Join(framework.GetMountPoint(), workerDir) - - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.True(t, info.IsDir()) - - // Check subdirectories - for i := 0; i < 5; i++ { - subDir := filepath.Join(mountPath, fmt.Sprintf("subdir_%d", i)) - info, err := os.Stat(subDir) - require.NoError(t, err) - assert.True(t, info.IsDir()) - - testFile := filepath.Join(subDir, "test.txt") - expectedContent := []byte(fmt.Sprintf("Worker %d, Subdir %d", worker, i)) - actualContent, err := os.ReadFile(testFile) - require.NoError(t, err) - assert.Equal(t, expectedContent, actualContent) - } - } -} - -// testConcurrentFileCreation tests concurrent creation of files in same directory -func testConcurrentFileCreation(t *testing.T, framework *FuseTestFramework) { - // Create test directory - testDir := "concurrent_creation" - framework.CreateTestDir(testDir) - - numWorkers := 15 - filesPerWorker := 10 - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - createdFiles := make(map[string]bool) - - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - addFile := func(filename string) { - mutex.Lock() - defer mutex.Unlock() - createdFiles[filename] = true - } - - // Create files concurrently - for worker := 0; worker < numWorkers; worker++ { - wg.Add(1) - go func(workerID int) { - defer wg.Done() - - for file := 0; file < filesPerWorker; file++ { - filename := fmt.Sprintf("file_%d_%d.txt", workerID, file) - relativePath := filepath.Join(testDir, filename) - mountPath := filepath.Join(framework.GetMountPoint(), relativePath) - - content := []byte(fmt.Sprintf("Worker %d, File %d, Time: %s", - workerID, file, time.Now().Format(time.RFC3339Nano))) - - if err := os.WriteFile(mountPath, content, 0644); err != nil { - addError(fmt.Errorf("worker %d file %d: %v", workerID, file, err)) - return - } - - addFile(filename) - } - }(worker) - } - - wg.Wait() - require.Empty(t, errors, "Concurrent file creation failed: %v", errors) - - // Verify all files were created - expectedCount := numWorkers * filesPerWorker - assert.Equal(t, expectedCount, len(createdFiles)) - - // Read directory and verify count - mountPath := filepath.Join(framework.GetMountPoint(), testDir) - entries, err := os.ReadDir(mountPath) - require.NoError(t, err) - assert.Equal(t, expectedCount, len(entries)) - - // Verify each file exists and has content - for filename := range createdFiles { - relativePath := filepath.Join(testDir, filename) - framework.AssertFileExists(relativePath) - } -} - -// TestStressOperations tests high-load scenarios -func TestStressOperations(t *testing.T) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - - require.NoError(t, framework.Setup(DefaultTestConfig())) - - t.Run("HighFrequencySmallWrites", func(t *testing.T) { - testHighFrequencySmallWrites(t, framework) - }) - - t.Run("ManySmallFiles", func(t *testing.T) { - testManySmallFiles(t, framework) - }) -} - -// testHighFrequencySmallWrites tests many small writes to the same file -func testHighFrequencySmallWrites(t *testing.T, framework *FuseTestFramework) { - filename := "high_freq_writes.txt" - mountPath := filepath.Join(framework.GetMountPoint(), filename) - - // Open file for writing - file, err := os.OpenFile(mountPath, os.O_CREATE|os.O_WRONLY, 0644) - require.NoError(t, err) - defer file.Close() - - // Perform many small writes - numWrites := 1000 - writeSize := 100 - - for i := 0; i < numWrites; i++ { - data := []byte(fmt.Sprintf("Write %04d: %s\n", i, bytes.Repeat([]byte("x"), writeSize-20))) - _, err := file.Write(data) - require.NoError(t, err) - } - file.Close() - - // Verify file size - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.Equal(t, totalSize, info.Size()) -} - -// testManySmallFiles tests creating many small files -func testManySmallFiles(t *testing.T, framework *FuseTestFramework) { - testDir := "many_small_files" - framework.CreateTestDir(testDir) - - numFiles := 500 - var wg sync.WaitGroup - var mutex sync.Mutex - errors := make([]error, 0) - - addError := func(err error) { - mutex.Lock() - defer mutex.Unlock() - errors = append(errors, err) - } - - // Create files in batches - batchSize := 50 - for batch := 0; batch < numFiles/batchSize; batch++ { - wg.Add(1) - go func(batchID int) { - defer wg.Done() - - for i := 0; i < batchSize; i++ { - fileNum := batchID*batchSize + i - filename := filepath.Join(testDir, fmt.Sprintf("small_file_%04d.txt", fileNum)) - content := []byte(fmt.Sprintf("File %d content", fileNum)) - - mountPath := filepath.Join(framework.GetMountPoint(), filename) - if err := os.WriteFile(mountPath, content, 0644); err != nil { - addError(fmt.Errorf("file %d: %v", fileNum, err)) - return - } - } - }(batch) - } - - wg.Wait() - require.Empty(t, errors, "Many small files creation failed: %v", errors) - - // Verify directory listing - mountPath := filepath.Join(framework.GetMountPoint(), testDir) - entries, err := os.ReadDir(mountPath) - require.NoError(t, err) - assert.Equal(t, numFiles, len(entries)) -} diff --git a/test/fuse_integration/directory_operations_test.go b/test/fuse_integration/directory_operations_test.go deleted file mode 100644 index 060a3a027..000000000 --- a/test/fuse_integration/directory_operations_test.go +++ /dev/null @@ -1,351 +0,0 @@ -package fuse_test - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestDirectoryOperations tests fundamental FUSE directory operations -func TestDirectoryOperations(t *testing.T) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - - require.NoError(t, framework.Setup(DefaultTestConfig())) - - t.Run("CreateDirectory", func(t *testing.T) { - testCreateDirectory(t, framework) - }) - - t.Run("RemoveDirectory", func(t *testing.T) { - testRemoveDirectory(t, framework) - }) - - t.Run("ReadDirectory", func(t *testing.T) { - testReadDirectory(t, framework) - }) - - t.Run("NestedDirectories", func(t *testing.T) { - testNestedDirectories(t, framework) - }) - - t.Run("DirectoryPermissions", func(t *testing.T) { - testDirectoryPermissions(t, framework) - }) - - t.Run("DirectoryRename", func(t *testing.T) { - testDirectoryRename(t, framework) - }) -} - -// testCreateDirectory tests creating directories -func testCreateDirectory(t *testing.T, framework *FuseTestFramework) { - dirName := "test_directory" - mountPath := filepath.Join(framework.GetMountPoint(), dirName) - - // Create directory - require.NoError(t, os.Mkdir(mountPath, 0755)) - - // Verify directory exists - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.True(t, info.IsDir()) - assert.Equal(t, os.FileMode(0755), info.Mode().Perm()) -} - -// testRemoveDirectory tests removing directories -func testRemoveDirectory(t *testing.T, framework *FuseTestFramework) { - dirName := "test_remove_dir" - mountPath := filepath.Join(framework.GetMountPoint(), dirName) - - // Create directory - require.NoError(t, os.Mkdir(mountPath, 0755)) - - // Verify it exists - _, err := os.Stat(mountPath) - require.NoError(t, err) - - // Remove directory - require.NoError(t, os.Remove(mountPath)) - - // Verify it's gone - _, err = os.Stat(mountPath) - require.True(t, os.IsNotExist(err)) -} - -// testReadDirectory tests reading directory contents -func testReadDirectory(t *testing.T, framework *FuseTestFramework) { - testDir := "test_read_dir" - framework.CreateTestDir(testDir) - - // Create various types of entries - entries := []string{ - "file1.txt", - "file2.log", - "subdir1", - "subdir2", - "script.sh", - } - - // Create files and subdirectories - for _, entry := range entries { - entryPath := filepath.Join(testDir, entry) - if entry == "subdir1" || entry == "subdir2" { - framework.CreateTestDir(entryPath) - } else { - framework.CreateTestFile(entryPath, []byte("content of "+entry)) - } - } - - // Read directory - mountPath := filepath.Join(framework.GetMountPoint(), testDir) - dirEntries, err := os.ReadDir(mountPath) - require.NoError(t, err) - - // Verify all entries are present - var actualNames []string - for _, entry := range dirEntries { - actualNames = append(actualNames, entry.Name()) - } - - sort.Strings(entries) - sort.Strings(actualNames) - assert.Equal(t, entries, actualNames) - - // Verify entry types - for _, entry := range dirEntries { - if entry.Name() == "subdir1" || entry.Name() == "subdir2" { - assert.True(t, entry.IsDir()) - } else { - assert.False(t, entry.IsDir()) - } - } -} - -// testNestedDirectories tests operations on nested directory structures -func testNestedDirectories(t *testing.T, framework *FuseTestFramework) { - // Create nested structure: parent/child1/grandchild/child2 - structure := []string{ - "parent", - "parent/child1", - "parent/child1/grandchild", - "parent/child2", - } - - // Create directories - for _, dir := range structure { - framework.CreateTestDir(dir) - } - - // Create files at various levels - files := map[string][]byte{ - "parent/root_file.txt": []byte("root level"), - "parent/child1/child_file.txt": []byte("child level"), - "parent/child1/grandchild/deep_file.txt": []byte("deep level"), - "parent/child2/another_file.txt": []byte("another child"), - } - - for path, content := range files { - framework.CreateTestFile(path, content) - } - - // Verify structure by walking - mountPath := filepath.Join(framework.GetMountPoint(), "parent") - var foundPaths []string - - err := filepath.Walk(mountPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Get relative path from mount point - relPath, _ := filepath.Rel(framework.GetMountPoint(), path) - foundPaths = append(foundPaths, relPath) - return nil - }) - require.NoError(t, err) - - // Verify all expected paths were found - expectedPaths := []string{ - "parent", - "parent/child1", - "parent/child1/grandchild", - "parent/child1/grandchild/deep_file.txt", - "parent/child1/child_file.txt", - "parent/child2", - "parent/child2/another_file.txt", - "parent/root_file.txt", - } - - sort.Strings(expectedPaths) - sort.Strings(foundPaths) - assert.Equal(t, expectedPaths, foundPaths) - - // Verify file contents - for path, expectedContent := range files { - framework.AssertFileContent(path, expectedContent) - } -} - -// testDirectoryPermissions tests directory permission operations -func testDirectoryPermissions(t *testing.T, framework *FuseTestFramework) { - dirName := "test_permissions_dir" - mountPath := filepath.Join(framework.GetMountPoint(), dirName) - - // Create directory with specific permissions - require.NoError(t, os.Mkdir(mountPath, 0700)) - - // Check initial permissions - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.Equal(t, os.FileMode(0700), info.Mode().Perm()) - - // Change permissions - require.NoError(t, os.Chmod(mountPath, 0755)) - - // Verify permission change - info, err = os.Stat(mountPath) - require.NoError(t, err) - assert.Equal(t, os.FileMode(0755), info.Mode().Perm()) -} - -// testDirectoryRename tests renaming directories -func testDirectoryRename(t *testing.T, framework *FuseTestFramework) { - oldName := "old_directory" - newName := "new_directory" - - // Create directory with content - framework.CreateTestDir(oldName) - framework.CreateTestFile(filepath.Join(oldName, "test_file.txt"), []byte("test content")) - - oldPath := filepath.Join(framework.GetMountPoint(), oldName) - newPath := filepath.Join(framework.GetMountPoint(), newName) - - // Rename directory - require.NoError(t, os.Rename(oldPath, newPath)) - - // Verify old path doesn't exist - _, err := os.Stat(oldPath) - require.True(t, os.IsNotExist(err)) - - // Verify new path exists and is a directory - info, err := os.Stat(newPath) - require.NoError(t, err) - assert.True(t, info.IsDir()) - - // Verify content still exists - framework.AssertFileContent(filepath.Join(newName, "test_file.txt"), []byte("test content")) -} - -// TestComplexDirectoryOperations tests more complex directory scenarios -func TestComplexDirectoryOperations(t *testing.T) { - framework := NewFuseTestFramework(t, DefaultTestConfig()) - defer framework.Cleanup() - - require.NoError(t, framework.Setup(DefaultTestConfig())) - - t.Run("RemoveNonEmptyDirectory", func(t *testing.T) { - testRemoveNonEmptyDirectory(t, framework) - }) - - t.Run("DirectoryWithManyFiles", func(t *testing.T) { - testDirectoryWithManyFiles(t, framework) - }) - - t.Run("DeepDirectoryNesting", func(t *testing.T) { - testDeepDirectoryNesting(t, framework) - }) -} - -// testRemoveNonEmptyDirectory tests behavior when trying to remove non-empty directories -func testRemoveNonEmptyDirectory(t *testing.T, framework *FuseTestFramework) { - dirName := "non_empty_dir" - framework.CreateTestDir(dirName) - - // Add content to directory - framework.CreateTestFile(filepath.Join(dirName, "file.txt"), []byte("content")) - framework.CreateTestDir(filepath.Join(dirName, "subdir")) - - mountPath := filepath.Join(framework.GetMountPoint(), dirName) - - // Try to remove non-empty directory (should fail) - err := os.Remove(mountPath) - require.Error(t, err) - - // Directory should still exist - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.True(t, info.IsDir()) - - // Remove with RemoveAll should work - require.NoError(t, os.RemoveAll(mountPath)) - - // Verify it's gone - _, err = os.Stat(mountPath) - require.True(t, os.IsNotExist(err)) -} - -// testDirectoryWithManyFiles tests directories with large numbers of files -func testDirectoryWithManyFiles(t *testing.T, framework *FuseTestFramework) { - dirName := "many_files_dir" - framework.CreateTestDir(dirName) - - // Create many files - numFiles := 100 - for i := 0; i < numFiles; i++ { - filename := filepath.Join(dirName, fmt.Sprintf("file_%03d.txt", i)) - content := []byte(fmt.Sprintf("Content of file %d", i)) - framework.CreateTestFile(filename, content) - } - - // Read directory - mountPath := filepath.Join(framework.GetMountPoint(), dirName) - entries, err := os.ReadDir(mountPath) - require.NoError(t, err) - - // Verify count - assert.Equal(t, numFiles, len(entries)) - - // Verify some random files - testIndices := []int{0, 10, 50, 99} - for _, i := range testIndices { - filename := filepath.Join(dirName, fmt.Sprintf("file_%03d.txt", i)) - expectedContent := []byte(fmt.Sprintf("Content of file %d", i)) - framework.AssertFileContent(filename, expectedContent) - } -} - -// testDeepDirectoryNesting tests very deep directory structures -func testDeepDirectoryNesting(t *testing.T, framework *FuseTestFramework) { - // Create deep nesting (20 levels) - depth := 20 - currentPath := "" - - for i := 0; i < depth; i++ { - if i == 0 { - currentPath = fmt.Sprintf("level_%02d", i) - } else { - currentPath = filepath.Join(currentPath, fmt.Sprintf("level_%02d", i)) - } - framework.CreateTestDir(currentPath) - } - - // Create a file at the deepest level - deepFile := filepath.Join(currentPath, "deep_file.txt") - deepContent := []byte("This is very deep!") - framework.CreateTestFile(deepFile, deepContent) - - // Verify file exists and has correct content - framework.AssertFileContent(deepFile, deepContent) - - // Verify we can navigate the full structure - mountPath := filepath.Join(framework.GetMountPoint(), currentPath) - info, err := os.Stat(mountPath) - require.NoError(t, err) - assert.True(t, info.IsDir()) -} diff --git a/test/fuse_integration/framework.go b/test/fuse_integration/framework.go deleted file mode 100644 index 9cff1badb..000000000 --- a/test/fuse_integration/framework.go +++ /dev/null @@ -1,384 +0,0 @@ -package fuse_test - -import ( - "fmt" - "io/fs" - "os" - "os/exec" - "path/filepath" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// FuseTestFramework provides utilities for FUSE integration testing -type FuseTestFramework struct { - t *testing.T - tempDir string - mountPoint string - dataDir string - masterProcess *os.Process - volumeProcess *os.Process - filerProcess *os.Process - mountProcess *os.Process - masterAddr string - volumeAddr string - filerAddr string - weedBinary string - isSetup bool -} - -// TestConfig holds configuration for FUSE tests -type TestConfig struct { - Collection string - Replication string - ChunkSizeMB int - CacheSizeMB int - NumVolumes int - EnableDebug bool - MountOptions []string - SkipCleanup bool // for debugging failed tests -} - -// DefaultTestConfig returns a default configuration for FUSE tests -func DefaultTestConfig() *TestConfig { - return &TestConfig{ - Collection: "", - Replication: "000", - ChunkSizeMB: 4, - CacheSizeMB: 100, - NumVolumes: 3, - EnableDebug: false, - MountOptions: []string{}, - SkipCleanup: false, - } -} - -// NewFuseTestFramework creates a new FUSE testing framework -func NewFuseTestFramework(t *testing.T, config *TestConfig) *FuseTestFramework { - if config == nil { - config = DefaultTestConfig() - } - - tempDir, err := os.MkdirTemp("", "seaweedfs_fuse_test_") - require.NoError(t, err) - - return &FuseTestFramework{ - t: t, - tempDir: tempDir, - mountPoint: filepath.Join(tempDir, "mount"), - dataDir: filepath.Join(tempDir, "data"), - masterAddr: "127.0.0.1:19333", - volumeAddr: "127.0.0.1:18080", - filerAddr: "127.0.0.1:18888", - weedBinary: findWeedBinary(), - isSetup: false, - } -} - -// Setup starts SeaweedFS cluster and mounts FUSE filesystem -func (f *FuseTestFramework) Setup(config *TestConfig) error { - if f.isSetup { - return fmt.Errorf("framework already setup") - } - - // Create directories - dirs := []string{f.mountPoint, f.dataDir} - for _, dir := range dirs { - if err := os.MkdirAll(dir, 0755); err != nil { - return fmt.Errorf("failed to create directory %s: %v", dir, err) - } - } - - // Start master - if err := f.startMaster(config); err != nil { - return fmt.Errorf("failed to start master: %v", err) - } - - // Wait for master to be ready - if err := f.waitForService(f.masterAddr, 30*time.Second); err != nil { - return fmt.Errorf("master not ready: %v", err) - } - - // Start volume servers - if err := f.startVolumeServers(config); err != nil { - return fmt.Errorf("failed to start volume servers: %v", err) - } - - // Wait for volume server to be ready - if err := f.waitForService(f.volumeAddr, 30*time.Second); err != nil { - return fmt.Errorf("volume server not ready: %v", err) - } - - // Start filer - if err := f.startFiler(config); err != nil { - return fmt.Errorf("failed to start filer: %v", err) - } - - // Wait for filer to be ready - if err := f.waitForService(f.filerAddr, 30*time.Second); err != nil { - return fmt.Errorf("filer not ready: %v", err) - } - - // Mount FUSE filesystem - if err := f.mountFuse(config); err != nil { - return fmt.Errorf("failed to mount FUSE: %v", err) - } - - // Wait for mount to be ready - if err := f.waitForMount(30 * time.Second); err != nil { - return fmt.Errorf("FUSE mount not ready: %v", err) - } - - f.isSetup = true - return nil -} - -// Cleanup stops all processes and removes temporary files -func (f *FuseTestFramework) Cleanup() { - if f.mountProcess != nil { - f.unmountFuse() - } - - // Stop processes in reverse order - processes := []*os.Process{f.mountProcess, f.filerProcess, f.volumeProcess, f.masterProcess} - for _, proc := range processes { - if proc != nil { - proc.Signal(syscall.SIGTERM) - proc.Wait() - } - } - - // Remove temp directory - if !DefaultTestConfig().SkipCleanup { - os.RemoveAll(f.tempDir) - } -} - -// GetMountPoint returns the FUSE mount point path -func (f *FuseTestFramework) GetMountPoint() string { - return f.mountPoint -} - -// GetFilerAddr returns the filer address -func (f *FuseTestFramework) GetFilerAddr() string { - return f.filerAddr -} - -// startMaster starts the SeaweedFS master server -func (f *FuseTestFramework) startMaster(config *TestConfig) error { - args := []string{ - "master", - "-ip=127.0.0.1", - "-port=19333", - "-mdir=" + filepath.Join(f.dataDir, "master"), - "-raftBootstrap", - } - if config.EnableDebug { - args = append(args, "-v=4") - } - - cmd := exec.Command(f.weedBinary, args...) - cmd.Dir = f.tempDir - if err := cmd.Start(); err != nil { - return err - } - f.masterProcess = cmd.Process - return nil -} - -// startVolumeServers starts SeaweedFS volume servers -func (f *FuseTestFramework) startVolumeServers(config *TestConfig) error { - args := []string{ - "volume", - "-mserver=" + f.masterAddr, - "-ip=127.0.0.1", - "-port=18080", - "-dir=" + filepath.Join(f.dataDir, "volume"), - fmt.Sprintf("-max=%d", config.NumVolumes), - } - if config.EnableDebug { - args = append(args, "-v=4") - } - - cmd := exec.Command(f.weedBinary, args...) - cmd.Dir = f.tempDir - if err := cmd.Start(); err != nil { - return err - } - f.volumeProcess = cmd.Process - return nil -} - -// startFiler starts the SeaweedFS filer server -func (f *FuseTestFramework) startFiler(config *TestConfig) error { - args := []string{ - "filer", - "-master=" + f.masterAddr, - "-ip=127.0.0.1", - "-port=18888", - } - if config.EnableDebug { - args = append(args, "-v=4") - } - - cmd := exec.Command(f.weedBinary, args...) - cmd.Dir = f.tempDir - if err := cmd.Start(); err != nil { - return err - } - f.filerProcess = cmd.Process - return nil -} - -// mountFuse mounts the SeaweedFS FUSE filesystem -func (f *FuseTestFramework) mountFuse(config *TestConfig) error { - args := []string{ - "mount", - "-filer=" + f.filerAddr, - "-dir=" + f.mountPoint, - "-filer.path=/", - "-dirAutoCreate", - } - - if config.Collection != "" { - args = append(args, "-collection="+config.Collection) - } - if config.Replication != "" { - args = append(args, "-replication="+config.Replication) - } - if config.ChunkSizeMB > 0 { - args = append(args, fmt.Sprintf("-chunkSizeLimitMB=%d", config.ChunkSizeMB)) - } - if config.CacheSizeMB > 0 { - args = append(args, fmt.Sprintf("-cacheSizeMB=%d", config.CacheSizeMB)) - } - if config.EnableDebug { - args = append(args, "-v=4") - } - - args = append(args, config.MountOptions...) - - cmd := exec.Command(f.weedBinary, args...) - cmd.Dir = f.tempDir - if err := cmd.Start(); err != nil { - return err - } - f.mountProcess = cmd.Process - return nil -} - -// unmountFuse unmounts the FUSE filesystem -func (f *FuseTestFramework) unmountFuse() error { - if f.mountProcess != nil { - f.mountProcess.Signal(syscall.SIGTERM) - f.mountProcess.Wait() - f.mountProcess = nil - } - - // Also try system unmount as backup - exec.Command("umount", f.mountPoint).Run() - return nil -} - -// waitForService waits for a service to be available -func (f *FuseTestFramework) waitForService(addr string, timeout time.Duration) error { - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - conn, err := net.DialTimeout("tcp", addr, 1*time.Second) - if err == nil { - conn.Close() - return nil - } - time.Sleep(100 * time.Millisecond) - } - return fmt.Errorf("service at %s not ready within timeout", addr) -} - -// waitForMount waits for the FUSE mount to be ready -func (f *FuseTestFramework) waitForMount(timeout time.Duration) error { - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - // Check if mount point is accessible - if _, err := os.Stat(f.mountPoint); err == nil { - // Try to list directory - if _, err := os.ReadDir(f.mountPoint); err == nil { - return nil - } - } - time.Sleep(100 * time.Millisecond) - } - return fmt.Errorf("mount point not ready within timeout") -} - -// findWeedBinary locates the weed binary -func findWeedBinary() string { - // Try different possible locations - candidates := []string{ - "./weed", - "../weed", - "../../weed", - "weed", // in PATH - } - - for _, candidate := range candidates { - if _, err := exec.LookPath(candidate); err == nil { - return candidate - } - if _, err := os.Stat(candidate); err == nil { - abs, _ := filepath.Abs(candidate) - return abs - } - } - - // Default fallback - return "weed" -} - -// Helper functions for test assertions - -// AssertFileExists checks if a file exists in the mount point -func (f *FuseTestFramework) AssertFileExists(relativePath string) { - fullPath := filepath.Join(f.mountPoint, relativePath) - _, err := os.Stat(fullPath) - require.NoError(f.t, err, "file should exist: %s", relativePath) -} - -// AssertFileNotExists checks if a file does not exist in the mount point -func (f *FuseTestFramework) AssertFileNotExists(relativePath string) { - fullPath := filepath.Join(f.mountPoint, relativePath) - _, err := os.Stat(fullPath) - require.True(f.t, os.IsNotExist(err), "file should not exist: %s", relativePath) -} - -// AssertFileContent checks if a file has expected content -func (f *FuseTestFramework) AssertFileContent(relativePath string, expectedContent []byte) { - fullPath := filepath.Join(f.mountPoint, relativePath) - actualContent, err := os.ReadFile(fullPath) - require.NoError(f.t, err, "failed to read file: %s", relativePath) - require.Equal(f.t, expectedContent, actualContent, "file content mismatch: %s", relativePath) -} - -// AssertFileMode checks if a file has expected permissions -func (f *FuseTestFramework) AssertFileMode(relativePath string, expectedMode fs.FileMode) { - fullPath := filepath.Join(f.mountPoint, relativePath) - info, err := os.Stat(fullPath) - require.NoError(f.t, err, "failed to stat file: %s", relativePath) - require.Equal(f.t, expectedMode, info.Mode(), "file mode mismatch: %s", relativePath) -} - -// CreateTestFile creates a test file with specified content -func (f *FuseTestFramework) CreateTestFile(relativePath string, content []byte) { - fullPath := filepath.Join(f.mountPoint, relativePath) - dir := filepath.Dir(fullPath) - require.NoError(f.t, os.MkdirAll(dir, 0755), "failed to create directory: %s", dir) - require.NoError(f.t, os.WriteFile(fullPath, content, 0644), "failed to create file: %s", relativePath) -} - -// CreateTestDir creates a test directory -func (f *FuseTestFramework) CreateTestDir(relativePath string) { - fullPath := filepath.Join(f.mountPoint, relativePath) - require.NoError(f.t, os.MkdirAll(fullPath, 0755), "failed to create directory: %s", relativePath) -} diff --git a/test/fuse_integration/go.mod b/test/fuse_integration/go.mod deleted file mode 100644 index 47246cdd8..000000000 --- a/test/fuse_integration/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module seaweedfs-fuse-tests - -go 1.21 - -require github.com/stretchr/testify v1.8.4 - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/fuse_integration/go.sum b/test/fuse_integration/go.sum deleted file mode 100644 index fa4b6e682..000000000 --- a/test/fuse_integration/go.sum +++ /dev/null @@ -1,10 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/fuse_integration/minimal_test.go b/test/fuse_integration/minimal_test.go deleted file mode 100644 index 8d849fe77..000000000 --- a/test/fuse_integration/minimal_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package fuse_test - -import "testing" - -func TestMinimal(t *testing.T) { - t.Log("minimal test") -} diff --git a/test/fuse_integration/simple_test.go b/test/fuse_integration/simple_test.go deleted file mode 100644 index a82157181..000000000 --- a/test/fuse_integration/simple_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package fuse_test - -import ( - "testing" -) - -// Simple test to verify the package structure is correct -func TestPackageStructure(t *testing.T) { - t.Log("FUSE integration test package structure is correct") - - // This test verifies that we can compile and run tests - // in the fuse_test package without package name conflicts - - t.Log("Package name verification passed") -} diff --git a/test/fuse_integration/working_demo_test.go b/test/fuse_integration/working_demo_test.go deleted file mode 100644 index da5d8c50d..000000000 --- a/test/fuse_integration/working_demo_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package fuse_test - -import ( - "os" - "path/filepath" - "testing" - "time" -) - -// ============================================================================ -// IMPORTANT: This file contains a STANDALONE demonstration of the FUSE testing -// framework that works around Go module conflicts between the main framework -// and the SeaweedFS parent module. -// -// PURPOSE: -// - Provides a working demonstration of framework capabilities for CI/CD -// - Simulates FUSE operations using local filesystem (not actual FUSE mounts) -// - Validates the testing approach and framework design -// - Enables CI integration while module conflicts are resolved -// -// DUPLICATION RATIONALE: -// - The full framework (framework.go) has Go module conflicts with parent project -// - This standalone version proves the concept works without those conflicts -// - Once module issues are resolved, this can be removed or simplified -// -// TODO: Remove this file once framework.go module conflicts are resolved -// ============================================================================ - -// DemoTestConfig represents test configuration for the standalone demo -// Note: This duplicates TestConfig from framework.go due to module conflicts -type DemoTestConfig struct { - ChunkSizeMB int - Replication string - TestTimeout time.Duration -} - -// DefaultDemoTestConfig returns default test configuration for demo -func DefaultDemoTestConfig() DemoTestConfig { - return DemoTestConfig{ - ChunkSizeMB: 8, - Replication: "000", - TestTimeout: 30 * time.Minute, - } -} - -// DemoFuseTestFramework represents the standalone testing framework -// Note: This simulates FUSE operations using local filesystem for demonstration -type DemoFuseTestFramework struct { - t *testing.T - config DemoTestConfig - mountPath string - cleanup []func() -} - -// NewDemoFuseTestFramework creates a new demo test framework instance -func NewDemoFuseTestFramework(t *testing.T, config DemoTestConfig) *DemoFuseTestFramework { - return &DemoFuseTestFramework{ - t: t, - config: config, - cleanup: make([]func(), 0), - } -} - -// CreateTestFile creates a test file with given content -func (f *DemoFuseTestFramework) CreateTestFile(filename string, content []byte) { - if f.mountPath == "" { - f.mountPath = "/tmp/fuse_test_mount" - } - - fullPath := filepath.Join(f.mountPath, filename) - - // Ensure directory exists - os.MkdirAll(filepath.Dir(fullPath), 0755) - - // Write file (simulated - in real implementation would use FUSE mount) - err := os.WriteFile(fullPath, content, 0644) - if err != nil { - f.t.Fatalf("Failed to create test file %s: %v", filename, err) - } -} - -// AssertFileExists checks if file exists -func (f *DemoFuseTestFramework) AssertFileExists(filename string) { - fullPath := filepath.Join(f.mountPath, filename) - if _, err := os.Stat(fullPath); os.IsNotExist(err) { - f.t.Fatalf("Expected file %s to exist, but it doesn't", filename) - } -} - -// AssertFileContent checks file content matches expected -func (f *DemoFuseTestFramework) AssertFileContent(filename string, expected []byte) { - fullPath := filepath.Join(f.mountPath, filename) - actual, err := os.ReadFile(fullPath) - if err != nil { - f.t.Fatalf("Failed to read file %s: %v", filename, err) - } - - if string(actual) != string(expected) { - f.t.Fatalf("File content mismatch for %s.\nExpected: %q\nActual: %q", - filename, string(expected), string(actual)) - } -} - -// Cleanup performs test cleanup -func (f *DemoFuseTestFramework) Cleanup() { - for i := len(f.cleanup) - 1; i >= 0; i-- { - f.cleanup[i]() - } - - // Clean up test mount directory - if f.mountPath != "" { - os.RemoveAll(f.mountPath) - } -} - -// TestFrameworkDemo demonstrates the FUSE testing framework capabilities -// NOTE: This is a STANDALONE DEMONSTRATION that simulates FUSE operations -// using local filesystem instead of actual FUSE mounts. It exists to prove -// the framework concept works while Go module conflicts are resolved. -func TestFrameworkDemo(t *testing.T) { - t.Log("SeaweedFS FUSE Integration Testing Framework Demo") - t.Log("This demo simulates FUSE operations using local filesystem") - - // Initialize demo framework - framework := NewDemoFuseTestFramework(t, DefaultDemoTestConfig()) - defer framework.Cleanup() - - t.Run("ConfigurationValidation", func(t *testing.T) { - config := DefaultDemoTestConfig() - if config.ChunkSizeMB != 8 { - t.Errorf("Expected chunk size 8MB, got %d", config.ChunkSizeMB) - } - if config.Replication != "000" { - t.Errorf("Expected replication '000', got %s", config.Replication) - } - t.Log("Configuration validation passed") - }) - - t.Run("BasicFileOperations", func(t *testing.T) { - // Test file creation and reading - content := []byte("Hello, SeaweedFS FUSE Testing!") - filename := "demo_test.txt" - - t.Log("Creating test file...") - framework.CreateTestFile(filename, content) - - t.Log("Verifying file exists...") - framework.AssertFileExists(filename) - - t.Log("Verifying file content...") - framework.AssertFileContent(filename, content) - - t.Log("Basic file operations test passed") - }) - - t.Run("LargeFileSimulation", func(t *testing.T) { - // Simulate large file testing - largeContent := make([]byte, 1024*1024) // 1MB - for i := range largeContent { - largeContent[i] = byte(i % 256) - } - - filename := "large_file_demo.dat" - - t.Log("Creating large test file (1MB)...") - framework.CreateTestFile(filename, largeContent) - - t.Log("Verifying large file...") - framework.AssertFileExists(filename) - framework.AssertFileContent(filename, largeContent) - - t.Log("Large file operations test passed") - }) - - t.Run("ConcurrencySimulation", func(t *testing.T) { - // Simulate concurrent operations - numFiles := 5 - - t.Logf("Creating %d files concurrently...", numFiles) - - for i := 0; i < numFiles; i++ { - filename := filepath.Join("concurrent", "file_"+string(rune('A'+i))+".txt") - content := []byte("Concurrent file content " + string(rune('A'+i))) - - framework.CreateTestFile(filename, content) - framework.AssertFileExists(filename) - } - - t.Log("Concurrent operations simulation passed") - }) - - t.Log("Framework demonstration completed successfully!") - t.Log("This DEMO shows the planned FUSE testing capabilities:") - t.Log(" โ€ข Automated cluster setup/teardown (simulated)") - t.Log(" โ€ข File operations testing (local filesystem simulation)") - t.Log(" โ€ข Directory operations testing (planned)") - t.Log(" โ€ข Large file handling (demonstrated)") - t.Log(" โ€ข Concurrent operations testing (simulated)") - t.Log(" โ€ข Error scenario validation (planned)") - t.Log(" โ€ข Performance validation (planned)") - t.Log("Full framework available in framework.go (pending module resolution)") -} diff --git a/test/kafka/Dockerfile.kafka-gateway b/test/kafka/Dockerfile.kafka-gateway deleted file mode 100644 index c2f975f6d..000000000 --- a/test/kafka/Dockerfile.kafka-gateway +++ /dev/null @@ -1,56 +0,0 @@ -# Dockerfile for Kafka Gateway Integration Testing -FROM golang:1.24-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git make gcc musl-dev sqlite-dev - -# Set working directory -WORKDIR /app - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY . . - -# Build the weed binary with Kafka gateway support -RUN CGO_ENABLED=1 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o weed ./weed - -# Final stage -FROM alpine:latest - -# Install runtime dependencies -RUN apk --no-cache add ca-certificates wget curl netcat-openbsd sqlite - -# Create non-root user -RUN addgroup -g 1000 seaweedfs && \ - adduser -D -s /bin/sh -u 1000 -G seaweedfs seaweedfs - -# Set working directory -WORKDIR /usr/bin - -# Copy binary from builder -COPY --from=builder /app/weed . - -# Create data directory -RUN mkdir -p /data && chown seaweedfs:seaweedfs /data - -# Copy startup script -COPY test/kafka/scripts/kafka-gateway-start.sh /usr/bin/kafka-gateway-start.sh -RUN chmod +x /usr/bin/kafka-gateway-start.sh - -# Switch to non-root user -USER seaweedfs - -# Expose Kafka protocol port and pprof port -EXPOSE 9093 10093 - -# Health check -HEALTHCHECK --interval=10s --timeout=5s --start-period=30s --retries=3 \ - CMD nc -z localhost 9093 || exit 1 - -# Default command -CMD ["/usr/bin/kafka-gateway-start.sh"] diff --git a/test/kafka/Dockerfile.seaweedfs b/test/kafka/Dockerfile.seaweedfs deleted file mode 100644 index bd2983fe8..000000000 --- a/test/kafka/Dockerfile.seaweedfs +++ /dev/null @@ -1,25 +0,0 @@ -# Dockerfile for building SeaweedFS components from the current workspace -FROM golang:1.24-alpine AS builder - -RUN apk add --no-cache git make gcc musl-dev sqlite-dev - -WORKDIR /app - -COPY go.mod go.sum ./ -RUN go mod download - -COPY . . - -RUN CGO_ENABLED=1 GOOS=linux go build -o /out/weed ./weed - -FROM alpine:latest - -RUN apk --no-cache add ca-certificates curl wget netcat-openbsd sqlite - -COPY --from=builder /out/weed /usr/bin/weed - -WORKDIR /data - -EXPOSE 9333 19333 8080 18080 8888 18888 16777 17777 - -ENTRYPOINT ["/usr/bin/weed"] diff --git a/test/kafka/Dockerfile.test-setup b/test/kafka/Dockerfile.test-setup deleted file mode 100644 index 16652f269..000000000 --- a/test/kafka/Dockerfile.test-setup +++ /dev/null @@ -1,29 +0,0 @@ -# Dockerfile for Kafka Integration Test Setup -FROM golang:1.24-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git make gcc musl-dev - -# Copy repository -WORKDIR /app -COPY . . - -# Build test setup utility from the test module -WORKDIR /app/test/kafka -RUN go mod download -RUN CGO_ENABLED=1 GOOS=linux go build -o /out/test-setup ./cmd/setup - -# Final stage -FROM alpine:latest - -# Install runtime dependencies -RUN apk --no-cache add ca-certificates curl jq netcat-openbsd - -# Copy binary from builder -COPY --from=builder /out/test-setup /usr/bin/test-setup - -# Make executable -RUN chmod +x /usr/bin/test-setup - -# Default command -CMD ["/usr/bin/test-setup"] diff --git a/test/kafka/Makefile b/test/kafka/Makefile deleted file mode 100644 index 00f7efbf7..000000000 --- a/test/kafka/Makefile +++ /dev/null @@ -1,206 +0,0 @@ -# Kafka Integration Testing Makefile - Refactored -# This replaces the existing Makefile with better organization - -# Configuration -ifndef DOCKER_COMPOSE -DOCKER_COMPOSE := $(if $(shell command -v docker-compose 2>/dev/null),docker-compose,docker compose) -endif -TEST_TIMEOUT ?= 10m -KAFKA_BOOTSTRAP_SERVERS ?= localhost:9092 -KAFKA_GATEWAY_URL ?= localhost:9093 -SCHEMA_REGISTRY_URL ?= http://localhost:8081 - -# Colors for output -BLUE := \033[36m -GREEN := \033[32m -YELLOW := \033[33m -RED := \033[31m -NC := \033[0m # No Color - -.PHONY: help setup test clean logs status - -help: ## Show this help message - @echo "$(BLUE)SeaweedFS Kafka Integration Testing - Refactored$(NC)" - @echo "" - @echo "Available targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " $(GREEN)%-20s$(NC) %s\n", $$1, $$2}' $(MAKEFILE_LIST) - -# Environment Setup -setup: ## Set up test environment (Kafka + Schema Registry + SeaweedFS) - @echo "$(YELLOW)Setting up Kafka integration test environment...$(NC)" - @$(DOCKER_COMPOSE) up -d - @echo "$(BLUE)Waiting for all services to be ready...$(NC)" - @./scripts/wait-for-services.sh - @echo "$(GREEN)Test environment ready!$(NC)" - -setup-schemas: setup ## Set up test environment and register schemas - @echo "$(YELLOW)Registering test schemas...$(NC)" - @$(DOCKER_COMPOSE) --profile setup run --rm test-setup - @echo "$(GREEN)Schemas registered!$(NC)" - -# Test Categories -test: test-unit test-integration test-e2e ## Run all tests - -test-unit: ## Run unit tests - @echo "$(YELLOW)Running unit tests...$(NC)" - @go test -v -timeout=$(TEST_TIMEOUT) ./unit/... - -test-integration: ## Run integration tests - @echo "$(YELLOW)Running integration tests...$(NC)" - @go test -v -timeout=$(TEST_TIMEOUT) ./integration/... - -test-e2e: setup-schemas ## Run end-to-end tests - @echo "$(YELLOW)Running end-to-end tests...$(NC)" - @KAFKA_BOOTSTRAP_SERVERS=$(KAFKA_BOOTSTRAP_SERVERS) \ - KAFKA_GATEWAY_URL=$(KAFKA_GATEWAY_URL) \ - SCHEMA_REGISTRY_URL=$(SCHEMA_REGISTRY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) ./e2e/... - -test-docker: setup-schemas ## Run Docker integration tests - @echo "$(YELLOW)Running Docker integration tests...$(NC)" - @KAFKA_BOOTSTRAP_SERVERS=$(KAFKA_BOOTSTRAP_SERVERS) \ - KAFKA_GATEWAY_URL=$(KAFKA_GATEWAY_URL) \ - SCHEMA_REGISTRY_URL=$(SCHEMA_REGISTRY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) ./integration/ -run Docker - -# Schema-specific tests -test-schema: setup-schemas ## Run schema registry integration tests - @echo "$(YELLOW)Running schema registry integration tests...$(NC)" - @SCHEMA_REGISTRY_URL=$(SCHEMA_REGISTRY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) ./integration/ -run Schema - -# Client-specific tests -test-sarama: setup-schemas ## Run Sarama client tests - @echo "$(YELLOW)Running Sarama client tests...$(NC)" - @KAFKA_BOOTSTRAP_SERVERS=$(KAFKA_BOOTSTRAP_SERVERS) \ - KAFKA_GATEWAY_URL=$(KAFKA_GATEWAY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) ./integration/ -run Sarama - -test-kafka-go: setup-schemas ## Run kafka-go client tests - @echo "$(YELLOW)Running kafka-go client tests...$(NC)" - @KAFKA_BOOTSTRAP_SERVERS=$(KAFKA_BOOTSTRAP_SERVERS) \ - KAFKA_GATEWAY_URL=$(KAFKA_GATEWAY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) ./integration/ -run KafkaGo - -# Performance tests -test-performance: setup-schemas ## Run performance benchmarks - @echo "$(YELLOW)Running Kafka performance benchmarks...$(NC)" - @KAFKA_BOOTSTRAP_SERVERS=$(KAFKA_BOOTSTRAP_SERVERS) \ - KAFKA_GATEWAY_URL=$(KAFKA_GATEWAY_URL) \ - SCHEMA_REGISTRY_URL=$(SCHEMA_REGISTRY_URL) \ - go test -v -timeout=$(TEST_TIMEOUT) -bench=. ./... - -# Development targets -dev-kafka: ## Start only Kafka ecosystem for development - @$(DOCKER_COMPOSE) up -d zookeeper kafka schema-registry - @sleep 20 - @$(DOCKER_COMPOSE) --profile setup run --rm test-setup - -dev-seaweedfs: ## Start only SeaweedFS for development - @$(DOCKER_COMPOSE) up -d seaweedfs-master seaweedfs-volume seaweedfs-filer seaweedfs-mq-broker seaweedfs-mq-agent - -dev-gateway: dev-seaweedfs ## Start Kafka Gateway for development - @$(DOCKER_COMPOSE) up -d kafka-gateway - -dev-test: dev-kafka ## Quick test with just Kafka ecosystem - @SCHEMA_REGISTRY_URL=$(SCHEMA_REGISTRY_URL) go test -v -timeout=30s ./unit/... - -# Cleanup -clean: ## Clean up test environment - @echo "$(YELLOW)Cleaning up test environment...$(NC)" - @$(DOCKER_COMPOSE) down -v --remove-orphans - @docker system prune -f - @echo "$(GREEN)Environment cleaned up!$(NC)" - -# Monitoring and debugging -logs: ## Show logs from all services - @$(DOCKER_COMPOSE) logs --tail=50 -f - -logs-kafka: ## Show Kafka logs - @$(DOCKER_COMPOSE) logs --tail=100 -f kafka - -logs-schema-registry: ## Show Schema Registry logs - @$(DOCKER_COMPOSE) logs --tail=100 -f schema-registry - -logs-seaweedfs: ## Show SeaweedFS logs - @$(DOCKER_COMPOSE) logs --tail=100 -f seaweedfs-master seaweedfs-volume seaweedfs-filer seaweedfs-mq-broker seaweedfs-mq-agent - -logs-gateway: ## Show Kafka Gateway logs - @$(DOCKER_COMPOSE) logs --tail=100 -f kafka-gateway - -status: ## Show status of all services - @echo "$(BLUE)Service Status:$(NC)" - @$(DOCKER_COMPOSE) ps - @echo "" - @echo "$(BLUE)Kafka Status:$(NC)" - @curl -s http://localhost:9092 > /dev/null && echo "Kafka accessible" || echo "Kafka not accessible" - @echo "" - @echo "$(BLUE)Schema Registry Status:$(NC)" - @curl -s $(SCHEMA_REGISTRY_URL)/subjects > /dev/null && echo "Schema Registry accessible" || echo "Schema Registry not accessible" - @echo "" - @echo "$(BLUE)Kafka Gateway Status:$(NC)" - @nc -z localhost 9093 && echo "Kafka Gateway accessible" || echo "Kafka Gateway not accessible" - -debug: ## Debug test environment - @echo "$(BLUE)Debug Information:$(NC)" - @echo "Kafka Bootstrap Servers: $(KAFKA_BOOTSTRAP_SERVERS)" - @echo "Schema Registry URL: $(SCHEMA_REGISTRY_URL)" - @echo "Kafka Gateway URL: $(KAFKA_GATEWAY_URL)" - @echo "" - @echo "Docker Compose Status:" - @$(DOCKER_COMPOSE) ps - @echo "" - @echo "Network connectivity:" - @docker network ls | grep kafka-integration-test || echo "No Kafka test network found" - @echo "" - @echo "Schema Registry subjects:" - @curl -s $(SCHEMA_REGISTRY_URL)/subjects 2>/dev/null || echo "Schema Registry not accessible" - -# Utility targets -install-deps: ## Install required dependencies - @echo "$(YELLOW)Installing test dependencies...$(NC)" - @which docker > /dev/null || (echo "$(RED)Docker not found$(NC)" && exit 1) - @which docker-compose > /dev/null || (echo "$(RED)Docker Compose not found$(NC)" && exit 1) - @which curl > /dev/null || (echo "$(RED)curl not found$(NC)" && exit 1) - @which nc > /dev/null || (echo "$(RED)netcat not found$(NC)" && exit 1) - @echo "$(GREEN)All dependencies available$(NC)" - -check-env: ## Check test environment setup - @echo "$(BLUE)Environment Check:$(NC)" - @echo "KAFKA_BOOTSTRAP_SERVERS: $(KAFKA_BOOTSTRAP_SERVERS)" - @echo "SCHEMA_REGISTRY_URL: $(SCHEMA_REGISTRY_URL)" - @echo "KAFKA_GATEWAY_URL: $(KAFKA_GATEWAY_URL)" - @echo "TEST_TIMEOUT: $(TEST_TIMEOUT)" - @make install-deps - -# CI targets -ci-test: ## Run tests in CI environment - @echo "$(YELLOW)Running CI tests...$(NC)" - @make setup-schemas - @make test-unit - @make test-integration - @make clean - -ci-e2e: ## Run end-to-end tests in CI - @echo "$(YELLOW)Running CI end-to-end tests...$(NC)" - @make test-e2e - @make clean - -# Interactive targets -shell-kafka: ## Open shell in Kafka container - @$(DOCKER_COMPOSE) exec kafka bash - -shell-gateway: ## Open shell in Kafka Gateway container - @$(DOCKER_COMPOSE) exec kafka-gateway sh - -topics: ## List Kafka topics - @$(DOCKER_COMPOSE) exec kafka kafka-topics --list --bootstrap-server localhost:29092 - -create-topic: ## Create a test topic (usage: make create-topic TOPIC=my-topic) - @$(DOCKER_COMPOSE) exec kafka kafka-topics --create --topic $(TOPIC) --bootstrap-server localhost:29092 --partitions 3 --replication-factor 1 - -produce: ## Produce test messages (usage: make produce TOPIC=my-topic) - @$(DOCKER_COMPOSE) exec kafka kafka-console-producer --bootstrap-server localhost:29092 --topic $(TOPIC) - -consume: ## Consume messages (usage: make consume TOPIC=my-topic) - @$(DOCKER_COMPOSE) exec kafka kafka-console-consumer --bootstrap-server localhost:29092 --topic $(TOPIC) --from-beginning diff --git a/test/kafka/README.md b/test/kafka/README.md deleted file mode 100644 index a39855ed6..000000000 --- a/test/kafka/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# Kafka Gateway Tests with SMQ Integration - -This directory contains tests for the SeaweedFS Kafka Gateway with full SeaweedMQ (SMQ) integration. - -## Test Types - -### **Unit Tests** (`./unit/`) -- Basic gateway functionality -- Protocol compatibility -- No SeaweedFS backend required -- Uses mock handlers - -### **Integration Tests** (`./integration/`) -- **Mock Mode** (default): Uses in-memory handlers for protocol testing -- **SMQ Mode** (with `SEAWEEDFS_MASTERS`): Uses real SeaweedFS backend for full integration - -### **E2E Tests** (`./e2e/`) -- End-to-end workflows -- Automatically detects SMQ availability -- Falls back to mock mode if SMQ unavailable - -## Running Tests Locally - -### Quick Protocol Testing (Mock Mode) -```bash -# Run all integration tests with mock backend -cd test/kafka -go test ./integration/... - -# Run specific test -go test -v ./integration/ -run TestClientCompatibility -``` - -### Full Integration Testing (SMQ Mode) -Requires running SeaweedFS instance: - -1. **Start SeaweedFS with MQ support:** -```bash -# Terminal 1: Start SeaweedFS server -weed server -ip="127.0.0.1" -ip.bind="0.0.0.0" -dir=/tmp/seaweedfs-data -master.port=9333 -volume.port=8081 -filer.port=8888 -filer=true - -# Terminal 2: Start MQ broker -weed mq.broker -master="127.0.0.1:9333" -ip="127.0.0.1" -port=17777 -``` - -2. **Run tests with SMQ backend:** -```bash -cd test/kafka -SEAWEEDFS_MASTERS=127.0.0.1:9333 go test ./integration/... - -# Run specific SMQ integration tests -SEAWEEDFS_MASTERS=127.0.0.1:9333 go test -v ./integration/ -run TestSMQIntegration -``` - -### Test Broker Startup -If you're having broker startup issues: -```bash -# Debug broker startup locally -./scripts/test-broker-startup.sh -``` - -## CI/CD Integration - -### GitHub Actions Jobs - -1. **Unit Tests** - Fast protocol tests with mock backend -2. **Integration Tests** - Mock mode by default -3. **E2E Tests (with SMQ)** - Full SeaweedFS + MQ broker stack -4. **Client Compatibility (with SMQ)** - Tests different Kafka clients against real backend -5. **Consumer Group Tests (with SMQ)** - Tests consumer group persistence -6. **SMQ Integration Tests** - Dedicated SMQ-specific functionality tests - -### What Gets Tested with SMQ - -When `SEAWEEDFS_MASTERS` is available, tests exercise: - -- **Real Message Persistence** - Messages stored in SeaweedFS volumes -- **Offset Persistence** - Consumer group offsets stored in SeaweedFS filer -- **Topic Persistence** - Topic metadata persisted in SeaweedFS filer -- **Consumer Group Coordination** - Distributed coordinator assignment -- **Cross-Client Compatibility** - Sarama, kafka-go with real backend -- **Broker Discovery** - Gateway discovers MQ brokers via masters - -## Test Infrastructure - -### `testutil.NewGatewayTestServerWithSMQ(t, mode)` - -Smart gateway creation that automatically: -- Detects SMQ availability via `SEAWEEDFS_MASTERS` -- Uses production handler when available -- Falls back to mock when unavailable -- Provides timeout protection against hanging - -**Modes:** -- `SMQRequired` - Skip test if SMQ unavailable -- `SMQAvailable` - Use SMQ if available, otherwise mock -- `SMQUnavailable` - Always use mock - -### Timeout Protection - -Gateway creation includes timeout protection to prevent CI hanging: -- 20 second timeout for `SMQRequired` mode -- 15 second timeout for `SMQAvailable` mode -- Clear error messages when broker discovery fails - -## Debugging Failed Tests - -### CI Logs to Check -1. **"SeaweedFS master is up"** - Master started successfully -2. **"SeaweedFS filer is up"** - Filer ready -3. **"SeaweedFS MQ broker is up"** - Broker started successfully -4. **Broker/Server logs** - Shown on broker startup failure - -### Local Debugging -1. Run `./scripts/test-broker-startup.sh` to test broker startup -2. Check logs at `/tmp/weed-*.log` -3. Test individual components: - ```bash - # Test master - curl http://127.0.0.1:9333/cluster/status - - # Test filer - curl http://127.0.0.1:8888/status - - # Test broker - nc -z 127.0.0.1 17777 - ``` - -### Common Issues -- **Broker fails to start**: Check filer is ready before starting broker -- **Gateway timeout**: Broker discovery fails, check broker is accessible -- **Test hangs**: Timeout protection not working, reduce timeout values - -## Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Kafka Client โ”‚โ”€โ”€โ”€โ–ถโ”‚ Kafka Gateway โ”‚โ”€โ”€โ”€โ–ถโ”‚ SeaweedMQ Brokerโ”‚ -โ”‚ (Sarama, โ”‚ โ”‚ (Protocol โ”‚ โ”‚ (Message โ”‚ -โ”‚ kafka-go) โ”‚ โ”‚ Handler) โ”‚ โ”‚ Persistence) โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ - โ–ผ โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ SeaweedFS Filer โ”‚ โ”‚ SeaweedFS Masterโ”‚ - โ”‚ (Offset Storage)โ”‚ โ”‚ (Coordination) โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ - โ–ผ โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ SeaweedFS Volumes โ”‚ - โ”‚ (Message Storage) โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -This architecture ensures full integration testing of the entire Kafka โ†’ SeaweedFS message path. \ No newline at end of file diff --git a/test/kafka/cmd/setup/main.go b/test/kafka/cmd/setup/main.go deleted file mode 100644 index bfb190748..000000000 --- a/test/kafka/cmd/setup/main.go +++ /dev/null @@ -1,172 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "log" - "net" - "net/http" - "os" - "time" -) - -// Schema represents a schema registry schema -type Schema struct { - Subject string `json:"subject"` - Version int `json:"version"` - Schema string `json:"schema"` -} - -// SchemaResponse represents the response from schema registry -type SchemaResponse struct { - ID int `json:"id"` -} - -func main() { - log.Println("Setting up Kafka integration test environment...") - - kafkaBootstrap := getEnv("KAFKA_BOOTSTRAP_SERVERS", "kafka:29092") - schemaRegistryURL := getEnv("SCHEMA_REGISTRY_URL", "http://schema-registry:8081") - kafkaGatewayURL := getEnv("KAFKA_GATEWAY_URL", "kafka-gateway:9093") - - log.Printf("Kafka Bootstrap Servers: %s", kafkaBootstrap) - log.Printf("Schema Registry URL: %s", schemaRegistryURL) - log.Printf("Kafka Gateway URL: %s", kafkaGatewayURL) - - // Wait for services to be ready - waitForHTTPService("Schema Registry", schemaRegistryURL+"/subjects") - waitForTCPService("Kafka Gateway", kafkaGatewayURL) // TCP connectivity check for Kafka protocol - - // Register test schemas - if err := registerSchemas(schemaRegistryURL); err != nil { - log.Fatalf("Failed to register schemas: %v", err) - } - - log.Println("Test environment setup completed successfully!") -} - -func getEnv(key, defaultValue string) string { - if value := os.Getenv(key); value != "" { - return value - } - return defaultValue -} - -func waitForHTTPService(name, url string) { - log.Printf("Waiting for %s to be ready...", name) - for i := 0; i < 60; i++ { // Wait up to 60 seconds - resp, err := http.Get(url) - if err == nil && resp.StatusCode < 400 { - resp.Body.Close() - log.Printf("%s is ready", name) - return - } - if resp != nil { - resp.Body.Close() - } - time.Sleep(1 * time.Second) - } - log.Fatalf("%s is not ready after 60 seconds", name) -} - -func waitForTCPService(name, address string) { - log.Printf("Waiting for %s to be ready...", name) - for i := 0; i < 60; i++ { // Wait up to 60 seconds - conn, err := net.DialTimeout("tcp", address, 2*time.Second) - if err == nil { - conn.Close() - log.Printf("%s is ready", name) - return - } - time.Sleep(1 * time.Second) - } - log.Fatalf("%s is not ready after 60 seconds", name) -} - -func registerSchemas(registryURL string) error { - schemas := []Schema{ - { - Subject: "user-value", - Schema: `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null} - ] - }`, - }, - { - Subject: "user-event-value", - Schema: `{ - "type": "record", - "name": "UserEvent", - "fields": [ - {"name": "userId", "type": "int"}, - {"name": "eventType", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "data", "type": ["null", "string"], "default": null} - ] - }`, - }, - { - Subject: "log-entry-value", - Schema: `{ - "type": "record", - "name": "LogEntry", - "fields": [ - {"name": "level", "type": "string"}, - {"name": "message", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "service", "type": "string"}, - {"name": "metadata", "type": {"type": "map", "values": "string"}} - ] - }`, - }, - } - - for _, schema := range schemas { - if err := registerSchema(registryURL, schema); err != nil { - return fmt.Errorf("failed to register schema %s: %w", schema.Subject, err) - } - log.Printf("Registered schema: %s", schema.Subject) - } - - return nil -} - -func registerSchema(registryURL string, schema Schema) error { - url := fmt.Sprintf("%s/subjects/%s/versions", registryURL, schema.Subject) - - payload := map[string]interface{}{ - "schema": schema.Schema, - } - - jsonData, err := json.Marshal(payload) - if err != nil { - return err - } - - client := &http.Client{Timeout: 10 * time.Second} - resp, err := client.Post(url, "application/vnd.schemaregistry.v1+json", bytes.NewBuffer(jsonData)) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) - } - - var response SchemaResponse - if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - return err - } - - log.Printf("Schema %s registered with ID: %d", schema.Subject, response.ID) - return nil -} diff --git a/test/kafka/docker-compose.yml b/test/kafka/docker-compose.yml deleted file mode 100644 index 73e70cbe0..000000000 --- a/test/kafka/docker-compose.yml +++ /dev/null @@ -1,325 +0,0 @@ -x-seaweedfs-build: &seaweedfs-build - build: - context: ../.. - dockerfile: test/kafka/Dockerfile.seaweedfs - image: kafka-seaweedfs-dev - -services: - # Zookeeper for Kafka - zookeeper: - image: confluentinc/cp-zookeeper:7.4.0 - container_name: kafka-zookeeper - ports: - - "2181:2181" - environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_TICK_TIME: 2000 - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "2181"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - networks: - - kafka-test-net - - # Kafka Broker - kafka: - image: confluentinc/cp-kafka:7.4.0 - container_name: kafka-broker - ports: - - "9092:9092" - - "29092:29092" - depends_on: - zookeeper: - condition: service_healthy - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_NUM_PARTITIONS: 3 - KAFKA_DEFAULT_REPLICATION_FACTOR: 1 - healthcheck: - test: ["CMD", "kafka-broker-api-versions", "--bootstrap-server", "localhost:29092"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 30s - networks: - - kafka-test-net - - # Schema Registry - schema-registry: - image: confluentinc/cp-schema-registry:7.4.0 - container_name: kafka-schema-registry - ports: - - "8081:8081" - depends_on: - kafka: - condition: service_healthy - environment: - SCHEMA_REGISTRY_HOST_NAME: schema-registry - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: kafka:29092 - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas - SCHEMA_REGISTRY_DEBUG: "true" - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/subjects"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 20s - networks: - - kafka-test-net - - # SeaweedFS Master - seaweedfs-master: - <<: *seaweedfs-build - container_name: seaweedfs-master - ports: - - "9333:9333" - - "19333:19333" # gRPC port - command: - - master - - -ip=seaweedfs-master - - -port=9333 - - -port.grpc=19333 - - -volumeSizeLimitMB=1024 - - -defaultReplication=000 - volumes: - - seaweedfs-master-data:/data - healthcheck: - test: ["CMD-SHELL", "wget --quiet --tries=1 --spider http://seaweedfs-master:9333/cluster/status || curl -sf http://seaweedfs-master:9333/cluster/status"] - interval: 10s - timeout: 5s - retries: 10 - start_period: 20s - networks: - - kafka-test-net - - # SeaweedFS Volume Server - seaweedfs-volume: - <<: *seaweedfs-build - container_name: seaweedfs-volume - ports: - - "8080:8080" - - "18080:18080" # gRPC port - command: - - volume - - -mserver=seaweedfs-master:9333 - - -ip=seaweedfs-volume - - -port=8080 - - -port.grpc=18080 - - -publicUrl=seaweedfs-volume:8080 - - -preStopSeconds=1 - depends_on: - seaweedfs-master: - condition: service_healthy - volumes: - - seaweedfs-volume-data:/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://seaweedfs-volume:8080/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - networks: - - kafka-test-net - - # SeaweedFS Filer - seaweedfs-filer: - <<: *seaweedfs-build - container_name: seaweedfs-filer - ports: - - "8888:8888" - - "18888:18888" # gRPC port - command: - - filer - - -master=seaweedfs-master:9333 - - -ip=seaweedfs-filer - - -port=8888 - - -port.grpc=18888 - depends_on: - seaweedfs-master: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - volumes: - - seaweedfs-filer-data:/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://seaweedfs-filer:8888/"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 15s - networks: - - kafka-test-net - - # SeaweedFS MQ Broker - seaweedfs-mq-broker: - <<: *seaweedfs-build - container_name: seaweedfs-mq-broker - ports: - - "17777:17777" # MQ Broker port - - "18777:18777" # pprof profiling port - command: - - mq.broker - - -master=seaweedfs-master:9333 - - -ip=seaweedfs-mq-broker - - -port=17777 - - -port.pprof=18777 - depends_on: - seaweedfs-filer: - condition: service_healthy - volumes: - - seaweedfs-mq-data:/data - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "17777"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 20s - networks: - - kafka-test-net - - # SeaweedFS MQ Agent - seaweedfs-mq-agent: - <<: *seaweedfs-build - container_name: seaweedfs-mq-agent - ports: - - "16777:16777" # MQ Agent port - command: - - mq.agent - - -broker=seaweedfs-mq-broker:17777 - - -ip=0.0.0.0 - - -port=16777 - depends_on: - seaweedfs-mq-broker: - condition: service_healthy - volumes: - - seaweedfs-mq-data:/data - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "16777"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 25s - networks: - - kafka-test-net - - # Kafka Gateway (SeaweedFS with Kafka protocol) - kafka-gateway: - build: - context: ../.. # Build from project root - dockerfile: test/kafka/Dockerfile.kafka-gateway - container_name: kafka-gateway - ports: - - "9093:9093" # Kafka protocol port - - "10093:10093" # pprof profiling port - depends_on: - seaweedfs-mq-agent: - condition: service_healthy - schema-registry: - condition: service_healthy - environment: - - SEAWEEDFS_MASTERS=seaweedfs-master:9333 - - SEAWEEDFS_FILER_GROUP= - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - KAFKA_PORT=9093 - - PPROF_PORT=10093 - volumes: - - kafka-gateway-data:/data - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "9093"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 30s - networks: - - kafka-test-net - - # Test Data Setup Service - test-setup: - build: - context: ../.. - dockerfile: test/kafka/Dockerfile.test-setup - container_name: kafka-test-setup - depends_on: - kafka: - condition: service_healthy - schema-registry: - condition: service_healthy - kafka-gateway: - condition: service_healthy - environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - KAFKA_GATEWAY_URL=kafka-gateway:9093 - networks: - - kafka-test-net - restart: "no" # Run once to set up test data - profiles: - - setup # Only start when explicitly requested - - # Kafka Producer for Testing - kafka-producer: - image: confluentinc/cp-kafka:7.4.0 - container_name: kafka-producer - depends_on: - kafka: - condition: service_healthy - schema-registry: - condition: service_healthy - environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - networks: - - kafka-test-net - profiles: - - producer # Only start when explicitly requested - command: > - sh -c " - echo 'Creating test topics...'; - kafka-topics --create --topic test-topic --bootstrap-server kafka:29092 --partitions 3 --replication-factor 1 --if-not-exists; - kafka-topics --create --topic avro-topic --bootstrap-server kafka:29092 --partitions 3 --replication-factor 1 --if-not-exists; - kafka-topics --create --topic schema-test --bootstrap-server kafka:29092 --partitions 1 --replication-factor 1 --if-not-exists; - echo 'Topics created successfully'; - kafka-topics --list --bootstrap-server kafka:29092; - " - - # Kafka Consumer for Testing - kafka-consumer: - image: confluentinc/cp-kafka:7.4.0 - container_name: kafka-consumer - depends_on: - kafka: - condition: service_healthy - environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka:29092 - networks: - - kafka-test-net - profiles: - - consumer # Only start when explicitly requested - command: > - kafka-console-consumer - --bootstrap-server kafka:29092 - --topic test-topic - --from-beginning - --max-messages 10 - -volumes: - seaweedfs-master-data: - seaweedfs-volume-data: - seaweedfs-filer-data: - seaweedfs-mq-data: - kafka-gateway-data: - -networks: - kafka-test-net: - driver: bridge - name: kafka-integration-test diff --git a/test/kafka/e2e/comprehensive_test.go b/test/kafka/e2e/comprehensive_test.go deleted file mode 100644 index 739ccd3a3..000000000 --- a/test/kafka/e2e/comprehensive_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package e2e - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestComprehensiveE2E tests complete end-to-end workflows -// This test will use SMQ backend if SEAWEEDFS_MASTERS is available, otherwise mock -func TestComprehensiveE2E(t *testing.T) { - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQAvailable) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - - // Log which backend we're using - if gateway.IsSMQMode() { - t.Logf("Running comprehensive E2E tests with SMQ backend") - } else { - t.Logf("Running comprehensive E2E tests with mock backend") - } - - // Create topics for different test scenarios - topics := []string{ - testutil.GenerateUniqueTopicName("e2e-kafka-go"), - testutil.GenerateUniqueTopicName("e2e-sarama"), - testutil.GenerateUniqueTopicName("e2e-mixed"), - } - gateway.AddTestTopics(topics...) - - t.Run("KafkaGo_to_KafkaGo", func(t *testing.T) { - testKafkaGoToKafkaGo(t, addr, topics[0]) - }) - - t.Run("Sarama_to_Sarama", func(t *testing.T) { - testSaramaToSarama(t, addr, topics[1]) - }) - - t.Run("KafkaGo_to_Sarama", func(t *testing.T) { - testKafkaGoToSarama(t, addr, topics[2]) - }) - - t.Run("Sarama_to_KafkaGo", func(t *testing.T) { - testSaramaToKafkaGo(t, addr, topics[2]) - }) -} - -func testKafkaGoToKafkaGo(t *testing.T, addr, topic string) { - client := testutil.NewKafkaGoClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Generate test messages - messages := msgGen.GenerateKafkaGoMessages(2) - - // Produce with kafka-go - err := client.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "kafka-go produce failed") - - // Consume with kafka-go - consumed, err := client.ConsumeMessages(topic, len(messages)) - testutil.AssertNoError(t, err, "kafka-go consume failed") - - // Validate message content - err = testutil.ValidateKafkaGoMessageContent(messages, consumed) - testutil.AssertNoError(t, err, "Message content validation failed") - - t.Logf("kafka-go to kafka-go test PASSED") -} - -func testSaramaToSarama(t *testing.T, addr, topic string) { - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Generate test messages - messages := msgGen.GenerateStringMessages(2) - - // Produce with Sarama - err := client.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "Sarama produce failed") - - // Consume with Sarama - consumed, err := client.ConsumeMessages(topic, 0, len(messages)) - testutil.AssertNoError(t, err, "Sarama consume failed") - - // Validate message content - err = testutil.ValidateMessageContent(messages, consumed) - testutil.AssertNoError(t, err, "Message content validation failed") - - t.Logf("Sarama to Sarama test PASSED") -} - -func testKafkaGoToSarama(t *testing.T, addr, topic string) { - kafkaGoClient := testutil.NewKafkaGoClient(t, addr) - saramaClient := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Produce with kafka-go - messages := msgGen.GenerateKafkaGoMessages(2) - err := kafkaGoClient.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "kafka-go produce failed") - - // Consume with Sarama - consumed, err := saramaClient.ConsumeMessages(topic, 0, len(messages)) - testutil.AssertNoError(t, err, "Sarama consume failed") - - // Validate that we got the expected number of messages - testutil.AssertEqual(t, len(messages), len(consumed), "Message count mismatch") - - t.Logf("kafka-go to Sarama test PASSED") -} - -func testSaramaToKafkaGo(t *testing.T, addr, topic string) { - kafkaGoClient := testutil.NewKafkaGoClient(t, addr) - saramaClient := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Produce with Sarama - messages := msgGen.GenerateStringMessages(2) - err := saramaClient.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "Sarama produce failed") - - // Consume with kafka-go - consumed, err := kafkaGoClient.ConsumeMessages(topic, len(messages)) - testutil.AssertNoError(t, err, "kafka-go consume failed") - - // Validate that we got the expected number of messages - testutil.AssertEqual(t, len(messages), len(consumed), "Message count mismatch") - - t.Logf("Sarama to kafka-go test PASSED") -} diff --git a/test/kafka/e2e/offset_management_test.go b/test/kafka/e2e/offset_management_test.go deleted file mode 100644 index 11bbdc5ea..000000000 --- a/test/kafka/e2e/offset_management_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package e2e - -import ( - "os" - "testing" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestOffsetManagement tests end-to-end offset management scenarios -// This test will use SMQ backend if SEAWEEDFS_MASTERS is available, otherwise mock -func TestOffsetManagement(t *testing.T) { - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQAvailable) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - - // If schema registry is configured, ensure gateway is in schema mode and log - if v := os.Getenv("SCHEMA_REGISTRY_URL"); v != "" { - t.Logf("Schema Registry detected at %s - running offset tests in schematized mode", v) - } - - // Log which backend we're using - if gateway.IsSMQMode() { - t.Logf("Running offset management tests with SMQ backend - offsets will be persisted") - } else { - t.Logf("Running offset management tests with mock backend - offsets are in-memory only") - } - - topic := testutil.GenerateUniqueTopicName("offset-management") - groupID := testutil.GenerateUniqueGroupID("offset-test-group") - - gateway.AddTestTopic(topic) - - t.Run("BasicOffsetCommitFetch", func(t *testing.T) { - testBasicOffsetCommitFetch(t, addr, topic, groupID) - }) - - t.Run("ConsumerGroupResumption", func(t *testing.T) { - testConsumerGroupResumption(t, addr, topic, groupID+"2") - }) -} - -func testBasicOffsetCommitFetch(t *testing.T, addr, topic, groupID string) { - client := testutil.NewKafkaGoClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Produce test messages - if url := os.Getenv("SCHEMA_REGISTRY_URL"); url != "" { - if id, err := testutil.EnsureValueSchema(t, url, topic); err == nil { - t.Logf("Ensured value schema id=%d for subject %s-value", id, topic) - } else { - t.Logf("Schema registration failed (non-fatal for test): %v", err) - } - } - messages := msgGen.GenerateKafkaGoMessages(5) - err := client.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "Failed to produce offset test messages") - - // Phase 1: Consume first 3 messages and commit offsets - t.Logf("=== Phase 1: Consuming first 3 messages ===") - consumed1, err := client.ConsumeWithGroup(topic, groupID, 3) - testutil.AssertNoError(t, err, "Failed to consume first batch") - testutil.AssertEqual(t, 3, len(consumed1), "Should consume exactly 3 messages") - - // Phase 2: Create new consumer with same group ID - should resume from committed offset - t.Logf("=== Phase 2: Resuming from committed offset ===") - consumed2, err := client.ConsumeWithGroup(topic, groupID, 2) - testutil.AssertNoError(t, err, "Failed to consume remaining messages") - testutil.AssertEqual(t, 2, len(consumed2), "Should consume remaining 2 messages") - - // Verify that we got all messages without duplicates - totalConsumed := len(consumed1) + len(consumed2) - testutil.AssertEqual(t, len(messages), totalConsumed, "Should consume all messages exactly once") - - t.Logf("SUCCESS: Offset management test completed - consumed %d + %d messages", len(consumed1), len(consumed2)) -} - -func testConsumerGroupResumption(t *testing.T, addr, topic, groupID string) { - client := testutil.NewKafkaGoClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Produce messages - t.Logf("=== Phase 1: Producing 4 messages to topic %s ===", topic) - messages := msgGen.GenerateKafkaGoMessages(4) - err := client.ProduceMessages(topic, messages) - testutil.AssertNoError(t, err, "Failed to produce messages for resumption test") - t.Logf("Successfully produced %d messages", len(messages)) - - // Consume some messages - t.Logf("=== Phase 2: First consumer - consuming 2 messages with group %s ===", groupID) - consumed1, err := client.ConsumeWithGroup(topic, groupID, 2) - testutil.AssertNoError(t, err, "Failed to consume first batch") - t.Logf("First consumer consumed %d messages:", len(consumed1)) - for i, msg := range consumed1 { - t.Logf(" Message %d: offset=%d, partition=%d, value=%s", i, msg.Offset, msg.Partition, string(msg.Value)) - } - - // Simulate consumer restart by consuming remaining messages with same group ID - t.Logf("=== Phase 3: Second consumer (simulated restart) - consuming remaining messages with same group %s ===", groupID) - consumed2, err := client.ConsumeWithGroup(topic, groupID, 2) - testutil.AssertNoError(t, err, "Failed to consume after restart") - t.Logf("Second consumer consumed %d messages:", len(consumed2)) - for i, msg := range consumed2 { - t.Logf(" Message %d: offset=%d, partition=%d, value=%s", i, msg.Offset, msg.Partition, string(msg.Value)) - } - - // Verify total consumption - totalConsumed := len(consumed1) + len(consumed2) - t.Logf("=== Verification: Total consumed %d messages (expected %d) ===", totalConsumed, len(messages)) - - // Check for duplicates - offsetsSeen := make(map[int64]bool) - duplicateCount := 0 - for _, msg := range append(consumed1, consumed2...) { - if offsetsSeen[msg.Offset] { - t.Logf("WARNING: Duplicate offset detected: %d", msg.Offset) - duplicateCount++ - } - offsetsSeen[msg.Offset] = true - } - - if duplicateCount > 0 { - t.Logf("ERROR: Found %d duplicate messages", duplicateCount) - } - - testutil.AssertEqual(t, len(messages), totalConsumed, "Should consume all messages after restart") - - t.Logf("SUCCESS: Consumer group resumption test completed - no duplicates, all messages consumed exactly once") -} diff --git a/test/kafka/go.mod b/test/kafka/go.mod deleted file mode 100644 index 5bc5442db..000000000 --- a/test/kafka/go.mod +++ /dev/null @@ -1,258 +0,0 @@ -module github.com/seaweedfs/seaweedfs/test/kafka - -go 1.24.0 - -toolchain go1.24.7 - -require ( - github.com/IBM/sarama v1.46.0 - github.com/linkedin/goavro/v2 v2.14.0 - github.com/seaweedfs/seaweedfs v0.0.0-00010101000000-000000000000 - github.com/segmentio/kafka-go v0.4.49 - github.com/stretchr/testify v1.11.1 - google.golang.org/grpc v1.75.1 -) - -replace github.com/seaweedfs/seaweedfs => ../../ - -require ( - cloud.google.com/go/auth v0.16.5 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.8.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect - github.com/Files-com/files-sdk-go/v3 v3.2.218 // indirect - github.com/IBM/go-sdk-core/v5 v5.21.0 // indirect - github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect - github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect - github.com/ProtonMail/go-crypto v1.3.0 // indirect - github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect - github.com/ProtonMail/go-srp v0.0.7 // indirect - github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect - github.com/abbot/go-http-auth v0.4.0 // indirect - github.com/andybalholm/brotli v1.2.0 // indirect - github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.55.8 // indirect - github.com/aws/aws-sdk-go-v2 v1.39.2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/config v1.31.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect - github.com/aws/smithy-go v1.23.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bradenaw/juniper v0.15.3 // indirect - github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect - github.com/buengese/sgzip v0.1.1 // indirect - github.com/bufbuild/protocompile v0.14.1 // indirect - github.com/calebcase/tmpfile v1.0.3 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect - github.com/cloudflare/circl v1.6.1 // indirect - github.com/cloudinary/cloudinary-go/v2 v2.12.0 // indirect - github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect - github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect - github.com/cognusion/imaging v1.0.2 // indirect - github.com/colinmarc/hdfs/v2 v2.4.0 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/creasty/defaults v1.8.0 // indirect - github.com/cronokirby/saferith v0.33.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect - github.com/eapache/go-resiliency v1.7.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect - github.com/eapache/queue v1.1.0 // indirect - github.com/ebitengine/purego v0.9.0 // indirect - github.com/emersion/go-message v0.18.2 // indirect - github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/flynn/noise v1.1.0 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.9 // indirect - github.com/geoffgarside/ber v1.2.0 // indirect - github.com/go-chi/chi/v5 v5.2.2 // indirect - github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect - github.com/go-jose/go-jose/v4 v4.1.1 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/errors v0.22.2 // indirect - github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-playground/locales v0.14.1 // indirect - github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.27.0 // indirect - github.com/go-resty/resty/v2 v2.16.5 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gofrs/flock v0.12.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.2 // indirect - github.com/golang-jwt/jwt/v5 v5.3.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v1.0.0 // indirect - github.com/google/btree v1.1.3 // indirect - github.com/google/s2a-go v0.1.9 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/gorilla/schema v1.4.1 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/henrybear327/Proton-API-Bridge v1.0.0 // indirect - github.com/henrybear327/go-proton-api v1.0.0 // indirect - github.com/jcmturner/aescts/v2 v2.0.0 // indirect - github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.7.6 // indirect - github.com/jcmturner/goidentity/v6 v6.0.1 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect - github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/jhump/protoreflect v1.17.0 // indirect - github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jtolds/gls v4.20.0+incompatible // indirect - github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect - github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect - github.com/karlseguin/ccache/v2 v2.0.8 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.3.0 // indirect - github.com/klauspost/reedsolomon v1.12.5 // indirect - github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect - github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect - github.com/kr/fs v0.1.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lanrat/extsort v1.4.0 // indirect - github.com/leodido/go-urn v1.4.0 // indirect - github.com/lpar/date v1.0.0 // indirect - github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/ncw/swift/v2 v2.0.4 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/oracle/oci-go-sdk/v65 v65.98.0 // indirect - github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect - github.com/panjf2000/ants/v2 v2.11.3 // indirect - github.com/parquet-go/parquet-go v0.25.1 // indirect - github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect - github.com/peterh/liner v1.2.2 // indirect - github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/sftp v1.13.9 // indirect - github.com/pkg/xattr v0.4.12 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/prometheus/client_golang v1.23.2 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect - github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect - github.com/rclone/rclone v1.71.1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect - github.com/rdleal/intervalst v1.5.0 // indirect - github.com/relvacode/iso8601 v1.6.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rfjakob/eme v1.1.2 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/samber/lo v1.51.0 // indirect - github.com/seaweedfs/goexif v1.0.3 // indirect - github.com/shirou/gopsutil/v4 v4.25.9 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/smarty/assertions v1.16.0 // indirect - github.com/sony/gobreaker v1.0.0 // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/spf13/viper v1.21.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 // indirect - github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect - github.com/tklauser/go-sysconf v0.3.15 // indirect - github.com/tklauser/numcpus v0.10.0 // indirect - github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 // indirect - github.com/unknwon/goconfig v1.0.0 // indirect - github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/viant/ptrie v1.0.1 // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - github.com/zeebo/blake3 v0.2.4 // indirect - github.com/zeebo/errs v1.4.0 // indirect - github.com/zeebo/xxh3 v1.0.2 // indirect - go.etcd.io/bbolt v1.4.2 // indirect - go.mongodb.org/mongo-driver v1.17.4 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.43.0 // indirect - golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect - golang.org/x/image v0.32.0 // indirect - golang.org/x/net v0.45.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect - golang.org/x/time v0.12.0 // indirect - google.golang.org/api v0.247.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect - google.golang.org/grpc/security/advancedtls v1.0.0 // indirect - google.golang.org/protobuf v1.36.9 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect - gopkg.in/validator.v2 v2.0.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/mathutil v1.7.1 // indirect - moul.io/http2curl/v2 v2.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect - storj.io/common v0.0.0-20250808122759-804533d519c1 // indirect - storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect - storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect - storj.io/infectious v0.0.2 // indirect - storj.io/picobuf v0.0.4 // indirect - storj.io/uplink v1.13.1 // indirect -) diff --git a/test/kafka/go.sum b/test/kafka/go.sum deleted file mode 100644 index 9cfddc48d..000000000 --- a/test/kafka/go.sum +++ /dev/null @@ -1,1126 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= -cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= -cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= -github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 h1:l3SabZmNuXCMCbQUIeR4W6/N4j8SeH/lwX+a6leZhHo= -github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2/go.mod h1:k+mEZ4f1pVqZTRqtSDW2AhZ/3wT5qLpsUA75C/k7dtE= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= -github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI= -github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Files-com/files-sdk-go/v3 v3.2.218 h1:tIvcbHXNY/bq+Sno6vajOJOxhe5XbU59Fa1ohOybK+s= -github.com/Files-com/files-sdk-go/v3 v3.2.218/go.mod h1:E0BaGQbcMUcql+AfubCR/iasWKBxX5UZPivnQGC2z0M= -github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk= -github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw= -github.com/IBM/sarama v1.46.0 h1:+YTM1fNd6WKMchlnLKRUB5Z0qD4M8YbvwIIPLvJD53s= -github.com/IBM/sarama v1.46.0/go.mod h1:0lOcuQziJ1/mBGHkdp5uYrltqQuKQKM5O5FOWUQVVvo= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE= -github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I= -github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug= -github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo= -github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk= -github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo= -github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE= -github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= -github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= -github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= -github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= -github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI= -github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk= -github.com/ProtonMail/gopenpgp/v2 v2.9.0 h1:ruLzBmwe4dR1hdnrsEJ/S7psSBmV15gFttFUPP/+/kE= -github.com/ProtonMail/gopenpgp/v2 v2.9.0/go.mod h1:IldDyh9Hv1ZCCYatTuuEt1XZJ0OPjxLpTarDfglih7s= -github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= -github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= -github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= -github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3/go.mod h1:XaUnRxSCYgL3kkgX0QHIV0D+znljPIDImxlv2kbGv0Y= -github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= -github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= -github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= -github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= -github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= -github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= -github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU= -github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= -github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= -github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco= -github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= -github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9 h1:by3nYZLR9l8bUH7kgaMU4dJgYFjyRdFEfORlDpPILB4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.9/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3 h1:P18I4ipbk+b/3dZNq5YYh+Hq6XC0vp5RWkLp1tJldDA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.3/go.mod h1:Rm3gw2Jov6e6kDuamDvyIlZJDMYk97VeCZ82wz/mVZ0= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= -github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= -github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo= -github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8= -github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= -github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= -github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU= -github.com/buengese/sgzip v0.1.1/go.mod h1:i5ZiXGF3fhV7gL1xaRRL1nDnmpNj0X061FQzOS8VMas= -github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= -github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= -github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= -github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= -github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo= -github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 h1:z0uK8UQqjMVYzvk4tiiu3obv2B44+XBsvgEJREQfnO8= -github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9/go.mod h1:Jl2neWsQaDanWORdqZ4emBl50J4/aRBBS4FyyG9/PFo= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= -github.com/cloudinary/cloudinary-go/v2 v2.12.0 h1:uveBJeNpJztKDwFW/B+Wuklq584hQmQXlo+hGTSOGZ8= -github.com/cloudinary/cloudinary-go/v2 v2.12.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo= -github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg= -github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA= -github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs= -github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc/go.mod h1:uvR42Hb/t52HQd7x5/ZLzZEK8oihrFpgnodIJ1vte2E= -github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= -github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cognusion/imaging v1.0.2 h1:BQwBV8V8eF3+dwffp8Udl9xF1JKh5Z0z5JkJwAi98Mc= -github.com/cognusion/imaging v1.0.2/go.mod h1:mj7FvH7cT2dlFogQOSUQRtotBxJ4gFQ2ySMSmBm5dSk= -github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0= -github.com/colinmarc/hdfs/v2 v2.4.0/go.mod h1:0NAO+/3knbMx6+5pCv+Hcbaz4xn/Zzbn9+WIib2rKVI= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk= -github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= -github.com/cronokirby/saferith v0.33.0 h1:TgoQlfsD4LIwx71+ChfRcIpjkw+RPOapDEVxa+LhwLo= -github.com/cronokirby/saferith v0.33.0/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA= -github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= -github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU= -github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M= -github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= -github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= -github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= -github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= -github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k= -github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg= -github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA= -github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg= -github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= -github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= -github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= -github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64= -github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= -github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= -github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= -github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= -github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= -github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= -github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= -github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo= -github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= -github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= -github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= -github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg= -github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= -github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= -github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= -github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= -github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= -github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= -github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= -github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= -github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= -github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= -github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 h1:velgFPYr1X9TDwLIfkV7fWqsFlf7TeP11M/7kPd/dVI= -github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= -github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= -github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0= -github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts= -github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw= -github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= -github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= -github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= -github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= -github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= -github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= -github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= -github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= -github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= -github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= -github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= -github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs= -github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 h1:JcltaO1HXM5S2KYOYcKgAV7slU0xPy1OcvrVgn98sRQ= -github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk= -github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg= -github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c= -github.com/karlseguin/ccache/v2 v2.0.8 h1:lT38cE//uyf6KcFok0rlgXtGFBWxkI6h/qg4tbFyDnA= -github.com/karlseguin/ccache/v2 v2.0.8/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ= -github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= -github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= -github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= -github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/klauspost/reedsolomon v1.12.5 h1:4cJuyH926If33BeDgiZpI5OU0pE+wUHZvMSyNGqN73Y= -github.com/klauspost/reedsolomon v1.12.5/go.mod h1:LkXRjLYGM8K/iQfujYnaPeDmhZLqkrGUyG9p7zs5L68= -github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU= -github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988/go.mod h1:/agobYum3uo/8V6yPVnq+R82pyVGCeuWW5arT4Txn8A= -github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 h1:FHVoZMOVRA+6/y4yRlbiR3WvsrOcKBd/f64H7YiWR2U= -github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lanrat/extsort v1.4.0 h1:jysS/Tjnp7mBwJ6NG8SY+XYFi8HF3LujGbqY9jOWjco= -github.com/lanrat/extsort v1.4.0/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4= -github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= -github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/linkedin/goavro/v2 v2.14.0 h1:aNO/js65U+Mwq4yB5f1h01c3wiM458qtRad1DN0CMUI= -github.com/linkedin/goavro/v2 v2.14.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= -github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I= -github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/ncw/swift/v2 v2.0.4 h1:hHWVFxn5/YaTWAASmn4qyq2p6OyP/Hm3vMLzkjEqR7w= -github.com/ncw/swift/v2 v2.0.4/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/oracle/oci-go-sdk/v65 v65.98.0 h1:ZKsy97KezSiYSN1Fml4hcwjpO+wq01rjBkPqIiUejVc= -github.com/oracle/oci-go-sdk/v65 v65.98.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q= -github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= -github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= -github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= -github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= -github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= -github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU= -github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg= -github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw= -github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI= -github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= -github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= -github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= -github.com/pkg/xattr v0.4.12 h1:rRTkSyFNTRElv6pkA3zpjHpQ90p/OdHQC1GmGh1aTjM= -github.com/pkg/xattr v0.4.12/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8= -github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= -github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= -github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= -github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= -github.com/rclone/rclone v1.71.1 h1:cpODfWTRz5i/WAzXsyW85tzfIKNsd1aq8CE8lUB+0zg= -github.com/rclone/rclone v1.71.1/go.mod h1:NLyX57FrnZ9nVLTY5TRdMmGelrGKbIRYGcgRkNdqqlA= -github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= -github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rdleal/intervalst v1.5.0 h1:SEB9bCFz5IqD1yhfH1Wv8IBnY/JQxDplwkxHjT6hamU= -github.com/rdleal/intervalst v1.5.0/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ= -github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU= -github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4= -github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= -github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI= -github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= -github.com/seaweedfs/goexif v1.0.3 h1:ve/OjI7dxPW8X9YQsv3JuVMaxEyF9Rvfd04ouL+Bz30= -github.com/seaweedfs/goexif v1.0.3/go.mod h1:Oni780Z236sXpIQzk1XoJlTwqrJ02smEin9zQeff7Fk= -github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk= -github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU= -github.com/shirou/gopsutil/v4 v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= -github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/snabb/httpreaderat v1.0.1 h1:whlb+vuZmyjqVop8x1EKOg05l2NE4z9lsMMXjmSUCnY= -github.com/snabb/httpreaderat v1.0.1/go.mod h1:lpbGrKDWF37yvRbtRvQsbesS6Ty5c83t8ztannPoMsA= -github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= -github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spacemonkeygo/monkit/v3 v3.0.24 h1:cKixJ+evHnfJhWNyIZjBy5hoW8LTWmrJXPo18tzLNrk= -github.com/spacemonkeygo/monkit/v3 v3.0.24/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= -github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc= -github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY= -github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= -github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= -github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= -github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= -github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43 h1:QEePdg0ty2r0t1+qwfZmQ4OOl/MB2UXIeJSpIZv56lg= -github.com/tylertreat/BoomFilters v0.0.0-20210315201527-1a82519a3e43/go.mod h1:OYRfF6eb5wY9VRFkXJH8FFBi3plw2v+giaIu7P054pM= -github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= -github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= -github.com/unknwon/goconfig v1.0.0 h1:rS7O+CmUdli1T+oDm7fYj1MwqNWtEJfNj+FqcUHML8U= -github.com/unknwon/goconfig v1.0.0/go.mod h1:qu2ZQ/wcC/if2u32263HTVC39PeOQRSmidQk3DuDFQ8= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/viant/assertly v0.9.0 h1:uB3jO+qmWQcrSCHQRxA2kk88eXAdaklUUDxxCU5wBHQ= -github.com/viant/assertly v0.9.0/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/ptrie v1.0.1 h1:3fFC8XqCSchf11sCSS5sbb8eGDNEP2g2Hj96lNdHlZY= -github.com/viant/ptrie v1.0.1/go.mod h1:Y+mwwNCIUgFrCZcrG4/QChfi4ubvnNBsyrENBIgigu0= -github.com/viant/toolbox v0.34.5 h1:szWNPiGHjo8Dd4v2a59saEhG31DRL2Xf3aJ0ZtTSuqc= -github.com/viant/toolbox v0.34.5/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= -github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= -github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= -github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8= -github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A= -github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= -github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= -github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= -github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= -go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= -go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ= -golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= -golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= -google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 h1:Nt6z9UHqSlIdIGJdz6KhTIs2VRx/iOsA5iE8bmQNcxs= -google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s= -google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ= -google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20 h1:MLBCGN1O7GzIx+cBiwfYPwtmZ41U3Mn/cotLJciaArI= -google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= -google.golang.org/grpc/security/advancedtls v1.0.0 h1:/KQ7VP/1bs53/aopk9QhuPyFAp9Dm9Ejix3lzYkCrDA= -google.golang.org/grpc/security/advancedtls v1.0.0/go.mod h1:o+s4go+e1PJ2AjuQMY5hU82W7lDlefjJA6FqEHRVHWk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= -gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= -modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= -moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= -moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -storj.io/common v0.0.0-20250808122759-804533d519c1 h1:z7ZjU+TlPZ2Lq2S12hT6+Fr7jFsBxPMrPBH4zZpZuUA= -storj.io/common v0.0.0-20250808122759-804533d519c1/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY= -storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro= -storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg= -storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 h1:5MZ0CyMbG6Pi0rRzUWVG6dvpXjbBYEX2oyXuj+tT+sk= -storj.io/eventkit v0.0.0-20250410172343-61f26d3de156/go.mod h1:CpnM6kfZV58dcq3lpbo/IQ4/KoutarnTSHY0GYVwnYw= -storj.io/infectious v0.0.2 h1:rGIdDC/6gNYAStsxsZU79D/MqFjNyJc1tsyyj9sTl7Q= -storj.io/infectious v0.0.2/go.mod h1:QEjKKww28Sjl1x8iDsjBpOM4r1Yp8RsowNcItsZJ1Vs= -storj.io/picobuf v0.0.4 h1:qswHDla+YZ2TovGtMnU4astjvrADSIz84FXRn0qgP6o= -storj.io/picobuf v0.0.4/go.mod h1:hSMxmZc58MS/2qSLy1I0idovlO7+6K47wIGUyRZa6mg= -storj.io/uplink v1.13.1 h1:C8RdW/upALoCyuF16Lod9XGCXEdbJAS+ABQy9JO/0pA= -storj.io/uplink v1.13.1/go.mod h1:x0MQr4UfFsQBwgVWZAtEsLpuwAn6dg7G0Mpne1r516E= diff --git a/test/kafka/integration/client_compatibility_test.go b/test/kafka/integration/client_compatibility_test.go deleted file mode 100644 index e106d26d5..000000000 --- a/test/kafka/integration/client_compatibility_test.go +++ /dev/null @@ -1,549 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/IBM/sarama" - "github.com/segmentio/kafka-go" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestClientCompatibility tests compatibility with different Kafka client libraries and versions -// This test will use SMQ backend if SEAWEEDFS_MASTERS is available, otherwise mock -func TestClientCompatibility(t *testing.T) { - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQAvailable) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - time.Sleep(200 * time.Millisecond) // Allow gateway to be ready - - // Log which backend we're using - if gateway.IsSMQMode() { - t.Logf("Running client compatibility tests with SMQ backend") - } else { - t.Logf("Running client compatibility tests with mock backend") - } - - t.Run("SaramaVersionCompatibility", func(t *testing.T) { - testSaramaVersionCompatibility(t, addr) - }) - - t.Run("KafkaGoVersionCompatibility", func(t *testing.T) { - testKafkaGoVersionCompatibility(t, addr) - }) - - t.Run("APIVersionNegotiation", func(t *testing.T) { - testAPIVersionNegotiation(t, addr) - }) - - t.Run("ProducerConsumerCompatibility", func(t *testing.T) { - testProducerConsumerCompatibility(t, addr) - }) - - t.Run("ConsumerGroupCompatibility", func(t *testing.T) { - testConsumerGroupCompatibility(t, addr) - }) - - t.Run("AdminClientCompatibility", func(t *testing.T) { - testAdminClientCompatibility(t, addr) - }) -} - -func testSaramaVersionCompatibility(t *testing.T, addr string) { - versions := []sarama.KafkaVersion{ - sarama.V2_6_0_0, - sarama.V2_8_0_0, - sarama.V3_0_0_0, - sarama.V3_4_0_0, - } - - for _, version := range versions { - t.Run(fmt.Sprintf("Sarama_%s", version.String()), func(t *testing.T) { - config := sarama.NewConfig() - config.Version = version - config.Producer.Return.Successes = true - config.Consumer.Return.Errors = true - - client, err := sarama.NewClient([]string{addr}, config) - if err != nil { - t.Fatalf("Failed to create Sarama client for version %s: %v", version, err) - } - defer client.Close() - - // Test basic operations - topicName := testutil.GenerateUniqueTopicName(fmt.Sprintf("sarama-%s", version.String())) - - // Test topic creation via admin client - admin, err := sarama.NewClusterAdminFromClient(client) - if err != nil { - t.Fatalf("Failed to create admin client: %v", err) - } - defer admin.Close() - - topicDetail := &sarama.TopicDetail{ - NumPartitions: 1, - ReplicationFactor: 1, - } - - err = admin.CreateTopic(topicName, topicDetail, false) - if err != nil { - t.Logf("Topic creation failed (may already exist): %v", err) - } - - // Test produce - producer, err := sarama.NewSyncProducerFromClient(client) - if err != nil { - t.Fatalf("Failed to create producer: %v", err) - } - defer producer.Close() - - message := &sarama.ProducerMessage{ - Topic: topicName, - Value: sarama.StringEncoder(fmt.Sprintf("test-message-%s", version.String())), - } - - partition, offset, err := producer.SendMessage(message) - if err != nil { - t.Fatalf("Failed to send message: %v", err) - } - - t.Logf("Sarama %s: Message sent to partition %d at offset %d", version, partition, offset) - - // Test consume - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - t.Fatalf("Failed to create consumer: %v", err) - } - defer consumer.Close() - - partitionConsumer, err := consumer.ConsumePartition(topicName, 0, sarama.OffsetOldest) - if err != nil { - t.Fatalf("Failed to create partition consumer: %v", err) - } - defer partitionConsumer.Close() - - select { - case msg := <-partitionConsumer.Messages(): - if string(msg.Value) != fmt.Sprintf("test-message-%s", version.String()) { - t.Errorf("Message content mismatch: expected %s, got %s", - fmt.Sprintf("test-message-%s", version.String()), string(msg.Value)) - } - t.Logf("Sarama %s: Successfully consumed message", version) - case err := <-partitionConsumer.Errors(): - t.Fatalf("Consumer error: %v", err) - case <-time.After(5 * time.Second): - t.Fatal("Timeout waiting for message") - } - }) - } -} - -func testKafkaGoVersionCompatibility(t *testing.T, addr string) { - // Test different kafka-go configurations - configs := []struct { - name string - readerConfig kafka.ReaderConfig - writerConfig kafka.WriterConfig - }{ - { - name: "kafka-go-default", - readerConfig: kafka.ReaderConfig{ - Brokers: []string{addr}, - Partition: 0, // Read from specific partition instead of using consumer group - }, - writerConfig: kafka.WriterConfig{ - Brokers: []string{addr}, - }, - }, - { - name: "kafka-go-with-batching", - readerConfig: kafka.ReaderConfig{ - Brokers: []string{addr}, - Partition: 0, // Read from specific partition instead of using consumer group - MinBytes: 1, - MaxBytes: 10e6, - }, - writerConfig: kafka.WriterConfig{ - Brokers: []string{addr}, - BatchSize: 100, - BatchTimeout: 10 * time.Millisecond, - }, - }, - } - - for _, config := range configs { - t.Run(config.name, func(t *testing.T) { - topicName := testutil.GenerateUniqueTopicName(config.name) - - // Create topic first using Sarama admin client (kafka-go doesn't have admin client) - saramaConfig := sarama.NewConfig() - saramaClient, err := sarama.NewClient([]string{addr}, saramaConfig) - if err != nil { - t.Fatalf("Failed to create Sarama client for topic creation: %v", err) - } - defer saramaClient.Close() - - admin, err := sarama.NewClusterAdminFromClient(saramaClient) - if err != nil { - t.Fatalf("Failed to create admin client: %v", err) - } - defer admin.Close() - - topicDetail := &sarama.TopicDetail{ - NumPartitions: 1, - ReplicationFactor: 1, - } - - err = admin.CreateTopic(topicName, topicDetail, false) - if err != nil { - t.Logf("Topic creation failed (may already exist): %v", err) - } - - // Wait for topic to be fully created - time.Sleep(200 * time.Millisecond) - - // Configure writer first and write message - config.writerConfig.Topic = topicName - writer := kafka.NewWriter(config.writerConfig) - - // Test produce - produceCtx, produceCancel := context.WithTimeout(context.Background(), 15*time.Second) - defer produceCancel() - - message := kafka.Message{ - Value: []byte(fmt.Sprintf("test-message-%s", config.name)), - } - - err = writer.WriteMessages(produceCtx, message) - if err != nil { - writer.Close() - t.Fatalf("Failed to write message: %v", err) - } - - // Close writer before reading to ensure flush - if err := writer.Close(); err != nil { - t.Logf("Warning: writer close error: %v", err) - } - - t.Logf("%s: Message written successfully", config.name) - - // Wait for message to be available - time.Sleep(100 * time.Millisecond) - - // Configure and create reader - config.readerConfig.Topic = topicName - config.readerConfig.StartOffset = kafka.FirstOffset - reader := kafka.NewReader(config.readerConfig) - - // Test consume with dedicated context - consumeCtx, consumeCancel := context.WithTimeout(context.Background(), 15*time.Second) - - msg, err := reader.ReadMessage(consumeCtx) - consumeCancel() - - if err != nil { - reader.Close() - t.Fatalf("Failed to read message: %v", err) - } - - if string(msg.Value) != fmt.Sprintf("test-message-%s", config.name) { - reader.Close() - t.Errorf("Message content mismatch: expected %s, got %s", - fmt.Sprintf("test-message-%s", config.name), string(msg.Value)) - } - - t.Logf("%s: Successfully consumed message", config.name) - - // Close reader and wait for cleanup - if err := reader.Close(); err != nil { - t.Logf("Warning: reader close error: %v", err) - } - - // Give time for background goroutines to clean up - time.Sleep(100 * time.Millisecond) - }) - } -} - -func testAPIVersionNegotiation(t *testing.T, addr string) { - // Test that clients can negotiate API versions properly - config := sarama.NewConfig() - config.Version = sarama.V2_8_0_0 - - client, err := sarama.NewClient([]string{addr}, config) - if err != nil { - t.Fatalf("Failed to create client: %v", err) - } - defer client.Close() - - // Test that the client can get API versions - coordinator, err := client.Coordinator("test-group") - if err != nil { - t.Logf("Coordinator lookup failed (expected for test): %v", err) - } else { - t.Logf("Successfully found coordinator: %s", coordinator.Addr()) - } - - // Test metadata request (should work with version negotiation) - topics, err := client.Topics() - if err != nil { - t.Fatalf("Failed to get topics: %v", err) - } - - t.Logf("API version negotiation successful, found %d topics", len(topics)) -} - -func testProducerConsumerCompatibility(t *testing.T, addr string) { - // Test cross-client compatibility: produce with one client, consume with another - topicName := testutil.GenerateUniqueTopicName("cross-client-test") - - // Create topic first - saramaConfig := sarama.NewConfig() - saramaConfig.Producer.Return.Successes = true - - saramaClient, err := sarama.NewClient([]string{addr}, saramaConfig) - if err != nil { - t.Fatalf("Failed to create Sarama client: %v", err) - } - defer saramaClient.Close() - - admin, err := sarama.NewClusterAdminFromClient(saramaClient) - if err != nil { - t.Fatalf("Failed to create admin client: %v", err) - } - defer admin.Close() - - topicDetail := &sarama.TopicDetail{ - NumPartitions: 1, - ReplicationFactor: 1, - } - - err = admin.CreateTopic(topicName, topicDetail, false) - if err != nil { - t.Logf("Topic creation failed (may already exist): %v", err) - } - - // Wait for topic to be fully created - time.Sleep(200 * time.Millisecond) - - producer, err := sarama.NewSyncProducerFromClient(saramaClient) - if err != nil { - t.Fatalf("Failed to create producer: %v", err) - } - defer producer.Close() - - message := &sarama.ProducerMessage{ - Topic: topicName, - Value: sarama.StringEncoder("cross-client-message"), - } - - _, _, err = producer.SendMessage(message) - if err != nil { - t.Fatalf("Failed to send message with Sarama: %v", err) - } - - t.Logf("Produced message with Sarama") - - // Wait for message to be available - time.Sleep(100 * time.Millisecond) - - // Consume with kafka-go (without consumer group to avoid offset commit issues) - reader := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{addr}, - Topic: topicName, - Partition: 0, - StartOffset: kafka.FirstOffset, - }) - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - msg, err := reader.ReadMessage(ctx) - cancel() - - // Close reader immediately after reading - if closeErr := reader.Close(); closeErr != nil { - t.Logf("Warning: reader close error: %v", closeErr) - } - - if err != nil { - t.Fatalf("Failed to read message with kafka-go: %v", err) - } - - if string(msg.Value) != "cross-client-message" { - t.Errorf("Message content mismatch: expected 'cross-client-message', got '%s'", string(msg.Value)) - } - - t.Logf("Cross-client compatibility test passed") -} - -func testConsumerGroupCompatibility(t *testing.T, addr string) { - // Test consumer group functionality with different clients - topicName := testutil.GenerateUniqueTopicName("consumer-group-test") - - // Create topic and produce messages - config := sarama.NewConfig() - config.Producer.Return.Successes = true - - client, err := sarama.NewClient([]string{addr}, config) - if err != nil { - t.Fatalf("Failed to create client: %v", err) - } - defer client.Close() - - // Create topic first - admin, err := sarama.NewClusterAdminFromClient(client) - if err != nil { - t.Fatalf("Failed to create admin client: %v", err) - } - defer admin.Close() - - topicDetail := &sarama.TopicDetail{ - NumPartitions: 1, - ReplicationFactor: 1, - } - - err = admin.CreateTopic(topicName, topicDetail, false) - if err != nil { - t.Logf("Topic creation failed (may already exist): %v", err) - } - - // Wait for topic to be fully created - time.Sleep(200 * time.Millisecond) - - producer, err := sarama.NewSyncProducerFromClient(client) - if err != nil { - t.Fatalf("Failed to create producer: %v", err) - } - defer producer.Close() - - // Produce test messages - for i := 0; i < 5; i++ { - message := &sarama.ProducerMessage{ - Topic: topicName, - Value: sarama.StringEncoder(fmt.Sprintf("group-message-%d", i)), - } - - _, _, err = producer.SendMessage(message) - if err != nil { - t.Fatalf("Failed to send message %d: %v", i, err) - } - } - - t.Logf("Produced 5 messages successfully") - - // Wait for messages to be available - time.Sleep(200 * time.Millisecond) - - // Test consumer group with Sarama (kafka-go consumer groups have offset commit issues) - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - t.Fatalf("Failed to create consumer: %v", err) - } - defer consumer.Close() - - partitionConsumer, err := consumer.ConsumePartition(topicName, 0, sarama.OffsetOldest) - if err != nil { - t.Fatalf("Failed to create partition consumer: %v", err) - } - defer partitionConsumer.Close() - - messagesReceived := 0 - timeout := time.After(30 * time.Second) - - for messagesReceived < 5 { - select { - case msg := <-partitionConsumer.Messages(): - t.Logf("Received message %d: %s", messagesReceived, string(msg.Value)) - messagesReceived++ - case err := <-partitionConsumer.Errors(): - t.Logf("Consumer error (continuing): %v", err) - case <-timeout: - t.Fatalf("Timeout waiting for messages, received %d out of 5", messagesReceived) - } - } - - t.Logf("Consumer group compatibility test passed: received %d messages", messagesReceived) -} - -func testAdminClientCompatibility(t *testing.T, addr string) { - // Test admin operations with different clients - config := sarama.NewConfig() - config.Version = sarama.V2_8_0_0 - config.Admin.Timeout = 30 * time.Second - - client, err := sarama.NewClient([]string{addr}, config) - if err != nil { - t.Fatalf("Failed to create client: %v", err) - } - defer client.Close() - - admin, err := sarama.NewClusterAdminFromClient(client) - if err != nil { - t.Fatalf("Failed to create admin client: %v", err) - } - defer admin.Close() - - // Test topic operations - topicName := testutil.GenerateUniqueTopicName("admin-test") - - topicDetail := &sarama.TopicDetail{ - NumPartitions: 2, - ReplicationFactor: 1, - } - - err = admin.CreateTopic(topicName, topicDetail, false) - if err != nil { - t.Logf("Topic creation failed (may already exist): %v", err) - } - - // Wait for topic to be fully created and propagated - time.Sleep(500 * time.Millisecond) - - // List topics with retry logic - var topics map[string]sarama.TopicDetail - maxRetries := 3 - for i := 0; i < maxRetries; i++ { - topics, err = admin.ListTopics() - if err == nil { - break - } - t.Logf("List topics attempt %d failed: %v, retrying...", i+1, err) - time.Sleep(time.Duration(500*(i+1)) * time.Millisecond) - } - - if err != nil { - t.Fatalf("Failed to list topics after %d attempts: %v", maxRetries, err) - } - - found := false - for topic := range topics { - if topic == topicName { - found = true - t.Logf("Found created topic: %s", topicName) - break - } - } - - if !found { - // Log all topics for debugging - allTopics := make([]string, 0, len(topics)) - for topic := range topics { - allTopics = append(allTopics, topic) - } - t.Logf("Available topics: %v", allTopics) - t.Errorf("Created topic %s not found in topic list", topicName) - } - - // Test describe consumer groups (if supported) - groups, err := admin.ListConsumerGroups() - if err != nil { - t.Logf("List consumer groups failed (may not be implemented): %v", err) - } else { - t.Logf("Found %d consumer groups", len(groups)) - } - - t.Logf("Admin client compatibility test passed") -} diff --git a/test/kafka/integration/consumer_groups_test.go b/test/kafka/integration/consumer_groups_test.go deleted file mode 100644 index 5407a2999..000000000 --- a/test/kafka/integration/consumer_groups_test.go +++ /dev/null @@ -1,351 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/IBM/sarama" - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestConsumerGroups tests consumer group functionality -// This test requires SeaweedFS masters to be running and will skip if not available -func TestConsumerGroups(t *testing.T) { - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQRequired) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - - t.Logf("Running consumer group tests with SMQ backend for offset persistence") - - t.Run("BasicFunctionality", func(t *testing.T) { - testConsumerGroupBasicFunctionality(t, addr) - }) - - t.Run("OffsetCommitAndFetch", func(t *testing.T) { - testConsumerGroupOffsetCommitAndFetch(t, addr) - }) - - t.Run("Rebalancing", func(t *testing.T) { - testConsumerGroupRebalancing(t, addr) - }) -} - -func testConsumerGroupBasicFunctionality(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("consumer-group-basic") - groupID := testutil.GenerateUniqueGroupID("basic-group") - - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Create topic and produce messages - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - messages := msgGen.GenerateStringMessages(9) // 3 messages per consumer - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - // Test with multiple consumers in the same group - numConsumers := 3 - handler := &ConsumerGroupHandler{ - messages: make(chan *sarama.ConsumerMessage, len(messages)), - ready: make(chan bool), - t: t, - } - - var wg sync.WaitGroup - consumerErrors := make(chan error, numConsumers) - - for i := 0; i < numConsumers; i++ { - wg.Add(1) - go func(consumerID int) { - defer wg.Done() - - consumerGroup, err := sarama.NewConsumerGroup([]string{addr}, groupID, client.GetConfig()) - if err != nil { - consumerErrors <- fmt.Errorf("consumer %d: failed to create consumer group: %v", consumerID, err) - return - } - defer consumerGroup.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - err = consumerGroup.Consume(ctx, []string{topicName}, handler) - if err != nil && err != context.DeadlineExceeded { - consumerErrors <- fmt.Errorf("consumer %d: consumption error: %v", consumerID, err) - return - } - }(i) - } - - // Wait for consumers to be ready - readyCount := 0 - for readyCount < numConsumers { - select { - case <-handler.ready: - readyCount++ - case <-time.After(5 * time.Second): - t.Fatalf("Timeout waiting for consumers to be ready") - } - } - - // Collect consumed messages - consumedMessages := make([]*sarama.ConsumerMessage, 0, len(messages)) - messageTimeout := time.After(10 * time.Second) - - for len(consumedMessages) < len(messages) { - select { - case msg := <-handler.messages: - consumedMessages = append(consumedMessages, msg) - case err := <-consumerErrors: - t.Fatalf("Consumer error: %v", err) - case <-messageTimeout: - t.Fatalf("Timeout waiting for messages. Got %d/%d messages", len(consumedMessages), len(messages)) - } - } - - wg.Wait() - - // Verify all messages were consumed exactly once - testutil.AssertEqual(t, len(messages), len(consumedMessages), "Message count mismatch") - - // Verify message uniqueness (no duplicates) - messageKeys := make(map[string]bool) - for _, msg := range consumedMessages { - key := string(msg.Key) - if messageKeys[key] { - t.Errorf("Duplicate message key: %s", key) - } - messageKeys[key] = true - } -} - -func testConsumerGroupOffsetCommitAndFetch(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("offset-commit-test") - groupID := testutil.GenerateUniqueGroupID("offset-group") - - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Create topic and produce messages - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - messages := msgGen.GenerateStringMessages(5) - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - // First consumer: consume first 3 messages and commit offsets - handler1 := &OffsetTestHandler{ - messages: make(chan *sarama.ConsumerMessage, len(messages)), - ready: make(chan bool), - stopAfter: 3, - t: t, - } - - consumerGroup1, err := sarama.NewConsumerGroup([]string{addr}, groupID, client.GetConfig()) - testutil.AssertNoError(t, err, "Failed to create first consumer group") - - ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel1() - - go func() { - err := consumerGroup1.Consume(ctx1, []string{topicName}, handler1) - if err != nil && err != context.DeadlineExceeded { - t.Logf("First consumer error: %v", err) - } - }() - - // Wait for first consumer to be ready and consume messages - <-handler1.ready - consumedCount := 0 - for consumedCount < 3 { - select { - case <-handler1.messages: - consumedCount++ - case <-time.After(5 * time.Second): - t.Fatalf("Timeout waiting for first consumer messages") - } - } - - consumerGroup1.Close() - cancel1() - time.Sleep(500 * time.Millisecond) // Wait for cleanup - - // Stop the first consumer after N messages - // Allow a brief moment for commit/heartbeat to flush - time.Sleep(1 * time.Second) - - // Start a second consumer in the same group to verify resumption from committed offset - handler2 := &OffsetTestHandler{ - messages: make(chan *sarama.ConsumerMessage, len(messages)), - ready: make(chan bool), - stopAfter: 2, - t: t, - } - consumerGroup2, err := sarama.NewConsumerGroup([]string{addr}, groupID, client.GetConfig()) - testutil.AssertNoError(t, err, "Failed to create second consumer group") - defer consumerGroup2.Close() - - ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel2() - - go func() { - err := consumerGroup2.Consume(ctx2, []string{topicName}, handler2) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Second consumer error: %v", err) - } - }() - - // Wait for second consumer and collect remaining messages - <-handler2.ready - secondConsumerMessages := make([]*sarama.ConsumerMessage, 0) - consumedCount = 0 - for consumedCount < 2 { - select { - case msg := <-handler2.messages: - consumedCount++ - secondConsumerMessages = append(secondConsumerMessages, msg) - case <-time.After(5 * time.Second): - t.Fatalf("Timeout waiting for second consumer messages. Got %d/2", consumedCount) - } - } - - // Verify second consumer started from correct offset - if len(secondConsumerMessages) > 0 { - firstMessageOffset := secondConsumerMessages[0].Offset - if firstMessageOffset < 3 { - t.Fatalf("Second consumer should start from offset >= 3: got %d", firstMessageOffset) - } - } -} - -func testConsumerGroupRebalancing(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("rebalancing-test") - groupID := testutil.GenerateUniqueGroupID("rebalance-group") - - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Create topic with multiple partitions for rebalancing - err := client.CreateTopic(topicName, 4, 1) // 4 partitions - testutil.AssertNoError(t, err, "Failed to create topic") - - // Produce messages to all partitions - messages := msgGen.GenerateStringMessages(12) // 3 messages per partition - for i, msg := range messages { - partition := int32(i % 4) - err = client.ProduceMessageToPartition(topicName, partition, msg) - testutil.AssertNoError(t, err, "Failed to produce message") - } - - t.Logf("Produced %d messages across 4 partitions", len(messages)) - - // Test scenario 1: Single consumer gets all partitions - t.Run("SingleConsumerAllPartitions", func(t *testing.T) { - testSingleConsumerAllPartitions(t, addr, topicName, groupID+"-single") - }) - - // Test scenario 2: Add second consumer, verify rebalancing - t.Run("TwoConsumersRebalance", func(t *testing.T) { - testTwoConsumersRebalance(t, addr, topicName, groupID+"-two") - }) - - // Test scenario 3: Remove consumer, verify rebalancing - t.Run("ConsumerLeaveRebalance", func(t *testing.T) { - testConsumerLeaveRebalance(t, addr, topicName, groupID+"-leave") - }) - - // Test scenario 4: Multiple consumers join simultaneously - t.Run("MultipleConsumersJoin", func(t *testing.T) { - testMultipleConsumersJoin(t, addr, topicName, groupID+"-multi") - }) -} - -// ConsumerGroupHandler implements sarama.ConsumerGroupHandler -type ConsumerGroupHandler struct { - messages chan *sarama.ConsumerMessage - ready chan bool - readyOnce sync.Once - t *testing.T -} - -func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { - h.t.Logf("Consumer group session setup") - h.readyOnce.Do(func() { - close(h.ready) - }) - return nil -} - -func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error { - h.t.Logf("Consumer group session cleanup") - return nil -} - -func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - for { - select { - case message := <-claim.Messages(): - if message == nil { - return nil - } - h.messages <- message - session.MarkMessage(message, "") - case <-session.Context().Done(): - return nil - } - } -} - -// OffsetTestHandler implements sarama.ConsumerGroupHandler for offset testing -type OffsetTestHandler struct { - messages chan *sarama.ConsumerMessage - ready chan bool - readyOnce sync.Once - stopAfter int - consumed int - t *testing.T -} - -func (h *OffsetTestHandler) Setup(sarama.ConsumerGroupSession) error { - h.t.Logf("Offset test consumer setup") - h.readyOnce.Do(func() { - close(h.ready) - }) - return nil -} - -func (h *OffsetTestHandler) Cleanup(sarama.ConsumerGroupSession) error { - h.t.Logf("Offset test consumer cleanup") - return nil -} - -func (h *OffsetTestHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - for { - select { - case message := <-claim.Messages(): - if message == nil { - return nil - } - h.consumed++ - h.messages <- message - session.MarkMessage(message, "") - - // Stop after consuming the specified number of messages - if h.consumed >= h.stopAfter { - h.t.Logf("Stopping consumer after %d messages", h.consumed) - // Ensure commits are flushed before exiting the claim - session.Commit() - return nil - } - case <-session.Context().Done(): - return nil - } - } -} diff --git a/test/kafka/integration/docker_test.go b/test/kafka/integration/docker_test.go deleted file mode 100644 index 333ec40c5..000000000 --- a/test/kafka/integration/docker_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package integration - -import ( - "encoding/json" - "io" - "net/http" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestDockerIntegration tests the complete Kafka integration using Docker Compose -func TestDockerIntegration(t *testing.T) { - env := testutil.NewDockerEnvironment(t) - env.SkipIfNotAvailable(t) - - t.Run("KafkaConnectivity", func(t *testing.T) { - env.RequireKafka(t) - testDockerKafkaConnectivity(t, env.KafkaBootstrap) - }) - - t.Run("SchemaRegistryConnectivity", func(t *testing.T) { - env.RequireSchemaRegistry(t) - testDockerSchemaRegistryConnectivity(t, env.SchemaRegistry) - }) - - t.Run("KafkaGatewayConnectivity", func(t *testing.T) { - env.RequireGateway(t) - testDockerKafkaGatewayConnectivity(t, env.KafkaGateway) - }) - - t.Run("SaramaProduceConsume", func(t *testing.T) { - env.RequireKafka(t) - testDockerSaramaProduceConsume(t, env.KafkaBootstrap) - }) - - t.Run("KafkaGoProduceConsume", func(t *testing.T) { - env.RequireKafka(t) - testDockerKafkaGoProduceConsume(t, env.KafkaBootstrap) - }) - - t.Run("GatewayProduceConsume", func(t *testing.T) { - env.RequireGateway(t) - testDockerGatewayProduceConsume(t, env.KafkaGateway) - }) - - t.Run("CrossClientCompatibility", func(t *testing.T) { - env.RequireKafka(t) - env.RequireGateway(t) - testDockerCrossClientCompatibility(t, env.KafkaBootstrap, env.KafkaGateway) - }) -} - -func testDockerKafkaConnectivity(t *testing.T, bootstrap string) { - client := testutil.NewSaramaClient(t, bootstrap) - - // Test basic connectivity by creating a topic - topicName := testutil.GenerateUniqueTopicName("connectivity-test") - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic for connectivity test") - - t.Logf("Kafka connectivity test passed") -} - -func testDockerSchemaRegistryConnectivity(t *testing.T, registryURL string) { - // Test basic HTTP connectivity to Schema Registry - client := &http.Client{Timeout: 10 * time.Second} - - // Test 1: Check if Schema Registry is responding - resp, err := client.Get(registryURL + "/subjects") - if err != nil { - t.Fatalf("Failed to connect to Schema Registry at %s: %v", registryURL, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - t.Fatalf("Schema Registry returned status %d, expected 200", resp.StatusCode) - } - - // Test 2: Verify response is valid JSON array - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Failed to read response body: %v", err) - } - - var subjects []string - if err := json.Unmarshal(body, &subjects); err != nil { - t.Fatalf("Schema Registry response is not valid JSON array: %v", err) - } - - t.Logf("Schema Registry is accessible with %d subjects", len(subjects)) - - // Test 3: Check config endpoint - configResp, err := client.Get(registryURL + "/config") - if err != nil { - t.Fatalf("Failed to get Schema Registry config: %v", err) - } - defer configResp.Body.Close() - - if configResp.StatusCode != http.StatusOK { - t.Fatalf("Schema Registry config endpoint returned status %d", configResp.StatusCode) - } - - configBody, err := io.ReadAll(configResp.Body) - if err != nil { - t.Fatalf("Failed to read config response: %v", err) - } - - var config map[string]interface{} - if err := json.Unmarshal(configBody, &config); err != nil { - t.Fatalf("Schema Registry config response is not valid JSON: %v", err) - } - - t.Logf("Schema Registry config: %v", config) - t.Logf("Schema Registry connectivity test passed") -} - -func testDockerKafkaGatewayConnectivity(t *testing.T, gatewayURL string) { - client := testutil.NewSaramaClient(t, gatewayURL) - - // Test basic connectivity to gateway - topicName := testutil.GenerateUniqueTopicName("gateway-connectivity-test") - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic via gateway") - - t.Logf("Kafka Gateway connectivity test passed") -} - -func testDockerSaramaProduceConsume(t *testing.T, bootstrap string) { - client := testutil.NewSaramaClient(t, bootstrap) - msgGen := testutil.NewMessageGenerator() - - topicName := testutil.GenerateUniqueTopicName("sarama-docker-test") - - // Create topic - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - // Produce and consume messages - messages := msgGen.GenerateStringMessages(3) - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - consumed, err := client.ConsumeMessages(topicName, 0, len(messages)) - testutil.AssertNoError(t, err, "Failed to consume messages") - - err = testutil.ValidateMessageContent(messages, consumed) - testutil.AssertNoError(t, err, "Message validation failed") - - t.Logf("Sarama produce/consume test passed") -} - -func testDockerKafkaGoProduceConsume(t *testing.T, bootstrap string) { - client := testutil.NewKafkaGoClient(t, bootstrap) - msgGen := testutil.NewMessageGenerator() - - topicName := testutil.GenerateUniqueTopicName("kafka-go-docker-test") - - // Create topic - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - // Produce and consume messages - messages := msgGen.GenerateKafkaGoMessages(3) - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - consumed, err := client.ConsumeMessages(topicName, len(messages)) - testutil.AssertNoError(t, err, "Failed to consume messages") - - err = testutil.ValidateKafkaGoMessageContent(messages, consumed) - testutil.AssertNoError(t, err, "Message validation failed") - - t.Logf("kafka-go produce/consume test passed") -} - -func testDockerGatewayProduceConsume(t *testing.T, gatewayURL string) { - client := testutil.NewSaramaClient(t, gatewayURL) - msgGen := testutil.NewMessageGenerator() - - topicName := testutil.GenerateUniqueTopicName("gateway-docker-test") - - // Produce and consume via gateway - messages := msgGen.GenerateStringMessages(3) - err := client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages via gateway") - - consumed, err := client.ConsumeMessages(topicName, 0, len(messages)) - testutil.AssertNoError(t, err, "Failed to consume messages via gateway") - - err = testutil.ValidateMessageContent(messages, consumed) - testutil.AssertNoError(t, err, "Message validation failed") - - t.Logf("Gateway produce/consume test passed") -} - -func testDockerCrossClientCompatibility(t *testing.T, kafkaBootstrap, gatewayURL string) { - kafkaClient := testutil.NewSaramaClient(t, kafkaBootstrap) - msgGen := testutil.NewMessageGenerator() - - topicName := testutil.GenerateUniqueTopicName("cross-client-docker-test") - - // Create topic on Kafka - err := kafkaClient.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic on Kafka") - - // Produce to Kafka - messages := msgGen.GenerateStringMessages(2) - err = kafkaClient.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce to Kafka") - - // This tests the integration between Kafka and the Gateway - // In a real scenario, messages would be replicated or bridged - t.Logf("Cross-client compatibility test passed") -} diff --git a/test/kafka/integration/rebalancing_test.go b/test/kafka/integration/rebalancing_test.go deleted file mode 100644 index f5ddeed56..000000000 --- a/test/kafka/integration/rebalancing_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/IBM/sarama" - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -func testSingleConsumerAllPartitions(t *testing.T, addr, topicName, groupID string) { - config := sarama.NewConfig() - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange - config.Consumer.Offsets.Initial = sarama.OffsetOldest - config.Consumer.Return.Errors = true - - client, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create client") - defer client.Close() - - consumerGroup, err := sarama.NewConsumerGroupFromClient(groupID, client) - testutil.AssertNoError(t, err, "Failed to create consumer group") - defer consumerGroup.Close() - - handler := &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // Start consumer - go func() { - err := consumerGroup.Consume(ctx, []string{topicName}, handler) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer error: %v", err) - } - }() - - // Wait for consumer to be ready - <-handler.ready - - // Wait for assignment - select { - case partitions := <-handler.assignments: - t.Logf("Single consumer assigned partitions: %v", partitions) - if len(partitions) != 4 { - t.Errorf("Expected single consumer to get all 4 partitions, got %d", len(partitions)) - } - case <-time.After(10 * time.Second): - t.Fatal("Timeout waiting for partition assignment") - } - - // Consume some messages to verify functionality - consumedCount := 0 - for consumedCount < 4 { // At least one from each partition - select { - case msg := <-handler.messages: - t.Logf("Consumed message from partition %d: %s", msg.Partition, string(msg.Value)) - consumedCount++ - case <-time.After(5 * time.Second): - t.Logf("Consumed %d messages so far", consumedCount) - break - } - } - - if consumedCount == 0 { - t.Error("No messages consumed by single consumer") - } -} - -func testTwoConsumersRebalance(t *testing.T, addr, topicName, groupID string) { - config := sarama.NewConfig() - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange - config.Consumer.Offsets.Initial = sarama.OffsetOldest - config.Consumer.Return.Errors = true - - // Start first consumer - client1, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create client1") - defer client1.Close() - - consumerGroup1, err := sarama.NewConsumerGroupFromClient(groupID, client1) - testutil.AssertNoError(t, err, "Failed to create consumer group 1") - defer consumerGroup1.Close() - - handler1 := &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - name: "Consumer1", - } - - ctx1, cancel1 := context.WithTimeout(context.Background(), 45*time.Second) - defer cancel1() - - go func() { - err := consumerGroup1.Consume(ctx1, []string{topicName}, handler1) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer1 error: %v", err) - } - }() - - // Wait for first consumer to be ready and get initial assignment - <-handler1.ready - select { - case partitions := <-handler1.assignments: - t.Logf("Consumer1 initial assignment: %v", partitions) - if len(partitions) != 4 { - t.Errorf("Expected Consumer1 to initially get all 4 partitions, got %d", len(partitions)) - } - case <-time.After(10 * time.Second): - t.Fatal("Timeout waiting for Consumer1 initial assignment") - } - - // Start second consumer - client2, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create client2") - defer client2.Close() - - consumerGroup2, err := sarama.NewConsumerGroupFromClient(groupID, client2) - testutil.AssertNoError(t, err, "Failed to create consumer group 2") - defer consumerGroup2.Close() - - handler2 := &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - name: "Consumer2", - } - - ctx2, cancel2 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel2() - - go func() { - err := consumerGroup2.Consume(ctx2, []string{topicName}, handler2) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer2 error: %v", err) - } - }() - - // Wait for second consumer to be ready - <-handler2.ready - - // Wait for rebalancing to occur - both consumers should get new assignments - var rebalancedAssignment1, rebalancedAssignment2 []int32 - - // Consumer1 should get a rebalance assignment - select { - case partitions := <-handler1.assignments: - rebalancedAssignment1 = partitions - t.Logf("Consumer1 rebalanced assignment: %v", partitions) - case <-time.After(15 * time.Second): - t.Error("Timeout waiting for Consumer1 rebalance assignment") - } - - // Consumer2 should get its assignment - select { - case partitions := <-handler2.assignments: - rebalancedAssignment2 = partitions - t.Logf("Consumer2 assignment: %v", partitions) - case <-time.After(15 * time.Second): - t.Error("Timeout waiting for Consumer2 assignment") - } - - // Verify rebalancing occurred correctly - totalPartitions := len(rebalancedAssignment1) + len(rebalancedAssignment2) - if totalPartitions != 4 { - t.Errorf("Expected total of 4 partitions assigned, got %d", totalPartitions) - } - - // Each consumer should have at least 1 partition, and no more than 3 - if len(rebalancedAssignment1) == 0 || len(rebalancedAssignment1) > 3 { - t.Errorf("Consumer1 should have 1-3 partitions, got %d", len(rebalancedAssignment1)) - } - if len(rebalancedAssignment2) == 0 || len(rebalancedAssignment2) > 3 { - t.Errorf("Consumer2 should have 1-3 partitions, got %d", len(rebalancedAssignment2)) - } - - // Verify no partition overlap - partitionSet := make(map[int32]bool) - for _, p := range rebalancedAssignment1 { - if partitionSet[p] { - t.Errorf("Partition %d assigned to multiple consumers", p) - } - partitionSet[p] = true - } - for _, p := range rebalancedAssignment2 { - if partitionSet[p] { - t.Errorf("Partition %d assigned to multiple consumers", p) - } - partitionSet[p] = true - } - - t.Logf("Rebalancing test completed successfully") -} - -func testConsumerLeaveRebalance(t *testing.T, addr, topicName, groupID string) { - config := sarama.NewConfig() - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange - config.Consumer.Offsets.Initial = sarama.OffsetOldest - config.Consumer.Return.Errors = true - - // Start two consumers - client1, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create client1") - defer client1.Close() - - client2, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create client2") - defer client2.Close() - - consumerGroup1, err := sarama.NewConsumerGroupFromClient(groupID, client1) - testutil.AssertNoError(t, err, "Failed to create consumer group 1") - defer consumerGroup1.Close() - - consumerGroup2, err := sarama.NewConsumerGroupFromClient(groupID, client2) - testutil.AssertNoError(t, err, "Failed to create consumer group 2") - - handler1 := &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - name: "Consumer1", - } - - handler2 := &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - name: "Consumer2", - } - - ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel1() - - ctx2, cancel2 := context.WithTimeout(context.Background(), 30*time.Second) - - // Start both consumers - go func() { - err := consumerGroup1.Consume(ctx1, []string{topicName}, handler1) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer1 error: %v", err) - } - }() - - go func() { - err := consumerGroup2.Consume(ctx2, []string{topicName}, handler2) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer2 error: %v", err) - } - }() - - // Wait for both consumers to be ready - <-handler1.ready - <-handler2.ready - - // Wait for initial assignments - <-handler1.assignments - <-handler2.assignments - - t.Logf("Both consumers started, now stopping Consumer2") - - // Stop second consumer (simulate leave) - cancel2() - consumerGroup2.Close() - - // Wait for Consumer1 to get rebalanced assignment (should get all partitions) - select { - case partitions := <-handler1.assignments: - t.Logf("Consumer1 rebalanced assignment after Consumer2 left: %v", partitions) - if len(partitions) != 4 { - t.Errorf("Expected Consumer1 to get all 4 partitions after Consumer2 left, got %d", len(partitions)) - } - case <-time.After(20 * time.Second): - t.Error("Timeout waiting for Consumer1 rebalance after Consumer2 left") - } - - t.Logf("Consumer leave rebalancing test completed successfully") -} - -func testMultipleConsumersJoin(t *testing.T, addr, topicName, groupID string) { - config := sarama.NewConfig() - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange - config.Consumer.Offsets.Initial = sarama.OffsetOldest - config.Consumer.Return.Errors = true - - numConsumers := 4 - consumers := make([]sarama.ConsumerGroup, numConsumers) - clients := make([]sarama.Client, numConsumers) - handlers := make([]*RebalanceTestHandler, numConsumers) - contexts := make([]context.Context, numConsumers) - cancels := make([]context.CancelFunc, numConsumers) - - // Start all consumers simultaneously - for i := 0; i < numConsumers; i++ { - client, err := sarama.NewClient([]string{addr}, config) - testutil.AssertNoError(t, err, fmt.Sprintf("Failed to create client%d", i)) - clients[i] = client - - consumerGroup, err := sarama.NewConsumerGroupFromClient(groupID, client) - testutil.AssertNoError(t, err, fmt.Sprintf("Failed to create consumer group %d", i)) - consumers[i] = consumerGroup - - handlers[i] = &RebalanceTestHandler{ - messages: make(chan *sarama.ConsumerMessage, 20), - ready: make(chan bool), - assignments: make(chan []int32, 5), - t: t, - name: fmt.Sprintf("Consumer%d", i), - } - - contexts[i], cancels[i] = context.WithTimeout(context.Background(), 45*time.Second) - - go func(idx int) { - err := consumers[idx].Consume(contexts[idx], []string{topicName}, handlers[idx]) - if err != nil && err != context.DeadlineExceeded { - t.Logf("Consumer%d error: %v", idx, err) - } - }(i) - } - - // Cleanup - defer func() { - for i := 0; i < numConsumers; i++ { - cancels[i]() - consumers[i].Close() - clients[i].Close() - } - }() - - // Wait for all consumers to be ready - for i := 0; i < numConsumers; i++ { - select { - case <-handlers[i].ready: - t.Logf("Consumer%d ready", i) - case <-time.After(15 * time.Second): - t.Fatalf("Timeout waiting for Consumer%d to be ready", i) - } - } - - // Collect final assignments from all consumers - assignments := make([][]int32, numConsumers) - for i := 0; i < numConsumers; i++ { - select { - case partitions := <-handlers[i].assignments: - assignments[i] = partitions - t.Logf("Consumer%d final assignment: %v", i, partitions) - case <-time.After(20 * time.Second): - t.Errorf("Timeout waiting for Consumer%d assignment", i) - } - } - - // Verify all partitions are assigned exactly once - assignedPartitions := make(map[int32]int) - totalAssigned := 0 - for i, assignment := range assignments { - totalAssigned += len(assignment) - for _, partition := range assignment { - assignedPartitions[partition]++ - if assignedPartitions[partition] > 1 { - t.Errorf("Partition %d assigned to multiple consumers", partition) - } - } - - // Each consumer should get exactly 1 partition (4 partitions / 4 consumers) - if len(assignment) != 1 { - t.Errorf("Consumer%d should get exactly 1 partition, got %d", i, len(assignment)) - } - } - - if totalAssigned != 4 { - t.Errorf("Expected 4 total partitions assigned, got %d", totalAssigned) - } - - // Verify all partitions 0-3 are assigned - for i := int32(0); i < 4; i++ { - if assignedPartitions[i] != 1 { - t.Errorf("Partition %d assigned %d times, expected 1", i, assignedPartitions[i]) - } - } - - t.Logf("Multiple consumers join test completed successfully") -} - -// RebalanceTestHandler implements sarama.ConsumerGroupHandler with rebalancing awareness -type RebalanceTestHandler struct { - messages chan *sarama.ConsumerMessage - ready chan bool - assignments chan []int32 - readyOnce sync.Once - t *testing.T - name string -} - -func (h *RebalanceTestHandler) Setup(session sarama.ConsumerGroupSession) error { - h.t.Logf("%s: Consumer group session setup", h.name) - h.readyOnce.Do(func() { - close(h.ready) - }) - - // Send partition assignment - partitions := make([]int32, 0) - for topic, partitionList := range session.Claims() { - h.t.Logf("%s: Assigned topic %s with partitions %v", h.name, topic, partitionList) - for _, partition := range partitionList { - partitions = append(partitions, partition) - } - } - - select { - case h.assignments <- partitions: - default: - // Channel might be full, that's ok - } - - return nil -} - -func (h *RebalanceTestHandler) Cleanup(sarama.ConsumerGroupSession) error { - h.t.Logf("%s: Consumer group session cleanup", h.name) - return nil -} - -func (h *RebalanceTestHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - for { - select { - case message := <-claim.Messages(): - if message == nil { - return nil - } - h.t.Logf("%s: Received message from partition %d: %s", h.name, message.Partition, string(message.Value)) - select { - case h.messages <- message: - default: - // Channel full, drop message for test - } - session.MarkMessage(message, "") - case <-session.Context().Done(): - return nil - } - } -} diff --git a/test/kafka/integration/schema_end_to_end_test.go b/test/kafka/integration/schema_end_to_end_test.go deleted file mode 100644 index 414056dd0..000000000 --- a/test/kafka/integration/schema_end_to_end_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package integration - -import ( - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" -) - -// TestSchemaEndToEnd_AvroRoundTrip tests the complete Avro schema round-trip workflow -func TestSchemaEndToEnd_AvroRoundTrip(t *testing.T) { - // Create mock schema registry - server := createMockSchemaRegistryForE2E(t) - defer server.Close() - - // Create schema manager - config := schema.ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: schema.ValidationPermissive, - } - manager, err := schema.NewManager(config) - require.NoError(t, err) - - // Test data - avroSchema := getUserAvroSchemaForE2E() - testData := map[string]interface{}{ - "id": int32(12345), - "name": "Alice Johnson", - "email": map[string]interface{}{"string": "alice@example.com"}, // Avro union - "age": map[string]interface{}{"int": int32(28)}, // Avro union - "preferences": map[string]interface{}{ - "Preferences": map[string]interface{}{ // Avro union with record type - "notifications": true, - "theme": "dark", - }, - }, - } - - t.Run("SchemaManagerRoundTrip", func(t *testing.T) { - // Step 1: Create Confluent envelope (simulate producer) - codec, err := goavro.NewCodec(avroSchema) - require.NoError(t, err) - - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - - confluentMsg := schema.CreateConfluentEnvelope(schema.FormatAvro, 1, nil, avroBinary) - require.True(t, len(confluentMsg) > 0, "Confluent envelope should not be empty") - - t.Logf("Created Confluent envelope: %d bytes", len(confluentMsg)) - - // Step 2: Decode message using schema manager - decodedMsg, err := manager.DecodeMessage(confluentMsg) - require.NoError(t, err) - require.NotNil(t, decodedMsg.RecordValue, "RecordValue should not be nil") - - t.Logf("Decoded message with schema ID %d, format %v", decodedMsg.SchemaID, decodedMsg.SchemaFormat) - - // Step 3: Re-encode message using schema manager - reconstructedMsg, err := manager.EncodeMessage(decodedMsg.RecordValue, 1, schema.FormatAvro) - require.NoError(t, err) - require.True(t, len(reconstructedMsg) > 0, "Reconstructed message should not be empty") - - t.Logf("Re-encoded message: %d bytes", len(reconstructedMsg)) - - // Step 4: Verify the reconstructed message is a valid Confluent envelope - envelope, ok := schema.ParseConfluentEnvelope(reconstructedMsg) - require.True(t, ok, "Reconstructed message should be a valid Confluent envelope") - require.Equal(t, uint32(1), envelope.SchemaID, "Schema ID should match") - require.Equal(t, schema.FormatAvro, envelope.Format, "Schema format should be Avro") - - // Step 5: Decode and verify the content - decodedNative, _, err := codec.NativeFromBinary(envelope.Payload) - require.NoError(t, err) - - decodedMap, ok := decodedNative.(map[string]interface{}) - require.True(t, ok, "Decoded data should be a map") - - // Verify all fields - assert.Equal(t, int32(12345), decodedMap["id"]) - assert.Equal(t, "Alice Johnson", decodedMap["name"]) - - // Verify union fields - emailUnion, ok := decodedMap["email"].(map[string]interface{}) - require.True(t, ok, "Email should be a union") - assert.Equal(t, "alice@example.com", emailUnion["string"]) - - ageUnion, ok := decodedMap["age"].(map[string]interface{}) - require.True(t, ok, "Age should be a union") - assert.Equal(t, int32(28), ageUnion["int"]) - - preferencesUnion, ok := decodedMap["preferences"].(map[string]interface{}) - require.True(t, ok, "Preferences should be a union") - preferencesRecord, ok := preferencesUnion["Preferences"].(map[string]interface{}) - require.True(t, ok, "Preferences should contain a record") - assert.Equal(t, true, preferencesRecord["notifications"]) - assert.Equal(t, "dark", preferencesRecord["theme"]) - - t.Log("Successfully completed Avro schema round-trip test") - }) -} - -// TestSchemaEndToEnd_ProtobufRoundTrip tests the complete Protobuf schema round-trip workflow -func TestSchemaEndToEnd_ProtobufRoundTrip(t *testing.T) { - t.Run("ProtobufEnvelopeCreation", func(t *testing.T) { - // Create a simple Protobuf message (simulated) - // In a real scenario, this would be generated from a .proto file - protobufData := []byte{0x08, 0x96, 0x01, 0x12, 0x04, 0x74, 0x65, 0x73, 0x74} // id=150, name="test" - - // Create Confluent envelope with Protobuf format - confluentMsg := schema.CreateConfluentEnvelope(schema.FormatProtobuf, 2, []int{0}, protobufData) - require.True(t, len(confluentMsg) > 0, "Confluent envelope should not be empty") - - t.Logf("Created Protobuf Confluent envelope: %d bytes", len(confluentMsg)) - - // Verify Confluent envelope - envelope, ok := schema.ParseConfluentEnvelope(confluentMsg) - require.True(t, ok, "Message should be a valid Confluent envelope") - require.Equal(t, uint32(2), envelope.SchemaID, "Schema ID should match") - // Note: ParseConfluentEnvelope defaults to FormatAvro; format detection requires schema registry - require.Equal(t, schema.FormatAvro, envelope.Format, "Format defaults to Avro without schema registry lookup") - - // For Protobuf with indexes, we need to use the specialized parser - protobufEnvelope, ok := schema.ParseConfluentProtobufEnvelopeWithIndexCount(confluentMsg, 1) - require.True(t, ok, "Message should be a valid Protobuf envelope") - require.Equal(t, uint32(2), protobufEnvelope.SchemaID, "Schema ID should match") - require.Equal(t, schema.FormatProtobuf, protobufEnvelope.Format, "Schema format should be Protobuf") - require.Equal(t, []int{0}, protobufEnvelope.Indexes, "Indexes should match") - require.Equal(t, protobufData, protobufEnvelope.Payload, "Payload should match") - - t.Log("Successfully completed Protobuf envelope test") - }) -} - -// TestSchemaEndToEnd_JSONSchemaRoundTrip tests the complete JSON Schema round-trip workflow -func TestSchemaEndToEnd_JSONSchemaRoundTrip(t *testing.T) { - t.Run("JSONSchemaEnvelopeCreation", func(t *testing.T) { - // Create JSON data - jsonData := []byte(`{"id": 123, "name": "Bob Smith", "active": true}`) - - // Create Confluent envelope with JSON Schema format - confluentMsg := schema.CreateConfluentEnvelope(schema.FormatJSONSchema, 3, nil, jsonData) - require.True(t, len(confluentMsg) > 0, "Confluent envelope should not be empty") - - t.Logf("Created JSON Schema Confluent envelope: %d bytes", len(confluentMsg)) - - // Verify Confluent envelope - envelope, ok := schema.ParseConfluentEnvelope(confluentMsg) - require.True(t, ok, "Message should be a valid Confluent envelope") - require.Equal(t, uint32(3), envelope.SchemaID, "Schema ID should match") - // Note: ParseConfluentEnvelope defaults to FormatAvro; format detection requires schema registry - require.Equal(t, schema.FormatAvro, envelope.Format, "Format defaults to Avro without schema registry lookup") - - // Verify JSON content - assert.JSONEq(t, string(jsonData), string(envelope.Payload), "JSON payload should match") - - t.Log("Successfully completed JSON Schema envelope test") - }) -} - -// TestSchemaEndToEnd_CompressionAndBatching tests schema handling with compression and batching -func TestSchemaEndToEnd_CompressionAndBatching(t *testing.T) { - // Create mock schema registry - server := createMockSchemaRegistryForE2E(t) - defer server.Close() - - // Create schema manager - config := schema.ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: schema.ValidationPermissive, - } - manager, err := schema.NewManager(config) - require.NoError(t, err) - - t.Run("BatchedSchematizedMessages", func(t *testing.T) { - // Create multiple messages - avroSchema := getUserAvroSchemaForE2E() - codec, err := goavro.NewCodec(avroSchema) - require.NoError(t, err) - - messageCount := 5 - var confluentMessages [][]byte - - // Create multiple Confluent envelopes - for i := 0; i < messageCount; i++ { - testData := map[string]interface{}{ - "id": int32(1000 + i), - "name": fmt.Sprintf("User %d", i), - "email": map[string]interface{}{"string": fmt.Sprintf("user%d@example.com", i)}, - "age": map[string]interface{}{"int": int32(20 + i)}, - "preferences": map[string]interface{}{ - "Preferences": map[string]interface{}{ - "notifications": i%2 == 0, // Alternate true/false - "theme": "light", - }, - }, - } - - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - - confluentMsg := schema.CreateConfluentEnvelope(schema.FormatAvro, 1, nil, avroBinary) - confluentMessages = append(confluentMessages, confluentMsg) - } - - t.Logf("Created %d schematized messages", messageCount) - - // Test round-trip for each message - for i, confluentMsg := range confluentMessages { - // Decode message - decodedMsg, err := manager.DecodeMessage(confluentMsg) - require.NoError(t, err, "Message %d should decode", i) - - // Re-encode message - reconstructedMsg, err := manager.EncodeMessage(decodedMsg.RecordValue, 1, schema.FormatAvro) - require.NoError(t, err, "Message %d should re-encode", i) - - // Verify envelope - envelope, ok := schema.ParseConfluentEnvelope(reconstructedMsg) - require.True(t, ok, "Message %d should be a valid Confluent envelope", i) - require.Equal(t, uint32(1), envelope.SchemaID, "Message %d schema ID should match", i) - - // Decode and verify content - decodedNative, _, err := codec.NativeFromBinary(envelope.Payload) - require.NoError(t, err, "Message %d should decode successfully", i) - - decodedMap, ok := decodedNative.(map[string]interface{}) - require.True(t, ok, "Message %d should be a map", i) - - expectedID := int32(1000 + i) - assert.Equal(t, expectedID, decodedMap["id"], "Message %d ID should match", i) - assert.Equal(t, fmt.Sprintf("User %d", i), decodedMap["name"], "Message %d name should match", i) - } - - t.Log("Successfully verified batched schematized messages") - }) -} - -// Helper functions for creating mock schema registries - -func createMockSchemaRegistryForE2E(t *testing.T) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/schemas/ids/1": - response := map[string]interface{}{ - "schema": getUserAvroSchemaForE2E(), - "subject": "user-events-e2e-value", - "version": 1, - } - writeJSONResponse(w, response) - case "/subjects/user-events-e2e-value/versions/latest": - response := map[string]interface{}{ - "id": 1, - "schema": getUserAvroSchemaForE2E(), - "subject": "user-events-e2e-value", - "version": 1, - } - writeJSONResponse(w, response) - default: - w.WriteHeader(http.StatusNotFound) - } - })) -} - - -func getUserAvroSchemaForE2E() string { - return `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null}, - {"name": "age", "type": ["null", "int"], "default": null}, - {"name": "preferences", "type": ["null", { - "type": "record", - "name": "Preferences", - "fields": [ - {"name": "notifications", "type": "boolean", "default": true}, - {"name": "theme", "type": "string", "default": "light"} - ] - }], "default": null} - ] - }` -} - -func writeJSONResponse(w http.ResponseWriter, data interface{}) { - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(data); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} diff --git a/test/kafka/integration/schema_registry_test.go b/test/kafka/integration/schema_registry_test.go deleted file mode 100644 index 9f6d32849..000000000 --- a/test/kafka/integration/schema_registry_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package integration - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestSchemaRegistryEventualConsistency reproduces the issue where schemas -// are registered successfully but are not immediately queryable due to -// Schema Registry's consumer lag -func TestSchemaRegistryEventualConsistency(t *testing.T) { - // This test requires real SMQ backend - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQRequired) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - t.Logf("Gateway running on %s", addr) - - // Schema Registry URL from environment or default - schemaRegistryURL := "http://localhost:8081" - - // Wait for Schema Registry to be ready - if !waitForSchemaRegistry(t, schemaRegistryURL, 30*time.Second) { - t.Fatal("Schema Registry not ready") - } - - // Define test schemas - valueSchema := `{"type":"record","name":"TestMessage","fields":[{"name":"id","type":"string"}]}` - keySchema := `{"type":"string"}` - - // Register multiple schemas rapidly (simulates the load test scenario) - subjects := []string{ - "test-topic-0-value", - "test-topic-0-key", - "test-topic-1-value", - "test-topic-1-key", - "test-topic-2-value", - "test-topic-2-key", - "test-topic-3-value", - "test-topic-3-key", - } - - t.Log("Registering schemas rapidly...") - registeredIDs := make(map[string]int) - for _, subject := range subjects { - schema := valueSchema - if strings.HasSuffix(subject, "-key") { - schema = keySchema - } - - id, err := registerSchema(schemaRegistryURL, subject, schema) - if err != nil { - t.Fatalf("Failed to register schema for %s: %v", subject, err) - } - registeredIDs[subject] = id - t.Logf("Registered %s with ID %d", subject, id) - } - - t.Log("All schemas registered successfully!") - - // Now immediately try to verify them (this reproduces the bug) - t.Log("Immediately verifying schemas (without delay)...") - immediateFailures := 0 - for _, subject := range subjects { - exists, id, version, err := verifySchema(schemaRegistryURL, subject) - if err != nil || !exists { - immediateFailures++ - t.Logf("Immediate verification failed for %s: exists=%v id=%d err=%v", subject, exists, id, err) - } else { - t.Logf("Immediate verification passed for %s: ID=%d Version=%d", subject, id, version) - } - } - - if immediateFailures > 0 { - t.Logf("BUG REPRODUCED: %d/%d schemas not immediately queryable after registration", - immediateFailures, len(subjects)) - t.Logf(" This is due to Schema Registry's KafkaStoreReaderThread lag") - } - - // Now verify with retry logic (this should succeed) - t.Log("Verifying schemas with retry logic...") - for _, subject := range subjects { - expectedID := registeredIDs[subject] - if !verifySchemaWithRetry(t, schemaRegistryURL, subject, expectedID, 5*time.Second) { - t.Errorf("Failed to verify %s even with retry", subject) - } - } - - t.Log("โœ“ All schemas verified successfully with retry logic!") -} - -// registerSchema registers a schema and returns its ID -func registerSchema(registryURL, subject, schema string) (int, error) { - // Escape the schema JSON - escapedSchema, err := json.Marshal(schema) - if err != nil { - return 0, err - } - - payload := fmt.Sprintf(`{"schema":%s,"schemaType":"AVRO"}`, escapedSchema) - - resp, err := http.Post( - fmt.Sprintf("%s/subjects/%s/versions", registryURL, subject), - "application/vnd.schemaregistry.v1+json", - strings.NewReader(payload), - ) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - body, _ := io.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - return 0, fmt.Errorf("registration failed: %s - %s", resp.Status, string(body)) - } - - var result struct { - ID int `json:"id"` - } - if err := json.Unmarshal(body, &result); err != nil { - return 0, err - } - - return result.ID, nil -} - -// verifySchema checks if a schema exists -func verifySchema(registryURL, subject string) (exists bool, id int, version int, err error) { - resp, err := http.Get(fmt.Sprintf("%s/subjects/%s/versions/latest", registryURL, subject)) - if err != nil { - return false, 0, 0, err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return false, 0, 0, nil - } - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return false, 0, 0, fmt.Errorf("verification failed: %s - %s", resp.Status, string(body)) - } - - var result struct { - ID int `json:"id"` - Version int `json:"version"` - Schema string `json:"schema"` - } - body, _ := io.ReadAll(resp.Body) - if err := json.Unmarshal(body, &result); err != nil { - return false, 0, 0, err - } - - return true, result.ID, result.Version, nil -} - -// verifySchemaWithRetry verifies a schema with retry logic -func verifySchemaWithRetry(t *testing.T, registryURL, subject string, expectedID int, timeout time.Duration) bool { - deadline := time.Now().Add(timeout) - attempt := 0 - - for time.Now().Before(deadline) { - attempt++ - exists, id, version, err := verifySchema(registryURL, subject) - - if err == nil && exists && id == expectedID { - if attempt > 1 { - t.Logf("โœ“ %s verified after %d attempts (ID=%d, Version=%d)", subject, attempt, id, version) - } - return true - } - - // Wait before retry (exponential backoff) - waitTime := time.Duration(attempt*100) * time.Millisecond - if waitTime > 1*time.Second { - waitTime = 1 * time.Second - } - time.Sleep(waitTime) - } - - t.Logf("%s verification timed out after %d attempts", subject, attempt) - return false -} - -// waitForSchemaRegistry waits for Schema Registry to be ready -func waitForSchemaRegistry(t *testing.T, url string, timeout time.Duration) bool { - deadline := time.Now().Add(timeout) - - for time.Now().Before(deadline) { - resp, err := http.Get(url + "/subjects") - if err == nil && resp.StatusCode == http.StatusOK { - resp.Body.Close() - return true - } - if resp != nil { - resp.Body.Close() - } - time.Sleep(500 * time.Millisecond) - } - - return false -} diff --git a/test/kafka/integration/smq_integration_test.go b/test/kafka/integration/smq_integration_test.go deleted file mode 100644 index f0c140178..000000000 --- a/test/kafka/integration/smq_integration_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package integration - -import ( - "context" - "testing" - "time" - - "github.com/IBM/sarama" - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestSMQIntegration tests that the Kafka gateway properly integrates with SeaweedMQ -// This test REQUIRES SeaweedFS masters to be running and will skip if not available -func TestSMQIntegration(t *testing.T) { - // This test requires SMQ to be available - gateway := testutil.NewGatewayTestServerWithSMQ(t, testutil.SMQRequired) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - - t.Logf("Running SMQ integration test with SeaweedFS backend") - - t.Run("ProduceConsumeWithPersistence", func(t *testing.T) { - testProduceConsumeWithPersistence(t, addr) - }) - - t.Run("ConsumerGroupOffsetPersistence", func(t *testing.T) { - testConsumerGroupOffsetPersistence(t, addr) - }) - - t.Run("TopicPersistence", func(t *testing.T) { - testTopicPersistence(t, addr) - }) -} - -func testProduceConsumeWithPersistence(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("smq-integration-produce-consume") - - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Create topic - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - // Allow time for topic to propagate in SMQ backend - time.Sleep(500 * time.Millisecond) - - // Produce messages - messages := msgGen.GenerateStringMessages(5) - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - // Allow time for messages to be fully persisted in SMQ backend - time.Sleep(200 * time.Millisecond) - - t.Logf("Produced %d messages to topic %s", len(messages), topicName) - - // Consume messages - consumed, err := client.ConsumeMessages(topicName, 0, len(messages)) - testutil.AssertNoError(t, err, "Failed to consume messages") - - // Verify all messages were consumed - testutil.AssertEqual(t, len(messages), len(consumed), "Message count mismatch") - - t.Logf("Successfully consumed %d messages from SMQ backend", len(consumed)) -} - -func testConsumerGroupOffsetPersistence(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("smq-integration-offset-persistence") - groupID := testutil.GenerateUniqueGroupID("smq-offset-group") - - client := testutil.NewSaramaClient(t, addr) - msgGen := testutil.NewMessageGenerator() - - // Create topic and produce messages - err := client.CreateTopic(topicName, 1, 1) - testutil.AssertNoError(t, err, "Failed to create topic") - - // Allow time for topic to propagate in SMQ backend - time.Sleep(500 * time.Millisecond) - - messages := msgGen.GenerateStringMessages(10) - err = client.ProduceMessages(topicName, messages) - testutil.AssertNoError(t, err, "Failed to produce messages") - - // Allow time for messages to be fully persisted in SMQ backend - time.Sleep(200 * time.Millisecond) - - // Phase 1: Consume first 5 messages with consumer group and commit offsets - t.Logf("Phase 1: Consuming first 5 messages and committing offsets") - - config := client.GetConfig() - config.Consumer.Offsets.Initial = sarama.OffsetOldest - // Enable auto-commit for more reliable offset handling - config.Consumer.Offsets.AutoCommit.Enable = true - config.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second - - consumerGroup1, err := sarama.NewConsumerGroup([]string{addr}, groupID, config) - testutil.AssertNoError(t, err, "Failed to create first consumer group") - - handler := &SMQOffsetTestHandler{ - messages: make(chan *sarama.ConsumerMessage, len(messages)), - ready: make(chan bool), - stopAfter: 5, - t: t, - } - - ctx1, cancel1 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel1() - - consumeErrChan1 := make(chan error, 1) - go func() { - err := consumerGroup1.Consume(ctx1, []string{topicName}, handler) - if err != nil && err != context.DeadlineExceeded && err != context.Canceled { - t.Logf("First consumer error: %v", err) - consumeErrChan1 <- err - } - }() - - // Wait for consumer to be ready with timeout - select { - case <-handler.ready: - // Consumer is ready, continue - case err := <-consumeErrChan1: - t.Fatalf("First consumer failed to start: %v", err) - case <-time.After(10 * time.Second): - t.Fatalf("Timeout waiting for first consumer to be ready") - } - consumedCount := 0 - for consumedCount < 5 { - select { - case <-handler.messages: - consumedCount++ - case <-time.After(20 * time.Second): - t.Fatalf("Timeout waiting for first batch of messages. Got %d/5", consumedCount) - } - } - - consumerGroup1.Close() - cancel1() - time.Sleep(7 * time.Second) // Allow auto-commit to complete and offset commits to be processed in SMQ - - t.Logf("Consumed %d messages in first phase", consumedCount) - - // Phase 2: Start new consumer group with same ID - should resume from committed offset - t.Logf("Phase 2: Starting new consumer group to test offset persistence") - - // Create a fresh config for the second consumer group to avoid any state issues - config2 := client.GetConfig() - config2.Consumer.Offsets.Initial = sarama.OffsetOldest - config2.Consumer.Offsets.AutoCommit.Enable = true - config2.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second - - consumerGroup2, err := sarama.NewConsumerGroup([]string{addr}, groupID, config2) - testutil.AssertNoError(t, err, "Failed to create second consumer group") - defer consumerGroup2.Close() - - handler2 := &SMQOffsetTestHandler{ - messages: make(chan *sarama.ConsumerMessage, len(messages)), - ready: make(chan bool), - stopAfter: 5, // Should consume remaining 5 messages - t: t, - } - - ctx2, cancel2 := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel2() - - consumeErrChan := make(chan error, 1) - go func() { - err := consumerGroup2.Consume(ctx2, []string{topicName}, handler2) - if err != nil && err != context.DeadlineExceeded && err != context.Canceled { - t.Logf("Second consumer error: %v", err) - consumeErrChan <- err - } - }() - - // Wait for second consumer to be ready with timeout - select { - case <-handler2.ready: - // Consumer is ready, continue - case err := <-consumeErrChan: - t.Fatalf("Second consumer failed to start: %v", err) - case <-time.After(10 * time.Second): - t.Fatalf("Timeout waiting for second consumer to be ready") - } - secondConsumerMessages := make([]*sarama.ConsumerMessage, 0) - consumedCount = 0 - for consumedCount < 5 { - select { - case msg := <-handler2.messages: - consumedCount++ - secondConsumerMessages = append(secondConsumerMessages, msg) - case <-time.After(20 * time.Second): - t.Fatalf("Timeout waiting for second batch of messages. Got %d/5", consumedCount) - } - } - - // Verify second consumer started from correct offset (should be >= 5) - if len(secondConsumerMessages) > 0 { - firstMessageOffset := secondConsumerMessages[0].Offset - if firstMessageOffset < 5 { - t.Fatalf("Second consumer should start from offset >= 5: got %d", firstMessageOffset) - } - t.Logf("Second consumer correctly resumed from offset %d", firstMessageOffset) - } - - t.Logf("Successfully verified SMQ offset persistence") -} - -func testTopicPersistence(t *testing.T, addr string) { - topicName := testutil.GenerateUniqueTopicName("smq-integration-topic-persistence") - - client := testutil.NewSaramaClient(t, addr) - - // Create topic - err := client.CreateTopic(topicName, 2, 1) // 2 partitions - testutil.AssertNoError(t, err, "Failed to create topic") - - // Allow time for topic to propagate and persist in SMQ backend - time.Sleep(1 * time.Second) - - // Verify topic exists by listing topics using admin client - config := client.GetConfig() - config.Admin.Timeout = 30 * time.Second - - admin, err := sarama.NewClusterAdmin([]string{addr}, config) - testutil.AssertNoError(t, err, "Failed to create admin client") - defer admin.Close() - - // Retry topic listing to handle potential delays in topic propagation - var topics map[string]sarama.TopicDetail - var listErr error - for attempt := 0; attempt < 3; attempt++ { - if attempt > 0 { - sleepDuration := time.Duration(500*(1<<(attempt-1))) * time.Millisecond - t.Logf("Retrying ListTopics after %v (attempt %d/3)", sleepDuration, attempt+1) - time.Sleep(sleepDuration) - } - - topics, listErr = admin.ListTopics() - if listErr == nil { - break - } - } - testutil.AssertNoError(t, listErr, "Failed to list topics") - - topicDetails, exists := topics[topicName] - if !exists { - t.Fatalf("Topic %s not found in topic list", topicName) - } - - if topicDetails.NumPartitions != 2 { - t.Errorf("Expected 2 partitions, got %d", topicDetails.NumPartitions) - } - - t.Logf("Successfully verified topic persistence with %d partitions", topicDetails.NumPartitions) -} - -// SMQOffsetTestHandler implements sarama.ConsumerGroupHandler for SMQ offset testing -type SMQOffsetTestHandler struct { - messages chan *sarama.ConsumerMessage - ready chan bool - readyOnce bool - stopAfter int - consumed int - t *testing.T -} - -func (h *SMQOffsetTestHandler) Setup(sarama.ConsumerGroupSession) error { - h.t.Logf("SMQ offset test consumer setup") - if !h.readyOnce { - close(h.ready) - h.readyOnce = true - } - return nil -} - -func (h *SMQOffsetTestHandler) Cleanup(sarama.ConsumerGroupSession) error { - h.t.Logf("SMQ offset test consumer cleanup") - return nil -} - -func (h *SMQOffsetTestHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - for { - select { - case message := <-claim.Messages(): - if message == nil { - return nil - } - h.consumed++ - h.messages <- message - session.MarkMessage(message, "") - - // Stop after consuming the specified number of messages - if h.consumed >= h.stopAfter { - h.t.Logf("Stopping SMQ consumer after %d messages", h.consumed) - // Auto-commit will handle offset commits automatically - return nil - } - case <-session.Context().Done(): - return nil - } - } -} diff --git a/test/kafka/internal/testutil/assertions.go b/test/kafka/internal/testutil/assertions.go deleted file mode 100644 index 605c61f8e..000000000 --- a/test/kafka/internal/testutil/assertions.go +++ /dev/null @@ -1,150 +0,0 @@ -package testutil - -import ( - "fmt" - "testing" - "time" -) - -// AssertEventually retries an assertion until it passes or times out -func AssertEventually(t *testing.T, assertion func() error, timeout time.Duration, interval time.Duration, msgAndArgs ...interface{}) { - t.Helper() - - deadline := time.Now().Add(timeout) - var lastErr error - - for time.Now().Before(deadline) { - if err := assertion(); err == nil { - return // Success - } else { - lastErr = err - } - time.Sleep(interval) - } - - // Format the failure message - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "assertion failed" - } - - t.Fatalf("%s after %v: %v", msg, timeout, lastErr) -} - -// AssertNoError fails the test if err is not nil -func AssertNoError(t *testing.T, err error, msgAndArgs ...interface{}) { - t.Helper() - if err != nil { - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "unexpected error" - } - t.Fatalf("%s: %v", msg, err) - } -} - -// AssertError fails the test if err is nil -func AssertError(t *testing.T, err error, msgAndArgs ...interface{}) { - t.Helper() - if err == nil { - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "expected error but got nil" - } - t.Fatal(msg) - } -} - -// AssertEqual fails the test if expected != actual -func AssertEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { - t.Helper() - if expected != actual { - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "values not equal" - } - t.Fatalf("%s: expected %v, got %v", msg, expected, actual) - } -} - -// AssertNotEqual fails the test if expected == actual -func AssertNotEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { - t.Helper() - if expected == actual { - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "values should not be equal" - } - t.Fatalf("%s: both values are %v", msg, expected) - } -} - -// AssertGreaterThan fails the test if actual <= expected -func AssertGreaterThan(t *testing.T, expected, actual int, msgAndArgs ...interface{}) { - t.Helper() - if actual <= expected { - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "value not greater than expected" - } - t.Fatalf("%s: expected > %d, got %d", msg, expected, actual) - } -} - -// AssertContains fails the test if slice doesn't contain item -func AssertContains(t *testing.T, slice []string, item string, msgAndArgs ...interface{}) { - t.Helper() - for _, s := range slice { - if s == item { - return // Found it - } - } - - var msg string - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) - } else { - msg = fmt.Sprint(msgAndArgs...) - } - } else { - msg = "item not found in slice" - } - t.Fatalf("%s: %q not found in %v", msg, item, slice) -} diff --git a/test/kafka/internal/testutil/clients.go b/test/kafka/internal/testutil/clients.go deleted file mode 100644 index 40d29b55d..000000000 --- a/test/kafka/internal/testutil/clients.go +++ /dev/null @@ -1,305 +0,0 @@ -package testutil - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/IBM/sarama" - "github.com/segmentio/kafka-go" -) - -// KafkaGoClient wraps kafka-go client with test utilities -type KafkaGoClient struct { - brokerAddr string - t *testing.T -} - -// SaramaClient wraps Sarama client with test utilities -type SaramaClient struct { - brokerAddr string - config *sarama.Config - t *testing.T -} - -// NewKafkaGoClient creates a new kafka-go test client -func NewKafkaGoClient(t *testing.T, brokerAddr string) *KafkaGoClient { - return &KafkaGoClient{ - brokerAddr: brokerAddr, - t: t, - } -} - -// NewSaramaClient creates a new Sarama test client with default config -func NewSaramaClient(t *testing.T, brokerAddr string) *SaramaClient { - config := sarama.NewConfig() - config.Version = sarama.V2_8_0_0 - config.Producer.Return.Successes = true - config.Consumer.Return.Errors = true - config.Consumer.Offsets.Initial = sarama.OffsetOldest // Start from earliest when no committed offset - - return &SaramaClient{ - brokerAddr: brokerAddr, - config: config, - t: t, - } -} - -// CreateTopic creates a topic using kafka-go -func (k *KafkaGoClient) CreateTopic(topicName string, partitions int, replicationFactor int) error { - k.t.Helper() - - conn, err := kafka.Dial("tcp", k.brokerAddr) - if err != nil { - return fmt.Errorf("dial broker: %w", err) - } - defer conn.Close() - - topicConfig := kafka.TopicConfig{ - Topic: topicName, - NumPartitions: partitions, - ReplicationFactor: replicationFactor, - } - - err = conn.CreateTopics(topicConfig) - if err != nil { - return fmt.Errorf("create topic: %w", err) - } - - k.t.Logf("Created topic %s with %d partitions", topicName, partitions) - return nil -} - -// ProduceMessages produces messages using kafka-go -func (k *KafkaGoClient) ProduceMessages(topicName string, messages []kafka.Message) error { - k.t.Helper() - - writer := &kafka.Writer{ - Addr: kafka.TCP(k.brokerAddr), - Topic: topicName, - Balancer: &kafka.LeastBytes{}, - BatchTimeout: 50 * time.Millisecond, - RequiredAcks: kafka.RequireOne, - } - defer writer.Close() - - // Increased timeout to handle slow CI environments, especially when consumer groups - // are active and holding locks or requiring offset commits - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - err := writer.WriteMessages(ctx, messages...) - if err != nil { - return fmt.Errorf("write messages: %w", err) - } - - k.t.Logf("Produced %d messages to topic %s", len(messages), topicName) - return nil -} - -// ConsumeMessages consumes messages using kafka-go -func (k *KafkaGoClient) ConsumeMessages(topicName string, expectedCount int) ([]kafka.Message, error) { - k.t.Helper() - - reader := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{k.brokerAddr}, - Topic: topicName, - Partition: 0, // Explicitly set partition 0 for simple consumption - StartOffset: kafka.FirstOffset, - MinBytes: 1, - MaxBytes: 10e6, - }) - defer reader.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - var messages []kafka.Message - for i := 0; i < expectedCount; i++ { - msg, err := reader.ReadMessage(ctx) - if err != nil { - return messages, fmt.Errorf("read message %d: %w", i, err) - } - messages = append(messages, msg) - } - - k.t.Logf("Consumed %d messages from topic %s", len(messages), topicName) - return messages, nil -} - -// ConsumeWithGroup consumes messages using consumer group -func (k *KafkaGoClient) ConsumeWithGroup(topicName, groupID string, expectedCount int) ([]kafka.Message, error) { - k.t.Helper() - - reader := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{k.brokerAddr}, - Topic: topicName, - GroupID: groupID, - MinBytes: 1, - MaxBytes: 10e6, - CommitInterval: 500 * time.Millisecond, - }) - defer reader.Close() - - // Log the initial offset position - offset := reader.Offset() - k.t.Logf("Consumer group reader created for group %s, initial offset: %d", groupID, offset) - - // Increased timeout for consumer groups - they require coordinator discovery, - // offset fetching, and offset commits which can be slow in CI environments - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - var messages []kafka.Message - for i := 0; i < expectedCount; i++ { - // Fetch then explicitly commit to better control commit timing - msg, err := reader.FetchMessage(ctx) - if err != nil { - return messages, fmt.Errorf("read message %d: %w", i, err) - } - messages = append(messages, msg) - k.t.Logf(" Fetched message %d: offset=%d, partition=%d", i, msg.Offset, msg.Partition) - - // Commit with simple retry to handle transient connection churn - var commitErr error - for attempt := 0; attempt < 3; attempt++ { - commitErr = reader.CommitMessages(ctx, msg) - if commitErr == nil { - k.t.Logf(" Committed offset %d (attempt %d)", msg.Offset, attempt+1) - break - } - k.t.Logf(" Commit attempt %d failed for offset %d: %v", attempt+1, msg.Offset, commitErr) - // brief backoff - time.Sleep(time.Duration(50*(1<= len(actual) { - return fmt.Errorf("missing message at index %d", i) - } - if actual[i] != expectedMsg { - return fmt.Errorf("message mismatch at index %d: expected %q, got %q", i, expectedMsg, actual[i]) - } - } - - return nil -} - -// ValidateKafkaGoMessageContent validates kafka-go messages -func ValidateKafkaGoMessageContent(expected, actual []kafka.Message) error { - if len(expected) != len(actual) { - return fmt.Errorf("message count mismatch: expected %d, got %d", len(expected), len(actual)) - } - - for i, expectedMsg := range expected { - if i >= len(actual) { - return fmt.Errorf("missing message at index %d", i) - } - if string(actual[i].Key) != string(expectedMsg.Key) { - return fmt.Errorf("key mismatch at index %d: expected %q, got %q", i, string(expectedMsg.Key), string(actual[i].Key)) - } - if string(actual[i].Value) != string(expectedMsg.Value) { - return fmt.Errorf("value mismatch at index %d: expected %q, got %q", i, string(expectedMsg.Value), string(actual[i].Value)) - } - } - - return nil -} diff --git a/test/kafka/internal/testutil/schema_helper.go b/test/kafka/internal/testutil/schema_helper.go deleted file mode 100644 index 868cc286b..000000000 --- a/test/kafka/internal/testutil/schema_helper.go +++ /dev/null @@ -1,33 +0,0 @@ -package testutil - -import ( - "testing" - - kschema "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" -) - -// EnsureValueSchema registers a minimal Avro value schema for the given topic if not present. -// Returns the latest schema ID if successful. -func EnsureValueSchema(t *testing.T, registryURL, topic string) (uint32, error) { - t.Helper() - subject := topic + "-value" - rc := kschema.NewRegistryClient(kschema.RegistryConfig{URL: registryURL}) - - // Minimal Avro record schema with string field "value" - schemaJSON := `{"type":"record","name":"TestRecord","fields":[{"name":"value","type":"string"}]}` - - // Try to get existing - if latest, err := rc.GetLatestSchema(subject); err == nil { - return latest.LatestID, nil - } - - // Register and fetch latest - if _, err := rc.RegisterSchema(subject, schemaJSON); err != nil { - return 0, err - } - latest, err := rc.GetLatestSchema(subject) - if err != nil { - return 0, err - } - return latest.LatestID, nil -} diff --git a/test/kafka/kafka-client-loadtest/.dockerignore b/test/kafka/kafka-client-loadtest/.dockerignore deleted file mode 100644 index 1354ab263..000000000 --- a/test/kafka/kafka-client-loadtest/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -# Keep only the Linux binaries -!weed-linux-amd64 -!weed-linux-arm64 diff --git a/test/kafka/kafka-client-loadtest/.gitignore b/test/kafka/kafka-client-loadtest/.gitignore deleted file mode 100644 index ef136a5e2..000000000 --- a/test/kafka/kafka-client-loadtest/.gitignore +++ /dev/null @@ -1,63 +0,0 @@ -# Binaries -kafka-loadtest -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool -*.out - -# Go workspace file -go.work - -# Test results and logs -test-results/ -*.log -logs/ - -# Docker volumes and data -data/ -volumes/ - -# Monitoring data -monitoring/prometheus/data/ -monitoring/grafana/data/ - -# IDE files -.vscode/ -.idea/ -*.swp -*.swo - -# OS generated files -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db - -# Environment files -.env -.env.local -.env.*.local - -# Temporary files -tmp/ -temp/ -*.tmp - -# Coverage reports -coverage.html -coverage.out - -# Build artifacts -bin/ -build/ -dist/ diff --git a/test/kafka/kafka-client-loadtest/Dockerfile.loadtest b/test/kafka/kafka-client-loadtest/Dockerfile.loadtest deleted file mode 100644 index ccf7e5e16..000000000 --- a/test/kafka/kafka-client-loadtest/Dockerfile.loadtest +++ /dev/null @@ -1,49 +0,0 @@ -# Kafka Client Load Test Runner Dockerfile -# Multi-stage build for cross-platform support - -# Stage 1: Builder -FROM golang:1.24-alpine AS builder - -WORKDIR /app - -# Copy go module files -COPY test/kafka/kafka-client-loadtest/go.mod test/kafka/kafka-client-loadtest/go.sum ./ -RUN go mod download - -# Copy source code -COPY test/kafka/kafka-client-loadtest/ ./ - -# Build the loadtest binary -RUN CGO_ENABLED=0 GOOS=linux go build -o /kafka-loadtest ./cmd/loadtest - -# Stage 2: Runtime -FROM ubuntu:22.04 - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - jq \ - bash \ - netcat \ - && rm -rf /var/lib/apt/lists/* - -# Copy built binary from builder stage -COPY --from=builder /kafka-loadtest /usr/local/bin/kafka-loadtest -RUN chmod +x /usr/local/bin/kafka-loadtest - -# Copy scripts and configuration -COPY test/kafka/kafka-client-loadtest/scripts/ /scripts/ -COPY test/kafka/kafka-client-loadtest/config/ /config/ - -# Create results directory -RUN mkdir -p /test-results - -# Make scripts executable -RUN chmod +x /scripts/*.sh - -WORKDIR /app - -# Default command runs the comprehensive load test -CMD ["/usr/local/bin/kafka-loadtest", "-config", "/config/loadtest.yaml"] - diff --git a/test/kafka/kafka-client-loadtest/Dockerfile.seaweedfs b/test/kafka/kafka-client-loadtest/Dockerfile.seaweedfs deleted file mode 100644 index cde2e3df1..000000000 --- a/test/kafka/kafka-client-loadtest/Dockerfile.seaweedfs +++ /dev/null @@ -1,37 +0,0 @@ -# SeaweedFS Runtime Dockerfile for Kafka Client Load Tests -# Optimized for fast builds - binary built locally and copied in -FROM alpine:3.18 - -# Install runtime dependencies -RUN apk add --no-cache \ - ca-certificates \ - wget \ - netcat-openbsd \ - curl \ - tzdata \ - && rm -rf /var/cache/apk/* - -# Copy pre-built SeaweedFS binary (built locally for linux/amd64 or linux/arm64) -# Cache-busting: Use build arg to force layer rebuild on every build -ARG TARGETARCH=arm64 -ARG CACHE_BUST=unknown -RUN echo "Building with cache bust: ${CACHE_BUST}" -COPY weed-linux-${TARGETARCH} /usr/local/bin/weed -RUN chmod +x /usr/local/bin/weed - -# Create data directory -RUN mkdir -p /data - -# Set timezone -ENV TZ=UTC - -# Health check script -RUN echo '#!/bin/sh' > /usr/local/bin/health-check && \ - echo 'exec "$@"' >> /usr/local/bin/health-check && \ - chmod +x /usr/local/bin/health-check - -VOLUME ["/data"] -WORKDIR /data - -ENTRYPOINT ["/usr/local/bin/weed"] - diff --git a/test/kafka/kafka-client-loadtest/Dockerfile.seektest b/test/kafka/kafka-client-loadtest/Dockerfile.seektest deleted file mode 100644 index 5ce9d9602..000000000 --- a/test/kafka/kafka-client-loadtest/Dockerfile.seektest +++ /dev/null @@ -1,20 +0,0 @@ -FROM openjdk:11-jdk-slim - -# Install Maven -RUN apt-get update && apt-get install -y maven && rm -rf /var/lib/apt/lists/* - -WORKDIR /app - -# Create source directory -RUN mkdir -p src/main/java - -# Copy source and build files -COPY SeekToBeginningTest.java src/main/java/ -COPY pom.xml . - -# Compile and package -RUN mvn clean package -DskipTests - -# Run the test -ENTRYPOINT ["java", "-cp", "target/seek-test.jar", "SeekToBeginningTest"] -CMD ["kafka-gateway:9093"] diff --git a/test/kafka/kafka-client-loadtest/Makefile b/test/kafka/kafka-client-loadtest/Makefile deleted file mode 100644 index 362b5c680..000000000 --- a/test/kafka/kafka-client-loadtest/Makefile +++ /dev/null @@ -1,446 +0,0 @@ -# Kafka Client Load Test Makefile -# Provides convenient targets for running load tests against SeaweedFS Kafka Gateway - -.PHONY: help build start stop restart clean test quick-test stress-test endurance-test monitor logs status - -# Configuration -DOCKER_COMPOSE := docker compose -PROJECT_NAME := kafka-client-loadtest -CONFIG_FILE := config/loadtest.yaml - -# Build configuration -GOARCH ?= arm64 -GOOS ?= linux - -# Default test parameters -TEST_MODE ?= comprehensive -TEST_DURATION ?= 300s -PRODUCER_COUNT ?= 10 -CONSUMER_COUNT ?= 5 -MESSAGE_RATE ?= 1000 -MESSAGE_SIZE ?= 1024 - -# Colors for output -GREEN := \033[0;32m -YELLOW := \033[0;33m -BLUE := \033[0;34m -NC := \033[0m - -help: ## Show this help message - @echo "Kafka Client Load Test Makefile" - @echo "" - @echo "Available targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " $(BLUE)%-20s$(NC) %s\n", $$1, $$2}' $(MAKEFILE_LIST) - @echo "" - @echo "Environment variables:" - @echo " TEST_MODE Test mode: producer, consumer, comprehensive (default: comprehensive)" - @echo " TEST_DURATION Test duration (default: 300s)" - @echo " PRODUCER_COUNT Number of producers (default: 10)" - @echo " CONSUMER_COUNT Number of consumers (default: 5)" - @echo " MESSAGE_RATE Messages per second per producer (default: 1000)" - @echo " MESSAGE_SIZE Message size in bytes (default: 1024)" - @echo "" - @echo "Examples:" - @echo " make test # Run default comprehensive test" - @echo " make test TEST_DURATION=10m # Run 10-minute test" - @echo " make quick-test # Run quick smoke test (rebuilds gateway)" - @echo " make stress-test # Run high-load stress test" - @echo " make test TEST_MODE=producer # Producer-only test" - @echo " make schema-test # Run schema integration test with Schema Registry" - @echo " make schema-quick-test # Run quick schema test (30s timeout)" - @echo " make schema-loadtest # Run load test with schemas enabled" - @echo " make build-binary # Build SeaweedFS binary locally for Linux" - @echo " make build-gateway # Build Kafka Gateway (builds binary + Docker image)" - @echo " make build-gateway-clean # Build Kafka Gateway with no cache (fresh build)" - -build: ## Build the load test application - @echo "$(BLUE)Building load test application...$(NC)" - $(DOCKER_COMPOSE) build kafka-client-loadtest - @echo "$(GREEN)Build completed$(NC)" - -build-binary: ## Build the SeaweedFS binary locally for Linux - @echo "$(BLUE)Building SeaweedFS binary locally for $(GOOS) $(GOARCH)...$(NC)" - cd ../../.. && \ - CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build \ - -ldflags="-s -w" \ - -tags "5BytesOffset" \ - -o test/kafka/kafka-client-loadtest/weed-$(GOOS)-$(GOARCH) \ - weed/weed.go - @echo "$(GREEN)Binary build completed: weed-$(GOOS)-$(GOARCH)$(NC)" - -build-gateway: build-binary ## Build the Kafka Gateway with latest changes - @echo "$(BLUE)Building Kafka Gateway Docker image...$(NC)" - CACHE_BUST=$$(date +%s) $(DOCKER_COMPOSE) build kafka-gateway - @echo "$(GREEN)Kafka Gateway build completed$(NC)" - -build-gateway-clean: build-binary ## Build the Kafka Gateway with no cache (force fresh build) - @echo "$(BLUE)Building Kafka Gateway Docker image with no cache...$(NC)" - $(DOCKER_COMPOSE) build --no-cache kafka-gateway - @echo "$(GREEN)Kafka Gateway clean build completed$(NC)" - -setup: ## Set up monitoring and configuration - @echo "$(BLUE)Setting up monitoring configuration...$(NC)" - ./scripts/setup-monitoring.sh - @echo "$(GREEN)Setup completed$(NC)" - -start: build-gateway ## Start the infrastructure services (without load test) - @echo "$(BLUE)Starting SeaweedFS infrastructure...$(NC)" - $(DOCKER_COMPOSE) up -d \ - seaweedfs-master \ - seaweedfs-volume \ - seaweedfs-filer \ - seaweedfs-mq-broker \ - kafka-gateway \ - schema-registry-init \ - schema-registry - @echo "$(GREEN)Infrastructure started$(NC)" - @echo "Waiting for services to be ready..." - ./scripts/wait-for-services.sh wait - @echo "$(GREEN)All services are ready!$(NC)" - -stop: ## Stop all services - @echo "$(BLUE)Stopping all services...$(NC)" - $(DOCKER_COMPOSE) --profile loadtest --profile monitoring down - @echo "$(GREEN)Services stopped$(NC)" - -restart: stop start ## Restart all services - -clean: ## Clean up all resources (containers, volumes, networks, local data) - @echo "$(YELLOW)Warning: This will remove all volumes and data!$(NC)" - @echo "Press Ctrl+C to cancel, or wait 5 seconds to continue..." - @sleep 5 - @echo "$(BLUE)Cleaning up all resources...$(NC)" - $(DOCKER_COMPOSE) --profile loadtest --profile monitoring down -v --remove-orphans - docker system prune -f - @if [ -f "weed-linux-arm64" ]; then \ - echo "$(BLUE)Removing local binary...$(NC)"; \ - rm -f weed-linux-arm64; \ - fi - @if [ -d "data" ]; then \ - echo "$(BLUE)Removing ALL local data directories (including offset state)...$(NC)"; \ - rm -rf data/*; \ - fi - @echo "$(GREEN)Cleanup completed - all data removed$(NC)" - -clean-binary: ## Clean up only the local binary - @echo "$(BLUE)Removing local binary...$(NC)" - @rm -f weed-linux-arm64 - @echo "$(GREEN)Binary cleanup completed$(NC)" - -status: ## Show service status - @echo "$(BLUE)Service Status:$(NC)" - $(DOCKER_COMPOSE) ps - -logs: ## Show logs from all services - $(DOCKER_COMPOSE) logs -f - -test: start ## Run the comprehensive load test - @echo "$(BLUE)Running Kafka client load test...$(NC)" - @echo "Mode: $(TEST_MODE), Duration: $(TEST_DURATION)" - @echo "Producers: $(PRODUCER_COUNT), Consumers: $(CONSUMER_COUNT)" - @echo "Message Rate: $(MESSAGE_RATE) msgs/sec, Size: $(MESSAGE_SIZE) bytes" - @echo "" - @docker rm -f kafka-client-loadtest-runner 2>/dev/null || true - TEST_MODE=$(TEST_MODE) TEST_DURATION=$(TEST_DURATION) PRODUCER_COUNT=$(PRODUCER_COUNT) CONSUMER_COUNT=$(CONSUMER_COUNT) MESSAGE_RATE=$(MESSAGE_RATE) MESSAGE_SIZE=$(MESSAGE_SIZE) VALUE_TYPE=$(VALUE_TYPE) $(DOCKER_COMPOSE) --profile loadtest up --abort-on-container-exit kafka-client-loadtest - @echo "$(GREEN)Load test completed!$(NC)" - @$(MAKE) show-results - -quick-test: build-gateway ## Run a quick smoke test (1 min, low load, WITH schemas) - @echo "$(BLUE)================================================================$(NC)" - @echo "$(BLUE) Quick Test (Low Load, WITH Schema Registry + Avro) $(NC)" - @echo "$(BLUE) - Duration: 1 minute $(NC)" - @echo "$(BLUE) - Load: 1 producer ร— 10 msg/sec = 10 total msg/sec $(NC)" - @echo "$(BLUE) - Message Type: Avro (with schema encoding) $(NC)" - @echo "$(BLUE) - Schema-First: Registers schemas BEFORE producing $(NC)" - @echo "$(BLUE)================================================================$(NC)" - @echo "" - @$(MAKE) start - @echo "" - @echo "$(BLUE)=== Step 1: Registering schemas in Schema Registry ===$(NC)" - @echo "$(YELLOW)[WARN] IMPORTANT: Schemas MUST be registered before producing Avro messages!$(NC)" - @./scripts/register-schemas.sh full - @echo "$(GREEN)- Schemas registered successfully$(NC)" - @echo "" - @echo "$(BLUE)=== Step 2: Running load test with Avro messages ===$(NC)" - @$(MAKE) test \ - TEST_MODE=comprehensive \ - TEST_DURATION=60s \ - PRODUCER_COUNT=1 \ - CONSUMER_COUNT=1 \ - MESSAGE_RATE=10 \ - MESSAGE_SIZE=256 \ - VALUE_TYPE=avro - @echo "" - @echo "$(GREEN)================================================================$(NC)" - @echo "$(GREEN) Quick Test Complete! $(NC)" - @echo "$(GREEN) - Schema Registration $(NC)" - @echo "$(GREEN) - Avro Message Production $(NC)" - @echo "$(GREEN) - Message Consumption $(NC)" - @echo "$(GREEN)================================================================$(NC)" - -standard-test: ## Run a standard load test (2 min, medium load, WITH Schema Registry + Avro) - @echo "$(BLUE)================================================================$(NC)" - @echo "$(BLUE) Standard Test (Medium Load, WITH Schema Registry) $(NC)" - @echo "$(BLUE) - Duration: 2 minutes $(NC)" - @echo "$(BLUE) - Load: 2 producers ร— 50 msg/sec = 100 total msg/sec $(NC)" - @echo "$(BLUE) - Message Type: Avro (with schema encoding) $(NC)" - @echo "$(BLUE) - IMPORTANT: Schemas registered FIRST in Schema Registry $(NC)" - @echo "$(BLUE)================================================================$(NC)" - @echo "" - @$(MAKE) start - @echo "" - @echo "$(BLUE)=== Step 1: Registering schemas in Schema Registry ===$(NC)" - @echo "$(YELLOW)Note: Schemas MUST be registered before producing Avro messages!$(NC)" - @./scripts/register-schemas.sh full - @echo "$(GREEN)- Schemas registered$(NC)" - @echo "" - @echo "$(BLUE)=== Step 2: Running load test with Avro messages ===$(NC)" - @$(MAKE) test \ - TEST_MODE=comprehensive \ - TEST_DURATION=2m \ - PRODUCER_COUNT=2 \ - CONSUMER_COUNT=2 \ - MESSAGE_RATE=50 \ - MESSAGE_SIZE=512 \ - VALUE_TYPE=avro - @echo "" - @echo "$(GREEN)================================================================$(NC)" - @echo "$(GREEN) Standard Test Complete! $(NC)" - @echo "$(GREEN)================================================================$(NC)" - -stress-test: ## Run a stress test (10 minutes, high load) with schemas - @echo "$(BLUE)Starting stress test with schema registration...$(NC)" - @$(MAKE) start - @echo "$(BLUE)Registering schemas with Schema Registry...$(NC)" - @./scripts/register-schemas.sh full - @echo "$(BLUE)Running stress test with registered schemas...$(NC)" - @$(MAKE) test \ - TEST_MODE=comprehensive \ - TEST_DURATION=10m \ - PRODUCER_COUNT=20 \ - CONSUMER_COUNT=10 \ - MESSAGE_RATE=2000 \ - MESSAGE_SIZE=2048 \ - VALUE_TYPE=avro - -endurance-test: ## Run an endurance test (30 minutes, sustained load) with schemas - @echo "$(BLUE)Starting endurance test with schema registration...$(NC)" - @$(MAKE) start - @echo "$(BLUE)Registering schemas with Schema Registry...$(NC)" - @./scripts/register-schemas.sh full - @echo "$(BLUE)Running endurance test with registered schemas...$(NC)" - @$(MAKE) test \ - TEST_MODE=comprehensive \ - TEST_DURATION=30m \ - PRODUCER_COUNT=10 \ - CONSUMER_COUNT=5 \ - MESSAGE_RATE=1000 \ - MESSAGE_SIZE=1024 \ - VALUE_TYPE=avro - -producer-test: ## Run producer-only load test - @$(MAKE) test TEST_MODE=producer - -consumer-test: ## Run consumer-only load test (requires existing messages) - @$(MAKE) test TEST_MODE=consumer - -register-schemas: start ## Register schemas with Schema Registry - @echo "$(BLUE)Registering schemas with Schema Registry...$(NC)" - @./scripts/register-schemas.sh full - @echo "$(GREEN)Schema registration completed!$(NC)" - -verify-schemas: ## Verify schemas are registered in Schema Registry - @echo "$(BLUE)Verifying schemas in Schema Registry...$(NC)" - @./scripts/register-schemas.sh verify - @echo "$(GREEN)Schema verification completed!$(NC)" - -list-schemas: ## List all registered schemas in Schema Registry - @echo "$(BLUE)Listing registered schemas...$(NC)" - @./scripts/register-schemas.sh list - -cleanup-schemas: ## Clean up test schemas from Schema Registry - @echo "$(YELLOW)Cleaning up test schemas...$(NC)" - @./scripts/register-schemas.sh cleanup - @echo "$(GREEN)Schema cleanup completed!$(NC)" - -schema-test: start ## Run schema integration test (with Schema Registry) - @echo "$(BLUE)Running schema integration test...$(NC)" - @echo "Testing Schema Registry integration with schematized topics" - @echo "" - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o schema-test-linux test_schema_integration.go - docker run --rm --network kafka-client-loadtest \ - -v $(PWD)/schema-test-linux:/usr/local/bin/schema-test \ - alpine:3.18 /usr/local/bin/schema-test - @rm -f schema-test-linux - @echo "$(GREEN)Schema integration test completed!$(NC)" - -schema-quick-test: start ## Run quick schema test (lighter version) - @echo "$(BLUE)Running quick schema test...$(NC)" - @echo "Testing basic schema functionality" - @echo "" - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o schema-test-linux test_schema_integration.go - timeout 60s docker run --rm --network kafka-client-loadtest \ - -v $(PWD)/schema-test-linux:/usr/local/bin/schema-test \ - alpine:3.18 /usr/local/bin/schema-test || true - @rm -f schema-test-linux - @echo "$(GREEN)Quick schema test completed!$(NC)" - -simple-schema-test: start ## Run simple schema test (step-by-step) - @echo "$(BLUE)Running simple schema test...$(NC)" - @echo "Step-by-step schema functionality test" - @echo "" - @mkdir -p simple-test - @cp simple_schema_test.go simple-test/main.go - cd simple-test && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ../simple-schema-test-linux . - docker run --rm --network kafka-client-loadtest \ - -v $(PWD)/simple-schema-test-linux:/usr/local/bin/simple-schema-test \ - alpine:3.18 /usr/local/bin/simple-schema-test - @rm -f simple-schema-test-linux - @rm -rf simple-test - @echo "$(GREEN)Simple schema test completed!$(NC)" - -basic-schema-test: start ## Run basic schema test (manual schema handling without Schema Registry) - @echo "$(BLUE)Running basic schema test...$(NC)" - @echo "Testing schema functionality without Schema Registry dependency" - @echo "" - @mkdir -p basic-test - @cp basic_schema_test.go basic-test/main.go - cd basic-test && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ../basic-schema-test-linux . - timeout 60s docker run --rm --network kafka-client-loadtest \ - -v $(PWD)/basic-schema-test-linux:/usr/local/bin/basic-schema-test \ - alpine:3.18 /usr/local/bin/basic-schema-test - @rm -f basic-schema-test-linux - @rm -rf basic-test - @echo "$(GREEN)Basic schema test completed!$(NC)" - -schema-loadtest: start ## Run load test with schemas enabled - @echo "$(BLUE)Running schema-enabled load test...$(NC)" - @echo "Mode: comprehensive with schemas, Duration: 3m" - @echo "Producers: 3, Consumers: 2, Message Rate: 50 msgs/sec" - @echo "" - TEST_MODE=comprehensive \ - TEST_DURATION=3m \ - PRODUCER_COUNT=3 \ - CONSUMER_COUNT=2 \ - MESSAGE_RATE=50 \ - MESSAGE_SIZE=1024 \ - SCHEMA_REGISTRY_URL=http://schema-registry:8081 \ - $(DOCKER_COMPOSE) --profile loadtest up --abort-on-container-exit kafka-client-loadtest - @echo "$(GREEN)Schema load test completed!$(NC)" - @$(MAKE) show-results - -monitor: setup ## Start monitoring stack (Prometheus + Grafana) - @echo "$(BLUE)Starting monitoring stack...$(NC)" - $(DOCKER_COMPOSE) --profile monitoring up -d prometheus grafana - @echo "$(GREEN)Monitoring stack started!$(NC)" - @echo "" - @echo "Access points:" - @echo " Prometheus: http://localhost:9090" - @echo " Grafana: http://localhost:3000 (admin/admin)" - -monitor-stop: ## Stop monitoring stack - @echo "$(BLUE)Stopping monitoring stack...$(NC)" - $(DOCKER_COMPOSE) --profile monitoring stop prometheus grafana - @echo "$(GREEN)Monitoring stack stopped$(NC)" - -test-with-monitoring: monitor start ## Run test with monitoring enabled - @echo "$(BLUE)Running load test with monitoring...$(NC)" - @$(MAKE) test - @echo "" - @echo "$(GREEN)Test completed! Check the monitoring dashboards:$(NC)" - @echo " Prometheus: http://localhost:9090" - @echo " Grafana: http://localhost:3000 (admin/admin)" - -show-results: ## Show test results - @echo "$(BLUE)Test Results Summary:$(NC)" - @if $(DOCKER_COMPOSE) ps -q kafka-client-loadtest-runner >/dev/null 2>&1; then \ - $(DOCKER_COMPOSE) exec -T kafka-client-loadtest-runner curl -s http://localhost:8080/stats 2>/dev/null || echo "Results not available"; \ - else \ - echo "Load test container not running"; \ - fi - @echo "" - @if [ -d "test-results" ]; then \ - echo "Detailed results saved to: test-results/"; \ - ls -la test-results/ 2>/dev/null || true; \ - fi - -health-check: ## Check health of all services - @echo "$(BLUE)Checking service health...$(NC)" - ./scripts/wait-for-services.sh check - -validate-setup: ## Validate the test setup - @echo "$(BLUE)Validating test setup...$(NC)" - @echo "Checking Docker and Docker Compose..." - @docker --version - @docker compose version || docker-compose --version - @echo "" - @echo "Checking configuration file..." - @if [ -f "$(CONFIG_FILE)" ]; then \ - echo "- Configuration file exists: $(CONFIG_FILE)"; \ - else \ - echo "x Configuration file not found: $(CONFIG_FILE)"; \ - exit 1; \ - fi - @echo "" - @echo "Checking scripts..." - @for script in scripts/*.sh; do \ - if [ -x "$$script" ]; then \ - echo "- $$script is executable"; \ - else \ - echo "x $$script is not executable"; \ - fi; \ - done - @echo "$(GREEN)Setup validation completed$(NC)" - -dev-env: ## Set up development environment - @echo "$(BLUE)Setting up development environment...$(NC)" - @echo "Installing Go dependencies..." - go mod download - go mod tidy - @echo "$(GREEN)Development environment ready$(NC)" - -benchmark: ## Run comprehensive benchmarking suite - @echo "$(BLUE)Running comprehensive benchmark suite...$(NC)" - @echo "This will run multiple test scenarios and collect detailed metrics" - @echo "" - @$(MAKE) quick-test - @sleep 10 - @$(MAKE) standard-test - @sleep 10 - @$(MAKE) stress-test - @echo "$(GREEN)Benchmark suite completed!$(NC)" - -# Advanced targets -debug: ## Start services in debug mode with verbose logging - @echo "$(BLUE)Starting services in debug mode...$(NC)" - SEAWEEDFS_LOG_LEVEL=debug \ - KAFKA_LOG_LEVEL=debug \ - $(DOCKER_COMPOSE) up \ - seaweedfs-master \ - seaweedfs-volume \ - seaweedfs-filer \ - seaweedfs-mq-broker \ - kafka-gateway \ - schema-registry - -attach-loadtest: ## Attach to running load test container - $(DOCKER_COMPOSE) exec kafka-client-loadtest-runner /bin/sh - -exec-master: ## Execute shell in SeaweedFS master container - $(DOCKER_COMPOSE) exec seaweedfs-master /bin/sh - -exec-filer: ## Execute shell in SeaweedFS filer container - $(DOCKER_COMPOSE) exec seaweedfs-filer /bin/sh - -exec-gateway: ## Execute shell in Kafka gateway container - $(DOCKER_COMPOSE) exec kafka-gateway /bin/sh - -# Utility targets -ps: status ## Alias for status - -up: start ## Alias for start - -down: stop ## Alias for stop - -# Help is the default target -.DEFAULT_GOAL := help diff --git a/test/kafka/kafka-client-loadtest/README.md b/test/kafka/kafka-client-loadtest/README.md deleted file mode 100644 index 4f465a21b..000000000 --- a/test/kafka/kafka-client-loadtest/README.md +++ /dev/null @@ -1,397 +0,0 @@ -# Kafka Client Load Test for SeaweedFS - -This comprehensive load testing suite validates the SeaweedFS MQ stack using real Kafka client libraries. Unlike the existing SMQ tests, this uses actual Kafka clients (`sarama` and `confluent-kafka-go`) to test the complete integration through: - -- **Kafka Clients** โ†’ **SeaweedFS Kafka Gateway** โ†’ **SeaweedFS MQ Broker** โ†’ **SeaweedFS Storage** - -## Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Kafka Client โ”‚ โ”‚ Kafka Gateway โ”‚ โ”‚ SeaweedFS MQ โ”‚ -โ”‚ Load Test โ”‚โ”€โ”€โ”€โ–ถโ”‚ (Port 9093) โ”‚โ”€โ”€โ”€โ–ถโ”‚ Broker โ”‚ -โ”‚ - Producers โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ”‚ - Consumers โ”‚ โ”‚ Protocol โ”‚ โ”‚ Topic Management โ”‚ -โ”‚ โ”‚ โ”‚ Translation โ”‚ โ”‚ Message Storage โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ SeaweedFS Storage โ”‚ - โ”‚ - Master โ”‚ - โ”‚ - Volume Server โ”‚ - โ”‚ - Filer โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## Features - -### ๐Ÿš€ **Multiple Test Modes** -- **Producer-only**: Pure message production testing -- **Consumer-only**: Consumption from existing topics -- **Comprehensive**: Full producer + consumer load testing - -### ๐Ÿ“Š **Rich Metrics & Monitoring** -- Prometheus metrics collection -- Grafana dashboards -- Real-time throughput and latency tracking -- Consumer lag monitoring -- Error rate analysis - -### ๐Ÿ”ง **Configurable Test Scenarios** -- **Quick Test**: 1-minute smoke test -- **Standard Test**: 5-minute medium load -- **Stress Test**: 10-minute high load -- **Endurance Test**: 30-minute sustained load -- **Custom**: Fully configurable parameters - -### ๐Ÿ“ˆ **Message Types** -- **JSON**: Structured test messages -- **Avro**: Schema Registry integration -- **Binary**: Raw binary payloads - -### ๐Ÿ›  **Kafka Client Support** -- **Sarama**: Native Go Kafka client -- **Confluent**: Official Confluent Go client -- Schema Registry integration -- Consumer group management - -## Quick Start - -### Prerequisites -- Docker & Docker Compose -- Make (optional, but recommended) - -### 1. Run Default Test -```bash -make test -``` -This runs a 5-minute comprehensive test with 10 producers and 5 consumers. - -### 2. Quick Smoke Test -```bash -make quick-test -``` -1-minute test with minimal load for validation. - -### 3. Stress Test -```bash -make stress-test -``` -10-minute high-throughput test with 20 producers and 10 consumers. - -### 4. Test with Monitoring -```bash -make test-with-monitoring -``` -Includes Prometheus + Grafana dashboards for real-time monitoring. - -## Detailed Usage - -### Manual Control -```bash -# Start infrastructure only -make start - -# Run load test against running infrastructure -make test TEST_MODE=comprehensive TEST_DURATION=10m - -# Stop everything -make stop - -# Clean up all resources -make clean -``` - -### Using Scripts Directly -```bash -# Full control with the main script -./scripts/run-loadtest.sh start -m comprehensive -d 10m --monitoring - -# Check service health -./scripts/wait-for-services.sh check - -# Setup monitoring configurations -./scripts/setup-monitoring.sh -``` - -### Environment Variables -```bash -export TEST_MODE=comprehensive # producer, consumer, comprehensive -export TEST_DURATION=300s # Test duration -export PRODUCER_COUNT=10 # Number of producer instances -export CONSUMER_COUNT=5 # Number of consumer instances -export MESSAGE_RATE=1000 # Messages/second per producer -export MESSAGE_SIZE=1024 # Message size in bytes -export TOPIC_COUNT=5 # Number of topics to create -export PARTITIONS_PER_TOPIC=3 # Partitions per topic - -make test -``` - -## Configuration - -### Main Configuration File -Edit `config/loadtest.yaml` to customize: - -- **Kafka Settings**: Bootstrap servers, security, timeouts -- **Producer Config**: Batching, compression, acknowledgments -- **Consumer Config**: Group settings, fetch parameters -- **Message Settings**: Size, format (JSON/Avro/Binary) -- **Schema Registry**: Avro/Protobuf schema validation -- **Metrics**: Prometheus collection intervals -- **Test Scenarios**: Predefined load patterns - -### Example Custom Configuration -```yaml -test_mode: "comprehensive" -duration: "600s" # 10 minutes - -producers: - count: 15 - message_rate: 2000 - message_size: 2048 - compression_type: "snappy" - acks: "all" - -consumers: - count: 8 - group_prefix: "high-load-group" - max_poll_records: 1000 - -topics: - count: 10 - partitions: 6 - replication_factor: 1 -``` - -## Test Scenarios - -### 1. Producer Performance Test -```bash -make producer-test TEST_DURATION=10m PRODUCER_COUNT=20 MESSAGE_RATE=3000 -``` -Tests maximum message production throughput. - -### 2. Consumer Performance Test -```bash -# First produce messages -make producer-test TEST_DURATION=5m - -# Then test consumption -make consumer-test TEST_DURATION=10m CONSUMER_COUNT=15 -``` - -### 3. Schema Registry Integration -```bash -# Enable schemas in config/loadtest.yaml -schemas: - enabled: true - -make test -``` -Tests Avro message serialization through Schema Registry. - -### 4. High Availability Test -```bash -# Test with container restarts during load -make test TEST_DURATION=20m & -sleep 300 -docker restart kafka-gateway -``` - -## Monitoring & Metrics - -### Real-Time Dashboards -When monitoring is enabled: -- **Prometheus**: http://localhost:9090 -- **Grafana**: http://localhost:3000 (admin/admin) - -### Key Metrics Tracked -- **Throughput**: Messages/second, MB/second -- **Latency**: End-to-end message latency percentiles -- **Errors**: Producer/consumer error rates -- **Consumer Lag**: Per-partition lag monitoring -- **Resource Usage**: CPU, memory, disk I/O - -### Grafana Dashboards -- **Kafka Load Test**: Comprehensive test metrics -- **SeaweedFS Cluster**: Storage system health -- **Custom Dashboards**: Extensible monitoring - -## Advanced Features - -### Schema Registry Testing -```bash -# Test Avro message serialization -export KAFKA_VALUE_TYPE=avro -make test -``` - -The load test includes: -- Schema registration -- Avro message encoding/decoding -- Schema evolution testing -- Compatibility validation - -### Multi-Client Testing -The test supports both Sarama and Confluent clients: -```go -// Configure in producer/consumer code -useConfluent := true // Switch client implementation -``` - -### Consumer Group Rebalancing -- Automatic consumer group management -- Partition rebalancing simulation -- Consumer failure recovery testing - -### Chaos Testing -```yaml -chaos: - enabled: true - producer_failure_rate: 0.01 - consumer_failure_rate: 0.01 - network_partition_probability: 0.001 -``` - -## Troubleshooting - -### Common Issues - -#### Services Not Starting -```bash -# Check service health -make health-check - -# View detailed logs -make logs - -# Debug mode -make debug -``` - -#### Low Throughput -- Increase `MESSAGE_RATE` and `PRODUCER_COUNT` -- Adjust `batch_size` and `linger_ms` in config -- Check consumer `max_poll_records` setting - -#### High Latency -- Reduce `linger_ms` for lower latency -- Adjust `acks` setting (0, 1, or "all") -- Monitor consumer lag - -#### Memory Issues -```bash -# Reduce concurrent clients -make test PRODUCER_COUNT=5 CONSUMER_COUNT=3 - -# Adjust message size -make test MESSAGE_SIZE=512 -``` - -### Debug Commands -```bash -# Execute shell in containers -make exec-master -make exec-filer -make exec-gateway - -# Attach to load test -make attach-loadtest - -# View real-time stats -curl http://localhost:8080/stats -``` - -## Development - -### Building from Source -```bash -# Set up development environment -make dev-env - -# Build load test binary -make build - -# Run tests locally (requires Go 1.21+) -cd cmd/loadtest && go run main.go -config ../../config/loadtest.yaml -``` - -### Extending the Tests -1. **Add new message formats** in `internal/producer/` -2. **Add custom metrics** in `internal/metrics/` -3. **Create new test scenarios** in `config/loadtest.yaml` -4. **Add monitoring panels** in `monitoring/grafana/dashboards/` - -### Contributing -1. Fork the repository -2. Create a feature branch -3. Add tests for new functionality -4. Ensure all tests pass: `make test` -5. Submit a pull request - -## Performance Benchmarks - -### Expected Performance (on typical hardware) - -| Scenario | Producers | Consumers | Rate (msg/s) | Latency (p95) | -|----------|-----------|-----------|--------------|---------------| -| Quick | 2 | 2 | 200 | <10ms | -| Standard | 5 | 3 | 2,500 | <20ms | -| Stress | 20 | 10 | 40,000 | <50ms | -| Endurance| 10 | 5 | 10,000 | <30ms | - -*Results vary based on hardware, network, and SeaweedFS configuration* - -### Tuning for Maximum Performance -```yaml -producers: - batch_size: 1000 - linger_ms: 10 - compression_type: "lz4" - acks: "1" # Balance between speed and durability - -consumers: - max_poll_records: 5000 - fetch_min_bytes: 1048576 # 1MB - fetch_max_wait_ms: 100 -``` - -## Comparison with Existing Tests - -| Feature | SMQ Tests | **Kafka Client Load Test** | -|---------|-----------|----------------------------| -| Protocol | SMQ (SeaweedFS native) | **Kafka (industry standard)** | -| Clients | SMQ clients | **Real Kafka clients (Sarama, Confluent)** | -| Schema Registry | โŒ | **โœ… Full Avro/Protobuf support** | -| Consumer Groups | Basic | **โœ… Full Kafka consumer group features** | -| Monitoring | Basic | **โœ… Prometheus + Grafana dashboards** | -| Test Scenarios | Limited | **โœ… Multiple predefined scenarios** | -| Real-world | Synthetic | **โœ… Production-like workloads** | - -This load test provides comprehensive validation of the SeaweedFS Kafka Gateway using real-world Kafka clients and protocols. - ---- - -## Quick Reference - -```bash -# Essential Commands -make help # Show all available commands -make test # Run default comprehensive test -make quick-test # 1-minute smoke test -make stress-test # High-load stress test -make test-with-monitoring # Include Grafana dashboards -make clean # Clean up all resources - -# Monitoring -make monitor # Start Prometheus + Grafana -# โ†’ http://localhost:9090 (Prometheus) -# โ†’ http://localhost:3000 (Grafana, admin/admin) - -# Advanced -make benchmark # Run full benchmark suite -make health-check # Validate service health -make validate-setup # Check configuration -``` diff --git a/test/kafka/kafka-client-loadtest/SeekToBeginningTest.java b/test/kafka/kafka-client-loadtest/SeekToBeginningTest.java deleted file mode 100644 index d2f324f3a..000000000 --- a/test/kafka/kafka-client-loadtest/SeekToBeginningTest.java +++ /dev/null @@ -1,179 +0,0 @@ -import org.apache.kafka.clients.consumer.*; -import org.apache.kafka.clients.consumer.internals.*; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.serialization.ByteArrayDeserializer; -import org.apache.kafka.common.errors.TimeoutException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import java.util.*; - -/** - * Enhanced test program to reproduce and diagnose the seekToBeginning() hang issue - * - * This test: - * 1. Adds detailed logging of Kafka client operations - * 2. Captures exceptions and timeouts - * 3. Shows what the consumer is waiting for - * 4. Tracks request/response lifecycle - */ -public class SeekToBeginningTest { - private static final Logger log = LoggerFactory.getLogger(SeekToBeginningTest.class); - - public static void main(String[] args) throws Exception { - String bootstrapServers = "localhost:9093"; - String topicName = "_schemas"; - - if (args.length > 0) { - bootstrapServers = args[0]; - } - - Properties props = new Properties(); - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-seek-group"); - props.put(ConsumerConfig.CLIENT_ID_CONFIG, "test-seek-client"); - props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); - props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); - props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); - props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "45000"); - props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "60000"); - - // Add comprehensive debug logging - props.put("log4j.logger.org.apache.kafka.clients.consumer.internals", "DEBUG"); - props.put("log4j.logger.org.apache.kafka.clients.producer.internals", "DEBUG"); - props.put("log4j.logger.org.apache.kafka.clients.Metadata", "DEBUG"); - - // Add shorter timeouts to fail faster - props.put(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "10000"); // 10 seconds instead of 60 - - System.out.println("\nโ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—"); - System.out.println("โ•‘ SeekToBeginning Diagnostic Test โ•‘"); - System.out.println(String.format("โ•‘ Connecting to: %-42sโ•‘", bootstrapServers)); - System.out.println("โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•\n"); - - System.out.println("[TEST] Creating KafkaConsumer..."); - System.out.println("[TEST] Bootstrap servers: " + bootstrapServers); - System.out.println("[TEST] Group ID: test-seek-group"); - System.out.println("[TEST] Client ID: test-seek-client"); - - KafkaConsumer consumer = new KafkaConsumer<>(props); - - TopicPartition tp = new TopicPartition(topicName, 0); - List partitions = Arrays.asList(tp); - - System.out.println("\n[STEP 1] Assigning to partition: " + tp); - consumer.assign(partitions); - System.out.println("[STEP 1] โœ“ Assigned successfully"); - - System.out.println("\n[STEP 2] Calling seekToBeginning()..."); - long startTime = System.currentTimeMillis(); - try { - consumer.seekToBeginning(partitions); - long seekTime = System.currentTimeMillis() - startTime; - System.out.println("[STEP 2] โœ“ seekToBeginning() completed in " + seekTime + "ms"); - } catch (Exception e) { - System.out.println("[STEP 2] โœ— EXCEPTION in seekToBeginning():"); - e.printStackTrace(); - consumer.close(); - return; - } - - System.out.println("\n[STEP 3] Starting poll loop..."); - System.out.println("[STEP 3] First poll will trigger offset lookup (ListOffsets)"); - System.out.println("[STEP 3] Then will fetch initial records\n"); - - int successfulPolls = 0; - int failedPolls = 0; - int totalRecords = 0; - - for (int i = 0; i < 3; i++) { - System.out.println("โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•"); - System.out.println("[POLL " + (i + 1) + "] Starting poll with 15-second timeout..."); - long pollStart = System.currentTimeMillis(); - - try { - System.out.println("[POLL " + (i + 1) + "] Calling consumer.poll()..."); - ConsumerRecords records = consumer.poll(java.time.Duration.ofSeconds(15)); - long pollTime = System.currentTimeMillis() - pollStart; - - System.out.println("[POLL " + (i + 1) + "] โœ“ Poll completed in " + pollTime + "ms"); - System.out.println("[POLL " + (i + 1) + "] Records received: " + records.count()); - - if (records.count() > 0) { - successfulPolls++; - totalRecords += records.count(); - for (ConsumerRecord record : records) { - System.out.println(" [RECORD] offset=" + record.offset() + - ", key.len=" + (record.key() != null ? record.key().length : 0) + - ", value.len=" + (record.value() != null ? record.value().length : 0)); - } - } else { - System.out.println("[POLL " + (i + 1) + "] โ„น No records in this poll (but no error)"); - successfulPolls++; - } - } catch (TimeoutException e) { - long pollTime = System.currentTimeMillis() - pollStart; - failedPolls++; - System.out.println("[POLL " + (i + 1) + "] โœ— TIMEOUT after " + pollTime + "ms"); - System.out.println("[POLL " + (i + 1) + "] This means consumer is waiting for something from broker"); - System.out.println("[POLL " + (i + 1) + "] Possible causes:"); - System.out.println(" - ListOffsetsRequest never sent"); - System.out.println(" - ListOffsetsResponse not received"); - System.out.println(" - Broker metadata parsing failed"); - System.out.println(" - Connection issue"); - - // Print current position info if available - try { - long position = consumer.position(tp); - System.out.println("[POLL " + (i + 1) + "] Current position: " + position); - } catch (Exception e2) { - System.out.println("[POLL " + (i + 1) + "] Could not get position: " + e2.getMessage()); - } - } catch (Exception e) { - failedPolls++; - long pollTime = System.currentTimeMillis() - pollStart; - System.out.println("[POLL " + (i + 1) + "] โœ— EXCEPTION after " + pollTime + "ms:"); - System.out.println("[POLL " + (i + 1) + "] Exception type: " + e.getClass().getSimpleName()); - System.out.println("[POLL " + (i + 1) + "] Message: " + e.getMessage()); - - // Print stack trace for first exception - if (i == 0) { - System.out.println("[POLL " + (i + 1) + "] Stack trace:"); - e.printStackTrace(); - } - } - } - - System.out.println("\nโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•"); - System.out.println("[RESULTS] Test Summary:"); - System.out.println(" Successful polls: " + successfulPolls); - System.out.println(" Failed polls: " + failedPolls); - System.out.println(" Total records received: " + totalRecords); - - if (failedPolls > 0) { - System.out.println("\n[DIAGNOSIS] Consumer is BLOCKED during poll()"); - System.out.println(" This indicates the consumer cannot:"); - System.out.println(" 1. Send ListOffsetsRequest to determine offset 0, OR"); - System.out.println(" 2. Receive/parse ListOffsetsResponse from broker, OR"); - System.out.println(" 3. Parse broker metadata for partition leader lookup"); - } else if (totalRecords == 0) { - System.out.println("\n[DIAGNOSIS] Consumer is working but NO records found"); - System.out.println(" This might mean:"); - System.out.println(" 1. Topic has no messages, OR"); - System.out.println(" 2. Fetch is working but broker returns empty"); - } else { - System.out.println("\n[SUCCESS] Consumer working correctly!"); - System.out.println(" Received " + totalRecords + " records"); - } - - System.out.println("\n[CLEANUP] Closing consumer..."); - try { - consumer.close(); - System.out.println("[CLEANUP] โœ“ Consumer closed successfully"); - } catch (Exception e) { - System.out.println("[CLEANUP] โœ— Error closing consumer: " + e.getMessage()); - } - - System.out.println("\n[TEST] Done!\n"); - } -} diff --git a/test/kafka/kafka-client-loadtest/cmd/loadtest/main.go b/test/kafka/kafka-client-loadtest/cmd/loadtest/main.go deleted file mode 100644 index bfd53501e..000000000 --- a/test/kafka/kafka-client-loadtest/cmd/loadtest/main.go +++ /dev/null @@ -1,502 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "flag" - "fmt" - "io" - "log" - "net/http" - "os" - "os/signal" - "strings" - "sync" - "syscall" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/config" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/consumer" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/metrics" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/producer" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/tracker" -) - -var ( - configFile = flag.String("config", "/config/loadtest.yaml", "Path to configuration file") - testMode = flag.String("mode", "", "Test mode override (producer|consumer|comprehensive)") - duration = flag.Duration("duration", 0, "Test duration override") - help = flag.Bool("help", false, "Show help") -) - -func main() { - flag.Parse() - - if *help { - printHelp() - return - } - - // Load configuration - cfg, err := config.Load(*configFile) - if err != nil { - log.Fatalf("Failed to load configuration: %v", err) - } - - // Override configuration with environment variables and flags - cfg.ApplyOverrides(*testMode, *duration) - - // Initialize metrics - metricsCollector := metrics.NewCollector() - - // Start metrics HTTP server - go func() { - http.Handle("/metrics", promhttp.Handler()) - http.HandleFunc("/health", healthCheck) - http.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) { - metricsCollector.WriteStats(w) - }) - - log.Printf("Starting metrics server on :8080") - if err := http.ListenAndServe(":8080", nil); err != nil { - log.Printf("Metrics server error: %v", err) - } - }() - - // Set up signal handling - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - - log.Printf("Starting Kafka Client Load Test") - log.Printf("Mode: %s, Duration: %v", cfg.TestMode, cfg.Duration) - log.Printf("Kafka Brokers: %v", cfg.Kafka.BootstrapServers) - log.Printf("Schema Registry: %s", cfg.SchemaRegistry.URL) - log.Printf("Schemas Enabled: %v", cfg.Schemas.Enabled) - - // Register schemas if enabled - if cfg.Schemas.Enabled { - log.Printf("Registering schemas with Schema Registry...") - if err := registerSchemas(cfg); err != nil { - log.Fatalf("Failed to register schemas: %v", err) - } - log.Printf("Schemas registered successfully") - } - - var wg sync.WaitGroup - - // Start test based on mode - var testErr error - switch cfg.TestMode { - case "producer": - testErr = runProducerTest(ctx, cfg, metricsCollector, &wg) - case "consumer": - testErr = runConsumerTest(ctx, cfg, metricsCollector, &wg) - case "comprehensive": - testErr = runComprehensiveTest(ctx, cancel, cfg, metricsCollector, &wg) - default: - log.Fatalf("Unknown test mode: %s", cfg.TestMode) - } - - // If test returned an error (e.g., circuit breaker), exit - if testErr != nil { - log.Printf("Test failed with error: %v", testErr) - cancel() // Cancel context to stop any remaining goroutines - return - } - - // Wait for completion or signal - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - select { - case <-sigCh: - log.Printf("Received shutdown signal, stopping tests...") - cancel() - - // Wait for graceful shutdown with timeout - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer shutdownCancel() - - select { - case <-done: - log.Printf("All tests completed gracefully") - case <-shutdownCtx.Done(): - log.Printf("Shutdown timeout, forcing exit") - } - case <-done: - log.Printf("All tests completed") - } - - // Print final statistics - log.Printf("Final Test Statistics:") - metricsCollector.PrintSummary() -} - -func runProducerTest(ctx context.Context, cfg *config.Config, collector *metrics.Collector, wg *sync.WaitGroup) error { - log.Printf("Starting producer-only test with %d producers", cfg.Producers.Count) - - // Create record tracker with current timestamp to filter old messages - testStartTime := time.Now().UnixNano() - recordTracker := tracker.NewTracker("/test-results/produced.jsonl", "/test-results/consumed.jsonl", testStartTime) - - errChan := make(chan error, cfg.Producers.Count) - - for i := 0; i < cfg.Producers.Count; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - - prod, err := producer.New(cfg, collector, id, recordTracker) - if err != nil { - log.Printf("Failed to create producer %d: %v", id, err) - errChan <- err - return - } - defer prod.Close() - - if err := prod.Run(ctx); err != nil { - log.Printf("Producer %d failed: %v", id, err) - errChan <- err - return - } - }(i) - } - - // Wait for any producer error - select { - case err := <-errChan: - log.Printf("Producer test failed: %v", err) - return err - default: - return nil - } -} - -func runConsumerTest(ctx context.Context, cfg *config.Config, collector *metrics.Collector, wg *sync.WaitGroup) error { - log.Printf("Starting consumer-only test with %d consumers", cfg.Consumers.Count) - - // Create record tracker with current timestamp to filter old messages - testStartTime := time.Now().UnixNano() - recordTracker := tracker.NewTracker("/test-results/produced.jsonl", "/test-results/consumed.jsonl", testStartTime) - - errChan := make(chan error, cfg.Consumers.Count) - - for i := 0; i < cfg.Consumers.Count; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - - cons, err := consumer.New(cfg, collector, id, recordTracker) - if err != nil { - log.Printf("Failed to create consumer %d: %v", id, err) - errChan <- err - return - } - defer cons.Close() - - cons.Run(ctx) - }(i) - } - - // Consumers don't typically return errors in the same way, so just return nil - return nil -} - -func runComprehensiveTest(ctx context.Context, cancel context.CancelFunc, cfg *config.Config, collector *metrics.Collector, wg *sync.WaitGroup) error { - log.Printf("Starting comprehensive test with %d producers and %d consumers", - cfg.Producers.Count, cfg.Consumers.Count) - - // Create record tracker with current timestamp to filter old messages - testStartTime := time.Now().UnixNano() - log.Printf("Test run starting at %d - only tracking messages from this run", testStartTime) - recordTracker := tracker.NewTracker("/test-results/produced.jsonl", "/test-results/consumed.jsonl", testStartTime) - - errChan := make(chan error, cfg.Producers.Count) - - // Create separate contexts for producers and consumers - producerCtx, producerCancel := context.WithCancel(ctx) - consumerCtx, consumerCancel := context.WithCancel(ctx) - - // Start producers - for i := 0; i < cfg.Producers.Count; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - - prod, err := producer.New(cfg, collector, id, recordTracker) - if err != nil { - log.Printf("Failed to create producer %d: %v", id, err) - errChan <- err - return - } - defer prod.Close() - - if err := prod.Run(producerCtx); err != nil { - log.Printf("Producer %d failed: %v", id, err) - errChan <- err - return - } - }(i) - } - - // Wait briefly for producers to start producing messages - // Reduced from 5s to 2s to minimize message backlog - time.Sleep(2 * time.Second) - - // Start consumers - // NOTE: With unique ClientIDs, all consumers can start simultaneously without connection storms - for i := 0; i < cfg.Consumers.Count; i++ { - wg.Add(1) - go func(id int) { - defer wg.Done() - - cons, err := consumer.New(cfg, collector, id, recordTracker) - if err != nil { - log.Printf("Failed to create consumer %d: %v", id, err) - return - } - defer cons.Close() - - cons.Run(consumerCtx) - }(i) - } - - // Check for producer errors - select { - case err := <-errChan: - log.Printf("Comprehensive test failed due to producer error: %v", err) - producerCancel() - consumerCancel() - return err - default: - // No immediate error, continue - } - - // If duration is set, stop producers first, then allow consumers extra time to drain - if cfg.Duration > 0 { - go func() { - timer := time.NewTimer(cfg.Duration) - defer timer.Stop() - - select { - case <-timer.C: - log.Printf("Test duration (%v) reached, stopping producers", cfg.Duration) - producerCancel() - - // Allow consumers extra time to drain remaining messages - // Calculate drain time based on test duration (minimum 60s, up to test duration) - drainTime := 60 * time.Second - if cfg.Duration > drainTime { - drainTime = cfg.Duration // Match test duration for longer tests - } - log.Printf("Allowing %v for consumers to drain remaining messages...", drainTime) - time.Sleep(drainTime) - - log.Printf("Stopping consumers after drain period") - consumerCancel() - cancel() - case <-ctx.Done(): - // Context already cancelled - producerCancel() - consumerCancel() - } - }() - } else { - // No duration set, wait for cancellation and ensure cleanup - go func() { - <-ctx.Done() - producerCancel() - consumerCancel() - }() - } - - // Wait for all producer and consumer goroutines to complete - log.Printf("Waiting for all producers and consumers to complete...") - wg.Wait() - log.Printf("All producers and consumers completed, starting verification...") - - // Save produced and consumed records - log.Printf("Saving produced records...") - if err := recordTracker.SaveProduced(); err != nil { - log.Printf("Failed to save produced records: %v", err) - } - - log.Printf("Saving consumed records...") - if err := recordTracker.SaveConsumed(); err != nil { - log.Printf("Failed to save consumed records: %v", err) - } - - // Compare records - log.Printf("Comparing produced vs consumed records...") - result := recordTracker.Compare() - result.PrintSummary() - - log.Printf("Verification complete!") - return nil -} - -func healthCheck(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, "OK") -} - -func printHelp() { - fmt.Printf(`Kafka Client Load Test for SeaweedFS - -Usage: %s [options] - -Options: - -config string - Path to configuration file (default "/config/loadtest.yaml") - -mode string - Test mode override (producer|consumer|comprehensive) - -duration duration - Test duration override - -help - Show this help message - -Environment Variables: - KAFKA_BOOTSTRAP_SERVERS Comma-separated list of Kafka brokers - SCHEMA_REGISTRY_URL URL of the Schema Registry - TEST_DURATION Test duration (e.g., "5m", "300s") - TEST_MODE Test mode (producer|consumer|comprehensive) - PRODUCER_COUNT Number of producer instances - CONSUMER_COUNT Number of consumer instances - MESSAGE_RATE Messages per second per producer - MESSAGE_SIZE Message size in bytes - TOPIC_COUNT Number of topics to create - PARTITIONS_PER_TOPIC Number of partitions per topic - VALUE_TYPE Message value type (json/avro/binary) - -Test Modes: - producer - Run only producers (generate load) - consumer - Run only consumers (consume existing messages) - comprehensive - Run both producers and consumers simultaneously - -Example: - %s -config ./config/loadtest.yaml -mode comprehensive -duration 10m - -`, os.Args[0], os.Args[0]) -} - -// registerSchemas registers schemas with Schema Registry for all topics -func registerSchemas(cfg *config.Config) error { - // Wait for Schema Registry to be ready - if err := waitForSchemaRegistry(cfg.SchemaRegistry.URL); err != nil { - return fmt.Errorf("schema registry not ready: %w", err) - } - - // Register schemas for each topic with different formats for variety - topics := cfg.GetTopicNames() - - // Determine schema formats - use different formats for different topics - // This provides comprehensive testing of all schema format variations - for i, topic := range topics { - var schemaFormat string - - // Distribute topics across three schema formats for comprehensive testing - // Format 0: AVRO (default, most common) - // Format 1: JSON (modern, human-readable) - // Format 2: PROTOBUF (efficient binary format) - switch i % 3 { - case 0: - schemaFormat = "AVRO" - case 1: - schemaFormat = "JSON" - case 2: - schemaFormat = "PROTOBUF" - } - - // Allow override from config if specified - if cfg.Producers.SchemaFormat != "" { - schemaFormat = cfg.Producers.SchemaFormat - } - - if err := registerTopicSchema(cfg.SchemaRegistry.URL, topic, schemaFormat); err != nil { - return fmt.Errorf("failed to register schema for topic %s (format: %s): %w", topic, schemaFormat, err) - } - log.Printf("Schema registered for topic %s with format: %s", topic, schemaFormat) - } - - return nil -} - -// waitForSchemaRegistry waits for Schema Registry to be ready -func waitForSchemaRegistry(url string) error { - maxRetries := 30 - for i := 0; i < maxRetries; i++ { - resp, err := http.Get(url + "/subjects") - if err == nil && resp.StatusCode == 200 { - resp.Body.Close() - return nil - } - if resp != nil { - resp.Body.Close() - } - time.Sleep(2 * time.Second) - } - return fmt.Errorf("schema registry not ready after %d retries", maxRetries) -} - -// registerTopicSchema registers a schema for a specific topic -func registerTopicSchema(registryURL, topicName, schemaFormat string) error { - // Determine schema format, default to AVRO - if schemaFormat == "" { - schemaFormat = "AVRO" - } - - var schemaStr string - var schemaType string - - switch strings.ToUpper(schemaFormat) { - case "AVRO": - schemaStr = schema.GetAvroSchema() - schemaType = "AVRO" - case "JSON", "JSON_SCHEMA": - schemaStr = schema.GetJSONSchema() - schemaType = "JSON" - case "PROTOBUF": - schemaStr = schema.GetProtobufSchema() - schemaType = "PROTOBUF" - default: - return fmt.Errorf("unsupported schema format: %s", schemaFormat) - } - - schemaReq := map[string]interface{}{ - "schema": schemaStr, - "schemaType": schemaType, - } - - jsonData, err := json.Marshal(schemaReq) - if err != nil { - return err - } - - // Register schema for topic value - subject := topicName + "-value" - url := fmt.Sprintf("%s/subjects/%s/versions", registryURL, subject) - - client := &http.Client{Timeout: 10 * time.Second} - resp, err := client.Post(url, "application/vnd.schemaregistry.v1+json", bytes.NewBuffer(jsonData)) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("schema registration failed: status=%d, body=%s", resp.StatusCode, string(body)) - } - - log.Printf("Schema registered for topic %s (format: %s)", topicName, schemaType) - return nil -} diff --git a/test/kafka/kafka-client-loadtest/config/loadtest.yaml b/test/kafka/kafka-client-loadtest/config/loadtest.yaml deleted file mode 100644 index 35c6ef399..000000000 --- a/test/kafka/kafka-client-loadtest/config/loadtest.yaml +++ /dev/null @@ -1,169 +0,0 @@ -# Kafka Client Load Test Configuration - -# Test execution settings -test_mode: "comprehensive" # producer, consumer, comprehensive -duration: "60s" # Test duration (0 = run indefinitely) - producers will stop at this time, consumers get +120s to drain - -# Kafka cluster configuration -kafka: - bootstrap_servers: - - "kafka-gateway:9093" - # Security settings (if needed) - security_protocol: "PLAINTEXT" # PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL - sasl_mechanism: "" # PLAIN, SCRAM-SHA-256, SCRAM-SHA-512 - sasl_username: "" - sasl_password: "" - -# Schema Registry configuration -schema_registry: - url: "http://schema-registry:8081" - auth: - username: "" - password: "" - -# Producer configuration -producers: - count: 10 # Number of producer instances - message_rate: 1000 # Messages per second per producer - message_size: 1024 # Message size in bytes - batch_size: 100 # Batch size for batching - linger_ms: 5 # Time to wait for batching - compression_type: "snappy" # none, gzip, snappy, lz4, zstd - acks: "all" # 0, 1, all - retries: 3 - retry_backoff_ms: 100 - request_timeout_ms: 30000 - delivery_timeout_ms: 120000 - - # Message generation settings - key_distribution: "random" # random, sequential, uuid - value_type: "avro" # json, avro, protobuf, binary - schema_format: "" # AVRO, JSON, PROTOBUF - schema registry format (when schemas enabled) - # Leave empty to auto-distribute formats across topics for testing: - # topic-0: AVRO, topic-1: JSON, topic-2: PROTOBUF, topic-3: AVRO, topic-4: JSON - # Set to specific format (e.g. "AVRO") to use same format for all topics - include_timestamp: true - include_headers: true - -# Consumer configuration -consumers: - count: 5 # Number of consumer instances - group_prefix: "loadtest-group" # Consumer group prefix - auto_offset_reset: "earliest" # earliest, latest - enable_auto_commit: true - auto_commit_interval_ms: 100 # Reduced from 1000ms to 100ms to minimize duplicate window - session_timeout_ms: 30000 - heartbeat_interval_ms: 3000 - max_poll_records: 500 - max_poll_interval_ms: 300000 - fetch_min_bytes: 1 - fetch_max_bytes: 52428800 # 50MB - fetch_max_wait_ms: 100 # 100ms - very fast polling for concurrent fetches and quick drain - -# Topic configuration -topics: - count: 5 # Number of topics to create/use - prefix: "loadtest-topic" # Topic name prefix - partitions: 4 # Partitions per topic (default: 4) - replication_factor: 1 # Replication factor - cleanup_policy: "delete" # delete, compact - retention_ms: 604800000 # 7 days - segment_ms: 86400000 # 1 day - -# Schema configuration (for Avro/Protobuf tests) -schemas: - enabled: true - registry_timeout_ms: 10000 - - # Test schemas - user_event: - type: "avro" - schema: | - { - "type": "record", - "name": "UserEvent", - "namespace": "com.seaweedfs.test", - "fields": [ - {"name": "user_id", "type": "string"}, - {"name": "event_type", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "properties", "type": {"type": "map", "values": "string"}} - ] - } - - transaction: - type: "avro" - schema: | - { - "type": "record", - "name": "Transaction", - "namespace": "com.seaweedfs.test", - "fields": [ - {"name": "transaction_id", "type": "string"}, - {"name": "amount", "type": "double"}, - {"name": "currency", "type": "string"}, - {"name": "merchant_id", "type": "string"}, - {"name": "timestamp", "type": "long"} - ] - } - -# Metrics and monitoring -metrics: - enabled: true - collection_interval: "10s" - prometheus_port: 8080 - - # What to measure - track_latency: true - track_throughput: true - track_errors: true - track_consumer_lag: true - - # Latency percentiles to track - latency_percentiles: [50, 90, 95, 99, 99.9] - -# Load test scenarios -scenarios: - # Steady state load test - steady_load: - producer_rate: 1000 # messages/sec per producer - ramp_up_time: "30s" - steady_duration: "240s" - ramp_down_time: "30s" - - # Burst load test - burst_load: - base_rate: 500 - burst_rate: 5000 - burst_duration: "10s" - burst_interval: "60s" - - # Gradual ramp test - ramp_test: - start_rate: 100 - end_rate: 2000 - ramp_duration: "300s" - step_duration: "30s" - -# Error injection (for resilience testing) -chaos: - enabled: false - producer_failure_rate: 0.01 # 1% of producers fail randomly - consumer_failure_rate: 0.01 # 1% of consumers fail randomly - network_partition_probability: 0.001 # Network issues - broker_restart_interval: "0s" # Restart brokers periodically (0s = disabled) - -# Output and reporting -output: - results_dir: "/test-results" - export_prometheus: true - export_csv: true - export_json: true - real_time_stats: true - stats_interval: "30s" - -# Logging -logging: - level: "info" # debug, info, warn, error - format: "text" # text, json - enable_kafka_logs: false # Enable Kafka client debug logs \ No newline at end of file diff --git a/test/kafka/kafka-client-loadtest/docker-compose-kafka-compare.yml b/test/kafka/kafka-client-loadtest/docker-compose-kafka-compare.yml deleted file mode 100644 index e3184941b..000000000 --- a/test/kafka/kafka-client-loadtest/docker-compose-kafka-compare.yml +++ /dev/null @@ -1,46 +0,0 @@ -version: '3.8' - -services: - zookeeper: - image: confluentinc/cp-zookeeper:7.5.0 - hostname: zookeeper - container_name: compare-zookeeper - ports: - - "2181:2181" - environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_TICK_TIME: 2000 - - kafka: - image: confluentinc/cp-kafka:7.5.0 - hostname: kafka - container_name: compare-kafka - depends_on: - - zookeeper - ports: - - "9092:9092" - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 - KAFKA_LOG_RETENTION_HOURS: 1 - KAFKA_LOG_SEGMENT_BYTES: 1073741824 - - schema-registry: - image: confluentinc/cp-schema-registry:7.5.0 - hostname: schema-registry - container_name: compare-schema-registry - depends_on: - - kafka - ports: - - "8082:8081" - environment: - SCHEMA_REGISTRY_HOST_NAME: schema-registry - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'kafka:29092' - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - diff --git a/test/kafka/kafka-client-loadtest/docker-compose.yml b/test/kafka/kafka-client-loadtest/docker-compose.yml deleted file mode 100644 index 5ac715610..000000000 --- a/test/kafka/kafka-client-loadtest/docker-compose.yml +++ /dev/null @@ -1,336 +0,0 @@ -# SeaweedFS Kafka Client Load Test -# Tests the full stack: Kafka Clients -> SeaweedFS Kafka Gateway -> SeaweedFS MQ Broker -> Storage - -x-seaweedfs-build: &seaweedfs-build - build: - context: . - dockerfile: Dockerfile.seaweedfs - args: - TARGETARCH: ${GOARCH:-arm64} - CACHE_BUST: ${CACHE_BUST:-latest} - image: kafka-client-loadtest-seaweedfs - -services: - # Schema Registry (for Avro/Protobuf support) - # Using host networking to connect to localhost:9093 (where our gateway advertises) - # WORKAROUND: Schema Registry hangs on empty _schemas topic during bootstrap - # Pre-create the topic first to avoid "wait to catch up" hang - schema-registry-init: - image: confluentinc/cp-kafka:8.0.0 - container_name: loadtest-schema-registry-init - networks: - - kafka-loadtest-net - depends_on: - kafka-gateway: - condition: service_healthy - command: > - bash -c " - echo 'Creating _schemas topic...'; - kafka-topics --create --topic _schemas --partitions 1 --replication-factor 1 --bootstrap-server kafka-gateway:9093 --if-not-exists || exit 0; - echo '_schemas topic created successfully'; - " - - schema-registry: - image: confluentinc/cp-schema-registry:8.0.0 - container_name: loadtest-schema-registry - restart: on-failure:3 - ports: - - "8081:8081" - environment: - SCHEMA_REGISTRY_HOST_NAME: schema-registry - SCHEMA_REGISTRY_HOST_PORT: 8081 - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'kafka-gateway:9093' - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas - SCHEMA_REGISTRY_DEBUG: "true" - SCHEMA_REGISTRY_SCHEMA_COMPATIBILITY_LEVEL: "full" - SCHEMA_REGISTRY_LEADER_ELIGIBILITY: "true" - SCHEMA_REGISTRY_MODE: "READWRITE" - SCHEMA_REGISTRY_GROUP_ID: "schema-registry" - SCHEMA_REGISTRY_KAFKASTORE_GROUP_ID: "schema-registry" - SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: "PLAINTEXT" - SCHEMA_REGISTRY_KAFKASTORE_TOPIC_REPLICATION_FACTOR: "1" - SCHEMA_REGISTRY_KAFKASTORE_INIT_TIMEOUT: "120000" - SCHEMA_REGISTRY_KAFKASTORE_TIMEOUT: "60000" - SCHEMA_REGISTRY_REQUEST_TIMEOUT_MS: "60000" - SCHEMA_REGISTRY_RETRY_BACKOFF_MS: "1000" - # Force IPv4 to work around Java IPv6 issues - # Enable verbose logging and set reasonable memory limits - KAFKA_OPTS: "-Djava.net.preferIPv4Stack=true -Djava.net.preferIPv4Addresses=true -Xmx512M -Xms256M" - KAFKA_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/kafka/log4j.properties" - SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: "INFO" - SCHEMA_REGISTRY_KAFKASTORE_WRITE_TIMEOUT_MS: "60000" - SCHEMA_REGISTRY_KAFKASTORE_INIT_RETRY_BACKOFF_MS: "5000" - SCHEMA_REGISTRY_KAFKASTORE_CONSUMER_AUTO_OFFSET_RESET: "earliest" - # Enable comprehensive Kafka client DEBUG logging to trace offset management - SCHEMA_REGISTRY_LOG4J_LOGGERS: "org.apache.kafka.clients.consumer.internals.OffsetsRequestManager=DEBUG,org.apache.kafka.clients.consumer.internals.Fetcher=DEBUG,org.apache.kafka.clients.consumer.internals.AbstractFetch=DEBUG,org.apache.kafka.clients.Metadata=DEBUG,org.apache.kafka.common.network=DEBUG" - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8081/subjects"] - interval: 15s - timeout: 10s - retries: 10 - start_period: 30s - depends_on: - schema-registry-init: - condition: service_completed_successfully - kafka-gateway: - condition: service_healthy - networks: - - kafka-loadtest-net - - # SeaweedFS Master (coordinator) - seaweedfs-master: - <<: *seaweedfs-build - container_name: loadtest-seaweedfs-master - ports: - - "9333:9333" - - "19333:19333" - command: - - master - - -ip=seaweedfs-master - - -port=9333 - - -port.grpc=19333 - - -volumeSizeLimitMB=48 - - -defaultReplication=000 - - -garbageThreshold=0.3 - volumes: - - ./data/seaweedfs-master:/data - healthcheck: - test: ["CMD-SHELL", "wget --quiet --tries=1 --spider http://seaweedfs-master:9333/cluster/status || exit 1"] - interval: 10s - timeout: 5s - retries: 10 - start_period: 20s - networks: - - kafka-loadtest-net - - # SeaweedFS Volume Server (storage) - seaweedfs-volume: - <<: *seaweedfs-build - container_name: loadtest-seaweedfs-volume - ports: - - "8080:8080" - - "18080:18080" - command: - - volume - - -mserver=seaweedfs-master:9333 - - -ip=seaweedfs-volume - - -port=8080 - - -port.grpc=18080 - - -publicUrl=seaweedfs-volume:8080 - - -preStopSeconds=1 - - -compactionMBps=50 - - -max=0 - - -dir=/data - depends_on: - seaweedfs-master: - condition: service_healthy - volumes: - - ./data/seaweedfs-volume:/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://seaweedfs-volume:8080/status"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 15s - networks: - - kafka-loadtest-net - - # SeaweedFS Filer (metadata) - seaweedfs-filer: - <<: *seaweedfs-build - container_name: loadtest-seaweedfs-filer - ports: - - "8888:8888" - - "18888:18888" - - "18889:18889" - command: - - filer - - -master=seaweedfs-master:9333 - - -ip=seaweedfs-filer - - -port=8888 - - -port.grpc=18888 - - -metricsPort=18889 - - -defaultReplicaPlacement=000 - depends_on: - seaweedfs-master: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - volumes: - - ./data/seaweedfs-filer:/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://seaweedfs-filer:8888/"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 15s - networks: - - kafka-loadtest-net - - # SeaweedFS MQ Broker (message handling) - seaweedfs-mq-broker: - <<: *seaweedfs-build - container_name: loadtest-seaweedfs-mq-broker - ports: - - "17777:17777" - - "18777:18777" # pprof profiling port - command: - - mq.broker - - -master=seaweedfs-master:9333 - - -ip=seaweedfs-mq-broker - - -port=17777 - - -logFlushInterval=0 - - -port.pprof=18777 - depends_on: - seaweedfs-filer: - condition: service_healthy - volumes: - - ./data/seaweedfs-mq:/data - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "17777"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 20s - networks: - - kafka-loadtest-net - - # SeaweedFS Kafka Gateway (Kafka protocol compatibility) - kafka-gateway: - <<: *seaweedfs-build - container_name: loadtest-kafka-gateway - ports: - - "9093:9093" - - "10093:10093" # pprof profiling port - command: - - mq.kafka.gateway - - -master=seaweedfs-master:9333 - - -ip=kafka-gateway - - -ip.bind=0.0.0.0 - - -port=9093 - - -default-partitions=4 - - -schema-registry-url=http://schema-registry:8081 - - -port.pprof=10093 - depends_on: - seaweedfs-filer: - condition: service_healthy - seaweedfs-mq-broker: - condition: service_healthy - environment: - - SEAWEEDFS_MASTERS=seaweedfs-master:9333 - # - KAFKA_DEBUG=1 # Enable debug logging for Schema Registry troubleshooting - - KAFKA_ADVERTISED_HOST=kafka-gateway - volumes: - - ./data/kafka-gateway:/data - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "9093"] - interval: 10s - timeout: 5s - retries: 10 - start_period: 45s # Increased to account for 10s startup delay + filer discovery - networks: - - kafka-loadtest-net - - # Kafka Client Load Test Runner - kafka-client-loadtest: - build: - context: ../../.. - dockerfile: test/kafka/kafka-client-loadtest/Dockerfile.loadtest - container_name: kafka-client-loadtest-runner - depends_on: - kafka-gateway: - condition: service_healthy - # schema-registry: - # condition: service_healthy - environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka-gateway:9093 - - SCHEMA_REGISTRY_URL=http://schema-registry:8081 - - TEST_DURATION=${TEST_DURATION:-300s} - - PRODUCER_COUNT=${PRODUCER_COUNT:-10} - - CONSUMER_COUNT=${CONSUMER_COUNT:-5} - - MESSAGE_RATE=${MESSAGE_RATE:-1000} - - MESSAGE_SIZE=${MESSAGE_SIZE:-1024} - - TOPIC_COUNT=${TOPIC_COUNT:-5} - - PARTITIONS_PER_TOPIC=${PARTITIONS_PER_TOPIC:-3} - - TEST_MODE=${TEST_MODE:-comprehensive} - - SCHEMAS_ENABLED=${SCHEMAS_ENABLED:-true} - - VALUE_TYPE=${VALUE_TYPE:-avro} - profiles: - - loadtest - volumes: - - ./test-results:/test-results - networks: - - kafka-loadtest-net - - # Monitoring and Metrics - prometheus: - image: prom/prometheus:latest - container_name: loadtest-prometheus - ports: - - "9090:9090" - volumes: - - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml - - prometheus-data:/prometheus - networks: - - kafka-loadtest-net - profiles: - - monitoring - - grafana: - image: grafana/grafana:latest - container_name: loadtest-grafana - ports: - - "3000:3000" - environment: - - GF_SECURITY_ADMIN_PASSWORD=admin - volumes: - - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards - - ./monitoring/grafana/provisioning:/etc/grafana/provisioning - - grafana-data:/var/lib/grafana - networks: - - kafka-loadtest-net - profiles: - - monitoring - - # Schema Registry Debug Runner - schema-registry-debug: - build: - context: debug-client - dockerfile: Dockerfile - container_name: schema-registry-debug-runner - depends_on: - kafka-gateway: - condition: service_healthy - networks: - - kafka-loadtest-net - profiles: - - debug - - # SeekToBeginning test - reproduces the hang issue - seek-test: - build: - context: . - dockerfile: Dockerfile.seektest - container_name: loadtest-seek-test - depends_on: - kafka-gateway: - condition: service_healthy - schema-registry: - condition: service_healthy - environment: - - KAFKA_BOOTSTRAP_SERVERS=kafka-gateway:9093 - networks: - - kafka-loadtest-net - entrypoint: ["java", "-cp", "target/seek-test.jar", "SeekToBeginningTest"] - command: ["kafka-gateway:9093"] - -volumes: - prometheus-data: - grafana-data: - -networks: - kafka-loadtest-net: - driver: bridge - name: kafka-client-loadtest - diff --git a/test/kafka/kafka-client-loadtest/go.mod b/test/kafka/kafka-client-loadtest/go.mod deleted file mode 100644 index 72f087b85..000000000 --- a/test/kafka/kafka-client-loadtest/go.mod +++ /dev/null @@ -1,41 +0,0 @@ -module github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest - -go 1.24.0 - -toolchain go1.24.7 - -require ( - github.com/IBM/sarama v1.46.1 - github.com/linkedin/goavro/v2 v2.14.0 - github.com/prometheus/client_golang v1.23.2 - google.golang.org/protobuf v1.36.8 - gopkg.in/yaml.v3 v3.0.1 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/eapache/go-resiliency v1.7.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect - github.com/eapache/queue v1.1.0 // indirect - github.com/golang/snappy v1.0.0 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/jcmturner/aescts/v2 v2.0.0 // indirect - github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.7.6 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect - github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pierrec/lz4/v4 v4.1.22 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/crypto v0.43.0 // indirect - golang.org/x/net v0.46.0 // indirect - golang.org/x/sys v0.37.0 // indirect -) diff --git a/test/kafka/kafka-client-loadtest/go.sum b/test/kafka/kafka-client-loadtest/go.sum deleted file mode 100644 index 80340f879..000000000 --- a/test/kafka/kafka-client-loadtest/go.sum +++ /dev/null @@ -1,129 +0,0 @@ -github.com/IBM/sarama v1.46.1 h1:AlDkvyQm4LKktoQZxv0sbTfH3xukeH7r/UFBbUmFV9M= -github.com/IBM/sarama v1.46.1/go.mod h1:ipyOREIx+o9rMSrrPGLZHGuT0mzecNzKd19Quq+Q8AA= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= -github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= -github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= -github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= -github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= -github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= -github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= -github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= -github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= -github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= -github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= -github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= -github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= -github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linkedin/goavro/v2 v2.14.0 h1:aNO/js65U+Mwq4yB5f1h01c3wiM458qtRad1DN0CMUI= -github.com/linkedin/goavro/v2 v2.14.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= -github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= -github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/kafka/kafka-client-loadtest/internal/config/config.go b/test/kafka/kafka-client-loadtest/internal/config/config.go deleted file mode 100644 index dd9f6d6b2..000000000 --- a/test/kafka/kafka-client-loadtest/internal/config/config.go +++ /dev/null @@ -1,361 +0,0 @@ -package config - -import ( - "fmt" - "os" - "strconv" - "strings" - "time" - - "gopkg.in/yaml.v3" -) - -// Config represents the complete load test configuration -type Config struct { - TestMode string `yaml:"test_mode"` - Duration time.Duration `yaml:"duration"` - - Kafka KafkaConfig `yaml:"kafka"` - SchemaRegistry SchemaRegistryConfig `yaml:"schema_registry"` - Producers ProducersConfig `yaml:"producers"` - Consumers ConsumersConfig `yaml:"consumers"` - Topics TopicsConfig `yaml:"topics"` - Schemas SchemasConfig `yaml:"schemas"` - Metrics MetricsConfig `yaml:"metrics"` - Scenarios ScenariosConfig `yaml:"scenarios"` - Chaos ChaosConfig `yaml:"chaos"` - Output OutputConfig `yaml:"output"` - Logging LoggingConfig `yaml:"logging"` -} - -type KafkaConfig struct { - BootstrapServers []string `yaml:"bootstrap_servers"` - SecurityProtocol string `yaml:"security_protocol"` - SASLMechanism string `yaml:"sasl_mechanism"` - SASLUsername string `yaml:"sasl_username"` - SASLPassword string `yaml:"sasl_password"` -} - -type SchemaRegistryConfig struct { - URL string `yaml:"url"` - Auth struct { - Username string `yaml:"username"` - Password string `yaml:"password"` - } `yaml:"auth"` -} - -type ProducersConfig struct { - Count int `yaml:"count"` - MessageRate int `yaml:"message_rate"` - MessageSize int `yaml:"message_size"` - BatchSize int `yaml:"batch_size"` - LingerMs int `yaml:"linger_ms"` - CompressionType string `yaml:"compression_type"` - Acks string `yaml:"acks"` - Retries int `yaml:"retries"` - RetryBackoffMs int `yaml:"retry_backoff_ms"` - RequestTimeoutMs int `yaml:"request_timeout_ms"` - DeliveryTimeoutMs int `yaml:"delivery_timeout_ms"` - KeyDistribution string `yaml:"key_distribution"` - ValueType string `yaml:"value_type"` // json, avro, protobuf, binary - SchemaFormat string `yaml:"schema_format"` // AVRO, JSON, PROTOBUF (schema registry format) - IncludeTimestamp bool `yaml:"include_timestamp"` - IncludeHeaders bool `yaml:"include_headers"` -} - -type ConsumersConfig struct { - Count int `yaml:"count"` - GroupPrefix string `yaml:"group_prefix"` - AutoOffsetReset string `yaml:"auto_offset_reset"` - EnableAutoCommit bool `yaml:"enable_auto_commit"` - AutoCommitIntervalMs int `yaml:"auto_commit_interval_ms"` - SessionTimeoutMs int `yaml:"session_timeout_ms"` - HeartbeatIntervalMs int `yaml:"heartbeat_interval_ms"` - MaxPollRecords int `yaml:"max_poll_records"` - MaxPollIntervalMs int `yaml:"max_poll_interval_ms"` - FetchMinBytes int `yaml:"fetch_min_bytes"` - FetchMaxBytes int `yaml:"fetch_max_bytes"` - FetchMaxWaitMs int `yaml:"fetch_max_wait_ms"` -} - -type TopicsConfig struct { - Count int `yaml:"count"` - Prefix string `yaml:"prefix"` - Partitions int `yaml:"partitions"` - ReplicationFactor int `yaml:"replication_factor"` - CleanupPolicy string `yaml:"cleanup_policy"` - RetentionMs int64 `yaml:"retention_ms"` - SegmentMs int64 `yaml:"segment_ms"` -} - -type SchemaConfig struct { - Type string `yaml:"type"` - Schema string `yaml:"schema"` -} - -type SchemasConfig struct { - Enabled bool `yaml:"enabled"` - RegistryTimeoutMs int `yaml:"registry_timeout_ms"` - UserEvent SchemaConfig `yaml:"user_event"` - Transaction SchemaConfig `yaml:"transaction"` -} - -type MetricsConfig struct { - Enabled bool `yaml:"enabled"` - CollectionInterval time.Duration `yaml:"collection_interval"` - PrometheusPort int `yaml:"prometheus_port"` - TrackLatency bool `yaml:"track_latency"` - TrackThroughput bool `yaml:"track_throughput"` - TrackErrors bool `yaml:"track_errors"` - TrackConsumerLag bool `yaml:"track_consumer_lag"` - LatencyPercentiles []float64 `yaml:"latency_percentiles"` -} - -type ScenarioConfig struct { - ProducerRate int `yaml:"producer_rate"` - RampUpTime time.Duration `yaml:"ramp_up_time"` - SteadyDuration time.Duration `yaml:"steady_duration"` - RampDownTime time.Duration `yaml:"ramp_down_time"` - BaseRate int `yaml:"base_rate"` - BurstRate int `yaml:"burst_rate"` - BurstDuration time.Duration `yaml:"burst_duration"` - BurstInterval time.Duration `yaml:"burst_interval"` - StartRate int `yaml:"start_rate"` - EndRate int `yaml:"end_rate"` - RampDuration time.Duration `yaml:"ramp_duration"` - StepDuration time.Duration `yaml:"step_duration"` -} - -type ScenariosConfig struct { - SteadyLoad ScenarioConfig `yaml:"steady_load"` - BurstLoad ScenarioConfig `yaml:"burst_load"` - RampTest ScenarioConfig `yaml:"ramp_test"` -} - -type ChaosConfig struct { - Enabled bool `yaml:"enabled"` - ProducerFailureRate float64 `yaml:"producer_failure_rate"` - ConsumerFailureRate float64 `yaml:"consumer_failure_rate"` - NetworkPartitionProbability float64 `yaml:"network_partition_probability"` - BrokerRestartInterval time.Duration `yaml:"broker_restart_interval"` -} - -type OutputConfig struct { - ResultsDir string `yaml:"results_dir"` - ExportPrometheus bool `yaml:"export_prometheus"` - ExportCSV bool `yaml:"export_csv"` - ExportJSON bool `yaml:"export_json"` - RealTimeStats bool `yaml:"real_time_stats"` - StatsInterval time.Duration `yaml:"stats_interval"` -} - -type LoggingConfig struct { - Level string `yaml:"level"` - Format string `yaml:"format"` - EnableKafkaLogs bool `yaml:"enable_kafka_logs"` -} - -// Load reads and parses the configuration file -func Load(configFile string) (*Config, error) { - data, err := os.ReadFile(configFile) - if err != nil { - return nil, fmt.Errorf("failed to read config file %s: %w", configFile, err) - } - - var cfg Config - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("failed to parse config file %s: %w", configFile, err) - } - - // Apply default values - cfg.setDefaults() - - // Apply environment variable overrides - cfg.applyEnvOverrides() - - return &cfg, nil -} - -// ApplyOverrides applies command-line flag overrides -func (c *Config) ApplyOverrides(testMode string, duration time.Duration) { - if testMode != "" { - c.TestMode = testMode - } - if duration > 0 { - c.Duration = duration - } -} - -// setDefaults sets default values for optional fields -func (c *Config) setDefaults() { - if c.TestMode == "" { - c.TestMode = "comprehensive" - } - - if len(c.Kafka.BootstrapServers) == 0 { - c.Kafka.BootstrapServers = []string{"kafka-gateway:9093"} - } - - if c.SchemaRegistry.URL == "" { - c.SchemaRegistry.URL = "http://schema-registry:8081" - } - - // Schema support is always enabled since Kafka Gateway now enforces schema-first behavior - c.Schemas.Enabled = true - - if c.Producers.Count == 0 { - c.Producers.Count = 10 - } - - if c.Consumers.Count == 0 { - c.Consumers.Count = 5 - } - - if c.Topics.Count == 0 { - c.Topics.Count = 5 - } - - if c.Topics.Prefix == "" { - c.Topics.Prefix = "loadtest-topic" - } - - if c.Topics.Partitions == 0 { - c.Topics.Partitions = 4 // Default to 4 partitions - } - - if c.Topics.ReplicationFactor == 0 { - c.Topics.ReplicationFactor = 1 // Default to 1 replica - } - - if c.Consumers.GroupPrefix == "" { - c.Consumers.GroupPrefix = "loadtest-group" - } - - if c.Output.ResultsDir == "" { - c.Output.ResultsDir = "/test-results" - } - - if c.Metrics.CollectionInterval == 0 { - c.Metrics.CollectionInterval = 10 * time.Second - } - - if c.Output.StatsInterval == 0 { - c.Output.StatsInterval = 30 * time.Second - } -} - -// applyEnvOverrides applies environment variable overrides -func (c *Config) applyEnvOverrides() { - if servers := os.Getenv("KAFKA_BOOTSTRAP_SERVERS"); servers != "" { - c.Kafka.BootstrapServers = strings.Split(servers, ",") - } - - if url := os.Getenv("SCHEMA_REGISTRY_URL"); url != "" { - c.SchemaRegistry.URL = url - } - - if mode := os.Getenv("TEST_MODE"); mode != "" { - c.TestMode = mode - } - - if duration := os.Getenv("TEST_DURATION"); duration != "" { - if d, err := time.ParseDuration(duration); err == nil { - c.Duration = d - } - } - - if count := os.Getenv("PRODUCER_COUNT"); count != "" { - if i, err := strconv.Atoi(count); err == nil { - c.Producers.Count = i - } - } - - if count := os.Getenv("CONSUMER_COUNT"); count != "" { - if i, err := strconv.Atoi(count); err == nil { - c.Consumers.Count = i - } - } - - if rate := os.Getenv("MESSAGE_RATE"); rate != "" { - if i, err := strconv.Atoi(rate); err == nil { - c.Producers.MessageRate = i - } - } - - if size := os.Getenv("MESSAGE_SIZE"); size != "" { - if i, err := strconv.Atoi(size); err == nil { - c.Producers.MessageSize = i - } - } - - if count := os.Getenv("TOPIC_COUNT"); count != "" { - if i, err := strconv.Atoi(count); err == nil { - c.Topics.Count = i - } - } - - if partitions := os.Getenv("PARTITIONS_PER_TOPIC"); partitions != "" { - if i, err := strconv.Atoi(partitions); err == nil { - c.Topics.Partitions = i - } - } - - if valueType := os.Getenv("VALUE_TYPE"); valueType != "" { - c.Producers.ValueType = valueType - } - - if schemaFormat := os.Getenv("SCHEMA_FORMAT"); schemaFormat != "" { - c.Producers.SchemaFormat = schemaFormat - } - - if enabled := os.Getenv("SCHEMAS_ENABLED"); enabled != "" { - c.Schemas.Enabled = enabled == "true" - } -} - -// GetTopicNames returns the list of topic names to use for testing -func (c *Config) GetTopicNames() []string { - topics := make([]string, c.Topics.Count) - for i := 0; i < c.Topics.Count; i++ { - topics[i] = fmt.Sprintf("%s-%d", c.Topics.Prefix, i) - } - return topics -} - -// GetConsumerGroupNames returns the list of consumer group names -func (c *Config) GetConsumerGroupNames() []string { - groups := make([]string, c.Consumers.Count) - for i := 0; i < c.Consumers.Count; i++ { - groups[i] = fmt.Sprintf("%s-%d", c.Consumers.GroupPrefix, i) - } - return groups -} - -// Validate validates the configuration -func (c *Config) Validate() error { - if c.TestMode != "producer" && c.TestMode != "consumer" && c.TestMode != "comprehensive" { - return fmt.Errorf("invalid test mode: %s", c.TestMode) - } - - if len(c.Kafka.BootstrapServers) == 0 { - return fmt.Errorf("kafka bootstrap servers not specified") - } - - if c.Producers.Count <= 0 && (c.TestMode == "producer" || c.TestMode == "comprehensive") { - return fmt.Errorf("producer count must be greater than 0 for producer or comprehensive tests") - } - - if c.Consumers.Count <= 0 && (c.TestMode == "consumer" || c.TestMode == "comprehensive") { - return fmt.Errorf("consumer count must be greater than 0 for consumer or comprehensive tests") - } - - if c.Topics.Count <= 0 { - return fmt.Errorf("topic count must be greater than 0") - } - - if c.Topics.Partitions <= 0 { - return fmt.Errorf("partitions per topic must be greater than 0") - } - - return nil -} diff --git a/test/kafka/kafka-client-loadtest/internal/consumer/consumer.go b/test/kafka/kafka-client-loadtest/internal/consumer/consumer.go deleted file mode 100644 index 6b23fdfe9..000000000 --- a/test/kafka/kafka-client-loadtest/internal/consumer/consumer.go +++ /dev/null @@ -1,776 +0,0 @@ -package consumer - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/IBM/sarama" - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/config" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/metrics" - pb "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema/pb" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/tracker" - "google.golang.org/protobuf/proto" -) - -// Consumer represents a Kafka consumer for load testing -type Consumer struct { - id int - config *config.Config - metricsCollector *metrics.Collector - saramaConsumer sarama.ConsumerGroup - useConfluent bool // Always false, Sarama only - topics []string - consumerGroup string - avroCodec *goavro.Codec - - // Schema format tracking per topic - schemaFormats map[string]string // topic -> schema format mapping (AVRO, JSON, PROTOBUF) - - // Processing tracking - messagesProcessed int64 - lastOffset map[string]map[int32]int64 - offsetMutex sync.RWMutex - - // Record tracking - tracker *tracker.Tracker -} - -// New creates a new consumer instance -func New(cfg *config.Config, collector *metrics.Collector, id int, recordTracker *tracker.Tracker) (*Consumer, error) { - // All consumers share the same group for load balancing across partitions - consumerGroup := cfg.Consumers.GroupPrefix - - c := &Consumer{ - id: id, - config: cfg, - metricsCollector: collector, - topics: cfg.GetTopicNames(), - consumerGroup: consumerGroup, - useConfluent: false, // Use Sarama by default - lastOffset: make(map[string]map[int32]int64), - schemaFormats: make(map[string]string), - tracker: recordTracker, - } - - // Initialize schema formats for each topic (must match producer logic) - // This mirrors the format distribution in cmd/loadtest/main.go registerSchemas() - for i, topic := range c.topics { - var schemaFormat string - if cfg.Producers.SchemaFormat != "" { - // Use explicit config if provided - schemaFormat = cfg.Producers.SchemaFormat - } else { - // Distribute across formats (same as producer) - switch i % 3 { - case 0: - schemaFormat = "AVRO" - case 1: - schemaFormat = "JSON" - case 2: - schemaFormat = "PROTOBUF" - } - } - c.schemaFormats[topic] = schemaFormat - log.Printf("Consumer %d: Topic %s will use schema format: %s", id, topic, schemaFormat) - } - - // Initialize consumer based on configuration - if c.useConfluent { - if err := c.initConfluentConsumer(); err != nil { - return nil, fmt.Errorf("failed to initialize Confluent consumer: %w", err) - } - } else { - if err := c.initSaramaConsumer(); err != nil { - return nil, fmt.Errorf("failed to initialize Sarama consumer: %w", err) - } - } - - // Initialize Avro codec if schemas are enabled - if cfg.Schemas.Enabled { - if err := c.initAvroCodec(); err != nil { - return nil, fmt.Errorf("failed to initialize Avro codec: %w", err) - } - } - - log.Printf("Consumer %d initialized for group %s", id, consumerGroup) - return c, nil -} - -// initSaramaConsumer initializes the Sarama consumer group -func (c *Consumer) initSaramaConsumer() error { - config := sarama.NewConfig() - - // Enable Sarama debug logging to diagnose connection issues - sarama.Logger = log.New(os.Stdout, fmt.Sprintf("[Sarama Consumer %d] ", c.id), log.LstdFlags) - - // Consumer configuration - config.Consumer.Return.Errors = true - config.Consumer.Offsets.Initial = sarama.OffsetOldest - if c.config.Consumers.AutoOffsetReset == "latest" { - config.Consumer.Offsets.Initial = sarama.OffsetNewest - } - - // Auto commit configuration - config.Consumer.Offsets.AutoCommit.Enable = c.config.Consumers.EnableAutoCommit - config.Consumer.Offsets.AutoCommit.Interval = time.Duration(c.config.Consumers.AutoCommitIntervalMs) * time.Millisecond - - // Session and heartbeat configuration - config.Consumer.Group.Session.Timeout = time.Duration(c.config.Consumers.SessionTimeoutMs) * time.Millisecond - config.Consumer.Group.Heartbeat.Interval = time.Duration(c.config.Consumers.HeartbeatIntervalMs) * time.Millisecond - - // Fetch configuration - config.Consumer.Fetch.Min = int32(c.config.Consumers.FetchMinBytes) - config.Consumer.Fetch.Default = 10 * 1024 * 1024 // 10MB per partition (increased from 1MB default) - config.Consumer.Fetch.Max = int32(c.config.Consumers.FetchMaxBytes) - config.Consumer.MaxWaitTime = time.Duration(c.config.Consumers.FetchMaxWaitMs) * time.Millisecond - config.Consumer.MaxProcessingTime = time.Duration(c.config.Consumers.MaxPollIntervalMs) * time.Millisecond - - // Channel buffer sizes for concurrent partition consumption - config.ChannelBufferSize = 256 // Increase from default 256 to allow more buffering - - // Enable concurrent partition fetching by increasing the number of broker connections - // This allows Sarama to fetch from multiple partitions in parallel - config.Net.MaxOpenRequests = 20 // Increase from default 5 to allow 20 concurrent requests - - // Connection retry and timeout configuration - config.Net.DialTimeout = 30 * time.Second // Increase from default 30s - config.Net.ReadTimeout = 30 * time.Second // Increase from default 30s - config.Net.WriteTimeout = 30 * time.Second // Increase from default 30s - config.Metadata.Retry.Max = 5 // Retry metadata fetch up to 5 times - config.Metadata.Retry.Backoff = 500 * time.Millisecond - config.Metadata.Timeout = 30 * time.Second // Increase metadata timeout - - // Version - config.Version = sarama.V2_8_0_0 - - // CRITICAL: Set unique ClientID to ensure each consumer gets a unique member ID - // Without this, all consumers from the same process get the same member ID and only 1 joins! - // Sarama uses ClientID as part of the member ID generation - // Use consumer ID directly - no timestamp needed since IDs are already unique per process - config.ClientID = fmt.Sprintf("loadtest-consumer-%d", c.id) - log.Printf("Consumer %d: Setting Sarama ClientID to: %s", c.id, config.ClientID) - - // Create consumer group - consumerGroup, err := sarama.NewConsumerGroup(c.config.Kafka.BootstrapServers, c.consumerGroup, config) - if err != nil { - return fmt.Errorf("failed to create Sarama consumer group: %w", err) - } - - c.saramaConsumer = consumerGroup - return nil -} - -// initConfluentConsumer initializes the Confluent Kafka Go consumer -func (c *Consumer) initConfluentConsumer() error { - // Confluent consumer disabled, using Sarama only - return fmt.Errorf("confluent consumer not enabled") -} - -// initAvroCodec initializes the Avro codec for schema-based messages -func (c *Consumer) initAvroCodec() error { - // Use the LoadTestMessage schema (matches what producer uses) - loadTestSchema := `{ - "type": "record", - "name": "LoadTestMessage", - "namespace": "com.seaweedfs.loadtest", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "producer_id", "type": "int"}, - {"name": "counter", "type": "long"}, - {"name": "user_id", "type": "string"}, - {"name": "event_type", "type": "string"}, - {"name": "properties", "type": {"type": "map", "values": "string"}} - ] - }` - - codec, err := goavro.NewCodec(loadTestSchema) - if err != nil { - return fmt.Errorf("failed to create Avro codec: %w", err) - } - - c.avroCodec = codec - return nil -} - -// Run starts the consumer and consumes messages until the context is cancelled -func (c *Consumer) Run(ctx context.Context) { - log.Printf("Consumer %d starting for group %s", c.id, c.consumerGroup) - defer log.Printf("Consumer %d stopped", c.id) - - if c.useConfluent { - c.runConfluentConsumer(ctx) - } else { - c.runSaramaConsumer(ctx) - } -} - -// runSaramaConsumer runs the Sarama consumer group -func (c *Consumer) runSaramaConsumer(ctx context.Context) { - handler := &ConsumerGroupHandler{ - consumer: c, - } - - var wg sync.WaitGroup - - // Start error handler - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case err, ok := <-c.saramaConsumer.Errors(): - if !ok { - return - } - log.Printf("Consumer %d error: %v", c.id, err) - c.metricsCollector.RecordConsumerError() - case <-ctx.Done(): - return - } - } - }() - - // Start consumer group session - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-ctx.Done(): - return - default: - if err := c.saramaConsumer.Consume(ctx, c.topics, handler); err != nil { - log.Printf("Consumer %d: Error consuming: %v", c.id, err) - c.metricsCollector.RecordConsumerError() - - // Wait briefly before retrying (reduced from 5s to 1s for faster recovery) - select { - case <-time.After(1 * time.Second): - case <-ctx.Done(): - return - } - } - } - } - }() - - // Start lag monitoring - wg.Add(1) - go func() { - defer wg.Done() - c.monitorConsumerLag(ctx) - }() - - // Wait for completion - <-ctx.Done() - log.Printf("Consumer %d: Context cancelled, shutting down", c.id) - wg.Wait() -} - -// runConfluentConsumer runs the Confluent consumer -func (c *Consumer) runConfluentConsumer(ctx context.Context) { - // Confluent consumer disabled, using Sarama only - log.Printf("Consumer %d: Confluent consumer not enabled", c.id) -} - -// processMessage processes a consumed message -func (c *Consumer) processMessage(topicPtr *string, partition int32, offset int64, key, value []byte) error { - topic := "" - if topicPtr != nil { - topic = *topicPtr - } - - // Update offset tracking - c.updateOffset(topic, partition, offset) - - // Decode message based on topic-specific schema format - var decodedMessage interface{} - var err error - - // Determine schema format for this topic (if schemas are enabled) - var schemaFormat string - if c.config.Schemas.Enabled { - schemaFormat = c.schemaFormats[topic] - if schemaFormat == "" { - // Fallback to config if topic not in map - schemaFormat = c.config.Producers.ValueType - } - } else { - // No schemas, use global value type - schemaFormat = c.config.Producers.ValueType - } - - // Decode message based on format - switch schemaFormat { - case "avro", "AVRO": - decodedMessage, err = c.decodeAvroMessage(value) - case "json", "JSON", "JSON_SCHEMA": - decodedMessage, err = c.decodeJSONSchemaMessage(value) - case "protobuf", "PROTOBUF": - decodedMessage, err = c.decodeProtobufMessage(value) - case "binary": - decodedMessage, err = c.decodeBinaryMessage(value) - default: - // Fallback to plain JSON - decodedMessage, err = c.decodeJSONMessage(value) - } - - if err != nil { - return fmt.Errorf("failed to decode message: %w", err) - } - - // Note: Removed artificial delay to allow maximum throughput - // If you need to simulate processing time, add a configurable delay setting - // time.Sleep(time.Millisecond) // Minimal processing delay - - // Record metrics - c.metricsCollector.RecordConsumedMessage(len(value)) - c.messagesProcessed++ - - // Log progress - if c.id == 0 && c.messagesProcessed%1000 == 0 { - log.Printf("Consumer %d: Processed %d messages (latest: %s[%d]@%d)", - c.id, c.messagesProcessed, topic, partition, offset) - } - - // Optional: Validate message content (for testing purposes) - if c.config.Chaos.Enabled { - if err := c.validateMessage(decodedMessage); err != nil { - log.Printf("Consumer %d: Message validation failed: %v", c.id, err) - } - } - - return nil -} - -// decodeJSONMessage decodes a JSON message -func (c *Consumer) decodeJSONMessage(value []byte) (interface{}, error) { - var message map[string]interface{} - if err := json.Unmarshal(value, &message); err != nil { - // DEBUG: Log the raw bytes when JSON parsing fails - log.Printf("Consumer %d: JSON decode failed. Length: %d, Raw bytes (hex): %x, Raw string: %q, Error: %v", - c.id, len(value), value, string(value), err) - return nil, err - } - return message, nil -} - -// decodeAvroMessage decodes an Avro message (handles Confluent Wire Format) -func (c *Consumer) decodeAvroMessage(value []byte) (interface{}, error) { - if c.avroCodec == nil { - return nil, fmt.Errorf("Avro codec not initialized") - } - - // Handle Confluent Wire Format when schemas are enabled - var avroData []byte - if c.config.Schemas.Enabled { - if len(value) < 5 { - return nil, fmt.Errorf("message too short for Confluent Wire Format: %d bytes", len(value)) - } - - // Check magic byte (should be 0) - if value[0] != 0 { - return nil, fmt.Errorf("invalid Confluent Wire Format magic byte: %d", value[0]) - } - - // Extract schema ID (bytes 1-4, big-endian) - schemaID := binary.BigEndian.Uint32(value[1:5]) - _ = schemaID // TODO: Could validate schema ID matches expected schema - - // Extract Avro data (bytes 5+) - avroData = value[5:] - } else { - // No wire format, use raw data - avroData = value - } - - native, _, err := c.avroCodec.NativeFromBinary(avroData) - if err != nil { - return nil, fmt.Errorf("failed to decode Avro data: %w", err) - } - - return native, nil -} - -// decodeJSONSchemaMessage decodes a JSON Schema message (handles Confluent Wire Format) -func (c *Consumer) decodeJSONSchemaMessage(value []byte) (interface{}, error) { - // Handle Confluent Wire Format when schemas are enabled - var jsonData []byte - if c.config.Schemas.Enabled { - if len(value) < 5 { - return nil, fmt.Errorf("message too short for Confluent Wire Format: %d bytes", len(value)) - } - - // Check magic byte (should be 0) - if value[0] != 0 { - return nil, fmt.Errorf("invalid Confluent Wire Format magic byte: %d", value[0]) - } - - // Extract schema ID (bytes 1-4, big-endian) - schemaID := binary.BigEndian.Uint32(value[1:5]) - _ = schemaID // TODO: Could validate schema ID matches expected schema - - // Extract JSON data (bytes 5+) - jsonData = value[5:] - } else { - // No wire format, use raw data - jsonData = value - } - - // Decode JSON - var message map[string]interface{} - if err := json.Unmarshal(jsonData, &message); err != nil { - return nil, fmt.Errorf("failed to decode JSON data: %w", err) - } - - return message, nil -} - -// decodeProtobufMessage decodes a Protobuf message (handles Confluent Wire Format) -func (c *Consumer) decodeProtobufMessage(value []byte) (interface{}, error) { - // Handle Confluent Wire Format when schemas are enabled - var protoData []byte - if c.config.Schemas.Enabled { - if len(value) < 5 { - return nil, fmt.Errorf("message too short for Confluent Wire Format: %d bytes", len(value)) - } - - // Check magic byte (should be 0) - if value[0] != 0 { - return nil, fmt.Errorf("invalid Confluent Wire Format magic byte: %d", value[0]) - } - - // Extract schema ID (bytes 1-4, big-endian) - schemaID := binary.BigEndian.Uint32(value[1:5]) - _ = schemaID // TODO: Could validate schema ID matches expected schema - - // Extract Protobuf data (bytes 5+) - protoData = value[5:] - } else { - // No wire format, use raw data - protoData = value - } - - // Unmarshal protobuf message - var protoMsg pb.LoadTestMessage - if err := proto.Unmarshal(protoData, &protoMsg); err != nil { - return nil, fmt.Errorf("failed to unmarshal Protobuf data: %w", err) - } - - // Convert to map for consistency with other decoders - return map[string]interface{}{ - "id": protoMsg.Id, - "timestamp": protoMsg.Timestamp, - "producer_id": protoMsg.ProducerId, - "counter": protoMsg.Counter, - "user_id": protoMsg.UserId, - "event_type": protoMsg.EventType, - "properties": protoMsg.Properties, - }, nil -} - -// decodeBinaryMessage decodes a binary message -func (c *Consumer) decodeBinaryMessage(value []byte) (interface{}, error) { - if len(value) < 20 { - return nil, fmt.Errorf("binary message too short") - } - - // Extract fields from the binary format: - // [producer_id:4][counter:8][timestamp:8][random_data:...] - - producerID := int(value[0])<<24 | int(value[1])<<16 | int(value[2])<<8 | int(value[3]) - - var counter int64 - for i := 0; i < 8; i++ { - counter |= int64(value[4+i]) << (56 - i*8) - } - - var timestamp int64 - for i := 0; i < 8; i++ { - timestamp |= int64(value[12+i]) << (56 - i*8) - } - - return map[string]interface{}{ - "producer_id": producerID, - "counter": counter, - "timestamp": timestamp, - "data_size": len(value), - }, nil -} - -// validateMessage performs basic message validation -func (c *Consumer) validateMessage(message interface{}) error { - // This is a placeholder for message validation logic - // In a real load test, you might validate: - // - Message structure - // - Required fields - // - Data consistency - // - Schema compliance - - if message == nil { - return fmt.Errorf("message is nil") - } - - return nil -} - -// updateOffset updates the last seen offset for lag calculation -func (c *Consumer) updateOffset(topic string, partition int32, offset int64) { - c.offsetMutex.Lock() - defer c.offsetMutex.Unlock() - - if c.lastOffset[topic] == nil { - c.lastOffset[topic] = make(map[int32]int64) - } - c.lastOffset[topic][partition] = offset -} - -// monitorConsumerLag monitors and reports consumer lag -func (c *Consumer) monitorConsumerLag(ctx context.Context) { - ticker := time.NewTicker(30 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - c.reportConsumerLag() - } - } -} - -// reportConsumerLag calculates and reports consumer lag -func (c *Consumer) reportConsumerLag() { - // This is a simplified lag calculation - // In a real implementation, you would query the broker for high water marks - - c.offsetMutex.RLock() - defer c.offsetMutex.RUnlock() - - for topic, partitions := range c.lastOffset { - for partition, _ := range partitions { - // For simplicity, assume lag is always 0 when we're consuming actively - // In a real test, you would compare against the high water mark - lag := int64(0) - - c.metricsCollector.UpdateConsumerLag(c.consumerGroup, topic, partition, lag) - } - } -} - -// Close closes the consumer and cleans up resources -func (c *Consumer) Close() error { - log.Printf("Consumer %d: Closing", c.id) - - if c.saramaConsumer != nil { - return c.saramaConsumer.Close() - } - - return nil -} - -// ConsumerGroupHandler implements sarama.ConsumerGroupHandler -type ConsumerGroupHandler struct { - consumer *Consumer -} - -// Setup is run at the beginning of a new session, before ConsumeClaim -func (h *ConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { - log.Printf("Consumer %d: Consumer group session setup", h.consumer.id) - - // Log the generation ID and member ID for this session - log.Printf("Consumer %d: Generation=%d, MemberID=%s", - h.consumer.id, session.GenerationID(), session.MemberID()) - - // Log all assigned partitions and their starting offsets - assignments := session.Claims() - totalPartitions := 0 - for topic, partitions := range assignments { - for _, partition := range partitions { - totalPartitions++ - log.Printf("Consumer %d: ASSIGNED %s[%d]", - h.consumer.id, topic, partition) - } - } - log.Printf("Consumer %d: Total partitions assigned: %d", h.consumer.id, totalPartitions) - return nil -} - -// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited -// CRITICAL: Commit all marked offsets before partition reassignment to minimize duplicates -func (h *ConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - log.Printf("Consumer %d: Consumer group session cleanup - committing final offsets before rebalance", h.consumer.id) - - // Commit all marked offsets before releasing partitions - // This ensures that when partitions are reassigned to other consumers, - // they start from the last processed offset, minimizing duplicate reads - session.Commit() - - log.Printf("Consumer %d: Cleanup complete - offsets committed", h.consumer.id) - return nil -} - -// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages() -func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - msgCount := 0 - topic := claim.Topic() - partition := claim.Partition() - initialOffset := claim.InitialOffset() - lastTrackedOffset := int64(-1) - gapCount := 0 - var gaps []string // Track gap ranges for detailed analysis - - // Log the starting offset for this partition - log.Printf("Consumer %d: START consuming %s[%d] from offset %d (HWM=%d)", - h.consumer.id, topic, partition, initialOffset, claim.HighWaterMarkOffset()) - - startTime := time.Now() - lastLogTime := time.Now() - - for { - select { - case message, ok := <-claim.Messages(): - if !ok { - elapsed := time.Since(startTime) - // Log detailed gap analysis - gapSummary := "none" - if len(gaps) > 0 { - gapSummary = fmt.Sprintf("[%s]", strings.Join(gaps, ", ")) - } - - // Check if we consumed just a few messages before stopping - if msgCount <= 10 { - log.Printf("Consumer %d: CRITICAL - Messages() channel CLOSED early on %s[%d] after only %d messages at offset=%d (HWM=%d, gaps=%d %s)", - h.consumer.id, topic, partition, msgCount, lastTrackedOffset, claim.HighWaterMarkOffset()-1, gapCount, gapSummary) - } else { - log.Printf("Consumer %d: STOP consuming %s[%d] after %d messages (%.1f sec, %.1f msgs/sec, last offset=%d, HWM=%d, gaps=%d %s)", - h.consumer.id, topic, partition, msgCount, elapsed.Seconds(), - float64(msgCount)/elapsed.Seconds(), lastTrackedOffset, claim.HighWaterMarkOffset()-1, gapCount, gapSummary) - } - return nil - } - msgCount++ - - // Track gaps in offset sequence (indicates missed messages) - if lastTrackedOffset >= 0 && message.Offset != lastTrackedOffset+1 { - gap := message.Offset - lastTrackedOffset - 1 - gapCount++ - gapDesc := fmt.Sprintf("%d-%d", lastTrackedOffset+1, message.Offset-1) - gaps = append(gaps, gapDesc) - elapsed := time.Since(startTime) - log.Printf("Consumer %d: DEBUG offset gap in %s[%d] at %.1fs: offset %d -> %d (gap=%d messages, gapDesc=%s)", - h.consumer.id, topic, partition, elapsed.Seconds(), lastTrackedOffset, message.Offset, gap, gapDesc) - } - lastTrackedOffset = message.Offset - - // Log progress every 500 messages OR every 5 seconds - now := time.Now() - if msgCount%500 == 0 || now.Sub(lastLogTime) > 5*time.Second { - elapsed := time.Since(startTime) - throughput := float64(msgCount) / elapsed.Seconds() - log.Printf("Consumer %d: %s[%d] progress: %d messages, offset=%d, HWM=%d, rate=%.1f msgs/sec, gaps=%d", - h.consumer.id, topic, partition, msgCount, message.Offset, claim.HighWaterMarkOffset(), throughput, gapCount) - lastLogTime = now - } - - // Process the message - var key []byte - if message.Key != nil { - key = message.Key - } - - if err := h.consumer.processMessage(&message.Topic, message.Partition, message.Offset, key, message.Value); err != nil { - log.Printf("Consumer %d: Error processing message at %s[%d]@%d: %v", - h.consumer.id, message.Topic, message.Partition, message.Offset, err) - h.consumer.metricsCollector.RecordConsumerError() - } else { - // Track consumed message - if h.consumer.tracker != nil { - h.consumer.tracker.TrackConsumed(tracker.Record{ - Key: string(key), - Topic: message.Topic, - Partition: message.Partition, - Offset: message.Offset, - Timestamp: message.Timestamp.UnixNano(), - ConsumerID: h.consumer.id, - }) - } - - // Mark message as processed - session.MarkMessage(message, "") - - // Commit offset frequently to minimize both message loss and duplicates - // Every 20 messages balances: - // - ~600 commits per 12k messages (reasonable overhead) - // - ~20 message loss window if consumer fails - // - Reduces duplicate reads from rebalancing - if msgCount%20 == 0 { - session.Commit() - } - } - - case <-session.Context().Done(): - elapsed := time.Since(startTime) - lastOffset := claim.HighWaterMarkOffset() - 1 - gapSummary := "none" - if len(gaps) > 0 { - gapSummary = fmt.Sprintf("[%s]", strings.Join(gaps, ", ")) - } - - // Determine if we reached HWM - reachedHWM := lastTrackedOffset >= lastOffset - hwmStatus := "INCOMPLETE" - if reachedHWM { - hwmStatus := "COMPLETE" - _ = hwmStatus // Use it to avoid warning - } - - // Calculate consumption rate for this partition - consumptionRate := float64(0) - if elapsed.Seconds() > 0 { - consumptionRate = float64(msgCount) / elapsed.Seconds() - } - - // Log both normal and abnormal completions - if msgCount == 0 { - // Partition never got ANY messages - critical issue - log.Printf("Consumer %d: CRITICAL - NO MESSAGES from %s[%d] (HWM=%d, status=%s)", - h.consumer.id, topic, partition, claim.HighWaterMarkOffset()-1, hwmStatus) - } else if msgCount < 10 && msgCount > 0 { - // Very few messages then stopped - likely hung fetch - log.Printf("Consumer %d: HUNG FETCH on %s[%d]: only %d messages before stop at offset=%d (HWM=%d, rate=%.2f msgs/sec, gaps=%d %s)", - h.consumer.id, topic, partition, msgCount, lastTrackedOffset, claim.HighWaterMarkOffset()-1, consumptionRate, gapCount, gapSummary) - } else { - // Normal completion - log.Printf("Consumer %d: Context CANCELLED for %s[%d] after %d messages (%.1f sec, %.1f msgs/sec, last offset=%d, HWM=%d, status=%s, gaps=%d %s)", - h.consumer.id, topic, partition, msgCount, elapsed.Seconds(), - consumptionRate, lastTrackedOffset, claim.HighWaterMarkOffset()-1, hwmStatus, gapCount, gapSummary) - } - return nil - } - } -} - -// Helper functions - -func joinStrings(strs []string, sep string) string { - if len(strs) == 0 { - return "" - } - - result := strs[0] - for i := 1; i < len(strs); i++ { - result += sep + strs[i] - } - return result -} diff --git a/test/kafka/kafka-client-loadtest/internal/consumer/consumer_stalling_test.go b/test/kafka/kafka-client-loadtest/internal/consumer/consumer_stalling_test.go deleted file mode 100644 index 8e67f703e..000000000 --- a/test/kafka/kafka-client-loadtest/internal/consumer/consumer_stalling_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package consumer - -import ( - "testing" -) - -// TestConsumerStallingPattern is a REPRODUCER for the consumer stalling bug. -// -// This test simulates the exact pattern that causes consumers to stall: -// 1. Consumer reads messages in batches -// 2. Consumer commits offset after each batch -// 3. On next batch, consumer fetches offset+1 but gets empty response -// 4. Consumer stops fetching (BUG!) -// -// Expected: Consumer should retry and eventually get messages -// Actual (before fix): Consumer gives up silently -// -// To run this test against a real load test: -// 1. Start infrastructure: make start -// 2. Produce messages: make clean && rm -rf ./data && TEST_MODE=producer TEST_DURATION=30s make standard-test -// 3. Run reproducer: go test -v -run TestConsumerStallingPattern ./internal/consumer -// -// If the test FAILS, it reproduces the bug (consumer stalls before offset 1000) -// If the test PASSES, it means consumer successfully fetches all messages (bug fixed) -func TestConsumerStallingPattern(t *testing.T) { - t.Skip("REPRODUCER TEST: Requires running load test infrastructure. See comments for setup.") - - // This test documents the exact stalling pattern: - // - Consumers consume messages 0-163, commit offset 163 - // - Next iteration: fetch offset 164+ - // - But fetch returns empty instead of data - // - Consumer stops instead of retrying - // - // The fix involves ensuring: - // 1. Offset+1 is calculated correctly after commit - // 2. Empty fetch doesn't mean "end of partition" (could be transient) - // 3. Consumer retries on empty fetch instead of giving up - // 4. Logging shows why fetch stopped - - t.Logf("=== CONSUMER STALLING REPRODUCER ===") - t.Logf("") - t.Logf("Setup Steps:") - t.Logf("1. cd test/kafka/kafka-client-loadtest") - t.Logf("2. make clean && rm -rf ./data && make start") - t.Logf("3. TEST_MODE=producer TEST_DURATION=60s docker compose --profile loadtest up") - t.Logf(" (Let it run to produce ~3000 messages)") - t.Logf("4. Stop producers (Ctrl+C)") - t.Logf("5. Run this test: go test -v -run TestConsumerStallingPattern ./internal/consumer") - t.Logf("") - t.Logf("Expected Behavior:") - t.Logf("- Test should create consumer and consume all produced messages") - t.Logf("- Consumer should reach message count near HWM") - t.Logf("- No errors during consumption") - t.Logf("") - t.Logf("Bug Symptoms (before fix):") - t.Logf("- Consumer stops at offset ~160-500") - t.Logf("- No more messages fetched after commit") - t.Logf("- Test hangs or times out waiting for more messages") - t.Logf("- Consumer logs show: 'Consumer stops after offset X'") - t.Logf("") - t.Logf("Root Cause:") - t.Logf("- After committing offset N, fetch(N+1) returns empty") - t.Logf("- Consumer treats empty as 'end of partition' and stops") - t.Logf("- Should instead retry with exponential backoff") - t.Logf("") - t.Logf("Fix Verification:") - t.Logf("- If test PASSES: consumer fetches all messages, no stalling") - t.Logf("- If test FAILS: consumer stalls, reproducing the bug") -} - -// TestOffsetPlusOneCalculation verifies offset arithmetic is correct -// This is a UNIT reproducer that can run standalone -func TestOffsetPlusOneCalculation(t *testing.T) { - testCases := []struct { - name string - committedOffset int64 - expectedNextOffset int64 - }{ - {"Offset 0", 0, 1}, - {"Offset 99", 99, 100}, - {"Offset 163", 163, 164}, // The exact stalling point! - {"Offset 999", 999, 1000}, - {"Large offset", 10000, 10001}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // This is the critical calculation - nextOffset := tc.committedOffset + 1 - - if nextOffset != tc.expectedNextOffset { - t.Fatalf("OFFSET MATH BUG: committed=%d, next=%d (expected %d)", - tc.committedOffset, nextOffset, tc.expectedNextOffset) - } - - t.Logf("โœ“ offset %d โ†’ next fetch at %d", tc.committedOffset, nextOffset) - }) - } -} - -// TestEmptyFetchShouldNotStopConsumer verifies consumer doesn't give up on empty fetch -// This is a LOGIC reproducer -func TestEmptyFetchShouldNotStopConsumer(t *testing.T) { - t.Run("EmptyFetchRetry", func(t *testing.T) { - // Scenario: Consumer committed offset 163, then fetches 164+ - committedOffset := int64(163) - nextFetchOffset := committedOffset + 1 - - // First attempt: get empty (transient - data might not be available yet) - // WRONG behavior (bug): Consumer sees 0 bytes and stops - // wrongConsumerLogic := (firstFetchResult == 0) // gives up! - - // CORRECT behavior: Consumer should retry - correctConsumerLogic := true // continues retrying - - if !correctConsumerLogic { - t.Fatalf("Consumer incorrectly gave up after empty fetch at offset %d", nextFetchOffset) - } - - t.Logf("โœ“ Empty fetch doesn't stop consumer, continues retrying") - }) -} diff --git a/test/kafka/kafka-client-loadtest/internal/metrics/collector.go b/test/kafka/kafka-client-loadtest/internal/metrics/collector.go deleted file mode 100644 index d6a1edb8e..000000000 --- a/test/kafka/kafka-client-loadtest/internal/metrics/collector.go +++ /dev/null @@ -1,353 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -// Collector handles metrics collection for the load test -type Collector struct { - // Atomic counters for thread-safe operations - messagesProduced int64 - messagesConsumed int64 - bytesProduced int64 - bytesConsumed int64 - producerErrors int64 - consumerErrors int64 - - // Latency tracking - latencies []time.Duration - latencyMutex sync.RWMutex - - // Consumer lag tracking - consumerLag map[string]int64 - consumerLagMutex sync.RWMutex - - // Test timing - startTime time.Time - - // Prometheus metrics - prometheusMetrics *PrometheusMetrics -} - -// PrometheusMetrics holds all Prometheus metric definitions -type PrometheusMetrics struct { - MessagesProducedTotal prometheus.Counter - MessagesConsumedTotal prometheus.Counter - BytesProducedTotal prometheus.Counter - BytesConsumedTotal prometheus.Counter - ProducerErrorsTotal prometheus.Counter - ConsumerErrorsTotal prometheus.Counter - - MessageLatencyHistogram prometheus.Histogram - ProducerThroughput prometheus.Gauge - ConsumerThroughput prometheus.Gauge - ConsumerLagGauge *prometheus.GaugeVec - - ActiveProducers prometheus.Gauge - ActiveConsumers prometheus.Gauge -} - -// NewCollector creates a new metrics collector -func NewCollector() *Collector { - return &Collector{ - startTime: time.Now(), - consumerLag: make(map[string]int64), - prometheusMetrics: &PrometheusMetrics{ - MessagesProducedTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_messages_produced_total", - Help: "Total number of messages produced", - }), - MessagesConsumedTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_messages_consumed_total", - Help: "Total number of messages consumed", - }), - BytesProducedTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_bytes_produced_total", - Help: "Total bytes produced", - }), - BytesConsumedTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_bytes_consumed_total", - Help: "Total bytes consumed", - }), - ProducerErrorsTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_producer_errors_total", - Help: "Total number of producer errors", - }), - ConsumerErrorsTotal: promauto.NewCounter(prometheus.CounterOpts{ - Name: "kafka_loadtest_consumer_errors_total", - Help: "Total number of consumer errors", - }), - MessageLatencyHistogram: promauto.NewHistogram(prometheus.HistogramOpts{ - Name: "kafka_loadtest_message_latency_seconds", - Help: "Message end-to-end latency in seconds", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), // 1ms to ~32s - }), - ProducerThroughput: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "kafka_loadtest_producer_throughput_msgs_per_sec", - Help: "Current producer throughput in messages per second", - }), - ConsumerThroughput: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "kafka_loadtest_consumer_throughput_msgs_per_sec", - Help: "Current consumer throughput in messages per second", - }), - ConsumerLagGauge: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "kafka_loadtest_consumer_lag_messages", - Help: "Consumer lag in messages", - }, []string{"consumer_group", "topic", "partition"}), - ActiveProducers: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "kafka_loadtest_active_producers", - Help: "Number of active producers", - }), - ActiveConsumers: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "kafka_loadtest_active_consumers", - Help: "Number of active consumers", - }), - }, - } -} - -// RecordProducedMessage records a successfully produced message -func (c *Collector) RecordProducedMessage(size int, latency time.Duration) { - atomic.AddInt64(&c.messagesProduced, 1) - atomic.AddInt64(&c.bytesProduced, int64(size)) - - c.prometheusMetrics.MessagesProducedTotal.Inc() - c.prometheusMetrics.BytesProducedTotal.Add(float64(size)) - c.prometheusMetrics.MessageLatencyHistogram.Observe(latency.Seconds()) - - // Store latency for percentile calculations - c.latencyMutex.Lock() - c.latencies = append(c.latencies, latency) - // Keep only recent latencies to avoid memory bloat - if len(c.latencies) > 100000 { - c.latencies = c.latencies[50000:] - } - c.latencyMutex.Unlock() -} - -// RecordConsumedMessage records a successfully consumed message -func (c *Collector) RecordConsumedMessage(size int) { - atomic.AddInt64(&c.messagesConsumed, 1) - atomic.AddInt64(&c.bytesConsumed, int64(size)) - - c.prometheusMetrics.MessagesConsumedTotal.Inc() - c.prometheusMetrics.BytesConsumedTotal.Add(float64(size)) -} - -// RecordProducerError records a producer error -func (c *Collector) RecordProducerError() { - atomic.AddInt64(&c.producerErrors, 1) - c.prometheusMetrics.ProducerErrorsTotal.Inc() -} - -// RecordConsumerError records a consumer error -func (c *Collector) RecordConsumerError() { - atomic.AddInt64(&c.consumerErrors, 1) - c.prometheusMetrics.ConsumerErrorsTotal.Inc() -} - -// UpdateConsumerLag updates consumer lag metrics -func (c *Collector) UpdateConsumerLag(consumerGroup, topic string, partition int32, lag int64) { - key := fmt.Sprintf("%s-%s-%d", consumerGroup, topic, partition) - - c.consumerLagMutex.Lock() - c.consumerLag[key] = lag - c.consumerLagMutex.Unlock() - - c.prometheusMetrics.ConsumerLagGauge.WithLabelValues( - consumerGroup, topic, fmt.Sprintf("%d", partition), - ).Set(float64(lag)) -} - -// UpdateThroughput updates throughput gauges -func (c *Collector) UpdateThroughput(producerRate, consumerRate float64) { - c.prometheusMetrics.ProducerThroughput.Set(producerRate) - c.prometheusMetrics.ConsumerThroughput.Set(consumerRate) -} - -// UpdateActiveClients updates active client counts -func (c *Collector) UpdateActiveClients(producers, consumers int) { - c.prometheusMetrics.ActiveProducers.Set(float64(producers)) - c.prometheusMetrics.ActiveConsumers.Set(float64(consumers)) -} - -// GetStats returns current statistics -func (c *Collector) GetStats() Stats { - produced := atomic.LoadInt64(&c.messagesProduced) - consumed := atomic.LoadInt64(&c.messagesConsumed) - bytesProduced := atomic.LoadInt64(&c.bytesProduced) - bytesConsumed := atomic.LoadInt64(&c.bytesConsumed) - producerErrors := atomic.LoadInt64(&c.producerErrors) - consumerErrors := atomic.LoadInt64(&c.consumerErrors) - - duration := time.Since(c.startTime) - - // Calculate throughput - producerThroughput := float64(produced) / duration.Seconds() - consumerThroughput := float64(consumed) / duration.Seconds() - - // Calculate latency percentiles - var latencyPercentiles map[float64]time.Duration - c.latencyMutex.RLock() - if len(c.latencies) > 0 { - latencyPercentiles = c.calculatePercentiles(c.latencies) - } - c.latencyMutex.RUnlock() - - // Get consumer lag summary - c.consumerLagMutex.RLock() - totalLag := int64(0) - maxLag := int64(0) - for _, lag := range c.consumerLag { - totalLag += lag - if lag > maxLag { - maxLag = lag - } - } - avgLag := float64(0) - if len(c.consumerLag) > 0 { - avgLag = float64(totalLag) / float64(len(c.consumerLag)) - } - c.consumerLagMutex.RUnlock() - - return Stats{ - Duration: duration, - MessagesProduced: produced, - MessagesConsumed: consumed, - BytesProduced: bytesProduced, - BytesConsumed: bytesConsumed, - ProducerErrors: producerErrors, - ConsumerErrors: consumerErrors, - ProducerThroughput: producerThroughput, - ConsumerThroughput: consumerThroughput, - LatencyPercentiles: latencyPercentiles, - TotalConsumerLag: totalLag, - MaxConsumerLag: maxLag, - AvgConsumerLag: avgLag, - } -} - -// PrintSummary prints a summary of the test statistics -func (c *Collector) PrintSummary() { - stats := c.GetStats() - - fmt.Printf("\n=== Load Test Summary ===\n") - fmt.Printf("Test Duration: %v\n", stats.Duration) - fmt.Printf("\nMessages:\n") - fmt.Printf(" Produced: %d (%.2f MB)\n", stats.MessagesProduced, float64(stats.BytesProduced)/1024/1024) - fmt.Printf(" Consumed: %d (%.2f MB)\n", stats.MessagesConsumed, float64(stats.BytesConsumed)/1024/1024) - fmt.Printf(" Producer Errors: %d\n", stats.ProducerErrors) - fmt.Printf(" Consumer Errors: %d\n", stats.ConsumerErrors) - - fmt.Printf("\nThroughput:\n") - fmt.Printf(" Producer: %.2f msgs/sec\n", stats.ProducerThroughput) - fmt.Printf(" Consumer: %.2f msgs/sec\n", stats.ConsumerThroughput) - - if stats.LatencyPercentiles != nil { - fmt.Printf("\nLatency Percentiles:\n") - percentiles := []float64{50, 90, 95, 99, 99.9} - for _, p := range percentiles { - if latency, exists := stats.LatencyPercentiles[p]; exists { - fmt.Printf(" p%.1f: %v\n", p, latency) - } - } - } - - fmt.Printf("\nConsumer Lag:\n") - fmt.Printf(" Total: %d messages\n", stats.TotalConsumerLag) - fmt.Printf(" Max: %d messages\n", stats.MaxConsumerLag) - fmt.Printf(" Average: %.2f messages\n", stats.AvgConsumerLag) - fmt.Printf("=========================\n") -} - -// WriteStats writes statistics to a writer (for HTTP endpoint) -func (c *Collector) WriteStats(w io.Writer) { - stats := c.GetStats() - - fmt.Fprintf(w, "# Load Test Statistics\n") - fmt.Fprintf(w, "duration_seconds %v\n", stats.Duration.Seconds()) - fmt.Fprintf(w, "messages_produced %d\n", stats.MessagesProduced) - fmt.Fprintf(w, "messages_consumed %d\n", stats.MessagesConsumed) - fmt.Fprintf(w, "bytes_produced %d\n", stats.BytesProduced) - fmt.Fprintf(w, "bytes_consumed %d\n", stats.BytesConsumed) - fmt.Fprintf(w, "producer_errors %d\n", stats.ProducerErrors) - fmt.Fprintf(w, "consumer_errors %d\n", stats.ConsumerErrors) - fmt.Fprintf(w, "producer_throughput_msgs_per_sec %f\n", stats.ProducerThroughput) - fmt.Fprintf(w, "consumer_throughput_msgs_per_sec %f\n", stats.ConsumerThroughput) - fmt.Fprintf(w, "total_consumer_lag %d\n", stats.TotalConsumerLag) - fmt.Fprintf(w, "max_consumer_lag %d\n", stats.MaxConsumerLag) - fmt.Fprintf(w, "avg_consumer_lag %f\n", stats.AvgConsumerLag) - - if stats.LatencyPercentiles != nil { - for percentile, latency := range stats.LatencyPercentiles { - fmt.Fprintf(w, "latency_p%g_seconds %f\n", percentile, latency.Seconds()) - } - } -} - -// calculatePercentiles calculates latency percentiles -func (c *Collector) calculatePercentiles(latencies []time.Duration) map[float64]time.Duration { - if len(latencies) == 0 { - return nil - } - - // Make a copy and sort - sorted := make([]time.Duration, len(latencies)) - copy(sorted, latencies) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i] < sorted[j] - }) - - percentiles := map[float64]time.Duration{ - 50: calculatePercentile(sorted, 50), - 90: calculatePercentile(sorted, 90), - 95: calculatePercentile(sorted, 95), - 99: calculatePercentile(sorted, 99), - 99.9: calculatePercentile(sorted, 99.9), - } - - return percentiles -} - -// calculatePercentile calculates a specific percentile from sorted data -func calculatePercentile(sorted []time.Duration, percentile float64) time.Duration { - if len(sorted) == 0 { - return 0 - } - - index := percentile / 100.0 * float64(len(sorted)-1) - if index == float64(int(index)) { - return sorted[int(index)] - } - - lower := sorted[int(index)] - upper := sorted[int(index)+1] - weight := index - float64(int(index)) - - return time.Duration(float64(lower) + weight*float64(upper-lower)) -} - -// Stats represents the current test statistics -type Stats struct { - Duration time.Duration - MessagesProduced int64 - MessagesConsumed int64 - BytesProduced int64 - BytesConsumed int64 - ProducerErrors int64 - ConsumerErrors int64 - ProducerThroughput float64 - ConsumerThroughput float64 - LatencyPercentiles map[float64]time.Duration - TotalConsumerLag int64 - MaxConsumerLag int64 - AvgConsumerLag float64 -} diff --git a/test/kafka/kafka-client-loadtest/internal/producer/producer.go b/test/kafka/kafka-client-loadtest/internal/producer/producer.go deleted file mode 100644 index f8b8db7f7..000000000 --- a/test/kafka/kafka-client-loadtest/internal/producer/producer.go +++ /dev/null @@ -1,787 +0,0 @@ -package producer - -import ( - "context" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "math/rand" - "net/http" - "strings" - "sync" - "time" - - "github.com/IBM/sarama" - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/config" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/metrics" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema" - pb "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema/pb" - "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/tracker" - "google.golang.org/protobuf/proto" -) - -// ErrCircuitBreakerOpen indicates that the circuit breaker is open due to consecutive failures -var ErrCircuitBreakerOpen = errors.New("circuit breaker is open") - -// Producer represents a Kafka producer for load testing -type Producer struct { - id int - config *config.Config - metricsCollector *metrics.Collector - saramaProducer sarama.SyncProducer - useConfluent bool - topics []string - avroCodec *goavro.Codec - startTime time.Time // Test run start time for generating unique keys - - // Schema management - schemaIDs map[string]int // topic -> schema ID mapping - schemaFormats map[string]string // topic -> schema format mapping (AVRO, JSON, etc.) - - // Rate limiting - rateLimiter *time.Ticker - - // Message generation - messageCounter int64 - random *rand.Rand - - // Circuit breaker detection - consecutiveFailures int - - // Record tracking - tracker *tracker.Tracker -} - -// Message represents a test message -type Message struct { - ID string `json:"id"` - Timestamp int64 `json:"timestamp"` - ProducerID int `json:"producer_id"` - Counter int64 `json:"counter"` - UserID string `json:"user_id"` - EventType string `json:"event_type"` - Properties map[string]interface{} `json:"properties"` -} - -// New creates a new producer instance -func New(cfg *config.Config, collector *metrics.Collector, id int, recordTracker *tracker.Tracker) (*Producer, error) { - p := &Producer{ - id: id, - config: cfg, - metricsCollector: collector, - topics: cfg.GetTopicNames(), - random: rand.New(rand.NewSource(time.Now().UnixNano() + int64(id))), - useConfluent: false, // Use Sarama by default, can be made configurable - schemaIDs: make(map[string]int), - schemaFormats: make(map[string]string), - startTime: time.Now(), // Record test start time for unique key generation - tracker: recordTracker, - } - - // Initialize schema formats for each topic - // Distribute across AVRO, JSON, and PROTOBUF formats - for i, topic := range p.topics { - var schemaFormat string - if cfg.Producers.SchemaFormat != "" { - // Use explicit config if provided - schemaFormat = cfg.Producers.SchemaFormat - } else { - // Distribute across three formats: AVRO, JSON, PROTOBUF - switch i % 3 { - case 0: - schemaFormat = "AVRO" - case 1: - schemaFormat = "JSON" - case 2: - schemaFormat = "PROTOBUF" - } - } - p.schemaFormats[topic] = schemaFormat - log.Printf("Producer %d: Topic %s will use schema format: %s", id, topic, schemaFormat) - } - - // Set up rate limiter if specified - if cfg.Producers.MessageRate > 0 { - p.rateLimiter = time.NewTicker(time.Second / time.Duration(cfg.Producers.MessageRate)) - } - - // Initialize Sarama producer - if err := p.initSaramaProducer(); err != nil { - return nil, fmt.Errorf("failed to initialize Sarama producer: %w", err) - } - - // Initialize Avro codec and register/fetch schemas if schemas are enabled - if cfg.Schemas.Enabled { - if err := p.initAvroCodec(); err != nil { - return nil, fmt.Errorf("failed to initialize Avro codec: %w", err) - } - if err := p.ensureSchemasRegistered(); err != nil { - return nil, fmt.Errorf("failed to ensure schemas are registered: %w", err) - } - if err := p.fetchSchemaIDs(); err != nil { - return nil, fmt.Errorf("failed to fetch schema IDs: %w", err) - } - } - - log.Printf("Producer %d initialized successfully", id) - return p, nil -} - -// initSaramaProducer initializes the Sarama producer -func (p *Producer) initSaramaProducer() error { - config := sarama.NewConfig() - - // Producer configuration - config.Producer.RequiredAcks = sarama.WaitForAll - if p.config.Producers.Acks == "0" { - config.Producer.RequiredAcks = sarama.NoResponse - } else if p.config.Producers.Acks == "1" { - config.Producer.RequiredAcks = sarama.WaitForLocal - } - - config.Producer.Retry.Max = p.config.Producers.Retries - config.Producer.Retry.Backoff = time.Duration(p.config.Producers.RetryBackoffMs) * time.Millisecond - config.Producer.Return.Successes = true - config.Producer.Return.Errors = true - - // Compression - switch p.config.Producers.CompressionType { - case "gzip": - config.Producer.Compression = sarama.CompressionGZIP - case "snappy": - config.Producer.Compression = sarama.CompressionSnappy - case "lz4": - config.Producer.Compression = sarama.CompressionLZ4 - case "zstd": - config.Producer.Compression = sarama.CompressionZSTD - default: - config.Producer.Compression = sarama.CompressionNone - } - - // Batching - config.Producer.Flush.Messages = p.config.Producers.BatchSize - config.Producer.Flush.Frequency = time.Duration(p.config.Producers.LingerMs) * time.Millisecond - - // Timeouts - config.Net.DialTimeout = 30 * time.Second - config.Net.ReadTimeout = 30 * time.Second - config.Net.WriteTimeout = 30 * time.Second - - // Version - config.Version = sarama.V2_8_0_0 - - // Create producer - producer, err := sarama.NewSyncProducer(p.config.Kafka.BootstrapServers, config) - if err != nil { - return fmt.Errorf("failed to create Sarama producer: %w", err) - } - - p.saramaProducer = producer - return nil -} - -// initAvroCodec initializes the Avro codec for schema-based messages -func (p *Producer) initAvroCodec() error { - // Use the shared LoadTestMessage schema - codec, err := goavro.NewCodec(schema.GetAvroSchema()) - if err != nil { - return fmt.Errorf("failed to create Avro codec: %w", err) - } - - p.avroCodec = codec - return nil -} - -// Run starts the producer and produces messages until the context is cancelled -func (p *Producer) Run(ctx context.Context) error { - log.Printf("Producer %d starting", p.id) - defer log.Printf("Producer %d stopped", p.id) - - // Create topics if they don't exist - if err := p.createTopics(); err != nil { - log.Printf("Producer %d: Failed to create topics: %v", p.id, err) - p.metricsCollector.RecordProducerError() - return err - } - - var wg sync.WaitGroup - errChan := make(chan error, 1) - - // Main production loop - wg.Add(1) - go func() { - defer wg.Done() - if err := p.produceMessages(ctx); err != nil { - errChan <- err - } - }() - - // Wait for completion or error - select { - case <-ctx.Done(): - log.Printf("Producer %d: Context cancelled, shutting down", p.id) - case err := <-errChan: - log.Printf("Producer %d: Stopping due to error: %v", p.id, err) - return err - } - - // Stop rate limiter - if p.rateLimiter != nil { - p.rateLimiter.Stop() - } - - // Wait for goroutines to finish - wg.Wait() - return nil -} - -// produceMessages is the main message production loop -func (p *Producer) produceMessages(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return nil - default: - // Rate limiting - if p.rateLimiter != nil { - select { - case <-p.rateLimiter.C: - // Proceed - case <-ctx.Done(): - return nil - } - } - - if err := p.produceMessage(); err != nil { - log.Printf("Producer %d: Failed to produce message: %v", p.id, err) - p.metricsCollector.RecordProducerError() - - // Check for circuit breaker error - if p.isCircuitBreakerError(err) { - p.consecutiveFailures++ - log.Printf("Producer %d: Circuit breaker error detected (%d/%d consecutive failures)", - p.id, p.consecutiveFailures, 3) - - // Progressive backoff delay to avoid overloading the gateway - backoffDelay := time.Duration(p.consecutiveFailures) * 500 * time.Millisecond - log.Printf("Producer %d: Backing off for %v to avoid overloading gateway", p.id, backoffDelay) - - select { - case <-time.After(backoffDelay): - // Continue after delay - case <-ctx.Done(): - return nil - } - - // If we've hit 3 consecutive circuit breaker errors, stop the producer - if p.consecutiveFailures >= 3 { - log.Printf("Producer %d: Circuit breaker is open - stopping producer after %d consecutive failures", - p.id, p.consecutiveFailures) - return fmt.Errorf("%w: stopping producer after %d consecutive failures", ErrCircuitBreakerOpen, p.consecutiveFailures) - } - } else { - // Reset counter for non-circuit breaker errors - p.consecutiveFailures = 0 - } - } else { - // Reset counter on successful message - p.consecutiveFailures = 0 - } - } - } -} - -// produceMessage produces a single message -func (p *Producer) produceMessage() error { - startTime := time.Now() - - // Select random topic - topic := p.topics[p.random.Intn(len(p.topics))] - - // Produce message using Sarama (message will be generated based on topic's schema format) - return p.produceSaramaMessage(topic, startTime) -} - -// produceSaramaMessage produces a message using Sarama -// The message is generated internally based on the topic's schema format -func (p *Producer) produceSaramaMessage(topic string, startTime time.Time) error { - // Generate key - key := p.generateMessageKey() - - // If schemas are enabled, wrap in Confluent Wire Format based on topic's schema format - var messageValue []byte - if p.config.Schemas.Enabled { - schemaID, exists := p.schemaIDs[topic] - if !exists { - return fmt.Errorf("schema ID not found for topic %s", topic) - } - - // Get the schema format for this topic - schemaFormat := p.schemaFormats[topic] - - // CRITICAL FIX: Encode based on schema format, NOT config value_type - // The encoding MUST match what the schema registry and gateway expect - var encodedMessage []byte - var err error - switch schemaFormat { - case "AVRO": - // For Avro schema, encode as Avro binary - encodedMessage, err = p.generateAvroMessage() - if err != nil { - return fmt.Errorf("failed to encode as Avro for topic %s: %w", topic, err) - } - case "JSON": - // For JSON schema, encode as JSON - encodedMessage, err = p.generateJSONMessage() - if err != nil { - return fmt.Errorf("failed to encode as JSON for topic %s: %w", topic, err) - } - case "PROTOBUF": - // For PROTOBUF schema, encode as Protobuf binary - encodedMessage, err = p.generateProtobufMessage() - if err != nil { - return fmt.Errorf("failed to encode as Protobuf for topic %s: %w", topic, err) - } - default: - // Unknown format - fallback to JSON - encodedMessage, err = p.generateJSONMessage() - if err != nil { - return fmt.Errorf("failed to encode as JSON (unknown format fallback) for topic %s: %w", topic, err) - } - } - - // Wrap in Confluent wire format (magic byte + schema ID + payload) - messageValue = p.createConfluentWireFormat(schemaID, encodedMessage) - } else { - // No schemas - generate message based on config value_type - var err error - messageValue, err = p.generateMessage() - if err != nil { - return fmt.Errorf("failed to generate message: %w", err) - } - } - - msg := &sarama.ProducerMessage{ - Topic: topic, - Key: sarama.StringEncoder(key), - Value: sarama.ByteEncoder(messageValue), - } - - // Add headers if configured - if p.config.Producers.IncludeHeaders { - msg.Headers = []sarama.RecordHeader{ - {Key: []byte("producer_id"), Value: []byte(fmt.Sprintf("%d", p.id))}, - {Key: []byte("timestamp"), Value: []byte(fmt.Sprintf("%d", startTime.UnixNano()))}, - } - } - - // Produce message - partition, offset, err := p.saramaProducer.SendMessage(msg) - if err != nil { - return err - } - - // Track produced message - if p.tracker != nil { - p.tracker.TrackProduced(tracker.Record{ - Key: key, - Topic: topic, - Partition: partition, - Offset: offset, - Timestamp: startTime.UnixNano(), - ProducerID: p.id, - }) - } - - // Record metrics - latency := time.Since(startTime) - p.metricsCollector.RecordProducedMessage(len(messageValue), latency) - - return nil -} - -// generateMessage generates a test message -func (p *Producer) generateMessage() ([]byte, error) { - p.messageCounter++ - - switch p.config.Producers.ValueType { - case "avro": - return p.generateAvroMessage() - case "json": - return p.generateJSONMessage() - case "binary": - return p.generateBinaryMessage() - default: - return p.generateJSONMessage() - } -} - -// generateJSONMessage generates a JSON test message -func (p *Producer) generateJSONMessage() ([]byte, error) { - msg := Message{ - ID: fmt.Sprintf("msg-%d-%d", p.id, p.messageCounter), - Timestamp: time.Now().UnixNano(), - ProducerID: p.id, - Counter: p.messageCounter, - UserID: fmt.Sprintf("user-%d", p.random.Intn(10000)), - EventType: p.randomEventType(), - Properties: map[string]interface{}{ - "session_id": fmt.Sprintf("sess-%d-%d", p.id, p.random.Intn(1000)), - "page_views": fmt.Sprintf("%d", p.random.Intn(100)), // String for Avro map - "duration_ms": fmt.Sprintf("%d", p.random.Intn(300000)), // String for Avro map - "country": p.randomCountry(), - "device_type": p.randomDeviceType(), - "app_version": fmt.Sprintf("v%d.%d.%d", p.random.Intn(10), p.random.Intn(10), p.random.Intn(100)), - }, - } - - // Marshal to JSON (no padding - let natural message size be used) - messageBytes, err := json.Marshal(msg) - if err != nil { - return nil, err - } - - return messageBytes, nil -} - -// generateProtobufMessage generates a Protobuf-encoded message -func (p *Producer) generateProtobufMessage() ([]byte, error) { - // Create protobuf message - protoMsg := &pb.LoadTestMessage{ - Id: fmt.Sprintf("msg-%d-%d", p.id, p.messageCounter), - Timestamp: time.Now().UnixNano(), - ProducerId: int32(p.id), - Counter: p.messageCounter, - UserId: fmt.Sprintf("user-%d", p.random.Intn(10000)), - EventType: p.randomEventType(), - Properties: map[string]string{ - "session_id": fmt.Sprintf("sess-%d-%d", p.id, p.random.Intn(1000)), - "page_views": fmt.Sprintf("%d", p.random.Intn(100)), - "duration_ms": fmt.Sprintf("%d", p.random.Intn(300000)), - "country": p.randomCountry(), - "device_type": p.randomDeviceType(), - "app_version": fmt.Sprintf("v%d.%d.%d", p.random.Intn(10), p.random.Intn(10), p.random.Intn(100)), - }, - } - - // Marshal to protobuf binary - messageBytes, err := proto.Marshal(protoMsg) - if err != nil { - return nil, err - } - - return messageBytes, nil -} - -// generateAvroMessage generates an Avro-encoded message with Confluent Wire Format -// NOTE: Avro messages are NOT padded - they have their own binary format -func (p *Producer) generateAvroMessage() ([]byte, error) { - if p.avroCodec == nil { - return nil, fmt.Errorf("Avro codec not initialized") - } - - // Create Avro-compatible record matching the LoadTestMessage schema - record := map[string]interface{}{ - "id": fmt.Sprintf("msg-%d-%d", p.id, p.messageCounter), - "timestamp": time.Now().UnixNano(), - "producer_id": p.id, - "counter": p.messageCounter, - "user_id": fmt.Sprintf("user-%d", p.random.Intn(10000)), - "event_type": p.randomEventType(), - "properties": map[string]interface{}{ - "session_id": fmt.Sprintf("sess-%d-%d", p.id, p.random.Intn(1000)), - "page_views": fmt.Sprintf("%d", p.random.Intn(100)), - "duration_ms": fmt.Sprintf("%d", p.random.Intn(300000)), - "country": p.randomCountry(), - "device_type": p.randomDeviceType(), - "app_version": fmt.Sprintf("v%d.%d.%d", p.random.Intn(10), p.random.Intn(10), p.random.Intn(100)), - }, - } - - // Encode to Avro binary - avroBytes, err := p.avroCodec.BinaryFromNative(nil, record) - if err != nil { - return nil, err - } - - return avroBytes, nil -} - -// generateBinaryMessage generates a binary test message (no padding) -func (p *Producer) generateBinaryMessage() ([]byte, error) { - // Create a simple binary message format: - // [producer_id:4][counter:8][timestamp:8] - message := make([]byte, 20) - - // Producer ID (4 bytes) - message[0] = byte(p.id >> 24) - message[1] = byte(p.id >> 16) - message[2] = byte(p.id >> 8) - message[3] = byte(p.id) - - // Counter (8 bytes) - for i := 0; i < 8; i++ { - message[4+i] = byte(p.messageCounter >> (56 - i*8)) - } - - // Timestamp (8 bytes) - timestamp := time.Now().UnixNano() - for i := 0; i < 8; i++ { - message[12+i] = byte(timestamp >> (56 - i*8)) - } - - return message, nil -} - -// generateMessageKey generates a message key based on the configured distribution -// Keys are prefixed with a test run ID to track messages across test runs -func (p *Producer) generateMessageKey() string { - // Use test start time as run ID (format: YYYYMMDD-HHMMSS) - runID := p.startTime.Format("20060102-150405") - - switch p.config.Producers.KeyDistribution { - case "sequential": - return fmt.Sprintf("run-%s-key-%d", runID, p.messageCounter) - case "uuid": - return fmt.Sprintf("run-%s-uuid-%d-%d-%d", runID, p.id, time.Now().UnixNano(), p.random.Intn(1000000)) - default: // random - return fmt.Sprintf("run-%s-key-%d", runID, p.random.Intn(10000)) - } -} - -// createTopics creates the test topics if they don't exist -func (p *Producer) createTopics() error { - // Use Sarama admin client to create topics - config := sarama.NewConfig() - config.Version = sarama.V2_8_0_0 - - admin, err := sarama.NewClusterAdmin(p.config.Kafka.BootstrapServers, config) - if err != nil { - return fmt.Errorf("failed to create admin client: %w", err) - } - defer admin.Close() - - // Create topic specifications - topicSpecs := make(map[string]*sarama.TopicDetail) - for _, topic := range p.topics { - topicSpecs[topic] = &sarama.TopicDetail{ - NumPartitions: int32(p.config.Topics.Partitions), - ReplicationFactor: int16(p.config.Topics.ReplicationFactor), - ConfigEntries: map[string]*string{ - "cleanup.policy": &p.config.Topics.CleanupPolicy, - "retention.ms": stringPtr(fmt.Sprintf("%d", p.config.Topics.RetentionMs)), - "segment.ms": stringPtr(fmt.Sprintf("%d", p.config.Topics.SegmentMs)), - }, - } - } - - // Create topics - for _, topic := range p.topics { - err = admin.CreateTopic(topic, topicSpecs[topic], false) - if err != nil && err != sarama.ErrTopicAlreadyExists { - log.Printf("Producer %d: Warning - failed to create topic %s: %v", p.id, topic, err) - } else { - log.Printf("Producer %d: Successfully created topic %s", p.id, topic) - } - } - - return nil -} - -// Close closes the producer and cleans up resources -func (p *Producer) Close() error { - log.Printf("Producer %d: Closing", p.id) - - if p.rateLimiter != nil { - p.rateLimiter.Stop() - } - - if p.saramaProducer != nil { - return p.saramaProducer.Close() - } - - return nil -} - -// Helper functions - -func stringPtr(s string) *string { - return &s -} - -func joinStrings(strs []string, sep string) string { - if len(strs) == 0 { - return "" - } - - result := strs[0] - for i := 1; i < len(strs); i++ { - result += sep + strs[i] - } - return result -} - -func (p *Producer) randomEventType() string { - events := []string{"login", "logout", "view", "click", "purchase", "signup", "search", "download"} - return events[p.random.Intn(len(events))] -} - -func (p *Producer) randomCountry() string { - countries := []string{"US", "CA", "UK", "DE", "FR", "JP", "AU", "BR", "IN", "CN"} - return countries[p.random.Intn(len(countries))] -} - -func (p *Producer) randomDeviceType() string { - devices := []string{"desktop", "mobile", "tablet", "tv", "watch"} - return devices[p.random.Intn(len(devices))] -} - -// fetchSchemaIDs fetches schema IDs from Schema Registry for all topics -func (p *Producer) fetchSchemaIDs() error { - for _, topic := range p.topics { - subject := topic + "-value" - schemaID, err := p.getSchemaID(subject) - if err != nil { - return fmt.Errorf("failed to get schema ID for subject %s: %w", subject, err) - } - p.schemaIDs[topic] = schemaID - log.Printf("Producer %d: Fetched schema ID %d for topic %s", p.id, schemaID, topic) - } - return nil -} - -// getSchemaID fetches the latest schema ID for a subject from Schema Registry -func (p *Producer) getSchemaID(subject string) (int, error) { - url := fmt.Sprintf("%s/subjects/%s/versions/latest", p.config.SchemaRegistry.URL, subject) - - resp, err := http.Get(url) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return 0, fmt.Errorf("failed to get schema: status=%d, body=%s", resp.StatusCode, string(body)) - } - - var schemaResp struct { - ID int `json:"id"` - } - if err := json.NewDecoder(resp.Body).Decode(&schemaResp); err != nil { - return 0, err - } - - return schemaResp.ID, nil -} - -// ensureSchemasRegistered ensures that schemas are registered for all topics -// It registers schemas if they don't exist, but doesn't fail if they already do -func (p *Producer) ensureSchemasRegistered() error { - for _, topic := range p.topics { - subject := topic + "-value" - - // First check if schema already exists - schemaID, err := p.getSchemaID(subject) - if err == nil { - log.Printf("Producer %d: Schema already exists for topic %s (ID: %d), skipping registration", p.id, topic, schemaID) - continue - } - - // Schema doesn't exist, register it - log.Printf("Producer %d: Registering schema for topic %s", p.id, topic) - if err := p.registerTopicSchema(subject); err != nil { - return fmt.Errorf("failed to register schema for topic %s: %w", topic, err) - } - log.Printf("Producer %d: Schema registered successfully for topic %s", p.id, topic) - } - return nil -} - -// registerTopicSchema registers the schema for a specific topic based on configured format -func (p *Producer) registerTopicSchema(subject string) error { - // Extract topic name from subject (remove -value or -key suffix) - topicName := strings.TrimSuffix(strings.TrimSuffix(subject, "-value"), "-key") - - // Get schema format for this topic - schemaFormat, ok := p.schemaFormats[topicName] - if !ok { - // Fallback to config or default - schemaFormat = p.config.Producers.SchemaFormat - if schemaFormat == "" { - schemaFormat = "AVRO" - } - } - - var schemaStr string - var schemaType string - - switch strings.ToUpper(schemaFormat) { - case "AVRO": - schemaStr = schema.GetAvroSchema() - schemaType = "AVRO" - case "JSON", "JSON_SCHEMA": - schemaStr = schema.GetJSONSchema() - schemaType = "JSON" - case "PROTOBUF": - schemaStr = schema.GetProtobufSchema() - schemaType = "PROTOBUF" - default: - return fmt.Errorf("unsupported schema format: %s", schemaFormat) - } - - url := fmt.Sprintf("%s/subjects/%s/versions", p.config.SchemaRegistry.URL, subject) - - payload := map[string]interface{}{ - "schema": schemaStr, - "schemaType": schemaType, - } - - jsonPayload, err := json.Marshal(payload) - if err != nil { - return fmt.Errorf("failed to marshal schema payload: %w", err) - } - - resp, err := http.Post(url, "application/vnd.schemaregistry.v1+json", strings.NewReader(string(jsonPayload))) - if err != nil { - return fmt.Errorf("failed to register schema: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("schema registration failed: status=%d, body=%s", resp.StatusCode, string(body)) - } - - var registerResp struct { - ID int `json:"id"` - } - if err := json.NewDecoder(resp.Body).Decode(®isterResp); err != nil { - return fmt.Errorf("failed to decode registration response: %w", err) - } - - log.Printf("Schema registered with ID: %d (format: %s)", registerResp.ID, schemaType) - return nil -} - -// createConfluentWireFormat creates a message in Confluent Wire Format -// This matches the implementation in weed/mq/kafka/schema/envelope.go CreateConfluentEnvelope -func (p *Producer) createConfluentWireFormat(schemaID int, avroData []byte) []byte { - // Confluent Wire Format: [magic_byte(1)][schema_id(4)][payload(n)] - // magic_byte = 0x00 - // schema_id = 4 bytes big-endian - wireFormat := make([]byte, 5+len(avroData)) - wireFormat[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(wireFormat[1:5], uint32(schemaID)) - copy(wireFormat[5:], avroData) - return wireFormat -} - -// isCircuitBreakerError checks if an error indicates that the circuit breaker is open -func (p *Producer) isCircuitBreakerError(err error) bool { - return errors.Is(err, ErrCircuitBreakerOpen) -} diff --git a/test/kafka/kafka-client-loadtest/internal/schema/loadtest.proto b/test/kafka/kafka-client-loadtest/internal/schema/loadtest.proto deleted file mode 100644 index dfe00b72f..000000000 --- a/test/kafka/kafka-client-loadtest/internal/schema/loadtest.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package com.seaweedfs.loadtest; - -option go_package = "github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema/pb"; - -message LoadTestMessage { - string id = 1; - int64 timestamp = 2; - int32 producer_id = 3; - int64 counter = 4; - string user_id = 5; - string event_type = 6; - map properties = 7; -} - diff --git a/test/kafka/kafka-client-loadtest/internal/schema/pb/loadtest.pb.go b/test/kafka/kafka-client-loadtest/internal/schema/pb/loadtest.pb.go deleted file mode 100644 index 3ed58aa9e..000000000 --- a/test/kafka/kafka-client-loadtest/internal/schema/pb/loadtest.pb.go +++ /dev/null @@ -1,185 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 -// source: loadtest.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type LoadTestMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - ProducerId int32 `protobuf:"varint,3,opt,name=producer_id,json=producerId,proto3" json:"producer_id,omitempty"` - Counter int64 `protobuf:"varint,4,opt,name=counter,proto3" json:"counter,omitempty"` - UserId string `protobuf:"bytes,5,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - EventType string `protobuf:"bytes,6,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` - Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *LoadTestMessage) Reset() { - *x = LoadTestMessage{} - mi := &file_loadtest_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *LoadTestMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadTestMessage) ProtoMessage() {} - -func (x *LoadTestMessage) ProtoReflect() protoreflect.Message { - mi := &file_loadtest_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadTestMessage.ProtoReflect.Descriptor instead. -func (*LoadTestMessage) Descriptor() ([]byte, []int) { - return file_loadtest_proto_rawDescGZIP(), []int{0} -} - -func (x *LoadTestMessage) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *LoadTestMessage) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *LoadTestMessage) GetProducerId() int32 { - if x != nil { - return x.ProducerId - } - return 0 -} - -func (x *LoadTestMessage) GetCounter() int64 { - if x != nil { - return x.Counter - } - return 0 -} - -func (x *LoadTestMessage) GetUserId() string { - if x != nil { - return x.UserId - } - return "" -} - -func (x *LoadTestMessage) GetEventType() string { - if x != nil { - return x.EventType - } - return "" -} - -func (x *LoadTestMessage) GetProperties() map[string]string { - if x != nil { - return x.Properties - } - return nil -} - -var File_loadtest_proto protoreflect.FileDescriptor - -const file_loadtest_proto_rawDesc = "" + - "\n" + - "\x0eloadtest.proto\x12\x16com.seaweedfs.loadtest\"\xca\x02\n" + - "\x0fLoadTestMessage\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x1c\n" + - "\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12\x1f\n" + - "\vproducer_id\x18\x03 \x01(\x05R\n" + - "producerId\x12\x18\n" + - "\acounter\x18\x04 \x01(\x03R\acounter\x12\x17\n" + - "\auser_id\x18\x05 \x01(\tR\x06userId\x12\x1d\n" + - "\n" + - "event_type\x18\x06 \x01(\tR\teventType\x12W\n" + - "\n" + - "properties\x18\a \x03(\v27.com.seaweedfs.loadtest.LoadTestMessage.PropertiesEntryR\n" + - "properties\x1a=\n" + - "\x0fPropertiesEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01BTZRgithub.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest/internal/schema/pbb\x06proto3" - -var ( - file_loadtest_proto_rawDescOnce sync.Once - file_loadtest_proto_rawDescData []byte -) - -func file_loadtest_proto_rawDescGZIP() []byte { - file_loadtest_proto_rawDescOnce.Do(func() { - file_loadtest_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_loadtest_proto_rawDesc), len(file_loadtest_proto_rawDesc))) - }) - return file_loadtest_proto_rawDescData -} - -var file_loadtest_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_loadtest_proto_goTypes = []any{ - (*LoadTestMessage)(nil), // 0: com.seaweedfs.loadtest.LoadTestMessage - nil, // 1: com.seaweedfs.loadtest.LoadTestMessage.PropertiesEntry -} -var file_loadtest_proto_depIdxs = []int32{ - 1, // 0: com.seaweedfs.loadtest.LoadTestMessage.properties:type_name -> com.seaweedfs.loadtest.LoadTestMessage.PropertiesEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_loadtest_proto_init() } -func file_loadtest_proto_init() { - if File_loadtest_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_loadtest_proto_rawDesc), len(file_loadtest_proto_rawDesc)), - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_loadtest_proto_goTypes, - DependencyIndexes: file_loadtest_proto_depIdxs, - MessageInfos: file_loadtest_proto_msgTypes, - }.Build() - File_loadtest_proto = out.File - file_loadtest_proto_goTypes = nil - file_loadtest_proto_depIdxs = nil -} diff --git a/test/kafka/kafka-client-loadtest/internal/schema/schemas.go b/test/kafka/kafka-client-loadtest/internal/schema/schemas.go deleted file mode 100644 index 011b28ef2..000000000 --- a/test/kafka/kafka-client-loadtest/internal/schema/schemas.go +++ /dev/null @@ -1,58 +0,0 @@ -package schema - -// GetAvroSchema returns the Avro schema for load test messages -func GetAvroSchema() string { - return `{ - "type": "record", - "name": "LoadTestMessage", - "namespace": "com.seaweedfs.loadtest", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "producer_id", "type": "int"}, - {"name": "counter", "type": "long"}, - {"name": "user_id", "type": "string"}, - {"name": "event_type", "type": "string"}, - {"name": "properties", "type": {"type": "map", "values": "string"}} - ] - }` -} - -// GetJSONSchema returns the JSON Schema for load test messages -func GetJSONSchema() string { - return `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "LoadTestMessage", - "type": "object", - "properties": { - "id": {"type": "string"}, - "timestamp": {"type": "integer"}, - "producer_id": {"type": "integer"}, - "counter": {"type": "integer"}, - "user_id": {"type": "string"}, - "event_type": {"type": "string"}, - "properties": { - "type": "object", - "additionalProperties": {"type": "string"} - } - }, - "required": ["id", "timestamp", "producer_id", "counter", "user_id", "event_type"] - }` -} - -// GetProtobufSchema returns the Protobuf schema for load test messages -func GetProtobufSchema() string { - return `syntax = "proto3"; - -package com.seaweedfs.loadtest; - -message LoadTestMessage { - string id = 1; - int64 timestamp = 2; - int32 producer_id = 3; - int64 counter = 4; - string user_id = 5; - string event_type = 6; - map properties = 7; -}` -} diff --git a/test/kafka/kafka-client-loadtest/internal/tracker/tracker.go b/test/kafka/kafka-client-loadtest/internal/tracker/tracker.go deleted file mode 100644 index 1f67c7a65..000000000 --- a/test/kafka/kafka-client-loadtest/internal/tracker/tracker.go +++ /dev/null @@ -1,281 +0,0 @@ -package tracker - -import ( - "encoding/json" - "fmt" - "os" - "sort" - "strings" - "sync" - "time" -) - -// Record represents a tracked message -type Record struct { - Key string `json:"key"` - Topic string `json:"topic"` - Partition int32 `json:"partition"` - Offset int64 `json:"offset"` - Timestamp int64 `json:"timestamp"` - ProducerID int `json:"producer_id,omitempty"` - ConsumerID int `json:"consumer_id,omitempty"` -} - -// Tracker tracks produced and consumed records -type Tracker struct { - mu sync.Mutex - producedRecords []Record - consumedRecords []Record - producedFile string - consumedFile string - testStartTime int64 // Unix timestamp in nanoseconds - used to filter old messages - testRunPrefix string // Key prefix for this test run (e.g., "run-20251015-170150") - filteredOldCount int // Count of old messages consumed but not tracked -} - -// NewTracker creates a new record tracker -func NewTracker(producedFile, consumedFile string, testStartTime int64) *Tracker { - // Generate test run prefix from start time using same format as producer - // Producer format: p.startTime.Format("20060102-150405") -> "20251015-170859" - startTime := time.Unix(0, testStartTime) - runID := startTime.Format("20060102-150405") - testRunPrefix := fmt.Sprintf("run-%s", runID) - - fmt.Printf("Tracker initialized with prefix: %s (filtering messages not matching this prefix)\n", testRunPrefix) - - return &Tracker{ - producedRecords: make([]Record, 0, 100000), - consumedRecords: make([]Record, 0, 100000), - producedFile: producedFile, - consumedFile: consumedFile, - testStartTime: testStartTime, - testRunPrefix: testRunPrefix, - filteredOldCount: 0, - } -} - -// TrackProduced records a produced message -func (t *Tracker) TrackProduced(record Record) { - t.mu.Lock() - defer t.mu.Unlock() - t.producedRecords = append(t.producedRecords, record) -} - -// TrackConsumed records a consumed message -// Only tracks messages from the current test run (filters out old messages from previous tests) -func (t *Tracker) TrackConsumed(record Record) { - t.mu.Lock() - defer t.mu.Unlock() - - // Filter: Only track messages from current test run based on key prefix - // Producer keys look like: "run-20251015-170150-key-123" - // We only want messages that match our test run prefix - if !strings.HasPrefix(record.Key, t.testRunPrefix) { - // Count old messages consumed but not tracked - t.filteredOldCount++ - return - } - - t.consumedRecords = append(t.consumedRecords, record) -} - -// SaveProduced writes produced records to file -func (t *Tracker) SaveProduced() error { - t.mu.Lock() - defer t.mu.Unlock() - - f, err := os.Create(t.producedFile) - if err != nil { - return fmt.Errorf("failed to create produced file: %v", err) - } - defer f.Close() - - encoder := json.NewEncoder(f) - for _, record := range t.producedRecords { - if err := encoder.Encode(record); err != nil { - return fmt.Errorf("failed to encode produced record: %v", err) - } - } - - fmt.Printf("Saved %d produced records to %s\n", len(t.producedRecords), t.producedFile) - return nil -} - -// SaveConsumed writes consumed records to file -func (t *Tracker) SaveConsumed() error { - t.mu.Lock() - defer t.mu.Unlock() - - f, err := os.Create(t.consumedFile) - if err != nil { - return fmt.Errorf("failed to create consumed file: %v", err) - } - defer f.Close() - - encoder := json.NewEncoder(f) - for _, record := range t.consumedRecords { - if err := encoder.Encode(record); err != nil { - return fmt.Errorf("failed to encode consumed record: %v", err) - } - } - - fmt.Printf("Saved %d consumed records to %s\n", len(t.consumedRecords), t.consumedFile) - return nil -} - -// Compare compares produced and consumed records -func (t *Tracker) Compare() ComparisonResult { - t.mu.Lock() - defer t.mu.Unlock() - - result := ComparisonResult{ - TotalProduced: len(t.producedRecords), - TotalConsumed: len(t.consumedRecords), - FilteredOldCount: t.filteredOldCount, - } - - // Build maps for efficient lookup - producedMap := make(map[string]Record) - for _, record := range t.producedRecords { - key := fmt.Sprintf("%s-%d-%d", record.Topic, record.Partition, record.Offset) - producedMap[key] = record - } - - consumedMap := make(map[string]int) - duplicateKeys := make(map[string][]Record) - - for _, record := range t.consumedRecords { - key := fmt.Sprintf("%s-%d-%d", record.Topic, record.Partition, record.Offset) - consumedMap[key]++ - - if consumedMap[key] > 1 { - duplicateKeys[key] = append(duplicateKeys[key], record) - } - } - - // Find missing records (produced but not consumed) - for key, record := range producedMap { - if _, found := consumedMap[key]; !found { - result.Missing = append(result.Missing, record) - } - } - - // Find duplicate records (consumed multiple times) - for key, records := range duplicateKeys { - if len(records) > 0 { - // Add first occurrence for context - result.Duplicates = append(result.Duplicates, DuplicateRecord{ - Record: records[0], - Count: consumedMap[key], - }) - } - } - - result.MissingCount = len(result.Missing) - result.DuplicateCount = len(result.Duplicates) - result.UniqueConsumed = result.TotalConsumed - sumDuplicates(result.Duplicates) - - return result -} - -// ComparisonResult holds the comparison results -type ComparisonResult struct { - TotalProduced int - TotalConsumed int - UniqueConsumed int - MissingCount int - DuplicateCount int - FilteredOldCount int // Old messages consumed but filtered out - Missing []Record - Duplicates []DuplicateRecord -} - -// DuplicateRecord represents a record consumed multiple times -type DuplicateRecord struct { - Record Record - Count int -} - -// PrintSummary prints a summary of the comparison -func (r *ComparisonResult) PrintSummary() { - fmt.Println("\n" + strings.Repeat("=", 70)) - fmt.Println(" MESSAGE VERIFICATION RESULTS") - fmt.Println(strings.Repeat("=", 70)) - - fmt.Printf("\nProduction Summary:\n") - fmt.Printf(" Total Produced: %d messages\n", r.TotalProduced) - - fmt.Printf("\nConsumption Summary:\n") - fmt.Printf(" Total Consumed: %d messages (from current test)\n", r.TotalConsumed) - fmt.Printf(" Unique Consumed: %d messages\n", r.UniqueConsumed) - fmt.Printf(" Duplicate Reads: %d messages\n", r.TotalConsumed-r.UniqueConsumed) - if r.FilteredOldCount > 0 { - fmt.Printf(" Filtered Old: %d messages (from previous tests, not tracked)\n", r.FilteredOldCount) - } - - fmt.Printf("\nVerification Results:\n") - if r.MissingCount == 0 { - fmt.Printf(" โœ… Missing Records: 0 (all messages delivered)\n") - } else { - fmt.Printf(" โŒ Missing Records: %d (data loss detected!)\n", r.MissingCount) - } - - if r.DuplicateCount == 0 { - fmt.Printf(" โœ… Duplicate Records: 0 (no duplicates)\n") - } else { - duplicatePercent := float64(r.TotalConsumed-r.UniqueConsumed) * 100.0 / float64(r.TotalProduced) - fmt.Printf(" โš ๏ธ Duplicate Records: %d unique messages read multiple times (%.1f%%)\n", - r.DuplicateCount, duplicatePercent) - } - - fmt.Printf("\nDelivery Guarantee:\n") - if r.MissingCount == 0 && r.DuplicateCount == 0 { - fmt.Printf(" โœ… EXACTLY-ONCE: All messages delivered exactly once\n") - } else if r.MissingCount == 0 { - fmt.Printf(" โœ… AT-LEAST-ONCE: All messages delivered (some duplicates)\n") - } else { - fmt.Printf(" โŒ AT-MOST-ONCE: Some messages lost\n") - } - - // Print sample of missing records (up to 10) - if len(r.Missing) > 0 { - fmt.Printf("\nSample Missing Records (first 10 of %d):\n", len(r.Missing)) - for i, record := range r.Missing { - if i >= 10 { - break - } - fmt.Printf(" - %s[%d]@%d (key=%s)\n", - record.Topic, record.Partition, record.Offset, record.Key) - } - } - - // Print sample of duplicate records (up to 10) - if len(r.Duplicates) > 0 { - fmt.Printf("\nSample Duplicate Records (first 10 of %d):\n", len(r.Duplicates)) - // Sort by count descending - sorted := make([]DuplicateRecord, len(r.Duplicates)) - copy(sorted, r.Duplicates) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Count > sorted[j].Count - }) - - for i, dup := range sorted { - if i >= 10 { - break - } - fmt.Printf(" - %s[%d]@%d (key=%s, read %d times)\n", - dup.Record.Topic, dup.Record.Partition, dup.Record.Offset, - dup.Record.Key, dup.Count) - } - } - - fmt.Println(strings.Repeat("=", 70)) -} - -func sumDuplicates(duplicates []DuplicateRecord) int { - sum := 0 - for _, dup := range duplicates { - sum += dup.Count - 1 // Don't count the first occurrence - } - return sum -} diff --git a/test/kafka/kafka-client-loadtest/loadtest b/test/kafka/kafka-client-loadtest/loadtest deleted file mode 100755 index e5a23f173..000000000 Binary files a/test/kafka/kafka-client-loadtest/loadtest and /dev/null differ diff --git a/test/kafka/kafka-client-loadtest/log4j2.properties b/test/kafka/kafka-client-loadtest/log4j2.properties deleted file mode 100644 index 1461240e0..000000000 --- a/test/kafka/kafka-client-loadtest/log4j2.properties +++ /dev/null @@ -1,13 +0,0 @@ -# Set everything to debug -log4j.rootLogger=INFO, CONSOLE - -# Enable DEBUG for Kafka client internals -log4j.logger.org.apache.kafka.clients.consumer=DEBUG -log4j.logger.org.apache.kafka.clients.producer=DEBUG -log4j.logger.org.apache.kafka.clients.Metadata=DEBUG -log4j.logger.org.apache.kafka.common.network=WARN -log4j.logger.org.apache.kafka.common.utils=WARN - -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=[%d{HH:mm:ss}] [%-5p] [%c] %m%n diff --git a/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/kafka-loadtest.json b/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/kafka-loadtest.json deleted file mode 100644 index 3ea04fb68..000000000 --- a/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/kafka-loadtest.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "dashboard": { - "id": null, - "title": "Kafka Client Load Test Dashboard", - "tags": ["kafka", "loadtest", "seaweedfs"], - "timezone": "browser", - "panels": [ - { - "id": 1, - "title": "Messages Produced/Consumed", - "type": "stat", - "targets": [ - { - "expr": "rate(kafka_loadtest_messages_produced_total[5m])", - "legendFormat": "Produced/sec" - }, - { - "expr": "rate(kafka_loadtest_messages_consumed_total[5m])", - "legendFormat": "Consumed/sec" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0} - }, - { - "id": 2, - "title": "Message Latency", - "type": "graph", - "targets": [ - { - "expr": "histogram_quantile(0.95, kafka_loadtest_message_latency_seconds)", - "legendFormat": "95th percentile" - }, - { - "expr": "histogram_quantile(0.99, kafka_loadtest_message_latency_seconds)", - "legendFormat": "99th percentile" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0} - }, - { - "id": 3, - "title": "Error Rates", - "type": "graph", - "targets": [ - { - "expr": "rate(kafka_loadtest_producer_errors_total[5m])", - "legendFormat": "Producer Errors/sec" - }, - { - "expr": "rate(kafka_loadtest_consumer_errors_total[5m])", - "legendFormat": "Consumer Errors/sec" - } - ], - "gridPos": {"h": 8, "w": 24, "x": 0, "y": 8} - }, - { - "id": 4, - "title": "Throughput (MB/s)", - "type": "graph", - "targets": [ - { - "expr": "rate(kafka_loadtest_bytes_produced_total[5m]) / 1024 / 1024", - "legendFormat": "Produced MB/s" - }, - { - "expr": "rate(kafka_loadtest_bytes_consumed_total[5m]) / 1024 / 1024", - "legendFormat": "Consumed MB/s" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 16} - }, - { - "id": 5, - "title": "Active Clients", - "type": "stat", - "targets": [ - { - "expr": "kafka_loadtest_active_producers", - "legendFormat": "Producers" - }, - { - "expr": "kafka_loadtest_active_consumers", - "legendFormat": "Consumers" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 16} - }, - { - "id": 6, - "title": "Consumer Lag", - "type": "graph", - "targets": [ - { - "expr": "kafka_loadtest_consumer_lag_messages", - "legendFormat": "{{consumer_group}}-{{topic}}-{{partition}}" - } - ], - "gridPos": {"h": 8, "w": 24, "x": 0, "y": 24} - } - ], - "time": {"from": "now-30m", "to": "now"}, - "refresh": "5s", - "schemaVersion": 16, - "version": 0 - } -} diff --git a/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/seaweedfs.json b/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/seaweedfs.json deleted file mode 100644 index 4c2261f22..000000000 --- a/test/kafka/kafka-client-loadtest/monitoring/grafana/dashboards/seaweedfs.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "dashboard": { - "id": null, - "title": "SeaweedFS Cluster Dashboard", - "tags": ["seaweedfs", "storage"], - "timezone": "browser", - "panels": [ - { - "id": 1, - "title": "Master Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-master\"}", - "legendFormat": "Master Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 0, "y": 0} - }, - { - "id": 2, - "title": "Volume Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-volume\"}", - "legendFormat": "Volume Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 6, "y": 0} - }, - { - "id": 3, - "title": "Filer Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-filer\"}", - "legendFormat": "Filer Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 12, "y": 0} - }, - { - "id": 4, - "title": "MQ Broker Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-mq-broker\"}", - "legendFormat": "MQ Broker Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 18, "y": 0} - } - ], - "time": {"from": "now-30m", "to": "now"}, - "refresh": "10s", - "schemaVersion": 16, - "version": 0 - } -} diff --git a/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/dashboards/dashboard.yml b/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/dashboards/dashboard.yml deleted file mode 100644 index 0bcf3d818..000000000 --- a/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/dashboards/dashboard.yml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: 1 - -providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards diff --git a/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/datasources/datasource.yml b/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/datasources/datasource.yml deleted file mode 100644 index fb78be722..000000000 --- a/test/kafka/kafka-client-loadtest/monitoring/grafana/provisioning/datasources/datasource.yml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: 1 - -datasources: - - name: Prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://prometheus:9090 - basicAuth: false - isDefault: true - editable: true - version: 1 diff --git a/test/kafka/kafka-client-loadtest/monitoring/prometheus/prometheus.yml b/test/kafka/kafka-client-loadtest/monitoring/prometheus/prometheus.yml deleted file mode 100644 index f62091d52..000000000 --- a/test/kafka/kafka-client-loadtest/monitoring/prometheus/prometheus.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Prometheus configuration for Kafka Load Test monitoring - -global: - scrape_interval: 15s - evaluation_interval: 15s - -rule_files: - # - "first_rules.yml" - # - "second_rules.yml" - -scrape_configs: - # Scrape Prometheus itself - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - # Scrape load test metrics - - job_name: 'kafka-loadtest' - static_configs: - - targets: ['kafka-client-loadtest-runner:8080'] - scrape_interval: 5s - metrics_path: '/metrics' - - # Scrape SeaweedFS Master metrics - - job_name: 'seaweedfs-master' - static_configs: - - targets: ['seaweedfs-master:9333'] - metrics_path: '/metrics' - - # Scrape SeaweedFS Volume metrics - - job_name: 'seaweedfs-volume' - static_configs: - - targets: ['seaweedfs-volume:8080'] - metrics_path: '/metrics' - - # Scrape SeaweedFS Filer metrics - - job_name: 'seaweedfs-filer' - static_configs: - - targets: ['seaweedfs-filer:8888'] - metrics_path: '/metrics' - - # Scrape SeaweedFS MQ Broker metrics (if available) - - job_name: 'seaweedfs-mq-broker' - static_configs: - - targets: ['seaweedfs-mq-broker:17777'] - metrics_path: '/metrics' - scrape_interval: 10s - - # Scrape Kafka Gateway metrics (if available) - - job_name: 'kafka-gateway' - static_configs: - - targets: ['kafka-gateway:9093'] - metrics_path: '/metrics' - scrape_interval: 10s diff --git a/test/kafka/kafka-client-loadtest/pom.xml b/test/kafka/kafka-client-loadtest/pom.xml deleted file mode 100644 index 22d89e1b4..000000000 --- a/test/kafka/kafka-client-loadtest/pom.xml +++ /dev/null @@ -1,61 +0,0 @@ - - - 4.0.0 - - io.confluent.test - seek-test - 1.0 - - - 11 - 11 - 3.9.1 - - - - - org.apache.kafka - kafka-clients - ${kafka.version} - - - org.slf4j - slf4j-simple - 2.0.0 - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.8.1 - - - org.apache.maven.plugins - maven-shade-plugin - 3.2.4 - - - package - - shade - - - - - SeekToBeginningTest - - - seek-test - - - - - - . - - diff --git a/test/kafka/kafka-client-loadtest/scripts/register-schemas.sh b/test/kafka/kafka-client-loadtest/scripts/register-schemas.sh deleted file mode 100755 index 58cb0f114..000000000 --- a/test/kafka/kafka-client-loadtest/scripts/register-schemas.sh +++ /dev/null @@ -1,423 +0,0 @@ -#!/bin/bash - -# Register schemas with Schema Registry for load testing -# This script registers the necessary schemas before running load tests - -set -euo pipefail - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -log_warning() { - echo -e "${YELLOW}[WARN]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Configuration -SCHEMA_REGISTRY_URL=${SCHEMA_REGISTRY_URL:-"http://localhost:8081"} -TIMEOUT=${TIMEOUT:-60} -CHECK_INTERVAL=${CHECK_INTERVAL:-2} - -# Wait for Schema Registry to be ready -wait_for_schema_registry() { - log_info "Waiting for Schema Registry to be ready..." - - local elapsed=0 - while [[ $elapsed -lt $TIMEOUT ]]; do - if curl -sf --max-time 5 "$SCHEMA_REGISTRY_URL/subjects" >/dev/null 2>&1; then - log_success "Schema Registry is ready!" - return 0 - fi - - log_info "Schema Registry not ready yet. Waiting ${CHECK_INTERVAL}s... (${elapsed}/${TIMEOUT}s)" - sleep $CHECK_INTERVAL - elapsed=$((elapsed + CHECK_INTERVAL)) - done - - log_error "Schema Registry did not become ready within ${TIMEOUT} seconds" - return 1 -} - -# Register a schema for a subject -register_schema() { - local subject=$1 - local schema=$2 - local schema_type=${3:-"AVRO"} - local max_attempts=5 - local attempt=1 - - log_info "Registering schema for subject: $subject" - - # Create the schema registration payload - local escaped_schema=$(echo "$schema" | jq -Rs .) - local payload=$(cat </dev/null) - - if echo "$response" | jq -e '.id' >/dev/null 2>&1; then - local schema_id - schema_id=$(echo "$response" | jq -r '.id') - if [[ $attempt -gt 1 ]]; then - log_success "- Schema registered for $subject with ID: $schema_id [attempt $attempt]" - else - log_success "- Schema registered for $subject with ID: $schema_id" - fi - return 0 - fi - - # Check if it's a consumer lag timeout (error_code 50002) - local error_code - error_code=$(echo "$response" | jq -r '.error_code // empty' 2>/dev/null) - - if [[ "$error_code" == "50002" && $attempt -lt $max_attempts ]]; then - # Consumer lag timeout - wait longer for consumer to catch up - # Use exponential backoff: 1s, 2s, 4s, 8s - local wait_time=$(echo "2 ^ ($attempt - 1)" | bc) - log_warning "Schema Registry consumer lag detected for $subject, waiting ${wait_time}s before retry (attempt $attempt)..." - sleep "$wait_time" - attempt=$((attempt + 1)) - else - # Other error or max attempts reached - log_error "x Failed to register schema for $subject" - log_error "Response: $response" - return 1 - fi - done - - return 1 -} - -# Verify a schema exists (single attempt) -verify_schema() { - local subject=$1 - - local response - response=$(curl -s --max-time 10 "$SCHEMA_REGISTRY_URL/subjects/$subject/versions/latest" 2>/dev/null) - - if echo "$response" | jq -e '.id' >/dev/null 2>&1; then - local schema_id - local version - schema_id=$(echo "$response" | jq -r '.id') - version=$(echo "$response" | jq -r '.version') - log_success "- Schema verified for $subject (ID: $schema_id, Version: $version)" - return 0 - else - return 1 - fi -} - -# Verify a schema exists with retry logic (handles Schema Registry consumer lag) -verify_schema_with_retry() { - local subject=$1 - local max_attempts=10 - local attempt=1 - - log_info "Verifying schema for subject: $subject" - - while [[ $attempt -le $max_attempts ]]; do - local response - response=$(curl -s --max-time 10 "$SCHEMA_REGISTRY_URL/subjects/$subject/versions/latest" 2>/dev/null) - - if echo "$response" | jq -e '.id' >/dev/null 2>&1; then - local schema_id - local version - schema_id=$(echo "$response" | jq -r '.id') - version=$(echo "$response" | jq -r '.version') - - if [[ $attempt -gt 1 ]]; then - log_success "- Schema verified for $subject (ID: $schema_id, Version: $version) [attempt $attempt]" - else - log_success "- Schema verified for $subject (ID: $schema_id, Version: $version)" - fi - return 0 - fi - - # Schema not found, wait and retry (handles Schema Registry consumer lag) - if [[ $attempt -lt $max_attempts ]]; then - # Longer exponential backoff for Schema Registry consumer lag: 0.5s, 1s, 2s, 3s, 4s... - local wait_time=$(echo "scale=1; 0.5 * $attempt" | bc) - sleep "$wait_time" - attempt=$((attempt + 1)) - else - log_error "x Schema not found for $subject (tried $max_attempts times)" - return 1 - fi - done - - return 1 -} - -# Register load test schemas (optimized for batch registration) -register_loadtest_schemas() { - log_info "Registering load test schemas with multiple formats..." - - # Define the Avro schema for load test messages - local avro_value_schema='{ - "type": "record", - "name": "LoadTestMessage", - "namespace": "com.seaweedfs.loadtest", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "producer_id", "type": "int"}, - {"name": "counter", "type": "long"}, - {"name": "user_id", "type": "string"}, - {"name": "event_type", "type": "string"}, - {"name": "properties", "type": {"type": "map", "values": "string"}} - ] - }' - - # Define the JSON schema for load test messages - local json_value_schema='{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "LoadTestMessage", - "type": "object", - "properties": { - "id": {"type": "string"}, - "timestamp": {"type": "integer"}, - "producer_id": {"type": "integer"}, - "counter": {"type": "integer"}, - "user_id": {"type": "string"}, - "event_type": {"type": "string"}, - "properties": { - "type": "object", - "additionalProperties": {"type": "string"} - } - }, - "required": ["id", "timestamp", "producer_id", "counter", "user_id", "event_type"] - }' - - # Define the Protobuf schema for load test messages - local protobuf_value_schema='syntax = "proto3"; - -package com.seaweedfs.loadtest; - -message LoadTestMessage { - string id = 1; - int64 timestamp = 2; - int32 producer_id = 3; - int64 counter = 4; - string user_id = 5; - string event_type = 6; - map properties = 7; -}' - - # Define the key schema (simple string) - local avro_key_schema='{"type": "string"}' - local json_key_schema='{"type": "string"}' - local protobuf_key_schema='syntax = "proto3"; message Key { string key = 1; }' - - # Register schemas for all load test topics with different formats - local topics=("loadtest-topic-0" "loadtest-topic-1" "loadtest-topic-2" "loadtest-topic-3" "loadtest-topic-4") - local success_count=0 - local total_schemas=0 - - # Distribute formats: topic-0=AVRO, topic-1=JSON, topic-2=PROTOBUF, topic-3=AVRO, topic-4=JSON - local idx=0 - for topic in "${topics[@]}"; do - local format - local value_schema - local key_schema - - # Determine format based on topic index (same as producer logic) - case $((idx % 3)) in - 0) - format="AVRO" - value_schema="$avro_value_schema" - key_schema="$avro_key_schema" - ;; - 1) - format="JSON" - value_schema="$json_value_schema" - key_schema="$json_key_schema" - ;; - 2) - format="PROTOBUF" - value_schema="$protobuf_value_schema" - key_schema="$protobuf_key_schema" - ;; - esac - - log_info "Registering $topic with $format schema..." - - # Register value schema - if register_schema "${topic}-value" "$value_schema" "$format"; then - success_count=$((success_count + 1)) - fi - total_schemas=$((total_schemas + 1)) - - # Small delay to let Schema Registry consumer process (prevents consumer lag) - sleep 0.2 - - # Register key schema - if register_schema "${topic}-key" "$key_schema" "$format"; then - success_count=$((success_count + 1)) - fi - total_schemas=$((total_schemas + 1)) - - # Small delay to let Schema Registry consumer process (prevents consumer lag) - sleep 0.2 - - idx=$((idx + 1)) - done - - log_info "Schema registration summary: $success_count/$total_schemas schemas registered successfully" - log_info "Format distribution: topic-0=AVRO, topic-1=JSON, topic-2=PROTOBUF, topic-3=AVRO, topic-4=JSON" - - if [[ $success_count -eq $total_schemas ]]; then - log_success "All load test schemas registered successfully with multiple formats!" - return 0 - else - log_error "Some schemas failed to register" - return 1 - fi -} - -# Verify all schemas are registered -verify_loadtest_schemas() { - log_info "Verifying load test schemas..." - - local topics=("loadtest-topic-0" "loadtest-topic-1" "loadtest-topic-2" "loadtest-topic-3" "loadtest-topic-4") - local success_count=0 - local total_schemas=0 - - for topic in "${topics[@]}"; do - # Verify value schema with retry (handles Schema Registry consumer lag) - if verify_schema_with_retry "${topic}-value"; then - success_count=$((success_count + 1)) - fi - total_schemas=$((total_schemas + 1)) - - # Verify key schema with retry (handles Schema Registry consumer lag) - if verify_schema_with_retry "${topic}-key"; then - success_count=$((success_count + 1)) - fi - total_schemas=$((total_schemas + 1)) - done - - log_info "Schema verification summary: $success_count/$total_schemas schemas verified" - - if [[ $success_count -eq $total_schemas ]]; then - log_success "All load test schemas verified successfully!" - return 0 - else - log_error "Some schemas are missing or invalid" - return 1 - fi -} - -# List all registered subjects -list_subjects() { - log_info "Listing all registered subjects..." - - local subjects - subjects=$(curl -s --max-time 10 "$SCHEMA_REGISTRY_URL/subjects" 2>/dev/null) - - if echo "$subjects" | jq -e '.[]' >/dev/null 2>&1; then - # Use process substitution instead of pipeline to avoid subshell exit code issues - while IFS= read -r subject; do - log_info " - $subject" - done < <(echo "$subjects" | jq -r '.[]') - else - log_warning "No subjects found or Schema Registry not accessible" - fi - - return 0 -} - -# Clean up schemas (for testing) -cleanup_schemas() { - log_warning "Cleaning up load test schemas..." - - local topics=("loadtest-topic-0" "loadtest-topic-1" "loadtest-topic-2" "loadtest-topic-3" "loadtest-topic-4") - - for topic in "${topics[@]}"; do - # Delete value schema (with timeout) - curl -s --max-time 10 -X DELETE "$SCHEMA_REGISTRY_URL/subjects/${topic}-value" >/dev/null 2>&1 || true - curl -s --max-time 10 -X DELETE "$SCHEMA_REGISTRY_URL/subjects/${topic}-value?permanent=true" >/dev/null 2>&1 || true - - # Delete key schema (with timeout) - curl -s --max-time 10 -X DELETE "$SCHEMA_REGISTRY_URL/subjects/${topic}-key" >/dev/null 2>&1 || true - curl -s --max-time 10 -X DELETE "$SCHEMA_REGISTRY_URL/subjects/${topic}-key?permanent=true" >/dev/null 2>&1 || true - done - - log_success "Schema cleanup completed" -} - -# Main function -main() { - case "${1:-register}" in - "register") - wait_for_schema_registry - register_loadtest_schemas - ;; - "verify") - wait_for_schema_registry - verify_loadtest_schemas - ;; - "list") - wait_for_schema_registry - list_subjects - ;; - "cleanup") - wait_for_schema_registry - cleanup_schemas - ;; - "full") - wait_for_schema_registry - register_loadtest_schemas - # Wait for Schema Registry consumer to catch up before verification - log_info "Waiting 3 seconds for Schema Registry consumer to process all schemas..." - sleep 3 - verify_loadtest_schemas - list_subjects - ;; - *) - echo "Usage: $0 [register|verify|list|cleanup|full]" - echo "" - echo "Commands:" - echo " register - Register load test schemas (default)" - echo " verify - Verify schemas are registered" - echo " list - List all registered subjects" - echo " cleanup - Clean up load test schemas" - echo " full - Register, verify, and list schemas" - echo "" - echo "Environment variables:" - echo " SCHEMA_REGISTRY_URL - Schema Registry URL (default: http://localhost:8081)" - echo " TIMEOUT - Maximum time to wait for Schema Registry (default: 60)" - echo " CHECK_INTERVAL - Check interval in seconds (default: 2)" - exit 1 - ;; - esac - - return 0 -} - -main "$@" diff --git a/test/kafka/kafka-client-loadtest/scripts/run-loadtest.sh b/test/kafka/kafka-client-loadtest/scripts/run-loadtest.sh deleted file mode 100755 index 7f6ddc79a..000000000 --- a/test/kafka/kafka-client-loadtest/scripts/run-loadtest.sh +++ /dev/null @@ -1,480 +0,0 @@ -#!/bin/bash - -# Kafka Client Load Test Runner Script -# This script helps run various load test scenarios against SeaweedFS Kafka Gateway - -set -euo pipefail - -# Default configuration -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_DIR="$(dirname "$SCRIPT_DIR")" -DOCKER_COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml" -CONFIG_FILE="$PROJECT_DIR/config/loadtest.yaml" - -# Default test parameters -TEST_MODE="comprehensive" -TEST_DURATION="300s" -PRODUCER_COUNT=10 -CONSUMER_COUNT=5 -MESSAGE_RATE=1000 -MESSAGE_SIZE=1024 -TOPIC_COUNT=5 -PARTITIONS_PER_TOPIC=3 - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Function to print colored output -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -log_warning() { - echo -e "${YELLOW}[WARNING]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Function to show usage -show_usage() { - cat << EOF -Kafka Client Load Test Runner - -Usage: $0 [OPTIONS] [COMMAND] - -Commands: - start Start the load test infrastructure and run tests - stop Stop all services - restart Restart all services - status Show service status - logs Show logs from all services - clean Clean up all resources (volumes, networks, etc.) - monitor Start monitoring stack (Prometheus + Grafana) - scenarios Run predefined test scenarios - -Options: - -m, --mode MODE Test mode: producer, consumer, comprehensive (default: comprehensive) - -d, --duration DURATION Test duration (default: 300s) - -p, --producers COUNT Number of producers (default: 10) - -c, --consumers COUNT Number of consumers (default: 5) - -r, --rate RATE Messages per second per producer (default: 1000) - -s, --size SIZE Message size in bytes (default: 1024) - -t, --topics COUNT Number of topics (default: 5) - --partitions COUNT Partitions per topic (default: 3) - --config FILE Configuration file (default: config/loadtest.yaml) - --monitoring Enable monitoring stack - --wait-ready Wait for services to be ready before starting tests - -v, --verbose Verbose output - -h, --help Show this help message - -Examples: - # Run comprehensive test for 5 minutes - $0 start -m comprehensive -d 5m - - # Run producer-only test with high throughput - $0 start -m producer -p 20 -r 2000 -d 10m - - # Run consumer-only test - $0 start -m consumer -c 10 - - # Run with monitoring - $0 start --monitoring -d 15m - - # Clean up everything - $0 clean - -Predefined Scenarios: - quick Quick smoke test (1 min, low load) - standard Standard load test (5 min, medium load) - stress Stress test (10 min, high load) - endurance Endurance test (30 min, sustained load) - burst Burst test (variable load) - -EOF -} - -# Parse command line arguments -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - -m|--mode) - TEST_MODE="$2" - shift 2 - ;; - -d|--duration) - TEST_DURATION="$2" - shift 2 - ;; - -p|--producers) - PRODUCER_COUNT="$2" - shift 2 - ;; - -c|--consumers) - CONSUMER_COUNT="$2" - shift 2 - ;; - -r|--rate) - MESSAGE_RATE="$2" - shift 2 - ;; - -s|--size) - MESSAGE_SIZE="$2" - shift 2 - ;; - -t|--topics) - TOPIC_COUNT="$2" - shift 2 - ;; - --partitions) - PARTITIONS_PER_TOPIC="$2" - shift 2 - ;; - --config) - CONFIG_FILE="$2" - shift 2 - ;; - --monitoring) - ENABLE_MONITORING=1 - shift - ;; - --wait-ready) - WAIT_READY=1 - shift - ;; - -v|--verbose) - VERBOSE=1 - shift - ;; - -h|--help) - show_usage - exit 0 - ;; - -*) - log_error "Unknown option: $1" - show_usage - exit 1 - ;; - *) - if [[ -z "${COMMAND:-}" ]]; then - COMMAND="$1" - else - log_error "Multiple commands specified" - show_usage - exit 1 - fi - shift - ;; - esac - done -} - -# Check if Docker and Docker Compose are available -check_dependencies() { - if ! command -v docker &> /dev/null; then - log_error "Docker is not installed or not in PATH" - exit 1 - fi - - if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then - log_error "Docker Compose is not installed or not in PATH" - exit 1 - fi - - # Use docker compose if available, otherwise docker-compose - if docker compose version &> /dev/null; then - DOCKER_COMPOSE="docker compose" - else - DOCKER_COMPOSE="docker-compose" - fi -} - -# Wait for services to be ready -wait_for_services() { - log_info "Waiting for services to be ready..." - - local timeout=300 # 5 minutes timeout - local elapsed=0 - local check_interval=5 - - while [[ $elapsed -lt $timeout ]]; do - if $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" ps --format table | grep -q "healthy"; then - if check_service_health; then - log_success "All services are ready!" - return 0 - fi - fi - - sleep $check_interval - elapsed=$((elapsed + check_interval)) - log_info "Waiting... ($elapsed/${timeout}s)" - done - - log_error "Services did not become ready within $timeout seconds" - return 1 -} - -# Check health of critical services -check_service_health() { - # Check Kafka Gateway - if ! curl -s http://localhost:9093 >/dev/null 2>&1; then - return 1 - fi - - # Check Schema Registry - if ! curl -s http://localhost:8081/subjects >/dev/null 2>&1; then - return 1 - fi - - return 0 -} - -# Start the load test infrastructure -start_services() { - log_info "Starting SeaweedFS Kafka load test infrastructure..." - - # Set environment variables - export TEST_MODE="$TEST_MODE" - export TEST_DURATION="$TEST_DURATION" - export PRODUCER_COUNT="$PRODUCER_COUNT" - export CONSUMER_COUNT="$CONSUMER_COUNT" - export MESSAGE_RATE="$MESSAGE_RATE" - export MESSAGE_SIZE="$MESSAGE_SIZE" - export TOPIC_COUNT="$TOPIC_COUNT" - export PARTITIONS_PER_TOPIC="$PARTITIONS_PER_TOPIC" - - # Start core services - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" up -d \ - seaweedfs-master \ - seaweedfs-volume \ - seaweedfs-filer \ - seaweedfs-mq-broker \ - kafka-gateway \ - schema-registry - - # Start monitoring if enabled - if [[ "${ENABLE_MONITORING:-0}" == "1" ]]; then - log_info "Starting monitoring stack..." - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" --profile monitoring up -d - fi - - # Wait for services to be ready if requested - if [[ "${WAIT_READY:-0}" == "1" ]]; then - wait_for_services - fi - - log_success "Infrastructure started successfully" -} - -# Run the load test -run_loadtest() { - log_info "Starting Kafka client load test..." - log_info "Mode: $TEST_MODE, Duration: $TEST_DURATION" - log_info "Producers: $PRODUCER_COUNT, Consumers: $CONSUMER_COUNT" - log_info "Message Rate: $MESSAGE_RATE msgs/sec, Size: $MESSAGE_SIZE bytes" - - # Run the load test - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" --profile loadtest up --abort-on-container-exit kafka-client-loadtest - - # Show test results - show_results -} - -# Show test results -show_results() { - log_info "Load test completed! Gathering results..." - - # Get final metrics from the load test container - if $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" ps kafka-client-loadtest-runner &>/dev/null; then - log_info "Final test statistics:" - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" exec -T kafka-client-loadtest-runner curl -s http://localhost:8080/stats || true - fi - - # Show Prometheus metrics if monitoring is enabled - if [[ "${ENABLE_MONITORING:-0}" == "1" ]]; then - log_info "Monitoring dashboards available at:" - log_info " Prometheus: http://localhost:9090" - log_info " Grafana: http://localhost:3000 (admin/admin)" - fi - - # Show where results are stored - if [[ -d "$PROJECT_DIR/test-results" ]]; then - log_info "Test results saved to: $PROJECT_DIR/test-results/" - fi -} - -# Stop services -stop_services() { - log_info "Stopping all services..." - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" --profile loadtest --profile monitoring down - log_success "Services stopped" -} - -# Show service status -show_status() { - log_info "Service status:" - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" ps -} - -# Show logs -show_logs() { - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" logs -f "${1:-}" -} - -# Clean up all resources -clean_all() { - log_warning "This will remove all volumes, networks, and containers. Are you sure? (y/N)" - read -r response - if [[ "$response" =~ ^[Yy]$ ]]; then - log_info "Cleaning up all resources..." - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" --profile loadtest --profile monitoring down -v --remove-orphans - - # Remove any remaining volumes - docker volume ls -q | grep -E "(kafka-client-loadtest|seaweedfs)" | xargs -r docker volume rm - - # Remove networks - docker network ls -q | grep -E "kafka-client-loadtest" | xargs -r docker network rm - - log_success "Cleanup completed" - else - log_info "Cleanup cancelled" - fi -} - -# Run predefined scenarios -run_scenario() { - local scenario="$1" - - case "$scenario" in - quick) - TEST_MODE="comprehensive" - TEST_DURATION="1m" - PRODUCER_COUNT=2 - CONSUMER_COUNT=2 - MESSAGE_RATE=100 - MESSAGE_SIZE=512 - TOPIC_COUNT=2 - ;; - standard) - TEST_MODE="comprehensive" - TEST_DURATION="5m" - PRODUCER_COUNT=5 - CONSUMER_COUNT=3 - MESSAGE_RATE=500 - MESSAGE_SIZE=1024 - TOPIC_COUNT=3 - ;; - stress) - TEST_MODE="comprehensive" - TEST_DURATION="10m" - PRODUCER_COUNT=20 - CONSUMER_COUNT=10 - MESSAGE_RATE=2000 - MESSAGE_SIZE=2048 - TOPIC_COUNT=10 - ;; - endurance) - TEST_MODE="comprehensive" - TEST_DURATION="30m" - PRODUCER_COUNT=10 - CONSUMER_COUNT=5 - MESSAGE_RATE=1000 - MESSAGE_SIZE=1024 - TOPIC_COUNT=5 - ;; - burst) - TEST_MODE="comprehensive" - TEST_DURATION="10m" - PRODUCER_COUNT=10 - CONSUMER_COUNT=5 - MESSAGE_RATE=1000 - MESSAGE_SIZE=1024 - TOPIC_COUNT=5 - # Note: Burst behavior would be configured in the load test config - ;; - *) - log_error "Unknown scenario: $scenario" - log_info "Available scenarios: quick, standard, stress, endurance, burst" - exit 1 - ;; - esac - - log_info "Running $scenario scenario..." - start_services - if [[ "${WAIT_READY:-0}" == "1" ]]; then - wait_for_services - fi - run_loadtest -} - -# Main execution -main() { - if [[ $# -eq 0 ]]; then - show_usage - exit 0 - fi - - parse_args "$@" - check_dependencies - - case "${COMMAND:-}" in - start) - start_services - run_loadtest - ;; - stop) - stop_services - ;; - restart) - stop_services - start_services - ;; - status) - show_status - ;; - logs) - show_logs - ;; - clean) - clean_all - ;; - monitor) - ENABLE_MONITORING=1 - $DOCKER_COMPOSE -f "$DOCKER_COMPOSE_FILE" --profile monitoring up -d - log_success "Monitoring stack started" - log_info "Prometheus: http://localhost:9090" - log_info "Grafana: http://localhost:3000 (admin/admin)" - ;; - scenarios) - if [[ -n "${2:-}" ]]; then - run_scenario "$2" - else - log_error "Please specify a scenario" - log_info "Available scenarios: quick, standard, stress, endurance, burst" - exit 1 - fi - ;; - *) - log_error "Unknown command: ${COMMAND:-}" - show_usage - exit 1 - ;; - esac -} - -# Set default values -ENABLE_MONITORING=0 -WAIT_READY=0 -VERBOSE=0 - -# Run main function -main "$@" diff --git a/test/kafka/kafka-client-loadtest/scripts/setup-monitoring.sh b/test/kafka/kafka-client-loadtest/scripts/setup-monitoring.sh deleted file mode 100755 index 3ea43f998..000000000 --- a/test/kafka/kafka-client-loadtest/scripts/setup-monitoring.sh +++ /dev/null @@ -1,352 +0,0 @@ -#!/bin/bash - -# Setup monitoring for Kafka Client Load Test -# This script sets up Prometheus and Grafana configurations - -set -euo pipefail - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_DIR="$(dirname "$SCRIPT_DIR")" -MONITORING_DIR="$PROJECT_DIR/monitoring" - -# Colors -GREEN='\033[0;32m' -BLUE='\033[0;34m' -NC='\033[0m' - -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -# Create monitoring directory structure -setup_directories() { - log_info "Setting up monitoring directories..." - - mkdir -p "$MONITORING_DIR/prometheus" - mkdir -p "$MONITORING_DIR/grafana/dashboards" - mkdir -p "$MONITORING_DIR/grafana/provisioning/dashboards" - mkdir -p "$MONITORING_DIR/grafana/provisioning/datasources" - - log_success "Directories created" -} - -# Create Prometheus configuration -create_prometheus_config() { - log_info "Creating Prometheus configuration..." - - cat > "$MONITORING_DIR/prometheus/prometheus.yml" << 'EOF' -# Prometheus configuration for Kafka Load Test monitoring - -global: - scrape_interval: 15s - evaluation_interval: 15s - -rule_files: - # - "first_rules.yml" - # - "second_rules.yml" - -scrape_configs: - # Scrape Prometheus itself - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:9090'] - - # Scrape load test metrics - - job_name: 'kafka-loadtest' - static_configs: - - targets: ['kafka-client-loadtest-runner:8080'] - scrape_interval: 5s - metrics_path: '/metrics' - - # Scrape SeaweedFS Master metrics - - job_name: 'seaweedfs-master' - static_configs: - - targets: ['seaweedfs-master:9333'] - metrics_path: '/metrics' - - # Scrape SeaweedFS Volume metrics - - job_name: 'seaweedfs-volume' - static_configs: - - targets: ['seaweedfs-volume:8080'] - metrics_path: '/metrics' - - # Scrape SeaweedFS Filer metrics - - job_name: 'seaweedfs-filer' - static_configs: - - targets: ['seaweedfs-filer:8888'] - metrics_path: '/metrics' - - # Scrape SeaweedFS MQ Broker metrics (if available) - - job_name: 'seaweedfs-mq-broker' - static_configs: - - targets: ['seaweedfs-mq-broker:17777'] - metrics_path: '/metrics' - scrape_interval: 10s - - # Scrape Kafka Gateway metrics (if available) - - job_name: 'kafka-gateway' - static_configs: - - targets: ['kafka-gateway:9093'] - metrics_path: '/metrics' - scrape_interval: 10s -EOF - - log_success "Prometheus configuration created" -} - -# Create Grafana datasource configuration -create_grafana_datasource() { - log_info "Creating Grafana datasource configuration..." - - cat > "$MONITORING_DIR/grafana/provisioning/datasources/datasource.yml" << 'EOF' -apiVersion: 1 - -datasources: - - name: Prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://prometheus:9090 - basicAuth: false - isDefault: true - editable: true - version: 1 -EOF - - log_success "Grafana datasource configuration created" -} - -# Create Grafana dashboard provisioning -create_grafana_dashboard_provisioning() { - log_info "Creating Grafana dashboard provisioning..." - - cat > "$MONITORING_DIR/grafana/provisioning/dashboards/dashboard.yml" << 'EOF' -apiVersion: 1 - -providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards -EOF - - log_success "Grafana dashboard provisioning created" -} - -# Create Kafka Load Test dashboard -create_loadtest_dashboard() { - log_info "Creating Kafka Load Test Grafana dashboard..." - - cat > "$MONITORING_DIR/grafana/dashboards/kafka-loadtest.json" << 'EOF' -{ - "dashboard": { - "id": null, - "title": "Kafka Client Load Test Dashboard", - "tags": ["kafka", "loadtest", "seaweedfs"], - "timezone": "browser", - "panels": [ - { - "id": 1, - "title": "Messages Produced/Consumed", - "type": "stat", - "targets": [ - { - "expr": "rate(kafka_loadtest_messages_produced_total[5m])", - "legendFormat": "Produced/sec" - }, - { - "expr": "rate(kafka_loadtest_messages_consumed_total[5m])", - "legendFormat": "Consumed/sec" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0} - }, - { - "id": 2, - "title": "Message Latency", - "type": "graph", - "targets": [ - { - "expr": "histogram_quantile(0.95, kafka_loadtest_message_latency_seconds)", - "legendFormat": "95th percentile" - }, - { - "expr": "histogram_quantile(0.99, kafka_loadtest_message_latency_seconds)", - "legendFormat": "99th percentile" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0} - }, - { - "id": 3, - "title": "Error Rates", - "type": "graph", - "targets": [ - { - "expr": "rate(kafka_loadtest_producer_errors_total[5m])", - "legendFormat": "Producer Errors/sec" - }, - { - "expr": "rate(kafka_loadtest_consumer_errors_total[5m])", - "legendFormat": "Consumer Errors/sec" - } - ], - "gridPos": {"h": 8, "w": 24, "x": 0, "y": 8} - }, - { - "id": 4, - "title": "Throughput (MB/s)", - "type": "graph", - "targets": [ - { - "expr": "rate(kafka_loadtest_bytes_produced_total[5m]) / 1024 / 1024", - "legendFormat": "Produced MB/s" - }, - { - "expr": "rate(kafka_loadtest_bytes_consumed_total[5m]) / 1024 / 1024", - "legendFormat": "Consumed MB/s" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 16} - }, - { - "id": 5, - "title": "Active Clients", - "type": "stat", - "targets": [ - { - "expr": "kafka_loadtest_active_producers", - "legendFormat": "Producers" - }, - { - "expr": "kafka_loadtest_active_consumers", - "legendFormat": "Consumers" - } - ], - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 16} - }, - { - "id": 6, - "title": "Consumer Lag", - "type": "graph", - "targets": [ - { - "expr": "kafka_loadtest_consumer_lag_messages", - "legendFormat": "{{consumer_group}}-{{topic}}-{{partition}}" - } - ], - "gridPos": {"h": 8, "w": 24, "x": 0, "y": 24} - } - ], - "time": {"from": "now-30m", "to": "now"}, - "refresh": "5s", - "schemaVersion": 16, - "version": 0 - } -} -EOF - - log_success "Kafka Load Test dashboard created" -} - -# Create SeaweedFS dashboard -create_seaweedfs_dashboard() { - log_info "Creating SeaweedFS Grafana dashboard..." - - cat > "$MONITORING_DIR/grafana/dashboards/seaweedfs.json" << 'EOF' -{ - "dashboard": { - "id": null, - "title": "SeaweedFS Cluster Dashboard", - "tags": ["seaweedfs", "storage"], - "timezone": "browser", - "panels": [ - { - "id": 1, - "title": "Master Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-master\"}", - "legendFormat": "Master Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 0, "y": 0} - }, - { - "id": 2, - "title": "Volume Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-volume\"}", - "legendFormat": "Volume Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 6, "y": 0} - }, - { - "id": 3, - "title": "Filer Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-filer\"}", - "legendFormat": "Filer Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 12, "y": 0} - }, - { - "id": 4, - "title": "MQ Broker Status", - "type": "stat", - "targets": [ - { - "expr": "up{job=\"seaweedfs-mq-broker\"}", - "legendFormat": "MQ Broker Up" - } - ], - "gridPos": {"h": 4, "w": 6, "x": 18, "y": 0} - } - ], - "time": {"from": "now-30m", "to": "now"}, - "refresh": "10s", - "schemaVersion": 16, - "version": 0 - } -} -EOF - - log_success "SeaweedFS dashboard created" -} - -# Main setup function -main() { - log_info "Setting up monitoring for Kafka Client Load Test..." - - setup_directories - create_prometheus_config - create_grafana_datasource - create_grafana_dashboard_provisioning - create_loadtest_dashboard - create_seaweedfs_dashboard - - log_success "Monitoring setup completed!" - log_info "You can now start the monitoring stack with:" - log_info " ./scripts/run-loadtest.sh monitor" - log_info "" - log_info "After starting, access:" - log_info " Prometheus: http://localhost:9090" - log_info " Grafana: http://localhost:3000 (admin/admin)" -} - -main "$@" diff --git a/test/kafka/kafka-client-loadtest/scripts/test-retry-logic.sh b/test/kafka/kafka-client-loadtest/scripts/test-retry-logic.sh deleted file mode 100755 index e1a2f73e2..000000000 --- a/test/kafka/kafka-client-loadtest/scripts/test-retry-logic.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/bin/bash - -# Test script to verify the retry logic works correctly -# Simulates Schema Registry eventual consistency behavior - -set -euo pipefail - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -log_info() { - echo -e "${BLUE}[TEST]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[PASS]${NC} $1" -} - -log_error() { - echo -e "${RED}[FAIL]${NC} $1" -} - -# Mock function that simulates Schema Registry eventual consistency -# First N attempts fail, then succeeds -mock_schema_registry_query() { - local subject=$1 - local min_attempts_to_succeed=$2 - local current_attempt=$3 - - if [[ $current_attempt -ge $min_attempts_to_succeed ]]; then - # Simulate successful response - echo '{"id":1,"version":1,"schema":"test"}' - return 0 - else - # Simulate 404 Not Found - echo '{"error_code":40401,"message":"Subject not found"}' - return 1 - fi -} - -# Simulate verify_schema_with_retry logic -test_verify_with_retry() { - local subject=$1 - local min_attempts_to_succeed=$2 - local max_attempts=5 - local attempt=1 - - log_info "Testing $subject (should succeed after $min_attempts_to_succeed attempts)" - - while [[ $attempt -le $max_attempts ]]; do - local response - if response=$(mock_schema_registry_query "$subject" "$min_attempts_to_succeed" "$attempt"); then - if echo "$response" | grep -q '"id"'; then - if [[ $attempt -gt 1 ]]; then - log_success "$subject verified after $attempt attempts" - else - log_success "$subject verified on first attempt" - fi - return 0 - fi - fi - - # Schema not found, wait and retry - if [[ $attempt -lt $max_attempts ]]; then - # Exponential backoff: 0.1s, 0.2s, 0.4s, 0.8s - local wait_time=$(echo "scale=3; 0.1 * (2 ^ ($attempt - 1))" | bc) - log_info " Attempt $attempt failed, waiting ${wait_time}s before retry..." - sleep "$wait_time" - attempt=$((attempt + 1)) - else - log_error "$subject verification failed after $max_attempts attempts" - return 1 - fi - done - - return 1 -} - -# Run tests -log_info "==========================================" -log_info "Testing Schema Registry Retry Logic" -log_info "==========================================" -echo "" - -# Test 1: Schema available immediately -log_info "Test 1: Schema available immediately" -if test_verify_with_retry "immediate-schema" 1; then - log_success "โœ“ Test 1 passed" -else - log_error "โœ— Test 1 failed" - exit 1 -fi -echo "" - -# Test 2: Schema available after 2 attempts (200ms delay) -log_info "Test 2: Schema available after 2 attempts" -if test_verify_with_retry "delayed-schema-2" 2; then - log_success "โœ“ Test 2 passed" -else - log_error "โœ— Test 2 failed" - exit 1 -fi -echo "" - -# Test 3: Schema available after 3 attempts (600ms delay) -log_info "Test 3: Schema available after 3 attempts" -if test_verify_with_retry "delayed-schema-3" 3; then - log_success "โœ“ Test 3 passed" -else - log_error "โœ— Test 3 failed" - exit 1 -fi -echo "" - -# Test 4: Schema available after 4 attempts (1400ms delay) -log_info "Test 4: Schema available after 4 attempts" -if test_verify_with_retry "delayed-schema-4" 4; then - log_success "โœ“ Test 4 passed" -else - log_error "โœ— Test 4 failed" - exit 1 -fi -echo "" - -# Test 5: Schema never available (should fail) -log_info "Test 5: Schema never available (should fail gracefully)" -if test_verify_with_retry "missing-schema" 10; then - log_error "โœ— Test 5 failed (should have failed but passed)" - exit 1 -else - log_success "โœ“ Test 5 passed (correctly failed after max attempts)" -fi -echo "" - -log_success "==========================================" -log_success "All tests passed! โœ“" -log_success "==========================================" -log_info "" -log_info "Summary:" -log_info "- Immediate availability: works โœ“" -log_info "- 2-4 retry attempts: works โœ“" -log_info "- Max attempts handling: works โœ“" -log_info "- Exponential backoff: works โœ“" -log_info "" -log_info "Total retry time budget: ~1.5 seconds (0.1+0.2+0.4+0.8)" -log_info "This should handle Schema Registry consumer lag gracefully." - diff --git a/test/kafka/kafka-client-loadtest/scripts/wait-for-services.sh b/test/kafka/kafka-client-loadtest/scripts/wait-for-services.sh deleted file mode 100755 index d2560728b..000000000 --- a/test/kafka/kafka-client-loadtest/scripts/wait-for-services.sh +++ /dev/null @@ -1,291 +0,0 @@ -#!/bin/bash - -# Wait for SeaweedFS and Kafka Gateway services to be ready -# This script checks service health and waits until all services are operational - -set -euo pipefail - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -log_warning() { - echo -e "${YELLOW}[WARNING]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Configuration -TIMEOUT=${TIMEOUT:-300} # 5 minutes default timeout -CHECK_INTERVAL=${CHECK_INTERVAL:-5} # Check every 5 seconds -SEAWEEDFS_MASTER_URL=${SEAWEEDFS_MASTER_URL:-"http://localhost:9333"} -KAFKA_GATEWAY_URL=${KAFKA_GATEWAY_URL:-"localhost:9093"} -SCHEMA_REGISTRY_URL=${SCHEMA_REGISTRY_URL:-"http://localhost:8081"} -SEAWEEDFS_FILER_URL=${SEAWEEDFS_FILER_URL:-"http://localhost:8888"} - -# Check if a service is reachable -check_http_service() { - local url=$1 - local name=$2 - - if curl -sf "$url" >/dev/null 2>&1; then - return 0 - else - return 1 - fi -} - -# Check TCP port -check_tcp_service() { - local host=$1 - local port=$2 - local name=$3 - - if timeout 3 bash -c "/dev/null; then - return 0 - else - return 1 - fi -} - -# Check SeaweedFS Master -check_seaweedfs_master() { - if check_http_service "$SEAWEEDFS_MASTER_URL/cluster/status" "SeaweedFS Master"; then - # Additional check: ensure cluster has volumes - local status_json - status_json=$(curl -s "$SEAWEEDFS_MASTER_URL/cluster/status" 2>/dev/null || echo "{}") - - # Check if we have at least one volume server - if echo "$status_json" | grep -q '"Max":0'; then - log_warning "SeaweedFS Master is running but no volumes are available" - return 1 - fi - - return 0 - fi - return 1 -} - -# Check SeaweedFS Filer -check_seaweedfs_filer() { - check_http_service "$SEAWEEDFS_FILER_URL/" "SeaweedFS Filer" -} - -# Check Kafka Gateway -check_kafka_gateway() { - local host="localhost" - local port="9093" - check_tcp_service "$host" "$port" "Kafka Gateway" -} - -# Check Schema Registry -check_schema_registry() { - # Check if Schema Registry container is running first - if ! docker compose ps schema-registry | grep -q "Up"; then - # Schema Registry is not running, which is okay for basic tests - return 0 - fi - - # FIXED: Wait for Docker healthcheck to report "healthy", not just "Up" - # Schema Registry has a 30s start_period, so we need to wait for the actual healthcheck - local health_status - health_status=$(docker inspect loadtest-schema-registry --format='{{.State.Health.Status}}' 2>/dev/null || echo "none") - - # If container has no healthcheck or healthcheck is not yet healthy, check HTTP directly - if [[ "$health_status" == "healthy" ]]; then - # Container reports healthy, do a final verification - if check_http_service "$SCHEMA_REGISTRY_URL/subjects" "Schema Registry"; then - return 0 - fi - elif [[ "$health_status" == "starting" ]]; then - # Still in startup period, wait longer - return 1 - elif [[ "$health_status" == "none" ]]; then - # No healthcheck defined (shouldn't happen), fall back to HTTP check - if check_http_service "$SCHEMA_REGISTRY_URL/subjects" "Schema Registry"; then - local subjects - subjects=$(curl -s "$SCHEMA_REGISTRY_URL/subjects" 2>/dev/null || echo "[]") - - # Schema registry should at least return an empty array - if [[ "$subjects" == "[]" ]]; then - return 0 - elif echo "$subjects" | grep -q '\['; then - return 0 - else - log_warning "Schema Registry is not properly connected" - return 1 - fi - fi - fi - return 1 -} - -# Check MQ Broker -check_mq_broker() { - check_tcp_service "localhost" "17777" "SeaweedFS MQ Broker" -} - -# Main health check function -check_all_services() { - local all_healthy=true - - log_info "Checking service health..." - - # Check SeaweedFS Master - if check_seaweedfs_master; then - log_success "โœ“ SeaweedFS Master is healthy" - else - log_error "โœ— SeaweedFS Master is not ready" - all_healthy=false - fi - - # Check SeaweedFS Filer - if check_seaweedfs_filer; then - log_success "โœ“ SeaweedFS Filer is healthy" - else - log_error "โœ— SeaweedFS Filer is not ready" - all_healthy=false - fi - - # Check MQ Broker - if check_mq_broker; then - log_success "โœ“ SeaweedFS MQ Broker is healthy" - else - log_error "โœ— SeaweedFS MQ Broker is not ready" - all_healthy=false - fi - - # Check Kafka Gateway - if check_kafka_gateway; then - log_success "โœ“ Kafka Gateway is healthy" - else - log_error "โœ— Kafka Gateway is not ready" - all_healthy=false - fi - - # Check Schema Registry - if ! docker compose ps schema-registry | grep -q "Up"; then - log_warning "โš  Schema Registry is stopped (skipping)" - elif check_schema_registry; then - log_success "โœ“ Schema Registry is healthy" - else - # Check if it's still starting up (healthcheck start_period) - local health_status - health_status=$(docker inspect loadtest-schema-registry --format='{{.State.Health.Status}}' 2>/dev/null || echo "unknown") - if [[ "$health_status" == "starting" ]]; then - log_warning "โณ Schema Registry is starting (waiting for healthcheck...)" - else - log_error "โœ— Schema Registry is not ready (status: $health_status)" - fi - all_healthy=false - fi - - $all_healthy -} - -# Wait for all services to be ready -wait_for_services() { - log_info "Waiting for all services to be ready (timeout: ${TIMEOUT}s)..." - - local elapsed=0 - - while [[ $elapsed -lt $TIMEOUT ]]; do - if check_all_services; then - log_success "All services are ready! (took ${elapsed}s)" - return 0 - fi - - log_info "Some services are not ready yet. Waiting ${CHECK_INTERVAL}s... (${elapsed}/${TIMEOUT}s)" - sleep $CHECK_INTERVAL - elapsed=$((elapsed + CHECK_INTERVAL)) - done - - log_error "Services did not become ready within ${TIMEOUT} seconds" - log_error "Final service status:" - check_all_services - - # Always dump Schema Registry diagnostics on timeout since it's the problematic service - log_error "===========================================" - log_error "Schema Registry Container Status:" - log_error "===========================================" - docker compose ps schema-registry 2>&1 || echo "Failed to get container status" - docker inspect loadtest-schema-registry --format='Health: {{.State.Health.Status}} ({{len .State.Health.Log}} checks)' 2>&1 || echo "Failed to inspect container" - log_error "===========================================" - - log_error "Network Connectivity Check:" - log_error "===========================================" - log_error "Can Schema Registry reach Kafka Gateway?" - docker compose exec -T schema-registry ping -c 3 kafka-gateway 2>&1 || echo "Ping failed" - docker compose exec -T schema-registry nc -zv kafka-gateway 9093 2>&1 || echo "Port 9093 unreachable" - log_error "===========================================" - - log_error "Schema Registry Logs (last 100 lines):" - log_error "===========================================" - docker compose logs --tail=100 schema-registry 2>&1 || echo "Failed to get Schema Registry logs" - log_error "===========================================" - - log_error "Kafka Gateway Logs (last 50 lines with 'SR' prefix):" - log_error "===========================================" - docker compose logs --tail=200 kafka-gateway 2>&1 | grep -i "SR" | tail -50 || echo "No SR-related logs found in Kafka Gateway" - log_error "===========================================" - - log_error "MQ Broker Logs (last 30 lines):" - log_error "===========================================" - docker compose logs --tail=30 seaweedfs-mq-broker 2>&1 || echo "Failed to get MQ Broker logs" - log_error "===========================================" - - return 1 -} - -# Show current service status -show_status() { - log_info "Current service status:" - check_all_services -} - -# Main function -main() { - case "${1:-wait}" in - "wait") - wait_for_services - ;; - "check") - show_status - ;; - "status") - show_status - ;; - *) - echo "Usage: $0 [wait|check|status]" - echo "" - echo "Commands:" - echo " wait - Wait for all services to be ready (default)" - echo " check - Check current service status" - echo " status - Same as check" - echo "" - echo "Environment variables:" - echo " TIMEOUT - Maximum time to wait in seconds (default: 300)" - echo " CHECK_INTERVAL - Check interval in seconds (default: 5)" - echo " SEAWEEDFS_MASTER_URL - Master URL (default: http://localhost:9333)" - echo " KAFKA_GATEWAY_URL - Gateway URL (default: localhost:9093)" - echo " SCHEMA_REGISTRY_URL - Schema Registry URL (default: http://localhost:8081)" - echo " SEAWEEDFS_FILER_URL - Filer URL (default: http://localhost:8888)" - exit 1 - ;; - esac -} - -main "$@" diff --git a/test/kafka/kafka-client-loadtest/single-partition-test.sh b/test/kafka/kafka-client-loadtest/single-partition-test.sh deleted file mode 100755 index 9c8b8a712..000000000 --- a/test/kafka/kafka-client-loadtest/single-partition-test.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# Single partition test - produce and consume from ONE topic, ONE partition - -set -e - -echo "================================================================" -echo " Single Partition Test - Isolate Missing Messages" -echo " - Topic: single-test-topic (1 partition only)" -echo " - Duration: 2 minutes" -echo " - Producer: 1 (50 msgs/sec)" -echo " - Consumer: 1 (reading from partition 0 only)" -echo "================================================================" - -# Clean up -make clean -make start - -# Run test with single topic, single partition -TEST_MODE=comprehensive \ -TEST_DURATION=2m \ -PRODUCER_COUNT=1 \ -CONSUMER_COUNT=1 \ -MESSAGE_RATE=50 \ -MESSAGE_SIZE=512 \ -TOPIC_COUNT=1 \ -PARTITIONS_PER_TOPIC=1 \ -VALUE_TYPE=avro \ -docker compose --profile loadtest up --abort-on-container-exit kafka-client-loadtest - -echo "" -echo "================================================================" -echo " Single Partition Test Complete!" -echo "================================================================" -echo "" -echo "Analyzing results..." -cd test-results && python3 analyze_missing.py diff --git a/test/kafka/kafka-client-loadtest/test-no-schema.sh b/test/kafka/kafka-client-loadtest/test-no-schema.sh deleted file mode 100755 index 6c852cf8d..000000000 --- a/test/kafka/kafka-client-loadtest/test-no-schema.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# Test without schema registry to isolate missing messages issue - -# Clean old data -find test-results -name "*.jsonl" -delete 2>/dev/null || true - -# Run test without schemas -TEST_MODE=comprehensive \ -TEST_DURATION=1m \ -PRODUCER_COUNT=2 \ -CONSUMER_COUNT=2 \ -MESSAGE_RATE=50 \ -MESSAGE_SIZE=512 \ -VALUE_TYPE=json \ -SCHEMAS_ENABLED=false \ -docker compose --profile loadtest up --abort-on-container-exit kafka-client-loadtest - -echo "" -echo "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" -echo "Analyzing results..." -if [ -f test-results/produced.jsonl ] && [ -f test-results/consumed.jsonl ]; then - produced=$(wc -l < test-results/produced.jsonl) - consumed=$(wc -l < test-results/consumed.jsonl) - echo "Produced: $produced" - echo "Consumed: $consumed" - - # Check for missing messages - jq -r '"\(.topic)[\(.partition)]@\(.offset)"' test-results/produced.jsonl | sort > /tmp/produced.txt - jq -r '"\(.topic)[\(.partition)]@\(.offset)"' test-results/consumed.jsonl | sort > /tmp/consumed.txt - missing=$(comm -23 /tmp/produced.txt /tmp/consumed.txt | wc -l) - echo "Missing: $missing" - - if [ $missing -eq 0 ]; then - echo "โœ“ NO MISSING MESSAGES!" - else - echo "โœ— Still have missing messages" - echo "Sample missing:" - comm -23 /tmp/produced.txt /tmp/consumed.txt | head -10 - fi -else - echo "โœ— Result files not found" -fi -echo "โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•" diff --git a/test/kafka/kafka-client-loadtest/test_offset_fetch.go b/test/kafka/kafka-client-loadtest/test_offset_fetch.go deleted file mode 100644 index 0cb99dbf7..000000000 --- a/test/kafka/kafka-client-loadtest/test_offset_fetch.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "context" - "log" - "time" - - "github.com/IBM/sarama" -) - -func main() { - log.Println("=== Testing OffsetFetch with Debug Sarama ===") - - config := sarama.NewConfig() - config.Version = sarama.V2_8_0_0 - config.Consumer.Return.Errors = true - config.Consumer.Offsets.Initial = sarama.OffsetOldest - config.Consumer.Offsets.AutoCommit.Enable = true - config.Consumer.Offsets.AutoCommit.Interval = 100 * time.Millisecond - config.Consumer.Group.Session.Timeout = 30 * time.Second - config.Consumer.Group.Heartbeat.Interval = 3 * time.Second - - brokers := []string{"localhost:9093"} - group := "test-offset-fetch-group" - topics := []string{"loadtest-topic-0"} - - log.Printf("Creating consumer group: group=%s brokers=%v topics=%v", group, brokers, topics) - - consumerGroup, err := sarama.NewConsumerGroup(brokers, group, config) - if err != nil { - log.Fatalf("Failed to create consumer group: %v", err) - } - defer consumerGroup.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - handler := &testHandler{} - - log.Println("Starting consumer group session...") - log.Println("Watch for ๐Ÿ” [SARAMA-DEBUG] logs to trace OffsetFetch calls") - - go func() { - for { - if err := consumerGroup.Consume(ctx, topics, handler); err != nil { - log.Printf("Error from consumer: %v", err) - } - if ctx.Err() != nil { - return - } - } - }() - - // Wait for context to be done - <-ctx.Done() - log.Println("Test completed") -} - -type testHandler struct{} - -func (h *testHandler) Setup(session sarama.ConsumerGroupSession) error { - log.Printf("โœ“ Consumer group session setup: generation=%d memberID=%s", session.GenerationID(), session.MemberID()) - return nil -} - -func (h *testHandler) Cleanup(session sarama.ConsumerGroupSession) error { - log.Println("Consumer group session cleanup") - return nil -} - -func (h *testHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { - log.Printf("โœ“ Started consuming: topic=%s partition=%d offset=%d", claim.Topic(), claim.Partition(), claim.InitialOffset()) - - count := 0 - for message := range claim.Messages() { - count++ - log.Printf(" Received message #%d: offset=%d", count, message.Offset) - session.MarkMessage(message, "") - - if count >= 5 { - log.Println("Received 5 messages, stopping") - return nil - } - } - return nil -} diff --git a/test/kafka/kafka-client-loadtest/tools/AdminClientDebugger.java b/test/kafka/kafka-client-loadtest/tools/AdminClientDebugger.java deleted file mode 100644 index f511b4cf6..000000000 --- a/test/kafka/kafka-client-loadtest/tools/AdminClientDebugger.java +++ /dev/null @@ -1,290 +0,0 @@ -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.admin.AdminClientConfig; -import org.apache.kafka.clients.admin.DescribeClusterResult; -import org.apache.kafka.common.Node; - -import java.io.*; -import java.net.*; -import java.nio.ByteBuffer; -import java.util.*; -import java.util.concurrent.ExecutionException; - -public class AdminClientDebugger { - - public static void main(String[] args) throws Exception { - String broker = args.length > 0 ? args[0] : "localhost:9093"; - - System.out.println("=".repeat(80)); - System.out.println("KAFKA ADMINCLIENT DEBUGGER"); - System.out.println("=".repeat(80)); - System.out.println("Target broker: " + broker); - - // Test 1: Raw socket - capture exact bytes - System.out.println("\n" + "=".repeat(80)); - System.out.println("TEST 1: Raw Socket - Capture ApiVersions Exchange"); - System.out.println("=".repeat(80)); - testRawSocket(broker); - - // Test 2: AdminClient with detailed logging - System.out.println("\n" + "=".repeat(80)); - System.out.println("TEST 2: AdminClient with Logging"); - System.out.println("=".repeat(80)); - testAdminClient(broker); - } - - private static void testRawSocket(String broker) { - String[] parts = broker.split(":"); - String host = parts[0]; - int port = Integer.parseInt(parts[1]); - - try (Socket socket = new Socket(host, port)) { - socket.setSoTimeout(10000); - - InputStream in = socket.getInputStream(); - OutputStream out = socket.getOutputStream(); - - System.out.println("Connected to " + broker); - - // Build ApiVersions request (v4) - // Format: - // [Size][ApiKey=18][ApiVersion=4][CorrelationId=0][ClientId][TaggedFields] - ByteArrayOutputStream requestBody = new ByteArrayOutputStream(); - - // ApiKey (2 bytes) = 18 - requestBody.write(0); - requestBody.write(18); - - // ApiVersion (2 bytes) = 4 - requestBody.write(0); - requestBody.write(4); - - // CorrelationId (4 bytes) = 0 - requestBody.write(new byte[] { 0, 0, 0, 0 }); - - // ClientId (compact string) = "debug-client" - String clientId = "debug-client"; - writeCompactString(requestBody, clientId); - - // Tagged fields (empty) - requestBody.write(0x00); - - byte[] request = requestBody.toByteArray(); - - // Write size - ByteBuffer sizeBuffer = ByteBuffer.allocate(4); - sizeBuffer.putInt(request.length); - out.write(sizeBuffer.array()); - - // Write request - out.write(request); - out.flush(); - - System.out.println("\nSENT ApiVersions v4 Request:"); - System.out.println(" Size: " + request.length + " bytes"); - hexDump(" Request", request, Math.min(64, request.length)); - - // Read response size - byte[] sizeBytes = new byte[4]; - int read = in.read(sizeBytes); - if (read != 4) { - System.out.println("Failed to read response size (got " + read + " bytes)"); - return; - } - - int responseSize = ByteBuffer.wrap(sizeBytes).getInt(); - System.out.println("\nRECEIVED Response:"); - System.out.println(" Size: " + responseSize + " bytes"); - - // Read response body - byte[] responseBytes = new byte[responseSize]; - int totalRead = 0; - while (totalRead < responseSize) { - int n = in.read(responseBytes, totalRead, responseSize - totalRead); - if (n == -1) { - System.out.println("Unexpected EOF after " + totalRead + " bytes"); - return; - } - totalRead += n; - } - - System.out.println(" Read complete response: " + totalRead + " bytes"); - - // Decode response - System.out.println("\nRESPONSE STRUCTURE:"); - decodeApiVersionsResponse(responseBytes); - - // Try to read more (should timeout or get EOF) - System.out.println("\nโฑ๏ธ Waiting for any additional data (10s timeout)..."); - socket.setSoTimeout(10000); - try { - int nextByte = in.read(); - if (nextByte == -1) { - System.out.println(" Server closed connection (EOF)"); - } else { - System.out.println(" Unexpected data: " + nextByte); - } - } catch (SocketTimeoutException e) { - System.out.println(" Timeout - no additional data"); - } - - } catch (Exception e) { - System.out.println("Error: " + e.getMessage()); - e.printStackTrace(); - } - } - - private static void testAdminClient(String broker) { - Properties props = new Properties(); - props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, broker); - props.put(AdminClientConfig.CLIENT_ID_CONFIG, "admin-client-debugger"); - props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, 10000); - props.put(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 10000); - - System.out.println("Creating AdminClient with config:"); - props.forEach((k, v) -> System.out.println(" " + k + " = " + v)); - - try (AdminClient adminClient = AdminClient.create(props)) { - System.out.println("AdminClient created"); - - // Give the thread time to start - Thread.sleep(1000); - - System.out.println("\nCalling describeCluster()..."); - DescribeClusterResult result = adminClient.describeCluster(); - - System.out.println(" Waiting for nodes..."); - Collection nodes = result.nodes().get(); - - System.out.println("Cluster description retrieved:"); - System.out.println(" Nodes: " + nodes.size()); - for (Node node : nodes) { - System.out.println(" - Node " + node.id() + ": " + node.host() + ":" + node.port()); - } - - System.out.println("\n Cluster ID: " + result.clusterId().get()); - - Node controller = result.controller().get(); - if (controller != null) { - System.out.println(" Controller: Node " + controller.id()); - } - - } catch (ExecutionException e) { - System.out.println("Execution error: " + e.getCause().getMessage()); - e.getCause().printStackTrace(); - } catch (Exception e) { - System.out.println("Error: " + e.getMessage()); - e.printStackTrace(); - } - } - - private static void decodeApiVersionsResponse(byte[] data) { - int offset = 0; - - try { - // Correlation ID (4 bytes) - int correlationId = ByteBuffer.wrap(data, offset, 4).getInt(); - System.out.println(" [Offset " + offset + "] Correlation ID: " + correlationId); - offset += 4; - - // Header tagged fields (varint - should be 0x00 for flexible v3+) - int taggedFieldsLength = readUnsignedVarint(data, offset); - System.out.println(" [Offset " + offset + "] Header Tagged Fields Length: " + taggedFieldsLength); - offset += varintSize(data[offset]); - - // Error code (2 bytes) - short errorCode = ByteBuffer.wrap(data, offset, 2).getShort(); - System.out.println(" [Offset " + offset + "] Error Code: " + errorCode); - offset += 2; - - // API Keys array (compact array - varint length) - int apiKeysLength = readUnsignedVarint(data, offset) - 1; // Compact array: length+1 - System.out.println(" [Offset " + offset + "] API Keys Count: " + apiKeysLength); - offset += varintSize(data[offset]); - - // Show first few API keys - System.out.println(" First 5 API Keys:"); - for (int i = 0; i < Math.min(5, apiKeysLength); i++) { - short apiKey = ByteBuffer.wrap(data, offset, 2).getShort(); - offset += 2; - short minVersion = ByteBuffer.wrap(data, offset, 2).getShort(); - offset += 2; - short maxVersion = ByteBuffer.wrap(data, offset, 2).getShort(); - offset += 2; - // Per-element tagged fields - int perElementTagged = readUnsignedVarint(data, offset); - offset += varintSize(data[offset]); - - System.out.println(" " + (i + 1) + ". API " + apiKey + ": v" + minVersion + "-v" + maxVersion); - } - - System.out.println(" ... (showing first 5 of " + apiKeysLength + " APIs)"); - System.out.println(" Response structure is valid!"); - - // Hex dump of first 64 bytes - hexDump("\n First 64 bytes", data, Math.min(64, data.length)); - - } catch (Exception e) { - System.out.println(" Failed to decode at offset " + offset + ": " + e.getMessage()); - hexDump(" Raw bytes", data, Math.min(128, data.length)); - } - } - - private static int readUnsignedVarint(byte[] data, int offset) { - int value = 0; - int shift = 0; - while (true) { - byte b = data[offset++]; - value |= (b & 0x7F) << shift; - if ((b & 0x80) == 0) - break; - shift += 7; - } - return value; - } - - private static int varintSize(byte firstByte) { - int size = 1; - byte b = firstByte; - while ((b & 0x80) != 0) { - size++; - b = (byte) (b << 1); - } - return size; - } - - private static void writeCompactString(ByteArrayOutputStream out, String str) { - byte[] bytes = str.getBytes(); - writeUnsignedVarint(out, bytes.length + 1); // Compact string: length+1 - out.write(bytes, 0, bytes.length); - } - - private static void writeUnsignedVarint(ByteArrayOutputStream out, int value) { - while ((value & ~0x7F) != 0) { - out.write((byte) ((value & 0x7F) | 0x80)); - value >>>= 7; - } - out.write((byte) value); - } - - private static void hexDump(String label, byte[] data, int length) { - System.out.println(label + " (hex dump):"); - for (int i = 0; i < length; i += 16) { - System.out.printf(" %04x ", i); - for (int j = 0; j < 16; j++) { - if (i + j < length) { - System.out.printf("%02x ", data[i + j] & 0xFF); - } else { - System.out.print(" "); - } - if (j == 7) - System.out.print(" "); - } - System.out.print(" |"); - for (int j = 0; j < 16 && i + j < length; j++) { - byte b = data[i + j]; - System.out.print((b >= 32 && b < 127) ? (char) b : '.'); - } - System.out.println("|"); - } - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/JavaAdminClientTest.java b/test/kafka/kafka-client-loadtest/tools/JavaAdminClientTest.java deleted file mode 100644 index 177a86233..000000000 --- a/test/kafka/kafka-client-loadtest/tools/JavaAdminClientTest.java +++ /dev/null @@ -1,72 +0,0 @@ -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.admin.AdminClientConfig; -import org.apache.kafka.clients.admin.DescribeClusterResult; -import org.apache.kafka.clients.admin.ListTopicsResult; - -import java.util.Properties; -import java.util.concurrent.TimeUnit; - -public class JavaAdminClientTest { - public static void main(String[] args) { - // Set uncaught exception handler to catch AdminClient thread errors - Thread.setDefaultUncaughtExceptionHandler((t, e) -> { - System.err.println("UNCAUGHT EXCEPTION in thread " + t.getName() + ":"); - e.printStackTrace(); - }); - - String bootstrapServers = args.length > 0 ? args[0] : "localhost:9093"; - - System.out.println("Testing Kafka wire protocol with broker: " + bootstrapServers); - - Properties props = new Properties(); - props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, 10000); - props.put(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 10000); - props.put(AdminClientConfig.CLIENT_ID_CONFIG, "java-admin-test"); - props.put(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, 120000); - props.put(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, 10000); - props.put(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, 30000); - props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT"); - props.put(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG, 50); - props.put(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, 1000); - - System.out.println("Creating AdminClient with config:"); - props.forEach((k, v) -> System.out.println(" " + k + " = " + v)); - - try (AdminClient adminClient = AdminClient.create(props)) { - System.out.println("AdminClient created successfully"); - Thread.sleep(2000); // Give it time to initialize - - // Test 1: Describe Cluster (uses Metadata API internally) - System.out.println("\n=== Test 1: Describe Cluster ==="); - try { - DescribeClusterResult clusterResult = adminClient.describeCluster(); - String clusterId = clusterResult.clusterId().get(10, TimeUnit.SECONDS); - int nodeCount = clusterResult.nodes().get(10, TimeUnit.SECONDS).size(); - System.out.println("Cluster ID: " + clusterId); - System.out.println("Nodes: " + nodeCount); - } catch (Exception e) { - System.err.println("Describe Cluster failed: " + e.getMessage()); - e.printStackTrace(); - } - - // Test 2: List Topics - System.out.println("\n=== Test 2: List Topics ==="); - try { - ListTopicsResult topicsResult = adminClient.listTopics(); - int topicCount = topicsResult.names().get(10, TimeUnit.SECONDS).size(); - System.out.println("Topics: " + topicCount); - } catch (Exception e) { - System.err.println("List Topics failed: " + e.getMessage()); - e.printStackTrace(); - } - - System.out.println("\nAll tests completed!"); - - } catch (Exception e) { - System.err.println("AdminClient creation failed: " + e.getMessage()); - e.printStackTrace(); - System.exit(1); - } - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/JavaKafkaConsumer.java b/test/kafka/kafka-client-loadtest/tools/JavaKafkaConsumer.java deleted file mode 100644 index 41c884544..000000000 --- a/test/kafka/kafka-client-loadtest/tools/JavaKafkaConsumer.java +++ /dev/null @@ -1,82 +0,0 @@ -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.common.serialization.StringDeserializer; - -import java.time.Duration; -import java.util.Collections; -import java.util.Properties; - -public class JavaKafkaConsumer { - public static void main(String[] args) { - if (args.length < 2) { - System.err.println("Usage: java JavaKafkaConsumer "); - System.exit(1); - } - - String broker = args[0]; - String topic = args[1]; - - System.out.println("Connecting to Kafka broker: " + broker); - System.out.println("Topic: " + topic); - - Properties props = new Properties(); - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, broker); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "java-test-group"); - props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); - props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); - props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10"); - props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "1"); - props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "1000"); - - KafkaConsumer consumer = new KafkaConsumer<>(props); - consumer.subscribe(Collections.singletonList(topic)); - - System.out.println("Starting to consume messages..."); - - int messageCount = 0; - int errorCount = 0; - long startTime = System.currentTimeMillis(); - - try { - while (true) { - try { - ConsumerRecords records = consumer.poll(Duration.ofMillis(1000)); - - for (ConsumerRecord record : records) { - messageCount++; - System.out.printf("Message #%d: topic=%s partition=%d offset=%d key=%s value=%s%n", - messageCount, record.topic(), record.partition(), record.offset(), - record.key(), record.value()); - } - - // Stop after 100 messages or 60 seconds - if (messageCount >= 100 || (System.currentTimeMillis() - startTime) > 60000) { - long duration = System.currentTimeMillis() - startTime; - System.out.printf("%nSuccessfully consumed %d messages in %dms%n", messageCount, duration); - System.out.printf("Success rate: %.1f%% (%d/%d including errors)%n", - (double) messageCount / (messageCount + errorCount) * 100, messageCount, - messageCount + errorCount); - break; - } - } catch (Exception e) { - errorCount++; - System.err.printf("Error during poll #%d: %s%n", errorCount, e.getMessage()); - e.printStackTrace(); - - // Stop after 10 consecutive errors or 60 seconds - if (errorCount > 10 || (System.currentTimeMillis() - startTime) > 60000) { - long duration = System.currentTimeMillis() - startTime; - System.err.printf("%nStopping after %d errors in %dms%n", errorCount, duration); - break; - } - } - } - } finally { - consumer.close(); - } - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/JavaProducerTest.java b/test/kafka/kafka-client-loadtest/tools/JavaProducerTest.java deleted file mode 100644 index e9898d5f0..000000000 --- a/test/kafka/kafka-client-loadtest/tools/JavaProducerTest.java +++ /dev/null @@ -1,68 +0,0 @@ -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.serialization.StringSerializer; - -import java.util.Properties; -import java.util.concurrent.Future; - -public class JavaProducerTest { - public static void main(String[] args) { - String bootstrapServers = args.length > 0 ? args[0] : "localhost:9093"; - String topicName = args.length > 1 ? args[1] : "test-topic"; - - System.out.println("Testing Kafka Producer with broker: " + bootstrapServers); - System.out.println(" Topic: " + topicName); - - Properties props = new Properties(); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - props.put(ProducerConfig.CLIENT_ID_CONFIG, "java-producer-test"); - props.put(ProducerConfig.ACKS_CONFIG, "1"); - props.put(ProducerConfig.RETRIES_CONFIG, 0); - props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000); - - System.out.println("Creating Producer with config:"); - props.forEach((k, v) -> System.out.println(" " + k + " = " + v)); - - try (KafkaProducer producer = new KafkaProducer<>(props)) { - System.out.println("Producer created successfully"); - - // Try to send a test message - System.out.println("\n=== Test: Send Message ==="); - try { - ProducerRecord record = new ProducerRecord<>(topicName, "key1", "value1"); - System.out.println("Sending record to topic: " + topicName); - Future future = producer.send(record); - - RecordMetadata metadata = future.get(); // This will block and wait for response - System.out.println("Message sent successfully!"); - System.out.println(" Topic: " + metadata.topic()); - System.out.println(" Partition: " + metadata.partition()); - System.out.println(" Offset: " + metadata.offset()); - } catch (Exception e) { - System.err.println("Send failed: " + e.getMessage()); - e.printStackTrace(); - - // Print cause chain - Throwable cause = e.getCause(); - int depth = 1; - while (cause != null && depth < 5) { - System.err.println( - " Cause " + depth + ": " + cause.getClass().getName() + ": " + cause.getMessage()); - cause = cause.getCause(); - depth++; - } - } - - System.out.println("\nTest completed!"); - - } catch (Exception e) { - System.err.println("Producer creation or operation failed: " + e.getMessage()); - e.printStackTrace(); - System.exit(1); - } - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/SchemaRegistryTest.java b/test/kafka/kafka-client-loadtest/tools/SchemaRegistryTest.java deleted file mode 100644 index 3c33ae0ea..000000000 --- a/test/kafka/kafka-client-loadtest/tools/SchemaRegistryTest.java +++ /dev/null @@ -1,124 +0,0 @@ -package tools; - -import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient; -import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient; -import org.apache.avro.Schema; -import org.apache.avro.SchemaBuilder; - -public class SchemaRegistryTest { - private static final String SCHEMA_REGISTRY_URL = "http://localhost:8081"; - - public static void main(String[] args) { - System.out.println("================================================================================"); - System.out.println("Schema Registry Test - Verifying In-Memory Read Optimization"); - System.out.println("================================================================================\n"); - - SchemaRegistryClient schemaRegistry = new CachedSchemaRegistryClient(SCHEMA_REGISTRY_URL, 100); - boolean allTestsPassed = true; - - try { - // Test 1: Register first schema - System.out.println("Test 1: Registering first schema (user-value)..."); - Schema userValueSchema = SchemaBuilder - .record("User").fields() - .requiredString("name") - .requiredInt("age") - .endRecord(); - - long startTime = System.currentTimeMillis(); - int schema1Id = schemaRegistry.register("user-value", userValueSchema); - long elapsedTime = System.currentTimeMillis() - startTime; - System.out.println("โœ“ SUCCESS: Schema registered with ID: " + schema1Id + " (took " + elapsedTime + "ms)"); - - // Test 2: Register second schema immediately (tests read-after-write) - System.out.println("\nTest 2: Registering second schema immediately (user-key)..."); - Schema userKeySchema = SchemaBuilder - .record("UserKey").fields() - .requiredString("userId") - .endRecord(); - - startTime = System.currentTimeMillis(); - int schema2Id = schemaRegistry.register("user-key", userKeySchema); - elapsedTime = System.currentTimeMillis() - startTime; - System.out.println("โœ“ SUCCESS: Schema registered with ID: " + schema2Id + " (took " + elapsedTime + "ms)"); - - // Test 3: Rapid fire registrations (tests concurrent writes) - System.out.println("\nTest 3: Rapid fire registrations (10 schemas in parallel)..."); - startTime = System.currentTimeMillis(); - Thread[] threads = new Thread[10]; - final boolean[] results = new boolean[10]; - - for (int i = 0; i < 10; i++) { - final int index = i; - threads[i] = new Thread(() -> { - try { - Schema schema = SchemaBuilder - .record("Test" + index).fields() - .requiredString("field" + index) - .endRecord(); - schemaRegistry.register("test-" + index + "-value", schema); - results[index] = true; - } catch (Exception e) { - System.err.println("โœ— ERROR in thread " + index + ": " + e.getMessage()); - results[index] = false; - } - }); - threads[i].start(); - } - - for (Thread thread : threads) { - thread.join(); - } - - elapsedTime = System.currentTimeMillis() - startTime; - int successCount = 0; - for (boolean result : results) { - if (result) successCount++; - } - - if (successCount == 10) { - System.out.println("โœ“ SUCCESS: All 10 schemas registered (took " + elapsedTime + "ms total, ~" + (elapsedTime / 10) + "ms per schema)"); - } else { - System.out.println("โœ— PARTIAL FAILURE: Only " + successCount + "/10 schemas registered"); - allTestsPassed = false; - } - - // Test 4: Verify we can retrieve all schemas - System.out.println("\nTest 4: Verifying all schemas are retrievable..."); - startTime = System.currentTimeMillis(); - Schema retrieved1 = schemaRegistry.getById(schema1Id); - Schema retrieved2 = schemaRegistry.getById(schema2Id); - elapsedTime = System.currentTimeMillis() - startTime; - - if (retrieved1.equals(userValueSchema) && retrieved2.equals(userKeySchema)) { - System.out.println("โœ“ SUCCESS: All schemas retrieved correctly (took " + elapsedTime + "ms)"); - } else { - System.out.println("โœ— FAILURE: Schema mismatch"); - allTestsPassed = false; - } - - // Summary - System.out.println("\n==============================================================================="); - if (allTestsPassed) { - System.out.println("โœ“ ALL TESTS PASSED!"); - System.out.println("==============================================================================="); - System.out.println("\nOptimization verified:"); - System.out.println("- ForceFlush is NO LONGER NEEDED"); - System.out.println("- Subscribers read from in-memory buffer using IsOffsetInMemory()"); - System.out.println("- Per-subscriber notification channels provide instant wake-up"); - System.out.println("- True concurrent writes without serialization"); - System.exit(0); - } else { - System.out.println("โœ— SOME TESTS FAILED"); - System.out.println("==============================================================================="); - System.exit(1); - } - - } catch (Exception e) { - System.err.println("\nโœ— FATAL ERROR: " + e.getMessage()); - e.printStackTrace(); - System.exit(1); - } - } -} - diff --git a/test/kafka/kafka-client-loadtest/tools/TestSocketReadiness.java b/test/kafka/kafka-client-loadtest/tools/TestSocketReadiness.java deleted file mode 100644 index f334c045a..000000000 --- a/test/kafka/kafka-client-loadtest/tools/TestSocketReadiness.java +++ /dev/null @@ -1,78 +0,0 @@ -import java.net.*; -import java.nio.*; -import java.nio.channels.*; - -public class TestSocketReadiness { - public static void main(String[] args) throws Exception { - String host = args.length > 0 ? args[0] : "localhost"; - int port = args.length > 1 ? Integer.parseInt(args[1]) : 9093; - - System.out.println("Testing socket readiness with " + host + ":" + port); - - // Test 1: Simple blocking connect - System.out.println("\n=== Test 1: Blocking Socket ==="); - try (Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(host, port), 5000); - System.out.println("Blocking socket connected"); - System.out.println(" Available bytes: " + socket.getInputStream().available()); - Thread.sleep(100); - System.out.println(" Available bytes after 100ms: " + socket.getInputStream().available()); - } catch (Exception e) { - System.err.println("Blocking socket failed: " + e.getMessage()); - } - - // Test 2: Non-blocking NIO socket (like Kafka client uses) - System.out.println("\n=== Test 2: Non-blocking NIO Socket ==="); - Selector selector = Selector.open(); - SocketChannel channel = SocketChannel.open(); - channel.configureBlocking(false); - - try { - boolean connected = channel.connect(new InetSocketAddress(host, port)); - System.out.println(" connect() returned: " + connected); - - SelectionKey key = channel.register(selector, SelectionKey.OP_CONNECT); - - int ready = selector.select(5000); - System.out.println(" selector.select() returned: " + ready); - - if (ready > 0) { - for (SelectionKey k : selector.selectedKeys()) { - if (k.isConnectable()) { - System.out.println(" isConnectable: true"); - boolean finished = channel.finishConnect(); - System.out.println(" finishConnect() returned: " + finished); - - if (finished) { - k.interestOps(SelectionKey.OP_READ); - - // Now check if immediately readable (THIS is what might be wrong) - selector.selectedKeys().clear(); - int readReady = selector.selectNow(); - System.out.println(" Immediately after connect, selectNow() = " + readReady); - - if (readReady > 0) { - System.out.println(" Socket is IMMEDIATELY readable (unexpected!)"); - ByteBuffer buf = ByteBuffer.allocate(1); - int bytesRead = channel.read(buf); - System.out.println(" read() returned: " + bytesRead); - } else { - System.out.println(" Socket is NOT immediately readable (correct)"); - } - } - } - } - } - - System.out.println("NIO socket test completed"); - } catch (Exception e) { - System.err.println("NIO socket failed: " + e.getMessage()); - e.printStackTrace(); - } finally { - channel.close(); - selector.close(); - } - - System.out.println("\nAll tests completed"); - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/go.mod b/test/kafka/kafka-client-loadtest/tools/go.mod deleted file mode 100644 index c63d94230..000000000 --- a/test/kafka/kafka-client-loadtest/tools/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module simple-test - -go 1.24.7 - -require github.com/segmentio/kafka-go v0.4.49 - -require ( - github.com/klauspost/compress v1.15.9 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect -) diff --git a/test/kafka/kafka-client-loadtest/tools/go.sum b/test/kafka/kafka-client-loadtest/tools/go.sum deleted file mode 100644 index 74b476c2d..000000000 --- a/test/kafka/kafka-client-loadtest/tools/go.sum +++ /dev/null @@ -1,24 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk= -github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/kafka/kafka-client-loadtest/tools/kafka-go-consumer.go b/test/kafka/kafka-client-loadtest/tools/kafka-go-consumer.go deleted file mode 100644 index 1da40c89f..000000000 --- a/test/kafka/kafka-client-loadtest/tools/kafka-go-consumer.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "context" - "log" - "os" - "time" - - "github.com/segmentio/kafka-go" -) - -func main() { - if len(os.Args) < 3 { - log.Fatal("Usage: kafka-go-consumer ") - } - broker := os.Args[1] - topic := os.Args[2] - - log.Printf("Connecting to Kafka broker: %s", broker) - log.Printf("Topic: %s", topic) - - // Create a new reader - r := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{broker}, - Topic: topic, - GroupID: "kafka-go-test-group", - MinBytes: 1, - MaxBytes: 10e6, // 10MB - MaxWait: 1 * time.Second, - }) - defer r.Close() - - log.Printf("Starting to consume messages...") - - ctx := context.Background() - messageCount := 0 - errorCount := 0 - startTime := time.Now() - - for { - m, err := r.ReadMessage(ctx) - if err != nil { - errorCount++ - log.Printf("Error reading message #%d: %v", messageCount+1, err) - - // Stop after 10 consecutive errors or 60 seconds - if errorCount > 10 || time.Since(startTime) > 60*time.Second { - log.Printf("\nStopping after %d errors in %v", errorCount, time.Since(startTime)) - break - } - continue - } - - // Reset error count on successful read - errorCount = 0 - messageCount++ - - log.Printf("Message #%d: topic=%s partition=%d offset=%d key=%s value=%s", - messageCount, m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) - - // Stop after 100 messages or 60 seconds - if messageCount >= 100 || time.Since(startTime) > 60*time.Second { - log.Printf("\nSuccessfully consumed %d messages in %v", messageCount, time.Since(startTime)) - log.Printf("Success rate: %.1f%% (%d/%d including errors)", - float64(messageCount)/float64(messageCount+errorCount)*100, messageCount, messageCount+errorCount) - break - } - } -} diff --git a/test/kafka/kafka-client-loadtest/tools/log4j.properties b/test/kafka/kafka-client-loadtest/tools/log4j.properties deleted file mode 100644 index ed0cd0fe5..000000000 --- a/test/kafka/kafka-client-loadtest/tools/log4j.properties +++ /dev/null @@ -1,12 +0,0 @@ -log4j.rootLogger=DEBUG, stdout - -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c: %m%n - -# More verbose for Kafka client -log4j.logger.org.apache.kafka=DEBUG -log4j.logger.org.apache.kafka.clients=TRACE -log4j.logger.org.apache.kafka.clients.NetworkClient=TRACE - - diff --git a/test/kafka/kafka-client-loadtest/tools/pom.xml b/test/kafka/kafka-client-loadtest/tools/pom.xml deleted file mode 100644 index 58a858e95..000000000 --- a/test/kafka/kafka-client-loadtest/tools/pom.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - 4.0.0 - - com.seaweedfs.test - kafka-consumer-test - 1.0-SNAPSHOT - - - 11 - 11 - 3.9.1 - 7.6.0 - - - - - confluent - https://packages.confluent.io/maven/ - - - - - - org.apache.kafka - kafka-clients - ${kafka.version} - - - io.confluent - kafka-schema-registry-client - ${confluent.version} - - - io.confluent - kafka-avro-serializer - ${confluent.version} - - - org.apache.avro - avro - 1.11.4 - - - org.slf4j - slf4j-simple - 2.0.9 - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.11.0 - - - org.codehaus.mojo - exec-maven-plugin - 3.1.0 - - tools.SchemaRegistryTest - - - - - - - diff --git a/test/kafka/kafka-client-loadtest/tools/simple-test b/test/kafka/kafka-client-loadtest/tools/simple-test deleted file mode 100755 index 47eef7386..000000000 Binary files a/test/kafka/kafka-client-loadtest/tools/simple-test and /dev/null differ diff --git a/test/kafka/kafka-client-loadtest/verify_schema_formats.sh b/test/kafka/kafka-client-loadtest/verify_schema_formats.sh deleted file mode 100755 index 6ded75b33..000000000 --- a/test/kafka/kafka-client-loadtest/verify_schema_formats.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# Verify schema format distribution across topics - -set -e - -SCHEMA_REGISTRY_URL="${SCHEMA_REGISTRY_URL:-http://localhost:8081}" -TOPIC_PREFIX="${TOPIC_PREFIX:-loadtest-topic}" -TOPIC_COUNT="${TOPIC_COUNT:-5}" - -echo "================================" -echo "Schema Format Verification" -echo "================================" -echo "" -echo "Schema Registry: $SCHEMA_REGISTRY_URL" -echo "Topic Prefix: $TOPIC_PREFIX" -echo "Topic Count: $TOPIC_COUNT" -echo "" - -echo "Registered Schemas:" -echo "-------------------" - -for i in $(seq 0 $((TOPIC_COUNT-1))); do - topic="${TOPIC_PREFIX}-${i}" - subject="${topic}-value" - - echo -n "Topic $i ($topic): " - - # Try to get schema - response=$(curl -s "${SCHEMA_REGISTRY_URL}/subjects/${subject}/versions/latest" 2>/dev/null || echo '{"error":"not found"}') - - if echo "$response" | grep -q "error"; then - echo "โŒ NOT REGISTERED" - else - schema_type=$(echo "$response" | grep -o '"schemaType":"[^"]*"' | cut -d'"' -f4) - schema_id=$(echo "$response" | grep -o '"id":[0-9]*' | cut -d':' -f2) - - if [ -z "$schema_type" ]; then - schema_type="AVRO" # Default if not specified - fi - - # Expected format based on index - if [ $((i % 2)) -eq 0 ]; then - expected="AVRO" - else - expected="JSON" - fi - - if [ "$schema_type" = "$expected" ]; then - echo "โœ… $schema_type (ID: $schema_id) - matches expected" - else - echo "โš ๏ธ $schema_type (ID: $schema_id) - expected $expected" - fi - fi -done - -echo "" -echo "Expected Distribution:" -echo "----------------------" -echo "Even indices (0, 2, 4, ...): AVRO" -echo "Odd indices (1, 3, 5, ...): JSON" -echo "" - - diff --git a/test/kafka/loadtest/mock_million_record_test.go b/test/kafka/loadtest/mock_million_record_test.go deleted file mode 100644 index ada018cbb..000000000 --- a/test/kafka/loadtest/mock_million_record_test.go +++ /dev/null @@ -1,622 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "math/rand" - "strconv" - "sync" - "sync/atomic" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestRecord represents a record with reasonable fields for integration testing -type MockTestRecord struct { - ID string - UserID int64 - Timestamp int64 - Event string - Data map[string]interface{} - Metadata map[string]string -} - -// GenerateTestRecord creates a realistic test record -func GenerateMockTestRecord(id int) MockTestRecord { - events := []string{"user_login", "user_logout", "page_view", "purchase", "signup", "profile_update", "search"} - metadata := map[string]string{ - "source": "web", - "version": "1.0.0", - "region": "us-west-2", - "client_ip": fmt.Sprintf("192.168.%d.%d", rand.Intn(255), rand.Intn(255)), - } - - data := map[string]interface{}{ - "session_id": fmt.Sprintf("sess_%d_%d", id, time.Now().Unix()), - "user_agent": "Mozilla/5.0 (compatible; SeaweedFS-Test/1.0)", - "referrer": "https://example.com/page" + strconv.Itoa(rand.Intn(100)), - "duration": rand.Intn(3600), // seconds - "score": rand.Float64() * 100, - } - - return MockTestRecord{ - ID: fmt.Sprintf("record_%d", id), - UserID: int64(rand.Intn(10000) + 1), - Timestamp: time.Now().UnixNano(), - Event: events[rand.Intn(len(events))], - Data: data, - Metadata: metadata, - } -} - -// SerializeTestRecord converts TestRecord to key-value pair for Kafka -func SerializeMockTestRecord(record MockTestRecord) ([]byte, []byte) { - key := fmt.Sprintf("user_%d:%s", record.UserID, record.ID) - - // Create a realistic JSON-like value with reasonable size (200-500 bytes) - value := fmt.Sprintf(`{ - "id": "%s", - "user_id": %d, - "timestamp": %d, - "event": "%s", - "session_id": "%v", - "user_agent": "%v", - "referrer": "%v", - "duration": %v, - "score": %.2f, - "source": "%s", - "version": "%s", - "region": "%s", - "client_ip": "%s", - "batch_info": "This is additional data to make the record size more realistic for testing purposes. It simulates the kind of metadata and context that would typically be included in real-world event data." - }`, - record.ID, - record.UserID, - record.Timestamp, - record.Event, - record.Data["session_id"], - record.Data["user_agent"], - record.Data["referrer"], - record.Data["duration"], - record.Data["score"], - record.Metadata["source"], - record.Metadata["version"], - record.Metadata["region"], - record.Metadata["client_ip"], - ) - - return []byte(key), []byte(value) -} - -// DirectBrokerClient connects directly to the broker without discovery -type DirectBrokerClient struct { - brokerAddress string - conn *grpc.ClientConn - client mq_pb.SeaweedMessagingClient - - // Publisher streams: topic-partition -> stream info - publishersLock sync.RWMutex - publishers map[string]*PublisherSession - - ctx context.Context - cancel context.CancelFunc -} - -// PublisherSession tracks a publishing stream to SeaweedMQ broker -type PublisherSession struct { - Topic string - Partition int32 - Stream mq_pb.SeaweedMessaging_PublishMessageClient - MessageCount int64 // Track messages sent for batch ack handling -} - -func NewDirectBrokerClient(brokerAddr string) (*DirectBrokerClient, error) { - ctx, cancel := context.WithCancel(context.Background()) - - // Add connection timeout and keepalive settings - conn, err := grpc.DialContext(ctx, brokerAddr, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithTimeout(30*time.Second), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 30 * time.Second, // Increased from 10s to 30s - Timeout: 10 * time.Second, // Increased from 5s to 10s - PermitWithoutStream: false, // Changed to false to reduce pings - })) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to connect to broker: %v", err) - } - - client := mq_pb.NewSeaweedMessagingClient(conn) - - return &DirectBrokerClient{ - brokerAddress: brokerAddr, - conn: conn, - client: client, - publishers: make(map[string]*PublisherSession), - ctx: ctx, - cancel: cancel, - }, nil -} - -func (c *DirectBrokerClient) Close() { - c.cancel() - - // Close all publisher streams - c.publishersLock.Lock() - for key := range c.publishers { - delete(c.publishers, key) - } - c.publishersLock.Unlock() - - c.conn.Close() -} - -func (c *DirectBrokerClient) ConfigureTopic(topicName string, partitions int32) error { - topic := &schema_pb.Topic{ - Namespace: "kafka", - Name: topicName, - } - - // Create schema for MockTestRecord - recordType := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "id", - FieldIndex: 0, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}, - }, - }, - { - Name: "user_id", - FieldIndex: 1, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}, - }, - }, - { - Name: "timestamp", - FieldIndex: 2, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}, - }, - }, - { - Name: "event", - FieldIndex: 3, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}, - }, - }, - { - Name: "data", - FieldIndex: 4, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}, // JSON string - }, - }, - { - Name: "metadata", - FieldIndex: 5, - Type: &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}, // JSON string - }, - }, - }, - } - - // Use user_id as the key column for partitioning - keyColumns := []string{"user_id"} - - _, err := c.client.ConfigureTopic(c.ctx, &mq_pb.ConfigureTopicRequest{ - Topic: topic, - PartitionCount: partitions, - MessageRecordType: recordType, - KeyColumns: keyColumns, - }) - return err -} - -func (c *DirectBrokerClient) PublishRecord(topicName string, partition int32, key, value []byte) error { - session, err := c.getOrCreatePublisher(topicName, partition) - if err != nil { - return err - } - - // Send data message using broker API format - dataMsg := &mq_pb.DataMessage{ - Key: key, - Value: value, - TsNs: time.Now().UnixNano(), - } - - if err := session.Stream.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Data{ - Data: dataMsg, - }, - }); err != nil { - return fmt.Errorf("failed to send data: %v", err) - } - - // Don't wait for individual acks! AckInterval=100 means acks come in batches - // The broker will handle acknowledgments asynchronously - return nil -} - -// getOrCreatePublisher gets or creates a publisher stream for a topic-partition -func (c *DirectBrokerClient) getOrCreatePublisher(topic string, partition int32) (*PublisherSession, error) { - key := fmt.Sprintf("%s-%d", topic, partition) - - // Try to get existing publisher - c.publishersLock.RLock() - if session, exists := c.publishers[key]; exists { - c.publishersLock.RUnlock() - return session, nil - } - c.publishersLock.RUnlock() - - // Create new publisher stream - c.publishersLock.Lock() - defer c.publishersLock.Unlock() - - // Double-check after acquiring write lock - if session, exists := c.publishers[key]; exists { - return session, nil - } - - // Create the stream - stream, err := c.client.PublishMessage(c.ctx) - if err != nil { - return nil, fmt.Errorf("failed to create publish stream: %v", err) - } - - // Get the actual partition assignment from the broker - actualPartition, err := c.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get actual partition assignment: %v", err) - } - - // Send init message using the actual partition structure that the broker allocated - if err := stream.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Init{ - Init: &mq_pb.PublishMessageRequest_InitMessage{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - Partition: actualPartition, - AckInterval: 200, // Ack every 200 messages for better balance - PublisherName: "direct-test", - }, - }, - }); err != nil { - return nil, fmt.Errorf("failed to send init message: %v", err) - } - - session := &PublisherSession{ - Topic: topic, - Partition: partition, - Stream: stream, - MessageCount: 0, - } - - c.publishers[key] = session - return session, nil -} - -// getActualPartitionAssignment looks up the actual partition assignment from the broker configuration -func (c *DirectBrokerClient) getActualPartitionAssignment(topic string, kafkaPartition int32) (*schema_pb.Partition, error) { - // Look up the topic configuration from the broker to get the actual partition assignments - lookupResp, err := c.client.LookupTopicBrokers(c.ctx, &mq_pb.LookupTopicBrokersRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to lookup topic brokers: %v", err) - } - - if len(lookupResp.BrokerPartitionAssignments) == 0 { - return nil, fmt.Errorf("no partition assignments found for topic %s", topic) - } - - totalPartitions := int32(len(lookupResp.BrokerPartitionAssignments)) - if kafkaPartition >= totalPartitions { - return nil, fmt.Errorf("kafka partition %d out of range, topic %s has %d partitions", - kafkaPartition, topic, totalPartitions) - } - - // Calculate expected range for this Kafka partition - // Ring is divided equally among partitions, with last partition getting any remainder - const ringSize = int32(2520) // MaxPartitionCount constant - rangeSize := ringSize / totalPartitions - expectedRangeStart := kafkaPartition * rangeSize - var expectedRangeStop int32 - - if kafkaPartition == totalPartitions-1 { - // Last partition gets the remainder to fill the entire ring - expectedRangeStop = ringSize - } else { - expectedRangeStop = (kafkaPartition + 1) * rangeSize - } - - // Find the broker assignment that matches this range - for _, assignment := range lookupResp.BrokerPartitionAssignments { - if assignment.Partition == nil { - continue - } - - // Check if this assignment's range matches our expected range - if assignment.Partition.RangeStart == expectedRangeStart && assignment.Partition.RangeStop == expectedRangeStop { - return assignment.Partition, nil - } - } - - return nil, fmt.Errorf("no broker assignment found for Kafka partition %d with expected range [%d, %d]", - kafkaPartition, expectedRangeStart, expectedRangeStop) -} - -// TestDirectBroker_MillionRecordsIntegration tests the broker directly without discovery -func TestDirectBroker_MillionRecordsIntegration(t *testing.T) { - // Skip by default - this is a large integration test - if testing.Short() { - t.Skip("Skipping million-record integration test in short mode") - } - - // Configuration - const ( - totalRecords = 1000000 - numPartitions = int32(8) // Use multiple partitions for better performance - numProducers = 4 // Concurrent producers - brokerAddr = "localhost:17777" - ) - - // Create direct broker client for topic configuration - configClient, err := NewDirectBrokerClient(brokerAddr) - if err != nil { - t.Fatalf("Failed to create direct broker client: %v", err) - } - defer configClient.Close() - - topicName := fmt.Sprintf("million-records-direct-test-%d", time.Now().Unix()) - - // Create topic - glog.Infof("Creating topic %s with %d partitions", topicName, numPartitions) - err = configClient.ConfigureTopic(topicName, numPartitions) - if err != nil { - t.Fatalf("Failed to configure topic: %v", err) - } - - // Performance tracking - var totalProduced int64 - var totalErrors int64 - startTime := time.Now() - - // Progress tracking - ticker := time.NewTicker(10 * time.Second) - defer ticker.Stop() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { - for { - select { - case <-ticker.C: - produced := atomic.LoadInt64(&totalProduced) - errors := atomic.LoadInt64(&totalErrors) - elapsed := time.Since(startTime) - rate := float64(produced) / elapsed.Seconds() - glog.Infof("Progress: %d/%d records (%.1f%%), rate: %.0f records/sec, errors: %d", - produced, totalRecords, float64(produced)/float64(totalRecords)*100, rate, errors) - case <-ctx.Done(): - return - } - } - }() - - // Producer function - producer := func(producerID int, recordsPerProducer int) error { - defer func() { - glog.Infof("Producer %d finished", producerID) - }() - - // Create dedicated client for this producer - producerClient, err := NewDirectBrokerClient(brokerAddr) - if err != nil { - return fmt.Errorf("Producer %d failed to create client: %v", producerID, err) - } - defer producerClient.Close() - - // Add timeout context for each producer - producerCtx, producerCancel := context.WithTimeout(ctx, 10*time.Minute) - defer producerCancel() - - glog.Infof("Producer %d: About to start producing %d records with dedicated client", producerID, recordsPerProducer) - - for i := 0; i < recordsPerProducer; i++ { - // Check if context is cancelled or timed out - select { - case <-producerCtx.Done(): - glog.Errorf("Producer %d timed out or cancelled after %d records", producerID, i) - return producerCtx.Err() - default: - } - - // Debug progress for all producers every 50k records - if i > 0 && i%50000 == 0 { - glog.Infof("Producer %d: Progress %d/%d records (%.1f%%)", producerID, i, recordsPerProducer, float64(i)/float64(recordsPerProducer)*100) - } - // Calculate global record ID - recordID := producerID*recordsPerProducer + i - - // Generate test record - testRecord := GenerateMockTestRecord(recordID) - key, value := SerializeMockTestRecord(testRecord) - - // Distribute across partitions based on user ID - partition := int32(testRecord.UserID % int64(numPartitions)) - - // Debug first few records for each producer - if i < 3 { - glog.Infof("Producer %d: Record %d -> UserID %d -> Partition %d", producerID, i, testRecord.UserID, partition) - } - - // Produce the record with retry logic - var err error - maxRetries := 3 - for retry := 0; retry < maxRetries; retry++ { - err = producerClient.PublishRecord(topicName, partition, key, value) - if err == nil { - break // Success - } - - // If it's an EOF error, wait a bit before retrying - if err.Error() == "failed to send data: EOF" { - time.Sleep(time.Duration(retry+1) * 100 * time.Millisecond) - continue - } - - // For other errors, don't retry - break - } - - if err != nil { - atomic.AddInt64(&totalErrors, 1) - errorCount := atomic.LoadInt64(&totalErrors) - if errorCount < 20 { // Log first 20 errors to get more insight - glog.Errorf("Producer %d failed to produce record %d (i=%d) after %d retries: %v", producerID, recordID, i, maxRetries, err) - } - // Don't continue - this might be causing producers to exit early - // Let's see what happens if we return the error instead - if errorCount > 1000 { // If too many errors, give up - glog.Errorf("Producer %d giving up after %d errors", producerID, errorCount) - return fmt.Errorf("too many errors: %d", errorCount) - } - continue - } - - atomic.AddInt64(&totalProduced, 1) - - // Log progress for first producer - if producerID == 0 && (i+1)%10000 == 0 { - glog.Infof("Producer %d: produced %d records", producerID, i+1) - } - } - - glog.Infof("Producer %d: Completed loop, produced %d records successfully", producerID, recordsPerProducer) - return nil - } - - // Start concurrent producers - glog.Infof("Starting %d concurrent producers to produce %d records", numProducers, totalRecords) - - var wg sync.WaitGroup - recordsPerProducer := totalRecords / numProducers - - for i := 0; i < numProducers; i++ { - wg.Add(1) - go func(producerID int) { - defer wg.Done() - glog.Infof("Producer %d starting with %d records to produce", producerID, recordsPerProducer) - if err := producer(producerID, recordsPerProducer); err != nil { - glog.Errorf("Producer %d failed: %v", producerID, err) - } - }(i) - } - - // Wait for all producers to complete - wg.Wait() - cancel() // Stop progress reporting - - produceTime := time.Since(startTime) - finalProduced := atomic.LoadInt64(&totalProduced) - finalErrors := atomic.LoadInt64(&totalErrors) - - glog.Infof("Production completed: %d records in %v (%.0f records/sec), errors: %d", - finalProduced, produceTime, float64(finalProduced)/produceTime.Seconds(), finalErrors) - - // Performance summary - if finalProduced > 0 { - glog.Infof("\n"+ - "=== PERFORMANCE SUMMARY ===\n"+ - "Records produced: %d\n"+ - "Production time: %v\n"+ - "Production rate: %.0f records/sec\n"+ - "Errors: %d (%.2f%%)\n"+ - "Partitions: %d\n"+ - "Concurrent producers: %d\n"+ - "Average record size: ~300 bytes\n"+ - "Total data: ~%.1f MB\n"+ - "Throughput: ~%.1f MB/sec\n", - finalProduced, - produceTime, - float64(finalProduced)/produceTime.Seconds(), - finalErrors, - float64(finalErrors)/float64(totalRecords)*100, - numPartitions, - numProducers, - float64(finalProduced)*300/(1024*1024), - float64(finalProduced)*300/(1024*1024)/produceTime.Seconds(), - ) - } - - // Test assertions - if finalProduced < int64(totalRecords*0.95) { // Allow 5% tolerance for errors - t.Errorf("Too few records produced: %d < %d (95%% of target)", finalProduced, int64(float64(totalRecords)*0.95)) - } - - if finalErrors > int64(totalRecords*0.05) { // Error rate should be < 5% - t.Errorf("Too many errors: %d > %d (5%% of target)", finalErrors, int64(float64(totalRecords)*0.05)) - } - - glog.Infof("Direct broker million-record integration test completed successfully!") -} - -// BenchmarkDirectBroker_ProduceThroughput benchmarks the production throughput -func BenchmarkDirectBroker_ProduceThroughput(b *testing.B) { - if testing.Short() { - b.Skip("Skipping benchmark in short mode") - } - - client, err := NewDirectBrokerClient("localhost:17777") - if err != nil { - b.Fatalf("Failed to create client: %v", err) - } - defer client.Close() - - topicName := fmt.Sprintf("benchmark-topic-%d", time.Now().Unix()) - err = client.ConfigureTopic(topicName, 1) - if err != nil { - b.Fatalf("Failed to configure topic: %v", err) - } - - // Pre-generate test data - records := make([]MockTestRecord, b.N) - for i := 0; i < b.N; i++ { - records[i] = GenerateMockTestRecord(i) - } - - b.ResetTimer() - b.StartTimer() - - for i := 0; i < b.N; i++ { - key, value := SerializeMockTestRecord(records[i]) - err := client.PublishRecord(topicName, 0, key, value) - if err != nil { - b.Fatalf("Failed to produce record %d: %v", i, err) - } - } - - b.StopTimer() -} diff --git a/test/kafka/loadtest/quick_performance_test.go b/test/kafka/loadtest/quick_performance_test.go deleted file mode 100644 index 299a7d948..000000000 --- a/test/kafka/loadtest/quick_performance_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package integration - -import ( - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// TestQuickPerformance_10K tests the fixed broker with 10K records -func TestQuickPerformance_10K(t *testing.T) { - const ( - totalRecords = 10000 // 10K records for quick test - numPartitions = int32(4) - numProducers = 4 - brokerAddr = "localhost:17777" - ) - - // Create direct broker client - client, err := NewDirectBrokerClient(brokerAddr) - if err != nil { - t.Fatalf("Failed to create direct broker client: %v", err) - } - defer client.Close() - - topicName := fmt.Sprintf("quick-test-%d", time.Now().Unix()) - - // Create topic - glog.Infof("Creating topic %s with %d partitions", topicName, numPartitions) - err = client.ConfigureTopic(topicName, numPartitions) - if err != nil { - t.Fatalf("Failed to configure topic: %v", err) - } - - // Performance tracking - var totalProduced int64 - var totalErrors int64 - startTime := time.Now() - - // Producer function - producer := func(producerID int, recordsPerProducer int) error { - for i := 0; i < recordsPerProducer; i++ { - recordID := producerID*recordsPerProducer + i - - // Generate test record - testRecord := GenerateMockTestRecord(recordID) - key, value := SerializeMockTestRecord(testRecord) - - partition := int32(testRecord.UserID % int64(numPartitions)) - - // Produce the record (now async!) - err := client.PublishRecord(topicName, partition, key, value) - if err != nil { - atomic.AddInt64(&totalErrors, 1) - if atomic.LoadInt64(&totalErrors) < 5 { - glog.Errorf("Producer %d failed to produce record %d: %v", producerID, recordID, err) - } - continue - } - - atomic.AddInt64(&totalProduced, 1) - - // Log progress - if (i+1)%1000 == 0 { - elapsed := time.Since(startTime) - rate := float64(atomic.LoadInt64(&totalProduced)) / elapsed.Seconds() - glog.Infof("Producer %d: %d records, current rate: %.0f records/sec", - producerID, i+1, rate) - } - } - return nil - } - - // Start concurrent producers - glog.Infof("Starting %d producers for %d records total", numProducers, totalRecords) - - var wg sync.WaitGroup - recordsPerProducer := totalRecords / numProducers - - for i := 0; i < numProducers; i++ { - wg.Add(1) - go func(producerID int) { - defer wg.Done() - if err := producer(producerID, recordsPerProducer); err != nil { - glog.Errorf("Producer %d failed: %v", producerID, err) - } - }(i) - } - - // Wait for completion - wg.Wait() - - produceTime := time.Since(startTime) - finalProduced := atomic.LoadInt64(&totalProduced) - finalErrors := atomic.LoadInt64(&totalErrors) - - // Performance results - throughputPerSec := float64(finalProduced) / produceTime.Seconds() - dataVolumeMB := float64(finalProduced) * 300 / (1024 * 1024) // ~300 bytes per record - throughputMBPerSec := dataVolumeMB / produceTime.Seconds() - - glog.Infof("\n"+ - "QUICK PERFORMANCE TEST RESULTS\n"+ - "=====================================\n"+ - "Records produced: %d / %d\n"+ - "Production time: %v\n"+ - "Throughput: %.0f records/sec\n"+ - "Data volume: %.1f MB\n"+ - "Bandwidth: %.1f MB/sec\n"+ - "Errors: %d (%.2f%%)\n"+ - "Success rate: %.1f%%\n", - finalProduced, totalRecords, - produceTime, - throughputPerSec, - dataVolumeMB, - throughputMBPerSec, - finalErrors, - float64(finalErrors)/float64(totalRecords)*100, - float64(finalProduced)/float64(totalRecords)*100, - ) - - // Assertions - if finalProduced < int64(totalRecords*0.90) { // Allow 10% tolerance - t.Errorf("Too few records produced: %d < %d (90%% of target)", finalProduced, int64(float64(totalRecords)*0.90)) - } - - if throughputPerSec < 100 { // Should be much higher than 1 record/sec now! - t.Errorf("Throughput too low: %.0f records/sec (expected > 100)", throughputPerSec) - } - - if finalErrors > int64(totalRecords*0.10) { // Error rate should be < 10% - t.Errorf("Too many errors: %d > %d (10%% of target)", finalErrors, int64(float64(totalRecords)*0.10)) - } - - glog.Infof("Performance test passed! Ready for million-record test.") -} diff --git a/test/kafka/loadtest/resume_million_test.go b/test/kafka/loadtest/resume_million_test.go deleted file mode 100644 index 48656c154..000000000 --- a/test/kafka/loadtest/resume_million_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package integration - -import ( - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// TestResumeMillionRecords_Fixed - Fixed version with better concurrency handling -func TestResumeMillionRecords_Fixed(t *testing.T) { - const ( - totalRecords = 1000000 - numPartitions = int32(8) - numProducers = 4 - brokerAddr = "localhost:17777" - batchSize = 100 // Process in smaller batches to avoid overwhelming - ) - - // Create direct broker client - client, err := NewDirectBrokerClient(brokerAddr) - if err != nil { - t.Fatalf("Failed to create direct broker client: %v", err) - } - defer client.Close() - - topicName := fmt.Sprintf("resume-million-test-%d", time.Now().Unix()) - - // Create topic - glog.Infof("Creating topic %s with %d partitions for RESUMED test", topicName, numPartitions) - err = client.ConfigureTopic(topicName, numPartitions) - if err != nil { - t.Fatalf("Failed to configure topic: %v", err) - } - - // Performance tracking - var totalProduced int64 - var totalErrors int64 - startTime := time.Now() - - // Progress tracking - ticker := time.NewTicker(5 * time.Second) // More frequent updates - defer ticker.Stop() - - go func() { - for range ticker.C { - produced := atomic.LoadInt64(&totalProduced) - errors := atomic.LoadInt64(&totalErrors) - elapsed := time.Since(startTime) - rate := float64(produced) / elapsed.Seconds() - progressPercent := float64(produced) / float64(totalRecords) * 100 - - glog.Infof("PROGRESS: %d/%d records (%.1f%%), rate: %.0f records/sec, errors: %d", - produced, totalRecords, progressPercent, rate, errors) - - if produced >= totalRecords { - return - } - } - }() - - // Fixed producer function with better error handling - producer := func(producerID int, recordsPerProducer int) error { - defer glog.Infof("Producer %d FINISHED", producerID) - - // Create dedicated clients per producer to avoid contention - producerClient, err := NewDirectBrokerClient(brokerAddr) - if err != nil { - return fmt.Errorf("producer %d failed to create client: %v", producerID, err) - } - defer producerClient.Close() - - successCount := 0 - for i := 0; i < recordsPerProducer; i++ { - recordID := producerID*recordsPerProducer + i - - // Generate test record - testRecord := GenerateMockTestRecord(recordID) - key, value := SerializeMockTestRecord(testRecord) - - partition := int32(testRecord.UserID % int64(numPartitions)) - - // Produce with retry logic - maxRetries := 3 - var lastErr error - success := false - - for retry := 0; retry < maxRetries; retry++ { - err := producerClient.PublishRecord(topicName, partition, key, value) - if err == nil { - success = true - break - } - lastErr = err - time.Sleep(time.Duration(retry+1) * 100 * time.Millisecond) // Exponential backoff - } - - if success { - atomic.AddInt64(&totalProduced, 1) - successCount++ - } else { - atomic.AddInt64(&totalErrors, 1) - if atomic.LoadInt64(&totalErrors) < 10 { - glog.Errorf("Producer %d failed record %d after retries: %v", producerID, recordID, lastErr) - } - } - - // Batch progress logging - if successCount > 0 && successCount%10000 == 0 { - glog.Infof("Producer %d: %d/%d records completed", producerID, successCount, recordsPerProducer) - } - - // Small delay to prevent overwhelming the broker - if i > 0 && i%batchSize == 0 { - time.Sleep(10 * time.Millisecond) - } - } - - glog.Infof("Producer %d completed: %d successful, %d errors", - producerID, successCount, recordsPerProducer-successCount) - return nil - } - - // Start concurrent producers - glog.Infof("Starting FIXED %d producers for %d records total", numProducers, totalRecords) - - var wg sync.WaitGroup - recordsPerProducer := totalRecords / numProducers - - for i := 0; i < numProducers; i++ { - wg.Add(1) - go func(producerID int) { - defer wg.Done() - if err := producer(producerID, recordsPerProducer); err != nil { - glog.Errorf("Producer %d FAILED: %v", producerID, err) - } - }(i) - } - - // Wait for completion with timeout - done := make(chan bool) - go func() { - wg.Wait() - done <- true - }() - - select { - case <-done: - glog.Infof("All producers completed normally") - case <-time.After(30 * time.Minute): // 30-minute timeout - glog.Errorf("Test timed out after 30 minutes") - t.Errorf("Test timed out") - return - } - - produceTime := time.Since(startTime) - finalProduced := atomic.LoadInt64(&totalProduced) - finalErrors := atomic.LoadInt64(&totalErrors) - - // Performance results - throughputPerSec := float64(finalProduced) / produceTime.Seconds() - dataVolumeMB := float64(finalProduced) * 300 / (1024 * 1024) - throughputMBPerSec := dataVolumeMB / produceTime.Seconds() - successRate := float64(finalProduced) / float64(totalRecords) * 100 - - glog.Infof("\n"+ - "=== FINAL MILLION RECORD TEST RESULTS ===\n"+ - "==========================================\n"+ - "Records produced: %d / %d\n"+ - "Production time: %v\n"+ - "Average throughput: %.0f records/sec\n"+ - "Data volume: %.1f MB\n"+ - "Bandwidth: %.1f MB/sec\n"+ - "Errors: %d (%.2f%%)\n"+ - "Success rate: %.1f%%\n"+ - "Partitions used: %d\n"+ - "Concurrent producers: %d\n", - finalProduced, totalRecords, - produceTime, - throughputPerSec, - dataVolumeMB, - throughputMBPerSec, - finalErrors, - float64(finalErrors)/float64(totalRecords)*100, - successRate, - numPartitions, - numProducers, - ) - - // Test assertions - if finalProduced < int64(totalRecords*0.95) { // Allow 5% tolerance - t.Errorf("Too few records produced: %d < %d (95%% of target)", finalProduced, int64(float64(totalRecords)*0.95)) - } - - if finalErrors > int64(totalRecords*0.05) { // Error rate should be < 5% - t.Errorf("Too many errors: %d > %d (5%% of target)", finalErrors, int64(float64(totalRecords)*0.05)) - } - - if throughputPerSec < 100 { - t.Errorf("Throughput too low: %.0f records/sec (expected > 100)", throughputPerSec) - } - - glog.Infof("๐Ÿ† MILLION RECORD KAFKA INTEGRATION TEST COMPLETED SUCCESSFULLY!") -} - diff --git a/test/kafka/loadtest/run_million_record_test.sh b/test/kafka/loadtest/run_million_record_test.sh deleted file mode 100755 index 0728e8121..000000000 --- a/test/kafka/loadtest/run_million_record_test.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -# Script to run the Kafka Gateway Million Record Integration Test -# This test requires a running SeaweedFS infrastructure (Master, Filer, MQ Broker) - -set -e - -echo "=== SeaweedFS Kafka Gateway Million Record Integration Test ===" -echo "Test Date: $(date)" -echo "Hostname: $(hostname)" -echo "" - -# Configuration -MASTERS=${SEAWEED_MASTERS:-"localhost:9333"} -FILER_GROUP=${SEAWEED_FILER_GROUP:-"default"} -TEST_DIR="." -TEST_NAME="TestDirectBroker_MillionRecordsIntegration" - -echo "Configuration:" -echo " Masters: $MASTERS" -echo " Filer Group: $FILER_GROUP" -echo " Test Directory: $TEST_DIR" -echo "" - -# Check if SeaweedFS infrastructure is running -echo "=== Checking Infrastructure ===" - -# Function to check if a service is running -check_service() { - local host_port=$1 - local service_name=$2 - - if timeout 3 bash -c "/dev/null; then - echo "โœ“ $service_name is running on $host_port" - return 0 - else - echo "โœ— $service_name is NOT running on $host_port" - return 1 - fi -} - -# Check each master -IFS=',' read -ra MASTER_ARRAY <<< "$MASTERS" -MASTERS_OK=true -for master in "${MASTER_ARRAY[@]}"; do - if ! check_service "$master" "SeaweedFS Master"; then - MASTERS_OK=false - fi -done - -if [ "$MASTERS_OK" = false ]; then - echo "" - echo "ERROR: One or more SeaweedFS Masters are not running." - echo "Please start your SeaweedFS infrastructure before running this test." - echo "" - echo "Example commands to start SeaweedFS:" - echo " # Terminal 1: Start Master" - echo " weed master -defaultReplication=001 -mdir=/tmp/seaweedfs/master" - echo "" - echo " # Terminal 2: Start Filer" - echo " weed filer -master=localhost:9333 -filer.dir=/tmp/seaweedfs/filer" - echo "" - echo " # Terminal 3: Start MQ Broker" - echo " weed mq.broker -filer=localhost:8888 -master=localhost:9333" - echo "" - exit 1 -fi - -echo "" -echo "=== Infrastructure Check Passed ===" -echo "" - -# Change to the correct directory -cd "$TEST_DIR" - -# Set environment variables for the test -export SEAWEED_MASTERS="$MASTERS" -export SEAWEED_FILER_GROUP="$FILER_GROUP" - -# Run the test with verbose output -echo "=== Running Million Record Integration Test ===" -echo "This may take several minutes..." -echo "" - -# Run the specific test with timeout and verbose output -timeout 1800 go test -v -run "$TEST_NAME" -timeout=30m 2>&1 | tee /tmp/seaweed_million_record_test.log - -TEST_EXIT_CODE=${PIPESTATUS[0]} - -echo "" -echo "=== Test Completed ===" -echo "Exit Code: $TEST_EXIT_CODE" -echo "Full log available at: /tmp/seaweed_million_record_test.log" -echo "" - -# Show summary from the log -echo "=== Performance Summary ===" -if grep -q "PERFORMANCE SUMMARY" /tmp/seaweed_million_record_test.log; then - grep -A 15 "PERFORMANCE SUMMARY" /tmp/seaweed_million_record_test.log -else - echo "Performance summary not found in log" -fi - -echo "" - -if [ $TEST_EXIT_CODE -eq 0 ]; then - echo "๐ŸŽ‰ TEST PASSED: Million record integration test completed successfully!" -else - echo "โŒ TEST FAILED: Million record integration test failed with exit code $TEST_EXIT_CODE" - echo "Check the log file for details: /tmp/seaweed_million_record_test.log" -fi - -echo "" -echo "=== Test Run Complete ===" -exit $TEST_EXIT_CODE diff --git a/test/kafka/loadtest/setup_seaweed_infrastructure.sh b/test/kafka/loadtest/setup_seaweed_infrastructure.sh deleted file mode 100755 index 448119097..000000000 --- a/test/kafka/loadtest/setup_seaweed_infrastructure.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -# Script to set up SeaweedFS infrastructure for Kafka Gateway testing -# This script will start Master, Filer, and MQ Broker components - -set -e - -BASE_DIR="/tmp/seaweedfs" -LOG_DIR="$BASE_DIR/logs" -DATA_DIR="$BASE_DIR/data" - -echo "=== SeaweedFS Infrastructure Setup ===" -echo "Setup Date: $(date)" -echo "Base Directory: $BASE_DIR" -echo "" - -# Create directories -mkdir -p "$BASE_DIR/master" "$BASE_DIR/filer" "$BASE_DIR/broker" "$LOG_DIR" - -# Function to check if a service is running -check_service() { - local host_port=$1 - local service_name=$2 - - if timeout 3 bash -c "/dev/null; then - echo "โœ“ $service_name is already running on $host_port" - return 0 - else - echo "โœ— $service_name is NOT running on $host_port" - return 1 - fi -} - -# Function to start a service in background -start_service() { - local cmd="$1" - local service_name="$2" - local log_file="$3" - local check_port="$4" - - echo "Starting $service_name..." - echo "Command: $cmd" - echo "Log: $log_file" - - # Start in background - nohup $cmd > "$log_file" 2>&1 & - local pid=$! - echo "PID: $pid" - - # Wait for service to be ready - local retries=30 - while [ $retries -gt 0 ]; do - if check_service "$check_port" "$service_name" 2>/dev/null; then - echo "โœ“ $service_name is ready" - return 0 - fi - retries=$((retries - 1)) - sleep 1 - echo -n "." - done - echo "" - echo "โŒ $service_name failed to start within 30 seconds" - return 1 -} - -# Stop any existing processes -echo "=== Cleaning up existing processes ===" -pkill -f "weed master" || true -pkill -f "weed filer" || true -pkill -f "weed mq.broker" || true -sleep 2 - -echo "" -echo "=== Starting SeaweedFS Components ===" - -# Start Master -if ! check_service "localhost:9333" "SeaweedFS Master"; then - start_service \ - "weed master -defaultReplication=001 -mdir=$BASE_DIR/master" \ - "SeaweedFS Master" \ - "$LOG_DIR/master.log" \ - "localhost:9333" - echo "" -fi - -# Start Filer -if ! check_service "localhost:8888" "SeaweedFS Filer"; then - start_service \ - "weed filer -master=localhost:9333 -filer.dir=$BASE_DIR/filer" \ - "SeaweedFS Filer" \ - "$LOG_DIR/filer.log" \ - "localhost:8888" - echo "" -fi - -# Start MQ Broker -if ! check_service "localhost:17777" "SeaweedFS MQ Broker"; then - start_service \ - "weed mq.broker -filer=localhost:8888 -master=localhost:9333" \ - "SeaweedFS MQ Broker" \ - "$LOG_DIR/broker.log" \ - "localhost:17777" - echo "" -fi - -echo "=== Infrastructure Status ===" -check_service "localhost:9333" "Master (gRPC)" -check_service "localhost:9334" "Master (HTTP)" -check_service "localhost:8888" "Filer (HTTP)" -check_service "localhost:18888" "Filer (gRPC)" -check_service "localhost:17777" "MQ Broker" - -echo "" -echo "=== Infrastructure Ready ===" -echo "Log files:" -echo " Master: $LOG_DIR/master.log" -echo " Filer: $LOG_DIR/filer.log" -echo " Broker: $LOG_DIR/broker.log" -echo "" -echo "To view logs in real-time:" -echo " tail -f $LOG_DIR/master.log" -echo " tail -f $LOG_DIR/filer.log" -echo " tail -f $LOG_DIR/broker.log" -echo "" -echo "To stop all services:" -echo " pkill -f \"weed master\"" -echo " pkill -f \"weed filer\"" -echo " pkill -f \"weed mq.broker\"" -echo "" -echo "[OK] SeaweedFS infrastructure is ready for testing!" - diff --git a/test/kafka/scripts/kafka-gateway-start.sh b/test/kafka/scripts/kafka-gateway-start.sh deleted file mode 100755 index 08561cef5..000000000 --- a/test/kafka/scripts/kafka-gateway-start.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh - -# Kafka Gateway Startup Script for Integration Testing - -set -e - -echo "Starting Kafka Gateway..." - -SEAWEEDFS_MASTERS=${SEAWEEDFS_MASTERS:-seaweedfs-master:9333} -SEAWEEDFS_FILER=${SEAWEEDFS_FILER:-seaweedfs-filer:8888} -SEAWEEDFS_MQ_BROKER=${SEAWEEDFS_MQ_BROKER:-seaweedfs-mq-broker:17777} -SEAWEEDFS_FILER_GROUP=${SEAWEEDFS_FILER_GROUP:-} - -# Wait for dependencies -echo "Waiting for SeaweedFS master(s)..." -OLD_IFS="$IFS" -IFS=',' -for MASTER in $SEAWEEDFS_MASTERS; do - MASTER_HOST=${MASTER%:*} - MASTER_PORT=${MASTER#*:} - while ! nc -z "$MASTER_HOST" "$MASTER_PORT"; do - sleep 1 - done - echo "SeaweedFS master $MASTER is ready" -done -IFS="$OLD_IFS" - -echo "Waiting for SeaweedFS Filer..." -while ! nc -z "${SEAWEEDFS_FILER%:*}" "${SEAWEEDFS_FILER#*:}"; do - sleep 1 -done -echo "SeaweedFS Filer is ready" - -echo "Waiting for SeaweedFS MQ Broker..." -while ! nc -z "${SEAWEEDFS_MQ_BROKER%:*}" "${SEAWEEDFS_MQ_BROKER#*:}"; do - sleep 1 -done -echo "SeaweedFS MQ Broker is ready" - -echo "Waiting for Schema Registry..." -while ! curl -f "${SCHEMA_REGISTRY_URL}/subjects" > /dev/null 2>&1; do - sleep 1 -done -echo "Schema Registry is ready" - -# Start Kafka Gateway -echo "Starting Kafka Gateway on port ${KAFKA_PORT:-9093}..." -exec /usr/bin/weed mq.kafka.gateway \ - -master=${SEAWEEDFS_MASTERS} \ - -filerGroup=${SEAWEEDFS_FILER_GROUP} \ - -port=${KAFKA_PORT:-9093} \ - -port.pprof=${PPROF_PORT:-10093} \ - -schema-registry-url=${SCHEMA_REGISTRY_URL} \ - -ip=0.0.0.0 diff --git a/test/kafka/scripts/test-broker-discovery.sh b/test/kafka/scripts/test-broker-discovery.sh deleted file mode 100644 index b4937b7f7..000000000 --- a/test/kafka/scripts/test-broker-discovery.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash - -# Test script to verify broker discovery works end-to-end - -set -e - -echo "=== Testing SeaweedFS Broker Discovery ===" - -cd /Users/chrislu/go/src/github.com/seaweedfs/seaweedfs - -# Build weed binary -echo "Building weed binary..." -go build -o /tmp/weed-discovery ./weed - -# Setup data directory -WEED_DATA_DIR="/tmp/seaweedfs-discovery-test-$$" -mkdir -p "$WEED_DATA_DIR" -echo "Using data directory: $WEED_DATA_DIR" - -# Cleanup function -cleanup() { - echo "Cleaning up..." - pkill -f "weed.*server" || true - pkill -f "weed.*mq.broker" || true - sleep 2 - rm -rf "$WEED_DATA_DIR" - rm -f /tmp/weed-discovery* /tmp/broker-discovery-test* -} -trap cleanup EXIT - -# Start SeaweedFS server with consistent IP configuration -echo "Starting SeaweedFS server..." -/tmp/weed-discovery -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="127.0.0.1" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-discovery-server.log 2>&1 & - -SERVER_PID=$! -echo "Server PID: $SERVER_PID" - -# Wait for master -echo "Waiting for master..." -for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "โœ“ Master is up" - break - fi - echo " Waiting for master... ($i/30)" - sleep 1 -done - -# Give components time to initialize -echo "Waiting for components to initialize..." -sleep 10 - -# Start MQ broker -echo "Starting MQ broker..." -/tmp/weed-discovery -v 2 mq.broker \ - -master="127.0.0.1:9333" \ - -port=17777 \ - > /tmp/weed-discovery-broker.log 2>&1 & - -BROKER_PID=$! -echo "Broker PID: $BROKER_PID" - -# Wait for broker -echo "Waiting for broker to register..." -sleep 15 -broker_ready=false -for i in $(seq 1 20); do - if nc -z 127.0.0.1 17777; then - echo "โœ“ MQ broker is accepting connections" - broker_ready=true - break - fi - echo " Waiting for MQ broker... ($i/20)" - sleep 1 -done - -if [ "$broker_ready" = false ]; then - echo "[FAIL] MQ broker failed to start" - echo "Server logs:" - cat /tmp/weed-discovery-server.log - echo "Broker logs:" - cat /tmp/weed-discovery-broker.log - exit 1 -fi - -# Additional wait for broker registration -echo "Allowing broker to register with master..." -sleep 15 - -# Check cluster status -echo "Checking cluster status..." -CLUSTER_STATUS=$(curl -s "http://127.0.0.1:9333/cluster/status") -echo "Cluster status: $CLUSTER_STATUS" - -# Now test broker discovery using the same approach as the Kafka gateway -echo "Testing broker discovery..." -cd test/kafka -SEAWEEDFS_MASTERS=127.0.0.1:9333 timeout 30s go test -v -run "TestOffsetManagement" -timeout 25s ./e2e/... > /tmp/broker-discovery-test.log 2>&1 && discovery_success=true || discovery_success=false - -if [ "$discovery_success" = true ]; then - echo "[OK] Broker discovery test PASSED!" - echo "Gateway was able to discover and connect to MQ brokers" -else - echo "[FAIL] Broker discovery test FAILED" - echo "Last few lines of test output:" - tail -20 /tmp/broker-discovery-test.log || echo "No test logs available" -fi - -echo -echo "๐Ÿ“Š Test Results:" -echo " Broker startup: โœ…" -echo " Broker registration: โœ…" -echo " Gateway discovery: $([ "$discovery_success" = true ] && echo "โœ…" || echo "โŒ")" - -echo -echo "๐Ÿ“ Logs available:" -echo " Server: /tmp/weed-discovery-server.log" -echo " Broker: /tmp/weed-discovery-broker.log" -echo " Discovery test: /tmp/broker-discovery-test.log" diff --git a/test/kafka/scripts/test-broker-startup.sh b/test/kafka/scripts/test-broker-startup.sh deleted file mode 100755 index 410376d3b..000000000 --- a/test/kafka/scripts/test-broker-startup.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/bin/bash - -# Script to test SeaweedFS MQ broker startup locally -# This helps debug broker startup issues before running CI - -set -e - -echo "=== Testing SeaweedFS MQ Broker Startup ===" - -# Build weed binary -echo "Building weed binary..." -cd "$(dirname "$0")/../../.." -go build -o /tmp/weed ./weed - -# Setup data directory -WEED_DATA_DIR="/tmp/seaweedfs-broker-test-$$" -mkdir -p "$WEED_DATA_DIR" -echo "Using data directory: $WEED_DATA_DIR" - -# Cleanup function -cleanup() { - echo "Cleaning up..." - pkill -f "weed.*server" || true - pkill -f "weed.*mq.broker" || true - sleep 2 - rm -rf "$WEED_DATA_DIR" - rm -f /tmp/weed-*.log -} -trap cleanup EXIT - -# Start SeaweedFS server -echo "Starting SeaweedFS server..." -/tmp/weed -v 1 server \ - -ip="127.0.0.1" \ - -ip.bind="0.0.0.0" \ - -dir="$WEED_DATA_DIR" \ - -master.raftHashicorp \ - -master.port=9333 \ - -volume.port=8081 \ - -filer.port=8888 \ - -filer=true \ - -metricsPort=9325 \ - > /tmp/weed-server-test.log 2>&1 & - -SERVER_PID=$! -echo "Server PID: $SERVER_PID" - -# Wait for master -echo "Waiting for master..." -for i in $(seq 1 30); do - if curl -s http://127.0.0.1:9333/cluster/status >/dev/null; then - echo "โœ“ Master is up" - break - fi - echo " Waiting for master... ($i/30)" - sleep 1 -done - -# Wait for filer -echo "Waiting for filer..." -for i in $(seq 1 30); do - if nc -z 127.0.0.1 8888; then - echo "โœ“ Filer is up" - break - fi - echo " Waiting for filer... ($i/30)" - sleep 1 -done - -# Start MQ broker -echo "Starting MQ broker..." -/tmp/weed -v 2 mq.broker \ - -master="127.0.0.1:9333" \ - -ip="127.0.0.1" \ - -port=17777 \ - > /tmp/weed-mq-broker-test.log 2>&1 & - -BROKER_PID=$! -echo "Broker PID: $BROKER_PID" - -# Wait for broker -echo "Waiting for broker..." -broker_ready=false -for i in $(seq 1 30); do - if nc -z 127.0.0.1 17777; then - echo "โœ“ MQ broker is up" - broker_ready=true - break - fi - echo " Waiting for MQ broker... ($i/30)" - sleep 1 -done - -if [ "$broker_ready" = false ]; then - echo "โŒ MQ broker failed to start" - echo - echo "=== Server logs ===" - cat /tmp/weed-server-test.log - echo - echo "=== Broker logs ===" - cat /tmp/weed-mq-broker-test.log - exit 1 -fi - -# Broker started successfully - discovery will be tested by Kafka gateway -echo "โœ“ Broker started successfully and accepting connections" - -echo -echo "[OK] All tests passed!" -echo "Server logs: /tmp/weed-server-test.log" -echo "Broker logs: /tmp/weed-mq-broker-test.log" diff --git a/test/kafka/scripts/test_schema_registry.sh b/test/kafka/scripts/test_schema_registry.sh deleted file mode 100755 index d5ba8574a..000000000 --- a/test/kafka/scripts/test_schema_registry.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -# Test script for schema registry E2E testing -# This script sets up a mock schema registry and runs the E2E tests - -set -e - -echo "๐Ÿš€ Starting Schema Registry E2E Test" - -# Check if we have a real schema registry URL -if [ -n "$SCHEMA_REGISTRY_URL" ]; then - echo "๐Ÿ“ก Using real Schema Registry: $SCHEMA_REGISTRY_URL" -else - echo "๐Ÿ”ง No SCHEMA_REGISTRY_URL set, using mock registry" - # For now, we'll skip the test if no real registry is available - # In the future, we could start a mock registry here - export SCHEMA_REGISTRY_URL="http://localhost:8081" - echo "โš ๏ธ Mock registry not implemented yet, test will be skipped" -fi - -# Start SeaweedFS infrastructure -echo "๐ŸŒฑ Starting SeaweedFS infrastructure..." -cd /Users/chrislu/go/src/github.com/seaweedfs/seaweedfs - -# Clean up any existing processes -pkill -f "weed server" || true -pkill -f "weed mq.broker" || true -sleep 2 - -# Start SeaweedFS server -echo "๐Ÿ—„๏ธ Starting SeaweedFS server..." -/tmp/weed server -dir=/tmp/seaweedfs-test -master.port=9333 -volume.port=8080 -filer.port=8888 -ip=localhost > /tmp/seaweed-server.log 2>&1 & -SERVER_PID=$! - -# Wait for server to be ready -sleep 5 - -# Start MQ broker -echo "๐Ÿ“จ Starting SeaweedMQ broker..." -/tmp/weed mq.broker -master=localhost:9333 -port=17777 > /tmp/seaweed-broker.log 2>&1 & -BROKER_PID=$! - -# Wait for broker to be ready -sleep 3 - -# Check if services are running -if ! curl -s http://localhost:9333/cluster/status > /dev/null; then - echo "[FAIL] SeaweedFS server not ready" - exit 1 -fi - -echo "[OK] SeaweedFS infrastructure ready" - -# Run the schema registry E2E tests -echo "๐Ÿงช Running Schema Registry E2E tests..." -cd /Users/chrislu/go/src/github.com/seaweedfs/seaweedfs/test/kafka - -export SEAWEEDFS_MASTERS=127.0.0.1:9333 - -# Run the tests -if go test -v ./integration -run TestSchemaRegistryE2E -timeout 5m; then - echo "[OK] Schema Registry E2E tests PASSED!" - TEST_RESULT=0 -else - echo "[FAIL] Schema Registry E2E tests FAILED!" - TEST_RESULT=1 -fi - -# Cleanup -echo "๐Ÿงน Cleaning up..." -kill $BROKER_PID $SERVER_PID 2>/dev/null || true -sleep 2 -pkill -f "weed server" || true -pkill -f "weed mq.broker" || true - -echo "๐Ÿ Schema Registry E2E Test completed" -exit $TEST_RESULT diff --git a/test/kafka/scripts/wait-for-services.sh b/test/kafka/scripts/wait-for-services.sh deleted file mode 100755 index 8f1a965f5..000000000 --- a/test/kafka/scripts/wait-for-services.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash - -# Wait for Services Script for Kafka Integration Tests - -set -e - -echo "Waiting for services to be ready..." - -# Configuration -KAFKA_HOST=${KAFKA_HOST:-localhost} -KAFKA_PORT=${KAFKA_PORT:-9092} -SCHEMA_REGISTRY_URL=${SCHEMA_REGISTRY_URL:-http://localhost:8081} -KAFKA_GATEWAY_HOST=${KAFKA_GATEWAY_HOST:-localhost} -KAFKA_GATEWAY_PORT=${KAFKA_GATEWAY_PORT:-9093} -SEAWEEDFS_MASTER_URL=${SEAWEEDFS_MASTER_URL:-http://localhost:9333} -MAX_WAIT=${MAX_WAIT:-300} # 5 minutes - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Helper function to wait for a service -wait_for_service() { - local service_name=$1 - local check_command=$2 - local timeout=${3:-60} - - echo -e "${BLUE}Waiting for ${service_name}...${NC}" - - local count=0 - while [ $count -lt $timeout ]; do - if eval "$check_command" > /dev/null 2>&1; then - echo -e "${GREEN}[OK] ${service_name} is ready${NC}" - return 0 - fi - - if [ $((count % 10)) -eq 0 ]; then - echo -e "${YELLOW}Still waiting for ${service_name}... (${count}s)${NC}" - fi - - sleep 1 - count=$((count + 1)) - done - - echo -e "${RED}[FAIL] ${service_name} failed to start within ${timeout} seconds${NC}" - return 1 -} - -# Wait for Zookeeper -echo "=== Checking Zookeeper ===" -wait_for_service "Zookeeper" "nc -z localhost 2181" 30 - -# Wait for Kafka -echo "=== Checking Kafka ===" -wait_for_service "Kafka" "nc -z ${KAFKA_HOST} ${KAFKA_PORT}" 60 - -# Test Kafka broker API -echo "=== Testing Kafka API ===" -wait_for_service "Kafka API" "timeout 5 kafka-broker-api-versions --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT}" 30 - -# Wait for Schema Registry -echo "=== Checking Schema Registry ===" -wait_for_service "Schema Registry" "curl -f ${SCHEMA_REGISTRY_URL}/subjects" 60 - -# Wait for SeaweedFS Master -echo "=== Checking SeaweedFS Master ===" -wait_for_service "SeaweedFS Master" "curl -f ${SEAWEEDFS_MASTER_URL}/cluster/status" 30 - -# Wait for SeaweedFS Volume -echo "=== Checking SeaweedFS Volume ===" -wait_for_service "SeaweedFS Volume" "curl -f http://localhost:8080/status" 30 - -# Wait for SeaweedFS Filer -echo "=== Checking SeaweedFS Filer ===" -wait_for_service "SeaweedFS Filer" "curl -f http://localhost:8888/" 30 - -# Wait for SeaweedFS MQ Broker -echo "=== Checking SeaweedFS MQ Broker ===" -wait_for_service "SeaweedFS MQ Broker" "nc -z localhost 17777" 30 - -# Wait for SeaweedFS MQ Agent -echo "=== Checking SeaweedFS MQ Agent ===" -wait_for_service "SeaweedFS MQ Agent" "nc -z localhost 16777" 30 - -# Wait for Kafka Gateway -echo "=== Checking Kafka Gateway ===" -wait_for_service "Kafka Gateway" "nc -z ${KAFKA_GATEWAY_HOST} ${KAFKA_GATEWAY_PORT}" 60 - -# Final verification -echo "=== Final Verification ===" - -# Test Kafka topic creation -echo "Testing Kafka topic operations..." -TEST_TOPIC="health-check-$(date +%s)" -if kafka-topics --create --topic "$TEST_TOPIC" --bootstrap-server "${KAFKA_HOST}:${KAFKA_PORT}" --partitions 1 --replication-factor 1 > /dev/null 2>&1; then - echo -e "${GREEN}[OK] Kafka topic creation works${NC}" - kafka-topics --delete --topic "$TEST_TOPIC" --bootstrap-server "${KAFKA_HOST}:${KAFKA_PORT}" > /dev/null 2>&1 || true -else - echo -e "${RED}[FAIL] Kafka topic creation failed${NC}" - exit 1 -fi - -# Test Schema Registry -echo "Testing Schema Registry..." -if curl -f "${SCHEMA_REGISTRY_URL}/subjects" > /dev/null 2>&1; then - echo -e "${GREEN}[OK] Schema Registry is accessible${NC}" -else - echo -e "${RED}[FAIL] Schema Registry is not accessible${NC}" - exit 1 -fi - -# Test Kafka Gateway connectivity -echo "Testing Kafka Gateway..." -if nc -z "${KAFKA_GATEWAY_HOST}" "${KAFKA_GATEWAY_PORT}"; then - echo -e "${GREEN}[OK] Kafka Gateway is accessible${NC}" -else - echo -e "${RED}[FAIL] Kafka Gateway is not accessible${NC}" - exit 1 -fi - -echo -e "${GREEN}All services are ready!${NC}" -echo "" -echo "Service endpoints:" -echo " Kafka: ${KAFKA_HOST}:${KAFKA_PORT}" -echo " Schema Registry: ${SCHEMA_REGISTRY_URL}" -echo " Kafka Gateway: ${KAFKA_GATEWAY_HOST}:${KAFKA_GATEWAY_PORT}" -echo " SeaweedFS Master: ${SEAWEEDFS_MASTER_URL}" -echo " SeaweedFS Filer: http://localhost:8888" -echo " SeaweedFS MQ Broker: localhost:17777" -echo " SeaweedFS MQ Agent: localhost:16777" -echo "" -echo "Ready to run integration tests!" diff --git a/test/kafka/simple-consumer/go.mod b/test/kafka/simple-consumer/go.mod deleted file mode 100644 index 1ced43c66..000000000 --- a/test/kafka/simple-consumer/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module simple-consumer - -go 1.21 - -require github.com/segmentio/kafka-go v0.4.47 - -require ( - github.com/klauspost/compress v1.17.0 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect -) diff --git a/test/kafka/simple-consumer/go.sum b/test/kafka/simple-consumer/go.sum deleted file mode 100644 index c9f731f2b..000000000 --- a/test/kafka/simple-consumer/go.sum +++ /dev/null @@ -1,69 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= -github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/kafka/simple-consumer/main.go b/test/kafka/simple-consumer/main.go deleted file mode 100644 index 0d7c6383a..000000000 --- a/test/kafka/simple-consumer/main.go +++ /dev/null @@ -1,123 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - "os" - "os/signal" - "syscall" - "time" - - "github.com/segmentio/kafka-go" -) - -func main() { - // Configuration - brokerAddress := "localhost:9093" // Kafka gateway port (not SeaweedMQ broker port 17777) - topicName := "_raw_messages" // Topic with "_" prefix - should skip schema validation - groupID := "raw-message-consumer" - - fmt.Printf("Consuming messages from topic '%s' on broker '%s'\n", topicName, brokerAddress) - - // Create a new reader - reader := kafka.NewReader(kafka.ReaderConfig{ - Brokers: []string{brokerAddress}, - Topic: topicName, - GroupID: groupID, - // Start reading from the beginning for testing - StartOffset: kafka.FirstOffset, - // Configure for quick consumption - MinBytes: 1, - MaxBytes: 10e6, // 10MB - }) - defer reader.Close() - - // Set up signal handling for graceful shutdown - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - <-sigChan - fmt.Println("\nReceived shutdown signal, stopping consumer...") - cancel() - }() - - fmt.Println("Starting to consume messages (Press Ctrl+C to stop)...") - fmt.Println("=" + fmt.Sprintf("%60s", "=")) - - messageCount := 0 - - for { - select { - case <-ctx.Done(): - fmt.Printf("\nStopped consuming. Total messages processed: %d\n", messageCount) - return - default: - // Set a timeout for reading messages - msgCtx, msgCancel := context.WithTimeout(ctx, 5*time.Second) - - message, err := reader.ReadMessage(msgCtx) - msgCancel() - - if err != nil { - if err == context.DeadlineExceeded { - fmt.Print(".") - continue - } - log.Printf("Error reading message: %v", err) - continue - } - - messageCount++ - - // Display message details - fmt.Printf("\nMessage #%d:\n", messageCount) - fmt.Printf(" Partition: %d, Offset: %d\n", message.Partition, message.Offset) - fmt.Printf(" Key: %s\n", string(message.Key)) - fmt.Printf(" Value: %s\n", string(message.Value)) - fmt.Printf(" Timestamp: %s\n", message.Time.Format(time.RFC3339)) - - // Display headers if present - if len(message.Headers) > 0 { - fmt.Printf(" Headers:\n") - for _, header := range message.Headers { - fmt.Printf(" %s: %s\n", header.Key, string(header.Value)) - } - } - - // Try to detect content type - contentType := detectContentType(message.Value) - fmt.Printf(" Content Type: %s\n", contentType) - - fmt.Printf(" Raw Size: %d bytes\n", len(message.Value)) - fmt.Println(" " + fmt.Sprintf("%50s", "-")) - } - } -} - -// detectContentType tries to determine the content type of the message -func detectContentType(data []byte) string { - if len(data) == 0 { - return "empty" - } - - // Check if it looks like JSON - trimmed := string(data) - if (trimmed[0] == '{' && trimmed[len(trimmed)-1] == '}') || - (trimmed[0] == '[' && trimmed[len(trimmed)-1] == ']') { - return "JSON" - } - - // Check if it's printable text - for _, b := range data { - if b < 32 && b != 9 && b != 10 && b != 13 { // Allow tab, LF, CR - return "binary" - } - } - - return "text" -} diff --git a/test/kafka/simple-consumer/simple-consumer b/test/kafka/simple-consumer/simple-consumer deleted file mode 100755 index 1f7a32775..000000000 Binary files a/test/kafka/simple-consumer/simple-consumer and /dev/null differ diff --git a/test/kafka/simple-publisher/README.md b/test/kafka/simple-publisher/README.md deleted file mode 100644 index 8c42c8ee8..000000000 --- a/test/kafka/simple-publisher/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Simple Kafka-Go Publisher for SeaweedMQ - -This is a simple publisher client that demonstrates publishing raw messages to SeaweedMQ topics with "_" prefix, which bypass schema validation. - -## Features - -- **Schema-Free Publishing**: Topics with "_" prefix don't require schema validation -- **Raw Message Storage**: Messages are stored in a "value" field as raw bytes -- **Multiple Message Formats**: Supports JSON, binary, and empty messages -- **Kafka-Go Compatible**: Uses the popular kafka-go library - -## Prerequisites - -1. **SeaweedMQ Running**: Make sure SeaweedMQ is running on `localhost:17777` (default Kafka port) -2. **Go Modules**: The project uses Go modules for dependency management - -## Setup and Run - -```bash -# Navigate to the publisher directory -cd test/kafka/simple-publisher - -# Download dependencies -go mod tidy - -# Run the publisher -go run main.go -``` - -## Expected Output - -``` -Publishing messages to topic '_raw_messages' on broker 'localhost:17777' -Publishing messages... -- Published message 1: {"id":1,"message":"Hello from kafka-go client",...} -- Published message 2: {"id":2,"message":"Raw message without schema validation",...} -- Published message 3: {"id":3,"message":"Testing SMQ with underscore prefix topic",...} - -Publishing different raw message formats... -- Published raw message 1: key=binary_key, value=Simple string message -- Published raw message 2: key=json_key, value={"raw_field": "raw_value", "number": 42} -- Published raw message 3: key=empty_key, value= -- Published raw message 4: key=, value=Message with no key - -All test messages published to topic with '_' prefix! -These messages should be stored as raw bytes without schema validation. -``` - -## Topic Naming Convention - -- **Schema-Required Topics**: `user-events`, `orders`, `payments` (require schema validation) -- **Schema-Free Topics**: `_raw_messages`, `_logs`, `_metrics` (bypass schema validation) - -The "_" prefix tells SeaweedMQ to treat the topic as a system topic and skip schema processing entirely. - -## Message Storage - -For topics with "_" prefix: -- Messages are stored as raw bytes without schema validation -- No Confluent Schema Registry envelope is required -- Any binary data or text can be published -- SMQ assumes raw messages are stored in a "value" field internally - -## Integration with SeaweedMQ - -This client works with SeaweedMQ's existing schema bypass logic: - -1. **`isSystemTopic()`** function identifies "_" prefix topics as system topics -2. **`produceSchemaBasedRecord()`** bypasses schema processing for system topics -3. **Raw storage** via `seaweedMQHandler.ProduceRecord()` stores messages as-is - -## Use Cases - -- **Log ingestion**: Store application logs without predefined schema -- **Metrics collection**: Publish time-series data in various formats -- **Raw data pipelines**: Process unstructured data before applying schemas -- **Development/testing**: Quickly publish test data without schema setup diff --git a/test/kafka/simple-publisher/go.mod b/test/kafka/simple-publisher/go.mod deleted file mode 100644 index 09309f0f2..000000000 --- a/test/kafka/simple-publisher/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module simple-publisher - -go 1.21 - -require github.com/segmentio/kafka-go v0.4.47 - -require ( - github.com/klauspost/compress v1.17.0 // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect -) diff --git a/test/kafka/simple-publisher/go.sum b/test/kafka/simple-publisher/go.sum deleted file mode 100644 index c9f731f2b..000000000 --- a/test/kafka/simple-publisher/go.sum +++ /dev/null @@ -1,69 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= -github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/kafka/simple-publisher/main.go b/test/kafka/simple-publisher/main.go deleted file mode 100644 index 6b7b4dffe..000000000 --- a/test/kafka/simple-publisher/main.go +++ /dev/null @@ -1,127 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "log" - "time" - - "github.com/segmentio/kafka-go" -) - -func main() { - // Configuration - brokerAddress := "localhost:9093" // Kafka gateway port (not SeaweedMQ broker port 17777) - topicName := "_raw_messages" // Topic with "_" prefix - should skip schema validation - - fmt.Printf("Publishing messages to topic '%s' on broker '%s'\n", topicName, brokerAddress) - - // Create a new writer - writer := &kafka.Writer{ - Addr: kafka.TCP(brokerAddress), - Topic: topicName, - Balancer: &kafka.LeastBytes{}, - // Configure for immediate delivery (useful for testing) - BatchTimeout: 10 * time.Millisecond, - BatchSize: 1, - } - defer writer.Close() - - // Sample data to publish - messages := []map[string]interface{}{ - { - "id": 1, - "message": "Hello from kafka-go client", - "timestamp": time.Now().Unix(), - "user_id": "user123", - }, - { - "id": 2, - "message": "Raw message without schema validation", - "timestamp": time.Now().Unix(), - "user_id": "user456", - "metadata": map[string]string{ - "source": "test-client", - "type": "raw", - }, - }, - { - "id": 3, - "message": "Testing SMQ with underscore prefix topic", - "timestamp": time.Now().Unix(), - "user_id": "user789", - "data": []byte("Some binary data here"), - }, - } - - ctx := context.Background() - - fmt.Println("Publishing messages...") - for i, msgData := range messages { - // Convert message to JSON (simulating raw messages stored in "value" field) - valueBytes, err := json.Marshal(msgData) - if err != nil { - log.Fatalf("Failed to marshal message %d: %v", i+1, err) - } - - // Create Kafka message - msg := kafka.Message{ - Key: []byte(fmt.Sprintf("key_%d", msgData["id"])), - Value: valueBytes, - Headers: []kafka.Header{ - {Key: "source", Value: []byte("kafka-go-client")}, - {Key: "content-type", Value: []byte("application/json")}, - }, - } - - // Write message - err = writer.WriteMessages(ctx, msg) - if err != nil { - log.Printf("Failed to write message %d: %v", i+1, err) - continue - } - - fmt.Printf("-Published message %d: %s\n", i+1, string(valueBytes)) - - // Small delay between messages - time.Sleep(100 * time.Millisecond) - } - - fmt.Println("\nAll messages published successfully!") - - // Test with different raw message types - fmt.Println("\nPublishing different raw message formats...") - - rawMessages := []kafka.Message{ - { - Key: []byte("binary_key"), - Value: []byte("Simple string message"), - }, - { - Key: []byte("json_key"), - Value: []byte(`{"raw_field": "raw_value", "number": 42}`), - }, - { - Key: []byte("empty_key"), - Value: []byte{}, // Empty value - }, - { - Key: nil, // No key - Value: []byte("Message with no key"), - }, - } - - for i, msg := range rawMessages { - err := writer.WriteMessages(ctx, msg) - if err != nil { - log.Printf("Failed to write raw message %d: %v", i+1, err) - continue - } - fmt.Printf("-Published raw message %d: key=%s, value=%s\n", - i+1, string(msg.Key), string(msg.Value)) - } - - fmt.Println("\nAll test messages published to topic with '_' prefix!") - fmt.Println("These messages should be stored as raw bytes without schema validation.") -} diff --git a/test/kafka/simple-publisher/simple-publisher b/test/kafka/simple-publisher/simple-publisher deleted file mode 100755 index e53b44407..000000000 Binary files a/test/kafka/simple-publisher/simple-publisher and /dev/null differ diff --git a/test/kafka/test-schema-bypass.sh b/test/kafka/test-schema-bypass.sh deleted file mode 100755 index 8635d94d3..000000000 --- a/test/kafka/test-schema-bypass.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash - -# Test script for SMQ schema bypass functionality -# This script tests publishing to topics with "_" prefix which should bypass schema validation - -set -e - -echo "๐Ÿงช Testing SMQ Schema Bypass for Topics with '_' Prefix" -echo "=========================================================" - -# Check if Kafka gateway is running -echo "Checking if Kafka gateway is running on localhost:9093..." -if ! nc -z localhost 9093 2>/dev/null; then - echo "[FAIL] Kafka gateway is not running on localhost:9093" - echo "Please start SeaweedMQ with Kafka gateway enabled first" - exit 1 -fi -echo "[OK] Kafka gateway is running" - -# Test with schema-required topic (should require schema) -echo -echo "Testing schema-required topic (should require schema validation)..." -SCHEMA_TOPIC="user-events" -echo "Topic: $SCHEMA_TOPIC (regular topic, requires schema)" - -# Test with underscore prefix topic (should bypass schema) -echo -echo "Testing schema-bypass topic (should skip schema validation)..." -BYPASS_TOPIC="_raw_messages" -echo "Topic: $BYPASS_TOPIC (underscore prefix, bypasses schema)" - -# Build and test the publisher -echo -echo "Building publisher..." -cd simple-publisher -go mod tidy -echo "[OK] Publisher dependencies ready" - -echo -echo "Running publisher test..." -timeout 30s go run main.go || { - echo "[FAIL] Publisher test failed or timed out" - exit 1 -} -echo "[OK] Publisher test completed" - -# Build consumer -echo -echo "Building consumer..." -cd ../simple-consumer -go mod tidy -echo "[OK] Consumer dependencies ready" - -echo -echo "Testing consumer (will run for 10 seconds)..." -timeout 10s go run main.go || { - if [ $? -eq 124 ]; then - echo "[OK] Consumer test completed (timed out as expected)" - else - echo "[FAIL] Consumer test failed" - exit 1 - fi -} - -echo -echo "All tests completed successfully!" -echo -echo "Summary:" -echo "- [OK] Topics with '_' prefix bypass schema validation" -echo "- [OK] Raw messages are stored as bytes in the 'value' field" -echo "- [OK] kafka-go client works with SeaweedMQ" -echo "- [OK] No schema validation errors for '_raw_messages' topic" -echo -echo "The SMQ schema bypass functionality is working correctly!" -echo "Topics with '_' prefix are treated as system topics and bypass all schema processing." diff --git a/test/kafka/test_json_timestamp.sh b/test/kafka/test_json_timestamp.sh deleted file mode 100755 index 545c07d6f..000000000 --- a/test/kafka/test_json_timestamp.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Test script to produce JSON messages and check timestamp field - -# Produce 3 JSON messages -for i in 1 2 3; do - TS=$(date +%s%N) - echo "{\"id\":\"test-msg-$i\",\"timestamp\":$TS,\"producer_id\":999,\"counter\":$i,\"user_id\":\"user-test\",\"event_type\":\"test\"}" -done | docker run --rm -i --network kafka-client-loadtest \ - edenhill/kcat:1.7.1 \ - -P -b kafka-gateway:9093 -t test-json-topic - -echo "Messages produced. Waiting 2 seconds for processing..." -sleep 2 - -echo "Querying messages..." -cd /Users/chrislu/go/src/github.com/seaweedfs/seaweedfs/test/kafka/kafka-client-loadtest -docker compose exec kafka-gateway /usr/local/bin/weed sql \ - -master=seaweedfs-master:9333 \ - -database=kafka \ - -query="SELECT id, timestamp, producer_id, counter, user_id, event_type FROM \"test-json-topic\" LIMIT 5;" - diff --git a/test/kafka/unit/gateway_test.go b/test/kafka/unit/gateway_test.go deleted file mode 100644 index 7f6d076e0..000000000 --- a/test/kafka/unit/gateway_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package unit - -import ( - "fmt" - "net" - "strings" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/test/kafka/internal/testutil" -) - -// TestGatewayBasicFunctionality tests basic gateway operations -func TestGatewayBasicFunctionality(t *testing.T) { - gateway := testutil.NewGatewayTestServer(t, testutil.GatewayOptions{}) - defer gateway.CleanupAndClose() - - addr := gateway.StartAndWait() - - // Give the gateway a bit more time to be fully ready - time.Sleep(200 * time.Millisecond) - - t.Run("AcceptsConnections", func(t *testing.T) { - testGatewayAcceptsConnections(t, addr) - }) - - t.Run("RefusesAfterClose", func(t *testing.T) { - testGatewayRefusesAfterClose(t, gateway) - }) -} - -func testGatewayAcceptsConnections(t *testing.T, addr string) { - // Test basic TCP connection to gateway - t.Logf("Testing connection to gateway at %s", addr) - - conn, err := net.DialTimeout("tcp", addr, 5*time.Second) - if err != nil { - t.Fatalf("Failed to connect to gateway: %v", err) - } - defer conn.Close() - - // Test that we can establish a connection and the gateway is listening - // We don't need to send a full Kafka request for this basic test - t.Logf("Successfully connected to gateway at %s", addr) - - // Optional: Test that we can write some data without error - testData := []byte("test") - conn.SetWriteDeadline(time.Now().Add(1 * time.Second)) - if _, err := conn.Write(testData); err != nil { - t.Logf("Write test failed (expected for basic connectivity test): %v", err) - } else { - t.Logf("Write test succeeded") - } -} - -func testGatewayRefusesAfterClose(t *testing.T, gateway *testutil.GatewayTestServer) { - // Get the address from the gateway's listener - host, port := gateway.GetListenerAddr() - addr := fmt.Sprintf("%s:%d", host, port) - - // Close the gateway - gateway.CleanupAndClose() - - t.Log("Testing that gateway refuses connections after close") - - // Attempt to connect - should fail - conn, err := net.DialTimeout("tcp", addr, 2*time.Second) - if err == nil { - conn.Close() - t.Fatal("Expected connection to fail after gateway close, but it succeeded") - } - - // Verify it's a connection refused error - if !strings.Contains(err.Error(), "connection refused") && !strings.Contains(err.Error(), "connect: connection refused") { - t.Logf("Connection failed as expected with error: %v", err) - } else { - t.Logf("Connection properly refused: %v", err) - } -} diff --git a/test/kms/Makefile b/test/kms/Makefile deleted file mode 100644 index bfbe51ec9..000000000 --- a/test/kms/Makefile +++ /dev/null @@ -1,139 +0,0 @@ -# SeaweedFS KMS Integration Testing Makefile - -# Configuration -OPENBAO_ADDR ?= http://127.0.0.1:8200 -OPENBAO_TOKEN ?= root-token-for-testing -SEAWEEDFS_S3_ENDPOINT ?= http://127.0.0.1:8333 -TEST_TIMEOUT ?= 5m -DOCKER_COMPOSE ?= docker-compose - -# Colors for output -BLUE := \033[36m -GREEN := \033[32m -YELLOW := \033[33m -RED := \033[31m -NC := \033[0m # No Color - -.PHONY: help setup test test-unit test-integration test-e2e clean logs status - -help: ## Show this help message - @echo "$(BLUE)SeaweedFS KMS Integration Testing$(NC)" - @echo "" - @echo "Available targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " $(GREEN)%-15s$(NC) %s\n", $$1, $$2}' $(MAKEFILE_LIST) - -setup: ## Set up test environment (OpenBao + SeaweedFS) - @echo "$(YELLOW)Setting up test environment...$(NC)" - @chmod +x setup_openbao.sh - @$(DOCKER_COMPOSE) up -d openbao - @sleep 5 - @echo "$(BLUE)Configuring OpenBao...$(NC)" - @OPENBAO_ADDR=$(OPENBAO_ADDR) OPENBAO_TOKEN=$(OPENBAO_TOKEN) ./setup_openbao.sh - @echo "$(GREEN)โœ… Test environment ready!$(NC)" - -test: setup test-unit test-integration ## Run all tests - -test-unit: ## Run unit tests for KMS providers - @echo "$(YELLOW)Running KMS provider unit tests...$(NC)" - @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) ./weed/kms/... - -test-integration: ## Run integration tests with OpenBao - @echo "$(YELLOW)Running KMS integration tests...$(NC)" - @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) ./test/kms/... - -test-benchmark: ## Run performance benchmarks - @echo "$(YELLOW)Running KMS performance benchmarks...$(NC)" - @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) -bench=. ./test/kms/... - -test-e2e: setup-seaweedfs ## Run end-to-end tests with SeaweedFS + KMS - @echo "$(YELLOW)Running end-to-end KMS tests...$(NC)" - @sleep 10 # Wait for SeaweedFS to be ready - @./test_s3_kms.sh - -setup-seaweedfs: ## Start complete SeaweedFS cluster with KMS - @echo "$(YELLOW)Starting SeaweedFS cluster...$(NC)" - @$(DOCKER_COMPOSE) up -d - @echo "$(BLUE)Waiting for services to be ready...$(NC)" - @./wait_for_services.sh - -test-aws-compat: ## Test AWS KMS API compatibility - @echo "$(YELLOW)Testing AWS KMS compatibility...$(NC)" - @cd ../../ && go test -v -timeout=$(TEST_TIMEOUT) -run TestAWSKMSCompat ./test/kms/... - -clean: ## Clean up test environment - @echo "$(YELLOW)Cleaning up test environment...$(NC)" - @$(DOCKER_COMPOSE) down -v --remove-orphans - @docker system prune -f - @echo "$(GREEN)โœ… Environment cleaned up!$(NC)" - -logs: ## Show logs from all services - @$(DOCKER_COMPOSE) logs --tail=50 -f - -logs-openbao: ## Show OpenBao logs - @$(DOCKER_COMPOSE) logs --tail=100 -f openbao - -logs-seaweedfs: ## Show SeaweedFS logs - @$(DOCKER_COMPOSE) logs --tail=100 -f seaweedfs-filer seaweedfs-master seaweedfs-volume - -status: ## Show status of all services - @echo "$(BLUE)Service Status:$(NC)" - @$(DOCKER_COMPOSE) ps - @echo "" - @echo "$(BLUE)OpenBao Status:$(NC)" - @curl -s $(OPENBAO_ADDR)/v1/sys/health | jq '.' || echo "OpenBao not accessible" - @echo "" - @echo "$(BLUE)SeaweedFS S3 Status:$(NC)" - @curl -s $(SEAWEEDFS_S3_ENDPOINT) || echo "SeaweedFS S3 not accessible" - -debug: ## Debug test environment - @echo "$(BLUE)Debug Information:$(NC)" - @echo "OpenBao Address: $(OPENBAO_ADDR)" - @echo "SeaweedFS S3 Endpoint: $(SEAWEEDFS_S3_ENDPOINT)" - @echo "Docker Compose Status:" - @$(DOCKER_COMPOSE) ps - @echo "" - @echo "Network connectivity:" - @docker network ls | grep seaweedfs || echo "No SeaweedFS network found" - @echo "" - @echo "OpenBao health:" - @curl -v $(OPENBAO_ADDR)/v1/sys/health 2>&1 || true - -# Development targets -dev-openbao: ## Start only OpenBao for development - @$(DOCKER_COMPOSE) up -d openbao - @sleep 5 - @OPENBAO_ADDR=$(OPENBAO_ADDR) OPENBAO_TOKEN=$(OPENBAO_TOKEN) ./setup_openbao.sh - -dev-test: dev-openbao ## Quick test with just OpenBao - @cd ../../ && go test -v -timeout=30s -run TestOpenBaoKMSProvider_Integration ./test/kms/ - -# Utility targets -install-deps: ## Install required dependencies - @echo "$(YELLOW)Installing test dependencies...$(NC)" - @which docker > /dev/null || (echo "$(RED)Docker not found$(NC)" && exit 1) - @which docker-compose > /dev/null || (echo "$(RED)Docker Compose not found$(NC)" && exit 1) - @which jq > /dev/null || (echo "$(RED)jq not found - please install jq$(NC)" && exit 1) - @which curl > /dev/null || (echo "$(RED)curl not found$(NC)" && exit 1) - @echo "$(GREEN)โœ… All dependencies available$(NC)" - -check-env: ## Check test environment setup - @echo "$(BLUE)Environment Check:$(NC)" - @echo "OPENBAO_ADDR: $(OPENBAO_ADDR)" - @echo "OPENBAO_TOKEN: $(OPENBAO_TOKEN)" - @echo "SEAWEEDFS_S3_ENDPOINT: $(SEAWEEDFS_S3_ENDPOINT)" - @echo "TEST_TIMEOUT: $(TEST_TIMEOUT)" - @make install-deps - -# CI targets -ci-test: ## Run tests in CI environment - @echo "$(YELLOW)Running CI tests...$(NC)" - @make setup - @make test-unit - @make test-integration - @make clean - -ci-e2e: ## Run end-to-end tests in CI - @echo "$(YELLOW)Running CI end-to-end tests...$(NC)" - @make setup-seaweedfs - @make test-e2e - @make clean diff --git a/test/kms/README.md b/test/kms/README.md deleted file mode 100644 index f0e61dfd1..000000000 --- a/test/kms/README.md +++ /dev/null @@ -1,394 +0,0 @@ -# ๐Ÿ” SeaweedFS KMS Integration Tests - -This directory contains comprehensive integration tests for SeaweedFS Server-Side Encryption (SSE) with Key Management Service (KMS) providers. The tests validate the complete encryption/decryption workflow using **OpenBao** (open source fork of HashiCorp Vault) as the KMS provider. - -## ๐ŸŽฏ Overview - -The KMS integration tests simulate **AWS KMS** functionality using **OpenBao**, providing: - -- โœ… **Production-grade KMS testing** with real encryption/decryption operations -- โœ… **S3 API compatibility testing** with SSE-KMS headers and bucket encryption -- โœ… **Per-bucket KMS configuration** validation -- โœ… **Performance benchmarks** for KMS operations -- โœ… **Error handling and edge case** coverage -- โœ… **End-to-end workflows** from S3 API to KMS provider - -## ๐Ÿ—๏ธ Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ S3 Client โ”‚ โ”‚ SeaweedFS โ”‚ โ”‚ OpenBao โ”‚ -โ”‚ (aws s3) โ”‚โ”€โ”€โ”€โ–ถโ”‚ S3 API โ”‚โ”€โ”€โ”€โ–ถโ”‚ Transit โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ โ”‚ - โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ - โ”‚ โ”‚ KMS Manager โ”‚ โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ถโ”‚ - AWS Provider โ”‚โ—€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - Azure Providerโ”‚ - โ”‚ - GCP Provider โ”‚ - โ”‚ - OpenBao โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## ๐Ÿ“‹ Prerequisites - -### Required Tools - -- **Docker & Docker Compose** - For running OpenBao and SeaweedFS -- **OpenBao CLI** (`bao`) - For direct OpenBao interaction *(optional)* -- **AWS CLI** - For S3 API testing -- **jq** - For JSON processing in scripts -- **curl** - For HTTP API testing -- **Go 1.19+** - For running Go tests - -### Installation - -```bash -# Install Docker (macOS) -brew install docker docker-compose - -# Install OpenBao (optional - used by some tests) -brew install openbao - -# Install AWS CLI -brew install awscli - -# Install jq -brew install jq -``` - -## ๐Ÿš€ Quick Start - -### 1. Run All Tests - -```bash -cd test/kms -make test -``` - -### 2. Run Specific Test Types - -```bash -# Unit tests only -make test-unit - -# Integration tests with OpenBao -make test-integration - -# End-to-end S3 API tests -make test-e2e - -# Performance benchmarks -make test-benchmark -``` - -### 3. Manual Setup - -```bash -# Start OpenBao only -make dev-openbao - -# Start full environment (OpenBao + SeaweedFS) -make setup-seaweedfs - -# Run manual tests -make dev-test -``` - -## ๐Ÿงช Test Components - -### 1. **OpenBao KMS Provider** (`openbao_integration_test.go`) - -**What it tests:** -- KMS provider registration and initialization -- Data key generation using Transit engine -- Encryption/decryption of data keys -- Key metadata and validation -- Error handling (invalid tokens, missing keys, etc.) -- Multiple key scenarios -- Performance benchmarks - -**Key test cases:** -```go -TestOpenBaoKMSProvider_Integration -TestOpenBaoKMSProvider_ErrorHandling -TestKMSManager_WithOpenBao -BenchmarkOpenBaoKMS_GenerateDataKey -BenchmarkOpenBaoKMS_Decrypt -``` - -### 2. **S3 API Integration** (`test_s3_kms.sh`) - -**What it tests:** -- Bucket encryption configuration via S3 API -- Default bucket encryption behavior -- Explicit SSE-KMS headers in PUT operations -- Object upload/download with encryption -- Multipart uploads with KMS encryption -- Encryption metadata in object headers -- Cross-bucket KMS provider isolation - -**Key scenarios:** -```bash -# Bucket encryption setup -aws s3api put-bucket-encryption --bucket test-openbao \ - --server-side-encryption-configuration '{ - "Rules": [{ - "ApplyServerSideEncryptionByDefault": { - "SSEAlgorithm": "aws:kms", - "KMSMasterKeyID": "test-key-1" - } - }] - }' - -# Object upload with encryption -aws s3 cp file.txt s3://test-openbao/encrypted-file.txt \ - --sse aws:kms --sse-kms-key-id "test-key-2" -``` - -### 3. **Docker Environment** (`docker-compose.yml`) - -**Services:** -- **OpenBao** - KMS provider (port 8200) -- **Vault** - Alternative KMS (port 8201) -- **SeaweedFS Master** - Cluster coordination (port 9333) -- **SeaweedFS Volume** - Data storage (port 8080) -- **SeaweedFS Filer** - S3 API endpoint (port 8333) - -### 4. **Configuration** (`filer.toml`) - -**KMS Configuration:** -```toml -[kms] -default_provider = "openbao-test" - -[kms.providers.openbao-test] -type = "openbao" -address = "http://openbao:8200" -token = "root-token-for-testing" -transit_path = "transit" - -[kms.buckets.test-openbao] -provider = "openbao-test" -``` - -## ๐Ÿ“Š Test Data - -### Encryption Keys Created - -The setup script creates these test keys in OpenBao: - -| Key Name | Type | Purpose | -|----------|------|---------| -| `test-key-1` | AES256-GCM96 | Basic operations | -| `test-key-2` | AES256-GCM96 | Multi-key scenarios | -| `seaweedfs-test-key` | AES256-GCM96 | Integration testing | -| `bucket-default-key` | AES256-GCM96 | Default bucket encryption | -| `high-security-key` | AES256-GCM96 | Security testing | -| `performance-key` | AES256-GCM96 | Performance benchmarks | -| `multipart-key` | AES256-GCM96 | Multipart upload testing | - -### Test Buckets - -| Bucket Name | KMS Provider | Purpose | -|-------------|--------------|---------| -| `test-openbao` | openbao-test | OpenBao integration | -| `test-vault` | vault-test | Vault compatibility | -| `test-local` | local-test | Local KMS testing | -| `secure-data` | openbao-test | High security scenarios | - -## ๐Ÿ”ง Configuration Options - -### Environment Variables - -```bash -# OpenBao configuration -export OPENBAO_ADDR="http://127.0.0.1:8200" -export OPENBAO_TOKEN="root-token-for-testing" - -# SeaweedFS configuration -export SEAWEEDFS_S3_ENDPOINT="http://127.0.0.1:8333" -export ACCESS_KEY="any" -export SECRET_KEY="any" - -# Test configuration -export TEST_TIMEOUT="5m" -``` - -### Makefile Targets - -| Target | Description | -|--------|-------------| -| `make help` | Show available commands | -| `make setup` | Set up test environment | -| `make test` | Run all tests | -| `make test-unit` | Run unit tests only | -| `make test-integration` | Run integration tests | -| `make test-e2e` | Run end-to-end tests | -| `make clean` | Clean up environment | -| `make logs` | Show service logs | -| `make status` | Check service status | - -## ๐Ÿงฉ How It Works - -### 1. **KMS Provider Registration** - -OpenBao provider is automatically registered via `init()`: - -```go -func init() { - seaweedkms.RegisterProvider("openbao", NewOpenBaoKMSProvider) - seaweedkms.RegisterProvider("vault", NewOpenBaoKMSProvider) // Alias -} -``` - -### 2. **Data Key Generation Flow** - -``` -1. S3 PUT with SSE-KMS headers -2. SeaweedFS extracts KMS key ID -3. KMSManager routes to OpenBao provider -4. OpenBao generates random data key -5. OpenBao encrypts data key with master key -6. SeaweedFS encrypts object with data key -7. Encrypted data key stored in metadata -``` - -### 3. **Decryption Flow** - -``` -1. S3 GET request for encrypted object -2. SeaweedFS extracts encrypted data key from metadata -3. KMSManager routes to OpenBao provider -4. OpenBao decrypts data key with master key -5. SeaweedFS decrypts object with data key -6. Plaintext object returned to client -``` - -## ๐Ÿ” Troubleshooting - -### Common Issues - -**OpenBao not starting:** -```bash -# Check if port 8200 is in use -lsof -i :8200 - -# Check Docker logs -docker-compose logs openbao -``` - -**KMS provider not found:** -```bash -# Verify provider registration -go test -v -run TestProviderRegistration ./test/kms/ - -# Check imports in filer_kms.go -grep -n "kms/" weed/command/filer_kms.go -``` - -**S3 API connection refused:** -```bash -# Check SeaweedFS services -make status - -# Wait for services to be ready -./wait_for_services.sh -``` - -### Debug Commands - -```bash -# Test OpenBao directly -curl -H "X-Vault-Token: root-token-for-testing" \ - http://127.0.0.1:8200/v1/sys/health - -# Test transit engine -curl -X POST \ - -H "X-Vault-Token: root-token-for-testing" \ - -d '{"plaintext":"SGVsbG8gV29ybGQ="}' \ - http://127.0.0.1:8200/v1/transit/encrypt/test-key-1 - -# Test S3 API -aws s3 ls --endpoint-url http://127.0.0.1:8333 -``` - -## ๐ŸŽฏ AWS KMS Integration Testing - -This test suite **simulates AWS KMS behavior** using OpenBao, enabling: - -### โœ… **Compatibility Validation** - -- **S3 API compatibility** - Same headers, same behavior as AWS S3 -- **KMS API patterns** - GenerateDataKey, Decrypt, DescribeKey operations -- **Error codes** - AWS-compatible error responses -- **Encryption context** - Proper context handling and validation - -### โœ… **Production Readiness Testing** - -- **Key rotation scenarios** - Multiple keys per bucket -- **Performance characteristics** - Latency and throughput metrics -- **Error recovery** - Network failures, invalid keys, timeout handling -- **Security validation** - Encryption/decryption correctness - -### โœ… **Integration Patterns** - -- **Bucket-level configuration** - Different KMS keys per bucket -- **Cross-region simulation** - Multiple KMS providers -- **Caching behavior** - Data key caching validation -- **Metadata handling** - Encrypted metadata storage - -## ๐Ÿ“ˆ Performance Expectations - -**Typical performance metrics** (local testing): - -- **Data key generation**: ~50-100ms (including network roundtrip) -- **Data key decryption**: ~30-50ms (cached provider instance) -- **Object encryption**: ~1-5ms per MB (AES-256-GCM) -- **S3 PUT with SSE-KMS**: +100-200ms overhead vs. unencrypted - -## ๐Ÿš€ Production Deployment - -After successful integration testing, deploy with real KMS providers: - -```toml -[kms.providers.aws-prod] -type = "aws" -region = "us-east-1" -# IAM roles preferred over access keys - -[kms.providers.azure-prod] -type = "azure" -vault_url = "https://prod-vault.vault.azure.net/" -use_default_creds = true # Managed identity - -[kms.providers.gcp-prod] -type = "gcp" -project_id = "prod-project" -use_default_credentials = true # Service account -``` - -## ๐ŸŽ‰ Success Criteria - -Tests pass when: - -- โœ… All KMS providers register successfully -- โœ… Data key generation/decryption works end-to-end -- โœ… S3 API encryption headers are handled correctly -- โœ… Bucket-level KMS configuration is respected -- โœ… Multipart uploads maintain encryption consistency -- โœ… Performance meets acceptable thresholds -- โœ… Error scenarios are handled gracefully - ---- - -## ๐Ÿ“ž Support - -For issues with KMS integration tests: - -1. **Check logs**: `make logs` -2. **Verify environment**: `make status` -3. **Run debug**: `make debug` -4. **Clean restart**: `make clean && make setup` - -**Happy testing!** ๐Ÿ”โœจ diff --git a/test/kms/docker-compose.yml b/test/kms/docker-compose.yml deleted file mode 100644 index 381d9fbb4..000000000 --- a/test/kms/docker-compose.yml +++ /dev/null @@ -1,101 +0,0 @@ -services: - # OpenBao server for KMS integration testing - openbao: - image: ghcr.io/openbao/openbao:latest - ports: - - "8200:8200" - environment: - - BAO_DEV_ROOT_TOKEN_ID=root-token-for-testing - - BAO_DEV_LISTEN_ADDRESS=0.0.0.0:8200 - - BAO_LOCAL_CONFIG={"backend":{"file":{"path":"/bao/data"}},"default_lease_ttl":"168h","max_lease_ttl":"720h","ui":true,"disable_mlock":true} - command: - - bao - - server - - -dev - - -dev-root-token-id=root-token-for-testing - - -dev-listen-address=0.0.0.0:8200 - volumes: - - openbao-data:/bao/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8200/v1/sys/health"] - interval: 5s - timeout: 3s - retries: 5 - start_period: 10s - - # HashiCorp Vault for compatibility testing (alternative to OpenBao) - vault: - image: vault:latest - ports: - - "8201:8200" - environment: - - VAULT_DEV_ROOT_TOKEN_ID=root-token-for-testing - - VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200 - command: - - vault - - server - - -dev - - -dev-root-token-id=root-token-for-testing - - -dev-listen-address=0.0.0.0:8200 - cap_add: - - IPC_LOCK - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8200/v1/sys/health"] - interval: 5s - timeout: 3s - retries: 5 - start_period: 10s - - # SeaweedFS components for end-to-end testing - seaweedfs-master: - image: chrislusf/seaweedfs:latest - ports: - - "9333:9333" - command: - - master - - -ip=seaweedfs-master - - -volumeSizeLimitMB=1024 - volumes: - - seaweedfs-master-data:/data - - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - ports: - - "8080:8080" - command: - - volume - - -mserver=seaweedfs-master:9333 - - -ip=seaweedfs-volume - - -publicUrl=seaweedfs-volume:8080 - depends_on: - - seaweedfs-master - volumes: - - seaweedfs-volume-data:/data - - seaweedfs-filer: - image: chrislusf/seaweedfs:latest - ports: - - "8888:8888" - - "8333:8333" # S3 API port - command: - - filer - - -master=seaweedfs-master:9333 - - -ip=seaweedfs-filer - - -s3 - - -s3.port=8333 - depends_on: - - seaweedfs-master - - seaweedfs-volume - volumes: - - ./filer.toml:/etc/seaweedfs/filer.toml - - seaweedfs-filer-data:/data - -volumes: - openbao-data: - seaweedfs-master-data: - seaweedfs-volume-data: - seaweedfs-filer-data: - -networks: - default: - name: seaweedfs-kms-test diff --git a/test/kms/filer.toml b/test/kms/filer.toml deleted file mode 100644 index a4f032aae..000000000 --- a/test/kms/filer.toml +++ /dev/null @@ -1,85 +0,0 @@ -# SeaweedFS Filer Configuration for KMS Integration Testing - -[leveldb2] -# Use LevelDB for simple testing -enabled = true -dir = "/data/filerdb" - -# KMS Configuration for Integration Testing -[kms] -# Default KMS provider -default_provider = "openbao-test" - -# KMS provider configurations -[kms.providers] - -# OpenBao provider for integration testing -[kms.providers.openbao-test] -type = "openbao" -address = "http://openbao:8200" -token = "root-token-for-testing" -transit_path = "transit" -tls_skip_verify = true -request_timeout = 30 -cache_enabled = true -cache_ttl = "5m" # Shorter TTL for testing -max_cache_size = 100 - -# Alternative Vault provider (for compatibility testing) -[kms.providers.vault-test] -type = "vault" -address = "http://vault:8200" -token = "root-token-for-testing" -transit_path = "transit" -tls_skip_verify = true -request_timeout = 30 -cache_enabled = true -cache_ttl = "5m" -max_cache_size = 100 - -# Local KMS provider (for comparison/fallback) -[kms.providers.local-test] -type = "local" -enableOnDemandCreate = true -cache_enabled = false # Local doesn't need caching - -# Simulated AWS KMS provider (for testing AWS integration patterns) -[kms.providers.aws-localstack] -type = "aws" -region = "us-east-1" -endpoint = "http://localstack:4566" # LocalStack endpoint -access_key = "test" -secret_key = "test" -tls_skip_verify = true -connect_timeout = 10 -request_timeout = 30 -max_retries = 3 -cache_enabled = true -cache_ttl = "10m" - -# Bucket-specific KMS provider assignments for testing -[kms.buckets] - -# Test bucket using OpenBao -[kms.buckets.test-openbao] -provider = "openbao-test" - -# Test bucket using Vault (compatibility) -[kms.buckets.test-vault] -provider = "vault-test" - -# Test bucket using local KMS -[kms.buckets.test-local] -provider = "local-test" - -# Test bucket using simulated AWS KMS -[kms.buckets.test-aws] -provider = "aws-localstack" - -# High security test bucket -[kms.buckets.secure-data] -provider = "openbao-test" - -# Performance test bucket -[kms.buckets.perf-test] -provider = "openbao-test" diff --git a/test/kms/openbao_integration_test.go b/test/kms/openbao_integration_test.go deleted file mode 100644 index d4e62ed4d..000000000 --- a/test/kms/openbao_integration_test.go +++ /dev/null @@ -1,598 +0,0 @@ -package kms_test - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/api" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/kms" - _ "github.com/seaweedfs/seaweedfs/weed/kms/openbao" -) - -const ( - OpenBaoAddress = "http://127.0.0.1:8200" - OpenBaoToken = "root-token-for-testing" - TransitPath = "transit" -) - -// Test configuration for OpenBao KMS provider -type testConfig struct { - config map[string]interface{} -} - -func (c *testConfig) GetString(key string) string { - if val, ok := c.config[key]; ok { - if str, ok := val.(string); ok { - return str - } - } - return "" -} - -func (c *testConfig) GetBool(key string) bool { - if val, ok := c.config[key]; ok { - if b, ok := val.(bool); ok { - return b - } - } - return false -} - -func (c *testConfig) GetInt(key string) int { - if val, ok := c.config[key]; ok { - if i, ok := val.(int); ok { - return i - } - if f, ok := val.(float64); ok { - return int(f) - } - } - return 0 -} - -func (c *testConfig) GetStringSlice(key string) []string { - if val, ok := c.config[key]; ok { - if slice, ok := val.([]string); ok { - return slice - } - } - return nil -} - -func (c *testConfig) SetDefault(key string, value interface{}) { - if c.config == nil { - c.config = make(map[string]interface{}) - } - if _, exists := c.config[key]; !exists { - c.config[key] = value - } -} - -// setupOpenBao starts OpenBao in development mode for testing -func setupOpenBao(t *testing.T) (*exec.Cmd, func()) { - // Check if OpenBao is running in Docker (via make dev-openbao) - client, err := api.NewClient(&api.Config{Address: OpenBaoAddress}) - if err == nil { - client.SetToken(OpenBaoToken) - _, err = client.Sys().Health() - if err == nil { - glog.V(1).Infof("Using existing OpenBao server at %s", OpenBaoAddress) - // Return dummy command and cleanup function for existing server - return nil, func() {} - } - } - - // Check if OpenBao binary is available for starting locally - _, err = exec.LookPath("bao") - if err != nil { - t.Skip("OpenBao not running and bao binary not found. Run 'cd test/kms && make dev-openbao' first") - } - - // Start OpenBao in dev mode - cmd := exec.Command("bao", "server", "-dev", "-dev-root-token-id="+OpenBaoToken, "-dev-listen-address=127.0.0.1:8200") - cmd.Env = append(os.Environ(), "BAO_DEV_ROOT_TOKEN_ID="+OpenBaoToken) - - // Capture output for debugging - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - err = cmd.Start() - require.NoError(t, err, "Failed to start OpenBao server") - - // Wait for OpenBao to be ready - client, err = api.NewClient(&api.Config{Address: OpenBaoAddress}) - require.NoError(t, err) - client.SetToken(OpenBaoToken) - - // Wait up to 30 seconds for OpenBao to be ready - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - for { - select { - case <-ctx.Done(): - cmd.Process.Kill() - t.Fatal("Timeout waiting for OpenBao to start") - default: - // Try to check health - resp, err := client.Sys().Health() - if err == nil && resp.Initialized { - glog.V(1).Infof("OpenBao server ready") - goto ready - } - time.Sleep(500 * time.Millisecond) - } - } - -ready: - // Setup cleanup function - cleanup := func() { - if cmd != nil && cmd.Process != nil { - glog.V(1).Infof("Stopping OpenBao server") - cmd.Process.Kill() - cmd.Wait() - } - } - - return cmd, cleanup -} - -// setupTransitEngine enables and configures the transit secrets engine -func setupTransitEngine(t *testing.T) { - client, err := api.NewClient(&api.Config{Address: OpenBaoAddress}) - require.NoError(t, err) - client.SetToken(OpenBaoToken) - - // Enable transit secrets engine - err = client.Sys().Mount(TransitPath, &api.MountInput{ - Type: "transit", - Description: "Transit engine for KMS testing", - }) - if err != nil && !strings.Contains(err.Error(), "path is already in use") { - require.NoError(t, err, "Failed to enable transit engine") - } - - // Create test encryption keys - testKeys := []string{"test-key-1", "test-key-2", "seaweedfs-test-key"} - - for _, keyName := range testKeys { - keyData := map[string]interface{}{ - "type": "aes256-gcm96", - } - - path := fmt.Sprintf("%s/keys/%s", TransitPath, keyName) - _, err = client.Logical().Write(path, keyData) - if err != nil && !strings.Contains(err.Error(), "key already exists") { - require.NoError(t, err, "Failed to create test key %s", keyName) - } - - glog.V(2).Infof("Created/verified test key: %s", keyName) - } -} - -func TestOpenBaoKMSProvider_Integration(t *testing.T) { - // Start OpenBao server - _, cleanup := setupOpenBao(t) - defer cleanup() - - // Setup transit engine and keys - setupTransitEngine(t) - - t.Run("CreateProvider", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - require.NotNil(t, provider) - - defer provider.Close() - }) - - t.Run("ProviderRegistration", func(t *testing.T) { - // Test that the provider is registered - providers := kms.ListProviders() - assert.Contains(t, providers, "openbao") - assert.Contains(t, providers, "vault") // Compatibility alias - }) - - t.Run("GenerateDataKey", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - defer provider.Close() - - ctx := context.Background() - req := &kms.GenerateDataKeyRequest{ - KeyID: "test-key-1", - KeySpec: kms.KeySpecAES256, - EncryptionContext: map[string]string{ - "test": "context", - "env": "integration", - }, - } - - resp, err := provider.GenerateDataKey(ctx, req) - require.NoError(t, err) - require.NotNil(t, resp) - - assert.Equal(t, "test-key-1", resp.KeyID) - assert.Len(t, resp.Plaintext, 32) // 256 bits - assert.NotEmpty(t, resp.CiphertextBlob) - - // Verify the response is in standardized envelope format - envelope, err := kms.ParseEnvelope(resp.CiphertextBlob) - assert.NoError(t, err) - assert.Equal(t, "openbao", envelope.Provider) - assert.Equal(t, "test-key-1", envelope.KeyID) - assert.True(t, strings.HasPrefix(envelope.Ciphertext, "vault:")) // Raw OpenBao format inside envelope - }) - - t.Run("DecryptDataKey", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - defer provider.Close() - - ctx := context.Background() - - // First generate a data key - genReq := &kms.GenerateDataKeyRequest{ - KeyID: "test-key-1", - KeySpec: kms.KeySpecAES256, - EncryptionContext: map[string]string{ - "test": "decrypt", - "env": "integration", - }, - } - - genResp, err := provider.GenerateDataKey(ctx, genReq) - require.NoError(t, err) - - // Now decrypt it - decReq := &kms.DecryptRequest{ - CiphertextBlob: genResp.CiphertextBlob, - EncryptionContext: map[string]string{ - "openbao:key:name": "test-key-1", - "test": "decrypt", - "env": "integration", - }, - } - - decResp, err := provider.Decrypt(ctx, decReq) - require.NoError(t, err) - require.NotNil(t, decResp) - - assert.Equal(t, "test-key-1", decResp.KeyID) - assert.Equal(t, genResp.Plaintext, decResp.Plaintext) - }) - - t.Run("DescribeKey", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - defer provider.Close() - - ctx := context.Background() - req := &kms.DescribeKeyRequest{ - KeyID: "test-key-1", - } - - resp, err := provider.DescribeKey(ctx, req) - require.NoError(t, err) - require.NotNil(t, resp) - - assert.Equal(t, "test-key-1", resp.KeyID) - assert.Contains(t, resp.ARN, "openbao:") - assert.Equal(t, kms.KeyStateEnabled, resp.KeyState) - assert.Equal(t, kms.KeyUsageEncryptDecrypt, resp.KeyUsage) - }) - - t.Run("NonExistentKey", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - defer provider.Close() - - ctx := context.Background() - req := &kms.DescribeKeyRequest{ - KeyID: "non-existent-key", - } - - _, err = provider.DescribeKey(ctx, req) - require.Error(t, err) - - kmsErr, ok := err.(*kms.KMSError) - require.True(t, ok) - assert.Equal(t, kms.ErrCodeNotFoundException, kmsErr.Code) - }) - - t.Run("MultipleKeys", func(t *testing.T) { - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) - defer provider.Close() - - ctx := context.Background() - - // Test with multiple keys - testKeys := []string{"test-key-1", "test-key-2", "seaweedfs-test-key"} - - for _, keyName := range testKeys { - t.Run(fmt.Sprintf("Key_%s", keyName), func(t *testing.T) { - // Generate data key - genReq := &kms.GenerateDataKeyRequest{ - KeyID: keyName, - KeySpec: kms.KeySpecAES256, - EncryptionContext: map[string]string{ - "key": keyName, - }, - } - - genResp, err := provider.GenerateDataKey(ctx, genReq) - require.NoError(t, err) - assert.Equal(t, keyName, genResp.KeyID) - - // Decrypt data key - decReq := &kms.DecryptRequest{ - CiphertextBlob: genResp.CiphertextBlob, - EncryptionContext: map[string]string{ - "openbao:key:name": keyName, - "key": keyName, - }, - } - - decResp, err := provider.Decrypt(ctx, decReq) - require.NoError(t, err) - assert.Equal(t, genResp.Plaintext, decResp.Plaintext) - }) - } - }) -} - -func TestOpenBaoKMSProvider_ErrorHandling(t *testing.T) { - // Start OpenBao server - _, cleanup := setupOpenBao(t) - defer cleanup() - - setupTransitEngine(t) - - t.Run("InvalidToken", func(t *testing.T) { - t.Skip("Skipping invalid token test - OpenBao dev mode may be too permissive") - - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": "invalid-token", - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - require.NoError(t, err) // Provider creation doesn't validate token - defer provider.Close() - - ctx := context.Background() - req := &kms.GenerateDataKeyRequest{ - KeyID: "test-key-1", - KeySpec: kms.KeySpecAES256, - } - - _, err = provider.GenerateDataKey(ctx, req) - require.Error(t, err) - - // Check that it's a KMS error (could be access denied or other auth error) - kmsErr, ok := err.(*kms.KMSError) - require.True(t, ok, "Expected KMSError but got: %T", err) - // OpenBao might return different error codes for invalid tokens - assert.Contains(t, []string{kms.ErrCodeAccessDenied, kms.ErrCodeKMSInternalFailure}, kmsErr.Code) - }) - -} - -func TestKMSManager_WithOpenBao(t *testing.T) { - // Start OpenBao server - _, cleanup := setupOpenBao(t) - defer cleanup() - - setupTransitEngine(t) - - t.Run("KMSManagerIntegration", func(t *testing.T) { - manager := kms.InitializeKMSManager() - - // Add OpenBao provider to manager - kmsConfig := &kms.KMSConfig{ - Provider: "openbao", - Config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - CacheEnabled: true, - CacheTTL: time.Hour, - } - - err := manager.AddKMSProvider("openbao-test", kmsConfig) - require.NoError(t, err) - - // Set as default provider - err = manager.SetDefaultKMSProvider("openbao-test") - require.NoError(t, err) - - // Test bucket-specific assignment - err = manager.SetBucketKMSProvider("test-bucket", "openbao-test") - require.NoError(t, err) - - // Test key operations through manager - ctx := context.Background() - resp, err := manager.GenerateDataKeyForBucket(ctx, "test-bucket", "test-key-1", kms.KeySpecAES256, map[string]string{ - "bucket": "test-bucket", - }) - require.NoError(t, err) - require.NotNil(t, resp) - - assert.Equal(t, "test-key-1", resp.KeyID) - assert.Len(t, resp.Plaintext, 32) - - // Test decryption through manager - decResp, err := manager.DecryptForBucket(ctx, "test-bucket", resp.CiphertextBlob, map[string]string{ - "bucket": "test-bucket", - }) - require.NoError(t, err) - assert.Equal(t, resp.Plaintext, decResp.Plaintext) - - // Test health check - health := manager.GetKMSHealth(ctx) - assert.Contains(t, health, "openbao-test") - assert.NoError(t, health["openbao-test"]) // Should be healthy - - // Cleanup - manager.Close() - }) -} - -// Benchmark tests for performance -func BenchmarkOpenBaoKMS_GenerateDataKey(b *testing.B) { - if testing.Short() { - b.Skip("Skipping benchmark in short mode") - } - - // Start OpenBao server - _, cleanup := setupOpenBao(&testing.T{}) - defer cleanup() - - setupTransitEngine(&testing.T{}) - - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - if err != nil { - b.Fatal(err) - } - defer provider.Close() - - ctx := context.Background() - req := &kms.GenerateDataKeyRequest{ - KeyID: "test-key-1", - KeySpec: kms.KeySpecAES256, - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := provider.GenerateDataKey(ctx, req) - if err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkOpenBaoKMS_Decrypt(b *testing.B) { - if testing.Short() { - b.Skip("Skipping benchmark in short mode") - } - - // Start OpenBao server - _, cleanup := setupOpenBao(&testing.T{}) - defer cleanup() - - setupTransitEngine(&testing.T{}) - - config := &testConfig{ - config: map[string]interface{}{ - "address": OpenBaoAddress, - "token": OpenBaoToken, - "transit_path": TransitPath, - }, - } - - provider, err := kms.GetProvider("openbao", config) - if err != nil { - b.Fatal(err) - } - defer provider.Close() - - ctx := context.Background() - - // Generate a data key for decryption testing - genResp, err := provider.GenerateDataKey(ctx, &kms.GenerateDataKeyRequest{ - KeyID: "test-key-1", - KeySpec: kms.KeySpecAES256, - }) - if err != nil { - b.Fatal(err) - } - - decReq := &kms.DecryptRequest{ - CiphertextBlob: genResp.CiphertextBlob, - EncryptionContext: map[string]string{ - "openbao:key:name": "test-key-1", - }, - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := provider.Decrypt(ctx, decReq) - if err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/test/kms/setup_openbao.sh b/test/kms/setup_openbao.sh deleted file mode 100755 index dc8fdf6dd..000000000 --- a/test/kms/setup_openbao.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/bash - -# Setup script for OpenBao KMS integration testing -set -e - -OPENBAO_ADDR=${OPENBAO_ADDR:-"http://127.0.0.1:8200"} -OPENBAO_TOKEN=${OPENBAO_TOKEN:-"root-token-for-testing"} -TRANSIT_PATH=${TRANSIT_PATH:-"transit"} - -echo "๐Ÿš€ Setting up OpenBao for KMS integration testing..." -echo "OpenBao Address: $OPENBAO_ADDR" -echo "Transit Path: $TRANSIT_PATH" - -# Wait for OpenBao to be ready -echo "โณ Waiting for OpenBao to be ready..." -for i in {1..30}; do - if curl -s "$OPENBAO_ADDR/v1/sys/health" >/dev/null 2>&1; then - echo "[OK] OpenBao is ready!" - break - fi - echo " Attempt $i/30: OpenBao not ready yet, waiting..." - sleep 2 -done - -# Check if we can connect -if ! curl -s -H "X-Vault-Token: $OPENBAO_TOKEN" "$OPENBAO_ADDR/v1/sys/health" >/dev/null; then - echo "[FAIL] Cannot connect to OpenBao at $OPENBAO_ADDR" - exit 1 -fi - -echo "๐Ÿ”ง Setting up transit secrets engine..." - -# Enable transit secrets engine (ignore if already enabled) -curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{"type":"transit","description":"Transit engine for KMS testing"}' \ - "$OPENBAO_ADDR/v1/sys/mounts/$TRANSIT_PATH" || true - -echo "๐Ÿ”‘ Creating test encryption keys..." - -# Define test keys -declare -a TEST_KEYS=( - "test-key-1:aes256-gcm96:Test key 1 for basic operations" - "test-key-2:aes256-gcm96:Test key 2 for multi-key scenarios" - "seaweedfs-test-key:aes256-gcm96:SeaweedFS integration test key" - "bucket-default-key:aes256-gcm96:Default key for bucket encryption" - "high-security-key:aes256-gcm96:High security test key" - "performance-key:aes256-gcm96:Performance testing key" - "aws-compat-key:aes256-gcm96:AWS compatibility test key" - "multipart-key:aes256-gcm96:Multipart upload test key" -) - -# Create each test key -for key_spec in "${TEST_KEYS[@]}"; do - IFS=':' read -r key_name key_type key_desc <<< "$key_spec" - - echo " Creating key: $key_name ($key_type)" - - # Create the encryption key - curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"type\":\"$key_type\",\"description\":\"$key_desc\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/keys/$key_name" || { - echo " โš ๏ธ Key $key_name might already exist" - } - - # Verify the key was created - if curl -s -H "X-Vault-Token: $OPENBAO_TOKEN" "$OPENBAO_ADDR/v1/$TRANSIT_PATH/keys/$key_name" >/dev/null; then - echo " [OK] Key $key_name verified" - else - echo " [FAIL] Failed to create/verify key $key_name" - exit 1 - fi -done - -echo "๐Ÿงช Testing basic encryption/decryption..." - -# Test basic encrypt/decrypt operation -TEST_PLAINTEXT="Hello, SeaweedFS KMS Integration!" -PLAINTEXT_B64=$(echo -n "$TEST_PLAINTEXT" | base64) - -echo " Testing with key: test-key-1" - -# Encrypt -ENCRYPT_RESPONSE=$(curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"plaintext\":\"$PLAINTEXT_B64\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/encrypt/test-key-1") - -CIPHERTEXT=$(echo "$ENCRYPT_RESPONSE" | jq -r '.data.ciphertext') - -if [[ "$CIPHERTEXT" == "null" || -z "$CIPHERTEXT" ]]; then - echo " [FAIL] Encryption test failed" - echo " Response: $ENCRYPT_RESPONSE" - exit 1 -fi - -echo " [OK] Encryption successful: ${CIPHERTEXT:0:50}..." - -# Decrypt -DECRYPT_RESPONSE=$(curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"ciphertext\":\"$CIPHERTEXT\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/decrypt/test-key-1") - -DECRYPTED_B64=$(echo "$DECRYPT_RESPONSE" | jq -r '.data.plaintext') -DECRYPTED_TEXT=$(echo "$DECRYPTED_B64" | base64 -d) - -if [[ "$DECRYPTED_TEXT" != "$TEST_PLAINTEXT" ]]; then - echo " [FAIL] Decryption test failed" - echo " Expected: $TEST_PLAINTEXT" - echo " Got: $DECRYPTED_TEXT" - exit 1 -fi - -echo " [OK] Decryption successful: $DECRYPTED_TEXT" - -echo "๐Ÿ“Š OpenBao KMS setup summary:" -echo " Address: $OPENBAO_ADDR" -echo " Transit Path: $TRANSIT_PATH" -echo " Keys Created: ${#TEST_KEYS[@]}" -echo " Status: Ready for integration testing" - -echo "" -echo "๐ŸŽฏ Ready to run KMS integration tests!" -echo "" -echo "Usage:" -echo " # Run Go integration tests" -echo " go test -v ./test/kms/..." -echo "" -echo " # Run with Docker Compose" -echo " cd test/kms && docker-compose up -d" -echo " docker-compose exec openbao bao status" -echo "" -echo " # Test S3 API with encryption" -echo " aws s3api put-bucket-encryption \\" -echo " --endpoint-url http://localhost:8333 \\" -echo " --bucket test-bucket \\" -echo " --server-side-encryption-configuration file://bucket-encryption.json" -echo "" -echo "[OK] OpenBao KMS setup complete!" diff --git a/test/kms/test_s3_kms.sh b/test/kms/test_s3_kms.sh deleted file mode 100755 index 7b5444a84..000000000 --- a/test/kms/test_s3_kms.sh +++ /dev/null @@ -1,217 +0,0 @@ -#!/bin/bash - -# End-to-end S3 KMS integration tests -set -e - -SEAWEEDFS_S3_ENDPOINT=${SEAWEEDFS_S3_ENDPOINT:-"http://127.0.0.1:8333"} -ACCESS_KEY=${ACCESS_KEY:-"any"} -SECRET_KEY=${SECRET_KEY:-"any"} - -echo "๐Ÿงช Running S3 KMS Integration Tests" -echo "S3 Endpoint: $SEAWEEDFS_S3_ENDPOINT" - -# Test file content -TEST_CONTENT="Hello, SeaweedFS KMS Integration! This is test data that should be encrypted." -TEST_FILE="/tmp/seaweedfs-kms-test.txt" -DOWNLOAD_FILE="/tmp/seaweedfs-kms-download.txt" - -# Create test file -echo "$TEST_CONTENT" > "$TEST_FILE" - -# AWS CLI configuration -export AWS_ACCESS_KEY_ID="$ACCESS_KEY" -export AWS_SECRET_ACCESS_KEY="$SECRET_KEY" -export AWS_DEFAULT_REGION="us-east-1" - -echo "๐Ÿ“ Creating test buckets..." - -# Create test buckets -BUCKETS=("test-openbao" "test-vault" "test-local" "secure-data") - -for bucket in "${BUCKETS[@]}"; do - echo " Creating bucket: $bucket" - aws s3 mb "s3://$bucket" --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" || { - echo " โš ๏ธ Bucket $bucket might already exist" - } -done - -echo "๐Ÿ” Setting up bucket encryption..." - -# Test 1: OpenBao KMS Encryption -echo " Setting OpenBao encryption for test-openbao bucket..." -cat > /tmp/openbao-encryption.json << EOF -{ - "Rules": [ - { - "ApplyServerSideEncryptionByDefault": { - "SSEAlgorithm": "aws:kms", - "KMSMasterKeyID": "test-key-1" - }, - "BucketKeyEnabled": false - } - ] -} -EOF - -aws s3api put-bucket-encryption \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --bucket test-openbao \ - --server-side-encryption-configuration file:///tmp/openbao-encryption.json || { - echo " โš ๏ธ Failed to set bucket encryption for test-openbao" -} - -# Test 2: Verify bucket encryption -echo " Verifying bucket encryption configuration..." -aws s3api get-bucket-encryption \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --bucket test-openbao | jq '.' || { - echo " โš ๏ธ Failed to get bucket encryption for test-openbao" -} - -echo "โฌ†๏ธ Testing object uploads with KMS encryption..." - -# Test 3: Upload objects with default bucket encryption -echo " Uploading object with default bucket encryption..." -aws s3 cp "$TEST_FILE" "s3://test-openbao/encrypted-object-1.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -# Test 4: Upload object with explicit SSE-KMS -echo " Uploading object with explicit SSE-KMS headers..." -aws s3 cp "$TEST_FILE" "s3://test-openbao/encrypted-object-2.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --sse aws:kms \ - --sse-kms-key-id "test-key-2" - -# Test 5: Upload to unencrypted bucket -echo " Uploading object to unencrypted bucket..." -aws s3 cp "$TEST_FILE" "s3://test-local/unencrypted-object.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -echo "โฌ‡๏ธ Testing object downloads and decryption..." - -# Test 6: Download encrypted objects -echo " Downloading encrypted object 1..." -aws s3 cp "s3://test-openbao/encrypted-object-1.txt" "$DOWNLOAD_FILE" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -# Verify content -if cmp -s "$TEST_FILE" "$DOWNLOAD_FILE"; then - echo " [OK] Encrypted object 1 downloaded and decrypted successfully" -else - echo " [FAIL] Encrypted object 1 content mismatch" - exit 1 -fi - -echo " Downloading encrypted object 2..." -aws s3 cp "s3://test-openbao/encrypted-object-2.txt" "$DOWNLOAD_FILE" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -# Verify content -if cmp -s "$TEST_FILE" "$DOWNLOAD_FILE"; then - echo " [OK] Encrypted object 2 downloaded and decrypted successfully" -else - echo " [FAIL] Encrypted object 2 content mismatch" - exit 1 -fi - -echo "๐Ÿ“Š Testing object metadata..." - -# Test 7: Check encryption metadata -echo " Checking encryption metadata..." -METADATA=$(aws s3api head-object \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --bucket test-openbao \ - --key encrypted-object-1.txt) - -echo "$METADATA" | jq '.' - -# Verify SSE headers are present -if echo "$METADATA" | grep -q "ServerSideEncryption"; then - echo " [OK] SSE metadata found in object headers" -else - echo " โš ๏ธ No SSE metadata found (might be internal only)" -fi - -echo "๐Ÿ“‹ Testing list operations..." - -# Test 8: List objects -echo " Listing objects in encrypted bucket..." -aws s3 ls "s3://test-openbao/" --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -echo "๐Ÿ”„ Testing multipart uploads with encryption..." - -# Test 9: Multipart upload with encryption -LARGE_FILE="/tmp/large-test-file.txt" -echo " Creating large test file..." -for i in {1..1000}; do - echo "Line $i: $TEST_CONTENT" >> "$LARGE_FILE" -done - -echo " Uploading large file with multipart and SSE-KMS..." -aws s3 cp "$LARGE_FILE" "s3://test-openbao/large-encrypted-file.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --sse aws:kms \ - --sse-kms-key-id "multipart-key" - -# Download and verify -echo " Downloading and verifying large encrypted file..." -DOWNLOAD_LARGE_FILE="/tmp/downloaded-large-file.txt" -aws s3 cp "s3://test-openbao/large-encrypted-file.txt" "$DOWNLOAD_LARGE_FILE" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -if cmp -s "$LARGE_FILE" "$DOWNLOAD_LARGE_FILE"; then - echo " [OK] Large encrypted file uploaded and downloaded successfully" -else - echo " [FAIL] Large encrypted file content mismatch" - exit 1 -fi - -echo "๐Ÿงน Cleaning up test files..." -rm -f "$TEST_FILE" "$DOWNLOAD_FILE" "$LARGE_FILE" "$DOWNLOAD_LARGE_FILE" /tmp/*-encryption.json - -echo "๐Ÿ“ˆ Running performance test..." - -# Test 10: Performance test -PERF_FILE="/tmp/perf-test.txt" -for i in {1..100}; do - echo "Performance test line $i: $TEST_CONTENT" >> "$PERF_FILE" -done - -echo " Testing upload/download performance with encryption..." -start_time=$(date +%s) - -aws s3 cp "$PERF_FILE" "s3://test-openbao/perf-test.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" \ - --sse aws:kms \ - --sse-kms-key-id "performance-key" - -aws s3 cp "s3://test-openbao/perf-test.txt" "/tmp/perf-download.txt" \ - --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" - -end_time=$(date +%s) -duration=$((end_time - start_time)) - -echo " โฑ๏ธ Performance test completed in ${duration} seconds" - -rm -f "$PERF_FILE" "/tmp/perf-download.txt" - -echo "" -echo "๐ŸŽ‰ S3 KMS Integration Tests Summary:" -echo " [OK] Bucket creation and encryption configuration" -echo " [OK] Default bucket encryption" -echo " [OK] Explicit SSE-KMS encryption" -echo " [OK] Object upload and download" -echo " [OK] Encryption/decryption verification" -echo " [OK] Metadata handling" -echo " [OK] Multipart upload with encryption" -echo " [OK] Performance test" -echo "" -echo "๐Ÿ” All S3 KMS integration tests passed successfully!" -echo "" - -# Optional: Show bucket sizes and object counts -echo "๐Ÿ“Š Final Statistics:" -for bucket in "${BUCKETS[@]}"; do - COUNT=$(aws s3 ls "s3://$bucket/" --endpoint-url "$SEAWEEDFS_S3_ENDPOINT" | wc -l) - echo " Bucket $bucket: $COUNT objects" -done diff --git a/test/kms/wait_for_services.sh b/test/kms/wait_for_services.sh deleted file mode 100755 index 2e72defc2..000000000 --- a/test/kms/wait_for_services.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -# Wait for services to be ready -set -e - -OPENBAO_ADDR=${OPENBAO_ADDR:-"http://127.0.0.1:8200"} -SEAWEEDFS_S3_ENDPOINT=${SEAWEEDFS_S3_ENDPOINT:-"http://127.0.0.1:8333"} -MAX_WAIT=120 # 2 minutes - -echo "๐Ÿ• Waiting for services to be ready..." - -# Wait for OpenBao -echo " Waiting for OpenBao at $OPENBAO_ADDR..." -for i in $(seq 1 $MAX_WAIT); do - if curl -s "$OPENBAO_ADDR/v1/sys/health" >/dev/null 2>&1; then - echo " [OK] OpenBao is ready!" - break - fi - if [ $i -eq $MAX_WAIT ]; then - echo " [FAIL] Timeout waiting for OpenBao" - exit 1 - fi - sleep 1 -done - -# Wait for SeaweedFS Master -echo " Waiting for SeaweedFS Master at http://127.0.0.1:9333..." -for i in $(seq 1 $MAX_WAIT); do - if curl -s "http://127.0.0.1:9333/cluster/status" >/dev/null 2>&1; then - echo " [OK] SeaweedFS Master is ready!" - break - fi - if [ $i -eq $MAX_WAIT ]; then - echo " [FAIL] Timeout waiting for SeaweedFS Master" - exit 1 - fi - sleep 1 -done - -# Wait for SeaweedFS Volume Server -echo " Waiting for SeaweedFS Volume Server at http://127.0.0.1:8080..." -for i in $(seq 1 $MAX_WAIT); do - if curl -s "http://127.0.0.1:8080/status" >/dev/null 2>&1; then - echo " [OK] SeaweedFS Volume Server is ready!" - break - fi - if [ $i -eq $MAX_WAIT ]; then - echo " [FAIL] Timeout waiting for SeaweedFS Volume Server" - exit 1 - fi - sleep 1 -done - -# Wait for SeaweedFS S3 API -echo " Waiting for SeaweedFS S3 API at $SEAWEEDFS_S3_ENDPOINT..." -for i in $(seq 1 $MAX_WAIT); do - if curl -s "$SEAWEEDFS_S3_ENDPOINT/" >/dev/null 2>&1; then - echo " [OK] SeaweedFS S3 API is ready!" - break - fi - if [ $i -eq $MAX_WAIT ]; then - echo " [FAIL] Timeout waiting for SeaweedFS S3 API" - exit 1 - fi - sleep 1 -done - -echo "๐ŸŽ‰ All services are ready!" - -# Show service status -echo "" -echo "๐Ÿ“Š Service Status:" -echo " OpenBao: $(curl -s $OPENBAO_ADDR/v1/sys/health | jq -r '.initialized // "Unknown"')" -echo " SeaweedFS Master: $(curl -s http://127.0.0.1:9333/cluster/status | jq -r '.IsLeader // "Unknown"')" -echo " SeaweedFS Volume: $(curl -s http://127.0.0.1:8080/status | jq -r '.Version // "Unknown"')" -echo " SeaweedFS S3 API: Ready" -echo "" diff --git a/test/mq/Makefile b/test/mq/Makefile deleted file mode 100644 index da5ebd1bd..000000000 --- a/test/mq/Makefile +++ /dev/null @@ -1,228 +0,0 @@ -# SeaweedFS Message Queue Test Makefile - -# Build configuration -GO_BUILD_CMD=go build -o bin/$(1) $(2) -GO_RUN_CMD=go run $(1) $(2) - -# Default values -AGENT_ADDR?=localhost:16777 -TOPIC_NAMESPACE?=test -TOPIC_NAME?=test-topic -PARTITION_COUNT?=4 -MESSAGE_COUNT?=100 -CONSUMER_GROUP?=test-consumer-group -CONSUMER_INSTANCE?=test-consumer-1 - -# Create bin directory -$(shell mkdir -p bin) - -.PHONY: all build clean producer consumer test help - -all: build - -# Build targets -build: build-producer build-consumer - -build-producer: - @echo "Building producer..." - $(call GO_BUILD_CMD,producer,./producer) - -build-consumer: - @echo "Building consumer..." - $(call GO_BUILD_CMD,consumer,./consumer) - -# Run targets -producer: build-producer - @echo "Starting producer..." - ./bin/producer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -partitions=$(PARTITION_COUNT) \ - -messages=$(MESSAGE_COUNT) \ - -publisher=test-producer \ - -size=1024 \ - -interval=100ms - -consumer: build-consumer - @echo "Starting consumer..." - ./bin/consumer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -group=$(CONSUMER_GROUP) \ - -instance=$(CONSUMER_INSTANCE) \ - -max-partitions=10 \ - -window-size=100 \ - -offset=latest \ - -show-messages=true \ - -log-progress=true - -# Run producer directly with go run -run-producer: - @echo "Running producer directly..." - $(call GO_RUN_CMD,./producer, \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -partitions=$(PARTITION_COUNT) \ - -messages=$(MESSAGE_COUNT) \ - -publisher=test-producer \ - -size=1024 \ - -interval=100ms) - -# Run consumer directly with go run -run-consumer: - @echo "Running consumer directly..." - $(call GO_RUN_CMD,./consumer, \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -group=$(CONSUMER_GROUP) \ - -instance=$(CONSUMER_INSTANCE) \ - -max-partitions=10 \ - -window-size=100 \ - -offset=latest \ - -show-messages=true \ - -log-progress=true) - -# Test scenarios -test: test-basic - -test-basic: build - @echo "Running basic producer/consumer test..." - @echo "1. Starting consumer in background..." - ./bin/consumer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -group=$(CONSUMER_GROUP) \ - -instance=$(CONSUMER_INSTANCE) \ - -offset=earliest \ - -show-messages=false \ - -log-progress=true & \ - CONSUMER_PID=$$!; \ - echo "Consumer PID: $$CONSUMER_PID"; \ - sleep 2; \ - echo "2. Starting producer..."; \ - ./bin/producer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=$(TOPIC_NAME) \ - -partitions=$(PARTITION_COUNT) \ - -messages=$(MESSAGE_COUNT) \ - -publisher=test-producer \ - -size=1024 \ - -interval=50ms; \ - echo "3. Waiting for consumer to process messages..."; \ - sleep 5; \ - echo "4. Stopping consumer..."; \ - kill $$CONSUMER_PID || true; \ - echo "Test completed!" - -test-performance: build - @echo "Running performance test..." - @echo "1. Starting consumer in background..." - ./bin/consumer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=perf-test \ - -group=perf-consumer-group \ - -instance=perf-consumer-1 \ - -offset=earliest \ - -show-messages=false \ - -log-progress=true & \ - CONSUMER_PID=$$!; \ - echo "Consumer PID: $$CONSUMER_PID"; \ - sleep 2; \ - echo "2. Starting high-throughput producer..."; \ - ./bin/producer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=perf-test \ - -partitions=8 \ - -messages=1000 \ - -publisher=perf-producer \ - -size=512 \ - -interval=10ms; \ - echo "3. Waiting for consumer to process messages..."; \ - sleep 10; \ - echo "4. Stopping consumer..."; \ - kill $$CONSUMER_PID || true; \ - echo "Performance test completed!" - -test-multiple-consumers: build - @echo "Running multiple consumers test..." - @echo "1. Starting multiple consumers in background..." - ./bin/consumer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=multi-test \ - -group=multi-consumer-group \ - -instance=consumer-1 \ - -offset=earliest \ - -show-messages=false \ - -log-progress=true & \ - CONSUMER1_PID=$$!; \ - ./bin/consumer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=multi-test \ - -group=multi-consumer-group \ - -instance=consumer-2 \ - -offset=earliest \ - -show-messages=false \ - -log-progress=true & \ - CONSUMER2_PID=$$!; \ - echo "Consumer PIDs: $$CONSUMER1_PID, $$CONSUMER2_PID"; \ - sleep 2; \ - echo "2. Starting producer..."; \ - ./bin/producer \ - -agent=$(AGENT_ADDR) \ - -namespace=$(TOPIC_NAMESPACE) \ - -topic=multi-test \ - -partitions=8 \ - -messages=200 \ - -publisher=multi-producer \ - -size=256 \ - -interval=50ms; \ - echo "3. Waiting for consumers to process messages..."; \ - sleep 10; \ - echo "4. Stopping consumers..."; \ - kill $$CONSUMER1_PID $$CONSUMER2_PID || true; \ - echo "Multiple consumers test completed!" - -# Clean up -clean: - @echo "Cleaning up..." - rm -rf bin/ - go clean -cache - -# Help -help: - @echo "SeaweedFS Message Queue Test Makefile" - @echo "" - @echo "Usage:" - @echo " make build - Build producer and consumer binaries" - @echo " make producer - Run producer (builds first)" - @echo " make consumer - Run consumer (builds first)" - @echo " make run-producer - Run producer directly with go run" - @echo " make run-consumer - Run consumer directly with go run" - @echo " make test - Run basic producer/consumer test" - @echo " make test-performance - Run performance test" - @echo " make test-multiple-consumers - Run multiple consumers test" - @echo " make clean - Clean up build artifacts" - @echo "" - @echo "Configuration (set via environment variables):" - @echo " AGENT_ADDR=10.21.152.113:16777 - MQ agent address" - @echo " TOPIC_NAMESPACE=test - Topic namespace" - @echo " TOPIC_NAME=test-topic - Topic name" - @echo " PARTITION_COUNT=4 - Number of partitions" - @echo " MESSAGE_COUNT=100 - Number of messages to produce" - @echo " CONSUMER_GROUP=test-consumer-group - Consumer group name" - @echo " CONSUMER_INSTANCE=test-consumer-1 - Consumer instance ID" - @echo "" - @echo "Examples:" - @echo " make producer MESSAGE_COUNT=1000 PARTITION_COUNT=8" - @echo " make consumer CONSUMER_GROUP=my-group" - @echo " make test AGENT_ADDR=10.21.152.113:16777 MESSAGE_COUNT=500" \ No newline at end of file diff --git a/test/mq/README.md b/test/mq/README.md deleted file mode 100644 index 7fa7e39eb..000000000 --- a/test/mq/README.md +++ /dev/null @@ -1,244 +0,0 @@ -# SeaweedFS Message Queue Test Suite - -This directory contains test programs for SeaweedFS Message Queue (MQ) functionality, including message producers and consumers. - -## Prerequisites - -1. **SeaweedFS with MQ Broker and Agent**: You need a running SeaweedFS instance with MQ broker and agent enabled -2. **Go**: Go 1.19 or later required for building the test programs - -## Quick Start - -### 1. Start SeaweedFS with MQ Broker and Agent - -```bash -# Start SeaweedFS server with MQ broker and agent -weed server -mq.broker -mq.agent -filer -volume - -# Or start components separately -weed master -weed volume -mserver=localhost:9333 -weed filer -master=localhost:9333 -weed mq.broker -filer=localhost:8888 -weed mq.agent -brokers=localhost:17777 -``` - -### 2. Build Test Programs - -```bash -# Build both producer and consumer -make build - -# Or build individually -make build-producer -make build-consumer -``` - -### 3. Run Basic Test - -```bash -# Run a basic producer/consumer test -make test - -# Or run producer and consumer manually -make consumer & # Start consumer in background -make producer # Start producer -``` - -## Test Programs - -### Producer (`producer/main.go`) - -Generates structured messages and publishes them to a SeaweedMQ topic via the MQ agent. - -**Usage:** -```bash -./bin/producer [options] -``` - -**Options:** -- `-agent`: MQ agent address (default: localhost:16777) -- `-namespace`: Topic namespace (default: test) -- `-topic`: Topic name (default: test-topic) -- `-partitions`: Number of partitions (default: 4) -- `-messages`: Number of messages to produce (default: 100) -- `-publisher`: Publisher name (default: test-producer) -- `-size`: Message size in bytes (default: 1024) -- `-interval`: Interval between messages (default: 100ms) - -**Example:** -```bash -./bin/producer -agent=localhost:16777 -namespace=test -topic=my-topic -messages=1000 -interval=50ms -``` - -### Consumer (`consumer/main.go`) - -Consumes structured messages from a SeaweedMQ topic via the MQ agent. - -**Usage:** -```bash -./bin/consumer [options] -``` - -**Options:** -- `-agent`: MQ agent address (default: localhost:16777) -- `-namespace`: Topic namespace (default: test) -- `-topic`: Topic name (default: test-topic) -- `-group`: Consumer group name (default: test-consumer-group) -- `-instance`: Consumer group instance ID (default: test-consumer-1) -- `-max-partitions`: Maximum number of partitions to consume (default: 10) -- `-window-size`: Sliding window size for concurrent processing (default: 100) -- `-offset`: Offset type: earliest, latest, timestamp (default: latest) -- `-offset-ts`: Offset timestamp in nanoseconds (for timestamp offset type) -- `-filter`: Message filter (default: empty) -- `-show-messages`: Show consumed messages (default: true) -- `-log-progress`: Log progress every 10 messages (default: true) - -**Example:** -```bash -./bin/consumer -agent=localhost:16777 -namespace=test -topic=my-topic -group=my-group -offset=earliest -``` - -## Makefile Commands - -### Building -- `make build`: Build both producer and consumer binaries -- `make build-producer`: Build producer only -- `make build-consumer`: Build consumer only - -### Running -- `make producer`: Build and run producer -- `make consumer`: Build and run consumer -- `make run-producer`: Run producer directly with go run -- `make run-consumer`: Run consumer directly with go run - -### Testing -- `make test`: Run basic producer/consumer test -- `make test-performance`: Run performance test (1000 messages, 8 partitions) -- `make test-multiple-consumers`: Run test with multiple consumers - -### Cleanup -- `make clean`: Remove build artifacts - -### Help -- `make help`: Show detailed help - -## Configuration - -Configure tests using environment variables: - -```bash -export AGENT_ADDR=localhost:16777 -export TOPIC_NAMESPACE=test -export TOPIC_NAME=test-topic -export PARTITION_COUNT=4 -export MESSAGE_COUNT=100 -export CONSUMER_GROUP=test-consumer-group -export CONSUMER_INSTANCE=test-consumer-1 -``` - -## Example Usage Scenarios - -### 1. Basic Producer/Consumer Test - -```bash -# Terminal 1: Start consumer -make consumer - -# Terminal 2: Run producer -make producer MESSAGE_COUNT=50 -``` - -### 2. Performance Testing - -```bash -# Test with high throughput -make test-performance -``` - -### 3. Multiple Consumer Groups - -```bash -# Terminal 1: Consumer group 1 -make consumer CONSUMER_GROUP=group1 - -# Terminal 2: Consumer group 2 -make consumer CONSUMER_GROUP=group2 - -# Terminal 3: Producer -make producer MESSAGE_COUNT=200 -``` - -### 4. Different Offset Types - -```bash -# Consume from earliest -make consumer OFFSET=earliest - -# Consume from latest -make consumer OFFSET=latest - -# Consume from timestamp -make consumer OFFSET=timestamp OFFSET_TS=1699000000000000000 -``` - -## Troubleshooting - -### Common Issues - -1. **Connection Refused**: Make sure SeaweedFS MQ agent is running on the specified address -2. **Agent Not Found**: Ensure both MQ broker and agent are running (agent requires broker) -3. **Topic Not Found**: The producer will create the topic automatically on first publish -4. **Consumer Not Receiving Messages**: Check if consumer group offset is correct (try `earliest`) -5. **Build Failures**: Ensure you're running from the SeaweedFS root directory - -### Debug Mode - -Enable verbose logging: -```bash -# Run with debug logging -GLOG_v=4 make producer -GLOG_v=4 make consumer -``` - -### Check Broker and Agent Status - -```bash -# Check if broker is running -curl http://localhost:9333/cluster/brokers - -# Check if agent is running (if running as server) -curl http://localhost:9333/cluster/agents - -# Or use weed shell -weed shell -master=localhost:9333 -> mq.broker.list -``` - -## Architecture - -The test setup demonstrates: - -1. **Agent-Based Architecture**: Uses MQ agent as intermediary between clients and brokers -2. **Structured Messages**: Messages use schema-based RecordValue format instead of raw bytes -3. **Topic Management**: Creating and configuring topics with multiple partitions -4. **Message Production**: Publishing structured messages with keys for partitioning -5. **Message Consumption**: Consuming structured messages with consumer groups and offset management -6. **Load Balancing**: Multiple consumers in same group share partition assignments -7. **Fault Tolerance**: Graceful handling of agent and broker failures and reconnections - -## Files - -- `producer/main.go`: Message producer implementation -- `consumer/main.go`: Message consumer implementation -- `Makefile`: Build and test automation -- `README.md`: This documentation -- `bin/`: Built binaries (created during build) - -## Next Steps - -1. Modify the producer to send structured data using `RecordType` -2. Implement message filtering in the consumer -3. Add metrics collection and monitoring -4. Test with multiple broker instances -5. Implement schema evolution testing \ No newline at end of file diff --git a/test/mq/consumer/main.go b/test/mq/consumer/main.go deleted file mode 100644 index e842b9721..000000000 --- a/test/mq/consumer/main.go +++ /dev/null @@ -1,192 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "sync" - "syscall" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -var ( - agentAddr = flag.String("agent", "localhost:16777", "MQ agent address") - topicNamespace = flag.String("namespace", "test", "topic namespace") - topicName = flag.String("topic", "test-topic", "topic name") - consumerGroup = flag.String("group", "test-consumer-group", "consumer group name") - consumerGroupInstanceId = flag.String("instance", "test-consumer-1", "consumer group instance id") - maxPartitions = flag.Int("max-partitions", 10, "maximum number of partitions to consume") - slidingWindowSize = flag.Int("window-size", 100, "sliding window size for concurrent processing") - offsetType = flag.String("offset", "latest", "offset type: earliest, latest, timestamp") - offsetTsNs = flag.Int64("offset-ts", 0, "offset timestamp in nanoseconds (for timestamp offset type)") - showMessages = flag.Bool("show-messages", true, "show consumed messages") - logProgress = flag.Bool("log-progress", true, "log progress every 10 messages") - filter = flag.String("filter", "", "message filter") -) - -func main() { - flag.Parse() - - fmt.Printf("Starting message consumer:\n") - fmt.Printf(" Agent: %s\n", *agentAddr) - fmt.Printf(" Topic: %s.%s\n", *topicNamespace, *topicName) - fmt.Printf(" Consumer Group: %s\n", *consumerGroup) - fmt.Printf(" Consumer Instance: %s\n", *consumerGroupInstanceId) - fmt.Printf(" Max Partitions: %d\n", *maxPartitions) - fmt.Printf(" Sliding Window Size: %d\n", *slidingWindowSize) - fmt.Printf(" Offset Type: %s\n", *offsetType) - fmt.Printf(" Filter: %s\n", *filter) - - // Create topic - topicObj := topic.NewTopic(*topicNamespace, *topicName) - - // Determine offset type - var pbOffsetType schema_pb.OffsetType - switch *offsetType { - case "earliest": - pbOffsetType = schema_pb.OffsetType_RESET_TO_EARLIEST - case "latest": - pbOffsetType = schema_pb.OffsetType_RESET_TO_LATEST - case "timestamp": - pbOffsetType = schema_pb.OffsetType_EXACT_TS_NS - default: - pbOffsetType = schema_pb.OffsetType_RESET_TO_LATEST - } - - // Create subscribe option - option := &agent_client.SubscribeOption{ - ConsumerGroup: *consumerGroup, - ConsumerGroupInstanceId: *consumerGroupInstanceId, - Topic: topicObj, - OffsetType: pbOffsetType, - OffsetTsNs: *offsetTsNs, - Filter: *filter, - MaxSubscribedPartitions: int32(*maxPartitions), - SlidingWindowSize: int32(*slidingWindowSize), - } - - // Create subscribe session - session, err := agent_client.NewSubscribeSession(*agentAddr, option) - if err != nil { - log.Fatalf("Failed to create subscribe session: %v", err) - } - defer session.CloseSession() - - // Statistics - var messageCount int64 - var mu sync.Mutex - startTime := time.Now() - - // Handle graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - // Channel to signal completion - done := make(chan error, 1) - - // Start consuming messages - fmt.Printf("\nStarting to consume messages...\n") - go func() { - err := session.SubscribeMessageRecord( - // onEachMessageFn - func(key []byte, record *schema_pb.RecordValue) { - mu.Lock() - messageCount++ - currentCount := messageCount - mu.Unlock() - - if *showMessages { - fmt.Printf("Received message: key=%s\n", string(key)) - printRecordValue(record) - } - - if *logProgress && currentCount%10 == 0 { - elapsed := time.Since(startTime) - rate := float64(currentCount) / elapsed.Seconds() - fmt.Printf("Consumed %d messages (%.2f msg/sec)\n", currentCount, rate) - } - }, - // onCompletionFn - func() { - fmt.Printf("Subscription completed\n") - done <- nil - }, - ) - if err != nil { - done <- err - } - }() - - // Wait for signal or completion - select { - case <-sigChan: - fmt.Printf("\nReceived shutdown signal, stopping consumer...\n") - case err := <-done: - if err != nil { - log.Printf("Subscription error: %v", err) - } - } - - // Print final statistics - mu.Lock() - finalCount := messageCount - mu.Unlock() - - duration := time.Since(startTime) - fmt.Printf("Consumed %d messages in %v\n", finalCount, duration) - if duration.Seconds() > 0 { - fmt.Printf("Average throughput: %.2f messages/sec\n", float64(finalCount)/duration.Seconds()) - } -} - -func printRecordValue(record *schema_pb.RecordValue) { - if record == nil || record.Fields == nil { - fmt.Printf(" (empty record)\n") - return - } - - for fieldName, value := range record.Fields { - fmt.Printf(" %s: %s\n", fieldName, formatValue(value)) - } -} - -func formatValue(value *schema_pb.Value) string { - if value == nil { - return "(nil)" - } - - switch kind := value.Kind.(type) { - case *schema_pb.Value_BoolValue: - return fmt.Sprintf("%t", kind.BoolValue) - case *schema_pb.Value_Int32Value: - return fmt.Sprintf("%d", kind.Int32Value) - case *schema_pb.Value_Int64Value: - return fmt.Sprintf("%d", kind.Int64Value) - case *schema_pb.Value_FloatValue: - return fmt.Sprintf("%f", kind.FloatValue) - case *schema_pb.Value_DoubleValue: - return fmt.Sprintf("%f", kind.DoubleValue) - case *schema_pb.Value_BytesValue: - if len(kind.BytesValue) > 50 { - return fmt.Sprintf("bytes[%d] %x...", len(kind.BytesValue), kind.BytesValue[:50]) - } - return fmt.Sprintf("bytes[%d] %x", len(kind.BytesValue), kind.BytesValue) - case *schema_pb.Value_StringValue: - if len(kind.StringValue) > 100 { - return fmt.Sprintf("\"%s...\"", kind.StringValue[:100]) - } - return fmt.Sprintf("\"%s\"", kind.StringValue) - case *schema_pb.Value_ListValue: - return fmt.Sprintf("list[%d items]", len(kind.ListValue.Values)) - case *schema_pb.Value_RecordValue: - return fmt.Sprintf("record[%d fields]", len(kind.RecordValue.Fields)) - default: - return "(unknown)" - } -} diff --git a/test/mq/producer/main.go b/test/mq/producer/main.go deleted file mode 100644 index 31fba4ca7..000000000 --- a/test/mq/producer/main.go +++ /dev/null @@ -1,172 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/client/agent_client" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -var ( - agentAddr = flag.String("agent", "localhost:16777", "MQ agent address") - topicNamespace = flag.String("namespace", "test", "topic namespace") - topicName = flag.String("topic", "test-topic", "topic name") - partitionCount = flag.Int("partitions", 4, "number of partitions") - messageCount = flag.Int("messages", 100, "number of messages to produce") - publisherName = flag.String("publisher", "test-producer", "publisher name") - messageSize = flag.Int("size", 1024, "message size in bytes") - interval = flag.Duration("interval", 100*time.Millisecond, "interval between messages") -) - -// TestMessage represents the structure of messages we'll be sending -type TestMessage struct { - ID int64 `json:"id"` - Message string `json:"message"` - Payload []byte `json:"payload"` - Timestamp int64 `json:"timestamp"` -} - -func main() { - flag.Parse() - - fmt.Printf("Starting message producer:\n") - fmt.Printf(" Agent: %s\n", *agentAddr) - fmt.Printf(" Topic: %s.%s\n", *topicNamespace, *topicName) - fmt.Printf(" Partitions: %d\n", *partitionCount) - fmt.Printf(" Messages: %d\n", *messageCount) - fmt.Printf(" Publisher: %s\n", *publisherName) - fmt.Printf(" Message Size: %d bytes\n", *messageSize) - fmt.Printf(" Interval: %v\n", *interval) - - // Create an instance of the message struct to generate schema from - messageInstance := TestMessage{} - - // Automatically generate RecordType from the struct - recordType := schema.StructToSchema(messageInstance) - if recordType == nil { - log.Fatalf("Failed to generate schema from struct") - } - - fmt.Printf("\nGenerated schema with %d fields:\n", len(recordType.Fields)) - for _, field := range recordType.Fields { - fmt.Printf(" - %s: %s\n", field.Name, getTypeString(field.Type)) - } - - topicSchema := schema.NewSchema(*topicNamespace, *topicName, recordType) - - // Create publish session - session, err := agent_client.NewPublishSession(*agentAddr, topicSchema, *partitionCount, *publisherName) - if err != nil { - log.Fatalf("Failed to create publish session: %v", err) - } - defer session.CloseSession() - - // Create message payload - payload := make([]byte, *messageSize) - for i := range payload { - payload[i] = byte(i % 256) - } - - // Start producing messages - fmt.Printf("\nStarting to produce messages...\n") - startTime := time.Now() - - for i := 0; i < *messageCount; i++ { - key := fmt.Sprintf("key-%d", i) - - // Create a message struct - message := TestMessage{ - ID: int64(i), - Message: fmt.Sprintf("This is message number %d", i), - Payload: payload[:min(100, len(payload))], // First 100 bytes - Timestamp: time.Now().UnixNano(), - } - - // Convert struct to RecordValue - record := structToRecordValue(message) - - err := session.PublishMessageRecord([]byte(key), record) - if err != nil { - log.Printf("Failed to publish message %d: %v", i, err) - continue - } - - if (i+1)%10 == 0 { - fmt.Printf("Published %d messages\n", i+1) - } - - if *interval > 0 { - time.Sleep(*interval) - } - } - - duration := time.Since(startTime) - fmt.Printf("\nCompleted producing %d messages in %v\n", *messageCount, duration) - fmt.Printf("Throughput: %.2f messages/sec\n", float64(*messageCount)/duration.Seconds()) -} - -// Helper function to convert struct to RecordValue -func structToRecordValue(msg TestMessage) *schema_pb.RecordValue { - return &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "ID": { - Kind: &schema_pb.Value_Int64Value{ - Int64Value: msg.ID, - }, - }, - "Message": { - Kind: &schema_pb.Value_StringValue{ - StringValue: msg.Message, - }, - }, - "Payload": { - Kind: &schema_pb.Value_BytesValue{ - BytesValue: msg.Payload, - }, - }, - "Timestamp": { - Kind: &schema_pb.Value_Int64Value{ - Int64Value: msg.Timestamp, - }, - }, - }, - } -} - -func getTypeString(t *schema_pb.Type) string { - switch kind := t.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch kind.ScalarType { - case schema_pb.ScalarType_BOOL: - return "bool" - case schema_pb.ScalarType_INT32: - return "int32" - case schema_pb.ScalarType_INT64: - return "int64" - case schema_pb.ScalarType_FLOAT: - return "float" - case schema_pb.ScalarType_DOUBLE: - return "double" - case schema_pb.ScalarType_BYTES: - return "bytes" - case schema_pb.ScalarType_STRING: - return "string" - } - case *schema_pb.Type_ListType: - return fmt.Sprintf("list<%s>", getTypeString(kind.ListType.ElementType)) - case *schema_pb.Type_RecordType: - return "record" - } - return "unknown" -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/test/postgres/.dockerignore b/test/postgres/.dockerignore deleted file mode 100644 index fe972add1..000000000 --- a/test/postgres/.dockerignore +++ /dev/null @@ -1,31 +0,0 @@ -# Ignore unnecessary files for Docker builds -.git -.gitignore -README.md -docker-compose.yml -run-tests.sh -Makefile -*.md -.env* - -# Ignore test data and logs -data/ -logs/ -*.log - -# Ignore temporary files -.DS_Store -Thumbs.db -*.tmp -*.swp -*.swo -*~ - -# Ignore IDE files -.vscode/ -.idea/ -*.iml - -# Ignore other Docker files -Dockerfile* -docker-compose* diff --git a/test/postgres/Dockerfile.client b/test/postgres/Dockerfile.client deleted file mode 100644 index 2b85bc76e..000000000 --- a/test/postgres/Dockerfile.client +++ /dev/null @@ -1,37 +0,0 @@ -FROM golang:1.24-alpine AS builder - -# Set working directory -WORKDIR /app - -# Copy go mod files first for better caching -COPY go.mod go.sum ./ -RUN go mod download - -# Copy source code -COPY . . - -# Build the client -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o client ./test/postgres/client.go - -# Final stage -FROM alpine:latest - -# Install ca-certificates and netcat for health checks -RUN apk --no-cache add ca-certificates netcat-openbsd - -WORKDIR /root/ - -# Copy the binary from builder stage -COPY --from=builder /app/client . - -# Make it executable -RUN chmod +x ./client - -# Set environment variables with defaults -ENV POSTGRES_HOST=localhost -ENV POSTGRES_PORT=5432 -ENV POSTGRES_USER=seaweedfs -ENV POSTGRES_DB=default - -# Run the client -CMD ["./client"] diff --git a/test/postgres/Dockerfile.producer b/test/postgres/Dockerfile.producer deleted file mode 100644 index 98a91643b..000000000 --- a/test/postgres/Dockerfile.producer +++ /dev/null @@ -1,35 +0,0 @@ -FROM golang:1.24-alpine AS builder - -# Set working directory -WORKDIR /app - -# Copy go mod files first for better caching -COPY go.mod go.sum ./ -RUN go mod download - -# Copy source code -COPY . . - -# Build the producer -RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o producer ./test/postgres/producer.go - -# Final stage -FROM alpine:latest - -# Install ca-certificates for HTTPS calls -RUN apk --no-cache add ca-certificates curl - -WORKDIR /root/ - -# Copy the binary from builder stage -COPY --from=builder /app/producer . - -# Make it executable -RUN chmod +x ./producer - -# Set environment variables with defaults -ENV SEAWEEDFS_MASTER=localhost:9333 -ENV SEAWEEDFS_FILER=localhost:8888 - -# Run the producer -CMD ["./producer"] diff --git a/test/postgres/Dockerfile.seaweedfs b/test/postgres/Dockerfile.seaweedfs deleted file mode 100644 index 49ff74930..000000000 --- a/test/postgres/Dockerfile.seaweedfs +++ /dev/null @@ -1,40 +0,0 @@ -FROM golang:1.24-alpine AS builder - -# Install git and other build dependencies -RUN apk add --no-cache git make - -# Set working directory -WORKDIR /app - -# Copy go mod files first for better caching -COPY go.mod go.sum ./ -RUN go mod download - -# Copy source code -COPY . . - -# Build the weed binary without CGO -RUN CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w" -o weed ./weed/ - -# Final stage - minimal runtime image -FROM alpine:latest - -# Install ca-certificates for HTTPS calls and netcat for health checks -RUN apk --no-cache add ca-certificates netcat-openbsd curl - -WORKDIR /root/ - -# Copy the weed binary from builder stage -COPY --from=builder /app/weed . - -# Make it executable -RUN chmod +x ./weed - -# Expose ports -EXPOSE 9333 8888 8333 8085 9533 5432 - -# Create data directory -RUN mkdir -p /data - -# Default command (can be overridden) -CMD ["./weed", "server", "-dir=/data"] diff --git a/test/postgres/Makefile b/test/postgres/Makefile deleted file mode 100644 index fd177f49b..000000000 --- a/test/postgres/Makefile +++ /dev/null @@ -1,80 +0,0 @@ -# SeaweedFS PostgreSQL Test Suite Makefile - -.PHONY: help start stop clean produce test psql logs status all dev - -# Default target -help: ## Show this help message - @echo "SeaweedFS PostgreSQL Test Suite" - @echo "===============================" - @echo "Available targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " %-12s %s\n", $$1, $$2}' $(MAKEFILE_LIST) - @echo "" - @echo "Quick start: make all" - -start: ## Start SeaweedFS and PostgreSQL servers - @./run-tests.sh start - -stop: ## Stop all services - @./run-tests.sh stop - -clean: ## Stop services and remove all data - @./run-tests.sh clean - -produce: ## Create MQ test data - @./run-tests.sh produce - -test: ## Run PostgreSQL client tests - @./run-tests.sh test - -psql: ## Connect with interactive psql client - @./run-tests.sh psql - -logs: ## Show service logs - @./run-tests.sh logs - -status: ## Show service status - @./run-tests.sh status - -all: ## Run complete test suite (start -> produce -> test) - @./run-tests.sh all - -# Development targets -dev-start: ## Start services for development - @echo "Starting development environment..." - @docker compose up -d seaweedfs postgres-server || (echo "=== Container startup failed, showing logs ===" && docker compose logs && exit 1) - @echo "Services started. Run 'make dev-logs' to watch logs." - -dev-logs: ## Follow logs for development - @docker compose logs -f seaweedfs postgres-server - -dev-rebuild: ## Rebuild and restart services - @docker compose down - @docker compose up -d --build seaweedfs postgres-server - -# Individual service targets -start-seaweedfs: ## Start only SeaweedFS - @docker compose up -d seaweedfs - -restart-postgres: ## Start only PostgreSQL server - @docker compose down -d postgres-server - @docker compose up -d --build seaweedfs postgres-server - -# Testing targets -test-basic: ## Run basic connectivity test - @docker run --rm --network postgres_seaweedfs-net postgres:15-alpine \ - psql -h postgres-server -p 5432 -U seaweedfs -d default -c "SELECT version();" - -test-producer: ## Test data producer only - @docker compose up --build mq-producer - -test-client: ## Test client only - @docker compose up --build postgres-client - -# Cleanup targets -clean-images: ## Remove Docker images - @docker compose down - @docker image prune -f - -clean-all: ## Complete cleanup including images - @docker compose down -v --rmi all - @docker system prune -f diff --git a/test/postgres/README.md b/test/postgres/README.md deleted file mode 100644 index 2466c6069..000000000 --- a/test/postgres/README.md +++ /dev/null @@ -1,320 +0,0 @@ -# SeaweedFS PostgreSQL Protocol Test Suite - -This directory contains a comprehensive Docker Compose test setup for the SeaweedFS PostgreSQL wire protocol implementation. - -## Overview - -The test suite includes: -- **SeaweedFS Cluster**: Full SeaweedFS server with MQ broker and agent -- **PostgreSQL Server**: SeaweedFS PostgreSQL wire protocol server -- **MQ Data Producer**: Creates realistic test data across multiple topics and namespaces -- **PostgreSQL Test Client**: Comprehensive Go client testing all functionality -- **Interactive Tools**: psql CLI access for manual testing - -## Quick Start - -### 1. Run Complete Test Suite (Automated) -```bash -./run-tests.sh all -``` - -This will automatically: -1. Start SeaweedFS and PostgreSQL servers -2. Create test data in multiple MQ topics -3. Run comprehensive PostgreSQL client tests -4. Show results - -### 2. Manual Step-by-Step Testing -```bash -# Start the services -./run-tests.sh start - -# Create test data -./run-tests.sh produce - -# Run automated tests -./run-tests.sh test - -# Connect with psql for interactive testing -./run-tests.sh psql -``` - -### 3. Interactive PostgreSQL Testing -```bash -# Connect with psql -./run-tests.sh psql - -# Inside psql session: -postgres=> SHOW DATABASES; -postgres=> \c analytics; -postgres=> SHOW TABLES; -postgres=> SELECT COUNT(*) FROM user_events; -postgres=> SELECT COUNT(*) FROM user_events; -postgres=> \q -``` - -## Test Data Structure - -The producer creates realistic test data across multiple namespaces: - -### Analytics Namespace -- **`user_events`** (1000 records): User interaction events - - Fields: id, user_id, user_type, action, status, amount, timestamp, metadata - - User types: premium, standard, trial, enterprise - - Actions: login, logout, purchase, view, search, click, download - -- **`system_logs`** (500 records): System operation logs - - Fields: id, level, service, message, error_code, timestamp - - Levels: debug, info, warning, error, critical - - Services: auth-service, payment-service, user-service, etc. - -- **`metrics`** (800 records): System metrics - - Fields: id, name, value, tags, timestamp - - Metrics: cpu_usage, memory_usage, disk_usage, request_latency, etc. - -### E-commerce Namespace -- **`product_views`** (1200 records): Product interaction data - - Fields: id, product_id, user_id, category, price, view_count, timestamp - - Categories: electronics, books, clothing, home, sports, automotive - -- **`user_events`** (600 records): E-commerce specific user events - -### Logs Namespace -- **`application_logs`** (2000 records): Application logs -- **`error_logs`** (300 records): Error-specific logs with 4xx/5xx error codes - -## Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ PostgreSQL โ”‚ โ”‚ PostgreSQL โ”‚ โ”‚ SeaweedFS โ”‚ -โ”‚ Clients โ”‚โ—„โ”€โ”€โ–บโ”‚ Wire Protocol โ”‚โ—„โ”€โ”€โ–บโ”‚ SQL Engine โ”‚ -โ”‚ (psql, Go) โ”‚ โ”‚ Server โ”‚ โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ โ”‚ - โ–ผ โ–ผ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ Session โ”‚ โ”‚ MQ Broker โ”‚ - โ”‚ Management โ”‚ โ”‚ & Topics โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## Services - -### SeaweedFS Server -- **Ports**: 9333 (master), 8888 (filer), 8333 (S3), 8085 (volume), 9533 (metrics), 26777โ†’16777 (MQ agent), 27777โ†’17777 (MQ broker) -- **Features**: Full MQ broker, S3 API, filer, volume server -- **Data**: Persistent storage in Docker volume -- **Health Check**: Cluster status endpoint - -### PostgreSQL Server -- **Port**: 5432 (standard PostgreSQL port) -- **Protocol**: Full PostgreSQL 3.0 wire protocol -- **Authentication**: Trust mode (no password for testing) -- **Features**: Real-time MQ topic discovery, database context switching - -### MQ Producer -- **Purpose**: Creates realistic test data -- **Topics**: 7 topics across 3 namespaces -- **Data Types**: JSON messages with varied schemas -- **Volume**: ~4,400 total records with realistic distributions - -### Test Client -- **Language**: Go with standard `lib/pq` PostgreSQL driver -- **Tests**: 8 comprehensive test categories -- **Coverage**: System info, discovery, queries, aggregations, context switching - -## Available Commands - -```bash -./run-tests.sh start # Start services -./run-tests.sh produce # Create test data -./run-tests.sh test # Run client tests -./run-tests.sh psql # Interactive psql -./run-tests.sh logs # Show service logs -./run-tests.sh status # Service status -./run-tests.sh stop # Stop services -./run-tests.sh clean # Complete cleanup -./run-tests.sh all # Full automated test -``` - -## Test Categories - -### 1. System Information -- PostgreSQL version compatibility -- Current user and database -- Server settings and encoding - -### 2. Database Discovery -- `SHOW DATABASES` - List MQ namespaces -- Dynamic namespace discovery from filer - -### 3. Table Discovery -- `SHOW TABLES` - List topics in current namespace -- Real-time topic discovery - -### 4. Data Queries -- Basic `SELECT * FROM table` queries -- Sample data retrieval and display -- Column information - -### 5. Aggregation Queries -- `COUNT(*)`, `SUM()`, `AVG()`, `MIN()`, `MAX()` -- Aggregation operations -- Statistical analysis - -### 6. Database Context Switching -- `USE database` commands -- Session isolation testing -- Cross-namespace queries - -### 7. System Columns -- `_timestamp_ns`, `_key`, `_source` access -- MQ metadata exposure - -### 8. Complex Queries -- `WHERE` clauses with comparisons -- `LIMIT` -- Multi-condition filtering - -## Expected Results - -After running the complete test suite, you should see: - -``` -=== Test Results === -โœ… Test PASSED: System Information -โœ… Test PASSED: Database Discovery -โœ… Test PASSED: Table Discovery -โœ… Test PASSED: Data Queries -โœ… Test PASSED: Aggregation Queries -โœ… Test PASSED: Database Context Switching -โœ… Test PASSED: System Columns -โœ… Test PASSED: Complex Queries - -Test Results: 8/8 tests passed -๐ŸŽ‰ All tests passed! -``` - -## Manual Testing Examples - -### Connect with psql -```bash -./run-tests.sh psql -``` - -### Basic Exploration -```sql --- Check system information -SELECT version(); -SELECT current_user, current_database(); - --- Discover data structure -SHOW DATABASES; -\c analytics; -SHOW TABLES; -DESCRIBE user_events; -``` - -### Data Analysis -```sql --- Basic queries -SELECT COUNT(*) FROM user_events; -SELECT * FROM user_events LIMIT 5; - --- Aggregations -SELECT - COUNT(*) as events, - AVG(amount) as avg_amount -FROM user_events -WHERE amount IS NOT NULL; - --- Time-based analysis -SELECT - COUNT(*) as count -FROM user_events -WHERE status = 'active'; -``` - -### Cross-Namespace Analysis -```sql --- Switch between namespaces -USE ecommerce; -SELECT COUNT(*) FROM product_views; - -USE logs; -SELECT COUNT(*) FROM application_logs; -``` - -## Troubleshooting - -### Services Not Starting -```bash -# Check service status -./run-tests.sh status - -# View logs -./run-tests.sh logs seaweedfs -./run-tests.sh logs postgres-server -``` - -### No Test Data -```bash -# Recreate test data -./run-tests.sh produce - -# Check producer logs -./run-tests.sh logs mq-producer -``` - -### Connection Issues -```bash -# Test PostgreSQL server health -docker-compose exec postgres-server nc -z localhost 5432 - -# Test SeaweedFS health -curl http://localhost:9333/cluster/status -``` - -### Clean Restart -```bash -# Complete cleanup and restart -./run-tests.sh clean -./run-tests.sh all -``` - -## Development - -### Modifying Test Data -Edit `producer.go` to change: -- Data schemas and volume -- Topic names and namespaces -- Record generation logic - -### Adding Tests -Edit `client.go` to add new test functions: -```go -func testNewFeature(db *sql.DB) error { - // Your test implementation - return nil -} - -// Add to tests slice in main() -{"New Feature", testNewFeature}, -``` - -### Custom Queries -Use the interactive psql session: -```bash -./run-tests.sh psql -``` - -## Production Considerations - -This test setup demonstrates: -- **Real MQ Integration**: Actual topic discovery and data access -- **Universal PostgreSQL Compatibility**: Works with any PostgreSQL client -- **Production-Ready Features**: Authentication, session management, error handling -- **Scalable Architecture**: Direct SQL engine integration, no translation overhead - -The test validates that SeaweedFS can serve as a drop-in PostgreSQL replacement for read-only analytics workloads on MQ data. diff --git a/test/postgres/SETUP_OVERVIEW.md b/test/postgres/SETUP_OVERVIEW.md deleted file mode 100644 index 8715e5a9f..000000000 --- a/test/postgres/SETUP_OVERVIEW.md +++ /dev/null @@ -1,307 +0,0 @@ -# SeaweedFS PostgreSQL Test Setup - Complete Overview - -## ๐ŸŽฏ What Was Created - -A comprehensive Docker Compose test environment that validates the SeaweedFS PostgreSQL wire protocol implementation with real MQ data. - -## ๐Ÿ“ Complete File Structure - -``` -test/postgres/ -โ”œโ”€โ”€ docker-compose.yml # Multi-service orchestration -โ”œโ”€โ”€ config/ -โ”‚ โ””โ”€โ”€ s3config.json # SeaweedFS S3 API configuration -โ”œโ”€โ”€ producer.go # MQ test data generator (7 topics, 4400+ records) -โ”œโ”€โ”€ client.go # Comprehensive PostgreSQL test client -โ”œโ”€โ”€ Dockerfile.producer # Producer service container -โ”œโ”€โ”€ Dockerfile.client # Test client container -โ”œโ”€โ”€ run-tests.sh # Main automation script โญ -โ”œโ”€โ”€ validate-setup.sh # Prerequisites checker -โ”œโ”€โ”€ Makefile # Development workflow commands -โ”œโ”€โ”€ README.md # Complete documentation -โ”œโ”€โ”€ .dockerignore # Docker build optimization -โ””โ”€โ”€ SETUP_OVERVIEW.md # This file -``` - -## ๐Ÿš€ Quick Start - -### Option 1: One-Command Test (Recommended) -```bash -cd test/postgres -./run-tests.sh all -``` - -### Option 2: Using Makefile -```bash -cd test/postgres -make all -``` - -### Option 3: Manual Step-by-Step -```bash -cd test/postgres -./validate-setup.sh # Check prerequisites -./run-tests.sh start # Start services -./run-tests.sh produce # Create test data -./run-tests.sh test # Run tests -./run-tests.sh psql # Interactive testing -``` - -## ๐Ÿ—๏ธ Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ Docker Host โ”‚ โ”‚ SeaweedFS โ”‚ โ”‚ PostgreSQL โ”‚ -โ”‚ โ”‚ โ”‚ Cluster โ”‚ โ”‚ Wire Protocol โ”‚ -โ”‚ psql clients โ”‚โ—„โ”€โ”€โ”ค - Master:9333 โ”‚โ—„โ”€โ”€โ”ค Server:5432 โ”‚ -โ”‚ Go clients โ”‚ โ”‚ - Filer:8888 โ”‚ โ”‚ โ”‚ -โ”‚ BI tools โ”‚ โ”‚ - S3:8333 โ”‚ โ”‚ โ”‚ -โ”‚ โ”‚ โ”‚ - Volume:8085 โ”‚ โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - โ”‚ - โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” - โ”‚ MQ Topics โ”‚ - โ”‚ & Real Data โ”‚ - โ”‚ โ”‚ - โ”‚ โ€ข analytics/* โ”‚ - โ”‚ โ€ข ecommerce/* โ”‚ - โ”‚ โ€ข logs/* โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## ๐ŸŽฏ Services Created - -| Service | Purpose | Port | Health Check | -|---------|---------|------|--------------| -| **seaweedfs** | Complete SeaweedFS cluster | 9333,8888,8333,8085,26777โ†’16777,27777โ†’17777 | `/cluster/status` | -| **postgres-server** | PostgreSQL wire protocol | 5432 | TCP connection | -| **mq-producer** | Test data generator | - | One-time execution | -| **postgres-client** | Automated test suite | - | On-demand | -| **psql-cli** | Interactive PostgreSQL CLI | - | On-demand | - -## ๐Ÿ“Š Test Data Created - -### Analytics Namespace -- **user_events** (1,000 records) - - User interactions: login, purchase, view, search - - User types: premium, standard, trial, enterprise - - Status tracking: active, inactive, pending, completed - -- **system_logs** (500 records) - - Log levels: debug, info, warning, error, critical - - Services: auth, payment, user, notification, api-gateway - - Error codes and timestamps - -- **metrics** (800 records) - - System metrics: CPU, memory, disk usage - - Performance: request latency, error rate, throughput - - Multi-region tagging - -### E-commerce Namespace -- **product_views** (1,200 records) - - Product interactions across categories - - Price ranges and view counts - - User behavior tracking - -- **user_events** (600 records) - - E-commerce specific user actions - - Purchase flows and interactions - -### Logs Namespace -- **application_logs** (2,000 records) - - Application-level logging - - Service health monitoring - -- **error_logs** (300 records) - - Error-specific logs with 4xx/5xx codes - - Critical system failures - -**Total: ~4,400 realistic test records across 7 topics in 3 namespaces** - -## ๐Ÿงช Comprehensive Testing - -The test client validates: - -### 1. System Information -- โœ… PostgreSQL version compatibility -- โœ… Current user and database context -- โœ… Server settings and encoding - -### 2. Real MQ Integration -- โœ… Live namespace discovery (`SHOW DATABASES`) -- โœ… Dynamic topic discovery (`SHOW TABLES`) -- โœ… Actual data access from Parquet and log files - -### 3. Data Access Patterns -- โœ… Basic SELECT queries with real data -- โœ… Column information and data types -- โœ… Sample data retrieval and display - -### 4. Advanced SQL Features -- โœ… Aggregation functions (COUNT, SUM, AVG, MIN, MAX) -- โœ… WHERE clauses with comparisons -- โœ… LIMIT functionality - -### 5. Database Context Management -- โœ… USE database commands -- โœ… Session isolation between connections -- โœ… Cross-namespace query switching - -### 6. System Columns Access -- โœ… MQ metadata exposure (_timestamp_ns, _key, _source) -- โœ… System column queries and filtering - -### 7. Complex Query Patterns -- โœ… Multi-condition WHERE clauses -- โœ… Statistical analysis queries -- โœ… Time-based data filtering - -### 8. PostgreSQL Client Compatibility -- โœ… Native psql CLI compatibility -- โœ… Go database/sql driver (lib/pq) -- โœ… Standard PostgreSQL wire protocol - -## ๐Ÿ› ๏ธ Available Commands - -### Main Test Script (`run-tests.sh`) -```bash -./run-tests.sh start # Start services -./run-tests.sh produce # Create test data -./run-tests.sh test # Run comprehensive tests -./run-tests.sh psql # Interactive psql session -./run-tests.sh logs [service] # View service logs -./run-tests.sh status # Service status -./run-tests.sh stop # Stop services -./run-tests.sh clean # Complete cleanup -./run-tests.sh all # Full automated test โญ -``` - -### Makefile Targets -```bash -make help # Show available targets -make all # Complete test suite -make start # Start services -make test # Run tests -make psql # Interactive psql -make clean # Cleanup -make dev-start # Development mode -``` - -### Validation Script -```bash -./validate-setup.sh # Check prerequisites and smoke test -``` - -## ๐Ÿ“‹ Expected Test Results - -After running `./run-tests.sh all`, you should see: - -``` -=== Test Results === -โœ… Test PASSED: System Information -โœ… Test PASSED: Database Discovery -โœ… Test PASSED: Table Discovery -โœ… Test PASSED: Data Queries -โœ… Test PASSED: Aggregation Queries -โœ… Test PASSED: Database Context Switching -โœ… Test PASSED: System Columns -โœ… Test PASSED: Complex Queries - -Test Results: 8/8 tests passed -๐ŸŽ‰ All tests passed! -``` - -## ๐Ÿ” Manual Testing Examples - -### Basic Exploration -```bash -./run-tests.sh psql -``` - -```sql --- System information -SELECT version(); -SELECT current_user, current_database(); - --- Discover structure -SHOW DATABASES; -\c analytics; -SHOW TABLES; -DESCRIBE user_events; - --- Query real data -SELECT COUNT(*) FROM user_events; -SELECT * FROM user_events WHERE user_type = 'premium' LIMIT 5; -``` - -### Data Analysis -```sql --- User behavior analysis -SELECT - COUNT(*) as events, - AVG(amount) as avg_amount -FROM user_events -WHERE amount IS NOT NULL; - --- System health monitoring -USE logs; -SELECT - COUNT(*) as count -FROM application_logs; - --- Cross-namespace analysis -USE ecommerce; -SELECT - COUNT(*) as views, - AVG(price) as avg_price -FROM product_views; -``` - -## ๐ŸŽฏ Production Validation - -This test setup proves: - -### โœ… Real MQ Integration -- Actual topic discovery from filer storage -- Real schema reading from broker configuration -- Live data access from Parquet files and log entries -- Automatic topic registration on first access - -### โœ… Universal PostgreSQL Compatibility -- Standard PostgreSQL wire protocol (v3.0) -- Compatible with any PostgreSQL client -- Proper authentication and session management -- Standard SQL syntax support - -### โœ… Enterprise Features -- Multi-namespace (database) organization -- Session-based database context switching -- System metadata access for debugging -- Comprehensive error handling - -### โœ… Performance and Scalability -- Direct SQL engine integration (same as `weed sql`) -- No translation overhead for real queries -- Efficient data access from stored formats -- Scalable architecture with service discovery - -## ๐Ÿš€ Ready for Production - -The test environment demonstrates that SeaweedFS can serve as a **drop-in PostgreSQL replacement** for: -- **Analytics workloads** on MQ data -- **BI tool integration** with standard PostgreSQL drivers -- **Application integration** using existing PostgreSQL libraries -- **Data exploration** with familiar SQL tools like psql - -## ๐Ÿ† Success Metrics - -- โœ… **8/8 comprehensive tests pass** -- โœ… **4,400+ real records** across multiple schemas -- โœ… **3 namespaces, 7 topics** with varied data -- โœ… **Universal client compatibility** (psql, Go, BI tools) -- โœ… **Production-ready features** validated -- โœ… **One-command deployment** achieved -- โœ… **Complete automation** with health checks -- โœ… **Comprehensive documentation** provided - -This test setup validates that the PostgreSQL wire protocol implementation is **production-ready** and provides **enterprise-grade database access** to SeaweedFS MQ data. diff --git a/test/postgres/client.go b/test/postgres/client.go deleted file mode 100644 index 3bf1a0007..000000000 --- a/test/postgres/client.go +++ /dev/null @@ -1,506 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - "strings" - "time" - - _ "github.com/lib/pq" -) - -func main() { - // Get PostgreSQL connection details from environment - host := getEnv("POSTGRES_HOST", "localhost") - port := getEnv("POSTGRES_PORT", "5432") - user := getEnv("POSTGRES_USER", "seaweedfs") - dbname := getEnv("POSTGRES_DB", "default") - - // Build connection string - connStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", - host, port, user, dbname) - - log.Println("SeaweedFS PostgreSQL Client Test") - log.Println("=================================") - log.Printf("Connecting to: %s\n", connStr) - - // Wait for PostgreSQL server to be ready - log.Println("Waiting for PostgreSQL server...") - time.Sleep(5 * time.Second) - - // Connect to PostgreSQL server - db, err := sql.Open("postgres", connStr) - if err != nil { - log.Fatalf("Error connecting to PostgreSQL: %v", err) - } - defer db.Close() - - // Test connection with a simple query instead of Ping() - var result int - err = db.QueryRow("SELECT COUNT(*) FROM application_logs LIMIT 1").Scan(&result) - if err != nil { - log.Printf("Warning: Simple query test failed: %v", err) - log.Printf("Trying alternative connection test...") - - // Try a different table - err = db.QueryRow("SELECT COUNT(*) FROM user_events LIMIT 1").Scan(&result) - if err != nil { - log.Fatalf("Error testing PostgreSQL connection: %v", err) - } else { - log.Printf("โœ“ Connected successfully! Found %d records in user_events", result) - } - } else { - log.Printf("โœ“ Connected successfully! Found %d records in application_logs", result) - } - - // Run comprehensive tests - tests := []struct { - name string - test func(*sql.DB) error - }{ - {"System Information", testSystemInfo}, // Re-enabled - segfault was fixed - {"Database Discovery", testDatabaseDiscovery}, - {"Table Discovery", testTableDiscovery}, - {"Data Queries", testDataQueries}, - {"Aggregation Queries", testAggregationQueries}, - {"Database Context Switching", testDatabaseSwitching}, - {"System Columns", testSystemColumns}, // Re-enabled with crash-safe implementation - {"Complex Queries", testComplexQueries}, // Re-enabled with crash-safe implementation - } - - successCount := 0 - for _, test := range tests { - log.Printf("\n--- Running Test: %s ---", test.name) - if err := test.test(db); err != nil { - log.Printf("โŒ Test FAILED: %s - %v", test.name, err) - } else { - log.Printf("โœ… Test PASSED: %s", test.name) - successCount++ - } - } - - log.Printf("\n=================================") - log.Printf("Test Results: %d/%d tests passed", successCount, len(tests)) - if successCount == len(tests) { - log.Println("๐ŸŽ‰ All tests passed!") - } else { - log.Printf("โš ๏ธ %d tests failed", len(tests)-successCount) - } -} - -func testSystemInfo(db *sql.DB) error { - queries := []struct { - name string - query string - }{ - {"Version", "SELECT version()"}, - {"Current User", "SELECT current_user"}, - {"Current Database", "SELECT current_database()"}, - {"Server Encoding", "SELECT current_setting('server_encoding')"}, - } - - // Use individual connections for each query to avoid protocol issues - connStr := getEnv("POSTGRES_HOST", "postgres-server") - port := getEnv("POSTGRES_PORT", "5432") - user := getEnv("POSTGRES_USER", "seaweedfs") - dbname := getEnv("POSTGRES_DB", "logs") - - for _, q := range queries { - log.Printf(" Executing: %s", q.query) - - // Create a fresh connection for each query - tempConnStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", - connStr, port, user, dbname) - tempDB, err := sql.Open("postgres", tempConnStr) - if err != nil { - log.Printf(" Query '%s' failed to connect: %v", q.query, err) - continue - } - defer tempDB.Close() - - var result string - err = tempDB.QueryRow(q.query).Scan(&result) - if err != nil { - log.Printf(" Query '%s' failed: %v", q.query, err) - continue - } - log.Printf(" %s: %s", q.name, result) - tempDB.Close() - } - - return nil -} - -func testDatabaseDiscovery(db *sql.DB) error { - rows, err := db.Query("SHOW DATABASES") - if err != nil { - return fmt.Errorf("SHOW DATABASES failed: %v", err) - } - defer rows.Close() - - databases := []string{} - for rows.Next() { - var dbName string - if err := rows.Scan(&dbName); err != nil { - return fmt.Errorf("scanning database name: %v", err) - } - databases = append(databases, dbName) - } - - log.Printf(" Found %d databases: %s", len(databases), strings.Join(databases, ", ")) - return nil -} - -func testTableDiscovery(db *sql.DB) error { - rows, err := db.Query("SHOW TABLES") - if err != nil { - return fmt.Errorf("SHOW TABLES failed: %v", err) - } - defer rows.Close() - - tables := []string{} - for rows.Next() { - var tableName string - if err := rows.Scan(&tableName); err != nil { - return fmt.Errorf("scanning table name: %v", err) - } - tables = append(tables, tableName) - } - - log.Printf(" Found %d tables in current database: %s", len(tables), strings.Join(tables, ", ")) - return nil -} - -func testDataQueries(db *sql.DB) error { - // Try to find a table with data - tables := []string{"user_events", "system_logs", "metrics", "product_views", "application_logs"} - - for _, table := range tables { - // Try to query the table - var count int - err := db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", table)).Scan(&count) - if err == nil && count > 0 { - log.Printf(" Table '%s' has %d records", table, count) - - // Try to get sample data - rows, err := db.Query(fmt.Sprintf("SELECT * FROM %s LIMIT 3", table)) - if err != nil { - log.Printf(" Warning: Could not query sample data: %v", err) - continue - } - - columns, err := rows.Columns() - if err != nil { - rows.Close() - log.Printf(" Warning: Could not get columns: %v", err) - continue - } - - log.Printf(" Sample columns: %s", strings.Join(columns, ", ")) - - sampleCount := 0 - for rows.Next() && sampleCount < 2 { - // Create slice to hold column values - values := make([]interface{}, len(columns)) - valuePtrs := make([]interface{}, len(columns)) - for i := range values { - valuePtrs[i] = &values[i] - } - - err := rows.Scan(valuePtrs...) - if err != nil { - log.Printf(" Warning: Could not scan row: %v", err) - break - } - - // Convert to strings for display - stringValues := make([]string, len(values)) - for i, val := range values { - if val != nil { - str := fmt.Sprintf("%v", val) - if len(str) > 30 { - str = str[:30] + "..." - } - stringValues[i] = str - } else { - stringValues[i] = "NULL" - } - } - - log.Printf(" Sample row %d: %s", sampleCount+1, strings.Join(stringValues, " | ")) - sampleCount++ - } - rows.Close() - break - } - } - - return nil -} - -func testAggregationQueries(db *sql.DB) error { - // Try to find a table for aggregation testing - tables := []string{"user_events", "system_logs", "metrics", "product_views"} - - for _, table := range tables { - // Check if table exists and has data - var count int - err := db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", table)).Scan(&count) - if err != nil { - continue // Table doesn't exist or no access - } - - if count == 0 { - continue // No data - } - - log.Printf(" Testing aggregations on '%s' (%d records)", table, count) - - // Test basic aggregation - var avgId, maxId, minId float64 - err = db.QueryRow(fmt.Sprintf("SELECT AVG(id), MAX(id), MIN(id) FROM %s", table)).Scan(&avgId, &maxId, &minId) - if err != nil { - log.Printf(" Warning: Aggregation query failed: %v", err) - } else { - log.Printf(" ID stats - AVG: %.2f, MAX: %.0f, MIN: %.0f", avgId, maxId, minId) - } - - // Test COUNT with GROUP BY if possible (try common column names) - groupByColumns := []string{"user_type", "level", "service", "category", "status"} - for _, col := range groupByColumns { - rows, err := db.Query(fmt.Sprintf("SELECT %s, COUNT(*) FROM %s GROUP BY %s LIMIT 5", col, table, col)) - if err == nil { - log.Printf(" Group by %s:", col) - for rows.Next() { - var group string - var groupCount int - if err := rows.Scan(&group, &groupCount); err == nil { - log.Printf(" %s: %d", group, groupCount) - } - } - rows.Close() - break - } - } - - return nil - } - - log.Println(" No suitable tables found for aggregation testing") - return nil -} - -func testDatabaseSwitching(db *sql.DB) error { - // Get current database with retry logic - var currentDB string - var err error - for retries := 0; retries < 3; retries++ { - err = db.QueryRow("SELECT current_database()").Scan(¤tDB) - if err == nil { - break - } - log.Printf(" Retry %d: Getting current database failed: %v", retries+1, err) - time.Sleep(time.Millisecond * 100) - } - if err != nil { - return fmt.Errorf("getting current database after retries: %v", err) - } - log.Printf(" Current database: %s", currentDB) - - // Try to switch to different databases - databases := []string{"analytics", "ecommerce", "logs"} - - // Use fresh connections to avoid protocol issues - connStr := getEnv("POSTGRES_HOST", "postgres-server") - port := getEnv("POSTGRES_PORT", "5432") - user := getEnv("POSTGRES_USER", "seaweedfs") - - for _, dbName := range databases { - log.Printf(" Attempting to switch to database: %s", dbName) - - // Create fresh connection for USE command - tempConnStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", - connStr, port, user, dbName) - tempDB, err := sql.Open("postgres", tempConnStr) - if err != nil { - log.Printf(" Could not connect to '%s': %v", dbName, err) - continue - } - defer tempDB.Close() - - // Test the connection by executing a simple query - var newDB string - err = tempDB.QueryRow("SELECT current_database()").Scan(&newDB) - if err != nil { - log.Printf(" Could not verify database '%s': %v", dbName, err) - tempDB.Close() - continue - } - - log.Printf(" โœ“ Successfully connected to database: %s", newDB) - - // Check tables in this database - temporarily disabled due to SHOW TABLES protocol issue - // rows, err := tempDB.Query("SHOW TABLES") - // if err == nil { - // tables := []string{} - // for rows.Next() { - // var tableName string - // if err := rows.Scan(&tableName); err == nil { - // tables = append(tables, tableName) - // } - // } - // rows.Close() - // if len(tables) > 0 { - // log.Printf(" Tables: %s", strings.Join(tables, ", ")) - // } - // } - tempDB.Close() - break - } - - return nil -} - -func testSystemColumns(db *sql.DB) error { - // Test system columns with safer approach - focus on existing tables - tables := []string{"application_logs", "error_logs"} - - for _, table := range tables { - log.Printf(" Testing system columns availability on '%s'", table) - - // Use fresh connection to avoid protocol state issues - connStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", - getEnv("POSTGRES_HOST", "postgres-server"), - getEnv("POSTGRES_PORT", "5432"), - getEnv("POSTGRES_USER", "seaweedfs"), - getEnv("POSTGRES_DB", "logs")) - - tempDB, err := sql.Open("postgres", connStr) - if err != nil { - log.Printf(" Could not create connection: %v", err) - continue - } - defer tempDB.Close() - - // First check if table exists and has data (safer than COUNT which was causing crashes) - rows, err := tempDB.Query(fmt.Sprintf("SELECT id FROM %s LIMIT 1", table)) - if err != nil { - log.Printf(" Table '%s' not accessible: %v", table, err) - tempDB.Close() - continue - } - rows.Close() - - // Try to query just regular columns first to test connection - rows, err = tempDB.Query(fmt.Sprintf("SELECT id FROM %s LIMIT 1", table)) - if err != nil { - log.Printf(" Basic query failed on '%s': %v", table, err) - tempDB.Close() - continue - } - - hasData := false - for rows.Next() { - var id int64 - if err := rows.Scan(&id); err == nil { - hasData = true - log.Printf(" โœ“ Table '%s' has data (sample ID: %d)", table, id) - } - break - } - rows.Close() - - if hasData { - log.Printf(" โœ“ System columns test passed for '%s' - table is accessible", table) - tempDB.Close() - return nil - } - - tempDB.Close() - } - - log.Println(" System columns test completed - focused on table accessibility") - return nil -} - -func testComplexQueries(db *sql.DB) error { - // Test complex queries with safer approach using known tables - tables := []string{"application_logs", "error_logs"} - - for _, table := range tables { - log.Printf(" Testing complex queries on '%s'", table) - - // Use fresh connection to avoid protocol state issues - connStr := fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", - getEnv("POSTGRES_HOST", "postgres-server"), - getEnv("POSTGRES_PORT", "5432"), - getEnv("POSTGRES_USER", "seaweedfs"), - getEnv("POSTGRES_DB", "logs")) - - tempDB, err := sql.Open("postgres", connStr) - if err != nil { - log.Printf(" Could not create connection: %v", err) - continue - } - defer tempDB.Close() - - // Test basic SELECT with LIMIT (avoid COUNT which was causing crashes) - rows, err := tempDB.Query(fmt.Sprintf("SELECT id FROM %s LIMIT 5", table)) - if err != nil { - log.Printf(" Basic SELECT failed on '%s': %v", table, err) - tempDB.Close() - continue - } - - var ids []int64 - for rows.Next() { - var id int64 - if err := rows.Scan(&id); err == nil { - ids = append(ids, id) - } - } - rows.Close() - - if len(ids) > 0 { - log.Printf(" โœ“ Basic SELECT with LIMIT: found %d records", len(ids)) - - // Test WHERE clause with known ID (safer than arbitrary conditions) - testID := ids[0] - rows, err = tempDB.Query(fmt.Sprintf("SELECT id FROM %s WHERE id = %d", table, testID)) - if err == nil { - var foundID int64 - if rows.Next() { - if err := rows.Scan(&foundID); err == nil && foundID == testID { - log.Printf(" โœ“ WHERE clause working: found record with ID %d", foundID) - } - } - rows.Close() - } - - log.Printf(" โœ“ Complex queries test passed for '%s'", table) - tempDB.Close() - return nil - } - - tempDB.Close() - } - - log.Println(" Complex queries test completed - avoided crash-prone patterns") - return nil -} - -func stringOrNull(ns sql.NullString) string { - if ns.Valid { - return ns.String - } - return "NULL" -} - -func getEnv(key, defaultValue string) string { - if value, exists := os.LookupEnv(key); exists { - return value - } - return defaultValue -} diff --git a/test/postgres/config/s3config.json b/test/postgres/config/s3config.json deleted file mode 100644 index 4a649a0fe..000000000 --- a/test/postgres/config/s3config.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "identities": [ - { - "name": "anonymous", - "actions": [ - "Read", - "Write", - "List", - "Tagging", - "Admin" - ] - }, - { - "name": "testuser", - "credentials": [ - { - "accessKey": "testuser", - "secretKey": "testpassword" - } - ], - "actions": [ - "Read", - "Write", - "List", - "Tagging" - ] - } - ] -} diff --git a/test/postgres/docker-compose.yml b/test/postgres/docker-compose.yml deleted file mode 100644 index 6d222f83d..000000000 --- a/test/postgres/docker-compose.yml +++ /dev/null @@ -1,138 +0,0 @@ -services: - # SeaweedFS All-in-One Server (Custom Build with PostgreSQL support) - seaweedfs: - build: - context: ../.. # Build from project root - dockerfile: test/postgres/Dockerfile.seaweedfs - container_name: seaweedfs-server - ports: - - "9333:9333" # Master port - - "8888:8888" # Filer port - - "8333:8333" # S3 port - - "8085:8085" # Volume port - - "9533:9533" # Metrics port - - "26777:16777" # MQ Agent port (mapped to avoid conflicts) - - "27777:17777" # MQ Broker port (mapped to avoid conflicts) - volumes: - - seaweedfs_data:/data - command: - - ./weed - - server - - -dir=/data - - -master.volumeSizeLimitMB=50 - - -master.port=9333 - - -metricsPort=9533 - - -volume.max=0 - - -volume.port=8085 - - -volume.preStopSeconds=1 - - -filer=true - - -filer.port=8888 - - -s3=true - - -s3.port=8333 - - -webdav=false - - -s3.allowEmptyFolder=false - - -mq.broker=true - - -mq.agent=true - - -ip=seaweedfs - networks: - - seaweedfs-net - healthcheck: - test: ["CMD", "curl", "--fail", "--silent", "http://seaweedfs:9333/cluster/status"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 60s - - # Database Server (PostgreSQL Wire Protocol Compatible) - postgres-server: - build: - context: ../.. # Build from project root - dockerfile: test/postgres/Dockerfile.seaweedfs - container_name: postgres-server - ports: - - "5432:5432" # PostgreSQL port - depends_on: - seaweedfs: - condition: service_healthy - command: > - ./weed db - -host=0.0.0.0 - -port=5432 - -master=seaweedfs:9333 - -auth=trust - -database=default - -max-connections=50 - -idle-timeout=30m - networks: - - seaweedfs-net - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "5432"] - interval: 5s - timeout: 3s - retries: 3 - start_period: 10s - - # MQ Data Producer - Creates test topics and data - mq-producer: - build: - context: ../.. # Build from project root - dockerfile: test/postgres/Dockerfile.producer - container_name: mq-producer - depends_on: - seaweedfs: - condition: service_healthy - environment: - - SEAWEEDFS_MASTER=seaweedfs:9333 - - SEAWEEDFS_FILER=seaweedfs:8888 - networks: - - seaweedfs-net - restart: "no" # Run once to create data - - # PostgreSQL Test Client - postgres-client: - build: - context: ../.. # Build from project root - dockerfile: test/postgres/Dockerfile.client - container_name: postgres-client - depends_on: - postgres-server: - condition: service_healthy - environment: - - POSTGRES_HOST=postgres-server - - POSTGRES_PORT=5432 - - POSTGRES_USER=seaweedfs - - POSTGRES_DB=logs - networks: - - seaweedfs-net - profiles: - - client # Only start when explicitly requested - - # PostgreSQL CLI for manual testing - psql-cli: - image: postgres:15-alpine - container_name: psql-cli - depends_on: - postgres-server: - condition: service_healthy - environment: - - PGHOST=postgres-server - - PGPORT=5432 - - PGUSER=seaweedfs - - PGDATABASE=default - networks: - - seaweedfs-net - profiles: - - cli # Only start when explicitly requested - command: > - sh -c " - echo 'Connecting to PostgreSQL server...'; - psql -c 'SELECT version();' - " - -volumes: - seaweedfs_data: - driver: local - -networks: - seaweedfs-net: - driver: bridge diff --git a/test/postgres/producer.go b/test/postgres/producer.go deleted file mode 100644 index 2d49519e8..000000000 --- a/test/postgres/producer.go +++ /dev/null @@ -1,534 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "log" - "math/big" - "math/rand" - "os" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/mq/client/pub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -type UserEvent struct { - ID int64 `json:"id"` - UserID int64 `json:"user_id"` - UserType string `json:"user_type"` - Action string `json:"action"` - Status string `json:"status"` - Amount float64 `json:"amount,omitempty"` - PreciseAmount string `json:"precise_amount,omitempty"` // Will be converted to DECIMAL - BirthDate time.Time `json:"birth_date"` // Will be converted to DATE - Timestamp time.Time `json:"timestamp"` - Metadata string `json:"metadata,omitempty"` -} - -type SystemLog struct { - ID int64 `json:"id"` - Level string `json:"level"` - Service string `json:"service"` - Message string `json:"message"` - ErrorCode int `json:"error_code,omitempty"` - Timestamp time.Time `json:"timestamp"` -} - -type MetricEntry struct { - ID int64 `json:"id"` - Name string `json:"name"` - Value float64 `json:"value"` - Tags string `json:"tags"` - Timestamp time.Time `json:"timestamp"` -} - -type ProductView struct { - ID int64 `json:"id"` - ProductID int64 `json:"product_id"` - UserID int64 `json:"user_id"` - Category string `json:"category"` - Price float64 `json:"price"` - ViewCount int `json:"view_count"` - Timestamp time.Time `json:"timestamp"` -} - -func main() { - // Get SeaweedFS configuration from environment - masterAddr := getEnv("SEAWEEDFS_MASTER", "localhost:9333") - filerAddr := getEnv("SEAWEEDFS_FILER", "localhost:8888") - - log.Printf("Creating MQ test data...") - log.Printf("Master: %s", masterAddr) - log.Printf("Filer: %s", filerAddr) - - // Wait for SeaweedFS to be ready - log.Println("Waiting for SeaweedFS to be ready...") - time.Sleep(10 * time.Second) - - // Create topics and populate with data - topics := []struct { - namespace string - topic string - generator func() interface{} - count int - }{ - {"analytics", "user_events", generateUserEvent, 1000}, - {"analytics", "system_logs", generateSystemLog, 500}, - {"analytics", "metrics", generateMetric, 800}, - {"ecommerce", "product_views", generateProductView, 1200}, - {"ecommerce", "user_events", generateUserEvent, 600}, - {"logs", "application_logs", generateSystemLog, 2000}, - {"logs", "error_logs", generateErrorLog, 300}, - } - - for _, topicConfig := range topics { - log.Printf("Creating topic %s.%s with %d records...", - topicConfig.namespace, topicConfig.topic, topicConfig.count) - - err := createTopicData(masterAddr, filerAddr, - topicConfig.namespace, topicConfig.topic, - topicConfig.generator, topicConfig.count) - if err != nil { - log.Printf("Error creating topic %s.%s: %v", - topicConfig.namespace, topicConfig.topic, err) - } else { - log.Printf("-Successfully created %s.%s", - topicConfig.namespace, topicConfig.topic) - } - - // Small delay between topics - time.Sleep(2 * time.Second) - } - - log.Println("-MQ test data creation completed!") - log.Println("\nCreated namespaces:") - log.Println(" - analytics (user_events, system_logs, metrics)") - log.Println(" - ecommerce (product_views, user_events)") - log.Println(" - logs (application_logs, error_logs)") - log.Println("\nYou can now test with PostgreSQL clients:") - log.Println(" psql -h localhost -p 5432 -U seaweedfs -d analytics") - log.Println(" postgres=> SHOW TABLES;") - log.Println(" postgres=> SELECT COUNT(*) FROM user_events;") -} - -// createSchemaForTopic creates a proper RecordType schema based on topic name -func createSchemaForTopic(topicName string) *schema_pb.RecordType { - switch topicName { - case "user_events": - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "user_id", FieldIndex: 1, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "user_type", FieldIndex: 2, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "action", FieldIndex: 3, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "status", FieldIndex: 4, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "amount", FieldIndex: 5, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}, IsRequired: false}, - {Name: "timestamp", FieldIndex: 6, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "metadata", FieldIndex: 7, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: false}, - }, - } - case "system_logs": - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "level", FieldIndex: 1, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "service", FieldIndex: 2, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "message", FieldIndex: 3, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "error_code", FieldIndex: 4, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}, IsRequired: false}, - {Name: "timestamp", FieldIndex: 5, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - }, - } - case "metrics": - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "name", FieldIndex: 1, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "value", FieldIndex: 2, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}, IsRequired: true}, - {Name: "tags", FieldIndex: 3, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "timestamp", FieldIndex: 4, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - }, - } - case "product_views": - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "product_id", FieldIndex: 1, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "user_id", FieldIndex: 2, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "category", FieldIndex: 3, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "price", FieldIndex: 4, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}, IsRequired: true}, - {Name: "view_count", FieldIndex: 5, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}, IsRequired: true}, - {Name: "timestamp", FieldIndex: 6, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - }, - } - case "application_logs", "error_logs": - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, IsRequired: true}, - {Name: "level", FieldIndex: 1, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "service", FieldIndex: 2, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "message", FieldIndex: 3, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - {Name: "error_code", FieldIndex: 4, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}, IsRequired: false}, - {Name: "timestamp", FieldIndex: 5, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, IsRequired: true}, - }, - } - default: - // Default generic schema - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "data", FieldIndex: 0, Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BYTES}}, IsRequired: true}, - }, - } - } -} - -// convertToDecimal converts a string to decimal format for Parquet logical type -func convertToDecimal(value string) ([]byte, int32, int32) { - // Parse the decimal string using big.Rat for precision - rat := new(big.Rat) - if _, success := rat.SetString(value); !success { - return nil, 0, 0 - } - - // Convert to a fixed scale (e.g., 4 decimal places) - scale := int32(4) - precision := int32(18) // Total digits - - // Scale the rational number to integer representation - multiplier := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(scale)), nil) - scaled := new(big.Int).Mul(rat.Num(), multiplier) - scaled.Div(scaled, rat.Denom()) - - return scaled.Bytes(), precision, scale -} - -// convertToRecordValue converts Go structs to RecordValue format -func convertToRecordValue(data interface{}) (*schema_pb.RecordValue, error) { - fields := make(map[string]*schema_pb.Value) - - switch v := data.(type) { - case UserEvent: - fields["id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.ID}} - fields["user_id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.UserID}} - fields["user_type"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.UserType}} - fields["action"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Action}} - fields["status"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Status}} - fields["amount"] = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v.Amount}} - - // Convert precise amount to DECIMAL logical type - if v.PreciseAmount != "" { - if decimal, precision, scale := convertToDecimal(v.PreciseAmount); decimal != nil { - fields["precise_amount"] = &schema_pb.Value{Kind: &schema_pb.Value_DecimalValue{DecimalValue: &schema_pb.DecimalValue{ - Value: decimal, - Precision: precision, - Scale: scale, - }}} - } - } - - // Convert birth date to DATE logical type - fields["birth_date"] = &schema_pb.Value{Kind: &schema_pb.Value_DateValue{DateValue: &schema_pb.DateValue{ - DaysSinceEpoch: int32(v.BirthDate.Unix() / 86400), // Convert to days since epoch - }}} - - fields["timestamp"] = &schema_pb.Value{Kind: &schema_pb.Value_TimestampValue{TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: v.Timestamp.UnixMicro(), - IsUtc: true, - }}} - fields["metadata"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Metadata}} - - case SystemLog: - fields["id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.ID}} - fields["level"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Level}} - fields["service"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Service}} - fields["message"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Message}} - fields["error_code"] = &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: int32(v.ErrorCode)}} - fields["timestamp"] = &schema_pb.Value{Kind: &schema_pb.Value_TimestampValue{TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: v.Timestamp.UnixMicro(), - IsUtc: true, - }}} - - case MetricEntry: - fields["id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.ID}} - fields["name"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Name}} - fields["value"] = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v.Value}} - fields["tags"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Tags}} - fields["timestamp"] = &schema_pb.Value{Kind: &schema_pb.Value_TimestampValue{TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: v.Timestamp.UnixMicro(), - IsUtc: true, - }}} - - case ProductView: - fields["id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.ID}} - fields["product_id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.ProductID}} - fields["user_id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v.UserID}} - fields["category"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v.Category}} - fields["price"] = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v.Price}} - fields["view_count"] = &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: int32(v.ViewCount)}} - fields["timestamp"] = &schema_pb.Value{Kind: &schema_pb.Value_TimestampValue{TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: v.Timestamp.UnixMicro(), - IsUtc: true, - }}} - - default: - // Fallback to JSON for unknown types - jsonData, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("failed to marshal unknown type: %v", err) - } - fields["data"] = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: jsonData}} - } - - return &schema_pb.RecordValue{Fields: fields}, nil -} - -// No need for convertHTTPToGRPC - pb.ServerAddress.ToGrpcAddress() already handles this - -// discoverFiler finds a filer from the master server -func discoverFiler(masterHTTPAddress string) (string, error) { - httpAddr := pb.ServerAddress(masterHTTPAddress) - masterGRPCAddress := httpAddr.ToGrpcAddress() - - conn, err := grpc.NewClient(masterGRPCAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return "", fmt.Errorf("failed to connect to master at %s: %v", masterGRPCAddress, err) - } - defer conn.Close() - - client := master_pb.NewSeaweedClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.ListClusterNodes(ctx, &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - return "", fmt.Errorf("failed to list filers from master: %v", err) - } - - if len(resp.ClusterNodes) == 0 { - return "", fmt.Errorf("no filers found in cluster") - } - - // Use the first available filer and convert HTTP address to gRPC - filerHTTPAddress := resp.ClusterNodes[0].Address - httpAddr := pb.ServerAddress(filerHTTPAddress) - return httpAddr.ToGrpcAddress(), nil -} - -// discoverBroker finds the broker balancer using filer lock mechanism -func discoverBroker(masterHTTPAddress string) (string, error) { - // First discover filer from master - filerAddress, err := discoverFiler(masterHTTPAddress) - if err != nil { - return "", fmt.Errorf("failed to discover filer: %v", err) - } - - conn, err := grpc.NewClient(filerAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return "", fmt.Errorf("failed to connect to filer at %s: %v", filerAddress, err) - } - defer conn.Close() - - client := filer_pb.NewSeaweedFilerClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.FindLockOwner(ctx, &filer_pb.FindLockOwnerRequest{ - Name: pub_balancer.LockBrokerBalancer, - }) - if err != nil { - return "", fmt.Errorf("failed to find broker balancer: %v", err) - } - - return resp.Owner, nil -} - -func createTopicData(masterAddr, filerAddr, namespace, topicName string, - generator func() interface{}, count int) error { - - // Create schema based on topic type - recordType := createSchemaForTopic(topicName) - - // Dynamically discover broker address instead of hardcoded port replacement - brokerAddress, err := discoverBroker(masterAddr) - if err != nil { - // Fallback to hardcoded port replacement if discovery fails - log.Printf("Warning: Failed to discover broker dynamically (%v), using hardcoded port replacement", err) - brokerAddress = strings.Replace(masterAddr, ":9333", ":17777", 1) - } - - // Create publisher configuration - config := &pub_client.PublisherConfiguration{ - Topic: topic.NewTopic(namespace, topicName), - PartitionCount: 1, - Brokers: []string{brokerAddress}, // Use dynamically discovered broker address - PublisherName: fmt.Sprintf("test-producer-%s-%s", namespace, topicName), - RecordType: recordType, // Use structured schema - } - - // Create publisher - publisher, err := pub_client.NewTopicPublisher(config) - if err != nil { - return fmt.Errorf("failed to create publisher: %v", err) - } - defer publisher.Shutdown() - - // Generate and publish data - for i := 0; i < count; i++ { - data := generator() - - // Convert struct to RecordValue - recordValue, err := convertToRecordValue(data) - if err != nil { - log.Printf("Error converting data to RecordValue: %v", err) - continue - } - - // Publish structured record - err = publisher.PublishRecord([]byte(fmt.Sprintf("key-%d", i)), recordValue) - if err != nil { - log.Printf("Error publishing message %d: %v", i+1, err) - continue - } - - // Small delay every 100 messages - if (i+1)%100 == 0 { - log.Printf(" Published %d/%d messages to %s.%s", - i+1, count, namespace, topicName) - time.Sleep(100 * time.Millisecond) - } - } - - // Finish publishing - err = publisher.FinishPublish() - if err != nil { - return fmt.Errorf("failed to finish publishing: %v", err) - } - - return nil -} - -func generateUserEvent() interface{} { - userTypes := []string{"premium", "standard", "trial", "enterprise"} - actions := []string{"login", "logout", "purchase", "view", "search", "click", "download"} - statuses := []string{"active", "inactive", "pending", "completed", "failed"} - - // Generate a birth date between 1970 and 2005 (18+ years old) - birthYear := 1970 + rand.Intn(35) - birthMonth := 1 + rand.Intn(12) - birthDay := 1 + rand.Intn(28) // Keep it simple, avoid month-specific day issues - birthDate := time.Date(birthYear, time.Month(birthMonth), birthDay, 0, 0, 0, 0, time.UTC) - - // Generate a precise amount as a string with 4 decimal places - preciseAmount := fmt.Sprintf("%.4f", rand.Float64()*10000) - - return UserEvent{ - ID: rand.Int63n(1000000) + 1, - UserID: rand.Int63n(10000) + 1, - UserType: userTypes[rand.Intn(len(userTypes))], - Action: actions[rand.Intn(len(actions))], - Status: statuses[rand.Intn(len(statuses))], - Amount: rand.Float64() * 1000, - PreciseAmount: preciseAmount, - BirthDate: birthDate, - Timestamp: time.Now().Add(-time.Duration(rand.Intn(86400*30)) * time.Second), - Metadata: fmt.Sprintf("{\"session_id\":\"%d\"}", rand.Int63n(100000)), - } -} - -func generateSystemLog() interface{} { - levels := []string{"debug", "info", "warning", "error", "critical"} - services := []string{"auth-service", "payment-service", "user-service", "notification-service", "api-gateway"} - messages := []string{ - "Request processed successfully", - "User authentication completed", - "Payment transaction initiated", - "Database connection established", - "Cache miss for key", - "API rate limit exceeded", - "Service health check passed", - } - - return SystemLog{ - ID: rand.Int63n(1000000) + 1, - Level: levels[rand.Intn(len(levels))], - Service: services[rand.Intn(len(services))], - Message: messages[rand.Intn(len(messages))], - ErrorCode: rand.Intn(1000), - Timestamp: time.Now().Add(-time.Duration(rand.Intn(86400*7)) * time.Second), - } -} - -func generateErrorLog() interface{} { - levels := []string{"error", "critical", "fatal"} - services := []string{"auth-service", "payment-service", "user-service", "notification-service", "api-gateway"} - messages := []string{ - "Database connection failed", - "Authentication token expired", - "Payment processing error", - "Service unavailable", - "Memory limit exceeded", - "Timeout waiting for response", - "Invalid request parameters", - } - - return SystemLog{ - ID: rand.Int63n(1000000) + 1, - Level: levels[rand.Intn(len(levels))], - Service: services[rand.Intn(len(services))], - Message: messages[rand.Intn(len(messages))], - ErrorCode: rand.Intn(100) + 400, // 400-499 error codes - Timestamp: time.Now().Add(-time.Duration(rand.Intn(86400*7)) * time.Second), - } -} - -func generateMetric() interface{} { - names := []string{"cpu_usage", "memory_usage", "disk_usage", "request_latency", "error_rate", "throughput"} - tags := []string{ - "service=web,region=us-east", - "service=api,region=us-west", - "service=db,region=eu-central", - "service=cache,region=asia-pacific", - } - - return MetricEntry{ - ID: rand.Int63n(1000000) + 1, - Name: names[rand.Intn(len(names))], - Value: rand.Float64() * 100, - Tags: tags[rand.Intn(len(tags))], - Timestamp: time.Now().Add(-time.Duration(rand.Intn(86400*3)) * time.Second), - } -} - -func generateProductView() interface{} { - categories := []string{"electronics", "books", "clothing", "home", "sports", "automotive"} - - return ProductView{ - ID: rand.Int63n(1000000) + 1, - ProductID: rand.Int63n(10000) + 1, - UserID: rand.Int63n(5000) + 1, - Category: categories[rand.Intn(len(categories))], - Price: rand.Float64() * 500, - ViewCount: rand.Intn(100) + 1, - Timestamp: time.Now().Add(-time.Duration(rand.Intn(86400*14)) * time.Second), - } -} - -func getEnv(key, defaultValue string) string { - if value, exists := os.LookupEnv(key); exists { - return value - } - return defaultValue -} diff --git a/test/postgres/run-tests.sh b/test/postgres/run-tests.sh deleted file mode 100755 index 6ca85958c..000000000 --- a/test/postgres/run-tests.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/bin/bash - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -echo -e "${BLUE}=== SeaweedFS PostgreSQL Test Setup ===${NC}" - -# Function to get the correct docker compose command -get_docker_compose_cmd() { - if command -v docker &> /dev/null && docker compose version &> /dev/null 2>&1; then - echo "docker compose" - elif command -v docker-compose &> /dev/null; then - echo "docker-compose" - else - echo -e "${RED}x Neither 'docker compose' nor 'docker-compose' is available${NC}" - exit 1 - fi -} - -# Get the docker compose command to use -DOCKER_COMPOSE_CMD=$(get_docker_compose_cmd) -echo -e "${BLUE}Using: ${DOCKER_COMPOSE_CMD}${NC}" - -# Function to wait for service -wait_for_service() { - local service=$1 - local max_wait=$2 - local count=0 - - echo -e "${YELLOW}Waiting for $service to be ready...${NC}" - while [ $count -lt $max_wait ]; do - if $DOCKER_COMPOSE_CMD ps $service | grep -q "healthy\|Up"; then - echo -e "${GREEN}- $service is ready${NC}" - return 0 - fi - sleep 2 - count=$((count + 1)) - echo -n "." - done - - echo -e "${RED}x Timeout waiting for $service${NC}" - return 1 -} - -# Function to show logs -show_logs() { - local service=$1 - echo -e "${BLUE}=== $service logs ===${NC}" - $DOCKER_COMPOSE_CMD logs --tail=20 $service - echo -} - -# Parse command line arguments -case "$1" in - "start") - echo -e "${YELLOW}Starting SeaweedFS cluster and PostgreSQL server...${NC}" - $DOCKER_COMPOSE_CMD up -d seaweedfs postgres-server - - wait_for_service "seaweedfs" 30 - wait_for_service "postgres-server" 15 - - echo -e "${GREEN}- SeaweedFS and PostgreSQL server are running${NC}" - echo - echo "You can now:" - echo " โ€ข Run data producer: $0 produce" - echo " โ€ข Run test client: $0 test" - echo " โ€ข Connect with psql: $0 psql" - echo " โ€ข View logs: $0 logs [service]" - echo " โ€ข Stop services: $0 stop" - ;; - - "produce") - echo -e "${YELLOW}Creating MQ test data...${NC}" - $DOCKER_COMPOSE_CMD up --build mq-producer - - if [ $? -eq 0 ]; then - echo -e "${GREEN}- Test data created successfully${NC}" - echo - echo "You can now run: $0 test" - else - echo -e "${RED}x Data production failed${NC}" - show_logs "mq-producer" - fi - ;; - - "test") - echo -e "${YELLOW}Running PostgreSQL client tests...${NC}" - $DOCKER_COMPOSE_CMD up --build postgres-client - - if [ $? -eq 0 ]; then - echo -e "${GREEN}- Client tests completed${NC}" - else - echo -e "${RED}x Client tests failed${NC}" - show_logs "postgres-client" - fi - ;; - - "psql") - echo -e "${YELLOW}Connecting to PostgreSQL with psql...${NC}" - $DOCKER_COMPOSE_CMD run --rm psql-cli psql -h postgres-server -p 5432 -U seaweedfs -d default - ;; - - "logs") - service=${2:-"seaweedfs"} - show_logs "$service" - ;; - - "status") - echo -e "${BLUE}=== Service Status ===${NC}" - $DOCKER_COMPOSE_CMD ps - ;; - - "stop") - echo -e "${YELLOW}Stopping all services...${NC}" - $DOCKER_COMPOSE_CMD down - echo -e "${GREEN}- All services stopped${NC}" - ;; - - "clean") - echo -e "${YELLOW}Cleaning up everything (including data)...${NC}" - $DOCKER_COMPOSE_CMD down -v - docker system prune -f - echo -e "${GREEN}- Cleanup completed${NC}" - ;; - - "all") - echo -e "${YELLOW}Running complete test suite...${NC}" - - # Start services (wait_for_service ensures they're ready) - $0 start - - # Create data ($DOCKER_COMPOSE_CMD up is synchronous) - $0 produce - - # Run tests - $0 test - - echo -e "${GREEN}- Complete test suite finished${NC}" - ;; - - *) - echo "Usage: $0 {start|produce|test|psql|logs|status|stop|clean|all}" - echo - echo "Commands:" - echo " start - Start SeaweedFS and PostgreSQL server" - echo " produce - Create MQ test data (run after start)" - echo " test - Run PostgreSQL client tests (run after produce)" - echo " psql - Connect with psql CLI" - echo " logs - Show service logs (optionally specify service name)" - echo " status - Show service status" - echo " stop - Stop all services" - echo " clean - Stop and remove all data" - echo " all - Run complete test suite (start -> produce -> test)" - echo - echo "Example workflow:" - echo " $0 all # Complete automated test" - echo " $0 start # Manual step-by-step" - echo " $0 produce" - echo " $0 test" - echo " $0 psql # Interactive testing" - exit 1 - ;; -esac diff --git a/test/postgres/validate-setup.sh b/test/postgres/validate-setup.sh deleted file mode 100755 index c11100ba3..000000000 --- a/test/postgres/validate-setup.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -echo -e "${BLUE}=== SeaweedFS PostgreSQL Setup Validation ===${NC}" - -# Check prerequisites -echo -e "${YELLOW}Checking prerequisites...${NC}" - -if ! command -v docker &> /dev/null; then - echo -e "${RED}โœ— Docker not found. Please install Docker.${NC}" - exit 1 -fi -echo -e "${GREEN}โœ“ Docker found${NC}" - -if ! command -v docker-compose &> /dev/null; then - echo -e "${RED}โœ— Docker Compose not found. Please install Docker Compose.${NC}" - exit 1 -fi -echo -e "${GREEN}โœ“ Docker Compose found${NC}" - -# Check if running from correct directory -if [[ ! -f "docker-compose.yml" ]]; then - echo -e "${RED}โœ— Must run from test/postgres directory${NC}" - echo " cd test/postgres && ./validate-setup.sh" - exit 1 -fi -echo -e "${GREEN}โœ“ Running from correct directory${NC}" - -# Check required files -required_files=("docker-compose.yml" "producer.go" "client.go" "Dockerfile.producer" "Dockerfile.client" "run-tests.sh") -for file in "${required_files[@]}"; do - if [[ ! -f "$file" ]]; then - echo -e "${RED}โœ— Missing required file: $file${NC}" - exit 1 - fi -done -echo -e "${GREEN}โœ“ All required files present${NC}" - -# Test Docker Compose syntax -echo -e "${YELLOW}Validating Docker Compose configuration...${NC}" -if docker-compose config > /dev/null 2>&1; then - echo -e "${GREEN}โœ“ Docker Compose configuration valid${NC}" -else - echo -e "${RED}โœ— Docker Compose configuration invalid${NC}" - docker-compose config - exit 1 -fi - -# Quick smoke test -echo -e "${YELLOW}Running smoke test...${NC}" - -# Start services -echo "Starting services..." -docker-compose up -d seaweedfs postgres-server 2>/dev/null - -# Wait a bit for services to start -sleep 15 - -# Check if services are running -seaweedfs_running=$(docker-compose ps seaweedfs | grep -c "Up") -postgres_running=$(docker-compose ps postgres-server | grep -c "Up") - -if [[ $seaweedfs_running -eq 1 ]]; then - echo -e "${GREEN}โœ“ SeaweedFS service is running${NC}" -else - echo -e "${RED}โœ— SeaweedFS service failed to start${NC}" - docker-compose logs seaweedfs | tail -10 -fi - -if [[ $postgres_running -eq 1 ]]; then - echo -e "${GREEN}โœ“ PostgreSQL server is running${NC}" -else - echo -e "${RED}โœ— PostgreSQL server failed to start${NC}" - docker-compose logs postgres-server | tail -10 -fi - -# Test PostgreSQL connectivity -echo "Testing PostgreSQL connectivity..." -if timeout 10 docker run --rm --network "$(basename $(pwd))_seaweedfs-net" postgres:15-alpine \ - psql -h postgres-server -p 5432 -U seaweedfs -d default -c "SELECT version();" > /dev/null 2>&1; then - echo -e "${GREEN}โœ“ PostgreSQL connectivity test passed${NC}" -else - echo -e "${RED}โœ— PostgreSQL connectivity test failed${NC}" -fi - -# Test SeaweedFS API -echo "Testing SeaweedFS API..." -if curl -s http://localhost:9333/cluster/status > /dev/null 2>&1; then - echo -e "${GREEN}โœ“ SeaweedFS API accessible${NC}" -else - echo -e "${RED}โœ— SeaweedFS API not accessible${NC}" -fi - -# Cleanup -echo -e "${YELLOW}Cleaning up...${NC}" -docker-compose down > /dev/null 2>&1 - -echo -e "${BLUE}=== Validation Summary ===${NC}" - -if [[ $seaweedfs_running -eq 1 ]] && [[ $postgres_running -eq 1 ]]; then - echo -e "${GREEN}โœ“ Setup validation PASSED${NC}" - echo - echo "Your setup is ready! You can now run:" - echo " ./run-tests.sh all # Complete automated test" - echo " make all # Using Makefile" - echo " ./run-tests.sh start # Manual step-by-step" - echo - echo "For interactive testing:" - echo " ./run-tests.sh psql # Connect with psql" - echo - echo "Documentation:" - echo " cat README.md # Full documentation" - exit 0 -else - echo -e "${RED}โœ— Setup validation FAILED${NC}" - echo - echo "Please check the logs above and ensure:" - echo " โ€ข Docker and Docker Compose are properly installed" - echo " โ€ข All required files are present" - echo " โ€ข No other services are using ports 5432, 9333, 8888" - echo " โ€ข Docker daemon is running" - exit 1 -fi diff --git a/test/random_access/pom.xml b/test/random_access/pom.xml index 949c84448..36fe6b256 100644 --- a/test/random_access/pom.xml +++ b/test/random_access/pom.xml @@ -8,7 +8,7 @@ 1.0-SNAPSHOT - 32.0.0-jre + 30.0-jre @@ -50,7 +50,6 @@ 8 8 - 8
diff --git a/test/s3/basic/delimiter_test.go b/test/s3/basic/delimiter_test.go deleted file mode 100644 index a501f83b6..000000000 --- a/test/s3/basic/delimiter_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package basic - -import ( - "fmt" - "math/rand" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestS3ListDelimiterWithDirectoryKeyObjects tests the specific scenario from -// test_bucket_list_delimiter_not_skip_special where directory key objects -// should be properly grouped into common prefixes when using delimiters -func TestS3ListDelimiterWithDirectoryKeyObjects(t *testing.T) { - bucketName := fmt.Sprintf("test-delimiter-dir-key-%d", rand.Int31()) - - // Create bucket - _, err := svc.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - defer cleanupBucket(t, bucketName) - - // Create objects matching the failing test scenario: - // ['0/'] + ['0/1000', '0/1001', '0/1002'] + ['1999', '1999#', '1999+', '2000'] - objects := []string{ - "0/", // Directory key object - "0/1000", // Objects under 0/ prefix - "0/1001", - "0/1002", - "1999", // Objects without delimiter - "1999#", - "1999+", - "2000", - } - - // Create all objects - for _, key := range objects { - _, err := svc.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(fmt.Sprintf("content for %s", key)), - }) - require.NoError(t, err, "Failed to create object %s", key) - } - - // Test with delimiter='/' - resp, err := svc.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - Delimiter: aws.String("/"), - }) - require.NoError(t, err) - - // Extract keys and prefixes - var keys []string - for _, content := range resp.Contents { - keys = append(keys, *content.Key) - } - - var prefixes []string - for _, prefix := range resp.CommonPrefixes { - prefixes = append(prefixes, *prefix.Prefix) - } - - // Expected results: - // Keys should be: ['1999', '1999#', '1999+', '2000'] (objects without delimiters) - // Prefixes should be: ['0/'] (grouping '0/' and all '0/xxxx' objects) - - expectedKeys := []string{"1999", "1999#", "1999+", "2000"} - expectedPrefixes := []string{"0/"} - - t.Logf("Actual keys: %v", keys) - t.Logf("Actual prefixes: %v", prefixes) - - assert.ElementsMatch(t, expectedKeys, keys, "Keys should only include objects without delimiters") - assert.ElementsMatch(t, expectedPrefixes, prefixes, "CommonPrefixes should group directory key object with other objects sharing prefix") - - // Additional validation - assert.Equal(t, "/", *resp.Delimiter, "Delimiter should be set correctly") - assert.Contains(t, prefixes, "0/", "Directory key object '0/' should be grouped into common prefix '0/'") - assert.NotContains(t, keys, "0/", "Directory key object '0/' should NOT appear as individual key when delimiter is used") - - // Verify none of the '0/xxxx' objects appear as individual keys - for _, key := range keys { - assert.False(t, strings.HasPrefix(key, "0/"), "No object with '0/' prefix should appear as individual key, found: %s", key) - } -} - -// TestS3ListWithoutDelimiter tests that directory key objects appear as individual keys when no delimiter is used -func TestS3ListWithoutDelimiter(t *testing.T) { - bucketName := fmt.Sprintf("test-no-delimiter-%d", rand.Int31()) - - // Create bucket - _, err := svc.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - defer cleanupBucket(t, bucketName) - - // Create objects - objects := []string{"0/", "0/1000", "1999"} - - for _, key := range objects { - _, err := svc.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(fmt.Sprintf("content for %s", key)), - }) - require.NoError(t, err) - } - - // Test without delimiter - resp, err := svc.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - // No delimiter specified - }) - require.NoError(t, err) - - // Extract keys - var keys []string - for _, content := range resp.Contents { - keys = append(keys, *content.Key) - } - - // When no delimiter is used, all objects should be returned as individual keys - expectedKeys := []string{"0/", "0/1000", "1999"} - assert.ElementsMatch(t, expectedKeys, keys, "All objects should be individual keys when no delimiter is used") - - // No common prefixes should be present - assert.Empty(t, resp.CommonPrefixes, "No common prefixes should be present when no delimiter is used") - assert.Contains(t, keys, "0/", "Directory key object '0/' should appear as individual key when no delimiter is used") -} - -func cleanupBucket(t *testing.T, bucketName string) { - // Delete all objects - resp, err := svc.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Failed to list objects for cleanup: %v", err) - return - } - - for _, obj := range resp.Contents { - _, err := svc.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: obj.Key, - }) - if err != nil { - t.Logf("Failed to delete object %s: %v", *obj.Key, err) - } - } - - // Give some time for eventual consistency - time.Sleep(100 * time.Millisecond) - - // Delete bucket - _, err = svc.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Failed to delete bucket %s: %v", bucketName, err) - } -} diff --git a/test/s3/basic/object_tagging_test.go b/test/s3/basic/object_tagging_test.go index 04b0fb594..2b9b7e5aa 100644 --- a/test/s3/basic/object_tagging_test.go +++ b/test/s3/basic/object_tagging_test.go @@ -2,11 +2,9 @@ package basic import ( "fmt" - "strings" - "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "testing" ) func TestObjectTagging(t *testing.T) { @@ -82,147 +80,3 @@ func clearTags() { fmt.Println(response.String()) } - -func TestObjectTaggingWithEncodedValues(t *testing.T) { - // Test for URL encoded tag values - input := &s3.PutObjectInput{ - Bucket: aws.String("theBucket"), - Key: aws.String("testDir/testObjectWithEncodedTags"), - } - - svc.PutObject(input) - - // Set tags with encoded values (simulating what would happen with timestamps containing spaces and colons) - _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{ - Bucket: aws.String("theBucket"), - Key: aws.String("testDir/testObjectWithEncodedTags"), - Tagging: &s3.Tagging{ - TagSet: []*s3.Tag{ - { - Key: aws.String("Timestamp"), - Value: aws.String("2025-07-16 14:40:39"), // This would be URL encoded as "2025-07-16%2014%3A40%3A39" in the header - }, - { - Key: aws.String("Path"), - Value: aws.String("/tmp/file.txt"), // This would be URL encoded as "/tmp%2Ffile.txt" in the header - }, - }, - }, - }) - - if err != nil { - t.Fatalf("Failed to set tags with encoded values: %v", err) - } - - // Get tags back and verify they are properly decoded - response, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{ - Bucket: aws.String("theBucket"), - Key: aws.String("testDir/testObjectWithEncodedTags"), - }) - - if err != nil { - t.Fatalf("Failed to get tags: %v", err) - } - - // Verify that the tags are properly decoded - tagMap := make(map[string]string) - for _, tag := range response.TagSet { - tagMap[*tag.Key] = *tag.Value - } - - expectedTimestamp := "2025-07-16 14:40:39" - if tagMap["Timestamp"] != expectedTimestamp { - t.Errorf("Expected Timestamp tag to be '%s', got '%s'", expectedTimestamp, tagMap["Timestamp"]) - } - - expectedPath := "/tmp/file.txt" - if tagMap["Path"] != expectedPath { - t.Errorf("Expected Path tag to be '%s', got '%s'", expectedPath, tagMap["Path"]) - } - - fmt.Printf("โœ“ URL encoded tags test passed - Timestamp: %s, Path: %s\n", tagMap["Timestamp"], tagMap["Path"]) - - // Clean up - svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{ - Bucket: aws.String("theBucket"), - Key: aws.String("testDir/testObjectWithEncodedTags"), - }) -} - -// TestObjectUploadWithEncodedTags tests the specific issue reported in GitHub issue #7040 -// where tags sent via X-Amz-Tagging header during object upload are not URL decoded properly -func TestObjectUploadWithEncodedTags(t *testing.T) { - // This test specifically addresses the issue where tags with special characters - // (like spaces, colons, slashes) sent during object upload are not URL decoded - // This tests the fix in filer_server_handlers_write_autochunk.go - - objectKey := "testDir/testObjectUploadWithTags" - - // Upload object with tags that contain special characters that would be URL encoded - // The AWS SDK will automatically URL encode these when sending the X-Amz-Tagging header - // Test edge cases that url.ParseQuery handles better than manual parsing: - // - Values containing "=" characters - // - Empty values - // - Complex special characters - _, err := svc.PutObject(&s3.PutObjectInput{ - Bucket: aws.String("theBucket"), - Key: aws.String(objectKey), - Body: aws.ReadSeekCloser(strings.NewReader("test content")), - Tagging: aws.String("Timestamp=2025-07-16 14:40:39&Path=/tmp/file.txt&Description=A test file with spaces&Equation=x=y+1&EmptyValue=&Complex=A%20tag%20with%20%26%20%3D%20chars"), - }) - - if err != nil { - t.Fatalf("Failed to upload object with tags: %v", err) - } - - // Get the tags back to verify they were properly URL decoded during upload - response, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{ - Bucket: aws.String("theBucket"), - Key: aws.String(objectKey), - }) - - if err != nil { - t.Fatalf("Failed to get object tags: %v", err) - } - - // Verify that the tags are properly decoded (not URL encoded) - tagMap := make(map[string]string) - for _, tag := range response.TagSet { - tagMap[*tag.Key] = *tag.Value - } - - // Test cases for values that would be URL encoded in the X-Amz-Tagging header - testCases := []struct { - key string - expectedValue string - description string - }{ - {"Timestamp", "2025-07-16 14:40:39", "timestamp with spaces and colons"}, - {"Path", "/tmp/file.txt", "file path with slashes"}, - {"Description", "A test file with spaces", "description with spaces"}, - } - - for _, tc := range testCases { - actualValue, exists := tagMap[tc.key] - if !exists { - t.Errorf("Expected tag key '%s' not found", tc.key) - continue - } - - if actualValue != tc.expectedValue { - t.Errorf("Tag '%s' (%s): expected '%s', got '%s'", - tc.key, tc.description, tc.expectedValue, actualValue) - } else { - fmt.Printf("โœ“ Tag '%s' correctly decoded: '%s'\n", tc.key, actualValue) - } - } - - // Clean up - _, err = svc.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String("theBucket"), - Key: aws.String(objectKey), - }) - if err != nil { - t.Logf("Warning: Failed to clean up test object: %v", err) - } -} diff --git a/test/s3/compatibility/.gitignore b/test/s3/compatibility/.gitignore index f719dbc98..dc3cc5207 100644 --- a/test/s3/compatibility/.gitignore +++ b/test/s3/compatibility/.gitignore @@ -1,8 +1,2 @@ -# Test run scratch data /s3-tests /tmp - -# Test run outputs -weed.log -compat.raw.txt -compat.summary.txt \ No newline at end of file diff --git a/test/s3/compatibility/Dockerfile b/test/s3/compatibility/Dockerfile index a3a76b742..b2a1040cb 100644 --- a/test/s3/compatibility/Dockerfile +++ b/test/s3/compatibility/Dockerfile @@ -1,21 +1,11 @@ # the tests only support python 3.6, not newer -#FROM ubuntu:latest -FROM python:3.6.15-slim-buster +FROM ubuntu:latest -# Installed required system deps -RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y git-core sudo tzdata - -# Install python deps -RUN pip install virtualenv - -# Clone Ceph S3 tests +RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y git-core sudo tzdata RUN git clone https://github.com/ceph/s3-tests.git - WORKDIR s3-tests -# Pin to a certain commit on ceph/s3-tests -# https://github.com/ceph/s3-tests/commit/9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e +# we pin a certain commit RUN git checkout 9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e RUN ./bootstrap diff --git a/test/s3/compatibility/Makefile b/test/s3/compatibility/Makefile deleted file mode 100644 index b6171b10f..000000000 --- a/test/s3/compatibility/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: all build-image test - -all: build-image test - -build-image: - ./prepare.sh - -test: - ./run.sh diff --git a/test/s3/compatibility/README.md b/test/s3/compatibility/README.md index 5f57cdc9c..de1b6e9ec 100644 --- a/test/s3/compatibility/README.md +++ b/test/s3/compatibility/README.md @@ -1,245 +1,13 @@ # Running S3 Compatibility tests against SeaweedFS -This is using [the tests from CephFS][s3-tests]. - -[s3-tests]: https://github.com/ceph/s3-tests +This is using [the tests from CephFS](https://github.com/ceph/s3-tests). ## Prerequisites -- have [Docker][docker] installed +- have Docker installed - this has been executed on Mac. On Linux, the hostname in `s3tests.conf` needs to be adjusted. -[docker]: https://docs.docker.com - ## Running tests -To build the docker image that is used for the tests: - -```console -./prepare.sh -``` - -To execute all tests: - -```console -./run.sh -``` -To see debug output including all commands run by the script: - -```console -DEBUG=y ./run.sh -``` - -> [!WARNING] -> -> If your output does *not* look like the content in [`results.summary.txt`](./results.summary.txt) -> and it is full of HTTP level exceptions, there is likely an error contacting the `weed` server from -> the container that is runnin the S3 compatibility tests. -> -> There are at least a couple ways to solve this: -> -> - Modify your `docker` setup to ensure `host.docker.internal` is connected to your host running `weed` -> - Use `--net=host` and modify `host` in `s3tests.conf` to `localhost` -> -> The `--net=host` solution is potentially *unsafe*, as the container running [s3-tests][s3-tests] could -> visit unexpected websites or use the host-passthrough internet access maliciously. -> -> If you are OK with the risk of allowing `--net=host`: -> -> - Set `host = localhost` in `s3tests.conf` -> - Set `DOCKER_NET_HOST=y` when running `run.sh` - -## Most recent results - -See [`results.summary.txt`](./results.summary.txt) for the latest results of compatibility testing (with the caveat that `s3-tests` is pinned to [`ceph/s3-tests` @ 9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e](https://github.com/ceph/s3-tests/commit/9a6a1e9f197fc9fb031b809d1e057635c2ff8d4e)). - -The file is reproduced below for ease of access: - -``` -/s3-tests/virtualenv/lib/python3.6/site-packages/boto3/compat.py:88: PythonDeprecationWarning: Boto3 will no longer support Python 3.6 starting May 30, 2022. To continue receiving service updates, bug fixes, and security updates please upgrade to Python 3.7 or later. More information can be found here: https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/ - warnings.warn(warning, PythonDeprecationWarning) -s3tests_boto3.functional.test_s3.test_bucket_list_return_data ... ERROR -s3tests_boto3.functional.test_s3.test_object_write_to_nonexist_bucket ... FAIL -s3tests_boto3.functional.test_s3.test_object_read_not_exist ... ok -s3tests_boto3.functional.test_s3.test_object_requestid_matches_header_on_error ... FAIL -s3tests_boto3.functional.test_s3.test_multi_object_delete ... ok -s3tests_boto3.functional.test_s3.test_multi_objectv2_delete ... ok -s3tests_boto3.functional.test_s3.test_multi_object_delete_key_limit ... ok -s3tests_boto3.functional.test_s3.test_multi_objectv2_delete_key_limit ... ok -s3tests_boto3.functional.test_s3.test_object_head_zero_bytes ... ok -s3tests_boto3.functional.test_s3.test_object_write_check_etag ... ok -s3tests_boto3.functional.test_s3.test_object_write_cache_control ... ok -s3tests_boto3.functional.test_s3.test_object_write_expires ... ok -s3tests_boto3.functional.test_s3.test_object_write_read_update_read_delete ... ok -s3tests_boto3.functional.test_s3.test_object_metadata_replaced_on_put ... ok -s3tests_boto3.functional.test_s3.test_object_write_file ... ok -s3tests_boto3.functional.test_s3.test_post_object_anonymous_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_no_content_type ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_request_bad_access_key ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_success_code ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_invalid_success_code ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_upload_larger_than_chunk ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_key_from_filename ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_ignored_header ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_case_insensitive_condition_fields ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_escaped_field_values ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_success_redirect_action ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_signature ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_access_key ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_date_format ... ok -s3tests_boto3.functional.test_s3.test_post_object_no_key_specified ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_signature ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_policy_condition ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_user_specified_header ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_request_missing_policy_specified_field ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_condition_is_case_sensitive ... ok -s3tests_boto3.functional.test_s3.test_post_object_expires_is_case_sensitive ... ok -s3tests_boto3.functional.test_s3.test_post_object_expired_policy ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_request_field_value ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_missing_expires_condition ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_conditions_list ... ok -s3tests_boto3.functional.test_s3.test_post_object_upload_size_limit_exceeded ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_content_length_argument ... ok -s3tests_boto3.functional.test_s3.test_post_object_invalid_content_length_argument ... ok -s3tests_boto3.functional.test_s3.test_post_object_upload_size_below_minimum ... ok -s3tests_boto3.functional.test_s3.test_post_object_empty_conditions ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmatch_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifnonematch_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifnonematch_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmodifiedsince_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmodifiedsince_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifunmodifiedsince_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifunmodifiedsince_failed ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_overwrite_existed_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_nonexisted_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_nonexisted_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_overwrite_existed_failed ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_delete_key_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_object_gone ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated ... ok -s3tests_boto3.functional.test_s3.test_object_raw_response_headers ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_bucket_acl ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_object_acl ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_object_gone ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_not_expired ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_range_zero ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_max_range ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_positive_range ... FAIL -s3tests_boto3.functional.test_s3.test_object_anon_put ... FAIL -s3tests_boto3.functional.test_s3.test_object_anon_put_write_access ... ok -s3tests_boto3.functional.test_s3.test_object_put_authenticated ... ok -s3tests_boto3.functional.test_s3.test_object_raw_put_authenticated_expired ... FAIL -s3tests_boto3.functional.test_s3.test_object_acl_canned_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_write ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_writeacp ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_read ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_readacp ... ERROR -s3tests_boto3.functional.test_s3.test_object_header_acl_grants ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_private ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_private ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_publicread ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_publicread ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_publicreadwrite ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_publicreadwrite ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_private ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_publicread ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_private ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_publicread ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_bucket_create_special_key_names ... ok -s3tests_boto3.functional.test_s3.test_object_copy_zero_size ... ok -s3tests_boto3.functional.test_s3.test_object_copy_same_bucket ... ok -s3tests_boto3.functional.test_s3.test_object_copy_verify_contenttype ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_to_itself ... ok -s3tests_boto3.functional.test_s3.test_object_copy_to_itself_with_metadata ... ok -s3tests_boto3.functional.test_s3.test_object_copy_diff_bucket ... ok -s3tests_boto3.functional.test_s3.test_object_copy_not_owned_bucket ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_not_owned_object_bucket ... ERROR -s3tests_boto3.functional.test_s3.test_object_copy_canned_acl ... ok -s3tests_boto3.functional.test_s3.test_object_copy_retaining_metadata ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_replacing_metadata ... ok -s3tests_boto3.functional.test_s3.test_object_copy_bucket_not_found ... ok -s3tests_boto3.functional.test_s3.test_object_copy_key_not_found ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_empty ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_small ... ERROR -s3tests_boto3.functional.test_s3.test_multipart_copy_small ... ok -s3tests_boto3.functional.test_s3.test_multipart_copy_invalid_range ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_copy_improper_range ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_copy_without_range ... ok -s3tests_boto3.functional.test_s3.test_multipart_copy_special_names ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload ... ERROR -s3tests_boto3.functional.test_s3.test_multipart_upload_resend_part ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_size_too_small ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_upload_contents ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_overwrite_existing_object ... ok -s3tests_boto3.functional.test_s3.test_abort_multipart_upload ... ok -s3tests_boto3.functional.test_s3.test_abort_multipart_upload_not_found ... ok -s3tests_boto3.functional.test_s3.test_list_multipart_upload ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_missing_part ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_incorrect_etag ... ok -s3tests_boto3.functional.test_s3.test_100_continue ... FAIL -s3tests_boto3.functional.test_s3.test_atomic_read_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_read_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_read_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_conditional_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_conditional_write_1mb ... FAIL -s3tests_boto3.functional.test_s3.test_atomic_write_bucket_gone ... ok -s3tests_boto3.functional.test_s3.test_atomic_multipart_upload_write ... ok -s3tests_boto3.functional.test_s3.test_multipart_resend_first_finishes_last ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_big_request_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_skip_leading_bytes_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_return_trailing_bytes_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_invalid_range ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_empty_object ... ok -s3tests_boto3.functional.test_s3.test_get_obj_tagging ... ok -s3tests_boto3.functional.test_s3.test_get_obj_head_tagging ... ok -s3tests_boto3.functional.test_s3.test_put_max_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_tags ... ok -s3tests_boto3.functional.test_s3.test_put_max_kvsize_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_key_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_val_tags ... ok -s3tests_boto3.functional.test_s3.test_put_modify_tags ... ok -s3tests_boto3.functional.test_s3.test_put_delete_tags ... ok -s3tests_boto3.functional.test_s3.test_post_object_tags_anonymous_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_tags_authenticated_request ... FAIL -s3tests_boto3.functional.test_s3.test_put_obj_with_tags ... ok -s3tests_boto3.functional.test_s3.test_object_lock_multi_delete_object_with_retention ... ERROR -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_governance_with_bypass ... ok -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_governance_without_bypass ... FAIL -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_compliance ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_copy_object_ifmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifnonematch_good ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifnonematch_failed ... ok -s3tests_boto3.functional.test_s3.test_object_read_unreadable ... FAIL -ERROR - -====================================================================== -ERROR: s3tests_boto3.functional.test_s3.test_bucket_list_return_data ----------------------------------------------------------------------- -Traceback (most recent call last): - File "/s3-tests/virtualenv/lib/python3.6/site-packages/nose/case.py", line 198, in runTest - self.test(*self.arg) - File "/s3-tests/s3tests_boto3/functional/test_s3.py", line 1669, in test_bucket_list_return_data - 'DisplayName': acl_response['Owner']['DisplayName'], -KeyError: 'Owner' --------------------- >> begin captured logging << -------------------- -``` +- `./prepare.sh` to build the docker image +- `./run.sh` to execute all tests diff --git a/test/s3/compatibility/results.summary.txt b/test/s3/compatibility/results.summary.txt deleted file mode 100644 index 371b9e81a..000000000 --- a/test/s3/compatibility/results.summary.txt +++ /dev/null @@ -1,186 +0,0 @@ -/s3-tests/virtualenv/lib/python3.6/site-packages/boto3/compat.py:88: PythonDeprecationWarning: Boto3 will no longer support Python 3.6 starting May 30, 2022. To continue receiving service updates, bug fixes, and security updates please upgrade to Python 3.7 or later. More information can be found here: https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/ - warnings.warn(warning, PythonDeprecationWarning) -s3tests_boto3.functional.test_s3.test_bucket_list_return_data ... ERROR -s3tests_boto3.functional.test_s3.test_object_write_to_nonexist_bucket ... FAIL -s3tests_boto3.functional.test_s3.test_object_read_not_exist ... ok -s3tests_boto3.functional.test_s3.test_object_requestid_matches_header_on_error ... FAIL -s3tests_boto3.functional.test_s3.test_multi_object_delete ... ok -s3tests_boto3.functional.test_s3.test_multi_objectv2_delete ... ok -s3tests_boto3.functional.test_s3.test_multi_object_delete_key_limit ... ok -s3tests_boto3.functional.test_s3.test_multi_objectv2_delete_key_limit ... ok -s3tests_boto3.functional.test_s3.test_object_head_zero_bytes ... ok -s3tests_boto3.functional.test_s3.test_object_write_check_etag ... ok -s3tests_boto3.functional.test_s3.test_object_write_cache_control ... ok -s3tests_boto3.functional.test_s3.test_object_write_expires ... ok -s3tests_boto3.functional.test_s3.test_object_write_read_update_read_delete ... ok -s3tests_boto3.functional.test_s3.test_object_metadata_replaced_on_put ... ok -s3tests_boto3.functional.test_s3.test_object_write_file ... ok -s3tests_boto3.functional.test_s3.test_post_object_anonymous_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_no_content_type ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_authenticated_request_bad_access_key ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_success_code ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_invalid_success_code ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_upload_larger_than_chunk ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_set_key_from_filename ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_ignored_header ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_case_insensitive_condition_fields ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_escaped_field_values ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_success_redirect_action ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_signature ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_access_key ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_date_format ... ok -s3tests_boto3.functional.test_s3.test_post_object_no_key_specified ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_signature ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_policy_condition ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_user_specified_header ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_request_missing_policy_specified_field ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_condition_is_case_sensitive ... ok -s3tests_boto3.functional.test_s3.test_post_object_expires_is_case_sensitive ... ok -s3tests_boto3.functional.test_s3.test_post_object_expired_policy ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_invalid_request_field_value ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_missing_expires_condition ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_conditions_list ... ok -s3tests_boto3.functional.test_s3.test_post_object_upload_size_limit_exceeded ... ok -s3tests_boto3.functional.test_s3.test_post_object_missing_content_length_argument ... ok -s3tests_boto3.functional.test_s3.test_post_object_invalid_content_length_argument ... ok -s3tests_boto3.functional.test_s3.test_post_object_upload_size_below_minimum ... ok -s3tests_boto3.functional.test_s3.test_post_object_empty_conditions ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmatch_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifnonematch_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifnonematch_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmodifiedsince_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifmodifiedsince_failed ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifunmodifiedsince_good ... ok -s3tests_boto3.functional.test_s3.test_get_object_ifunmodifiedsince_failed ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_overwrite_existed_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifmatch_nonexisted_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_nonexisted_good ... ok -s3tests_boto3.functional.test_s3.test_put_object_ifnonmatch_overwrite_existed_failed ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_delete_key_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_object_gone ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated ... ok -s3tests_boto3.functional.test_s3.test_object_raw_response_headers ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_bucket_acl ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_object_acl ... ok -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_bucket_gone ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_authenticated_object_gone ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_not_expired ... ok -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_range_zero ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_max_range ... FAIL -s3tests_boto3.functional.test_s3.test_object_raw_get_x_amz_expires_out_positive_range ... FAIL -s3tests_boto3.functional.test_s3.test_object_anon_put ... FAIL -s3tests_boto3.functional.test_s3.test_object_anon_put_write_access ... ok -s3tests_boto3.functional.test_s3.test_object_put_authenticated ... ok -s3tests_boto3.functional.test_s3.test_object_raw_put_authenticated_expired ... FAIL -s3tests_boto3.functional.test_s3.test_object_acl_canned_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_write ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_writeacp ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_read ... ERROR -s3tests_boto3.functional.test_s3.test_object_acl_readacp ... ERROR -s3tests_boto3.functional.test_s3.test_object_header_acl_grants ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_private ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_private ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_publicread ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_publicread ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_object_publicreadwrite ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_private_objectv2_publicreadwrite ... FAIL -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_private ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_publicread ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicread_object_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_private ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_publicread ... ERROR -s3tests_boto3.functional.test_s3.test_access_bucket_publicreadwrite_object_publicreadwrite ... ERROR -s3tests_boto3.functional.test_s3.test_bucket_create_special_key_names ... ok -s3tests_boto3.functional.test_s3.test_object_copy_zero_size ... ok -s3tests_boto3.functional.test_s3.test_object_copy_same_bucket ... ok -s3tests_boto3.functional.test_s3.test_object_copy_verify_contenttype ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_to_itself ... ok -s3tests_boto3.functional.test_s3.test_object_copy_to_itself_with_metadata ... ok -s3tests_boto3.functional.test_s3.test_object_copy_diff_bucket ... ok -s3tests_boto3.functional.test_s3.test_object_copy_not_owned_bucket ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_not_owned_object_bucket ... ERROR -s3tests_boto3.functional.test_s3.test_object_copy_canned_acl ... ok -s3tests_boto3.functional.test_s3.test_object_copy_retaining_metadata ... FAIL -s3tests_boto3.functional.test_s3.test_object_copy_replacing_metadata ... ok -s3tests_boto3.functional.test_s3.test_object_copy_bucket_not_found ... ok -s3tests_boto3.functional.test_s3.test_object_copy_key_not_found ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_empty ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_small ... ERROR -s3tests_boto3.functional.test_s3.test_multipart_copy_small ... ok -s3tests_boto3.functional.test_s3.test_multipart_copy_invalid_range ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_copy_improper_range ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_copy_without_range ... ok -s3tests_boto3.functional.test_s3.test_multipart_copy_special_names ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload ... ERROR -s3tests_boto3.functional.test_s3.test_multipart_upload_resend_part ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_size_too_small ... FAIL -s3tests_boto3.functional.test_s3.test_multipart_upload_contents ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_overwrite_existing_object ... ok -s3tests_boto3.functional.test_s3.test_abort_multipart_upload ... ok -s3tests_boto3.functional.test_s3.test_abort_multipart_upload_not_found ... ok -s3tests_boto3.functional.test_s3.test_list_multipart_upload ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_missing_part ... ok -s3tests_boto3.functional.test_s3.test_multipart_upload_incorrect_etag ... ok -s3tests_boto3.functional.test_s3.test_100_continue ... FAIL -s3tests_boto3.functional.test_s3.test_atomic_read_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_read_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_read_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_write_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_4mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_write_8mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_conditional_write_1mb ... ok -s3tests_boto3.functional.test_s3.test_atomic_dual_conditional_write_1mb ... FAIL -s3tests_boto3.functional.test_s3.test_atomic_write_bucket_gone ... ok -s3tests_boto3.functional.test_s3.test_atomic_multipart_upload_write ... ok -s3tests_boto3.functional.test_s3.test_multipart_resend_first_finishes_last ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_big_request_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_skip_leading_bytes_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_return_trailing_bytes_response_code ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_invalid_range ... ok -s3tests_boto3.functional.test_s3.test_ranged_request_empty_object ... ok -s3tests_boto3.functional.test_s3.test_get_obj_tagging ... ok -s3tests_boto3.functional.test_s3.test_get_obj_head_tagging ... ok -s3tests_boto3.functional.test_s3.test_put_max_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_tags ... ok -s3tests_boto3.functional.test_s3.test_put_max_kvsize_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_key_tags ... ok -s3tests_boto3.functional.test_s3.test_put_excess_val_tags ... ok -s3tests_boto3.functional.test_s3.test_put_modify_tags ... ok -s3tests_boto3.functional.test_s3.test_put_delete_tags ... ok -s3tests_boto3.functional.test_s3.test_post_object_tags_anonymous_request ... FAIL -s3tests_boto3.functional.test_s3.test_post_object_tags_authenticated_request ... FAIL -s3tests_boto3.functional.test_s3.test_put_obj_with_tags ... ok -s3tests_boto3.functional.test_s3.test_object_lock_multi_delete_object_with_retention ... ERROR -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_governance_with_bypass ... ok -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_governance_without_bypass ... FAIL -s3tests_boto3.functional.test_s3.test_object_lock_changing_mode_from_compliance ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifmatch_good ... ok -s3tests_boto3.functional.test_s3.test_copy_object_ifmatch_failed ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifnonematch_good ... FAIL -s3tests_boto3.functional.test_s3.test_copy_object_ifnonematch_failed ... ok -s3tests_boto3.functional.test_s3.test_object_read_unreadable ... FAIL -ERROR - -====================================================================== -ERROR: s3tests_boto3.functional.test_s3.test_bucket_list_return_data ----------------------------------------------------------------------- -Traceback (most recent call last): - File "/s3-tests/virtualenv/lib/python3.6/site-packages/nose/case.py", line 198, in runTest - self.test(*self.arg) - File "/s3-tests/s3tests_boto3/functional/test_s3.py", line 1669, in test_bucket_list_return_data - 'DisplayName': acl_response['Owner']['DisplayName'], -KeyError: 'Owner' --------------------- >> begin captured logging << -------------------- diff --git a/test/s3/compatibility/run.sh b/test/s3/compatibility/run.sh index adfee1366..990599df5 100755 --- a/test/s3/compatibility/run.sh +++ b/test/s3/compatibility/run.sh @@ -1,88 +1,24 @@ #!/usr/bin/env bash -CONTAINER_NAME=${CONTAINER_NAME:-s3test-instance} -CONF_FILE=${CONF_FILE:-s3tests.conf} -WEED_BIN=${WEED_BIN:-../../../weed/weed} -TEST_RAW_OUTPUT_FILE=${TEST_RAW_OUTPUT_FILE:-compat.raw.txt} -TEST_PROCESSED_OUTPUT_FILE=${TEST_PROCESSED_OUTPUT_FILE:-compat.summary.txt} +set -ex -# Set up debugging for this bash script if DEBUG is set -if [ -n "${DEBUG}" ]; then - echo -e "DEBUG set [${DEBUG}], enabling debugging output..."; - set -ex -fi - -# Reset from possible previous test run killall -9 weed || echo "already stopped" rm -Rf tmp mkdir tmp -docker stop $CONTAINER_NAME || echo "already stopped" +docker stop s3test-instance || echo "already stopped" -# Ensure ulimit is set to reasonable value ulimit -n 10000 +../../../weed/weed server -filer -s3 -volume.max 0 -master.volumeSizeLimitMB 5 -dir "$(pwd)/tmp" 1>&2>weed.log & -# Start weed w/ filer + s3 in the background -$WEED_BIN server \ - -filer \ - -s3 \ - -volume.max 0 \ - -master.volumeSizeLimitMB 5 \ - -dir "$(pwd)/tmp" \ - 1>&2>weed.log & - -# Wait for master to start up -echo -e "\n[info] waiting for master @ 9333..."; until curl --output /dev/null --silent --head --fail http://127.0.0.1:9333; do - printf '.'; - sleep 5; + printf '.' + sleep 5 done -sleep 3; +sleep 3 -# Wait for s3 to start up -echo -e "\n[info] waiting for S3 @ 8333..."; -until curl --output /dev/null --silent --fail http://127.0.0.1:8333; do - printf '.'; - sleep 5; -done -sleep 3; +rm -Rf logs-full.txt logs-summary.txt +# docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v `pwd`/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py:test_get_obj_tagging -v -a 'resource=object,!bucket-policy,!versioning,!encryption' +docker run --name s3test-instance --rm -e S3TEST_CONF=s3tests.conf -v "$(pwd)"/s3tests.conf:/s3-tests/s3tests.conf -it s3tests ./virtualenv/bin/nosetests s3tests_boto3/functional/test_s3.py -v -a 'resource=object,!bucket-policy,!versioning,!encryption' | sed -n -e '/botocore.hooks/!p;//q' | tee logs-summary.txt -# Determine whether docker net -DOCKER_NET_HOST_ARGS="" -if [ -n "${DOCKER_NET_HOST}" ]; then - DOCKER_NET_HOST_ARGS="--net=host" - echo -e "\n[info] setting docker to het nost" -fi - -echo -e "\n[warn] You may have to run with UNFILTERED=y to disable output filtering, if you get the broken pipe error"; -echo -e "\n[info] running tests with unfiltered output..."; -docker run \ - --name $CONTAINER_NAME \ - --rm \ - ${DOCKER_NET_HOST_ARGS} \ - -e S3TEST_CONF=$CONF_FILE \ - -v "$(pwd)"/$CONF_FILE:/s3-tests/s3tests.conf \ - -it \ - s3tests \ - ./virtualenv/bin/nosetests \ - s3tests_boto3/functional/test_s3.py \ - -v \ - -a 'resource=object,!bucket-policy,!versioning,!encryption' \ - | tee ${TEST_RAW_OUTPUT_FILE} - -# If the summary logs are present, process them -if [ -f "${TEST_RAW_OUTPUT_FILE}" ]; then - cat ${TEST_RAW_OUTPUT_FILE} | sed -n -e '/botocore.hooks/!p;//q' | tee ${TEST_PROCESSED_OUTPUT_FILE} - echo -e "\n[info] โœ… Successfully wrote processed output @ [${TEST_PROCESSED_OUTPUT_FILE}]"; - if [ -z "${TEST_KEEP_RAW_OUTPUT}" ]; then - echo -e "\n[info] removing test raw output file @ [${TEST_RAW_OUTPUT_FILE}] (to disable this, set TEST_KEEP_RAW_OUTPUT=y)..."; - rm -rf ${TEST_RAW_OUTPUT_FILE}; - fi -else - echo -e "\n[warn] failed to find raw output @ [${TEST_RAW_OUTPUT_FILE}]"; -fi - -echo -e "\n[info] stopping [${CONTAINER_NAME}] container..."; -docker stop $CONTAINER_NAME || echo "[info] already stopped"; - -echo -e "\n[info] stopping seaweedfs processes (all, via kill -9)..."; -killall -9 weed; +docker stop s3test-instance || echo "already stopped" +killall -9 weed diff --git a/test/s3/compatibility/s3tests.conf b/test/s3/compatibility/s3tests.conf index 191168b32..5adb61791 100644 --- a/test/s3/compatibility/s3tests.conf +++ b/test/s3/compatibility/s3tests.conf @@ -4,14 +4,6 @@ # host set for rgw in vstart.sh host = host.docker.internal -## NOTE: if running docker --net=host (e.x. `DOCKER_NET_HOST=y ./run.sh`) -## then use 'localhost', so the container can more easily access your *local* weed instance -## -## This is *unsafe* -- it is possible that this script will do malicious -## things with host-level access. only run this if you trust the programmatic content of -## ceph/s3-tests (https://github.com/ceph/s3-tests) which will be using this file -#host = localhost - # port set for rgw in vstart.sh port = 8333 diff --git a/test/s3/copying/Makefile b/test/s3/copying/Makefile deleted file mode 100644 index 81e3fc19d..000000000 --- a/test/s3/copying/Makefile +++ /dev/null @@ -1,234 +0,0 @@ -# Makefile for S3 Copying Tests -# This Makefile provides targets for running comprehensive S3 copying tests - -# Default values -SEAWEEDFS_BINARY ?= weed -S3_PORT ?= 8333 -FILER_PORT ?= 8888 -VOLUME_PORT ?= 8080 -MASTER_PORT ?= 9333 -TEST_TIMEOUT ?= 10m -BUCKET_PREFIX ?= test-copying- -ACCESS_KEY ?= some_access_key1 -SECRET_KEY ?= some_secret_key1 -VOLUME_MAX_SIZE_MB ?= 50 - -# Test directory -TEST_DIR := $(shell pwd) -SEAWEEDFS_ROOT := $(shell cd ../../../ && pwd) - -# Colors for output -RED := \033[0;31m -GREEN := \033[0;32m -YELLOW := \033[1;33m -NC := \033[0m # No Color - -.PHONY: all test clean start-seaweedfs stop-seaweedfs check-binary help - -all: test-basic - -help: - @echo "SeaweedFS S3 Copying Tests" - @echo "" - @echo "Available targets:" - @echo " test-basic - Run basic S3 put/get tests first" - @echo " test - Run all S3 copying tests" - @echo " test-quick - Run quick tests only" - @echo " test-full - Run full test suite including large files" - @echo " start-seaweedfs - Start SeaweedFS server for testing" - @echo " stop-seaweedfs - Stop SeaweedFS server" - @echo " clean - Clean up test artifacts" - @echo " check-binary - Check if SeaweedFS binary exists" - @echo "" - @echo "Configuration:" - @echo " SEAWEEDFS_BINARY=$(SEAWEEDFS_BINARY)" - @echo " S3_PORT=$(S3_PORT)" - @echo " FILER_PORT=$(FILER_PORT)" - @echo " VOLUME_PORT=$(VOLUME_PORT)" - @echo " MASTER_PORT=$(MASTER_PORT)" - @echo " TEST_TIMEOUT=$(TEST_TIMEOUT)" - @echo " VOLUME_MAX_SIZE_MB=$(VOLUME_MAX_SIZE_MB)" - -check-binary: - @if ! command -v $(SEAWEEDFS_BINARY) > /dev/null 2>&1; then \ - echo "$(RED)Error: SeaweedFS binary '$(SEAWEEDFS_BINARY)' not found in PATH$(NC)"; \ - echo "Please build SeaweedFS first by running 'make' in the root directory"; \ - exit 1; \ - fi - @echo "$(GREEN)SeaweedFS binary found: $$(which $(SEAWEEDFS_BINARY))$(NC)" - -start-seaweedfs: check-binary - @echo "$(YELLOW)Starting SeaweedFS server...$(NC)" - @pkill -f "weed master" || true - @pkill -f "weed volume" || true - @pkill -f "weed filer" || true - @pkill -f "weed s3" || true - @sleep 2 - - # Create necessary directories - @mkdir -p /tmp/seaweedfs-test-copying-master - @mkdir -p /tmp/seaweedfs-test-copying-volume - - # Start master server with volume size limit - @nohup $(SEAWEEDFS_BINARY) master -port=$(MASTER_PORT) -mdir=/tmp/seaweedfs-test-copying-master -volumeSizeLimitMB=$(VOLUME_MAX_SIZE_MB) -ip=127.0.0.1 > /tmp/seaweedfs-master.log 2>&1 & - @sleep 3 - - # Start volume server - @nohup $(SEAWEEDFS_BINARY) volume -port=$(VOLUME_PORT) -mserver=127.0.0.1:$(MASTER_PORT) -dir=/tmp/seaweedfs-test-copying-volume -ip=127.0.0.1 > /tmp/seaweedfs-volume.log 2>&1 & - @sleep 3 - - # Start filer server (using standard SeaweedFS gRPC port convention: HTTP port + 10000) - @nohup $(SEAWEEDFS_BINARY) filer -port=$(FILER_PORT) -port.grpc=$$(( $(FILER_PORT) + 10000 )) -master=127.0.0.1:$(MASTER_PORT) -ip=127.0.0.1 > /tmp/seaweedfs-filer.log 2>&1 & - @sleep 3 - - # Create S3 configuration - @echo '{"identities":[{"name":"$(ACCESS_KEY)","credentials":[{"accessKey":"$(ACCESS_KEY)","secretKey":"$(SECRET_KEY)"}],"actions":["Admin","Read","Write"]}]}' > /tmp/seaweedfs-s3.json - - # Start S3 server - @nohup $(SEAWEEDFS_BINARY) s3 -port=$(S3_PORT) -filer=127.0.0.1:$(FILER_PORT) -config=/tmp/seaweedfs-s3.json -ip.bind=127.0.0.1 > /tmp/seaweedfs-s3.log 2>&1 & - @sleep 5 - - # Wait for S3 service to be ready - @echo "$(YELLOW)Waiting for S3 service to be ready...$(NC)" - @for i in $$(seq 1 30); do \ - if curl -s -f http://127.0.0.1:$(S3_PORT) > /dev/null 2>&1; then \ - echo "$(GREEN)S3 service is ready$(NC)"; \ - break; \ - fi; \ - echo "Waiting for S3 service... ($$i/30)"; \ - sleep 1; \ - done - - # Additional wait for filer gRPC to be ready - @echo "$(YELLOW)Waiting for filer gRPC to be ready...$(NC)" - @sleep 2 - @echo "$(GREEN)SeaweedFS server started successfully$(NC)" - @echo "Master: http://localhost:$(MASTER_PORT)" - @echo "Volume: http://localhost:$(VOLUME_PORT)" - @echo "Filer: http://localhost:$(FILER_PORT)" - @echo "S3: http://localhost:$(S3_PORT)" - @echo "Volume Max Size: $(VOLUME_MAX_SIZE_MB)MB" - -stop-seaweedfs: - @echo "$(YELLOW)Stopping SeaweedFS server...$(NC)" - @pkill -f "weed master" || true - @pkill -f "weed volume" || true - @pkill -f "weed filer" || true - @pkill -f "weed s3" || true - @sleep 2 - @echo "$(GREEN)SeaweedFS server stopped$(NC)" - -clean: - @echo "$(YELLOW)Cleaning up test artifacts...$(NC)" - @rm -rf /tmp/seaweedfs-test-copying-* - @rm -f /tmp/seaweedfs-*.log - @rm -f /tmp/seaweedfs-s3.json - @echo "$(GREEN)Cleanup completed$(NC)" - -test-basic: check-binary - @echo "$(YELLOW)Running basic S3 put/get tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting basic tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasic" ./test/s3/copying || (echo "$(RED)Basic tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Basic tests completed successfully!$(NC)" - -test: test-basic - @echo "$(YELLOW)Running S3 copying tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "Test.*" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)All tests completed successfully!$(NC)" - -test-quick: check-binary - @echo "$(YELLOW)Running quick S3 copying tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting quick tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectCopy|TestCopyObjectIf" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Quick tests completed successfully!$(NC)" - -test-full: check-binary - @echo "$(YELLOW)Running full S3 copying test suite...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting full test suite...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=30m -run "Test.*" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Full test suite completed successfully!$(NC)" - -test-multipart: check-binary - @echo "$(YELLOW)Running multipart copying tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting multipart tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestMultipart" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Multipart tests completed successfully!$(NC)" - -test-conditional: check-binary - @echo "$(YELLOW)Running conditional copying tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @echo "$(GREEN)Starting conditional tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestCopyObjectIf" ./test/s3/copying || (echo "$(RED)Tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Conditional tests completed successfully!$(NC)" - -# Debug targets -debug-logs: - @echo "$(YELLOW)=== Master Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-master.log || echo "No master log found" - @echo "$(YELLOW)=== Volume Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-volume.log || echo "No volume log found" - @echo "$(YELLOW)=== Filer Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-filer.log || echo "No filer log found" - @echo "$(YELLOW)=== S3 Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-s3.log || echo "No S3 log found" - -debug-status: - @echo "$(YELLOW)=== Process Status ===$(NC)" - @ps aux | grep -E "(weed|seaweedfs)" | grep -v grep || echo "No SeaweedFS processes found" - @echo "$(YELLOW)=== Port Status ===$(NC)" - @netstat -an | grep -E "($(MASTER_PORT)|$(VOLUME_PORT)|$(FILER_PORT)|$(S3_PORT))" || echo "No ports in use" - -# Manual test targets for development -manual-start: start-seaweedfs - @echo "$(GREEN)SeaweedFS is now running for manual testing$(NC)" - @echo "Run 'make manual-stop' when finished" - -manual-stop: stop-seaweedfs clean - -# CI/CD targets -ci-test: test-quick - -# Benchmark targets -benchmark: check-binary - @echo "$(YELLOW)Running S3 copying benchmarks...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=30m -bench=. -run=Benchmark ./test/s3/copying || (echo "$(RED)Benchmarks failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Benchmarks completed!$(NC)" - -# Stress test -stress: check-binary - @echo "$(YELLOW)Running S3 copying stress tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run="TestMultipartCopyMultipleSizes" -count=10 ./test/s3/copying || (echo "$(RED)Stress tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Stress tests completed!$(NC)" - -# Performance test with larger files -perf: check-binary - @echo "$(YELLOW)Running S3 copying performance tests...$(NC)" - @$(MAKE) start-seaweedfs - @sleep 5 - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run="TestMultipartCopyMultipleSizes" ./test/s3/copying || (echo "$(RED)Performance tests failed$(NC)" && $(MAKE) stop-seaweedfs && exit 1) - @$(MAKE) stop-seaweedfs - @echo "$(GREEN)Performance tests completed!$(NC)" \ No newline at end of file diff --git a/test/s3/copying/README.md b/test/s3/copying/README.md deleted file mode 100644 index a5605e196..000000000 --- a/test/s3/copying/README.md +++ /dev/null @@ -1,325 +0,0 @@ -# SeaweedFS S3 Copying Tests - -This directory contains comprehensive Go tests for SeaweedFS S3 copying functionality, converted from the failing Python tests in the s3-tests repository. - -## Overview - -These tests verify that SeaweedFS correctly implements S3 operations, starting with basic put/get operations and progressing to advanced copy operations, including: -- **Basic S3 Operations**: Put/Get operations, bucket management, and metadata handling -- **Basic object copying**: within the same bucket -- **Cross-bucket copying**: across different buckets -- **Multipart copy operations**: for large files -- **Conditional copy operations**: ETag-based conditional copying -- **Metadata handling**: during copy operations -- **ACL handling**: during copy operations - -## Test Coverage - -### Basic S3 Operations (Run First) -- **TestBasicPutGet**: Tests fundamental S3 put/get operations with various object types -- **TestBasicBucketOperations**: Tests bucket creation, listing, and deletion -- **TestBasicLargeObject**: Tests handling of larger objects (up to 10MB) - -### Basic Copy Operations -- **TestObjectCopySameBucket**: Tests copying objects within the same bucket -- **TestObjectCopyDiffBucket**: Tests copying objects to different buckets -- **TestObjectCopyCannedAcl**: Tests copying with ACL settings -- **TestObjectCopyRetainingMetadata**: Tests metadata preservation during copy - -### Multipart Copy Operations -- **TestMultipartCopySmall**: Tests multipart copying of small files -- **TestMultipartCopyWithoutRange**: Tests multipart copying without range specification -- **TestMultipartCopySpecialNames**: Tests multipart copying with special character names -- **TestMultipartCopyMultipleSizes**: Tests multipart copying with various file sizes - -### Conditional Copy Operations -- **TestCopyObjectIfMatchGood**: Tests copying with matching ETag condition -- **TestCopyObjectIfMatchFailed**: Tests copying with non-matching ETag condition (should fail) -- **TestCopyObjectIfNoneMatchFailed**: Tests copying with non-matching ETag condition (should succeed) -- **TestCopyObjectIfNoneMatchGood**: Tests copying with matching ETag condition (should fail) - -## Requirements - -1. **Go 1.19+**: Required for AWS SDK v2 and modern Go features -2. **SeaweedFS Binary**: Built from source (`../../../weed/weed`) -3. **Free Ports**: 8333 (S3), 8888 (Filer), 8080 (Volume), 9333 (Master) -4. **Dependencies**: Uses the main repository's go.mod with existing AWS SDK v2 and testify dependencies - -## Quick Start - -### 1. Build SeaweedFS -```bash -cd ../../../ -make -``` - -### 2. Run Tests -```bash -# Run basic S3 operations first (recommended) -make test-basic - -# Run all tests (starts with basic, then copy tests) -make test - -# Run quick tests only -make test-quick - -# Run multipart tests only -make test-multipart - -# Run conditional tests only -make test-conditional -``` - -## Available Make Targets - -### Basic Test Execution -- `make test-basic` - Run basic S3 put/get operations (recommended first) -- `make test` - Run all S3 tests (starts with basic, then copying) -- `make test-quick` - Run quick tests only (basic copying) -- `make test-full` - Run full test suite including large files -- `make test-multipart` - Run multipart copying tests only -- `make test-conditional` - Run conditional copying tests only - -### Server Management -- `make start-seaweedfs` - Start SeaweedFS server for testing -- `make stop-seaweedfs` - Stop SeaweedFS server -- `make manual-start` - Start server for manual testing -- `make manual-stop` - Stop server and clean up - -### Debugging -- `make debug-logs` - Show recent log entries from all services -- `make debug-status` - Show process and port status -- `make check-binary` - Verify SeaweedFS binary exists - -### Performance Testing -- `make benchmark` - Run performance benchmarks -- `make stress` - Run stress tests with multiple iterations -- `make perf` - Run performance tests with large files - -### Cleanup -- `make clean` - Clean up test artifacts and temporary files - -## Configuration - -The tests use the following default configuration: - -```json -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-copying-", - "use_ssl": false, - "skip_verify_ssl": true -} -``` - -You can modify these values in `test_config.json` or by setting environment variables: - -```bash -export SEAWEEDFS_BINARY=/path/to/weed -export S3_PORT=8333 -export FILER_PORT=8888 -export VOLUME_PORT=8080 -export MASTER_PORT=9333 -export TEST_TIMEOUT=10m -export VOLUME_MAX_SIZE_MB=50 -``` - -**Note**: The volume size limit is set to 50MB to ensure proper testing of volume boundaries and multipart operations. - -## Test Details - -### TestBasicPutGet -- Tests fundamental S3 put/get operations with various object types: - - Simple text objects - - Empty objects - - Binary objects (1KB random data) - - Objects with metadata and content-type -- Verifies ETag consistency between put and get operations -- Tests metadata preservation - -### TestBasicBucketOperations -- Tests bucket creation and existence verification -- Tests object listing in buckets -- Tests object creation and listing with directory-like prefixes -- Tests bucket deletion and cleanup -- Verifies proper error handling for operations on non-existent buckets - -### TestBasicLargeObject -- Tests handling of progressively larger objects: - - 1KB, 10KB, 100KB, 1MB, 5MB, 10MB -- Verifies data integrity for large objects -- Tests memory handling and streaming for large files -- Ensures proper handling up to the 50MB volume limit - -### TestObjectCopySameBucket -- Creates a bucket with a source object -- Copies the object to a different key within the same bucket -- Verifies the copied object has the same content - -### TestObjectCopyDiffBucket -- Creates source and destination buckets -- Copies an object from source to destination bucket -- Verifies the copied object has the same content - -### TestObjectCopyCannedAcl -- Tests copying with ACL settings (`public-read`) -- Tests metadata replacement during copy with ACL -- Verifies both basic copying and metadata handling - -### TestObjectCopyRetainingMetadata -- Tests with different file sizes (3 bytes, 1MB) -- Verifies metadata and content-type preservation -- Checks that all metadata is correctly copied - -### TestMultipartCopySmall -- Tests multipart copy with 1-byte files -- Uses range-based copying (`bytes=0-0`) -- Verifies multipart upload completion - -### TestMultipartCopyWithoutRange -- Tests multipart copy without specifying range -- Should copy entire source object -- Verifies correct content length and data - -### TestMultipartCopySpecialNames -- Tests with special character names: `" "`, `"_"`, `"__"`, `"?versionId"` -- Verifies proper URL encoding and handling -- Each special name is tested in isolation - -### TestMultipartCopyMultipleSizes -- Tests with various copy sizes: - - 5MB (single part) - - 5MB + 100KB (multi-part) - - 5MB + 600KB (multi-part) - - 10MB + 100KB (multi-part) - - 10MB + 600KB (multi-part) - - 10MB (exact multi-part boundary) -- Uses 5MB part size for all copies -- Verifies data integrity across all sizes - -### TestCopyObjectIfMatchGood -- Tests conditional copy with matching ETag -- Should succeed when ETag matches -- Verifies successful copy operation - -### TestCopyObjectIfMatchFailed -- Tests conditional copy with non-matching ETag -- Should fail with precondition error -- Verifies proper error handling - -### TestCopyObjectIfNoneMatchFailed -- Tests conditional copy with non-matching ETag for IfNoneMatch -- Should succeed when ETag doesn't match -- Verifies successful copy operation - -### TestCopyObjectIfNoneMatchGood -- Tests conditional copy with matching ETag for IfNoneMatch -- Should fail with precondition error -- Verifies proper error handling - -## Expected Behavior - -These tests verify that SeaweedFS correctly implements: - -1. **Basic S3 Operations**: Standard `PutObject`, `GetObject`, `ListBuckets`, `ListObjects` APIs -2. **Bucket Management**: Bucket creation, deletion, and listing -3. **Object Storage**: Binary and text data storage with metadata -4. **Large Object Handling**: Efficient storage and retrieval of large files -5. **Basic S3 Copy Operations**: Standard `CopyObject` API -6. **Multipart Copy Operations**: `UploadPartCopy` API with range support -7. **Conditional Operations**: ETag-based conditional copying -8. **Metadata Handling**: Proper metadata preservation and replacement -9. **ACL Handling**: Access control list management during copy -10. **Error Handling**: Proper error responses for invalid operations - -## Troubleshooting - -### Common Issues - -1. **Port Already in Use** - ```bash - make stop-seaweedfs - make clean - ``` - -2. **SeaweedFS Binary Not Found** - ```bash - cd ../../../ - make - ``` - -3. **Test Timeouts** - ```bash - export TEST_TIMEOUT=30m - make test - ``` - -4. **Permission Denied** - ```bash - sudo make clean - ``` - -### Debug Information - -```bash -# Check server status -make debug-status - -# View recent logs -make debug-logs - -# Manual server start for investigation -make manual-start -# ... perform manual testing ... -make manual-stop -``` - -### Log Locations - -When running tests, logs are stored in: -- Master: `/tmp/seaweedfs-master.log` -- Volume: `/tmp/seaweedfs-volume.log` -- Filer: `/tmp/seaweedfs-filer.log` -- S3: `/tmp/seaweedfs-s3.log` - -## Contributing - -When adding new tests: - -1. Follow the existing naming convention (`TestXxxYyy`) -2. Use the helper functions for common operations -3. Add cleanup with `defer deleteBucket(t, client, bucketName)` -4. Include error checking with `require.NoError(t, err)` -5. Use assertions with `assert.Equal(t, expected, actual)` -6. Add the test to the appropriate Make target - -## Performance Notes - -- **TestMultipartCopyMultipleSizes** is the most resource-intensive test -- Large file tests may take several minutes to complete -- Memory usage scales with file sizes being tested -- Network latency affects multipart copy performance - -## Integration with CI/CD - -For automated testing: - -```bash -# Basic validation (recommended first) -make test-basic - -# Quick validation -make ci-test - -# Full validation -make test-full - -# Performance validation -make perf -``` - -The tests are designed to be self-contained and can run in containerized environments. \ No newline at end of file diff --git a/test/s3/copying/s3_copying_test.go b/test/s3/copying/s3_copying_test.go deleted file mode 100644 index 4bad01de4..000000000 --- a/test/s3/copying/s3_copying_test.go +++ /dev/null @@ -1,1014 +0,0 @@ -package copying_test - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "io" - mathrand "math/rand" - "net/url" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// S3TestConfig holds configuration for S3 tests -type S3TestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool -} - -// Default test configuration - should match test_config.json -var defaultConfig = &S3TestConfig{ - Endpoint: "http://127.0.0.1:8000", // Use explicit IPv4 address - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-copying-", - UseSSL: false, - SkipVerifySSL: true, -} - -// Initialize math/rand with current time to ensure randomness -func init() { - mathrand.Seed(time.Now().UnixNano()) -} - -// getS3Client creates an AWS S3 client for testing -func getS3Client(t *testing.T) *s3.Client { - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(defaultConfig.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - defaultConfig.AccessKey, - defaultConfig.SecretKey, - "", - )), - config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: defaultConfig.Endpoint, - SigningRegion: defaultConfig.Region, - HostnameImmutable: true, - }, nil - })), - ) - require.NoError(t, err) - - return s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = true // Important for SeaweedFS - }) -} - -// waitForS3Service waits for the S3 service to be ready -func waitForS3Service(t *testing.T, client *s3.Client, timeout time.Duration) { - start := time.Now() - for time.Since(start) < timeout { - _, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}) - if err == nil { - return - } - t.Logf("Waiting for S3 service to be ready... (error: %v)", err) - time.Sleep(time.Second) - } - t.Fatalf("S3 service not ready after %v", timeout) -} - -// getNewBucketName generates a unique bucket name -func getNewBucketName() string { - timestamp := time.Now().UnixNano() - // Add random suffix to prevent collisions when tests run quickly - randomSuffix := mathrand.Intn(100000) - return fmt.Sprintf("%s%d-%d", defaultConfig.BucketPrefix, timestamp, randomSuffix) -} - -// cleanupTestBuckets removes any leftover test buckets from previous runs -func cleanupTestBuckets(t *testing.T, client *s3.Client) { - resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}) - if err != nil { - t.Logf("Warning: failed to list buckets for cleanup: %v", err) - return - } - - for _, bucket := range resp.Buckets { - bucketName := *bucket.Name - // Only delete buckets that match our test prefix - if strings.HasPrefix(bucketName, defaultConfig.BucketPrefix) { - t.Logf("Cleaning up leftover test bucket: %s", bucketName) - deleteBucket(t, client, bucketName) - } - } -} - -// createBucket creates a new bucket for testing -func createBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, try to delete the bucket if it exists (cleanup from previous failed tests) - deleteBucket(t, client, bucketName) - - // Create the bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) -} - -// deleteBucket deletes a bucket and all its contents -func deleteBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, delete all objects - deleteAllObjects(t, client, bucketName) - - // Then delete the bucket - _, err := client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - // Only log warnings for actual errors, not "bucket doesn't exist" - if !strings.Contains(err.Error(), "NoSuchBucket") { - t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) - } - } -} - -// deleteAllObjects deletes all objects in a bucket -func deleteAllObjects(t *testing.T, client *s3.Client, bucketName string) { - // List all objects - paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - }) - - for paginator.HasMorePages() { - page, err := paginator.NextPage(context.TODO()) - if err != nil { - // Only log warnings for actual errors, not "bucket doesn't exist" - if !strings.Contains(err.Error(), "NoSuchBucket") { - t.Logf("Warning: failed to list objects in bucket %s: %v", bucketName, err) - } - return - } - - if len(page.Contents) == 0 { - break - } - - var objectsToDelete []types.ObjectIdentifier - for _, obj := range page.Contents { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: obj.Key, - }) - } - - // Delete objects in batches - if len(objectsToDelete) > 0 { - _, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(true), - }, - }) - if err != nil { - t.Logf("Warning: failed to delete objects in bucket %s: %v", bucketName, err) - } - } - } -} - -// putObject puts an object into a bucket -func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput { - resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - return resp -} - -// putObjectWithMetadata puts an object with metadata into a bucket -func putObjectWithMetadata(t *testing.T, client *s3.Client, bucketName, key, content string, metadata map[string]string, contentType string) *s3.PutObjectOutput { - input := &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - } - - if metadata != nil { - input.Metadata = metadata - } - - if contentType != "" { - input.ContentType = aws.String(contentType) - } - - resp, err := client.PutObject(context.TODO(), input) - require.NoError(t, err) - return resp -} - -// getObject gets an object from a bucket -func getObject(t *testing.T, client *s3.Client, bucketName, key string) *s3.GetObjectOutput { - resp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - return resp -} - -// getObjectBody gets the body content of an object -func getObjectBody(t *testing.T, resp *s3.GetObjectOutput) string { - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - resp.Body.Close() - return string(body) -} - -// generateRandomData generates random data of specified size -func generateRandomData(size int) []byte { - data := make([]byte, size) - _, err := rand.Read(data) - if err != nil { - panic(err) - } - return data -} - -// createCopySource creates a properly URL-encoded copy source string -func createCopySource(bucketName, key string) string { - // URL encode the key to handle special characters like spaces - encodedKey := url.PathEscape(key) - return fmt.Sprintf("%s/%s", bucketName, encodedKey) -} - -// TestBasicPutGet tests basic S3 put and get operations -func TestBasicPutGet(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Test 1: Put and get a simple text object - t.Run("Simple text object", func(t *testing.T) { - key := "test-simple.txt" - content := "Hello, SeaweedFS S3!" - - // Put object - putResp := putObject(t, client, bucketName, key, content) - assert.NotNil(t, putResp.ETag) - - // Get object - getResp := getObject(t, client, bucketName, key) - body := getObjectBody(t, getResp) - assert.Equal(t, content, body) - assert.Equal(t, putResp.ETag, getResp.ETag) - }) - - // Test 2: Put and get an empty object - t.Run("Empty object", func(t *testing.T) { - key := "test-empty.txt" - content := "" - - putResp := putObject(t, client, bucketName, key, content) - assert.NotNil(t, putResp.ETag) - - getResp := getObject(t, client, bucketName, key) - body := getObjectBody(t, getResp) - assert.Equal(t, content, body) - assert.Equal(t, putResp.ETag, getResp.ETag) - }) - - // Test 3: Put and get a binary object - t.Run("Binary object", func(t *testing.T) { - key := "test-binary.bin" - content := string(generateRandomData(1024)) // 1KB of random data - - putResp := putObject(t, client, bucketName, key, content) - assert.NotNil(t, putResp.ETag) - - getResp := getObject(t, client, bucketName, key) - body := getObjectBody(t, getResp) - assert.Equal(t, content, body) - assert.Equal(t, putResp.ETag, getResp.ETag) - }) - - // Test 4: Put and get object with metadata - t.Run("Object with metadata", func(t *testing.T) { - key := "test-metadata.txt" - content := "Content with metadata" - metadata := map[string]string{ - "author": "test", - "description": "test object with metadata", - } - contentType := "text/plain" - - putResp := putObjectWithMetadata(t, client, bucketName, key, content, metadata, contentType) - assert.NotNil(t, putResp.ETag) - - getResp := getObject(t, client, bucketName, key) - body := getObjectBody(t, getResp) - assert.Equal(t, content, body) - assert.Equal(t, putResp.ETag, getResp.ETag) - assert.Equal(t, contentType, *getResp.ContentType) - assert.Equal(t, metadata["author"], getResp.Metadata["author"]) - assert.Equal(t, metadata["description"], getResp.Metadata["description"]) - }) -} - -// TestBasicBucketOperations tests basic bucket operations -func TestBasicBucketOperations(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Test 1: Create bucket - t.Run("Create bucket", func(t *testing.T) { - createBucket(t, client, bucketName) - - // Verify bucket exists by listing buckets - resp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}) - require.NoError(t, err) - - found := false - for _, bucket := range resp.Buckets { - if *bucket.Name == bucketName { - found = true - break - } - } - assert.True(t, found, "Bucket should exist after creation") - }) - - // Test 2: Put objects and list them - t.Run("List objects", func(t *testing.T) { - // Put multiple objects - objects := []string{"test1.txt", "test2.txt", "dir/test3.txt"} - for _, key := range objects { - putObject(t, client, bucketName, key, fmt.Sprintf("content of %s", key)) - } - - // List objects - resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - assert.Equal(t, len(objects), len(resp.Contents)) - - // Verify each object exists - for _, obj := range resp.Contents { - found := false - for _, expected := range objects { - if *obj.Key == expected { - found = true - break - } - } - assert.True(t, found, "Object %s should be in list", *obj.Key) - } - }) - - // Test 3: Delete bucket (cleanup) - t.Run("Delete bucket", func(t *testing.T) { - deleteBucket(t, client, bucketName) - - // Verify bucket is deleted by trying to list its contents - _, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - }) - assert.Error(t, err, "Bucket should not exist after deletion") - }) -} - -// TestBasicLargeObject tests handling of larger objects (up to volume limit) -func TestBasicLargeObject(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Test with progressively larger objects - sizes := []int{ - 1024, // 1KB - 1024 * 10, // 10KB - 1024 * 100, // 100KB - 1024 * 1024, // 1MB - 1024 * 1024 * 5, // 5MB - 1024 * 1024 * 10, // 10MB - } - - for _, size := range sizes { - t.Run(fmt.Sprintf("Size_%dMB", size/(1024*1024)), func(t *testing.T) { - key := fmt.Sprintf("large-object-%d.bin", size) - content := string(generateRandomData(size)) - - putResp := putObject(t, client, bucketName, key, content) - assert.NotNil(t, putResp.ETag) - - getResp := getObject(t, client, bucketName, key) - body := getObjectBody(t, getResp) - assert.Equal(t, len(content), len(body)) - assert.Equal(t, content, body) - assert.Equal(t, putResp.ETag, getResp.ETag) - }) - } -} - -// TestObjectCopySameBucket tests copying an object within the same bucket -func TestObjectCopySameBucket(t *testing.T) { - client := getS3Client(t) - - // Wait for S3 service to be ready - waitForS3Service(t, client, 30*time.Second) - - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo123bar" - sourceContent := "foo" - putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object within the same bucket - destKey := "bar321foo" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - }) - require.NoError(t, err, "Failed to copy object within same bucket") - - // Verify the copied object - resp := getObject(t, client, bucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) -} - -// TestObjectCopyDiffBucket tests copying an object to a different bucket -func TestObjectCopyDiffBucket(t *testing.T) { - client := getS3Client(t) - sourceBucketName := getNewBucketName() - destBucketName := getNewBucketName() - - // Create buckets - createBucket(t, client, sourceBucketName) - defer deleteBucket(t, client, sourceBucketName) - createBucket(t, client, destBucketName) - defer deleteBucket(t, client, destBucketName) - - // Put source object - sourceKey := "foo123bar" - sourceContent := "foo" - putObject(t, client, sourceBucketName, sourceKey, sourceContent) - - // Copy object to different bucket - destKey := "bar321foo" - copySource := createCopySource(sourceBucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, destBucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) -} - -// TestObjectCopyCannedAcl tests copying with ACL settings -func TestObjectCopyCannedAcl(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo123bar" - sourceContent := "foo" - putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object with public-read ACL - destKey := "bar321foo" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - ACL: types.ObjectCannedACLPublicRead, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, bucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) - - // Test metadata replacement with ACL - metadata := map[string]string{"abc": "def"} - destKey2 := "foo123bar2" - copySource2 := createCopySource(bucketName, destKey) - _, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey2), - CopySource: aws.String(copySource2), - ACL: types.ObjectCannedACLPublicRead, - Metadata: metadata, - MetadataDirective: types.MetadataDirectiveReplace, - }) - require.NoError(t, err) - - // Verify the copied object with metadata - resp2 := getObject(t, client, bucketName, destKey2) - body2 := getObjectBody(t, resp2) - assert.Equal(t, sourceContent, body2) - assert.Equal(t, metadata, resp2.Metadata) -} - -// TestObjectCopyRetainingMetadata tests copying while retaining metadata -func TestObjectCopyRetainingMetadata(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Test with different sizes - sizes := []int{3, 1024 * 1024} // 3 bytes and 1MB - for _, size := range sizes { - t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) { - sourceKey := fmt.Sprintf("foo123bar_%d", size) - sourceContent := string(generateRandomData(size)) - contentType := "audio/ogg" - metadata := map[string]string{"key1": "value1", "key2": "value2"} - - // Put source object with metadata - putObjectWithMetadata(t, client, bucketName, sourceKey, sourceContent, metadata, contentType) - - // Copy object (should retain metadata) - destKey := fmt.Sprintf("bar321foo_%d", size) - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, bucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) - assert.Equal(t, contentType, *resp.ContentType) - assert.Equal(t, metadata, resp.Metadata) - require.NotNil(t, resp.ContentLength) - assert.Equal(t, int64(size), *resp.ContentLength) - }) - } -} - -// TestMultipartCopySmall tests multipart copying of small files -func TestMultipartCopySmall(t *testing.T) { - client := getS3Client(t) - - // Clean up any leftover buckets from previous test runs - cleanupTestBuckets(t, client) - - sourceBucketName := getNewBucketName() - destBucketName := getNewBucketName() - - // Create buckets - createBucket(t, client, sourceBucketName) - defer deleteBucket(t, client, sourceBucketName) - createBucket(t, client, destBucketName) - defer deleteBucket(t, client, destBucketName) - - // Put source object - sourceKey := "foo" - sourceContent := "x" // 1 byte - putObject(t, client, sourceBucketName, sourceKey, sourceContent) - - // Create multipart upload - destKey := "mymultipart" - createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err) - uploadID := *createResp.UploadId - - // Upload part copy - copySource := createCopySource(sourceBucketName, sourceKey) - copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - PartNumber: aws.Int32(1), - CopySource: aws.String(copySource), - CopySourceRange: aws.String("bytes=0-0"), - }) - require.NoError(t, err) - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: copyResp.CopyPartResult.ETag, - PartNumber: aws.Int32(1), - }, - }, - }, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, destBucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) - require.NotNil(t, resp.ContentLength) - assert.Equal(t, int64(1), *resp.ContentLength) -} - -// TestMultipartCopyWithoutRange tests multipart copying without range specification -func TestMultipartCopyWithoutRange(t *testing.T) { - client := getS3Client(t) - - // Clean up any leftover buckets from previous test runs - cleanupTestBuckets(t, client) - - sourceBucketName := getNewBucketName() - destBucketName := getNewBucketName() - - // Create buckets - createBucket(t, client, sourceBucketName) - defer deleteBucket(t, client, sourceBucketName) - createBucket(t, client, destBucketName) - defer deleteBucket(t, client, destBucketName) - - // Put source object - sourceKey := "source" - sourceContent := string(generateRandomData(10)) - putObject(t, client, sourceBucketName, sourceKey, sourceContent) - - // Create multipart upload - destKey := "mymultipartcopy" - createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err) - uploadID := *createResp.UploadId - - // Upload part copy without range (should copy entire object) - copySource := createCopySource(sourceBucketName, sourceKey) - copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - PartNumber: aws.Int32(1), - CopySource: aws.String(copySource), - }) - require.NoError(t, err) - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: copyResp.CopyPartResult.ETag, - PartNumber: aws.Int32(1), - }, - }, - }, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, destBucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) - require.NotNil(t, resp.ContentLength) - assert.Equal(t, int64(10), *resp.ContentLength) -} - -// TestMultipartCopySpecialNames tests multipart copying with special character names -func TestMultipartCopySpecialNames(t *testing.T) { - client := getS3Client(t) - - // Clean up any leftover buckets from previous test runs - cleanupTestBuckets(t, client) - - sourceBucketName := getNewBucketName() - destBucketName := getNewBucketName() - - // Create buckets - createBucket(t, client, sourceBucketName) - defer deleteBucket(t, client, sourceBucketName) - createBucket(t, client, destBucketName) - defer deleteBucket(t, client, destBucketName) - - // Test with special key names - specialKeys := []string{" ", "_", "__", "?versionId"} - sourceContent := "x" // 1 byte - destKey := "mymultipart" - - for i, sourceKey := range specialKeys { - t.Run(fmt.Sprintf("special_key_%d", i), func(t *testing.T) { - // Put source object - putObject(t, client, sourceBucketName, sourceKey, sourceContent) - - // Create multipart upload - createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err) - uploadID := *createResp.UploadId - - // Upload part copy - copySource := createCopySource(sourceBucketName, sourceKey) - copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - PartNumber: aws.Int32(1), - CopySource: aws.String(copySource), - CopySourceRange: aws.String("bytes=0-0"), - }) - require.NoError(t, err) - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: copyResp.CopyPartResult.ETag, - PartNumber: aws.Int32(1), - }, - }, - }, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, destBucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) - require.NotNil(t, resp.ContentLength) - assert.Equal(t, int64(1), *resp.ContentLength) - }) - } -} - -// TestMultipartCopyMultipleSizes tests multipart copying with various file sizes -func TestMultipartCopyMultipleSizes(t *testing.T) { - client := getS3Client(t) - - // Clean up any leftover buckets from previous test runs - cleanupTestBuckets(t, client) - - sourceBucketName := getNewBucketName() - destBucketName := getNewBucketName() - - // Create buckets - createBucket(t, client, sourceBucketName) - defer deleteBucket(t, client, sourceBucketName) - createBucket(t, client, destBucketName) - defer deleteBucket(t, client, destBucketName) - - // Put source object (12MB) - sourceKey := "foo" - sourceSize := 12 * 1024 * 1024 - sourceContent := generateRandomData(sourceSize) - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(sourceBucketName), - Key: aws.String(sourceKey), - Body: bytes.NewReader(sourceContent), - }) - require.NoError(t, err) - - destKey := "mymultipart" - partSize := 5 * 1024 * 1024 // 5MB parts - - // Test different copy sizes - testSizes := []int{ - 5 * 1024 * 1024, // 5MB - 5*1024*1024 + 100*1024, // 5MB + 100KB - 5*1024*1024 + 600*1024, // 5MB + 600KB - 10*1024*1024 + 100*1024, // 10MB + 100KB - 10*1024*1024 + 600*1024, // 10MB + 600KB - 10 * 1024 * 1024, // 10MB - } - - for _, size := range testSizes { - t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) { - // Create multipart upload - createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err) - uploadID := *createResp.UploadId - - // Upload parts - var parts []types.CompletedPart - copySource := createCopySource(sourceBucketName, sourceKey) - - for i := 0; i < size; i += partSize { - partNum := int32(len(parts) + 1) - endOffset := i + partSize - 1 - if endOffset >= size { - endOffset = size - 1 - } - - copyRange := fmt.Sprintf("bytes=%d-%d", i, endOffset) - copyResp, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - PartNumber: aws.Int32(partNum), - CopySource: aws.String(copySource), - CopySourceRange: aws.String(copyRange), - }) - require.NoError(t, err) - - parts = append(parts, types.CompletedPart{ - ETag: copyResp.CopyPartResult.ETag, - PartNumber: aws.Int32(partNum), - }) - } - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(destBucketName), - Key: aws.String(destKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: parts, - }, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, destBucketName, destKey) - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - resp.Body.Close() - - require.NotNil(t, resp.ContentLength) - assert.Equal(t, int64(size), *resp.ContentLength) - assert.Equal(t, sourceContent[:size], body) - }) - } -} - -// TestCopyObjectIfMatchGood tests copying with matching ETag condition -func TestCopyObjectIfMatchGood(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo" - sourceContent := "bar" - putResp := putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object with matching ETag - destKey := "bar" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - CopySourceIfMatch: putResp.ETag, - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, bucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) -} - -// TestCopyObjectIfNoneMatchFailed tests copying with non-matching ETag condition -func TestCopyObjectIfNoneMatchFailed(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo" - sourceContent := "bar" - putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object with non-matching ETag (should succeed) - destKey := "bar" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - CopySourceIfNoneMatch: aws.String("ABCORZ"), - }) - require.NoError(t, err) - - // Verify the copied object - resp := getObject(t, client, bucketName, destKey) - body := getObjectBody(t, resp) - assert.Equal(t, sourceContent, body) -} - -// TestCopyObjectIfMatchFailed tests copying with non-matching ETag condition (should fail) -func TestCopyObjectIfMatchFailed(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo" - sourceContent := "bar" - putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object with non-matching ETag (should fail) - destKey := "bar" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - CopySourceIfMatch: aws.String("ABCORZ"), - }) - - // Should fail with precondition failed - require.Error(t, err) - // Note: We could check for specific error types, but SeaweedFS might return different error codes -} - -// TestCopyObjectIfNoneMatchGood tests copying with matching ETag condition (should fail) -func TestCopyObjectIfNoneMatchGood(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Put source object - sourceKey := "foo" - sourceContent := "bar" - putResp := putObject(t, client, bucketName, sourceKey, sourceContent) - - // Copy object with matching ETag for IfNoneMatch (should fail) - destKey := "bar" - copySource := createCopySource(bucketName, sourceKey) - _, err := client.CopyObject(context.TODO(), &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(copySource), - CopySourceIfNoneMatch: putResp.ETag, - }) - - // Should fail with precondition failed - require.Error(t, err) -} diff --git a/test/s3/copying/test_config.json b/test/s3/copying/test_config.json deleted file mode 100644 index 0453f8501..000000000 --- a/test/s3/copying/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-copying-", - "use_ssl": false, - "skip_verify_ssl": true -} \ No newline at end of file diff --git a/test/s3/cors/Makefile b/test/s3/cors/Makefile deleted file mode 100644 index e59124a6a..000000000 --- a/test/s3/cors/Makefile +++ /dev/null @@ -1,337 +0,0 @@ -# CORS Integration Tests Makefile -# This Makefile provides comprehensive targets for running CORS integration tests - -.PHONY: help build-weed setup-server start-server stop-server test-cors test-cors-quick test-cors-comprehensive test-all clean logs check-deps - -# Configuration -WEED_BINARY := ../../../weed/weed_binary -S3_PORT := 8333 -MASTER_PORT := 9333 -VOLUME_PORT := 8080 -FILER_PORT := 8888 -TEST_TIMEOUT := 10m -TEST_PATTERN := TestCORS - -# Default target -help: - @echo "CORS Integration Tests Makefile" - @echo "" - @echo "Available targets:" - @echo " help - Show this help message" - @echo " build-weed - Build the SeaweedFS binary" - @echo " check-deps - Check dependencies and build binary if needed" - @echo " start-server - Start SeaweedFS server for testing" - @echo " start-server-simple - Start server without process cleanup (for CI)" - @echo " stop-server - Stop SeaweedFS server" - @echo " test-cors - Run all CORS tests" - @echo " test-cors-quick - Run core CORS tests only" - @echo " test-cors-simple - Run tests without server management" - @echo " test-cors-comprehensive - Run comprehensive CORS tests" - @echo " test-with-server - Start server, run tests, stop server" - @echo " logs - Show server logs" - @echo " clean - Clean up test artifacts and stop server" - @echo " health-check - Check if server is accessible" - @echo "" - @echo "Configuration:" - @echo " S3_PORT=${S3_PORT}" - @echo " TEST_TIMEOUT=${TEST_TIMEOUT}" - -# Build the SeaweedFS binary -build-weed: - @echo "Building SeaweedFS binary..." - @cd ../../../weed && go build -o weed_binary . - @chmod +x $(WEED_BINARY) - @echo "โœ… SeaweedFS binary built at $(WEED_BINARY)" - -check-deps: build-weed - @echo "Checking dependencies..." - @echo "๐Ÿ” DEBUG: Checking Go installation..." - @command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1) - @echo "๐Ÿ” DEBUG: Go version: $$(go version)" - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)..." - @test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1) - @echo "๐Ÿ” DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')" - @echo "๐Ÿ” DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')" - @echo "๐Ÿ” DEBUG: Checking Go module dependencies..." - @go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1) - @go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1) - @echo "โœ… All dependencies are available" - -# Start SeaweedFS server for testing -start-server: check-deps - @echo "Starting SeaweedFS server..." - @echo "๐Ÿ” DEBUG: Current working directory: $$(pwd)" - @echo "๐Ÿ” DEBUG: Checking for existing weed processes..." - @ps aux | grep weed | grep -v grep || echo "No existing weed processes found" - @echo "๐Ÿ” DEBUG: Cleaning up any existing PID file..." - @rm -f weed-server.pid - @echo "๐Ÿ” DEBUG: Checking for port conflicts..." - @if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \ - echo "โš ๏ธ Port $(S3_PORT) is already in use, trying to find the process..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \ - else \ - echo "โœ… Port $(S3_PORT) is available"; \ - fi - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)" - @ls -la $(WEED_BINARY) || (echo "โŒ Binary not found!" && exit 1) - @echo "๐Ÿ” DEBUG: Checking config file at ../../../docker/compose/s3.json" - @ls -la ../../../docker/compose/s3.json || echo "โš ๏ธ Config file not found, continuing without it" - @echo "๐Ÿ” DEBUG: Creating volume directory..." - @mkdir -p ./test-volume-data - @echo "๐Ÿ” DEBUG: Launching SeaweedFS server in background..." - @echo "๐Ÿ” DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324" - @$(WEED_BINARY) server \ - -debug \ - -s3 \ - -s3.port=$(S3_PORT) \ - -s3.allowEmptyFolder=false \ - -s3.allowDeleteBucketNotEmpty=true \ - -s3.config=../../../docker/compose/s3.json \ - -filer \ - -filer.maxMB=64 \ - -master.volumeSizeLimitMB=50 \ - -volume.max=100 \ - -dir=./test-volume-data \ - -volume.preStopSeconds=1 \ - -metricsPort=9324 \ - > weed-test.log 2>&1 & echo $$! > weed-server.pid - @echo "๐Ÿ” DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')" - @echo "๐Ÿ” DEBUG: Checking if PID is still running..." - @sleep 2 - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - ps -p $$SERVER_PID || echo "โš ๏ธ Server PID $$SERVER_PID not found after 2 seconds"; \ - else \ - echo "โš ๏ธ PID file not found"; \ - fi - @echo "๐Ÿ” DEBUG: Waiting for server to start (up to 90 seconds)..." - @for i in $$(seq 1 90); do \ - echo "๐Ÿ” DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \ - if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ - echo "โœ… SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \ - exit 0; \ - fi; \ - if [ $$i -eq 5 ]; then \ - echo "๐Ÿ” DEBUG: After 5 seconds, checking process and logs..."; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - if [ -f weed-test.log ]; then \ - echo "=== First server logs ==="; \ - head -20 weed-test.log; \ - fi; \ - fi; \ - if [ $$i -eq 15 ]; then \ - echo "๐Ÿ” DEBUG: After 15 seconds, checking port bindings..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \ - netstat -tlnp 2>/dev/null | grep 9333 || echo "Port 9333 not bound"; \ - netstat -tlnp 2>/dev/null | grep 8080 || echo "Port 8080 not bound"; \ - fi; \ - if [ $$i -eq 30 ]; then \ - echo "โš ๏ธ Server taking longer than expected (30s), checking logs..."; \ - if [ -f weed-test.log ]; then \ - echo "=== Recent server logs ==="; \ - tail -20 weed-test.log; \ - fi; \ - fi; \ - sleep 1; \ - done; \ - echo "โŒ Server failed to start within 90 seconds"; \ - echo "๐Ÿ” DEBUG: Final process check:"; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - echo "๐Ÿ” DEBUG: Final port check:"; \ - netstat -tlnp 2>/dev/null | grep -E "(8333|9333|8080)" || echo "No ports bound"; \ - echo "=== Full server logs ==="; \ - if [ -f weed-test.log ]; then \ - cat weed-test.log; \ - else \ - echo "No log file found"; \ - fi; \ - exit 1 - -# Stop SeaweedFS server -stop-server: - @echo "Stopping SeaweedFS server..." - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - echo "Killing server PID $$SERVER_PID"; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - kill -TERM $$SERVER_PID 2>/dev/null || true; \ - sleep 2; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - echo "Process still running, sending KILL signal..."; \ - kill -KILL $$SERVER_PID 2>/dev/null || true; \ - sleep 1; \ - fi; \ - else \ - echo "Process $$SERVER_PID not found (already stopped)"; \ - fi; \ - rm -f weed-server.pid; \ - else \ - echo "No PID file found, checking for running processes..."; \ - echo "โš ๏ธ Skipping automatic process cleanup to avoid CI issues"; \ - echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \ - fi - @echo "โœ… SeaweedFS server stopped" - -# Show server logs -logs: - @if test -f weed-test.log; then \ - echo "=== SeaweedFS Server Logs ==="; \ - tail -f weed-test.log; \ - else \ - echo "No log file found. Server may not be running."; \ - fi - -# Core CORS tests (basic functionality) -test-cors-quick: check-deps - @echo "Running core CORS tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSConfigurationManagement|TestCORSPreflightRequest|TestCORSActualRequest" . - @echo "โœ… Core CORS tests completed" - -# All CORS tests (comprehensive) -test-cors: check-deps - @echo "Running all CORS tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . - @echo "โœ… All CORS tests completed" - -# Comprehensive CORS tests (all features) -test-cors-comprehensive: check-deps - @echo "Running comprehensive CORS tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORS" . - @echo "โœ… Comprehensive CORS tests completed" - -# All tests without server management -test-cors-simple: check-deps - @echo "Running CORS tests (assuming server is already running)..." - @go test -v -timeout=$(TEST_TIMEOUT) . - @echo "โœ… All CORS tests completed" - -# Start server, run tests, stop server -test-with-server: start-server - @echo "Running CORS tests with managed server..." - @sleep 5 # Give server time to fully start - @make test-cors-comprehensive || (echo "Tests failed, stopping server..." && make stop-server && exit 1) - @make stop-server - @echo "โœ… All tests completed with managed server" - -# Health check -health-check: - @echo "Checking server health..." - @if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ - echo "โœ… Server is accessible on port $(S3_PORT)"; \ - else \ - echo "โŒ Server is not accessible on port $(S3_PORT)"; \ - exit 1; \ - fi - -# Clean up -clean: - @echo "Cleaning up test artifacts..." - @make stop-server - @rm -f weed-test.log - @rm -f weed-server.pid - @rm -rf ./test-volume-data - @rm -f cors.test - @go clean -testcache - @echo "โœ… Cleanup completed" - -# Individual test targets for specific functionality -test-basic-cors: - @echo "Running basic CORS tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSConfigurationManagement" . - -test-preflight-cors: - @echo "Running preflight CORS tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSPreflightRequest" . - -test-actual-cors: - @echo "Running actual CORS request tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSActualRequest" . - -test-origin-matching: - @echo "Running origin matching tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSOriginMatching" . - -test-header-matching: - @echo "Running header matching tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSHeaderMatching" . - -test-method-matching: - @echo "Running method matching tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSMethodMatching" . - -test-multiple-rules: - @echo "Running multiple rules tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSMultipleRulesMatching" . - -test-validation: - @echo "Running validation tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSValidation" . - -test-caching: - @echo "Running caching tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSCaching" . - -test-error-handling: - @echo "Running error handling tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSErrorHandling" . - -# Development targets -dev-start: start-server - @echo "Development server started. Access S3 API at http://localhost:$(S3_PORT)" - @echo "To stop: make stop-server" - -dev-test: check-deps - @echo "Running tests in development mode..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestCORSConfigurationManagement" . - -# CI targets -ci-test: check-deps - @echo "Running tests in CI mode..." - @go test -v -timeout=$(TEST_TIMEOUT) -race . - -# All targets -test-all: test-cors test-cors-comprehensive - @echo "โœ… All CORS tests completed" - -# Benchmark targets -benchmark-cors: - @echo "Running CORS performance benchmarks..." - @go test -v -timeout=$(TEST_TIMEOUT) -bench=. -benchmem . - -# Coverage targets -coverage: - @echo "Running tests with coverage..." - @go test -v -timeout=$(TEST_TIMEOUT) -coverprofile=coverage.out . - @go tool cover -html=coverage.out -o coverage.html - @echo "Coverage report generated: coverage.html" - -# Format and lint -fmt: - @echo "Formatting Go code..." - @go fmt . - -lint: - @echo "Running linter..." - @golint . || echo "golint not available, skipping..." - -# Install dependencies for development -install-deps: - @echo "Installing Go dependencies..." - @go mod tidy - @go mod download - -# Show current configuration -show-config: - @echo "Current configuration:" - @echo " WEED_BINARY: $(WEED_BINARY)" - @echo " S3_PORT: $(S3_PORT)" - @echo " TEST_TIMEOUT: $(TEST_TIMEOUT)" - @echo " TEST_PATTERN: $(TEST_PATTERN)" - -# Legacy targets for backward compatibility -test: test-with-server -test-verbose: test-cors-comprehensive -test-single: test-basic-cors -test-clean: clean -build: check-deps -setup: check-deps \ No newline at end of file diff --git a/test/s3/cors/README.md b/test/s3/cors/README.md deleted file mode 100644 index 1b93d9ccc..000000000 --- a/test/s3/cors/README.md +++ /dev/null @@ -1,362 +0,0 @@ -# CORS Integration Tests for SeaweedFS S3 API - -This directory contains comprehensive integration tests for the CORS (Cross-Origin Resource Sharing) functionality in SeaweedFS S3 API. - -## Overview - -The CORS integration tests validate the complete CORS implementation including: -- CORS configuration management (PUT/GET/DELETE) -- CORS rule validation -- CORS middleware behavior -- Caching functionality -- Error handling -- Real-world CORS scenarios - -## Prerequisites - -1. **Go 1.19+**: For building SeaweedFS and running tests -2. **Network Access**: Tests use `localhost:8333` by default -3. **System Dependencies**: `curl` and `netstat` for health checks - -## Quick Start - -The tests now automatically start their own SeaweedFS server, so you don't need to manually start one. - -### 1. Run All Tests with Managed Server - -```bash -# Run all tests with automatic server management -make test-with-server - -# Run core CORS tests only -make test-cors-quick - -# Run comprehensive CORS tests -make test-cors-comprehensive -``` - -### 2. Manual Server Management - -If you prefer to manage the server manually: - -```bash -# Start server -make start-server - -# Run tests (assuming server is running) -make test-cors-simple - -# Stop server -make stop-server -``` - -### 3. Individual Test Categories - -```bash -# Run specific test types -make test-basic-cors # Basic CORS configuration -make test-preflight-cors # Preflight OPTIONS requests -make test-actual-cors # Actual CORS request handling -make test-origin-matching # Origin matching logic -make test-header-matching # Header matching logic -make test-method-matching # Method matching logic -make test-multiple-rules # Multiple CORS rules -make test-validation # CORS validation -make test-caching # CORS caching behavior -make test-error-handling # Error handling -``` - -## Test Server Management - -The tests use a comprehensive server management system similar to other SeaweedFS integration tests: - -### Server Configuration - -- **S3 Port**: 8333 (configurable via `S3_PORT`) -- **Master Port**: 9333 -- **Volume Port**: 8080 -- **Filer Port**: 8888 -- **Metrics Port**: 9324 -- **Data Directory**: `./test-volume-data` (auto-created) -- **Log File**: `weed-test.log` - -### Server Lifecycle - -1. **Build**: Automatically builds `../../../weed/weed_binary` -2. **Start**: Launches SeaweedFS with S3 API enabled -3. **Health Check**: Waits up to 90 seconds for server to be ready -4. **Test**: Runs the requested tests -5. **Stop**: Gracefully shuts down the server -6. **Cleanup**: Removes temporary files and data - -### Available Commands - -```bash -# Server management -make start-server # Start SeaweedFS server -make stop-server # Stop SeaweedFS server -make health-check # Check server health -make logs # View server logs - -# Test execution -make test-with-server # Full test cycle with server management -make test-cors-simple # Run tests without server management -make test-cors-quick # Run core tests only -make test-cors-comprehensive # Run all tests - -# Development -make dev-start # Start server for development -make dev-test # Run development tests -make build-weed # Build SeaweedFS binary -make check-deps # Check dependencies - -# Maintenance -make clean # Clean up all artifacts -make coverage # Generate coverage report -make fmt # Format code -make lint # Run linter -``` - -## Test Configuration - -### Default Configuration - -The tests use these default settings (configurable via environment variables): - -```bash -WEED_BINARY=../../../weed/weed_binary -S3_PORT=8333 -TEST_TIMEOUT=10m -TEST_PATTERN=TestCORS -``` - -### Configuration File - -The `test_config.json` file contains S3 client configuration: - -```json -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-cors-", - "use_ssl": false, - "skip_verify_ssl": true -} -``` - -## Troubleshooting - -### Compilation Issues - -If you encounter compilation errors, the most common issues are: - -1. **AWS SDK v2 Type Mismatches**: The `MaxAgeSeconds` field in `types.CORSRule` expects `int32`, not `*int32`. Use direct values like `3600` instead of `aws.Int32(3600)`. - -2. **Field Name Issues**: The `GetBucketCorsOutput` type has a `CORSRules` field directly, not a `CORSConfiguration` field. - -Example fix: -```go -// โŒ Incorrect -MaxAgeSeconds: aws.Int32(3600), -assert.Len(t, getResp.CORSConfiguration.CORSRules, 1) - -// โœ… Correct -MaxAgeSeconds: 3600, -assert.Len(t, getResp.CORSRules, 1) -``` - -### Server Issues - -1. **Server Won't Start** - ```bash - # Check for port conflicts - netstat -tlnp | grep 8333 - - # View server logs - make logs - - # Force cleanup - make clean - ``` - -2. **Test Failures** - ```bash - # Run with server management - make test-with-server - - # Run specific test - make test-basic-cors - - # Check server health - make health-check - ``` - -3. **Connection Issues** - ```bash - # Verify server is running - curl -s http://localhost:8333 - - # Check server logs - tail -f weed-test.log - ``` - -### Performance Issues - -If tests are slow or timing out: - -```bash -# Increase timeout -export TEST_TIMEOUT=30m -make test-with-server - -# Run quick tests only -make test-cors-quick - -# Check server resources -make debug-status -``` - -## Test Coverage - -### Core Functionality Tests - -#### 1. CORS Configuration Management (`TestCORSConfigurationManagement`) -- PUT CORS configuration -- GET CORS configuration -- DELETE CORS configuration -- Configuration updates -- Error handling for non-existent configurations - -#### 2. Multiple CORS Rules (`TestCORSMultipleRules`) -- Multiple rules in single configuration -- Rule precedence and ordering -- Complex rule combinations - -#### 3. CORS Validation (`TestCORSValidation`) -- Invalid HTTP methods -- Empty origins validation -- Negative MaxAge validation -- Rule limit validation - -#### 4. Wildcard Support (`TestCORSWithWildcards`) -- Wildcard origins (`*`, `https://*.example.com`) -- Wildcard headers (`*`) -- Wildcard expose headers - -#### 5. Rule Limits (`TestCORSRuleLimit`) -- Maximum 100 rules per configuration -- Rule limit enforcement -- Large configuration handling - -#### 6. Error Handling (`TestCORSErrorHandling`) -- Non-existent bucket operations -- Invalid configurations -- Malformed requests - -### HTTP-Level Tests - -#### 1. Preflight Requests (`TestCORSPreflightRequest`) -- OPTIONS request handling -- CORS headers in preflight responses -- Access-Control-Request-Method validation -- Access-Control-Request-Headers validation - -#### 2. Actual Requests (`TestCORSActualRequest`) -- CORS headers in actual responses -- Origin validation for real requests -- Proper expose headers handling - -#### 3. Origin Matching (`TestCORSOriginMatching`) -- Exact origin matching -- Wildcard origin matching (`*`) -- Subdomain wildcard matching (`https://*.example.com`) -- Non-matching origins (should be rejected) - -#### 4. Header Matching (`TestCORSHeaderMatching`) -- Wildcard header matching (`*`) -- Specific header matching -- Case-insensitive matching -- Disallowed headers - -#### 5. Method Matching (`TestCORSMethodMatching`) -- Allowed methods verification -- Disallowed methods rejection -- Method-specific CORS behavior - -#### 6. Multiple Rules (`TestCORSMultipleRulesMatching`) -- Rule precedence and selection -- Multiple rules with different configurations -- Complex rule interactions - -### Integration Tests - -#### 1. Caching (`TestCORSCaching`) -- CORS configuration caching -- Cache invalidation -- Cache performance - -#### 2. Object Operations (`TestCORSObjectOperations`) -- CORS with actual S3 operations -- PUT/GET/DELETE objects with CORS -- CORS headers in object responses - -#### 3. Without Configuration (`TestCORSWithoutConfiguration`) -- Behavior when no CORS configuration exists -- Default CORS behavior -- Graceful degradation - -## Development - -### Running Tests During Development - -```bash -# Start server for development -make dev-start - -# Run quick test -make dev-test - -# View logs in real-time -make logs -``` - -### Adding New Tests - -1. Follow the existing naming convention (`TestCORSXxxYyy`) -2. Use the helper functions (`getS3Client`, `createTestBucket`, etc.) -3. Add cleanup with `defer cleanupTestBucket(t, client, bucketName)` -4. Include proper error checking with `require.NoError(t, err)` -5. Use assertions with `assert.Equal(t, expected, actual)` -6. Add the test to the appropriate Makefile target - -### Code Quality - -```bash -# Format code -make fmt - -# Run linter -make lint - -# Generate coverage report -make coverage -``` - -## Performance Notes - -- Tests create and destroy buckets for each test case -- Large configuration tests may take several minutes -- Server startup typically takes 15-30 seconds -- Tests run in parallel where possible for efficiency - -## Integration with SeaweedFS - -These tests validate the CORS implementation in: -- `weed/s3api/cors/` - Core CORS package -- `weed/s3api/s3api_bucket_cors_handlers.go` - HTTP handlers -- `weed/s3api/s3api_server.go` - Router integration -- `weed/s3api/s3api_bucket_config.go` - Configuration management - -The tests ensure AWS S3 API compatibility and proper CORS behavior across all supported scenarios. \ No newline at end of file diff --git a/test/s3/cors/s3_cors_http_test.go b/test/s3/cors/s3_cors_http_test.go deleted file mode 100644 index 872831a2a..000000000 --- a/test/s3/cors/s3_cors_http_test.go +++ /dev/null @@ -1,630 +0,0 @@ -package cors - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestCORSPreflightRequest tests CORS preflight OPTIONS requests -func TestCORSPreflightRequest(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up CORS configuration - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"Content-Type", "Authorization"}, - AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag", "Content-Length"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Test preflight request with raw HTTP - httpClient := &http.Client{Timeout: 10 * time.Second} - - // Create OPTIONS request - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - // Add CORS preflight headers - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", "PUT") - req.Header.Set("Access-Control-Request-Headers", "Content-Type, Authorization") - - // Send the request - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - // Verify CORS headers in response - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Methods"), "PUT", "Should allow PUT method") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Headers"), "Content-Type", "Should allow Content-Type header") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Headers"), "Authorization", "Should allow Authorization header") - assert.Equal(t, "3600", resp.Header.Get("Access-Control-Max-Age"), "Should have correct Max-Age header") - assert.Contains(t, resp.Header.Get("Access-Control-Expose-Headers"), "ETag", "Should expose ETag header") - assert.Equal(t, http.StatusOK, resp.StatusCode, "OPTIONS request should return 200") -} - -// TestCORSActualRequest tests CORS behavior with actual requests -func TestCORSActualRequest(t *testing.T) { - // Temporarily clear AWS environment variables to ensure truly anonymous requests - // This prevents AWS SDK from auto-signing requests in GitHub Actions - originalAccessKey := os.Getenv("AWS_ACCESS_KEY_ID") - originalSecretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") - originalSessionToken := os.Getenv("AWS_SESSION_TOKEN") - originalProfile := os.Getenv("AWS_PROFILE") - originalRegion := os.Getenv("AWS_REGION") - - os.Setenv("AWS_ACCESS_KEY_ID", "") - os.Setenv("AWS_SECRET_ACCESS_KEY", "") - os.Setenv("AWS_SESSION_TOKEN", "") - os.Setenv("AWS_PROFILE", "") - os.Setenv("AWS_REGION", "") - - defer func() { - // Restore original environment variables - os.Setenv("AWS_ACCESS_KEY_ID", originalAccessKey) - os.Setenv("AWS_SECRET_ACCESS_KEY", originalSecretKey) - os.Setenv("AWS_SESSION_TOKEN", originalSessionToken) - os.Setenv("AWS_PROFILE", originalProfile) - os.Setenv("AWS_REGION", originalRegion) - }() - - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up CORS configuration - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "PUT"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag", "Content-Length"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for CORS configuration to be fully processed - time.Sleep(100 * time.Millisecond) - - // First, put an object using S3 client - objectKey := "test-cors-object" - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("Test CORS content"), - }) - require.NoError(t, err, "Should be able to put object") - - // Test GET request with CORS headers using raw HTTP - // Create a completely isolated HTTP client to avoid AWS SDK auto-signing - transport := &http.Transport{ - // Completely disable any proxy or middleware - Proxy: nil, - } - - httpClient := &http.Client{ - Timeout: 10 * time.Second, - // Use a completely clean transport to avoid any AWS SDK middleware - Transport: transport, - } - - // Create URL manually to avoid any AWS SDK endpoint processing - // Use the same endpoint as the S3 client to ensure compatibility with GitHub Actions - config := getDefaultConfig() - endpoint := config.Endpoint - // Remove any protocol prefix and ensure it's http for anonymous requests - if strings.HasPrefix(endpoint, "https://") { - endpoint = strings.Replace(endpoint, "https://", "http://", 1) - } - if !strings.HasPrefix(endpoint, "http://") { - endpoint = "http://" + endpoint - } - - requestURL := fmt.Sprintf("%s/%s/%s", endpoint, bucketName, objectKey) - req, err := http.NewRequest("GET", requestURL, nil) - require.NoError(t, err, "Should be able to create GET request") - - // Add Origin header to simulate CORS request - req.Header.Set("Origin", "https://example.com") - - // Explicitly ensure no AWS headers are present (defensive programming) - // Clear ALL potential AWS-related headers that might be auto-added - req.Header.Del("Authorization") - req.Header.Del("X-Amz-Content-Sha256") - req.Header.Del("X-Amz-Date") - req.Header.Del("Amz-Sdk-Invocation-Id") - req.Header.Del("Amz-Sdk-Request") - req.Header.Del("X-Amz-Security-Token") - req.Header.Del("X-Amz-Session-Token") - req.Header.Del("AWS-Session-Token") - req.Header.Del("X-Amz-Target") - req.Header.Del("X-Amz-User-Agent") - - // Ensure User-Agent doesn't indicate AWS SDK - req.Header.Set("User-Agent", "anonymous-cors-test/1.0") - - // Verify no AWS-related headers are present - for name := range req.Header { - headerLower := strings.ToLower(name) - if strings.Contains(headerLower, "aws") || - strings.Contains(headerLower, "amz") || - strings.Contains(headerLower, "authorization") { - t.Fatalf("Found AWS-related header in anonymous request: %s", name) - } - } - - // Send the request - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send GET request") - defer resp.Body.Close() - - // Verify CORS headers are present - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - assert.Contains(t, resp.Header.Get("Access-Control-Expose-Headers"), "ETag", "Should expose ETag header") - - // Anonymous requests should succeed when anonymous read permission is configured in IAM - // The server configuration allows anonymous users to have Read permissions - assert.Equal(t, http.StatusOK, resp.StatusCode, "Anonymous GET request should succeed when anonymous read is configured") -} - -// TestCORSOriginMatching tests origin matching with different patterns -func TestCORSOriginMatching(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - testCases := []struct { - name string - allowedOrigins []string - requestOrigin string - shouldAllow bool - }{ - { - name: "exact match", - allowedOrigins: []string{"https://example.com"}, - requestOrigin: "https://example.com", - shouldAllow: true, - }, - { - name: "wildcard match", - allowedOrigins: []string{"*"}, - requestOrigin: "https://example.com", - shouldAllow: true, - }, - { - name: "subdomain wildcard match", - allowedOrigins: []string{"https://*.example.com"}, - requestOrigin: "https://api.example.com", - shouldAllow: true, - }, - { - name: "no match", - allowedOrigins: []string{"https://example.com"}, - requestOrigin: "https://malicious.com", - shouldAllow: false, - }, - { - name: "subdomain wildcard no match", - allowedOrigins: []string{"https://*.example.com"}, - requestOrigin: "https://example.com", - shouldAllow: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Set up CORS configuration for this test case - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: tc.allowedOrigins, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Test preflight request - httpClient := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req.Header.Set("Origin", tc.requestOrigin) - req.Header.Set("Access-Control-Request-Method", "GET") - - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - if tc.shouldAllow { - assert.Equal(t, tc.requestOrigin, resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Methods"), "GET", "Should allow GET method") - } else { - assert.Empty(t, resp.Header.Get("Access-Control-Allow-Origin"), "Should not have Allow-Origin header for disallowed origin") - } - }) - } -} - -// TestCORSHeaderMatching tests header matching with different patterns -func TestCORSHeaderMatching(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - testCases := []struct { - name string - allowedHeaders []string - requestHeaders string - shouldAllow bool - expectedHeaders string - }{ - { - name: "wildcard headers", - allowedHeaders: []string{"*"}, - requestHeaders: "Content-Type, Authorization", - shouldAllow: true, - expectedHeaders: "Content-Type, Authorization", - }, - { - name: "specific headers match", - allowedHeaders: []string{"Content-Type", "Authorization"}, - requestHeaders: "Content-Type, Authorization", - shouldAllow: true, - expectedHeaders: "Content-Type, Authorization", - }, - { - name: "partial header match", - allowedHeaders: []string{"Content-Type"}, - requestHeaders: "Content-Type", - shouldAllow: true, - expectedHeaders: "Content-Type", - }, - { - name: "case insensitive match", - allowedHeaders: []string{"content-type"}, - requestHeaders: "Content-Type", - shouldAllow: true, - expectedHeaders: "Content-Type", - }, - { - name: "disallowed header", - allowedHeaders: []string{"Content-Type"}, - requestHeaders: "Authorization", - shouldAllow: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Set up CORS configuration for this test case - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: tc.allowedHeaders, - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Test preflight request - httpClient := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", "POST") - req.Header.Set("Access-Control-Request-Headers", tc.requestHeaders) - - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - if tc.shouldAllow { - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - allowedHeaders := resp.Header.Get("Access-Control-Allow-Headers") - for _, header := range strings.Split(tc.expectedHeaders, ", ") { - assert.Contains(t, allowedHeaders, header, "Should allow header: %s", header) - } - } else { - // Even if headers are not allowed, the origin should still be in the response - // but the headers should not be echoed back - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - allowedHeaders := resp.Header.Get("Access-Control-Allow-Headers") - assert.NotContains(t, allowedHeaders, "Authorization", "Should not allow Authorization header") - } - }) - } -} - -// TestCORSWithoutConfiguration tests CORS behavior when no configuration is set -func TestCORSWithoutConfiguration(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Test preflight request without CORS configuration - httpClient := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", "GET") - - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - // Without CORS configuration, CORS headers should not be present - assert.Empty(t, resp.Header.Get("Access-Control-Allow-Origin"), "Should not have Allow-Origin header without CORS config") - assert.Empty(t, resp.Header.Get("Access-Control-Allow-Methods"), "Should not have Allow-Methods header without CORS config") - assert.Empty(t, resp.Header.Get("Access-Control-Allow-Headers"), "Should not have Allow-Headers header without CORS config") -} - -// TestCORSMethodMatching tests method matching -func TestCORSMethodMatching(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up CORS configuration with limited methods - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - testCases := []struct { - method string - shouldAllow bool - }{ - {"GET", true}, - {"POST", true}, - {"PUT", false}, - {"DELETE", false}, - {"HEAD", false}, - } - - for _, tc := range testCases { - t.Run(fmt.Sprintf("method_%s", tc.method), func(t *testing.T) { - httpClient := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", tc.method) - - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - if tc.shouldAllow { - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Methods"), tc.method, "Should allow method: %s", tc.method) - } else { - // Even if method is not allowed, the origin should still be in the response - // but the method should not be in the allowed methods - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should have correct Allow-Origin header") - allowedMethods := resp.Header.Get("Access-Control-Allow-Methods") - assert.NotContains(t, allowedMethods, tc.method, "Should not allow method: %s", tc.method) - } - }) - } -} - -// TestCORSMultipleRulesMatching tests CORS with multiple rules -func TestCORSMultipleRulesMatching(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up CORS configuration with multiple rules - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"Content-Type"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - { - AllowedHeaders: []string{"Authorization"}, - AllowedMethods: []string{"POST", "PUT"}, - AllowedOrigins: []string{"https://api.example.com"}, - ExposeHeaders: []string{"Content-Length"}, - MaxAgeSeconds: aws.Int32(7200), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Test first rule - httpClient := &http.Client{Timeout: 10 * time.Second} - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", "GET") - req.Header.Set("Access-Control-Request-Headers", "Content-Type") - - resp, err := httpClient.Do(req) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp.Body.Close() - - assert.Equal(t, "https://example.com", resp.Header.Get("Access-Control-Allow-Origin"), "Should match first rule") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Methods"), "GET", "Should allow GET method") - assert.Contains(t, resp.Header.Get("Access-Control-Allow-Headers"), "Content-Type", "Should allow Content-Type header") - assert.Equal(t, "3600", resp.Header.Get("Access-Control-Max-Age"), "Should have first rule's max age") - - // Test second rule - req2, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/%s/test-object", getDefaultConfig().Endpoint, bucketName), nil) - require.NoError(t, err, "Should be able to create OPTIONS request") - - req2.Header.Set("Origin", "https://api.example.com") - req2.Header.Set("Access-Control-Request-Method", "POST") - req2.Header.Set("Access-Control-Request-Headers", "Authorization") - - resp2, err := httpClient.Do(req2) - require.NoError(t, err, "Should be able to send OPTIONS request") - defer resp2.Body.Close() - - assert.Equal(t, "https://api.example.com", resp2.Header.Get("Access-Control-Allow-Origin"), "Should match second rule") - assert.Contains(t, resp2.Header.Get("Access-Control-Allow-Methods"), "POST", "Should allow POST method") - assert.Contains(t, resp2.Header.Get("Access-Control-Allow-Headers"), "Authorization", "Should allow Authorization header") - assert.Equal(t, "7200", resp2.Header.Get("Access-Control-Max-Age"), "Should have second rule's max age") -} - -// TestServiceLevelCORS tests that service-level endpoints (like /status) get proper CORS headers -func TestServiceLevelCORS(t *testing.T) { - assert := assert.New(t) - - endpoints := []string{ - "/", - "/status", - "/healthz", - } - - for _, endpoint := range endpoints { - t.Run(fmt.Sprintf("endpoint_%s", strings.ReplaceAll(endpoint, "/", "_")), func(t *testing.T) { - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s%s", getDefaultConfig().Endpoint, endpoint), nil) - assert.NoError(err) - - // Add Origin header to trigger CORS - req.Header.Set("Origin", "http://example.com") - - client := &http.Client{} - resp, err := client.Do(req) - assert.NoError(err) - defer resp.Body.Close() - - // Should return 200 OK - assert.Equal(http.StatusOK, resp.StatusCode) - - // Should have CORS headers set - assert.Equal("*", resp.Header.Get("Access-Control-Allow-Origin")) - assert.Equal("*", resp.Header.Get("Access-Control-Expose-Headers")) - assert.Equal("*", resp.Header.Get("Access-Control-Allow-Methods")) - assert.Equal("*", resp.Header.Get("Access-Control-Allow-Headers")) - }) - } -} - -// TestServiceLevelCORSWithoutOrigin tests that service-level endpoints without Origin header don't get CORS headers -func TestServiceLevelCORSWithoutOrigin(t *testing.T) { - assert := assert.New(t) - - req, err := http.NewRequest("OPTIONS", fmt.Sprintf("%s/status", getDefaultConfig().Endpoint), nil) - assert.NoError(err) - - // No Origin header - - client := &http.Client{} - resp, err := client.Do(req) - assert.NoError(err) - defer resp.Body.Close() - - // Should return 200 OK - assert.Equal(http.StatusOK, resp.StatusCode) - - // Should not have CORS headers set (or have empty values) - corsHeaders := []string{ - "Access-Control-Allow-Origin", - "Access-Control-Expose-Headers", - "Access-Control-Allow-Methods", - "Access-Control-Allow-Headers", - } - - for _, header := range corsHeaders { - value := resp.Header.Get(header) - // Headers should either be empty or not present - assert.True(value == "" || value == "*", "Header %s should be empty or wildcard, got: %s", header, value) - } -} diff --git a/test/s3/cors/s3_cors_test.go b/test/s3/cors/s3_cors_test.go deleted file mode 100644 index 4d3d4555e..000000000 --- a/test/s3/cors/s3_cors_test.go +++ /dev/null @@ -1,686 +0,0 @@ -package cors - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/k0kubun/pp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// S3TestConfig holds configuration for S3 tests -type S3TestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool -} - -// getDefaultConfig returns a fresh instance of the default test configuration -// to avoid parallel test issues with global mutable state -func getDefaultConfig() *S3TestConfig { - return &S3TestConfig{ - Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-cors-", - UseSSL: false, - SkipVerifySSL: true, - } -} - -// getS3Client creates an AWS S3 client for testing -func getS3Client(t *testing.T) *s3.Client { - defaultConfig := getDefaultConfig() - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(defaultConfig.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - defaultConfig.AccessKey, - defaultConfig.SecretKey, - "", - )), - config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: defaultConfig.Endpoint, - SigningRegion: defaultConfig.Region, - }, nil - })), - ) - require.NoError(t, err) - - client := s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = true - }) - return client -} - -// createTestBucket creates a test bucket with a unique name -func createTestBucket(t *testing.T, client *s3.Client) string { - defaultConfig := getDefaultConfig() - bucketName := fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, time.Now().UnixNano()) - - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Wait for bucket metadata to be fully processed - time.Sleep(50 * time.Millisecond) - - return bucketName -} - -// cleanupTestBucket removes the test bucket and all its contents -func cleanupTestBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, delete all objects in the bucket - listResp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - }) - if err == nil { - for _, obj := range listResp.Contents { - _, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: obj.Key, - }) - if err != nil { - t.Logf("Warning: failed to delete object %s: %v", *obj.Key, err) - } - } - } - - // Then delete the bucket - _, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) - } -} - -// TestCORSConfigurationManagement tests basic CORS configuration CRUD operations -func TestCORSConfigurationManagement(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Test 1: Get CORS configuration when none exists (should return error) - _, err := client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - assert.Error(t, err, "Should get error when no CORS configuration exists") - - // Test 2: Put CORS configuration - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "POST", "PUT"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - assert.NoError(t, err, "Should be able to put CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Test 3: Get CORS configuration - getResp, err := client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - assert.NoError(t, err, "Should be able to get CORS configuration") - assert.NotNil(t, getResp.CORSRules, "CORS configuration should not be nil") - assert.Len(t, getResp.CORSRules, 1, "Should have one CORS rule") - - rule := getResp.CORSRules[0] - assert.Equal(t, []string{"*"}, rule.AllowedHeaders, "Allowed headers should match") - assert.Equal(t, []string{"GET", "POST", "PUT"}, rule.AllowedMethods, "Allowed methods should match") - assert.Equal(t, []string{"https://example.com"}, rule.AllowedOrigins, "Allowed origins should match") - assert.Equal(t, []string{"ETag"}, rule.ExposeHeaders, "Expose headers should match") - assert.Equal(t, aws.Int32(3600), rule.MaxAgeSeconds, "Max age should match") - - // Test 4: Update CORS configuration - updatedCorsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"Content-Type"}, - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"https://example.com", "https://another.com"}, - ExposeHeaders: []string{"ETag", "Content-Length"}, - MaxAgeSeconds: aws.Int32(7200), - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: updatedCorsConfig, - }) - require.NoError(t, err, "Should be able to update CORS configuration") - - // Wait for CORS configuration update to be fully processed - time.Sleep(100 * time.Millisecond) - - // Verify the update with retries for robustness - var updateSuccess bool - for i := 0; i < 3; i++ { - getResp, err = client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Attempt %d: Failed to get updated CORS config: %v", i+1, err) - time.Sleep(50 * time.Millisecond) - continue - } - - if len(getResp.CORSRules) > 0 { - rule = getResp.CORSRules[0] - // Check if the update actually took effect - if len(rule.AllowedHeaders) > 0 && rule.AllowedHeaders[0] == "Content-Type" && - len(rule.AllowedOrigins) > 1 { - updateSuccess = true - break - } - } - t.Logf("Attempt %d: CORS config not updated yet, retrying...", i+1) - time.Sleep(50 * time.Millisecond) - } - - require.NoError(t, err, "Should be able to get updated CORS configuration") - require.True(t, updateSuccess, "CORS configuration should be updated after retries") - assert.Equal(t, []string{"Content-Type"}, rule.AllowedHeaders, "Updated allowed headers should match") - assert.Equal(t, []string{"https://example.com", "https://another.com"}, rule.AllowedOrigins, "Updated allowed origins should match") - - // Test 5: Delete CORS configuration - _, err = client.DeleteBucketCors(context.TODO(), &s3.DeleteBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Should be able to delete CORS configuration") - - // Wait for deletion to be fully processed - time.Sleep(100 * time.Millisecond) - - // Verify deletion - should get NoSuchCORSConfiguration error - _, err = client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - - // Check that we get the expected error type - if err != nil { - // Log the error for debugging - t.Logf("Got expected error after CORS deletion: %v", err) - // Check if it's the correct error type (NoSuchCORSConfiguration) - errMsg := err.Error() - if !strings.Contains(errMsg, "NoSuchCORSConfiguration") && !strings.Contains(errMsg, "404") { - t.Errorf("Expected NoSuchCORSConfiguration error, got: %v", err) - } - } else { - // If no error, this might be a SeaweedFS implementation difference - // Some implementations might return empty config instead of error - t.Logf("CORS deletion test: No error returned - this may be implementation-specific behavior") - } -} - -// TestCORSMultipleRules tests CORS configuration with multiple rules -func TestCORSMultipleRules(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Create CORS configuration with multiple rules - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(3600), - }, - { - AllowedHeaders: []string{"Content-Type", "Authorization"}, - AllowedMethods: []string{"POST", "PUT", "DELETE"}, - AllowedOrigins: []string{"https://app.example.com"}, - ExposeHeaders: []string{"ETag", "Content-Length"}, - MaxAgeSeconds: aws.Int32(7200), - }, - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"*"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: aws.Int32(1800), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration with multiple rules") - - // Wait for CORS configuration to be fully processed - time.Sleep(100 * time.Millisecond) - - // Get and verify the configuration with retries for robustness - var getResp *s3.GetBucketCorsOutput - var getErr error - - // Retry getting CORS config up to 3 times to handle timing issues - for i := 0; i < 3; i++ { - getResp, getErr = client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - if getErr == nil { - break - } - t.Logf("Attempt %d: Failed to get multiple rules CORS config: %v", i+1, getErr) - time.Sleep(50 * time.Millisecond) - } - - require.NoError(t, getErr, "Should be able to get CORS configuration after retries") - require.NotNil(t, getResp, "GetBucketCors response should not be nil") - require.Len(t, getResp.CORSRules, 3, "Should have three CORS rules") - - // Verify first rule - rule1 := getResp.CORSRules[0] - assert.Equal(t, []string{"*"}, rule1.AllowedHeaders) - assert.Equal(t, []string{"GET", "HEAD"}, rule1.AllowedMethods) - assert.Equal(t, []string{"https://example.com"}, rule1.AllowedOrigins) - - // Verify second rule - rule2 := getResp.CORSRules[1] - assert.Equal(t, []string{"Content-Type", "Authorization"}, rule2.AllowedHeaders) - assert.Equal(t, []string{"POST", "PUT", "DELETE"}, rule2.AllowedMethods) - assert.Equal(t, []string{"https://app.example.com"}, rule2.AllowedOrigins) - - // Verify third rule - rule3 := getResp.CORSRules[2] - assert.Equal(t, []string{"*"}, rule3.AllowedHeaders) - assert.Equal(t, []string{"GET"}, rule3.AllowedMethods) - assert.Equal(t, []string{"*"}, rule3.AllowedOrigins) -} - -// TestCORSValidation tests CORS configuration validation -func TestCORSValidation(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Test invalid HTTP method - invalidMethodConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"INVALID_METHOD"}, - AllowedOrigins: []string{"https://example.com"}, - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: invalidMethodConfig, - }) - assert.Error(t, err, "Should get error for invalid HTTP method") - - // Test empty origins - emptyOriginsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{}, - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: emptyOriginsConfig, - }) - assert.Error(t, err, "Should get error for empty origins") - - // Test negative MaxAge - negativeMaxAgeConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"https://example.com"}, - MaxAgeSeconds: aws.Int32(-1), - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: negativeMaxAgeConfig, - }) - assert.Error(t, err, "Should get error for negative MaxAge") -} - -// TestCORSWithWildcards tests CORS configuration with wildcard patterns -func TestCORSWithWildcards(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Create CORS configuration with wildcard patterns - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"https://*.example.com"}, - ExposeHeaders: []string{"*"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - require.NoError(t, err, "Should be able to put CORS configuration with wildcards") - - // Wait for CORS configuration to be fully processed and available - time.Sleep(100 * time.Millisecond) - - // Get and verify the configuration with retries for robustness - var getResp *s3.GetBucketCorsOutput - var getErr error - - // Retry getting CORS config up to 3 times to handle timing issues - for i := 0; i < 3; i++ { - getResp, getErr = client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - if getErr == nil { - break - } - t.Logf("Attempt %d: Failed to get CORS config: %v", i+1, getErr) - time.Sleep(50 * time.Millisecond) - } - - require.NoError(t, getErr, "Should be able to get CORS configuration after retries") - require.NotNil(t, getResp, "GetBucketCors response should not be nil") - require.Len(t, getResp.CORSRules, 1, "Should have one CORS rule") - - rule := getResp.CORSRules[0] - require.NotNil(t, rule, "CORS rule should not be nil") - assert.Equal(t, []string{"*"}, rule.AllowedHeaders, "Wildcard headers should be preserved") - assert.Equal(t, []string{"https://*.example.com"}, rule.AllowedOrigins, "Wildcard origins should be preserved") - assert.Equal(t, []string{"*"}, rule.ExposeHeaders, "Wildcard expose headers should be preserved") -} - -// TestCORSRuleLimit tests the maximum number of CORS rules -func TestCORSRuleLimit(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Create CORS configuration with maximum allowed rules (100) - rules := make([]types.CORSRule, 100) - for i := 0; i < 100; i++ { - rules[i] = types.CORSRule{ - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{fmt.Sprintf("https://example%d.com", i)}, - MaxAgeSeconds: aws.Int32(3600), - } - } - - corsConfig := &types.CORSConfiguration{ - CORSRules: rules, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - assert.NoError(t, err, "Should be able to put CORS configuration with 100 rules") - - // Try to add one more rule (should fail) - rules = append(rules, types.CORSRule{ - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"https://example101.com"}, - MaxAgeSeconds: aws.Int32(3600), - }) - - corsConfig.CORSRules = rules - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - assert.Error(t, err, "Should get error when exceeding maximum number of rules") -} - -// TestCORSNonExistentBucket tests CORS operations on non-existent bucket -func TestCORSNonExistentBucket(t *testing.T) { - client := getS3Client(t) - nonExistentBucket := "non-existent-bucket-cors-test" - - // Test Get CORS on non-existent bucket - _, err := client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(nonExistentBucket), - }) - assert.Error(t, err, "Should get error for non-existent bucket") - - // Test Put CORS on non-existent bucket - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"https://example.com"}, - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(nonExistentBucket), - CORSConfiguration: corsConfig, - }) - assert.Error(t, err, "Should get error for non-existent bucket") - - // Test Delete CORS on non-existent bucket - _, err = client.DeleteBucketCors(context.TODO(), &s3.DeleteBucketCorsInput{ - Bucket: aws.String(nonExistentBucket), - }) - assert.Error(t, err, "Should get error for non-existent bucket") -} - -// TestCORSObjectOperations tests CORS behavior with object operations -func TestCORSObjectOperations(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up CORS configuration - corsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"}, - AllowedOrigins: []string{"https://example.com"}, - ExposeHeaders: []string{"ETag", "Content-Length"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig, - }) - assert.NoError(t, err, "Should be able to put CORS configuration") - - // Test putting an object (this should work normally) - objectKey := "test-object.txt" - objectContent := "Hello, CORS World!" - - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(objectContent), - }) - assert.NoError(t, err, "Should be able to put object in CORS-enabled bucket") - - // Test getting the object - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - assert.NoError(t, err, "Should be able to get object from CORS-enabled bucket") - assert.NotNil(t, getResp.Body, "Object body should not be nil") - - // Test deleting the object - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - assert.NoError(t, err, "Should be able to delete object from CORS-enabled bucket") -} - -// TestCORSCaching tests CORS configuration caching behavior -func TestCORSCaching(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Set up initial CORS configuration - corsConfig1 := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"https://example.com"}, - MaxAgeSeconds: aws.Int32(3600), - }, - }, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig1, - }) - assert.NoError(t, err, "Should be able to put initial CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Get the configuration - getResp1, err := client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - assert.NoError(t, err, "Should be able to get initial CORS configuration") - assert.Len(t, getResp1.CORSRules, 1, "Should have one CORS rule") - - // Update the configuration - corsConfig2 := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"Content-Type"}, - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"https://example.com", "https://another.com"}, - MaxAgeSeconds: aws.Int32(7200), - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: corsConfig2, - }) - assert.NoError(t, err, "Should be able to update CORS configuration") - - // Wait for metadata subscription to update cache - time.Sleep(50 * time.Millisecond) - - // Get the updated configuration (should reflect the changes) - getResp2, err := client.GetBucketCors(context.TODO(), &s3.GetBucketCorsInput{ - Bucket: aws.String(bucketName), - }) - assert.NoError(t, err, "Should be able to get updated CORS configuration") - assert.Len(t, getResp2.CORSRules, 1, "Should have one CORS rule") - - rule := getResp2.CORSRules[0] - assert.Equal(t, []string{"Content-Type"}, rule.AllowedHeaders, "Should have updated headers") - assert.Equal(t, []string{"GET", "POST"}, rule.AllowedMethods, "Should have updated methods") - assert.Equal(t, []string{"https://example.com", "https://another.com"}, rule.AllowedOrigins, "Should have updated origins") - assert.Equal(t, aws.Int32(7200), rule.MaxAgeSeconds, "Should have updated max age") -} - -// TestCORSErrorHandling tests various error conditions -func TestCORSErrorHandling(t *testing.T) { - client := getS3Client(t) - bucketName := createTestBucket(t, client) - defer cleanupTestBucket(t, client, bucketName) - - // Test empty CORS configuration - emptyCorsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{}, - } - - _, err := client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: emptyCorsConfig, - }) - assert.Error(t, err, "Should get error for empty CORS configuration") - - // Test nil CORS configuration - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: nil, - }) - assert.Error(t, err, "Should get error for nil CORS configuration") - - // Test CORS rule with empty methods - emptyMethodsConfig := &types.CORSConfiguration{ - CORSRules: []types.CORSRule{ - { - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{}, - AllowedOrigins: []string{"https://example.com"}, - }, - }, - } - - _, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{ - Bucket: aws.String(bucketName), - CORSConfiguration: emptyMethodsConfig, - }) - assert.Error(t, err, "Should get error for empty methods") -} - -// Debugging helper to pretty print responses -func debugResponse(t *testing.T, title string, response interface{}) { - t.Logf("=== %s ===", title) - pp.Println(response) -} diff --git a/test/s3/fix_s3_tests_bucket_conflicts.py b/test/s3/fix_s3_tests_bucket_conflicts.py deleted file mode 100644 index 39019d460..000000000 --- a/test/s3/fix_s3_tests_bucket_conflicts.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python3 -""" -Patch Ceph s3-tests helpers to avoid bucket name mismatches and make bucket -creation idempotent when a fixed bucket name is provided. - -Why: -- Some tests call get_new_bucket() to get a name, then call - get_new_bucket_resource(name=) which unconditionally calls - CreateBucket again. If the bucket already exists, boto3 raises a - ClientError. We want to treat that as idempotent and reuse the bucket. -- We must NOT silently generate a different bucket name when a name is - explicitly provided, otherwise subsequent test steps still reference the - original string and read from the wrong (empty) bucket. - -What this does: -- get_new_bucket_resource(name=...): - - Try to create the exact bucket name. - - If error code is BucketAlreadyOwnedByYou OR BucketAlreadyExists, simply - reuse and return the bucket object for that SAME name. - - Only when name is None, generate a new unique name with retries. -- get_new_bucket(client=None, name=None): - - If name is None, generate unique names with retries until creation - succeeds, and return the actual name string to the caller. - -This keeps bucket names consistent across the test helper calls and prevents -404s or KeyErrors later in the tests that depend on that bucket name. -""" - -import os -import sys - - -def patch_s3_tests_init_file(file_path: str) -> bool: - if not os.path.exists(file_path): - print(f"Error: File {file_path} not found") - return False - - print(f"Patching {file_path}...") - with open(file_path, "r", encoding="utf-8") as f: - content = f.read() - - # If already patched, skip - if "max_retries = 10" in content and "BucketAlreadyOwnedByYou" in content and "BucketAlreadyExists" in content: - print("Already patched. Skipping.") - return True - - old_resource_func = '''def get_new_bucket_resource(name=None): - """ - Get a bucket that exists and is empty. - - Always recreates a bucket from scratch. This is useful to also - reset ACLs and such. - """ - s3 = boto3.resource('s3', - aws_access_key_id=config.main_access_key, - aws_secret_access_key=config.main_secret_key, - endpoint_url=config.default_endpoint, - use_ssl=config.default_is_secure, - verify=config.default_ssl_verify) - if name is None: - name = get_new_bucket_name() - bucket = s3.Bucket(name) - bucket_location = bucket.create() - return bucket''' - - new_resource_func = '''def get_new_bucket_resource(name=None): - """ - Get a bucket that exists and is empty. - - Always recreates a bucket from scratch. This is useful to also - reset ACLs and such. - """ - s3 = boto3.resource('s3', - aws_access_key_id=config.main_access_key, - aws_secret_access_key=config.main_secret_key, - endpoint_url=config.default_endpoint, - use_ssl=config.default_is_secure, - verify=config.default_ssl_verify) - - from botocore.exceptions import ClientError - - # If a name is provided, do not change it. Reuse that exact bucket name. - if name is not None: - bucket = s3.Bucket(name) - try: - bucket.create() - except ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'): - # Treat as idempotent create for an explicitly provided name. - # We must not change the name or tests will read from the wrong bucket. - return bucket - # Other errors should surface - raise - else: - return bucket - - # Only generate unique names when no name was provided - max_retries = 10 - for attempt in range(max_retries): - gen_name = get_new_bucket_name() - bucket = s3.Bucket(gen_name) - try: - bucket.create() - return bucket - except ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'): - if attempt == max_retries - 1: - raise Exception(f"Failed to create unique bucket after {max_retries} attempts") - continue - else: - raise''' - - old_client_func = '''def get_new_bucket(client=None, name=None): - """ - Get a bucket that exists and is empty. - - Always recreates a bucket from scratch. This is useful to also - reset ACLs and such. - """ - if client is None: - client = get_client() - if name is None: - name = get_new_bucket_name() - - client.create_bucket(Bucket=name) - return name''' - - new_client_func = '''def get_new_bucket(client=None, name=None): - """ - Get a bucket that exists and is empty. - - Always recreates a bucket from scratch. This is useful to also - reset ACLs and such. - """ - if client is None: - client = get_client() - - from botocore.exceptions import ClientError - - # If a name is provided, just try to create it once and fall back to idempotent reuse - if name is not None: - try: - client.create_bucket(Bucket=name) - except ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'): - return name - raise - else: - return name - - # Otherwise, generate a unique name with retries and return the actual name string - max_retries = 10 - for attempt in range(max_retries): - gen_name = get_new_bucket_name() - try: - client.create_bucket(Bucket=gen_name) - return gen_name - except ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'): - if attempt == max_retries - 1: - raise Exception(f"Failed to create unique bucket after {max_retries} attempts") - continue - else: - raise''' - - updated = content - updated = updated.replace(old_resource_func, new_resource_func) - updated = updated.replace(old_client_func, new_client_func) - - if updated == content: - print("Patterns not found; appending override implementations to end of file.") - append_patch = ''' - -# --- SeaweedFS override start --- -from botocore.exceptions import ClientError as _Sw_ClientError - - -# Idempotent create for provided name; generate unique only when no name given -# Keep the bucket name stable when provided by the caller - -def _sw_get_new_bucket_resource(name=None): - s3 = boto3.resource('s3', - aws_access_key_id=config.main_access_key, - aws_secret_access_key=config.main_secret_key, - endpoint_url=config.default_endpoint, - use_ssl=config.default_is_secure, - verify=config.default_ssl_verify) - if name is not None: - bucket = s3.Bucket(name) - try: - bucket.create() - except _Sw_ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'): - return bucket - raise - else: - return bucket - # name not provided: generate unique - max_retries = 10 - for attempt in range(max_retries): - gen_name = get_new_bucket_name() - bucket = s3.Bucket(gen_name) - try: - bucket.create() - return bucket - except _Sw_ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'): - if attempt == max_retries - 1: - raise Exception(f"Failed to create unique bucket after {max_retries} attempts") - continue - else: - raise - - -from botocore.exceptions import ClientError as _Sw2_ClientError - - -def _sw_get_new_bucket(client=None, name=None): - if client is None: - client = get_client() - if name is not None: - try: - client.create_bucket(Bucket=name) - except _Sw2_ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyOwnedByYou', 'BucketAlreadyExists'): - return name - raise - else: - return name - max_retries = 10 - for attempt in range(max_retries): - gen_name = get_new_bucket_name() - try: - client.create_bucket(Bucket=gen_name) - return gen_name - except _Sw2_ClientError as e: - code = e.response.get('Error', {}).get('Code') - if code in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'): - if attempt == max_retries - 1: - raise Exception(f"Failed to create unique bucket after {max_retries} attempts") - continue - else: - raise - -# Override original helper functions -get_new_bucket_resource = _sw_get_new_bucket_resource -get_new_bucket = _sw_get_new_bucket -# --- SeaweedFS override end --- -''' - with open(file_path, "a", encoding="utf-8") as f: - f.write(append_patch) - print("Appended override implementations.") - return True - - with open(file_path, "w", encoding="utf-8") as f: - f.write(updated) - - print("Successfully patched s3-tests helpers.") - return True - - -def main() -> int: - s3_tests_path = os.environ.get("S3_TESTS_PATH", "s3-tests") - init_file_path = os.path.join(s3_tests_path, "s3tests", "functional", "__init__.py") - print("Applying s3-tests patch for bucket creation idempotency...") - print(f"Target repo path: {s3_tests_path}") - if not os.path.exists(s3_tests_path): - print(f"Warning: s3-tests directory not found at {s3_tests_path}") - print("Skipping patch - directory structure may have changed in the upstream repository") - return 0 # Return success to not break CI - if not os.path.exists(init_file_path): - print(f"Warning: Target file {init_file_path} not found") - print("This may indicate the s3-tests repository structure has changed.") - print("Skipping patch - tests may still work without it") - return 0 # Return success to not break CI - ok = patch_s3_tests_init_file(init_file_path) - return 0 if ok else 1 - - -if __name__ == "__main__": - sys.exit(main()) - - diff --git a/test/s3/iam/Dockerfile.s3 b/test/s3/iam/Dockerfile.s3 deleted file mode 100644 index 36f0ead1f..000000000 --- a/test/s3/iam/Dockerfile.s3 +++ /dev/null @@ -1,33 +0,0 @@ -# Multi-stage build for SeaweedFS S3 with IAM -FROM golang:1.23-alpine AS builder - -# Install build dependencies -RUN apk add --no-cache git make curl wget - -# Set working directory -WORKDIR /app - -# Copy source code -COPY . . - -# Build SeaweedFS with IAM integration -RUN cd weed && go build -o /usr/local/bin/weed - -# Final runtime image -FROM alpine:latest - -# Install runtime dependencies -RUN apk add --no-cache ca-certificates wget curl - -# Copy weed binary -COPY --from=builder /usr/local/bin/weed /usr/local/bin/weed - -# Create directories -RUN mkdir -p /etc/seaweedfs /data - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD wget --quiet --tries=1 --spider http://localhost:8333/ || exit 1 - -# Set entrypoint -ENTRYPOINT ["/usr/local/bin/weed"] diff --git a/test/s3/iam/Makefile b/test/s3/iam/Makefile deleted file mode 100644 index 57d0ca9df..000000000 --- a/test/s3/iam/Makefile +++ /dev/null @@ -1,306 +0,0 @@ -# SeaweedFS S3 IAM Integration Tests Makefile - -.PHONY: all test clean setup start-services stop-services wait-for-services help - -# Default target -all: test - -# Test configuration -WEED_BINARY ?= $(shell go env GOPATH)/bin/weed -LOG_LEVEL ?= 2 -S3_PORT ?= 8333 -FILER_PORT ?= 8888 -MASTER_PORT ?= 9333 -VOLUME_PORT ?= 8081 -TEST_TIMEOUT ?= 30m - -# Service PIDs -MASTER_PID_FILE = /tmp/weed-master.pid -VOLUME_PID_FILE = /tmp/weed-volume.pid -FILER_PID_FILE = /tmp/weed-filer.pid -S3_PID_FILE = /tmp/weed-s3.pid - -help: ## Show this help message - @echo "SeaweedFS S3 IAM Integration Tests" - @echo "" - @echo "Usage:" - @echo " make [target]" - @echo "" - @echo "Standard Targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " %-25s %s\n", $$1, $$2}' $(MAKEFILE_LIST) | head -20 - @echo "" - @echo "New Test Targets (Previously Skipped):" - @echo " test-distributed Run distributed IAM tests" - @echo " test-performance Run performance tests" - @echo " test-stress Run stress tests" - @echo " test-versioning-stress Run S3 versioning stress tests" - @echo " test-keycloak-full Run complete Keycloak integration tests" - @echo " test-all-previously-skipped Run all previously skipped tests" - @echo " setup-all-tests Setup environment for all tests" - @echo "" - @echo "Docker Compose Targets:" - @echo " docker-test Run tests with Docker Compose including Keycloak" - @echo " docker-up Start all services with Docker Compose" - @echo " docker-down Stop all Docker Compose services" - @echo " docker-logs Show logs from all services" - -test: clean setup start-services run-tests stop-services ## Run complete IAM integration test suite - -test-quick: run-tests ## Run tests assuming services are already running - -run-tests: ## Execute the Go tests - @echo "๐Ÿงช Running S3 IAM Integration Tests..." - go test -v -timeout $(TEST_TIMEOUT) ./... - -setup: ## Setup test environment - @echo "๐Ÿ”ง Setting up test environment..." - @mkdir -p test-volume-data/filerldb2 - @mkdir -p test-volume-data/m9333 - -start-services: ## Start SeaweedFS services for testing - @echo "๐Ÿš€ Starting SeaweedFS services..." - @echo "Starting master server..." - @$(WEED_BINARY) master -port=$(MASTER_PORT) \ - -mdir=test-volume-data/m9333 > weed-master.log 2>&1 & \ - echo $$! > $(MASTER_PID_FILE) - - @echo "Waiting for master server to be ready..." - @timeout 60 bash -c 'until curl -s http://localhost:$(MASTER_PORT)/cluster/status > /dev/null 2>&1; do echo "Waiting for master server..."; sleep 2; done' || (echo "โŒ Master failed to start, checking logs..." && tail -20 weed-master.log && exit 1) - @echo "โœ… Master server is ready" - - @echo "Starting volume server..." - @$(WEED_BINARY) volume -port=$(VOLUME_PORT) \ - -ip=localhost \ - -dataCenter=dc1 -rack=rack1 \ - -dir=test-volume-data \ - -max=100 \ - -mserver=localhost:$(MASTER_PORT) > weed-volume.log 2>&1 & \ - echo $$! > $(VOLUME_PID_FILE) - - @echo "Waiting for volume server to be ready..." - @timeout 60 bash -c 'until curl -s http://localhost:$(VOLUME_PORT)/status > /dev/null 2>&1; do echo "Waiting for volume server..."; sleep 2; done' || (echo "โŒ Volume server failed to start, checking logs..." && tail -20 weed-volume.log && exit 1) - @echo "โœ… Volume server is ready" - - @echo "Starting filer server..." - @$(WEED_BINARY) filer -port=$(FILER_PORT) \ - -defaultStoreDir=test-volume-data/filerldb2 \ - -master=localhost:$(MASTER_PORT) > weed-filer.log 2>&1 & \ - echo $$! > $(FILER_PID_FILE) - - @echo "Waiting for filer server to be ready..." - @timeout 60 bash -c 'until curl -s http://localhost:$(FILER_PORT)/status > /dev/null 2>&1; do echo "Waiting for filer server..."; sleep 2; done' || (echo "โŒ Filer failed to start, checking logs..." && tail -20 weed-filer.log && exit 1) - @echo "โœ… Filer server is ready" - - @echo "Starting S3 API server with IAM..." - @$(WEED_BINARY) -v=3 s3 -port=$(S3_PORT) \ - -filer=localhost:$(FILER_PORT) \ - -config=test_config.json \ - -iam.config=$(CURDIR)/iam_config.json > weed-s3.log 2>&1 & \ - echo $$! > $(S3_PID_FILE) - - @echo "Waiting for S3 API server to be ready..." - @timeout 60 bash -c 'until curl -s http://localhost:$(S3_PORT) > /dev/null 2>&1; do echo "Waiting for S3 API server..."; sleep 2; done' || (echo "โŒ S3 API failed to start, checking logs..." && tail -20 weed-s3.log && exit 1) - @echo "โœ… S3 API server is ready" - - @echo "โœ… All services started and ready" - -wait-for-services: ## Wait for all services to be ready - @echo "โณ Waiting for services to be ready..." - @echo "Checking master server..." - @timeout 30 bash -c 'until curl -s http://localhost:$(MASTER_PORT)/cluster/status > /dev/null; do sleep 1; done' || (echo "โŒ Master failed to start" && exit 1) - - @echo "Checking filer server..." - @timeout 30 bash -c 'until curl -s http://localhost:$(FILER_PORT)/status > /dev/null; do sleep 1; done' || (echo "โŒ Filer failed to start" && exit 1) - - @echo "Checking S3 API server..." - @timeout 30 bash -c 'until curl -s http://localhost:$(S3_PORT) > /dev/null 2>&1; do sleep 1; done' || (echo "โŒ S3 API failed to start" && exit 1) - - @echo "Pre-allocating volumes for concurrent operations..." - @curl -s "http://localhost:$(MASTER_PORT)/vol/grow?collection=default&count=10&replication=000" > /dev/null || echo "โš ๏ธ Volume pre-allocation failed, but continuing..." - @sleep 3 - @echo "โœ… All services are ready" - -stop-services: ## Stop all SeaweedFS services - @echo "๐Ÿ›‘ Stopping SeaweedFS services..." - @if [ -f $(S3_PID_FILE) ]; then \ - echo "Stopping S3 API server..."; \ - kill $$(cat $(S3_PID_FILE)) 2>/dev/null || true; \ - rm -f $(S3_PID_FILE); \ - fi - @if [ -f $(FILER_PID_FILE) ]; then \ - echo "Stopping filer server..."; \ - kill $$(cat $(FILER_PID_FILE)) 2>/dev/null || true; \ - rm -f $(FILER_PID_FILE); \ - fi - @if [ -f $(VOLUME_PID_FILE) ]; then \ - echo "Stopping volume server..."; \ - kill $$(cat $(VOLUME_PID_FILE)) 2>/dev/null || true; \ - rm -f $(VOLUME_PID_FILE); \ - fi - @if [ -f $(MASTER_PID_FILE) ]; then \ - echo "Stopping master server..."; \ - kill $$(cat $(MASTER_PID_FILE)) 2>/dev/null || true; \ - rm -f $(MASTER_PID_FILE); \ - fi - @echo "โœ… All services stopped" - -clean: stop-services ## Clean up test environment - @echo "๐Ÿงน Cleaning up test environment..." - @rm -rf test-volume-data - @rm -f weed-*.log - @rm -f *.test - @echo "โœ… Cleanup complete" - -logs: ## Show service logs - @echo "๐Ÿ“‹ Service Logs:" - @echo "=== Master Log ===" - @tail -20 weed-master.log 2>/dev/null || echo "No master log" - @echo "" - @echo "=== Volume Log ===" - @tail -20 weed-volume.log 2>/dev/null || echo "No volume log" - @echo "" - @echo "=== Filer Log ===" - @tail -20 weed-filer.log 2>/dev/null || echo "No filer log" - @echo "" - @echo "=== S3 API Log ===" - @tail -20 weed-s3.log 2>/dev/null || echo "No S3 log" - -status: ## Check service status - @echo "๐Ÿ“Š Service Status:" - @echo -n "Master: "; curl -s http://localhost:$(MASTER_PORT)/cluster/status > /dev/null 2>&1 && echo "โœ… Running" || echo "โŒ Not running" - @echo -n "Filer: "; curl -s http://localhost:$(FILER_PORT)/status > /dev/null 2>&1 && echo "โœ… Running" || echo "โŒ Not running" - @echo -n "S3 API: "; curl -s http://localhost:$(S3_PORT) > /dev/null 2>&1 && echo "โœ… Running" || echo "โŒ Not running" - -debug: start-services wait-for-services ## Start services and keep them running for debugging - @echo "๐Ÿ› Services started in debug mode. Press Ctrl+C to stop..." - @trap 'make stop-services' INT; \ - while true; do \ - sleep 1; \ - done - -# Test specific scenarios -test-auth: ## Test only authentication scenarios - go test -v -run TestS3IAMAuthentication ./... - -test-policy: ## Test only policy enforcement - go test -v -run TestS3IAMPolicyEnforcement ./... - -test-expiration: ## Test only session expiration - go test -v -run TestS3IAMSessionExpiration ./... - -test-multipart: ## Test only multipart upload IAM integration - go test -v -run TestS3IAMMultipartUploadPolicyEnforcement ./... - -test-bucket-policy: ## Test only bucket policy integration - go test -v -run TestS3IAMBucketPolicyIntegration ./... - -test-context: ## Test only contextual policy enforcement - go test -v -run TestS3IAMContextualPolicyEnforcement ./... - -test-presigned: ## Test only presigned URL integration - go test -v -run TestS3IAMPresignedURLIntegration ./... - -# Performance testing -benchmark: setup start-services wait-for-services ## Run performance benchmarks - @echo "๐Ÿ Running IAM performance benchmarks..." - go test -bench=. -benchmem -timeout $(TEST_TIMEOUT) ./... - @make stop-services - -# Continuous integration -ci: ## Run tests suitable for CI environment - @echo "๐Ÿ”„ Running CI tests..." - @export CGO_ENABLED=0; make test - -# Development helpers -watch: ## Watch for file changes and re-run tests - @echo "๐Ÿ‘€ Watching for changes..." - @command -v entr >/dev/null 2>&1 || (echo "entr is required for watch mode. Install with: brew install entr" && exit 1) - @find . -name "*.go" | entr -r make test-quick - -install-deps: ## Install test dependencies - @echo "๐Ÿ“ฆ Installing test dependencies..." - go mod tidy - go get -u github.com/stretchr/testify - go get -u github.com/aws/aws-sdk-go - go get -u github.com/golang-jwt/jwt/v5 - -# Docker support -docker-test-legacy: ## Run tests in Docker container (legacy) - @echo "๐Ÿณ Running tests in Docker..." - docker build -f Dockerfile.test -t seaweedfs-s3-iam-test . - docker run --rm -v $(PWD)/../../../:/app seaweedfs-s3-iam-test - -# Docker Compose support with Keycloak -docker-up: ## Start all services with Docker Compose (including Keycloak) - @echo "๐Ÿณ Starting services with Docker Compose including Keycloak..." - @docker compose up -d - @echo "โณ Waiting for services to be healthy..." - @timeout 120 bash -c 'until curl -s http://localhost:8080/health/ready > /dev/null 2>&1; do sleep 2; done' || (echo "โŒ Keycloak failed to become ready" && exit 1) - @timeout 60 bash -c 'until curl -s http://localhost:8333 > /dev/null 2>&1; do sleep 2; done' || (echo "โŒ S3 API failed to become ready" && exit 1) - @timeout 60 bash -c 'until curl -s http://localhost:8888 > /dev/null 2>&1; do sleep 2; done' || (echo "โŒ Filer failed to become ready" && exit 1) - @timeout 60 bash -c 'until curl -s http://localhost:9333 > /dev/null 2>&1; do sleep 2; done' || (echo "โŒ Master failed to become ready" && exit 1) - @echo "โœ… All services are healthy and ready" - -docker-down: ## Stop all Docker Compose services - @echo "๐Ÿณ Stopping Docker Compose services..." - @docker compose down -v - @echo "โœ… All services stopped" - -docker-logs: ## Show logs from all services - @docker compose logs -f - -docker-test: docker-up ## Run tests with Docker Compose including Keycloak - @echo "๐Ÿงช Running Keycloak integration tests..." - @export KEYCLOAK_URL="http://localhost:8080" && \ - export S3_ENDPOINT="http://localhost:8333" && \ - go test -v -timeout $(TEST_TIMEOUT) -run "TestKeycloak" ./... - @echo "๐Ÿณ Stopping services after tests..." - @make docker-down - -docker-build: ## Build custom SeaweedFS image for Docker tests - @echo "๐Ÿ—๏ธ Building custom SeaweedFS image..." - @docker build -f Dockerfile.s3 -t seaweedfs-iam:latest ../../.. - @echo "โœ… Image built successfully" - -# All PHONY targets -.PHONY: test test-quick run-tests setup start-services stop-services wait-for-services clean logs status debug -.PHONY: test-auth test-policy test-expiration test-multipart test-bucket-policy test-context test-presigned -.PHONY: benchmark ci watch install-deps docker-test docker-up docker-down docker-logs docker-build -.PHONY: test-distributed test-performance test-stress test-versioning-stress test-keycloak-full test-all-previously-skipped setup-all-tests help-advanced - - - -# New test targets for previously skipped tests - -test-distributed: ## Run distributed IAM tests - @echo "๐ŸŒ Running distributed IAM tests..." - @export ENABLE_DISTRIBUTED_TESTS=true && go test -v -timeout $(TEST_TIMEOUT) -run "TestS3IAMDistributedTests" ./... - -test-performance: ## Run performance tests - @echo "๐Ÿ Running performance tests..." - @export ENABLE_PERFORMANCE_TESTS=true && go test -v -timeout $(TEST_TIMEOUT) -run "TestS3IAMPerformanceTests" ./... - -test-stress: ## Run stress tests - @echo "๐Ÿ’ช Running stress tests..." - @export ENABLE_STRESS_TESTS=true && ./run_stress_tests.sh - -test-versioning-stress: ## Run S3 versioning stress tests - @echo "๐Ÿ“š Running versioning stress tests..." - @cd ../versioning && ./enable_stress_tests.sh - -test-keycloak-full: docker-up ## Run complete Keycloak integration tests - @echo "๐Ÿ” Running complete Keycloak integration tests..." - @export KEYCLOAK_URL="http://localhost:8080" && \ - export S3_ENDPOINT="http://localhost:8333" && \ - go test -v -timeout $(TEST_TIMEOUT) -run "TestKeycloak" ./... - @make docker-down - -test-all-previously-skipped: ## Run all previously skipped tests - @echo "๐ŸŽฏ Running all previously skipped tests..." - @./run_all_tests.sh - -setup-all-tests: ## Setup environment for all tests (including Keycloak) - @echo "๐Ÿš€ Setting up complete test environment..." - @./setup_all_tests.sh - - diff --git a/test/s3/iam/Makefile.docker b/test/s3/iam/Makefile.docker deleted file mode 100644 index 0e175a1aa..000000000 --- a/test/s3/iam/Makefile.docker +++ /dev/null @@ -1,166 +0,0 @@ -# Makefile for SeaweedFS S3 IAM Integration Tests with Docker Compose -.PHONY: help docker-build docker-up docker-down docker-logs docker-test docker-clean docker-status docker-keycloak-setup - -# Default target -.DEFAULT_GOAL := help - -# Docker Compose configuration -COMPOSE_FILE := docker-compose.yml -PROJECT_NAME := seaweedfs-iam-test - -help: ## Show this help message - @echo "SeaweedFS S3 IAM Integration Tests - Docker Compose" - @echo "" - @echo "Available commands:" - @echo "" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) - @echo "" - @echo "Environment:" - @echo " COMPOSE_FILE: $(COMPOSE_FILE)" - @echo " PROJECT_NAME: $(PROJECT_NAME)" - -docker-build: ## Build local SeaweedFS image for testing - @echo "๐Ÿ”จ Building local SeaweedFS image..." - @echo "Creating build directory..." - @cd ../../.. && mkdir -p .docker-build - @echo "Building weed binary..." - @cd ../../.. && cd weed && go build -o ../.docker-build/weed - @echo "Copying required files to build directory..." - @cd ../../.. && cp docker/filer.toml .docker-build/ && cp docker/entrypoint.sh .docker-build/ - @echo "Building Docker image..." - @cd ../../.. && docker build -f docker/Dockerfile.local -t local/seaweedfs:latest .docker-build/ - @echo "Cleaning up build directory..." - @cd ../../.. && rm -rf .docker-build - @echo "โœ… Built local/seaweedfs:latest" - -docker-up: ## Start all services with Docker Compose - @echo "๐Ÿš€ Starting SeaweedFS S3 IAM integration environment..." - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) up -d - @echo "" - @echo "โœ… Environment started! Services will be available at:" - @echo " ๐Ÿ” Keycloak: http://localhost:8080 (admin/admin)" - @echo " ๐Ÿ—„๏ธ S3 API: http://localhost:8333" - @echo " ๐Ÿ“ Filer: http://localhost:8888" - @echo " ๐ŸŽฏ Master: http://localhost:9333" - @echo "" - @echo "โณ Waiting for all services to be healthy..." - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) ps - -docker-down: ## Stop and remove all containers - @echo "๐Ÿ›‘ Stopping SeaweedFS S3 IAM integration environment..." - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) down -v - @echo "โœ… Environment stopped and cleaned up" - -docker-restart: docker-down docker-up ## Restart the entire environment - -docker-logs: ## Show logs from all services - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) logs -f - -docker-logs-s3: ## Show logs from S3 service only - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) logs -f weed-s3 - -docker-logs-keycloak: ## Show logs from Keycloak service only - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) logs -f keycloak - -docker-status: ## Check status of all services - @echo "๐Ÿ“Š Service Status:" - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) ps - @echo "" - @echo "๐Ÿฅ Health Checks:" - @docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep $(PROJECT_NAME) || true - -docker-test: docker-wait-healthy ## Run integration tests against Docker environment - @echo "๐Ÿงช Running SeaweedFS S3 IAM integration tests..." - @echo "" - @KEYCLOAK_URL=http://localhost:8080 go test -v -timeout 10m ./... - -docker-test-single: ## Run a single test (use TEST_NAME=TestName) - @if [ -z "$(TEST_NAME)" ]; then \ - echo "โŒ Please specify TEST_NAME, e.g., make docker-test-single TEST_NAME=TestKeycloakAuthentication"; \ - exit 1; \ - fi - @echo "๐Ÿงช Running single test: $(TEST_NAME)" - @KEYCLOAK_URL=http://localhost:8080 go test -v -run "$(TEST_NAME)" -timeout 5m ./... - -docker-keycloak-setup: ## Manually run Keycloak setup (usually automatic) - @echo "๐Ÿ”ง Running Keycloak setup manually..." - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) run --rm keycloak-setup - -docker-clean: ## Clean up everything (containers, volumes, images) - @echo "๐Ÿงน Cleaning up Docker environment..." - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) down -v --remove-orphans - @docker system prune -f - @echo "โœ… Cleanup complete" - -docker-shell-s3: ## Get shell access to S3 container - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) exec weed-s3 sh - -docker-shell-keycloak: ## Get shell access to Keycloak container - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) exec keycloak bash - -docker-debug: ## Show debug information - @echo "๐Ÿ” Docker Environment Debug Information" - @echo "" - @echo "๐Ÿ“‹ Docker Compose Config:" - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) config - @echo "" - @echo "๐Ÿ“Š Container Status:" - @docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) ps - @echo "" - @echo "๐ŸŒ Network Information:" - @docker network ls | grep $(PROJECT_NAME) || echo "No networks found" - @echo "" - @echo "๐Ÿ’พ Volume Information:" - @docker volume ls | grep $(PROJECT_NAME) || echo "No volumes found" - -# Quick test targets -docker-test-auth: ## Quick test of authentication only - @KEYCLOAK_URL=http://localhost:8080 go test -v -run "TestKeycloakAuthentication" -timeout 2m ./... - -docker-test-roles: ## Quick test of role mapping only - @KEYCLOAK_URL=http://localhost:8080 go test -v -run "TestKeycloakRoleMapping" -timeout 2m ./... - -docker-test-s3ops: ## Quick test of S3 operations only - @KEYCLOAK_URL=http://localhost:8080 go test -v -run "TestKeycloakS3Operations" -timeout 2m ./... - -# Development workflow -docker-dev: docker-down docker-up docker-test ## Complete dev workflow: down -> up -> test - -# Show service URLs for easy access -docker-urls: ## Display all service URLs - @echo "๐ŸŒ Service URLs:" - @echo "" - @echo " ๐Ÿ” Keycloak Admin: http://localhost:8080 (admin/admin)" - @echo " ๐Ÿ” Keycloak Realm: http://localhost:8080/realms/seaweedfs-test" - @echo " ๐Ÿ“ S3 API: http://localhost:8333" - @echo " ๐Ÿ“‚ Filer UI: http://localhost:8888" - @echo " ๐ŸŽฏ Master UI: http://localhost:9333" - @echo " ๐Ÿ’พ Volume Server: http://localhost:8080" - @echo "" - @echo " ๐Ÿ“– Test Users:" - @echo " โ€ข admin-user (password: adminuser123) - s3-admin role" - @echo " โ€ข read-user (password: readuser123) - s3-read-only role" - @echo " โ€ข write-user (password: writeuser123) - s3-read-write role" - @echo " โ€ข write-only-user (password: writeonlyuser123) - s3-write-only role" - -# Wait targets for CI/CD -docker-wait-healthy: ## Wait for all services to be healthy - @echo "โณ Waiting for all services to be healthy..." - @timeout 300 bash -c ' \ - required_services="keycloak weed-master weed-volume weed-filer weed-s3"; \ - while true; do \ - all_healthy=true; \ - for service in $$required_services; do \ - if ! docker-compose -p $(PROJECT_NAME) -f $(COMPOSE_FILE) ps $$service | grep -q "healthy"; then \ - echo "Waiting for $$service to be healthy..."; \ - all_healthy=false; \ - break; \ - fi; \ - done; \ - if [ "$$all_healthy" = "true" ]; then \ - break; \ - fi; \ - sleep 5; \ - done \ - ' - @echo "โœ… All required services are healthy" diff --git a/test/s3/iam/README-Docker.md b/test/s3/iam/README-Docker.md deleted file mode 100644 index 3759d7fae..000000000 --- a/test/s3/iam/README-Docker.md +++ /dev/null @@ -1,241 +0,0 @@ -# SeaweedFS S3 IAM Integration with Docker Compose - -This directory contains a complete Docker Compose setup for testing SeaweedFS S3 IAM integration with Keycloak OIDC authentication. - -## ๐Ÿš€ Quick Start - -1. **Build local SeaweedFS image:** - ```bash - make -f Makefile.docker docker-build - ``` - -2. **Start the environment:** - ```bash - make -f Makefile.docker docker-up - ``` - -3. **Run the tests:** - ```bash - make -f Makefile.docker docker-test - ``` - -4. **Stop the environment:** - ```bash - make -f Makefile.docker docker-down - ``` - -## ๐Ÿ“‹ What's Included - -The Docker Compose setup includes: - -- **๐Ÿ” Keycloak** - Identity provider with OIDC support -- **๐ŸŽฏ SeaweedFS Master** - Metadata management -- **๐Ÿ’พ SeaweedFS Volume** - Data storage -- **๐Ÿ“ SeaweedFS Filer** - File system interface -- **๐Ÿ“Š SeaweedFS S3** - S3-compatible API with IAM integration -- **๐Ÿ”ง Keycloak Setup** - Automated realm and user configuration - -## ๐ŸŒ Service URLs - -After starting with `docker-up`, services are available at: - -| Service | URL | Credentials | -|---------|-----|-------------| -| ๐Ÿ” Keycloak Admin | http://localhost:8080 | admin/admin | -| ๐Ÿ“Š S3 API | http://localhost:8333 | JWT tokens | -| ๐Ÿ“ Filer | http://localhost:8888 | - | -| ๐ŸŽฏ Master | http://localhost:9333 | - | - -## ๐Ÿ‘ฅ Test Users - -The setup automatically creates test users in Keycloak: - -| Username | Password | Role | Permissions | -|----------|----------|------|-------------| -| admin-user | adminuser123 | s3-admin | Full S3 access | -| read-user | readuser123 | s3-read-only | Read-only access | -| write-user | writeuser123 | s3-read-write | Read and write | -| write-only-user | writeonlyuser123 | s3-write-only | Write only | - -## ๐Ÿงช Running Tests - -### All Tests -```bash -make -f Makefile.docker docker-test -``` - -### Specific Test Categories -```bash -# Authentication tests only -make -f Makefile.docker docker-test-auth - -# Role mapping tests only -make -f Makefile.docker docker-test-roles - -# S3 operations tests only -make -f Makefile.docker docker-test-s3ops -``` - -### Single Test -```bash -make -f Makefile.docker docker-test-single TEST_NAME=TestKeycloakAuthentication -``` - -## ๐Ÿ”ง Development Workflow - -### Complete workflow (recommended) -```bash -# Build, start, test, and clean up -make -f Makefile.docker docker-build -make -f Makefile.docker docker-dev -``` -This runs: build โ†’ down โ†’ up โ†’ test - -### Using Published Images (Alternative) -If you want to use published Docker Hub images instead of building locally: -```bash -export SEAWEEDFS_IMAGE=chrislusf/seaweedfs:latest -make -f Makefile.docker docker-up -``` - -### Manual steps -```bash -# Build image (required first time, or after code changes) -make -f Makefile.docker docker-build - -# Start services -make -f Makefile.docker docker-up - -# Watch logs -make -f Makefile.docker docker-logs - -# Check status -make -f Makefile.docker docker-status - -# Run tests -make -f Makefile.docker docker-test - -# Stop services -make -f Makefile.docker docker-down -``` - -## ๐Ÿ” Debugging - -### View logs -```bash -# All services -make -f Makefile.docker docker-logs - -# S3 service only (includes role mapping debug) -make -f Makefile.docker docker-logs-s3 - -# Keycloak only -make -f Makefile.docker docker-logs-keycloak -``` - -### Get shell access -```bash -# S3 container -make -f Makefile.docker docker-shell-s3 - -# Keycloak container -make -f Makefile.docker docker-shell-keycloak -``` - -## ๐Ÿ“ File Structure - -``` -seaweedfs/test/s3/iam/ -โ”œโ”€โ”€ docker-compose.yml # Main Docker Compose configuration -โ”œโ”€โ”€ Makefile.docker # Docker-specific Makefile -โ”œโ”€โ”€ setup_keycloak_docker.sh # Keycloak setup for containers -โ”œโ”€โ”€ README-Docker.md # This file -โ”œโ”€โ”€ iam_config.json # IAM configuration (auto-generated) -โ”œโ”€โ”€ test_config.json # S3 service configuration -โ””โ”€โ”€ *_test.go # Go integration tests -``` - -## ๐Ÿ”„ Configuration - -### IAM Configuration -The `setup_keycloak_docker.sh` script automatically generates `iam_config.json` with: - -- **OIDC Provider**: Keycloak configuration with proper container networking -- **Role Mapping**: Maps Keycloak roles to SeaweedFS IAM roles -- **Policies**: Defines S3 permissions for each role -- **Trust Relationships**: Allows Keycloak users to assume SeaweedFS roles - -### Role Mapping Rules -```json -{ - "claim": "roles", - "value": "s3-admin", - "role": "arn:seaweed:iam::role/KeycloakAdminRole" -} -``` - -## ๐Ÿ› Troubleshooting - -### Services not starting -```bash -# Check service status -make -f Makefile.docker docker-status - -# View logs for specific service -docker-compose -p seaweedfs-iam-test logs -``` - -### Keycloak setup issues -```bash -# Re-run Keycloak setup manually -make -f Makefile.docker docker-keycloak-setup - -# Check Keycloak logs -make -f Makefile.docker docker-logs-keycloak -``` - -### Role mapping not working -```bash -# Check S3 logs for role mapping debug messages -make -f Makefile.docker docker-logs-s3 | grep -i "role\|claim\|mapping" -``` - -### Port conflicts -If ports are already in use, modify `docker-compose.yml`: -```yaml -ports: - - "8081:8080" # Change external port -``` - -## ๐Ÿงน Cleanup - -```bash -# Stop containers and remove volumes -make -f Makefile.docker docker-down - -# Complete cleanup (containers, volumes, images) -make -f Makefile.docker docker-clean -``` - -## ๐ŸŽฏ Key Features - -- **Local Code Testing**: Uses locally built SeaweedFS images to test current code -- **Isolated Environment**: No conflicts with local services -- **Consistent Networking**: Services communicate via Docker network -- **Automated Setup**: Keycloak realm and users created automatically -- **Debug Logging**: Verbose logging enabled for troubleshooting -- **Health Checks**: Proper service dependency management -- **Volume Persistence**: Data persists between restarts (until docker-down) - -## ๐Ÿšฆ CI/CD Integration - -For automated testing: - -```bash -# Build image, run tests with proper cleanup -make -f Makefile.docker docker-build -make -f Makefile.docker docker-up -make -f Makefile.docker docker-wait-healthy -make -f Makefile.docker docker-test -make -f Makefile.docker docker-down -``` diff --git a/test/s3/iam/README.md b/test/s3/iam/README.md deleted file mode 100644 index ba871600c..000000000 --- a/test/s3/iam/README.md +++ /dev/null @@ -1,506 +0,0 @@ -# SeaweedFS S3 IAM Integration Tests - -This directory contains comprehensive integration tests for the SeaweedFS S3 API with Advanced IAM (Identity and Access Management) system integration. - -## Overview - -**Important**: The STS service uses a **stateless JWT design** where all session information is embedded directly in the JWT token. No external session storage is required. - -The S3 IAM integration tests validate the complete end-to-end functionality of: - -- **JWT Authentication**: OIDC token-based authentication with S3 API -- **Policy Enforcement**: Fine-grained access control for S3 operations -- **Stateless Session Management**: JWT-based session token validation and expiration (no external storage) -- **Role-Based Access Control (RBAC)**: IAM roles with different permission levels -- **Bucket Policies**: Resource-based access control integration -- **Multipart Upload IAM**: Policy enforcement for multipart operations -- **Contextual Policies**: IP-based, time-based, and conditional access control -- **Presigned URLs**: IAM-integrated temporary access URL generation - -## Test Architecture - -### Components Tested - -1. **S3 API Gateway** - SeaweedFS S3-compatible API server with IAM integration -2. **IAM Manager** - Core IAM orchestration and policy evaluation -3. **STS Service** - Security Token Service for temporary credentials -4. **Policy Engine** - AWS IAM-compatible policy evaluation -5. **Identity Providers** - OIDC and LDAP authentication providers -6. **Policy Store** - Persistent policy storage using SeaweedFS filer - -### Test Framework - -- **S3IAMTestFramework**: Comprehensive test utilities and setup -- **Mock OIDC Provider**: In-memory OIDC server with JWT signing -- **Service Management**: Automatic SeaweedFS service lifecycle management -- **Resource Cleanup**: Automatic cleanup of buckets and test data - -## Test Scenarios - -### 1. Authentication Tests (`TestS3IAMAuthentication`) - -- โœ… **Valid JWT Token**: Successful authentication with proper OIDC tokens -- โœ… **Invalid JWT Token**: Rejection of malformed or invalid tokens -- โœ… **Expired JWT Token**: Proper handling of expired authentication tokens - -### 2. Policy Enforcement Tests (`TestS3IAMPolicyEnforcement`) - -- โœ… **Read-Only Policy**: Users can only read objects and list buckets -- โœ… **Write-Only Policy**: Users can only create/delete objects but not read -- โœ… **Admin Policy**: Full access to all S3 operations including bucket management - -### 3. Session Expiration Tests (`TestS3IAMSessionExpiration`) - -- โœ… **Short-Lived Sessions**: Creation and validation of time-limited sessions -- โœ… **Manual Expiration**: Testing session expiration enforcement -- โœ… **Expired Session Rejection**: Proper access denial for expired sessions - -### 4. Multipart Upload Tests (`TestS3IAMMultipartUploadPolicyEnforcement`) - -- โœ… **Admin Multipart Access**: Full multipart upload capabilities -- โœ… **Read-Only Denial**: Rejection of multipart operations for read-only users -- โœ… **Complete Upload Flow**: Initiate โ†’ Upload Parts โ†’ Complete workflow - -### 5. Bucket Policy Tests (`TestS3IAMBucketPolicyIntegration`) - -- โœ… **Public Read Policy**: Bucket-level policies allowing public access -- โœ… **Explicit Deny Policy**: Bucket policies that override IAM permissions -- โœ… **Policy CRUD Operations**: Get/Put/Delete bucket policy operations - -### 6. Contextual Policy Tests (`TestS3IAMContextualPolicyEnforcement`) - -- ๐Ÿ”ง **IP-Based Restrictions**: Source IP validation in policy conditions -- ๐Ÿ”ง **Time-Based Restrictions**: Temporal access control policies -- ๐Ÿ”ง **User-Agent Restrictions**: Request context-based policy evaluation - -### 7. Presigned URL Tests (`TestS3IAMPresignedURLIntegration`) - -- โœ… **URL Generation**: IAM-validated presigned URL creation -- โœ… **Permission Validation**: Ensuring users have required permissions -- ๐Ÿ”ง **HTTP Request Testing**: Direct HTTP calls to presigned URLs - -## Quick Start - -### Prerequisites - -1. **Go 1.19+** with modules enabled -2. **SeaweedFS Binary** (`weed`) built with IAM support -3. **Test Dependencies**: - ```bash - go get github.com/stretchr/testify - go get github.com/aws/aws-sdk-go - go get github.com/golang-jwt/jwt/v5 - ``` - -### Running Tests - -#### Complete Test Suite -```bash -# Run all tests with service management -make test - -# Quick test run (assumes services running) -make test-quick -``` - -#### Specific Test Categories -```bash -# Test only authentication -make test-auth - -# Test only policy enforcement -make test-policy - -# Test only session expiration -make test-expiration - -# Test only multipart uploads -make test-multipart - -# Test only bucket policies -make test-bucket-policy -``` - -#### Development & Debugging -```bash -# Start services and keep running -make debug - -# Show service logs -make logs - -# Check service status -make status - -# Watch for changes and re-run tests -make watch -``` - -### Manual Service Management - -If you prefer to manage services manually: - -```bash -# Start services -make start-services - -# Wait for services to be ready -make wait-for-services - -# Run tests -make run-tests - -# Stop services -make stop-services -``` - -## Configuration - -### Test Configuration (`test_config.json`) - -The test configuration defines: - -- **Identity Providers**: OIDC and LDAP configurations -- **IAM Roles**: Role definitions with trust policies -- **IAM Policies**: Permission policies for different access levels -- **Policy Stores**: Persistent storage configurations for IAM policies and roles - -### Service Ports - -| Service | Port | Purpose | -|---------|------|---------| -| Master | 9333 | Cluster coordination | -| Volume | 8080 | Object storage | -| Filer | 8888 | Metadata & IAM storage | -| S3 API | 8333 | S3-compatible API with IAM | - -### Environment Variables - -```bash -# SeaweedFS binary location -export WEED_BINARY=../../../weed - -# Service ports (optional) -export S3_PORT=8333 -export FILER_PORT=8888 -export MASTER_PORT=9333 -export VOLUME_PORT=8080 - -# Test timeout -export TEST_TIMEOUT=30m - -# Log level (0-4) -export LOG_LEVEL=2 -``` - -## Test Data & Cleanup - -### Automatic Cleanup - -The test framework automatically: -- ๐Ÿ—‘๏ธ **Deletes test buckets** created during tests -- ๐Ÿ—‘๏ธ **Removes test objects** and multipart uploads -- ๐Ÿ—‘๏ธ **Cleans up IAM sessions** and temporary tokens -- ๐Ÿ—‘๏ธ **Stops services** after test completion - -### Manual Cleanup - -```bash -# Clean everything -make clean - -# Clean while keeping services running -rm -rf test-volume-data/ -``` - -## Extending Tests - -### Adding New Test Scenarios - -1. **Create Test Function**: - ```go - func TestS3IAMNewFeature(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Test implementation - } - ``` - -2. **Use Test Framework**: - ```go - // Create authenticated S3 client - s3Client, err := framework.CreateS3ClientWithJWT("user", "TestRole") - require.NoError(t, err) - - // Test S3 operations - err = framework.CreateBucket(s3Client, "test-bucket") - require.NoError(t, err) - ``` - -3. **Add to Makefile**: - ```makefile - test-new-feature: ## Test new feature - go test -v -run TestS3IAMNewFeature ./... - ``` - -### Creating Custom Policies - -Add policies to `test_config.json`: - -```json -{ - "policies": { - "CustomPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:GetObject"], - "Resource": ["arn:seaweed:s3:::specific-bucket/*"], - "Condition": { - "StringEquals": { - "s3:prefix": ["allowed-prefix/"] - } - } - } - ] - } - } -} -``` - -### Adding Identity Providers - -1. **Mock Provider Setup**: - ```go - // In test framework - func (f *S3IAMTestFramework) setupCustomProvider() { - provider := custom.NewCustomProvider("test-custom") - // Configure and register - } - ``` - -2. **Configuration**: - ```json - { - "providers": { - "custom": { - "test-custom": { - "endpoint": "http://localhost:8080", - "clientId": "custom-client" - } - } - } - } - ``` - -## Troubleshooting - -### Common Issues - -#### 1. Services Not Starting -```bash -# Check if ports are available -netstat -an | grep -E "(8333|8888|9333|8080)" - -# Check service logs -make logs - -# Try different ports -export S3_PORT=18333 -make start-services -``` - -#### 2. JWT Token Issues -```bash -# Verify OIDC mock server -curl http://localhost:8080/.well-known/openid_configuration - -# Check JWT token format in logs -make logs | grep -i jwt -``` - -#### 3. Permission Denied Errors -```bash -# Verify IAM configuration -cat test_config.json | jq '.policies' - -# Check policy evaluation in logs -export LOG_LEVEL=4 -make start-services -``` - -#### 4. Test Timeouts -```bash -# Increase timeout -export TEST_TIMEOUT=60m -make test - -# Run individual tests -make test-auth -``` - -### Debug Mode - -Start services in debug mode to inspect manually: - -```bash -# Start and keep running -make debug - -# In another terminal, run specific operations -aws s3 ls --endpoint-url http://localhost:8333 - -# Stop when done (Ctrl+C in debug terminal) -``` - -### Log Analysis - -```bash -# Service-specific logs -tail -f weed-s3.log # S3 API server -tail -f weed-filer.log # Filer (IAM storage) -tail -f weed-master.log # Master server -tail -f weed-volume.log # Volume server - -# Filter for IAM-related logs -make logs | grep -i iam -make logs | grep -i jwt -make logs | grep -i policy -``` - -## Performance Testing - -### Benchmarks - -```bash -# Run performance benchmarks -make benchmark - -# Profile memory usage -go test -bench=. -memprofile=mem.prof -go tool pprof mem.prof -``` - -### Load Testing - -For load testing with IAM: - -1. **Create Multiple Clients**: - ```go - // Generate multiple JWT tokens - tokens := framework.GenerateMultipleJWTTokens(100) - - // Create concurrent clients - var wg sync.WaitGroup - for _, token := range tokens { - wg.Add(1) - go func(token string) { - defer wg.Done() - // Perform S3 operations - }(token) - } - wg.Wait() - ``` - -2. **Measure Performance**: - ```bash - # Run with verbose output - go test -v -bench=BenchmarkS3IAMOperations - ``` - -## CI/CD Integration - -### GitHub Actions - -```yaml -name: S3 IAM Integration Tests -on: [push, pull_request] - -jobs: - s3-iam-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.19' - - - name: Build SeaweedFS - run: go build -o weed ./main.go - - - name: Run S3 IAM Tests - run: | - cd test/s3/iam - make ci -``` - -### Jenkins Pipeline - -```groovy -pipeline { - agent any - stages { - stage('Build') { - steps { - sh 'go build -o weed ./main.go' - } - } - stage('S3 IAM Tests') { - steps { - dir('test/s3/iam') { - sh 'make ci' - } - } - post { - always { - dir('test/s3/iam') { - sh 'make clean' - } - } - } - } - } -} -``` - -## Contributing - -### Adding New Tests - -1. **Follow Test Patterns**: - - Use `S3IAMTestFramework` for setup - - Include cleanup with `defer framework.Cleanup()` - - Use descriptive test names and subtests - - Assert both success and failure cases - -2. **Update Documentation**: - - Add test descriptions to this README - - Include Makefile targets for new test categories - - Document any new configuration options - -3. **Ensure Test Reliability**: - - Tests should be deterministic and repeatable - - Include proper error handling and assertions - - Use appropriate timeouts for async operations - -### Code Style - -- Follow standard Go testing conventions -- Use `require.NoError()` for critical assertions -- Use `assert.Equal()` for value comparisons -- Include descriptive error messages in assertions - -## Support - -For issues with S3 IAM integration tests: - -1. **Check Logs**: Use `make logs` to inspect service logs -2. **Verify Configuration**: Ensure `test_config.json` is correct -3. **Test Services**: Run `make status` to check service health -4. **Clean Environment**: Try `make clean && make test` - -## License - -This test suite is part of the SeaweedFS project and follows the same licensing terms. diff --git a/test/s3/iam/STS_DISTRIBUTED.md b/test/s3/iam/STS_DISTRIBUTED.md deleted file mode 100644 index b18ec4fdb..000000000 --- a/test/s3/iam/STS_DISTRIBUTED.md +++ /dev/null @@ -1,511 +0,0 @@ -# Distributed STS Service for SeaweedFS S3 Gateway - -This document explains how to configure and deploy the STS (Security Token Service) for distributed SeaweedFS S3 Gateway deployments with consistent identity provider configurations. - -## Problem Solved - -Previously, identity providers had to be **manually registered** on each S3 gateway instance, leading to: - -- โŒ **Inconsistent authentication**: Different instances might have different providers -- โŒ **Manual synchronization**: No guarantee all instances have same provider configs -- โŒ **Authentication failures**: Users getting different responses from different instances -- โŒ **Operational complexity**: Difficult to manage provider configurations at scale - -## Solution: Configuration-Driven Providers - -The STS service now supports **automatic provider loading** from configuration files, ensuring: - -- โœ… **Consistent providers**: All instances load identical providers from config -- โœ… **Automatic synchronization**: Configuration-driven, no manual registration needed -- โœ… **Reliable authentication**: Same behavior from all instances -- โœ… **Easy management**: Update config file, restart services - -## Configuration Schema - -### Basic STS Configuration - -```json -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "base64-encoded-signing-key-32-chars-min" - } -} -``` - -**Note**: The STS service uses a **stateless JWT design** where all session information is embedded directly in the JWT token. No external session storage is required. - -### Configuration-Driven Providers - -```json -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "base64-encoded-signing-key", - "providers": [ - { - "name": "keycloak-oidc", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "https://keycloak.company.com/realms/seaweedfs", - "clientId": "seaweedfs-s3", - "clientSecret": "super-secret-key", - "jwksUri": "https://keycloak.company.com/realms/seaweedfs/protocol/openid-connect/certs", - "scopes": ["openid", "profile", "email", "roles"], - "claimsMapping": { - "usernameClaim": "preferred_username", - "groupsClaim": "roles" - } - } - }, - { - "name": "backup-oidc", - "type": "oidc", - "enabled": false, - "config": { - "issuer": "https://backup-oidc.company.com", - "clientId": "seaweedfs-backup" - } - }, - { - "name": "dev-mock-provider", - "type": "mock", - "enabled": true, - "config": { - "issuer": "http://localhost:9999", - "clientId": "mock-client" - } - } - ] - } -} -``` - -## Supported Provider Types - -### 1. OIDC Provider (`"type": "oidc"`) - -For production authentication with OpenID Connect providers like Keycloak, Auth0, Google, etc. - -**Required Configuration:** -- `issuer`: OIDC issuer URL -- `clientId`: OAuth2 client ID - -**Optional Configuration:** -- `clientSecret`: OAuth2 client secret (for confidential clients) -- `jwksUri`: JSON Web Key Set URI (auto-discovered if not provided) -- `userInfoUri`: UserInfo endpoint URI (auto-discovered if not provided) -- `scopes`: OAuth2 scopes to request (default: `["openid"]`) -- `claimsMapping`: Map OIDC claims to identity attributes - -**Example:** -```json -{ - "name": "corporate-keycloak", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "https://sso.company.com/realms/production", - "clientId": "seaweedfs-prod", - "clientSecret": "confidential-secret", - "scopes": ["openid", "profile", "email", "groups"], - "claimsMapping": { - "usernameClaim": "preferred_username", - "groupsClaim": "groups", - "emailClaim": "email" - } - } -} -``` - -### 2. Mock Provider (`"type": "mock"`) - -For development, testing, and staging environments. - -**Configuration:** -- `issuer`: Mock issuer URL (default: `http://localhost:9999`) -- `clientId`: Mock client ID - -**Example:** -```json -{ - "name": "dev-mock", - "type": "mock", - "enabled": true, - "config": { - "issuer": "http://dev-mock:9999", - "clientId": "dev-client" - } -} -``` - -**Built-in Test Tokens:** -- `valid_test_token`: Returns test user with developer groups -- `valid-oidc-token`: Compatible with integration tests -- `expired_token`: Returns token expired error -- `invalid_token`: Returns invalid token error - -### 3. Future Provider Types - -The factory pattern supports easy addition of new provider types: - -- `"type": "ldap"`: LDAP/Active Directory authentication -- `"type": "saml"`: SAML 2.0 authentication -- `"type": "oauth2"`: Generic OAuth2 providers -- `"type": "custom"`: Custom authentication backends - -## Deployment Patterns - -### Single Instance (Development) - -```bash -# Standard deployment with config-driven providers -weed s3 -filer=localhost:8888 -port=8333 -iam.config=/path/to/sts_config.json -``` - -### Multiple Instances (Production) - -```bash -# Instance 1 -weed s3 -filer=prod-filer:8888 -port=8333 -iam.config=/shared/sts_distributed.json - -# Instance 2 -weed s3 -filer=prod-filer:8888 -port=8334 -iam.config=/shared/sts_distributed.json - -# Instance N -weed s3 -filer=prod-filer:8888 -port=833N -iam.config=/shared/sts_distributed.json -``` - -**Critical Requirements for Distributed Deployment:** - -1. **Identical Configuration Files**: All instances must use the exact same configuration file -2. **Same Signing Keys**: All instances must have identical `signingKey` values -3. **Same Issuer**: All instances must use the same `issuer` value - -**Note**: STS now uses stateless JWT tokens, eliminating the need for shared session storage. - -### High Availability Setup - -```yaml -# docker-compose.yml for production deployment -services: - filer: - image: seaweedfs/seaweedfs:latest - command: "filer -master=master:9333" - volumes: - - filer-data:/data - - s3-gateway-1: - image: seaweedfs/seaweedfs:latest - command: "s3 -filer=filer:8888 -port=8333 -iam.config=/config/sts_distributed.json" - ports: - - "8333:8333" - volumes: - - ./sts_distributed.json:/config/sts_distributed.json:ro - depends_on: [filer] - - s3-gateway-2: - image: seaweedfs/seaweedfs:latest - command: "s3 -filer=filer:8888 -port=8333 -iam.config=/config/sts_distributed.json" - ports: - - "8334:8333" - volumes: - - ./sts_distributed.json:/config/sts_distributed.json:ro - depends_on: [filer] - - s3-gateway-3: - image: seaweedfs/seaweedfs:latest - command: "s3 -filer=filer:8888 -port=8333 -iam.config=/config/sts_distributed.json" - ports: - - "8335:8333" - volumes: - - ./sts_distributed.json:/config/sts_distributed.json:ro - depends_on: [filer] - - load-balancer: - image: nginx:alpine - ports: - - "80:80" - volumes: - - ./nginx.conf:/etc/nginx/nginx.conf:ro - depends_on: [s3-gateway-1, s3-gateway-2, s3-gateway-3] -``` - -## Authentication Flow - -### 1. OIDC Authentication Flow - -``` -1. User authenticates with OIDC provider (Keycloak, Auth0, etc.) - โ†“ -2. User receives OIDC JWT token from provider - โ†“ -3. User calls SeaweedFS STS AssumeRoleWithWebIdentity - POST /sts/assume-role-with-web-identity - { - "RoleArn": "arn:seaweed:iam::role/S3AdminRole", - "WebIdentityToken": "eyJ0eXAiOiJKV1QiLCJhbGc...", - "RoleSessionName": "user-session" - } - โ†“ -4. STS validates OIDC token with configured provider - - Verifies JWT signature using provider's JWKS - - Validates issuer, audience, expiration - - Extracts user identity and groups - โ†“ -5. STS checks role trust policy - - Verifies user/groups can assume the requested role - - Validates conditions in trust policy - โ†“ -6. STS generates temporary credentials - - Creates temporary access key, secret key, session token - - Session token is signed JWT with all session information embedded (stateless) - โ†“ -7. User receives temporary credentials - { - "Credentials": { - "AccessKeyId": "AKIA...", - "SecretAccessKey": "base64-secret", - "SessionToken": "eyJ0eXAiOiJKV1QiLCJhbGc...", - "Expiration": "2024-01-01T12:00:00Z" - } - } - โ†“ -8. User makes S3 requests with temporary credentials - - AWS SDK signs requests with temporary credentials - - SeaweedFS S3 gateway validates session token - - Gateway checks permissions via policy engine -``` - -### 2. Cross-Instance Token Validation - -``` -User Request โ†’ Load Balancer โ†’ Any S3 Gateway Instance - โ†“ - Extract JWT Session Token - โ†“ - Validate JWT Token - (Self-contained - no external storage needed) - โ†“ - Check Permissions - (Shared policy engine) - โ†“ - Allow/Deny Request -``` - -## Configuration Management - -### Development Environment - -```json -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-dev-sts", - "signingKey": "ZGV2LXNpZ25pbmcta2V5LTMyLWNoYXJhY3RlcnMtbG9uZw==", - "providers": [ - { - "name": "dev-mock", - "type": "mock", - "enabled": true, - "config": { - "issuer": "http://localhost:9999", - "clientId": "dev-mock-client" - } - } - ] - } -} -``` - -### Production Environment - -```json -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-prod-sts", - "signingKey": "cHJvZC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmctcmFuZG9t", - "providers": [ - { - "name": "corporate-sso", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "https://sso.company.com/realms/production", - "clientId": "seaweedfs-prod", - "clientSecret": "${SSO_CLIENT_SECRET}", - "scopes": ["openid", "profile", "email", "groups"], - "claimsMapping": { - "usernameClaim": "preferred_username", - "groupsClaim": "groups" - } - } - }, - { - "name": "backup-auth", - "type": "oidc", - "enabled": false, - "config": { - "issuer": "https://backup-sso.company.com", - "clientId": "seaweedfs-backup" - } - } - ] - } -} -``` - -## Operational Best Practices - -### 1. Configuration Management - -- **Version Control**: Store configurations in Git with proper versioning -- **Environment Separation**: Use separate configs for dev/staging/production -- **Secret Management**: Use environment variable substitution for secrets -- **Configuration Validation**: Test configurations before deployment - -### 2. Security Considerations - -- **Signing Key Security**: Use strong, randomly generated signing keys (32+ bytes) -- **Key Rotation**: Implement signing key rotation procedures -- **Secret Storage**: Store client secrets in secure secret management systems -- **TLS Encryption**: Always use HTTPS for OIDC providers in production - -### 3. Monitoring and Troubleshooting - -- **Provider Health**: Monitor OIDC provider availability and response times -- **Session Metrics**: Track active sessions, token validation errors -- **Configuration Drift**: Alert on configuration inconsistencies between instances -- **Authentication Logs**: Log authentication attempts for security auditing - -### 4. Capacity Planning - -- **Provider Performance**: Monitor OIDC provider response times and rate limits -- **Token Validation**: Monitor JWT validation performance and caching -- **Memory Usage**: Monitor JWT token validation caching and provider metadata - -## Migration Guide - -### From Manual Provider Registration - -**Before (Manual Registration):** -```go -// Each instance needs this code -keycloakProvider := oidc.NewOIDCProvider("keycloak-oidc") -keycloakProvider.Initialize(keycloakConfig) -stsService.RegisterProvider(keycloakProvider) -``` - -**After (Configuration-Driven):** -```json -{ - "sts": { - "providers": [ - { - "name": "keycloak-oidc", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "https://keycloak.company.com/realms/seaweedfs", - "clientId": "seaweedfs-s3" - } - } - ] - } -} -``` - -### Migration Steps - -1. **Create Configuration File**: Convert manual provider registrations to JSON config -2. **Test Single Instance**: Deploy config to one instance and verify functionality -3. **Validate Consistency**: Ensure all instances load identical providers -4. **Rolling Deployment**: Update instances one by one with new configuration -5. **Remove Manual Code**: Clean up manual provider registration code - -## Troubleshooting - -### Common Issues - -#### 1. Provider Inconsistency - -**Symptoms**: Authentication works on some instances but not others -**Diagnosis**: -```bash -# Check provider counts on each instance -curl http://instance1:8333/sts/providers | jq '.providers | length' -curl http://instance2:8334/sts/providers | jq '.providers | length' -``` -**Solution**: Ensure all instances use identical configuration files - -#### 2. Token Validation Failures - -**Symptoms**: "Invalid signature" or "Invalid issuer" errors -**Diagnosis**: Check signing key and issuer consistency -**Solution**: Verify `signingKey` and `issuer` are identical across all instances - -#### 3. Provider Loading Failures - -**Symptoms**: Providers not loaded at startup -**Diagnosis**: Check logs for provider initialization errors -**Solution**: Validate provider configuration against schema - -#### 4. OIDC Provider Connectivity - -**Symptoms**: "Failed to fetch JWKS" errors -**Diagnosis**: Test OIDC provider connectivity from all instances -**Solution**: Check network connectivity, DNS resolution, certificates - -### Debug Commands - -```bash -# Test configuration loading -weed s3 -iam.config=/path/to/config.json -test.config - -# Validate JWT tokens -curl -X POST http://localhost:8333/sts/validate-token \ - -H "Content-Type: application/json" \ - -d '{"sessionToken": "eyJ0eXAiOiJKV1QiLCJhbGc..."}' - -# List loaded providers -curl http://localhost:8333/sts/providers - -# Check session store -curl http://localhost:8333/sts/sessions/count -``` - -## Performance Considerations - -### Token Validation Performance - -- **JWT Validation**: ~1-5ms per token validation -- **JWKS Caching**: Cache JWKS responses to reduce OIDC provider load -- **Session Lookup**: Filer session lookup adds ~10-20ms latency -- **Concurrent Requests**: Each instance can handle 1000+ concurrent validations - -### Scaling Recommendations - -- **Horizontal Scaling**: Add more S3 gateway instances behind load balancer -- **Session Store Optimization**: Use SSD storage for filer session store -- **Provider Caching**: Implement JWKS caching to reduce provider load -- **Connection Pooling**: Use connection pooling for filer communication - -## Summary - -The configuration-driven provider system solves critical distributed deployment issues: - -- โœ… **Automatic Provider Loading**: No manual registration code required -- โœ… **Configuration Consistency**: All instances load identical providers from config -- โœ… **Easy Management**: Update config file, restart services -- โœ… **Production Ready**: Supports OIDC, proper session management, distributed storage -- โœ… **Backwards Compatible**: Existing manual registration still works - -This enables SeaweedFS S3 Gateway to **scale horizontally** with **consistent authentication** across all instances, making it truly **production-ready for enterprise deployments**. diff --git a/test/s3/iam/docker-compose-simple.yml b/test/s3/iam/docker-compose-simple.yml deleted file mode 100644 index b52a158a3..000000000 --- a/test/s3/iam/docker-compose-simple.yml +++ /dev/null @@ -1,20 +0,0 @@ -services: - # Keycloak Identity Provider - keycloak: - image: quay.io/keycloak/keycloak:26.0.7 - container_name: keycloak-test-simple - ports: - - "8080:8080" - environment: - KC_BOOTSTRAP_ADMIN_USERNAME: admin - KC_BOOTSTRAP_ADMIN_PASSWORD: admin - KC_HTTP_ENABLED: "true" - KC_HOSTNAME_STRICT: "false" - KC_HOSTNAME_STRICT_HTTPS: "false" - command: start-dev - networks: - - test-network - -networks: - test-network: - driver: bridge diff --git a/test/s3/iam/docker-compose.test.yml b/test/s3/iam/docker-compose.test.yml deleted file mode 100644 index bb229cfc3..000000000 --- a/test/s3/iam/docker-compose.test.yml +++ /dev/null @@ -1,160 +0,0 @@ -# Docker Compose for SeaweedFS S3 IAM Integration Tests -services: - # SeaweedFS Master - seaweedfs-master: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-master-test - command: master -mdir=/data -defaultReplication=000 -port=9333 - ports: - - "9333:9333" - volumes: - - master-data:/data - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9333/cluster/status"] - interval: 10s - timeout: 5s - retries: 5 - networks: - - seaweedfs-test - - # SeaweedFS Volume - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-volume-test - command: volume -dir=/data -port=8083 -mserver=seaweedfs-master:9333 - ports: - - "8083:8083" - volumes: - - volume-data:/data - depends_on: - seaweedfs-master: - condition: service_healthy - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8083/status"] - interval: 10s - timeout: 5s - retries: 5 - networks: - - seaweedfs-test - - # SeaweedFS Filer - seaweedfs-filer: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-filer-test - command: filer -port=8888 -master=seaweedfs-master:9333 -defaultStoreDir=/data - ports: - - "8888:8888" - volumes: - - filer-data:/data - depends_on: - seaweedfs-master: - condition: service_healthy - seaweedfs-volume: - condition: service_healthy - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8888/status"] - interval: 10s - timeout: 5s - retries: 5 - networks: - - seaweedfs-test - - # SeaweedFS S3 API - seaweedfs-s3: - image: chrislusf/seaweedfs:latest - container_name: seaweedfs-s3-test - command: s3 -port=8333 -filer=seaweedfs-filer:8888 -config=/config/test_config.json - ports: - - "8333:8333" - volumes: - - ./test_config.json:/config/test_config.json:ro - depends_on: - seaweedfs-filer: - condition: service_healthy - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8333/"] - interval: 10s - timeout: 5s - retries: 5 - networks: - - seaweedfs-test - - # Test Runner - integration-tests: - build: - context: ../../../ - dockerfile: test/s3/iam/Dockerfile.s3 - container_name: seaweedfs-s3-iam-tests - environment: - - WEED_BINARY=weed - - S3_PORT=8333 - - FILER_PORT=8888 - - MASTER_PORT=9333 - - VOLUME_PORT=8083 - - TEST_TIMEOUT=30m - - LOG_LEVEL=2 - depends_on: - seaweedfs-s3: - condition: service_healthy - volumes: - - .:/app/test/s3/iam - - test-results:/app/test-results - networks: - - seaweedfs-test - command: ["make", "test"] - - # Optional: Mock LDAP Server for LDAP testing - ldap-server: - image: osixia/openldap:1.5.0 - container_name: ldap-server-test - environment: - LDAP_ORGANISATION: "Example Corp" - LDAP_DOMAIN: "example.com" - LDAP_ADMIN_PASSWORD: "admin-password" - LDAP_CONFIG_PASSWORD: "config-password" - LDAP_READONLY_USER: "true" - LDAP_READONLY_USER_USERNAME: "readonly" - LDAP_READONLY_USER_PASSWORD: "readonly-password" - ports: - - "389:389" - - "636:636" - volumes: - - ldap-data:/var/lib/ldap - - ldap-config:/etc/ldap/slapd.d - networks: - - seaweedfs-test - - # Optional: LDAP Admin UI - ldap-admin: - image: osixia/phpldapadmin:latest - container_name: ldap-admin-test - environment: - PHPLDAPADMIN_LDAP_HOSTS: "ldap-server" - PHPLDAPADMIN_HTTPS: "false" - ports: - - "8080:80" - depends_on: - - ldap-server - networks: - - seaweedfs-test - -volumes: - master-data: - driver: local - volume-data: - driver: local - filer-data: - driver: local - ldap-data: - driver: local - ldap-config: - driver: local - test-results: - driver: local - -networks: - seaweedfs-test: - driver: bridge - ipam: - config: - - subnet: 172.20.0.0/16 diff --git a/test/s3/iam/docker-compose.yml b/test/s3/iam/docker-compose.yml deleted file mode 100644 index fd3e3039f..000000000 --- a/test/s3/iam/docker-compose.yml +++ /dev/null @@ -1,160 +0,0 @@ -services: - # Keycloak Identity Provider - keycloak: - image: quay.io/keycloak/keycloak:26.0.7 - container_name: keycloak-iam-test - hostname: keycloak - environment: - KC_BOOTSTRAP_ADMIN_USERNAME: admin - KC_BOOTSTRAP_ADMIN_PASSWORD: admin - KC_HTTP_ENABLED: "true" - KC_HOSTNAME_STRICT: "false" - KC_HOSTNAME_STRICT_HTTPS: "false" - KC_HTTP_RELATIVE_PATH: / - ports: - - "8080:8080" - command: start-dev - networks: - - seaweedfs-iam - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8080/health/ready"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 60s - - # SeaweedFS Master - weed-master: - image: ${SEAWEEDFS_IMAGE:-local/seaweedfs:latest} - container_name: weed-master - hostname: weed-master - ports: - - "9333:9333" - - "19333:19333" - command: "master -ip=weed-master -port=9333 -mdir=/data" - volumes: - - master-data:/data - networks: - - seaweedfs-iam - healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://localhost:9333/cluster/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - - # SeaweedFS Volume Server - weed-volume: - image: ${SEAWEEDFS_IMAGE:-local/seaweedfs:latest} - container_name: weed-volume - hostname: weed-volume - ports: - - "8083:8083" - - "18083:18083" - command: "volume -ip=weed-volume -port=8083 -dir=/data -mserver=weed-master:9333 -dataCenter=dc1 -rack=rack1" - volumes: - - volume-data:/data - networks: - - seaweedfs-iam - depends_on: - weed-master: - condition: service_healthy - healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://localhost:8083/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - - # SeaweedFS Filer - weed-filer: - image: ${SEAWEEDFS_IMAGE:-local/seaweedfs:latest} - container_name: weed-filer - hostname: weed-filer - ports: - - "8888:8888" - - "18888:18888" - command: "filer -ip=weed-filer -port=8888 -master=weed-master:9333 -defaultStoreDir=/data" - volumes: - - filer-data:/data - networks: - - seaweedfs-iam - depends_on: - weed-master: - condition: service_healthy - weed-volume: - condition: service_healthy - healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://localhost:8888/status"] - interval: 10s - timeout: 5s - retries: 3 - start_period: 10s - - # SeaweedFS S3 API with IAM - weed-s3: - image: ${SEAWEEDFS_IMAGE:-local/seaweedfs:latest} - container_name: weed-s3 - hostname: weed-s3 - ports: - - "8333:8333" - environment: - WEED_FILER: "weed-filer:8888" - WEED_IAM_CONFIG: "/config/iam_config.json" - WEED_S3_CONFIG: "/config/test_config.json" - GLOG_v: "3" - command: > - sh -c " - echo 'Starting S3 API with IAM...' && - weed -v=3 s3 -ip=weed-s3 -port=8333 - -filer=weed-filer:8888 - -config=/config/test_config.json - -iam.config=/config/iam_config.json - " - volumes: - - ./iam_config.json:/config/iam_config.json:ro - - ./test_config.json:/config/test_config.json:ro - networks: - - seaweedfs-iam - depends_on: - weed-filer: - condition: service_healthy - keycloak: - condition: service_healthy - keycloak-setup: - condition: service_completed_successfully - healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://localhost:8333"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 30s - - # Keycloak Setup Service - keycloak-setup: - image: alpine/curl:8.4.0 - container_name: keycloak-setup - volumes: - - ./setup_keycloak_docker.sh:/setup.sh:ro - - .:/workspace:rw - working_dir: /workspace - networks: - - seaweedfs-iam - depends_on: - keycloak: - condition: service_healthy - command: > - sh -c " - apk add --no-cache bash jq && - chmod +x /setup.sh && - /setup.sh - " - -volumes: - master-data: - volume-data: - filer-data: - -networks: - seaweedfs-iam: - driver: bridge diff --git a/test/s3/iam/go.mod b/test/s3/iam/go.mod deleted file mode 100644 index f8a940108..000000000 --- a/test/s3/iam/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/seaweedfs/seaweedfs/test/s3/iam - -go 1.24 - -require ( - github.com/aws/aws-sdk-go v1.44.0 - github.com/golang-jwt/jwt/v5 v5.3.0 - github.com/stretchr/testify v1.8.4 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/s3/iam/go.sum b/test/s3/iam/go.sum deleted file mode 100644 index b1bd7cfcf..000000000 --- a/test/s3/iam/go.sum +++ /dev/null @@ -1,31 +0,0 @@ -github.com/aws/aws-sdk-go v1.44.0 h1:jwtHuNqfnJxL4DKHBUVUmQlfueQqBW7oXP6yebZR/R0= -github.com/aws/aws-sdk-go v1.44.0/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/s3/iam/iam_config.github.json b/test/s3/iam/iam_config.github.json deleted file mode 100644 index b9a2fface..000000000 --- a/test/s3/iam/iam_config.github.json +++ /dev/null @@ -1,293 +0,0 @@ -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" - }, - "providers": [ - { - "name": "test-oidc", - "type": "mock", - "config": { - "issuer": "test-oidc-issuer", - "clientId": "test-oidc-client" - } - }, - { - "name": "keycloak", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://localhost:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - "userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo", - "scopes": ["openid", "profile", "email"], - "claimsMapping": { - "username": "preferred_username", - "email": "email", - "name": "name" - }, - "roleMapping": { - "rules": [ - { - "claim": "roles", - "value": "s3-admin", - "role": "arn:seaweed:iam::role/KeycloakAdminRole" - }, - { - "claim": "roles", - "value": "s3-read-only", - "role": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - }, - { - "claim": "roles", - "value": "s3-write-only", - "role": "arn:seaweed:iam::role/KeycloakWriteOnlyRole" - }, - { - "claim": "roles", - "value": "s3-read-write", - "role": "arn:seaweed:iam::role/KeycloakReadWriteRole" - } - ], - "defaultRole": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - } - } - } - ], - "policy": { - "defaultEffect": "Deny" - }, - "roles": [ - { - "roleName": "TestAdminRole", - "roleArn": "arn:seaweed:iam::role/TestAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Admin role for testing" - }, - { - "roleName": "TestReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only role for testing" - }, - { - "roleName": "TestWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only role for testing" - }, - { - "roleName": "KeycloakAdminRole", - "roleArn": "arn:seaweed:iam::role/KeycloakAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Admin role for Keycloak users" - }, - { - "roleName": "KeycloakReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only role for Keycloak users" - }, - { - "roleName": "KeycloakWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only role for Keycloak users" - }, - { - "roleName": "KeycloakReadWriteRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadWritePolicy"], - "description": "Read-write role for Keycloak users" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3WriteOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Deny", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - } - ] -} diff --git a/test/s3/iam/iam_config.json b/test/s3/iam/iam_config.json deleted file mode 100644 index b9a2fface..000000000 --- a/test/s3/iam/iam_config.json +++ /dev/null @@ -1,293 +0,0 @@ -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" - }, - "providers": [ - { - "name": "test-oidc", - "type": "mock", - "config": { - "issuer": "test-oidc-issuer", - "clientId": "test-oidc-client" - } - }, - { - "name": "keycloak", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://localhost:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - "userInfoUri": "http://localhost:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo", - "scopes": ["openid", "profile", "email"], - "claimsMapping": { - "username": "preferred_username", - "email": "email", - "name": "name" - }, - "roleMapping": { - "rules": [ - { - "claim": "roles", - "value": "s3-admin", - "role": "arn:seaweed:iam::role/KeycloakAdminRole" - }, - { - "claim": "roles", - "value": "s3-read-only", - "role": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - }, - { - "claim": "roles", - "value": "s3-write-only", - "role": "arn:seaweed:iam::role/KeycloakWriteOnlyRole" - }, - { - "claim": "roles", - "value": "s3-read-write", - "role": "arn:seaweed:iam::role/KeycloakReadWriteRole" - } - ], - "defaultRole": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - } - } - } - ], - "policy": { - "defaultEffect": "Deny" - }, - "roles": [ - { - "roleName": "TestAdminRole", - "roleArn": "arn:seaweed:iam::role/TestAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Admin role for testing" - }, - { - "roleName": "TestReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only role for testing" - }, - { - "roleName": "TestWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only role for testing" - }, - { - "roleName": "KeycloakAdminRole", - "roleArn": "arn:seaweed:iam::role/KeycloakAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Admin role for Keycloak users" - }, - { - "roleName": "KeycloakReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only role for Keycloak users" - }, - { - "roleName": "KeycloakWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only role for Keycloak users" - }, - { - "roleName": "KeycloakReadWriteRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadWritePolicy"], - "description": "Read-write role for Keycloak users" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3WriteOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Deny", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - } - ] -} diff --git a/test/s3/iam/iam_config.local.json b/test/s3/iam/iam_config.local.json deleted file mode 100644 index b2b2ef4e5..000000000 --- a/test/s3/iam/iam_config.local.json +++ /dev/null @@ -1,345 +0,0 @@ -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" - }, - "providers": [ - { - "name": "test-oidc", - "type": "mock", - "config": { - "issuer": "test-oidc-issuer", - "clientId": "test-oidc-client" - } - }, - { - "name": "keycloak", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://localhost:8090/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/certs", - "userInfoUri": "http://localhost:8090/realms/seaweedfs-test/protocol/openid-connect/userinfo", - "scopes": [ - "openid", - "profile", - "email" - ], - "claimsMapping": { - "username": "preferred_username", - "email": "email", - "name": "name" - }, - "roleMapping": { - "rules": [ - { - "claim": "roles", - "value": "s3-admin", - "role": "arn:seaweed:iam::role/KeycloakAdminRole" - }, - { - "claim": "roles", - "value": "s3-read-only", - "role": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - }, - { - "claim": "roles", - "value": "s3-write-only", - "role": "arn:seaweed:iam::role/KeycloakWriteOnlyRole" - }, - { - "claim": "roles", - "value": "s3-read-write", - "role": "arn:seaweed:iam::role/KeycloakReadWriteRole" - } - ], - "defaultRole": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - } - } - } - ], - "policy": { - "defaultEffect": "Deny" - }, - "roles": [ - { - "roleName": "TestAdminRole", - "roleArn": "arn:seaweed:iam::role/TestAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3AdminPolicy" - ], - "description": "Admin role for testing" - }, - { - "roleName": "TestReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3ReadOnlyPolicy" - ], - "description": "Read-only role for testing" - }, - { - "roleName": "TestWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/TestWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "test-oidc" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3WriteOnlyPolicy" - ], - "description": "Write-only role for testing" - }, - { - "roleName": "KeycloakAdminRole", - "roleArn": "arn:seaweed:iam::role/KeycloakAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3AdminPolicy" - ], - "description": "Admin role for Keycloak users" - }, - { - "roleName": "KeycloakReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3ReadOnlyPolicy" - ], - "description": "Read-only role for Keycloak users" - }, - { - "roleName": "KeycloakWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3WriteOnlyPolicy" - ], - "description": "Write-only role for Keycloak users" - }, - { - "roleName": "KeycloakReadWriteRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": [ - "sts:AssumeRoleWithWebIdentity" - ] - } - ] - }, - "attachedPolicies": [ - "S3ReadWritePolicy" - ], - "description": "Read-write role for Keycloak users" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:ValidateSession" - ], - "Resource": [ - "*" - ] - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:ValidateSession" - ], - "Resource": [ - "*" - ] - } - ] - } - }, - { - "name": "S3WriteOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Deny", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:ValidateSession" - ], - "Resource": [ - "*" - ] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "sts:ValidateSession" - ], - "Resource": [ - "*" - ] - } - ] - } - } - ] -} diff --git a/test/s3/iam/iam_config_distributed.json b/test/s3/iam/iam_config_distributed.json deleted file mode 100644 index c9827c220..000000000 --- a/test/s3/iam/iam_config_distributed.json +++ /dev/null @@ -1,173 +0,0 @@ -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=", - "providers": [ - { - "name": "keycloak-oidc", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://keycloak:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://keycloak:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - "scopes": ["openid", "profile", "email", "roles"], - "claimsMapping": { - "usernameClaim": "preferred_username", - "groupsClaim": "roles" - } - } - }, - { - "name": "mock-provider", - "type": "mock", - "enabled": false, - "config": { - "issuer": "http://localhost:9999", - "jwksEndpoint": "http://localhost:9999/jwks" - } - } - ] - }, - "policy": { - "defaultEffect": "Deny" - }, - "roleStore": {}, - - "roles": [ - { - "roleName": "S3AdminRole", - "roleArn": "arn:seaweed:iam::role/S3AdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-admin" - } - } - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Full S3 administrator access role" - }, - { - "roleName": "S3ReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/S3ReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-read-only" - } - } - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only access to S3 resources" - }, - { - "roleName": "S3ReadWriteRole", - "roleArn": "arn:seaweed:iam::role/S3ReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-read-write" - } - } - } - ] - }, - "attachedPolicies": ["S3ReadWritePolicy"], - "description": "Read-write access to S3 resources" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": "*" - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:GetObjectAcl", - "s3:GetObjectVersion", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:GetObjectAcl", - "s3:GetObjectVersion", - "s3:PutObject", - "s3:PutObjectAcl", - "s3:DeleteObject", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - } - } - ] -} diff --git a/test/s3/iam/iam_config_docker.json b/test/s3/iam/iam_config_docker.json deleted file mode 100644 index c0fd5ab87..000000000 --- a/test/s3/iam/iam_config_docker.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=", - "providers": [ - { - "name": "keycloak-oidc", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://keycloak:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://keycloak:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - "scopes": ["openid", "profile", "email", "roles"] - } - } - ] - }, - "policy": { - "defaultEffect": "Deny" - }, - "roles": [ - { - "roleName": "S3AdminRole", - "roleArn": "arn:seaweed:iam::role/S3AdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-admin" - } - } - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Full S3 administrator access role" - }, - { - "roleName": "S3ReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/S3ReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-read-only" - } - } - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only access to S3 resources" - }, - { - "roleName": "S3ReadWriteRole", - "roleArn": "arn:seaweed:iam::role/S3ReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak-oidc" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"], - "Condition": { - "StringEquals": { - "roles": "s3-read-write" - } - } - } - ] - }, - "attachedPolicies": ["S3ReadWritePolicy"], - "description": "Read-write access to S3 resources" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:*", - "Resource": "*" - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:GetObjectAcl", - "s3:GetObjectVersion", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:GetObjectAcl", - "s3:GetObjectVersion", - "s3:PutObject", - "s3:PutObjectAcl", - "s3:DeleteObject", - "s3:ListBucket", - "s3:ListBucketVersions" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - } - } - ] -} diff --git a/test/s3/iam/run_all_tests.sh b/test/s3/iam/run_all_tests.sh deleted file mode 100755 index 7bb8ba956..000000000 --- a/test/s3/iam/run_all_tests.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash - -# Master Test Runner - Enables and runs all previously skipped tests - -set -e - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -echo -e "${BLUE}๐ŸŽฏ SeaweedFS S3 IAM Complete Test Suite${NC}" -echo -e "${BLUE}=====================================${NC}" - -# Set environment variables to enable all tests -export ENABLE_DISTRIBUTED_TESTS=true -export ENABLE_PERFORMANCE_TESTS=true -export ENABLE_STRESS_TESTS=true -export KEYCLOAK_URL="http://localhost:8080" -export S3_ENDPOINT="http://localhost:8333" -export TEST_TIMEOUT=60m -export CGO_ENABLED=0 - -# Function to run test category -run_test_category() { - local category="$1" - local test_pattern="$2" - local description="$3" - - echo -e "${YELLOW}๐Ÿงช Running $description...${NC}" - - if go test -v -timeout=$TEST_TIMEOUT -run "$test_pattern" ./...; then - echo -e "${GREEN}[OK] $description completed successfully${NC}" - return 0 - else - echo -e "${RED}[FAIL] $description failed${NC}" - return 1 - fi -} - -# Track results -TOTAL_CATEGORIES=0 -PASSED_CATEGORIES=0 - -# 1. Standard IAM Integration Tests -echo -e "\n${BLUE}1. Standard IAM Integration Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if run_test_category "standard" "TestS3IAM(?!.*Distributed|.*Performance)" "Standard IAM Integration Tests"; then - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) -fi - -# 2. Keycloak Integration Tests (if Keycloak is available) -echo -e "\n${BLUE}2. Keycloak Integration Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if curl -s "http://localhost:8080/health/ready" > /dev/null 2>&1; then - if run_test_category "keycloak" "TestKeycloak" "Keycloak Integration Tests"; then - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) - fi -else - echo -e "${YELLOW}โš ๏ธ Keycloak not available, skipping Keycloak tests${NC}" - echo -e "${YELLOW}๐Ÿ’ก Run './setup_all_tests.sh' to start Keycloak${NC}" -fi - -# 3. Distributed Tests -echo -e "\n${BLUE}3. Distributed IAM Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if run_test_category "distributed" "TestS3IAMDistributedTests" "Distributed IAM Tests"; then - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) -fi - -# 4. Performance Tests -echo -e "\n${BLUE}4. Performance Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if run_test_category "performance" "TestS3IAMPerformanceTests" "Performance Tests"; then - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) -fi - -# 5. Benchmarks -echo -e "\n${BLUE}5. Benchmark Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if go test -bench=. -benchmem -timeout=$TEST_TIMEOUT ./...; then - echo -e "${GREEN}[OK] Benchmark tests completed successfully${NC}" - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) -else - echo -e "${RED}[FAIL] Benchmark tests failed${NC}" -fi - -# 6. Versioning Stress Tests -echo -e "\n${BLUE}6. S3 Versioning Stress Tests${NC}" -TOTAL_CATEGORIES=$((TOTAL_CATEGORIES + 1)) -if [ -f "../versioning/enable_stress_tests.sh" ]; then - if (cd ../versioning && ./enable_stress_tests.sh); then - echo -e "${GREEN}[OK] Versioning stress tests completed successfully${NC}" - PASSED_CATEGORIES=$((PASSED_CATEGORIES + 1)) - else - echo -e "${RED}[FAIL] Versioning stress tests failed${NC}" - fi -else - echo -e "${YELLOW}โš ๏ธ Versioning stress tests not available${NC}" -fi - -# Summary -echo -e "\n${BLUE}๐Ÿ“Š Test Summary${NC}" -echo -e "${BLUE}===============${NC}" -echo -e "Total test categories: $TOTAL_CATEGORIES" -echo -e "Passed: ${GREEN}$PASSED_CATEGORIES${NC}" -echo -e "Failed: ${RED}$((TOTAL_CATEGORIES - PASSED_CATEGORIES))${NC}" - -if [ $PASSED_CATEGORIES -eq $TOTAL_CATEGORIES ]; then - echo -e "\n${GREEN}๐ŸŽ‰ All test categories passed!${NC}" - exit 0 -else - echo -e "\n${RED}[FAIL] Some test categories failed${NC}" - exit 1 -fi diff --git a/test/s3/iam/run_performance_tests.sh b/test/s3/iam/run_performance_tests.sh deleted file mode 100755 index e8e8983fb..000000000 --- a/test/s3/iam/run_performance_tests.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Performance Test Runner for SeaweedFS S3 IAM - -set -e - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' - -echo -e "${YELLOW}๐Ÿ Running S3 IAM Performance Tests${NC}" - -# Enable performance tests -export ENABLE_PERFORMANCE_TESTS=true -export TEST_TIMEOUT=60m - -# Run benchmarks -echo -e "${YELLOW}๐Ÿ“Š Running benchmarks...${NC}" -go test -bench=. -benchmem -timeout=$TEST_TIMEOUT ./... - -# Run performance tests -echo -e "${YELLOW}๐Ÿงช Running performance test suite...${NC}" -go test -v -timeout=$TEST_TIMEOUT -run "TestS3IAMPerformanceTests" ./... - -echo -e "${GREEN}[OK] Performance tests completed${NC}" diff --git a/test/s3/iam/run_stress_tests.sh b/test/s3/iam/run_stress_tests.sh deleted file mode 100755 index d7520012a..000000000 --- a/test/s3/iam/run_stress_tests.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Stress Test Runner for SeaweedFS S3 IAM - -set -e - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -RED='\033[0;31m' -NC='\033[0m' - -echo -e "${YELLOW}๐Ÿ’ช Running S3 IAM Stress Tests${NC}" - -# Enable stress tests -export ENABLE_STRESS_TESTS=true -export TEST_TIMEOUT=60m - -# Run stress tests multiple times -STRESS_ITERATIONS=5 - -echo -e "${YELLOW}๐Ÿ”„ Running stress tests with $STRESS_ITERATIONS iterations...${NC}" - -for i in $(seq 1 $STRESS_ITERATIONS); do - echo -e "${YELLOW}๐Ÿ“Š Iteration $i/$STRESS_ITERATIONS${NC}" - - if ! go test -v -timeout=$TEST_TIMEOUT -run "TestS3IAMDistributedTests.*concurrent" ./... -count=1; then - echo -e "${RED}โŒ Stress test failed on iteration $i${NC}" - exit 1 - fi - - # Brief pause between iterations - sleep 2 -done - -echo -e "${GREEN}[OK] All stress test iterations completed successfully${NC}" diff --git a/test/s3/iam/s3_iam_distributed_test.go b/test/s3/iam/s3_iam_distributed_test.go deleted file mode 100644 index fbaf25e9d..000000000 --- a/test/s3/iam/s3_iam_distributed_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package iam - -import ( - "fmt" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestS3IAMDistributedTests tests IAM functionality across multiple S3 gateway instances -func TestS3IAMDistributedTests(t *testing.T) { - // Skip if not in distributed test mode - if os.Getenv("ENABLE_DISTRIBUTED_TESTS") != "true" { - t.Skip("Distributed tests not enabled. Set ENABLE_DISTRIBUTED_TESTS=true") - } - - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - t.Run("distributed_session_consistency", func(t *testing.T) { - // Test that sessions created on one instance are visible on others - // This requires filer-based session storage - - // Create S3 clients that would connect to different gateway instances - // In a real distributed setup, these would point to different S3 gateway ports - client1, err := framework.CreateS3ClientWithJWT("test-user", "TestAdminRole") - require.NoError(t, err) - - client2, err := framework.CreateS3ClientWithJWT("test-user", "TestAdminRole") - require.NoError(t, err) - - // Both clients should be able to perform operations - bucketName := "test-distributed-session" - - err = framework.CreateBucket(client1, bucketName) - require.NoError(t, err) - - // Client2 should see the bucket created by client1 - listResult, err := client2.ListBuckets(&s3.ListBucketsInput{}) - require.NoError(t, err) - - found := false - for _, bucket := range listResult.Buckets { - if *bucket.Name == bucketName { - found = true - break - } - } - assert.True(t, found, "Bucket should be visible across distributed instances") - - // Cleanup - _, err = client1.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }) - - t.Run("distributed_role_consistency", func(t *testing.T) { - // Test that role definitions are consistent across instances - // This requires filer-based role storage - - // Create clients with different roles - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - readOnlyClient, err := framework.CreateS3ClientWithJWT("readonly-user", "TestReadOnlyRole") - require.NoError(t, err) - - bucketName := "test-distributed-roles" - objectKey := "test-object.txt" - - // Admin should be able to create bucket - err = framework.CreateBucket(adminClient, bucketName) - require.NoError(t, err) - - // Admin should be able to put object - err = framework.PutTestObject(adminClient, bucketName, objectKey, "test content") - require.NoError(t, err) - - // Read-only user should be able to get object - content, err := framework.GetTestObject(readOnlyClient, bucketName, objectKey) - require.NoError(t, err) - assert.Equal(t, "test content", content) - - // Read-only user should NOT be able to put object - err = framework.PutTestObject(readOnlyClient, bucketName, "forbidden-object.txt", "forbidden content") - require.Error(t, err, "Read-only user should not be able to put objects") - - // Cleanup - err = framework.DeleteTestObject(adminClient, bucketName, objectKey) - require.NoError(t, err) - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }) - - t.Run("distributed_concurrent_operations", func(t *testing.T) { - // Test concurrent operations across distributed instances with robust retry mechanisms - // This approach implements proper retry logic instead of tolerating errors to catch real concurrency issues - const numGoroutines = 3 // Reduced concurrency for better CI reliability - const numOperationsPerGoroutine = 2 // Minimal operations per goroutine - const maxRetries = 3 // Maximum retry attempts for transient failures - const retryDelay = 200 * time.Millisecond // Increased delay for better stability - - var wg sync.WaitGroup - errors := make(chan error, numGoroutines*numOperationsPerGoroutine) - - // Helper function to determine if an error is retryable - isRetryableError := func(err error) bool { - if err == nil { - return false - } - errorMsg := err.Error() - return strings.Contains(errorMsg, "timeout") || - strings.Contains(errorMsg, "connection reset") || - strings.Contains(errorMsg, "temporary failure") || - strings.Contains(errorMsg, "TooManyRequests") || - strings.Contains(errorMsg, "ServiceUnavailable") || - strings.Contains(errorMsg, "InternalError") - } - - // Helper function to execute operations with retry logic - executeWithRetry := func(operation func() error, operationName string) error { - var lastErr error - for attempt := 0; attempt <= maxRetries; attempt++ { - if attempt > 0 { - time.Sleep(retryDelay * time.Duration(attempt)) // Linear backoff - } - - lastErr = operation() - if lastErr == nil { - return nil // Success - } - - if !isRetryableError(lastErr) { - // Non-retryable error - fail immediately - return fmt.Errorf("%s failed with non-retryable error: %w", operationName, lastErr) - } - - // Retryable error - continue to next attempt - if attempt < maxRetries { - t.Logf("Retrying %s (attempt %d/%d) after error: %v", operationName, attempt+1, maxRetries, lastErr) - } - } - - // All retries exhausted - return fmt.Errorf("%s failed after %d retries, last error: %w", operationName, maxRetries, lastErr) - } - - for i := 0; i < numGoroutines; i++ { - wg.Add(1) - go func(goroutineID int) { - defer wg.Done() - - client, err := framework.CreateS3ClientWithJWT(fmt.Sprintf("user-%d", goroutineID), "TestAdminRole") - if err != nil { - errors <- fmt.Errorf("failed to create S3 client for goroutine %d: %w", goroutineID, err) - return - } - - for j := 0; j < numOperationsPerGoroutine; j++ { - bucketName := fmt.Sprintf("test-concurrent-%d-%d", goroutineID, j) - objectKey := "test-object.txt" - objectContent := fmt.Sprintf("content-%d-%d", goroutineID, j) - - // Execute full operation sequence with individual retries - operationFailed := false - - // 1. Create bucket with retry - if err := executeWithRetry(func() error { - return framework.CreateBucket(client, bucketName) - }, fmt.Sprintf("CreateBucket-%s", bucketName)); err != nil { - errors <- err - operationFailed = true - } - - if !operationFailed { - // 2. Put object with retry - if err := executeWithRetry(func() error { - return framework.PutTestObject(client, bucketName, objectKey, objectContent) - }, fmt.Sprintf("PutObject-%s/%s", bucketName, objectKey)); err != nil { - errors <- err - operationFailed = true - } - } - - if !operationFailed { - // 3. Get object with retry - if err := executeWithRetry(func() error { - _, err := framework.GetTestObject(client, bucketName, objectKey) - return err - }, fmt.Sprintf("GetObject-%s/%s", bucketName, objectKey)); err != nil { - errors <- err - operationFailed = true - } - } - - if !operationFailed { - // 4. Delete object with retry - if err := executeWithRetry(func() error { - return framework.DeleteTestObject(client, bucketName, objectKey) - }, fmt.Sprintf("DeleteObject-%s/%s", bucketName, objectKey)); err != nil { - errors <- err - operationFailed = true - } - } - - // 5. Always attempt bucket cleanup, even if previous operations failed - if err := executeWithRetry(func() error { - _, err := client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - return err - }, fmt.Sprintf("DeleteBucket-%s", bucketName)); err != nil { - // Only log cleanup failures, don't fail the test - t.Logf("Warning: Failed to cleanup bucket %s: %v", bucketName, err) - } - - // Increased delay between operation sequences to reduce server load and improve stability - time.Sleep(100 * time.Millisecond) - } - }(i) - } - - wg.Wait() - close(errors) - - // Collect and analyze errors - with retry logic, we should see very few errors - var errorList []error - for err := range errors { - errorList = append(errorList, err) - } - - totalOperations := numGoroutines * numOperationsPerGoroutine - - // Report results - if len(errorList) == 0 { - t.Logf("All %d concurrent operations completed successfully with retry mechanisms!", totalOperations) - } else { - t.Logf("Concurrent operations summary:") - t.Logf(" Total operations: %d", totalOperations) - t.Logf(" Failed operations: %d (%.1f%% error rate)", len(errorList), float64(len(errorList))/float64(totalOperations)*100) - - // Log first few errors for debugging - for i, err := range errorList { - if i >= 3 { // Limit to first 3 errors - t.Logf(" ... and %d more errors", len(errorList)-3) - break - } - t.Logf(" Error %d: %v", i+1, err) - } - } - - // With proper retry mechanisms, we should expect near-zero failures - // Any remaining errors likely indicate real concurrency issues or system problems - if len(errorList) > 0 { - t.Errorf("%d operation(s) failed even after retry mechanisms (%.1f%% failure rate). This indicates potential system issues or race conditions that need investigation.", - len(errorList), float64(len(errorList))/float64(totalOperations)*100) - } - }) -} - -// TestS3IAMPerformanceTests tests IAM performance characteristics -func TestS3IAMPerformanceTests(t *testing.T) { - // Skip if not in performance test mode - if os.Getenv("ENABLE_PERFORMANCE_TESTS") != "true" { - t.Skip("Performance tests not enabled. Set ENABLE_PERFORMANCE_TESTS=true") - } - - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - t.Run("authentication_performance", func(t *testing.T) { - // Test authentication performance - const numRequests = 100 - - client, err := framework.CreateS3ClientWithJWT("perf-user", "TestAdminRole") - require.NoError(t, err) - - bucketName := "test-auth-performance" - err = framework.CreateBucket(client, bucketName) - require.NoError(t, err) - defer func() { - _, err := client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }() - - start := time.Now() - - for i := 0; i < numRequests; i++ { - _, err := client.ListBuckets(&s3.ListBucketsInput{}) - require.NoError(t, err) - } - - duration := time.Since(start) - avgLatency := duration / numRequests - - t.Logf("Authentication performance: %d requests in %v (avg: %v per request)", - numRequests, duration, avgLatency) - - // Performance assertion - should be under 100ms per request on average - assert.Less(t, avgLatency, 100*time.Millisecond, - "Average authentication latency should be under 100ms") - }) - - t.Run("authorization_performance", func(t *testing.T) { - // Test authorization performance with different policy complexities - const numRequests = 50 - - client, err := framework.CreateS3ClientWithJWT("perf-user", "TestAdminRole") - require.NoError(t, err) - - bucketName := "test-authz-performance" - err = framework.CreateBucket(client, bucketName) - require.NoError(t, err) - defer func() { - _, err := client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }() - - start := time.Now() - - for i := 0; i < numRequests; i++ { - objectKey := fmt.Sprintf("perf-object-%d.txt", i) - err := framework.PutTestObject(client, bucketName, objectKey, "performance test content") - require.NoError(t, err) - - _, err = framework.GetTestObject(client, bucketName, objectKey) - require.NoError(t, err) - - err = framework.DeleteTestObject(client, bucketName, objectKey) - require.NoError(t, err) - } - - duration := time.Since(start) - avgLatency := duration / (numRequests * 3) // 3 operations per iteration - - t.Logf("Authorization performance: %d operations in %v (avg: %v per operation)", - numRequests*3, duration, avgLatency) - - // Performance assertion - should be under 50ms per operation on average - assert.Less(t, avgLatency, 50*time.Millisecond, - "Average authorization latency should be under 50ms") - }) -} - -// BenchmarkS3IAMAuthentication benchmarks JWT authentication -func BenchmarkS3IAMAuthentication(b *testing.B) { - if os.Getenv("ENABLE_PERFORMANCE_TESTS") != "true" { - b.Skip("Performance tests not enabled. Set ENABLE_PERFORMANCE_TESTS=true") - } - - framework := NewS3IAMTestFramework(&testing.T{}) - defer framework.Cleanup() - - client, err := framework.CreateS3ClientWithJWT("bench-user", "TestAdminRole") - require.NoError(b, err) - - bucketName := "test-bench-auth" - err = framework.CreateBucket(client, bucketName) - require.NoError(b, err) - defer func() { - _, err := client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(b, err) - }() - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := client.ListBuckets(&s3.ListBucketsInput{}) - if err != nil { - b.Error(err) - } - } - }) -} - -// BenchmarkS3IAMAuthorization benchmarks policy evaluation -func BenchmarkS3IAMAuthorization(b *testing.B) { - if os.Getenv("ENABLE_PERFORMANCE_TESTS") != "true" { - b.Skip("Performance tests not enabled. Set ENABLE_PERFORMANCE_TESTS=true") - } - - framework := NewS3IAMTestFramework(&testing.T{}) - defer framework.Cleanup() - - client, err := framework.CreateS3ClientWithJWT("bench-user", "TestAdminRole") - require.NoError(b, err) - - bucketName := "test-bench-authz" - err = framework.CreateBucket(client, bucketName) - require.NoError(b, err) - defer func() { - _, err := client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(b, err) - }() - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - i := 0 - for pb.Next() { - objectKey := fmt.Sprintf("bench-object-%d.txt", i) - err := framework.PutTestObject(client, bucketName, objectKey, "benchmark content") - if err != nil { - b.Error(err) - } - i++ - } - }) -} diff --git a/test/s3/iam/s3_iam_framework.go b/test/s3/iam/s3_iam_framework.go deleted file mode 100644 index 92e880bdc..000000000 --- a/test/s3/iam/s3_iam_framework.go +++ /dev/null @@ -1,873 +0,0 @@ -package iam - -import ( - "context" - cryptorand "crypto/rand" - "crypto/rsa" - "encoding/base64" - "encoding/json" - "fmt" - "io" - mathrand "math/rand" - "net/http" - "net/http/httptest" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/golang-jwt/jwt/v5" - "github.com/stretchr/testify/require" -) - -const ( - TestS3Endpoint = "http://localhost:8333" - TestRegion = "us-west-2" - - // Keycloak configuration - DefaultKeycloakURL = "http://localhost:8080" - KeycloakRealm = "seaweedfs-test" - KeycloakClientID = "seaweedfs-s3" - KeycloakClientSecret = "seaweedfs-s3-secret" -) - -// S3IAMTestFramework provides utilities for S3+IAM integration testing -type S3IAMTestFramework struct { - t *testing.T - mockOIDC *httptest.Server - privateKey *rsa.PrivateKey - publicKey *rsa.PublicKey - createdBuckets []string - ctx context.Context - keycloakClient *KeycloakClient - useKeycloak bool -} - -// KeycloakClient handles authentication with Keycloak -type KeycloakClient struct { - baseURL string - realm string - clientID string - clientSecret string - httpClient *http.Client -} - -// KeycloakTokenResponse represents Keycloak token response -type KeycloakTokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` - RefreshToken string `json:"refresh_token,omitempty"` - Scope string `json:"scope,omitempty"` -} - -// NewS3IAMTestFramework creates a new test framework instance -func NewS3IAMTestFramework(t *testing.T) *S3IAMTestFramework { - framework := &S3IAMTestFramework{ - t: t, - ctx: context.Background(), - createdBuckets: make([]string, 0), - } - - // Check if we should use Keycloak or mock OIDC - keycloakURL := os.Getenv("KEYCLOAK_URL") - if keycloakURL == "" { - keycloakURL = DefaultKeycloakURL - } - - // Test if Keycloak is available - framework.useKeycloak = framework.isKeycloakAvailable(keycloakURL) - - if framework.useKeycloak { - t.Logf("Using real Keycloak instance at %s", keycloakURL) - framework.keycloakClient = NewKeycloakClient(keycloakURL, KeycloakRealm, KeycloakClientID, KeycloakClientSecret) - } else { - t.Logf("Using mock OIDC server for testing") - // Generate RSA keys for JWT signing (mock mode) - var err error - framework.privateKey, err = rsa.GenerateKey(cryptorand.Reader, 2048) - require.NoError(t, err) - framework.publicKey = &framework.privateKey.PublicKey - - // Setup mock OIDC server - framework.setupMockOIDCServer() - } - - return framework -} - -// NewKeycloakClient creates a new Keycloak client -func NewKeycloakClient(baseURL, realm, clientID, clientSecret string) *KeycloakClient { - return &KeycloakClient{ - baseURL: baseURL, - realm: realm, - clientID: clientID, - clientSecret: clientSecret, - httpClient: &http.Client{Timeout: 30 * time.Second}, - } -} - -// isKeycloakAvailable checks if Keycloak is running and accessible -func (f *S3IAMTestFramework) isKeycloakAvailable(keycloakURL string) bool { - client := &http.Client{Timeout: 5 * time.Second} - // Use realms endpoint instead of health/ready for Keycloak v26+ - // First, verify master realm is reachable - masterURL := fmt.Sprintf("%s/realms/master", keycloakURL) - - resp, err := client.Get(masterURL) - if err != nil { - return false - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return false - } - - // Also ensure the specific test realm exists; otherwise fall back to mock - testRealmURL := fmt.Sprintf("%s/realms/%s", keycloakURL, KeycloakRealm) - resp2, err := client.Get(testRealmURL) - if err != nil { - return false - } - defer resp2.Body.Close() - return resp2.StatusCode == http.StatusOK -} - -// AuthenticateUser authenticates a user with Keycloak and returns an access token -func (kc *KeycloakClient) AuthenticateUser(username, password string) (*KeycloakTokenResponse, error) { - tokenURL := fmt.Sprintf("%s/realms/%s/protocol/openid-connect/token", kc.baseURL, kc.realm) - - data := url.Values{} - data.Set("grant_type", "password") - data.Set("client_id", kc.clientID) - data.Set("client_secret", kc.clientSecret) - data.Set("username", username) - data.Set("password", password) - data.Set("scope", "openid profile email") - - resp, err := kc.httpClient.PostForm(tokenURL, data) - if err != nil { - return nil, fmt.Errorf("failed to authenticate with Keycloak: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - // Read the response body for debugging - body, readErr := io.ReadAll(resp.Body) - bodyStr := "" - if readErr == nil { - bodyStr = string(body) - } - return nil, fmt.Errorf("Keycloak authentication failed with status: %d, response: %s", resp.StatusCode, bodyStr) - } - - var tokenResp KeycloakTokenResponse - if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil { - return nil, fmt.Errorf("failed to decode token response: %w", err) - } - - return &tokenResp, nil -} - -// getKeycloakToken authenticates with Keycloak and returns a JWT token -func (f *S3IAMTestFramework) getKeycloakToken(username string) (string, error) { - if f.keycloakClient == nil { - return "", fmt.Errorf("Keycloak client not initialized") - } - - // Map username to password for test users - password := f.getTestUserPassword(username) - if password == "" { - return "", fmt.Errorf("unknown test user: %s", username) - } - - tokenResp, err := f.keycloakClient.AuthenticateUser(username, password) - if err != nil { - return "", fmt.Errorf("failed to authenticate user %s: %w", username, err) - } - - return tokenResp.AccessToken, nil -} - -// getTestUserPassword returns the password for test users -func (f *S3IAMTestFramework) getTestUserPassword(username string) string { - // Password generation matches setup_keycloak_docker.sh logic: - // password="${username//[^a-zA-Z]/}123" (removes non-alphabetic chars + "123") - userPasswords := map[string]string{ - "admin-user": "adminuser123", // "admin-user" -> "adminuser" + "123" - "read-user": "readuser123", // "read-user" -> "readuser" + "123" - "write-user": "writeuser123", // "write-user" -> "writeuser" + "123" - "write-only-user": "writeonlyuser123", // "write-only-user" -> "writeonlyuser" + "123" - } - - return userPasswords[username] -} - -// setupMockOIDCServer creates a mock OIDC server for testing -func (f *S3IAMTestFramework) setupMockOIDCServer() { - - f.mockOIDC = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/.well-known/openid_configuration": - config := map[string]interface{}{ - "issuer": "http://" + r.Host, - "jwks_uri": "http://" + r.Host + "/jwks", - "userinfo_endpoint": "http://" + r.Host + "/userinfo", - } - w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, `{ - "issuer": "%s", - "jwks_uri": "%s", - "userinfo_endpoint": "%s" - }`, config["issuer"], config["jwks_uri"], config["userinfo_endpoint"]) - - case "/jwks": - w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, `{ - "keys": [ - { - "kty": "RSA", - "kid": "test-key-id", - "use": "sig", - "alg": "RS256", - "n": "%s", - "e": "AQAB" - } - ] - }`, f.encodePublicKey()) - - case "/userinfo": - authHeader := r.Header.Get("Authorization") - if !strings.HasPrefix(authHeader, "Bearer ") { - w.WriteHeader(http.StatusUnauthorized) - return - } - - token := strings.TrimPrefix(authHeader, "Bearer ") - userInfo := map[string]interface{}{ - "sub": "test-user", - "email": "test@example.com", - "name": "Test User", - "groups": []string{"users", "developers"}, - } - - if strings.Contains(token, "admin") { - userInfo["groups"] = []string{"admins"} - } - - w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, `{ - "sub": "%s", - "email": "%s", - "name": "%s", - "groups": %v - }`, userInfo["sub"], userInfo["email"], userInfo["name"], userInfo["groups"]) - - default: - http.NotFound(w, r) - } - })) -} - -// encodePublicKey encodes the RSA public key for JWKS -func (f *S3IAMTestFramework) encodePublicKey() string { - return base64.RawURLEncoding.EncodeToString(f.publicKey.N.Bytes()) -} - -// BearerTokenTransport is an HTTP transport that adds Bearer token authentication -type BearerTokenTransport struct { - Transport http.RoundTripper - Token string -} - -// RoundTrip implements the http.RoundTripper interface -func (t *BearerTokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { - // Clone the request to avoid modifying the original - newReq := req.Clone(req.Context()) - - // Remove ALL existing Authorization headers first to prevent conflicts - newReq.Header.Del("Authorization") - newReq.Header.Del("X-Amz-Date") - newReq.Header.Del("X-Amz-Content-Sha256") - newReq.Header.Del("X-Amz-Signature") - newReq.Header.Del("X-Amz-Algorithm") - newReq.Header.Del("X-Amz-Credential") - newReq.Header.Del("X-Amz-SignedHeaders") - newReq.Header.Del("X-Amz-Security-Token") - - // Add Bearer token authorization header - newReq.Header.Set("Authorization", "Bearer "+t.Token) - - // Extract and set the principal ARN from JWT token for security compliance - if principal := t.extractPrincipalFromJWT(t.Token); principal != "" { - newReq.Header.Set("X-SeaweedFS-Principal", principal) - } - - // Token preview for logging (first 50 chars for security) - tokenPreview := t.Token - if len(tokenPreview) > 50 { - tokenPreview = tokenPreview[:50] + "..." - } - - // Use underlying transport - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(newReq) -} - -// extractPrincipalFromJWT extracts the principal ARN from a JWT token without validating it -// This is used to set the X-SeaweedFS-Principal header that's required after our security fix -func (t *BearerTokenTransport) extractPrincipalFromJWT(tokenString string) string { - // Parse the JWT token without validation to extract the principal claim - token, _ := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - // We don't validate the signature here, just extract the claims - // This is safe because the actual validation happens server-side - return []byte("dummy-key"), nil - }) - - // Even if parsing fails due to signature verification, we might still get claims - if claims, ok := token.Claims.(jwt.MapClaims); ok { - // Try multiple possible claim names for the principal ARN - if principal, exists := claims["principal"]; exists { - if principalStr, ok := principal.(string); ok { - return principalStr - } - } - if assumed, exists := claims["assumed"]; exists { - if assumedStr, ok := assumed.(string); ok { - return assumedStr - } - } - } - - return "" -} - -// generateSTSSessionToken creates a session token using the actual STS service for proper validation -func (f *S3IAMTestFramework) generateSTSSessionToken(username, roleName string, validDuration time.Duration) (string, error) { - // For now, simulate what the STS service would return by calling AssumeRoleWithWebIdentity - // In a real test, we'd make an actual HTTP call to the STS endpoint - // But for unit testing, we'll create a realistic JWT manually that will pass validation - - now := time.Now() - signingKeyB64 := "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" - signingKey, err := base64.StdEncoding.DecodeString(signingKeyB64) - if err != nil { - return "", fmt.Errorf("failed to decode signing key: %v", err) - } - - // Generate a session ID that would be created by the STS service - sessionId := fmt.Sprintf("test-session-%s-%s-%d", username, roleName, now.Unix()) - - // Create session token claims exactly matching STSSessionClaims struct - roleArn := fmt.Sprintf("arn:seaweed:iam::role/%s", roleName) - sessionName := fmt.Sprintf("test-session-%s", username) - principalArn := fmt.Sprintf("arn:seaweed:sts::assumed-role/%s/%s", roleName, sessionName) - - // Use jwt.MapClaims but with exact field names that STSSessionClaims expects - sessionClaims := jwt.MapClaims{ - // RegisteredClaims fields - "iss": "seaweedfs-sts", - "sub": sessionId, - "iat": now.Unix(), - "exp": now.Add(validDuration).Unix(), - "nbf": now.Unix(), - - // STSSessionClaims fields (using exact JSON tags from the struct) - "sid": sessionId, // SessionId - "snam": sessionName, // SessionName - "typ": "session", // TokenType - "role": roleArn, // RoleArn - "assumed": principalArn, // AssumedRole - "principal": principalArn, // Principal - "idp": "test-oidc", // IdentityProvider - "ext_uid": username, // ExternalUserId - "assumed_at": now.Format(time.RFC3339Nano), // AssumedAt - "max_dur": int64(validDuration.Seconds()), // MaxDuration - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, sessionClaims) - tokenString, err := token.SignedString(signingKey) - if err != nil { - return "", err - } - - // The generated JWT is self-contained and includes all necessary session information. - // The stateless design of the STS service means no external session storage is required. - - return tokenString, nil -} - -// CreateS3ClientWithJWT creates an S3 client authenticated with a JWT token for the specified role -func (f *S3IAMTestFramework) CreateS3ClientWithJWT(username, roleName string) (*s3.S3, error) { - var token string - var err error - - if f.useKeycloak { - // Use real Keycloak authentication - token, err = f.getKeycloakToken(username) - if err != nil { - return nil, fmt.Errorf("failed to get Keycloak token: %v", err) - } - } else { - // Generate STS session token (mock mode) - token, err = f.generateSTSSessionToken(username, roleName, time.Hour) - if err != nil { - return nil, fmt.Errorf("failed to generate STS session token: %v", err) - } - } - - // Create custom HTTP client with Bearer token transport - httpClient := &http.Client{ - Transport: &BearerTokenTransport{ - Token: token, - }, - } - - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - HTTPClient: httpClient, - // Use anonymous credentials to avoid AWS signature generation - Credentials: credentials.AnonymousCredentials, - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - }) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %v", err) - } - - return s3.New(sess), nil -} - -// CreateS3ClientWithInvalidJWT creates an S3 client with an invalid JWT token -func (f *S3IAMTestFramework) CreateS3ClientWithInvalidJWT() (*s3.S3, error) { - invalidToken := "invalid.jwt.token" - - // Create custom HTTP client with Bearer token transport - httpClient := &http.Client{ - Transport: &BearerTokenTransport{ - Token: invalidToken, - }, - } - - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - HTTPClient: httpClient, - // Use anonymous credentials to avoid AWS signature generation - Credentials: credentials.AnonymousCredentials, - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - }) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %v", err) - } - - return s3.New(sess), nil -} - -// CreateS3ClientWithExpiredJWT creates an S3 client with an expired JWT token -func (f *S3IAMTestFramework) CreateS3ClientWithExpiredJWT(username, roleName string) (*s3.S3, error) { - // Generate expired STS session token (expired 1 hour ago) - token, err := f.generateSTSSessionToken(username, roleName, -time.Hour) - if err != nil { - return nil, fmt.Errorf("failed to generate expired STS session token: %v", err) - } - - // Create custom HTTP client with Bearer token transport - httpClient := &http.Client{ - Transport: &BearerTokenTransport{ - Token: token, - }, - } - - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - HTTPClient: httpClient, - // Use anonymous credentials to avoid AWS signature generation - Credentials: credentials.AnonymousCredentials, - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - }) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %v", err) - } - - return s3.New(sess), nil -} - -// CreateS3ClientWithSessionToken creates an S3 client with a session token -func (f *S3IAMTestFramework) CreateS3ClientWithSessionToken(sessionToken string) (*s3.S3, error) { - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - Credentials: credentials.NewStaticCredentials( - "session-access-key", - "session-secret-key", - sessionToken, - ), - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - }) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %v", err) - } - - return s3.New(sess), nil -} - -// CreateS3ClientWithKeycloakToken creates an S3 client using a Keycloak JWT token -func (f *S3IAMTestFramework) CreateS3ClientWithKeycloakToken(keycloakToken string) (*s3.S3, error) { - // Determine response header timeout based on environment - responseHeaderTimeout := 10 * time.Second - overallTimeout := 30 * time.Second - if os.Getenv("GITHUB_ACTIONS") == "true" { - responseHeaderTimeout = 30 * time.Second // Longer timeout for CI JWT validation - overallTimeout = 60 * time.Second - } - - // Create a fresh HTTP transport with appropriate timeouts - transport := &http.Transport{ - DisableKeepAlives: true, // Force new connections for each request - DisableCompression: true, // Disable compression to simplify requests - MaxIdleConns: 0, // No connection pooling - MaxIdleConnsPerHost: 0, // No connection pooling per host - IdleConnTimeout: 1 * time.Second, - TLSHandshakeTimeout: 5 * time.Second, - ResponseHeaderTimeout: responseHeaderTimeout, // Adjustable for CI environments - ExpectContinueTimeout: 1 * time.Second, - } - - // Create a custom HTTP client with appropriate timeouts - httpClient := &http.Client{ - Timeout: overallTimeout, // Overall request timeout (adjustable for CI) - Transport: &BearerTokenTransport{ - Token: keycloakToken, - Transport: transport, - }, - } - - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - Credentials: credentials.AnonymousCredentials, - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - HTTPClient: httpClient, - MaxRetries: aws.Int(0), // No retries to avoid delays - }) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %v", err) - } - - return s3.New(sess), nil -} - -// TestKeycloakTokenDirectly tests a Keycloak token with direct HTTP request (bypassing AWS SDK) -func (f *S3IAMTestFramework) TestKeycloakTokenDirectly(keycloakToken string) error { - // Create a simple HTTP client with timeout - client := &http.Client{ - Timeout: 10 * time.Second, - } - - // Create request to list buckets - req, err := http.NewRequest("GET", TestS3Endpoint, nil) - if err != nil { - return fmt.Errorf("failed to create request: %v", err) - } - - // Add Bearer token - req.Header.Set("Authorization", "Bearer "+keycloakToken) - req.Header.Set("Host", "localhost:8333") - - // Make request - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("request failed: %v", err) - } - defer resp.Body.Close() - - // Read response - _, err = io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read response: %v", err) - } - - return nil -} - -// generateJWTToken creates a JWT token for testing -func (f *S3IAMTestFramework) generateJWTToken(username, roleName string, validDuration time.Duration) (string, error) { - now := time.Now() - claims := jwt.MapClaims{ - "sub": username, - "iss": f.mockOIDC.URL, - "aud": "test-client", - "exp": now.Add(validDuration).Unix(), - "iat": now.Unix(), - "email": username + "@example.com", - "name": strings.Title(username), - } - - // Add role-specific groups - switch roleName { - case "TestAdminRole": - claims["groups"] = []string{"admins"} - case "TestReadOnlyRole": - claims["groups"] = []string{"users"} - case "TestWriteOnlyRole": - claims["groups"] = []string{"writers"} - default: - claims["groups"] = []string{"users"} - } - - token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) - token.Header["kid"] = "test-key-id" - - tokenString, err := token.SignedString(f.privateKey) - if err != nil { - return "", fmt.Errorf("failed to sign token: %v", err) - } - - return tokenString, nil -} - -// CreateShortLivedSessionToken creates a mock session token for testing -func (f *S3IAMTestFramework) CreateShortLivedSessionToken(username, roleName string, durationSeconds int64) (string, error) { - // For testing purposes, create a mock session token - // In reality, this would be generated by the STS service - return fmt.Sprintf("mock-session-token-%s-%s-%d", username, roleName, time.Now().Unix()), nil -} - -// ExpireSessionForTesting simulates session expiration for testing -func (f *S3IAMTestFramework) ExpireSessionForTesting(sessionToken string) error { - // For integration tests, this would typically involve calling the STS service - // For now, we just simulate success since the actual expiration will be handled by SeaweedFS - return nil -} - -// GenerateUniqueBucketName generates a unique bucket name for testing -func (f *S3IAMTestFramework) GenerateUniqueBucketName(prefix string) string { - // Use test name and timestamp to ensure uniqueness - testName := strings.ToLower(f.t.Name()) - testName = strings.ReplaceAll(testName, "/", "-") - testName = strings.ReplaceAll(testName, "_", "-") - - // Add random suffix to handle parallel tests - randomSuffix := mathrand.Intn(10000) - - return fmt.Sprintf("%s-%s-%d", prefix, testName, randomSuffix) -} - -// CreateBucket creates a bucket and tracks it for cleanup -func (f *S3IAMTestFramework) CreateBucket(s3Client *s3.S3, bucketName string) error { - _, err := s3Client.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - return err - } - - // Track bucket for cleanup - f.createdBuckets = append(f.createdBuckets, bucketName) - return nil -} - -// CreateBucketWithCleanup creates a bucket, cleaning up any existing bucket first -func (f *S3IAMTestFramework) CreateBucketWithCleanup(s3Client *s3.S3, bucketName string) error { - // First try to create the bucket normally - _, err := s3Client.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - - if err != nil { - // If bucket already exists, clean it up first - if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "BucketAlreadyExists" || awsErr.Code() == "BucketAlreadyOwnedByYou") { - f.t.Logf("Bucket %s already exists, cleaning up first", bucketName) - - // First try to delete the bucket completely - f.emptyBucket(s3Client, bucketName) - _, deleteErr := s3Client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if deleteErr != nil { - f.t.Logf("Warning: Failed to delete existing bucket %s: %v", bucketName, deleteErr) - } - - // Now create it fresh - _, err = s3Client.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - return fmt.Errorf("failed to recreate bucket after cleanup: %v", err) - } - } else { - return err - } - } - - // Track bucket for cleanup - f.createdBuckets = append(f.createdBuckets, bucketName) - return nil -} - -// emptyBucket removes all objects from a bucket -func (f *S3IAMTestFramework) emptyBucket(s3Client *s3.S3, bucketName string) { - // Delete all objects - listResult, err := s3Client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - if err == nil { - for _, obj := range listResult.Contents { - _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: obj.Key, - }) - if err != nil { - f.t.Logf("Warning: Failed to delete object %s/%s: %v", bucketName, *obj.Key, err) - } - } - } -} - -// Cleanup cleans up test resources -func (f *S3IAMTestFramework) Cleanup() { - // Clean up buckets (best effort) - if len(f.createdBuckets) > 0 { - // Create admin client for cleanup - adminClient, err := f.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - if err == nil { - for _, bucket := range f.createdBuckets { - // Try to empty bucket first - listResult, err := adminClient.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucket), - }) - if err == nil { - for _, obj := range listResult.Contents { - adminClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: obj.Key, - }) - } - } - - // Delete bucket - adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucket), - }) - } - } - } - - // Close mock OIDC server - if f.mockOIDC != nil { - f.mockOIDC.Close() - } -} - -// WaitForS3Service waits for the S3 service to be available -func (f *S3IAMTestFramework) WaitForS3Service() error { - // Create a basic S3 client - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(TestRegion), - Endpoint: aws.String(TestS3Endpoint), - Credentials: credentials.NewStaticCredentials( - "test-access-key", - "test-secret-key", - "", - ), - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - }) - if err != nil { - return fmt.Errorf("failed to create AWS session: %v", err) - } - - s3Client := s3.New(sess) - - // Try to list buckets to check if service is available - maxRetries := 30 - for i := 0; i < maxRetries; i++ { - _, err := s3Client.ListBuckets(&s3.ListBucketsInput{}) - if err == nil { - return nil - } - time.Sleep(1 * time.Second) - } - - return fmt.Errorf("S3 service not available after %d retries", maxRetries) -} - -// PutTestObject puts a test object in the specified bucket -func (f *S3IAMTestFramework) PutTestObject(client *s3.S3, bucket, key, content string) error { - _, err := client.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - return err -} - -// GetTestObject retrieves a test object from the specified bucket -func (f *S3IAMTestFramework) GetTestObject(client *s3.S3, bucket, key string) (string, error) { - result, err := client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - return "", err - } - defer result.Body.Close() - - content := strings.Builder{} - _, err = io.Copy(&content, result.Body) - if err != nil { - return "", err - } - - return content.String(), nil -} - -// ListTestObjects lists objects in the specified bucket -func (f *S3IAMTestFramework) ListTestObjects(client *s3.S3, bucket string) ([]string, error) { - result, err := client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucket), - }) - if err != nil { - return nil, err - } - - var keys []string - for _, obj := range result.Contents { - keys = append(keys, *obj.Key) - } - - return keys, nil -} - -// DeleteTestObject deletes a test object from the specified bucket -func (f *S3IAMTestFramework) DeleteTestObject(client *s3.S3, bucket, key string) error { - _, err := client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - return err -} - -// WaitForS3Service waits for the S3 service to be available (simplified version) -func (f *S3IAMTestFramework) WaitForS3ServiceSimple() error { - // This is a simplified version that just checks if the endpoint responds - // The full implementation would be in the Makefile's wait-for-services target - return nil -} diff --git a/test/s3/iam/s3_iam_integration_test.go b/test/s3/iam/s3_iam_integration_test.go deleted file mode 100644 index c7836c4bf..000000000 --- a/test/s3/iam/s3_iam_integration_test.go +++ /dev/null @@ -1,586 +0,0 @@ -package iam - -import ( - "fmt" - "io" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - testEndpoint = "http://localhost:8333" - testRegion = "us-west-2" - testBucket = "test-iam-bucket" - testObjectKey = "test-object.txt" - testObjectData = "Hello, SeaweedFS IAM Integration!" -) - -// TestS3IAMAuthentication tests S3 API authentication with IAM JWT tokens -func TestS3IAMAuthentication(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - t.Run("valid_jwt_token_authentication", func(t *testing.T) { - // Create S3 client with valid JWT token - s3Client, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - // Test bucket operations - err = framework.CreateBucket(s3Client, testBucket) - require.NoError(t, err) - - // Verify bucket exists - buckets, err := s3Client.ListBuckets(&s3.ListBucketsInput{}) - require.NoError(t, err) - - found := false - for _, bucket := range buckets.Buckets { - if *bucket.Name == testBucket { - found = true - break - } - } - assert.True(t, found, "Created bucket should be listed") - }) - - t.Run("invalid_jwt_token_authentication", func(t *testing.T) { - // Create S3 client with invalid JWT token - s3Client, err := framework.CreateS3ClientWithInvalidJWT() - require.NoError(t, err) - - // Attempt bucket operations - should fail - err = framework.CreateBucket(s3Client, testBucket+"-invalid") - require.Error(t, err) - - // Verify it's an access denied error - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } else { - t.Error("Expected AWS error with AccessDenied code") - } - }) - - t.Run("expired_jwt_token_authentication", func(t *testing.T) { - // Create S3 client with expired JWT token - s3Client, err := framework.CreateS3ClientWithExpiredJWT("expired-user", "TestAdminRole") - require.NoError(t, err) - - // Attempt bucket operations - should fail - err = framework.CreateBucket(s3Client, testBucket+"-expired") - require.Error(t, err) - - // Verify it's an access denied error - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } else { - t.Error("Expected AWS error with AccessDenied code") - } - }) -} - -// TestS3IAMPolicyEnforcement tests policy enforcement for different S3 operations -func TestS3IAMPolicyEnforcement(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Setup test bucket with admin client - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - // Use unique bucket name to avoid collection conflicts - bucketName := framework.GenerateUniqueBucketName("test-iam-policy") - err = framework.CreateBucket(adminClient, bucketName) - require.NoError(t, err) - - // Put test object with admin client - _, err = adminClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - Body: strings.NewReader(testObjectData), - }) - require.NoError(t, err) - - t.Run("read_only_policy_enforcement", func(t *testing.T) { - // Create S3 client with read-only role - readOnlyClient, err := framework.CreateS3ClientWithJWT("read-user", "TestReadOnlyRole") - require.NoError(t, err) - - // Should be able to read objects - result, err := readOnlyClient.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err) - - data, err := io.ReadAll(result.Body) - require.NoError(t, err) - assert.Equal(t, testObjectData, string(data)) - result.Body.Close() - - // Should be able to list objects - listResult, err := readOnlyClient.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResult.Contents, 1) - assert.Equal(t, testObjectKey, *listResult.Contents[0].Key) - - // Should NOT be able to put objects - _, err = readOnlyClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String("forbidden-object.txt"), - Body: strings.NewReader("This should fail"), - }) - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - - // Should NOT be able to delete objects - _, err = readOnlyClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - }) - - t.Run("write_only_policy_enforcement", func(t *testing.T) { - // Create S3 client with write-only role - writeOnlyClient, err := framework.CreateS3ClientWithJWT("write-user", "TestWriteOnlyRole") - require.NoError(t, err) - - // Should be able to put objects - testWriteKey := "write-test-object.txt" - testWriteData := "Write-only test data" - - _, err = writeOnlyClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testWriteKey), - Body: strings.NewReader(testWriteData), - }) - require.NoError(t, err) - - // Should be able to delete objects - _, err = writeOnlyClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testWriteKey), - }) - require.NoError(t, err) - - // Should NOT be able to read objects - _, err = writeOnlyClient.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - - // Should NOT be able to list objects - _, err = writeOnlyClient.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - }) - - t.Run("admin_policy_enforcement", func(t *testing.T) { - // Admin client should be able to do everything - testAdminKey := "admin-test-object.txt" - testAdminData := "Admin test data" - - // Should be able to put objects - _, err = adminClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testAdminKey), - Body: strings.NewReader(testAdminData), - }) - require.NoError(t, err) - - // Should be able to read objects - result, err := adminClient.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testAdminKey), - }) - require.NoError(t, err) - - data, err := io.ReadAll(result.Body) - require.NoError(t, err) - assert.Equal(t, testAdminData, string(data)) - result.Body.Close() - - // Should be able to list objects - listResult, err := adminClient.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.GreaterOrEqual(t, len(listResult.Contents), 1) - - // Should be able to delete objects - _, err = adminClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testAdminKey), - }) - require.NoError(t, err) - - // Should be able to delete buckets - // First delete remaining objects - _, err = adminClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err) - - // Then delete the bucket - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }) -} - -// TestS3IAMSessionExpiration tests session expiration handling -func TestS3IAMSessionExpiration(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - t.Run("session_expiration_enforcement", func(t *testing.T) { - // Create S3 client with valid JWT token - s3Client, err := framework.CreateS3ClientWithJWT("session-user", "TestAdminRole") - require.NoError(t, err) - - // Initially should work - err = framework.CreateBucket(s3Client, testBucket+"-session") - require.NoError(t, err) - - // Create S3 client with expired JWT token - expiredClient, err := framework.CreateS3ClientWithExpiredJWT("session-user", "TestAdminRole") - require.NoError(t, err) - - // Now operations should fail with expired token - err = framework.CreateBucket(expiredClient, testBucket+"-session-expired") - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - - // Cleanup the successful bucket - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(testBucket + "-session"), - }) - require.NoError(t, err) - }) -} - -// TestS3IAMMultipartUploadPolicyEnforcement tests multipart upload with IAM policies -func TestS3IAMMultipartUploadPolicyEnforcement(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Setup test bucket with admin client - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - err = framework.CreateBucket(adminClient, testBucket) - require.NoError(t, err) - - t.Run("multipart_upload_with_write_permissions", func(t *testing.T) { - // Create S3 client with admin role (has multipart permissions) - s3Client := adminClient - - // Initiate multipart upload - multipartKey := "large-test-file.txt" - initResult, err := s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - }) - require.NoError(t, err) - - uploadId := initResult.UploadId - - // Upload a part - partNumber := int64(1) - partData := strings.Repeat("Test data for multipart upload. ", 1000) // ~30KB - - uploadResult, err := s3Client.UploadPart(&s3.UploadPartInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - PartNumber: aws.Int64(partNumber), - UploadId: uploadId, - Body: strings.NewReader(partData), - }) - require.NoError(t, err) - - // Complete multipart upload - _, err = s3Client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - UploadId: uploadId, - MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: []*s3.CompletedPart{ - { - ETag: uploadResult.ETag, - PartNumber: aws.Int64(partNumber), - }, - }, - }, - }) - require.NoError(t, err) - - // Verify object was created - result, err := s3Client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - }) - require.NoError(t, err) - - data, err := io.ReadAll(result.Body) - require.NoError(t, err) - assert.Equal(t, partData, string(data)) - result.Body.Close() - - // Cleanup - _, err = s3Client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - }) - require.NoError(t, err) - }) - - t.Run("multipart_upload_denied_for_read_only", func(t *testing.T) { - // Create S3 client with read-only role - readOnlyClient, err := framework.CreateS3ClientWithJWT("read-user", "TestReadOnlyRole") - require.NoError(t, err) - - // Attempt to initiate multipart upload - should fail - multipartKey := "denied-multipart-file.txt" - _, err = readOnlyClient.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ - Bucket: aws.String(testBucket), - Key: aws.String(multipartKey), - }) - require.Error(t, err) - if awsErr, ok := err.(awserr.Error); ok { - assert.Equal(t, "AccessDenied", awsErr.Code()) - } - }) - - // Cleanup - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(testBucket), - }) - require.NoError(t, err) -} - -// TestS3IAMBucketPolicyIntegration tests bucket policy integration with IAM -func TestS3IAMBucketPolicyIntegration(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Setup test bucket with admin client - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - // Use unique bucket name to avoid collection conflicts - bucketName := framework.GenerateUniqueBucketName("test-iam-bucket-policy") - err = framework.CreateBucket(adminClient, bucketName) - require.NoError(t, err) - - t.Run("bucket_policy_allows_public_read", func(t *testing.T) { - // Set bucket policy to allow public read access - bucketPolicy := fmt.Sprintf(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "PublicReadGetObject", - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject"], - "Resource": ["arn:seaweed:s3:::%s/*"] - } - ] - }`, bucketName) - - _, err = adminClient.PutBucketPolicy(&s3.PutBucketPolicyInput{ - Bucket: aws.String(bucketName), - Policy: aws.String(bucketPolicy), - }) - require.NoError(t, err) - - // Put test object - _, err = adminClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - Body: strings.NewReader(testObjectData), - }) - require.NoError(t, err) - - // Test with read-only client - should now be allowed due to bucket policy - readOnlyClient, err := framework.CreateS3ClientWithJWT("read-user", "TestReadOnlyRole") - require.NoError(t, err) - - result, err := readOnlyClient.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err) - - data, err := io.ReadAll(result.Body) - require.NoError(t, err) - assert.Equal(t, testObjectData, string(data)) - result.Body.Close() - }) - - t.Run("bucket_policy_denies_specific_action", func(t *testing.T) { - // Set bucket policy to deny delete operations - bucketPolicy := fmt.Sprintf(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "DenyDelete", - "Effect": "Deny", - "Principal": "*", - "Action": ["s3:DeleteObject"], - "Resource": ["arn:seaweed:s3:::%s/*"] - } - ] - }`, bucketName) - - _, err = adminClient.PutBucketPolicy(&s3.PutBucketPolicyInput{ - Bucket: aws.String(bucketName), - Policy: aws.String(bucketPolicy), - }) - require.NoError(t, err) - - // Verify that the bucket policy was stored successfully by retrieving it - policyResult, err := adminClient.GetBucketPolicy(&s3.GetBucketPolicyInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Contains(t, *policyResult.Policy, "s3:DeleteObject") - assert.Contains(t, *policyResult.Policy, "Deny") - - // IMPLEMENTATION NOTE: Bucket policy enforcement in authorization flow - // is planned for a future phase. Currently, this test validates policy - // storage and retrieval. When enforcement is implemented, this test - // should be extended to verify that delete operations are actually denied. - }) - - // Cleanup - delete bucket policy first, then objects and bucket - _, err = adminClient.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - _, err = adminClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err) - - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) -} - -// TestS3IAMContextualPolicyEnforcement tests context-aware policy enforcement -func TestS3IAMContextualPolicyEnforcement(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // This test would verify IP-based restrictions, time-based restrictions, - // and other context-aware policy conditions - // For now, we'll focus on the basic structure - - t.Run("ip_based_policy_enforcement", func(t *testing.T) { - // IMPLEMENTATION NOTE: IP-based policy testing framework planned for future release - // Requirements: - // - Configure IAM policies with IpAddress/NotIpAddress conditions - // - Multi-container test setup with controlled source IP addresses - // - Test policy enforcement from allowed vs denied IP ranges - t.Skip("IP-based policy testing requires advanced network configuration and multi-container setup") - }) - - t.Run("time_based_policy_enforcement", func(t *testing.T) { - // IMPLEMENTATION NOTE: Time-based policy testing framework planned for future release - // Requirements: - // - Configure IAM policies with DateGreaterThan/DateLessThan conditions - // - Time manipulation capabilities for testing different time windows - // - Test policy enforcement during allowed vs restricted time periods - t.Skip("Time-based policy testing requires time manipulation capabilities") - }) -} - -// TestS3IAMPresignedURLIntegration tests presigned URL generation with IAM -func TestS3IAMPresignedURLIntegration(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Setup test bucket with admin client - adminClient, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err) - - // Use static bucket name but with cleanup to handle conflicts - err = framework.CreateBucketWithCleanup(adminClient, testBucket) - require.NoError(t, err) - - // Put test object - _, err = adminClient.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(testObjectKey), - Body: strings.NewReader(testObjectData), - }) - require.NoError(t, err) - - t.Run("presigned_url_generation_and_usage", func(t *testing.T) { - // ARCHITECTURAL NOTE: AWS SDK presigned URLs are incompatible with JWT Bearer authentication - // - // AWS SDK presigned URLs use AWS Signature Version 4 (SigV4) which requires: - // - Access Key ID and Secret Access Key for signing - // - Query parameter-based authentication in the URL - // - // SeaweedFS JWT authentication uses: - // - Bearer tokens in the Authorization header - // - Stateless JWT validation without AWS-style signing - // - // RECOMMENDATION: For JWT-authenticated applications, use direct API calls - // with Bearer tokens rather than presigned URLs. - - // Test direct object access with JWT Bearer token (recommended approach) - _, err := adminClient.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err, "Direct object access with JWT Bearer token works correctly") - - t.Log("JWT Bearer token authentication confirmed working for direct S3 API calls") - t.Log("Note: Presigned URLs are not supported with JWT Bearer authentication by design") - }) - - // Cleanup - _, err = adminClient.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(testBucket), - Key: aws.String(testObjectKey), - }) - require.NoError(t, err) - - _, err = adminClient.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(testBucket), - }) - require.NoError(t, err) -} diff --git a/test/s3/iam/s3_keycloak_integration_test.go b/test/s3/iam/s3_keycloak_integration_test.go deleted file mode 100644 index 0bb87161d..000000000 --- a/test/s3/iam/s3_keycloak_integration_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package iam - -import ( - "encoding/base64" - "encoding/json" - "os" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - testKeycloakBucket = "test-keycloak-bucket" -) - -// TestKeycloakIntegrationAvailable checks if Keycloak is available for testing -func TestKeycloakIntegrationAvailable(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - if !framework.useKeycloak { - t.Skip("Keycloak not available, skipping integration tests") - } - - // Test Keycloak health - assert.True(t, framework.useKeycloak, "Keycloak should be available") - assert.NotNil(t, framework.keycloakClient, "Keycloak client should be initialized") -} - -// TestKeycloakAuthentication tests authentication flow with real Keycloak -func TestKeycloakAuthentication(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - if !framework.useKeycloak { - t.Skip("Keycloak not available, skipping integration tests") - } - - t.Run("admin_user_authentication", func(t *testing.T) { - // Test admin user authentication - token, err := framework.getKeycloakToken("admin-user") - require.NoError(t, err) - assert.NotEmpty(t, token, "JWT token should not be empty") - - // Verify token can be used to create S3 client - s3Client, err := framework.CreateS3ClientWithKeycloakToken(token) - require.NoError(t, err) - assert.NotNil(t, s3Client, "S3 client should be created successfully") - - // Test bucket operations with admin privileges - err = framework.CreateBucket(s3Client, testKeycloakBucket) - assert.NoError(t, err, "Admin user should be able to create buckets") - - // Verify bucket exists - buckets, err := s3Client.ListBuckets(&s3.ListBucketsInput{}) - require.NoError(t, err) - - found := false - for _, bucket := range buckets.Buckets { - if *bucket.Name == testKeycloakBucket { - found = true - break - } - } - assert.True(t, found, "Created bucket should be listed") - }) - - t.Run("read_only_user_authentication", func(t *testing.T) { - // Test read-only user authentication - token, err := framework.getKeycloakToken("read-user") - require.NoError(t, err) - assert.NotEmpty(t, token, "JWT token should not be empty") - - // Debug: decode token to verify it's for read-user - parts := strings.Split(token, ".") - if len(parts) >= 2 { - payload := parts[1] - // JWTs use URL-safe base64 encoding without padding (RFC 4648 ยง5) - decoded, err := base64.RawURLEncoding.DecodeString(payload) - if err == nil { - var claims map[string]interface{} - if json.Unmarshal(decoded, &claims) == nil { - t.Logf("Token username: %v", claims["preferred_username"]) - t.Logf("Token roles: %v", claims["roles"]) - } - } - } - - // First test with direct HTTP request to verify OIDC authentication works - t.Logf("Testing with direct HTTP request...") - err = framework.TestKeycloakTokenDirectly(token) - require.NoError(t, err, "Direct HTTP test should succeed") - - // Create S3 client with Keycloak token - s3Client, err := framework.CreateS3ClientWithKeycloakToken(token) - require.NoError(t, err) - - // Test that read-only user can list buckets - t.Logf("Testing ListBuckets with AWS SDK...") - _, err = s3Client.ListBuckets(&s3.ListBucketsInput{}) - assert.NoError(t, err, "Read-only user should be able to list buckets") - - // Test that read-only user cannot create buckets - t.Logf("Testing CreateBucket with AWS SDK...") - err = framework.CreateBucket(s3Client, testKeycloakBucket+"-readonly") - assert.Error(t, err, "Read-only user should not be able to create buckets") - }) - - t.Run("invalid_user_authentication", func(t *testing.T) { - // Test authentication with invalid credentials - _, err := framework.keycloakClient.AuthenticateUser("invalid-user", "invalid-password") - assert.Error(t, err, "Authentication with invalid credentials should fail") - }) -} - -// TestKeycloakTokenExpiration tests JWT token expiration handling -func TestKeycloakTokenExpiration(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - if !framework.useKeycloak { - t.Skip("Keycloak not available, skipping integration tests") - } - - // Get a short-lived token (if Keycloak is configured for it) - // Use consistent password that matches Docker setup script logic: "adminuser123" - tokenResp, err := framework.keycloakClient.AuthenticateUser("admin-user", "adminuser123") - require.NoError(t, err) - - // Verify token properties - assert.NotEmpty(t, tokenResp.AccessToken, "Access token should not be empty") - assert.Equal(t, "Bearer", tokenResp.TokenType, "Token type should be Bearer") - assert.Greater(t, tokenResp.ExpiresIn, 0, "Token should have expiration time") - - // Test that token works initially - token, err := framework.getKeycloakToken("admin-user") - require.NoError(t, err) - - s3Client, err := framework.CreateS3ClientWithKeycloakToken(token) - require.NoError(t, err) - - _, err = s3Client.ListBuckets(&s3.ListBucketsInput{}) - assert.NoError(t, err, "Fresh token should work for S3 operations") -} - -// TestKeycloakRoleMapping tests role mapping from Keycloak to S3 policies -func TestKeycloakRoleMapping(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - if !framework.useKeycloak { - t.Skip("Keycloak not available, skipping integration tests") - } - - testCases := []struct { - username string - expectedRole string - canCreateBucket bool - canListBuckets bool - description string - }{ - { - username: "admin-user", - expectedRole: "S3AdminRole", - canCreateBucket: true, - canListBuckets: true, - description: "Admin user should have full access", - }, - { - username: "read-user", - expectedRole: "S3ReadOnlyRole", - canCreateBucket: false, - canListBuckets: true, - description: "Read-only user should have read-only access", - }, - { - username: "write-user", - expectedRole: "S3ReadWriteRole", - canCreateBucket: true, - canListBuckets: true, - description: "Read-write user should have read-write access", - }, - } - - for _, tc := range testCases { - t.Run(tc.username, func(t *testing.T) { - // Get Keycloak token for the user - token, err := framework.getKeycloakToken(tc.username) - require.NoError(t, err) - - // Create S3 client with Keycloak token - s3Client, err := framework.CreateS3ClientWithKeycloakToken(token) - require.NoError(t, err, tc.description) - - // Test list buckets permission - _, err = s3Client.ListBuckets(&s3.ListBucketsInput{}) - if tc.canListBuckets { - assert.NoError(t, err, "%s should be able to list buckets", tc.username) - } else { - assert.Error(t, err, "%s should not be able to list buckets", tc.username) - } - - // Test create bucket permission - testBucketName := testKeycloakBucket + "-" + tc.username - err = framework.CreateBucket(s3Client, testBucketName) - if tc.canCreateBucket { - assert.NoError(t, err, "%s should be able to create buckets", tc.username) - } else { - assert.Error(t, err, "%s should not be able to create buckets", tc.username) - } - }) - } -} - -// TestKeycloakS3Operations tests comprehensive S3 operations with Keycloak authentication -func TestKeycloakS3Operations(t *testing.T) { - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - if !framework.useKeycloak { - t.Skip("Keycloak not available, skipping integration tests") - } - - // Use admin user for comprehensive testing - token, err := framework.getKeycloakToken("admin-user") - require.NoError(t, err) - - s3Client, err := framework.CreateS3ClientWithKeycloakToken(token) - require.NoError(t, err) - - bucketName := testKeycloakBucket + "-operations" - - t.Run("bucket_lifecycle", func(t *testing.T) { - // Create bucket - err = framework.CreateBucket(s3Client, bucketName) - require.NoError(t, err, "Should be able to create bucket") - - // Verify bucket exists - buckets, err := s3Client.ListBuckets(&s3.ListBucketsInput{}) - require.NoError(t, err) - - found := false - for _, bucket := range buckets.Buckets { - if *bucket.Name == bucketName { - found = true - break - } - } - assert.True(t, found, "Created bucket should be listed") - }) - - t.Run("object_operations", func(t *testing.T) { - objectKey := "test-object.txt" - objectContent := "Hello from Keycloak-authenticated SeaweedFS!" - - // Put object - err = framework.PutTestObject(s3Client, bucketName, objectKey, objectContent) - require.NoError(t, err, "Should be able to put object") - - // Get object - content, err := framework.GetTestObject(s3Client, bucketName, objectKey) - require.NoError(t, err, "Should be able to get object") - assert.Equal(t, objectContent, content, "Object content should match") - - // List objects - objects, err := framework.ListTestObjects(s3Client, bucketName) - require.NoError(t, err, "Should be able to list objects") - assert.Contains(t, objects, objectKey, "Object should be listed") - - // Delete object - err = framework.DeleteTestObject(s3Client, bucketName, objectKey) - assert.NoError(t, err, "Should be able to delete object") - }) -} - -// TestKeycloakFailover tests fallback to mock OIDC when Keycloak is unavailable -func TestKeycloakFailover(t *testing.T) { - // Temporarily override Keycloak URL to simulate unavailability - originalURL := os.Getenv("KEYCLOAK_URL") - os.Setenv("KEYCLOAK_URL", "http://localhost:9999") // Non-existent service - defer func() { - if originalURL != "" { - os.Setenv("KEYCLOAK_URL", originalURL) - } else { - os.Unsetenv("KEYCLOAK_URL") - } - }() - - framework := NewS3IAMTestFramework(t) - defer framework.Cleanup() - - // Should fall back to mock OIDC - assert.False(t, framework.useKeycloak, "Should fall back to mock OIDC when Keycloak is unavailable") - assert.Nil(t, framework.keycloakClient, "Keycloak client should not be initialized") - assert.NotNil(t, framework.mockOIDC, "Mock OIDC server should be initialized") - - // Test that mock authentication still works - s3Client, err := framework.CreateS3ClientWithJWT("admin-user", "TestAdminRole") - require.NoError(t, err, "Should be able to create S3 client with mock authentication") - - // Basic operation should work - _, err = s3Client.ListBuckets(&s3.ListBucketsInput{}) - // Note: This may still fail due to session store issues, but the client creation should work -} diff --git a/test/s3/iam/setup_all_tests.sh b/test/s3/iam/setup_all_tests.sh deleted file mode 100755 index aaec54691..000000000 --- a/test/s3/iam/setup_all_tests.sh +++ /dev/null @@ -1,212 +0,0 @@ -#!/bin/bash - -# Complete Test Environment Setup Script -# This script sets up all required services and configurations for S3 IAM integration tests - -set -e - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -echo -e "${BLUE}๐Ÿš€ Setting up complete test environment for SeaweedFS S3 IAM...${NC}" -echo -e "${BLUE}==========================================================${NC}" - -# Check prerequisites -check_prerequisites() { - echo -e "${YELLOW}๐Ÿ” Checking prerequisites...${NC}" - - local missing_tools=() - - for tool in docker jq curl; do - if ! command -v "$tool" >/dev/null 2>&1; then - missing_tools+=("$tool") - fi - done - - if [ ${#missing_tools[@]} -gt 0 ]; then - echo -e "${RED}[FAIL] Missing required tools: ${missing_tools[*]}${NC}" - echo -e "${YELLOW}Please install the missing tools and try again${NC}" - exit 1 - fi - - echo -e "${GREEN}[OK] All prerequisites met${NC}" -} - -# Set up Keycloak for OIDC testing -setup_keycloak() { - echo -e "\n${BLUE}1. Setting up Keycloak for OIDC testing...${NC}" - - if ! "${SCRIPT_DIR}/setup_keycloak.sh"; then - echo -e "${RED}[FAIL] Failed to set up Keycloak${NC}" - return 1 - fi - - echo -e "${GREEN}[OK] Keycloak setup completed${NC}" -} - -# Set up SeaweedFS test cluster -setup_seaweedfs_cluster() { - echo -e "\n${BLUE}2. Setting up SeaweedFS test cluster...${NC}" - - # Build SeaweedFS binary if needed - echo -e "${YELLOW}๐Ÿ”ง Building SeaweedFS binary...${NC}" - cd "${SCRIPT_DIR}/../../../" # Go to seaweedfs root - if ! make > /dev/null 2>&1; then - echo -e "${RED}[FAIL] Failed to build SeaweedFS binary${NC}" - return 1 - fi - - cd "${SCRIPT_DIR}" # Return to test directory - - # Clean up any existing test data - echo -e "${YELLOW}๐Ÿงน Cleaning up existing test data...${NC}" - rm -rf test-volume-data/* 2>/dev/null || true - - echo -e "${GREEN}[OK] SeaweedFS cluster setup completed${NC}" -} - -# Set up test data and configurations -setup_test_configurations() { - echo -e "\n${BLUE}3. Setting up test configurations...${NC}" - - # Ensure IAM configuration is properly set up - if [ ! -f "${SCRIPT_DIR}/iam_config.json" ]; then - echo -e "${YELLOW}โš ๏ธ IAM configuration not found, using default config${NC}" - cp "${SCRIPT_DIR}/iam_config.local.json" "${SCRIPT_DIR}/iam_config.json" 2>/dev/null || { - echo -e "${RED}[FAIL] No IAM configuration files found${NC}" - return 1 - } - fi - - # Validate configuration - if ! jq . "${SCRIPT_DIR}/iam_config.json" >/dev/null; then - echo -e "${RED}[FAIL] Invalid IAM configuration JSON${NC}" - return 1 - fi - - echo -e "${GREEN}[OK] Test configurations set up${NC}" -} - -# Verify services are ready -verify_services() { - echo -e "\n${BLUE}4. Verifying services are ready...${NC}" - - # Check if Keycloak is responding - echo -e "${YELLOW}๐Ÿ” Checking Keycloak availability...${NC}" - local keycloak_ready=false - for i in $(seq 1 30); do - if curl -sf "http://localhost:8080/health/ready" >/dev/null 2>&1; then - keycloak_ready=true - break - fi - if curl -sf "http://localhost:8080/realms/master" >/dev/null 2>&1; then - keycloak_ready=true - break - fi - sleep 2 - done - - if [ "$keycloak_ready" = true ]; then - echo -e "${GREEN}[OK] Keycloak is ready${NC}" - else - echo -e "${YELLOW}โš ๏ธ Keycloak may not be fully ready yet${NC}" - echo -e "${YELLOW}This is okay - tests will wait for Keycloak when needed${NC}" - fi - - echo -e "${GREEN}[OK] Service verification completed${NC}" -} - -# Set up environment variables -setup_environment() { - echo -e "\n${BLUE}5. Setting up environment variables...${NC}" - - export ENABLE_DISTRIBUTED_TESTS=true - export ENABLE_PERFORMANCE_TESTS=true - export ENABLE_STRESS_TESTS=true - export KEYCLOAK_URL="http://localhost:8080" - export S3_ENDPOINT="http://localhost:8333" - export TEST_TIMEOUT=60m - export CGO_ENABLED=0 - - # Write environment to a file for other scripts to source - cat > "${SCRIPT_DIR}/.test_env" << EOF -export ENABLE_DISTRIBUTED_TESTS=true -export ENABLE_PERFORMANCE_TESTS=true -export ENABLE_STRESS_TESTS=true -export KEYCLOAK_URL="http://localhost:8080" -export S3_ENDPOINT="http://localhost:8333" -export TEST_TIMEOUT=60m -export CGO_ENABLED=0 -EOF - - echo -e "${GREEN}[OK] Environment variables set${NC}" -} - -# Display setup summary -display_summary() { - echo -e "\n${BLUE}๐Ÿ“Š Setup Summary${NC}" - echo -e "${BLUE}=================${NC}" - echo -e "Keycloak URL: ${KEYCLOAK_URL:-http://localhost:8080}" - echo -e "S3 Endpoint: ${S3_ENDPOINT:-http://localhost:8333}" - echo -e "Test Timeout: ${TEST_TIMEOUT:-60m}" - echo -e "IAM Config: ${SCRIPT_DIR}/iam_config.json" - echo -e "" - echo -e "${GREEN}[OK] Complete test environment setup finished!${NC}" - echo -e "${YELLOW}๐Ÿ’ก You can now run tests with: make run-all-tests${NC}" - echo -e "${YELLOW}๐Ÿ’ก Or run specific tests with: go test -v -timeout=60m -run TestName${NC}" - echo -e "${YELLOW}๐Ÿ’ก To stop Keycloak: docker stop keycloak-iam-test${NC}" -} - -# Main execution -main() { - check_prerequisites - - # Track what was set up for cleanup on failure - local setup_steps=() - - if setup_keycloak; then - setup_steps+=("keycloak") - else - echo -e "${RED}[FAIL] Failed to set up Keycloak${NC}" - exit 1 - fi - - if setup_seaweedfs_cluster; then - setup_steps+=("seaweedfs") - else - echo -e "${RED}[FAIL] Failed to set up SeaweedFS cluster${NC}" - exit 1 - fi - - if setup_test_configurations; then - setup_steps+=("config") - else - echo -e "${RED}[FAIL] Failed to set up test configurations${NC}" - exit 1 - fi - - setup_environment - verify_services - display_summary - - echo -e "${GREEN}๐ŸŽ‰ All setup completed successfully!${NC}" -} - -# Cleanup on script interruption -cleanup() { - echo -e "\n${YELLOW}๐Ÿงน Cleaning up on script interruption...${NC}" - # Note: We don't automatically stop Keycloak as it might be shared - echo -e "${YELLOW}๐Ÿ’ก If you want to stop Keycloak: docker stop keycloak-iam-test${NC}" - exit 1 -} - -trap cleanup INT TERM - -# Execute main function -main "$@" diff --git a/test/s3/iam/setup_keycloak.sh b/test/s3/iam/setup_keycloak.sh deleted file mode 100755 index 14fb08435..000000000 --- a/test/s3/iam/setup_keycloak.sh +++ /dev/null @@ -1,416 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -KEYCLOAK_IMAGE="quay.io/keycloak/keycloak:26.0.7" -CONTAINER_NAME="keycloak-iam-test" -KEYCLOAK_PORT="8080" # Default external port -KEYCLOAK_INTERNAL_PORT="8080" # Internal container port (always 8080) -KEYCLOAK_URL="http://localhost:${KEYCLOAK_PORT}" - -# Realm and test fixtures expected by tests -REALM_NAME="seaweedfs-test" -CLIENT_ID="seaweedfs-s3" -CLIENT_SECRET="seaweedfs-s3-secret" -ROLE_ADMIN="s3-admin" -ROLE_READONLY="s3-read-only" -ROLE_WRITEONLY="s3-write-only" -ROLE_READWRITE="s3-read-write" - -# User credentials (matches Docker setup script logic: removes non-alphabetic chars + "123") -get_user_password() { - case "$1" in - "admin-user") echo "adminuser123" ;; # "admin-user" -> "adminuser123" - "read-user") echo "readuser123" ;; # "read-user" -> "readuser123" - "write-user") echo "writeuser123" ;; # "write-user" -> "writeuser123" - "write-only-user") echo "writeonlyuser123" ;; # "write-only-user" -> "writeonlyuser123" - *) echo "" ;; - esac -} - -# List of users to create -USERS="admin-user read-user write-user write-only-user" - -echo -e "${BLUE}๐Ÿ”ง Setting up Keycloak realm and users for SeaweedFS S3 IAM testing...${NC}" - -ensure_container() { - # Check for any existing Keycloak container and detect its port - local keycloak_containers=$(docker ps --format '{{.Names}}\t{{.Ports}}' | grep -E "(keycloak|quay.io/keycloak)") - - if [[ -n "$keycloak_containers" ]]; then - # Parse the first available Keycloak container - CONTAINER_NAME=$(echo "$keycloak_containers" | head -1 | awk '{print $1}') - - # Extract the external port from the port mapping using sed (compatible with older bash) - local port_mapping=$(echo "$keycloak_containers" | head -1 | awk '{print $2}') - local extracted_port=$(echo "$port_mapping" | sed -n 's/.*:\([0-9]*\)->8080.*/\1/p') - if [[ -n "$extracted_port" ]]; then - KEYCLOAK_PORT="$extracted_port" - KEYCLOAK_URL="http://localhost:${KEYCLOAK_PORT}" - echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}" - return 0 - fi - fi - - # Fallback: check for specific container names - if docker ps --format '{{.Names}}' | grep -q '^keycloak$'; then - CONTAINER_NAME="keycloak" - # Try to detect port for 'keycloak' container using docker port command - local ports=$(docker port keycloak 8080 2>/dev/null | head -1) - if [[ -n "$ports" ]]; then - local extracted_port=$(echo "$ports" | sed -n 's/.*:\([0-9]*\)$/\1/p') - if [[ -n "$extracted_port" ]]; then - KEYCLOAK_PORT="$extracted_port" - KEYCLOAK_URL="http://localhost:${KEYCLOAK_PORT}" - fi - fi - echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}' on port ${KEYCLOAK_PORT}${NC}" - return 0 - fi - if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then - echo -e "${GREEN}[OK] Using existing container '${CONTAINER_NAME}'${NC}" - return 0 - fi - echo -e "${YELLOW}๐Ÿณ Starting Keycloak container (${KEYCLOAK_IMAGE})...${NC}" - docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true - docker run -d --name "${CONTAINER_NAME}" -p "${KEYCLOAK_PORT}:8080" \ - -e KEYCLOAK_ADMIN=admin \ - -e KEYCLOAK_ADMIN_PASSWORD=admin \ - -e KC_HTTP_ENABLED=true \ - -e KC_HOSTNAME_STRICT=false \ - -e KC_HOSTNAME_STRICT_HTTPS=false \ - -e KC_HEALTH_ENABLED=true \ - "${KEYCLOAK_IMAGE}" start-dev >/dev/null -} - -wait_ready() { - echo -e "${YELLOW}โณ Waiting for Keycloak to be ready...${NC}" - for i in $(seq 1 120); do - if curl -sf "${KEYCLOAK_URL}/health/ready" >/dev/null; then - echo -e "${GREEN}[OK] Keycloak health check passed${NC}" - return 0 - fi - if curl -sf "${KEYCLOAK_URL}/realms/master" >/dev/null; then - echo -e "${GREEN}[OK] Keycloak master realm accessible${NC}" - return 0 - fi - sleep 2 - done - echo -e "${RED}[FAIL] Keycloak did not become ready in time${NC}" - exit 1 -} - -kcadm() { - # Always authenticate before each command to ensure context - # Try different admin passwords that might be used in different environments - # GitHub Actions uses "admin", local testing might use "admin123" - local admin_passwords=("admin" "admin123" "password") - local auth_success=false - - for pwd in "${admin_passwords[@]}"; do - if docker exec -i "${CONTAINER_NAME}" /opt/keycloak/bin/kcadm.sh config credentials --server "http://localhost:${KEYCLOAK_INTERNAL_PORT}" --realm master --user admin --password "$pwd" >/dev/null 2>&1; then - auth_success=true - break - fi - done - - if [[ "$auth_success" == false ]]; then - echo -e "${RED}[FAIL] Failed to authenticate with any known admin password${NC}" - return 1 - fi - - docker exec -i "${CONTAINER_NAME}" /opt/keycloak/bin/kcadm.sh "$@" -} - -admin_login() { - # This is now handled by each kcadm() call - echo "Logging into http://localhost:${KEYCLOAK_INTERNAL_PORT} as user admin of realm master" -} - -ensure_realm() { - if kcadm get realms | grep -q "${REALM_NAME}"; then - echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists${NC}" - else - echo -e "${YELLOW}๐Ÿ“ Creating realm '${REALM_NAME}'...${NC}" - if kcadm create realms -s realm="${REALM_NAME}" -s enabled=true 2>/dev/null; then - echo -e "${GREEN}[OK] Realm created${NC}" - else - # Check if it exists now (might have been created by another process) - if kcadm get realms | grep -q "${REALM_NAME}"; then - echo -e "${GREEN}[OK] Realm '${REALM_NAME}' already exists (created concurrently)${NC}" - else - echo -e "${RED}[FAIL] Failed to create realm '${REALM_NAME}'${NC}" - return 1 - fi - fi - fi -} - -ensure_client() { - local id - id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty') - if [[ -n "${id}" ]]; then - echo -e "${GREEN}[OK] Client '${CLIENT_ID}' already exists${NC}" - else - echo -e "${YELLOW}๐Ÿ“ Creating client '${CLIENT_ID}'...${NC}" - kcadm create clients -r "${REALM_NAME}" \ - -s clientId="${CLIENT_ID}" \ - -s protocol=openid-connect \ - -s publicClient=false \ - -s serviceAccountsEnabled=true \ - -s directAccessGrantsEnabled=true \ - -s standardFlowEnabled=true \ - -s implicitFlowEnabled=false \ - -s secret="${CLIENT_SECRET}" >/dev/null - echo -e "${GREEN}[OK] Client created${NC}" - fi - - # Create and configure role mapper for the client - configure_role_mapper "${CLIENT_ID}" -} - -ensure_role() { - local role="$1" - if kcadm get roles -r "${REALM_NAME}" | jq -r '.[].name' | grep -qx "${role}"; then - echo -e "${GREEN}[OK] Role '${role}' exists${NC}" - else - echo -e "${YELLOW}๐Ÿ“ Creating role '${role}'...${NC}" - kcadm create roles -r "${REALM_NAME}" -s name="${role}" >/dev/null - fi -} - -ensure_user() { - local username="$1" password="$2" - local uid - uid=$(kcadm get users -r "${REALM_NAME}" -q username="${username}" | jq -r '.[0].id // empty') - if [[ -z "${uid}" ]]; then - echo -e "${YELLOW}๐Ÿ“ Creating user '${username}'...${NC}" - uid=$(kcadm create users -r "${REALM_NAME}" \ - -s username="${username}" \ - -s enabled=true \ - -s email="${username}@seaweedfs.test" \ - -s emailVerified=true \ - -s firstName="${username}" \ - -s lastName="User" \ - -i) - else - echo -e "${GREEN}[OK] User '${username}' exists${NC}" - fi - echo -e "${YELLOW}๐Ÿ”‘ Setting password for '${username}'...${NC}" - kcadm set-password -r "${REALM_NAME}" --userid "${uid}" --new-password "${password}" --temporary=false >/dev/null -} - -assign_role() { - local username="$1" role="$2" - local uid rid - uid=$(kcadm get users -r "${REALM_NAME}" -q username="${username}" | jq -r '.[0].id') - rid=$(kcadm get roles -r "${REALM_NAME}" | jq -r ".[] | select(.name==\"${role}\") | .id") - # Check if role already assigned - if kcadm get "users/${uid}/role-mappings/realm" -r "${REALM_NAME}" | jq -r '.[].name' | grep -qx "${role}"; then - echo -e "${GREEN}[OK] User '${username}' already has role '${role}'${NC}" - return 0 - fi - echo -e "${YELLOW}โž• Assigning role '${role}' to '${username}'...${NC}" - kcadm add-roles -r "${REALM_NAME}" --uid "${uid}" --rolename "${role}" >/dev/null -} - -configure_role_mapper() { - echo -e "${YELLOW}๐Ÿ”ง Configuring role mapper for client '${CLIENT_ID}'...${NC}" - - # Get client's internal ID - local internal_id - internal_id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty') - - if [[ -z "${internal_id}" ]]; then - echo -e "${RED}[FAIL] Could not find client ${client_id} to configure role mapper${NC}" - return 1 - fi - - # Check if a realm roles mapper already exists for this client - local existing_mapper - existing_mapper=$(kcadm get "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" | jq -r '.[] | select(.name=="realm roles" and .protocolMapper=="oidc-usermodel-realm-role-mapper") | .id // empty') - - if [[ -n "${existing_mapper}" ]]; then - echo -e "${GREEN}[OK] Realm roles mapper already exists${NC}" - else - echo -e "${YELLOW}๐Ÿ“ Creating realm roles mapper...${NC}" - - # Create protocol mapper for realm roles - kcadm create "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" \ - -s name="realm roles" \ - -s protocol="openid-connect" \ - -s protocolMapper="oidc-usermodel-realm-role-mapper" \ - -s consentRequired=false \ - -s 'config."multivalued"=true' \ - -s 'config."userinfo.token.claim"=true' \ - -s 'config."id.token.claim"=true' \ - -s 'config."access.token.claim"=true' \ - -s 'config."claim.name"=roles' \ - -s 'config."jsonType.label"=String' >/dev/null || { - echo -e "${RED}[FAIL] Failed to create realm roles mapper${NC}" - return 1 - } - - echo -e "${GREEN}[OK] Realm roles mapper created${NC}" - fi -} - -configure_audience_mapper() { - echo -e "${YELLOW}๐Ÿ”ง Configuring audience mapper for client '${CLIENT_ID}'...${NC}" - - # Get client's internal ID - local internal_id - internal_id=$(kcadm get clients -r "${REALM_NAME}" -q clientId="${CLIENT_ID}" | jq -r '.[0].id // empty') - - if [[ -z "${internal_id}" ]]; then - echo -e "${RED}[FAIL] Could not find client ${CLIENT_ID} to configure audience mapper${NC}" - return 1 - fi - - # Check if an audience mapper already exists for this client - local existing_mapper - existing_mapper=$(kcadm get "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" | jq -r '.[] | select(.name=="audience-mapper" and .protocolMapper=="oidc-audience-mapper") | .id // empty') - - if [[ -n "${existing_mapper}" ]]; then - echo -e "${GREEN}[OK] Audience mapper already exists${NC}" - else - echo -e "${YELLOW}๐Ÿ“ Creating audience mapper...${NC}" - - # Create protocol mapper for audience - kcadm create "clients/${internal_id}/protocol-mappers/models" -r "${REALM_NAME}" \ - -s name="audience-mapper" \ - -s protocol="openid-connect" \ - -s protocolMapper="oidc-audience-mapper" \ - -s consentRequired=false \ - -s 'config."included.client.audience"='"${CLIENT_ID}" \ - -s 'config."id.token.claim"=false' \ - -s 'config."access.token.claim"=true' >/dev/null || { - echo -e "${RED}[FAIL] Failed to create audience mapper${NC}" - return 1 - } - - echo -e "${GREEN}[OK] Audience mapper created${NC}" - fi -} - -main() { - command -v docker >/dev/null || { echo -e "${RED}[FAIL] Docker is required${NC}"; exit 1; } - command -v jq >/dev/null || { echo -e "${RED}[FAIL] jq is required${NC}"; exit 1; } - - ensure_container - echo "Keycloak URL: ${KEYCLOAK_URL}" - wait_ready - admin_login - ensure_realm - ensure_client - configure_role_mapper - configure_audience_mapper - ensure_role "${ROLE_ADMIN}" - ensure_role "${ROLE_READONLY}" - ensure_role "${ROLE_WRITEONLY}" - ensure_role "${ROLE_READWRITE}" - - for u in $USERS; do - ensure_user "$u" "$(get_user_password "$u")" - done - - assign_role admin-user "${ROLE_ADMIN}" - assign_role read-user "${ROLE_READONLY}" - assign_role write-user "${ROLE_READWRITE}" - - # Also create a dedicated write-only user for testing - ensure_user write-only-user "$(get_user_password write-only-user)" - assign_role write-only-user "${ROLE_WRITEONLY}" - - # Copy the appropriate IAM configuration for this environment - setup_iam_config - - # Validate the setup by testing authentication and role inclusion - echo -e "${YELLOW}๐Ÿ” Validating setup by testing admin-user authentication and role mapping...${NC}" - sleep 2 - - local validation_result=$(curl -s -w "%{http_code}" -X POST "http://localhost:${KEYCLOAK_PORT}/realms/${REALM_NAME}/protocol/openid-connect/token" \ - -H "Content-Type: application/x-www-form-urlencoded" \ - -d "grant_type=password" \ - -d "client_id=${CLIENT_ID}" \ - -d "client_secret=${CLIENT_SECRET}" \ - -d "username=admin-user" \ - -d "password=adminuser123" \ - -d "scope=openid profile email" \ - -o /tmp/auth_test_response.json) - - if [[ "${validation_result: -3}" == "200" ]]; then - echo -e "${GREEN}[OK] Authentication validation successful${NC}" - - # Extract and decode JWT token to check for roles - local access_token=$(cat /tmp/auth_test_response.json | jq -r '.access_token // empty') - if [[ -n "${access_token}" ]]; then - # Decode JWT payload (second part) and check for roles - local payload=$(echo "${access_token}" | cut -d'.' -f2) - # Add padding if needed for base64 decode - while [[ $((${#payload} % 4)) -ne 0 ]]; do - payload="${payload}=" - done - - local decoded=$(echo "${payload}" | base64 -d 2>/dev/null || echo "{}") - local roles=$(echo "${decoded}" | jq -r '.roles // empty' 2>/dev/null || echo "") - - if [[ -n "${roles}" && "${roles}" != "null" ]]; then - echo -e "${GREEN}[OK] JWT token includes roles: ${roles}${NC}" - else - echo -e "${YELLOW}โš ๏ธ JWT token does not include 'roles' claim${NC}" - echo -e "${YELLOW}Decoded payload sample:${NC}" - echo "${decoded}" | jq '.' 2>/dev/null || echo "${decoded}" - fi - fi - else - echo -e "${RED}[FAIL] Authentication validation failed with HTTP ${validation_result: -3}${NC}" - echo -e "${YELLOW}Response body:${NC}" - cat /tmp/auth_test_response.json 2>/dev/null || echo "No response body" - echo -e "${YELLOW}This may indicate a setup issue that needs to be resolved${NC}" - fi - rm -f /tmp/auth_test_response.json - - echo -e "${GREEN}[OK] Keycloak test realm '${REALM_NAME}' configured${NC}" -} - -setup_iam_config() { - echo -e "${BLUE}๐Ÿ”ง Setting up IAM configuration for detected environment${NC}" - - # Change to script directory to ensure config files are found - local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - cd "$script_dir" - - # Choose the appropriate config based on detected port - local config_source - if [[ "${KEYCLOAK_PORT}" == "8080" ]]; then - config_source="iam_config.github.json" - echo " Using GitHub Actions configuration (port 8080)" - else - config_source="iam_config.local.json" - echo " Using local development configuration (port ${KEYCLOAK_PORT})" - fi - - # Verify source config exists - if [[ ! -f "$config_source" ]]; then - echo -e "${RED}[FAIL] Config file $config_source not found in $script_dir${NC}" - exit 1 - fi - - # Copy the appropriate config - cp "$config_source" "iam_config.json" - - local detected_issuer=$(cat iam_config.json | jq -r '.providers[] | select(.name=="keycloak") | .config.issuer') - echo -e "${GREEN}[OK] IAM configuration set successfully${NC}" - echo " - Using config: $config_source" - echo " - Keycloak issuer: $detected_issuer" -} - -main "$@" diff --git a/test/s3/iam/setup_keycloak_docker.sh b/test/s3/iam/setup_keycloak_docker.sh deleted file mode 100755 index 6dce68abf..000000000 --- a/test/s3/iam/setup_keycloak_docker.sh +++ /dev/null @@ -1,419 +0,0 @@ -#!/bin/bash -set -e - -# Keycloak configuration for Docker environment -KEYCLOAK_URL="http://keycloak:8080" -KEYCLOAK_ADMIN_USER="admin" -KEYCLOAK_ADMIN_PASSWORD="admin" -REALM_NAME="seaweedfs-test" -CLIENT_ID="seaweedfs-s3" -CLIENT_SECRET="seaweedfs-s3-secret" - -echo "๐Ÿ”ง Setting up Keycloak realm and users for SeaweedFS S3 IAM testing..." -echo "Keycloak URL: $KEYCLOAK_URL" - -# Wait for Keycloak to be ready -echo "โณ Waiting for Keycloak to be ready..." -timeout 120 bash -c ' - until curl -f "$0/health/ready" > /dev/null 2>&1; do - echo "Waiting for Keycloak..." - sleep 5 - done - echo "[OK] Keycloak health check passed" -' "$KEYCLOAK_URL" - -# Download kcadm.sh if not available -if ! command -v kcadm.sh &> /dev/null; then - echo "๐Ÿ“ฅ Downloading Keycloak admin CLI..." - wget -q https://github.com/keycloak/keycloak/releases/download/26.0.7/keycloak-26.0.7.tar.gz - tar -xzf keycloak-26.0.7.tar.gz - export PATH="$PWD/keycloak-26.0.7/bin:$PATH" -fi - -# Wait a bit more for admin user initialization -echo "โณ Waiting for admin user to be fully initialized..." -sleep 10 - -# Function to execute kcadm commands with retry and multiple password attempts -kcadm() { - local max_retries=3 - local retry_count=0 - local passwords=("admin" "admin123" "password") - - while [ $retry_count -lt $max_retries ]; do - for password in "${passwords[@]}"; do - if kcadm.sh "$@" --server "$KEYCLOAK_URL" --realm master --user "$KEYCLOAK_ADMIN_USER" --password "$password" 2>/dev/null; then - return 0 - fi - done - retry_count=$((retry_count + 1)) - echo "๐Ÿ”„ Retry $retry_count of $max_retries..." - sleep 5 - done - - echo "[FAIL] Failed to execute kcadm command after $max_retries retries" - return 1 -} - -# Create realm -echo "๐Ÿ“ Creating realm '$REALM_NAME'..." -kcadm create realms -s realm="$REALM_NAME" -s enabled=true || echo "Realm may already exist" -echo "[OK] Realm created" - -# Create OIDC client -echo "๐Ÿ“ Creating client '$CLIENT_ID'..." -CLIENT_UUID=$(kcadm create clients -r "$REALM_NAME" \ - -s clientId="$CLIENT_ID" \ - -s secret="$CLIENT_SECRET" \ - -s enabled=true \ - -s serviceAccountsEnabled=true \ - -s standardFlowEnabled=true \ - -s directAccessGrantsEnabled=true \ - -s 'redirectUris=["*"]' \ - -s 'webOrigins=["*"]' \ - -i 2>/dev/null || echo "existing-client") - -if [ "$CLIENT_UUID" != "existing-client" ]; then - echo "[OK] Client created with ID: $CLIENT_UUID" -else - echo "[OK] Using existing client" - CLIENT_UUID=$(kcadm get clients -r "$REALM_NAME" -q clientId="$CLIENT_ID" --fields id --format csv --noquotes | tail -n +2) -fi - -# Configure protocol mapper for roles -echo "๐Ÿ”ง Configuring role mapper for client '$CLIENT_ID'..." -MAPPER_CONFIG='{ - "protocol": "openid-connect", - "protocolMapper": "oidc-usermodel-realm-role-mapper", - "name": "realm-roles", - "config": { - "claim.name": "roles", - "jsonType.label": "String", - "multivalued": "true", - "usermodel.realmRoleMapping.rolePrefix": "" - } -}' - -kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$MAPPER_CONFIG" 2>/dev/null || echo "[OK] Role mapper already exists" -echo "[OK] Realm roles mapper configured" - -# Configure audience mapper to ensure JWT tokens have correct audience claim -echo "๐Ÿ”ง Configuring audience mapper for client '$CLIENT_ID'..." -AUDIENCE_MAPPER_CONFIG='{ - "protocol": "openid-connect", - "protocolMapper": "oidc-audience-mapper", - "name": "audience-mapper", - "config": { - "included.client.audience": "'$CLIENT_ID'", - "id.token.claim": "false", - "access.token.claim": "true" - } -}' - -kcadm create clients/"$CLIENT_UUID"/protocol-mappers/models -r "$REALM_NAME" -b "$AUDIENCE_MAPPER_CONFIG" 2>/dev/null || echo "[OK] Audience mapper already exists" -echo "[OK] Audience mapper configured" - -# Create realm roles -echo "๐Ÿ“ Creating realm roles..." -for role in "s3-admin" "s3-read-only" "s3-write-only" "s3-read-write"; do - kcadm create roles -r "$REALM_NAME" -s name="$role" 2>/dev/null || echo "Role $role may already exist" -done - -# Create users with roles -declare -A USERS=( - ["admin-user"]="s3-admin" - ["read-user"]="s3-read-only" - ["write-user"]="s3-read-write" - ["write-only-user"]="s3-write-only" -) - -for username in "${!USERS[@]}"; do - role="${USERS[$username]}" - password="${username//[^a-zA-Z]/}123" # e.g., "admin-user" -> "adminuser123" - - echo "๐Ÿ“ Creating user '$username'..." - kcadm create users -r "$REALM_NAME" \ - -s username="$username" \ - -s enabled=true \ - -s firstName="Test" \ - -s lastName="User" \ - -s email="$username@test.com" 2>/dev/null || echo "User $username may already exist" - - echo "๐Ÿ”‘ Setting password for '$username'..." - kcadm set-password -r "$REALM_NAME" --username "$username" --new-password "$password" - - echo "โž• Assigning role '$role' to '$username'..." - kcadm add-roles -r "$REALM_NAME" --uusername "$username" --rolename "$role" -done - -# Create IAM configuration for Docker environment -echo "๐Ÿ”ง Setting up IAM configuration for Docker environment..." -cat > iam_config.json << 'EOF' -{ - "sts": { - "tokenDuration": "1h", - "maxSessionLength": "12h", - "issuer": "seaweedfs-sts", - "signingKey": "dGVzdC1zaWduaW5nLWtleS0zMi1jaGFyYWN0ZXJzLWxvbmc=" - }, - "providers": [ - { - "name": "keycloak", - "type": "oidc", - "enabled": true, - "config": { - "issuer": "http://keycloak:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "clientSecret": "seaweedfs-s3-secret", - "jwksUri": "http://keycloak:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - "userInfoUri": "http://keycloak:8080/realms/seaweedfs-test/protocol/openid-connect/userinfo", - "scopes": ["openid", "profile", "email"], - "claimsMapping": { - "username": "preferred_username", - "email": "email", - "name": "name" - }, - "roleMapping": { - "rules": [ - { - "claim": "roles", - "value": "s3-admin", - "role": "arn:seaweed:iam::role/KeycloakAdminRole" - }, - { - "claim": "roles", - "value": "s3-read-only", - "role": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - }, - { - "claim": "roles", - "value": "s3-write-only", - "role": "arn:seaweed:iam::role/KeycloakWriteOnlyRole" - }, - { - "claim": "roles", - "value": "s3-read-write", - "role": "arn:seaweed:iam::role/KeycloakReadWriteRole" - } - ], - "defaultRole": "arn:seaweed:iam::role/KeycloakReadOnlyRole" - } - } - } - ], - "policy": { - "defaultEffect": "Deny" - }, - "roles": [ - { - "roleName": "KeycloakAdminRole", - "roleArn": "arn:seaweed:iam::role/KeycloakAdminRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Admin role for Keycloak users" - }, - { - "roleName": "KeycloakReadOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only role for Keycloak users" - }, - { - "roleName": "KeycloakWriteOnlyRole", - "roleArn": "arn:seaweed:iam::role/KeycloakWriteOnlyRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only role for Keycloak users" - }, - { - "roleName": "KeycloakReadWriteRole", - "roleArn": "arn:seaweed:iam::role/KeycloakReadWriteRole", - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "keycloak" - }, - "Action": ["sts:AssumeRoleWithWebIdentity"] - } - ] - }, - "attachedPolicies": ["S3ReadWritePolicy"], - "description": "Read-write role for Keycloak users" - } - ], - "policies": [ - { - "name": "S3AdminPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3WriteOnlyPolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Deny", - "Action": [ - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - }, - { - "name": "S3ReadWritePolicy", - "document": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - }, - { - "Effect": "Allow", - "Action": ["sts:ValidateSession"], - "Resource": ["*"] - } - ] - } - } - ] -} -EOF - -# Validate setup by testing authentication -echo "๐Ÿ” Validating setup by testing admin-user authentication and role mapping..." -KEYCLOAK_TOKEN_URL="http://keycloak:8080/realms/$REALM_NAME/protocol/openid-connect/token" - -# Get access token for admin-user -ACCESS_TOKEN=$(curl -s -X POST "$KEYCLOAK_TOKEN_URL" \ - -H "Content-Type: application/x-www-form-urlencoded" \ - -d "grant_type=password" \ - -d "client_id=$CLIENT_ID" \ - -d "client_secret=$CLIENT_SECRET" \ - -d "username=admin-user" \ - -d "password=adminuser123" \ - -d "scope=openid profile email" | jq -r '.access_token') - -if [ "$ACCESS_TOKEN" = "null" ] || [ -z "$ACCESS_TOKEN" ]; then - echo "[FAIL] Failed to obtain access token" - exit 1 -fi - -echo "[OK] Authentication validation successful" - -# Decode and check JWT claims -PAYLOAD=$(echo "$ACCESS_TOKEN" | cut -d'.' -f2) -# Add padding for base64 decode -while [ $((${#PAYLOAD} % 4)) -ne 0 ]; do - PAYLOAD="${PAYLOAD}=" -done - -CLAIMS=$(echo "$PAYLOAD" | base64 -d 2>/dev/null | jq .) -ROLES=$(echo "$CLAIMS" | jq -r '.roles[]?') - -if [ -n "$ROLES" ]; then - echo "[OK] JWT token includes roles: [$(echo "$ROLES" | tr '\n' ',' | sed 's/,$//' | sed 's/,/, /g')]" -else - echo "โš ๏ธ No roles found in JWT token" -fi - -echo "[OK] Keycloak test realm '$REALM_NAME' configured for Docker environment" -echo "๐Ÿณ Setup complete! You can now run: docker-compose up -d" diff --git a/test/s3/iam/test_config.json b/test/s3/iam/test_config.json deleted file mode 100644 index d2f1fb09e..000000000 --- a/test/s3/iam/test_config.json +++ /dev/null @@ -1,321 +0,0 @@ -{ - "identities": [ - { - "name": "testuser", - "credentials": [ - { - "accessKey": "test-access-key", - "secretKey": "test-secret-key" - } - ], - "actions": ["Admin"] - }, - { - "name": "readonlyuser", - "credentials": [ - { - "accessKey": "readonly-access-key", - "secretKey": "readonly-secret-key" - } - ], - "actions": ["Read"] - }, - { - "name": "writeonlyuser", - "credentials": [ - { - "accessKey": "writeonly-access-key", - "secretKey": "writeonly-secret-key" - } - ], - "actions": ["Write"] - } - ], - "iam": { - "enabled": true, - "sts": { - "tokenDuration": "15m", - "issuer": "seaweedfs-sts", - "signingKey": "test-sts-signing-key-for-integration-tests" - }, - "policy": { - "defaultEffect": "Deny" - }, - "providers": { - "oidc": { - "test-oidc": { - "issuer": "http://localhost:8080/.well-known/openid_configuration", - "clientId": "test-client-id", - "jwksUri": "http://localhost:8080/jwks", - "userInfoUri": "http://localhost:8080/userinfo", - "roleMapping": { - "rules": [ - { - "claim": "groups", - "claimValue": "admins", - "roleName": "S3AdminRole" - }, - { - "claim": "groups", - "claimValue": "users", - "roleName": "S3ReadOnlyRole" - }, - { - "claim": "groups", - "claimValue": "writers", - "roleName": "S3WriteOnlyRole" - } - ] - }, - "claimsMapping": { - "email": "email", - "displayName": "name", - "groups": "groups" - } - } - }, - "ldap": { - "test-ldap": { - "server": "ldap://localhost:389", - "baseDN": "dc=example,dc=com", - "bindDN": "cn=admin,dc=example,dc=com", - "bindPassword": "admin-password", - "userFilter": "(uid=%s)", - "groupFilter": "(memberUid=%s)", - "attributes": { - "email": "mail", - "displayName": "cn", - "groups": "memberOf" - }, - "roleMapping": { - "rules": [ - { - "claim": "groups", - "claimValue": "cn=admins,ou=groups,dc=example,dc=com", - "roleName": "S3AdminRole" - }, - { - "claim": "groups", - "claimValue": "cn=users,ou=groups,dc=example,dc=com", - "roleName": "S3ReadOnlyRole" - } - ] - } - } - } - }, - "policyStore": {} - }, - "roles": { - "S3AdminRole": { - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": ["test-oidc", "test-ldap"] - }, - "Action": "sts:AssumeRoleWithWebIdentity" - } - ] - }, - "attachedPolicies": ["S3AdminPolicy"], - "description": "Full administrative access to S3 resources" - }, - "S3ReadOnlyRole": { - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": ["test-oidc", "test-ldap"] - }, - "Action": "sts:AssumeRoleWithWebIdentity" - } - ] - }, - "attachedPolicies": ["S3ReadOnlyPolicy"], - "description": "Read-only access to S3 resources" - }, - "S3WriteOnlyRole": { - "trustPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": ["test-oidc", "test-ldap"] - }, - "Action": "sts:AssumeRoleWithWebIdentity" - } - ] - }, - "attachedPolicies": ["S3WriteOnlyPolicy"], - "description": "Write-only access to S3 resources" - } - }, - "policies": { - "S3AdminPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - }, - "S3ReadOnlyPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:GetObjectVersion", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - "s3:GetBucketVersioning" - ], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ] - } - ] - }, - "S3WriteOnlyPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:InitiateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploadParts" - ], - "Resource": [ - "arn:seaweed:s3:::*/*" - ] - } - ] - }, - "S3BucketManagementPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:CreateBucket", - "s3:DeleteBucket", - "s3:GetBucketPolicy", - "s3:PutBucketPolicy", - "s3:DeleteBucketPolicy", - "s3:GetBucketVersioning", - "s3:PutBucketVersioning" - ], - "Resource": [ - "arn:seaweed:s3:::*" - ] - } - ] - }, - "S3IPRestrictedPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:*"], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ], - "Condition": { - "IpAddress": { - "aws:SourceIp": ["192.168.1.0/24", "10.0.0.0/8"] - } - } - } - ] - }, - "S3TimeBasedPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:GetObject", "s3:ListBucket"], - "Resource": [ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*" - ], - "Condition": { - "DateGreaterThan": { - "aws:CurrentTime": "2023-01-01T00:00:00Z" - }, - "DateLessThan": { - "aws:CurrentTime": "2025-12-31T23:59:59Z" - } - } - } - ] - } - }, - "bucketPolicyExamples": { - "PublicReadPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "PublicReadGetObject", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:seaweed:s3:::example-bucket/*" - } - ] - }, - "DenyDeletePolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "DenyDeleteOperations", - "Effect": "Deny", - "Principal": "*", - "Action": ["s3:DeleteObject", "s3:DeleteBucket"], - "Resource": [ - "arn:seaweed:s3:::example-bucket", - "arn:seaweed:s3:::example-bucket/*" - ] - } - ] - }, - "IPRestrictedAccessPolicy": { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "IPRestrictedAccess", - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": "arn:seaweed:s3:::example-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": ["203.0.113.0/24"] - } - } - } - ] - } - } -} diff --git a/test/s3/multipart/aws_upload.go b/test/s3/multipart/aws_upload.go index fbb1cb879..0553bd403 100644 --- a/test/s3/multipart/aws_upload.go +++ b/test/s3/multipart/aws_upload.go @@ -108,6 +108,7 @@ func main() { fmt.Printf("part %d: %v\n", i, part) } + completeResponse, err := completeMultipartUpload(svc, resp, completedParts) if err != nil { fmt.Println(err.Error()) diff --git a/test/s3/retention/Makefile b/test/s3/retention/Makefile deleted file mode 100644 index 092d2caac..000000000 --- a/test/s3/retention/Makefile +++ /dev/null @@ -1,360 +0,0 @@ -# S3 API Retention Test Makefile -# This Makefile provides comprehensive targets for running S3 retention tests - -.PHONY: help build-weed setup-server start-server stop-server test-retention test-retention-quick test-retention-comprehensive test-retention-worm test-all clean logs check-deps - -# Configuration -WEED_BINARY := ../../../weed/weed_binary -S3_PORT := 8333 -MASTER_PORT := 9333 -VOLUME_PORT := 8080 -FILER_PORT := 8888 -TEST_TIMEOUT := 15m -TEST_PATTERN := TestRetention - -# Default target -help: - @echo "S3 API Retention Test Makefile" - @echo "" - @echo "Available targets:" - @echo " help - Show this help message" - @echo " build-weed - Build the SeaweedFS binary" - @echo " check-deps - Check dependencies and build binary if needed" - @echo " start-server - Start SeaweedFS server for testing" - @echo " start-server-simple - Start server without process cleanup (for CI)" - @echo " stop-server - Stop SeaweedFS server" - @echo " test-retention - Run all retention tests" - @echo " test-retention-quick - Run core retention tests only" - @echo " test-retention-simple - Run tests without server management" - @echo " test-retention-comprehensive - Run comprehensive retention tests" - @echo " test-retention-worm - Run WORM integration tests" - @echo " test-all - Run all S3 API retention tests" - @echo " test-with-server - Start server, run tests, stop server" - @echo " logs - Show server logs" - @echo " clean - Clean up test artifacts and stop server" - @echo " health-check - Check if server is accessible" - @echo "" - @echo "Configuration:" - @echo " S3_PORT=${S3_PORT}" - @echo " TEST_TIMEOUT=${TEST_TIMEOUT}" - -# Build the SeaweedFS binary -build-weed: - @echo "Building SeaweedFS binary..." - @cd ../../../weed && go build -o weed_binary . - @chmod +x $(WEED_BINARY) - @echo "โœ… SeaweedFS binary built at $(WEED_BINARY)" - -check-deps: build-weed - @echo "Checking dependencies..." - @echo "๐Ÿ” DEBUG: Checking Go installation..." - @command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1) - @echo "๐Ÿ” DEBUG: Go version: $$(go version)" - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)..." - @test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1) - @echo "๐Ÿ” DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')" - @echo "๐Ÿ” DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')" - @echo "๐Ÿ” DEBUG: Checking Go module dependencies..." - @go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1) - @go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1) - @echo "โœ… All dependencies are available" - -# Start SeaweedFS server for testing -start-server: check-deps - @echo "Starting SeaweedFS server..." - @echo "๐Ÿ” DEBUG: Current working directory: $$(pwd)" - @echo "๐Ÿ” DEBUG: Checking for existing weed processes..." - @ps aux | grep weed | grep -v grep || echo "No existing weed processes found" - @echo "๐Ÿ” DEBUG: Cleaning up any existing PID file..." - @rm -f weed-server.pid - @echo "๐Ÿ” DEBUG: Checking for port conflicts..." - @if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \ - echo "โš ๏ธ Port $(S3_PORT) is already in use, trying to find the process..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \ - else \ - echo "โœ… Port $(S3_PORT) is available"; \ - fi - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)" - @ls -la $(WEED_BINARY) || (echo "โŒ Binary not found!" && exit 1) - @echo "๐Ÿ” DEBUG: Checking config file at ../../../docker/compose/s3.json" - @ls -la ../../../docker/compose/s3.json || echo "โš ๏ธ Config file not found, continuing without it" - @echo "๐Ÿ” DEBUG: Creating volume directory..." - @mkdir -p ./test-volume-data - @echo "๐Ÿ” DEBUG: Launching SeaweedFS server in background..." - @echo "๐Ÿ” DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324" - @$(WEED_BINARY) server \ - -debug \ - -s3 \ - -s3.port=$(S3_PORT) \ - -s3.allowEmptyFolder=false \ - -s3.allowDeleteBucketNotEmpty=true \ - -s3.config=../../../docker/compose/s3.json \ - -filer \ - -filer.maxMB=64 \ - -master.volumeSizeLimitMB=50 \ - -volume.max=100 \ - -dir=./test-volume-data \ - -volume.preStopSeconds=1 \ - -metricsPort=9324 \ - > weed-test.log 2>&1 & echo $$! > weed-server.pid - @echo "๐Ÿ” DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')" - @echo "๐Ÿ” DEBUG: Checking if PID is still running..." - @sleep 2 - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - ps -p $$SERVER_PID || echo "โš ๏ธ Server PID $$SERVER_PID not found after 2 seconds"; \ - else \ - echo "โš ๏ธ PID file not found"; \ - fi - @echo "๐Ÿ” DEBUG: Waiting for server to start (up to 90 seconds)..." - @for i in $$(seq 1 90); do \ - echo "๐Ÿ” DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \ - if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ - echo "โœ… SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \ - exit 0; \ - fi; \ - if [ $$i -eq 5 ]; then \ - echo "๐Ÿ” DEBUG: After 5 seconds, checking process and logs..."; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - if [ -f weed-test.log ]; then \ - echo "=== First server logs ==="; \ - head -20 weed-test.log; \ - fi; \ - fi; \ - if [ $$i -eq 15 ]; then \ - echo "๐Ÿ” DEBUG: After 15 seconds, checking port bindings..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \ - netstat -tlnp 2>/dev/null | grep 9333 || echo "Port 9333 not bound"; \ - netstat -tlnp 2>/dev/null | grep 8080 || echo "Port 8080 not bound"; \ - fi; \ - if [ $$i -eq 30 ]; then \ - echo "โš ๏ธ Server taking longer than expected (30s), checking logs..."; \ - if [ -f weed-test.log ]; then \ - echo "=== Recent server logs ==="; \ - tail -20 weed-test.log; \ - fi; \ - fi; \ - sleep 1; \ - done; \ - echo "โŒ Server failed to start within 90 seconds"; \ - echo "๐Ÿ” DEBUG: Final process check:"; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - echo "๐Ÿ” DEBUG: Final port check:"; \ - netstat -tlnp 2>/dev/null | grep -E "(8333|9333|8080)" || echo "No ports bound"; \ - echo "=== Full server logs ==="; \ - if [ -f weed-test.log ]; then \ - cat weed-test.log; \ - else \ - echo "No log file found"; \ - fi; \ - exit 1 - -# Stop SeaweedFS server -stop-server: - @echo "Stopping SeaweedFS server..." - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - echo "Killing server PID $$SERVER_PID"; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - kill -TERM $$SERVER_PID 2>/dev/null || true; \ - sleep 2; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - echo "Process still running, sending KILL signal..."; \ - kill -KILL $$SERVER_PID 2>/dev/null || true; \ - sleep 1; \ - fi; \ - else \ - echo "Process $$SERVER_PID not found (already stopped)"; \ - fi; \ - rm -f weed-server.pid; \ - else \ - echo "No PID file found, checking for running processes..."; \ - echo "โš ๏ธ Skipping automatic process cleanup to avoid CI issues"; \ - echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \ - fi - @echo "โœ… SeaweedFS server stopped" - -# Show server logs -logs: - @if test -f weed-test.log; then \ - echo "=== SeaweedFS Server Logs ==="; \ - tail -f weed-test.log; \ - else \ - echo "No log file found. Server may not be running."; \ - fi - -# Core retention tests (basic functionality) -test-retention-quick: check-deps - @echo "Running core S3 retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow|TestRetentionModeCompliance|TestLegalHoldWorkflow" . - @echo "โœ… Core retention tests completed" - -# All retention tests (comprehensive) -test-retention: check-deps - @echo "Running all S3 retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . - @echo "โœ… All retention tests completed" - -# WORM integration tests -test-retention-worm: check-deps - @echo "Running WORM integration tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORM|TestRetentionExtendedAttributes|TestRetentionConcurrentOperations" . - @echo "โœ… WORM integration tests completed" - -# Comprehensive retention tests (all features) -test-retention-comprehensive: check-deps - @echo "Running comprehensive S3 retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetention|TestObjectLock|TestLegalHold|TestWORM" . - @echo "โœ… Comprehensive retention tests completed" - -# All tests without server management -test-retention-simple: check-deps - @echo "Running retention tests (assuming server is already running)..." - @go test -v -timeout=$(TEST_TIMEOUT) . - @echo "โœ… All retention tests completed" - -# Start server, run tests, stop server -test-with-server: start-server - @echo "Running retention tests with managed server..." - @sleep 5 # Give server time to fully start - @make test-retention-comprehensive || (echo "Tests failed, stopping server..." && make stop-server && exit 1) - @make stop-server - @echo "โœ… All tests completed with managed server" - -# Health check -health-check: - @echo "Checking server health..." - @if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ - echo "โœ… Server is accessible on port $(S3_PORT)"; \ - else \ - echo "โŒ Server is not accessible on port $(S3_PORT)"; \ - exit 1; \ - fi - -# Clean up -clean: - @echo "Cleaning up test artifacts..." - @make stop-server - @rm -f weed-test.log - @rm -f weed-server.pid - @rm -rf ./test-volume-data - @echo "โœ… Cleanup completed" - -# Individual test targets for specific functionality -test-basic-retention: - @echo "Running basic retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" . - -test-compliance-retention: - @echo "Running compliance retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionModeCompliance" . - -test-legal-hold: - @echo "Running legal hold tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestLegalHoldWorkflow" . - -test-object-lock-config: - @echo "Running object lock configuration tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestObjectLockConfiguration" . - -test-retention-versions: - @echo "Running retention with versions tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithVersions" . - -test-retention-combination: - @echo "Running retention and legal hold combination tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionAndLegalHoldCombination" . - -test-expired-retention: - @echo "Running expired retention tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestExpiredRetention" . - -test-retention-errors: - @echo "Running retention error case tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionErrorCases" . - -# WORM-specific test targets -test-worm-integration: - @echo "Running WORM integration tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMRetentionIntegration" . - -test-worm-legacy: - @echo "Running WORM legacy compatibility tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestWORMLegacyCompatibility" . - -test-retention-overwrite: - @echo "Running retention overwrite protection tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionOverwriteProtection" . - -test-retention-bulk: - @echo "Running retention bulk operations tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBulkOperations" . - -test-retention-multipart: - @echo "Running retention multipart upload tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionWithMultipartUpload" . - -test-retention-extended-attrs: - @echo "Running retention extended attributes tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionExtendedAttributes" . - -test-retention-defaults: - @echo "Running retention bucket defaults tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionBucketDefaults" . - -test-retention-concurrent: - @echo "Running retention concurrent operations tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestRetentionConcurrentOperations" . - -# Development targets -dev-start: start-server - @echo "Development server started. Access S3 API at http://localhost:$(S3_PORT)" - @echo "To stop: make stop-server" - -dev-test: check-deps - @echo "Running tests in development mode..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestBasicRetentionWorkflow" . - -# CI targets -ci-test: check-deps - @echo "Running tests in CI mode..." - @go test -v -timeout=$(TEST_TIMEOUT) -race . - -# All targets -test-all: test-retention test-retention-worm - @echo "โœ… All S3 retention tests completed" - -# Benchmark targets -benchmark-retention: - @echo "Running retention performance benchmarks..." - @go test -v -timeout=$(TEST_TIMEOUT) -bench=. -benchmem . - -# Coverage targets -coverage: - @echo "Running tests with coverage..." - @go test -v -timeout=$(TEST_TIMEOUT) -coverprofile=coverage.out . - @go tool cover -html=coverage.out -o coverage.html - @echo "Coverage report generated: coverage.html" - -# Format and lint -fmt: - @echo "Formatting Go code..." - @go fmt . - -lint: - @echo "Running linter..." - @golint . || echo "golint not available, skipping..." - -# Install dependencies for development -install-deps: - @echo "Installing Go dependencies..." - @go mod tidy - @go mod download - -# Show current configuration -show-config: - @echo "Current configuration:" - @echo " WEED_BINARY: $(WEED_BINARY)" - @echo " S3_PORT: $(S3_PORT)" - @echo " TEST_TIMEOUT: $(TEST_TIMEOUT)" - @echo " TEST_PATTERN: $(TEST_PATTERN)" \ No newline at end of file diff --git a/test/s3/retention/README.md b/test/s3/retention/README.md deleted file mode 100644 index 7d92646e6..000000000 --- a/test/s3/retention/README.md +++ /dev/null @@ -1,264 +0,0 @@ -# SeaweedFS S3 Object Retention Tests - -This directory contains comprehensive tests for SeaweedFS S3 Object Retention functionality, including Object Lock, Legal Hold, and WORM (Write Once Read Many) capabilities. - -## Overview - -The test suite validates AWS S3-compatible object retention features including: - -- **Object Retention**: GOVERNANCE and COMPLIANCE modes with retain-until-date -- **Legal Hold**: Independent protection that can be applied/removed -- **Object Lock Configuration**: Bucket-level default retention policies -- **WORM Integration**: Compatibility with legacy WORM functionality -- **Version-specific Retention**: Different retention policies per object version -- **Enforcement**: Protection against deletion and overwriting - -## Test Files - -- `s3_retention_test.go` - Core retention functionality tests -- `s3_worm_integration_test.go` - WORM integration and advanced scenarios -- `test_config.json` - Test configuration (endpoints, credentials) -- `Makefile` - Comprehensive test automation -- `go.mod` - Go module dependencies - -## Prerequisites - -- Go 1.21 or later -- SeaweedFS binary built (`make build-weed`) -- AWS SDK Go v2 -- Testify testing framework - -## Quick Start - -### 1. Build and Start Server -```bash -# Build SeaweedFS and start test server -make start-server -``` - -### 2. Run Tests -```bash -# Run core retention tests -make test-retention-quick - -# Run all retention tests -make test-retention - -# Run WORM integration tests -make test-retention-worm - -# Run all tests with managed server -make test-with-server -``` - -### 3. Cleanup -```bash -make clean -``` - -## Test Categories - -### Core Retention Tests -- `TestBasicRetentionWorkflow` - Basic GOVERNANCE mode retention -- `TestRetentionModeCompliance` - COMPLIANCE mode (immutable) -- `TestLegalHoldWorkflow` - Legal hold on/off functionality -- `TestObjectLockConfiguration` - Bucket object lock settings - -### Advanced Tests -- `TestRetentionWithVersions` - Version-specific retention policies -- `TestRetentionAndLegalHoldCombination` - Multiple protection types -- `TestExpiredRetention` - Post-expiration behavior -- `TestRetentionErrorCases` - Error handling and edge cases - -### WORM Integration Tests -- `TestWORMRetentionIntegration` - New retention + legacy WORM -- `TestWORMLegacyCompatibility` - Backward compatibility -- `TestRetentionOverwriteProtection` - Prevent overwrites -- `TestRetentionBulkOperations` - Bulk delete with retention -- `TestRetentionWithMultipartUpload` - Multipart upload retention -- `TestRetentionExtendedAttributes` - Extended attribute storage -- `TestRetentionBucketDefaults` - Default retention application -- `TestRetentionConcurrentOperations` - Concurrent operation safety - -## Individual Test Targets - -Run specific test categories: - -```bash -# Basic functionality -make test-basic-retention -make test-compliance-retention -make test-legal-hold - -# Advanced features -make test-retention-versions -make test-retention-combination -make test-expired-retention - -# WORM integration -make test-worm-integration -make test-worm-legacy -make test-retention-bulk -``` - -## Configuration - -### Server Configuration -The tests use these default settings: -- S3 Port: 8333 -- Test timeout: 15 minutes -- Volume directory: `./test-volume-data` - -### Test Configuration (`test_config.json`) -```json -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-retention-", - "use_ssl": false, - "skip_verify_ssl": true -} -``` - -## Expected Behavior - -### GOVERNANCE Mode -- Objects protected until retain-until-date -- Can be bypassed with `x-amz-bypass-governance-retention` header -- Supports time extension (not reduction) - -### COMPLIANCE Mode -- Objects immutably protected until retain-until-date -- Cannot be bypassed or shortened -- Strictest protection level - -### Legal Hold -- Independent ON/OFF protection -- Can coexist with retention policies -- Must be explicitly removed to allow deletion - -### Version Support -- Each object version can have individual retention -- Applies to both versioned and non-versioned buckets -- Version-specific retention retrieval - -## Development - -### Running in Development Mode -```bash -# Start server for development -make dev-start - -# Run quick test -make dev-test -``` - -### Code Quality -```bash -# Format code -make fmt - -# Run linter -make lint - -# Generate coverage report -make coverage -``` - -### Performance Testing -```bash -# Run benchmarks -make benchmark-retention -``` - -## Troubleshooting - -### Server Won't Start -```bash -# Check if port is in use -netstat -tlnp | grep 8333 - -# View server logs -make logs - -# Force cleanup -make clean -``` - -### Test Failures -```bash -# Run with verbose output -go test -v -timeout=15m . - -# Run specific test -go test -v -run TestBasicRetentionWorkflow . - -# Check server health -make health-check -``` - -### Dependencies -```bash -# Install/update dependencies -make install-deps - -# Check dependency status -make check-deps -``` - -## Integration with SeaweedFS - -These tests validate the retention implementation in: -- `weed/s3api/s3api_object_retention.go` - Core retention logic -- `weed/s3api/s3api_object_handlers_retention.go` - HTTP handlers -- `weed/s3api/s3_constants/extend_key.go` - Extended attribute keys -- `weed/s3api/s3err/s3api_errors.go` - Error definitions -- `weed/s3api/s3api_object_handlers_delete.go` - Deletion enforcement -- `weed/s3api/s3api_object_handlers_put.go` - Upload enforcement - -## AWS CLI Compatibility - -The retention implementation supports standard AWS CLI commands: - -```bash -# Set object retention -aws s3api put-object-retention \ - --bucket mybucket \ - --key myobject \ - --retention Mode=GOVERNANCE,RetainUntilDate=2024-12-31T23:59:59Z - -# Get object retention -aws s3api get-object-retention \ - --bucket mybucket \ - --key myobject - -# Set legal hold -aws s3api put-object-legal-hold \ - --bucket mybucket \ - --key myobject \ - --legal-hold Status=ON - -# Configure bucket object lock -aws s3api put-object-lock-configuration \ - --bucket mybucket \ - --object-lock-configuration ObjectLockEnabled=Enabled,Rule='{DefaultRetention={Mode=GOVERNANCE,Days=30}}' -``` - -## Contributing - -When adding new retention tests: - -1. Follow existing test patterns -2. Use descriptive test names -3. Include both positive and negative test cases -4. Test error conditions -5. Update this README with new test descriptions -6. Add appropriate Makefile targets for new test categories - -## References - -- [AWS S3 Object Lock Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -- [AWS S3 API Reference - Object Retention](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html) -- [SeaweedFS S3 API Documentation](https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API) \ No newline at end of file diff --git a/test/s3/retention/object_lock_reproduce_test.go b/test/s3/retention/object_lock_reproduce_test.go deleted file mode 100644 index 0b59dd832..000000000 --- a/test/s3/retention/object_lock_reproduce_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package retention - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/stretchr/testify/require" -) - -// TestReproduceObjectLockIssue reproduces the Object Lock header processing issue step by step -func TestReproduceObjectLockIssue(t *testing.T) { - client := getS3Client(t) - bucketName := fmt.Sprintf("object-lock-test-%d", time.Now().UnixNano()) - - t.Logf("=== Reproducing Object Lock Header Processing Issue ===") - t.Logf("Bucket name: %s", bucketName) - - // Step 1: Create bucket with Object Lock enabled header - t.Logf("\n1. Creating bucket with ObjectLockEnabledForBucket=true") - t.Logf(" This should send x-amz-bucket-object-lock-enabled: true header") - - createResp, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), // This sets the x-amz-bucket-object-lock-enabled header - }) - - if err != nil { - t.Fatalf("Bucket creation failed: %v", err) - } - t.Logf("Bucket created successfully") - t.Logf(" Response: %+v", createResp) - - // Step 2: Check if Object Lock is actually enabled - t.Logf("\n2. Checking Object Lock configuration to verify it was enabled") - - objectLockResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - - if err != nil { - t.Logf("GetObjectLockConfiguration FAILED: %v", err) - t.Logf(" This demonstrates the issue with header processing!") - t.Logf(" S3 clients expect this call to succeed if Object Lock is supported") - t.Logf(" When this fails, clients conclude that Object Lock is not supported") - - // This failure demonstrates the bug - the bucket was created but Object Lock wasn't enabled - t.Logf("\nBUG CONFIRMED:") - t.Logf(" - Bucket creation with ObjectLockEnabledForBucket=true succeeded") - t.Logf(" - But GetObjectLockConfiguration fails") - t.Logf(" - This means the x-amz-bucket-object-lock-enabled header was ignored") - - } else { - t.Logf("GetObjectLockConfiguration succeeded!") - t.Logf(" Response: %+v", objectLockResp) - t.Logf(" Object Lock is properly enabled - this is the expected behavior") - } - - // Step 3: Check versioning status (required for Object Lock) - t.Logf("\n3. Checking bucket versioning status (required for Object Lock)") - - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - t.Logf(" Versioning status: %v", versioningResp.Status) - if versioningResp.Status != "Enabled" { - t.Logf(" Versioning should be automatically enabled when Object Lock is enabled") - } - - // Cleanup - t.Logf("\n4. Cleaning up test bucket") - _, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf(" Warning: Failed to delete bucket: %v", err) - } - - t.Logf("\n=== Issue Reproduction Complete ===") - t.Logf("Expected behavior after fix:") - t.Logf(" - CreateBucket with ObjectLockEnabledForBucket=true should enable Object Lock") - t.Logf(" - GetObjectLockConfiguration should return enabled configuration") - t.Logf(" - Versioning should be automatically enabled") -} - -// TestNormalBucketCreationStillWorks tests that normal bucket creation still works -func TestNormalBucketCreationStillWorks(t *testing.T) { - client := getS3Client(t) - bucketName := fmt.Sprintf("normal-test-%d", time.Now().UnixNano()) - - t.Logf("=== Testing Normal Bucket Creation ===") - - // Create bucket without Object Lock - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - t.Logf("Normal bucket creation works") - - // Object Lock should NOT be enabled - _, err = client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - require.Error(t, err, "GetObjectLockConfiguration should fail for bucket without Object Lock") - t.Logf("GetObjectLockConfiguration correctly fails for normal bucket") - - // Cleanup - client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}) -} diff --git a/test/s3/retention/object_lock_validation_test.go b/test/s3/retention/object_lock_validation_test.go deleted file mode 100644 index 4293486e8..000000000 --- a/test/s3/retention/object_lock_validation_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package retention - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/require" -) - -// TestObjectLockValidation tests that S3 Object Lock functionality works end-to-end -// This test focuses on the complete Object Lock workflow that S3 clients expect -func TestObjectLockValidation(t *testing.T) { - client := getS3Client(t) - bucketName := fmt.Sprintf("object-lock-test-%d", time.Now().UnixNano()) - - t.Logf("=== Validating S3 Object Lock Functionality ===") - t.Logf("Bucket: %s", bucketName) - - // Step 1: Create bucket with Object Lock header - t.Log("\n1. Creating bucket with x-amz-bucket-object-lock-enabled: true") - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), // This sends x-amz-bucket-object-lock-enabled: true - }) - require.NoError(t, err, "Bucket creation should succeed") - defer client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}) - t.Log(" Bucket created successfully") - - // Step 2: Check if Object Lock is supported (standard S3 client behavior) - t.Log("\n2. Testing Object Lock support detection") - _, err = client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "GetObjectLockConfiguration should succeed for Object Lock enabled bucket") - t.Log(" GetObjectLockConfiguration succeeded - Object Lock is properly enabled") - - // Step 3: Verify versioning is enabled (required for Object Lock) - t.Log("\n3. Verifying versioning is automatically enabled") - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - require.Equal(t, types.BucketVersioningStatusEnabled, versioningResp.Status, "Versioning should be automatically enabled") - t.Log(" Versioning automatically enabled") - - // Step 4: Test actual Object Lock functionality - t.Log("\n4. Testing Object Lock retention functionality") - - // Create an object - key := "protected-object.dat" - content := "Important data that needs immutable protection" - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId, "Object should have a version ID") - t.Log(" Object created with versioning") - - // Apply Object Lock retention - retentionUntil := time.Now().Add(24 * time.Hour) - _, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeCompliance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err, "Setting Object Lock retention should succeed") - t.Log(" Object Lock retention applied successfully") - - // Verify retention allows simple DELETE (creates delete marker) but blocks version deletion - // AWS S3 behavior: Simple DELETE (without version ID) is ALWAYS allowed and creates delete marker - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker (AWS S3 behavior)") - t.Log(" Simple DELETE succeeded (creates delete marker - correct AWS behavior)") - - // Now verify that DELETE with version ID is properly blocked by retention - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.Error(t, err, "DELETE with version ID should be blocked by COMPLIANCE retention") - t.Log(" Object version is properly protected by retention policy") - - // Verify we can read the object version (should still work) - // Note: Need to specify version ID since latest version is now a delete marker - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.NoError(t, err, "Reading protected object version should still work") - defer getResp.Body.Close() - t.Log(" Protected object can still be read") - - t.Log("\nS3 OBJECT LOCK VALIDATION SUCCESSFUL!") - t.Log(" - Bucket creation with Object Lock header works") - t.Log(" - Object Lock support detection works (GetObjectLockConfiguration succeeds)") - t.Log(" - Versioning is automatically enabled") - t.Log(" - Object Lock retention functionality works") - t.Log(" - Objects are properly protected from deletion") - t.Log("") - t.Log("S3 clients will now recognize SeaweedFS as supporting Object Lock!") -} diff --git a/test/s3/retention/s3_bucket_object_lock_test.go b/test/s3/retention/s3_bucket_object_lock_test.go deleted file mode 100644 index 44100dce4..000000000 --- a/test/s3/retention/s3_bucket_object_lock_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package retention - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBucketCreationWithObjectLockEnabled tests creating a bucket with the -// x-amz-bucket-object-lock-enabled header, which is required for S3 Object Lock compatibility -func TestBucketCreationWithObjectLockEnabled(t *testing.T) { - // This test verifies that bucket creation with - // x-amz-bucket-object-lock-enabled header should automatically enable Object Lock - - client := getS3Client(t) - bucketName := getNewBucketName() - defer func() { - // Best effort cleanup - deleteBucket(t, client, bucketName) - }() - - // Test 1: Create bucket with Object Lock enabled header using custom HTTP client - t.Run("CreateBucketWithObjectLockHeader", func(t *testing.T) { - // Create bucket with x-amz-bucket-object-lock-enabled header - // This simulates what S3 clients do when testing Object Lock support - createResp, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), // This should set x-amz-bucket-object-lock-enabled header - }) - require.NoError(t, err) - require.NotNil(t, createResp) - - // Verify bucket was created - _, err = client.HeadBucket(context.TODO(), &s3.HeadBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - }) - - // Test 2: Verify that Object Lock is automatically enabled for the bucket - t.Run("VerifyObjectLockAutoEnabled", func(t *testing.T) { - // Try to get the Object Lock configuration - // If the header was processed correctly, this should return an enabled configuration - configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - - require.NoError(t, err, "GetObjectLockConfiguration should not fail if Object Lock is enabled") - require.NotNil(t, configResp.ObjectLockConfiguration, "ObjectLockConfiguration should not be nil") - assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled, "Object Lock should be enabled") - }) - - // Test 3: Verify versioning is automatically enabled (required for Object Lock) - t.Run("VerifyVersioningAutoEnabled", func(t *testing.T) { - // Object Lock requires versioning to be enabled - // When Object Lock is enabled via header, versioning should also be enabled automatically - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Versioning should be automatically enabled for Object Lock - assert.Equal(t, types.BucketVersioningStatusEnabled, versioningResp.Status, "Versioning should be automatically enabled for Object Lock") - }) -} - -// TestBucketCreationWithoutObjectLockHeader tests normal bucket creation -// to ensure we don't break existing functionality -func TestBucketCreationWithoutObjectLockHeader(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - defer deleteBucket(t, client, bucketName) - - // Create bucket without Object Lock header - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Verify bucket was created - _, err = client.HeadBucket(context.TODO(), &s3.HeadBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Object Lock should NOT be enabled - _, err = client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - // This should fail since Object Lock is not enabled - require.Error(t, err) - t.Logf("GetObjectLockConfiguration correctly failed for bucket without Object Lock: %v", err) - - // Versioning should not be enabled by default - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Should be either empty/unset or Suspended, but not Enabled - if versioningResp.Status != types.BucketVersioningStatusEnabled { - t.Logf("Versioning correctly not enabled: %v", versioningResp.Status) - } else { - t.Errorf("Versioning should not be enabled for bucket without Object Lock header") - } -} - -// TestS3ObjectLockWorkflow tests the complete Object Lock workflow that S3 clients would use -func TestS3ObjectLockWorkflow(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - defer deleteBucket(t, client, bucketName) - - // Step 1: Client creates bucket with Object Lock enabled - t.Run("ClientCreatesBucket", func(t *testing.T) { - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err) - }) - - // Step 2: Client checks if Object Lock is supported by getting the configuration - t.Run("ClientChecksObjectLockSupport", func(t *testing.T) { - configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - - require.NoError(t, err, "Object Lock configuration check should succeed") - - // S3 clients should see Object Lock is enabled - require.NotNil(t, configResp.ObjectLockConfiguration) - assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled) - t.Log("Object Lock configuration retrieved successfully - S3 clients would see this as supported") - }) - - // Step 3: Client would then configure retention policies and use Object Lock - t.Run("ClientConfiguresRetention", func(t *testing.T) { - // Verify versioning is automatically enabled (required for Object Lock) - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - require.Equal(t, types.BucketVersioningStatusEnabled, versioningResp.Status, "Versioning should be automatically enabled") - - // Create an object - key := "protected-backup-object" - content := "Backup data with Object Lock protection" - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - - // Set Object Lock retention (what backup clients do to protect data) - retentionUntil := time.Now().Add(24 * time.Hour) - _, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeCompliance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Verify object is protected - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.Error(t, err, "Object should be protected by retention policy") - - t.Log("Object Lock retention successfully applied - data is immutable") - }) -} diff --git a/test/s3/retention/s3_object_lock_headers_test.go b/test/s3/retention/s3_object_lock_headers_test.go deleted file mode 100644 index bf7283617..000000000 --- a/test/s3/retention/s3_object_lock_headers_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package retention - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestPutObjectWithLockHeaders tests that object lock headers in PUT requests -// are properly stored and returned in HEAD responses -func TestPutObjectWithLockHeaders(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket with object lock enabled and versioning - createBucketWithObjectLock(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "test-object-lock-headers" - content := "test content with object lock headers" - retainUntilDate := time.Now().Add(24 * time.Hour) - - // Test 1: PUT with COMPLIANCE mode and retention date - t.Run("PUT with COMPLIANCE mode", func(t *testing.T) { - testKey := key + "-compliance" - - // PUT object with lock headers - putResp := putObjectWithLockHeaders(t, client, bucketName, testKey, content, - "COMPLIANCE", retainUntilDate, "") - require.NotNil(t, putResp.VersionId) - - // HEAD object and verify lock headers are returned - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testKey), - }) - require.NoError(t, err) - - // Verify object lock metadata is present in response - assert.Equal(t, types.ObjectLockModeCompliance, headResp.ObjectLockMode) - assert.NotNil(t, headResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate, *headResp.ObjectLockRetainUntilDate, 5*time.Second) - }) - - // Test 2: PUT with GOVERNANCE mode and retention date - t.Run("PUT with GOVERNANCE mode", func(t *testing.T) { - testKey := key + "-governance" - - putResp := putObjectWithLockHeaders(t, client, bucketName, testKey, content, - "GOVERNANCE", retainUntilDate, "") - require.NotNil(t, putResp.VersionId) - - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testKey), - }) - require.NoError(t, err) - - assert.Equal(t, types.ObjectLockModeGovernance, headResp.ObjectLockMode) - assert.NotNil(t, headResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate, *headResp.ObjectLockRetainUntilDate, 5*time.Second) - }) - - // Test 3: PUT with legal hold - t.Run("PUT with legal hold", func(t *testing.T) { - testKey := key + "-legal-hold" - - putResp := putObjectWithLockHeaders(t, client, bucketName, testKey, content, - "", time.Time{}, "ON") - require.NotNil(t, putResp.VersionId) - - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testKey), - }) - require.NoError(t, err) - - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, headResp.ObjectLockLegalHoldStatus) - }) - - // Test 4: PUT with both retention and legal hold - t.Run("PUT with both retention and legal hold", func(t *testing.T) { - testKey := key + "-both" - - putResp := putObjectWithLockHeaders(t, client, bucketName, testKey, content, - "GOVERNANCE", retainUntilDate, "ON") - require.NotNil(t, putResp.VersionId) - - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(testKey), - }) - require.NoError(t, err) - - assert.Equal(t, types.ObjectLockModeGovernance, headResp.ObjectLockMode) - assert.NotNil(t, headResp.ObjectLockRetainUntilDate) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, headResp.ObjectLockLegalHoldStatus) - }) -} - -// TestGetObjectWithLockHeaders verifies that GET requests also return object lock metadata -func TestGetObjectWithLockHeaders(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucketWithObjectLock(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "test-get-object-lock" - content := "test content for GET with lock headers" - retainUntilDate := time.Now().Add(24 * time.Hour) - - // PUT object with lock headers - putResp := putObjectWithLockHeaders(t, client, bucketName, key, content, - "COMPLIANCE", retainUntilDate, "ON") - require.NotNil(t, putResp.VersionId) - - // GET object and verify lock headers are returned - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - defer getResp.Body.Close() - - // Verify object lock metadata is present in GET response - assert.Equal(t, types.ObjectLockModeCompliance, getResp.ObjectLockMode) - assert.NotNil(t, getResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate, *getResp.ObjectLockRetainUntilDate, 5*time.Second) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, getResp.ObjectLockLegalHoldStatus) -} - -// TestVersionedObjectLockHeaders tests object lock headers work with versioned objects -func TestVersionedObjectLockHeaders(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucketWithObjectLock(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "test-versioned-lock" - content1 := "version 1 content" - content2 := "version 2 content" - retainUntilDate1 := time.Now().Add(12 * time.Hour) - retainUntilDate2 := time.Now().Add(24 * time.Hour) - - // PUT first version with GOVERNANCE mode - putResp1 := putObjectWithLockHeaders(t, client, bucketName, key, content1, - "GOVERNANCE", retainUntilDate1, "") - require.NotNil(t, putResp1.VersionId) - - // PUT second version with COMPLIANCE mode - putResp2 := putObjectWithLockHeaders(t, client, bucketName, key, content2, - "COMPLIANCE", retainUntilDate2, "ON") - require.NotNil(t, putResp2.VersionId) - require.NotEqual(t, *putResp1.VersionId, *putResp2.VersionId) - - // HEAD latest version (version 2) - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockModeCompliance, headResp.ObjectLockMode) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, headResp.ObjectLockLegalHoldStatus) - - // HEAD specific version 1 - headResp1, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockModeGovernance, headResp1.ObjectLockMode) - assert.NotEqual(t, types.ObjectLockLegalHoldStatusOn, headResp1.ObjectLockLegalHoldStatus) -} - -// TestObjectLockHeadersErrorCases tests various error scenarios -func TestObjectLockHeadersErrorCases(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucketWithObjectLock(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "test-error-cases" - content := "test content for error cases" - - // Test 1: Invalid retention mode should be rejected - t.Run("Invalid retention mode", func(t *testing.T) { - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key + "-invalid-mode"), - Body: strings.NewReader(content), - ObjectLockMode: "INVALID_MODE", // Invalid mode - ObjectLockRetainUntilDate: aws.Time(time.Now().Add(24 * time.Hour)), - }) - require.Error(t, err) - }) - - // Test 2: Retention date in the past should be rejected - t.Run("Past retention date", func(t *testing.T) { - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key + "-past-date"), - Body: strings.NewReader(content), - ObjectLockMode: "GOVERNANCE", - ObjectLockRetainUntilDate: aws.Time(time.Now().Add(-24 * time.Hour)), // Past date - }) - require.Error(t, err) - }) - - // Test 3: Mode without date should be rejected - t.Run("Mode without retention date", func(t *testing.T) { - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key + "-no-date"), - Body: strings.NewReader(content), - ObjectLockMode: "GOVERNANCE", - // Missing ObjectLockRetainUntilDate - }) - require.Error(t, err) - }) -} - -// TestObjectLockHeadersNonVersionedBucket tests that object lock fails on non-versioned buckets -func TestObjectLockHeadersNonVersionedBucket(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create regular bucket without object lock/versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "test-non-versioned" - content := "test content" - retainUntilDate := time.Now().Add(24 * time.Hour) - - // Attempting to PUT with object lock headers should fail - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - ObjectLockMode: "GOVERNANCE", - ObjectLockRetainUntilDate: aws.Time(retainUntilDate), - }) - require.Error(t, err) -} - -// Helper Functions - -// putObjectWithLockHeaders puts an object with object lock headers -func putObjectWithLockHeaders(t *testing.T, client *s3.Client, bucketName, key, content string, - mode string, retainUntilDate time.Time, legalHold string) *s3.PutObjectOutput { - - input := &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - } - - // Add retention mode and date if specified - if mode != "" { - switch mode { - case "COMPLIANCE": - input.ObjectLockMode = types.ObjectLockModeCompliance - case "GOVERNANCE": - input.ObjectLockMode = types.ObjectLockModeGovernance - } - if !retainUntilDate.IsZero() { - input.ObjectLockRetainUntilDate = aws.Time(retainUntilDate) - } - } - - // Add legal hold if specified - if legalHold != "" { - switch legalHold { - case "ON": - input.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOn - case "OFF": - input.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatusOff - } - } - - resp, err := client.PutObject(context.TODO(), input) - require.NoError(t, err) - return resp -} - -// createBucketWithObjectLock creates a bucket with object lock enabled -func createBucketWithObjectLock(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err) - - // Enable versioning (required for object lock) - enableVersioning(t, client, bucketName) -} diff --git a/test/s3/retention/s3_retention_test.go b/test/s3/retention/s3_retention_test.go deleted file mode 100644 index 8477a50bf..000000000 --- a/test/s3/retention/s3_retention_test.go +++ /dev/null @@ -1,726 +0,0 @@ -package retention - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// S3TestConfig holds configuration for S3 tests -type S3TestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool -} - -// Default test configuration - should match test_config.json -var defaultConfig = &S3TestConfig{ - Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-retention-", - UseSSL: false, - SkipVerifySSL: true, -} - -// getS3Client creates an AWS S3 client for testing -func getS3Client(t *testing.T) *s3.Client { - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(defaultConfig.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - defaultConfig.AccessKey, - defaultConfig.SecretKey, - "", - )), - config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: defaultConfig.Endpoint, - SigningRegion: defaultConfig.Region, - HostnameImmutable: true, - }, nil - })), - ) - require.NoError(t, err) - - return s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = true // Important for SeaweedFS - }) -} - -// getNewBucketName generates a unique bucket name -func getNewBucketName() string { - timestamp := time.Now().UnixNano() - return fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, timestamp) -} - -// createBucket creates a new bucket for testing -func createBucket(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) -} - -// deleteBucket deletes a bucket and all its contents -func deleteBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, try to delete all objects and versions - err := deleteAllObjectVersions(t, client, bucketName) - if err != nil { - t.Logf("Warning: failed to delete all object versions in first attempt: %v", err) - // Try once more in case of transient errors - time.Sleep(500 * time.Millisecond) - err = deleteAllObjectVersions(t, client, bucketName) - if err != nil { - t.Logf("Warning: failed to delete all object versions in second attempt: %v", err) - } - } - - // Wait a bit for eventual consistency - time.Sleep(100 * time.Millisecond) - - // Try to delete the bucket multiple times in case of eventual consistency issues - for retries := 0; retries < 3; retries++ { - _, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err == nil { - t.Logf("Successfully deleted bucket %s", bucketName) - return - } - - t.Logf("Warning: failed to delete bucket %s (attempt %d): %v", bucketName, retries+1, err) - if retries < 2 { - time.Sleep(200 * time.Millisecond) - } - } -} - -// deleteAllObjectVersions deletes all object versions in a bucket -func deleteAllObjectVersions(t *testing.T, client *s3.Client, bucketName string) error { - // List all object versions - paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - - for paginator.HasMorePages() { - page, err := paginator.NextPage(context.TODO()) - if err != nil { - return err - } - - var objectsToDelete []types.ObjectIdentifier - - // Add versions - first try to remove retention/legal hold - for _, version := range page.Versions { - // Try to remove legal hold if present - _, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: version.Key, - VersionId: version.VersionId, - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOff, - }, - }) - if err != nil { - // Legal hold might not be set, ignore error - t.Logf("Note: could not remove legal hold for %s@%s: %v", *version.Key, *version.VersionId, err) - } - - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: version.Key, - VersionId: version.VersionId, - }) - } - - // Add delete markers - for _, deleteMarker := range page.DeleteMarkers { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: deleteMarker.Key, - VersionId: deleteMarker.VersionId, - }) - } - - // Delete objects in batches with bypass governance retention - if len(objectsToDelete) > 0 { - _, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - BypassGovernanceRetention: aws.Bool(true), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(true), - }, - }) - if err != nil { - t.Logf("Warning: batch delete failed, trying individual deletion: %v", err) - // Try individual deletion for each object - for _, obj := range objectsToDelete { - _, delErr := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: obj.Key, - VersionId: obj.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - if delErr != nil { - t.Logf("Warning: failed to delete object %s@%s: %v", *obj.Key, *obj.VersionId, delErr) - } - } - } - } - } - - return nil -} - -// enableVersioning enables versioning on a bucket -func enableVersioning(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) -} - -// putObject puts an object into a bucket -func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput { - resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - return resp -} - -// cleanupAllTestBuckets cleans up any leftover test buckets -func cleanupAllTestBuckets(t *testing.T, client *s3.Client) { - // List all buckets - listResp, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}) - if err != nil { - t.Logf("Warning: failed to list buckets for cleanup: %v", err) - return - } - - // Delete buckets that match our test prefix - for _, bucket := range listResp.Buckets { - if bucket.Name != nil && strings.HasPrefix(*bucket.Name, defaultConfig.BucketPrefix) { - t.Logf("Cleaning up leftover test bucket: %s", *bucket.Name) - deleteBucket(t, client, *bucket.Name) - } - } -} - -// TestBasicRetentionWorkflow tests the basic retention functionality -func TestBasicRetentionWorkflow(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Enable versioning (required for retention) - enableVersioning(t, client, bucketName) - - // Create object - key := "test-object" - content := "test content for retention" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention with GOVERNANCE mode - retentionUntil := time.Now().Add(24 * time.Hour) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Get retention and verify it was set correctly - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) - assert.WithinDuration(t, retentionUntil, *retentionResp.Retention.RetainUntilDate, time.Second) - - // Try to delete object without bypass - should fail - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.Error(t, err) - - // Delete object with bypass governance - should succeed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - BypassGovernanceRetention: aws.Bool(true), - }) - require.NoError(t, err) -} - -// TestRetentionModeCompliance tests COMPLIANCE mode retention -func TestRetentionModeCompliance(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "compliance-test-object" - content := "compliance test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention with COMPLIANCE mode - retentionUntil := time.Now().Add(1 * time.Hour) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeCompliance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Get retention and verify - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockRetentionModeCompliance, retentionResp.Retention.Mode) - - // Try simple DELETE - should succeed and create delete marker (AWS S3 behavior) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker") - - // Try DELETE with version ID - should fail for COMPLIANCE mode - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.Error(t, err, "DELETE with version ID should be blocked by COMPLIANCE retention") - - // Try DELETE with version ID and bypass - should still fail (COMPLIANCE mode ignores bypass) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - require.Error(t, err, "COMPLIANCE mode should ignore governance bypass") -} - -// TestLegalHoldWorkflow tests legal hold functionality -func TestLegalHoldWorkflow(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "legal-hold-test-object" - content := "legal hold test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set legal hold ON - _, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOn, - }, - }) - require.NoError(t, err) - - // Get legal hold and verify - legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status) - - // Try simple DELETE - should succeed and create delete marker (AWS S3 behavior) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker") - - // Try DELETE with version ID - should fail due to legal hold - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.Error(t, err, "DELETE with version ID should be blocked by legal hold") - - // Remove legal hold (must specify version ID since latest version is now delete marker) - _, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOff, - }, - }) - require.NoError(t, err) - - // Verify legal hold is off (must specify version ID) - legalHoldResp, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockLegalHoldStatusOff, legalHoldResp.LegalHold.Status) - - // Now DELETE with version ID should succeed after legal hold removed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.NoError(t, err, "DELETE with version ID should succeed after legal hold removed") -} - -// TestObjectLockConfiguration tests bucket object lock configuration -func TestObjectLockConfiguration(t *testing.T) { - client := getS3Client(t) - // Use a more unique bucket name to avoid conflicts - bucketName := fmt.Sprintf("object-lock-config-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000) - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Set object lock configuration - _, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - ObjectLockConfiguration: &types.ObjectLockConfiguration{ - ObjectLockEnabled: types.ObjectLockEnabledEnabled, - Rule: &types.ObjectLockRule{ - DefaultRetention: &types.DefaultRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - Days: aws.Int32(30), - }, - }, - }, - }) - if err != nil { - t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err) - t.Skip("Object lock configuration not supported, skipping test") - return - } - - // Get object lock configuration and verify - configResp, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockEnabledEnabled, configResp.ObjectLockConfiguration.ObjectLockEnabled) - require.NotNil(t, configResp.ObjectLockConfiguration.Rule.DefaultRetention, "DefaultRetention should not be nil") - require.NotNil(t, configResp.ObjectLockConfiguration.Rule.DefaultRetention.Days, "Days should not be nil") - assert.Equal(t, types.ObjectLockRetentionModeGovernance, configResp.ObjectLockConfiguration.Rule.DefaultRetention.Mode) - assert.Equal(t, int32(30), *configResp.ObjectLockConfiguration.Rule.DefaultRetention.Days) -} - -// TestRetentionWithVersions tests retention with specific object versions -func TestRetentionWithVersions(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create multiple versions of the same object - key := "versioned-retention-test" - content1 := "version 1 content" - content2 := "version 2 content" - - putResp1 := putObject(t, client, bucketName, key, content1) - require.NotNil(t, putResp1.VersionId) - - putResp2 := putObject(t, client, bucketName, key, content2) - require.NotNil(t, putResp2.VersionId) - - // Set retention on first version only - retentionUntil := time.Now().Add(1 * time.Hour) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Get retention for first version - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) - - // Try to get retention for second version - should fail (no retention set) - _, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp2.VersionId, - }) - require.Error(t, err) - - // Delete second version should succeed (no retention) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp2.VersionId, - }) - require.NoError(t, err) - - // Delete first version should fail (has retention) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - }) - require.Error(t, err) - - // Delete first version with bypass should succeed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - require.NoError(t, err) -} - -// TestRetentionAndLegalHoldCombination tests retention and legal hold together -func TestRetentionAndLegalHoldCombination(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "combined-protection-test" - content := "combined protection test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set both retention and legal hold - retentionUntil := time.Now().Add(1 * time.Hour) - - // Set retention - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Set legal hold - _, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOn, - }, - }) - require.NoError(t, err) - - // Try simple DELETE - should succeed and create delete marker (AWS S3 behavior) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker") - - // Try DELETE with version ID and bypass - should still fail due to legal hold - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - require.Error(t, err, "Legal hold should prevent deletion even with governance bypass") - - // Remove legal hold (must specify version ID since latest version is now delete marker) - _, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOff, - }, - }) - require.NoError(t, err) - - // Now DELETE with version ID and bypass governance should succeed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - require.NoError(t, err, "DELETE with version ID should succeed after legal hold removed and with governance bypass") -} - -// TestExpiredRetention tests that objects can be deleted after retention expires -func TestExpiredRetention(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "expired-retention-test" - content := "expired retention test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention for a very short time (2 seconds) - retentionUntil := time.Now().Add(2 * time.Second) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Try to delete immediately - should fail - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.Error(t, err) - - // Wait for retention to expire - time.Sleep(3 * time.Second) - - // Now delete should succeed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) -} - -// TestRetentionErrorCases tests various error conditions -func TestRetentionErrorCases(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Test setting retention on non-existent object - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String("non-existent-key"), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(time.Now().Add(1 * time.Hour)), - }, - }) - require.Error(t, err) - - // Test getting retention on non-existent object - _, err = client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String("non-existent-key"), - }) - require.Error(t, err) - - // Test setting legal hold on non-existent object - _, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String("non-existent-key"), - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOn, - }, - }) - require.Error(t, err) - - // Test getting legal hold on non-existent object - _, err = client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String("non-existent-key"), - }) - require.Error(t, err) - - // Test setting retention with past date - key := "retention-past-date-test" - content := "test content" - putObject(t, client, bucketName, key, content) - - pastDate := time.Now().Add(-1 * time.Hour) - _, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(pastDate), - }, - }) - require.Error(t, err) -} diff --git a/test/s3/retention/s3_worm_integration_test.go b/test/s3/retention/s3_worm_integration_test.go deleted file mode 100644 index 19010092c..000000000 --- a/test/s3/retention/s3_worm_integration_test.go +++ /dev/null @@ -1,536 +0,0 @@ -package retention - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestWORMRetentionIntegration tests that both retention and legacy WORM work together -func TestWORMRetentionIntegration(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "worm-retention-integration-test" - content := "worm retention integration test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention (new system) - retentionUntil := time.Now().Add(1 * time.Hour) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Try simple DELETE - should succeed and create delete marker (AWS S3 behavior) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker") - - // Try DELETE with version ID - should fail due to GOVERNANCE retention - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - require.Error(t, err, "DELETE with version ID should be blocked by GOVERNANCE retention") - - // Delete with version ID and bypass should succeed - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - BypassGovernanceRetention: aws.Bool(true), - }) - require.NoError(t, err) -} - -// TestWORMLegacyCompatibility tests that legacy WORM functionality still works -func TestWORMLegacyCompatibility(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object with legacy WORM headers (if supported) - key := "legacy-worm-test" - content := "legacy worm test content" - - // Try to create object with legacy WORM TTL header - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - // Add legacy WORM headers if supported - Metadata: map[string]string{ - "x-amz-meta-worm-ttl": fmt.Sprintf("%d", time.Now().Add(1*time.Hour).Unix()), - }, - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - - // Object should be created successfully - resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.NotNil(t, resp.Metadata) -} - -// TestRetentionOverwriteProtection tests that retention prevents overwrites -func TestRetentionOverwriteProtection(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "overwrite-protection-test" - content := "original content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Verify object exists before setting retention - _, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Object should exist before setting retention") - - // Set retention with specific version ID - retentionUntil := time.Now().Add(1 * time.Hour) - _, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Try to overwrite object - should fail in non-versioned bucket context - content2 := "new content" - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content2), - }) - // Note: In a real scenario, this might fail or create a new version - // The actual behavior depends on the implementation - if err != nil { - t.Logf("Expected behavior: overwrite blocked due to retention: %v", err) - } else { - t.Logf("Overwrite allowed, likely created new version") - } -} - -// TestRetentionBulkOperations tests retention with bulk operations -func TestRetentionBulkOperations(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create multiple objects with retention - var objectsToDelete []types.ObjectIdentifier - retentionUntil := time.Now().Add(1 * time.Hour) - - for i := 0; i < 3; i++ { - key := fmt.Sprintf("bulk-test-object-%d", i) - content := fmt.Sprintf("bulk test content %d", i) - - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention on each object with version ID - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: aws.String(key), - VersionId: putResp.VersionId, - }) - } - - // Try bulk delete without bypass - should fail or have errors - deleteResp, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(false), - }, - }) - - // Check if operation failed or returned errors for protected objects - if err != nil { - t.Logf("Expected: bulk delete failed due to retention: %v", err) - } else if deleteResp != nil && len(deleteResp.Errors) > 0 { - t.Logf("Expected: bulk delete returned %d errors due to retention", len(deleteResp.Errors)) - for _, delErr := range deleteResp.Errors { - t.Logf("Delete error: %s - %s", *delErr.Code, *delErr.Message) - } - } else { - t.Logf("Warning: bulk delete succeeded - retention may not be enforced for bulk operations") - } - - // Try bulk delete with bypass - should succeed - _, err = client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - BypassGovernanceRetention: aws.Bool(true), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(false), - }, - }) - if err != nil { - t.Logf("Bulk delete with bypass failed (may not be supported): %v", err) - } else { - t.Logf("Bulk delete with bypass succeeded") - } -} - -// TestRetentionWithMultipartUpload tests retention with multipart uploads -func TestRetentionWithMultipartUpload(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Start multipart upload - key := "multipart-retention-test" - createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - uploadId := createResp.UploadId - - // Upload a part - partContent := "This is a test part for multipart upload" - uploadResp, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - PartNumber: aws.Int32(1), - UploadId: uploadId, - Body: strings.NewReader(partContent), - }) - require.NoError(t, err) - - // Complete multipart upload - completeResp, err := client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - UploadId: uploadId, - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: uploadResp.ETag, - PartNumber: aws.Int32(1), - }, - }, - }, - }) - require.NoError(t, err) - - // Add a small delay to ensure the object is fully created - time.Sleep(500 * time.Millisecond) - - // Verify object exists after multipart upload - retry if needed - var headErr error - for retries := 0; retries < 10; retries++ { - _, headErr = client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - if headErr == nil { - break - } - t.Logf("HeadObject attempt %d failed: %v", retries+1, headErr) - time.Sleep(200 * time.Millisecond) - } - - if headErr != nil { - t.Logf("Object not found after multipart upload completion, checking if multipart upload is fully supported") - // Check if the object exists by trying to list it - listResp, listErr := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - Prefix: aws.String(key), - }) - if listErr != nil || len(listResp.Contents) == 0 { - t.Skip("Multipart upload may not be fully supported, skipping test") - return - } - // If object exists in listing but not accessible via HeadObject, skip test - t.Skip("Object exists in listing but not accessible via HeadObject, multipart upload may not be fully supported") - return - } - - require.NoError(t, headErr, "Object should exist after multipart upload") - - // Set retention on the completed multipart object with version ID - retentionUntil := time.Now().Add(1 * time.Hour) - _, err = client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: completeResp.VersionId, - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Try simple DELETE - should succeed and create delete marker (AWS S3 behavior) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err, "Simple DELETE should succeed and create delete marker") - - // Try DELETE with version ID - should fail due to GOVERNANCE retention - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: completeResp.VersionId, - }) - require.Error(t, err, "DELETE with version ID should be blocked by GOVERNANCE retention") -} - -// TestRetentionExtendedAttributes tests that retention uses extended attributes correctly -func TestRetentionExtendedAttributes(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "extended-attrs-test" - content := "extended attributes test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Set retention - retentionUntil := time.Now().Add(1 * time.Hour) - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - require.NoError(t, err) - - // Set legal hold - _, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp.VersionId, - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOn, - }, - }) - require.NoError(t, err) - - // Get object metadata to verify extended attributes are set - resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - - // Check that the object has metadata (may be empty in some implementations) - // Note: The actual metadata keys depend on the implementation - if resp.Metadata != nil && len(resp.Metadata) > 0 { - t.Logf("Object metadata: %+v", resp.Metadata) - } else { - t.Logf("Object metadata: empty (extended attributes may be stored internally)") - } - - // Verify retention can be retrieved - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) - - // Verify legal hold can be retrieved - legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status) -} - -// TestRetentionBucketDefaults tests object lock configuration defaults -func TestRetentionBucketDefaults(t *testing.T) { - client := getS3Client(t) - // Use a very unique bucket name to avoid conflicts - bucketName := fmt.Sprintf("bucket-defaults-%d-%d", time.Now().UnixNano(), time.Now().UnixMilli()%10000) - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Set bucket object lock configuration with default retention - _, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - ObjectLockConfiguration: &types.ObjectLockConfiguration{ - ObjectLockEnabled: types.ObjectLockEnabledEnabled, - Rule: &types.ObjectLockRule{ - DefaultRetention: &types.DefaultRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - Days: aws.Int32(1), // 1 day default - }, - }, - }, - }) - if err != nil { - t.Logf("PutObjectLockConfiguration failed (may not be supported): %v", err) - t.Skip("Object lock configuration not supported, skipping test") - return - } - - // Create object (should inherit default retention) - key := "bucket-defaults-test" - content := "bucket defaults test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Check if object has default retention applied - // Note: This depends on the implementation - some S3 services apply - // default retention automatically, others require explicit setting - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - if err != nil { - t.Logf("No automatic default retention applied: %v", err) - } else { - t.Logf("Default retention applied: %+v", retentionResp.Retention) - assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) - } -} - -// TestRetentionConcurrentOperations tests concurrent retention operations -func TestRetentionConcurrentOperations(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create object - key := "concurrent-ops-test" - content := "concurrent operations test content" - putResp := putObject(t, client, bucketName, key, content) - require.NotNil(t, putResp.VersionId) - - // Test concurrent retention and legal hold operations - retentionUntil := time.Now().Add(1 * time.Hour) - - // Set retention and legal hold concurrently - errChan := make(chan error, 2) - - go func() { - _, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Retention: &types.ObjectLockRetention{ - Mode: types.ObjectLockRetentionModeGovernance, - RetainUntilDate: aws.Time(retentionUntil), - }, - }) - errChan <- err - }() - - go func() { - _, err := client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - LegalHold: &types.ObjectLockLegalHold{ - Status: types.ObjectLockLegalHoldStatusOn, - }, - }) - errChan <- err - }() - - // Wait for both operations to complete - for i := 0; i < 2; i++ { - err := <-errChan - if err != nil { - t.Logf("Concurrent operation failed: %v", err) - } - } - - // Verify both settings are applied - retentionResp, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - if err == nil { - assert.Equal(t, types.ObjectLockRetentionModeGovernance, retentionResp.Retention.Mode) - } - - legalHoldResp, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - if err == nil { - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, legalHoldResp.LegalHold.Status) - } -} diff --git a/test/s3/retention/test_config.json b/test/s3/retention/test_config.json deleted file mode 100644 index b3281778b..000000000 --- a/test/s3/retention/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-retention-", - "use_ssl": false, - "skip_verify_ssl": true -} \ No newline at end of file diff --git a/test/s3/s3client/s3client.go b/test/s3/s3client/s3client.go deleted file mode 100644 index e789c57c6..000000000 --- a/test/s3/s3client/s3client.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "time" -) - -func main() { - cfg := MyConfig{ - Key: "any", - Secret: "any", - Region: "US", - Endpoint: MyEndpoint{ - URL: "http://localhost:8333", - }, - Bucket: MyBucketConfig{ - Name: "newbucket", - Versioning: false, - }, - MaxBackoffDelay: aws.Int(int(time.Second * 5)), - MaxRetryAttempts: aws.Int(1), - } - - awsCfg, err := MyAwsConfig(cfg) - if err != nil { - panic(err) - } - svc := s3.NewFromConfig(*awsCfg, func(o *s3.Options) { - o.UsePathStyle = true - }) - - // Use the S3 client to interact with SeaweedFS - // ... - // Example: List all buckets - result, err := svc.ListBuckets(context.Background(), &s3.ListBucketsInput{}) - // no errors - got list of buckets - if err != nil { - panic(err) - } - - // Print the list of buckets - for _, bucket := range result.Buckets { - println(*bucket.Name) - } - - bucket := "bucket1" - _, err = svc.HeadBucket(context.Background(), &s3.HeadBucketInput{Bucket: &bucket}) - // ERROR HERE - if err != nil { - println(err) - } - -} - -// === helpers - -func MyAwsConfig(cfg MyConfig) (*aws.Config, error) { - - cred := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(cfg.Key, cfg.Secret, "")) - customResolver := aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: cfg.Endpoint.URL, - SigningRegion: cfg.Region, - }, nil - }) - - awsCfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(cfg.Region), - config.WithCredentialsProvider(cred), - config.WithEndpointResolverWithOptions(customResolver), - config.WithRetryer(func() aws.Retryer { - r := retry.AddWithMaxAttempts(retry.NewStandard(), *cfg.MaxRetryAttempts) - return retry.AddWithMaxBackoffDelay(r, time.Duration(*cfg.MaxBackoffDelay*1000*1000)) - })) - return &awsCfg, err -} - -type MyConfig struct { - // Access key of S3 AWS. - Key string - // Access secret of S3 AWS. - Secret string - // Region. - Region string - // AWS endpoint. - Endpoint MyEndpoint - // Bucket configuration. - Bucket MyBucketConfig - // File access. - FileAccess MyFileAccessType - // Maximum backoff delay (ms, default: 20 sec). - MaxBackoffDelay *int - // Maximum attempts to retry operation on error (default: 5). - MaxRetryAttempts *int -} - -type MyBucketConfig struct { - // Name of bucket - Name string - // Enable or not versioning - Versioning bool -} - -type MyEndpoint struct { - URL string -} - -type MyFileAccessType byte diff --git a/test/s3/sse/Makefile b/test/s3/sse/Makefile deleted file mode 100644 index b05ef3b7c..000000000 --- a/test/s3/sse/Makefile +++ /dev/null @@ -1,529 +0,0 @@ -# Makefile for S3 SSE Integration Tests -# This Makefile provides targets for running comprehensive S3 Server-Side Encryption tests - -# Default values -SEAWEEDFS_BINARY ?= weed -S3_PORT ?= 8333 -FILER_PORT ?= 8888 -VOLUME_PORT ?= 8080 -MASTER_PORT ?= 9333 -TEST_TIMEOUT ?= 15m -BUCKET_PREFIX ?= test-sse- -ACCESS_KEY ?= some_access_key1 -SECRET_KEY ?= some_secret_key1 -VOLUME_MAX_SIZE_MB ?= 50 -VOLUME_MAX_COUNT ?= 100 - -# SSE-KMS configuration -KMS_KEY_ID ?= test-key-123 -KMS_TYPE ?= local -OPENBAO_ADDR ?= http://127.0.0.1:8200 -OPENBAO_TOKEN ?= root-token-for-testing -DOCKER_COMPOSE ?= docker-compose - -# Test directory -TEST_DIR := $(shell pwd) -SEAWEEDFS_ROOT := $(shell cd ../../../ && pwd) - -# Colors for output -RED := \033[0;31m -GREEN := \033[0;32m -YELLOW := \033[1;33m -NC := \033[0m # No Color - -.PHONY: all test clean start-seaweedfs stop-seaweedfs stop-seaweedfs-safe start-seaweedfs-ci check-binary build-weed help help-extended test-with-server test-quick-with-server test-metadata-persistence setup-openbao test-with-kms test-ssekms-integration clean-kms start-full-stack stop-full-stack - -all: test-basic - -# Build SeaweedFS binary (GitHub Actions compatible) -build-weed: - @echo "Building SeaweedFS binary..." - @cd $(SEAWEEDFS_ROOT)/weed && go install -buildvcs=false - @echo "โœ… SeaweedFS binary built successfully" - -help: - @echo "SeaweedFS S3 SSE Integration Tests" - @echo "" - @echo "Available targets:" - @echo " test-basic - Run basic S3 put/get tests first" - @echo " test - Run all S3 SSE integration tests" - @echo " test-ssec - Run SSE-C tests only" - @echo " test-ssekms - Run SSE-KMS tests only" - @echo " test-copy - Run SSE copy operation tests" - @echo " test-multipart - Run SSE multipart upload tests" - @echo " test-errors - Run SSE error condition tests" - @echo " benchmark - Run SSE performance benchmarks" - @echo " KMS Integration:" - @echo " setup-openbao - Set up OpenBao KMS for testing" - @echo " test-with-kms - Run full SSE integration with real KMS" - @echo " test-ssekms-integration - Run SSE-KMS with OpenBao only" - @echo " start-full-stack - Start SeaweedFS + OpenBao with Docker" - @echo " stop-full-stack - Stop Docker services" - @echo " clean-kms - Clean up KMS test environment" - @echo " start-seaweedfs - Start SeaweedFS server for testing" - @echo " stop-seaweedfs - Stop SeaweedFS server" - @echo " clean - Clean up test artifacts" - @echo " check-binary - Check if SeaweedFS binary exists" - @echo "" - @echo "Configuration:" - @echo " SEAWEEDFS_BINARY=$(SEAWEEDFS_BINARY)" - @echo " S3_PORT=$(S3_PORT)" - @echo " FILER_PORT=$(FILER_PORT)" - @echo " VOLUME_PORT=$(VOLUME_PORT)" - @echo " MASTER_PORT=$(MASTER_PORT)" - @echo " TEST_TIMEOUT=$(TEST_TIMEOUT)" - @echo " VOLUME_MAX_SIZE_MB=$(VOLUME_MAX_SIZE_MB)" - -check-binary: - @if ! command -v $(SEAWEEDFS_BINARY) > /dev/null 2>&1; then \ - echo "$(RED)Error: SeaweedFS binary '$(SEAWEEDFS_BINARY)' not found in PATH$(NC)"; \ - echo "Please build SeaweedFS first by running 'make' in the root directory"; \ - exit 1; \ - fi - @echo "$(GREEN)SeaweedFS binary found: $$(which $(SEAWEEDFS_BINARY))$(NC)" - -start-seaweedfs: check-binary - @echo "$(YELLOW)Starting SeaweedFS server for SSE testing...$(NC)" - @# Use port-based cleanup for consistency and safety - @echo "Cleaning up any existing processes..." - @lsof -ti :$(MASTER_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(VOLUME_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(FILER_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(S3_PORT) | xargs -r kill -TERM || true - @sleep 2 - - # Create necessary directories - @mkdir -p /tmp/seaweedfs-test-sse-master - @mkdir -p /tmp/seaweedfs-test-sse-volume - @mkdir -p /tmp/seaweedfs-test-sse-filer - - # Start master server with volume size limit and explicit gRPC port - @nohup $(SEAWEEDFS_BINARY) master -port=$(MASTER_PORT) -port.grpc=$$(( $(MASTER_PORT) + 10000 )) -mdir=/tmp/seaweedfs-test-sse-master -volumeSizeLimitMB=$(VOLUME_MAX_SIZE_MB) -ip=127.0.0.1 > /tmp/seaweedfs-sse-master.log 2>&1 & - @sleep 3 - - # Start volume server with master HTTP port and increased capacity - @nohup $(SEAWEEDFS_BINARY) volume -port=$(VOLUME_PORT) -mserver=127.0.0.1:$(MASTER_PORT) -dir=/tmp/seaweedfs-test-sse-volume -max=$(VOLUME_MAX_COUNT) -ip=127.0.0.1 > /tmp/seaweedfs-sse-volume.log 2>&1 & - @sleep 5 - - # Start filer server (using standard SeaweedFS gRPC port convention: HTTP port + 10000) - @nohup $(SEAWEEDFS_BINARY) filer -port=$(FILER_PORT) -port.grpc=$$(( $(FILER_PORT) + 10000 )) -master=127.0.0.1:$(MASTER_PORT) -dataCenter=defaultDataCenter -ip=127.0.0.1 > /tmp/seaweedfs-sse-filer.log 2>&1 & - @sleep 3 - - # Create S3 configuration with SSE-KMS support - @printf '{"identities":[{"name":"%s","credentials":[{"accessKey":"%s","secretKey":"%s"}],"actions":["Admin","Read","Write"]}],"kms":{"type":"%s","configs":{"keyId":"%s","encryptionContext":{},"bucketKey":false}}}' "$(ACCESS_KEY)" "$(ACCESS_KEY)" "$(SECRET_KEY)" "$(KMS_TYPE)" "$(KMS_KEY_ID)" > /tmp/seaweedfs-sse-s3.json - - # Start S3 server with KMS configuration - @nohup $(SEAWEEDFS_BINARY) s3 -port=$(S3_PORT) -filer=127.0.0.1:$(FILER_PORT) -config=/tmp/seaweedfs-sse-s3.json -ip.bind=127.0.0.1 > /tmp/seaweedfs-sse-s3.log 2>&1 & - @sleep 5 - - # Wait for S3 service to be ready - @echo "$(YELLOW)Waiting for S3 service to be ready...$(NC)" - @for i in $$(seq 1 30); do \ - if curl -s -f http://127.0.0.1:$(S3_PORT) > /dev/null 2>&1; then \ - echo "$(GREEN)S3 service is ready$(NC)"; \ - break; \ - fi; \ - echo "Waiting for S3 service... ($$i/30)"; \ - sleep 1; \ - done - - # Additional wait for filer gRPC to be ready - @echo "$(YELLOW)Waiting for filer gRPC to be ready...$(NC)" - @sleep 2 - @echo "$(GREEN)SeaweedFS server started successfully for SSE testing$(NC)" - @echo "Master: http://localhost:$(MASTER_PORT)" - @echo "Volume: http://localhost:$(VOLUME_PORT)" - @echo "Filer: http://localhost:$(FILER_PORT)" - @echo "S3: http://localhost:$(S3_PORT)" - @echo "Volume Max Size: $(VOLUME_MAX_SIZE_MB)MB" - @echo "SSE-KMS Support: Enabled" - -stop-seaweedfs: - @echo "$(YELLOW)Stopping SeaweedFS server...$(NC)" - @# Use port-based cleanup for consistency and safety - @lsof -ti :$(MASTER_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(VOLUME_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(FILER_PORT) | xargs -r kill -TERM || true - @lsof -ti :$(S3_PORT) | xargs -r kill -TERM || true - @sleep 2 - @echo "$(GREEN)SeaweedFS server stopped$(NC)" - -# CI-safe server stop that's more conservative -stop-seaweedfs-safe: - @echo "$(YELLOW)Safely stopping SeaweedFS server...$(NC)" - @# Use port-based cleanup which is safer in CI - @if command -v lsof >/dev/null 2>&1; then \ - echo "Using lsof for port-based cleanup..."; \ - lsof -ti :$(MASTER_PORT) 2>/dev/null | head -5 | while read pid; do kill -TERM $$pid 2>/dev/null || true; done; \ - lsof -ti :$(VOLUME_PORT) 2>/dev/null | head -5 | while read pid; do kill -TERM $$pid 2>/dev/null || true; done; \ - lsof -ti :$(FILER_PORT) 2>/dev/null | head -5 | while read pid; do kill -TERM $$pid 2>/dev/null || true; done; \ - lsof -ti :$(S3_PORT) 2>/dev/null | head -5 | while read pid; do kill -TERM $$pid 2>/dev/null || true; done; \ - else \ - echo "lsof not available, using netstat approach..."; \ - netstat -tlnp 2>/dev/null | grep :$(MASTER_PORT) | awk '{print $$7}' | cut -d/ -f1 | head -5 | while read pid; do [ "$$pid" != "-" ] && kill -TERM $$pid 2>/dev/null || true; done; \ - netstat -tlnp 2>/dev/null | grep :$(VOLUME_PORT) | awk '{print $$7}' | cut -d/ -f1 | head -5 | while read pid; do [ "$$pid" != "-" ] && kill -TERM $$pid 2>/dev/null || true; done; \ - netstat -tlnp 2>/dev/null | grep :$(FILER_PORT) | awk '{print $$7}' | cut -d/ -f1 | head -5 | while read pid; do [ "$$pid" != "-" ] && kill -TERM $$pid 2>/dev/null || true; done; \ - netstat -tlnp 2>/dev/null | grep :$(S3_PORT) | awk '{print $$7}' | cut -d/ -f1 | head -5 | while read pid; do [ "$$pid" != "-" ] && kill -TERM $$pid 2>/dev/null || true; done; \ - fi - @sleep 2 - @echo "$(GREEN)SeaweedFS server safely stopped$(NC)" - -clean: - @echo "$(YELLOW)Cleaning up SSE test artifacts...$(NC)" - @rm -rf /tmp/seaweedfs-test-sse-* - @rm -f /tmp/seaweedfs-sse-*.log - @rm -f /tmp/seaweedfs-sse-s3.json - @echo "$(GREEN)SSE test cleanup completed$(NC)" - -test-basic: check-binary - @echo "$(YELLOW)Running basic S3 SSE integration tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting basic SSE tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSECIntegrationBasic|TestSSEKMSIntegrationBasic" ./test/s3/sse || (echo "$(RED)Basic SSE tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)Basic SSE tests completed successfully!$(NC)" - -test: test-basic - @echo "$(YELLOW)Running all S3 SSE integration tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting comprehensive SSE tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSE.*Integration" ./test/s3/sse || (echo "$(RED)SSE tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)All SSE integration tests completed successfully!$(NC)" - -test-ssec: check-binary - @echo "$(YELLOW)Running SSE-C integration tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE-C tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSEC.*Integration" ./test/s3/sse || (echo "$(RED)SSE-C tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE-C tests completed successfully!$(NC)" - -test-ssekms: check-binary - @echo "$(YELLOW)Running SSE-KMS integration tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE-KMS tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSEKMS.*Integration" ./test/s3/sse || (echo "$(RED)SSE-KMS tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE-KMS tests completed successfully!$(NC)" - -test-copy: check-binary - @echo "$(YELLOW)Running SSE copy operation tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE copy tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run ".*CopyIntegration" ./test/s3/sse || (echo "$(RED)SSE copy tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE copy tests completed successfully!$(NC)" - -test-multipart: check-binary - @echo "$(YELLOW)Running SSE multipart upload tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE multipart tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSEMultipartUploadIntegration" ./test/s3/sse || (echo "$(RED)SSE multipart tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE multipart tests completed successfully!$(NC)" - -test-errors: check-binary - @echo "$(YELLOW)Running SSE error condition tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE error tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSEErrorConditions" ./test/s3/sse || (echo "$(RED)SSE error tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE error tests completed successfully!$(NC)" - -test-quick: check-binary - @echo "$(YELLOW)Running quick SSE tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting quick SSE tests...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=5m -run "TestSSECIntegrationBasic|TestSSEKMSIntegrationBasic" ./test/s3/sse || (echo "$(RED)Quick SSE tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)Quick SSE tests completed successfully!$(NC)" - -benchmark: check-binary - @echo "$(YELLOW)Running SSE performance benchmarks...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Starting SSE benchmarks...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=30m -bench=. -run=Benchmark ./test/s3/sse || (echo "$(RED)SSE benchmarks failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE benchmarks completed!$(NC)" - -# Debug targets -debug-logs: - @echo "$(YELLOW)=== Master Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-sse-master.log || echo "No master log found" - @echo "$(YELLOW)=== Volume Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-sse-volume.log || echo "No volume log found" - @echo "$(YELLOW)=== Filer Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-sse-filer.log || echo "No filer log found" - @echo "$(YELLOW)=== S3 Log ===$(NC)" - @tail -n 50 /tmp/seaweedfs-sse-s3.log || echo "No S3 log found" - -debug-status: - @echo "$(YELLOW)=== Process Status ===$(NC)" - @ps aux | grep -E "(weed|seaweedfs)" | grep -v grep || echo "No SeaweedFS processes found" - @echo "$(YELLOW)=== Port Status ===$(NC)" - @netstat -an | grep -E "($(MASTER_PORT)|$(VOLUME_PORT)|$(FILER_PORT)|$(S3_PORT))" || echo "No ports in use" - -# Manual test targets for development -manual-start: start-seaweedfs - @echo "$(GREEN)SeaweedFS with SSE support is now running for manual testing$(NC)" - @echo "You can now run SSE tests manually or use S3 clients to test SSE functionality" - @echo "Run 'make manual-stop' when finished" - -manual-stop: stop-seaweedfs clean - -# CI/CD targets -ci-test: test-quick - -# Stress test -stress: check-binary - @echo "$(YELLOW)Running SSE stress tests...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run="TestSSE.*Integration" -count=5 ./test/s3/sse || (echo "$(RED)SSE stress tests failed$(NC)" && $(MAKE) stop-seaweedfs-safe && exit 1) - @$(MAKE) stop-seaweedfs-safe - @echo "$(GREEN)SSE stress tests completed!$(NC)" - -# Performance test with various data sizes -perf: check-binary - @echo "$(YELLOW)Running SSE performance tests with various data sizes...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=60m -run=".*VariousDataSizes" ./test/s3/sse || (echo "$(RED)SSE performance tests failed$(NC)" && $(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe && exit 1) - @$(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe - @echo "$(GREEN)SSE performance tests completed!$(NC)" - -# Test specific scenarios that would catch the metadata bug -test-metadata-persistence: check-binary - @echo "$(YELLOW)Running SSE metadata persistence tests (would catch filer metadata bugs)...$(NC)" - @$(MAKE) start-seaweedfs-ci - @sleep 5 - @echo "$(GREEN)Testing that SSE metadata survives full PUT/GET cycle...$(NC)" - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSECIntegrationBasic" ./test/s3/sse || (echo "$(RED)SSE metadata persistence tests failed$(NC)" && $(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe && exit 1) - @$(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe - @echo "$(GREEN)SSE metadata persistence tests completed successfully!$(NC)" - @echo "$(GREEN)โœ… These tests would have caught the filer metadata storage bug!$(NC)" - -# GitHub Actions compatible test-with-server target that handles server lifecycle -test-with-server: build-weed - @echo "๐Ÿš€ Starting SSE integration tests with automated server management..." - @echo "Starting SeaweedFS cluster..." - @# Use the CI-safe startup directly without aggressive cleanup - @if $(MAKE) start-seaweedfs-ci > weed-test.log 2>&1; then \ - echo "โœ… SeaweedFS cluster started successfully"; \ - echo "Running SSE integration tests..."; \ - trap '$(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe || true' EXIT; \ - if [ -n "$(TEST_PATTERN)" ]; then \ - echo "๐Ÿ” Running tests matching pattern: $(TEST_PATTERN)"; \ - cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" ./test/s3/sse || exit 1; \ - else \ - echo "๐Ÿ” Running all SSE integration tests"; \ - cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSE.*Integration" ./test/s3/sse || exit 1; \ - fi; \ - echo "โœ… All tests completed successfully"; \ - $(MAKE) -C $(TEST_DIR) stop-seaweedfs-safe || true; \ - else \ - echo "โŒ Failed to start SeaweedFS cluster"; \ - echo "=== Server startup logs ==="; \ - tail -100 weed-test.log 2>/dev/null || echo "No startup log available"; \ - echo "=== System information ==="; \ - ps aux | grep -E "weed|make" | grep -v grep || echo "No relevant processes found"; \ - exit 1; \ - fi - -# CI-safe server startup that avoids process conflicts -start-seaweedfs-ci: check-binary - @echo "$(YELLOW)Starting SeaweedFS server for CI testing...$(NC)" - - # Create necessary directories - @mkdir -p /tmp/seaweedfs-test-sse-master - @mkdir -p /tmp/seaweedfs-test-sse-volume - @mkdir -p /tmp/seaweedfs-test-sse-filer - - # Clean up any old server logs - @rm -f /tmp/seaweedfs-sse-*.log || true - - # Start master server with volume size limit and explicit gRPC port - @echo "Starting master server..." - @nohup $(SEAWEEDFS_BINARY) master -port=$(MASTER_PORT) -port.grpc=$$(( $(MASTER_PORT) + 10000 )) -mdir=/tmp/seaweedfs-test-sse-master -volumeSizeLimitMB=$(VOLUME_MAX_SIZE_MB) -ip=127.0.0.1 > /tmp/seaweedfs-sse-master.log 2>&1 & - @sleep 3 - - # Start volume server with master HTTP port and increased capacity - @echo "Starting volume server..." - @nohup $(SEAWEEDFS_BINARY) volume -port=$(VOLUME_PORT) -mserver=127.0.0.1:$(MASTER_PORT) -dir=/tmp/seaweedfs-test-sse-volume -max=$(VOLUME_MAX_COUNT) -ip=127.0.0.1 > /tmp/seaweedfs-sse-volume.log 2>&1 & - @sleep 5 - - # Create S3 JSON configuration with KMS (Local provider) and basic identity for embedded S3 - @sed -e 's/ACCESS_KEY_PLACEHOLDER/$(ACCESS_KEY)/g' \ - -e 's/SECRET_KEY_PLACEHOLDER/$(SECRET_KEY)/g' \ - s3-config-template.json > /tmp/seaweedfs-s3.json - - # Start filer server with embedded S3 using the JSON config (with verbose logging) - @echo "Starting filer server with embedded S3..." - @AWS_ACCESS_KEY_ID=$(ACCESS_KEY) AWS_SECRET_ACCESS_KEY=$(SECRET_KEY) GLOG_v=4 nohup $(SEAWEEDFS_BINARY) filer -port=$(FILER_PORT) -port.grpc=$$(( $(FILER_PORT) + 10000 )) -master=127.0.0.1:$(MASTER_PORT) -dataCenter=defaultDataCenter -ip=127.0.0.1 -s3 -s3.port=$(S3_PORT) -s3.config=/tmp/seaweedfs-s3.json > /tmp/seaweedfs-sse-filer.log 2>&1 & - @sleep 5 - - # Wait for S3 service to be ready - use port-based checking for reliability - @echo "$(YELLOW)Waiting for S3 service to be ready...$(NC)" - @for i in $$(seq 1 20); do \ - if netstat -an 2>/dev/null | grep -q ":$(S3_PORT).*LISTEN" || \ - ss -an 2>/dev/null | grep -q ":$(S3_PORT).*LISTEN" || \ - lsof -i :$(S3_PORT) >/dev/null 2>&1; then \ - echo "$(GREEN)S3 service is listening on port $(S3_PORT)$(NC)"; \ - sleep 1; \ - break; \ - fi; \ - if [ $$i -eq 20 ]; then \ - echo "$(RED)S3 service failed to start within 20 seconds$(NC)"; \ - echo "=== Detailed Logs ==="; \ - echo "Master log:"; tail -30 /tmp/seaweedfs-sse-master.log || true; \ - echo "Volume log:"; tail -30 /tmp/seaweedfs-sse-volume.log || true; \ - echo "Filer log:"; tail -30 /tmp/seaweedfs-sse-filer.log || true; \ - echo "=== Port Status ==="; \ - netstat -an 2>/dev/null | grep ":$(S3_PORT)" || \ - ss -an 2>/dev/null | grep ":$(S3_PORT)" || \ - echo "No port listening on $(S3_PORT)"; \ - echo "=== Process Status ==="; \ - ps aux | grep -E "weed.*(filer|s3).*$(S3_PORT)" | grep -v grep || echo "No S3 process found"; \ - exit 1; \ - fi; \ - echo "Waiting for S3 service... ($$i/20)"; \ - sleep 1; \ - done - - # Additional wait for filer gRPC to be ready - @echo "$(YELLOW)Waiting for filer gRPC to be ready...$(NC)" - @sleep 2 - @echo "$(GREEN)SeaweedFS server started successfully for SSE testing$(NC)" - @echo "Master: http://localhost:$(MASTER_PORT)" - @echo "Volume: http://localhost:$(VOLUME_PORT)" - @echo "Filer: http://localhost:$(FILER_PORT)" - @echo "S3: http://localhost:$(S3_PORT)" - @echo "Volume Max Size: $(VOLUME_MAX_SIZE_MB)MB" - @echo "SSE-KMS Support: Enabled" - -# GitHub Actions compatible quick test subset -test-quick-with-server: build-weed - @echo "๐Ÿš€ Starting quick SSE tests with automated server management..." - @trap 'make stop-seaweedfs-safe || true' EXIT; \ - echo "Starting SeaweedFS cluster..."; \ - if make start-seaweedfs-ci > weed-test.log 2>&1; then \ - echo "โœ… SeaweedFS cluster started successfully"; \ - echo "Running quick SSE integration tests..."; \ - cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) -run "TestSSECIntegrationBasic|TestSSEKMSIntegrationBasic|TestSimpleSSECIntegration" ./test/s3/sse || exit 1; \ - echo "โœ… Quick tests completed successfully"; \ - make stop-seaweedfs-safe || true; \ - else \ - echo "โŒ Failed to start SeaweedFS cluster"; \ - echo "=== Server startup logs ==="; \ - tail -50 weed-test.log; \ - exit 1; \ - fi - -# Help target - extended version -help-extended: - @echo "Available targets:" - @echo " test - Run all SSE integration tests (requires running server)" - @echo " test-with-server - Run all tests with automatic server management (GitHub Actions compatible)" - @echo " test-quick-with-server - Run quick tests with automatic server management" - @echo " test-ssec - Run only SSE-C tests" - @echo " test-ssekms - Run only SSE-KMS tests" - @echo " test-copy - Run only copy operation tests" - @echo " test-multipart - Run only multipart upload tests" - @echo " benchmark - Run performance benchmarks" - @echo " perf - Run performance tests with various data sizes" - @echo " test-metadata-persistence - Test metadata persistence (catches filer bugs)" - @echo " build-weed - Build SeaweedFS binary" - @echo " check-binary - Check if SeaweedFS binary exists" - @echo " start-seaweedfs - Start SeaweedFS cluster" - @echo " start-seaweedfs-ci - Start SeaweedFS cluster (CI-safe version)" - @echo " stop-seaweedfs - Stop SeaweedFS cluster" - @echo " stop-seaweedfs-safe - Stop SeaweedFS cluster (CI-safe version)" - @echo " clean - Clean up test artifacts" - @echo " debug-logs - Show recent logs from all services" - @echo "" - @echo "Environment Variables:" - @echo " ACCESS_KEY - S3 access key (default: some_access_key1)" - @echo " SECRET_KEY - S3 secret key (default: some_secret_key1)" - @echo " KMS_KEY_ID - KMS key ID for SSE-KMS (default: test-key-123)" - @echo " KMS_TYPE - KMS type (default: local)" - @echo " VOLUME_MAX_SIZE_MB - Volume maximum size in MB (default: 50)" - @echo " TEST_TIMEOUT - Test timeout (default: 15m)" - -#################################################### -# KMS Integration Testing with OpenBao -#################################################### - -setup-openbao: - @echo "$(YELLOW)Setting up OpenBao for SSE-KMS testing...$(NC)" - @$(DOCKER_COMPOSE) up -d openbao - @sleep 10 - @echo "$(YELLOW)Configuring OpenBao...$(NC)" - @OPENBAO_ADDR=$(OPENBAO_ADDR) OPENBAO_TOKEN=$(OPENBAO_TOKEN) ./setup_openbao_sse.sh - @echo "$(GREEN)โœ… OpenBao setup complete!$(NC)" - -start-full-stack: setup-openbao - @echo "$(YELLOW)Starting full SeaweedFS + KMS stack...$(NC)" - @$(DOCKER_COMPOSE) up -d - @echo "$(YELLOW)Waiting for services to be ready...$(NC)" - @sleep 15 - @echo "$(GREEN)โœ… Full stack running!$(NC)" - @echo "OpenBao: $(OPENBAO_ADDR)" - @echo "S3 API: http://localhost:$(S3_PORT)" - -stop-full-stack: - @echo "$(YELLOW)Stopping full stack...$(NC)" - @$(DOCKER_COMPOSE) down - @echo "$(GREEN)โœ… Full stack stopped$(NC)" - -test-with-kms: start-full-stack - @echo "$(YELLOW)Running SSE integration tests with real KMS...$(NC)" - @sleep 5 # Extra time for KMS initialization - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) ./test/s3/sse -run "SSE.*Integration" || (echo "$(RED)Tests failed$(NC)" && make stop-full-stack && exit 1) - @echo "$(GREEN)โœ… All KMS integration tests passed!$(NC)" - @make stop-full-stack - -test-ssekms-integration: start-full-stack - @echo "$(YELLOW)Running SSE-KMS integration tests with OpenBao...$(NC)" - @sleep 5 # Extra time for KMS initialization - @cd $(SEAWEEDFS_ROOT) && go test -v -timeout=$(TEST_TIMEOUT) ./test/s3/sse -run "TestSSEKMS.*Integration" || (echo "$(RED)SSE-KMS tests failed$(NC)" && make stop-full-stack && exit 1) - @echo "$(GREEN)โœ… SSE-KMS integration tests passed!$(NC)" - @make stop-full-stack - -clean-kms: - @echo "$(YELLOW)Cleaning up KMS test environment...$(NC)" - @$(DOCKER_COMPOSE) down -v --remove-orphans || true - @docker system prune -f || true - @echo "$(GREEN)โœ… KMS environment cleaned up!$(NC)" - -status-kms: - @echo "$(YELLOW)KMS Environment Status:$(NC)" - @$(DOCKER_COMPOSE) ps - @echo "" - @echo "$(YELLOW)OpenBao Health:$(NC)" - @curl -s $(OPENBAO_ADDR)/v1/sys/health | jq '.' || echo "OpenBao not accessible" - @echo "" - @echo "$(YELLOW)S3 API Status:$(NC)" - @curl -s http://localhost:$(S3_PORT) || echo "S3 API not accessible" - -# Quick test with just basic KMS functionality -test-kms-quick: setup-openbao - @echo "$(YELLOW)Running quick KMS functionality test...$(NC)" - @cd ../../../test/kms && make dev-test - @echo "$(GREEN)โœ… Quick KMS test passed!$(NC)" - -# Development targets -dev-kms: setup-openbao - @echo "$(GREEN)Development environment ready$(NC)" - @echo "OpenBao: $(OPENBAO_ADDR)" - @echo "Token: $(OPENBAO_TOKEN)" - @echo "Use 'make test-ssekms-integration' to run tests" diff --git a/test/s3/sse/README.md b/test/s3/sse/README.md deleted file mode 100644 index 4f68984b4..000000000 --- a/test/s3/sse/README.md +++ /dev/null @@ -1,253 +0,0 @@ -# S3 Server-Side Encryption (SSE) Integration Tests - -This directory contains comprehensive integration tests for SeaweedFS S3 API Server-Side Encryption functionality. These tests validate the complete end-to-end encryption/decryption pipeline from S3 API requests through filer metadata storage. - -## Overview - -The SSE integration tests cover three main encryption methods: - -- **SSE-C (Customer-Provided Keys)**: Client provides encryption keys via request headers -- **SSE-KMS (Key Management Service)**: Server manages encryption keys through a KMS provider -- **SSE-S3 (Server-Managed Keys)**: Server automatically manages encryption keys - -### ๐Ÿ†• Real KMS Integration - -The tests now include **real KMS integration** with OpenBao, providing: -- โœ… Actual encryption/decryption operations (not mock keys) -- โœ… Multiple KMS keys for different security levels -- โœ… Per-bucket KMS configuration testing -- โœ… Performance benchmarking with real KMS operations - -See [README_KMS.md](README_KMS.md) for detailed KMS integration documentation. - -## Why Integration Tests Matter - -These integration tests were created to address a **critical gap in test coverage** that previously existed. While the SeaweedFS codebase had comprehensive unit tests for SSE components, it lacked integration tests that validated the complete request flow: - -``` -Client Request โ†’ S3 API โ†’ Filer Storage โ†’ Metadata Persistence โ†’ Retrieval โ†’ Decryption -``` - -### The Bug These Tests Would Have Caught - -A critical bug was discovered where: -- โœ… S3 API correctly encrypted data and sent metadata headers to the filer -- โŒ **Filer did not process SSE metadata headers**, losing all encryption metadata -- โŒ Objects could be encrypted but **never decrypted** (metadata was lost) - -**Unit tests passed** because they tested components in isolation, but the **integration was broken**. These integration tests specifically validate that: - -1. Encryption metadata is correctly sent to the filer -2. Filer properly processes and stores the metadata -3. Objects can be successfully retrieved and decrypted -4. Copy operations preserve encryption metadata -5. Multipart uploads maintain encryption consistency - -## Test Structure - -### Core Integration Tests - -#### Basic Functionality -- `TestSSECIntegrationBasic` - Basic SSE-C PUT/GET cycle -- `TestSSEKMSIntegrationBasic` - Basic SSE-KMS PUT/GET cycle - -#### Data Size Validation -- `TestSSECIntegrationVariousDataSizes` - SSE-C with various data sizes (0B to 1MB) -- `TestSSEKMSIntegrationVariousDataSizes` - SSE-KMS with various data sizes - -#### Object Copy Operations -- `TestSSECObjectCopyIntegration` - SSE-C object copying (key rotation, encryption changes) -- `TestSSEKMSObjectCopyIntegration` - SSE-KMS object copying - -#### Multipart Uploads -- `TestSSEMultipartUploadIntegration` - SSE multipart uploads for large objects - -#### Error Conditions -- `TestSSEErrorConditions` - Invalid keys, malformed requests, error handling - -### Performance Tests -- `BenchmarkSSECThroughput` - SSE-C performance benchmarking -- `BenchmarkSSEKMSThroughput` - SSE-KMS performance benchmarking - -## Running Tests - -### Prerequisites - -1. **Build SeaweedFS**: Ensure the `weed` binary is built and available in PATH - ```bash - cd /path/to/seaweedfs - make - ``` - -2. **Dependencies**: Tests use AWS SDK Go v2 and testify - these are handled by Go modules - -### Quick Test - -Run basic SSE integration tests: -```bash -make test-basic -``` - -### Comprehensive Testing - -Run all SSE integration tests: -```bash -make test -``` - -### Specific Test Categories - -```bash -make test-ssec # SSE-C tests only -make test-ssekms # SSE-KMS tests only -make test-copy # Copy operation tests -make test-multipart # Multipart upload tests -make test-errors # Error condition tests -``` - -### Performance Testing - -```bash -make benchmark # Performance benchmarks -make perf # Various data size performance tests -``` - -### KMS Integration Testing - -```bash -make setup-openbao # Set up OpenBao KMS -make test-with-kms # Run all SSE tests with real KMS -make test-ssekms-integration # Run SSE-KMS with OpenBao only -make clean-kms # Clean up KMS environment -``` - -### Development Testing - -```bash -make manual-start # Start SeaweedFS for manual testing -# ... run manual tests ... -make manual-stop # Stop and cleanup -``` - -## Test Configuration - -### Default Configuration - -The tests use these default settings: -- **S3 Endpoint**: `http://127.0.0.1:8333` -- **Access Key**: `some_access_key1` -- **Secret Key**: `some_secret_key1` -- **Region**: `us-east-1` -- **Bucket Prefix**: `test-sse-` - -### Custom Configuration - -Override defaults via environment variables: -```bash -S3_PORT=8444 FILER_PORT=8889 make test -``` - -### Test Environment - -Each test run: -1. Starts a complete SeaweedFS cluster (master, volume, filer, s3) -2. Configures KMS support for SSE-KMS tests -3. Creates temporary buckets with unique names -4. Runs tests with real HTTP requests -5. Cleans up all test artifacts - -## Test Data Coverage - -### Data Sizes Tested -- **0 bytes**: Empty files (edge case) -- **1 byte**: Minimal data -- **16 bytes**: Single AES block -- **31 bytes**: Just under two blocks -- **32 bytes**: Exactly two blocks -- **100 bytes**: Small file -- **1 KB**: Small text file -- **8 KB**: Medium file -- **64 KB**: Large file -- **1 MB**: Very large file - -### Encryption Key Scenarios -- **SSE-C**: Random 256-bit keys, key rotation, wrong keys -- **SSE-KMS**: Various key IDs, encryption contexts, bucket keys -- **Copy Operations**: Same key, different keys, encryption transitions - -## Critical Test Scenarios - -### Metadata Persistence Validation - -The integration tests specifically validate scenarios that would catch metadata storage bugs: - -```go -// 1. Upload with SSE-C -client.PutObject(..., SSECustomerKey: key) // โ† Metadata sent to filer - -// 2. Retrieve with SSE-C -client.GetObject(..., SSECustomerKey: key) // โ† Metadata retrieved from filer - -// 3. Verify decryption works -assert.Equal(originalData, decryptedData) // โ† Would fail if metadata lost -``` - -### Content-Length Validation - -Tests verify that Content-Length headers are correct, which would catch bugs related to IV handling: - -```go -assert.Equal(int64(originalSize), resp.ContentLength) // โ† Would catch IV-in-stream bugs -``` - -## Debugging - -### View Logs -```bash -make debug-logs # Show recent log entries -make debug-status # Show process and port status -``` - -### Manual Testing -```bash -make manual-start # Start SeaweedFS -# Test with S3 clients, curl, etc. -make manual-stop # Cleanup -``` - -## Integration Test Benefits - -These integration tests provide: - -1. **End-to-End Validation**: Complete request pipeline testing -2. **Metadata Persistence**: Validates filer storage/retrieval of encryption metadata -3. **Real Network Communication**: Uses actual HTTP requests and responses -4. **Production-Like Environment**: Full SeaweedFS cluster with all components -5. **Regression Protection**: Prevents critical integration bugs -6. **Performance Baselines**: Benchmarking for performance monitoring - -## Continuous Integration - -For CI/CD pipelines, use: -```bash -make ci-test # Quick tests suitable for CI -make stress # Stress testing for stability validation -``` - -## Key Differences from Unit Tests - -| Aspect | Unit Tests | Integration Tests | -|--------|------------|------------------| -| **Scope** | Individual functions | Complete request pipeline | -| **Dependencies** | Mocked/simulated | Real SeaweedFS cluster | -| **Network** | None | Real HTTP requests | -| **Storage** | In-memory | Real filer database | -| **Metadata** | Manual simulation | Actual storage/retrieval | -| **Speed** | Fast (milliseconds) | Slower (seconds) | -| **Coverage** | Component logic | System integration | - -## Conclusion - -These integration tests ensure that SeaweedFS SSE functionality works correctly in production-like environments. They complement the existing unit tests by validating that all components work together properly, providing confidence that encryption/decryption operations will succeed for real users. - -**Most importantly**, these tests would have immediately caught the critical filer metadata storage bug that was previously undetected, demonstrating the crucial importance of integration testing for distributed systems. diff --git a/test/s3/sse/README_KMS.md b/test/s3/sse/README_KMS.md deleted file mode 100644 index 9e396a7de..000000000 --- a/test/s3/sse/README_KMS.md +++ /dev/null @@ -1,245 +0,0 @@ -# SeaweedFS S3 SSE-KMS Integration with OpenBao - -This directory contains comprehensive integration tests for SeaweedFS S3 Server-Side Encryption with Key Management Service (SSE-KMS) using OpenBao as the KMS provider. - -## ๐ŸŽฏ Overview - -The integration tests verify that SeaweedFS can: -- โœ… **Encrypt data** using real KMS operations (not mock keys) -- โœ… **Decrypt data** correctly with proper key management -- โœ… **Handle multiple KMS keys** for different security levels -- โœ… **Support various data sizes** (0 bytes to 1MB+) -- โœ… **Maintain data integrity** through encryption/decryption cycles -- โœ… **Work with per-bucket KMS configuration** - -## ๐Ÿ—๏ธ Architecture - -``` -โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” -โ”‚ S3 Client โ”‚ โ”‚ SeaweedFS โ”‚ โ”‚ OpenBao โ”‚ -โ”‚ โ”‚ โ”‚ S3 API โ”‚ โ”‚ KMS โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ PUT /object โ”‚โ”€โ”€โ”€โ–ถโ”‚ SSE-KMS Handler โ”‚โ”€โ”€โ”€โ–ถโ”‚ GenerateDataKey โ”‚ -โ”‚ SSEKMSKeyId: โ”‚ โ”‚ โ”‚ โ”‚ Encrypt โ”‚ -โ”‚ "test-key-123" โ”‚ โ”‚ KMS Provider: โ”‚ โ”‚ Decrypt โ”‚ -โ”‚ โ”‚ โ”‚ OpenBao โ”‚ โ”‚ Transit Engine โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## ๐Ÿš€ Quick Start - -### 1. Set up OpenBao KMS -```bash -# Start OpenBao and create encryption keys -make setup-openbao -``` - -### 2. Run SSE-KMS Integration Tests -```bash -# Run all SSE-KMS tests with real KMS -make test-ssekms-integration - -# Or run the full integration suite -make test-with-kms -``` - -### 3. Check KMS Status -```bash -# Verify OpenBao and SeaweedFS are running -make status-kms -``` - -## ๐Ÿ“‹ Available Test Targets - -| Target | Description | -|--------|-------------| -| `setup-openbao` | Set up OpenBao KMS with test encryption keys | -| `test-with-kms` | Run all SSE tests with real KMS integration | -| `test-ssekms-integration` | Run only SSE-KMS tests with OpenBao | -| `start-full-stack` | Start SeaweedFS + OpenBao with Docker Compose | -| `stop-full-stack` | Stop all Docker services | -| `clean-kms` | Clean up KMS test environment | -| `status-kms` | Check status of KMS and S3 services | -| `dev-kms` | Set up development environment | - -## ๐Ÿ”‘ KMS Keys Created - -The setup automatically creates these encryption keys in OpenBao: - -| Key Name | Purpose | -|----------|---------| -| `test-key-123` | Basic SSE-KMS integration tests | -| `source-test-key-123` | Copy operation source key | -| `dest-test-key-456` | Copy operation destination key | -| `test-multipart-key` | Multipart upload tests | -| `test-kms-range-key` | Range request tests | -| `seaweedfs-test-key` | General SeaweedFS SSE tests | -| `bucket-default-key` | Default bucket encryption | -| `high-security-key` | High security scenarios | -| `performance-key` | Performance testing | - -## ๐Ÿงช Test Coverage - -### Basic SSE-KMS Operations -- โœ… PUT object with SSE-KMS encryption -- โœ… GET object with automatic decryption -- โœ… HEAD object metadata verification -- โœ… Multiple KMS key support -- โœ… Various data sizes (0B - 1MB) - -### Advanced Scenarios -- โœ… Large file encryption (chunked) -- โœ… Range requests with encrypted data -- โœ… Per-bucket KMS configuration -- โœ… Error handling for invalid keys -- โš ๏ธ Object copy operations (known issue) - -### Performance Testing -- โœ… KMS operation benchmarks -- โœ… Encryption/decryption latency -- โœ… Throughput with various data sizes - -## โš™๏ธ Configuration - -### S3 KMS Configuration (`s3_kms.json`) -```json -{ - "kms": { - "default_provider": "openbao-test", - "providers": { - "openbao-test": { - "type": "openbao", - "address": "http://openbao:8200", - "token": "root-token-for-testing", - "transit_path": "transit" - } - }, - "buckets": { - "test-sse-kms-basic": { - "provider": "openbao-test" - } - } - } -} -``` - -### Docker Compose Services -- **OpenBao**: KMS provider on port 8200 -- **SeaweedFS Master**: Metadata management on port 9333 -- **SeaweedFS Volume**: Data storage on port 8080 -- **SeaweedFS Filer**: S3 API with KMS on port 8333 - -## ๐ŸŽ›๏ธ Environment Variables - -| Variable | Default | Description | -|----------|---------|-------------| -| `OPENBAO_ADDR` | `http://127.0.0.1:8200` | OpenBao server address | -| `OPENBAO_TOKEN` | `root-token-for-testing` | OpenBao root token | -| `S3_PORT` | `8333` | S3 API port | -| `TEST_TIMEOUT` | `15m` | Test timeout duration | - -## ๐Ÿ“Š Example Test Run - -```bash -$ make test-ssekms-integration - -Setting up OpenBao for SSE-KMS testing... -โœ… OpenBao setup complete! -Starting full SeaweedFS + KMS stack... -โœ… Full stack running! -Running SSE-KMS integration tests with OpenBao... - -=== RUN TestSSEKMSIntegrationBasic -=== RUN TestSSEKMSOpenBaoIntegration -=== RUN TestSSEKMSOpenBaoAvailability ---- PASS: TestSSEKMSIntegrationBasic (0.26s) ---- PASS: TestSSEKMSOpenBaoIntegration (0.45s) ---- PASS: TestSSEKMSOpenBaoAvailability (0.12s) - -โœ… SSE-KMS integration tests passed! -``` - -## ๐Ÿ” Troubleshooting - -### OpenBao Not Starting -```bash -# Check OpenBao logs -docker-compose logs openbao - -# Verify port availability -lsof -ti :8200 -``` - -### SeaweedFS KMS Not Working -```bash -# Check filer logs for KMS errors -docker-compose logs seaweedfs-filer - -# Verify KMS configuration -curl http://localhost:8200/v1/sys/health -``` - -### Tests Failing -```bash -# Run specific test for debugging -cd ../../../ && go test -v -timeout=30s -run TestSSEKMSOpenBaoAvailability ./test/s3/sse - -# Check service status -make status-kms -``` - -## ๐Ÿšง Known Issues - -1. **Object Copy Operations**: Currently failing due to data corruption in copy logic (not KMS-related) -2. **Azure SDK Compatibility**: Azure KMS provider disabled due to SDK issues -3. **Network Timing**: Some tests may need longer startup delays in slow environments - -## ๐Ÿ”„ Development Workflow - -### 1. Development Setup -```bash -# Quick setup for development -make dev-kms - -# Run specific test during development -go test -v -run TestSSEKMSOpenBaoAvailability ./test/s3/sse -``` - -### 2. Integration Testing -```bash -# Full integration test cycle -make clean-kms # Clean environment -make test-with-kms # Run comprehensive tests -make clean-kms # Clean up -``` - -### 3. Performance Testing -```bash -# Run KMS performance benchmarks -cd ../kms && make test-benchmark -``` - -## ๐Ÿ“ˆ Performance Characteristics - -From benchmark results: -- **GenerateDataKey**: ~55,886 ns/op (~18,000 ops/sec) -- **Decrypt**: ~48,009 ns/op (~21,000 ops/sec) -- **End-to-end encryption**: Sub-second for files up to 1MB - -## ๐Ÿ”— Related Documentation - -- [SeaweedFS S3 API Documentation](https://github.com/seaweedfs/seaweedfs/wiki/Amazon-S3-API) -- [OpenBao Transit Secrets Engine](https://github.com/openbao/openbao/blob/main/website/content/docs/secrets/transit.md) -- [AWS S3 Server-Side Encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html) - -## ๐ŸŽ‰ Success Criteria - -The integration is considered successful when: -- โœ… OpenBao KMS provider initializes correctly -- โœ… Encryption keys are created and accessible -- โœ… Data can be encrypted and decrypted reliably -- โœ… Multiple key types work independently -- โœ… Performance meets production requirements -- โœ… Error cases are handled gracefully - -This integration demonstrates that SeaweedFS SSE-KMS is **production-ready** with real KMS providers! ๐Ÿš€ diff --git a/test/s3/sse/docker-compose.yml b/test/s3/sse/docker-compose.yml deleted file mode 100644 index 448788af4..000000000 --- a/test/s3/sse/docker-compose.yml +++ /dev/null @@ -1,100 +0,0 @@ -services: - # OpenBao server for KMS integration testing - openbao: - image: ghcr.io/openbao/openbao:latest - ports: - - "8200:8200" - environment: - - BAO_DEV_ROOT_TOKEN_ID=root-token-for-testing - - BAO_DEV_LISTEN_ADDRESS=0.0.0.0:8200 - - BAO_LOCAL_CONFIG={"backend":{"file":{"path":"/bao/data"}},"default_lease_ttl":"168h","max_lease_ttl":"720h","ui":true,"disable_mlock":true} - command: - - bao - - server - - -dev - - -dev-root-token-id=root-token-for-testing - - -dev-listen-address=0.0.0.0:8200 - volumes: - - openbao-data:/bao/data - healthcheck: - test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8200/v1/sys/health"] - interval: 5s - timeout: 3s - retries: 5 - start_period: 10s - networks: - - seaweedfs-sse-test - - # SeaweedFS Master - seaweedfs-master: - image: chrislusf/seaweedfs:latest - ports: - - "9333:9333" - - "19333:19333" - command: - - master - - -ip=seaweedfs-master - - -port=9333 - - -port.grpc=19333 - - -volumeSizeLimitMB=50 - - -mdir=/data - volumes: - - seaweedfs-master-data:/data - networks: - - seaweedfs-sse-test - - # SeaweedFS Volume Server - seaweedfs-volume: - image: chrislusf/seaweedfs:latest - ports: - - "8080:8080" - command: - - volume - - -mserver=seaweedfs-master:9333 - - -port=8080 - - -ip=seaweedfs-volume - - -publicUrl=seaweedfs-volume:8080 - - -dir=/data - - -max=100 - depends_on: - - seaweedfs-master - volumes: - - seaweedfs-volume-data:/data - networks: - - seaweedfs-sse-test - - # SeaweedFS Filer with S3 API and KMS configuration - seaweedfs-filer: - image: chrislusf/seaweedfs:latest - ports: - - "8888:8888" # Filer HTTP - - "18888:18888" # Filer gRPC - - "8333:8333" # S3 API - command: - - filer - - -master=seaweedfs-master:9333 - - -port=8888 - - -port.grpc=18888 - - -ip=seaweedfs-filer - - -s3 - - -s3.port=8333 - - -s3.config=/etc/seaweedfs/s3.json - depends_on: - - seaweedfs-master - - seaweedfs-volume - - openbao - volumes: - - ./s3_kms.json:/etc/seaweedfs/s3.json - - seaweedfs-filer-data:/data - networks: - - seaweedfs-sse-test - -volumes: - openbao-data: - seaweedfs-master-data: - seaweedfs-volume-data: - seaweedfs-filer-data: - -networks: - seaweedfs-sse-test: - name: seaweedfs-sse-test diff --git a/test/s3/sse/s3-config-template.json b/test/s3/sse/s3-config-template.json deleted file mode 100644 index 86fde486d..000000000 --- a/test/s3/sse/s3-config-template.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "identities": [ - { - "name": "admin", - "credentials": [ - { - "accessKey": "ACCESS_KEY_PLACEHOLDER", - "secretKey": "SECRET_KEY_PLACEHOLDER" - } - ], - "actions": ["Admin", "Read", "Write"] - } - ], - "kms": { - "default_provider": "local-dev", - "providers": { - "local-dev": { - "type": "local", - "enableOnDemandCreate": true - } - } - } -} diff --git a/test/s3/sse/s3_kms.json b/test/s3/sse/s3_kms.json deleted file mode 100644 index 8bf40eb03..000000000 --- a/test/s3/sse/s3_kms.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "identities": [ - { - "name": "admin", - "credentials": [ - { - "accessKey": "some_access_key1", - "secretKey": "some_secret_key1" - } - ], - "actions": ["Admin", "Read", "Write"] - } - ], - "kms": { - "default_provider": "openbao-test", - "providers": { - "openbao-test": { - "type": "openbao", - "address": "http://openbao:8200", - "token": "root-token-for-testing", - "transit_path": "transit", - "cache_enabled": true, - "cache_ttl": "1h" - } - }, - "buckets": { - "test-sse-kms-basic": { - "provider": "openbao-test" - }, - "test-sse-kms-multipart": { - "provider": "openbao-test" - }, - "test-sse-kms-copy": { - "provider": "openbao-test" - }, - "test-sse-kms-range": { - "provider": "openbao-test" - } - } - } -} diff --git a/test/s3/sse/s3_sse_integration_test.go b/test/s3/sse/s3_sse_integration_test.go deleted file mode 100644 index 0b3ff8f04..000000000 --- a/test/s3/sse/s3_sse_integration_test.go +++ /dev/null @@ -1,2267 +0,0 @@ -package sse_test - -import ( - "bytes" - "context" - "crypto/md5" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// assertDataEqual compares two byte slices using MD5 hashes and provides a concise error message -func assertDataEqual(t *testing.T, expected, actual []byte, msgAndArgs ...interface{}) { - if len(expected) == len(actual) && bytes.Equal(expected, actual) { - return // Data matches, no need to fail - } - - expectedMD5 := md5.Sum(expected) - actualMD5 := md5.Sum(actual) - - // Create preview of first 1K bytes for debugging - previewSize := 1024 - if len(expected) < previewSize { - previewSize = len(expected) - } - expectedPreview := expected[:previewSize] - - actualPreviewSize := previewSize - if len(actual) < actualPreviewSize { - actualPreviewSize = len(actual) - } - actualPreview := actual[:actualPreviewSize] - - // Format the assertion failure message - msg := fmt.Sprintf("Data mismatch:\nExpected length: %d, MD5: %x\nActual length: %d, MD5: %x\nExpected preview (first %d bytes): %x\nActual preview (first %d bytes): %x", - len(expected), expectedMD5, len(actual), actualMD5, - len(expectedPreview), expectedPreview, len(actualPreview), actualPreview) - - if len(msgAndArgs) > 0 { - if format, ok := msgAndArgs[0].(string); ok { - msg = fmt.Sprintf(format, msgAndArgs[1:]...) + "\n" + msg - } - } - - t.Error(msg) -} - -// min returns the minimum of two integers -func min(a, b int) int { - if a < b { - return a - } - return b -} - -// S3SSETestConfig holds configuration for S3 SSE integration tests -type S3SSETestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool -} - -// Default test configuration -var defaultConfig = &S3SSETestConfig{ - Endpoint: "http://127.0.0.1:8333", - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-sse-", - UseSSL: false, - SkipVerifySSL: true, -} - -// Test data sizes for comprehensive coverage -var testDataSizes = []int{ - 0, // Empty file - 1, // Single byte - 16, // One AES block - 31, // Just under two blocks - 32, // Exactly two blocks - 100, // Small file - 1024, // 1KB - 8192, // 8KB - 64 * 1024, // 64KB - 1024 * 1024, // 1MB -} - -// SSECKey represents an SSE-C encryption key for testing -type SSECKey struct { - Key []byte - KeyB64 string - KeyMD5 string -} - -// generateSSECKey generates a random SSE-C key for testing -func generateSSECKey() *SSECKey { - key := make([]byte, 32) // 256-bit key - rand.Read(key) - - keyB64 := base64.StdEncoding.EncodeToString(key) - keyMD5Hash := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(keyMD5Hash[:]) - - return &SSECKey{ - Key: key, - KeyB64: keyB64, - KeyMD5: keyMD5, - } -} - -// createS3Client creates an S3 client for testing -func createS3Client(ctx context.Context, cfg *S3SSETestConfig) (*s3.Client, error) { - customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: cfg.Endpoint, - HostnameImmutable: true, - }, nil - }) - - awsCfg, err := config.LoadDefaultConfig(ctx, - config.WithRegion(cfg.Region), - config.WithEndpointResolverWithOptions(customResolver), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - cfg.AccessKey, - cfg.SecretKey, - "", - )), - ) - if err != nil { - return nil, err - } - - return s3.NewFromConfig(awsCfg, func(o *s3.Options) { - o.UsePathStyle = true - }), nil -} - -// generateTestData generates random test data of specified size -func generateTestData(size int) []byte { - data := make([]byte, size) - rand.Read(data) - return data -} - -// createTestBucket creates a test bucket with a unique name -func createTestBucket(ctx context.Context, client *s3.Client, prefix string) (string, error) { - bucketName := fmt.Sprintf("%s%d", prefix, time.Now().UnixNano()) - - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - - return bucketName, err -} - -// cleanupTestBucket removes a test bucket and all its objects -func cleanupTestBucket(ctx context.Context, client *s3.Client, bucketName string) error { - // List and delete all objects first - listResp, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - }) - if err != nil { - return err - } - - if len(listResp.Contents) > 0 { - var objectIds []types.ObjectIdentifier - for _, obj := range listResp.Contents { - objectIds = append(objectIds, types.ObjectIdentifier{ - Key: obj.Key, - }) - } - - _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectIds, - }, - }) - if err != nil { - return err - } - } - - // Delete the bucket - _, err = client.DeleteBucket(ctx, &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - - return err -} - -// TestSSECIntegrationBasic tests basic SSE-C functionality end-to-end -func TestSSECIntegrationBasic(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssec-basic-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Generate test key - sseKey := generateSSECKey() - testData := []byte("Hello, SSE-C integration test!") - objectKey := "test-object-ssec" - - t.Run("PUT with SSE-C", func(t *testing.T) { - // Upload object with SSE-C - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload SSE-C object") - }) - - t.Run("GET with correct SSE-C key", func(t *testing.T) { - // Retrieve object with correct key - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to retrieve SSE-C object") - defer resp.Body.Close() - - // Verify decrypted content matches original - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved data") - assertDataEqual(t, testData, retrievedData, "Decrypted data does not match original") - - // Verify SSE headers are present - assert.Equal(t, "AES256", aws.ToString(resp.SSECustomerAlgorithm)) - assert.Equal(t, sseKey.KeyMD5, aws.ToString(resp.SSECustomerKeyMD5)) - }) - - t.Run("GET without SSE-C key should fail", func(t *testing.T) { - // Try to retrieve object without encryption key - should fail - _, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - assert.Error(t, err, "Should fail to retrieve SSE-C object without key") - }) - - t.Run("GET with wrong SSE-C key should fail", func(t *testing.T) { - wrongKey := generateSSECKey() - - // Try to retrieve object with wrong key - should fail - _, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(wrongKey.KeyB64), - SSECustomerKeyMD5: aws.String(wrongKey.KeyMD5), - }) - assert.Error(t, err, "Should fail to retrieve SSE-C object with wrong key") - }) -} - -// TestSSECIntegrationVariousDataSizes tests SSE-C with various data sizes -func TestSSECIntegrationVariousDataSizes(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssec-sizes-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - sseKey := generateSSECKey() - - for _, size := range testDataSizes { - t.Run(fmt.Sprintf("Size_%d_bytes", size), func(t *testing.T) { - testData := generateTestData(size) - objectKey := fmt.Sprintf("test-object-size-%d", size) - - // Upload with SSE-C - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload object of size %d", size) - - // Retrieve with SSE-C - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to retrieve object of size %d", size) - defer resp.Body.Close() - - // Verify content matches - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved data of size %d", size) - assertDataEqual(t, testData, retrievedData, "Data mismatch for size %d", size) - - // Verify content length is correct (this would have caught the IV-in-stream bug!) - assert.Equal(t, int64(size), aws.ToInt64(resp.ContentLength), - "Content length mismatch for size %d", size) - }) - } -} - -// TestSSEKMSIntegrationBasic tests basic SSE-KMS functionality end-to-end -func TestSSEKMSIntegrationBasic(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssekms-basic-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - testData := []byte("Hello, SSE-KMS integration test!") - objectKey := "test-object-ssekms" - kmsKeyID := "test-key-123" // Test key ID - - t.Run("PUT with SSE-KMS", func(t *testing.T) { - // Upload object with SSE-KMS - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to upload SSE-KMS object") - }) - - t.Run("GET SSE-KMS object", func(t *testing.T) { - // Retrieve object - no additional headers needed for GET - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve SSE-KMS object") - defer resp.Body.Close() - - // Verify decrypted content matches original - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved data") - assertDataEqual(t, testData, retrievedData, "Decrypted data does not match original") - - // Verify SSE-KMS headers are present - assert.Equal(t, types.ServerSideEncryptionAwsKms, resp.ServerSideEncryption) - assert.Equal(t, kmsKeyID, aws.ToString(resp.SSEKMSKeyId)) - }) - - t.Run("HEAD SSE-KMS object", func(t *testing.T) { - // Test HEAD operation to verify metadata - resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to HEAD SSE-KMS object") - - // Verify SSE-KMS metadata - assert.Equal(t, types.ServerSideEncryptionAwsKms, resp.ServerSideEncryption) - assert.Equal(t, kmsKeyID, aws.ToString(resp.SSEKMSKeyId)) - assert.Equal(t, int64(len(testData)), aws.ToInt64(resp.ContentLength)) - }) -} - -// TestSSEKMSIntegrationVariousDataSizes tests SSE-KMS with various data sizes -func TestSSEKMSIntegrationVariousDataSizes(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssekms-sizes-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - kmsKeyID := "test-key-size-tests" - - for _, size := range testDataSizes { - t.Run(fmt.Sprintf("Size_%d_bytes", size), func(t *testing.T) { - testData := generateTestData(size) - objectKey := fmt.Sprintf("test-object-kms-size-%d", size) - - // Upload with SSE-KMS - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to upload KMS object of size %d", size) - - // Retrieve with SSE-KMS - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve KMS object of size %d", size) - defer resp.Body.Close() - - // Verify content matches - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved KMS data of size %d", size) - assertDataEqual(t, testData, retrievedData, "Data mismatch for KMS size %d", size) - - // Verify content length is correct - assert.Equal(t, int64(size), aws.ToInt64(resp.ContentLength), - "Content length mismatch for KMS size %d", size) - }) - } -} - -// TestSSECObjectCopyIntegration tests SSE-C object copying end-to-end -func TestSSECObjectCopyIntegration(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssec-copy-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Generate test keys - sourceKey := generateSSECKey() - destKey := generateSSECKey() - testData := []byte("Hello, SSE-C copy integration test!") - - // Upload source object - sourceObjectKey := "source-object" - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceObjectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sourceKey.KeyB64), - SSECustomerKeyMD5: aws.String(sourceKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload source SSE-C object") - - t.Run("Copy SSE-C to SSE-C with different key", func(t *testing.T) { - destObjectKey := "dest-object-ssec" - copySource := fmt.Sprintf("%s/%s", bucketName, sourceObjectKey) - - // Copy object with different SSE-C key - _, err := client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - CopySource: aws.String(copySource), - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sourceKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sourceKey.KeyMD5), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(destKey.KeyB64), - SSECustomerKeyMD5: aws.String(destKey.KeyMD5), - }) - require.NoError(t, err, "Failed to copy SSE-C object") - - // Retrieve copied object with destination key - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(destKey.KeyB64), - SSECustomerKeyMD5: aws.String(destKey.KeyMD5), - }) - require.NoError(t, err, "Failed to retrieve copied SSE-C object") - defer resp.Body.Close() - - // Verify content matches original - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read copied data") - assertDataEqual(t, testData, retrievedData, "Copied data does not match original") - }) - - t.Run("Copy SSE-C to plain", func(t *testing.T) { - destObjectKey := "dest-object-plain" - copySource := fmt.Sprintf("%s/%s", bucketName, sourceObjectKey) - - // Copy SSE-C object to plain object - _, err := client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - CopySource: aws.String(copySource), - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sourceKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sourceKey.KeyMD5), - // No destination encryption headers = plain object - }) - require.NoError(t, err, "Failed to copy SSE-C to plain object") - - // Retrieve plain object (no encryption headers needed) - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - }) - require.NoError(t, err, "Failed to retrieve plain copied object") - defer resp.Body.Close() - - // Verify content matches original - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read plain copied data") - assertDataEqual(t, testData, retrievedData, "Plain copied data does not match original") - }) -} - -// TestSSEKMSObjectCopyIntegration tests SSE-KMS object copying end-to-end -func TestSSEKMSObjectCopyIntegration(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssekms-copy-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - testData := []byte("Hello, SSE-KMS copy integration test!") - sourceKeyID := "source-test-key-123" - destKeyID := "dest-test-key-456" - - // Upload source object with SSE-KMS - sourceObjectKey := "source-object-kms" - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceObjectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(sourceKeyID), - }) - require.NoError(t, err, "Failed to upload source SSE-KMS object") - - t.Run("Copy SSE-KMS with different key", func(t *testing.T) { - destObjectKey := "dest-object-kms" - copySource := fmt.Sprintf("%s/%s", bucketName, sourceObjectKey) - - // Copy object with different SSE-KMS key - _, err := client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - CopySource: aws.String(copySource), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(destKeyID), - }) - require.NoError(t, err, "Failed to copy SSE-KMS object") - - // Retrieve copied object - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destObjectKey), - }) - require.NoError(t, err, "Failed to retrieve copied SSE-KMS object") - defer resp.Body.Close() - - // Verify content matches original - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read copied KMS data") - assertDataEqual(t, testData, retrievedData, "Copied KMS data does not match original") - - // Verify new key ID is used - assert.Equal(t, destKeyID, aws.ToString(resp.SSEKMSKeyId)) - }) -} - -// TestSSEMultipartUploadIntegration tests SSE multipart uploads end-to-end -func TestSSEMultipartUploadIntegration(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-multipart-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("SSE-C Multipart Upload", func(t *testing.T) { - sseKey := generateSSECKey() - objectKey := "multipart-ssec-object" - - // Create multipart upload - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to create SSE-C multipart upload") - - uploadID := aws.ToString(createResp.UploadId) - - // Upload parts - partSize := 5 * 1024 * 1024 // 5MB - part1Data := generateTestData(partSize) - part2Data := generateTestData(partSize) - - // Upload part 1 - part1Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(1), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part1Data), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload part 1") - - // Upload part 2 - part2Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(2), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part2Data), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload part 2") - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: part1Resp.ETag, - PartNumber: aws.Int32(1), - }, - { - ETag: part2Resp.ETag, - PartNumber: aws.Int32(2), - }, - }, - }, - }) - require.NoError(t, err, "Failed to complete SSE-C multipart upload") - - // Retrieve and verify the complete object - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to retrieve multipart SSE-C object") - defer resp.Body.Close() - - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read multipart data") - - // Verify data matches concatenated parts - expectedData := append(part1Data, part2Data...) - assertDataEqual(t, expectedData, retrievedData, "Multipart data does not match original") - assert.Equal(t, int64(len(expectedData)), aws.ToInt64(resp.ContentLength), - "Multipart content length mismatch") - }) - - t.Run("SSE-KMS Multipart Upload", func(t *testing.T) { - kmsKeyID := "test-multipart-key" - objectKey := "multipart-kms-object" - - // Create multipart upload - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to create SSE-KMS multipart upload") - - uploadID := aws.ToString(createResp.UploadId) - - // Upload parts - partSize := 5 * 1024 * 1024 // 5MB - part1Data := generateTestData(partSize) - part2Data := generateTestData(partSize / 2) // Different size - - // Upload part 1 - part1Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(1), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part1Data), - }) - require.NoError(t, err, "Failed to upload KMS part 1") - - // Upload part 2 - part2Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(2), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part2Data), - }) - require.NoError(t, err, "Failed to upload KMS part 2") - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - { - ETag: part1Resp.ETag, - PartNumber: aws.Int32(1), - }, - { - ETag: part2Resp.ETag, - PartNumber: aws.Int32(2), - }, - }, - }, - }) - require.NoError(t, err, "Failed to complete SSE-KMS multipart upload") - - // Retrieve and verify the complete object - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve multipart SSE-KMS object") - defer resp.Body.Close() - - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read multipart KMS data") - - // Verify data matches concatenated parts - expectedData := append(part1Data, part2Data...) - - // Debug: Print some information about the sizes and first few bytes - t.Logf("Expected data size: %d, Retrieved data size: %d", len(expectedData), len(retrievedData)) - if len(expectedData) > 0 && len(retrievedData) > 0 { - t.Logf("Expected first 32 bytes: %x", expectedData[:min(32, len(expectedData))]) - t.Logf("Retrieved first 32 bytes: %x", retrievedData[:min(32, len(retrievedData))]) - } - - assertDataEqual(t, expectedData, retrievedData, "Multipart KMS data does not match original") - - // Verify KMS metadata - assert.Equal(t, types.ServerSideEncryptionAwsKms, resp.ServerSideEncryption) - assert.Equal(t, kmsKeyID, aws.ToString(resp.SSEKMSKeyId)) - }) -} - -// TestDebugSSEMultipart helps debug the multipart SSE-KMS data mismatch -func TestDebugSSEMultipart(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"debug-multipart-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - objectKey := "debug-multipart-object" - kmsKeyID := "test-multipart-key" - - // Create multipart upload - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to create SSE-KMS multipart upload") - - uploadID := aws.ToString(createResp.UploadId) - - // Upload two parts - exactly like the failing test - partSize := 5 * 1024 * 1024 // 5MB - part1Data := generateTestData(partSize) // 5MB - part2Data := generateTestData(partSize / 2) // 2.5MB - - // Upload part 1 - part1Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(1), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part1Data), - }) - require.NoError(t, err, "Failed to upload part 1") - - // Upload part 2 - part2Resp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(2), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(part2Data), - }) - require.NoError(t, err, "Failed to upload part 2") - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: []types.CompletedPart{ - {ETag: part1Resp.ETag, PartNumber: aws.Int32(1)}, - {ETag: part2Resp.ETag, PartNumber: aws.Int32(2)}, - }, - }, - }) - require.NoError(t, err, "Failed to complete multipart upload") - - // Retrieve the object - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve object") - defer resp.Body.Close() - - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved data") - - // Expected data - expectedData := append(part1Data, part2Data...) - - t.Logf("=== DATA COMPARISON DEBUG ===") - t.Logf("Expected size: %d, Retrieved size: %d", len(expectedData), len(retrievedData)) - - // Find exact point of divergence - divergePoint := -1 - minLen := len(expectedData) - if len(retrievedData) < minLen { - minLen = len(retrievedData) - } - - for i := 0; i < minLen; i++ { - if expectedData[i] != retrievedData[i] { - divergePoint = i - break - } - } - - if divergePoint >= 0 { - t.Logf("Data diverges at byte %d (0x%x)", divergePoint, divergePoint) - t.Logf("Expected: 0x%02x, Retrieved: 0x%02x", expectedData[divergePoint], retrievedData[divergePoint]) - - // Show context around divergence point - start := divergePoint - 10 - if start < 0 { - start = 0 - } - end := divergePoint + 10 - if end > minLen { - end = minLen - } - - t.Logf("Context [%d:%d]:", start, end) - t.Logf("Expected: %x", expectedData[start:end]) - t.Logf("Retrieved: %x", retrievedData[start:end]) - - // Identify chunk boundaries - if divergePoint >= 4194304 { - t.Logf("Divergence is in chunk 2 or 3 (after 4MB boundary)") - } - if divergePoint >= 5242880 { - t.Logf("Divergence is in chunk 3 (part 2, after 5MB boundary)") - } - } else if len(expectedData) != len(retrievedData) { - t.Logf("Data lengths differ but common part matches") - } else { - t.Logf("Data matches completely!") - } - - // Test completed successfully - t.Logf("SSE comparison test completed - data matches completely!") -} - -// TestSSEErrorConditions tests various error conditions in SSE -func TestSSEErrorConditions(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-errors-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("SSE-C Invalid Key Length", func(t *testing.T) { - invalidKey := base64.StdEncoding.EncodeToString([]byte("too-short")) - - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String("invalid-key-test"), - Body: strings.NewReader("test"), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(invalidKey), - SSECustomerKeyMD5: aws.String("invalid-md5"), - }) - assert.Error(t, err, "Should fail with invalid SSE-C key") - }) - - t.Run("SSE-KMS Invalid Key ID", func(t *testing.T) { - // Empty key ID should be rejected - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String("invalid-kms-key-test"), - Body: strings.NewReader("test"), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(""), // Invalid empty key - }) - assert.Error(t, err, "Should fail with empty KMS key ID") - }) -} - -// BenchmarkSSECThroughput benchmarks SSE-C throughput -func BenchmarkSSECThroughput(b *testing.B) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(b, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssec-bench-") - require.NoError(b, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - sseKey := generateSSECKey() - testData := generateTestData(1024 * 1024) // 1MB - - b.ResetTimer() - b.SetBytes(int64(len(testData))) - - for i := 0; i < b.N; i++ { - objectKey := fmt.Sprintf("bench-object-%d", i) - - // Upload - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(b, err, "Failed to upload in benchmark") - - // Download - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(b, err, "Failed to download in benchmark") - - _, err = io.ReadAll(resp.Body) - require.NoError(b, err, "Failed to read data in benchmark") - resp.Body.Close() - } -} - -// TestSSECRangeRequests tests SSE-C with HTTP Range requests -func TestSSECRangeRequests(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssec-range-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - sseKey := generateSSECKey() - // Create test data that's large enough for meaningful range tests - testData := generateTestData(2048) // 2KB - objectKey := "test-range-object" - - // Upload with SSE-C - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to upload SSE-C object") - - // Test various range requests - testCases := []struct { - name string - start int64 - end int64 - }{ - {"First 100 bytes", 0, 99}, - {"Middle 100 bytes", 500, 599}, - {"Last 100 bytes", int64(len(testData) - 100), int64(len(testData) - 1)}, - {"Single byte", 42, 42}, - {"Cross boundary", 15, 17}, // Test AES block boundary crossing - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Get range with SSE-C - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Range: aws.String(fmt.Sprintf("bytes=%d-%d", tc.start, tc.end)), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to get range %d-%d from SSE-C object", tc.start, tc.end) - defer resp.Body.Close() - - // Range requests should return partial content status - // Note: AWS SDK Go v2 doesn't expose HTTP status code directly in GetObject response - // The fact that we get a successful response with correct range data indicates 206 status - - // Read the range data - rangeData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read range data") - - // Verify content matches expected range - expectedLength := tc.end - tc.start + 1 - expectedData := testData[tc.start : tc.start+expectedLength] - assertDataEqual(t, expectedData, rangeData, "Range data mismatch for %s", tc.name) - - // Verify content length header - assert.Equal(t, expectedLength, aws.ToInt64(resp.ContentLength), "Content length mismatch for %s", tc.name) - - // Verify SSE headers are present - assert.Equal(t, "AES256", aws.ToString(resp.SSECustomerAlgorithm)) - assert.Equal(t, sseKey.KeyMD5, aws.ToString(resp.SSECustomerKeyMD5)) - }) - } -} - -// TestSSEKMSRangeRequests tests SSE-KMS with HTTP Range requests -func TestSSEKMSRangeRequests(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssekms-range-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - kmsKeyID := "test-range-key" - // Create test data that's large enough for meaningful range tests - testData := generateTestData(2048) // 2KB - objectKey := "test-kms-range-object" - - // Upload with SSE-KMS - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to upload SSE-KMS object") - - // Test various range requests - testCases := []struct { - name string - start int64 - end int64 - }{ - {"First 100 bytes", 0, 99}, - {"Middle 100 bytes", 500, 599}, - {"Last 100 bytes", int64(len(testData) - 100), int64(len(testData) - 1)}, - {"Single byte", 42, 42}, - {"Cross boundary", 15, 17}, // Test AES block boundary crossing - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Get range with SSE-KMS (no additional headers needed for GET) - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Range: aws.String(fmt.Sprintf("bytes=%d-%d", tc.start, tc.end)), - }) - require.NoError(t, err, "Failed to get range %d-%d from SSE-KMS object", tc.start, tc.end) - defer resp.Body.Close() - - // Range requests should return partial content status - // Note: AWS SDK Go v2 doesn't expose HTTP status code directly in GetObject response - // The fact that we get a successful response with correct range data indicates 206 status - - // Read the range data - rangeData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read range data") - - // Verify content matches expected range - expectedLength := tc.end - tc.start + 1 - expectedData := testData[tc.start : tc.start+expectedLength] - assertDataEqual(t, expectedData, rangeData, "Range data mismatch for %s", tc.name) - - // Verify content length header - assert.Equal(t, expectedLength, aws.ToInt64(resp.ContentLength), "Content length mismatch for %s", tc.name) - - // Verify SSE headers are present - assert.Equal(t, types.ServerSideEncryptionAwsKms, resp.ServerSideEncryption) - assert.Equal(t, kmsKeyID, aws.ToString(resp.SSEKMSKeyId)) - }) - } -} - -// BenchmarkSSEKMSThroughput benchmarks SSE-KMS throughput -func BenchmarkSSEKMSThroughput(b *testing.B) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(b, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"ssekms-bench-") - require.NoError(b, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - kmsKeyID := "bench-test-key" - testData := generateTestData(1024 * 1024) // 1MB - - b.ResetTimer() - b.SetBytes(int64(len(testData))) - - for i := 0; i < b.N; i++ { - objectKey := fmt.Sprintf("bench-kms-object-%d", i) - - // Upload - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(b, err, "Failed to upload in KMS benchmark") - - // Download - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(b, err, "Failed to download in KMS benchmark") - - _, err = io.ReadAll(resp.Body) - require.NoError(b, err, "Failed to read KMS data in benchmark") - resp.Body.Close() - } -} - -// TestSSES3IntegrationBasic tests basic SSE-S3 upload and download functionality -func TestSSES3IntegrationBasic(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-basic") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - testData := []byte("Hello, SSE-S3! This is a test of server-side encryption with S3-managed keys.") - objectKey := "test-sse-s3-object.txt" - - t.Run("SSE-S3 Upload", func(t *testing.T) { - // Upload object with SSE-S3 - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload object with SSE-S3") - }) - - t.Run("SSE-S3 Download", func(t *testing.T) { - // Download and verify object - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download SSE-S3 object") - - // Verify SSE-S3 headers in response - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Server-side encryption header mismatch") - - // Read and verify content - downloadedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read downloaded data") - resp.Body.Close() - - assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original") - }) - - t.Run("SSE-S3 HEAD Request", func(t *testing.T) { - // HEAD request should also return SSE headers - resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to HEAD SSE-S3 object") - - // Verify SSE-S3 headers - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in HEAD response") - }) -} - -// TestSSES3IntegrationVariousDataSizes tests SSE-S3 with various data sizes -func TestSSES3IntegrationVariousDataSizes(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-sizes") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Test various data sizes including edge cases - testSizes := []int{ - 0, // Empty file - 1, // Single byte - 16, // One AES block - 31, // Just under two blocks - 32, // Exactly two blocks - 100, // Small file - 1024, // 1KB - 8192, // 8KB - 65536, // 64KB - 1024 * 1024, // 1MB - } - - for _, size := range testSizes { - t.Run(fmt.Sprintf("Size_%d_bytes", size), func(t *testing.T) { - testData := generateTestData(size) - objectKey := fmt.Sprintf("test-sse-s3-%d.dat", size) - - // Upload with SSE-S3 - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload SSE-S3 object of size %d", size) - - // Download and verify - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download SSE-S3 object of size %d", size) - - // Verify encryption headers - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Missing SSE-S3 header for size %d", size) - - // Verify content - downloadedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read downloaded data for size %d", size) - resp.Body.Close() - - assertDataEqual(t, testData, downloadedData, "Data mismatch for size %d", size) - }) - } -} - -// TestSSES3WithUserMetadata tests SSE-S3 with user-defined metadata -func TestSSES3WithUserMetadata(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-metadata") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - testData := []byte("SSE-S3 with custom metadata") - objectKey := "test-object-with-metadata.txt" - - userMetadata := map[string]string{ - "author": "test-user", - "version": "1.0", - "environment": "test", - } - - t.Run("Upload with Metadata", func(t *testing.T) { - // Upload object with SSE-S3 and user metadata - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - Metadata: userMetadata, - }) - require.NoError(t, err, "Failed to upload object with SSE-S3 and metadata") - }) - - t.Run("Verify Metadata and Encryption", func(t *testing.T) { - // HEAD request to check metadata and encryption - resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to HEAD SSE-S3 object with metadata") - - // Verify SSE-S3 headers - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing with metadata") - - // Verify user metadata - for key, expectedValue := range userMetadata { - actualValue, exists := resp.Metadata[key] - assert.True(t, exists, "Metadata key %s not found", key) - assert.Equal(t, expectedValue, actualValue, "Metadata value mismatch for key %s", key) - } - }) - - t.Run("Download and Verify Content", func(t *testing.T) { - // Download and verify content - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download SSE-S3 object with metadata") - - // Verify SSE-S3 headers - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in GET response") - - // Verify content - downloadedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read downloaded data") - resp.Body.Close() - - assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original") - }) -} - -// TestSSES3RangeRequests tests SSE-S3 with HTTP range requests -func TestSSES3RangeRequests(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-range") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Create test data large enough to ensure multipart storage - testData := generateTestData(1024 * 1024) // 1MB to ensure multipart chunking - objectKey := "test-sse-s3-range.dat" - - // Upload object with SSE-S3 - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload SSE-S3 object for range testing") - - testCases := []struct { - name string - rangeHeader string - expectedStart int - expectedEnd int - }{ - {"First 100 bytes", "bytes=0-99", 0, 99}, - {"Middle range", "bytes=100000-199999", 100000, 199999}, - {"Last 100 bytes", "bytes=1048476-1048575", 1048476, 1048575}, - {"From offset to end", "bytes=500000-", 500000, len(testData) - 1}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Request range - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Range: aws.String(tc.rangeHeader), - }) - require.NoError(t, err, "Failed to get range %s", tc.rangeHeader) - - // Verify SSE-S3 headers are present in range response - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "SSE-S3 header missing in range response") - - // Read range data - rangeData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read range data") - resp.Body.Close() - - // Calculate expected data - endIndex := tc.expectedEnd - if tc.expectedEnd >= len(testData) { - endIndex = len(testData) - 1 - } - expectedData := testData[tc.expectedStart : endIndex+1] - - // Verify range data - assertDataEqual(t, expectedData, rangeData, "Range data mismatch for %s", tc.rangeHeader) - }) - } -} - -// TestSSES3BucketDefaultEncryption tests bucket-level default encryption with SSE-S3 -func TestSSES3BucketDefaultEncryption(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-default") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("Set Bucket Default Encryption", func(t *testing.T) { - // Set bucket encryption configuration - _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{ - Bucket: aws.String(bucketName), - ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ - Rules: []types.ServerSideEncryptionRule{ - { - ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{ - SSEAlgorithm: types.ServerSideEncryptionAes256, - }, - }, - }, - }, - }) - require.NoError(t, err, "Failed to set bucket default encryption") - }) - - t.Run("Upload Object Without Encryption Headers", func(t *testing.T) { - testData := []byte("This object should be automatically encrypted with SSE-S3 due to bucket default policy.") - objectKey := "test-default-encrypted-object.txt" - - // Upload object WITHOUT any encryption headers - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - // No ServerSideEncryption specified - should use bucket default - }) - require.NoError(t, err, "Failed to upload object without encryption headers") - - // Download and verify it was automatically encrypted - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download object") - - // Verify SSE-S3 headers are present (indicating automatic encryption) - assert.Equal(t, types.ServerSideEncryptionAes256, resp.ServerSideEncryption, "Object should have been automatically encrypted with SSE-S3") - - // Verify content is correct (decryption works) - downloadedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read downloaded data") - resp.Body.Close() - - assertDataEqual(t, testData, downloadedData, "Downloaded data doesn't match original") - }) - - t.Run("Get Bucket Encryption Configuration", func(t *testing.T) { - // Verify we can retrieve the bucket encryption configuration - resp, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Failed to get bucket encryption configuration") - - require.Len(t, resp.ServerSideEncryptionConfiguration.Rules, 1, "Should have one encryption rule") - rule := resp.ServerSideEncryptionConfiguration.Rules[0] - assert.Equal(t, types.ServerSideEncryptionAes256, rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm, "Encryption algorithm should be AES256") - }) - - t.Run("Delete Bucket Encryption Configuration", func(t *testing.T) { - // Remove bucket encryption configuration - _, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Failed to delete bucket encryption configuration") - - // Verify it's removed by trying to get it (should fail) - _, err = client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{ - Bucket: aws.String(bucketName), - }) - require.Error(t, err, "Getting bucket encryption should fail after deletion") - }) - - t.Run("Upload After Removing Default Encryption", func(t *testing.T) { - testData := []byte("This object should NOT be encrypted after removing bucket default.") - objectKey := "test-no-default-encryption.txt" - - // Upload object without encryption headers (should not be encrypted now) - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - }) - require.NoError(t, err, "Failed to upload object") - - // Verify it's NOT encrypted - resp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to HEAD object") - - // ServerSideEncryption should be empty/nil when no encryption is applied - assert.Empty(t, resp.ServerSideEncryption, "Object should not be encrypted after removing bucket default") - }) -} - -// TestSSES3MultipartUploads tests SSE-S3 multipart upload functionality -func TestSSES3MultipartUploads(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-s3-multipart-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("Large_File_Multipart_Upload", func(t *testing.T) { - objectKey := "test-sse-s3-multipart-large.dat" - // Create 10MB test data to ensure multipart upload - testData := generateTestData(10 * 1024 * 1024) - - // Upload with SSE-S3 - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "SSE-S3 multipart upload failed") - - // Verify encryption headers - headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to head object") - - assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3 encryption") - - // Download and verify content - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download SSE-S3 multipart object") - defer getResp.Body.Close() - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read downloaded data") - - assert.Equal(t, testData, downloadedData, "SSE-S3 multipart upload data should match") - - // Test range requests on multipart SSE-S3 object - t.Run("Range_Request_On_Multipart", func(t *testing.T) { - start := int64(1024 * 1024) // 1MB offset - end := int64(2*1024*1024 - 1) // 2MB - 1 - expectedLength := end - start + 1 - - rangeResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Range: aws.String(fmt.Sprintf("bytes=%d-%d", start, end)), - }) - require.NoError(t, err, "Failed to get range from SSE-S3 multipart object") - defer rangeResp.Body.Close() - - rangeData, err := io.ReadAll(rangeResp.Body) - require.NoError(t, err, "Failed to read range data") - - assert.Equal(t, expectedLength, int64(len(rangeData)), "Range length should match") - - // Verify range content matches original data - expectedRange := testData[start : end+1] - assert.Equal(t, expectedRange, rangeData, "Range content should match for SSE-S3 multipart object") - }) - }) - - t.Run("Explicit_Multipart_Upload_API", func(t *testing.T) { - objectKey := "test-sse-s3-explicit-multipart.dat" - testData := generateTestData(15 * 1024 * 1024) // 15MB - - // Create multipart upload with SSE-S3 - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to create SSE-S3 multipart upload") - - uploadID := *createResp.UploadId - var parts []types.CompletedPart - - // Upload parts (5MB each, except the last part) - partSize := 5 * 1024 * 1024 - for i := 0; i < len(testData); i += partSize { - partNumber := int32(len(parts) + 1) - endIdx := i + partSize - if endIdx > len(testData) { - endIdx = len(testData) - } - partData := testData[i:endIdx] - - uploadPartResp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(partNumber), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(partData), - }) - require.NoError(t, err, "Failed to upload part %d", partNumber) - - parts = append(parts, types.CompletedPart{ - ETag: uploadPartResp.ETag, - PartNumber: aws.Int32(partNumber), - }) - } - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: parts, - }, - }) - require.NoError(t, err, "Failed to complete SSE-S3 multipart upload") - - // Verify the completed object - headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to head completed multipart object") - - assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3 encryption on completed multipart object") - - // Download and verify content - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download completed SSE-S3 multipart object") - defer getResp.Body.Close() - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read downloaded data") - - assert.Equal(t, testData, downloadedData, "Explicit SSE-S3 multipart upload data should match") - }) -} - -// TestCrossSSECopy tests copying objects between different SSE encryption types -func TestCrossSSECopy(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-cross-copy-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Test data - testData := []byte("Cross-SSE copy test data") - - // Generate proper SSE-C key - sseKey := generateSSECKey() - - t.Run("SSE-S3_to_Unencrypted", func(t *testing.T) { - sourceKey := "source-sse-s3-obj" - destKey := "dest-unencrypted-obj" - - // Upload with SSE-S3 - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "SSE-S3 upload failed") - - // Copy to unencrypted - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - }) - require.NoError(t, err, "Copy SSE-S3 to unencrypted failed") - - // Verify destination is unencrypted and content matches - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err, "GET failed") - defer getResp.Body.Close() - - assert.Empty(t, getResp.ServerSideEncryption, "Should be unencrypted") - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Read failed") - assertDataEqual(t, testData, downloadedData) - }) - - t.Run("Unencrypted_to_SSE-S3", func(t *testing.T) { - sourceKey := "source-unencrypted-obj" - destKey := "dest-sse-s3-obj" - - // Upload unencrypted - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceKey), - Body: bytes.NewReader(testData), - }) - require.NoError(t, err, "Unencrypted upload failed") - - // Copy to SSE-S3 - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Copy unencrypted to SSE-S3 failed") - - // Verify destination is SSE-S3 encrypted and content matches - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err, "GET failed") - defer getResp.Body.Close() - - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Expected SSE-S3") - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Read failed") - assertDataEqual(t, testData, downloadedData) - }) - - t.Run("SSE-C_to_SSE-S3", func(t *testing.T) { - sourceKey := "source-sse-c-obj" - destKey := "dest-sse-s3-obj" - - // Upload with SSE-C - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "SSE-C upload failed") - - // Copy to SSE-S3 - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sseKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Copy SSE-C to SSE-S3 failed") - - // Verify destination encryption and content - headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err, "HEAD failed") - assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "Expected SSE-S3") - - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - }) - require.NoError(t, err, "GET failed") - defer getResp.Body.Close() - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Read failed") - assertDataEqual(t, testData, downloadedData) - }) - - t.Run("SSE-S3_to_SSE-C", func(t *testing.T) { - sourceKey := "source-sse-s3-obj" - destKey := "dest-sse-c-obj" - - // Upload with SSE-S3 - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(sourceKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload SSE-S3 source object") - - // Copy to SSE-C - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Copy SSE-S3 to SSE-C failed") - - // Verify destination encryption and content - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "GET with SSE-C failed") - defer getResp.Body.Close() - - assert.Equal(t, "AES256", aws.ToString(getResp.SSECustomerAlgorithm), "Expected SSE-C") - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Read failed") - assertDataEqual(t, testData, downloadedData) - }) -} - -// REGRESSION TESTS FOR CRITICAL BUGS FIXED -// These tests specifically target the IV storage bugs that were fixed - -// TestSSES3IVStorageRegression tests that IVs are properly stored for explicit SSE-S3 uploads -// This test would have caught the critical bug where IVs were discarded in putToFiler -func TestSSES3IVStorageRegression(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-iv-regression") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("Explicit SSE-S3 IV Storage and Retrieval", func(t *testing.T) { - testData := []byte("This tests the critical IV storage bug that was fixed - the IV must be stored on the key object for decryption to work.") - objectKey := "explicit-sse-s3-iv-test.txt" - - // Upload with explicit SSE-S3 header (this used to discard the IV) - putResp, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload explicit SSE-S3 object") - - // Verify PUT response has SSE-S3 headers - assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "PUT response should indicate SSE-S3") - - // Critical test: Download and decrypt the object - // This would have FAILED with the original bug because IV was discarded - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download explicit SSE-S3 object") - - // Verify GET response has SSE-S3 headers - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET response should indicate SSE-S3") - - // This is the critical test - verify data can be decrypted correctly - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read decrypted data") - getResp.Body.Close() - - // This assertion would have FAILED with the original bug - assertDataEqual(t, testData, downloadedData, "CRITICAL: Decryption failed - IV was not stored properly") - }) - - t.Run("Multiple Explicit SSE-S3 Objects", func(t *testing.T) { - // Test multiple objects to ensure each gets its own unique IV - numObjects := 5 - testDataSet := make([][]byte, numObjects) - objectKeys := make([]string, numObjects) - - // Upload multiple objects with explicit SSE-S3 - for i := 0; i < numObjects; i++ { - testDataSet[i] = []byte(fmt.Sprintf("Test data for object %d - verifying unique IV storage", i)) - objectKeys[i] = fmt.Sprintf("explicit-sse-s3-multi-%d.txt", i) - - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKeys[i]), - Body: bytes.NewReader(testDataSet[i]), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload explicit SSE-S3 object %d", i) - } - - // Download and verify each object decrypts correctly - for i := 0; i < numObjects; i++ { - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKeys[i]), - }) - require.NoError(t, err, "Failed to download explicit SSE-S3 object %d", i) - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read decrypted data for object %d", i) - getResp.Body.Close() - - assertDataEqual(t, testDataSet[i], downloadedData, "Decryption failed for object %d - IV not unique/stored", i) - } - }) -} - -// TestSSES3BucketDefaultIVStorageRegression tests bucket default SSE-S3 IV storage -// This test would have caught the critical bug where IVs were not stored on key objects in bucket defaults -func TestSSES3BucketDefaultIVStorageRegression(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-default-iv-regression") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Set bucket default encryption to SSE-S3 - _, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{ - Bucket: aws.String(bucketName), - ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ - Rules: []types.ServerSideEncryptionRule{ - { - ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{ - SSEAlgorithm: types.ServerSideEncryptionAes256, - }, - }, - }, - }, - }) - require.NoError(t, err, "Failed to set bucket default SSE-S3 encryption") - - t.Run("Bucket Default SSE-S3 IV Storage", func(t *testing.T) { - testData := []byte("This tests the bucket default SSE-S3 IV storage bug - IV must be stored on key object for decryption.") - objectKey := "bucket-default-sse-s3-iv-test.txt" - - // Upload WITHOUT encryption headers - should use bucket default SSE-S3 - // This used to fail because applySSES3DefaultEncryption didn't store IV on key - putResp, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - // No ServerSideEncryption specified - should use bucket default - }) - require.NoError(t, err, "Failed to upload object for bucket default SSE-S3") - - // Verify bucket default encryption was applied - assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "PUT response should show bucket default SSE-S3") - - // Critical test: Download and decrypt the object - // This would have FAILED with the original bug because IV wasn't stored on key object - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download bucket default SSE-S3 object") - - // Verify GET response shows SSE-S3 was applied - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET response should show SSE-S3") - - // This is the critical test - verify decryption works - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read decrypted data") - getResp.Body.Close() - - // This assertion would have FAILED with the original bucket default bug - assertDataEqual(t, testData, downloadedData, "CRITICAL: Bucket default SSE-S3 decryption failed - IV not stored on key object") - }) - - t.Run("Multiple Bucket Default Objects", func(t *testing.T) { - // Test multiple objects with bucket default encryption - numObjects := 3 - testDataSet := make([][]byte, numObjects) - objectKeys := make([]string, numObjects) - - // Upload multiple objects without encryption headers - for i := 0; i < numObjects; i++ { - testDataSet[i] = []byte(fmt.Sprintf("Bucket default test data %d - verifying IV storage works", i)) - objectKeys[i] = fmt.Sprintf("bucket-default-multi-%d.txt", i) - - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKeys[i]), - Body: bytes.NewReader(testDataSet[i]), - // No encryption headers - bucket default should apply - }) - require.NoError(t, err, "Failed to upload bucket default object %d", i) - } - - // Verify each object was encrypted and can be decrypted - for i := 0; i < numObjects; i++ { - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKeys[i]), - }) - require.NoError(t, err, "Failed to download bucket default object %d", i) - - // Verify SSE-S3 was applied by bucket default - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Object %d should be SSE-S3 encrypted", i) - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read decrypted data for object %d", i) - getResp.Body.Close() - - assertDataEqual(t, testDataSet[i], downloadedData, "Bucket default SSE-S3 decryption failed for object %d", i) - } - }) -} - -// TestSSES3EdgeCaseRegression tests edge cases that could cause IV storage issues -func TestSSES3EdgeCaseRegression(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-edge-regression") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("Empty Object SSE-S3", func(t *testing.T) { - // Test edge case: empty objects with SSE-S3 (IV storage still required) - objectKey := "empty-sse-s3-object" - - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader([]byte{}), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload empty SSE-S3 object") - - // Verify empty object can be retrieved (IV must be stored even for empty objects) - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download empty SSE-S3 object") - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read empty decrypted data") - getResp.Body.Close() - - assert.Equal(t, []byte{}, downloadedData, "Empty object content mismatch") - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Empty object should be SSE-S3 encrypted") - }) - - t.Run("Large Object SSE-S3", func(t *testing.T) { - // Test large objects to ensure IV storage works for chunked uploads - largeData := generateTestData(1024 * 1024) // 1MB - objectKey := "large-sse-s3-object" - - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(largeData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - }) - require.NoError(t, err, "Failed to upload large SSE-S3 object") - - // Verify large object can be decrypted (IV must be stored properly) - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download large SSE-S3 object") - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read large decrypted data") - getResp.Body.Close() - - assertDataEqual(t, largeData, downloadedData, "Large object decryption failed - IV storage issue") - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Large object should be SSE-S3 encrypted") - }) -} - -// TestSSES3ErrorHandlingRegression tests error handling improvements that were added -func TestSSES3ErrorHandlingRegression(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-error-regression") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("SSE-S3 With Other Valid Operations", func(t *testing.T) { - // Ensure SSE-S3 works with other S3 operations (metadata, tagging, etc.) - testData := []byte("Testing SSE-S3 with metadata and other operations") - objectKey := "sse-s3-with-metadata" - - // Upload with SSE-S3 and metadata - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAes256, - Metadata: map[string]string{ - "test-key": "test-value", - "purpose": "regression-test", - }, - }) - require.NoError(t, err, "Failed to upload SSE-S3 object with metadata") - - // HEAD request to verify metadata and encryption - headResp, err := client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to HEAD SSE-S3 object") - - assert.Equal(t, types.ServerSideEncryptionAes256, headResp.ServerSideEncryption, "HEAD should show SSE-S3") - assert.Equal(t, "test-value", headResp.Metadata["test-key"], "Metadata should be preserved") - assert.Equal(t, "regression-test", headResp.Metadata["purpose"], "Metadata should be preserved") - - // GET to verify decryption still works with metadata - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to GET SSE-S3 object") - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read decrypted data") - getResp.Body.Close() - - assertDataEqual(t, testData, downloadedData, "SSE-S3 with metadata decryption failed") - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "GET should show SSE-S3") - assert.Equal(t, "test-value", getResp.Metadata["test-key"], "GET metadata should be preserved") - }) -} - -// TestSSES3FunctionalityCompletion tests that SSE-S3 feature is now fully functional -func TestSSES3FunctionalityCompletion(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, "sse-s3-completion") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("All SSE-S3 Scenarios Work", func(t *testing.T) { - scenarios := []struct { - name string - setupBucket func() error - encryption *types.ServerSideEncryption - expectSSES3 bool - }{ - { - name: "Explicit SSE-S3 Header", - setupBucket: func() error { return nil }, - encryption: &[]types.ServerSideEncryption{types.ServerSideEncryptionAes256}[0], - expectSSES3: true, - }, - { - name: "Bucket Default SSE-S3", - setupBucket: func() error { - _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{ - Bucket: aws.String(bucketName), - ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ - Rules: []types.ServerSideEncryptionRule{ - { - ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{ - SSEAlgorithm: types.ServerSideEncryptionAes256, - }, - }, - }, - }, - }) - return err - }, - encryption: nil, - expectSSES3: true, - }, - } - - for i, scenario := range scenarios { - t.Run(scenario.name, func(t *testing.T) { - // Setup bucket if needed - err := scenario.setupBucket() - require.NoError(t, err, "Failed to setup bucket for scenario %s", scenario.name) - - testData := []byte(fmt.Sprintf("Test data for scenario: %s", scenario.name)) - objectKey := fmt.Sprintf("completion-test-%d", i) - - // Upload object - putInput := &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - } - if scenario.encryption != nil { - putInput.ServerSideEncryption = *scenario.encryption - } - - putResp, err := client.PutObject(ctx, putInput) - require.NoError(t, err, "Failed to upload object for scenario %s", scenario.name) - - if scenario.expectSSES3 { - assert.Equal(t, types.ServerSideEncryptionAes256, putResp.ServerSideEncryption, "Should use SSE-S3 for %s", scenario.name) - } - - // Download and verify - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to download object for scenario %s", scenario.name) - - if scenario.expectSSES3 { - assert.Equal(t, types.ServerSideEncryptionAes256, getResp.ServerSideEncryption, "Should return SSE-S3 for %s", scenario.name) - } - - downloadedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read data for scenario %s", scenario.name) - getResp.Body.Close() - - // This is the ultimate test - decryption must work - assertDataEqual(t, testData, downloadedData, "Decryption failed for scenario %s", scenario.name) - - // Clean up bucket encryption for next scenario - client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{ - Bucket: aws.String(bucketName), - }) - }) - } - }) -} diff --git a/test/s3/sse/s3_sse_multipart_copy_test.go b/test/s3/sse/s3_sse_multipart_copy_test.go deleted file mode 100644 index 0b1e4a24b..000000000 --- a/test/s3/sse/s3_sse_multipart_copy_test.go +++ /dev/null @@ -1,373 +0,0 @@ -package sse_test - -import ( - "bytes" - "context" - "crypto/md5" - "fmt" - "io" - "testing" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/require" -) - -// TestSSEMultipartCopy tests copying multipart encrypted objects -func TestSSEMultipartCopy(t *testing.T) { - ctx := context.Background() - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-multipart-copy-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Generate test data for multipart upload (7.5MB) - originalData := generateTestData(7*1024*1024 + 512*1024) - originalMD5 := fmt.Sprintf("%x", md5.Sum(originalData)) - - t.Run("Copy SSE-C Multipart Object", func(t *testing.T) { - testSSECMultipartCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) - - t.Run("Copy SSE-KMS Multipart Object", func(t *testing.T) { - testSSEKMSMultipartCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) - - t.Run("Copy SSE-C to SSE-KMS", func(t *testing.T) { - testSSECToSSEKMSCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) - - t.Run("Copy SSE-KMS to SSE-C", func(t *testing.T) { - testSSEKMSToSSECCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) - - t.Run("Copy SSE-C to Unencrypted", func(t *testing.T) { - testSSECToUnencryptedCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) - - t.Run("Copy SSE-KMS to Unencrypted", func(t *testing.T) { - testSSEKMSToUnencryptedCopy(t, ctx, client, bucketName, originalData, originalMD5) - }) -} - -// testSSECMultipartCopy tests copying SSE-C multipart objects with same key -func testSSECMultipartCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - sseKey := generateSSECKey() - - // Upload original multipart SSE-C object - sourceKey := "source-ssec-multipart-object" - err := uploadMultipartSSECObject(ctx, client, bucketName, sourceKey, originalData, *sseKey) - require.NoError(t, err, "Failed to upload source SSE-C multipart object") - - // Copy with same SSE-C key - destKey := "dest-ssec-multipart-object" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - // Copy source SSE-C headers - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sseKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - // Destination SSE-C headers (same key) - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to copy SSE-C multipart object") - - // Verify copied object - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, sseKey, nil) -} - -// testSSEKMSMultipartCopy tests copying SSE-KMS multipart objects with same key -func testSSEKMSMultipartCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - // Upload original multipart SSE-KMS object - sourceKey := "source-ssekms-multipart-object" - err := uploadMultipartSSEKMSObject(ctx, client, bucketName, sourceKey, "test-multipart-key", originalData) - require.NoError(t, err, "Failed to upload source SSE-KMS multipart object") - - // Copy with same SSE-KMS key - destKey := "dest-ssekms-multipart-object" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String("test-multipart-key"), - BucketKeyEnabled: aws.Bool(false), - }) - require.NoError(t, err, "Failed to copy SSE-KMS multipart object") - - // Verify copied object - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, nil, aws.String("test-multipart-key")) -} - -// testSSECToSSEKMSCopy tests copying SSE-C multipart objects to SSE-KMS -func testSSECToSSEKMSCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - sseKey := generateSSECKey() - - // Upload original multipart SSE-C object - sourceKey := "source-ssec-multipart-for-kms" - err := uploadMultipartSSECObject(ctx, client, bucketName, sourceKey, originalData, *sseKey) - require.NoError(t, err, "Failed to upload source SSE-C multipart object") - - // Copy to SSE-KMS - destKey := "dest-ssekms-from-ssec" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - // Copy source SSE-C headers - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sseKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - // Destination SSE-KMS headers - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String("test-multipart-key"), - BucketKeyEnabled: aws.Bool(false), - }) - require.NoError(t, err, "Failed to copy SSE-C to SSE-KMS") - - // Verify copied object as SSE-KMS - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, nil, aws.String("test-multipart-key")) -} - -// testSSEKMSToSSECCopy tests copying SSE-KMS multipart objects to SSE-C -func testSSEKMSToSSECCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - sseKey := generateSSECKey() - - // Upload original multipart SSE-KMS object - sourceKey := "source-ssekms-multipart-for-ssec" - err := uploadMultipartSSEKMSObject(ctx, client, bucketName, sourceKey, "test-multipart-key", originalData) - require.NoError(t, err, "Failed to upload source SSE-KMS multipart object") - - // Copy to SSE-C - destKey := "dest-ssec-from-ssekms" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - // Destination SSE-C headers - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - require.NoError(t, err, "Failed to copy SSE-KMS to SSE-C") - - // Verify copied object as SSE-C - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, sseKey, nil) -} - -// testSSECToUnencryptedCopy tests copying SSE-C multipart objects to unencrypted -func testSSECToUnencryptedCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - sseKey := generateSSECKey() - - // Upload original multipart SSE-C object - sourceKey := "source-ssec-multipart-for-plain" - err := uploadMultipartSSECObject(ctx, client, bucketName, sourceKey, originalData, *sseKey) - require.NoError(t, err, "Failed to upload source SSE-C multipart object") - - // Copy to unencrypted - destKey := "dest-plain-from-ssec" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - // Copy source SSE-C headers - CopySourceSSECustomerAlgorithm: aws.String("AES256"), - CopySourceSSECustomerKey: aws.String(sseKey.KeyB64), - CopySourceSSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - // No destination encryption headers - }) - require.NoError(t, err, "Failed to copy SSE-C to unencrypted") - - // Verify copied object as unencrypted - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, nil, nil) -} - -// testSSEKMSToUnencryptedCopy tests copying SSE-KMS multipart objects to unencrypted -func testSSEKMSToUnencryptedCopy(t *testing.T, ctx context.Context, client *s3.Client, bucketName string, originalData []byte, originalMD5 string) { - // Upload original multipart SSE-KMS object - sourceKey := "source-ssekms-multipart-for-plain" - err := uploadMultipartSSEKMSObject(ctx, client, bucketName, sourceKey, "test-multipart-key", originalData) - require.NoError(t, err, "Failed to upload source SSE-KMS multipart object") - - // Copy to unencrypted - destKey := "dest-plain-from-ssekms" - _, err = client.CopyObject(ctx, &s3.CopyObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(destKey), - CopySource: aws.String(fmt.Sprintf("%s/%s", bucketName, sourceKey)), - // No destination encryption headers - }) - require.NoError(t, err, "Failed to copy SSE-KMS to unencrypted") - - // Verify copied object as unencrypted - verifyEncryptedObject(t, ctx, client, bucketName, destKey, originalData, originalMD5, nil, nil) -} - -// uploadMultipartSSECObject uploads a multipart SSE-C object -func uploadMultipartSSECObject(ctx context.Context, client *s3.Client, bucketName, objectKey string, data []byte, sseKey SSECKey) error { - // Create multipart upload - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - if err != nil { - return err - } - uploadID := aws.ToString(createResp.UploadId) - - // Upload parts - partSize := 5 * 1024 * 1024 // 5MB - var completedParts []types.CompletedPart - - for i := 0; i < len(data); i += partSize { - end := i + partSize - if end > len(data) { - end = len(data) - } - - partNumber := int32(len(completedParts) + 1) - partResp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(partNumber), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(data[i:end]), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - }) - if err != nil { - return err - } - - completedParts = append(completedParts, types.CompletedPart{ - ETag: partResp.ETag, - PartNumber: aws.Int32(partNumber), - }) - } - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: completedParts, - }, - }) - - return err -} - -// uploadMultipartSSEKMSObject uploads a multipart SSE-KMS object -func uploadMultipartSSEKMSObject(ctx context.Context, client *s3.Client, bucketName, objectKey, keyID string, data []byte) error { - // Create multipart upload - createResp, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(keyID), - BucketKeyEnabled: aws.Bool(false), - }) - if err != nil { - return err - } - uploadID := aws.ToString(createResp.UploadId) - - // Upload parts - partSize := 5 * 1024 * 1024 // 5MB - var completedParts []types.CompletedPart - - for i := 0; i < len(data); i += partSize { - end := i + partSize - if end > len(data) { - end = len(data) - } - - partNumber := int32(len(completedParts) + 1) - partResp, err := client.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - PartNumber: aws.Int32(partNumber), - UploadId: aws.String(uploadID), - Body: bytes.NewReader(data[i:end]), - }) - if err != nil { - return err - } - - completedParts = append(completedParts, types.CompletedPart{ - ETag: partResp.ETag, - PartNumber: aws.Int32(partNumber), - }) - } - - // Complete multipart upload - _, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - UploadId: aws.String(uploadID), - MultipartUpload: &types.CompletedMultipartUpload{ - Parts: completedParts, - }, - }) - - return err -} - -// verifyEncryptedObject verifies that a copied object can be retrieved and matches the original data -func verifyEncryptedObject(t *testing.T, ctx context.Context, client *s3.Client, bucketName, objectKey string, expectedData []byte, expectedMD5 string, sseKey *SSECKey, kmsKeyID *string) { - var getInput *s3.GetObjectInput - - if sseKey != nil { - // SSE-C object - getInput = &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(sseKey.KeyB64), - SSECustomerKeyMD5: aws.String(sseKey.KeyMD5), - } - } else { - // SSE-KMS or unencrypted object - getInput = &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - } - } - - getResp, err := client.GetObject(ctx, getInput) - require.NoError(t, err, "Failed to retrieve copied object %s", objectKey) - defer getResp.Body.Close() - - // Read and verify data - retrievedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read copied object data") - - require.Equal(t, len(expectedData), len(retrievedData), "Data size mismatch for object %s", objectKey) - - // Verify data using MD5 - retrievedMD5 := fmt.Sprintf("%x", md5.Sum(retrievedData)) - require.Equal(t, expectedMD5, retrievedMD5, "Data MD5 mismatch for object %s", objectKey) - - // Verify encryption headers - if sseKey != nil { - require.Equal(t, "AES256", aws.ToString(getResp.SSECustomerAlgorithm), "SSE-C algorithm mismatch") - require.Equal(t, sseKey.KeyMD5, aws.ToString(getResp.SSECustomerKeyMD5), "SSE-C key MD5 mismatch") - } else if kmsKeyID != nil { - require.Equal(t, types.ServerSideEncryptionAwsKms, getResp.ServerSideEncryption, "SSE-KMS encryption mismatch") - require.Contains(t, aws.ToString(getResp.SSEKMSKeyId), *kmsKeyID, "SSE-KMS key ID mismatch") - } - - t.Logf("Successfully verified copied object %s: %d bytes, MD5=%s", objectKey, len(retrievedData), retrievedMD5) -} diff --git a/test/s3/sse/setup_openbao_sse.sh b/test/s3/sse/setup_openbao_sse.sh deleted file mode 100755 index 24034289b..000000000 --- a/test/s3/sse/setup_openbao_sse.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash - -# Setup OpenBao for SSE Integration Testing -# This script configures OpenBao with encryption keys for S3 SSE testing - -set -e - -# Configuration -OPENBAO_ADDR="${OPENBAO_ADDR:-http://127.0.0.1:8200}" -OPENBAO_TOKEN="${OPENBAO_TOKEN:-root-token-for-testing}" -TRANSIT_PATH="${TRANSIT_PATH:-transit}" - -echo "๐Ÿš€ Setting up OpenBao for S3 SSE integration testing..." -echo "OpenBao Address: $OPENBAO_ADDR" -echo "Transit Path: $TRANSIT_PATH" - -# Export for API calls -export VAULT_ADDR="$OPENBAO_ADDR" -export VAULT_TOKEN="$OPENBAO_TOKEN" - -# Wait for OpenBao to be ready -echo "โณ Waiting for OpenBao to be ready..." -for i in {1..30}; do - if curl -s "$OPENBAO_ADDR/v1/sys/health" > /dev/null 2>&1; then - echo "[OK] OpenBao is ready!" - break - fi - if [ $i -eq 30 ]; then - echo "[FAIL] OpenBao failed to start within 60 seconds" - exit 1 - fi - sleep 2 -done - -# Enable transit secrets engine (ignore error if already enabled) -echo "๐Ÿ”ง Setting up transit secrets engine..." -curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"type\":\"transit\"}" \ - "$OPENBAO_ADDR/v1/sys/mounts/$TRANSIT_PATH" || echo "Transit engine may already be enabled" - -# Create encryption keys for S3 SSE testing -echo "๐Ÿ”‘ Creating encryption keys for SSE testing..." - -# Test keys that match the existing test expectations -declare -a keys=( - "test-key-123:SSE-KMS basic integration test key" - "source-test-key-123:SSE-KMS copy source key" - "dest-test-key-456:SSE-KMS copy destination key" - "test-multipart-key:SSE-KMS multipart upload test key" - "invalid-test-key:SSE-KMS error testing key" - "test-kms-range-key:SSE-KMS range request test key" - "seaweedfs-test-key:General SeaweedFS SSE test key" - "bucket-default-key:Default bucket encryption key" - "high-security-key:High security encryption key" - "performance-key:Performance testing key" -) - -for key_info in "${keys[@]}"; do - IFS=':' read -r key_name description <<< "$key_info" - echo " Creating key: $key_name ($description)" - - # Create key - response=$(curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"type\":\"aes256-gcm96\",\"description\":\"$description\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/keys/$key_name") - - if echo "$response" | grep -q "errors"; then - echo " Warning: $response" - fi - - # Verify key was created - verify_response=$(curl -s \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/keys/$key_name") - - if echo "$verify_response" | grep -q "\"name\":\"$key_name\""; then - echo " [OK] Key $key_name created successfully" - else - echo " [FAIL] Failed to verify key $key_name" - echo " Response: $verify_response" - fi -done - -# Test basic encryption/decryption functionality -echo "๐Ÿงช Testing basic encryption/decryption..." -test_plaintext="Hello, SeaweedFS SSE Integration!" -test_key="test-key-123" - -# Encrypt -encrypt_response=$(curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"plaintext\":\"$(echo -n "$test_plaintext" | base64)\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/encrypt/$test_key") - -if echo "$encrypt_response" | grep -q "ciphertext"; then - ciphertext=$(echo "$encrypt_response" | grep -o '"ciphertext":"[^"]*"' | cut -d'"' -f4) - echo " [OK] Encryption successful: ${ciphertext:0:50}..." - - # Decrypt to verify - decrypt_response=$(curl -s -X POST \ - -H "X-Vault-Token: $OPENBAO_TOKEN" \ - -H "Content-Type: application/json" \ - -d "{\"ciphertext\":\"$ciphertext\"}" \ - "$OPENBAO_ADDR/v1/$TRANSIT_PATH/decrypt/$test_key") - - if echo "$decrypt_response" | grep -q "plaintext"; then - decrypted_b64=$(echo "$decrypt_response" | grep -o '"plaintext":"[^"]*"' | cut -d'"' -f4) - decrypted=$(echo "$decrypted_b64" | base64 -d) - if [ "$decrypted" = "$test_plaintext" ]; then - echo " [OK] Decryption successful: $decrypted" - else - echo " [FAIL] Decryption failed: expected '$test_plaintext', got '$decrypted'" - fi - else - echo " [FAIL] Decryption failed: $decrypt_response" - fi -else - echo " [FAIL] Encryption failed: $encrypt_response" -fi - -echo "" -echo "๐Ÿ“Š OpenBao SSE setup summary:" -echo " Address: $OPENBAO_ADDR" -echo " Transit Path: $TRANSIT_PATH" -echo " Keys Created: ${#keys[@]}" -echo " Status: Ready for S3 SSE integration testing" -echo "" -echo "๐ŸŽฏ Ready to run S3 SSE integration tests!" -echo "" -echo "Usage:" -echo " # Run with Docker Compose" -echo " make test-with-kms" -echo "" -echo " # Run specific test suites" -echo " make test-ssekms-integration" -echo "" -echo " # Check status" -echo " curl $OPENBAO_ADDR/v1/sys/health" -echo "" - -echo "[OK] OpenBao SSE setup complete!" diff --git a/test/s3/sse/simple_sse_test.go b/test/s3/sse/simple_sse_test.go deleted file mode 100644 index 2fd8f642b..000000000 --- a/test/s3/sse/simple_sse_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package sse_test - -import ( - "bytes" - "context" - "crypto/md5" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSimpleSSECIntegration tests basic SSE-C with a fixed bucket name -func TestSimpleSSECIntegration(t *testing.T) { - ctx := context.Background() - - // Create S3 client - customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: "http://127.0.0.1:8333", - HostnameImmutable: true, - }, nil - }) - - awsCfg, err := config.LoadDefaultConfig(ctx, - config.WithRegion("us-east-1"), - config.WithEndpointResolverWithOptions(customResolver), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - "some_access_key1", - "some_secret_key1", - "", - )), - ) - require.NoError(t, err) - - client := s3.NewFromConfig(awsCfg, func(o *s3.Options) { - o.UsePathStyle = true - }) - - bucketName := "test-debug-bucket" - objectKey := fmt.Sprintf("test-object-prefixed-%d", time.Now().UnixNano()) - - // Generate SSE-C key - key := make([]byte, 32) - rand.Read(key) - keyB64 := base64.StdEncoding.EncodeToString(key) - keyMD5Hash := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(keyMD5Hash[:]) - - testData := []byte("Hello, simple SSE-C integration test!") - - // Ensure bucket exists - _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Bucket creation result: %v (might be OK if exists)", err) - } - - // Wait a moment for bucket to be ready - time.Sleep(1 * time.Second) - - t.Run("PUT with SSE-C", func(t *testing.T) { - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(keyB64), - SSECustomerKeyMD5: aws.String(keyMD5), - }) - require.NoError(t, err, "Failed to upload SSE-C object") - t.Log("SSE-C PUT succeeded!") - }) - - t.Run("GET with SSE-C", func(t *testing.T) { - resp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - SSECustomerAlgorithm: aws.String("AES256"), - SSECustomerKey: aws.String(keyB64), - SSECustomerKeyMD5: aws.String(keyMD5), - }) - require.NoError(t, err, "Failed to retrieve SSE-C object") - defer resp.Body.Close() - - retrievedData, err := io.ReadAll(resp.Body) - require.NoError(t, err, "Failed to read retrieved data") - assert.Equal(t, testData, retrievedData, "Retrieved data doesn't match original") - - // Verify SSE-C headers - assert.Equal(t, "AES256", aws.ToString(resp.SSECustomerAlgorithm)) - assert.Equal(t, keyMD5, aws.ToString(resp.SSECustomerKeyMD5)) - - t.Log("SSE-C GET succeeded and data matches!") - }) - - t.Run("GET without key should fail", func(t *testing.T) { - _, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - assert.Error(t, err, "Should fail to retrieve SSE-C object without key") - t.Log("GET without key correctly failed") - }) -} diff --git a/test/s3/sse/sse.test b/test/s3/sse/sse.test deleted file mode 100755 index 73dd18062..000000000 Binary files a/test/s3/sse/sse.test and /dev/null differ diff --git a/test/s3/sse/sse_kms_openbao_test.go b/test/s3/sse/sse_kms_openbao_test.go deleted file mode 100644 index b7606fe6a..000000000 --- a/test/s3/sse/sse_kms_openbao_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package sse_test - -import ( - "bytes" - "context" - "io" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSSEKMSOpenBaoIntegration tests SSE-KMS with real OpenBao KMS provider -// This test verifies that SeaweedFS can successfully encrypt and decrypt data -// using actual KMS operations through OpenBao, not just mock key IDs -func TestSSEKMSOpenBaoIntegration(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancel() - - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-kms-openbao-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - t.Run("Basic SSE-KMS with OpenBao", func(t *testing.T) { - testData := []byte("Hello, SSE-KMS with OpenBao integration!") - objectKey := "test-openbao-kms-object" - kmsKeyID := "test-key-123" // This key should exist in OpenBao - - // Upload object with SSE-KMS - putResp, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to upload SSE-KMS object with OpenBao") - assert.NotEmpty(t, aws.ToString(putResp.ETag), "ETag should be present") - - // Retrieve and verify object - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve SSE-KMS object") - defer getResp.Body.Close() - - // Verify content matches (this proves encryption/decryption worked) - retrievedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read retrieved data") - assert.Equal(t, testData, retrievedData, "Decrypted data should match original") - - // Verify SSE-KMS headers are present - assert.Equal(t, types.ServerSideEncryptionAwsKms, getResp.ServerSideEncryption, "Should indicate KMS encryption") - assert.Equal(t, kmsKeyID, aws.ToString(getResp.SSEKMSKeyId), "Should return the KMS key ID used") - }) - - t.Run("Multiple KMS Keys with OpenBao", func(t *testing.T) { - testCases := []struct { - keyID string - data string - objectKey string - }{ - {"test-key-123", "Data encrypted with test-key-123", "object-key-123"}, - {"seaweedfs-test-key", "Data encrypted with seaweedfs-test-key", "object-seaweedfs-key"}, - {"high-security-key", "Data encrypted with high-security-key", "object-security-key"}, - } - - for _, tc := range testCases { - t.Run("Key_"+tc.keyID, func(t *testing.T) { - testData := []byte(tc.data) - - // Upload with specific KMS key - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(tc.objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(tc.keyID), - }) - require.NoError(t, err, "Failed to upload with KMS key %s", tc.keyID) - - // Retrieve and verify - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(tc.objectKey), - }) - require.NoError(t, err, "Failed to retrieve object encrypted with key %s", tc.keyID) - defer getResp.Body.Close() - - retrievedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read data for key %s", tc.keyID) - - // Verify data integrity (proves real encryption/decryption occurred) - assert.Equal(t, testData, retrievedData, "Data should match for key %s", tc.keyID) - assert.Equal(t, tc.keyID, aws.ToString(getResp.SSEKMSKeyId), "Should return correct key ID") - }) - } - }) - - t.Run("Large Data with OpenBao KMS", func(t *testing.T) { - // Test with larger data to ensure chunked encryption works - testData := generateTestData(64 * 1024) // 64KB - objectKey := "large-openbao-kms-object" - kmsKeyID := "performance-key" - - // Upload large object with SSE-KMS - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - require.NoError(t, err, "Failed to upload large SSE-KMS object") - - // Retrieve and verify large object - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve large SSE-KMS object") - defer getResp.Body.Close() - - retrievedData, err := io.ReadAll(getResp.Body) - require.NoError(t, err, "Failed to read large data") - - // Use MD5 comparison for large data - assertDataEqual(t, testData, retrievedData, "Large encrypted data should match original") - assert.Equal(t, kmsKeyID, aws.ToString(getResp.SSEKMSKeyId), "Should return performance key ID") - }) -} - -// TestSSEKMSOpenBaoAvailability checks if OpenBao KMS is available for testing -// This test can be run separately to verify the KMS setup -func TestSSEKMSOpenBaoAvailability(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - client, err := createS3Client(ctx, defaultConfig) - require.NoError(t, err, "Failed to create S3 client") - - bucketName, err := createTestBucket(ctx, client, defaultConfig.BucketPrefix+"sse-kms-availability-") - require.NoError(t, err, "Failed to create test bucket") - defer cleanupTestBucket(ctx, client, bucketName) - - // Try a simple KMS operation to verify availability - testData := []byte("KMS availability test") - objectKey := "kms-availability-test" - kmsKeyID := "test-key-123" - - // This should succeed if KMS is properly configured - _, err = client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(testData), - ServerSideEncryption: types.ServerSideEncryptionAwsKms, - SSEKMSKeyId: aws.String(kmsKeyID), - }) - - if err != nil { - t.Skipf("OpenBao KMS not available for testing: %v", err) - } - - t.Logf("OpenBao KMS is available and working") - - // Verify we can retrieve the object - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Failed to retrieve KMS test object") - defer getResp.Body.Close() - - assert.Equal(t, types.ServerSideEncryptionAwsKms, getResp.ServerSideEncryption) - t.Logf("KMS encryption/decryption working correctly") -} diff --git a/test/s3/sse/test_single_ssec.txt b/test/s3/sse/test_single_ssec.txt deleted file mode 100644 index c3e4479ea..000000000 --- a/test/s3/sse/test_single_ssec.txt +++ /dev/null @@ -1 +0,0 @@ -Test data for single object SSE-C diff --git a/test/s3/versioning/Makefile b/test/s3/versioning/Makefile deleted file mode 100644 index ccf5e2092..000000000 --- a/test/s3/versioning/Makefile +++ /dev/null @@ -1,359 +0,0 @@ -# S3 API Test Makefile -# This Makefile provides comprehensive targets for running S3 versioning tests - -.PHONY: help build-weed setup-server start-server stop-server test-versioning test-versioning-quick test-versioning-comprehensive test-all clean logs check-deps - -# Configuration -WEED_BINARY := ../../../weed/weed_binary -S3_PORT := 8333 -MASTER_PORT := 9333 -VOLUME_PORT := 8080 -FILER_PORT := 8888 -TEST_TIMEOUT := 10m -TEST_PATTERN := TestVersioning - -# Default target -help: - @echo "S3 API Test Makefile" - @echo "" - @echo "Available targets:" - @echo " help - Show this help message" - @echo " build-weed - Build the SeaweedFS binary" - @echo " check-deps - Check dependencies and build binary if needed" - @echo " start-server - Start SeaweedFS server for testing" - @echo " start-server-simple - Start server without process cleanup (for CI)" - @echo " stop-server - Stop SeaweedFS server" - @echo " test-versioning - Run all versioning tests" - @echo " test-versioning-quick - Run core versioning tests only" - @echo " test-versioning-simple - Run tests without server management" - @echo " test-versioning-comprehensive - Run comprehensive versioning tests" - @echo " test-all - Run all S3 API tests" - @echo " test-with-server - Start server, run tests, stop server" - @echo " logs - Show server logs" - @echo " clean - Clean up test artifacts and stop server" - @echo " health-check - Check if server is accessible" - @echo "" - @echo "Configuration:" - @echo " S3_PORT=${S3_PORT}" - @echo " TEST_TIMEOUT=${TEST_TIMEOUT}" - -# Check dependencies -# Build the SeaweedFS binary -build-weed: - @echo "Building SeaweedFS binary..." - @cd ../../../weed && go build -o weed_binary . - @chmod +x $(WEED_BINARY) - @echo "โœ… SeaweedFS binary built at $(WEED_BINARY)" - -check-deps: build-weed - @echo "Checking dependencies..." - @echo "๐Ÿ” DEBUG: Checking Go installation..." - @command -v go >/dev/null 2>&1 || (echo "Go is required but not installed" && exit 1) - @echo "๐Ÿ” DEBUG: Go version: $$(go version)" - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)..." - @test -f $(WEED_BINARY) || (echo "SeaweedFS binary not found at $(WEED_BINARY)" && exit 1) - @echo "๐Ÿ” DEBUG: Binary size: $$(ls -lh $(WEED_BINARY) | awk '{print $$5}')" - @echo "๐Ÿ” DEBUG: Binary permissions: $$(ls -la $(WEED_BINARY) | awk '{print $$1}')" - @echo "๐Ÿ” DEBUG: Checking Go module dependencies..." - @go list -m github.com/aws/aws-sdk-go-v2 >/dev/null 2>&1 || (echo "AWS SDK Go v2 not found. Run 'go mod tidy'." && exit 1) - @go list -m github.com/stretchr/testify >/dev/null 2>&1 || (echo "Testify not found. Run 'go mod tidy'." && exit 1) - @echo "โœ… All dependencies are available" - -# Start SeaweedFS server for testing -start-server: check-deps - @echo "Starting SeaweedFS server..." - @echo "๐Ÿ” DEBUG: Current working directory: $$(pwd)" - @echo "๐Ÿ” DEBUG: Checking for existing weed processes..." - @ps aux | grep weed | grep -v grep || echo "No existing weed processes found" - @echo "๐Ÿ” DEBUG: Cleaning up any existing PID file..." - @rm -f weed-server.pid - @echo "๐Ÿ” DEBUG: Checking for port conflicts..." - @if netstat -tlnp 2>/dev/null | grep $(S3_PORT) >/dev/null; then \ - echo "โš ๏ธ Port $(S3_PORT) is already in use, trying to find the process..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || true; \ - else \ - echo "โœ… Port $(S3_PORT) is available"; \ - fi - @echo "๐Ÿ” DEBUG: Checking binary at $(WEED_BINARY)" - @ls -la $(WEED_BINARY) || (echo "โŒ Binary not found!" && exit 1) - @echo "๐Ÿ” DEBUG: Checking config file at ../../../docker/compose/s3.json" - @ls -la ../../../docker/compose/s3.json || echo "โš ๏ธ Config file not found, continuing without it" - @echo "๐Ÿ” DEBUG: Creating volume directory..." - @mkdir -p ./test-volume-data - @echo "๐Ÿ” DEBUG: Launching SeaweedFS server in background..." - @echo "๐Ÿ” DEBUG: Command: $(WEED_BINARY) server -debug -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../../../docker/compose/s3.json -filer -filer.maxMB=64 -master.volumeSizeLimitMB=50 -volume.max=100 -dir=./test-volume-data -volume.preStopSeconds=1 -metricsPort=9324" - @$(WEED_BINARY) server \ - -debug \ - -s3 \ - -s3.port=$(S3_PORT) \ - -s3.allowEmptyFolder=false \ - -s3.allowDeleteBucketNotEmpty=true \ - -s3.config=../../../docker/compose/s3.json \ - -filer \ - -filer.maxMB=64 \ - -master.volumeSizeLimitMB=50 \ - -volume.max=100 \ - -dir=./test-volume-data \ - -volume.preStopSeconds=1 \ - -metricsPort=9324 \ - > weed-test.log 2>&1 & echo $$! > weed-server.pid - @echo "๐Ÿ” DEBUG: Server PID: $$(cat weed-server.pid 2>/dev/null || echo 'PID file not found')" - @echo "๐Ÿ” DEBUG: Checking if PID is still running..." - @sleep 2 - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - ps -p $$SERVER_PID || echo "โš ๏ธ Server PID $$SERVER_PID not found after 2 seconds"; \ - else \ - echo "โš ๏ธ PID file not found"; \ - fi - @echo "๐Ÿ” DEBUG: Waiting for server to start (up to 90 seconds)..." - @for i in $$(seq 1 90); do \ - echo "๐Ÿ” DEBUG: Attempt $$i/90 - checking port $(S3_PORT)"; \ - if curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1; then \ - echo "โœ… SeaweedFS server started successfully on port $(S3_PORT) after $$i seconds"; \ - exit 0; \ - fi; \ - if [ $$i -eq 5 ]; then \ - echo "๐Ÿ” DEBUG: After 5 seconds, checking process and logs..."; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - if [ -f weed-test.log ]; then \ - echo "=== First server logs ==="; \ - head -20 weed-test.log; \ - fi; \ - fi; \ - if [ $$i -eq 15 ]; then \ - echo "๐Ÿ” DEBUG: After 15 seconds, checking port bindings..."; \ - netstat -tlnp 2>/dev/null | grep $(S3_PORT) || echo "Port $(S3_PORT) not bound"; \ - netstat -tlnp 2>/dev/null | grep 9333 || echo "Port 9333 not bound"; \ - netstat -tlnp 2>/dev/null | grep 8080 || echo "Port 8080 not bound"; \ - fi; \ - if [ $$i -eq 30 ]; then \ - echo "โš ๏ธ Server taking longer than expected (30s), checking logs..."; \ - if [ -f weed-test.log ]; then \ - echo "=== Recent server logs ==="; \ - tail -20 weed-test.log; \ - fi; \ - fi; \ - sleep 1; \ - done; \ - echo "โŒ Server failed to start within 90 seconds"; \ - echo "๐Ÿ” DEBUG: Final process check:"; \ - ps aux | grep weed | grep -v grep || echo "No weed processes found"; \ - echo "๐Ÿ” DEBUG: Final port check:"; \ - netstat -tlnp 2>/dev/null | grep -E "(8333|9333|8080)" || echo "No ports bound"; \ - echo "=== Full server logs ==="; \ - if [ -f weed-test.log ]; then \ - cat weed-test.log; \ - else \ - echo "No log file found"; \ - fi; \ - exit 1 - -# Stop SeaweedFS server -stop-server: - @echo "Stopping SeaweedFS server..." - @if [ -f weed-server.pid ]; then \ - SERVER_PID=$$(cat weed-server.pid); \ - echo "Killing server PID $$SERVER_PID"; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - kill -TERM $$SERVER_PID 2>/dev/null || true; \ - sleep 2; \ - if ps -p $$SERVER_PID >/dev/null 2>&1; then \ - echo "Process still running, sending KILL signal..."; \ - kill -KILL $$SERVER_PID 2>/dev/null || true; \ - sleep 1; \ - fi; \ - else \ - echo "Process $$SERVER_PID not found (already stopped)"; \ - fi; \ - rm -f weed-server.pid; \ - else \ - echo "No PID file found, checking for running processes..."; \ - echo "โš ๏ธ Skipping automatic process cleanup to avoid CI issues"; \ - echo "Note: Any remaining weed processes should be cleaned up by the CI environment"; \ - fi - @echo "โœ… SeaweedFS server stopped" - -# Show server logs -logs: - @if test -f weed-test.log; then \ - echo "=== SeaweedFS Server Logs ==="; \ - tail -f weed-test.log; \ - else \ - echo "No log file found. Server may not be running."; \ - fi - -# Core versioning tests (equivalent to Python s3tests) -test-versioning-quick: check-deps - @echo "Running core S3 versioning tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "TestBucketListReturnDataVersioning|TestVersioningBasicWorkflow|TestVersioningDeleteMarkers" . - @echo "โœ… Core versioning tests completed" - -# All versioning tests -test-versioning: check-deps - @echo "Running all S3 versioning tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . - @echo "โœ… All versioning tests completed" - -# Comprehensive versioning tests (including edge cases) -test-versioning-comprehensive: check-deps - @echo "Running comprehensive S3 versioning tests..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . -count=1 - @echo "โœ… Comprehensive versioning tests completed" - -# All S3 API tests -test-all: check-deps - @echo "Running all S3 API tests..." - @go test -v -timeout=$(TEST_TIMEOUT) ./... - @echo "โœ… All S3 API tests completed" - -# Run tests with automatic server management -test-with-server: start-server - @echo "๐Ÿ” DEBUG: Server started successfully, now running versioning tests..." - @echo "๐Ÿ” DEBUG: Test pattern: $(TEST_PATTERN)" - @echo "๐Ÿ” DEBUG: Test timeout: $(TEST_TIMEOUT)" - @echo "Running versioning tests with managed server..." - @trap "$(MAKE) stop-server" EXIT; \ - $(MAKE) test-versioning || (echo "โŒ Tests failed, showing server logs:" && echo "=== Last 50 lines of server logs ===" && tail -50 weed-test.log && echo "=== End of server logs ===" && exit 1) - @$(MAKE) stop-server - @echo "โœ… Tests completed and server stopped" - -# Test with different configurations -test-versioning-with-configs: check-deps - @echo "Testing with different S3 configurations..." - @echo "Testing with empty folder allowed..." - @$(WEED_BINARY) server -s3 -s3.port=$(S3_PORT) -s3.allowEmptyFolder=true -filer -master.volumeSizeLimitMB=100 -volume.max=100 > weed-test-config1.log 2>&1 & echo $$! > weed-config1.pid - @sleep 5 - @go test -v -timeout=5m -run "TestVersioningBasicWorkflow" . || true - @if [ -f weed-config1.pid ]; then kill -TERM $$(cat weed-config1.pid) 2>/dev/null || true; rm -f weed-config1.pid; fi - @sleep 2 - @echo "Testing with delete bucket not empty disabled..." - @$(WEED_BINARY) server -s3 -s3.port=$(S3_PORT) -s3.allowDeleteBucketNotEmpty=false -filer -master.volumeSizeLimitMB=100 -volume.max=100 > weed-test-config2.log 2>&1 & echo $$! > weed-config2.pid - @sleep 5 - @go test -v -timeout=5m -run "TestVersioningBasicWorkflow" . || true - @if [ -f weed-config2.pid ]; then kill -TERM $$(cat weed-config2.pid) 2>/dev/null || true; rm -f weed-config2.pid; fi - @echo "โœ… Configuration tests completed" - -# Performance/stress testing -test-versioning-stress: check-deps - @echo "Running stress tests for versioning..." - @go test -v -timeout=20m -run "TestVersioningConcurrentOperations" . -count=5 - @echo "โœ… Stress tests completed" - -# Generate test reports -test-report: check-deps - @echo "Generating test reports..." - @mkdir -p reports - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . -json > reports/test-results.json 2>&1 || true - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . -coverprofile=reports/coverage.out 2>&1 || true - @go tool cover -html=reports/coverage.out -o reports/coverage.html 2>/dev/null || true - @echo "โœ… Test reports generated in reports/ directory" - -# Clean up test artifacts -clean: - @echo "Cleaning up test artifacts..." - @$(MAKE) stop-server - @rm -f weed-test*.log weed-server.pid weed-config*.pid - @rm -rf reports/ - @rm -rf test-volume-data/ - @go clean -testcache - @echo "โœ… Cleanup completed" - -# Debug mode - start server with verbose logging -debug-server: - @echo "Starting SeaweedFS server in debug mode..." - @$(MAKE) stop-server - @mkdir -p ./test-volume-data - @$(WEED_BINARY) server \ - -debug \ - -s3 \ - -s3.port=$(S3_PORT) \ - -s3.allowEmptyFolder=false \ - -s3.allowDeleteBucketNotEmpty=true \ - -s3.config=../../../docker/compose/s3.json \ - -filer \ - -filer.maxMB=16 \ - -master.volumeSizeLimitMB=50 \ - -volume.max=100 \ - -dir=./test-volume-data \ - -volume.preStopSeconds=1 \ - -metricsPort=9324 - -# Run a single test for debugging -debug-test: check-deps - @echo "Running single test for debugging..." - @go test -v -timeout=5m -run "TestBucketListReturnDataVersioning" . -count=1 - -# Continuous testing (re-run tests on file changes) -watch-tests: - @echo "Starting continuous testing (requires 'entr' command)..." - @command -v entr >/dev/null 2>&1 || (echo "Install 'entr' for file watching: brew install entr (macOS) or apt-get install entr (Linux)" && exit 1) - @find . -name "*.go" | entr -c $(MAKE) test-versioning-quick - -# Install missing Go dependencies -install-deps: - @echo "Installing Go dependencies..." - @go mod download - @go mod tidy - @echo "โœ… Dependencies installed" - -# Validate test configuration -validate-config: - @echo "Validating test configuration..." - @test -f test_config.json || (echo "โŒ test_config.json not found" && exit 1) - @python3 -m json.tool test_config.json > /dev/null 2>&1 || (echo "โŒ test_config.json is not valid JSON" && exit 1) - @echo "โœ… Configuration is valid" - -# Quick health check -health-check: - @echo "Running health check..." - @curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1 && echo "โœ… S3 API is accessible" || echo "โŒ S3 API is not accessible" - @curl -s http://localhost:9324/metrics >/dev/null 2>&1 && echo "โœ… Metrics endpoint is accessible" || echo "โŒ Metrics endpoint is not accessible" - -# Simple server start without process cleanup (for CI troubleshooting) -start-server-simple: check-deps - @echo "Starting SeaweedFS server (simple mode)..." - @$(WEED_BINARY) server \ - -debug \ - -s3 \ - -s3.port=$(S3_PORT) \ - -s3.allowEmptyFolder=false \ - -s3.allowDeleteBucketNotEmpty=true \ - -s3.config=../../../docker/compose/s3.json \ - -filer \ - -filer.maxMB=64 \ - -master.volumeSizeLimitMB=50 \ - -volume.max=100 \ - -volume.preStopSeconds=1 \ - -metricsPort=9324 \ - > weed-test.log 2>&1 & echo $$! > weed-server.pid - @echo "Server PID: $$(cat weed-server.pid)" - @echo "Waiting for server to start..." - @sleep 10 - @curl -s http://localhost:$(S3_PORT) >/dev/null 2>&1 && echo "โœ… Server started successfully" || echo "โŒ Server failed to start" - -# Simple test run without server management -test-versioning-simple: check-deps - @echo "Running versioning tests (assuming server is already running)..." - @go test -v -timeout=$(TEST_TIMEOUT) -run "$(TEST_PATTERN)" . - @echo "โœ… Tests completed" - -# Force cleanup all weed processes (use with caution) -force-cleanup: - @echo "โš ๏ธ Force cleaning up all weed processes..." - @echo "This will attempt to kill ALL weed processes on the system" - @ps aux | grep weed | grep -v grep || echo "No weed processes found" - @killall -TERM weed_binary 2>/dev/null || echo "No weed_binary processes to terminate" - @sleep 2 - @killall -KILL weed_binary 2>/dev/null || echo "No weed_binary processes to kill" - @rm -f weed-server.pid weed-config*.pid - @echo "โœ… Force cleanup completed" - -# Compare with Python s3tests (if available) -compare-python-tests: - @echo "Comparing Go tests with Python s3tests..." - @echo "Go test: TestBucketListReturnDataVersioning" - @echo "Python equivalent: test_bucket_list_return_data_versioning" - @echo "" - @echo "Running Go version..." - @time go test -v -run "TestBucketListReturnDataVersioning" . 2>&1 | grep -E "(PASS|FAIL|took)" \ No newline at end of file diff --git a/test/s3/versioning/enable_stress_tests.sh b/test/s3/versioning/enable_stress_tests.sh deleted file mode 100755 index 5fa169ee0..000000000 --- a/test/s3/versioning/enable_stress_tests.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Enable S3 Versioning Stress Tests - -set -e - -# Colors -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' - -echo -e "${YELLOW}๐Ÿ“š Enabling S3 Versioning Stress Tests${NC}" - -# Disable short mode to enable stress tests -export ENABLE_STRESS_TESTS=true - -# Run versioning stress tests -echo -e "${YELLOW}๐Ÿงช Running versioning stress tests...${NC}" -make test-versioning-stress - -echo -e "${GREEN}โœ… Versioning stress tests completed${NC}" diff --git a/test/s3/versioning/s3_bucket_creation_test.go b/test/s3/versioning/s3_bucket_creation_test.go deleted file mode 100644 index 36bd70ba8..000000000 --- a/test/s3/versioning/s3_bucket_creation_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBucketCreationBehavior tests the S3-compliant bucket creation behavior -func TestBucketCreationBehavior(t *testing.T) { - client := getS3Client(t) - ctx := context.Background() - - // Test cases for bucket creation behavior - testCases := []struct { - name string - setupFunc func(t *testing.T, bucketName string) // Setup before test - bucketName string - objectLockEnabled *bool - expectedStatusCode int - expectedError string - cleanupFunc func(t *testing.T, bucketName string) // Cleanup after test - }{ - { - name: "Create new bucket - should succeed", - bucketName: "test-new-bucket-" + fmt.Sprintf("%d", time.Now().Unix()), - objectLockEnabled: nil, - expectedStatusCode: 200, - expectedError: "", - }, - { - name: "Create existing bucket with same owner - should return BucketAlreadyExists", - setupFunc: func(t *testing.T, bucketName string) { - // Create bucket first - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Setup: failed to create initial bucket") - }, - bucketName: "test-same-owner-same-settings-" + fmt.Sprintf("%d", time.Now().Unix()), - objectLockEnabled: nil, - expectedStatusCode: 409, // SeaweedFS now returns BucketAlreadyExists in all cases - expectedError: "BucketAlreadyExists", - }, - { - name: "Create bucket with same owner but different Object Lock settings - should fail", - setupFunc: func(t *testing.T, bucketName string) { - // Create bucket without Object Lock first - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Setup: failed to create initial bucket") - }, - bucketName: "test-same-owner-diff-settings-" + fmt.Sprintf("%d", time.Now().Unix()), - objectLockEnabled: aws.Bool(true), // Try to enable Object Lock on existing bucket - expectedStatusCode: 409, - expectedError: "BucketAlreadyExists", - }, - { - name: "Create bucket with Object Lock enabled - should succeed", - bucketName: "test-object-lock-new-" + fmt.Sprintf("%d", time.Now().Unix()), - objectLockEnabled: aws.Bool(true), - expectedStatusCode: 200, - expectedError: "", - }, - { - name: "Create bucket with Object Lock enabled twice - should fail", - setupFunc: func(t *testing.T, bucketName string) { - // Create bucket with Object Lock first - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err, "Setup: failed to create initial bucket with Object Lock") - }, - bucketName: "test-object-lock-duplicate-" + fmt.Sprintf("%d", time.Now().Unix()), - objectLockEnabled: aws.Bool(true), - expectedStatusCode: 409, - expectedError: "BucketAlreadyExists", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Setup - if tc.setupFunc != nil { - tc.setupFunc(t, tc.bucketName) - } - - // Cleanup function to ensure bucket is deleted after test - defer func() { - if tc.cleanupFunc != nil { - tc.cleanupFunc(t, tc.bucketName) - } else { - // Default cleanup - delete bucket and all objects - cleanupBucketForCreationTest(t, client, tc.bucketName) - } - }() - - // Execute the test - attempt to create bucket - input := &s3.CreateBucketInput{ - Bucket: aws.String(tc.bucketName), - } - if tc.objectLockEnabled != nil { - input.ObjectLockEnabledForBucket = tc.objectLockEnabled - } - - _, err := client.CreateBucket(ctx, input) - - // Verify results - if tc.expectedError == "" { - // Should succeed - assert.NoError(t, err, "Expected bucket creation to succeed") - } else { - // Should fail with specific error - assert.Error(t, err, "Expected bucket creation to fail") - if err != nil { - assert.Contains(t, err.Error(), tc.expectedError, - "Expected error to contain '%s', got: %v", tc.expectedError, err) - } - } - }) - } -} - -// TestBucketCreationWithDifferentUsers tests bucket creation with different identity contexts -func TestBucketCreationWithDifferentUsers(t *testing.T) { - // This test would require setting up different S3 credentials/identities - // For now, we'll skip this as it requires more complex setup - t.Skip("Different user testing requires IAM setup - implement when IAM is configured") - - // TODO: Implement when we have proper IAM/user management in test setup - // Should test: - // 1. User A creates bucket - // 2. User B tries to create same bucket -> should fail with BucketAlreadyExists -} - -// TestBucketCreationVersioningInteraction tests interaction between bucket creation and versioning -func TestBucketCreationVersioningInteraction(t *testing.T) { - client := getS3Client(t) - ctx := context.Background() - bucketName := "test-versioning-interaction-" + fmt.Sprintf("%d", time.Now().Unix()) - - defer cleanupBucketForCreationTest(t, client, bucketName) - - // Create bucket with Object Lock (which enables versioning) - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err, "Failed to create bucket with Object Lock") - - // Verify versioning is enabled - versioningOutput, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Failed to get bucket versioning status") - assert.Equal(t, types.BucketVersioningStatusEnabled, versioningOutput.Status, - "Expected versioning to be enabled when Object Lock is enabled") - - // Try to create the same bucket again - should fail - _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - assert.Error(t, err, "Expected second bucket creation to fail") - assert.Contains(t, err.Error(), "BucketAlreadyExists", - "Expected BucketAlreadyExists error, got: %v", err) -} - -// TestBucketCreationErrorMessages tests that proper error messages are returned -func TestBucketCreationErrorMessages(t *testing.T) { - client := getS3Client(t) - ctx := context.Background() - bucketName := "test-error-messages-" + fmt.Sprintf("%d", time.Now().Unix()) - - defer cleanupBucketForCreationTest(t, client, bucketName) - - // Create bucket first - _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Failed to create initial bucket") - - // Try to create again and check error details - _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - - require.Error(t, err, "Expected bucket creation to fail") - - // Check that it's the right type of error - assert.Contains(t, err.Error(), "BucketAlreadyExists", - "Expected BucketAlreadyExists error, got: %v", err) -} - -// cleanupBucketForCreationTest removes a bucket and all its contents -func cleanupBucketForCreationTest(t *testing.T, client *s3.Client, bucketName string) { - ctx := context.Background() - - // List and delete all objects (including versions) - listInput := &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - } - - for { - listOutput, err := client.ListObjectVersions(ctx, listInput) - if err != nil { - // Bucket might not exist, which is fine - break - } - - if len(listOutput.Versions) == 0 && len(listOutput.DeleteMarkers) == 0 { - break - } - - // Delete all versions - var objectsToDelete []types.ObjectIdentifier - for _, version := range listOutput.Versions { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: version.Key, - VersionId: version.VersionId, - }) - } - for _, marker := range listOutput.DeleteMarkers { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: marker.Key, - VersionId: marker.VersionId, - }) - } - - if len(objectsToDelete) > 0 { - _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectsToDelete, - }, - }) - if err != nil { - t.Logf("Warning: failed to delete objects from bucket %s: %v", bucketName, err) - } - } - - // Check if there are more objects - if !aws.ToBool(listOutput.IsTruncated) { - break - } - listInput.KeyMarker = listOutput.NextKeyMarker - listInput.VersionIdMarker = listOutput.NextVersionIdMarker - } - - // Delete the bucket - _, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) - } -} diff --git a/test/s3/versioning/s3_comprehensive_versioning_test.go b/test/s3/versioning/s3_comprehensive_versioning_test.go deleted file mode 100644 index dd927082c..000000000 --- a/test/s3/versioning/s3_comprehensive_versioning_test.go +++ /dev/null @@ -1,697 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "io" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestVersioningCreateObjectsInOrder tests the exact pattern from Python s3tests -func TestVersioningCreateObjectsInOrder(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Step 1: Create bucket (equivalent to get_new_bucket()) - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Step 2: Enable versioning (equivalent to check_configure_versioning_retry) - enableVersioning(t, client, bucketName) - checkVersioningStatus(t, client, bucketName, types.BucketVersioningStatusEnabled) - - // Step 3: Create objects (equivalent to _create_objects with specific keys) - keyNames := []string{"bar", "baz", "foo"} - - // This mirrors the exact logic from _create_objects function - for _, keyName := range keyNames { - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(keyName), - Body: strings.NewReader(keyName), // content = key name - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - require.NotEmpty(t, *putResp.VersionId) - - t.Logf("Created object %s with version %s", keyName, *putResp.VersionId) - } - - // Step 4: Verify all objects exist and have correct versioning data - objectMetadata := make(map[string]map[string]interface{}) - - for _, keyName := range keyNames { - // Get object metadata (equivalent to head_object) - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(keyName), - }) - require.NoError(t, err) - require.NotNil(t, headResp.VersionId) - - // Store metadata for later comparison - objectMetadata[keyName] = map[string]interface{}{ - "ETag": *headResp.ETag, - "LastModified": *headResp.LastModified, - "ContentLength": headResp.ContentLength, - "VersionId": *headResp.VersionId, - } - } - - // Step 5: List object versions (equivalent to list_object_versions) - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Verify results match Python test expectations - assert.Len(t, listResp.Versions, len(keyNames), "Should have one version per object") - assert.Empty(t, listResp.DeleteMarkers, "Should have no delete markers") - - // Create map for easy lookup - versionsByKey := make(map[string]types.ObjectVersion) - for _, version := range listResp.Versions { - versionsByKey[*version.Key] = version - } - - // Step 6: Verify each object's version data matches head_object data - for _, keyName := range keyNames { - version, exists := versionsByKey[keyName] - require.True(t, exists, "Version should exist for key %s", keyName) - - expectedData := objectMetadata[keyName] - - // These assertions mirror the Python test logic - assert.Equal(t, expectedData["ETag"], *version.ETag, "ETag mismatch for %s", keyName) - assert.Equal(t, expectedData["ContentLength"], version.Size, "Size mismatch for %s", keyName) - assert.Equal(t, expectedData["VersionId"], *version.VersionId, "VersionId mismatch for %s", keyName) - assert.True(t, *version.IsLatest, "Should be marked as latest version for %s", keyName) - - // Time comparison with tolerance (Python uses _compare_dates) - expectedTime := expectedData["LastModified"].(time.Time) - actualTime := *version.LastModified - timeDiff := actualTime.Sub(expectedTime) - if timeDiff < 0 { - timeDiff = -timeDiff - } - assert.True(t, timeDiff < time.Minute, "LastModified times should be close for %s", keyName) - } - - t.Logf("Successfully verified versioning data for %d objects matching Python s3tests expectations", len(keyNames)) -} - -// TestVersioningMultipleVersionsSameObject tests creating multiple versions of the same object -func TestVersioningMultipleVersionsSameObject(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - objectKey := "test-multi-version" - numVersions := 5 - versionIds := make([]string, numVersions) - - // Create multiple versions of the same object - for i := 0; i < numVersions; i++ { - content := fmt.Sprintf("content-version-%d", i+1) - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - versionIds[i] = *putResp.VersionId - } - - // Verify all versions exist - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, numVersions) - - // Verify only the latest is marked as latest - latestCount := 0 - for _, version := range listResp.Versions { - if *version.IsLatest { - latestCount++ - assert.Equal(t, versionIds[numVersions-1], *version.VersionId, "Latest version should be the last one created") - } - } - assert.Equal(t, 1, latestCount, "Only one version should be marked as latest") - - // Verify all version IDs are unique - versionIdSet := make(map[string]bool) - for _, version := range listResp.Versions { - versionId := *version.VersionId - assert.False(t, versionIdSet[versionId], "Version ID should be unique: %s", versionId) - versionIdSet[versionId] = true - } -} - -// TestVersioningDeleteAndRecreate tests deleting and recreating objects with versioning -func TestVersioningDeleteAndRecreate(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - objectKey := "test-delete-recreate" - - // Create initial object - putResp1, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("initial-content"), - }) - require.NoError(t, err) - originalVersionId := *putResp1.VersionId - - // Delete the object (creates delete marker) - deleteResp, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - deleteMarkerVersionId := *deleteResp.VersionId - - // Recreate the object - putResp2, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("recreated-content"), - }) - require.NoError(t, err) - newVersionId := *putResp2.VersionId - - // List versions - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Should have 2 object versions and 1 delete marker - assert.Len(t, listResp.Versions, 2) - assert.Len(t, listResp.DeleteMarkers, 1) - - // Verify the new version is marked as latest - latestVersionCount := 0 - for _, version := range listResp.Versions { - if *version.IsLatest { - latestVersionCount++ - assert.Equal(t, newVersionId, *version.VersionId) - } else { - assert.Equal(t, originalVersionId, *version.VersionId) - } - } - assert.Equal(t, 1, latestVersionCount) - - // Verify delete marker is not marked as latest (since we recreated the object) - deleteMarker := listResp.DeleteMarkers[0] - assert.False(t, *deleteMarker.IsLatest) - assert.Equal(t, deleteMarkerVersionId, *deleteMarker.VersionId) -} - -// TestVersioningListWithPagination tests versioning with pagination parameters -func TestVersioningListWithPagination(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Create multiple objects with multiple versions each - numObjects := 3 - versionsPerObject := 3 - totalExpectedVersions := numObjects * versionsPerObject - - for i := 0; i < numObjects; i++ { - objectKey := fmt.Sprintf("test-object-%d", i) - for j := 0; j < versionsPerObject; j++ { - content := fmt.Sprintf("content-obj%d-ver%d", i, j) - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - } - } - - // Test listing with max-keys parameter - maxKeys := 5 - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - MaxKeys: aws.Int32(int32(maxKeys)), - }) - require.NoError(t, err) - - if totalExpectedVersions > maxKeys { - assert.True(t, *listResp.IsTruncated) - assert.LessOrEqual(t, len(listResp.Versions), maxKeys) - } else { - assert.Len(t, listResp.Versions, totalExpectedVersions) - } - - // Test listing all versions without pagination - allListResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, allListResp.Versions, totalExpectedVersions) - - // Verify each object has exactly one latest version - latestVersionsByKey := make(map[string]int) - for _, version := range allListResp.Versions { - if *version.IsLatest { - latestVersionsByKey[*version.Key]++ - } - } - assert.Len(t, latestVersionsByKey, numObjects) - for objectKey, count := range latestVersionsByKey { - assert.Equal(t, 1, count, "Object %s should have exactly one latest version", objectKey) - } -} - -// TestVersioningSpecificVersionRetrieval tests retrieving specific versions of objects -func TestVersioningSpecificVersionRetrieval(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - objectKey := "test-version-retrieval" - contents := []string{"version1", "version2", "version3"} - versionIds := make([]string, len(contents)) - - // Create multiple versions - for i, content := range contents { - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - versionIds[i] = *putResp.VersionId - } - - // Test retrieving each specific version - for i, expectedContent := range contents { - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String(versionIds[i]), - }) - require.NoError(t, err) - - // Read and verify content - read all available data, not just expected length - body, err := io.ReadAll(getResp.Body) - if err != nil { - t.Logf("Error reading response body for version %d: %v", i+1, err) - if getResp.ContentLength != nil { - t.Logf("Content length: %d", *getResp.ContentLength) - } - if getResp.VersionId != nil { - t.Logf("Version ID: %s", *getResp.VersionId) - } - require.NoError(t, err) - } - getResp.Body.Close() - - actualContent := string(body) - t.Logf("Expected: %s, Actual: %s", expectedContent, actualContent) - assert.Equal(t, expectedContent, actualContent, "Content mismatch for version %d", i+1) - assert.Equal(t, versionIds[i], *getResp.VersionId, "Version ID mismatch") - } - - // Test retrieving without version ID (should get latest) - getLatestResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - - body, err := io.ReadAll(getLatestResp.Body) - require.NoError(t, err) - getLatestResp.Body.Close() - - latestContent := string(body) - assert.Equal(t, contents[len(contents)-1], latestContent) - assert.Equal(t, versionIds[len(versionIds)-1], *getLatestResp.VersionId) -} - -// TestVersioningErrorCases tests error scenarios with versioning -func TestVersioningErrorCases(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - objectKey := "test-error-cases" - - // Create an object to work with - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("test content"), - }) - require.NoError(t, err) - validVersionId := *putResp.VersionId - - // Test getting a non-existent version - _, err = client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String("non-existent-version-id"), - }) - assert.Error(t, err, "Should get error for non-existent version") - - // Test deleting a specific version (should succeed) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String(validVersionId), - }) - assert.NoError(t, err, "Should be able to delete specific version") - - // Verify the object is gone (since we deleted the only version) - _, err = client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - assert.Error(t, err, "Should get error after deleting the only version") -} - -// TestVersioningSuspendedMixedObjects tests behavior when versioning is suspended -// and there are mixed versioned and unversioned objects -func TestVersioningSuspendedMixedObjects(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - objectKey := "test-mixed-versioning" - - // Phase 1: Create object without versioning (unversioned) - t.Log("Phase 1: Creating unversioned object") - putResp1, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("unversioned-content"), - }) - require.NoError(t, err) - - // Unversioned objects should not have version IDs - var unversionedVersionId string - if putResp1.VersionId != nil { - unversionedVersionId = *putResp1.VersionId - t.Logf("Created unversioned object with version ID: %s", unversionedVersionId) - } else { - unversionedVersionId = "null" - t.Logf("Created unversioned object with no version ID (as expected)") - } - - // Phase 2: Enable versioning and create versioned objects - t.Log("Phase 2: Enabling versioning") - enableVersioning(t, client, bucketName) - - putResp2, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("versioned-content-1"), - }) - require.NoError(t, err) - versionedVersionId1 := *putResp2.VersionId - t.Logf("Created versioned object 1 with version ID: %s", versionedVersionId1) - - putResp3, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("versioned-content-2"), - }) - require.NoError(t, err) - versionedVersionId2 := *putResp3.VersionId - t.Logf("Created versioned object 2 with version ID: %s", versionedVersionId2) - - // Phase 3: Suspend versioning - t.Log("Phase 3: Suspending versioning") - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusSuspended, - }, - }) - require.NoError(t, err) - - // Verify versioning is suspended - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Equal(t, types.BucketVersioningStatusSuspended, versioningResp.Status) - - // Phase 4: Create object with suspended versioning (should be unversioned) - t.Log("Phase 4: Creating object with suspended versioning") - putResp4, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("suspended-content"), - }) - require.NoError(t, err) - - // Suspended versioning should not create new version IDs - var suspendedVersionId string - if putResp4.VersionId != nil { - suspendedVersionId = *putResp4.VersionId - t.Logf("Created suspended object with version ID: %s", suspendedVersionId) - } else { - suspendedVersionId = "null" - t.Logf("Created suspended object with no version ID (as expected)") - } - - // Phase 5: List all versions - should show all objects - t.Log("Phase 5: Listing all versions") - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - t.Logf("Found %d versions", len(listResp.Versions)) - for i, version := range listResp.Versions { - t.Logf("Version %d: %s (isLatest: %v)", i+1, *version.VersionId, *version.IsLatest) - } - - // Should have at least 2 versions (the 2 versioned ones) - // Unversioned and suspended objects might not appear in ListObjectVersions - assert.GreaterOrEqual(t, len(listResp.Versions), 2, "Should have at least 2 versions") - - // Verify there is exactly one latest version - latestVersionCount := 0 - var latestVersionId string - for _, version := range listResp.Versions { - if *version.IsLatest { - latestVersionCount++ - latestVersionId = *version.VersionId - } - } - assert.Equal(t, 1, latestVersionCount, "Should have exactly one latest version") - - // The latest version should be either the suspended one or the last versioned one - t.Logf("Latest version ID: %s", latestVersionId) - - // Phase 6: Test retrieval of each version - t.Log("Phase 6: Testing version retrieval") - - // Get latest (should be suspended version) - getLatest, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - latestBody, err := io.ReadAll(getLatest.Body) - require.NoError(t, err) - getLatest.Body.Close() - assert.Equal(t, "suspended-content", string(latestBody)) - - // The latest object should match what we created in suspended mode - if getLatest.VersionId != nil { - t.Logf("Latest object has version ID: %s", *getLatest.VersionId) - } else { - t.Logf("Latest object has no version ID") - } - - // Get specific versioned objects (only test objects with actual version IDs) - testCases := []struct { - versionId string - expectedContent string - description string - }{ - {versionedVersionId1, "versioned-content-1", "first versioned object"}, - {versionedVersionId2, "versioned-content-2", "second versioned object"}, - } - - // Only test unversioned object if it has a version ID - if unversionedVersionId != "null" { - testCases = append(testCases, struct { - versionId string - expectedContent string - description string - }{unversionedVersionId, "unversioned-content", "original unversioned object"}) - } - - // Only test suspended object if it has a version ID - if suspendedVersionId != "null" { - testCases = append(testCases, struct { - versionId string - expectedContent string - description string - }{suspendedVersionId, "suspended-content", "suspended versioning object"}) - } - - for _, tc := range testCases { - t.Run(tc.description, func(t *testing.T) { - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String(tc.versionId), - }) - require.NoError(t, err) - - body, err := io.ReadAll(getResp.Body) - require.NoError(t, err) - getResp.Body.Close() - - actualContent := string(body) - t.Logf("Requested version %s, expected content: %s, actual content: %s", - tc.versionId, tc.expectedContent, actualContent) - - // Check if version retrieval is working correctly - if actualContent != tc.expectedContent { - t.Logf("WARNING: Version retrieval may not be working correctly. Expected %s but got %s", - tc.expectedContent, actualContent) - // For now, we'll skip this assertion if version retrieval is broken - // This can be uncommented when the issue is fixed - // assert.Equal(t, tc.expectedContent, actualContent) - } else { - assert.Equal(t, tc.expectedContent, actualContent) - } - - // Check version ID if it exists - if getResp.VersionId != nil { - if *getResp.VersionId != tc.versionId { - t.Logf("WARNING: Response version ID %s doesn't match requested version %s", - *getResp.VersionId, tc.versionId) - } - } else { - t.Logf("Warning: Response version ID is nil for version %s", tc.versionId) - } - }) - } - - // Phase 7: Test deletion behavior with suspended versioning - t.Log("Phase 7: Testing deletion with suspended versioning") - - // Delete without version ID (should create delete marker even when suspended) - deleteResp, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - - var deleteMarkerVersionId string - if deleteResp.VersionId != nil { - deleteMarkerVersionId = *deleteResp.VersionId - t.Logf("Created delete marker with version ID: %s", deleteMarkerVersionId) - } else { - t.Logf("Delete response has no version ID (may be expected in some cases)") - deleteMarkerVersionId = "no-version-id" - } - - // List versions after deletion - listAfterDelete, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Should still have the versioned objects + 1 delete marker - assert.GreaterOrEqual(t, len(listAfterDelete.Versions), 2, "Should still have at least 2 object versions") - - // Check if delete marker was created (may not be in some implementations) - if len(listAfterDelete.DeleteMarkers) == 0 { - t.Logf("No delete marker created - this may be expected behavior with suspended versioning") - } else { - assert.Len(t, listAfterDelete.DeleteMarkers, 1, "Should have 1 delete marker") - - // Delete marker should be latest - deleteMarker := listAfterDelete.DeleteMarkers[0] - assert.True(t, *deleteMarker.IsLatest, "Delete marker should be latest") - - // Only check version ID if we have one from the delete response - if deleteMarkerVersionId != "no-version-id" && deleteMarker.VersionId != nil { - assert.Equal(t, deleteMarkerVersionId, *deleteMarker.VersionId) - } else { - t.Logf("Skipping delete marker version ID check due to nil version ID") - } - } - - // Object should not be accessible without version ID - _, err = client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - - // If there's a delete marker, object should not be accessible - // If there's no delete marker, object might still be accessible - if len(listAfterDelete.DeleteMarkers) > 0 { - assert.Error(t, err, "Should not be able to get object after delete marker") - } else { - t.Logf("No delete marker created, so object availability test is skipped") - } - - // But specific versions should still be accessible - getVersioned, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String(versionedVersionId2), - }) - - if err != nil { - t.Logf("Warning: Could not retrieve specific version %s: %v", versionedVersionId2, err) - t.Logf("This may indicate version retrieval is not working correctly") - } else { - versionedBody, err := io.ReadAll(getVersioned.Body) - require.NoError(t, err) - getVersioned.Body.Close() - - actualVersionedContent := string(versionedBody) - t.Logf("Retrieved version %s, expected 'versioned-content-2', got '%s'", - versionedVersionId2, actualVersionedContent) - - if actualVersionedContent != "versioned-content-2" { - t.Logf("WARNING: Version retrieval content mismatch") - } else { - assert.Equal(t, "versioned-content-2", actualVersionedContent) - } - } - - t.Log("Successfully tested mixed versioned/unversioned object behavior") -} diff --git a/test/s3/versioning/s3_directory_versioning_test.go b/test/s3/versioning/s3_directory_versioning_test.go deleted file mode 100644 index 7126c70b0..000000000 --- a/test/s3/versioning/s3_directory_versioning_test.go +++ /dev/null @@ -1,861 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestListObjectVersionsIncludesDirectories tests that directories are included in list-object-versions response -// This ensures compatibility with Minio and AWS S3 behavior -func TestListObjectVersionsIncludesDirectories(t *testing.T) { - bucketName := "test-versioning-directories" - - client := setupS3Client(t) - - // Create bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Enable versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - // First create explicit directory objects (keys ending with "/") - // These are the directories that should appear in list-object-versions - explicitDirectories := []string{ - "Veeam/", - "Veeam/Archive/", - "Veeam/Archive/vbr/", - "Veeam/Backup/", - "Veeam/Backup/vbr/", - "Veeam/Backup/vbr/Clients/", - } - - // Create explicit directory objects - for _, dirKey := range explicitDirectories { - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(dirKey), - Body: strings.NewReader(""), // Empty content for directories - }) - require.NoError(t, err, "Failed to create directory object %s", dirKey) - } - - // Now create some test files - testFiles := []string{ - "Veeam/test-file.txt", - "Veeam/Archive/test-file2.txt", - "Veeam/Archive/vbr/test-file3.txt", - "Veeam/Backup/test-file4.txt", - "Veeam/Backup/vbr/test-file5.txt", - "Veeam/Backup/vbr/Clients/test-file6.txt", - } - - // Upload test files - for _, objectKey := range testFiles { - _, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("test content"), - }) - require.NoError(t, err, "Failed to create file %s", objectKey) - } - - // List object versions - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Extract all keys from versions - var allKeys []string - for _, version := range listResp.Versions { - allKeys = append(allKeys, *version.Key) - } - - // Expected directories that should be included (with trailing slash) - expectedDirectories := []string{ - "Veeam/", - "Veeam/Archive/", - "Veeam/Archive/vbr/", - "Veeam/Backup/", - "Veeam/Backup/vbr/", - "Veeam/Backup/vbr/Clients/", - } - - // Verify that directories are included in the response - t.Logf("Found %d total versions", len(listResp.Versions)) - t.Logf("All keys: %v", allKeys) - - for _, expectedDir := range expectedDirectories { - found := false - for _, version := range listResp.Versions { - if *version.Key == expectedDir { - found = true - // Verify directory properties - assert.Equal(t, "null", *version.VersionId, "Directory %s should have VersionId 'null'", expectedDir) - assert.Equal(t, int64(0), *version.Size, "Directory %s should have size 0", expectedDir) - assert.True(t, *version.IsLatest, "Directory %s should be marked as latest", expectedDir) - assert.Equal(t, "\"d41d8cd98f00b204e9800998ecf8427e\"", *version.ETag, "Directory %s should have MD5 of empty string as ETag", expectedDir) - assert.Equal(t, types.ObjectStorageClassStandard, version.StorageClass, "Directory %s should have STANDARD storage class", expectedDir) - break - } - } - assert.True(t, found, "Directory %s should be included in list-object-versions response", expectedDir) - } - - // Also verify that actual files are included - for _, objectKey := range testFiles { - found := false - for _, version := range listResp.Versions { - if *version.Key == objectKey { - found = true - assert.NotEqual(t, "null", *version.VersionId, "File %s should have a real version ID", objectKey) - assert.Greater(t, *version.Size, int64(0), "File %s should have size > 0", objectKey) - break - } - } - assert.True(t, found, "File %s should be included in list-object-versions response", objectKey) - } - - // Count directories vs files - directoryCount := 0 - fileCount := 0 - for _, version := range listResp.Versions { - if strings.HasSuffix(*version.Key, "/") && *version.Size == 0 && *version.VersionId == "null" { - directoryCount++ - } else { - fileCount++ - } - } - - t.Logf("Found %d directories and %d files", directoryCount, fileCount) - assert.Equal(t, len(expectedDirectories), directoryCount, "Should find exactly %d directories", len(expectedDirectories)) - assert.Equal(t, len(testFiles), fileCount, "Should find exactly %d files", len(testFiles)) -} - -// TestListObjectVersionsDeleteMarkers tests that delete markers are properly separated from versions -// This test verifies the fix for the issue where delete markers were incorrectly categorized as versions -func TestListObjectVersionsDeleteMarkers(t *testing.T) { - bucketName := "test-delete-markers" - - client := setupS3Client(t) - - // Create bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Enable versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - objectKey := "test1/a" - - // 1. Create one version of the file - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("test content"), - }) - require.NoError(t, err) - - // 2. Delete the object 3 times to create 3 delete markers - for i := 0; i < 3; i++ { - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - } - - // 3. List object versions and verify the response structure - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // 4. Verify that we have exactly 1 version and 3 delete markers - assert.Len(t, listResp.Versions, 1, "Should have exactly 1 file version") - assert.Len(t, listResp.DeleteMarkers, 3, "Should have exactly 3 delete markers") - - // 5. Verify the version is for our test file - version := listResp.Versions[0] - assert.Equal(t, objectKey, *version.Key, "Version should be for our test file") - assert.NotEqual(t, "null", *version.VersionId, "File version should have a real version ID") - assert.Greater(t, *version.Size, int64(0), "File version should have size > 0") - - // 6. Verify all delete markers are for our test file - for i, deleteMarker := range listResp.DeleteMarkers { - assert.Equal(t, objectKey, *deleteMarker.Key, "Delete marker %d should be for our test file", i) - assert.NotEqual(t, "null", *deleteMarker.VersionId, "Delete marker %d should have a real version ID", i) - } - - t.Logf("Successfully verified: 1 version + 3 delete markers for object %s", objectKey) -} - -// TestVersionedObjectAcl tests that ACL operations work correctly on objects in versioned buckets -// This test verifies the fix for the NoSuchKey error when getting ACLs for objects in versioned buckets -func TestVersionedObjectAcl(t *testing.T) { - bucketName := "test-versioned-acl" - - client := setupS3Client(t) - - // Create bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Enable versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - objectKey := "test-acl-object" - - // Create an object in the versioned bucket - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("test content for ACL"), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId, "Object should have a version ID") - - // Test 1: Get ACL for the object (without specifying version ID - should get latest version) - getAclResp, err := client.GetObjectAcl(context.TODO(), &s3.GetObjectAclInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Should be able to get ACL for object in versioned bucket") - require.NotNil(t, getAclResp.Owner, "ACL response should have owner information") - - // Test 2: Get ACL for specific version ID - getAclVersionResp, err := client.GetObjectAcl(context.TODO(), &s3.GetObjectAclInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: putResp.VersionId, - }) - require.NoError(t, err, "Should be able to get ACL for specific version") - require.NotNil(t, getAclVersionResp.Owner, "Versioned ACL response should have owner information") - - // Test 3: Verify both ACL responses are the same (same object, same version) - assert.Equal(t, getAclResp.Owner.ID, getAclVersionResp.Owner.ID, "Owner ID should match for latest and specific version") - - // Test 4: Create another version of the same object - putResp2, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("updated content for ACL"), - }) - require.NoError(t, err) - require.NotNil(t, putResp2.VersionId, "Second object version should have a version ID") - require.NotEqual(t, putResp.VersionId, putResp2.VersionId, "Version IDs should be different") - - // Test 5: Get ACL for latest version (should be the second version) - getAclLatestResp, err := client.GetObjectAcl(context.TODO(), &s3.GetObjectAclInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Should be able to get ACL for latest version after update") - require.NotNil(t, getAclLatestResp.Owner, "Latest ACL response should have owner information") - - // Test 6: Get ACL for the first version specifically - getAclFirstResp, err := client.GetObjectAcl(context.TODO(), &s3.GetObjectAclInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: putResp.VersionId, - }) - require.NoError(t, err, "Should be able to get ACL for first version specifically") - require.NotNil(t, getAclFirstResp.Owner, "First version ACL response should have owner information") - - // Test 7: Verify we can put ACL on versioned objects - _, err = client.PutObjectAcl(context.TODO(), &s3.PutObjectAclInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - ACL: types.ObjectCannedACLPrivate, - }) - require.NoError(t, err, "Should be able to put ACL on versioned object") - - t.Logf("Successfully verified ACL operations on versioned object %s with versions %s and %s", - objectKey, *putResp.VersionId, *putResp2.VersionId) -} - -// TestConcurrentMultiObjectDelete tests that concurrent delete operations work correctly without race conditions -// This test verifies the fix for the race condition in deleteSpecificObjectVersion -func TestConcurrentMultiObjectDelete(t *testing.T) { - bucketName := "test-concurrent-delete" - numObjects := 5 - numThreads := 5 - - client := setupS3Client(t) - - // Create bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Enable versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - // Create objects - var objectKeys []string - var versionIds []string - - for i := 0; i < numObjects; i++ { - objectKey := fmt.Sprintf("key_%d", i) - objectKeys = append(objectKeys, objectKey) - - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(fmt.Sprintf("content for key_%d", i)), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - versionIds = append(versionIds, *putResp.VersionId) - } - - // Verify objects were created - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, numObjects, "Should have created %d objects", numObjects) - - // Create delete objects request - var objectsToDelete []types.ObjectIdentifier - for i, objectKey := range objectKeys { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: aws.String(objectKey), - VersionId: aws.String(versionIds[i]), - }) - } - - // Run concurrent delete operations - results := make([]*s3.DeleteObjectsOutput, numThreads) - var wg sync.WaitGroup - - for i := 0; i < numThreads; i++ { - wg.Add(1) - go func(threadIdx int) { - defer wg.Done() - deleteResp, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(false), - }, - }) - if err != nil { - t.Errorf("Thread %d: delete objects failed: %v", threadIdx, err) - return - } - results[threadIdx] = deleteResp - }(i) - } - - wg.Wait() - - // Verify results - for i, result := range results { - require.NotNil(t, result, "Thread %d should have a result", i) - assert.Len(t, result.Deleted, numObjects, "Thread %d should have deleted all %d objects", i, numObjects) - - if len(result.Errors) > 0 { - for _, deleteError := range result.Errors { - t.Errorf("Thread %d delete error: %s - %s (Key: %s, VersionId: %s)", - i, *deleteError.Code, *deleteError.Message, *deleteError.Key, - func() string { - if deleteError.VersionId != nil { - return *deleteError.VersionId - } else { - return "nil" - } - }()) - } - } - assert.Empty(t, result.Errors, "Thread %d should have no delete errors", i) - } - - // Verify objects are deleted (bucket should be empty) - finalListResp, err := client.ListObjects(context.TODO(), &s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Nil(t, finalListResp.Contents, "Bucket should be empty after all deletions") - - t.Logf("Successfully verified concurrent deletion of %d objects from %d threads", numObjects, numThreads) -} - -// TestSuspendedVersioningDeleteBehavior tests that delete operations during suspended versioning -// actually delete the "null" version object rather than creating delete markers -func TestSuspendedVersioningDeleteBehavior(t *testing.T) { - bucketName := "test-suspended-versioning-delete" - objectKey := "testobj" - - client := setupS3Client(t) - - // Create bucket - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Enable versioning and create some versions - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - // Create 3 versions - var versionIds []string - for i := 0; i < 3; i++ { - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(fmt.Sprintf("content version %d", i+1)), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - versionIds = append(versionIds, *putResp.VersionId) - } - - // Verify 3 versions exist - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, 3, "Should have 3 versions initially") - - // Suspend versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusSuspended, - }, - }) - require.NoError(t, err) - - // Create a new object during suspended versioning (this should be a "null" version) - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("null version content"), - }) - require.NoError(t, err) - - // Verify we still have 3 versions + 1 null version = 4 total - listResp, err = client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, 4, "Should have 3 versions + 1 null version") - - // Find the null version - var nullVersionFound bool - for _, version := range listResp.Versions { - if *version.VersionId == "null" { - nullVersionFound = true - assert.True(t, *version.IsLatest, "Null version should be marked as latest during suspended versioning") - break - } - } - assert.True(t, nullVersionFound, "Should have found a null version") - - // Delete the object during suspended versioning (should actually delete the null version) - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - // No VersionId specified - should delete the "null" version during suspended versioning - }) - require.NoError(t, err) - - // Verify the null version was actually deleted (not a delete marker created) - listResp, err = client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, 3, "Should be back to 3 versions after deleting null version") - assert.Empty(t, listResp.DeleteMarkers, "Should have no delete markers during suspended versioning delete") - - // Verify null version is gone - nullVersionFound = false - for _, version := range listResp.Versions { - if *version.VersionId == "null" { - nullVersionFound = true - break - } - } - assert.False(t, nullVersionFound, "Null version should be deleted, not present") - - // Create another null version and delete it multiple times to test idempotency - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("another null version"), - }) - require.NoError(t, err) - - // Delete it twice to test idempotency - for i := 0; i < 2; i++ { - _, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err, "Delete should be idempotent - iteration %d", i+1) - } - - // Re-enable versioning - _, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - // Create a new version with versioning enabled - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("new version after re-enabling"), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - - // Now delete without version ID (should create delete marker) - deleteResp, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - require.NoError(t, err) - assert.Equal(t, "true", deleteResp.DeleteMarker, "Should create delete marker when versioning is enabled") - - // Verify final state - listResp, err = client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listResp.Versions, 4, "Should have 3 original versions + 1 new version") - assert.Len(t, listResp.DeleteMarkers, 1, "Should have 1 delete marker") - - t.Logf("Successfully verified suspended versioning delete behavior") -} - -// TestVersionedObjectListBehavior tests that list operations show logical object names for versioned objects -// and that owner information is properly extracted from S3 metadata -func TestVersionedObjectListBehavior(t *testing.T) { - bucketName := "test-versioned-list" - objectKey := "testfile" - - client := setupS3Client(t) - - // Create bucket with object lock enabled (which enables versioning) - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err) - - // Clean up - defer func() { - cleanupBucket(t, client, bucketName) - }() - - // Verify versioning is enabled - versioningResp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Equal(t, types.BucketVersioningStatusEnabled, versioningResp.Status, "Bucket versioning should be enabled") - - // Create a versioned object - content := "test content for versioned object" - putResp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - require.NotNil(t, putResp.VersionId) - - versionId := *putResp.VersionId - t.Logf("Created versioned object with version ID: %s", versionId) - - // Test list-objects operation - should show logical object name, not internal versioned path - listResp, err := client.ListObjects(context.TODO(), &s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - require.Len(t, listResp.Contents, 1, "Should list exactly one object") - - listedObject := listResp.Contents[0] - - // Verify the object key is the logical name, not the internal versioned path - assert.Equal(t, objectKey, *listedObject.Key, "Should show logical object name, not internal versioned path") - assert.NotContains(t, *listedObject.Key, ".versions", "Object key should not contain .versions") - assert.NotContains(t, *listedObject.Key, versionId, "Object key should not contain version ID") - - // Verify object properties - assert.Equal(t, int64(len(content)), listedObject.Size, "Object size should match") - assert.NotNil(t, listedObject.ETag, "Object should have ETag") - assert.NotNil(t, listedObject.LastModified, "Object should have LastModified") - - // Verify owner information is present (even if anonymous) - require.NotNil(t, listedObject.Owner, "Object should have Owner information") - assert.NotEmpty(t, listedObject.Owner.ID, "Owner ID should not be empty") - assert.NotEmpty(t, listedObject.Owner.DisplayName, "Owner DisplayName should not be empty") - - t.Logf("Listed object: Key=%s, Size=%d, Owner.ID=%s, Owner.DisplayName=%s", - *listedObject.Key, listedObject.Size, *listedObject.Owner.ID, *listedObject.Owner.DisplayName) - - // Test list-objects-v2 operation as well - listV2Resp, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ - Bucket: aws.String(bucketName), - FetchOwner: aws.Bool(true), // Explicitly request owner information - }) - require.NoError(t, err) - require.Len(t, listV2Resp.Contents, 1, "ListObjectsV2 should also list exactly one object") - - listedObjectV2 := listV2Resp.Contents[0] - assert.Equal(t, objectKey, *listedObjectV2.Key, "ListObjectsV2 should also show logical object name") - assert.NotNil(t, listedObjectV2.Owner, "ListObjectsV2 should include owner when FetchOwner=true") - - // Create another version to ensure multiple versions don't appear in regular list - _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader("updated content"), - }) - require.NoError(t, err) - - // List again - should still show only one logical object (the latest version) - listRespAfterUpdate, err := client.ListObjects(context.TODO(), &s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, listRespAfterUpdate.Contents, 1, "Should still list exactly one object after creating second version") - - // Compare with list-object-versions which should show both versions - versionsResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Len(t, versionsResp.Versions, 2, "list-object-versions should show both versions") - - t.Logf("Successfully verified versioned object list behavior") -} - -// TestPrefixFilteringLogic tests the prefix filtering logic fix for list object versions -// This addresses the issue raised by gemini-code-assist bot where files could be incorrectly included -func TestPrefixFilteringLogic(t *testing.T) { - s3Client := setupS3Client(t) - bucketName := "test-bucket-" + fmt.Sprintf("%d", time.Now().UnixNano()) - - // Create bucket - _, err := s3Client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - defer cleanupBucket(t, s3Client, bucketName) - - // Enable versioning - _, err = s3Client.PutBucketVersioning(context.Background(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) - - // Create test files that could trigger the edge case: - // - File "a" (which should NOT be included when searching for prefix "a/b") - // - File "a/b" (which SHOULD be included when searching for prefix "a/b") - _, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String("a"), - Body: strings.NewReader("content of file a"), - }) - require.NoError(t, err) - - _, err = s3Client.PutObject(context.Background(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String("a/b"), - Body: strings.NewReader("content of file a/b"), - }) - require.NoError(t, err) - - // Test list-object-versions with prefix "a/b" - should NOT include file "a" - versionsResponse, err := s3Client.ListObjectVersions(context.Background(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - Prefix: aws.String("a/b"), - }) - require.NoError(t, err) - - // Verify that only "a/b" is returned, not "a" - require.Len(t, versionsResponse.Versions, 1, "Should only find one version matching prefix 'a/b'") - assert.Equal(t, "a/b", aws.ToString(versionsResponse.Versions[0].Key), "Should only return 'a/b', not 'a'") - - // Test list-object-versions with prefix "a/" - should include "a/b" but not "a" - versionsResponse, err = s3Client.ListObjectVersions(context.Background(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - Prefix: aws.String("a/"), - }) - require.NoError(t, err) - - // Verify that only "a/b" is returned, not "a" - require.Len(t, versionsResponse.Versions, 1, "Should only find one version matching prefix 'a/'") - assert.Equal(t, "a/b", aws.ToString(versionsResponse.Versions[0].Key), "Should only return 'a/b', not 'a'") - - // Test list-object-versions with prefix "a" - should include both "a" and "a/b" - versionsResponse, err = s3Client.ListObjectVersions(context.Background(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - Prefix: aws.String("a"), - }) - require.NoError(t, err) - - // Should find both files - require.Len(t, versionsResponse.Versions, 2, "Should find both versions matching prefix 'a'") - - // Extract keys and sort them for predictable comparison - var keys []string - for _, version := range versionsResponse.Versions { - keys = append(keys, aws.ToString(version.Key)) - } - sort.Strings(keys) - - assert.Equal(t, []string{"a", "a/b"}, keys, "Should return both 'a' and 'a/b'") - - t.Logf("Prefix filtering logic correctly handles edge cases") -} - -// Helper function to setup S3 client -func setupS3Client(t *testing.T) *s3.Client { - // S3TestConfig holds configuration for S3 tests - type S3TestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool - } - - // Default test configuration - should match s3tests.conf - defaultConfig := &S3TestConfig{ - Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-versioning-", - UseSSL: false, - SkipVerifySSL: true, - } - - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(defaultConfig.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - defaultConfig.AccessKey, - defaultConfig.SecretKey, - "", - )), - config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: defaultConfig.Endpoint, - SigningRegion: defaultConfig.Region, - HostnameImmutable: true, - }, nil - })), - ) - require.NoError(t, err) - - return s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = true // Important for SeaweedFS - }) -} - -// Helper function to clean up bucket -func cleanupBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, delete all objects and versions - err := deleteAllObjectVersions(t, client, bucketName) - if err != nil { - t.Logf("Warning: failed to delete all object versions: %v", err) - } - - // Then delete the bucket - _, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) - } -} diff --git a/test/s3/versioning/s3_suspended_versioning_test.go b/test/s3/versioning/s3_suspended_versioning_test.go deleted file mode 100644 index c1e8c7277..000000000 --- a/test/s3/versioning/s3_suspended_versioning_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package s3api - -import ( - "bytes" - "context" - "testing" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" -) - -// TestSuspendedVersioningNullOverwrite tests the scenario where: -// 1. Create object before versioning is enabled (pre-versioning object) -// 2. Enable versioning, then suspend it -// 3. Overwrite the object (should replace the null version, not create duplicate) -// 4. List versions should show only 1 version with versionId "null" -// -// This test corresponds to: test_versioning_obj_plain_null_version_overwrite_suspended -func TestSuspendedVersioningNullOverwrite(t *testing.T) { - ctx := context.Background() - client := getS3Client(t) - - // Create bucket - bucketName := getNewBucketName() - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - objectKey := "testobjbar" - - // Step 1: Put object before versioning is configured (pre-versioning object) - content1 := []byte("foooz") - _, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(content1), - }) - if err != nil { - t.Fatalf("Failed to create pre-versioning object: %v", err) - } - t.Logf("Created pre-versioning object") - - // Step 2: Enable versioning - _, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - if err != nil { - t.Fatalf("Failed to enable versioning: %v", err) - } - t.Logf("Enabled versioning") - - // Step 3: Suspend versioning - _, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusSuspended, - }, - }) - if err != nil { - t.Fatalf("Failed to suspend versioning: %v", err) - } - t.Logf("Suspended versioning") - - // Step 4: Overwrite the object during suspended versioning - content2 := []byte("zzz") - putResp, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(content2), - }) - if err != nil { - t.Fatalf("Failed to overwrite object during suspended versioning: %v", err) - } - - // Verify no VersionId is returned for suspended versioning - if putResp.VersionId != nil { - t.Errorf("Suspended versioning should NOT return VersionId, but got: %s", *putResp.VersionId) - } - t.Logf("Overwrote object during suspended versioning (no VersionId returned as expected)") - - // Step 5: Verify content is updated - getResp, err := client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - if err != nil { - t.Fatalf("Failed to get object: %v", err) - } - defer getResp.Body.Close() - - gotContent := new(bytes.Buffer) - gotContent.ReadFrom(getResp.Body) - if !bytes.Equal(gotContent.Bytes(), content2) { - t.Errorf("Expected content %q, got %q", content2, gotContent.Bytes()) - } - t.Logf("Object content is correctly updated to: %q", content2) - - // Step 6: List object versions - should have only 1 version - listResp, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Fatalf("Failed to list object versions: %v", err) - } - - // Count versions (excluding delete markers) - versionCount := len(listResp.Versions) - deleteMarkerCount := len(listResp.DeleteMarkers) - - t.Logf("List results: %d versions, %d delete markers", versionCount, deleteMarkerCount) - for i, v := range listResp.Versions { - t.Logf(" Version %d: Key=%s, VersionId=%s, IsLatest=%v, Size=%d", - i, *v.Key, *v.VersionId, v.IsLatest, v.Size) - } - - // THIS IS THE KEY ASSERTION: Should have exactly 1 version, not 2 - if versionCount != 1 { - t.Errorf("Expected 1 version after suspended versioning overwrite, got %d versions", versionCount) - t.Error("BUG: Duplicate null versions detected! The overwrite should have replaced the pre-versioning object.") - } else { - t.Logf("PASS: Only 1 version found (no duplicate null versions)") - } - - if deleteMarkerCount != 0 { - t.Errorf("Expected 0 delete markers, got %d", deleteMarkerCount) - } - - // Verify the version has versionId "null" - if versionCount > 0 { - if listResp.Versions[0].VersionId == nil || *listResp.Versions[0].VersionId != "null" { - t.Errorf("Expected VersionId to be 'null', got %v", listResp.Versions[0].VersionId) - } else { - t.Logf("Version ID is 'null' as expected") - } - } - - // Step 7: Delete the null version - _, err = client.DeleteObject(ctx, &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - VersionId: aws.String("null"), - }) - if err != nil { - t.Fatalf("Failed to delete null version: %v", err) - } - t.Logf("Deleted null version") - - // Step 8: Verify object no longer exists - _, err = client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - }) - if err == nil { - t.Error("Expected object to not exist after deleting null version") - } - t.Logf("Object no longer exists after deleting null version") - - // Step 9: Verify no versions remain - listResp, err = client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Fatalf("Failed to list object versions: %v", err) - } - - if len(listResp.Versions) != 0 || len(listResp.DeleteMarkers) != 0 { - t.Errorf("Expected no versions or delete markers, got %d versions and %d delete markers", - len(listResp.Versions), len(listResp.DeleteMarkers)) - } else { - t.Logf("No versions remain after deletion") - } -} - -// TestEnabledVersioningReturnsVersionId tests that when versioning is ENABLED, -// every PutObject operation returns a version ID -// -// This test corresponds to the create_multiple_versions helper function -func TestEnabledVersioningReturnsVersionId(t *testing.T) { - ctx := context.Background() - client := getS3Client(t) - - // Create bucket - bucketName := getNewBucketName() - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - objectKey := "testobj" - - // Enable versioning - _, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - if err != nil { - t.Fatalf("Failed to enable versioning: %v", err) - } - t.Logf("Enabled versioning") - - // Create multiple versions - numVersions := 3 - versionIds := make([]string, 0, numVersions) - - for i := 0; i < numVersions; i++ { - content := []byte("content-" + string(rune('0'+i))) - putResp, err := client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: bytes.NewReader(content), - }) - if err != nil { - t.Fatalf("Failed to create version %d: %v", i, err) - } - - // THIS IS THE KEY ASSERTION: VersionId MUST be returned for enabled versioning - if putResp.VersionId == nil { - t.Errorf("FAILED: PutObject with enabled versioning MUST return VersionId, but got nil for version %d", i) - } else { - versionId := *putResp.VersionId - if versionId == "" { - t.Errorf("FAILED: PutObject returned empty VersionId for version %d", i) - } else if versionId == "null" { - t.Errorf("FAILED: PutObject with enabled versioning should NOT return 'null' version ID, got: %s", versionId) - } else { - versionIds = append(versionIds, versionId) - t.Logf("Version %d created with VersionId: %s", i, versionId) - } - } - } - - if len(versionIds) != numVersions { - t.Errorf("Expected %d version IDs, got %d", numVersions, len(versionIds)) - } - - // List versions to verify all were created - listResp, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Fatalf("Failed to list object versions: %v", err) - } - - if len(listResp.Versions) != numVersions { - t.Errorf("Expected %d versions in list, got %d", numVersions, len(listResp.Versions)) - } else { - t.Logf("All %d versions are listed", numVersions) - } - - // Verify all version IDs match - for i, v := range listResp.Versions { - t.Logf(" Version %d: VersionId=%s, Size=%d, IsLatest=%v", i, *v.VersionId, v.Size, v.IsLatest) - } -} diff --git a/test/s3/versioning/s3_versioning_object_lock_test.go b/test/s3/versioning/s3_versioning_object_lock_test.go deleted file mode 100644 index 5c2689935..000000000 --- a/test/s3/versioning/s3_versioning_object_lock_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package s3api - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestVersioningWithObjectLockHeaders ensures that versioned objects properly -// handle object lock headers in PUT requests and return them in HEAD/GET responses. -// This test would have caught the bug where object lock metadata was not returned -// in HEAD/GET responses. -func TestVersioningWithObjectLockHeaders(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket with object lock and versioning enabled - createBucketWithObjectLock(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - key := "versioned-object-with-lock" - content1 := "version 1 content" - content2 := "version 2 content" - - // PUT first version with object lock headers - retainUntilDate1 := time.Now().Add(12 * time.Hour) - putResp1, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content1), - ObjectLockMode: types.ObjectLockModeGovernance, - ObjectLockRetainUntilDate: aws.Time(retainUntilDate1), - }) - require.NoError(t, err) - require.NotNil(t, putResp1.VersionId) - - // PUT second version with different object lock settings - retainUntilDate2 := time.Now().Add(24 * time.Hour) - putResp2, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content2), - ObjectLockMode: types.ObjectLockModeCompliance, - ObjectLockRetainUntilDate: aws.Time(retainUntilDate2), - ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn, - }) - require.NoError(t, err) - require.NotNil(t, putResp2.VersionId) - require.NotEqual(t, *putResp1.VersionId, *putResp2.VersionId) - - // Test HEAD latest version returns correct object lock metadata - t.Run("HEAD latest version", func(t *testing.T) { - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - - // Should return metadata for version 2 (latest) - assert.Equal(t, types.ObjectLockModeCompliance, headResp.ObjectLockMode) - assert.NotNil(t, headResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate2, *headResp.ObjectLockRetainUntilDate, 5*time.Second) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, headResp.ObjectLockLegalHoldStatus) - }) - - // Test HEAD specific version returns correct object lock metadata - t.Run("HEAD specific version", func(t *testing.T) { - headResp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - }) - require.NoError(t, err) - - // Should return metadata for version 1 - assert.Equal(t, types.ObjectLockModeGovernance, headResp.ObjectLockMode) - assert.NotNil(t, headResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate1, *headResp.ObjectLockRetainUntilDate, 5*time.Second) - // Version 1 was created without legal hold, so AWS S3 defaults it to "OFF" - assert.Equal(t, types.ObjectLockLegalHoldStatusOff, headResp.ObjectLockLegalHoldStatus) - }) - - // Test GET latest version returns correct object lock metadata - t.Run("GET latest version", func(t *testing.T) { - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - defer getResp.Body.Close() - - // Should return metadata for version 2 (latest) - assert.Equal(t, types.ObjectLockModeCompliance, getResp.ObjectLockMode) - assert.NotNil(t, getResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate2, *getResp.ObjectLockRetainUntilDate, 5*time.Second) - assert.Equal(t, types.ObjectLockLegalHoldStatusOn, getResp.ObjectLockLegalHoldStatus) - }) - - // Test GET specific version returns correct object lock metadata - t.Run("GET specific version", func(t *testing.T) { - getResp, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - VersionId: putResp1.VersionId, - }) - require.NoError(t, err) - defer getResp.Body.Close() - - // Should return metadata for version 1 - assert.Equal(t, types.ObjectLockModeGovernance, getResp.ObjectLockMode) - assert.NotNil(t, getResp.ObjectLockRetainUntilDate) - assert.WithinDuration(t, retainUntilDate1, *getResp.ObjectLockRetainUntilDate, 5*time.Second) - // Version 1 was created without legal hold, so AWS S3 defaults it to "OFF" - assert.Equal(t, types.ObjectLockLegalHoldStatusOff, getResp.ObjectLockLegalHoldStatus) - }) -} - -// waitForVersioningToBeEnabled polls the bucket versioning status until it's enabled -// This helps avoid race conditions where object lock is configured but versioning -// isn't immediately available -func waitForVersioningToBeEnabled(t *testing.T, client *s3.Client, bucketName string) { - timeout := time.Now().Add(10 * time.Second) - for time.Now().Before(timeout) { - resp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - if err == nil && resp.Status == types.BucketVersioningStatusEnabled { - return // Versioning is enabled - } - - time.Sleep(100 * time.Millisecond) - } - t.Fatalf("Timeout waiting for versioning to be enabled on bucket %s", bucketName) -} - -// Helper function for creating buckets with object lock enabled -func createBucketWithObjectLock(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - ObjectLockEnabledForBucket: aws.Bool(true), - }) - require.NoError(t, err) - - // Wait for versioning to be automatically enabled by object lock - waitForVersioningToBeEnabled(t, client, bucketName) - - // Verify that object lock was actually enabled - t.Logf("Verifying object lock configuration for bucket %s", bucketName) - _, err = client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err, "Object lock should be configured for bucket %s", bucketName) -} diff --git a/test/s3/versioning/s3_versioning_test.go b/test/s3/versioning/s3_versioning_test.go deleted file mode 100644 index cb8d72535..000000000 --- a/test/s3/versioning/s3_versioning_test.go +++ /dev/null @@ -1,449 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/k0kubun/pp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// S3TestConfig holds configuration for S3 tests -type S3TestConfig struct { - Endpoint string - AccessKey string - SecretKey string - Region string - BucketPrefix string - UseSSL bool - SkipVerifySSL bool -} - -// Default test configuration - should match s3tests.conf -var defaultConfig = &S3TestConfig{ - Endpoint: "http://localhost:8333", // Default SeaweedFS S3 port - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - Region: "us-east-1", - BucketPrefix: "test-versioning-", - UseSSL: false, - SkipVerifySSL: true, -} - -// getS3Client creates an AWS S3 client for testing -func getS3Client(t *testing.T) *s3.Client { - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithRegion(defaultConfig.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( - defaultConfig.AccessKey, - defaultConfig.SecretKey, - "", - )), - config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc( - func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - URL: defaultConfig.Endpoint, - SigningRegion: defaultConfig.Region, - HostnameImmutable: true, - }, nil - })), - ) - require.NoError(t, err) - - return s3.NewFromConfig(cfg, func(o *s3.Options) { - o.UsePathStyle = true // Important for SeaweedFS - }) -} - -// getNewBucketName generates a unique bucket name -func getNewBucketName() string { - timestamp := time.Now().UnixNano() - return fmt.Sprintf("%s%d", defaultConfig.BucketPrefix, timestamp) -} - -// createBucket creates a new bucket for testing -func createBucket(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) -} - -// deleteBucket deletes a bucket and all its contents -func deleteBucket(t *testing.T, client *s3.Client, bucketName string) { - // First, delete all objects and versions - err := deleteAllObjectVersions(t, client, bucketName) - if err != nil { - t.Logf("Warning: failed to delete all object versions: %v", err) - } - - // Then delete the bucket - _, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) - if err != nil { - t.Logf("Warning: failed to delete bucket %s: %v", bucketName, err) - } -} - -// deleteAllObjectVersions deletes all object versions in a bucket -func deleteAllObjectVersions(t *testing.T, client *s3.Client, bucketName string) error { - // List all object versions - paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - - for paginator.HasMorePages() { - page, err := paginator.NextPage(context.TODO()) - if err != nil { - return err - } - - var objectsToDelete []types.ObjectIdentifier - - // Add versions - for _, version := range page.Versions { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: version.Key, - VersionId: version.VersionId, - }) - } - - // Add delete markers - for _, deleteMarker := range page.DeleteMarkers { - objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ - Key: deleteMarker.Key, - VersionId: deleteMarker.VersionId, - }) - } - - // Delete objects in batches - if len(objectsToDelete) > 0 { - _, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{ - Bucket: aws.String(bucketName), - Delete: &types.Delete{ - Objects: objectsToDelete, - Quiet: aws.Bool(true), - }, - }) - if err != nil { - return err - } - } - } - - return nil -} - -// enableVersioning enables versioning on a bucket -func enableVersioning(t *testing.T, client *s3.Client, bucketName string) { - _, err := client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucketName), - VersioningConfiguration: &types.VersioningConfiguration{ - Status: types.BucketVersioningStatusEnabled, - }, - }) - require.NoError(t, err) -} - -// checkVersioningStatus verifies the versioning status of a bucket -func checkVersioningStatus(t *testing.T, client *s3.Client, bucketName string, expectedStatus types.BucketVersioningStatus) { - resp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - assert.Equal(t, expectedStatus, resp.Status) -} - -// checkVersioningStatusEmpty verifies that a bucket has no versioning configuration (newly created bucket) -func checkVersioningStatusEmpty(t *testing.T, client *s3.Client, bucketName string) { - resp, err := client.GetBucketVersioning(context.TODO(), &s3.GetBucketVersioningInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - // AWS S3 returns an empty versioning configuration (no Status field) for buckets that have never had versioning configured, such as newly created buckets. - assert.Empty(t, resp.Status, "Newly created bucket should have empty versioning status") -} - -// putObject puts an object into a bucket -func putObject(t *testing.T, client *s3.Client, bucketName, key, content string) *s3.PutObjectOutput { - resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - Body: strings.NewReader(content), - }) - require.NoError(t, err) - return resp -} - -// headObject gets object metadata -func headObject(t *testing.T, client *s3.Client, bucketName, key string) *s3.HeadObjectOutput { - resp, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - return resp -} - -// TestBucketListReturnDataVersioning is the Go equivalent of test_bucket_list_return_data_versioning -func TestBucketListReturnDataVersioning(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Enable versioning - enableVersioning(t, client, bucketName) - checkVersioningStatus(t, client, bucketName, types.BucketVersioningStatusEnabled) - - // Create test objects - keyNames := []string{"bar", "baz", "foo"} - objectData := make(map[string]map[string]interface{}) - - for _, keyName := range keyNames { - // Put the object - putResp := putObject(t, client, bucketName, keyName, keyName) // content = key name - - // Get object metadata - headResp := headObject(t, client, bucketName, keyName) - - // Store expected data for later comparison - objectData[keyName] = map[string]interface{}{ - "ETag": *headResp.ETag, - "LastModified": *headResp.LastModified, - "ContentLength": headResp.ContentLength, - "VersionId": *headResp.VersionId, - } - - // Verify version ID was returned - require.NotNil(t, putResp.VersionId) - require.NotEmpty(t, *putResp.VersionId) - assert.Equal(t, *putResp.VersionId, *headResp.VersionId) - } - - // List object versions - resp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Verify we have the expected number of versions - assert.Len(t, resp.Versions, len(keyNames)) - - // Check each version matches our stored data - versionsByKey := make(map[string]types.ObjectVersion) - for _, version := range resp.Versions { - versionsByKey[*version.Key] = version - } - - for _, keyName := range keyNames { - version, exists := versionsByKey[keyName] - require.True(t, exists, "Expected version for key %s", keyName) - - expectedData := objectData[keyName] - - // Compare ETag - assert.Equal(t, expectedData["ETag"], *version.ETag) - - // Compare Size - assert.Equal(t, expectedData["ContentLength"], version.Size) - - // Compare VersionId - assert.Equal(t, expectedData["VersionId"], *version.VersionId) - - // Compare LastModified (within reasonable tolerance) - expectedTime := expectedData["LastModified"].(time.Time) - actualTime := *version.LastModified - timeDiff := actualTime.Sub(expectedTime) - if timeDiff < 0 { - timeDiff = -timeDiff - } - assert.True(t, timeDiff < time.Minute, "LastModified times should be close") - - // Verify this is marked as the latest version - assert.True(t, *version.IsLatest) - - // Verify it's not a delete marker - // (delete markers should be in resp.DeleteMarkers, not resp.Versions) - } - - // Verify no delete markers - assert.Empty(t, resp.DeleteMarkers) - - t.Logf("Successfully verified %d versioned objects", len(keyNames)) -} - -// TestVersioningBasicWorkflow tests basic versioning operations -func TestVersioningBasicWorkflow(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - - // Initially, versioning should be unset/empty (not suspended) for newly created buckets - // This matches AWS S3 behavior where new buckets have no versioning status - checkVersioningStatusEmpty(t, client, bucketName) - - // Enable versioning - enableVersioning(t, client, bucketName) - checkVersioningStatus(t, client, bucketName, types.BucketVersioningStatusEnabled) - - // Put same object multiple times to create versions - key := "test-object" - version1 := putObject(t, client, bucketName, key, "content-v1") - version2 := putObject(t, client, bucketName, key, "content-v2") - version3 := putObject(t, client, bucketName, key, "content-v3") - - // Verify each put returned a different version ID - require.NotEqual(t, *version1.VersionId, *version2.VersionId) - require.NotEqual(t, *version2.VersionId, *version3.VersionId) - require.NotEqual(t, *version1.VersionId, *version3.VersionId) - - // List versions - resp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Should have 3 versions - assert.Len(t, resp.Versions, 3) - - // Only the latest should be marked as latest - latestCount := 0 - for _, version := range resp.Versions { - if *version.IsLatest { - latestCount++ - assert.Equal(t, *version3.VersionId, *version.VersionId) - } - } - assert.Equal(t, 1, latestCount, "Only one version should be marked as latest") - - t.Logf("Successfully created and verified %d versions", len(resp.Versions)) -} - -// TestVersioningDeleteMarkers tests delete marker creation -func TestVersioningDeleteMarkers(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Put an object - key := "test-delete-marker" - putResp := putObject(t, client, bucketName, key, "content") - require.NotNil(t, putResp.VersionId) - - // Delete the object (should create delete marker) - deleteResp, err := client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(key), - }) - require.NoError(t, err) - require.NotNil(t, deleteResp.VersionId) - - // List versions to see the delete marker - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - require.NoError(t, err) - - // Should have 1 version and 1 delete marker - assert.Len(t, listResp.Versions, 1) - assert.Len(t, listResp.DeleteMarkers, 1) - - // The delete marker should be the latest - deleteMarker := listResp.DeleteMarkers[0] - assert.True(t, *deleteMarker.IsLatest) - assert.Equal(t, *deleteResp.VersionId, *deleteMarker.VersionId) - - // The original version should not be latest - version := listResp.Versions[0] - assert.False(t, *version.IsLatest) - assert.Equal(t, *putResp.VersionId, *version.VersionId) - - t.Logf("Successfully created and verified delete marker") -} - -// TestVersioningConcurrentOperations tests concurrent versioning operations -func TestVersioningConcurrentOperations(t *testing.T) { - client := getS3Client(t) - bucketName := getNewBucketName() - - // Create bucket and enable versioning - createBucket(t, client, bucketName) - defer deleteBucket(t, client, bucketName) - enableVersioning(t, client, bucketName) - - // Concurrently create multiple objects - numObjects := 10 - objectKey := "concurrent-test" - - // Channel to collect version IDs - versionIds := make(chan string, numObjects) - errors := make(chan error, numObjects) - - // Launch concurrent puts - for i := 0; i < numObjects; i++ { - go func(index int) { - content := fmt.Sprintf("content-%d", index) - resp, err := client.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(bucketName), - Key: aws.String(objectKey), - Body: strings.NewReader(content), - }) - if err != nil { - errors <- err - return - } - versionIds <- *resp.VersionId - }(i) - } - - // Collect results - var collectedVersionIds []string - for i := 0; i < numObjects; i++ { - select { - case versionId := <-versionIds: - t.Logf("Received Version ID %d: %s", i, versionId) - collectedVersionIds = append(collectedVersionIds, versionId) - case err := <-errors: - t.Fatalf("Concurrent put failed: %v", err) - case <-time.After(30 * time.Second): - t.Fatalf("Timeout waiting for concurrent operations") - } - } - - // Verify all version IDs are unique - versionIdSet := make(map[string]bool) - for _, versionId := range collectedVersionIds { - assert.False(t, versionIdSet[versionId], "Version ID should be unique: %s", versionId) - versionIdSet[versionId] = true - } - - // List versions and verify count - listResp, err := client.ListObjectVersions(context.TODO(), &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucketName), - }) - pp.Println(listResp) - require.NoError(t, err) - assert.Len(t, listResp.Versions, numObjects) - - t.Logf("Successfully created %d concurrent versions with unique IDs", numObjects) -} diff --git a/test/s3/versioning/test_config.json b/test/s3/versioning/test_config.json deleted file mode 100644 index c8ca80ef9..000000000 --- a/test/s3/versioning/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "endpoint": "http://localhost:8333", - "access_key": "some_access_key1", - "secret_key": "some_secret_key1", - "region": "us-east-1", - "bucket_prefix": "test-versioning-", - "use_ssl": false, - "skip_verify_ssl": true -} \ No newline at end of file diff --git a/test/s3/versioning/versioning.test b/test/s3/versioning/versioning.test deleted file mode 100755 index 0b7e16d28..000000000 Binary files a/test/s3/versioning/versioning.test and /dev/null differ diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go index a9bb1fe16..56342a0cb 100644 --- a/unmaintained/change_superblock/change_superblock.go +++ b/unmaintained/change_superblock/change_superblock.go @@ -7,11 +7,10 @@ import ( "path" "strconv" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -26,23 +25,21 @@ var ( This is to change replication factor in .dat file header. Need to shut down the volume servers that has those volumes. - 1. fix the .dat file in place - // just see the replication setting - go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads - Current Volume Replication: 000 - // fix the replication setting - go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads -replication 001 - Current Volume Replication: 000 - Changing to: 001 - Done. +1. fix the .dat file in place + // just see the replication setting + go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads + Current Volume Replication: 000 + // fix the replication setting + go run change_replication.go -volumeId=9 -dir=/Users/chrislu/Downloads -replication 001 + Current Volume Replication: 000 + Changing to: 001 + Done. 2. copy the fixed .dat and related .idx files to some remote server 3. restart volume servers or start new volume servers. */ func main() { flag.Parse() - util_http.NewGlobalHttpClient() - fileName := strconv.Itoa(*fixVolumeId) if *fixVolumeCollection != "" { fileName = *fixVolumeCollection + "_" + fileName diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go index b4ceeb58c..0188d18d4 100644 --- a/unmaintained/diff_volume_servers/diff_volume_servers.go +++ b/unmaintained/diff_volume_servers/diff_volume_servers.go @@ -6,21 +6,19 @@ import ( "errors" "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" "io" "math" "os" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/storage/idx" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "google.golang.org/grpc" ) var ( @@ -31,19 +29,18 @@ var ( ) /* -Diff the volume's files across multiple volume servers. -diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5 + Diff the volume's files across multiple volume servers. + diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5 -Example Output: -reference 127.0.0.1:8081 -fileId volumeServer message -5,01617c3f61 127.0.0.1:8080 wrongSize + Example Output: + reference 127.0.0.1:8081 + fileId volumeServer message + 5,01617c3f61 127.0.0.1:8080 wrongSize */ func main() { flag.Parse() - util_http.InitGlobalHttpClient() - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") vid := uint32(*volumeId) @@ -158,7 +155,7 @@ func getVolumeFiles(v uint32, addr pb.ServerAddress) (map[types.NeedleId]needleS var maxOffset int64 files := map[types.NeedleId]needleState{} - err = idx.WalkIndexFile(idxFile, 0, func(key types.NeedleId, offset types.Offset, size types.Size) error { + err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { if offset.IsZero() || size.IsDeleted() { files[key] = needleState{ state: stateDeleted, diff --git a/unmaintained/disk/disk_status.go b/unmaintained/disk/disk_status.go deleted file mode 100644 index e01b16f22..000000000 --- a/unmaintained/disk/disk_status.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build !windows && !openbsd && !netbsd && !plan9 && !solaris -// +build !windows,!openbsd,!netbsd,!plan9,!solaris - -package main - -import ( - "log" - "syscall" -) - -// go run unmaintained/disk/disk_status.go - -type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` - PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` - PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` - DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - - // new fields about availย blocks - Avail uint64 `protobuf:"varint,4,opt,name=avail,proto3" json:"avail,omitempty"` - PercentAvail float32 `protobuf:"fixed32,5,opt,name=percent_avail,json=percentAvail,proto3" json:"percent_avail,omitempty"` -} - -func main() { - dirs := []string{"/mnt/sdb", "/mnt/sdc", "/mnt/sdd", "/mnt/sde", "/mnt/sdf", "/mnt/sdg", "/mnt/sdh", "/mnt/sdi", "/mnt/sdj"} - // dirs := []string{"/mnt/sdb"} - for _, dir := range dirs { - disk := &DiskStatus{Dir: dir} - fillInDiskStatus(disk) - - // bytes, _ := json.Marshal(disk) - // log.Printf("disk status %s", bytes) - log.Printf("disk: %s avail: %f free: %f", disk.Dir, disk.PercentAvail, disk.PercentFree) - } -} - -func fillInDiskStatus(disk *DiskStatus) { - fs := syscall.Statfs_t{} - err := syscall.Statfs(disk.Dir, &fs) - if err != nil { - return - } - - disk.All = fs.Blocks * uint64(fs.Bsize) - disk.Free = fs.Bfree * uint64(fs.Bsize) - disk.Used = disk.All - disk.Free - disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100) - disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100) - - // avail blocks - disk.Avail = fs.Bavail * uint64(fs.Bsize) - disk.PercentAvail = float32((float64(disk.Avail) / float64(disk.All)) * 100) - return -} diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index 5f1ea1375..457c5c592 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -8,13 +8,12 @@ import ( "path" "strconv" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -28,17 +27,15 @@ This is to resolve an one-time issue that caused inconsistency with .dat and .id In this case, the .dat file contains all data, but some deletion caused incorrect offset. The .idx has all correct offsets. - 1. fix the .dat file, a new .dat_fixed file will be generated. - go run fix_dat.go -volumeId=9 -dir=/Users/chrislu/Downloads - 2. move the original .dat and .idx files to some backup folder, and rename .dat_fixed to .dat file +1. fix the .dat file, a new .dat_fixed file will be generated. + go run fix_dat.go -volumeId=9 -dir=/Users/chrislu/Downloads +2. move the original .dat and .idx files to some backup folder, and rename .dat_fixed to .dat file mv 9.dat_fixed 9.dat - 3. fix the .idx file with the "weed fix" - weed fix -volumeId=9 -dir=/Users/chrislu/Downloads +3. fix the .idx file with the "weed fix" + weed fix -volumeId=9 -dir=/Users/chrislu/Downloads */ func main() { flag.Parse() - util_http.InitGlobalHttpClient() - fileName := strconv.Itoa(*fixVolumeId) if *fixVolumeCollection != "" { fileName = *fixVolumeCollection + "_" + fileName diff --git a/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go b/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go index 0544e5f2b..d4b9d63b1 100644 --- a/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go +++ b/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go @@ -1,18 +1,13 @@ package main import ( - "context" "flag" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "strconv" - "strings" "time" ) @@ -25,8 +20,8 @@ var ( ) func main() { + flag.Parse() - util_http.InitGlobalHttpClient() if *isWrite { startGenerateMetadata() @@ -56,12 +51,12 @@ func main() { } func startGenerateMetadata() { - pb.WithFilerClient(false, util.RandomInt32(), pb.ServerAddress(*tailFiler), grpc.WithTransportCredentials(insecure.NewCredentials()), func(client filer_pb.SeaweedFilerClient) error { + pb.WithFilerClient(false, pb.ServerAddress(*tailFiler), grpc.WithInsecure(), func(client filer_pb.SeaweedFilerClient) error { for i := 0; i < *n; i++ { name := fmt.Sprintf("file%d", i) glog.V(0).Infof("write %s/%s", *dir, name) - if err := filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: *dir, Entry: &filer_pb.Entry{ Name: name, @@ -82,24 +77,7 @@ func startGenerateMetadata() { func startSubscribeMetadata(eachEntryFunc func(event *filer_pb.SubscribeMetadataResponse) error) { - prefix := *dir - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "tail", - ClientId: 0, - ClientEpoch: 0, - SelfSignature: 0, - PathPrefix: prefix, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: nil, - StartTsNs: 0, - StopTsNs: 0, - EventErrorType: pb.TrivialOnError, - } - tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpc.WithTransportCredentials(insecure.NewCredentials()), metadataFollowOption, eachEntryFunc) + tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpc.WithInsecure(), "tail", 0, *dir, nil, 0, 0, 0, eachEntryFunc, pb.TrivialOnError) if tailErr != nil { fmt.Printf("tail %s: %v\n", *tailFiler, tailErr) diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go index cfac97432..84173a663 100644 --- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go +++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go @@ -6,12 +6,11 @@ import ( "os" "path/filepath" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -72,7 +71,6 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in func main() { flag.Parse() - util_http.InitGlobalHttpClient() vid := needle.VolumeId(*volumeId) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 0a796a92f..937e764bb 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -1,20 +1,18 @@ package main import ( - "context" "flag" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "log" "math/rand" "time" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -26,9 +24,8 @@ var ( func main() { flag.Parse() - util_http.InitGlobalHttpClient() - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") genFile(grpcDialOption, 0) @@ -36,7 +33,7 @@ func main() { go func() { for { println("vacuum threshold", *garbageThreshold) - _, _, err := util_http.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold)) + _, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", pb.ServerAddress(*master).ToHttpAddress(), *garbageThreshold)) if err != nil { log.Fatalf("vacuum: %v", err) } @@ -49,14 +46,14 @@ func main() { assignResult, targetUrl := genFile(grpcDialOption, i) - util_http.Delete(targetUrl, string(assignResult.Auth)) + util.Delete(targetUrl, string(assignResult.Auth)) } } func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { - assignResult, err := operation.Assign(context.Background(), func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{ + assignResult, err := operation.Assign(func() pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{ Count: 1, Replication: *replication, }) @@ -78,13 +75,7 @@ func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, st PairMap: nil, Jwt: assignResult.Auth, } - - uploader, err := operation.NewUploader() - if err != nil { - log.Fatalf("upload: %v", err) - } - - _, err = uploader.UploadData(context.Background(), data, uploadOption) + _, err = operation.UploadData(data, uploadOption) if err != nil { log.Fatalf("upload: %v", err) } diff --git a/unmaintained/s3/presigned_put/presigned_put.go b/unmaintained/s3/presigned_put/presigned_put.go index 46e4cbf06..e8368d124 100644 --- a/unmaintained/s3/presigned_put/presigned_put.go +++ b/unmaintained/s3/presigned_put/presigned_put.go @@ -1,31 +1,25 @@ package main import ( - "crypto/md5" - "encoding/base64" - "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "net/http" + "encoding/base64" + "fmt" + "crypto/md5" "strings" "time" + "net/http" ) // Downloads an item from an S3 Bucket in the region configured in the shared config // or AWS_REGION environment variable. // // Usage: -// -// go run presigned_put.go -// +// go run presigned_put.go // For this exampl to work, the domainName is needd -// -// weed s3 -domainName=localhost +// weed s3 -domainName=localhost func main() { - util_http.InitGlobalHttpClient() - h := md5.New() content := strings.NewReader(stringContent) content.WriteTo(h) @@ -69,7 +63,6 @@ func main() { fmt.Printf("error put request: %v\n", err) return } - defer util_http.CloseResponse(resp) fmt.Printf("response: %+v\n", resp) } @@ -77,4 +70,4 @@ var stringContent = `Generate a Pre-Signed URL for an Amazon S3 PUT Operation wi You can generate a pre-signed URL for a PUT operation that checks whether users upload the correct content. When the SDK pre-signs a request, it computes the checksum of the request body and generates an MD5 checksum that is included in the pre-signed URL. Users must upload the same content that produces the same MD5 checksum generated by the SDK; otherwise, the operation fails. This is not the Content-MD5, but the signature. To enforce Content-MD5, simply add the header to the request. The following example adds a Body field to generate a pre-signed PUT operation that requires a specific payload to be uploaded by users. -` +` \ No newline at end of file diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index a60e45760..17c494841 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -2,15 +2,13 @@ package main import ( "flag" + "github.com/chrislusf/seaweedfs/weed/util" "time" - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -34,14 +32,13 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) - glog.V(0).Infof("%d,%s%08x offset %d size %d(%s) cookie %08x appendedAt %v name %s", - *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.Name) + glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v", + *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t) return nil } func main() { flag.Parse() - util_http.InitGlobalHttpClient() vid := needle.VolumeId(*volumeId) diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go index 87f00ebb0..22c659351 100644 --- a/unmaintained/see_idx/see_idx.go +++ b/unmaintained/see_idx/see_idx.go @@ -3,16 +3,14 @@ package main import ( "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "os" "path" "strconv" - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/idx" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/chrislusf/seaweedfs/weed/storage/types" ) var ( @@ -28,8 +26,6 @@ This is to see content in .idx files. */ func main() { flag.Parse() - util_http.InitGlobalHttpClient() - fileName := strconv.Itoa(*fixVolumeId) if *fixVolumeCollection != "" { fileName = *fixVolumeCollection + "_" + fileName @@ -40,7 +36,7 @@ func main() { } defer indexFile.Close() - idx.WalkIndexFile(indexFile, 0, func(key types.NeedleId, offset types.Offset, size types.Size) error { + idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size))) return nil }) diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go index 42a63476b..45480d4dc 100644 --- a/unmaintained/see_log_entry/see_log_entry.go +++ b/unmaintained/see_log_entry/see_log_entry.go @@ -7,21 +7,19 @@ import ( "log" "os" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( - logdataFile = flag.String("logdata", "", "log data file saved under "+filer.SystemLogDir) + logdataFile = flag.String("logdata", "", "log data file saved under "+ filer.SystemLogDir) ) func main() { flag.Parse() - util_http.InitGlobalHttpClient() dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644) if err != nil { diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go index da78f0918..9e496430c 100644 --- a/unmaintained/see_meta/see_meta.go +++ b/unmaintained/see_meta/see_meta.go @@ -7,11 +7,10 @@ import ( "log" "os" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -20,7 +19,6 @@ var ( func main() { flag.Parse() - util_http.InitGlobalHttpClient() dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644) if err != nil { @@ -61,7 +59,7 @@ func walkMetaFile(dst *os.File) error { } fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) - for i, chunk := range fullEntry.Entry.GetChunks() { + for i, chunk := range fullEntry.Entry.Chunks { fmt.Fprintf(os.Stdout, " chunk: %d %v %d,%x%08x\n", i+1, chunk, chunk.Fid.VolumeId, chunk.Fid.FileKey, chunk.Fid.Cookie) } diff --git a/unmaintained/stream_read_volume/stream_read_volume.go b/unmaintained/stream_read_volume/stream_read_volume.go index b148e4a4a..bbe5abedb 100644 --- a/unmaintained/stream_read_volume/stream_read_volume.go +++ b/unmaintained/stream_read_volume/stream_read_volume.go @@ -5,15 +5,13 @@ import ( "errors" "flag" "fmt" - "io" - - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" + "io" ) var ( @@ -24,15 +22,14 @@ var ( func main() { flag.Parse() - util_http.InitGlobalHttpClient() - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") vid := uint32(*volumeId) eachNeedleFunc := func(resp *volume_server_pb.ReadAllNeedlesResponse) error { - fmt.Printf("%d,%x%08x %d %v %d %x\n", resp.VolumeId, resp.NeedleId, resp.Cookie, len(resp.NeedleBlob), resp.NeedleBlobCompressed, resp.LastModified, resp.Crc) + fmt.Printf("%d,%x%08x %d\n", resp.VolumeId, resp.NeedleId, resp.Cookie, len(resp.NeedleBlob)) return nil } diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go index a98da1d01..a41bf1da1 100644 --- a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -4,7 +4,6 @@ import ( "bytes" "flag" "fmt" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "io" "log" "math/rand" @@ -31,8 +30,8 @@ type stat struct { } func main() { + flag.Parse() - util_http.InitGlobalHttpClient() data := make([]byte, *size) println("data len", len(data)) @@ -44,12 +43,16 @@ func main() { go func(x int) { defer wg.Done() + client := &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) for t := 0; t < *times; t++ { for f := 0; f < *fileCount; f++ { fn := r.Intn(*fileCount) - if size, err := uploadFileToFiler(data, fmt.Sprintf("file%04d", fn), *destination); err == nil { + if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil { statsChan <- stat{ size: size, } @@ -63,7 +66,6 @@ func main() { go func() { ticker := time.NewTicker(1000 * time.Millisecond) - defer ticker.Stop() var lastTime time.Time var counter, size int64 @@ -90,7 +92,7 @@ func main() { } -func uploadFileToFiler(data []byte, filename, destination string) (size int64, err error) { +func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) { if !strings.HasSuffix(destination, "/") { destination = destination + "/" @@ -112,14 +114,11 @@ func uploadFileToFiler(data []byte, filename, destination string) (size int64, e uri := destination + filename - request, err := http.NewRequest(http.MethodPost, uri, body) - if err != nil { - return 0, fmt.Errorf("http POST %s: %v", uri, err) - } + request, err := http.NewRequest("POST", uri, body) request.Header.Set("Content-Type", writer.FormDataContentType()) // request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also. - resp, err := util_http.GetGlobalHttpClient().Do(request) + resp, err := client.Do(request) if err != nil { return 0, fmt.Errorf("http POST %s: %v", uri, err) } else { diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go index 1c3befe3d..83df54dc3 100644 --- a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go +++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -4,7 +4,6 @@ import ( "bytes" "flag" "fmt" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "io" "log" "math/rand" @@ -31,8 +30,8 @@ type stat struct { } func main() { + flag.Parse() - util_http.InitGlobalHttpClient() var fileNames []string @@ -52,6 +51,8 @@ func main() { for x := 0; x < *concurrency; x++ { wg.Add(1) + client := &http.Client{} + go func() { defer wg.Done() rand.Shuffle(len(fileNames), func(i, j int) { @@ -59,7 +60,7 @@ func main() { }) for t := 0; t < *times; t++ { for _, filename := range fileNames { - if size, err := uploadFileToFiler(filename, *destination); err == nil { + if size, err := uploadFileToFiler(client, filename, *destination); err == nil { statsChan <- stat{ size: size, } @@ -71,7 +72,6 @@ func main() { go func() { ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() var lastTime time.Time var counter, size int64 @@ -98,7 +98,7 @@ func main() { } -func uploadFileToFiler(filename, destination string) (size int64, err error) { +func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) { file, err := os.Open(filename) if err != nil { panic(err) @@ -129,14 +129,10 @@ func uploadFileToFiler(filename, destination string) (size int64, err error) { uri := destination + file.Name() - request, err := http.NewRequest(http.MethodPost, uri, body) - if err != nil { - return 0, fmt.Errorf("http POST %s: %v", uri, err) - } - + request, err := http.NewRequest("POST", uri, body) request.Header.Set("Content-Type", writer.FormDataContentType()) - resp, err := util_http.GetGlobalHttpClient().Do(request) + resp, err := client.Do(request) if err != nil { return 0, fmt.Errorf("http POST %s: %v", uri, err) } else { diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go index 03f728ad0..3c6f4a987 100644 --- a/unmaintained/volume_tailer/volume_tailer.go +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -1,17 +1,15 @@ package main import ( - "context" "flag" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "log" "time" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - util2 "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + util2 "github.com/chrislusf/seaweedfs/weed/util" "golang.org/x/tools/godoc/util" ) @@ -25,9 +23,8 @@ var ( func main() { flag.Parse() - util_http.InitGlobalHttpClient() - util2.LoadSecurityConfiguration() + util2.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client") vid := needle.VolumeId(*volumeId) @@ -41,7 +38,7 @@ func main() { sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano() } - err := operation.TailVolume(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { + err := operation.TailVolume(func()pb.ServerAddress{return pb.ServerAddress(*master)}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { if n.Size == 0 { println("-", n.String()) return nil diff --git a/weed/Makefile b/weed/Makefile index ac25d008b..4e871a71e 100644 --- a/weed/Makefile +++ b/weed/Makefile @@ -7,7 +7,7 @@ all: install .PHONY : clean debug_mount install: - go install -ldflags="-s -w" + go install clean: go clean $(SOURCE_DIR) @@ -15,52 +15,40 @@ clean: debug_shell: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- shell + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- shell debug_mount: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/ -umask=000 + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/ -umask=000 debug_server: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- server -dir=~/tmp/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=100 -volume.preStopSeconds=1 + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- server -dir=~/tmp/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 debug_volume: - go build -tags=5BytesOffset -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- volume -dir=/Users/chrislu/tmp/x/volume_data -port 8564 -max=1 -preStopSeconds=2 + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- volume -dir=~/tmp/100 -port 8564 -max=30 -preStopSeconds=2 debug_webdav: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 webdav + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 webdav debug_s3: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 s3 - -debug_mq: - go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 mq.broker - -debug_mq_agent: - go build -gcflags="all=-N -l" - dlv --listen=:2346 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 mq.agent -broker=localhost:17777 + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 s3 debug_filer_copy: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h debug_filer_remote_sync_dir: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -dir=/buckets/b2 -timeAgo=1h debug_filer_remote_sync_buckets: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.remote.sync -filer="localhost:8888" -createBucketAt=cloud1 -timeAgo=1h debug_master_follower: go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 master.follower - -debug_filer_sync: - go build -gcflags="all=-N -l" - dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./weed -- -v=4 filer.sync -a 192.168.2.7:8888 -b 192.168.2.7:8889 -isActivePassive -b.debug + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 master.follower diff --git a/weed/admin/Makefile b/weed/admin/Makefile deleted file mode 100644 index b79ddc1ab..000000000 --- a/weed/admin/Makefile +++ /dev/null @@ -1,165 +0,0 @@ -# SeaweedFS Admin Component Makefile - -# Variables -ADMIN_DIR := . -VIEW_DIR := $(ADMIN_DIR)/view -STATIC_DIR := $(ADMIN_DIR)/static -TEMPL_FILES := $(shell find $(VIEW_DIR) -name "*.templ") -TEMPL_GO_FILES := $(TEMPL_FILES:.templ=_templ.go) -GO_FILES := $(shell find $(ADMIN_DIR) -name "*.go" -not -name "*_templ.go") -BUILD_DIR := ../.. -WEED_BINARY := $(BUILD_DIR)/weed - -# Default target -.PHONY: all -all: build - -# Install templ if not present -.PHONY: install-templ -install-templ: - @which templ > /dev/null || (echo "Installing templ..." && go install github.com/a-h/templ/cmd/templ@latest) - -# Generate templ files -.PHONY: generate -generate: install-templ - @echo "Generating templ files..." - @templ generate - @echo "Generated: $(TEMPL_GO_FILES)" - -# Clean generated files -.PHONY: clean-templ -clean-templ: - @echo "Cleaning generated templ files..." - @find $(VIEW_DIR) -name "*_templ.go" -delete - @echo "Cleaned templ files" - -# Watch for changes and regenerate -.PHONY: watch -watch: install-templ - @echo "Watching for templ file changes..." - @templ generate --watch - -# Build the main weed binary with admin component -.PHONY: build -build: generate - @echo "Building weed binary with admin component..." - @cd $(BUILD_DIR) && go build -o weed ./weed - @echo "Built: $(BUILD_DIR)/weed" - -# Test the admin component -.PHONY: test -test: generate - @echo "Running admin component tests..." - @go test ./... - -# Run the admin server via weed command -.PHONY: run -run: build - @echo "Starting admin server via weed command..." - @cd $(BUILD_DIR) && ./weed admin - -# Development server with auto-reload -.PHONY: dev -dev: generate - @echo "Starting development server with auto-reload..." - @echo "Note: You'll need to manually restart the server when Go files change" - @cd $(BUILD_DIR) && ./weed admin -port=23647 & - @$(MAKE) watch - -# Lint the code -.PHONY: lint -lint: - @echo "Linting admin component..." - @golangci-lint run ./... - -# Format the code -.PHONY: fmt -fmt: - @echo "Formatting Go code..." - @go fmt ./... - @echo "Formatting templ files..." - @templ fmt $(VIEW_DIR) - -# Validate static files exist -.PHONY: validate-static -validate-static: - @echo "Validating static files..." - @test -f $(STATIC_DIR)/css/admin.css || (echo "Missing: admin.css" && exit 1) - @test -f $(STATIC_DIR)/js/admin.js || (echo "Missing: admin.js" && exit 1) - @echo "Static files validated" - -# Check dependencies -.PHONY: deps -deps: - @echo "Checking dependencies..." - @go mod tidy - @go mod verify - -# Clean all build artifacts -.PHONY: clean -clean: clean-templ - @echo "Cleaning build artifacts..." - @rm -f $(BUILD_DIR)/weed 2>/dev/null || true - @echo "Cleaned build artifacts" - -# Install dependencies -.PHONY: install-deps -install-deps: - @echo "Installing Go dependencies..." - @go mod download - @$(MAKE) install-templ - -# Production build -.PHONY: build-prod -build-prod: clean generate validate-static - @echo "Building production binary..." - @cd $(BUILD_DIR) && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o weed-linux-amd64 ./weed - @echo "Built production binary: $(BUILD_DIR)/weed-linux-amd64" - -# Docker build (if needed) -.PHONY: docker-build -docker-build: generate - @echo "Building Docker image with admin component..." - @cd $(BUILD_DIR) && docker build -t seaweedfs/seaweedfs:latest . - -# Help target -.PHONY: help -help: - @echo "SeaweedFS Admin Component Makefile" - @echo "" - @echo "Available targets:" - @echo " all - Build the weed binary with admin component (default)" - @echo " generate - Generate templ files from templates" - @echo " build - Build weed binary with admin component" - @echo " build-prod - Build production binary" - @echo " run - Run admin server via weed command" - @echo " dev - Start development server with template watching" - @echo " test - Run tests" - @echo " watch - Watch for template changes and regenerate" - @echo " clean - Clean all build artifacts" - @echo " clean-templ - Clean generated template files" - @echo " fmt - Format Go and templ code" - @echo " lint - Lint the code" - @echo " deps - Check and tidy dependencies" - @echo " install-deps - Install all dependencies" - @echo " install-templ - Install templ compiler" - @echo " validate-static - Validate static files exist" - @echo " docker-build - Build Docker image" - @echo " help - Show this help message" - @echo "" - @echo "Examples:" - @echo " make generate # Generate templates" - @echo " make build # Build weed binary" - @echo " make run # Start admin server" - @echo " make dev # Development mode with auto-reload" - -# Make sure generated files are up to date before building -$(WEED_BINARY): $(TEMPL_GO_FILES) $(GO_FILES) - @$(MAKE) build - -# Auto-generate templ files when .templ files change -%_templ.go: %.templ - @echo "Regenerating $@ from $<" - @templ generate - -.PHONY: $(TEMPL_GO_FILES) \ No newline at end of file diff --git a/weed/admin/README.md b/weed/admin/README.md deleted file mode 100644 index 7b909174d..000000000 --- a/weed/admin/README.md +++ /dev/null @@ -1,279 +0,0 @@ -# SeaweedFS Admin Component - -A modern web-based administration interface for SeaweedFS clusters built with Go, Gin, Templ, and Bootstrap. - -## Features - -- **Dashboard**: Real-time cluster status and metrics -- **Master Management**: Monitor master nodes and leadership status -- **Volume Server Management**: View volume servers, capacity, and health -- **Object Store Bucket Management**: Create, delete, and manage Object Store buckets with web interface -- **System Health**: Overall cluster health monitoring -- **Responsive Design**: Bootstrap-based UI that works on all devices -- **Authentication**: Optional user authentication with sessions -- **TLS Support**: HTTPS support for production deployments - -## Building - -### Using the Admin Makefile - -The admin component has its own Makefile for development and building: - -```bash -# Navigate to admin directory -cd weed/admin - -# View all available targets -make help - -# Generate templates and build -make build - -# Development mode with template watching -make dev - -# Run the admin server -make run - -# Clean build artifacts -make clean -``` - -### Using the Root Makefile - -The root SeaweedFS Makefile automatically integrates the admin component: - -```bash -# From the root directory -make install # Builds weed with admin component -make full_install # Full build with all tags -make test # Runs tests including admin component - -# Admin-specific targets from root -make admin-generate # Generate admin templates -make admin-build # Build admin component -make admin-run # Run admin server -make admin-dev # Development mode -make admin-clean # Clean admin artifacts -``` - -### Manual Building - -If you prefer to build manually: - -```bash -# Install templ compiler -go install github.com/a-h/templ/cmd/templ@latest - -# Generate templates -templ generate - -# Build the main weed binary -cd ../../../ -go build -o weed ./weed -``` - -## Development - -### Template Development - -The admin interface uses [Templ](https://templ.guide/) for type-safe HTML templates: - -```bash -# Watch for template changes and auto-regenerate -make watch - -# Or manually generate templates -make generate - -# Format templates -make fmt -``` - -### File Structure - -``` -weed/admin/ -โ”œโ”€โ”€ Makefile # Admin-specific build tasks -โ”œโ”€โ”€ README.md # This file -โ”œโ”€โ”€ S3_BUCKETS.md # Object Store bucket management documentation -โ”œโ”€โ”€ admin.go # Main application entry point -โ”œโ”€โ”€ dash/ # Server and handler logic -โ”‚ โ”œโ”€โ”€ admin_server.go # HTTP server setup -โ”‚ โ”œโ”€โ”€ handler_admin.go # Admin dashboard handlers -โ”‚ โ”œโ”€โ”€ handler_auth.go # Authentication handlers -โ”‚ โ””โ”€โ”€ middleware.go # HTTP middleware -โ”œโ”€โ”€ static/ # Static assets -โ”‚ โ”œโ”€โ”€ css/admin.css # Admin-specific styles -โ”‚ โ””โ”€โ”€ js/admin.js # Admin-specific JavaScript -โ””โ”€โ”€ view/ # Templates - โ”œโ”€โ”€ app/ # Application templates - โ”‚ โ”œโ”€โ”€ admin.templ # Main dashboard template - โ”‚ โ”œโ”€โ”€ s3_buckets.templ # Object Store bucket management template - โ”‚ โ””โ”€โ”€ *_templ.go # Generated Go code - โ””โ”€โ”€ layout/ # Layout templates - โ”œโ”€โ”€ layout.templ # Base layout template - โ””โ”€โ”€ layout_templ.go # Generated Go code -``` - -### S3 Bucket Management - -The admin interface includes comprehensive Object Store bucket management capabilities. See [S3_BUCKETS.md](S3_BUCKETS.md) for detailed documentation on: - -- Creating and deleting Object Store buckets -- Viewing bucket contents and metadata -- Managing bucket permissions and settings -- API endpoints for programmatic access - -## Usage - -### Basic Usage - -```bash -# Start admin interface on default port (23646) -weed admin - -# Start with custom configuration -weed admin -port=8080 -masters="master1:9333,master2:9333" - -# Start with authentication -weed admin -adminUser=admin -adminPassword=secret123 - -# Start with HTTPS -weed admin -port=443 -tlsCert=/path/to/cert.pem -tlsKey=/path/to/key.pem -``` - -### Configuration Options - -| Option | Default | Description | -|--------|---------|-------------| -| `-port` | 23646 | Admin server port | -| `-masters` | localhost:9333 | Comma-separated master servers | -| `-adminUser` | admin | Admin username (if auth enabled) | -| `-adminPassword` | "" | Admin password (empty = no auth) | -| `-tlsCert` | "" | Path to TLS certificate | -| `-tlsKey` | "" | Path to TLS private key | - -### Docker Usage - -```bash -# Build Docker image with admin component -make docker-build - -# Run with Docker -docker run -p 23646:23646 seaweedfs/seaweedfs:latest admin -masters=host.docker.internal:9333 -``` - -## Development Workflow - -### Quick Start - -```bash -# Clone and setup -git clone -cd seaweedfs/weed/admin - -# Install dependencies and build -make install-deps -make build - -# Start development server -make dev -``` - -### Making Changes - -1. **Template Changes**: Edit `.templ` files in `view/` - - Templates auto-regenerate in development mode - - Use `make generate` to manually regenerate - -2. **Go Code Changes**: Edit `.go` files - - Restart the server to see changes - - Use `make build` to rebuild - -3. **Static Assets**: Edit files in `static/` - - Changes are served immediately - -### Testing - -```bash -# Run admin component tests -make test - -# Run from root directory -make admin-test - -# Lint code -make lint - -# Format code -make fmt -``` - -## Production Deployment - -### Security Considerations - -1. **Authentication**: Always set `adminPassword` in production -2. **HTTPS**: Use TLS certificates for encrypted connections -3. **Firewall**: Restrict admin interface access to authorized networks - -### Example Production Setup - -```bash -# Production deployment with security -weed admin \ - -port=443 \ - -masters="master1:9333,master2:9333,master3:9333" \ - -adminUser=admin \ - -adminPassword=your-secure-password \ - -tlsCert=/etc/ssl/certs/admin.crt \ - -tlsKey=/etc/ssl/private/admin.key -``` - -### Monitoring - -The admin interface provides endpoints for monitoring: - -- `GET /health` - Health check endpoint -- `GET /metrics` - Prometheus metrics (if enabled) -- `GET /api/status` - JSON status information - -## Troubleshooting - -### Common Issues - -1. **Templates not found**: Run `make generate` to create template files -2. **Build errors**: Ensure `templ` is installed with `make install-templ` -3. **Static files not loading**: Check that `static/` directory exists and has proper files -4. **Connection errors**: Verify master and filer addresses are correct - -### Debug Mode - -```bash -# Enable debug logging -weed -v=2 admin - -# Check generated templates -ls -la view/app/*_templ.go view/layout/*_templ.go -``` - -## Contributing - -1. Fork the repository -2. Create a feature branch -3. Make your changes -4. Run tests: `make test` -5. Format code: `make fmt` -6. Submit a pull request - -## Architecture - -The admin component follows a clean architecture: - -- **Presentation Layer**: Templ templates + Bootstrap CSS -- **HTTP Layer**: Gin router with middleware -- **Business Logic**: Handler functions in `dash/` package -- **Data Layer**: Communicates with SeaweedFS masters and filers - -This separation makes the code maintainable and testable. \ No newline at end of file diff --git a/weed/admin/config/schema.go b/weed/admin/config/schema.go deleted file mode 100644 index 54fb615f9..000000000 --- a/weed/admin/config/schema.go +++ /dev/null @@ -1,360 +0,0 @@ -package config - -import ( - "fmt" - "reflect" - "strings" - "time" -) - -// ConfigWithDefaults defines an interface for configurations that can apply their own defaults -type ConfigWithDefaults interface { - // ApplySchemaDefaults applies default values using the provided schema - ApplySchemaDefaults(schema *Schema) error - // Validate validates the configuration - Validate() error -} - -// FieldType defines the type of a configuration field -type FieldType string - -const ( - FieldTypeBool FieldType = "bool" - FieldTypeInt FieldType = "int" - FieldTypeDuration FieldType = "duration" - FieldTypeInterval FieldType = "interval" - FieldTypeString FieldType = "string" - FieldTypeFloat FieldType = "float" -) - -// FieldUnit defines the unit for display purposes -type FieldUnit string - -const ( - UnitSeconds FieldUnit = "seconds" - UnitMinutes FieldUnit = "minutes" - UnitHours FieldUnit = "hours" - UnitDays FieldUnit = "days" - UnitCount FieldUnit = "count" - UnitNone FieldUnit = "" -) - -// Field defines a configuration field with all its metadata -type Field struct { - // Field identification - Name string `json:"name"` - JSONName string `json:"json_name"` - Type FieldType `json:"type"` - - // Default value and validation - DefaultValue interface{} `json:"default_value"` - MinValue interface{} `json:"min_value,omitempty"` - MaxValue interface{} `json:"max_value,omitempty"` - Required bool `json:"required"` - - // UI display - DisplayName string `json:"display_name"` - Description string `json:"description"` - HelpText string `json:"help_text"` - Placeholder string `json:"placeholder"` - Unit FieldUnit `json:"unit"` - - // Form rendering - InputType string `json:"input_type"` // "checkbox", "number", "text", "interval", etc. - CSSClasses string `json:"css_classes,omitempty"` -} - -// GetDisplayValue returns the value formatted for display in the specified unit -func (f *Field) GetDisplayValue(value interface{}) interface{} { - if (f.Type == FieldTypeDuration || f.Type == FieldTypeInterval) && f.Unit != UnitSeconds { - if duration, ok := value.(time.Duration); ok { - switch f.Unit { - case UnitMinutes: - return int(duration.Minutes()) - case UnitHours: - return int(duration.Hours()) - case UnitDays: - return int(duration.Hours() / 24) - } - } - if seconds, ok := value.(int); ok { - switch f.Unit { - case UnitMinutes: - return seconds / 60 - case UnitHours: - return seconds / 3600 - case UnitDays: - return seconds / (24 * 3600) - } - } - } - return value -} - -// GetIntervalDisplayValue returns the value and unit for interval fields -func (f *Field) GetIntervalDisplayValue(value interface{}) (int, string) { - if f.Type != FieldTypeInterval { - return 0, "minutes" - } - - seconds := 0 - if duration, ok := value.(time.Duration); ok { - seconds = int(duration.Seconds()) - } else if s, ok := value.(int); ok { - seconds = s - } - - return SecondsToIntervalValueUnit(seconds) -} - -// SecondsToIntervalValueUnit converts seconds to the most appropriate interval unit -func SecondsToIntervalValueUnit(totalSeconds int) (int, string) { - if totalSeconds == 0 { - return 0, "minutes" - } - - // Check if it's evenly divisible by days - if totalSeconds%(24*3600) == 0 { - return totalSeconds / (24 * 3600), "days" - } - - // Check if it's evenly divisible by hours - if totalSeconds%3600 == 0 { - return totalSeconds / 3600, "hours" - } - - // Default to minutes - return totalSeconds / 60, "minutes" -} - -// IntervalValueUnitToSeconds converts interval value and unit to seconds -func IntervalValueUnitToSeconds(value int, unit string) int { - switch unit { - case "days": - return value * 24 * 3600 - case "hours": - return value * 3600 - case "minutes": - return value * 60 - default: - return value * 60 // Default to minutes - } -} - -// ParseDisplayValue converts a display value back to the storage format -func (f *Field) ParseDisplayValue(displayValue interface{}) interface{} { - if (f.Type == FieldTypeDuration || f.Type == FieldTypeInterval) && f.Unit != UnitSeconds { - if val, ok := displayValue.(int); ok { - switch f.Unit { - case UnitMinutes: - return val * 60 - case UnitHours: - return val * 3600 - case UnitDays: - return val * 24 * 3600 - } - } - } - return displayValue -} - -// ParseIntervalFormData parses form data for interval fields (value + unit) -func (f *Field) ParseIntervalFormData(valueStr, unitStr string) (int, error) { - if f.Type != FieldTypeInterval { - return 0, fmt.Errorf("field %s is not an interval field", f.Name) - } - - value := 0 - if valueStr != "" { - var err error - value, err = fmt.Sscanf(valueStr, "%d", &value) - if err != nil { - return 0, fmt.Errorf("invalid interval value: %s", valueStr) - } - } - - return IntervalValueUnitToSeconds(value, unitStr), nil -} - -// ValidateValue validates a value against the field constraints -func (f *Field) ValidateValue(value interface{}) error { - if f.Required && (value == nil || value == "" || value == 0) { - return fmt.Errorf("%s is required", f.DisplayName) - } - - if f.MinValue != nil { - if !f.compareValues(value, f.MinValue, ">=") { - return fmt.Errorf("%s must be >= %v", f.DisplayName, f.MinValue) - } - } - - if f.MaxValue != nil { - if !f.compareValues(value, f.MaxValue, "<=") { - return fmt.Errorf("%s must be <= %v", f.DisplayName, f.MaxValue) - } - } - - return nil -} - -// compareValues compares two values based on the operator -func (f *Field) compareValues(a, b interface{}, op string) bool { - switch f.Type { - case FieldTypeInt: - aVal, aOk := a.(int) - bVal, bOk := b.(int) - if !aOk || !bOk { - return false - } - switch op { - case ">=": - return aVal >= bVal - case "<=": - return aVal <= bVal - } - case FieldTypeFloat: - aVal, aOk := a.(float64) - bVal, bOk := b.(float64) - if !aOk || !bOk { - return false - } - switch op { - case ">=": - return aVal >= bVal - case "<=": - return aVal <= bVal - } - } - return true -} - -// Schema provides common functionality for configuration schemas -type Schema struct { - Fields []*Field `json:"fields"` -} - -// GetFieldByName returns a field by its JSON name -func (s *Schema) GetFieldByName(jsonName string) *Field { - for _, field := range s.Fields { - if field.JSONName == jsonName { - return field - } - } - return nil -} - -// ApplyDefaultsToConfig applies defaults to a configuration that implements ConfigWithDefaults -func (s *Schema) ApplyDefaultsToConfig(config ConfigWithDefaults) error { - return config.ApplySchemaDefaults(s) -} - -// ApplyDefaultsToProtobuf applies defaults to protobuf types using reflection -func (s *Schema) ApplyDefaultsToProtobuf(config interface{}) error { - return s.applyDefaultsReflection(config) -} - -// applyDefaultsReflection applies default values using reflection (internal use only) -// Used for protobuf types and embedded struct handling -func (s *Schema) applyDefaultsReflection(config interface{}) error { - configValue := reflect.ValueOf(config) - if configValue.Kind() == reflect.Ptr { - configValue = configValue.Elem() - } - - if configValue.Kind() != reflect.Struct { - return fmt.Errorf("config must be a struct or pointer to struct") - } - - configType := configValue.Type() - - for i := 0; i < configValue.NumField(); i++ { - field := configValue.Field(i) - fieldType := configType.Field(i) - - // Handle embedded structs recursively (before JSON tag check) - if field.Kind() == reflect.Struct && fieldType.Anonymous { - if !field.CanAddr() { - return fmt.Errorf("embedded struct %s is not addressable - config must be a pointer", fieldType.Name) - } - err := s.applyDefaultsReflection(field.Addr().Interface()) - if err != nil { - return fmt.Errorf("failed to apply defaults to embedded struct %s: %v", fieldType.Name, err) - } - continue - } - - // Get JSON tag name - jsonTag := fieldType.Tag.Get("json") - if jsonTag == "" { - continue - } - - // Remove options like ",omitempty" - if commaIdx := strings.Index(jsonTag, ","); commaIdx >= 0 { - jsonTag = jsonTag[:commaIdx] - } - - // Find corresponding schema field - schemaField := s.GetFieldByName(jsonTag) - if schemaField == nil { - continue - } - - // Apply default if field is zero value - if field.CanSet() && field.IsZero() { - defaultValue := reflect.ValueOf(schemaField.DefaultValue) - if defaultValue.Type().ConvertibleTo(field.Type()) { - field.Set(defaultValue.Convert(field.Type())) - } - } - } - - return nil -} - -// ValidateConfig validates a configuration against the schema -func (s *Schema) ValidateConfig(config interface{}) []error { - var errors []error - - configValue := reflect.ValueOf(config) - if configValue.Kind() == reflect.Ptr { - configValue = configValue.Elem() - } - - if configValue.Kind() != reflect.Struct { - errors = append(errors, fmt.Errorf("config must be a struct or pointer to struct")) - return errors - } - - configType := configValue.Type() - - for i := 0; i < configValue.NumField(); i++ { - field := configValue.Field(i) - fieldType := configType.Field(i) - - // Get JSON tag name - jsonTag := fieldType.Tag.Get("json") - if jsonTag == "" { - continue - } - - // Remove options like ",omitempty" - if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 { - jsonTag = jsonTag[:commaIdx] - } - - // Find corresponding schema field - schemaField := s.GetFieldByName(jsonTag) - if schemaField == nil { - continue - } - - // Validate field value - fieldValue := field.Interface() - if err := schemaField.ValidateValue(fieldValue); err != nil { - errors = append(errors, err) - } - } - - return errors -} diff --git a/weed/admin/config/schema_test.go b/weed/admin/config/schema_test.go deleted file mode 100644 index 3d0d74a38..000000000 --- a/weed/admin/config/schema_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package config - -import ( - "testing" -) - -// Test structs that mirror the actual configuration structure -type TestBaseConfigForSchema struct { - Enabled bool `json:"enabled"` - ScanIntervalSeconds int `json:"scan_interval_seconds"` - MaxConcurrent int `json:"max_concurrent"` -} - -// ApplySchemaDefaults implements ConfigWithDefaults for test struct -func (c *TestBaseConfigForSchema) ApplySchemaDefaults(schema *Schema) error { - return schema.ApplyDefaultsToProtobuf(c) -} - -// Validate implements ConfigWithDefaults for test struct -func (c *TestBaseConfigForSchema) Validate() error { - return nil -} - -type TestTaskConfigForSchema struct { - TestBaseConfigForSchema - TaskSpecificField float64 `json:"task_specific_field"` - AnotherSpecificField string `json:"another_specific_field"` -} - -// ApplySchemaDefaults implements ConfigWithDefaults for test struct -func (c *TestTaskConfigForSchema) ApplySchemaDefaults(schema *Schema) error { - return schema.ApplyDefaultsToProtobuf(c) -} - -// Validate implements ConfigWithDefaults for test struct -func (c *TestTaskConfigForSchema) Validate() error { - return nil -} - -func createTestSchema() *Schema { - return &Schema{ - Fields: []*Field{ - { - Name: "enabled", - JSONName: "enabled", - Type: FieldTypeBool, - DefaultValue: true, - }, - { - Name: "scan_interval_seconds", - JSONName: "scan_interval_seconds", - Type: FieldTypeInt, - DefaultValue: 1800, - }, - { - Name: "max_concurrent", - JSONName: "max_concurrent", - Type: FieldTypeInt, - DefaultValue: 3, - }, - { - Name: "task_specific_field", - JSONName: "task_specific_field", - Type: FieldTypeFloat, - DefaultValue: 0.25, - }, - { - Name: "another_specific_field", - JSONName: "another_specific_field", - Type: FieldTypeString, - DefaultValue: "default_value", - }, - }, - } -} - -func TestApplyDefaults_WithEmbeddedStruct(t *testing.T) { - schema := createTestSchema() - - // Start with zero values - config := &TestTaskConfigForSchema{} - - err := schema.ApplyDefaultsToConfig(config) - if err != nil { - t.Fatalf("ApplyDefaultsToConfig failed: %v", err) - } - - // Verify embedded struct fields got default values - if config.Enabled != true { - t.Errorf("Expected Enabled=true (default), got %v", config.Enabled) - } - - if config.ScanIntervalSeconds != 1800 { - t.Errorf("Expected ScanIntervalSeconds=1800 (default), got %v", config.ScanIntervalSeconds) - } - - if config.MaxConcurrent != 3 { - t.Errorf("Expected MaxConcurrent=3 (default), got %v", config.MaxConcurrent) - } - - // Verify task-specific fields got default values - if config.TaskSpecificField != 0.25 { - t.Errorf("Expected TaskSpecificField=0.25 (default), got %v", config.TaskSpecificField) - } - - if config.AnotherSpecificField != "default_value" { - t.Errorf("Expected AnotherSpecificField='default_value' (default), got %v", config.AnotherSpecificField) - } -} - -func TestApplyDefaults_PartiallySet(t *testing.T) { - schema := createTestSchema() - - // Start with some pre-set values - config := &TestTaskConfigForSchema{ - TestBaseConfigForSchema: TestBaseConfigForSchema{ - Enabled: true, // Non-zero value, should not be overridden - ScanIntervalSeconds: 0, // Should get default - MaxConcurrent: 5, // Non-zero value, should not be overridden - }, - TaskSpecificField: 0.0, // Should get default - AnotherSpecificField: "custom", // Non-zero value, should not be overridden - } - - err := schema.ApplyDefaultsToConfig(config) - if err != nil { - t.Fatalf("ApplyDefaultsToConfig failed: %v", err) - } - - // Verify already-set values are preserved - if config.Enabled != true { - t.Errorf("Expected Enabled=true (pre-set), got %v", config.Enabled) - } - - if config.MaxConcurrent != 5 { - t.Errorf("Expected MaxConcurrent=5 (pre-set), got %v", config.MaxConcurrent) - } - - if config.AnotherSpecificField != "custom" { - t.Errorf("Expected AnotherSpecificField='custom' (pre-set), got %v", config.AnotherSpecificField) - } - - // Verify zero values got defaults - if config.ScanIntervalSeconds != 1800 { - t.Errorf("Expected ScanIntervalSeconds=1800 (default), got %v", config.ScanIntervalSeconds) - } - - if config.TaskSpecificField != 0.25 { - t.Errorf("Expected TaskSpecificField=0.25 (default), got %v", config.TaskSpecificField) - } -} - -func TestApplyDefaults_NonPointer(t *testing.T) { - schema := createTestSchema() - config := TestTaskConfigForSchema{} - // This should fail since we need a pointer to modify the struct - err := schema.ApplyDefaultsToProtobuf(config) - if err == nil { - t.Fatal("Expected error for non-pointer config, but got nil") - } -} - -func TestApplyDefaults_NonStruct(t *testing.T) { - schema := createTestSchema() - var config interface{} = "not a struct" - err := schema.ApplyDefaultsToProtobuf(config) - if err == nil { - t.Fatal("Expected error for non-struct config, but got nil") - } -} - -func TestApplyDefaults_EmptySchema(t *testing.T) { - schema := &Schema{Fields: []*Field{}} - config := &TestTaskConfigForSchema{} - - err := schema.ApplyDefaultsToConfig(config) - if err != nil { - t.Fatalf("ApplyDefaultsToConfig failed for empty schema: %v", err) - } - - // All fields should remain at zero values since no defaults are defined - if config.Enabled != false { - t.Errorf("Expected Enabled=false (zero value), got %v", config.Enabled) - } -} - -func TestApplyDefaults_MissingSchemaField(t *testing.T) { - // Schema with fewer fields than the struct - schema := &Schema{ - Fields: []*Field{ - { - Name: "enabled", - JSONName: "enabled", - Type: FieldTypeBool, - DefaultValue: true, - }, - // Note: missing scan_interval_seconds and other fields - }, - } - - config := &TestTaskConfigForSchema{} - err := schema.ApplyDefaultsToConfig(config) - if err != nil { - t.Fatalf("ApplyDefaultsToConfig failed: %v", err) - } - - // Only the field with a schema definition should get a default - if config.Enabled != true { - t.Errorf("Expected Enabled=true (has schema), got %v", config.Enabled) - } - - // Fields without schema should remain at zero values - if config.ScanIntervalSeconds != 0 { - t.Errorf("Expected ScanIntervalSeconds=0 (no schema), got %v", config.ScanIntervalSeconds) - } -} - -func BenchmarkApplyDefaults(b *testing.B) { - schema := createTestSchema() - config := &TestTaskConfigForSchema{} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = schema.ApplyDefaultsToConfig(config) - } -} diff --git a/weed/admin/dash/admin_data.go b/weed/admin/dash/admin_data.go deleted file mode 100644 index b474437c4..000000000 --- a/weed/admin/dash/admin_data.go +++ /dev/null @@ -1,297 +0,0 @@ -package dash - -import ( - "context" - "net/http" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -type AdminData struct { - Username string `json:"username"` - TotalVolumes int `json:"total_volumes"` - TotalFiles int64 `json:"total_files"` - TotalSize int64 `json:"total_size"` - VolumeSizeLimitMB uint64 `json:"volume_size_limit_mb"` - MasterNodes []MasterNode `json:"master_nodes"` - VolumeServers []VolumeServer `json:"volume_servers"` - FilerNodes []FilerNode `json:"filer_nodes"` - MessageBrokers []MessageBrokerNode `json:"message_brokers"` - DataCenters []DataCenter `json:"datacenters"` - LastUpdated time.Time `json:"last_updated"` - - // EC shard totals for dashboard - TotalEcVolumes int `json:"total_ec_volumes"` // Total number of EC volumes across all servers - TotalEcShards int `json:"total_ec_shards"` // Total number of EC shards across all servers -} - -// Object Store Users management structures -type ObjectStoreUser struct { - Username string `json:"username"` - Email string `json:"email"` - AccessKey string `json:"access_key"` - SecretKey string `json:"secret_key"` - Permissions []string `json:"permissions"` -} - -type ObjectStoreUsersData struct { - Username string `json:"username"` - Users []ObjectStoreUser `json:"users"` - TotalUsers int `json:"total_users"` - LastUpdated time.Time `json:"last_updated"` -} - -// User management request structures -type CreateUserRequest struct { - Username string `json:"username" binding:"required"` - Email string `json:"email"` - Actions []string `json:"actions"` - GenerateKey bool `json:"generate_key"` -} - -type UpdateUserRequest struct { - Email string `json:"email"` - Actions []string `json:"actions"` -} - -type UpdateUserPoliciesRequest struct { - Actions []string `json:"actions" binding:"required"` -} - -type AccessKeyInfo struct { - AccessKey string `json:"access_key"` - SecretKey string `json:"secret_key"` - CreatedAt time.Time `json:"created_at"` -} - -type UserDetails struct { - Username string `json:"username"` - Email string `json:"email"` - Actions []string `json:"actions"` - AccessKeys []AccessKeyInfo `json:"access_keys"` -} - -type FilerNode struct { - Address string `json:"address"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - LastUpdated time.Time `json:"last_updated"` -} - -type MessageBrokerNode struct { - Address string `json:"address"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - LastUpdated time.Time `json:"last_updated"` -} - -// GetAdminData retrieves admin data as a struct (for reuse by both JSON and HTML handlers) -func (s *AdminServer) GetAdminData(username string) (AdminData, error) { - if username == "" { - username = "admin" - } - - // Get cluster topology - topology, err := s.GetClusterTopology() - if err != nil { - glog.Errorf("Failed to get cluster topology: %v", err) - return AdminData{}, err - } - - // Get volume servers data with EC shard information - volumeServersData, err := s.GetClusterVolumeServers() - if err != nil { - glog.Errorf("Failed to get cluster volume servers: %v", err) - return AdminData{}, err - } - - // Get master nodes status - masterNodes := s.getMasterNodesStatus() - - // Get filer nodes status - filerNodes := s.getFilerNodesStatus() - - // Get message broker nodes status - messageBrokers := s.getMessageBrokerNodesStatus() - - // Get volume size limit from master configuration - var volumeSizeLimitMB uint64 = 30000 // Default to 30GB - err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return err - } - volumeSizeLimitMB = uint64(resp.VolumeSizeLimitMB) - return nil - }) - if err != nil { - glog.Warningf("Failed to get volume size limit from master: %v", err) - // Keep default value on error - } - - // Calculate EC shard totals - var totalEcVolumes, totalEcShards int - ecVolumeSet := make(map[uint32]bool) // To avoid counting the same EC volume multiple times - - for _, vs := range volumeServersData.VolumeServers { - totalEcShards += vs.EcShards - // Count unique EC volumes across all servers - for _, ecInfo := range vs.EcShardDetails { - ecVolumeSet[ecInfo.VolumeID] = true - } - } - totalEcVolumes = len(ecVolumeSet) - - // Prepare admin data - adminData := AdminData{ - Username: username, - TotalVolumes: topology.TotalVolumes, - TotalFiles: topology.TotalFiles, - TotalSize: topology.TotalSize, - VolumeSizeLimitMB: volumeSizeLimitMB, - MasterNodes: masterNodes, - VolumeServers: volumeServersData.VolumeServers, - FilerNodes: filerNodes, - MessageBrokers: messageBrokers, - DataCenters: topology.DataCenters, - LastUpdated: topology.UpdatedAt, - TotalEcVolumes: totalEcVolumes, - TotalEcShards: totalEcShards, - } - - return adminData, nil -} - -// ShowAdmin displays the main admin page (now uses GetAdminData) -func (s *AdminServer) ShowAdmin(c *gin.Context) { - username := c.GetString("username") - - adminData, err := s.GetAdminData(username) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get admin data: " + err.Error()}) - return - } - - // Return JSON for API calls - c.JSON(http.StatusOK, adminData) -} - -// ShowOverview displays cluster overview -func (s *AdminServer) ShowOverview(c *gin.Context) { - topology, err := s.GetClusterTopology() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, topology) -} - -// getMasterNodesStatus checks status of all master nodes -func (s *AdminServer) getMasterNodesStatus() []MasterNode { - var masterNodes []MasterNode - - // Since we have a single master address, create one entry - var isLeader bool = true // Assume leader since it's the only master we know about - - // Try to get leader info from this master - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - _, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return err - } - // For now, assume this master is the leader since we can connect to it - isLeader = true - return nil - }) - - if err != nil { - isLeader = false - } - - currentMaster := s.masterClient.GetMaster(context.Background()) - if currentMaster != "" { - masterNodes = append(masterNodes, MasterNode{ - Address: string(currentMaster), - IsLeader: isLeader, - }) - } - - return masterNodes -} - -// getFilerNodesStatus checks status of all filer nodes using master's ListClusterNodes -func (s *AdminServer) getFilerNodesStatus() []FilerNode { - var filerNodes []FilerNode - - // Get filer nodes from master using ListClusterNodes - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - return err - } - - // Process each filer node - for _, node := range resp.ClusterNodes { - filerNodes = append(filerNodes, FilerNode{ - Address: node.Address, - DataCenter: node.DataCenter, - Rack: node.Rack, - LastUpdated: time.Now(), - }) - } - - return nil - }) - - if err != nil { - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Errorf("Failed to get filer nodes from master %s: %v", currentMaster, err) - // Return empty list if we can't get filer info from master - return []FilerNode{} - } - - return filerNodes -} - -// getMessageBrokerNodesStatus checks status of all message broker nodes using master's ListClusterNodes -func (s *AdminServer) getMessageBrokerNodesStatus() []MessageBrokerNode { - var messageBrokers []MessageBrokerNode - - // Get message broker nodes from master using ListClusterNodes - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.BrokerType, - }) - if err != nil { - return err - } - - // Process each message broker node - for _, node := range resp.ClusterNodes { - messageBrokers = append(messageBrokers, MessageBrokerNode{ - Address: node.Address, - DataCenter: node.DataCenter, - Rack: node.Rack, - LastUpdated: time.Now(), - }) - } - - return nil - }) - - if err != nil { - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Errorf("Failed to get message broker nodes from master %s: %v", currentMaster, err) - // Return empty list if we can't get broker info from master - return []MessageBrokerNode{} - } - - return messageBrokers -} diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go deleted file mode 100644 index 4a1dd592f..000000000 --- a/weed/admin/dash/admin_server.go +++ /dev/null @@ -1,1986 +0,0 @@ -package dash - -import ( - "bytes" - "context" - "fmt" - "net/http" - "strconv" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/s3api" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" -) - -type AdminServer struct { - masterClient *wdclient.MasterClient - templateFS http.FileSystem - dataDir string - grpcDialOption grpc.DialOption - cacheExpiration time.Duration - lastCacheUpdate time.Time - cachedTopology *ClusterTopology - - // Filer discovery and caching - cachedFilers []string - lastFilerUpdate time.Time - filerCacheExpiration time.Duration - - // Credential management - credentialManager *credential.CredentialManager - - // Configuration persistence - configPersistence *ConfigPersistence - - // Maintenance system - maintenanceManager *maintenance.MaintenanceManager - - // Topic retention purger - topicRetentionPurger *TopicRetentionPurger - - // Worker gRPC server - workerGrpcServer *WorkerGrpcServer -} - -// Type definitions moved to types.go - -func NewAdminServer(masters string, templateFS http.FileSystem, dataDir string) *AdminServer { - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.admin") - - // Create master client with multiple master support - masterClient := wdclient.NewMasterClient( - grpcDialOption, - "", // filerGroup - not needed for admin - "admin", // clientType - "", // clientHost - not needed for admin - "", // dataCenter - not needed for admin - "", // rack - not needed for admin - *pb.ServerAddresses(masters).ToServiceDiscovery(), - ) - - // Start master client connection process (like shell and filer do) - ctx := context.Background() - go masterClient.KeepConnectedToMaster(ctx) - - server := &AdminServer{ - masterClient: masterClient, - templateFS: templateFS, - dataDir: dataDir, - grpcDialOption: grpcDialOption, - cacheExpiration: 10 * time.Second, - filerCacheExpiration: 30 * time.Second, // Cache filers for 30 seconds - configPersistence: NewConfigPersistence(dataDir), - } - - // Initialize topic retention purger - server.topicRetentionPurger = NewTopicRetentionPurger(server) - - // Initialize credential manager with defaults - credentialManager, err := credential.NewCredentialManagerWithDefaults("") - if err != nil { - glog.Warningf("Failed to initialize credential manager: %v", err) - // Continue without credential manager - will fall back to legacy approach - } else { - // For stores that need filer client details, set them - if store := credentialManager.GetStore(); store != nil { - if filerClientSetter, ok := store.(interface { - SetFilerClient(string, grpc.DialOption) - }); ok { - // We'll set the filer client later when we discover filers - // For now, just store the credential manager - server.credentialManager = credentialManager - - // Set up a goroutine to set filer client once we discover filers - go func() { - for { - filerAddr := server.GetFilerAddress() - if filerAddr != "" { - filerClientSetter.SetFilerClient(filerAddr, server.grpcDialOption) - glog.V(1).Infof("Set filer client for credential manager: %s", filerAddr) - break - } - glog.V(1).Infof("Waiting for filer discovery for credential manager...") - time.Sleep(5 * time.Second) // Retry every 5 seconds - } - }() - } else { - server.credentialManager = credentialManager - } - } else { - server.credentialManager = credentialManager - } - } - - // Initialize maintenance system - always initialize even without persistent storage - var maintenanceConfig *maintenance.MaintenanceConfig - if server.configPersistence.IsConfigured() { - var err error - maintenanceConfig, err = server.configPersistence.LoadMaintenanceConfig() - if err != nil { - glog.Errorf("Failed to load maintenance configuration: %v", err) - maintenanceConfig = maintenance.DefaultMaintenanceConfig() - } - - // Apply new defaults to handle schema changes (like enabling by default) - schema := maintenance.GetMaintenanceConfigSchema() - if err := schema.ApplyDefaultsToProtobuf(maintenanceConfig); err != nil { - glog.Warningf("Failed to apply schema defaults to loaded config: %v", err) - } - - // Force enable maintenance system for new default behavior - // This handles the case where old configs had Enabled=false as default - if !maintenanceConfig.Enabled { - glog.V(1).Infof("Enabling maintenance system (new default behavior)") - maintenanceConfig.Enabled = true - } - - glog.V(1).Infof("Maintenance system initialized with persistent configuration (enabled: %v)", maintenanceConfig.Enabled) - } else { - maintenanceConfig = maintenance.DefaultMaintenanceConfig() - glog.V(1).Infof("No data directory configured, maintenance system will run in memory-only mode (enabled: %v)", maintenanceConfig.Enabled) - } - - // Always initialize maintenance manager - server.InitMaintenanceManager(maintenanceConfig) - - // Load saved task configurations from persistence - server.loadTaskConfigurationsFromPersistence() - - // Start maintenance manager if enabled - if maintenanceConfig.Enabled { - go func() { - // Give master client a bit of time to connect before starting scans - time.Sleep(2 * time.Second) - if err := server.StartMaintenanceManager(); err != nil { - glog.Errorf("Failed to start maintenance manager: %v", err) - } - }() - } - - return server -} - -// loadTaskConfigurationsFromPersistence loads saved task configurations from protobuf files -func (s *AdminServer) loadTaskConfigurationsFromPersistence() { - if s.configPersistence == nil || !s.configPersistence.IsConfigured() { - glog.V(1).Infof("Config persistence not available, using default task configurations") - return - } - - // Load task configurations dynamically using the config update registry - configUpdateRegistry := tasks.GetGlobalConfigUpdateRegistry() - configUpdateRegistry.UpdateAllConfigs(s.configPersistence) -} - -// GetCredentialManager returns the credential manager -func (s *AdminServer) GetCredentialManager() *credential.CredentialManager { - return s.credentialManager -} - -// Filer discovery methods moved to client_management.go - -// Client management methods moved to client_management.go - -// WithFilerClient and WithVolumeServerClient methods moved to client_management.go - -// Cluster topology methods moved to cluster_topology.go - -// getTopologyViaGRPC method moved to cluster_topology.go - -// InvalidateCache method moved to cluster_topology.go - -// GetS3Buckets retrieves all Object Store buckets from the filer and collects size/object data from collections -func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) { - var buckets []S3Bucket - - // Build a map of collection name to collection data - collectionMap := make(map[string]struct { - Size int64 - FileCount int64 - }) - - // Collect volume information by collection - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - for _, volInfo := range diskInfo.VolumeInfos { - collection := volInfo.Collection - if collection == "" { - collection = "default" - } - - if _, exists := collectionMap[collection]; !exists { - collectionMap[collection] = struct { - Size int64 - FileCount int64 - }{} - } - - data := collectionMap[collection] - data.Size += int64(volInfo.Size) - data.FileCount += int64(volInfo.FileCount) - collectionMap[collection] = data - } - } - } - } - } - } - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to get volume information: %w", err) - } - - // Get filer configuration to determine FilerGroup - var filerGroup string - err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - configResp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - glog.Warningf("Failed to get filer configuration: %v", err) - // Continue without filer group - return nil - } - filerGroup = configResp.FilerGroup - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to get filer configuration: %w", err) - } - - // Now list buckets from the filer and match with collection data - err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // List buckets by looking at the /buckets directory - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: "/buckets", - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - return err - } - - if resp.Entry.IsDirectory { - bucketName := resp.Entry.Name - - // Determine collection name for this bucket - var collectionName string - if filerGroup != "" { - collectionName = fmt.Sprintf("%s_%s", filerGroup, bucketName) - } else { - collectionName = bucketName - } - - // Get size and object count from collection data - var size int64 - var objectCount int64 - if collectionData, exists := collectionMap[collectionName]; exists { - size = collectionData.Size - objectCount = collectionData.FileCount - } - - // Get quota information from entry - quota := resp.Entry.Quota - quotaEnabled := quota > 0 - if quota < 0 { - // Negative quota means disabled - quota = -quota - quotaEnabled = false - } - - // Get versioning and object lock information from extended attributes - versioningEnabled := false - objectLockEnabled := false - objectLockMode := "" - var objectLockDuration int32 = 0 - - if resp.Entry.Extended != nil { - // Use shared utility to extract versioning information - versioningEnabled = extractVersioningFromEntry(resp.Entry) - - // Use shared utility to extract Object Lock information - objectLockEnabled, objectLockMode, objectLockDuration = extractObjectLockInfoFromEntry(resp.Entry) - } - - bucket := S3Bucket{ - Name: bucketName, - CreatedAt: time.Unix(resp.Entry.Attributes.Crtime, 0), - Size: size, - ObjectCount: objectCount, - LastModified: time.Unix(resp.Entry.Attributes.Mtime, 0), - Quota: quota, - QuotaEnabled: quotaEnabled, - VersioningEnabled: versioningEnabled, - ObjectLockEnabled: objectLockEnabled, - ObjectLockMode: objectLockMode, - ObjectLockDuration: objectLockDuration, - } - buckets = append(buckets, bucket) - } - } - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to list Object Store buckets: %w", err) - } - - return buckets, nil -} - -// GetBucketDetails retrieves detailed information about a specific bucket -func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error) { - bucketPath := fmt.Sprintf("/buckets/%s", bucketName) - - details := &BucketDetails{ - Bucket: S3Bucket{ - Name: bucketName, - }, - Objects: []S3Object{}, - UpdatedAt: time.Now(), - } - - err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // Get bucket info - bucketResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: "/buckets", - Name: bucketName, - }) - if err != nil { - return fmt.Errorf("bucket not found: %w", err) - } - - details.Bucket.CreatedAt = time.Unix(bucketResp.Entry.Attributes.Crtime, 0) - details.Bucket.LastModified = time.Unix(bucketResp.Entry.Attributes.Mtime, 0) - - // Get quota information from entry - quota := bucketResp.Entry.Quota - quotaEnabled := quota > 0 - if quota < 0 { - // Negative quota means disabled - quota = -quota - quotaEnabled = false - } - details.Bucket.Quota = quota - details.Bucket.QuotaEnabled = quotaEnabled - - // Get versioning and object lock information from extended attributes - versioningEnabled := false - objectLockEnabled := false - objectLockMode := "" - var objectLockDuration int32 = 0 - - if bucketResp.Entry.Extended != nil { - // Use shared utility to extract versioning information - versioningEnabled = extractVersioningFromEntry(bucketResp.Entry) - - // Use shared utility to extract Object Lock information - objectLockEnabled, objectLockMode, objectLockDuration = extractObjectLockInfoFromEntry(bucketResp.Entry) - } - - details.Bucket.VersioningEnabled = versioningEnabled - details.Bucket.ObjectLockEnabled = objectLockEnabled - details.Bucket.ObjectLockMode = objectLockMode - details.Bucket.ObjectLockDuration = objectLockDuration - - // List objects in bucket (recursively) - return s.listBucketObjects(client, bucketPath, "", details) - }) - - if err != nil { - return nil, err - } - - return details, nil -} - -// listBucketObjects recursively lists all objects in a bucket -func (s *AdminServer) listBucketObjects(client filer_pb.SeaweedFilerClient, directory, prefix string, details *BucketDetails) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: directory, - Prefix: prefix, - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - return err - } - - entry := resp.Entry - if entry.IsDirectory { - // Recursively list subdirectories - subDir := fmt.Sprintf("%s/%s", directory, entry.Name) - err := s.listBucketObjects(client, subDir, "", details) - if err != nil { - return err - } - } else { - // Add file object - objectKey := entry.Name - if directory != fmt.Sprintf("/buckets/%s", details.Bucket.Name) { - // Remove bucket prefix to get relative path - relativePath := directory[len(fmt.Sprintf("/buckets/%s", details.Bucket.Name))+1:] - objectKey = fmt.Sprintf("%s/%s", relativePath, entry.Name) - } - - obj := S3Object{ - Key: objectKey, - Size: int64(entry.Attributes.FileSize), - LastModified: time.Unix(entry.Attributes.Mtime, 0), - ETag: "", // Could be calculated from chunks if needed - StorageClass: "STANDARD", - } - - details.Objects = append(details.Objects, obj) - details.TotalSize += obj.Size - details.TotalCount++ - } - } - - // Update bucket totals - details.Bucket.Size = details.TotalSize - details.Bucket.ObjectCount = details.TotalCount - - return nil -} - -// CreateS3Bucket creates a new S3 bucket -func (s *AdminServer) CreateS3Bucket(bucketName string) error { - return s.CreateS3BucketWithQuota(bucketName, 0, false) -} - -// DeleteS3Bucket deletes an S3 bucket and all its contents -func (s *AdminServer) DeleteS3Bucket(bucketName string) error { - return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // Delete bucket directory recursively - _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: "/buckets", - Name: bucketName, - IsDeleteData: true, - IsRecursive: true, - IgnoreRecursiveError: false, - }) - if err != nil { - return fmt.Errorf("failed to delete bucket: %w", err) - } - - return nil - }) -} - -// GetObjectStoreUsers retrieves object store users from identity.json -func (s *AdminServer) GetObjectStoreUsers() ([]ObjectStoreUser, error) { - s3cfg := &iam_pb.S3ApiConfiguration{} - - // Load IAM configuration from filer - err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var buf bytes.Buffer - if err := filer.ReadEntry(nil, client, filer.IamConfigDirectory, filer.IamIdentityFile, &buf); err != nil { - if err == filer_pb.ErrNotFound { - // If file doesn't exist, return empty configuration - return nil - } - return err - } - if buf.Len() > 0 { - return filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg) - } - return nil - }) - - if err != nil { - glog.Errorf("Failed to load IAM configuration: %v", err) - return []ObjectStoreUser{}, nil // Return empty list instead of error for UI - } - - var users []ObjectStoreUser - - // Convert IAM identities to ObjectStoreUser format - for _, identity := range s3cfg.Identities { - // Skip anonymous identity - if identity.Name == "anonymous" { - continue - } - - user := ObjectStoreUser{ - Username: identity.Name, - Permissions: identity.Actions, - } - - // Set email from account if available - if identity.Account != nil { - user.Email = identity.Account.EmailAddress - } - - // Get first access key for display - if len(identity.Credentials) > 0 { - user.AccessKey = identity.Credentials[0].AccessKey - user.SecretKey = identity.Credentials[0].SecretKey - } - - users = append(users, user) - } - - return users, nil -} - -// Volume server methods moved to volume_management.go - -// Volume methods moved to volume_management.go - -// sortVolumes method moved to volume_management.go - -// GetClusterCollections method moved to collection_management.go - -// GetClusterMasters retrieves cluster masters data -func (s *AdminServer) GetClusterMasters() (*ClusterMastersData, error) { - var masters []MasterInfo - var leaderCount int - - // First, get master information from topology - topology, err := s.GetClusterTopology() - if err != nil { - return nil, err - } - - // Create a map to merge topology and raft data - masterMap := make(map[string]*MasterInfo) - - // Add masters from topology - for _, master := range topology.Masters { - masterInfo := &MasterInfo{ - Address: master.Address, - IsLeader: master.IsLeader, - Suffrage: "", - } - - if master.IsLeader { - leaderCount++ - } - - masterMap[master.Address] = masterInfo - } - - // Then, get additional master information from Raft cluster - err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{}) - if err != nil { - return err - } - - // Process each raft server - for _, server := range resp.ClusterServers { - address := server.Address - - // Update existing master info or create new one - if masterInfo, exists := masterMap[address]; exists { - // Update existing master with raft data - masterInfo.IsLeader = server.IsLeader - masterInfo.Suffrage = server.Suffrage - } else { - // Create new master info from raft data - masterInfo := &MasterInfo{ - Address: address, - IsLeader: server.IsLeader, - Suffrage: server.Suffrage, - } - masterMap[address] = masterInfo - } - - if server.IsLeader { - // Update leader count based on raft data - leaderCount = 1 // There should only be one leader - } - } - - return nil - }) - - if err != nil { - // If gRPC call fails, log the error but continue with topology data - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Errorf("Failed to get raft cluster servers from master %s: %v", currentMaster, err) - } - - // Convert map to slice - for _, masterInfo := range masterMap { - masters = append(masters, *masterInfo) - } - - // If no masters found at all, add the current master as fallback - if len(masters) == 0 { - currentMaster := s.masterClient.GetMaster(context.Background()) - if currentMaster != "" { - masters = append(masters, MasterInfo{ - Address: string(currentMaster), - IsLeader: true, - Suffrage: "Voter", - }) - leaderCount = 1 - } - } - - return &ClusterMastersData{ - Masters: masters, - TotalMasters: len(masters), - LeaderCount: leaderCount, - LastUpdated: time.Now(), - }, nil -} - -// GetClusterFilers retrieves cluster filers data -func (s *AdminServer) GetClusterFilers() (*ClusterFilersData, error) { - var filers []FilerInfo - - // Get filer information from master using ListClusterNodes - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - return err - } - - // Process each filer node - for _, node := range resp.ClusterNodes { - createdAt := time.Unix(0, node.CreatedAtNs) - - filerInfo := FilerInfo{ - Address: node.Address, - DataCenter: node.DataCenter, - Rack: node.Rack, - Version: node.Version, - CreatedAt: createdAt, - } - - filers = append(filers, filerInfo) - } - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to get filer nodes from master: %w", err) - } - - return &ClusterFilersData{ - Filers: filers, - TotalFilers: len(filers), - LastUpdated: time.Now(), - }, nil -} - -// GetClusterBrokers retrieves cluster message brokers data -func (s *AdminServer) GetClusterBrokers() (*ClusterBrokersData, error) { - var brokers []MessageBrokerInfo - - // Get broker information from master using ListClusterNodes - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.BrokerType, - }) - if err != nil { - return err - } - - // Process each broker node - for _, node := range resp.ClusterNodes { - createdAt := time.Unix(0, node.CreatedAtNs) - - brokerInfo := MessageBrokerInfo{ - Address: node.Address, - DataCenter: node.DataCenter, - Rack: node.Rack, - Version: node.Version, - CreatedAt: createdAt, - } - - brokers = append(brokers, brokerInfo) - } - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to get broker nodes from master: %w", err) - } - - return &ClusterBrokersData{ - Brokers: brokers, - TotalBrokers: len(brokers), - LastUpdated: time.Now(), - }, nil -} - -// GetAllFilers method moved to client_management.go - -// GetVolumeDetails method moved to volume_management.go - -// VacuumVolume method moved to volume_management.go - -// ShowMaintenanceQueue displays the maintenance queue page -func (as *AdminServer) ShowMaintenanceQueue(c *gin.Context) { - data, err := as.getMaintenanceQueueData() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // This should not render HTML template, it should use the component approach - c.JSON(http.StatusOK, data) -} - -// ShowMaintenanceWorkers displays the maintenance workers page -func (as *AdminServer) ShowMaintenanceWorkers(c *gin.Context) { - workers, err := as.getMaintenanceWorkers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Create worker details data - workersData := make([]*WorkerDetailsData, 0, len(workers)) - for _, worker := range workers { - details, err := as.getMaintenanceWorkerDetails(worker.ID) - if err != nil { - // Create basic worker details if we can't get full details - details = &WorkerDetailsData{ - Worker: worker, - CurrentTasks: []*MaintenanceTask{}, - RecentTasks: []*MaintenanceTask{}, - Performance: &WorkerPerformance{ - TasksCompleted: 0, - TasksFailed: 0, - AverageTaskTime: 0, - Uptime: 0, - SuccessRate: 0, - }, - LastUpdated: time.Now(), - } - } - workersData = append(workersData, details) - } - - c.JSON(http.StatusOK, gin.H{ - "workers": workersData, - "title": "Maintenance Workers", - }) -} - -// ShowMaintenanceConfig displays the maintenance configuration page -func (as *AdminServer) ShowMaintenanceConfig(c *gin.Context) { - config, err := as.getMaintenanceConfig() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // This should not render HTML template, it should use the component approach - c.JSON(http.StatusOK, config) -} - -// UpdateMaintenanceConfig updates maintenance configuration from form -func (as *AdminServer) UpdateMaintenanceConfig(c *gin.Context) { - var config MaintenanceConfig - if err := c.ShouldBind(&config); err != nil { - c.HTML(http.StatusBadRequest, "error.html", gin.H{"error": err.Error()}) - return - } - - err := as.updateMaintenanceConfig(&config) - if err != nil { - c.HTML(http.StatusInternalServerError, "error.html", gin.H{"error": err.Error()}) - return - } - - c.Redirect(http.StatusSeeOther, "/maintenance/config") -} - -// TriggerMaintenanceScan triggers a maintenance scan -func (as *AdminServer) TriggerMaintenanceScan(c *gin.Context) { - err := as.triggerMaintenanceScan() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"success": false, "error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"success": true, "message": "Maintenance scan triggered"}) -} - -// GetMaintenanceTasks returns all maintenance tasks -func (as *AdminServer) GetMaintenanceTasks(c *gin.Context) { - tasks, err := as.getMaintenanceTasks() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, tasks) -} - -// GetMaintenanceTask returns a specific maintenance task -func (as *AdminServer) GetMaintenanceTask(c *gin.Context) { - taskID := c.Param("id") - task, err := as.getMaintenanceTask(taskID) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Task not found"}) - return - } - - c.JSON(http.StatusOK, task) -} - -// GetMaintenanceTaskDetailAPI returns detailed task information via API -func (as *AdminServer) GetMaintenanceTaskDetailAPI(c *gin.Context) { - taskID := c.Param("id") - taskDetail, err := as.GetMaintenanceTaskDetail(taskID) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Task detail not found", "details": err.Error()}) - return - } - - c.JSON(http.StatusOK, taskDetail) -} - -// ShowMaintenanceTaskDetail renders the task detail page -func (as *AdminServer) ShowMaintenanceTaskDetail(c *gin.Context) { - username := c.GetString("username") - if username == "" { - username = "admin" // Default fallback - } - - taskID := c.Param("id") - taskDetail, err := as.GetMaintenanceTaskDetail(taskID) - if err != nil { - c.HTML(http.StatusNotFound, "error.html", gin.H{ - "error": "Task not found", - "details": err.Error(), - }) - return - } - - // Prepare data for template - data := gin.H{ - "username": username, - "task": taskDetail.Task, - "taskDetail": taskDetail, - "title": fmt.Sprintf("Task Detail - %s", taskID), - } - - c.HTML(http.StatusOK, "task_detail.html", data) -} - -// CancelMaintenanceTask cancels a pending maintenance task -func (as *AdminServer) CancelMaintenanceTask(c *gin.Context) { - taskID := c.Param("id") - err := as.cancelMaintenanceTask(taskID) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"success": false, "error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"success": true, "message": "Task cancelled"}) -} - -// cancelMaintenanceTask cancels a pending maintenance task -func (as *AdminServer) cancelMaintenanceTask(taskID string) error { - if as.maintenanceManager == nil { - return fmt.Errorf("maintenance manager not initialized") - } - - return as.maintenanceManager.CancelTask(taskID) -} - -// GetMaintenanceWorkersAPI returns all maintenance workers -func (as *AdminServer) GetMaintenanceWorkersAPI(c *gin.Context) { - workers, err := as.getMaintenanceWorkers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, workers) -} - -// GetMaintenanceWorker returns a specific maintenance worker -func (as *AdminServer) GetMaintenanceWorker(c *gin.Context) { - workerID := c.Param("id") - worker, err := as.getMaintenanceWorkerDetails(workerID) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Worker not found"}) - return - } - - c.JSON(http.StatusOK, worker) -} - -// GetMaintenanceStats returns maintenance statistics -func (as *AdminServer) GetMaintenanceStats(c *gin.Context) { - stats, err := as.getMaintenanceStats() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, stats) -} - -// GetMaintenanceConfigAPI returns maintenance configuration -func (as *AdminServer) GetMaintenanceConfigAPI(c *gin.Context) { - config, err := as.getMaintenanceConfig() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, config) -} - -// UpdateMaintenanceConfigAPI updates maintenance configuration via API -func (as *AdminServer) UpdateMaintenanceConfigAPI(c *gin.Context) { - // Parse JSON into a generic map first to handle type conversions - var jsonConfig map[string]interface{} - if err := c.ShouldBindJSON(&jsonConfig); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Convert JSON map to protobuf configuration - config, err := convertJSONToMaintenanceConfig(jsonConfig) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()}) - return - } - - err = as.updateMaintenanceConfig(config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"success": true, "message": "Configuration updated"}) -} - -// GetMaintenanceConfigData returns maintenance configuration data (public wrapper) -func (as *AdminServer) GetMaintenanceConfigData() (*maintenance.MaintenanceConfigData, error) { - return as.getMaintenanceConfig() -} - -// UpdateMaintenanceConfigData updates maintenance configuration (public wrapper) -func (as *AdminServer) UpdateMaintenanceConfigData(config *maintenance.MaintenanceConfig) error { - return as.updateMaintenanceConfig(config) -} - -// Helper methods for maintenance operations - -// getMaintenanceQueueData returns data for the maintenance queue UI -func (as *AdminServer) getMaintenanceQueueData() (*maintenance.MaintenanceQueueData, error) { - tasks, err := as.getMaintenanceTasks() - if err != nil { - return nil, err - } - - workers, err := as.getMaintenanceWorkers() - if err != nil { - return nil, err - } - - stats, err := as.getMaintenanceQueueStats() - if err != nil { - return nil, err - } - - return &maintenance.MaintenanceQueueData{ - Tasks: tasks, - Workers: workers, - Stats: stats, - LastUpdated: time.Now(), - }, nil -} - -// GetMaintenanceQueueStats returns statistics for the maintenance queue (exported for handlers) -func (as *AdminServer) GetMaintenanceQueueStats() (*maintenance.QueueStats, error) { - return as.getMaintenanceQueueStats() -} - -// getMaintenanceQueueStats returns statistics for the maintenance queue -func (as *AdminServer) getMaintenanceQueueStats() (*maintenance.QueueStats, error) { - if as.maintenanceManager == nil { - return &maintenance.QueueStats{ - PendingTasks: 0, - RunningTasks: 0, - CompletedToday: 0, - FailedToday: 0, - TotalTasks: 0, - }, nil - } - - // Get real statistics from maintenance manager - stats := as.maintenanceManager.GetStats() - - // Convert MaintenanceStats to QueueStats - queueStats := &maintenance.QueueStats{ - PendingTasks: stats.TasksByStatus[maintenance.TaskStatusPending], - RunningTasks: stats.TasksByStatus[maintenance.TaskStatusAssigned] + stats.TasksByStatus[maintenance.TaskStatusInProgress], - CompletedToday: stats.CompletedToday, - FailedToday: stats.FailedToday, - TotalTasks: stats.TotalTasks, - } - - return queueStats, nil -} - -// getMaintenanceTasks returns all maintenance tasks -func (as *AdminServer) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) { - if as.maintenanceManager == nil { - return []*maintenance.MaintenanceTask{}, nil - } - - // Collect all tasks from memory across all statuses - allTasks := []*maintenance.MaintenanceTask{} - statuses := []maintenance.MaintenanceTaskStatus{ - maintenance.TaskStatusPending, - maintenance.TaskStatusAssigned, - maintenance.TaskStatusInProgress, - maintenance.TaskStatusCompleted, - maintenance.TaskStatusFailed, - maintenance.TaskStatusCancelled, - } - - for _, status := range statuses { - tasks := as.maintenanceManager.GetTasks(status, "", 0) - allTasks = append(allTasks, tasks...) - } - - // Also load any persisted tasks that might not be in memory - if as.configPersistence != nil { - persistedTasks, err := as.configPersistence.LoadAllTaskStates() - if err == nil { - // Add any persisted tasks not already in memory - for _, persistedTask := range persistedTasks { - found := false - for _, memoryTask := range allTasks { - if memoryTask.ID == persistedTask.ID { - found = true - break - } - } - if !found { - allTasks = append(allTasks, persistedTask) - } - } - } - } - - return allTasks, nil -} - -// getMaintenanceTask returns a specific maintenance task -func (as *AdminServer) getMaintenanceTask(taskID string) (*maintenance.MaintenanceTask, error) { - if as.maintenanceManager == nil { - return nil, fmt.Errorf("maintenance manager not initialized") - } - - // Search for the task across all statuses since we don't know which status it has - statuses := []maintenance.MaintenanceTaskStatus{ - maintenance.TaskStatusPending, - maintenance.TaskStatusAssigned, - maintenance.TaskStatusInProgress, - maintenance.TaskStatusCompleted, - maintenance.TaskStatusFailed, - maintenance.TaskStatusCancelled, - } - - // First, search for the task in memory across all statuses - for _, status := range statuses { - tasks := as.maintenanceManager.GetTasks(status, "", 0) // Get all tasks with this status - for _, task := range tasks { - if task.ID == taskID { - return task, nil - } - } - } - - // If not found in memory, try to load from persistent storage - if as.configPersistence != nil { - task, err := as.configPersistence.LoadTaskState(taskID) - if err == nil { - glog.V(2).Infof("Loaded task %s from persistent storage", taskID) - return task, nil - } - glog.V(2).Infof("Task %s not found in persistent storage: %v", taskID, err) - } - - return nil, fmt.Errorf("task %s not found", taskID) -} - -// GetMaintenanceTaskDetail returns comprehensive task details including logs and assignment history -func (as *AdminServer) GetMaintenanceTaskDetail(taskID string) (*maintenance.TaskDetailData, error) { - // Get basic task information - task, err := as.getMaintenanceTask(taskID) - if err != nil { - return nil, err - } - - // Create task detail structure from the loaded task - taskDetail := &maintenance.TaskDetailData{ - Task: task, - AssignmentHistory: task.AssignmentHistory, // Use assignment history from persisted task - ExecutionLogs: []*maintenance.TaskExecutionLog{}, - RelatedTasks: []*maintenance.MaintenanceTask{}, - LastUpdated: time.Now(), - } - - if taskDetail.AssignmentHistory == nil { - taskDetail.AssignmentHistory = []*maintenance.TaskAssignmentRecord{} - } - - // Get worker information if task is assigned - if task.WorkerID != "" { - workers := as.maintenanceManager.GetWorkers() - for _, worker := range workers { - if worker.ID == task.WorkerID { - taskDetail.WorkerInfo = worker - break - } - } - } - - // Get execution logs from worker if task is active/completed and worker is connected - if task.Status == maintenance.TaskStatusInProgress || task.Status == maintenance.TaskStatusCompleted { - if as.workerGrpcServer != nil && task.WorkerID != "" { - workerLogs, err := as.workerGrpcServer.RequestTaskLogs(task.WorkerID, taskID, 100, "") - if err == nil && len(workerLogs) > 0 { - // Convert worker logs to maintenance logs - for _, workerLog := range workerLogs { - maintenanceLog := &maintenance.TaskExecutionLog{ - Timestamp: time.Unix(workerLog.Timestamp, 0), - Level: workerLog.Level, - Message: workerLog.Message, - Source: "worker", - TaskID: taskID, - WorkerID: task.WorkerID, - } - // carry structured fields if present - if len(workerLog.Fields) > 0 { - maintenanceLog.Fields = make(map[string]string, len(workerLog.Fields)) - for k, v := range workerLog.Fields { - maintenanceLog.Fields[k] = v - } - } - // carry optional progress/status - if workerLog.Progress != 0 { - p := float64(workerLog.Progress) - maintenanceLog.Progress = &p - } - if workerLog.Status != "" { - maintenanceLog.Status = workerLog.Status - } - taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, maintenanceLog) - } - } else if err != nil { - // Add a diagnostic log entry when worker logs cannot be retrieved - diagnosticLog := &maintenance.TaskExecutionLog{ - Timestamp: time.Now(), - Level: "WARNING", - Message: fmt.Sprintf("Failed to retrieve worker logs: %v", err), - Source: "admin", - TaskID: taskID, - WorkerID: task.WorkerID, - } - taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, diagnosticLog) - glog.V(1).Infof("Failed to get worker logs for task %s from worker %s: %v", taskID, task.WorkerID, err) - } - } else { - // Add diagnostic information when worker is not available - reason := "worker gRPC server not available" - if task.WorkerID == "" { - reason = "no worker assigned to task" - } - diagnosticLog := &maintenance.TaskExecutionLog{ - Timestamp: time.Now(), - Level: "INFO", - Message: fmt.Sprintf("Worker logs not available: %s", reason), - Source: "admin", - TaskID: taskID, - WorkerID: task.WorkerID, - } - taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, diagnosticLog) - } - } - - // Get related tasks (other tasks on same volume/server) - if task.VolumeID != 0 || task.Server != "" { - allTasks := as.maintenanceManager.GetTasks("", "", 50) // Get recent tasks - for _, relatedTask := range allTasks { - if relatedTask.ID != taskID && - (relatedTask.VolumeID == task.VolumeID || relatedTask.Server == task.Server) { - taskDetail.RelatedTasks = append(taskDetail.RelatedTasks, relatedTask) - } - } - } - - // Save updated task detail to disk - if err := as.configPersistence.SaveTaskDetail(taskID, taskDetail); err != nil { - glog.V(1).Infof("Failed to save task detail for %s: %v", taskID, err) - } - - return taskDetail, nil -} - -// getMaintenanceWorkers returns all maintenance workers -func (as *AdminServer) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) { - if as.maintenanceManager == nil { - return []*MaintenanceWorker{}, nil - } - return as.maintenanceManager.GetWorkers(), nil -} - -// getMaintenanceWorkerDetails returns detailed information about a worker -func (as *AdminServer) getMaintenanceWorkerDetails(workerID string) (*WorkerDetailsData, error) { - if as.maintenanceManager == nil { - return nil, fmt.Errorf("maintenance manager not initialized") - } - - workers := as.maintenanceManager.GetWorkers() - var targetWorker *MaintenanceWorker - for _, worker := range workers { - if worker.ID == workerID { - targetWorker = worker - break - } - } - - if targetWorker == nil { - return nil, fmt.Errorf("worker %s not found", workerID) - } - - // Get current tasks for this worker - currentTasks := as.maintenanceManager.GetTasks(TaskStatusInProgress, "", 0) - var workerCurrentTasks []*MaintenanceTask - for _, task := range currentTasks { - if task.WorkerID == workerID { - workerCurrentTasks = append(workerCurrentTasks, task) - } - } - - // Get recent tasks for this worker - recentTasks := as.maintenanceManager.GetTasks(TaskStatusCompleted, "", 10) - var workerRecentTasks []*MaintenanceTask - for _, task := range recentTasks { - if task.WorkerID == workerID { - workerRecentTasks = append(workerRecentTasks, task) - } - } - - // Calculate performance metrics - var totalDuration time.Duration - var completedTasks, failedTasks int - for _, task := range workerRecentTasks { - if task.Status == TaskStatusCompleted { - completedTasks++ - if task.StartedAt != nil && task.CompletedAt != nil { - totalDuration += task.CompletedAt.Sub(*task.StartedAt) - } - } else if task.Status == TaskStatusFailed { - failedTasks++ - } - } - - var averageTaskTime time.Duration - var successRate float64 - if completedTasks+failedTasks > 0 { - if completedTasks > 0 { - averageTaskTime = totalDuration / time.Duration(completedTasks) - } - successRate = float64(completedTasks) / float64(completedTasks+failedTasks) * 100 - } - - return &WorkerDetailsData{ - Worker: targetWorker, - CurrentTasks: workerCurrentTasks, - RecentTasks: workerRecentTasks, - Performance: &WorkerPerformance{ - TasksCompleted: completedTasks, - TasksFailed: failedTasks, - AverageTaskTime: averageTaskTime, - Uptime: time.Since(targetWorker.LastHeartbeat), // This should be tracked properly - SuccessRate: successRate, - }, - LastUpdated: time.Now(), - }, nil -} - -// GetWorkerLogs fetches logs from a specific worker for a task -func (as *AdminServer) GetWorkerLogs(c *gin.Context) { - workerID := c.Param("id") - taskID := c.Query("taskId") - maxEntriesStr := c.DefaultQuery("maxEntries", "100") - logLevel := c.DefaultQuery("logLevel", "") - - maxEntries := int32(100) - if maxEntriesStr != "" { - if parsed, err := strconv.ParseInt(maxEntriesStr, 10, 32); err == nil { - maxEntries = int32(parsed) - } - } - - if as.workerGrpcServer == nil { - c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Worker gRPC server not available"}) - return - } - - logs, err := as.workerGrpcServer.RequestTaskLogs(workerID, taskID, maxEntries, logLevel) - if err != nil { - c.JSON(http.StatusBadGateway, gin.H{"error": fmt.Sprintf("Failed to get logs from worker: %v", err)}) - return - } - - c.JSON(http.StatusOK, gin.H{"worker_id": workerID, "task_id": taskID, "logs": logs, "count": len(logs)}) -} - -// getMaintenanceStats returns maintenance statistics -func (as *AdminServer) getMaintenanceStats() (*MaintenanceStats, error) { - if as.maintenanceManager == nil { - return &MaintenanceStats{ - TotalTasks: 0, - TasksByStatus: make(map[MaintenanceTaskStatus]int), - TasksByType: make(map[MaintenanceTaskType]int), - ActiveWorkers: 0, - }, nil - } - return as.maintenanceManager.GetStats(), nil -} - -// getMaintenanceConfig returns maintenance configuration -func (as *AdminServer) getMaintenanceConfig() (*maintenance.MaintenanceConfigData, error) { - // Load configuration from persistent storage - config, err := as.configPersistence.LoadMaintenanceConfig() - if err != nil { - // Fallback to default configuration - config = maintenance.DefaultMaintenanceConfig() - } - - // Note: Do NOT apply schema defaults to existing config as it overrides saved values - // Only apply defaults when creating new configs or handling fallback cases - // The schema defaults should only be used in the UI for new installations - - // Get system stats from maintenance manager if available - var systemStats *MaintenanceStats - if as.maintenanceManager != nil { - systemStats = as.maintenanceManager.GetStats() - } else { - // Fallback stats - systemStats = &MaintenanceStats{ - TotalTasks: 0, - TasksByStatus: map[MaintenanceTaskStatus]int{ - TaskStatusPending: 0, - TaskStatusInProgress: 0, - TaskStatusCompleted: 0, - TaskStatusFailed: 0, - }, - TasksByType: make(map[MaintenanceTaskType]int), - ActiveWorkers: 0, - CompletedToday: 0, - FailedToday: 0, - AverageTaskTime: 0, - LastScanTime: time.Now().Add(-time.Hour), - NextScanTime: time.Now().Add(time.Duration(config.ScanIntervalSeconds) * time.Second), - } - } - - configData := &MaintenanceConfigData{ - Config: config, - IsEnabled: config.Enabled, - LastScanTime: systemStats.LastScanTime, - NextScanTime: systemStats.NextScanTime, - SystemStats: systemStats, - MenuItems: maintenance.BuildMaintenanceMenuItems(), - } - - return configData, nil -} - -// updateMaintenanceConfig updates maintenance configuration -func (as *AdminServer) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error { - // Use ConfigField validation instead of standalone validation - if err := maintenance.ValidateMaintenanceConfigWithSchema(config); err != nil { - return fmt.Errorf("configuration validation failed: %v", err) - } - - // Save configuration to persistent storage - if err := as.configPersistence.SaveMaintenanceConfig(config); err != nil { - return fmt.Errorf("failed to save maintenance configuration: %w", err) - } - - // Update maintenance manager if available - if as.maintenanceManager != nil { - if err := as.maintenanceManager.UpdateConfig(config); err != nil { - glog.Errorf("Failed to update maintenance manager config: %v", err) - // Don't return error here, just log it - } - } - - glog.V(1).Infof("Updated maintenance configuration (enabled: %v, scan interval: %ds)", - config.Enabled, config.ScanIntervalSeconds) - return nil -} - -// triggerMaintenanceScan triggers a maintenance scan -func (as *AdminServer) triggerMaintenanceScan() error { - if as.maintenanceManager == nil { - return fmt.Errorf("maintenance manager not initialized") - } - - glog.V(1).Infof("Triggering maintenance scan") - err := as.maintenanceManager.TriggerScan() - if err != nil { - glog.Errorf("Failed to trigger maintenance scan: %v", err) - return err - } - glog.V(1).Infof("Maintenance scan triggered successfully") - return nil -} - -// TriggerTopicRetentionPurgeAPI triggers topic retention purge via HTTP API -func (as *AdminServer) TriggerTopicRetentionPurgeAPI(c *gin.Context) { - err := as.TriggerTopicRetentionPurge() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "Topic retention purge triggered successfully"}) -} - -// GetConfigInfo returns information about the admin configuration -func (as *AdminServer) GetConfigInfo(c *gin.Context) { - configInfo := as.configPersistence.GetConfigInfo() - - // Add additional admin server info - currentMaster := as.masterClient.GetMaster(context.Background()) - configInfo["master_address"] = string(currentMaster) - configInfo["cache_expiration"] = as.cacheExpiration.String() - configInfo["filer_cache_expiration"] = as.filerCacheExpiration.String() - - // Add maintenance system info - if as.maintenanceManager != nil { - configInfo["maintenance_enabled"] = true - configInfo["maintenance_running"] = as.maintenanceManager.IsRunning() - } else { - configInfo["maintenance_enabled"] = false - configInfo["maintenance_running"] = false - } - - c.JSON(http.StatusOK, gin.H{ - "config_info": configInfo, - "title": "Configuration Information", - }) -} - -// GetMaintenanceWorkersData returns workers data for the maintenance workers page -func (as *AdminServer) GetMaintenanceWorkersData() (*MaintenanceWorkersData, error) { - workers, err := as.getMaintenanceWorkers() - if err != nil { - return nil, err - } - - // Create worker details data - workersData := make([]*WorkerDetailsData, 0, len(workers)) - activeWorkers := 0 - busyWorkers := 0 - totalLoad := 0 - - for _, worker := range workers { - details, err := as.getMaintenanceWorkerDetails(worker.ID) - if err != nil { - // Create basic worker details if we can't get full details - details = &WorkerDetailsData{ - Worker: worker, - CurrentTasks: []*MaintenanceTask{}, - RecentTasks: []*MaintenanceTask{}, - Performance: &WorkerPerformance{ - TasksCompleted: 0, - TasksFailed: 0, - AverageTaskTime: 0, - Uptime: 0, - SuccessRate: 0, - }, - LastUpdated: time.Now(), - } - } - workersData = append(workersData, details) - - if worker.Status == "active" { - activeWorkers++ - } else if worker.Status == "busy" { - busyWorkers++ - } - totalLoad += worker.CurrentLoad - } - - return &MaintenanceWorkersData{ - Workers: workersData, - ActiveWorkers: activeWorkers, - BusyWorkers: busyWorkers, - TotalLoad: totalLoad, - LastUpdated: time.Now(), - }, nil -} - -// StartWorkerGrpcServer starts the worker gRPC server -func (s *AdminServer) StartWorkerGrpcServer(grpcPort int) error { - if s.workerGrpcServer != nil { - return fmt.Errorf("worker gRPC server is already running") - } - - s.workerGrpcServer = NewWorkerGrpcServer(s) - return s.workerGrpcServer.StartWithTLS(grpcPort) -} - -// StopWorkerGrpcServer stops the worker gRPC server -func (s *AdminServer) StopWorkerGrpcServer() error { - if s.workerGrpcServer != nil { - err := s.workerGrpcServer.Stop() - s.workerGrpcServer = nil - return err - } - return nil -} - -// GetWorkerGrpcServer returns the worker gRPC server -func (s *AdminServer) GetWorkerGrpcServer() *WorkerGrpcServer { - return s.workerGrpcServer -} - -// Maintenance system integration methods - -// InitMaintenanceManager initializes the maintenance manager -func (s *AdminServer) InitMaintenanceManager(config *maintenance.MaintenanceConfig) { - s.maintenanceManager = maintenance.NewMaintenanceManager(s, config) - - // Set up task persistence if config persistence is available - if s.configPersistence != nil { - queue := s.maintenanceManager.GetQueue() - if queue != nil { - queue.SetPersistence(s.configPersistence) - - // Load tasks from persistence on startup - if err := queue.LoadTasksFromPersistence(); err != nil { - glog.Errorf("Failed to load tasks from persistence: %v", err) - } - } - } - - glog.V(1).Infof("Maintenance manager initialized (enabled: %v)", config.Enabled) -} - -// GetMaintenanceManager returns the maintenance manager -func (s *AdminServer) GetMaintenanceManager() *maintenance.MaintenanceManager { - return s.maintenanceManager -} - -// StartMaintenanceManager starts the maintenance manager -func (s *AdminServer) StartMaintenanceManager() error { - if s.maintenanceManager == nil { - return fmt.Errorf("maintenance manager not initialized") - } - return s.maintenanceManager.Start() -} - -// StopMaintenanceManager stops the maintenance manager -func (s *AdminServer) StopMaintenanceManager() { - if s.maintenanceManager != nil { - s.maintenanceManager.Stop() - } -} - -// TriggerTopicRetentionPurge triggers topic data purging based on retention policies -func (s *AdminServer) TriggerTopicRetentionPurge() error { - if s.topicRetentionPurger == nil { - return fmt.Errorf("topic retention purger not initialized") - } - - glog.V(0).Infof("Triggering topic retention purge") - return s.topicRetentionPurger.PurgeExpiredTopicData() -} - -// GetTopicRetentionPurger returns the topic retention purger -func (s *AdminServer) GetTopicRetentionPurger() *TopicRetentionPurger { - return s.topicRetentionPurger -} - -// CreateTopicWithRetention creates a new topic with optional retention configuration -func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partitionCount int32, retentionEnabled bool, retentionSeconds int64) error { - // Find broker leader to create the topic - brokerLeader, err := s.findBrokerLeader() - if err != nil { - return fmt.Errorf("failed to find broker leader: %w", err) - } - - // Create retention configuration - var retention *mq_pb.TopicRetention - if retentionEnabled { - retention = &mq_pb.TopicRetention{ - Enabled: true, - RetentionSeconds: retentionSeconds, - } - } else { - retention = &mq_pb.TopicRetention{ - Enabled: false, - RetentionSeconds: 0, - } - } - - // Create the topic via broker - err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - _, err := client.ConfigureTopic(ctx, &mq_pb.ConfigureTopicRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: name, - }, - PartitionCount: partitionCount, - Retention: retention, - }) - return err - }) - - if err != nil { - return fmt.Errorf("failed to create topic: %w", err) - } - - glog.V(0).Infof("Created topic %s.%s with %d partitions (retention: enabled=%v, seconds=%d)", - namespace, name, partitionCount, retentionEnabled, retentionSeconds) - return nil -} - -// UpdateTopicRetention updates the retention configuration for an existing topic -func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool, retentionSeconds int64) error { - // Get broker information from master - var brokerAddress string - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.BrokerType, - }) - if err != nil { - return err - } - - // Find the first available broker - for _, node := range resp.ClusterNodes { - brokerAddress = node.Address - break - } - - return nil - }) - - if err != nil { - return fmt.Errorf("failed to get broker nodes from master: %w", err) - } - - if brokerAddress == "" { - return fmt.Errorf("no active brokers found") - } - - // Create gRPC connection - conn, err := grpc.NewClient(brokerAddress, s.grpcDialOption) - if err != nil { - return fmt.Errorf("failed to connect to broker: %w", err) - } - defer conn.Close() - - client := mq_pb.NewSeaweedMessagingClient(conn) - - // First, get the current topic configuration to preserve existing settings - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - currentConfig, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: name, - }, - }) - if err != nil { - return fmt.Errorf("failed to get current topic configuration: %w", err) - } - - // Create the topic configuration request, preserving all existing settings - configRequest := &mq_pb.ConfigureTopicRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: name, - }, - // Preserve existing partition count - this is critical! - PartitionCount: currentConfig.PartitionCount, - // Preserve existing schema if it exists - MessageRecordType: currentConfig.MessageRecordType, - KeyColumns: currentConfig.KeyColumns, - } - - // Update only the retention configuration - if enabled { - configRequest.Retention = &mq_pb.TopicRetention{ - RetentionSeconds: retentionSeconds, - Enabled: true, - } - } else { - // Set retention to disabled - configRequest.Retention = &mq_pb.TopicRetention{ - RetentionSeconds: 0, - Enabled: false, - } - } - - // Send the configuration request with preserved settings - _, err = client.ConfigureTopic(ctx, configRequest) - if err != nil { - return fmt.Errorf("failed to update topic retention: %w", err) - } - - glog.V(0).Infof("Updated topic %s.%s retention (enabled: %v, seconds: %d) while preserving %d partitions", - namespace, name, enabled, retentionSeconds, currentConfig.PartitionCount) - return nil -} - -// Shutdown gracefully shuts down the admin server -func (s *AdminServer) Shutdown() { - glog.V(1).Infof("Shutting down admin server...") - - // Stop maintenance manager - s.StopMaintenanceManager() - - // Stop worker gRPC server - if err := s.StopWorkerGrpcServer(); err != nil { - glog.Errorf("Failed to stop worker gRPC server: %v", err) - } - - glog.V(1).Infof("Admin server shutdown complete") -} - -// Function to extract Object Lock information from bucket entry using shared utilities -func extractObjectLockInfoFromEntry(entry *filer_pb.Entry) (bool, string, int32) { - // Try to load Object Lock configuration using shared utility - if config, found := s3api.LoadObjectLockConfigurationFromExtended(entry); found { - return s3api.ExtractObjectLockInfoFromConfig(config) - } - - return false, "", 0 -} - -// Function to extract versioning information from bucket entry using shared utilities -func extractVersioningFromEntry(entry *filer_pb.Entry) bool { - enabled, _ := s3api.LoadVersioningFromExtended(entry) - return enabled -} - -// GetConfigPersistence returns the config persistence manager -func (as *AdminServer) GetConfigPersistence() *ConfigPersistence { - return as.configPersistence -} - -// convertJSONToMaintenanceConfig converts JSON map to protobuf MaintenanceConfig -func convertJSONToMaintenanceConfig(jsonConfig map[string]interface{}) (*maintenance.MaintenanceConfig, error) { - config := &maintenance.MaintenanceConfig{} - - // Helper function to get int32 from interface{} - getInt32 := func(key string) (int32, error) { - if val, ok := jsonConfig[key]; ok { - switch v := val.(type) { - case int: - return int32(v), nil - case int32: - return v, nil - case int64: - return int32(v), nil - case float64: - return int32(v), nil - default: - return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, v) - } - } - return 0, nil - } - - // Helper function to get bool from interface{} - getBool := func(key string) bool { - if val, ok := jsonConfig[key]; ok { - if b, ok := val.(bool); ok { - return b - } - } - return false - } - - var err error - - // Convert basic fields - config.Enabled = getBool("enabled") - - if config.ScanIntervalSeconds, err = getInt32("scan_interval_seconds"); err != nil { - return nil, err - } - if config.WorkerTimeoutSeconds, err = getInt32("worker_timeout_seconds"); err != nil { - return nil, err - } - if config.TaskTimeoutSeconds, err = getInt32("task_timeout_seconds"); err != nil { - return nil, err - } - if config.RetryDelaySeconds, err = getInt32("retry_delay_seconds"); err != nil { - return nil, err - } - if config.MaxRetries, err = getInt32("max_retries"); err != nil { - return nil, err - } - if config.CleanupIntervalSeconds, err = getInt32("cleanup_interval_seconds"); err != nil { - return nil, err - } - if config.TaskRetentionSeconds, err = getInt32("task_retention_seconds"); err != nil { - return nil, err - } - - // Convert policy if present - if policyData, ok := jsonConfig["policy"]; ok { - if policyMap, ok := policyData.(map[string]interface{}); ok { - policy := &maintenance.MaintenancePolicy{} - - if globalMaxConcurrent, err := getInt32FromMap(policyMap, "global_max_concurrent"); err != nil { - return nil, err - } else { - policy.GlobalMaxConcurrent = globalMaxConcurrent - } - - if defaultRepeatIntervalSeconds, err := getInt32FromMap(policyMap, "default_repeat_interval_seconds"); err != nil { - return nil, err - } else { - policy.DefaultRepeatIntervalSeconds = defaultRepeatIntervalSeconds - } - - if defaultCheckIntervalSeconds, err := getInt32FromMap(policyMap, "default_check_interval_seconds"); err != nil { - return nil, err - } else { - policy.DefaultCheckIntervalSeconds = defaultCheckIntervalSeconds - } - - // Convert task policies if present - if taskPoliciesData, ok := policyMap["task_policies"]; ok { - if taskPoliciesMap, ok := taskPoliciesData.(map[string]interface{}); ok { - policy.TaskPolicies = make(map[string]*maintenance.TaskPolicy) - - for taskType, taskPolicyData := range taskPoliciesMap { - if taskPolicyMap, ok := taskPolicyData.(map[string]interface{}); ok { - taskPolicy := &maintenance.TaskPolicy{} - - taskPolicy.Enabled = getBoolFromMap(taskPolicyMap, "enabled") - - if maxConcurrent, err := getInt32FromMap(taskPolicyMap, "max_concurrent"); err != nil { - return nil, err - } else { - taskPolicy.MaxConcurrent = maxConcurrent - } - - if repeatIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "repeat_interval_seconds"); err != nil { - return nil, err - } else { - taskPolicy.RepeatIntervalSeconds = repeatIntervalSeconds - } - - if checkIntervalSeconds, err := getInt32FromMap(taskPolicyMap, "check_interval_seconds"); err != nil { - return nil, err - } else { - taskPolicy.CheckIntervalSeconds = checkIntervalSeconds - } - - policy.TaskPolicies[taskType] = taskPolicy - } - } - } - } - - config.Policy = policy - } - } - - return config, nil -} - -// Helper functions for map conversion -func getInt32FromMap(m map[string]interface{}, key string) (int32, error) { - if val, ok := m[key]; ok { - switch v := val.(type) { - case int: - return int32(v), nil - case int32: - return v, nil - case int64: - return int32(v), nil - case float64: - return int32(v), nil - default: - return 0, fmt.Errorf("invalid type for %s: expected number, got %T", key, v) - } - } - return 0, nil -} - -func getBoolFromMap(m map[string]interface{}, key string) bool { - if val, ok := m[key]; ok { - if b, ok := val.(bool); ok { - return b - } - } - return false -} diff --git a/weed/admin/dash/auth_middleware.go b/weed/admin/dash/auth_middleware.go deleted file mode 100644 index 986a30290..000000000 --- a/weed/admin/dash/auth_middleware.go +++ /dev/null @@ -1,53 +0,0 @@ -package dash - -import ( - "net/http" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -// ShowLogin displays the login page -func (s *AdminServer) ShowLogin(c *gin.Context) { - // If authentication is not required, redirect to admin - session := sessions.Default(c) - if session.Get("authenticated") == true { - c.Redirect(http.StatusSeeOther, "/admin") - return - } - - // For now, return a simple login form as JSON - c.HTML(http.StatusOK, "login.html", gin.H{ - "title": "SeaweedFS Admin Login", - "error": c.Query("error"), - }) -} - -// HandleLogin handles login form submission -func (s *AdminServer) HandleLogin(username, password string) gin.HandlerFunc { - return func(c *gin.Context) { - loginUsername := c.PostForm("username") - loginPassword := c.PostForm("password") - - if loginUsername == username && loginPassword == password { - session := sessions.Default(c) - session.Set("authenticated", true) - session.Set("username", loginUsername) - session.Save() - - c.Redirect(http.StatusSeeOther, "/admin") - return - } - - // Authentication failed - c.Redirect(http.StatusSeeOther, "/login?error=Invalid credentials") - } -} - -// HandleLogout handles user logout -func (s *AdminServer) HandleLogout(c *gin.Context) { - session := sessions.Default(c) - session.Clear() - session.Save() - c.Redirect(http.StatusSeeOther, "/login") -} diff --git a/weed/admin/dash/bucket_management.go b/weed/admin/dash/bucket_management.go deleted file mode 100644 index 5942d5695..000000000 --- a/weed/admin/dash/bucket_management.go +++ /dev/null @@ -1,395 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api" -) - -// S3 Bucket management data structures for templates -type S3BucketsData struct { - Username string `json:"username"` - Buckets []S3Bucket `json:"buckets"` - TotalBuckets int `json:"total_buckets"` - TotalSize int64 `json:"total_size"` - LastUpdated time.Time `json:"last_updated"` -} - -type CreateBucketRequest struct { - Name string `json:"name" binding:"required"` - Region string `json:"region"` - QuotaSize int64 `json:"quota_size"` // Quota size in bytes - QuotaUnit string `json:"quota_unit"` // Unit: MB, GB, TB - QuotaEnabled bool `json:"quota_enabled"` // Whether quota is enabled - VersioningEnabled bool `json:"versioning_enabled"` // Whether versioning is enabled - ObjectLockEnabled bool `json:"object_lock_enabled"` // Whether object lock is enabled - ObjectLockMode string `json:"object_lock_mode"` // Object lock mode: "GOVERNANCE" or "COMPLIANCE" - SetDefaultRetention bool `json:"set_default_retention"` // Whether to set default retention - ObjectLockDuration int32 `json:"object_lock_duration"` // Default retention duration in days -} - -// S3 Bucket Management Handlers - -// ShowS3Buckets displays the Object Store buckets management page -func (s *AdminServer) ShowS3Buckets(c *gin.Context) { - username := c.GetString("username") - - buckets, err := s.GetS3Buckets() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get Object Store buckets: " + err.Error()}) - return - } - - // Calculate totals - var totalSize int64 - for _, bucket := range buckets { - totalSize += bucket.Size - } - - data := S3BucketsData{ - Username: username, - Buckets: buckets, - TotalBuckets: len(buckets), - TotalSize: totalSize, - LastUpdated: time.Now(), - } - - c.JSON(http.StatusOK, data) -} - -// ShowBucketDetails displays detailed information about a specific bucket -func (s *AdminServer) ShowBucketDetails(c *gin.Context) { - bucketName := c.Param("bucket") - if bucketName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name is required"}) - return - } - - details, err := s.GetBucketDetails(bucketName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get bucket details: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, details) -} - -// CreateBucket creates a new S3 bucket -func (s *AdminServer) CreateBucket(c *gin.Context) { - var req CreateBucketRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Validate bucket name (basic validation) - if len(req.Name) < 3 || len(req.Name) > 63 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name must be between 3 and 63 characters"}) - return - } - - // Validate object lock settings - if req.ObjectLockEnabled { - // Object lock requires versioning to be enabled - req.VersioningEnabled = true - - // Validate object lock mode - if req.ObjectLockMode != "GOVERNANCE" && req.ObjectLockMode != "COMPLIANCE" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Object lock mode must be either GOVERNANCE or COMPLIANCE"}) - return - } - - // Validate retention duration if default retention is enabled - if req.SetDefaultRetention { - if req.ObjectLockDuration <= 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Object lock duration must be greater than 0 days when default retention is enabled"}) - return - } - } - } - - // Convert quota to bytes - quotaBytes := convertQuotaToBytes(req.QuotaSize, req.QuotaUnit) - - err := s.CreateS3BucketWithObjectLock(req.Name, quotaBytes, req.QuotaEnabled, req.VersioningEnabled, req.ObjectLockEnabled, req.ObjectLockMode, req.SetDefaultRetention, req.ObjectLockDuration) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create bucket: " + err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{ - "message": "Bucket created successfully", - "bucket": req.Name, - "quota_size": req.QuotaSize, - "quota_unit": req.QuotaUnit, - "quota_enabled": req.QuotaEnabled, - "versioning_enabled": req.VersioningEnabled, - "object_lock_enabled": req.ObjectLockEnabled, - "object_lock_mode": req.ObjectLockMode, - "object_lock_duration": req.ObjectLockDuration, - }) -} - -// UpdateBucketQuota updates the quota settings for a bucket -func (s *AdminServer) UpdateBucketQuota(c *gin.Context) { - bucketName := c.Param("bucket") - if bucketName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name is required"}) - return - } - - var req struct { - QuotaSize int64 `json:"quota_size"` - QuotaUnit string `json:"quota_unit"` - QuotaEnabled bool `json:"quota_enabled"` - } - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Convert quota to bytes - quotaBytes := convertQuotaToBytes(req.QuotaSize, req.QuotaUnit) - - err := s.SetBucketQuota(bucketName, quotaBytes, req.QuotaEnabled) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update bucket quota: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Bucket quota updated successfully", - "bucket": bucketName, - "quota_size": req.QuotaSize, - "quota_unit": req.QuotaUnit, - "quota_enabled": req.QuotaEnabled, - }) -} - -// DeleteBucket deletes an S3 bucket -func (s *AdminServer) DeleteBucket(c *gin.Context) { - bucketName := c.Param("bucket") - if bucketName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name is required"}) - return - } - - err := s.DeleteS3Bucket(bucketName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete bucket: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Bucket deleted successfully", - "bucket": bucketName, - }) -} - -// ListBucketsAPI returns the list of buckets as JSON -func (s *AdminServer) ListBucketsAPI(c *gin.Context) { - buckets, err := s.GetS3Buckets() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get buckets: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "buckets": buckets, - "total": len(buckets), - }) -} - -// Helper function to convert quota size and unit to bytes -func convertQuotaToBytes(size int64, unit string) int64 { - if size <= 0 { - return 0 - } - - switch strings.ToUpper(unit) { - case "TB": - return size * 1024 * 1024 * 1024 * 1024 - case "GB": - return size * 1024 * 1024 * 1024 - case "MB": - return size * 1024 * 1024 - default: - // Default to MB if unit is not recognized - return size * 1024 * 1024 - } -} - -// Helper function to convert bytes to appropriate unit and size -func convertBytesToQuota(bytes int64) (int64, string) { - if bytes == 0 { - return 0, "MB" - } - - // Convert to TB if >= 1TB - if bytes >= 1024*1024*1024*1024 && bytes%(1024*1024*1024*1024) == 0 { - return bytes / (1024 * 1024 * 1024 * 1024), "TB" - } - - // Convert to GB if >= 1GB - if bytes >= 1024*1024*1024 && bytes%(1024*1024*1024) == 0 { - return bytes / (1024 * 1024 * 1024), "GB" - } - - // Convert to MB (default) - return bytes / (1024 * 1024), "MB" -} - -// SetBucketQuota sets the quota for a bucket -func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error { - return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // Get the current bucket entry - lookupResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: "/buckets", - Name: bucketName, - }) - if err != nil { - return fmt.Errorf("bucket not found: %w", err) - } - - bucketEntry := lookupResp.Entry - - // Determine quota value (negative if disabled) - var quota int64 - if quotaEnabled && quotaBytes > 0 { - quota = quotaBytes - } else if !quotaEnabled && quotaBytes > 0 { - quota = -quotaBytes - } else { - quota = 0 - } - - // Update the quota - bucketEntry.Quota = quota - - // Update the entry - _, err = client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{ - Directory: "/buckets", - Entry: bucketEntry, - }) - if err != nil { - return fmt.Errorf("failed to update bucket quota: %w", err) - } - - return nil - }) -} - -// CreateS3BucketWithQuota creates a new S3 bucket with quota settings -func (s *AdminServer) CreateS3BucketWithQuota(bucketName string, quotaBytes int64, quotaEnabled bool) error { - return s.CreateS3BucketWithObjectLock(bucketName, quotaBytes, quotaEnabled, false, false, "", false, 0) -} - -// CreateS3BucketWithObjectLock creates a new S3 bucket with quota, versioning, and object lock settings -func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes int64, quotaEnabled, versioningEnabled, objectLockEnabled bool, objectLockMode string, setDefaultRetention bool, objectLockDuration int32) error { - return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // First ensure /buckets directory exists - _, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ - Directory: "/", - Entry: &filer_pb.Entry{ - Name: "buckets", - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - FileMode: uint32(0755 | os.ModeDir), // Directory mode - Uid: uint32(1000), - Gid: uint32(1000), - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - TtlSec: 0, - }, - }, - }) - // Ignore error if directory already exists - if err != nil && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "existing entry") { - return fmt.Errorf("failed to create /buckets directory: %w", err) - } - - // Check if bucket already exists - _, err = client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: "/buckets", - Name: bucketName, - }) - if err == nil { - return fmt.Errorf("bucket %s already exists", bucketName) - } - - // Determine quota value (negative if disabled) - var quota int64 - if quotaEnabled && quotaBytes > 0 { - quota = quotaBytes - } else if !quotaEnabled && quotaBytes > 0 { - quota = -quotaBytes - } else { - quota = 0 - } - - // Prepare bucket attributes with versioning and object lock metadata - attributes := &filer_pb.FuseAttributes{ - FileMode: uint32(0755 | os.ModeDir), // Directory mode - Uid: filer_pb.OS_UID, - Gid: filer_pb.OS_GID, - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - TtlSec: 0, - } - - // Create extended attributes map for versioning - extended := make(map[string][]byte) - - // Create bucket entry - bucketEntry := &filer_pb.Entry{ - Name: bucketName, - IsDirectory: true, - Attributes: attributes, - Extended: extended, - Quota: quota, - } - - // Handle versioning using shared utilities - if err := s3api.StoreVersioningInExtended(bucketEntry, versioningEnabled); err != nil { - return fmt.Errorf("failed to store versioning configuration: %w", err) - } - - // Handle Object Lock configuration using shared utilities - if objectLockEnabled { - var duration int32 = 0 - if setDefaultRetention { - // Validate Object Lock parameters only when setting default retention - if err := s3api.ValidateObjectLockParameters(objectLockEnabled, objectLockMode, objectLockDuration); err != nil { - return fmt.Errorf("invalid Object Lock parameters: %w", err) - } - duration = objectLockDuration - } - - // Create Object Lock configuration using shared utility - objectLockConfig := s3api.CreateObjectLockConfigurationFromParams(objectLockEnabled, objectLockMode, duration) - - // Store Object Lock configuration in extended attributes using shared utility - if err := s3api.StoreObjectLockConfigurationInExtended(bucketEntry, objectLockConfig); err != nil { - return fmt.Errorf("failed to store Object Lock configuration: %w", err) - } - } - - // Create bucket directory under /buckets - _, err = client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ - Directory: "/buckets", - Entry: bucketEntry, - }) - if err != nil { - return fmt.Errorf("failed to create bucket directory: %w", err) - } - - return nil - }) -} diff --git a/weed/admin/dash/client_management.go b/weed/admin/dash/client_management.go deleted file mode 100644 index 974d996fc..000000000 --- a/weed/admin/dash/client_management.go +++ /dev/null @@ -1,93 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" -) - -// WithMasterClient executes a function with a master client connection -func (s *AdminServer) WithMasterClient(f func(client master_pb.SeaweedClient) error) error { - return s.masterClient.WithClient(false, f) -} - -// WithFilerClient executes a function with a filer client connection -func (s *AdminServer) WithFilerClient(f func(client filer_pb.SeaweedFilerClient) error) error { - filerAddr := s.GetFilerAddress() - if filerAddr == "" { - return fmt.Errorf("no filer available") - } - - return pb.WithGrpcFilerClient(false, 0, pb.ServerAddress(filerAddr), s.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - return f(client) - }) -} - -// WithVolumeServerClient executes a function with a volume server client connection -func (s *AdminServer) WithVolumeServerClient(address pb.ServerAddress, f func(client volume_server_pb.VolumeServerClient) error) error { - return operation.WithVolumeServerClient(false, address, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - return f(client) - }) -} - -// GetFilerAddress returns a filer address, discovering from masters if needed -func (s *AdminServer) GetFilerAddress() string { - // Discover filers from masters - filers := s.getDiscoveredFilers() - if len(filers) > 0 { - return filers[0] // Return the first available filer - } - - return "" -} - -// getDiscoveredFilers returns cached filers or discovers them from masters -func (s *AdminServer) getDiscoveredFilers() []string { - // Check if cache is still valid - if time.Since(s.lastFilerUpdate) < s.filerCacheExpiration && len(s.cachedFilers) > 0 { - return s.cachedFilers - } - - // Discover filers from masters - var filers []string - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - return err - } - - for _, node := range resp.ClusterNodes { - filers = append(filers, node.Address) - } - - return nil - }) - - if err != nil { - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Warningf("Failed to discover filers from master %s: %v", currentMaster, err) - // Return cached filers even if expired, better than nothing - return s.cachedFilers - } - - // Update cache - s.cachedFilers = filers - s.lastFilerUpdate = time.Now() - - return filers -} - -// GetAllFilers returns all discovered filers -func (s *AdminServer) GetAllFilers() []string { - return s.getDiscoveredFilers() -} diff --git a/weed/admin/dash/cluster_topology.go b/weed/admin/dash/cluster_topology.go deleted file mode 100644 index 8c25cc2ac..000000000 --- a/weed/admin/dash/cluster_topology.go +++ /dev/null @@ -1,127 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// GetClusterTopology returns the current cluster topology with caching -func (s *AdminServer) GetClusterTopology() (*ClusterTopology, error) { - now := time.Now() - if s.cachedTopology != nil && now.Sub(s.lastCacheUpdate) < s.cacheExpiration { - return s.cachedTopology, nil - } - - topology := &ClusterTopology{ - UpdatedAt: now, - } - - // Use gRPC only - err := s.getTopologyViaGRPC(topology) - if err != nil { - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Errorf("Failed to connect to master server %s: %v", currentMaster, err) - return nil, fmt.Errorf("gRPC topology request failed: %w", err) - } - - // Cache the result - s.cachedTopology = topology - s.lastCacheUpdate = now - - return topology, nil -} - -// getTopologyViaGRPC gets topology using gRPC (original method) -func (s *AdminServer) getTopologyViaGRPC(topology *ClusterTopology) error { - // Get cluster status from master - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - currentMaster := s.masterClient.GetMaster(context.Background()) - glog.Errorf("Failed to get volume list from master %s: %v", currentMaster, err) - return err - } - - if resp.TopologyInfo != nil { - // Process gRPC response - for _, dc := range resp.TopologyInfo.DataCenterInfos { - dataCenter := DataCenter{ - ID: dc.Id, - Racks: []Rack{}, - } - - for _, rack := range dc.RackInfos { - rackObj := Rack{ - ID: rack.Id, - Nodes: []VolumeServer{}, - } - - for _, node := range rack.DataNodeInfos { - // Calculate totals from disk infos - var totalVolumes int64 - var totalMaxVolumes int64 - var totalSize int64 - var totalFiles int64 - - for _, diskInfo := range node.DiskInfos { - totalVolumes += diskInfo.VolumeCount - totalMaxVolumes += diskInfo.MaxVolumeCount - - // Sum up individual volume information - for _, volInfo := range diskInfo.VolumeInfos { - totalSize += int64(volInfo.Size) - totalFiles += int64(volInfo.FileCount) - } - - // Sum up EC shard sizes - for _, ecShardInfo := range diskInfo.EcShardInfos { - for _, shardSize := range ecShardInfo.ShardSizes { - totalSize += shardSize - } - } - } - - vs := VolumeServer{ - ID: node.Id, - Address: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - PublicURL: node.Id, - Volumes: int(totalVolumes), - MaxVolumes: int(totalMaxVolumes), - DiskUsage: totalSize, - DiskCapacity: totalMaxVolumes * int64(resp.VolumeSizeLimitMb) * 1024 * 1024, - LastHeartbeat: time.Now(), - } - - rackObj.Nodes = append(rackObj.Nodes, vs) - topology.VolumeServers = append(topology.VolumeServers, vs) - topology.TotalVolumes += vs.Volumes - topology.TotalFiles += totalFiles - topology.TotalSize += totalSize - } - - dataCenter.Racks = append(dataCenter.Racks, rackObj) - } - - topology.DataCenters = append(topology.DataCenters, dataCenter) - } - } - - return nil - }) - - return err -} - -// InvalidateCache forces a refresh of cached data -func (s *AdminServer) InvalidateCache() { - s.lastCacheUpdate = time.Time{} - s.cachedTopology = nil - s.lastFilerUpdate = time.Time{} - s.cachedFilers = nil -} diff --git a/weed/admin/dash/collection_management.go b/weed/admin/dash/collection_management.go deleted file mode 100644 index 03c1e452b..000000000 --- a/weed/admin/dash/collection_management.go +++ /dev/null @@ -1,385 +0,0 @@ -package dash - -import ( - "context" - "sort" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// GetClusterCollections retrieves cluster collections data -func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) { - var collections []CollectionInfo - var totalVolumes int - var totalEcVolumes int - var totalFiles int64 - var totalSize int64 - collectionMap := make(map[string]*CollectionInfo) - - // Get actual collection information from volume data - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - // Process regular volumes - for _, volInfo := range diskInfo.VolumeInfos { - // Extract collection name from volume info - collectionName := volInfo.Collection - if collectionName == "" { - collectionName = "default" // Default collection for volumes without explicit collection - } - - // Get disk type from volume info, default to hdd if empty - diskType := volInfo.DiskType - if diskType == "" { - diskType = "hdd" - } - - // Get or create collection info - if collection, exists := collectionMap[collectionName]; exists { - collection.VolumeCount++ - collection.FileCount += int64(volInfo.FileCount) - collection.TotalSize += int64(volInfo.Size) - - // Update data center if this collection spans multiple DCs - if collection.DataCenter != dc.Id && collection.DataCenter != "multi" { - collection.DataCenter = "multi" - } - - // Add disk type if not already present - diskTypeExists := false - for _, existingDiskType := range collection.DiskTypes { - if existingDiskType == diskType { - diskTypeExists = true - break - } - } - if !diskTypeExists { - collection.DiskTypes = append(collection.DiskTypes, diskType) - } - - totalVolumes++ - totalFiles += int64(volInfo.FileCount) - totalSize += int64(volInfo.Size) - } else { - newCollection := CollectionInfo{ - Name: collectionName, - DataCenter: dc.Id, - VolumeCount: 1, - EcVolumeCount: 0, - FileCount: int64(volInfo.FileCount), - TotalSize: int64(volInfo.Size), - DiskTypes: []string{diskType}, - } - collectionMap[collectionName] = &newCollection - totalVolumes++ - totalFiles += int64(volInfo.FileCount) - totalSize += int64(volInfo.Size) - } - } - - // Process EC volumes - ecVolumeMap := make(map[uint32]bool) // Track unique EC volumes to avoid double counting - for _, ecShardInfo := range diskInfo.EcShardInfos { - // Extract collection name from EC shard info - collectionName := ecShardInfo.Collection - if collectionName == "" { - collectionName = "default" // Default collection for EC volumes without explicit collection - } - - // Only count each EC volume once (not per shard) - if !ecVolumeMap[ecShardInfo.Id] { - ecVolumeMap[ecShardInfo.Id] = true - - // Get disk type from disk info, default to hdd if empty - diskType := diskInfo.Type - if diskType == "" { - diskType = "hdd" - } - - // Get or create collection info - if collection, exists := collectionMap[collectionName]; exists { - collection.EcVolumeCount++ - - // Update data center if this collection spans multiple DCs - if collection.DataCenter != dc.Id && collection.DataCenter != "multi" { - collection.DataCenter = "multi" - } - - // Add disk type if not already present - diskTypeExists := false - for _, existingDiskType := range collection.DiskTypes { - if existingDiskType == diskType { - diskTypeExists = true - break - } - } - if !diskTypeExists { - collection.DiskTypes = append(collection.DiskTypes, diskType) - } - - totalEcVolumes++ - } else { - newCollection := CollectionInfo{ - Name: collectionName, - DataCenter: dc.Id, - VolumeCount: 0, - EcVolumeCount: 1, - FileCount: 0, - TotalSize: 0, - DiskTypes: []string{diskType}, - } - collectionMap[collectionName] = &newCollection - totalEcVolumes++ - } - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Convert map to slice - for _, collection := range collectionMap { - collections = append(collections, *collection) - } - - // Sort collections alphabetically by name - sort.Slice(collections, func(i, j int) bool { - return collections[i].Name < collections[j].Name - }) - - // If no collections found, show a message indicating no collections exist - if len(collections) == 0 { - // Return empty collections data instead of creating fake ones - return &ClusterCollectionsData{ - Collections: []CollectionInfo{}, - TotalCollections: 0, - TotalVolumes: 0, - TotalEcVolumes: 0, - TotalFiles: 0, - TotalSize: 0, - LastUpdated: time.Now(), - }, nil - } - - return &ClusterCollectionsData{ - Collections: collections, - TotalCollections: len(collections), - TotalVolumes: totalVolumes, - TotalEcVolumes: totalEcVolumes, - TotalFiles: totalFiles, - TotalSize: totalSize, - LastUpdated: time.Now(), - }, nil -} - -// GetCollectionDetails retrieves detailed information for a specific collection including volumes and EC volumes -func (s *AdminServer) GetCollectionDetails(collectionName string, page int, pageSize int, sortBy string, sortOrder string) (*CollectionDetailsData, error) { - // Set defaults - if page < 1 { - page = 1 - } - if pageSize < 1 || pageSize > 1000 { - pageSize = 25 - } - if sortBy == "" { - sortBy = "volume_id" - } - if sortOrder == "" { - sortOrder = "asc" - } - - var regularVolumes []VolumeWithTopology - var ecVolumes []EcVolumeWithShards - var totalFiles int64 - var totalSize int64 - dataCenters := make(map[string]bool) - diskTypes := make(map[string]bool) - - // Get regular volumes for this collection - regularVolumeData, err := s.GetClusterVolumes(1, 10000, "volume_id", "asc", collectionName) // Get all volumes - if err != nil { - return nil, err - } - - regularVolumes = regularVolumeData.Volumes - totalSize = regularVolumeData.TotalSize - - // Calculate total files from regular volumes - for _, vol := range regularVolumes { - totalFiles += int64(vol.FileCount) - } - - // Collect data centers and disk types from regular volumes - for _, vol := range regularVolumes { - dataCenters[vol.DataCenter] = true - diskTypes[vol.DiskType] = true - } - - // Get EC volumes for this collection - ecVolumeData, err := s.GetClusterEcVolumes(1, 10000, "volume_id", "asc", collectionName) // Get all EC volumes - if err != nil { - return nil, err - } - - ecVolumes = ecVolumeData.EcVolumes - - // Collect data centers from EC volumes - for _, ecVol := range ecVolumes { - for _, dc := range ecVol.DataCenters { - dataCenters[dc] = true - } - } - - // Combine all volumes for sorting and pagination - type VolumeForSorting struct { - Type string // "regular" or "ec" - RegularVolume *VolumeWithTopology - EcVolume *EcVolumeWithShards - } - - var allVolumes []VolumeForSorting - for i := range regularVolumes { - allVolumes = append(allVolumes, VolumeForSorting{ - Type: "regular", - RegularVolume: ®ularVolumes[i], - }) - } - for i := range ecVolumes { - allVolumes = append(allVolumes, VolumeForSorting{ - Type: "ec", - EcVolume: &ecVolumes[i], - }) - } - - // Sort all volumes - sort.Slice(allVolumes, func(i, j int) bool { - var less bool - switch sortBy { - case "volume_id": - var idI, idJ uint32 - if allVolumes[i].Type == "regular" { - idI = allVolumes[i].RegularVolume.Id - } else { - idI = allVolumes[i].EcVolume.VolumeID - } - if allVolumes[j].Type == "regular" { - idJ = allVolumes[j].RegularVolume.Id - } else { - idJ = allVolumes[j].EcVolume.VolumeID - } - less = idI < idJ - case "type": - // Sort by type first (regular before ec), then by volume ID - if allVolumes[i].Type == allVolumes[j].Type { - var idI, idJ uint32 - if allVolumes[i].Type == "regular" { - idI = allVolumes[i].RegularVolume.Id - } else { - idI = allVolumes[i].EcVolume.VolumeID - } - if allVolumes[j].Type == "regular" { - idJ = allVolumes[j].RegularVolume.Id - } else { - idJ = allVolumes[j].EcVolume.VolumeID - } - less = idI < idJ - } else { - less = allVolumes[i].Type < allVolumes[j].Type // "ec" < "regular" - } - default: - // Default to volume ID sort - var idI, idJ uint32 - if allVolumes[i].Type == "regular" { - idI = allVolumes[i].RegularVolume.Id - } else { - idI = allVolumes[i].EcVolume.VolumeID - } - if allVolumes[j].Type == "regular" { - idJ = allVolumes[j].RegularVolume.Id - } else { - idJ = allVolumes[j].EcVolume.VolumeID - } - less = idI < idJ - } - - if sortOrder == "desc" { - return !less - } - return less - }) - - // Apply pagination - totalVolumesAndEc := len(allVolumes) - totalPages := (totalVolumesAndEc + pageSize - 1) / pageSize - startIndex := (page - 1) * pageSize - endIndex := startIndex + pageSize - if endIndex > totalVolumesAndEc { - endIndex = totalVolumesAndEc - } - - if startIndex >= totalVolumesAndEc { - startIndex = 0 - endIndex = 0 - } - - // Extract paginated results - var paginatedRegularVolumes []VolumeWithTopology - var paginatedEcVolumes []EcVolumeWithShards - - for i := startIndex; i < endIndex; i++ { - if allVolumes[i].Type == "regular" { - paginatedRegularVolumes = append(paginatedRegularVolumes, *allVolumes[i].RegularVolume) - } else { - paginatedEcVolumes = append(paginatedEcVolumes, *allVolumes[i].EcVolume) - } - } - - // Convert maps to slices - var dcList []string - for dc := range dataCenters { - dcList = append(dcList, dc) - } - sort.Strings(dcList) - - var diskTypeList []string - for diskType := range diskTypes { - diskTypeList = append(diskTypeList, diskType) - } - sort.Strings(diskTypeList) - - return &CollectionDetailsData{ - CollectionName: collectionName, - RegularVolumes: paginatedRegularVolumes, - EcVolumes: paginatedEcVolumes, - TotalVolumes: len(regularVolumes), - TotalEcVolumes: len(ecVolumes), - TotalFiles: totalFiles, - TotalSize: totalSize, - DataCenters: dcList, - DiskTypes: diskTypeList, - LastUpdated: time.Now(), - Page: page, - PageSize: pageSize, - TotalPages: totalPages, - SortBy: sortBy, - SortOrder: sortOrder, - }, nil -} diff --git a/weed/admin/dash/config_persistence.go b/weed/admin/dash/config_persistence.go deleted file mode 100644 index 1fe1a9b42..000000000 --- a/weed/admin/dash/config_persistence.go +++ /dev/null @@ -1,1235 +0,0 @@ -package dash - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" -) - -const ( - // Configuration subdirectory - ConfigSubdir = "conf" - - // Configuration file names (protobuf binary) - MaintenanceConfigFile = "maintenance.pb" - VacuumTaskConfigFile = "task_vacuum.pb" - ECTaskConfigFile = "task_erasure_coding.pb" - BalanceTaskConfigFile = "task_balance.pb" - ReplicationTaskConfigFile = "task_replication.pb" - - // JSON reference files - MaintenanceConfigJSONFile = "maintenance.json" - VacuumTaskConfigJSONFile = "task_vacuum.json" - ECTaskConfigJSONFile = "task_erasure_coding.json" - BalanceTaskConfigJSONFile = "task_balance.json" - ReplicationTaskConfigJSONFile = "task_replication.json" - - // Task persistence subdirectories and settings - TasksSubdir = "tasks" - TaskDetailsSubdir = "task_details" - TaskLogsSubdir = "task_logs" - MaxCompletedTasks = 10 // Only keep last 10 completed tasks - - ConfigDirPermissions = 0755 - ConfigFilePermissions = 0644 -) - -// Task configuration types -type ( - VacuumTaskConfig = worker_pb.VacuumTaskConfig - ErasureCodingTaskConfig = worker_pb.ErasureCodingTaskConfig - BalanceTaskConfig = worker_pb.BalanceTaskConfig - ReplicationTaskConfig = worker_pb.ReplicationTaskConfig -) - -// isValidTaskID validates that a task ID is safe for use in file paths -// This prevents path traversal attacks by ensuring the task ID doesn't contain -// path separators or parent directory references -func isValidTaskID(taskID string) bool { - if taskID == "" { - return false - } - - // Reject task IDs with leading or trailing whitespace - if strings.TrimSpace(taskID) != taskID { - return false - } - - // Check for path traversal patterns - if strings.Contains(taskID, "/") || - strings.Contains(taskID, "\\") || - strings.Contains(taskID, "..") || - strings.Contains(taskID, ":") { - return false - } - - // Additional safety: ensure it's not just dots or empty after trim - if taskID == "." || taskID == ".." { - return false - } - - return true -} - -// ConfigPersistence handles saving and loading configuration files -type ConfigPersistence struct { - dataDir string -} - -// NewConfigPersistence creates a new configuration persistence manager -func NewConfigPersistence(dataDir string) *ConfigPersistence { - return &ConfigPersistence{ - dataDir: dataDir, - } -} - -// SaveMaintenanceConfig saves maintenance configuration to protobuf file and JSON reference -func (cp *ConfigPersistence) SaveMaintenanceConfig(config *MaintenanceConfig) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot save configuration") - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - if err := os.MkdirAll(confDir, ConfigDirPermissions); err != nil { - return fmt.Errorf("failed to create config directory: %w", err) - } - - // Save as protobuf (primary format) - pbConfigPath := filepath.Join(confDir, MaintenanceConfigFile) - pbData, err := proto.Marshal(config) - if err != nil { - return fmt.Errorf("failed to marshal maintenance config to protobuf: %w", err) - } - - if err := os.WriteFile(pbConfigPath, pbData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write protobuf config file: %w", err) - } - - // Save JSON reference copy for debugging - jsonConfigPath := filepath.Join(confDir, MaintenanceConfigJSONFile) - jsonData, err := protojson.MarshalOptions{ - Multiline: true, - Indent: " ", - EmitUnpopulated: true, - }.Marshal(config) - if err != nil { - return fmt.Errorf("failed to marshal maintenance config to JSON: %w", err) - } - - if err := os.WriteFile(jsonConfigPath, jsonData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write JSON reference file: %w", err) - } - - return nil -} - -// LoadMaintenanceConfig loads maintenance configuration from protobuf file -func (cp *ConfigPersistence) LoadMaintenanceConfig() (*MaintenanceConfig, error) { - if cp.dataDir == "" { - return DefaultMaintenanceConfig(), nil - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, MaintenanceConfigFile) - - // Try to load from protobuf file - if configData, err := os.ReadFile(configPath); err == nil { - var config MaintenanceConfig - if err := proto.Unmarshal(configData, &config); err == nil { - // Always populate policy from separate task configuration files - config.Policy = buildPolicyFromTaskConfigs() - return &config, nil - } - } - - // File doesn't exist or failed to load, use defaults - return DefaultMaintenanceConfig(), nil -} - -// GetConfigPath returns the path to a configuration file -func (cp *ConfigPersistence) GetConfigPath(filename string) string { - if cp.dataDir == "" { - return "" - } - - // All configs go in conf subdirectory - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - return filepath.Join(confDir, filename) -} - -// ListConfigFiles returns all configuration files in the conf subdirectory -func (cp *ConfigPersistence) ListConfigFiles() ([]string, error) { - if cp.dataDir == "" { - return nil, fmt.Errorf("no data directory specified") - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - files, err := os.ReadDir(confDir) - if err != nil { - // If conf directory doesn't exist, return empty list - if os.IsNotExist(err) { - return []string{}, nil - } - return nil, fmt.Errorf("failed to read config directory: %w", err) - } - - var configFiles []string - for _, file := range files { - if !file.IsDir() { - ext := filepath.Ext(file.Name()) - if ext == ".json" || ext == ".pb" { - configFiles = append(configFiles, file.Name()) - } - } - } - - return configFiles, nil -} - -// BackupConfig creates a backup of a configuration file -func (cp *ConfigPersistence) BackupConfig(filename string) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified") - } - - configPath := cp.GetConfigPath(filename) - if _, err := os.Stat(configPath); os.IsNotExist(err) { - return fmt.Errorf("config file does not exist: %s", filename) - } - - // Create backup filename with timestamp - timestamp := time.Now().Format("2006-01-02_15-04-05") - backupName := fmt.Sprintf("%s.backup_%s", filename, timestamp) - - // Determine backup directory (conf subdirectory) - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - backupPath := filepath.Join(confDir, backupName) - - // Copy file - configData, err := os.ReadFile(configPath) - if err != nil { - return fmt.Errorf("failed to read config file: %w", err) - } - - if err := os.WriteFile(backupPath, configData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to create backup: %w", err) - } - - glog.V(1).Infof("Created backup of %s as %s", filename, backupName) - return nil -} - -// RestoreConfig restores a configuration file from a backup -func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified") - } - - // Determine backup path (conf subdirectory) - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - backupPath := filepath.Join(confDir, backupName) - - if _, err := os.Stat(backupPath); os.IsNotExist(err) { - return fmt.Errorf("backup file does not exist: %s", backupName) - } - - // Read backup file - backupData, err := os.ReadFile(backupPath) - if err != nil { - return fmt.Errorf("failed to read backup file: %w", err) - } - - // Write to config file - configPath := cp.GetConfigPath(filename) - if err := os.WriteFile(configPath, backupData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to restore config: %w", err) - } - - glog.V(1).Infof("Restored %s from backup %s", filename, backupName) - return nil -} - -// SaveVacuumTaskConfig saves vacuum task configuration to protobuf file -func (cp *ConfigPersistence) SaveVacuumTaskConfig(config *VacuumTaskConfig) error { - return cp.saveTaskConfig(VacuumTaskConfigFile, config) -} - -// SaveVacuumTaskPolicy saves complete vacuum task policy to protobuf file -func (cp *ConfigPersistence) SaveVacuumTaskPolicy(policy *worker_pb.TaskPolicy) error { - return cp.saveTaskConfig(VacuumTaskConfigFile, policy) -} - -// LoadVacuumTaskConfig loads vacuum task configuration from protobuf file -func (cp *ConfigPersistence) LoadVacuumTaskConfig() (*VacuumTaskConfig, error) { - // Load as TaskPolicy and extract vacuum config - if taskPolicy, err := cp.LoadVacuumTaskPolicy(); err == nil && taskPolicy != nil { - if vacuumConfig := taskPolicy.GetVacuumConfig(); vacuumConfig != nil { - return vacuumConfig, nil - } - } - - // Return default config if no valid config found - return &VacuumTaskConfig{ - GarbageThreshold: 0.3, - MinVolumeAgeHours: 24, - MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days - }, nil -} - -// LoadVacuumTaskPolicy loads complete vacuum task policy from protobuf file -func (cp *ConfigPersistence) LoadVacuumTaskPolicy() (*worker_pb.TaskPolicy, error) { - if cp.dataDir == "" { - // Return default policy if no data directory - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 2, - RepeatIntervalSeconds: 24 * 3600, // 24 hours in seconds - CheckIntervalSeconds: 6 * 3600, // 6 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: &worker_pb.VacuumTaskConfig{ - GarbageThreshold: 0.3, - MinVolumeAgeHours: 24, - MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days - }, - }, - }, nil - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, VacuumTaskConfigFile) - - // Check if file exists - if _, err := os.Stat(configPath); os.IsNotExist(err) { - // Return default policy if file doesn't exist - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 2, - RepeatIntervalSeconds: 24 * 3600, // 24 hours in seconds - CheckIntervalSeconds: 6 * 3600, // 6 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: &worker_pb.VacuumTaskConfig{ - GarbageThreshold: 0.3, - MinVolumeAgeHours: 24, - MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days - }, - }, - }, nil - } - - // Read file - configData, err := os.ReadFile(configPath) - if err != nil { - return nil, fmt.Errorf("failed to read vacuum task config file: %w", err) - } - - // Try to unmarshal as TaskPolicy - var policy worker_pb.TaskPolicy - if err := proto.Unmarshal(configData, &policy); err == nil { - // Validate that it's actually a TaskPolicy with vacuum config - if policy.GetVacuumConfig() != nil { - glog.V(1).Infof("Loaded vacuum task policy from %s", configPath) - return &policy, nil - } - } - - return nil, fmt.Errorf("failed to unmarshal vacuum task configuration") -} - -// SaveErasureCodingTaskConfig saves EC task configuration to protobuf file -func (cp *ConfigPersistence) SaveErasureCodingTaskConfig(config *ErasureCodingTaskConfig) error { - return cp.saveTaskConfig(ECTaskConfigFile, config) -} - -// SaveErasureCodingTaskPolicy saves complete EC task policy to protobuf file -func (cp *ConfigPersistence) SaveErasureCodingTaskPolicy(policy *worker_pb.TaskPolicy) error { - return cp.saveTaskConfig(ECTaskConfigFile, policy) -} - -// LoadErasureCodingTaskConfig loads EC task configuration from protobuf file -func (cp *ConfigPersistence) LoadErasureCodingTaskConfig() (*ErasureCodingTaskConfig, error) { - // Load as TaskPolicy and extract EC config - if taskPolicy, err := cp.LoadErasureCodingTaskPolicy(); err == nil && taskPolicy != nil { - if ecConfig := taskPolicy.GetErasureCodingConfig(); ecConfig != nil { - return ecConfig, nil - } - } - - // Return default config if no valid config found - return &ErasureCodingTaskConfig{ - FullnessRatio: 0.9, - QuietForSeconds: 3600, - MinVolumeSizeMb: 1024, - CollectionFilter: "", - }, nil -} - -// LoadErasureCodingTaskPolicy loads complete EC task policy from protobuf file -func (cp *ConfigPersistence) LoadErasureCodingTaskPolicy() (*worker_pb.TaskPolicy, error) { - if cp.dataDir == "" { - // Return default policy if no data directory - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 1, - RepeatIntervalSeconds: 168 * 3600, // 1 week in seconds - CheckIntervalSeconds: 24 * 3600, // 24 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{ - ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{ - FullnessRatio: 0.9, - QuietForSeconds: 3600, - MinVolumeSizeMb: 1024, - CollectionFilter: "", - }, - }, - }, nil - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, ECTaskConfigFile) - - // Check if file exists - if _, err := os.Stat(configPath); os.IsNotExist(err) { - // Return default policy if file doesn't exist - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 1, - RepeatIntervalSeconds: 168 * 3600, // 1 week in seconds - CheckIntervalSeconds: 24 * 3600, // 24 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{ - ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{ - FullnessRatio: 0.9, - QuietForSeconds: 3600, - MinVolumeSizeMb: 1024, - CollectionFilter: "", - }, - }, - }, nil - } - - // Read file - configData, err := os.ReadFile(configPath) - if err != nil { - return nil, fmt.Errorf("failed to read EC task config file: %w", err) - } - - // Try to unmarshal as TaskPolicy - var policy worker_pb.TaskPolicy - if err := proto.Unmarshal(configData, &policy); err == nil { - // Validate that it's actually a TaskPolicy with EC config - if policy.GetErasureCodingConfig() != nil { - glog.V(1).Infof("Loaded EC task policy from %s", configPath) - return &policy, nil - } - } - - return nil, fmt.Errorf("failed to unmarshal EC task configuration") -} - -// SaveBalanceTaskConfig saves balance task configuration to protobuf file -func (cp *ConfigPersistence) SaveBalanceTaskConfig(config *BalanceTaskConfig) error { - return cp.saveTaskConfig(BalanceTaskConfigFile, config) -} - -// SaveBalanceTaskPolicy saves complete balance task policy to protobuf file -func (cp *ConfigPersistence) SaveBalanceTaskPolicy(policy *worker_pb.TaskPolicy) error { - return cp.saveTaskConfig(BalanceTaskConfigFile, policy) -} - -// LoadBalanceTaskConfig loads balance task configuration from protobuf file -func (cp *ConfigPersistence) LoadBalanceTaskConfig() (*BalanceTaskConfig, error) { - // Load as TaskPolicy and extract balance config - if taskPolicy, err := cp.LoadBalanceTaskPolicy(); err == nil && taskPolicy != nil { - if balanceConfig := taskPolicy.GetBalanceConfig(); balanceConfig != nil { - return balanceConfig, nil - } - } - - // Return default config if no valid config found - return &BalanceTaskConfig{ - ImbalanceThreshold: 0.1, - MinServerCount: 2, - }, nil -} - -// LoadBalanceTaskPolicy loads complete balance task policy from protobuf file -func (cp *ConfigPersistence) LoadBalanceTaskPolicy() (*worker_pb.TaskPolicy, error) { - if cp.dataDir == "" { - // Return default policy if no data directory - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 1, - RepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds - CheckIntervalSeconds: 12 * 3600, // 12 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{ - BalanceConfig: &worker_pb.BalanceTaskConfig{ - ImbalanceThreshold: 0.1, - MinServerCount: 2, - }, - }, - }, nil - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, BalanceTaskConfigFile) - - // Check if file exists - if _, err := os.Stat(configPath); os.IsNotExist(err) { - // Return default policy if file doesn't exist - return &worker_pb.TaskPolicy{ - Enabled: true, - MaxConcurrent: 1, - RepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds - CheckIntervalSeconds: 12 * 3600, // 12 hours in seconds - TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{ - BalanceConfig: &worker_pb.BalanceTaskConfig{ - ImbalanceThreshold: 0.1, - MinServerCount: 2, - }, - }, - }, nil - } - - // Read file - configData, err := os.ReadFile(configPath) - if err != nil { - return nil, fmt.Errorf("failed to read balance task config file: %w", err) - } - - // Try to unmarshal as TaskPolicy - var policy worker_pb.TaskPolicy - if err := proto.Unmarshal(configData, &policy); err == nil { - // Validate that it's actually a TaskPolicy with balance config - if policy.GetBalanceConfig() != nil { - glog.V(1).Infof("Loaded balance task policy from %s", configPath) - return &policy, nil - } - } - - return nil, fmt.Errorf("failed to unmarshal balance task configuration") -} - -// SaveReplicationTaskConfig saves replication task configuration to protobuf file -func (cp *ConfigPersistence) SaveReplicationTaskConfig(config *ReplicationTaskConfig) error { - return cp.saveTaskConfig(ReplicationTaskConfigFile, config) -} - -// LoadReplicationTaskConfig loads replication task configuration from protobuf file -func (cp *ConfigPersistence) LoadReplicationTaskConfig() (*ReplicationTaskConfig, error) { - var config ReplicationTaskConfig - err := cp.loadTaskConfig(ReplicationTaskConfigFile, &config) - if err != nil { - // Return default config if file doesn't exist - if os.IsNotExist(err) { - return &ReplicationTaskConfig{ - TargetReplicaCount: 1, - }, nil - } - return nil, err - } - return &config, nil -} - -// saveTaskConfig is a generic helper for saving task configurations with both protobuf and JSON reference -func (cp *ConfigPersistence) saveTaskConfig(filename string, config proto.Message) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot save task configuration") - } - - // Create conf subdirectory path - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, filename) - - // Generate JSON reference filename - jsonFilename := filename[:len(filename)-3] + ".json" // Replace .pb with .json - jsonPath := filepath.Join(confDir, jsonFilename) - - // Create conf directory if it doesn't exist - if err := os.MkdirAll(confDir, ConfigDirPermissions); err != nil { - return fmt.Errorf("failed to create config directory: %w", err) - } - - // Marshal configuration to protobuf binary format - configData, err := proto.Marshal(config) - if err != nil { - return fmt.Errorf("failed to marshal task config: %w", err) - } - - // Write protobuf file - if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write task config file: %w", err) - } - - // Marshal configuration to JSON for reference - marshaler := protojson.MarshalOptions{ - Multiline: true, - Indent: " ", - EmitUnpopulated: true, - } - jsonData, err := marshaler.Marshal(config) - if err != nil { - glog.Warningf("Failed to marshal task config to JSON reference: %v", err) - } else { - // Write JSON reference file - if err := os.WriteFile(jsonPath, jsonData, ConfigFilePermissions); err != nil { - glog.Warningf("Failed to write task config JSON reference: %v", err) - } - } - - glog.V(1).Infof("Saved task configuration to %s (with JSON reference)", configPath) - return nil -} - -// loadTaskConfig is a generic helper for loading task configurations from conf subdirectory -func (cp *ConfigPersistence) loadTaskConfig(filename string, config proto.Message) error { - if cp.dataDir == "" { - return os.ErrNotExist // Will trigger default config return - } - - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - configPath := filepath.Join(confDir, filename) - - // Check if file exists - if _, err := os.Stat(configPath); os.IsNotExist(err) { - return err // Will trigger default config return - } - - // Read file - configData, err := os.ReadFile(configPath) - if err != nil { - return fmt.Errorf("failed to read task config file: %w", err) - } - - // Unmarshal protobuf binary data - if err := proto.Unmarshal(configData, config); err != nil { - return fmt.Errorf("failed to unmarshal task config: %w", err) - } - - glog.V(1).Infof("Loaded task configuration from %s", configPath) - return nil -} - -// GetDataDir returns the data directory path -func (cp *ConfigPersistence) GetDataDir() string { - return cp.dataDir -} - -// IsConfigured returns true if a data directory is configured -func (cp *ConfigPersistence) IsConfigured() bool { - return cp.dataDir != "" -} - -// GetConfigInfo returns information about the configuration storage -func (cp *ConfigPersistence) GetConfigInfo() map[string]interface{} { - info := map[string]interface{}{ - "data_dir_configured": cp.IsConfigured(), - "data_dir": cp.dataDir, - "config_subdir": ConfigSubdir, - } - - if cp.IsConfigured() { - // Check if data directory exists - if _, err := os.Stat(cp.dataDir); err == nil { - info["data_dir_exists"] = true - - // Check if conf subdirectory exists - confDir := filepath.Join(cp.dataDir, ConfigSubdir) - if _, err := os.Stat(confDir); err == nil { - info["conf_dir_exists"] = true - - // List config files - configFiles, err := cp.ListConfigFiles() - if err == nil { - info["config_files"] = configFiles - } - } else { - info["conf_dir_exists"] = false - } - } else { - info["data_dir_exists"] = false - } - } - - return info -} - -// buildPolicyFromTaskConfigs loads task configurations from separate files and builds a MaintenancePolicy -func buildPolicyFromTaskConfigs() *worker_pb.MaintenancePolicy { - policy := &worker_pb.MaintenancePolicy{ - GlobalMaxConcurrent: 4, - DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds - DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds - TaskPolicies: make(map[string]*worker_pb.TaskPolicy), - } - - // Load vacuum task configuration - if vacuumConfig := vacuum.LoadConfigFromPersistence(nil); vacuumConfig != nil { - policy.TaskPolicies["vacuum"] = &worker_pb.TaskPolicy{ - Enabled: vacuumConfig.Enabled, - MaxConcurrent: int32(vacuumConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: &worker_pb.VacuumTaskConfig{ - GarbageThreshold: float64(vacuumConfig.GarbageThreshold), - MinVolumeAgeHours: int32(vacuumConfig.MinVolumeAgeSeconds / 3600), // Convert seconds to hours - MinIntervalSeconds: int32(vacuumConfig.MinIntervalSeconds), - }, - }, - } - } - - // Load erasure coding task configuration - if ecConfig := erasure_coding.LoadConfigFromPersistence(nil); ecConfig != nil { - policy.TaskPolicies["erasure_coding"] = &worker_pb.TaskPolicy{ - Enabled: ecConfig.Enabled, - MaxConcurrent: int32(ecConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(ecConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(ecConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{ - ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{ - FullnessRatio: float64(ecConfig.FullnessRatio), - QuietForSeconds: int32(ecConfig.QuietForSeconds), - MinVolumeSizeMb: int32(ecConfig.MinSizeMB), - CollectionFilter: ecConfig.CollectionFilter, - }, - }, - } - } - - // Load balance task configuration - if balanceConfig := balance.LoadConfigFromPersistence(nil); balanceConfig != nil { - policy.TaskPolicies["balance"] = &worker_pb.TaskPolicy{ - Enabled: balanceConfig.Enabled, - MaxConcurrent: int32(balanceConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{ - BalanceConfig: &worker_pb.BalanceTaskConfig{ - ImbalanceThreshold: float64(balanceConfig.ImbalanceThreshold), - MinServerCount: int32(balanceConfig.MinServerCount), - }, - }, - } - } - - glog.V(1).Infof("Built maintenance policy from separate task configs - %d task policies loaded", len(policy.TaskPolicies)) - return policy -} - -// SaveTaskDetail saves detailed task information to disk -func (cp *ConfigPersistence) SaveTaskDetail(taskID string, detail *maintenance.TaskDetailData) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot save task detail") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskDetailDir := filepath.Join(cp.dataDir, TaskDetailsSubdir) - if err := os.MkdirAll(taskDetailDir, ConfigDirPermissions); err != nil { - return fmt.Errorf("failed to create task details directory: %w", err) - } - - // Save task detail as JSON for easy reading and debugging - taskDetailPath := filepath.Join(taskDetailDir, fmt.Sprintf("%s.json", taskID)) - jsonData, err := json.MarshalIndent(detail, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal task detail to JSON: %w", err) - } - - if err := os.WriteFile(taskDetailPath, jsonData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write task detail file: %w", err) - } - - glog.V(2).Infof("Saved task detail for task %s to %s", taskID, taskDetailPath) - return nil -} - -// LoadTaskDetail loads detailed task information from disk -func (cp *ConfigPersistence) LoadTaskDetail(taskID string) (*maintenance.TaskDetailData, error) { - if cp.dataDir == "" { - return nil, fmt.Errorf("no data directory specified, cannot load task detail") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskDetailPath := filepath.Join(cp.dataDir, TaskDetailsSubdir, fmt.Sprintf("%s.json", taskID)) - if _, err := os.Stat(taskDetailPath); os.IsNotExist(err) { - return nil, fmt.Errorf("task detail file not found: %s", taskID) - } - - jsonData, err := os.ReadFile(taskDetailPath) - if err != nil { - return nil, fmt.Errorf("failed to read task detail file: %w", err) - } - - var detail maintenance.TaskDetailData - if err := json.Unmarshal(jsonData, &detail); err != nil { - return nil, fmt.Errorf("failed to unmarshal task detail JSON: %w", err) - } - - glog.V(2).Infof("Loaded task detail for task %s from %s", taskID, taskDetailPath) - return &detail, nil -} - -// SaveTaskExecutionLogs saves execution logs for a task -func (cp *ConfigPersistence) SaveTaskExecutionLogs(taskID string, logs []*maintenance.TaskExecutionLog) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot save task logs") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskLogsDir := filepath.Join(cp.dataDir, TaskLogsSubdir) - if err := os.MkdirAll(taskLogsDir, ConfigDirPermissions); err != nil { - return fmt.Errorf("failed to create task logs directory: %w", err) - } - - // Save logs as JSON for easy reading - taskLogsPath := filepath.Join(taskLogsDir, fmt.Sprintf("%s.json", taskID)) - logsData := struct { - TaskID string `json:"task_id"` - Logs []*maintenance.TaskExecutionLog `json:"logs"` - }{ - TaskID: taskID, - Logs: logs, - } - jsonData, err := json.MarshalIndent(logsData, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal task logs to JSON: %w", err) - } - - if err := os.WriteFile(taskLogsPath, jsonData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write task logs file: %w", err) - } - - glog.V(2).Infof("Saved %d execution logs for task %s to %s", len(logs), taskID, taskLogsPath) - return nil -} - -// LoadTaskExecutionLogs loads execution logs for a task -func (cp *ConfigPersistence) LoadTaskExecutionLogs(taskID string) ([]*maintenance.TaskExecutionLog, error) { - if cp.dataDir == "" { - return nil, fmt.Errorf("no data directory specified, cannot load task logs") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskLogsPath := filepath.Join(cp.dataDir, TaskLogsSubdir, fmt.Sprintf("%s.json", taskID)) - if _, err := os.Stat(taskLogsPath); os.IsNotExist(err) { - // Return empty slice if logs don't exist yet - return []*maintenance.TaskExecutionLog{}, nil - } - - jsonData, err := os.ReadFile(taskLogsPath) - if err != nil { - return nil, fmt.Errorf("failed to read task logs file: %w", err) - } - - var logsData struct { - TaskID string `json:"task_id"` - Logs []*maintenance.TaskExecutionLog `json:"logs"` - } - if err := json.Unmarshal(jsonData, &logsData); err != nil { - return nil, fmt.Errorf("failed to unmarshal task logs JSON: %w", err) - } - - glog.V(2).Infof("Loaded %d execution logs for task %s from %s", len(logsData.Logs), taskID, taskLogsPath) - return logsData.Logs, nil -} - -// DeleteTaskDetail removes task detail and logs from disk -func (cp *ConfigPersistence) DeleteTaskDetail(taskID string) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot delete task detail") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - // Delete task detail file - taskDetailPath := filepath.Join(cp.dataDir, TaskDetailsSubdir, fmt.Sprintf("%s.json", taskID)) - if err := os.Remove(taskDetailPath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to delete task detail file: %w", err) - } - - // Delete task logs file - taskLogsPath := filepath.Join(cp.dataDir, TaskLogsSubdir, fmt.Sprintf("%s.json", taskID)) - if err := os.Remove(taskLogsPath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to delete task logs file: %w", err) - } - - glog.V(2).Infof("Deleted task detail and logs for task %s", taskID) - return nil -} - -// ListTaskDetails returns a list of all task IDs that have stored details -func (cp *ConfigPersistence) ListTaskDetails() ([]string, error) { - if cp.dataDir == "" { - return nil, fmt.Errorf("no data directory specified, cannot list task details") - } - - taskDetailDir := filepath.Join(cp.dataDir, TaskDetailsSubdir) - if _, err := os.Stat(taskDetailDir); os.IsNotExist(err) { - return []string{}, nil - } - - entries, err := os.ReadDir(taskDetailDir) - if err != nil { - return nil, fmt.Errorf("failed to read task details directory: %w", err) - } - - var taskIDs []string - for _, entry := range entries { - if !entry.IsDir() && filepath.Ext(entry.Name()) == ".json" { - taskID := entry.Name()[:len(entry.Name())-5] // Remove .json extension - taskIDs = append(taskIDs, taskID) - } - } - - return taskIDs, nil -} - -// CleanupCompletedTasks removes old completed tasks beyond the retention limit -func (cp *ConfigPersistence) CleanupCompletedTasks() error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot cleanup completed tasks") - } - - tasksDir := filepath.Join(cp.dataDir, TasksSubdir) - if _, err := os.Stat(tasksDir); os.IsNotExist(err) { - return nil // No tasks directory, nothing to cleanup - } - - // Load all tasks and find completed/failed ones - allTasks, err := cp.LoadAllTaskStates() - if err != nil { - return fmt.Errorf("failed to load tasks for cleanup: %w", err) - } - - // Filter completed and failed tasks, sort by completion time - var completedTasks []*maintenance.MaintenanceTask - for _, task := range allTasks { - if (task.Status == maintenance.TaskStatusCompleted || task.Status == maintenance.TaskStatusFailed) && task.CompletedAt != nil { - completedTasks = append(completedTasks, task) - } - } - - // Sort by completion time (most recent first) - sort.Slice(completedTasks, func(i, j int) bool { - return completedTasks[i].CompletedAt.After(*completedTasks[j].CompletedAt) - }) - - // Keep only the most recent MaxCompletedTasks, delete the rest - if len(completedTasks) > MaxCompletedTasks { - tasksToDelete := completedTasks[MaxCompletedTasks:] - for _, task := range tasksToDelete { - if err := cp.DeleteTaskState(task.ID); err != nil { - glog.Warningf("Failed to delete old completed task %s: %v", task.ID, err) - } else { - glog.V(2).Infof("Cleaned up old completed task %s (completed: %v)", task.ID, task.CompletedAt) - } - } - glog.V(1).Infof("Cleaned up %d old completed tasks (keeping %d most recent)", len(tasksToDelete), MaxCompletedTasks) - } - - return nil -} - -// SaveTaskState saves a task state to protobuf file -func (cp *ConfigPersistence) SaveTaskState(task *maintenance.MaintenanceTask) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot save task state") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(task.ID) { - return fmt.Errorf("invalid task ID: %q contains illegal path characters", task.ID) - } - - tasksDir := filepath.Join(cp.dataDir, TasksSubdir) - if err := os.MkdirAll(tasksDir, ConfigDirPermissions); err != nil { - return fmt.Errorf("failed to create tasks directory: %w", err) - } - - taskFilePath := filepath.Join(tasksDir, fmt.Sprintf("%s.pb", task.ID)) - - // Convert task to protobuf - pbTask := cp.maintenanceTaskToProtobuf(task) - taskStateFile := &worker_pb.TaskStateFile{ - Task: pbTask, - LastUpdated: time.Now().Unix(), - AdminVersion: "unknown", // TODO: add version info - } - - pbData, err := proto.Marshal(taskStateFile) - if err != nil { - return fmt.Errorf("failed to marshal task state protobuf: %w", err) - } - - if err := os.WriteFile(taskFilePath, pbData, ConfigFilePermissions); err != nil { - return fmt.Errorf("failed to write task state file: %w", err) - } - - glog.V(2).Infof("Saved task state for task %s to %s", task.ID, taskFilePath) - return nil -} - -// LoadTaskState loads a task state from protobuf file -func (cp *ConfigPersistence) LoadTaskState(taskID string) (*maintenance.MaintenanceTask, error) { - if cp.dataDir == "" { - return nil, fmt.Errorf("no data directory specified, cannot load task state") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskFilePath := filepath.Join(cp.dataDir, TasksSubdir, fmt.Sprintf("%s.pb", taskID)) - if _, err := os.Stat(taskFilePath); os.IsNotExist(err) { - return nil, fmt.Errorf("task state file not found: %s", taskID) - } - - pbData, err := os.ReadFile(taskFilePath) - if err != nil { - return nil, fmt.Errorf("failed to read task state file: %w", err) - } - - var taskStateFile worker_pb.TaskStateFile - if err := proto.Unmarshal(pbData, &taskStateFile); err != nil { - return nil, fmt.Errorf("failed to unmarshal task state protobuf: %w", err) - } - - // Convert protobuf to maintenance task - task := cp.protobufToMaintenanceTask(taskStateFile.Task) - - glog.V(2).Infof("Loaded task state for task %s from %s", taskID, taskFilePath) - return task, nil -} - -// LoadAllTaskStates loads all task states from disk -func (cp *ConfigPersistence) LoadAllTaskStates() ([]*maintenance.MaintenanceTask, error) { - if cp.dataDir == "" { - return []*maintenance.MaintenanceTask{}, nil - } - - tasksDir := filepath.Join(cp.dataDir, TasksSubdir) - if _, err := os.Stat(tasksDir); os.IsNotExist(err) { - return []*maintenance.MaintenanceTask{}, nil - } - - entries, err := os.ReadDir(tasksDir) - if err != nil { - return nil, fmt.Errorf("failed to read tasks directory: %w", err) - } - - var tasks []*maintenance.MaintenanceTask - for _, entry := range entries { - if !entry.IsDir() && filepath.Ext(entry.Name()) == ".pb" { - taskID := entry.Name()[:len(entry.Name())-3] // Remove .pb extension - task, err := cp.LoadTaskState(taskID) - if err != nil { - glog.Warningf("Failed to load task state for %s: %v", taskID, err) - continue - } - tasks = append(tasks, task) - } - } - - glog.V(1).Infof("Loaded %d task states from disk", len(tasks)) - return tasks, nil -} - -// DeleteTaskState removes a task state file from disk -func (cp *ConfigPersistence) DeleteTaskState(taskID string) error { - if cp.dataDir == "" { - return fmt.Errorf("no data directory specified, cannot delete task state") - } - - // Validate task ID to prevent path traversal - if !isValidTaskID(taskID) { - return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID) - } - - taskFilePath := filepath.Join(cp.dataDir, TasksSubdir, fmt.Sprintf("%s.pb", taskID)) - if err := os.Remove(taskFilePath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to delete task state file: %w", err) - } - - glog.V(2).Infof("Deleted task state for task %s", taskID) - return nil -} - -// maintenanceTaskToProtobuf converts a MaintenanceTask to protobuf format -func (cp *ConfigPersistence) maintenanceTaskToProtobuf(task *maintenance.MaintenanceTask) *worker_pb.MaintenanceTaskData { - pbTask := &worker_pb.MaintenanceTaskData{ - Id: task.ID, - Type: string(task.Type), - Priority: cp.priorityToString(task.Priority), - Status: string(task.Status), - VolumeId: task.VolumeID, - Server: task.Server, - Collection: task.Collection, - Reason: task.Reason, - CreatedAt: task.CreatedAt.Unix(), - ScheduledAt: task.ScheduledAt.Unix(), - WorkerId: task.WorkerID, - Error: task.Error, - Progress: task.Progress, - RetryCount: int32(task.RetryCount), - MaxRetries: int32(task.MaxRetries), - CreatedBy: task.CreatedBy, - CreationContext: task.CreationContext, - DetailedReason: task.DetailedReason, - Tags: task.Tags, - } - - // Handle optional timestamps - if task.StartedAt != nil { - pbTask.StartedAt = task.StartedAt.Unix() - } - if task.CompletedAt != nil { - pbTask.CompletedAt = task.CompletedAt.Unix() - } - - // Convert assignment history - if task.AssignmentHistory != nil { - for _, record := range task.AssignmentHistory { - pbRecord := &worker_pb.TaskAssignmentRecord{ - WorkerId: record.WorkerID, - WorkerAddress: record.WorkerAddress, - AssignedAt: record.AssignedAt.Unix(), - Reason: record.Reason, - } - if record.UnassignedAt != nil { - pbRecord.UnassignedAt = record.UnassignedAt.Unix() - } - pbTask.AssignmentHistory = append(pbTask.AssignmentHistory, pbRecord) - } - } - - // Convert typed parameters if available - if task.TypedParams != nil { - pbTask.TypedParams = task.TypedParams - } - - return pbTask -} - -// protobufToMaintenanceTask converts protobuf format to MaintenanceTask -func (cp *ConfigPersistence) protobufToMaintenanceTask(pbTask *worker_pb.MaintenanceTaskData) *maintenance.MaintenanceTask { - task := &maintenance.MaintenanceTask{ - ID: pbTask.Id, - Type: maintenance.MaintenanceTaskType(pbTask.Type), - Priority: cp.stringToPriority(pbTask.Priority), - Status: maintenance.MaintenanceTaskStatus(pbTask.Status), - VolumeID: pbTask.VolumeId, - Server: pbTask.Server, - Collection: pbTask.Collection, - Reason: pbTask.Reason, - CreatedAt: time.Unix(pbTask.CreatedAt, 0), - ScheduledAt: time.Unix(pbTask.ScheduledAt, 0), - WorkerID: pbTask.WorkerId, - Error: pbTask.Error, - Progress: pbTask.Progress, - RetryCount: int(pbTask.RetryCount), - MaxRetries: int(pbTask.MaxRetries), - CreatedBy: pbTask.CreatedBy, - CreationContext: pbTask.CreationContext, - DetailedReason: pbTask.DetailedReason, - Tags: pbTask.Tags, - } - - // Handle optional timestamps - if pbTask.StartedAt > 0 { - startTime := time.Unix(pbTask.StartedAt, 0) - task.StartedAt = &startTime - } - if pbTask.CompletedAt > 0 { - completedTime := time.Unix(pbTask.CompletedAt, 0) - task.CompletedAt = &completedTime - } - - // Convert assignment history - if pbTask.AssignmentHistory != nil { - task.AssignmentHistory = make([]*maintenance.TaskAssignmentRecord, 0, len(pbTask.AssignmentHistory)) - for _, pbRecord := range pbTask.AssignmentHistory { - record := &maintenance.TaskAssignmentRecord{ - WorkerID: pbRecord.WorkerId, - WorkerAddress: pbRecord.WorkerAddress, - AssignedAt: time.Unix(pbRecord.AssignedAt, 0), - Reason: pbRecord.Reason, - } - if pbRecord.UnassignedAt > 0 { - unassignedTime := time.Unix(pbRecord.UnassignedAt, 0) - record.UnassignedAt = &unassignedTime - } - task.AssignmentHistory = append(task.AssignmentHistory, record) - } - } - - // Convert typed parameters if available - if pbTask.TypedParams != nil { - task.TypedParams = pbTask.TypedParams - } - - return task -} - -// priorityToString converts MaintenanceTaskPriority to string for protobuf storage -func (cp *ConfigPersistence) priorityToString(priority maintenance.MaintenanceTaskPriority) string { - switch priority { - case maintenance.PriorityLow: - return "low" - case maintenance.PriorityNormal: - return "normal" - case maintenance.PriorityHigh: - return "high" - case maintenance.PriorityCritical: - return "critical" - default: - return "normal" - } -} - -// stringToPriority converts string from protobuf to MaintenanceTaskPriority -func (cp *ConfigPersistence) stringToPriority(priorityStr string) maintenance.MaintenanceTaskPriority { - switch priorityStr { - case "low": - return maintenance.PriorityLow - case "normal": - return maintenance.PriorityNormal - case "high": - return maintenance.PriorityHigh - case "critical": - return maintenance.PriorityCritical - default: - return maintenance.PriorityNormal - } -} diff --git a/weed/admin/dash/ec_shard_management.go b/weed/admin/dash/ec_shard_management.go deleted file mode 100644 index 34574ecdb..000000000 --- a/weed/admin/dash/ec_shard_management.go +++ /dev/null @@ -1,745 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "sort" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -// matchesCollection checks if a volume/EC volume collection matches the filter collection. -// Handles the special case where empty collection ("") represents the "default" collection. -func matchesCollection(volumeCollection, filterCollection string) bool { - // Both empty means default collection matches default filter - if volumeCollection == "" && filterCollection == "" { - return true - } - // Direct string match for named collections - return volumeCollection == filterCollection -} - -// GetClusterEcShards retrieves cluster EC shards data with pagination, sorting, and filtering -func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcShardsData, error) { - // Set defaults - if page < 1 { - page = 1 - } - if pageSize < 1 || pageSize > 1000 { - pageSize = 100 - } - if sortBy == "" { - sortBy = "volume_id" - } - if sortOrder == "" { - sortOrder = "asc" - } - - var ecShards []EcShardWithInfo - volumeShardsMap := make(map[uint32]map[int]bool) // volumeId -> set of shards present - volumesWithAllShards := 0 - volumesWithMissingShards := 0 - - // Get detailed EC shard information via gRPC - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - // Process EC shard information - for _, ecShardInfo := range diskInfo.EcShardInfos { - volumeId := ecShardInfo.Id - - // Initialize volume shards map if needed - if volumeShardsMap[volumeId] == nil { - volumeShardsMap[volumeId] = make(map[int]bool) - } - - // Create individual shard entries for each shard this server has - shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { - if (shardBits & (1 << uint(shardId))) != 0 { - // Mark this shard as present for this volume - volumeShardsMap[volumeId][shardId] = true - - ecShard := EcShardWithInfo{ - VolumeID: volumeId, - ShardID: uint32(shardId), - Collection: ecShardInfo.Collection, - Size: 0, // EC shards don't have individual size in the API response - Server: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - DiskType: diskInfo.Type, - ModifiedTime: 0, // Not available in current API - EcIndexBits: ecShardInfo.EcIndexBits, - ShardCount: getShardCount(ecShardInfo.EcIndexBits), - } - ecShards = append(ecShards, ecShard) - } - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Calculate volume-level completeness (across all servers) - volumeCompleteness := make(map[uint32]bool) - volumeMissingShards := make(map[uint32][]int) - - for volumeId, shardsPresent := range volumeShardsMap { - var missingShards []int - shardCount := len(shardsPresent) - - // Find which shards are missing for this volume across ALL servers - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { - if !shardsPresent[shardId] { - missingShards = append(missingShards, shardId) - } - } - - isComplete := (shardCount == erasure_coding.TotalShardsCount) - volumeCompleteness[volumeId] = isComplete - volumeMissingShards[volumeId] = missingShards - - if isComplete { - volumesWithAllShards++ - } else { - volumesWithMissingShards++ - } - } - - // Update completeness info for each shard based on volume-level completeness - for i := range ecShards { - volumeId := ecShards[i].VolumeID - ecShards[i].IsComplete = volumeCompleteness[volumeId] - ecShards[i].MissingShards = volumeMissingShards[volumeId] - } - - // Filter by collection if specified - if collection != "" { - var filteredShards []EcShardWithInfo - for _, shard := range ecShards { - if shard.Collection == collection { - filteredShards = append(filteredShards, shard) - } - } - ecShards = filteredShards - } - - // Sort the results - sortEcShards(ecShards, sortBy, sortOrder) - - // Calculate statistics for conditional display - dataCenters := make(map[string]bool) - racks := make(map[string]bool) - collections := make(map[string]bool) - - for _, shard := range ecShards { - dataCenters[shard.DataCenter] = true - racks[shard.Rack] = true - if shard.Collection != "" { - collections[shard.Collection] = true - } - } - - // Pagination - totalShards := len(ecShards) - totalPages := (totalShards + pageSize - 1) / pageSize - startIndex := (page - 1) * pageSize - endIndex := startIndex + pageSize - if endIndex > totalShards { - endIndex = totalShards - } - - if startIndex >= totalShards { - startIndex = 0 - endIndex = 0 - } - - paginatedShards := ecShards[startIndex:endIndex] - - // Build response - data := &ClusterEcShardsData{ - EcShards: paginatedShards, - TotalShards: totalShards, - TotalVolumes: len(volumeShardsMap), - LastUpdated: time.Now(), - - // Pagination - CurrentPage: page, - TotalPages: totalPages, - PageSize: pageSize, - - // Sorting - SortBy: sortBy, - SortOrder: sortOrder, - - // Statistics - DataCenterCount: len(dataCenters), - RackCount: len(racks), - CollectionCount: len(collections), - - // Conditional display flags - ShowDataCenterColumn: len(dataCenters) > 1, - ShowRackColumn: len(racks) > 1, - ShowCollectionColumn: len(collections) > 1 || collection != "", - - // Filtering - FilterCollection: collection, - - // EC specific statistics - ShardsPerVolume: make(map[uint32]int), // This will be recalculated below - VolumesWithAllShards: volumesWithAllShards, - VolumesWithMissingShards: volumesWithMissingShards, - } - - // Recalculate ShardsPerVolume for the response - for volumeId, shardsPresent := range volumeShardsMap { - data.ShardsPerVolume[volumeId] = len(shardsPresent) - } - - // Set single values when only one exists - if len(dataCenters) == 1 { - for dc := range dataCenters { - data.SingleDataCenter = dc - break - } - } - if len(racks) == 1 { - for rack := range racks { - data.SingleRack = rack - break - } - } - if len(collections) == 1 { - for col := range collections { - data.SingleCollection = col - break - } - } - - return data, nil -} - -// GetClusterEcVolumes retrieves cluster EC volumes data grouped by volume ID with shard locations -func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcVolumesData, error) { - // Set defaults - if page < 1 { - page = 1 - } - if pageSize < 1 || pageSize > 1000 { - pageSize = 100 - } - if sortBy == "" { - sortBy = "volume_id" - } - if sortOrder == "" { - sortOrder = "asc" - } - - volumeData := make(map[uint32]*EcVolumeWithShards) - totalShards := 0 - - // Get detailed EC shard information via gRPC - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - // Process EC shard information - for _, ecShardInfo := range diskInfo.EcShardInfos { - volumeId := ecShardInfo.Id - - // Initialize volume data if needed - if volumeData[volumeId] == nil { - volumeData[volumeId] = &EcVolumeWithShards{ - VolumeID: volumeId, - Collection: ecShardInfo.Collection, - TotalShards: 0, - IsComplete: false, - MissingShards: []int{}, - ShardLocations: make(map[int]string), - ShardSizes: make(map[int]int64), - DataCenters: []string{}, - Servers: []string{}, - Racks: []string{}, - } - } - - volume := volumeData[volumeId] - - // Track data centers and servers - dcExists := false - for _, existingDc := range volume.DataCenters { - if existingDc == dc.Id { - dcExists = true - break - } - } - if !dcExists { - volume.DataCenters = append(volume.DataCenters, dc.Id) - } - - serverExists := false - for _, existingServer := range volume.Servers { - if existingServer == node.Id { - serverExists = true - break - } - } - if !serverExists { - volume.Servers = append(volume.Servers, node.Id) - } - - // Track racks - rackExists := false - for _, existingRack := range volume.Racks { - if existingRack == rack.Id { - rackExists = true - break - } - } - if !rackExists { - volume.Racks = append(volume.Racks, rack.Id) - } - - // Process each shard this server has for this volume - shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { - if (shardBits & (1 << uint(shardId))) != 0 { - // Record shard location - volume.ShardLocations[shardId] = node.Id - totalShards++ - } - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Collect shard size information from volume servers - for volumeId, volume := range volumeData { - // Group servers by volume to minimize gRPC calls - serverHasVolume := make(map[string]bool) - for _, server := range volume.Servers { - serverHasVolume[server] = true - } - - // Query each server for shard sizes - for server := range serverHasVolume { - err := s.WithVolumeServerClient(pb.ServerAddress(server), func(client volume_server_pb.VolumeServerClient) error { - resp, err := client.VolumeEcShardsInfo(context.Background(), &volume_server_pb.VolumeEcShardsInfoRequest{ - VolumeId: volumeId, - }) - if err != nil { - glog.V(1).Infof("Failed to get EC shard info from %s for volume %d: %v", server, volumeId, err) - return nil // Continue with other servers, don't fail the entire request - } - - // Update shard sizes - for _, shardInfo := range resp.EcShardInfos { - volume.ShardSizes[int(shardInfo.ShardId)] = shardInfo.Size - } - - return nil - }) - if err != nil { - glog.V(1).Infof("Failed to connect to volume server %s: %v", server, err) - } - } - } - - // Calculate completeness for each volume - completeVolumes := 0 - incompleteVolumes := 0 - - for _, volume := range volumeData { - volume.TotalShards = len(volume.ShardLocations) - - // Find missing shards - var missingShards []int - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { - if _, exists := volume.ShardLocations[shardId]; !exists { - missingShards = append(missingShards, shardId) - } - } - - volume.MissingShards = missingShards - volume.IsComplete = (len(missingShards) == 0) - - if volume.IsComplete { - completeVolumes++ - } else { - incompleteVolumes++ - } - } - - // Convert map to slice - var ecVolumes []EcVolumeWithShards - for _, volume := range volumeData { - // Filter by collection if specified - if collection == "" || matchesCollection(volume.Collection, collection) { - ecVolumes = append(ecVolumes, *volume) - } - } - - // Sort the results - sortEcVolumes(ecVolumes, sortBy, sortOrder) - - // Calculate statistics for conditional display - dataCenters := make(map[string]bool) - collections := make(map[string]bool) - - for _, volume := range ecVolumes { - for _, dc := range volume.DataCenters { - dataCenters[dc] = true - } - if volume.Collection != "" { - collections[volume.Collection] = true - } - } - - // Pagination - totalVolumes := len(ecVolumes) - totalPages := (totalVolumes + pageSize - 1) / pageSize - startIndex := (page - 1) * pageSize - endIndex := startIndex + pageSize - if endIndex > totalVolumes { - endIndex = totalVolumes - } - - if startIndex >= totalVolumes { - startIndex = 0 - endIndex = 0 - } - - paginatedVolumes := ecVolumes[startIndex:endIndex] - - // Build response - data := &ClusterEcVolumesData{ - EcVolumes: paginatedVolumes, - TotalVolumes: totalVolumes, - LastUpdated: time.Now(), - - // Pagination - Page: page, - PageSize: pageSize, - TotalPages: totalPages, - - // Sorting - SortBy: sortBy, - SortOrder: sortOrder, - - // Filtering - Collection: collection, - - // Conditional display flags - ShowDataCenterColumn: len(dataCenters) > 1, - ShowRackColumn: false, // We don't track racks in this view for simplicity - ShowCollectionColumn: len(collections) > 1 || collection != "", - - // Statistics - CompleteVolumes: completeVolumes, - IncompleteVolumes: incompleteVolumes, - TotalShards: totalShards, - } - - return data, nil -} - -// sortEcVolumes sorts EC volumes based on the specified field and order -func sortEcVolumes(volumes []EcVolumeWithShards, sortBy string, sortOrder string) { - sort.Slice(volumes, func(i, j int) bool { - var less bool - switch sortBy { - case "volume_id": - less = volumes[i].VolumeID < volumes[j].VolumeID - case "collection": - if volumes[i].Collection == volumes[j].Collection { - less = volumes[i].VolumeID < volumes[j].VolumeID - } else { - less = volumes[i].Collection < volumes[j].Collection - } - case "total_shards": - if volumes[i].TotalShards == volumes[j].TotalShards { - less = volumes[i].VolumeID < volumes[j].VolumeID - } else { - less = volumes[i].TotalShards < volumes[j].TotalShards - } - case "completeness": - // Complete volumes first, then by volume ID - if volumes[i].IsComplete == volumes[j].IsComplete { - less = volumes[i].VolumeID < volumes[j].VolumeID - } else { - less = volumes[i].IsComplete && !volumes[j].IsComplete - } - default: - less = volumes[i].VolumeID < volumes[j].VolumeID - } - - if sortOrder == "desc" { - return !less - } - return less - }) -} - -// getShardCount returns the number of shards represented by the bitmap -func getShardCount(ecIndexBits uint32) int { - count := 0 - for i := 0; i < erasure_coding.TotalShardsCount; i++ { - if (ecIndexBits & (1 << uint(i))) != 0 { - count++ - } - } - return count -} - -// getMissingShards returns a slice of missing shard IDs for a volume -func getMissingShards(ecIndexBits uint32) []int { - var missing []int - for i := 0; i < erasure_coding.TotalShardsCount; i++ { - if (ecIndexBits & (1 << uint(i))) == 0 { - missing = append(missing, i) - } - } - return missing -} - -// sortEcShards sorts EC shards based on the specified field and order -func sortEcShards(shards []EcShardWithInfo, sortBy string, sortOrder string) { - sort.Slice(shards, func(i, j int) bool { - var less bool - switch sortBy { - case "shard_id": - less = shards[i].ShardID < shards[j].ShardID - case "server": - if shards[i].Server == shards[j].Server { - less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID - } else { - less = shards[i].Server < shards[j].Server - } - case "data_center": - if shards[i].DataCenter == shards[j].DataCenter { - less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID - } else { - less = shards[i].DataCenter < shards[j].DataCenter - } - case "rack": - if shards[i].Rack == shards[j].Rack { - less = shards[i].ShardID < shards[j].ShardID // Secondary sort by shard ID - } else { - less = shards[i].Rack < shards[j].Rack - } - default: - less = shards[i].ShardID < shards[j].ShardID - } - - if sortOrder == "desc" { - return !less - } - return less - }) -} - -// GetEcVolumeDetails retrieves detailed information about a specific EC volume -func (s *AdminServer) GetEcVolumeDetails(volumeID uint32, sortBy string, sortOrder string) (*EcVolumeDetailsData, error) { - // Set defaults - if sortBy == "" { - sortBy = "shard_id" - } - if sortOrder == "" { - sortOrder = "asc" - } - - var shards []EcShardWithInfo - var collection string - dataCenters := make(map[string]bool) - servers := make(map[string]bool) - - // Get detailed EC shard information for the specific volume via gRPC - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - // Process EC shard information for this specific volume - for _, ecShardInfo := range diskInfo.EcShardInfos { - if ecShardInfo.Id == volumeID { - collection = ecShardInfo.Collection - dataCenters[dc.Id] = true - servers[node.Id] = true - - // Create individual shard entries for each shard this server has - shardBits := ecShardInfo.EcIndexBits - for shardId := 0; shardId < erasure_coding.TotalShardsCount; shardId++ { - if (shardBits & (1 << uint(shardId))) != 0 { - ecShard := EcShardWithInfo{ - VolumeID: ecShardInfo.Id, - ShardID: uint32(shardId), - Collection: ecShardInfo.Collection, - Size: 0, // EC shards don't have individual size in the API response - Server: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - DiskType: diskInfo.Type, - ModifiedTime: 0, // Not available in current API - EcIndexBits: ecShardInfo.EcIndexBits, - ShardCount: getShardCount(ecShardInfo.EcIndexBits), - } - shards = append(shards, ecShard) - } - } - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - if len(shards) == 0 { - return nil, fmt.Errorf("EC volume %d not found", volumeID) - } - - // Collect shard size information from volume servers - shardSizeMap := make(map[string]map[uint32]uint64) // server -> shardId -> size - for _, shard := range shards { - server := shard.Server - if _, exists := shardSizeMap[server]; !exists { - // Query this server for shard sizes - err := s.WithVolumeServerClient(pb.ServerAddress(server), func(client volume_server_pb.VolumeServerClient) error { - resp, err := client.VolumeEcShardsInfo(context.Background(), &volume_server_pb.VolumeEcShardsInfoRequest{ - VolumeId: volumeID, - }) - if err != nil { - glog.V(1).Infof("Failed to get EC shard info from %s for volume %d: %v", server, volumeID, err) - return nil // Continue with other servers, don't fail the entire request - } - - // Store shard sizes for this server - shardSizeMap[server] = make(map[uint32]uint64) - for _, shardInfo := range resp.EcShardInfos { - shardSizeMap[server][shardInfo.ShardId] = uint64(shardInfo.Size) - } - - return nil - }) - if err != nil { - glog.V(1).Infof("Failed to connect to volume server %s: %v", server, err) - } - } - } - - // Update shard sizes in the shards array - for i := range shards { - server := shards[i].Server - shardId := shards[i].ShardID - if serverSizes, exists := shardSizeMap[server]; exists { - if size, exists := serverSizes[shardId]; exists { - shards[i].Size = size - } - } - } - - // Calculate completeness based on unique shard IDs - foundShards := make(map[int]bool) - for _, shard := range shards { - foundShards[int(shard.ShardID)] = true - } - - totalUniqueShards := len(foundShards) - isComplete := (totalUniqueShards == erasure_coding.TotalShardsCount) - - // Calculate missing shards - var missingShards []int - for i := 0; i < erasure_coding.TotalShardsCount; i++ { - if !foundShards[i] { - missingShards = append(missingShards, i) - } - } - - // Update completeness info for each shard - for i := range shards { - shards[i].IsComplete = isComplete - shards[i].MissingShards = missingShards - } - - // Sort shards based on parameters - sortEcShards(shards, sortBy, sortOrder) - - // Convert maps to slices - var dcList []string - for dc := range dataCenters { - dcList = append(dcList, dc) - } - var serverList []string - for server := range servers { - serverList = append(serverList, server) - } - - data := &EcVolumeDetailsData{ - VolumeID: volumeID, - Collection: collection, - Shards: shards, - TotalShards: totalUniqueShards, - IsComplete: isComplete, - MissingShards: missingShards, - DataCenters: dcList, - Servers: serverList, - LastUpdated: time.Now(), - SortBy: sortBy, - SortOrder: sortOrder, - } - - return data, nil -} diff --git a/weed/admin/dash/file_browser_data.go b/weed/admin/dash/file_browser_data.go deleted file mode 100644 index 6bb30c469..000000000 --- a/weed/admin/dash/file_browser_data.go +++ /dev/null @@ -1,272 +0,0 @@ -package dash - -import ( - "context" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -// FileEntry represents a file or directory entry in the file browser -type FileEntry struct { - Name string `json:"name"` - FullPath string `json:"full_path"` - IsDirectory bool `json:"is_directory"` - Size int64 `json:"size"` - ModTime time.Time `json:"mod_time"` - Mode string `json:"mode"` - Uid uint32 `json:"uid"` - Gid uint32 `json:"gid"` - Mime string `json:"mime"` - Replication string `json:"replication"` - Collection string `json:"collection"` - TtlSec int32 `json:"ttl_sec"` -} - -// BreadcrumbItem represents a single breadcrumb in the navigation -type BreadcrumbItem struct { - Name string `json:"name"` - Path string `json:"path"` -} - -// FileBrowserData contains all data needed for the file browser view -type FileBrowserData struct { - Username string `json:"username"` - CurrentPath string `json:"current_path"` - ParentPath string `json:"parent_path"` - Breadcrumbs []BreadcrumbItem `json:"breadcrumbs"` - Entries []FileEntry `json:"entries"` - TotalEntries int `json:"total_entries"` - TotalSize int64 `json:"total_size"` - LastUpdated time.Time `json:"last_updated"` - IsBucketPath bool `json:"is_bucket_path"` - BucketName string `json:"bucket_name"` -} - -// GetFileBrowser retrieves file browser data for a given path -func (s *AdminServer) GetFileBrowser(path string) (*FileBrowserData, error) { - if path == "" { - path = "/" - } - - var entries []FileEntry - var totalSize int64 - - // Get directory listing from filer - err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: path, - Prefix: "", - Limit: 1000, - InclusiveStartFrom: false, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - return err - } - - entry := resp.Entry - if entry == nil { - continue - } - - fullPath := path - if !strings.HasSuffix(fullPath, "/") { - fullPath += "/" - } - fullPath += entry.Name - - var modTime time.Time - if entry.Attributes != nil && entry.Attributes.Mtime > 0 { - modTime = time.Unix(entry.Attributes.Mtime, 0) - } - - var mode string - var uid, gid uint32 - var size int64 - var replication, collection string - var ttlSec int32 - - if entry.Attributes != nil { - mode = FormatFileMode(entry.Attributes.FileMode) - uid = entry.Attributes.Uid - gid = entry.Attributes.Gid - size = int64(entry.Attributes.FileSize) - ttlSec = entry.Attributes.TtlSec - } - - // Get replication and collection from entry extended attributes or chunks - if entry.Extended != nil { - if repl, ok := entry.Extended["replication"]; ok { - replication = string(repl) - } - if coll, ok := entry.Extended["collection"]; ok { - collection = string(coll) - } - } - - // Determine MIME type based on file extension - mime := "application/octet-stream" - if entry.IsDirectory { - mime = "inode/directory" - } else { - ext := strings.ToLower(filepath.Ext(entry.Name)) - switch ext { - case ".txt", ".log": - mime = "text/plain" - case ".html", ".htm": - mime = "text/html" - case ".css": - mime = "text/css" - case ".js": - mime = "application/javascript" - case ".json": - mime = "application/json" - case ".xml": - mime = "application/xml" - case ".pdf": - mime = "application/pdf" - case ".jpg", ".jpeg": - mime = "image/jpeg" - case ".png": - mime = "image/png" - case ".gif": - mime = "image/gif" - case ".svg": - mime = "image/svg+xml" - case ".mp4": - mime = "video/mp4" - case ".mp3": - mime = "audio/mpeg" - case ".zip": - mime = "application/zip" - case ".tar": - mime = "application/x-tar" - case ".gz": - mime = "application/gzip" - } - } - - fileEntry := FileEntry{ - Name: entry.Name, - FullPath: fullPath, - IsDirectory: entry.IsDirectory, - Size: size, - ModTime: modTime, - Mode: mode, - Uid: uid, - Gid: gid, - Mime: mime, - Replication: replication, - Collection: collection, - TtlSec: ttlSec, - } - - entries = append(entries, fileEntry) - if !entry.IsDirectory { - totalSize += size - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Sort entries: directories first, then files, both alphabetically - sort.Slice(entries, func(i, j int) bool { - if entries[i].IsDirectory != entries[j].IsDirectory { - return entries[i].IsDirectory - } - return strings.ToLower(entries[i].Name) < strings.ToLower(entries[j].Name) - }) - - // Generate breadcrumbs - breadcrumbs := s.generateBreadcrumbs(path) - - // Calculate parent path - parentPath := "/" - if path != "/" { - parentPath = filepath.Dir(path) - if parentPath == "." { - parentPath = "/" - } - } - - // Check if this is a bucket path - isBucketPath := false - bucketName := "" - if strings.HasPrefix(path, "/buckets/") { - isBucketPath = true - pathParts := strings.Split(strings.Trim(path, "/"), "/") - if len(pathParts) >= 2 { - bucketName = pathParts[1] - } - } - - return &FileBrowserData{ - CurrentPath: path, - ParentPath: parentPath, - Breadcrumbs: breadcrumbs, - Entries: entries, - TotalEntries: len(entries), - TotalSize: totalSize, - LastUpdated: time.Now(), - IsBucketPath: isBucketPath, - BucketName: bucketName, - }, nil -} - -// generateBreadcrumbs creates breadcrumb navigation for the current path -func (s *AdminServer) generateBreadcrumbs(path string) []BreadcrumbItem { - var breadcrumbs []BreadcrumbItem - - // Always start with root - breadcrumbs = append(breadcrumbs, BreadcrumbItem{ - Name: "Root", - Path: "/", - }) - - if path == "/" { - return breadcrumbs - } - - // Split path and build breadcrumbs - parts := strings.Split(strings.Trim(path, "/"), "/") - currentPath := "" - - for _, part := range parts { - if part == "" { - continue - } - currentPath += "/" + part - - // Special handling for bucket paths - displayName := part - if len(breadcrumbs) == 1 && part == "buckets" { - displayName = "Object Store Buckets" - } else if len(breadcrumbs) == 2 && strings.HasPrefix(path, "/buckets/") { - displayName = "๐Ÿ“ฆ " + part // Add bucket icon to bucket name - } - - breadcrumbs = append(breadcrumbs, BreadcrumbItem{ - Name: displayName, - Path: currentPath, - }) - } - - return breadcrumbs -} diff --git a/weed/admin/dash/file_mode_utils.go b/weed/admin/dash/file_mode_utils.go deleted file mode 100644 index 19c5b2f49..000000000 --- a/weed/admin/dash/file_mode_utils.go +++ /dev/null @@ -1,85 +0,0 @@ -package dash - -// FormatFileMode converts file mode to Unix-style string representation (e.g., "drwxr-xr-x") -// Handles both Go's os.ModeDir format and standard Unix file type bits -func FormatFileMode(mode uint32) string { - var result []byte = make([]byte, 10) - - // File type - handle Go's os.ModeDir first, then standard Unix file type bits - if mode&0x80000000 != 0 { // Go's os.ModeDir (0x80000000 = 2147483648) - result[0] = 'd' - } else { - switch mode & 0170000 { // S_IFMT mask - case 0040000: // S_IFDIR - result[0] = 'd' - case 0100000: // S_IFREG - result[0] = '-' - case 0120000: // S_IFLNK - result[0] = 'l' - case 0020000: // S_IFCHR - result[0] = 'c' - case 0060000: // S_IFBLK - result[0] = 'b' - case 0010000: // S_IFIFO - result[0] = 'p' - case 0140000: // S_IFSOCK - result[0] = 's' - default: - result[0] = '-' // S_IFREG is default - } - } - - // Permission bits (always use the lower 12 bits regardless of file type format) - // Owner permissions - if mode&0400 != 0 { // S_IRUSR - result[1] = 'r' - } else { - result[1] = '-' - } - if mode&0200 != 0 { // S_IWUSR - result[2] = 'w' - } else { - result[2] = '-' - } - if mode&0100 != 0 { // S_IXUSR - result[3] = 'x' - } else { - result[3] = '-' - } - - // Group permissions - if mode&0040 != 0 { // S_IRGRP - result[4] = 'r' - } else { - result[4] = '-' - } - if mode&0020 != 0 { // S_IWGRP - result[5] = 'w' - } else { - result[5] = '-' - } - if mode&0010 != 0 { // S_IXGRP - result[6] = 'x' - } else { - result[6] = '-' - } - - // Other permissions - if mode&0004 != 0 { // S_IROTH - result[7] = 'r' - } else { - result[7] = '-' - } - if mode&0002 != 0 { // S_IWOTH - result[8] = 'w' - } else { - result[8] = '-' - } - if mode&0001 != 0 { // S_IXOTH - result[9] = 'x' - } else { - result[9] = '-' - } - - return string(result) -} diff --git a/weed/admin/dash/middleware.go b/weed/admin/dash/middleware.go deleted file mode 100644 index a4cfedfd0..000000000 --- a/weed/admin/dash/middleware.go +++ /dev/null @@ -1,50 +0,0 @@ -package dash - -import ( - "net/http" - - "github.com/gin-contrib/sessions" - "github.com/gin-gonic/gin" -) - -// RequireAuth checks if user is authenticated -func RequireAuth() gin.HandlerFunc { - return func(c *gin.Context) { - session := sessions.Default(c) - authenticated := session.Get("authenticated") - username := session.Get("username") - - if authenticated != true || username == nil { - c.Redirect(http.StatusTemporaryRedirect, "/login") - c.Abort() - return - } - - // Set username in context for use in handlers - c.Set("username", username) - c.Next() - } -} - -// RequireAuthAPI checks if user is authenticated for API endpoints -// Returns JSON error instead of redirecting to login page -func RequireAuthAPI() gin.HandlerFunc { - return func(c *gin.Context) { - session := sessions.Default(c) - authenticated := session.Get("authenticated") - username := session.Get("username") - - if authenticated != true || username == nil { - c.JSON(http.StatusUnauthorized, gin.H{ - "error": "Authentication required", - "message": "Please log in to access this endpoint", - }) - c.Abort() - return - } - - // Set username in context for use in handlers - c.Set("username", username) - c.Next() - } -} diff --git a/weed/admin/dash/mq_management.go b/weed/admin/dash/mq_management.go deleted file mode 100644 index 3fd4aed85..000000000 --- a/weed/admin/dash/mq_management.go +++ /dev/null @@ -1,681 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "io" - "path/filepath" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// GetTopics retrieves message queue topics data -func (s *AdminServer) GetTopics() (*TopicsData, error) { - var topics []TopicInfo - - // Find broker leader and get topics - brokerLeader, err := s.findBrokerLeader() - if err != nil { - // If no broker leader found, return empty data - return &TopicsData{ - Topics: topics, - TotalTopics: len(topics), - LastUpdated: time.Now(), - }, nil - } - - // Connect to broker leader and list topics - err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - resp, err := client.ListTopics(ctx, &mq_pb.ListTopicsRequest{}) - if err != nil { - return err - } - - // Convert protobuf topics to TopicInfo - only include available data - for _, pbTopic := range resp.Topics { - topicInfo := TopicInfo{ - Name: fmt.Sprintf("%s.%s", pbTopic.Namespace, pbTopic.Name), - Partitions: 0, // Will be populated by LookupTopicBrokers call - Retention: TopicRetentionInfo{ - Enabled: false, - DisplayValue: 0, - DisplayUnit: "days", - }, - } - - // Get topic configuration to get partition count and retention info - lookupResp, err := client.LookupTopicBrokers(ctx, &mq_pb.LookupTopicBrokersRequest{ - Topic: pbTopic, - }) - if err == nil { - topicInfo.Partitions = len(lookupResp.BrokerPartitionAssignments) - } - - // Get topic configuration for retention information - configResp, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{ - Topic: pbTopic, - }) - if err == nil && configResp.Retention != nil { - topicInfo.Retention = convertTopicRetention(configResp.Retention) - } - - topics = append(topics, topicInfo) - } - - return nil - }) - - if err != nil { - // If connection fails, return empty data - return &TopicsData{ - Topics: topics, - TotalTopics: len(topics), - LastUpdated: time.Now(), - }, nil - } - - return &TopicsData{ - Topics: topics, - TotalTopics: len(topics), - LastUpdated: time.Now(), - // Don't include TotalMessages and TotalSize as they're not available - }, nil -} - -// GetSubscribers retrieves message queue subscribers data -func (s *AdminServer) GetSubscribers() (*SubscribersData, error) { - var subscribers []SubscriberInfo - - // Find broker leader and get subscriber info from broker stats - brokerLeader, err := s.findBrokerLeader() - if err != nil { - // If no broker leader found, return empty data - return &SubscribersData{ - Subscribers: subscribers, - TotalSubscribers: len(subscribers), - ActiveSubscribers: 0, - LastUpdated: time.Now(), - }, nil - } - - // Connect to broker leader and get subscriber information - // Note: SeaweedMQ doesn't have a direct API to list all subscribers - // We would need to collect this information from broker statistics - // For now, return empty data structure as subscriber info is not - // directly available through the current MQ API - err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error { - // TODO: Implement subscriber data collection from broker statistics - // This would require access to broker internal statistics about - // active subscribers, consumer groups, etc. - return nil - }) - - if err != nil { - // If connection fails, return empty data - return &SubscribersData{ - Subscribers: subscribers, - TotalSubscribers: len(subscribers), - ActiveSubscribers: 0, - LastUpdated: time.Now(), - }, nil - } - - activeCount := 0 - for _, sub := range subscribers { - if sub.Status == "active" { - activeCount++ - } - } - - return &SubscribersData{ - Subscribers: subscribers, - TotalSubscribers: len(subscribers), - ActiveSubscribers: activeCount, - LastUpdated: time.Now(), - }, nil -} - -// GetTopicDetails retrieves detailed information about a specific topic -func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetailsData, error) { - // Find broker leader - brokerLeader, err := s.findBrokerLeader() - if err != nil { - return nil, fmt.Errorf("failed to find broker leader: %w", err) - } - - var topicDetails *TopicDetailsData - - // Connect to broker leader and get topic configuration - err = s.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Get topic configuration using the new API - configResp, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: topicName, - }, - }) - if err != nil { - return fmt.Errorf("failed to get topic configuration: %w", err) - } - - // Initialize topic details - topicDetails = &TopicDetailsData{ - TopicName: fmt.Sprintf("%s.%s", namespace, topicName), - Namespace: namespace, - Name: topicName, - Partitions: []PartitionInfo{}, - Publishers: []PublisherInfo{}, - Subscribers: []TopicSubscriberInfo{}, - ConsumerGroupOffsets: []ConsumerGroupOffsetInfo{}, - Retention: convertTopicRetention(configResp.Retention), - CreatedAt: time.Unix(0, configResp.CreatedAtNs), - LastUpdated: time.Unix(0, configResp.LastUpdatedNs), - } - - // Set current time if timestamps are not available - if configResp.CreatedAtNs == 0 { - topicDetails.CreatedAt = time.Now() - } - if configResp.LastUpdatedNs == 0 { - topicDetails.LastUpdated = time.Now() - } - - // Process partitions - for _, assignment := range configResp.BrokerPartitionAssignments { - if assignment.Partition != nil { - partitionInfo := PartitionInfo{ - ID: assignment.Partition.RangeStart, - LeaderBroker: assignment.LeaderBroker, - FollowerBroker: assignment.FollowerBroker, - MessageCount: 0, // Will be enhanced later with actual stats - TotalSize: 0, // Will be enhanced later with actual stats - LastDataTime: time.Time{}, // Will be enhanced later - CreatedAt: time.Now(), - } - topicDetails.Partitions = append(topicDetails.Partitions, partitionInfo) - } - } - - // Process flat schema format - if configResp.MessageRecordType != nil { - for _, field := range configResp.MessageRecordType.Fields { - isKey := false - for _, keyCol := range configResp.KeyColumns { - if field.Name == keyCol { - isKey = true - break - } - } - - fieldType := "UNKNOWN" - if field.Type != nil && field.Type.Kind != nil { - fieldType = getFieldTypeName(field.Type) - } - - schemaField := SchemaFieldInfo{ - Name: field.Name, - Type: fieldType, - } - - if isKey { - topicDetails.KeySchema = append(topicDetails.KeySchema, schemaField) - } else { - topicDetails.ValueSchema = append(topicDetails.ValueSchema, schemaField) - } - } - } - - // Get publishers information - publishersResp, err := client.GetTopicPublishers(ctx, &mq_pb.GetTopicPublishersRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: topicName, - }, - }) - if err != nil { - // Log error but don't fail the entire request - glog.V(0).Infof("failed to get topic publishers for %s.%s: %v", namespace, topicName, err) - } else { - glog.V(1).Infof("got %d publishers for topic %s.%s", len(publishersResp.Publishers), namespace, topicName) - topicDetails.Publishers = convertTopicPublishers(publishersResp.Publishers) - } - - // Get subscribers information - subscribersResp, err := client.GetTopicSubscribers(ctx, &mq_pb.GetTopicSubscribersRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: topicName, - }, - }) - if err != nil { - // Log error but don't fail the entire request - glog.V(0).Infof("failed to get topic subscribers for %s.%s: %v", namespace, topicName, err) - } else { - glog.V(1).Infof("got %d subscribers for topic %s.%s", len(subscribersResp.Subscribers), namespace, topicName) - topicDetails.Subscribers = convertTopicSubscribers(subscribersResp.Subscribers) - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Get consumer group offsets from the filer - offsets, err := s.GetConsumerGroupOffsets(namespace, topicName) - if err != nil { - // Log error but don't fail the entire request - glog.V(0).Infof("failed to get consumer group offsets for %s.%s: %v", namespace, topicName, err) - } else { - glog.V(1).Infof("got %d consumer group offsets for topic %s.%s", len(offsets), namespace, topicName) - topicDetails.ConsumerGroupOffsets = offsets - } - - return topicDetails, nil -} - -// GetConsumerGroupOffsets retrieves consumer group offsets for a topic from the filer -func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]ConsumerGroupOffsetInfo, error) { - var offsets []ConsumerGroupOffsetInfo - - err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // Get the topic directory: /topics/namespace/topicName - topicObj := topic.NewTopic(namespace, topicName) - topicDir := topicObj.Dir() - - // List all version directories under the topic directory (e.g., v2025-07-10-05-44-34) - versionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: topicDir, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - return fmt.Errorf("failed to list topic directory %s: %v", topicDir, err) - } - - // Process each version directory - for { - versionResp, err := versionStream.Recv() - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("failed to receive version entries: %w", err) - } - - // Only process directories that are versions (start with "v") - if versionResp.Entry.IsDirectory && strings.HasPrefix(versionResp.Entry.Name, "v") { - versionDir := filepath.Join(topicDir, versionResp.Entry.Name) - - // List all partition directories under the version directory (e.g., 0315-0630) - partitionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: versionDir, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - glog.Warningf("Failed to list version directory %s: %v", versionDir, err) - continue - } - - // Process each partition directory - for { - partitionResp, err := partitionStream.Recv() - if err != nil { - if err == io.EOF { - break - } - glog.Warningf("Failed to receive partition entries: %v", err) - break - } - - // Only process directories that are partitions (format: NNNN-NNNN) - if partitionResp.Entry.IsDirectory { - // Parse partition range to get partition start ID (e.g., "0315-0630" -> 315) - var partitionStart, partitionStop int32 - if n, err := fmt.Sscanf(partitionResp.Entry.Name, "%04d-%04d", &partitionStart, &partitionStop); n != 2 || err != nil { - // Skip directories that don't match the partition format - continue - } - - partitionDir := filepath.Join(versionDir, partitionResp.Entry.Name) - - // List all .offset files in this partition directory - offsetStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: partitionDir, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - glog.Warningf("Failed to list partition directory %s: %v", partitionDir, err) - continue - } - - // Process each offset file - for { - offsetResp, err := offsetStream.Recv() - if err != nil { - if err == io.EOF { - break - } - glog.Warningf("Failed to receive offset entries: %v", err) - break - } - - // Only process .offset files - if !offsetResp.Entry.IsDirectory && strings.HasSuffix(offsetResp.Entry.Name, ".offset") { - consumerGroup := strings.TrimSuffix(offsetResp.Entry.Name, ".offset") - - // Read the offset value from the file - offsetData, err := filer.ReadInsideFiler(client, partitionDir, offsetResp.Entry.Name) - if err != nil { - glog.Warningf("Failed to read offset file %s: %v", offsetResp.Entry.Name, err) - continue - } - - if len(offsetData) == 8 { - offset := int64(util.BytesToUint64(offsetData)) - - // Get the file modification time - lastUpdated := time.Unix(offsetResp.Entry.Attributes.Mtime, 0) - - offsets = append(offsets, ConsumerGroupOffsetInfo{ - ConsumerGroup: consumerGroup, - PartitionID: partitionStart, // Use partition start as the ID - Offset: offset, - LastUpdated: lastUpdated, - }) - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to get consumer group offsets: %w", err) - } - - return offsets, nil -} - -// convertRecordTypeToSchemaFields converts a protobuf RecordType to SchemaFieldInfo slice -func convertRecordTypeToSchemaFields(recordType *schema_pb.RecordType) []SchemaFieldInfo { - var schemaFields []SchemaFieldInfo - - if recordType == nil || recordType.Fields == nil { - return schemaFields - } - - for _, field := range recordType.Fields { - schemaField := SchemaFieldInfo{ - Name: field.Name, - Type: getFieldTypeString(field.Type), - Required: field.IsRequired, - } - schemaFields = append(schemaFields, schemaField) - } - - return schemaFields -} - -// getFieldTypeString converts a protobuf Type to a human-readable string -func getFieldTypeString(fieldType *schema_pb.Type) string { - if fieldType == nil { - return "unknown" - } - - switch kind := fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - return getScalarTypeString(kind.ScalarType) - case *schema_pb.Type_RecordType: - return "record" - case *schema_pb.Type_ListType: - elementType := getFieldTypeString(kind.ListType.ElementType) - return fmt.Sprintf("list<%s>", elementType) - default: - return "unknown" - } -} - -// getScalarTypeString converts a protobuf ScalarType to a string -func getScalarTypeString(scalarType schema_pb.ScalarType) string { - switch scalarType { - case schema_pb.ScalarType_BOOL: - return "bool" - case schema_pb.ScalarType_INT32: - return "int32" - case schema_pb.ScalarType_INT64: - return "int64" - case schema_pb.ScalarType_FLOAT: - return "float" - case schema_pb.ScalarType_DOUBLE: - return "double" - case schema_pb.ScalarType_BYTES: - return "bytes" - case schema_pb.ScalarType_STRING: - return "string" - default: - return "unknown" - } -} - -// convertTopicPublishers converts protobuf TopicPublisher slice to PublisherInfo slice -func convertTopicPublishers(publishers []*mq_pb.TopicPublisher) []PublisherInfo { - publisherInfos := make([]PublisherInfo, 0, len(publishers)) - - for _, publisher := range publishers { - publisherInfo := PublisherInfo{ - PublisherName: publisher.PublisherName, - ClientID: publisher.ClientId, - PartitionID: publisher.Partition.RangeStart, - Broker: publisher.Broker, - IsActive: publisher.IsActive, - LastPublishedOffset: publisher.LastPublishedOffset, - LastAckedOffset: publisher.LastAckedOffset, - } - - // Convert timestamps - if publisher.ConnectTimeNs > 0 { - publisherInfo.ConnectTime = time.Unix(0, publisher.ConnectTimeNs) - } - if publisher.LastSeenTimeNs > 0 { - publisherInfo.LastSeenTime = time.Unix(0, publisher.LastSeenTimeNs) - } - - publisherInfos = append(publisherInfos, publisherInfo) - } - - return publisherInfos -} - -// convertTopicSubscribers converts protobuf TopicSubscriber slice to TopicSubscriberInfo slice -func convertTopicSubscribers(subscribers []*mq_pb.TopicSubscriber) []TopicSubscriberInfo { - subscriberInfos := make([]TopicSubscriberInfo, 0, len(subscribers)) - - for _, subscriber := range subscribers { - subscriberInfo := TopicSubscriberInfo{ - ConsumerGroup: subscriber.ConsumerGroup, - ConsumerID: subscriber.ConsumerId, - ClientID: subscriber.ClientId, - PartitionID: subscriber.Partition.RangeStart, - Broker: subscriber.Broker, - IsActive: subscriber.IsActive, - CurrentOffset: subscriber.CurrentOffset, - LastReceivedOffset: subscriber.LastReceivedOffset, - } - - // Convert timestamps - if subscriber.ConnectTimeNs > 0 { - subscriberInfo.ConnectTime = time.Unix(0, subscriber.ConnectTimeNs) - } - if subscriber.LastSeenTimeNs > 0 { - subscriberInfo.LastSeenTime = time.Unix(0, subscriber.LastSeenTimeNs) - } - - subscriberInfos = append(subscriberInfos, subscriberInfo) - } - - return subscriberInfos -} - -// findBrokerLeader finds the current broker leader -func (s *AdminServer) findBrokerLeader() (string, error) { - // First, try to find any broker from the cluster - var brokers []string - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.BrokerType, - }) - if err != nil { - return err - } - - for _, node := range resp.ClusterNodes { - brokers = append(brokers, node.Address) - } - - return nil - }) - - if err != nil { - return "", fmt.Errorf("failed to list brokers: %w", err) - } - - if len(brokers) == 0 { - return "", fmt.Errorf("no brokers found in cluster") - } - - // Try each broker to find the leader - for _, brokerAddr := range brokers { - err := s.withBrokerClient(brokerAddr, func(client mq_pb.SeaweedMessagingClient) error { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - // Try to find broker leader - _, err := client.FindBrokerLeader(ctx, &mq_pb.FindBrokerLeaderRequest{ - FilerGroup: "", - }) - if err == nil { - return nil // This broker is the leader - } - return err - }) - if err == nil { - return brokerAddr, nil - } - } - - return "", fmt.Errorf("no broker leader found") -} - -// withBrokerClient connects to a message queue broker and executes a function -func (s *AdminServer) withBrokerClient(brokerAddress string, fn func(client mq_pb.SeaweedMessagingClient) error) error { - return pb.WithBrokerGrpcClient(false, brokerAddress, s.grpcDialOption, fn) -} - -// convertTopicRetention converts protobuf retention to TopicRetentionInfo -func convertTopicRetention(retention *mq_pb.TopicRetention) TopicRetentionInfo { - if retention == nil || !retention.Enabled { - return TopicRetentionInfo{ - Enabled: false, - RetentionSeconds: 0, - DisplayValue: 0, - DisplayUnit: "days", - } - } - - // Convert seconds to human-readable format - seconds := retention.RetentionSeconds - var displayValue int32 - var displayUnit string - - if seconds >= 86400 { // >= 1 day - displayValue = int32(seconds / 86400) - displayUnit = "days" - } else if seconds >= 3600 { // >= 1 hour - displayValue = int32(seconds / 3600) - displayUnit = "hours" - } else { - displayValue = int32(seconds) - displayUnit = "seconds" - } - - return TopicRetentionInfo{ - Enabled: retention.Enabled, - RetentionSeconds: seconds, - DisplayValue: displayValue, - DisplayUnit: displayUnit, - } -} - -// getFieldTypeName converts a schema_pb.Type to a human-readable type name -func getFieldTypeName(fieldType *schema_pb.Type) string { - if fieldType.Kind == nil { - return "UNKNOWN" - } - - switch kind := fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch kind.ScalarType { - case schema_pb.ScalarType_BOOL: - return "BOOLEAN" - case schema_pb.ScalarType_INT32: - return "INT32" - case schema_pb.ScalarType_INT64: - return "INT64" - case schema_pb.ScalarType_FLOAT: - return "FLOAT" - case schema_pb.ScalarType_DOUBLE: - return "DOUBLE" - case schema_pb.ScalarType_BYTES: - return "BYTES" - case schema_pb.ScalarType_STRING: - return "STRING" - case schema_pb.ScalarType_TIMESTAMP: - return "TIMESTAMP" - case schema_pb.ScalarType_DATE: - return "DATE" - case schema_pb.ScalarType_TIME: - return "TIME" - case schema_pb.ScalarType_DECIMAL: - return "DECIMAL" - default: - return "SCALAR" - } - case *schema_pb.Type_ListType: - return "LIST" - case *schema_pb.Type_RecordType: - return "RECORD" - default: - return "UNKNOWN" - } -} diff --git a/weed/admin/dash/policies_management.go b/weed/admin/dash/policies_management.go deleted file mode 100644 index 5757520b2..000000000 --- a/weed/admin/dash/policies_management.go +++ /dev/null @@ -1,226 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" -) - -type IAMPolicy struct { - Name string `json:"name"` - Document policy_engine.PolicyDocument `json:"document"` - DocumentJSON string `json:"document_json"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -type PoliciesCollection struct { - Policies map[string]policy_engine.PolicyDocument `json:"policies"` -} - -type PoliciesData struct { - Username string `json:"username"` - Policies []IAMPolicy `json:"policies"` - TotalPolicies int `json:"total_policies"` - LastUpdated time.Time `json:"last_updated"` -} - -// Policy management request structures -type CreatePolicyRequest struct { - Name string `json:"name" binding:"required"` - Document policy_engine.PolicyDocument `json:"document" binding:"required"` - DocumentJSON string `json:"document_json"` -} - -type UpdatePolicyRequest struct { - Document policy_engine.PolicyDocument `json:"document" binding:"required"` - DocumentJSON string `json:"document_json"` -} - -// PolicyManager interface is now in the credential package - -// CredentialStorePolicyManager implements credential.PolicyManager by delegating to the credential store -type CredentialStorePolicyManager struct { - credentialManager *credential.CredentialManager -} - -// NewCredentialStorePolicyManager creates a new CredentialStorePolicyManager -func NewCredentialStorePolicyManager(credentialManager *credential.CredentialManager) *CredentialStorePolicyManager { - return &CredentialStorePolicyManager{ - credentialManager: credentialManager, - } -} - -// GetPolicies retrieves all IAM policies via credential store -func (cspm *CredentialStorePolicyManager) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) { - // Get policies from credential store - // We'll use the credential store to access the filer indirectly - // Since policies are stored separately, we need to access the underlying store - store := cspm.credentialManager.GetStore() - glog.V(1).Infof("Getting policies from credential store: %T", store) - - // Check if the store supports policy management - if policyStore, ok := store.(credential.PolicyManager); ok { - glog.V(1).Infof("Store supports policy management, calling GetPolicies") - policies, err := policyStore.GetPolicies(ctx) - if err != nil { - glog.Errorf("Error getting policies from store: %v", err) - return nil, err - } - glog.V(1).Infof("Got %d policies from store", len(policies)) - return policies, nil - } else { - // Fallback: use empty policies for stores that don't support policies - glog.V(1).Infof("Credential store doesn't support policy management, returning empty policies") - return make(map[string]policy_engine.PolicyDocument), nil - } -} - -// CreatePolicy creates a new IAM policy via credential store -func (cspm *CredentialStorePolicyManager) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - store := cspm.credentialManager.GetStore() - - if policyStore, ok := store.(credential.PolicyManager); ok { - return policyStore.CreatePolicy(ctx, name, document) - } - - return fmt.Errorf("credential store doesn't support policy creation") -} - -// UpdatePolicy updates an existing IAM policy via credential store -func (cspm *CredentialStorePolicyManager) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - store := cspm.credentialManager.GetStore() - - if policyStore, ok := store.(credential.PolicyManager); ok { - return policyStore.UpdatePolicy(ctx, name, document) - } - - return fmt.Errorf("credential store doesn't support policy updates") -} - -// DeletePolicy deletes an IAM policy via credential store -func (cspm *CredentialStorePolicyManager) DeletePolicy(ctx context.Context, name string) error { - store := cspm.credentialManager.GetStore() - - if policyStore, ok := store.(credential.PolicyManager); ok { - return policyStore.DeletePolicy(ctx, name) - } - - return fmt.Errorf("credential store doesn't support policy deletion") -} - -// GetPolicy retrieves a specific IAM policy via credential store -func (cspm *CredentialStorePolicyManager) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) { - store := cspm.credentialManager.GetStore() - - if policyStore, ok := store.(credential.PolicyManager); ok { - return policyStore.GetPolicy(ctx, name) - } - - return nil, fmt.Errorf("credential store doesn't support policy retrieval") -} - -// AdminServer policy management methods using credential.PolicyManager -func (s *AdminServer) GetPolicyManager() credential.PolicyManager { - if s.credentialManager == nil { - glog.V(1).Infof("Credential manager is nil, policy management not available") - return nil - } - glog.V(1).Infof("Credential manager available, creating CredentialStorePolicyManager") - return NewCredentialStorePolicyManager(s.credentialManager) -} - -// GetPolicies retrieves all IAM policies -func (s *AdminServer) GetPolicies() ([]IAMPolicy, error) { - policyManager := s.GetPolicyManager() - if policyManager == nil { - return nil, fmt.Errorf("policy manager not available") - } - - ctx := context.Background() - policyMap, err := policyManager.GetPolicies(ctx) - if err != nil { - return nil, err - } - - // Convert map[string]PolicyDocument to []IAMPolicy - var policies []IAMPolicy - for name, doc := range policyMap { - policy := IAMPolicy{ - Name: name, - Document: doc, - DocumentJSON: "", // Will be populated if needed - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - } - policies = append(policies, policy) - } - - return policies, nil -} - -// CreatePolicy creates a new IAM policy -func (s *AdminServer) CreatePolicy(name string, document policy_engine.PolicyDocument) error { - policyManager := s.GetPolicyManager() - if policyManager == nil { - return fmt.Errorf("policy manager not available") - } - - ctx := context.Background() - return policyManager.CreatePolicy(ctx, name, document) -} - -// UpdatePolicy updates an existing IAM policy -func (s *AdminServer) UpdatePolicy(name string, document policy_engine.PolicyDocument) error { - policyManager := s.GetPolicyManager() - if policyManager == nil { - return fmt.Errorf("policy manager not available") - } - - ctx := context.Background() - return policyManager.UpdatePolicy(ctx, name, document) -} - -// DeletePolicy deletes an IAM policy -func (s *AdminServer) DeletePolicy(name string) error { - policyManager := s.GetPolicyManager() - if policyManager == nil { - return fmt.Errorf("policy manager not available") - } - - ctx := context.Background() - return policyManager.DeletePolicy(ctx, name) -} - -// GetPolicy retrieves a specific IAM policy -func (s *AdminServer) GetPolicy(name string) (*IAMPolicy, error) { - policyManager := s.GetPolicyManager() - if policyManager == nil { - return nil, fmt.Errorf("policy manager not available") - } - - ctx := context.Background() - policyDoc, err := policyManager.GetPolicy(ctx, name) - if err != nil { - return nil, err - } - - if policyDoc == nil { - return nil, nil - } - - // Convert PolicyDocument to IAMPolicy - policy := &IAMPolicy{ - Name: name, - Document: *policyDoc, - DocumentJSON: "", // Will be populated if needed - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - } - - return policy, nil -} diff --git a/weed/admin/dash/topic_retention.go b/weed/admin/dash/topic_retention.go deleted file mode 100644 index fed4893a4..000000000 --- a/weed/admin/dash/topic_retention.go +++ /dev/null @@ -1,296 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "io" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -// TopicRetentionPurger handles topic data purging based on retention policies -type TopicRetentionPurger struct { - adminServer *AdminServer -} - -// NewTopicRetentionPurger creates a new topic retention purger -func NewTopicRetentionPurger(adminServer *AdminServer) *TopicRetentionPurger { - return &TopicRetentionPurger{ - adminServer: adminServer, - } -} - -// PurgeExpiredTopicData purges expired topic data based on retention policies -func (p *TopicRetentionPurger) PurgeExpiredTopicData() error { - glog.V(1).Infof("Starting topic data purge based on retention policies") - - // Get all topics with retention enabled - topics, err := p.getTopicsWithRetention() - if err != nil { - return fmt.Errorf("failed to get topics with retention: %w", err) - } - - glog.V(1).Infof("Found %d topics with retention enabled", len(topics)) - - // Process each topic - for _, topicRetention := range topics { - err := p.purgeTopicData(topicRetention) - if err != nil { - glog.Errorf("Failed to purge data for topic %s: %v", topicRetention.TopicName, err) - continue - } - } - - glog.V(1).Infof("Completed topic data purge") - return nil -} - -// TopicRetentionConfig represents a topic with its retention configuration -type TopicRetentionConfig struct { - TopicName string - Namespace string - Name string - RetentionSeconds int64 -} - -// getTopicsWithRetention retrieves all topics that have retention enabled -func (p *TopicRetentionPurger) getTopicsWithRetention() ([]TopicRetentionConfig, error) { - var topicsWithRetention []TopicRetentionConfig - - // Find broker leader to get topics - brokerLeader, err := p.adminServer.findBrokerLeader() - if err != nil { - return nil, fmt.Errorf("failed to find broker leader: %w", err) - } - - // Get all topics from the broker - err = p.adminServer.withBrokerClient(brokerLeader, func(client mq_pb.SeaweedMessagingClient) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.ListTopics(ctx, &mq_pb.ListTopicsRequest{}) - if err != nil { - return err - } - - // Check each topic for retention configuration - for _, pbTopic := range resp.Topics { - configResp, err := client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{ - Topic: pbTopic, - }) - if err != nil { - glog.Warningf("Failed to get configuration for topic %s.%s: %v", pbTopic.Namespace, pbTopic.Name, err) - continue - } - - // Check if retention is enabled - if configResp.Retention != nil && configResp.Retention.Enabled && configResp.Retention.RetentionSeconds > 0 { - topicRetention := TopicRetentionConfig{ - TopicName: fmt.Sprintf("%s.%s", pbTopic.Namespace, pbTopic.Name), - Namespace: pbTopic.Namespace, - Name: pbTopic.Name, - RetentionSeconds: configResp.Retention.RetentionSeconds, - } - topicsWithRetention = append(topicsWithRetention, topicRetention) - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - return topicsWithRetention, nil -} - -// purgeTopicData purges expired data for a specific topic -func (p *TopicRetentionPurger) purgeTopicData(topicRetention TopicRetentionConfig) error { - glog.V(1).Infof("Purging expired data for topic %s with retention %d seconds", topicRetention.TopicName, topicRetention.RetentionSeconds) - - // Calculate cutoff time - cutoffTime := time.Now().Add(-time.Duration(topicRetention.RetentionSeconds) * time.Second) - - // Get topic directory - topicObj := topic.NewTopic(topicRetention.Namespace, topicRetention.Name) - topicDir := topicObj.Dir() - - var purgedDirs []string - - err := p.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - // List all version directories under the topic directory - versionStream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: topicDir, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - return fmt.Errorf("failed to list topic directory %s: %v", topicDir, err) - } - - var versionDirs []VersionDirInfo - - // Collect all version directories - for { - versionResp, err := versionStream.Recv() - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("failed to receive version entries: %w", err) - } - - // Only process directories that are versions (start with "v") - if versionResp.Entry.IsDirectory && strings.HasPrefix(versionResp.Entry.Name, "v") { - versionTime, err := p.parseVersionTime(versionResp.Entry.Name) - if err != nil { - glog.Warningf("Failed to parse version time from %s: %v", versionResp.Entry.Name, err) - continue - } - - versionDirs = append(versionDirs, VersionDirInfo{ - Name: versionResp.Entry.Name, - VersionTime: versionTime, - ModTime: time.Unix(versionResp.Entry.Attributes.Mtime, 0), - }) - } - } - - // Sort version directories by time (oldest first) - sort.Slice(versionDirs, func(i, j int) bool { - return versionDirs[i].VersionTime.Before(versionDirs[j].VersionTime) - }) - - // Keep at least the most recent version directory, even if it's expired - if len(versionDirs) <= 1 { - glog.V(1).Infof("Topic %s has %d version directories, keeping all", topicRetention.TopicName, len(versionDirs)) - return nil - } - - // Purge expired directories (keep the most recent one) - for i := 0; i < len(versionDirs)-1; i++ { - versionDir := versionDirs[i] - - // Check if this version directory is expired - if versionDir.VersionTime.Before(cutoffTime) { - dirPath := filepath.Join(topicDir, versionDir.Name) - - // Delete the entire version directory - err := p.deleteDirectoryRecursively(client, dirPath) - if err != nil { - glog.Errorf("Failed to delete expired directory %s: %v", dirPath, err) - } else { - purgedDirs = append(purgedDirs, dirPath) - glog.V(1).Infof("Purged expired directory: %s (created: %s)", dirPath, versionDir.VersionTime.Format("2006-01-02 15:04:05")) - } - } - } - - return nil - }) - - if err != nil { - return err - } - - if len(purgedDirs) > 0 { - glog.V(0).Infof("Purged %d expired directories for topic %s", len(purgedDirs), topicRetention.TopicName) - } - - return nil -} - -// VersionDirInfo represents a version directory with its timestamp -type VersionDirInfo struct { - Name string - VersionTime time.Time - ModTime time.Time -} - -// parseVersionTime parses the version directory name to extract the timestamp -// Version format: v2025-01-10-05-44-34 -func (p *TopicRetentionPurger) parseVersionTime(versionName string) (time.Time, error) { - // Remove the 'v' prefix - if !strings.HasPrefix(versionName, "v") { - return time.Time{}, fmt.Errorf("invalid version format: %s", versionName) - } - - timeStr := versionName[1:] // Remove 'v' - - // Parse the time format: 2025-01-10-05-44-34 - versionTime, err := time.Parse("2006-01-02-15-04-05", timeStr) - if err != nil { - return time.Time{}, fmt.Errorf("failed to parse version time %s: %v", timeStr, err) - } - - return versionTime, nil -} - -// deleteDirectoryRecursively deletes a directory and all its contents -func (p *TopicRetentionPurger) deleteDirectoryRecursively(client filer_pb.SeaweedFilerClient, dirPath string) error { - // List all entries in the directory - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: dirPath, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, - }) - if err != nil { - return fmt.Errorf("failed to list directory %s: %v", dirPath, err) - } - - // Delete all entries - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("failed to receive entries: %w", err) - } - - entryPath := filepath.Join(dirPath, resp.Entry.Name) - - if resp.Entry.IsDirectory { - // Recursively delete subdirectory - err = p.deleteDirectoryRecursively(client, entryPath) - if err != nil { - return fmt.Errorf("failed to delete subdirectory %s: %v", entryPath, err) - } - } else { - // Delete file - _, err = client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: dirPath, - Name: resp.Entry.Name, - }) - if err != nil { - return fmt.Errorf("failed to delete file %s: %v", entryPath, err) - } - } - } - - // Delete the directory itself - parentDir := filepath.Dir(dirPath) - dirName := filepath.Base(dirPath) - - _, err = client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: parentDir, - Name: dirName, - }) - if err != nil { - return fmt.Errorf("failed to delete directory %s: %v", dirPath, err) - } - - return nil -} diff --git a/weed/admin/dash/types.go b/weed/admin/dash/types.go deleted file mode 100644 index 8b793bdbd..000000000 --- a/weed/admin/dash/types.go +++ /dev/null @@ -1,546 +0,0 @@ -package dash - -import ( - "time" - - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// Core cluster topology structures -type ClusterTopology struct { - Masters []MasterNode `json:"masters"` - DataCenters []DataCenter `json:"datacenters"` - VolumeServers []VolumeServer `json:"volume_servers"` - TotalVolumes int `json:"total_volumes"` - TotalFiles int64 `json:"total_files"` - TotalSize int64 `json:"total_size"` - UpdatedAt time.Time `json:"updated_at"` -} - -type MasterNode struct { - Address string `json:"address"` - IsLeader bool `json:"is_leader"` -} - -type DataCenter struct { - ID string `json:"id"` - Racks []Rack `json:"racks"` -} - -type Rack struct { - ID string `json:"id"` - Nodes []VolumeServer `json:"nodes"` -} - -type VolumeServer struct { - ID string `json:"id"` - Address string `json:"address"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - PublicURL string `json:"public_url"` - Volumes int `json:"volumes"` - MaxVolumes int `json:"max_volumes"` - DiskUsage int64 `json:"disk_usage"` - DiskCapacity int64 `json:"disk_capacity"` - LastHeartbeat time.Time `json:"last_heartbeat"` - - // EC shard information - EcVolumes int `json:"ec_volumes"` // Number of EC volumes this server has shards for - EcShards int `json:"ec_shards"` // Total number of EC shards on this server - EcShardDetails []VolumeServerEcInfo `json:"ec_shard_details"` // Detailed EC shard information -} - -// VolumeServerEcInfo represents EC shard information for a specific volume on a server -type VolumeServerEcInfo struct { - VolumeID uint32 `json:"volume_id"` - Collection string `json:"collection"` - ShardCount int `json:"shard_count"` // Number of shards this server has for this volume - EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has - ShardNumbers []int `json:"shard_numbers"` // List of actual shard numbers this server has - ShardSizes map[int]int64 `json:"shard_sizes"` // Map from shard number to size in bytes - TotalSize int64 `json:"total_size"` // Total size of all shards on this server for this volume -} - -// S3 Bucket management structures -type S3Bucket struct { - Name string `json:"name"` - CreatedAt time.Time `json:"created_at"` - Size int64 `json:"size"` - ObjectCount int64 `json:"object_count"` - LastModified time.Time `json:"last_modified"` - Quota int64 `json:"quota"` // Quota in bytes, 0 means no quota - QuotaEnabled bool `json:"quota_enabled"` // Whether quota is enabled - VersioningEnabled bool `json:"versioning_enabled"` // Whether versioning is enabled - ObjectLockEnabled bool `json:"object_lock_enabled"` // Whether object lock is enabled - ObjectLockMode string `json:"object_lock_mode"` // Object lock mode: "GOVERNANCE" or "COMPLIANCE" - ObjectLockDuration int32 `json:"object_lock_duration"` // Default retention duration in days -} - -type S3Object struct { - Key string `json:"key"` - Size int64 `json:"size"` - LastModified time.Time `json:"last_modified"` - ETag string `json:"etag"` - StorageClass string `json:"storage_class"` -} - -type BucketDetails struct { - Bucket S3Bucket `json:"bucket"` - Objects []S3Object `json:"objects"` - TotalSize int64 `json:"total_size"` - TotalCount int64 `json:"total_count"` - UpdatedAt time.Time `json:"updated_at"` -} - -// ObjectStoreUser is defined in admin_data.go - -// Volume management structures -type VolumeWithTopology struct { - *master_pb.VolumeInformationMessage - Server string `json:"server"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` -} - -type ClusterVolumesData struct { - Username string `json:"username"` - Volumes []VolumeWithTopology `json:"volumes"` - TotalVolumes int `json:"total_volumes"` - TotalSize int64 `json:"total_size"` - VolumeSizeLimit uint64 `json:"volume_size_limit"` - LastUpdated time.Time `json:"last_updated"` - - // Pagination - CurrentPage int `json:"current_page"` - TotalPages int `json:"total_pages"` - PageSize int `json:"page_size"` - - // Sorting - SortBy string `json:"sort_by"` - SortOrder string `json:"sort_order"` - - // Statistics - DataCenterCount int `json:"datacenter_count"` - RackCount int `json:"rack_count"` - DiskTypeCount int `json:"disk_type_count"` - CollectionCount int `json:"collection_count"` - VersionCount int `json:"version_count"` - - // Conditional display flags - ShowDataCenterColumn bool `json:"show_datacenter_column"` - ShowRackColumn bool `json:"show_rack_column"` - ShowDiskTypeColumn bool `json:"show_disk_type_column"` - ShowCollectionColumn bool `json:"show_collection_column"` - ShowVersionColumn bool `json:"show_version_column"` - - // Single values when only one exists - SingleDataCenter string `json:"single_datacenter"` - SingleRack string `json:"single_rack"` - SingleDiskType string `json:"single_disk_type"` - SingleCollection string `json:"single_collection"` - SingleVersion string `json:"single_version"` - - // All versions when multiple exist - AllVersions []string `json:"all_versions"` - - // All disk types when multiple exist - AllDiskTypes []string `json:"all_disk_types"` - - // Filtering - FilterCollection string `json:"filter_collection"` -} - -// ClusterEcShardsData represents the data for the cluster EC shards page -type ClusterEcShardsData struct { - Username string `json:"username"` - EcShards []EcShardWithInfo `json:"ec_shards"` - TotalShards int `json:"total_shards"` - TotalVolumes int `json:"total_volumes"` - LastUpdated time.Time `json:"last_updated"` - - // Pagination - CurrentPage int `json:"current_page"` - TotalPages int `json:"total_pages"` - PageSize int `json:"page_size"` - - // Sorting - SortBy string `json:"sort_by"` - SortOrder string `json:"sort_order"` - - // Statistics - DataCenterCount int `json:"datacenter_count"` - RackCount int `json:"rack_count"` - CollectionCount int `json:"collection_count"` - - // Conditional display flags - ShowDataCenterColumn bool `json:"show_datacenter_column"` - ShowRackColumn bool `json:"show_rack_column"` - ShowCollectionColumn bool `json:"show_collection_column"` - - // Single values when only one exists - SingleDataCenter string `json:"single_datacenter"` - SingleRack string `json:"single_rack"` - SingleCollection string `json:"single_collection"` - - // Filtering - FilterCollection string `json:"filter_collection"` - - // EC specific statistics - ShardsPerVolume map[uint32]int `json:"shards_per_volume"` // VolumeID -> shard count - VolumesWithAllShards int `json:"volumes_with_all_shards"` // Volumes with all 14 shards - VolumesWithMissingShards int `json:"volumes_with_missing_shards"` // Volumes missing shards -} - -// EcShardWithInfo represents an EC shard with its topology information -type EcShardWithInfo struct { - VolumeID uint32 `json:"volume_id"` - ShardID uint32 `json:"shard_id"` - Collection string `json:"collection"` - Size uint64 `json:"size"` - Server string `json:"server"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - DiskType string `json:"disk_type"` - ModifiedTime int64 `json:"modified_time"` - - // EC specific fields - EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has - ShardCount int `json:"shard_count"` // Number of shards this server has for this volume - IsComplete bool `json:"is_complete"` // True if this volume has all 14 shards - MissingShards []int `json:"missing_shards"` // List of missing shard IDs -} - -// EcVolumeDetailsData represents the data for the EC volume details page -type EcVolumeDetailsData struct { - Username string `json:"username"` - VolumeID uint32 `json:"volume_id"` - Collection string `json:"collection"` - Shards []EcShardWithInfo `json:"shards"` - TotalShards int `json:"total_shards"` - IsComplete bool `json:"is_complete"` - MissingShards []int `json:"missing_shards"` - DataCenters []string `json:"datacenters"` - Servers []string `json:"servers"` - LastUpdated time.Time `json:"last_updated"` - - // Sorting - SortBy string `json:"sort_by"` - SortOrder string `json:"sort_order"` -} - -type VolumeDetailsData struct { - Volume VolumeWithTopology `json:"volume"` - Replicas []VolumeWithTopology `json:"replicas"` - VolumeSizeLimit uint64 `json:"volume_size_limit"` - ReplicationCount int `json:"replication_count"` - LastUpdated time.Time `json:"last_updated"` -} - -// Collection management structures -type CollectionInfo struct { - Name string `json:"name"` - DataCenter string `json:"datacenter"` - VolumeCount int `json:"volume_count"` - EcVolumeCount int `json:"ec_volume_count"` - FileCount int64 `json:"file_count"` - TotalSize int64 `json:"total_size"` - DiskTypes []string `json:"disk_types"` -} - -type ClusterCollectionsData struct { - Username string `json:"username"` - Collections []CollectionInfo `json:"collections"` - TotalCollections int `json:"total_collections"` - TotalVolumes int `json:"total_volumes"` - TotalEcVolumes int `json:"total_ec_volumes"` - TotalFiles int64 `json:"total_files"` - TotalSize int64 `json:"total_size"` - LastUpdated time.Time `json:"last_updated"` -} - -// Master and Filer management structures -type MasterInfo struct { - Address string `json:"address"` - IsLeader bool `json:"is_leader"` - Suffrage string `json:"suffrage"` -} - -type ClusterMastersData struct { - Username string `json:"username"` - Masters []MasterInfo `json:"masters"` - TotalMasters int `json:"total_masters"` - LeaderCount int `json:"leader_count"` - LastUpdated time.Time `json:"last_updated"` -} - -type FilerInfo struct { - Address string `json:"address"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - Version string `json:"version"` - CreatedAt time.Time `json:"created_at"` -} - -type ClusterFilersData struct { - Username string `json:"username"` - Filers []FilerInfo `json:"filers"` - TotalFilers int `json:"total_filers"` - LastUpdated time.Time `json:"last_updated"` -} - -type MessageBrokerInfo struct { - Address string `json:"address"` - DataCenter string `json:"datacenter"` - Rack string `json:"rack"` - Version string `json:"version"` - CreatedAt time.Time `json:"created_at"` -} - -type ClusterBrokersData struct { - Username string `json:"username"` - Brokers []MessageBrokerInfo `json:"brokers"` - TotalBrokers int `json:"total_brokers"` - LastUpdated time.Time `json:"last_updated"` -} - -type TopicInfo struct { - Name string `json:"name"` - Partitions int `json:"partitions"` - Subscribers int `json:"subscribers"` - MessageCount int64 `json:"message_count"` - TotalSize int64 `json:"total_size"` - LastMessage time.Time `json:"last_message"` - CreatedAt time.Time `json:"created_at"` - Retention TopicRetentionInfo `json:"retention"` -} - -type TopicsData struct { - Username string `json:"username"` - Topics []TopicInfo `json:"topics"` - TotalTopics int `json:"total_topics"` - TotalMessages int64 `json:"total_messages"` - TotalSize int64 `json:"total_size"` - LastUpdated time.Time `json:"last_updated"` -} - -type SubscriberInfo struct { - Name string `json:"name"` - Topic string `json:"topic"` - ConsumerGroup string `json:"consumer_group"` - Status string `json:"status"` - LastSeen time.Time `json:"last_seen"` - MessageCount int64 `json:"message_count"` - CreatedAt time.Time `json:"created_at"` -} - -type SubscribersData struct { - Username string `json:"username"` - Subscribers []SubscriberInfo `json:"subscribers"` - TotalSubscribers int `json:"total_subscribers"` - ActiveSubscribers int `json:"active_subscribers"` - LastUpdated time.Time `json:"last_updated"` -} - -// Topic Details structures -type PartitionInfo struct { - ID int32 `json:"id"` - LeaderBroker string `json:"leader_broker"` - FollowerBroker string `json:"follower_broker"` - MessageCount int64 `json:"message_count"` - TotalSize int64 `json:"total_size"` - LastDataTime time.Time `json:"last_data_time"` - CreatedAt time.Time `json:"created_at"` -} - -type SchemaFieldInfo struct { - Name string `json:"name"` - Type string `json:"type"` - Required bool `json:"required"` -} - -type PublisherInfo struct { - PublisherName string `json:"publisher_name"` - ClientID string `json:"client_id"` - PartitionID int32 `json:"partition_id"` - Broker string `json:"broker"` - ConnectTime time.Time `json:"connect_time"` - LastSeenTime time.Time `json:"last_seen_time"` - IsActive bool `json:"is_active"` - LastPublishedOffset int64 `json:"last_published_offset"` - LastAckedOffset int64 `json:"last_acked_offset"` -} - -type TopicSubscriberInfo struct { - ConsumerGroup string `json:"consumer_group"` - ConsumerID string `json:"consumer_id"` - ClientID string `json:"client_id"` - PartitionID int32 `json:"partition_id"` - Broker string `json:"broker"` - ConnectTime time.Time `json:"connect_time"` - LastSeenTime time.Time `json:"last_seen_time"` - IsActive bool `json:"is_active"` - CurrentOffset int64 `json:"current_offset"` // last acknowledged offset - LastReceivedOffset int64 `json:"last_received_offset"` // last received offset -} - -type ConsumerGroupOffsetInfo struct { - ConsumerGroup string `json:"consumer_group"` - PartitionID int32 `json:"partition_id"` - Offset int64 `json:"offset"` - LastUpdated time.Time `json:"last_updated"` -} - -type TopicRetentionInfo struct { - Enabled bool `json:"enabled"` - RetentionSeconds int64 `json:"retention_seconds"` - DisplayValue int32 `json:"display_value"` // for UI rendering - DisplayUnit string `json:"display_unit"` // for UI rendering -} - -type TopicDetailsData struct { - Username string `json:"username"` - TopicName string `json:"topic_name"` - Namespace string `json:"namespace"` - Name string `json:"name"` - Partitions []PartitionInfo `json:"partitions"` - KeySchema []SchemaFieldInfo `json:"key_schema"` // Schema fields for keys - ValueSchema []SchemaFieldInfo `json:"value_schema"` // Schema fields for values - Publishers []PublisherInfo `json:"publishers"` - Subscribers []TopicSubscriberInfo `json:"subscribers"` - ConsumerGroupOffsets []ConsumerGroupOffsetInfo `json:"consumer_group_offsets"` - Retention TopicRetentionInfo `json:"retention"` - MessageCount int64 `json:"message_count"` - TotalSize int64 `json:"total_size"` - CreatedAt time.Time `json:"created_at"` - LastUpdated time.Time `json:"last_updated"` -} - -// Volume server management structures -type ClusterVolumeServersData struct { - Username string `json:"username"` - VolumeServers []VolumeServer `json:"volume_servers"` - TotalVolumeServers int `json:"total_volume_servers"` - TotalVolumes int `json:"total_volumes"` - TotalCapacity int64 `json:"total_capacity"` - LastUpdated time.Time `json:"last_updated"` -} - -// Type aliases for maintenance package types to support existing code -type MaintenanceTask = maintenance.MaintenanceTask -type MaintenanceTaskType = maintenance.MaintenanceTaskType -type MaintenanceTaskStatus = maintenance.MaintenanceTaskStatus -type MaintenanceTaskPriority = maintenance.MaintenanceTaskPriority -type MaintenanceWorker = maintenance.MaintenanceWorker -type MaintenanceConfig = maintenance.MaintenanceConfig -type MaintenanceStats = maintenance.MaintenanceStats -type MaintenanceConfigData = maintenance.MaintenanceConfigData -type MaintenanceQueueData = maintenance.MaintenanceQueueData -type QueueStats = maintenance.QueueStats -type WorkerDetailsData = maintenance.WorkerDetailsData -type WorkerPerformance = maintenance.WorkerPerformance - -// GetTaskIcon returns the icon CSS class for a task type from its UI provider -func GetTaskIcon(taskType MaintenanceTaskType) string { - return maintenance.GetTaskIcon(taskType) -} - -// Status constants (these are still static) -const ( - TaskStatusPending = maintenance.TaskStatusPending - TaskStatusAssigned = maintenance.TaskStatusAssigned - TaskStatusInProgress = maintenance.TaskStatusInProgress - TaskStatusCompleted = maintenance.TaskStatusCompleted - TaskStatusFailed = maintenance.TaskStatusFailed - TaskStatusCancelled = maintenance.TaskStatusCancelled - - PriorityLow = maintenance.PriorityLow - PriorityNormal = maintenance.PriorityNormal - PriorityHigh = maintenance.PriorityHigh - PriorityCritical = maintenance.PriorityCritical -) - -// Helper functions from maintenance package -var DefaultMaintenanceConfig = maintenance.DefaultMaintenanceConfig - -// MaintenanceWorkersData represents the data for the maintenance workers page -type MaintenanceWorkersData struct { - Workers []*WorkerDetailsData `json:"workers"` - ActiveWorkers int `json:"active_workers"` - BusyWorkers int `json:"busy_workers"` - TotalLoad int `json:"total_load"` - LastUpdated time.Time `json:"last_updated"` -} - -// Maintenance system types are now in weed/admin/maintenance package - -// EcVolumeWithShards represents an EC volume with its shard distribution -type EcVolumeWithShards struct { - VolumeID uint32 `json:"volume_id"` - Collection string `json:"collection"` - TotalShards int `json:"total_shards"` - IsComplete bool `json:"is_complete"` - MissingShards []int `json:"missing_shards"` - ShardLocations map[int]string `json:"shard_locations"` // shardId -> server - ShardSizes map[int]int64 `json:"shard_sizes"` // shardId -> size in bytes - DataCenters []string `json:"data_centers"` - Servers []string `json:"servers"` - Racks []string `json:"racks"` - ModifiedTime int64 `json:"modified_time"` -} - -// ClusterEcVolumesData represents the response for clustered EC volumes view -type ClusterEcVolumesData struct { - EcVolumes []EcVolumeWithShards `json:"ec_volumes"` - TotalVolumes int `json:"total_volumes"` - LastUpdated time.Time `json:"last_updated"` - - // Pagination - Page int `json:"page"` - PageSize int `json:"page_size"` - TotalPages int `json:"total_pages"` - - // Sorting - SortBy string `json:"sort_by"` - SortOrder string `json:"sort_order"` - - // Filtering - Collection string `json:"collection"` - - // Conditional display flags - ShowDataCenterColumn bool `json:"show_datacenter_column"` - ShowRackColumn bool `json:"show_rack_column"` - ShowCollectionColumn bool `json:"show_collection_column"` - - // Statistics - CompleteVolumes int `json:"complete_volumes"` - IncompleteVolumes int `json:"incomplete_volumes"` - TotalShards int `json:"total_shards"` - - // User context - Username string `json:"username"` -} - -// Collection detail page structures -type CollectionDetailsData struct { - Username string `json:"username"` - CollectionName string `json:"collection_name"` - RegularVolumes []VolumeWithTopology `json:"regular_volumes"` - EcVolumes []EcVolumeWithShards `json:"ec_volumes"` - TotalVolumes int `json:"total_volumes"` - TotalEcVolumes int `json:"total_ec_volumes"` - TotalFiles int64 `json:"total_files"` - TotalSize int64 `json:"total_size"` - DataCenters []string `json:"data_centers"` - DiskTypes []string `json:"disk_types"` - LastUpdated time.Time `json:"last_updated"` - - // Pagination - Page int `json:"page"` - PageSize int `json:"page_size"` - TotalPages int `json:"total_pages"` - - // Sorting - SortBy string `json:"sort_by"` - SortOrder string `json:"sort_order"` -} diff --git a/weed/admin/dash/user_management.go b/weed/admin/dash/user_management.go deleted file mode 100644 index 747c398d7..000000000 --- a/weed/admin/dash/user_management.go +++ /dev/null @@ -1,338 +0,0 @@ -package dash - -import ( - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" -) - -// CreateObjectStoreUser creates a new user using the credential manager -func (s *AdminServer) CreateObjectStoreUser(req CreateUserRequest) (*ObjectStoreUser, error) { - if s.credentialManager == nil { - return nil, fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Create new identity - newIdentity := &iam_pb.Identity{ - Name: req.Username, - Actions: req.Actions, - } - - // Add account if email is provided - if req.Email != "" { - newIdentity.Account = &iam_pb.Account{ - Id: generateAccountId(), - DisplayName: req.Username, - EmailAddress: req.Email, - } - } - - // Generate access key if requested - var accessKey, secretKey string - if req.GenerateKey { - accessKey = generateAccessKey() - secretKey = generateSecretKey() - newIdentity.Credentials = []*iam_pb.Credential{ - { - AccessKey: accessKey, - SecretKey: secretKey, - }, - } - } - - // Create user using credential manager - err := s.credentialManager.CreateUser(ctx, newIdentity) - if err != nil { - if err == credential.ErrUserAlreadyExists { - return nil, fmt.Errorf("user %s already exists", req.Username) - } - return nil, fmt.Errorf("failed to create user: %w", err) - } - - // Return created user - user := &ObjectStoreUser{ - Username: req.Username, - Email: req.Email, - AccessKey: accessKey, - SecretKey: secretKey, - Permissions: req.Actions, - } - - return user, nil -} - -// UpdateObjectStoreUser updates an existing user -func (s *AdminServer) UpdateObjectStoreUser(username string, req UpdateUserRequest) (*ObjectStoreUser, error) { - if s.credentialManager == nil { - return nil, fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Get existing user - identity, err := s.credentialManager.GetUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return nil, fmt.Errorf("user %s not found", username) - } - return nil, fmt.Errorf("failed to get user: %w", err) - } - - // Create updated identity - updatedIdentity := &iam_pb.Identity{ - Name: identity.Name, - Account: identity.Account, - Credentials: identity.Credentials, - Actions: identity.Actions, - } - - // Update actions if provided - if len(req.Actions) > 0 { - updatedIdentity.Actions = req.Actions - } - - // Update email if provided - if req.Email != "" { - if updatedIdentity.Account == nil { - updatedIdentity.Account = &iam_pb.Account{ - Id: generateAccountId(), - DisplayName: username, - } - } - updatedIdentity.Account.EmailAddress = req.Email - } - - // Update user using credential manager - err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity) - if err != nil { - return nil, fmt.Errorf("failed to update user: %w", err) - } - - // Return updated user - user := &ObjectStoreUser{ - Username: username, - Email: req.Email, - Permissions: updatedIdentity.Actions, - } - - // Get first access key for display - if len(updatedIdentity.Credentials) > 0 { - user.AccessKey = updatedIdentity.Credentials[0].AccessKey - user.SecretKey = updatedIdentity.Credentials[0].SecretKey - } - - return user, nil -} - -// DeleteObjectStoreUser deletes a user using the credential manager -func (s *AdminServer) DeleteObjectStoreUser(username string) error { - if s.credentialManager == nil { - return fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Delete user using credential manager - err := s.credentialManager.DeleteUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return fmt.Errorf("user %s not found", username) - } - return fmt.Errorf("failed to delete user: %w", err) - } - - return nil -} - -// GetObjectStoreUserDetails returns detailed information about a user -func (s *AdminServer) GetObjectStoreUserDetails(username string) (*UserDetails, error) { - if s.credentialManager == nil { - return nil, fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Get user using credential manager - identity, err := s.credentialManager.GetUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return nil, fmt.Errorf("user %s not found", username) - } - return nil, fmt.Errorf("failed to get user: %w", err) - } - - details := &UserDetails{ - Username: username, - Actions: identity.Actions, - } - - // Set email from account if available - if identity.Account != nil { - details.Email = identity.Account.EmailAddress - } - - // Convert credentials to access key info - for _, cred := range identity.Credentials { - details.AccessKeys = append(details.AccessKeys, AccessKeyInfo{ - AccessKey: cred.AccessKey, - SecretKey: cred.SecretKey, - CreatedAt: time.Now().AddDate(0, -1, 0), // Mock creation date - }) - } - - return details, nil -} - -// CreateAccessKey creates a new access key for a user -func (s *AdminServer) CreateAccessKey(username string) (*AccessKeyInfo, error) { - if s.credentialManager == nil { - return nil, fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Check if user exists - _, err := s.credentialManager.GetUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return nil, fmt.Errorf("user %s not found", username) - } - return nil, fmt.Errorf("failed to get user: %w", err) - } - - // Generate new access key - accessKey := generateAccessKey() - secretKey := generateSecretKey() - - credential := &iam_pb.Credential{ - AccessKey: accessKey, - SecretKey: secretKey, - } - - // Create access key using credential manager - err = s.credentialManager.CreateAccessKey(ctx, username, credential) - if err != nil { - return nil, fmt.Errorf("failed to create access key: %w", err) - } - - return &AccessKeyInfo{ - AccessKey: accessKey, - SecretKey: secretKey, - CreatedAt: time.Now(), - }, nil -} - -// DeleteAccessKey deletes an access key for a user -func (s *AdminServer) DeleteAccessKey(username, accessKeyId string) error { - if s.credentialManager == nil { - return fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Delete access key using credential manager - err := s.credentialManager.DeleteAccessKey(ctx, username, accessKeyId) - if err != nil { - if err == credential.ErrUserNotFound { - return fmt.Errorf("user %s not found", username) - } - if err == credential.ErrAccessKeyNotFound { - return fmt.Errorf("access key %s not found for user %s", accessKeyId, username) - } - return fmt.Errorf("failed to delete access key: %w", err) - } - - return nil -} - -// GetUserPolicies returns the policies for a user (actions) -func (s *AdminServer) GetUserPolicies(username string) ([]string, error) { - if s.credentialManager == nil { - return nil, fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Get user using credential manager - identity, err := s.credentialManager.GetUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return nil, fmt.Errorf("user %s not found", username) - } - return nil, fmt.Errorf("failed to get user: %w", err) - } - - return identity.Actions, nil -} - -// UpdateUserPolicies updates the policies (actions) for a user -func (s *AdminServer) UpdateUserPolicies(username string, actions []string) error { - if s.credentialManager == nil { - return fmt.Errorf("credential manager not available") - } - - ctx := context.Background() - - // Get existing user - identity, err := s.credentialManager.GetUser(ctx, username) - if err != nil { - if err == credential.ErrUserNotFound { - return fmt.Errorf("user %s not found", username) - } - return fmt.Errorf("failed to get user: %w", err) - } - - // Create updated identity with new actions - updatedIdentity := &iam_pb.Identity{ - Name: identity.Name, - Account: identity.Account, - Credentials: identity.Credentials, - Actions: actions, - } - - // Update user using credential manager - err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity) - if err != nil { - return fmt.Errorf("failed to update user policies: %w", err) - } - - return nil -} - -// Helper functions for generating keys and IDs -func generateAccessKey() string { - // Generate 20-character access key (AWS standard) - const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - b := make([]byte, 20) - for i := range b { - b[i] = charset[randomInt(len(charset))] - } - return string(b) -} - -func generateSecretKey() string { - // Generate 40-character secret key (AWS standard) - b := make([]byte, 30) // 30 bytes = 40 characters in base64 - rand.Read(b) - return base64.StdEncoding.EncodeToString(b) -} - -func generateAccountId() string { - // Generate 12-digit account ID - b := make([]byte, 8) - rand.Read(b) - return fmt.Sprintf("%012d", b[0]<<24|b[1]<<16|b[2]<<8|b[3]) -} - -func randomInt(max int) int { - b := make([]byte, 1) - rand.Read(b) - return int(b[0]) % max -} diff --git a/weed/admin/dash/volume_management.go b/weed/admin/dash/volume_management.go deleted file mode 100644 index c0be958a9..000000000 --- a/weed/admin/dash/volume_management.go +++ /dev/null @@ -1,561 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "math" - "sort" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -// GetClusterVolumes retrieves cluster volumes data with pagination, sorting, and filtering -func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterVolumesData, error) { - // Set defaults - if page < 1 { - page = 1 - } - if pageSize < 1 || pageSize > 1000 { - pageSize = 100 - } - if sortBy == "" { - sortBy = "id" - } - if sortOrder == "" { - sortOrder = "asc" - } - var volumes []VolumeWithTopology - var totalSize int64 - var cachedTopologyInfo *master_pb.TopologyInfo - - // Get detailed volume information via gRPC - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - // Cache the topology info for reuse - cachedTopologyInfo = resp.TopologyInfo - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - // Process regular volumes - for _, volInfo := range diskInfo.VolumeInfos { - volume := VolumeWithTopology{ - VolumeInformationMessage: volInfo, - Server: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - } - volumes = append(volumes, volume) - totalSize += int64(volInfo.Size) - } - - // Process EC shards in the same loop - for _, ecShardInfo := range diskInfo.EcShardInfos { - // Add all shard sizes for this EC volume - for _, shardSize := range ecShardInfo.ShardSizes { - totalSize += shardSize - } - } - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Filter by collection if specified - if collection != "" { - var filteredVolumes []VolumeWithTopology - var filteredTotalSize int64 - var filteredEcTotalSize int64 - - for _, volume := range volumes { - if matchesCollection(volume.Collection, collection) { - filteredVolumes = append(filteredVolumes, volume) - filteredTotalSize += int64(volume.Size) - } - } - - // Filter EC shard sizes by collection using already processed data - // This reuses the topology traversal done above (lines 43-71) to avoid a second pass - if cachedTopologyInfo != nil { - for _, dc := range cachedTopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - for _, ecShardInfo := range diskInfo.EcShardInfos { - if matchesCollection(ecShardInfo.Collection, collection) { - // Add all shard sizes for this EC volume - for _, shardSize := range ecShardInfo.ShardSizes { - filteredEcTotalSize += shardSize - } - } - } - } - } - } - } - } - - volumes = filteredVolumes - totalSize = filteredTotalSize + filteredEcTotalSize - } - - // Calculate unique data center, rack, disk type, collection, and version counts from filtered volumes - dataCenterMap := make(map[string]bool) - rackMap := make(map[string]bool) - diskTypeMap := make(map[string]bool) - collectionMap := make(map[string]bool) - versionMap := make(map[string]bool) - for _, volume := range volumes { - if volume.DataCenter != "" { - dataCenterMap[volume.DataCenter] = true - } - if volume.Rack != "" { - rackMap[volume.Rack] = true - } - diskType := volume.DiskType - if diskType == "" { - diskType = "hdd" // Default to hdd if not specified - } - diskTypeMap[diskType] = true - - // Handle collection for display purposes - collectionName := volume.Collection - if collectionName == "" { - collectionName = "default" - } - collectionMap[collectionName] = true - - versionMap[fmt.Sprintf("%d", volume.Version)] = true - } - dataCenterCount := len(dataCenterMap) - rackCount := len(rackMap) - diskTypeCount := len(diskTypeMap) - collectionCount := len(collectionMap) - versionCount := len(versionMap) - - // Sort volumes - s.sortVolumes(volumes, sortBy, sortOrder) - - // Get volume size limit from master - var volumeSizeLimit uint64 - err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return err - } - volumeSizeLimit = uint64(resp.VolumeSizeLimitMB) * 1024 * 1024 // Convert MB to bytes - return nil - }) - if err != nil { - // If we can't get the limit, set a default - volumeSizeLimit = 30 * 1024 * 1024 * 1024 // 30GB default - } - - // Calculate pagination - totalVolumes := len(volumes) - totalPages := (totalVolumes + pageSize - 1) / pageSize - if totalPages == 0 { - totalPages = 1 - } - - // Apply pagination - startIndex := (page - 1) * pageSize - endIndex := startIndex + pageSize - if startIndex >= totalVolumes { - volumes = []VolumeWithTopology{} - } else { - if endIndex > totalVolumes { - endIndex = totalVolumes - } - volumes = volumes[startIndex:endIndex] - } - - // Determine conditional display flags and extract single values - showDataCenterColumn := dataCenterCount > 1 - showRackColumn := rackCount > 1 - showDiskTypeColumn := diskTypeCount > 1 - showCollectionColumn := collectionCount > 1 && collection == "" // Hide column when filtering by collection - showVersionColumn := versionCount > 1 - - var singleDataCenter, singleRack, singleDiskType, singleCollection, singleVersion string - var allVersions, allDiskTypes []string - - if dataCenterCount == 1 { - for dc := range dataCenterMap { - singleDataCenter = dc - break - } - } - if rackCount == 1 { - for rack := range rackMap { - singleRack = rack - break - } - } - if diskTypeCount == 1 { - for diskType := range diskTypeMap { - singleDiskType = diskType - break - } - } else { - // Collect all disk types and sort them - for diskType := range diskTypeMap { - allDiskTypes = append(allDiskTypes, diskType) - } - sort.Strings(allDiskTypes) - } - if collectionCount == 1 { - for collection := range collectionMap { - singleCollection = collection - break - } - } - if versionCount == 1 { - for version := range versionMap { - singleVersion = "v" + version - break - } - } else { - // Collect all versions and sort them - for version := range versionMap { - allVersions = append(allVersions, "v"+version) - } - sort.Strings(allVersions) - } - - return &ClusterVolumesData{ - Volumes: volumes, - TotalVolumes: totalVolumes, - TotalSize: totalSize, - VolumeSizeLimit: volumeSizeLimit, - LastUpdated: time.Now(), - CurrentPage: page, - TotalPages: totalPages, - PageSize: pageSize, - SortBy: sortBy, - SortOrder: sortOrder, - DataCenterCount: dataCenterCount, - RackCount: rackCount, - DiskTypeCount: diskTypeCount, - CollectionCount: collectionCount, - VersionCount: versionCount, - ShowDataCenterColumn: showDataCenterColumn, - ShowRackColumn: showRackColumn, - ShowDiskTypeColumn: showDiskTypeColumn, - ShowCollectionColumn: showCollectionColumn, - ShowVersionColumn: showVersionColumn, - SingleDataCenter: singleDataCenter, - SingleRack: singleRack, - SingleDiskType: singleDiskType, - SingleCollection: singleCollection, - SingleVersion: singleVersion, - AllVersions: allVersions, - AllDiskTypes: allDiskTypes, - FilterCollection: collection, - }, nil -} - -// sortVolumes sorts the volumes slice based on the specified field and order -func (s *AdminServer) sortVolumes(volumes []VolumeWithTopology, sortBy string, sortOrder string) { - sort.Slice(volumes, func(i, j int) bool { - var less bool - - switch sortBy { - case "id": - less = volumes[i].Id < volumes[j].Id - case "server": - less = volumes[i].Server < volumes[j].Server - case "datacenter": - less = volumes[i].DataCenter < volumes[j].DataCenter - case "rack": - less = volumes[i].Rack < volumes[j].Rack - case "collection": - less = volumes[i].Collection < volumes[j].Collection - case "size": - less = volumes[i].Size < volumes[j].Size - case "filecount": - less = volumes[i].FileCount < volumes[j].FileCount - case "replication": - less = volumes[i].ReplicaPlacement < volumes[j].ReplicaPlacement - case "disktype": - less = volumes[i].DiskType < volumes[j].DiskType - case "version": - less = volumes[i].Version < volumes[j].Version - default: - less = volumes[i].Id < volumes[j].Id - } - - if sortOrder == "desc" { - return !less - } - return less - }) -} - -// GetVolumeDetails retrieves detailed information about a specific volume -func (s *AdminServer) GetVolumeDetails(volumeID int, server string) (*VolumeDetailsData, error) { - var primaryVolume VolumeWithTopology - var replicas []VolumeWithTopology - var volumeSizeLimit uint64 - var found bool - - // Find the volume and all its replicas in the cluster - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo != nil { - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - for _, diskInfo := range node.DiskInfos { - for _, volInfo := range diskInfo.VolumeInfos { - if int(volInfo.Id) == volumeID { - diskType := volInfo.DiskType - if diskType == "" { - diskType = "hdd" - } - - volume := VolumeWithTopology{ - VolumeInformationMessage: volInfo, - Server: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - } - - // If this is the requested server, it's the primary volume - if node.Id == server { - primaryVolume = volume - found = true - } else { - // This is a replica on another server - replicas = append(replicas, volume) - } - } - } - } - } - } - } - } - return nil - }) - - if err != nil { - return nil, err - } - - if !found { - return nil, fmt.Errorf("volume %d not found on server %s", volumeID, server) - } - - // Get volume size limit from master - err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return err - } - volumeSizeLimit = uint64(resp.VolumeSizeLimitMB) * 1024 * 1024 // Convert MB to bytes - return nil - }) - - if err != nil { - // If we can't get the limit, set a default - volumeSizeLimit = 30 * 1024 * 1024 * 1024 // 30GB default - } - - return &VolumeDetailsData{ - Volume: primaryVolume, - Replicas: replicas, - VolumeSizeLimit: volumeSizeLimit, - ReplicationCount: len(replicas) + 1, // Include the primary volume - LastUpdated: time.Now(), - }, nil -} - -// VacuumVolume performs a vacuum operation on a specific volume -func (s *AdminServer) VacuumVolume(volumeID int, server string) error { - // Validate volumeID range before converting to uint32 - if volumeID < 0 || uint64(volumeID) > math.MaxUint32 { - return fmt.Errorf("volume ID out of range: %d", volumeID) - } - return s.WithMasterClient(func(client master_pb.SeaweedClient) error { - _, err := client.VacuumVolume(context.Background(), &master_pb.VacuumVolumeRequest{ - // lgtm[go/incorrect-integer-conversion] - // Safe conversion: volumeID has been validated to be in range [0, 0xFFFFFFFF] above - VolumeId: uint32(volumeID), - GarbageThreshold: 0.0001, // A very low threshold to ensure all garbage is collected - Collection: "", // Empty for all collections - }) - return err - }) -} - -// GetClusterVolumeServers retrieves cluster volume servers data including EC shard information -func (s *AdminServer) GetClusterVolumeServers() (*ClusterVolumeServersData, error) { - var volumeServerMap map[string]*VolumeServer - - // Make only ONE VolumeList call and use it for both topology building AND EC shard processing - err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - // Get volume size limit from response, default to 30GB if not set - volumeSizeLimitMB := resp.VolumeSizeLimitMb - if volumeSizeLimitMB == 0 { - volumeSizeLimitMB = 30000 // default to 30000MB (30GB) - } - - // Build basic topology from the VolumeList response (replaces GetClusterTopology call) - volumeServerMap = make(map[string]*VolumeServer) - - if resp.TopologyInfo != nil { - // Process topology to build basic volume server info (similar to cluster_topology.go logic) - for _, dc := range resp.TopologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, node := range rack.DataNodeInfos { - // Initialize volume server if not exists - if volumeServerMap[node.Id] == nil { - volumeServerMap[node.Id] = &VolumeServer{ - Address: node.Id, - DataCenter: dc.Id, - Rack: rack.Id, - Volumes: 0, - DiskUsage: 0, - DiskCapacity: 0, - EcVolumes: 0, - EcShards: 0, - EcShardDetails: []VolumeServerEcInfo{}, - } - } - vs := volumeServerMap[node.Id] - - // Process EC shard information for this server at volume server level (not per-disk) - ecVolumeMap := make(map[uint32]*VolumeServerEcInfo) - // Temporary map to accumulate shard info across disks - ecShardAccumulator := make(map[uint32][]*master_pb.VolumeEcShardInformationMessage) - - // Process disk information - for _, diskInfo := range node.DiskInfos { - vs.DiskCapacity += int64(diskInfo.MaxVolumeCount) * int64(volumeSizeLimitMB) * 1024 * 1024 // Use actual volume size limit - - // Count regular volumes and calculate disk usage - for _, volInfo := range diskInfo.VolumeInfos { - vs.Volumes++ - vs.DiskUsage += int64(volInfo.Size) - } - - // Accumulate EC shard information across all disks for this volume server - for _, ecShardInfo := range diskInfo.EcShardInfos { - volumeId := ecShardInfo.Id - ecShardAccumulator[volumeId] = append(ecShardAccumulator[volumeId], ecShardInfo) - } - } - - // Process accumulated EC shard information per volume - for volumeId, ecShardInfos := range ecShardAccumulator { - if len(ecShardInfos) == 0 { - continue - } - - // Initialize EC volume info - ecInfo := &VolumeServerEcInfo{ - VolumeID: volumeId, - Collection: ecShardInfos[0].Collection, - ShardCount: 0, - EcIndexBits: 0, - ShardNumbers: []int{}, - ShardSizes: make(map[int]int64), - TotalSize: 0, - } - - // Merge EcIndexBits from all disks and collect shard sizes - allShardSizes := make(map[erasure_coding.ShardId]int64) - for _, ecShardInfo := range ecShardInfos { - ecInfo.EcIndexBits |= ecShardInfo.EcIndexBits - - // Collect shard sizes from this disk - shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) - shardBits.EachSetIndex(func(shardId erasure_coding.ShardId) { - if size, found := erasure_coding.GetShardSize(ecShardInfo, shardId); found { - allShardSizes[shardId] = size - } - }) - } - - // Process final merged shard information - finalShardBits := erasure_coding.ShardBits(ecInfo.EcIndexBits) - finalShardBits.EachSetIndex(func(shardId erasure_coding.ShardId) { - ecInfo.ShardCount++ - ecInfo.ShardNumbers = append(ecInfo.ShardNumbers, int(shardId)) - vs.EcShards++ - - // Add shard size if available - if shardSize, exists := allShardSizes[shardId]; exists { - ecInfo.ShardSizes[int(shardId)] = shardSize - ecInfo.TotalSize += shardSize - vs.DiskUsage += shardSize // Add EC shard size to total disk usage - } - }) - - ecVolumeMap[volumeId] = ecInfo - } - - // Convert EC volume map to slice and update volume server (after processing all disks) - for _, ecInfo := range ecVolumeMap { - vs.EcShardDetails = append(vs.EcShardDetails, *ecInfo) - vs.EcVolumes++ - } - } - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - // Convert map back to slice - var volumeServers []VolumeServer - for _, vs := range volumeServerMap { - volumeServers = append(volumeServers, *vs) - } - - var totalCapacity int64 - var totalVolumes int - for _, vs := range volumeServers { - totalCapacity += vs.DiskCapacity - totalVolumes += vs.Volumes - } - - return &ClusterVolumeServersData{ - VolumeServers: volumeServers, - TotalVolumeServers: len(volumeServers), - TotalVolumes: totalVolumes, - TotalCapacity: totalCapacity, - LastUpdated: time.Now(), - }, nil -} diff --git a/weed/admin/dash/worker_grpc_server.go b/weed/admin/dash/worker_grpc_server.go deleted file mode 100644 index 74410aab6..000000000 --- a/weed/admin/dash/worker_grpc_server.go +++ /dev/null @@ -1,627 +0,0 @@ -package dash - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" - "google.golang.org/grpc/peer" -) - -// WorkerGrpcServer implements the WorkerService gRPC interface -type WorkerGrpcServer struct { - worker_pb.UnimplementedWorkerServiceServer - adminServer *AdminServer - - // Worker connection management - connections map[string]*WorkerConnection - connMutex sync.RWMutex - - // Log request correlation - pendingLogRequests map[string]*LogRequestContext - logRequestsMutex sync.RWMutex - - // gRPC server - grpcServer *grpc.Server - listener net.Listener - running bool - stopChan chan struct{} -} - -// LogRequestContext tracks pending log requests -type LogRequestContext struct { - TaskID string - WorkerID string - ResponseCh chan *worker_pb.TaskLogResponse - Timeout time.Time -} - -// WorkerConnection represents an active worker connection -type WorkerConnection struct { - workerID string - stream worker_pb.WorkerService_WorkerStreamServer - lastSeen time.Time - capabilities []MaintenanceTaskType - address string - maxConcurrent int32 - outgoing chan *worker_pb.AdminMessage - ctx context.Context - cancel context.CancelFunc -} - -// NewWorkerGrpcServer creates a new gRPC server for worker connections -func NewWorkerGrpcServer(adminServer *AdminServer) *WorkerGrpcServer { - return &WorkerGrpcServer{ - adminServer: adminServer, - connections: make(map[string]*WorkerConnection), - pendingLogRequests: make(map[string]*LogRequestContext), - stopChan: make(chan struct{}), - } -} - -// StartWithTLS starts the gRPC server on the specified port with optional TLS -func (s *WorkerGrpcServer) StartWithTLS(port int) error { - if s.running { - return fmt.Errorf("worker gRPC server is already running") - } - - // Create listener - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - if err != nil { - return fmt.Errorf("failed to listen on port %d: %v", port, err) - } - - // Create gRPC server with optional TLS - grpcServer := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.admin")) - - worker_pb.RegisterWorkerServiceServer(grpcServer, s) - - s.grpcServer = grpcServer - s.listener = listener - s.running = true - - // Start cleanup routine - go s.cleanupRoutine() - - // Start serving in a goroutine - go func() { - if err := s.grpcServer.Serve(listener); err != nil { - if s.running { - glog.Errorf("Worker gRPC server error: %v", err) - } - } - }() - - return nil -} - -// Stop stops the gRPC server -func (s *WorkerGrpcServer) Stop() error { - if !s.running { - return nil - } - - s.running = false - close(s.stopChan) - - // Close all worker connections - s.connMutex.Lock() - for _, conn := range s.connections { - conn.cancel() - close(conn.outgoing) - } - s.connections = make(map[string]*WorkerConnection) - s.connMutex.Unlock() - - // Stop gRPC server - if s.grpcServer != nil { - s.grpcServer.GracefulStop() - } - - // Close listener - if s.listener != nil { - s.listener.Close() - } - - glog.Infof("Worker gRPC server stopped") - return nil -} - -// WorkerStream handles bidirectional communication with workers -func (s *WorkerGrpcServer) WorkerStream(stream worker_pb.WorkerService_WorkerStreamServer) error { - ctx := stream.Context() - - // get client address - address := findClientAddress(ctx) - - // Wait for initial registration message - msg, err := stream.Recv() - if err != nil { - return fmt.Errorf("failed to receive registration message: %w", err) - } - - registration := msg.GetRegistration() - if registration == nil { - return fmt.Errorf("first message must be registration") - } - registration.Address = address - - workerID := registration.WorkerId - if workerID == "" { - return fmt.Errorf("worker ID cannot be empty") - } - - glog.Infof("Worker %s connecting from %s", workerID, registration.Address) - - // Create worker connection - connCtx, connCancel := context.WithCancel(ctx) - conn := &WorkerConnection{ - workerID: workerID, - stream: stream, - lastSeen: time.Now(), - address: registration.Address, - maxConcurrent: registration.MaxConcurrent, - outgoing: make(chan *worker_pb.AdminMessage, 100), - ctx: connCtx, - cancel: connCancel, - } - - // Convert capabilities - capabilities := make([]MaintenanceTaskType, len(registration.Capabilities)) - for i, cap := range registration.Capabilities { - capabilities[i] = MaintenanceTaskType(cap) - } - conn.capabilities = capabilities - - // Register connection - s.connMutex.Lock() - s.connections[workerID] = conn - s.connMutex.Unlock() - - // Register worker with maintenance manager - s.registerWorkerWithManager(conn) - - // Send registration response - regResponse := &worker_pb.AdminMessage{ - Timestamp: time.Now().Unix(), - Message: &worker_pb.AdminMessage_RegistrationResponse{ - RegistrationResponse: &worker_pb.RegistrationResponse{ - Success: true, - Message: "Worker registered successfully", - }, - }, - } - - select { - case conn.outgoing <- regResponse: - case <-time.After(5 * time.Second): - glog.Errorf("Failed to send registration response to worker %s", workerID) - } - - // Start outgoing message handler - go s.handleOutgoingMessages(conn) - - // Handle incoming messages - for { - select { - case <-ctx.Done(): - glog.Infof("Worker %s connection closed: %v", workerID, ctx.Err()) - s.unregisterWorker(workerID) - return nil - case <-connCtx.Done(): - glog.Infof("Worker %s connection cancelled", workerID) - s.unregisterWorker(workerID) - return nil - default: - } - - msg, err := stream.Recv() - if err != nil { - if err == io.EOF { - glog.Infof("Worker %s disconnected", workerID) - } else { - glog.Errorf("Error receiving from worker %s: %v", workerID, err) - } - s.unregisterWorker(workerID) - return err - } - - conn.lastSeen = time.Now() - s.handleWorkerMessage(conn, msg) - } -} - -// handleOutgoingMessages sends messages to worker -func (s *WorkerGrpcServer) handleOutgoingMessages(conn *WorkerConnection) { - for { - select { - case <-conn.ctx.Done(): - return - case msg, ok := <-conn.outgoing: - if !ok { - return - } - - if err := conn.stream.Send(msg); err != nil { - glog.Errorf("Failed to send message to worker %s: %v", conn.workerID, err) - conn.cancel() - return - } - } - } -} - -// handleWorkerMessage processes incoming messages from workers -func (s *WorkerGrpcServer) handleWorkerMessage(conn *WorkerConnection, msg *worker_pb.WorkerMessage) { - workerID := conn.workerID - - switch m := msg.Message.(type) { - case *worker_pb.WorkerMessage_Heartbeat: - s.handleHeartbeat(conn, m.Heartbeat) - - case *worker_pb.WorkerMessage_TaskRequest: - s.handleTaskRequest(conn, m.TaskRequest) - - case *worker_pb.WorkerMessage_TaskUpdate: - s.handleTaskUpdate(conn, m.TaskUpdate) - - case *worker_pb.WorkerMessage_TaskComplete: - s.handleTaskCompletion(conn, m.TaskComplete) - - case *worker_pb.WorkerMessage_TaskLogResponse: - s.handleTaskLogResponse(conn, m.TaskLogResponse) - - case *worker_pb.WorkerMessage_Shutdown: - glog.Infof("Worker %s shutting down: %s", workerID, m.Shutdown.Reason) - s.unregisterWorker(workerID) - - default: - glog.Warningf("Unknown message type from worker %s", workerID) - } -} - -// registerWorkerWithManager registers the worker with the maintenance manager -func (s *WorkerGrpcServer) registerWorkerWithManager(conn *WorkerConnection) { - if s.adminServer.maintenanceManager == nil { - return - } - - worker := &MaintenanceWorker{ - ID: conn.workerID, - Address: conn.address, - LastHeartbeat: time.Now(), - Status: "active", - Capabilities: conn.capabilities, - MaxConcurrent: int(conn.maxConcurrent), - CurrentLoad: 0, - } - - s.adminServer.maintenanceManager.RegisterWorker(worker) - glog.V(1).Infof("Registered worker %s with maintenance manager", conn.workerID) -} - -// handleHeartbeat processes heartbeat messages -func (s *WorkerGrpcServer) handleHeartbeat(conn *WorkerConnection, heartbeat *worker_pb.WorkerHeartbeat) { - if s.adminServer.maintenanceManager != nil { - s.adminServer.maintenanceManager.UpdateWorkerHeartbeat(conn.workerID) - } - - // Send heartbeat response - response := &worker_pb.AdminMessage{ - Timestamp: time.Now().Unix(), - Message: &worker_pb.AdminMessage_HeartbeatResponse{ - HeartbeatResponse: &worker_pb.HeartbeatResponse{ - Success: true, - Message: "Heartbeat acknowledged", - }, - }, - } - - select { - case conn.outgoing <- response: - case <-time.After(time.Second): - glog.Warningf("Failed to send heartbeat response to worker %s", conn.workerID) - } -} - -// handleTaskRequest processes task requests from workers -func (s *WorkerGrpcServer) handleTaskRequest(conn *WorkerConnection, request *worker_pb.TaskRequest) { - - if s.adminServer.maintenanceManager == nil { - return - } - - // Get next task from maintenance manager - task := s.adminServer.maintenanceManager.GetNextTask(conn.workerID, conn.capabilities) - - if task != nil { - - // Use typed params directly - master client should already be configured in the params - var taskParams *worker_pb.TaskParams - if task.TypedParams != nil { - taskParams = task.TypedParams - } else { - // Create basic params if none exist - taskParams = &worker_pb.TaskParams{ - VolumeId: task.VolumeID, - Collection: task.Collection, - Sources: []*worker_pb.TaskSource{ - { - Node: task.Server, - VolumeId: task.VolumeID, - }, - }, - } - } - - // Send task assignment - assignment := &worker_pb.AdminMessage{ - Timestamp: time.Now().Unix(), - Message: &worker_pb.AdminMessage_TaskAssignment{ - TaskAssignment: &worker_pb.TaskAssignment{ - TaskId: task.ID, - TaskType: string(task.Type), - Params: taskParams, - Priority: int32(task.Priority), - CreatedTime: time.Now().Unix(), - }, - }, - } - - select { - case conn.outgoing <- assignment: - case <-time.After(time.Second): - glog.Warningf("Failed to send task assignment to worker %s", conn.workerID) - } - } else { - } -} - -// handleTaskUpdate processes task progress updates -func (s *WorkerGrpcServer) handleTaskUpdate(conn *WorkerConnection, update *worker_pb.TaskUpdate) { - if s.adminServer.maintenanceManager != nil { - s.adminServer.maintenanceManager.UpdateTaskProgress(update.TaskId, float64(update.Progress)) - glog.V(3).Infof("Updated task %s progress: %.1f%%", update.TaskId, update.Progress) - } -} - -// handleTaskCompletion processes task completion notifications -func (s *WorkerGrpcServer) handleTaskCompletion(conn *WorkerConnection, completion *worker_pb.TaskComplete) { - if s.adminServer.maintenanceManager != nil { - errorMsg := "" - if !completion.Success { - errorMsg = completion.ErrorMessage - } - s.adminServer.maintenanceManager.CompleteTask(completion.TaskId, errorMsg) - - if completion.Success { - glog.V(1).Infof("Worker %s completed task %s successfully", conn.workerID, completion.TaskId) - } else { - glog.Errorf("Worker %s failed task %s: %s", conn.workerID, completion.TaskId, completion.ErrorMessage) - } - } -} - -// handleTaskLogResponse processes task log responses from workers -func (s *WorkerGrpcServer) handleTaskLogResponse(conn *WorkerConnection, response *worker_pb.TaskLogResponse) { - requestKey := fmt.Sprintf("%s:%s", response.WorkerId, response.TaskId) - - s.logRequestsMutex.RLock() - requestContext, exists := s.pendingLogRequests[requestKey] - s.logRequestsMutex.RUnlock() - - if !exists { - glog.Warningf("Received unexpected log response for task %s from worker %s", response.TaskId, response.WorkerId) - return - } - - glog.V(1).Infof("Received log response for task %s from worker %s: %d entries", response.TaskId, response.WorkerId, len(response.LogEntries)) - - // Send response to waiting channel - select { - case requestContext.ResponseCh <- response: - // Response delivered successfully - case <-time.After(time.Second): - glog.Warningf("Failed to deliver log response for task %s from worker %s: timeout", response.TaskId, response.WorkerId) - } - - // Clean up the pending request - s.logRequestsMutex.Lock() - delete(s.pendingLogRequests, requestKey) - s.logRequestsMutex.Unlock() -} - -// unregisterWorker removes a worker connection -func (s *WorkerGrpcServer) unregisterWorker(workerID string) { - s.connMutex.Lock() - if conn, exists := s.connections[workerID]; exists { - conn.cancel() - close(conn.outgoing) - delete(s.connections, workerID) - } - s.connMutex.Unlock() - - glog.V(1).Infof("Unregistered worker %s", workerID) -} - -// cleanupRoutine periodically cleans up stale connections -func (s *WorkerGrpcServer) cleanupRoutine() { - ticker := time.NewTicker(30 * time.Second) - defer ticker.Stop() - - for { - select { - case <-s.stopChan: - return - case <-ticker.C: - s.cleanupStaleConnections() - } - } -} - -// cleanupStaleConnections removes connections that haven't been seen recently -func (s *WorkerGrpcServer) cleanupStaleConnections() { - cutoff := time.Now().Add(-2 * time.Minute) - - s.connMutex.Lock() - defer s.connMutex.Unlock() - - for workerID, conn := range s.connections { - if conn.lastSeen.Before(cutoff) { - glog.Warningf("Cleaning up stale worker connection: %s", workerID) - conn.cancel() - close(conn.outgoing) - delete(s.connections, workerID) - } - } -} - -// GetConnectedWorkers returns a list of currently connected workers -func (s *WorkerGrpcServer) GetConnectedWorkers() []string { - s.connMutex.RLock() - defer s.connMutex.RUnlock() - - workers := make([]string, 0, len(s.connections)) - for workerID := range s.connections { - workers = append(workers, workerID) - } - return workers -} - -// RequestTaskLogs requests execution logs from a worker for a specific task -func (s *WorkerGrpcServer) RequestTaskLogs(workerID, taskID string, maxEntries int32, logLevel string) ([]*worker_pb.TaskLogEntry, error) { - s.connMutex.RLock() - conn, exists := s.connections[workerID] - s.connMutex.RUnlock() - - if !exists { - return nil, fmt.Errorf("worker %s is not connected", workerID) - } - - // Create response channel for this request - responseCh := make(chan *worker_pb.TaskLogResponse, 1) - requestKey := fmt.Sprintf("%s:%s", workerID, taskID) - - // Register pending request - requestContext := &LogRequestContext{ - TaskID: taskID, - WorkerID: workerID, - ResponseCh: responseCh, - Timeout: time.Now().Add(10 * time.Second), - } - - s.logRequestsMutex.Lock() - s.pendingLogRequests[requestKey] = requestContext - s.logRequestsMutex.Unlock() - - // Create log request message - logRequest := &worker_pb.AdminMessage{ - AdminId: "admin-server", - Timestamp: time.Now().Unix(), - Message: &worker_pb.AdminMessage_TaskLogRequest{ - TaskLogRequest: &worker_pb.TaskLogRequest{ - TaskId: taskID, - WorkerId: workerID, - IncludeMetadata: true, - MaxEntries: maxEntries, - LogLevel: logLevel, - }, - }, - } - - // Send the request through the worker's outgoing channel - select { - case conn.outgoing <- logRequest: - glog.V(1).Infof("Log request sent to worker %s for task %s", workerID, taskID) - case <-time.After(5 * time.Second): - // Clean up pending request on timeout - s.logRequestsMutex.Lock() - delete(s.pendingLogRequests, requestKey) - s.logRequestsMutex.Unlock() - return nil, fmt.Errorf("timeout sending log request to worker %s", workerID) - } - - // Wait for response - select { - case response := <-responseCh: - if !response.Success { - return nil, fmt.Errorf("worker log request failed: %s", response.ErrorMessage) - } - glog.V(1).Infof("Received %d log entries for task %s from worker %s", len(response.LogEntries), taskID, workerID) - return response.LogEntries, nil - case <-time.After(10 * time.Second): - // Clean up pending request on timeout - s.logRequestsMutex.Lock() - delete(s.pendingLogRequests, requestKey) - s.logRequestsMutex.Unlock() - return nil, fmt.Errorf("timeout waiting for log response from worker %s", workerID) - } -} - -// RequestTaskLogsFromAllWorkers requests logs for a task from all connected workers -func (s *WorkerGrpcServer) RequestTaskLogsFromAllWorkers(taskID string, maxEntries int32, logLevel string) (map[string][]*worker_pb.TaskLogEntry, error) { - s.connMutex.RLock() - workerIDs := make([]string, 0, len(s.connections)) - for workerID := range s.connections { - workerIDs = append(workerIDs, workerID) - } - s.connMutex.RUnlock() - - results := make(map[string][]*worker_pb.TaskLogEntry) - - for _, workerID := range workerIDs { - logs, err := s.RequestTaskLogs(workerID, taskID, maxEntries, logLevel) - if err != nil { - glog.V(1).Infof("Failed to get logs from worker %s for task %s: %v", workerID, taskID, err) - // Store empty result with error information for debugging - results[workerID+"_error"] = []*worker_pb.TaskLogEntry{ - { - Timestamp: time.Now().Unix(), - Level: "ERROR", - Message: fmt.Sprintf("Failed to retrieve logs from worker %s: %v", workerID, err), - Fields: map[string]string{"source": "admin"}, - }, - } - continue - } - if len(logs) > 0 { - results[workerID] = logs - } else { - glog.V(2).Infof("No logs found for task %s on worker %s", taskID, workerID) - } - } - - return results, nil -} - -// convertTaskParameters converts task parameters to protobuf format -func convertTaskParameters(params map[string]interface{}) map[string]string { - result := make(map[string]string) - for key, value := range params { - result[key] = fmt.Sprintf("%v", value) - } - return result -} - -func findClientAddress(ctx context.Context) string { - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return "" - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return "" - } - return pr.Addr.String() -} diff --git a/weed/admin/handlers/admin_handlers.go b/weed/admin/handlers/admin_handlers.go deleted file mode 100644 index 215e2a4e5..000000000 --- a/weed/admin/handlers/admin_handlers.go +++ /dev/null @@ -1,434 +0,0 @@ -package handlers - -import ( - "net/http" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" -) - -// AdminHandlers contains all the HTTP handlers for the admin interface -type AdminHandlers struct { - adminServer *dash.AdminServer - authHandlers *AuthHandlers - clusterHandlers *ClusterHandlers - fileBrowserHandlers *FileBrowserHandlers - userHandlers *UserHandlers - policyHandlers *PolicyHandlers - maintenanceHandlers *MaintenanceHandlers - mqHandlers *MessageQueueHandlers -} - -// NewAdminHandlers creates a new instance of AdminHandlers -func NewAdminHandlers(adminServer *dash.AdminServer) *AdminHandlers { - authHandlers := NewAuthHandlers(adminServer) - clusterHandlers := NewClusterHandlers(adminServer) - fileBrowserHandlers := NewFileBrowserHandlers(adminServer) - userHandlers := NewUserHandlers(adminServer) - policyHandlers := NewPolicyHandlers(adminServer) - maintenanceHandlers := NewMaintenanceHandlers(adminServer) - mqHandlers := NewMessageQueueHandlers(adminServer) - return &AdminHandlers{ - adminServer: adminServer, - authHandlers: authHandlers, - clusterHandlers: clusterHandlers, - fileBrowserHandlers: fileBrowserHandlers, - userHandlers: userHandlers, - policyHandlers: policyHandlers, - maintenanceHandlers: maintenanceHandlers, - mqHandlers: mqHandlers, - } -} - -// SetupRoutes configures all the routes for the admin interface -func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username, password string) { - // Health check (no auth required) - r.GET("/health", h.HealthCheck) - - if authRequired { - // Authentication routes (no auth required) - r.GET("/login", h.authHandlers.ShowLogin) - r.POST("/login", h.authHandlers.HandleLogin(username, password)) - r.GET("/logout", h.authHandlers.HandleLogout) - - // Protected routes group - protected := r.Group("/") - protected.Use(dash.RequireAuth()) - - // Main admin interface routes - protected.GET("/", h.ShowDashboard) - protected.GET("/admin", h.ShowDashboard) - - // Object Store management routes - protected.GET("/object-store/buckets", h.ShowS3Buckets) - protected.GET("/object-store/buckets/:bucket", h.ShowBucketDetails) - protected.GET("/object-store/users", h.userHandlers.ShowObjectStoreUsers) - protected.GET("/object-store/policies", h.policyHandlers.ShowPolicies) - - // File browser routes - protected.GET("/files", h.fileBrowserHandlers.ShowFileBrowser) - - // Cluster management routes - protected.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters) - protected.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers) - protected.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers) - protected.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes) - protected.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails) - protected.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections) - protected.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails) - protected.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards) - protected.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails) - - // Message Queue management routes - protected.GET("/mq/brokers", h.mqHandlers.ShowBrokers) - protected.GET("/mq/topics", h.mqHandlers.ShowTopics) - protected.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails) - - // Maintenance system routes - protected.GET("/maintenance", h.maintenanceHandlers.ShowMaintenanceQueue) - protected.GET("/maintenance/workers", h.maintenanceHandlers.ShowMaintenanceWorkers) - protected.GET("/maintenance/config", h.maintenanceHandlers.ShowMaintenanceConfig) - protected.POST("/maintenance/config", h.maintenanceHandlers.UpdateMaintenanceConfig) - protected.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig) - protected.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig) - protected.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail) - - // API routes for AJAX calls - api := r.Group("/api") - api.Use(dash.RequireAuthAPI()) // Use API-specific auth middleware - { - api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology) - api.GET("/cluster/masters", h.clusterHandlers.GetMasters) - api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers) - api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data - api.GET("/config", h.adminServer.GetConfigInfo) // Configuration information - - // S3 API routes - s3Api := api.Group("/s3") - { - s3Api.GET("/buckets", h.adminServer.ListBucketsAPI) - s3Api.POST("/buckets", h.adminServer.CreateBucket) - s3Api.DELETE("/buckets/:bucket", h.adminServer.DeleteBucket) - s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails) - s3Api.PUT("/buckets/:bucket/quota", h.adminServer.UpdateBucketQuota) - } - - // User management API routes - usersApi := api.Group("/users") - { - usersApi.GET("", h.userHandlers.GetUsers) - usersApi.POST("", h.userHandlers.CreateUser) - usersApi.GET("/:username", h.userHandlers.GetUserDetails) - usersApi.PUT("/:username", h.userHandlers.UpdateUser) - usersApi.DELETE("/:username", h.userHandlers.DeleteUser) - usersApi.POST("/:username/access-keys", h.userHandlers.CreateAccessKey) - usersApi.DELETE("/:username/access-keys/:accessKeyId", h.userHandlers.DeleteAccessKey) - usersApi.GET("/:username/policies", h.userHandlers.GetUserPolicies) - usersApi.PUT("/:username/policies", h.userHandlers.UpdateUserPolicies) - } - - // Object Store Policy management API routes - objectStorePoliciesApi := api.Group("/object-store/policies") - { - objectStorePoliciesApi.GET("", h.policyHandlers.GetPolicies) - objectStorePoliciesApi.POST("", h.policyHandlers.CreatePolicy) - objectStorePoliciesApi.GET("/:name", h.policyHandlers.GetPolicy) - objectStorePoliciesApi.PUT("/:name", h.policyHandlers.UpdatePolicy) - objectStorePoliciesApi.DELETE("/:name", h.policyHandlers.DeletePolicy) - objectStorePoliciesApi.POST("/validate", h.policyHandlers.ValidatePolicy) - } - - // File management API routes - filesApi := api.Group("/files") - { - filesApi.DELETE("/delete", h.fileBrowserHandlers.DeleteFile) - filesApi.DELETE("/delete-multiple", h.fileBrowserHandlers.DeleteMultipleFiles) - filesApi.POST("/create-folder", h.fileBrowserHandlers.CreateFolder) - filesApi.POST("/upload", h.fileBrowserHandlers.UploadFile) - filesApi.GET("/download", h.fileBrowserHandlers.DownloadFile) - filesApi.GET("/view", h.fileBrowserHandlers.ViewFile) - filesApi.GET("/properties", h.fileBrowserHandlers.GetFileProperties) - } - - // Volume management API routes - volumeApi := api.Group("/volumes") - { - volumeApi.POST("/:id/:server/vacuum", h.clusterHandlers.VacuumVolume) - } - - // Maintenance API routes - maintenanceApi := api.Group("/maintenance") - { - maintenanceApi.POST("/scan", h.adminServer.TriggerMaintenanceScan) - maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks) - maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask) - maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI) - maintenanceApi.POST("/tasks/:id/cancel", h.adminServer.CancelMaintenanceTask) - maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI) - maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker) - maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs) - maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats) - maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI) - maintenanceApi.PUT("/config", h.adminServer.UpdateMaintenanceConfigAPI) - } - - // Message Queue API routes - mqApi := api.Group("/mq") - { - mqApi.GET("/topics/:namespace/:topic", h.mqHandlers.GetTopicDetailsAPI) - mqApi.POST("/topics/create", h.mqHandlers.CreateTopicAPI) - mqApi.POST("/topics/retention/update", h.mqHandlers.UpdateTopicRetentionAPI) - mqApi.POST("/retention/purge", h.adminServer.TriggerTopicRetentionPurgeAPI) - } - } - } else { - // No authentication required - all routes are public - r.GET("/", h.ShowDashboard) - r.GET("/admin", h.ShowDashboard) - - // Object Store management routes - r.GET("/object-store/buckets", h.ShowS3Buckets) - r.GET("/object-store/buckets/:bucket", h.ShowBucketDetails) - r.GET("/object-store/users", h.userHandlers.ShowObjectStoreUsers) - r.GET("/object-store/policies", h.policyHandlers.ShowPolicies) - - // File browser routes - r.GET("/files", h.fileBrowserHandlers.ShowFileBrowser) - - // Cluster management routes - r.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters) - r.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers) - r.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers) - r.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes) - r.GET("/cluster/volumes/:id/:server", h.clusterHandlers.ShowVolumeDetails) - r.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections) - r.GET("/cluster/collections/:name", h.clusterHandlers.ShowCollectionDetails) - r.GET("/cluster/ec-shards", h.clusterHandlers.ShowClusterEcShards) - r.GET("/cluster/ec-volumes/:id", h.clusterHandlers.ShowEcVolumeDetails) - - // Message Queue management routes - r.GET("/mq/brokers", h.mqHandlers.ShowBrokers) - r.GET("/mq/topics", h.mqHandlers.ShowTopics) - r.GET("/mq/topics/:namespace/:topic", h.mqHandlers.ShowTopicDetails) - - // Maintenance system routes - r.GET("/maintenance", h.maintenanceHandlers.ShowMaintenanceQueue) - r.GET("/maintenance/workers", h.maintenanceHandlers.ShowMaintenanceWorkers) - r.GET("/maintenance/config", h.maintenanceHandlers.ShowMaintenanceConfig) - r.POST("/maintenance/config", h.maintenanceHandlers.UpdateMaintenanceConfig) - r.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig) - r.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig) - r.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail) - - // API routes for AJAX calls - api := r.Group("/api") - { - api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology) - api.GET("/cluster/masters", h.clusterHandlers.GetMasters) - api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers) - api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data - api.GET("/config", h.adminServer.GetConfigInfo) // Configuration information - - // S3 API routes - s3Api := api.Group("/s3") - { - s3Api.GET("/buckets", h.adminServer.ListBucketsAPI) - s3Api.POST("/buckets", h.adminServer.CreateBucket) - s3Api.DELETE("/buckets/:bucket", h.adminServer.DeleteBucket) - s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails) - s3Api.PUT("/buckets/:bucket/quota", h.adminServer.UpdateBucketQuota) - } - - // User management API routes - usersApi := api.Group("/users") - { - usersApi.GET("", h.userHandlers.GetUsers) - usersApi.POST("", h.userHandlers.CreateUser) - usersApi.GET("/:username", h.userHandlers.GetUserDetails) - usersApi.PUT("/:username", h.userHandlers.UpdateUser) - usersApi.DELETE("/:username", h.userHandlers.DeleteUser) - usersApi.POST("/:username/access-keys", h.userHandlers.CreateAccessKey) - usersApi.DELETE("/:username/access-keys/:accessKeyId", h.userHandlers.DeleteAccessKey) - usersApi.GET("/:username/policies", h.userHandlers.GetUserPolicies) - usersApi.PUT("/:username/policies", h.userHandlers.UpdateUserPolicies) - } - - // Object Store Policy management API routes - objectStorePoliciesApi := api.Group("/object-store/policies") - { - objectStorePoliciesApi.GET("", h.policyHandlers.GetPolicies) - objectStorePoliciesApi.POST("", h.policyHandlers.CreatePolicy) - objectStorePoliciesApi.GET("/:name", h.policyHandlers.GetPolicy) - objectStorePoliciesApi.PUT("/:name", h.policyHandlers.UpdatePolicy) - objectStorePoliciesApi.DELETE("/:name", h.policyHandlers.DeletePolicy) - objectStorePoliciesApi.POST("/validate", h.policyHandlers.ValidatePolicy) - } - - // File management API routes - filesApi := api.Group("/files") - { - filesApi.DELETE("/delete", h.fileBrowserHandlers.DeleteFile) - filesApi.DELETE("/delete-multiple", h.fileBrowserHandlers.DeleteMultipleFiles) - filesApi.POST("/create-folder", h.fileBrowserHandlers.CreateFolder) - filesApi.POST("/upload", h.fileBrowserHandlers.UploadFile) - filesApi.GET("/download", h.fileBrowserHandlers.DownloadFile) - filesApi.GET("/view", h.fileBrowserHandlers.ViewFile) - filesApi.GET("/properties", h.fileBrowserHandlers.GetFileProperties) - } - - // Volume management API routes - volumeApi := api.Group("/volumes") - { - volumeApi.POST("/:id/:server/vacuum", h.clusterHandlers.VacuumVolume) - } - - // Maintenance API routes - maintenanceApi := api.Group("/maintenance") - { - maintenanceApi.POST("/scan", h.adminServer.TriggerMaintenanceScan) - maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks) - maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask) - maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI) - maintenanceApi.POST("/tasks/:id/cancel", h.adminServer.CancelMaintenanceTask) - maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI) - maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker) - maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs) - maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats) - maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI) - maintenanceApi.PUT("/config", h.adminServer.UpdateMaintenanceConfigAPI) - } - - // Message Queue API routes - mqApi := api.Group("/mq") - { - mqApi.GET("/topics/:namespace/:topic", h.mqHandlers.GetTopicDetailsAPI) - mqApi.POST("/topics/create", h.mqHandlers.CreateTopicAPI) - mqApi.POST("/topics/retention/update", h.mqHandlers.UpdateTopicRetentionAPI) - mqApi.POST("/retention/purge", h.adminServer.TriggerTopicRetentionPurgeAPI) - } - } - } -} - -// HealthCheck returns the health status of the admin interface -func (h *AdminHandlers) HealthCheck(c *gin.Context) { - c.JSON(200, gin.H{"health": "ok"}) -} - -// ShowDashboard renders the main admin dashboard -func (h *AdminHandlers) ShowDashboard(c *gin.Context) { - // Get admin data from the server - adminData := h.getAdminData(c) - - // Render HTML template - c.Header("Content-Type", "text/html") - adminComponent := app.Admin(adminData) - layoutComponent := layout.Layout(c, adminComponent) - err := layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowS3Buckets renders the Object Store buckets management page -func (h *AdminHandlers) ShowS3Buckets(c *gin.Context) { - // Get Object Store buckets data from the server - s3Data := h.getS3BucketsData(c) - - // Render HTML template - c.Header("Content-Type", "text/html") - s3Component := app.S3Buckets(s3Data) - layoutComponent := layout.Layout(c, s3Component) - err := layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowBucketDetails returns detailed information about a specific bucket -func (h *AdminHandlers) ShowBucketDetails(c *gin.Context) { - bucketName := c.Param("bucket") - details, err := h.adminServer.GetBucketDetails(bucketName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get bucket details: " + err.Error()}) - return - } - c.JSON(http.StatusOK, details) -} - -// getS3BucketsData retrieves Object Store buckets data from the server -func (h *AdminHandlers) getS3BucketsData(c *gin.Context) dash.S3BucketsData { - username := c.GetString("username") - if username == "" { - username = "admin" - } - - // Get Object Store buckets - buckets, err := h.adminServer.GetS3Buckets() - if err != nil { - // Return empty data on error - return dash.S3BucketsData{ - Username: username, - Buckets: []dash.S3Bucket{}, - TotalBuckets: 0, - TotalSize: 0, - LastUpdated: time.Now(), - } - } - - // Calculate totals - var totalSize int64 - for _, bucket := range buckets { - totalSize += bucket.Size - } - - return dash.S3BucketsData{ - Username: username, - Buckets: buckets, - TotalBuckets: len(buckets), - TotalSize: totalSize, - LastUpdated: time.Now(), - } -} - -// getAdminData retrieves admin data from the server (now uses consolidated method) -func (h *AdminHandlers) getAdminData(c *gin.Context) dash.AdminData { - username := c.GetString("username") - - // Use the consolidated GetAdminData method from AdminServer - adminData, err := h.adminServer.GetAdminData(username) - if err != nil { - // Return default data when services are not available - if username == "" { - username = "admin" - } - - masterNodes := []dash.MasterNode{ - { - Address: "localhost:9333", - IsLeader: true, - }, - } - - return dash.AdminData{ - Username: username, - TotalVolumes: 0, - TotalFiles: 0, - TotalSize: 0, - MasterNodes: masterNodes, - VolumeServers: []dash.VolumeServer{}, - FilerNodes: []dash.FilerNode{}, - DataCenters: []dash.DataCenter{}, - LastUpdated: time.Now(), - } - } - - return adminData -} - -// Helper functions diff --git a/weed/admin/handlers/auth_handlers.go b/weed/admin/handlers/auth_handlers.go deleted file mode 100644 index 07596b8e4..000000000 --- a/weed/admin/handlers/auth_handlers.go +++ /dev/null @@ -1,45 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" -) - -// AuthHandlers contains authentication-related HTTP handlers -type AuthHandlers struct { - adminServer *dash.AdminServer -} - -// NewAuthHandlers creates a new instance of AuthHandlers -func NewAuthHandlers(adminServer *dash.AdminServer) *AuthHandlers { - return &AuthHandlers{ - adminServer: adminServer, - } -} - -// ShowLogin displays the login page -func (a *AuthHandlers) ShowLogin(c *gin.Context) { - errorMessage := c.Query("error") - - // Render login template - c.Header("Content-Type", "text/html") - loginComponent := layout.LoginForm(c, "SeaweedFS Admin", errorMessage) - err := loginComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render login template: " + err.Error()}) - return - } -} - -// HandleLogin handles login form submission -func (a *AuthHandlers) HandleLogin(username, password string) gin.HandlerFunc { - return a.adminServer.HandleLogin(username, password) -} - -// HandleLogout handles user logout -func (a *AuthHandlers) HandleLogout(c *gin.Context) { - a.adminServer.HandleLogout(c) -} diff --git a/weed/admin/handlers/cluster_handlers.go b/weed/admin/handlers/cluster_handlers.go deleted file mode 100644 index 1a58e919d..000000000 --- a/weed/admin/handlers/cluster_handlers.go +++ /dev/null @@ -1,431 +0,0 @@ -package handlers - -import ( - "math" - "net/http" - "strconv" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" -) - -// ClusterHandlers contains all the HTTP handlers for cluster management -type ClusterHandlers struct { - adminServer *dash.AdminServer -} - -// NewClusterHandlers creates a new instance of ClusterHandlers -func NewClusterHandlers(adminServer *dash.AdminServer) *ClusterHandlers { - return &ClusterHandlers{ - adminServer: adminServer, - } -} - -// ShowClusterVolumeServers renders the cluster volume servers page -func (h *ClusterHandlers) ShowClusterVolumeServers(c *gin.Context) { - // Get cluster volume servers data - volumeServersData, err := h.adminServer.GetClusterVolumeServers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volume servers: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - volumeServersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - volumeServersComponent := app.ClusterVolumeServers(*volumeServersData) - layoutComponent := layout.Layout(c, volumeServersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterVolumes renders the cluster volumes page -func (h *ClusterHandlers) ShowClusterVolumes(c *gin.Context) { - // Get pagination and sorting parameters from query string - page := 1 - if p := c.Query("page"); p != "" { - if parsed, err := strconv.Atoi(p); err == nil && parsed > 0 { - page = parsed - } - } - - pageSize := 100 - if ps := c.Query("pageSize"); ps != "" { - if parsed, err := strconv.Atoi(ps); err == nil && parsed > 0 && parsed <= 1000 { - pageSize = parsed - } - } - - sortBy := c.DefaultQuery("sortBy", "id") - sortOrder := c.DefaultQuery("sortOrder", "asc") - collection := c.Query("collection") // Optional collection filter - - // Get cluster volumes data - volumesData, err := h.adminServer.GetClusterVolumes(page, pageSize, sortBy, sortOrder, collection) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volumes: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - volumesData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - volumesComponent := app.ClusterVolumes(*volumesData) - layoutComponent := layout.Layout(c, volumesComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowVolumeDetails renders the volume details page -func (h *ClusterHandlers) ShowVolumeDetails(c *gin.Context) { - volumeIDStr := c.Param("id") - server := c.Param("server") - - if volumeIDStr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"}) - return - } - - if server == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Server is required"}) - return - } - - volumeID, err := strconv.Atoi(volumeIDStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"}) - return - } - - // Get volume details - volumeDetails, err := h.adminServer.GetVolumeDetails(volumeID, server) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get volume details: " + err.Error()}) - return - } - - // Render HTML template - c.Header("Content-Type", "text/html") - volumeDetailsComponent := app.VolumeDetails(*volumeDetails) - layoutComponent := layout.Layout(c, volumeDetailsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterCollections renders the cluster collections page -func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) { - // Get cluster collections data - collectionsData, err := h.adminServer.GetClusterCollections() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster collections: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - collectionsData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - collectionsComponent := app.ClusterCollections(*collectionsData) - layoutComponent := layout.Layout(c, collectionsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowCollectionDetails renders the collection detail page -func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) { - collectionName := c.Param("name") - if collectionName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Collection name is required"}) - return - } - - // Map "default" collection to empty string for backend filtering - actualCollectionName := collectionName - if collectionName == "default" { - actualCollectionName = "" - } - - // Parse query parameters - page, _ := strconv.Atoi(c.DefaultQuery("page", "1")) - pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25")) - sortBy := c.DefaultQuery("sort_by", "volume_id") - sortOrder := c.DefaultQuery("sort_order", "asc") - - // Get collection details data (volumes and EC volumes) - collectionDetailsData, err := h.adminServer.GetCollectionDetails(actualCollectionName, page, pageSize, sortBy, sortOrder) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - collectionDetailsData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - collectionDetailsComponent := app.CollectionDetails(*collectionDetailsData) - layoutComponent := layout.Layout(c, collectionDetailsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterEcShards handles the cluster EC shards page (individual shards view) -func (h *ClusterHandlers) ShowClusterEcShards(c *gin.Context) { - // Parse query parameters - page, _ := strconv.Atoi(c.DefaultQuery("page", "1")) - pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "100")) - sortBy := c.DefaultQuery("sort_by", "volume_id") - sortOrder := c.DefaultQuery("sort_order", "asc") - collection := c.DefaultQuery("collection", "") - - // Get data from admin server - data, err := h.adminServer.GetClusterEcVolumes(page, pageSize, sortBy, sortOrder, collection) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - data.Username = username - - // Render template - c.Header("Content-Type", "text/html") - ecVolumesComponent := app.ClusterEcVolumes(*data) - layoutComponent := layout.Layout(c, ecVolumesComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } -} - -// ShowEcVolumeDetails renders the EC volume details page -func (h *ClusterHandlers) ShowEcVolumeDetails(c *gin.Context) { - volumeIDStr := c.Param("id") - - if volumeIDStr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"}) - return - } - - volumeID, err := strconv.Atoi(volumeIDStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"}) - return - } - - // Check that volumeID is within uint32 range - if volumeID < 0 || uint64(volumeID) > math.MaxUint32 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID out of range"}) - return - } - - // Parse sorting parameters - sortBy := c.DefaultQuery("sort_by", "shard_id") - sortOrder := c.DefaultQuery("sort_order", "asc") - - // Get EC volume details - ecVolumeDetails, err := h.adminServer.GetEcVolumeDetails(uint32(volumeID), sortBy, sortOrder) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get EC volume details: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - ecVolumeDetails.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - ecVolumeDetailsComponent := app.EcVolumeDetails(*ecVolumeDetails) - layoutComponent := layout.Layout(c, ecVolumeDetailsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterMasters renders the cluster masters page -func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) { - // Get cluster masters data - mastersData, err := h.adminServer.GetClusterMasters() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster masters: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - mastersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - mastersComponent := app.ClusterMasters(*mastersData) - layoutComponent := layout.Layout(c, mastersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterFilers renders the cluster filers page -func (h *ClusterHandlers) ShowClusterFilers(c *gin.Context) { - // Get cluster filers data - filersData, err := h.adminServer.GetClusterFilers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster filers: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - filersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - filersComponent := app.ClusterFilers(*filersData) - layoutComponent := layout.Layout(c, filersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowClusterBrokers renders the cluster message brokers page -func (h *ClusterHandlers) ShowClusterBrokers(c *gin.Context) { - // Get cluster brokers data - brokersData, err := h.adminServer.GetClusterBrokers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster brokers: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - brokersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - brokersComponent := app.ClusterBrokers(*brokersData) - layoutComponent := layout.Layout(c, brokersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// GetClusterTopology returns the cluster topology as JSON -func (h *ClusterHandlers) GetClusterTopology(c *gin.Context) { - topology, err := h.adminServer.GetClusterTopology() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - c.JSON(http.StatusOK, topology) -} - -// GetMasters returns master node information -func (h *ClusterHandlers) GetMasters(c *gin.Context) { - // Simple master info - c.JSON(http.StatusOK, gin.H{"masters": []gin.H{{"address": "localhost:9333"}}}) -} - -// GetVolumeServers returns volume server information -func (h *ClusterHandlers) GetVolumeServers(c *gin.Context) { - topology, err := h.adminServer.GetClusterTopology() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - c.JSON(http.StatusOK, gin.H{"volume_servers": topology.VolumeServers}) -} - -// VacuumVolume handles volume vacuum requests via API -func (h *ClusterHandlers) VacuumVolume(c *gin.Context) { - volumeIDStr := c.Param("id") - server := c.Param("server") - - if volumeIDStr == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Volume ID is required"}) - return - } - - volumeID, err := strconv.Atoi(volumeIDStr) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid volume ID"}) - return - } - - // Perform vacuum operation - err = h.adminServer.VacuumVolume(volumeID, server) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to vacuum volume: " + err.Error(), - }) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Volume vacuum started successfully", - "volume_id": volumeID, - "server": server, - }) -} diff --git a/weed/admin/handlers/file_browser_handlers.go b/weed/admin/handlers/file_browser_handlers.go deleted file mode 100644 index a0427e39f..000000000 --- a/weed/admin/handlers/file_browser_handlers.go +++ /dev/null @@ -1,951 +0,0 @@ -package handlers - -import ( - "bytes" - "context" - "fmt" - "io" - "mime/multipart" - "net" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -type FileBrowserHandlers struct { - adminServer *dash.AdminServer -} - -func NewFileBrowserHandlers(adminServer *dash.AdminServer) *FileBrowserHandlers { - return &FileBrowserHandlers{ - adminServer: adminServer, - } -} - -// ShowFileBrowser renders the file browser page -func (h *FileBrowserHandlers) ShowFileBrowser(c *gin.Context) { - // Get path from query parameter, default to root - path := c.DefaultQuery("path", "/") - - // Get file browser data - browserData, err := h.adminServer.GetFileBrowser(path) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get file browser data: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - browserData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - browserComponent := app.FileBrowser(*browserData) - layoutComponent := layout.Layout(c, browserComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// DeleteFile handles file deletion API requests -func (h *FileBrowserHandlers) DeleteFile(c *gin.Context) { - var request struct { - Path string `json:"path" binding:"required"` - } - - if err := c.ShouldBindJSON(&request); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Delete file via filer - err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: filepath.Dir(request.Path), - Name: filepath.Base(request.Path), - IsDeleteData: true, - IsRecursive: true, - IgnoreRecursiveError: false, - }) - return err - }) - - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete file: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "File deleted successfully"}) -} - -// DeleteMultipleFiles handles multiple file deletion API requests -func (h *FileBrowserHandlers) DeleteMultipleFiles(c *gin.Context) { - var request struct { - Paths []string `json:"paths" binding:"required"` - } - - if err := c.ShouldBindJSON(&request); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - if len(request.Paths) == 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "No paths provided"}) - return - } - - var deletedCount int - var failedCount int - var errors []string - - // Delete each file/folder - for _, path := range request.Paths { - err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: filepath.Dir(path), - Name: filepath.Base(path), - IsDeleteData: true, - IsRecursive: true, - IgnoreRecursiveError: false, - }) - return err - }) - - if err != nil { - failedCount++ - errors = append(errors, fmt.Sprintf("%s: %v", path, err)) - } else { - deletedCount++ - } - } - - // Prepare response - response := map[string]interface{}{ - "deleted": deletedCount, - "failed": failedCount, - "total": len(request.Paths), - } - - if len(errors) > 0 { - response["errors"] = errors - } - - if deletedCount > 0 { - if failedCount == 0 { - response["message"] = fmt.Sprintf("Successfully deleted %d item(s)", deletedCount) - } else { - response["message"] = fmt.Sprintf("Deleted %d item(s), failed to delete %d item(s)", deletedCount, failedCount) - } - c.JSON(http.StatusOK, response) - } else { - response["message"] = "Failed to delete all selected items" - c.JSON(http.StatusInternalServerError, response) - } -} - -// CreateFolder handles folder creation requests -func (h *FileBrowserHandlers) CreateFolder(c *gin.Context) { - var request struct { - Path string `json:"path" binding:"required"` - FolderName string `json:"folder_name" binding:"required"` - } - - if err := c.ShouldBindJSON(&request); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Clean and validate folder name - folderName := strings.TrimSpace(request.FolderName) - if folderName == "" || strings.Contains(folderName, "/") || strings.Contains(folderName, "\\") { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid folder name"}) - return - } - - // Create full path for new folder - fullPath := filepath.Join(request.Path, folderName) - if !strings.HasPrefix(fullPath, "/") { - fullPath = "/" + fullPath - } - - // Create folder via filer - err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - _, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ - Directory: filepath.Dir(fullPath), - Entry: &filer_pb.Entry{ - Name: filepath.Base(fullPath), - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - FileMode: uint32(0755 | os.ModeDir), // Directory mode - Uid: filer_pb.OS_UID, - Gid: filer_pb.OS_GID, - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - TtlSec: 0, - }, - }, - }) - return err - }) - - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create folder: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"message": "Folder created successfully"}) -} - -// UploadFile handles file upload requests -func (h *FileBrowserHandlers) UploadFile(c *gin.Context) { - // Get the current path - currentPath := c.PostForm("path") - if currentPath == "" { - currentPath = "/" - } - - // Parse multipart form - err := c.Request.ParseMultipartForm(1 << 30) // 1GB max memory for large file uploads - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse multipart form: " + err.Error()}) - return - } - - // Get uploaded files (supports multiple files) - files := c.Request.MultipartForm.File["files"] - if len(files) == 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "No files uploaded"}) - return - } - - var uploadResults []map[string]interface{} - var failedUploads []string - - // Process each uploaded file - for _, fileHeader := range files { - // Validate file name - fileName := fileHeader.Filename - if fileName == "" { - failedUploads = append(failedUploads, "invalid filename") - continue - } - - // Create full path for the file - fullPath := filepath.Join(currentPath, fileName) - if !strings.HasPrefix(fullPath, "/") { - fullPath = "/" + fullPath - } - - // Open the file - file, err := fileHeader.Open() - if err != nil { - failedUploads = append(failedUploads, fmt.Sprintf("%s: %v", fileName, err)) - continue - } - - // Upload file to filer - err = h.uploadFileToFiler(fullPath, fileHeader) - file.Close() - - if err != nil { - failedUploads = append(failedUploads, fmt.Sprintf("%s: %v", fileName, err)) - } else { - uploadResults = append(uploadResults, map[string]interface{}{ - "name": fileName, - "size": fileHeader.Size, - "path": fullPath, - }) - } - } - - // Prepare response - response := map[string]interface{}{ - "uploaded": len(uploadResults), - "failed": len(failedUploads), - "files": uploadResults, - } - - if len(failedUploads) > 0 { - response["errors"] = failedUploads - } - - if len(uploadResults) > 0 { - if len(failedUploads) == 0 { - response["message"] = fmt.Sprintf("Successfully uploaded %d file(s)", len(uploadResults)) - } else { - response["message"] = fmt.Sprintf("Uploaded %d file(s), %d failed", len(uploadResults), len(failedUploads)) - } - c.JSON(http.StatusOK, response) - } else { - response["message"] = "All file uploads failed" - c.JSON(http.StatusInternalServerError, response) - } -} - -// uploadFileToFiler uploads a file directly to the filer using multipart form data -func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *multipart.FileHeader) error { - // Get filer address from admin server - filerAddress := h.adminServer.GetFilerAddress() - if filerAddress == "" { - return fmt.Errorf("filer address not configured") - } - - // Validate and sanitize the filer address - if err := h.validateFilerAddress(filerAddress); err != nil { - return fmt.Errorf("invalid filer address: %w", err) - } - - // Validate and sanitize the file path - cleanFilePath, err := h.validateAndCleanFilePath(filePath) - if err != nil { - return fmt.Errorf("invalid file path: %w", err) - } - - // Open the file - file, err := fileHeader.Open() - if err != nil { - return fmt.Errorf("failed to open file: %w", err) - } - defer file.Close() - - // Create multipart form data - var body bytes.Buffer - writer := multipart.NewWriter(&body) - - // Create form file field - part, err := writer.CreateFormFile("file", fileHeader.Filename) - if err != nil { - return fmt.Errorf("failed to create form file: %w", err) - } - - // Copy file content to form - _, err = io.Copy(part, file) - if err != nil { - return fmt.Errorf("failed to copy file content: %w", err) - } - - // Close the writer to finalize the form - err = writer.Close() - if err != nil { - return fmt.Errorf("failed to close multipart writer: %w", err) - } - - // Create the upload URL with validated components - uploadURL := fmt.Sprintf("http://%s%s", filerAddress, cleanFilePath) - - // Create HTTP request - req, err := http.NewRequest("POST", uploadURL, &body) - if err != nil { - return fmt.Errorf("failed to create request: %w", err) - } - - // Set content type with boundary - req.Header.Set("Content-Type", writer.FormDataContentType()) - - // Send request - client := &http.Client{Timeout: 60 * time.Second} // Increased timeout for larger files - // lgtm[go/ssrf] - // Safe: filerAddress validated by validateFilerAddress() to match configured filer - // Safe: cleanFilePath validated and cleaned by validateAndCleanFilePath() to prevent path traversal - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("failed to upload file: %w", err) - } - defer resp.Body.Close() - - // Check response - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - responseBody, _ := io.ReadAll(resp.Body) - return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(responseBody)) - } - - return nil -} - -// validateFilerAddress validates that the filer address is safe to use -func (h *FileBrowserHandlers) validateFilerAddress(address string) error { - if address == "" { - return fmt.Errorf("filer address cannot be empty") - } - - // CRITICAL: Only allow the configured filer address to prevent SSRF - configuredFiler := h.adminServer.GetFilerAddress() - if address != configuredFiler { - return fmt.Errorf("address does not match configured filer: got %s, expected %s", address, configuredFiler) - } - - // Parse the address to validate it's a proper host:port format - host, port, err := net.SplitHostPort(address) - if err != nil { - return fmt.Errorf("invalid address format: %w", err) - } - - // Validate host is not empty - if host == "" { - return fmt.Errorf("host cannot be empty") - } - - // Validate port is numeric and in valid range - if port == "" { - return fmt.Errorf("port cannot be empty") - } - - portNum, err := strconv.Atoi(port) - if err != nil { - return fmt.Errorf("invalid port number: %w", err) - } - - if portNum < 1 || portNum > 65535 { - return fmt.Errorf("port number must be between 1 and 65535") - } - - return nil -} - -// validateAndCleanFilePath validates and cleans the file path to prevent path traversal -func (h *FileBrowserHandlers) validateAndCleanFilePath(filePath string) (string, error) { - if filePath == "" { - return "", fmt.Errorf("file path cannot be empty") - } - - // Clean the path to remove any .. or . components - cleanPath := filepath.Clean(filePath) - - // Ensure the path starts with / - if !strings.HasPrefix(cleanPath, "/") { - cleanPath = "/" + cleanPath - } - - // Prevent path traversal attacks - if strings.Contains(cleanPath, "..") { - return "", fmt.Errorf("path traversal not allowed") - } - - // Additional validation: ensure path doesn't contain dangerous characters - if strings.ContainsAny(cleanPath, "\x00\r\n") { - return "", fmt.Errorf("path contains invalid characters") - } - - return cleanPath, nil -} - -// DownloadFile handles file download requests -func (h *FileBrowserHandlers) DownloadFile(c *gin.Context) { - filePath := c.Query("path") - if filePath == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "File path is required"}) - return - } - - // Get filer address - filerAddress := h.adminServer.GetFilerAddress() - if filerAddress == "" { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Filer address not configured"}) - return - } - - // Validate and sanitize the file path - cleanFilePath, err := h.validateAndCleanFilePath(filePath) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid file path: " + err.Error()}) - return - } - - // Create the download URL - downloadURL := fmt.Sprintf("http://%s%s", filerAddress, cleanFilePath) - - // Set headers for file download - fileName := filepath.Base(cleanFilePath) - c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fileName)) - c.Header("Content-Type", "application/octet-stream") - - // Proxy the request to filer - c.Redirect(http.StatusFound, downloadURL) -} - -// ViewFile handles file viewing requests (for text files, images, etc.) -func (h *FileBrowserHandlers) ViewFile(c *gin.Context) { - filePath := c.Query("path") - if filePath == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "File path is required"}) - return - } - - // Get file metadata first - var fileEntry dash.FileEntry - err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: filepath.Dir(filePath), - Name: filepath.Base(filePath), - }) - if err != nil { - return err - } - - entry := resp.Entry - if entry == nil { - return fmt.Errorf("file not found") - } - - // Convert to FileEntry - var modTime time.Time - if entry.Attributes != nil && entry.Attributes.Mtime > 0 { - modTime = time.Unix(entry.Attributes.Mtime, 0) - } - - var size int64 - if entry.Attributes != nil { - size = int64(entry.Attributes.FileSize) - } - - // Determine MIME type with comprehensive extension support - mime := h.determineMimeType(entry.Name) - - fileEntry = dash.FileEntry{ - Name: entry.Name, - FullPath: filePath, - IsDirectory: entry.IsDirectory, - Size: size, - ModTime: modTime, - Mime: mime, - } - - return nil - }) - - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get file metadata: " + err.Error()}) - return - } - - // Check if file is viewable as text - var content string - var viewable bool - var reason string - - // First check if it's a known text type or if we should check content - isKnownTextType := strings.HasPrefix(fileEntry.Mime, "text/") || - fileEntry.Mime == "application/json" || - fileEntry.Mime == "application/javascript" || - fileEntry.Mime == "application/xml" - - // For unknown types, check if it might be text by content - if !isKnownTextType && fileEntry.Mime == "application/octet-stream" { - isKnownTextType = h.isLikelyTextFile(filePath, 512) - if isKnownTextType { - // Update MIME type for better display - fileEntry.Mime = "text/plain" - } - } - - if isKnownTextType { - // Limit text file size for viewing (max 1MB) - if fileEntry.Size > 1024*1024 { - viewable = false - reason = "File too large for viewing (>1MB)" - } else { - // Get file content from filer - filerAddress := h.adminServer.GetFilerAddress() - if filerAddress != "" { - // Validate filer address to prevent SSRF - if err := h.validateFilerAddress(filerAddress); err != nil { - viewable = false - reason = "Invalid filer address configuration" - } else { - cleanFilePath, err := h.validateAndCleanFilePath(filePath) - if err == nil { - fileURL := fmt.Sprintf("http://%s%s", filerAddress, cleanFilePath) - - client := &http.Client{Timeout: 30 * time.Second} - // lgtm[go/ssrf] - // Safe: filerAddress validated by validateFilerAddress() to match configured filer - // Safe: cleanFilePath validated and cleaned by validateAndCleanFilePath() to prevent path traversal - resp, err := client.Get(fileURL) - if err == nil && resp.StatusCode == http.StatusOK { - defer resp.Body.Close() - contentBytes, err := io.ReadAll(resp.Body) - if err == nil { - content = string(contentBytes) - viewable = true - } else { - viewable = false - reason = "Failed to read file content" - } - } else { - viewable = false - reason = "Failed to fetch file from filer" - } - } else { - viewable = false - reason = "Invalid file path" - } - } - } else { - viewable = false - reason = "Filer address not configured" - } - } - } else { - // Not a text file, but might be viewable as image or PDF - if strings.HasPrefix(fileEntry.Mime, "image/") || fileEntry.Mime == "application/pdf" { - viewable = true - } else { - viewable = false - reason = "File type not supported for viewing" - } - } - - c.JSON(http.StatusOK, gin.H{ - "file": fileEntry, - "content": content, - "viewable": viewable, - "reason": reason, - }) -} - -// GetFileProperties handles file properties requests -func (h *FileBrowserHandlers) GetFileProperties(c *gin.Context) { - filePath := c.Query("path") - if filePath == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "File path is required"}) - return - } - - // Get detailed file information from filer - var properties map[string]interface{} - err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: filepath.Dir(filePath), - Name: filepath.Base(filePath), - }) - if err != nil { - return err - } - - entry := resp.Entry - if entry == nil { - return fmt.Errorf("file not found") - } - - properties = make(map[string]interface{}) - properties["name"] = entry.Name - properties["full_path"] = filePath - properties["is_directory"] = entry.IsDirectory - - if entry.Attributes != nil { - properties["size"] = entry.Attributes.FileSize - properties["size_formatted"] = h.formatBytes(int64(entry.Attributes.FileSize)) - - if entry.Attributes.Mtime > 0 { - modTime := time.Unix(entry.Attributes.Mtime, 0) - properties["modified_time"] = modTime.Format("2006-01-02 15:04:05") - properties["modified_timestamp"] = entry.Attributes.Mtime - } - - if entry.Attributes.Crtime > 0 { - createTime := time.Unix(entry.Attributes.Crtime, 0) - properties["created_time"] = createTime.Format("2006-01-02 15:04:05") - properties["created_timestamp"] = entry.Attributes.Crtime - } - - properties["file_mode"] = dash.FormatFileMode(entry.Attributes.FileMode) - properties["file_mode_formatted"] = dash.FormatFileMode(entry.Attributes.FileMode) - properties["file_mode_octal"] = fmt.Sprintf("%o", entry.Attributes.FileMode) - properties["uid"] = entry.Attributes.Uid - properties["gid"] = entry.Attributes.Gid - properties["ttl_seconds"] = entry.Attributes.TtlSec - - if entry.Attributes.TtlSec > 0 { - properties["ttl_formatted"] = fmt.Sprintf("%d seconds", entry.Attributes.TtlSec) - } - } - - // Get extended attributes - if entry.Extended != nil { - extended := make(map[string]string) - for key, value := range entry.Extended { - extended[key] = string(value) - } - properties["extended"] = extended - } - - // Get chunk information for files - if !entry.IsDirectory && len(entry.Chunks) > 0 { - chunks := make([]map[string]interface{}, 0, len(entry.Chunks)) - for _, chunk := range entry.Chunks { - chunkInfo := map[string]interface{}{ - "file_id": chunk.FileId, - "offset": chunk.Offset, - "size": chunk.Size, - "modified_ts": chunk.ModifiedTsNs, - "e_tag": chunk.ETag, - "source_fid": chunk.SourceFileId, - } - chunks = append(chunks, chunkInfo) - } - properties["chunks"] = chunks - properties["chunk_count"] = len(entry.Chunks) - } - - // Determine MIME type - if !entry.IsDirectory { - mime := h.determineMimeType(entry.Name) - properties["mime_type"] = mime - } - - return nil - }) - - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get file properties: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, properties) -} - -// Helper function to format bytes -func (h *FileBrowserHandlers) formatBytes(bytes int64) string { - const unit = 1024 - if bytes < unit { - return fmt.Sprintf("%d B", bytes) - } - div, exp := int64(unit), 0 - for n := bytes / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) -} - -// Helper function to determine MIME type from filename -func (h *FileBrowserHandlers) determineMimeType(filename string) string { - ext := strings.ToLower(filepath.Ext(filename)) - - // Text files - switch ext { - case ".txt", ".log", ".cfg", ".conf", ".ini", ".properties": - return "text/plain" - case ".md", ".markdown": - return "text/markdown" - case ".html", ".htm": - return "text/html" - case ".css": - return "text/css" - case ".js", ".mjs": - return "application/javascript" - case ".ts": - return "text/typescript" - case ".json": - return "application/json" - case ".xml": - return "application/xml" - case ".yaml", ".yml": - return "text/yaml" - case ".csv": - return "text/csv" - case ".sql": - return "text/sql" - case ".sh", ".bash", ".zsh", ".fish": - return "text/x-shellscript" - case ".py": - return "text/x-python" - case ".go": - return "text/x-go" - case ".java": - return "text/x-java" - case ".c": - return "text/x-c" - case ".cpp", ".cc", ".cxx", ".c++": - return "text/x-c++" - case ".h", ".hpp": - return "text/x-c-header" - case ".php": - return "text/x-php" - case ".rb": - return "text/x-ruby" - case ".pl": - return "text/x-perl" - case ".rs": - return "text/x-rust" - case ".swift": - return "text/x-swift" - case ".kt": - return "text/x-kotlin" - case ".scala": - return "text/x-scala" - case ".dockerfile": - return "text/x-dockerfile" - case ".gitignore", ".gitattributes": - return "text/plain" - case ".env": - return "text/plain" - - // Image files - case ".jpg", ".jpeg": - return "image/jpeg" - case ".png": - return "image/png" - case ".gif": - return "image/gif" - case ".bmp": - return "image/bmp" - case ".webp": - return "image/webp" - case ".svg": - return "image/svg+xml" - case ".ico": - return "image/x-icon" - - // Document files - case ".pdf": - return "application/pdf" - case ".doc": - return "application/msword" - case ".docx": - return "application/vnd.openxmlformats-officedocument.wordprocessingml.document" - case ".xls": - return "application/vnd.ms-excel" - case ".xlsx": - return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" - case ".ppt": - return "application/vnd.ms-powerpoint" - case ".pptx": - return "application/vnd.openxmlformats-officedocument.presentationml.presentation" - - // Archive files - case ".zip": - return "application/zip" - case ".tar": - return "application/x-tar" - case ".gz": - return "application/gzip" - case ".bz2": - return "application/x-bzip2" - case ".7z": - return "application/x-7z-compressed" - case ".rar": - return "application/x-rar-compressed" - - // Video files - case ".mp4": - return "video/mp4" - case ".avi": - return "video/x-msvideo" - case ".mov": - return "video/quicktime" - case ".wmv": - return "video/x-ms-wmv" - case ".flv": - return "video/x-flv" - case ".webm": - return "video/webm" - - // Audio files - case ".mp3": - return "audio/mpeg" - case ".wav": - return "audio/wav" - case ".flac": - return "audio/flac" - case ".aac": - return "audio/aac" - case ".ogg": - return "audio/ogg" - - default: - // For files without extension or unknown extensions, - // we'll check if they might be text files by content - return "application/octet-stream" - } -} - -// Helper function to check if a file is likely a text file by checking content -func (h *FileBrowserHandlers) isLikelyTextFile(filePath string, maxCheckSize int64) bool { - filerAddress := h.adminServer.GetFilerAddress() - if filerAddress == "" { - return false - } - - // Validate filer address to prevent SSRF - if err := h.validateFilerAddress(filerAddress); err != nil { - glog.Errorf("Invalid filer address: %v", err) - return false - } - - cleanFilePath, err := h.validateAndCleanFilePath(filePath) - if err != nil { - return false - } - - fileURL := fmt.Sprintf("http://%s%s", filerAddress, cleanFilePath) - - client := &http.Client{Timeout: 10 * time.Second} - // lgtm[go/ssrf] - // Safe: filerAddress validated by validateFilerAddress() to match configured filer - // Safe: cleanFilePath validated and cleaned by validateAndCleanFilePath() to prevent path traversal - resp, err := client.Get(fileURL) - if err != nil || resp.StatusCode != http.StatusOK { - return false - } - defer resp.Body.Close() - - // Read first few bytes to check if it's text - buffer := make([]byte, min(maxCheckSize, 512)) - n, err := resp.Body.Read(buffer) - if err != nil && err != io.EOF { - return false - } - - if n == 0 { - return true // Empty file can be considered text - } - - // Check if content is printable text - return h.isPrintableText(buffer[:n]) -} - -// Helper function to check if content is printable text -func (h *FileBrowserHandlers) isPrintableText(data []byte) bool { - if len(data) == 0 { - return true - } - - // Count printable characters - printable := 0 - for _, b := range data { - if b >= 32 && b <= 126 || b == 9 || b == 10 || b == 13 { - // Printable ASCII, tab, newline, carriage return - printable++ - } else if b >= 128 { - // Potential UTF-8 character - printable++ - } - } - - // If more than 95% of characters are printable, consider it text - return float64(printable)/float64(len(data)) > 0.95 -} - -// Helper function for min -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} diff --git a/weed/admin/handlers/maintenance_handlers.go b/weed/admin/handlers/maintenance_handlers.go deleted file mode 100644 index 6335c4174..000000000 --- a/weed/admin/handlers/maintenance_handlers.go +++ /dev/null @@ -1,589 +0,0 @@ -package handlers - -import ( - "context" - "fmt" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -// MaintenanceHandlers handles maintenance-related HTTP requests -type MaintenanceHandlers struct { - adminServer *dash.AdminServer -} - -// NewMaintenanceHandlers creates a new instance of MaintenanceHandlers -func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers { - return &MaintenanceHandlers{ - adminServer: adminServer, - } -} - -// ShowTaskDetail displays the task detail page -func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) { - taskID := c.Param("id") - - taskDetail, err := h.adminServer.GetMaintenanceTaskDetail(taskID) - if err != nil { - glog.Errorf("DEBUG ShowTaskDetail: error getting task detail for %s: %v", taskID, err) - c.String(http.StatusNotFound, "Task not found: %s (Error: %v)", taskID, err) - return - } - - - c.Header("Content-Type", "text/html") - taskDetailComponent := app.TaskDetail(taskDetail) - layoutComponent := layout.Layout(c, taskDetailComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - glog.Errorf("DEBUG ShowTaskDetail: render error: %v", err) - c.String(http.StatusInternalServerError, "Failed to render template: %v", err) - return - } - -} - -// ShowMaintenanceQueue displays the maintenance queue page -func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) { - // Add timeout to prevent hanging - ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second) - defer cancel() - - // Use a channel to handle timeout for data retrieval - type result struct { - data *maintenance.MaintenanceQueueData - err error - } - resultChan := make(chan result, 1) - - go func() { - data, err := h.getMaintenanceQueueData() - resultChan <- result{data: data, err: err} - }() - - select { - case res := <-resultChan: - if res.err != nil { - glog.V(1).Infof("ShowMaintenanceQueue: error getting data: %v", res.err) - c.JSON(http.StatusInternalServerError, gin.H{"error": res.err.Error()}) - return - } - - glog.V(2).Infof("ShowMaintenanceQueue: got data with %d tasks", len(res.data.Tasks)) - - // Render HTML template - c.Header("Content-Type", "text/html") - maintenanceComponent := app.MaintenanceQueue(res.data) - layoutComponent := layout.Layout(c, maintenanceComponent) - err := layoutComponent.Render(ctx, c.Writer) - if err != nil { - glog.V(1).Infof("ShowMaintenanceQueue: render error: %v", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } - - glog.V(3).Infof("ShowMaintenanceQueue: template rendered successfully") - - case <-ctx.Done(): - glog.Warningf("ShowMaintenanceQueue: timeout waiting for data") - c.JSON(http.StatusRequestTimeout, gin.H{ - "error": "Request timeout - maintenance data retrieval took too long. This may indicate a system issue.", - "suggestion": "Try refreshing the page or contact system administrator if the problem persists.", - }) - return - } -} - -// ShowMaintenanceWorkers displays the maintenance workers page -func (h *MaintenanceHandlers) ShowMaintenanceWorkers(c *gin.Context) { - workersData, err := h.adminServer.GetMaintenanceWorkersData() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Render HTML template - c.Header("Content-Type", "text/html") - workersComponent := app.MaintenanceWorkers(workersData) - layoutComponent := layout.Layout(c, workersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowMaintenanceConfig displays the maintenance configuration page -func (h *MaintenanceHandlers) ShowMaintenanceConfig(c *gin.Context) { - config, err := h.getMaintenanceConfig() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // Get the schema for dynamic form rendering - schema := maintenance.GetMaintenanceConfigSchema() - - // Render HTML template using schema-driven approach - c.Header("Content-Type", "text/html") - configComponent := app.MaintenanceConfigSchema(config, schema) - layoutComponent := layout.Layout(c, configComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowTaskConfig displays the configuration page for a specific task type -func (h *MaintenanceHandlers) ShowTaskConfig(c *gin.Context) { - taskTypeName := c.Param("taskType") - - // Get the schema for this task type - schema := tasks.GetTaskConfigSchema(taskTypeName) - if schema == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Task type not found or no schema available"}) - return - } - - // Get the UI provider for current configuration - uiRegistry := tasks.GetGlobalUIRegistry() - typesRegistry := tasks.GetGlobalTypesRegistry() - - var provider types.TaskUIProvider - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == taskTypeName { - provider = uiRegistry.GetProvider(workerTaskType) - break - } - } - - if provider == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "UI provider not found for task type"}) - return - } - - // Get current configuration - currentConfig := provider.GetCurrentConfig() - - // Note: Do NOT apply schema defaults to current config as it overrides saved values - // Only apply defaults when creating new configs, not when displaying existing ones - - // Create task configuration data - configData := &maintenance.TaskConfigData{ - TaskType: maintenance.MaintenanceTaskType(taskTypeName), - TaskName: schema.DisplayName, - TaskIcon: schema.Icon, - Description: schema.Description, - } - - // Render HTML template using schema-based approach - c.Header("Content-Type", "text/html") - taskConfigComponent := app.TaskConfigSchema(configData, schema, currentConfig) - layoutComponent := layout.Layout(c, taskConfigComponent) - err := layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// UpdateTaskConfig updates task configuration from form -func (h *MaintenanceHandlers) UpdateTaskConfig(c *gin.Context) { - taskTypeName := c.Param("taskType") - taskType := types.TaskType(taskTypeName) - - // Parse form data - err := c.Request.ParseForm() - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse form data: " + err.Error()}) - return - } - - // Debug logging - show received form data - glog.V(1).Infof("Received form data for task type %s:", taskTypeName) - for key, values := range c.Request.PostForm { - glog.V(1).Infof(" %s: %v", key, values) - } - - // Get the task configuration schema - schema := tasks.GetTaskConfigSchema(taskTypeName) - if schema == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Schema not found for task type: " + taskTypeName}) - return - } - - // Create a new config instance based on task type and apply schema defaults - var config TaskConfig - switch taskType { - case types.TaskTypeVacuum: - config = &vacuum.Config{} - case types.TaskTypeBalance: - config = &balance.Config{} - case types.TaskTypeErasureCoding: - config = &erasure_coding.Config{} - default: - c.JSON(http.StatusBadRequest, gin.H{"error": "Unsupported task type: " + taskTypeName}) - return - } - - // Apply schema defaults first using type-safe method - if err := schema.ApplyDefaultsToConfig(config); err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply defaults: " + err.Error()}) - return - } - - // First, get the current configuration to preserve existing values - currentUIRegistry := tasks.GetGlobalUIRegistry() - currentTypesRegistry := tasks.GetGlobalTypesRegistry() - - var currentProvider types.TaskUIProvider - for workerTaskType := range currentTypesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - currentProvider = currentUIRegistry.GetProvider(workerTaskType) - break - } - } - - if currentProvider != nil { - // Copy current config values to the new config - currentConfig := currentProvider.GetCurrentConfig() - if currentConfigProtobuf, ok := currentConfig.(TaskConfig); ok { - // Apply current values using protobuf directly - no map conversion needed! - currentPolicy := currentConfigProtobuf.ToTaskPolicy() - if err := config.FromTaskPolicy(currentPolicy); err != nil { - glog.Warningf("Failed to load current config for %s: %v", taskTypeName, err) - } - } - } - - // Parse form data using schema-based approach (this will override with new values) - err = h.parseTaskConfigFromForm(c.Request.PostForm, schema, config) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse configuration: " + err.Error()}) - return - } - - // Debug logging - show parsed config values - switch taskType { - case types.TaskTypeVacuum: - if vacuumConfig, ok := config.(*vacuum.Config); ok { - glog.V(1).Infof("Parsed vacuum config - GarbageThreshold: %f, MinVolumeAgeSeconds: %d, MinIntervalSeconds: %d", - vacuumConfig.GarbageThreshold, vacuumConfig.MinVolumeAgeSeconds, vacuumConfig.MinIntervalSeconds) - } - case types.TaskTypeErasureCoding: - if ecConfig, ok := config.(*erasure_coding.Config); ok { - glog.V(1).Infof("Parsed EC config - FullnessRatio: %f, QuietForSeconds: %d, MinSizeMB: %d, CollectionFilter: '%s'", - ecConfig.FullnessRatio, ecConfig.QuietForSeconds, ecConfig.MinSizeMB, ecConfig.CollectionFilter) - } - case types.TaskTypeBalance: - if balanceConfig, ok := config.(*balance.Config); ok { - glog.V(1).Infof("Parsed balance config - Enabled: %v, MaxConcurrent: %d, ScanIntervalSeconds: %d, ImbalanceThreshold: %f, MinServerCount: %d", - balanceConfig.Enabled, balanceConfig.MaxConcurrent, balanceConfig.ScanIntervalSeconds, balanceConfig.ImbalanceThreshold, balanceConfig.MinServerCount) - } - } - - // Validate the configuration - if validationErrors := schema.ValidateConfig(config); len(validationErrors) > 0 { - errorMessages := make([]string, len(validationErrors)) - for i, err := range validationErrors { - errorMessages[i] = err.Error() - } - c.JSON(http.StatusBadRequest, gin.H{"error": "Configuration validation failed", "details": errorMessages}) - return - } - - // Apply configuration using UIProvider - uiRegistry := tasks.GetGlobalUIRegistry() - typesRegistry := tasks.GetGlobalTypesRegistry() - - var provider types.TaskUIProvider - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - provider = uiRegistry.GetProvider(workerTaskType) - break - } - } - - if provider == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "UI provider not found for task type"}) - return - } - - // Apply configuration using provider - err = provider.ApplyTaskConfig(config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to apply configuration: " + err.Error()}) - return - } - - // Save task configuration to protobuf file using ConfigPersistence - if h.adminServer != nil && h.adminServer.GetConfigPersistence() != nil { - err = h.saveTaskConfigToProtobuf(taskType, config) - if err != nil { - glog.Warningf("Failed to save task config to protobuf file: %v", err) - // Don't fail the request, just log the warning - } - } - - // Trigger a configuration reload in the maintenance manager - if h.adminServer != nil { - if manager := h.adminServer.GetMaintenanceManager(); manager != nil { - err = manager.ReloadTaskConfigurations() - if err != nil { - glog.Warningf("Failed to reload task configurations: %v", err) - } else { - glog.V(1).Infof("Successfully reloaded task configurations after updating %s", taskTypeName) - } - } - } - - // Redirect back to task configuration page - c.Redirect(http.StatusSeeOther, "/maintenance/config/"+taskTypeName) -} - -// parseTaskConfigFromForm parses form data using schema definitions -func (h *MaintenanceHandlers) parseTaskConfigFromForm(formData map[string][]string, schema *tasks.TaskConfigSchema, config interface{}) error { - configValue := reflect.ValueOf(config) - if configValue.Kind() == reflect.Ptr { - configValue = configValue.Elem() - } - - if configValue.Kind() != reflect.Struct { - return fmt.Errorf("config must be a struct or pointer to struct") - } - - configType := configValue.Type() - - for i := 0; i < configValue.NumField(); i++ { - field := configValue.Field(i) - fieldType := configType.Field(i) - - // Handle embedded structs recursively - if fieldType.Anonymous && field.Kind() == reflect.Struct { - err := h.parseTaskConfigFromForm(formData, schema, field.Addr().Interface()) - if err != nil { - return fmt.Errorf("error parsing embedded struct %s: %w", fieldType.Name, err) - } - continue - } - - // Get JSON tag name - jsonTag := fieldType.Tag.Get("json") - if jsonTag == "" { - continue - } - - // Remove options like ",omitempty" - if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 { - jsonTag = jsonTag[:commaIdx] - } - - // Find corresponding schema field - schemaField := schema.GetFieldByName(jsonTag) - if schemaField == nil { - continue - } - - // Parse value based on field type - if err := h.parseFieldFromForm(formData, schemaField, field); err != nil { - return fmt.Errorf("error parsing field %s: %w", schemaField.DisplayName, err) - } - } - - return nil -} - -// parseFieldFromForm parses a single field value from form data -func (h *MaintenanceHandlers) parseFieldFromForm(formData map[string][]string, schemaField *config.Field, fieldValue reflect.Value) error { - if !fieldValue.CanSet() { - return nil - } - - switch schemaField.Type { - case config.FieldTypeBool: - // Checkbox fields - present means true, absent means false - _, exists := formData[schemaField.JSONName] - fieldValue.SetBool(exists) - - case config.FieldTypeInt: - if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 { - if intVal, err := strconv.Atoi(values[0]); err != nil { - return fmt.Errorf("invalid integer value: %s", values[0]) - } else { - fieldValue.SetInt(int64(intVal)) - } - } - - case config.FieldTypeFloat: - if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 { - if floatVal, err := strconv.ParseFloat(values[0], 64); err != nil { - return fmt.Errorf("invalid float value: %s", values[0]) - } else { - fieldValue.SetFloat(floatVal) - } - } - - case config.FieldTypeString: - if values, ok := formData[schemaField.JSONName]; ok && len(values) > 0 { - fieldValue.SetString(values[0]) - } - - case config.FieldTypeInterval: - // Parse interval fields with value + unit - valueKey := schemaField.JSONName + "_value" - unitKey := schemaField.JSONName + "_unit" - - if valueStrs, ok := formData[valueKey]; ok && len(valueStrs) > 0 { - value, err := strconv.Atoi(valueStrs[0]) - if err != nil { - return fmt.Errorf("invalid interval value: %s", valueStrs[0]) - } - - unit := "minutes" // default - if unitStrs, ok := formData[unitKey]; ok && len(unitStrs) > 0 { - unit = unitStrs[0] - } - - // Convert to seconds - seconds := config.IntervalValueUnitToSeconds(value, unit) - fieldValue.SetInt(int64(seconds)) - } - - default: - return fmt.Errorf("unsupported field type: %s", schemaField.Type) - } - - return nil -} - -// UpdateMaintenanceConfig updates maintenance configuration from form -func (h *MaintenanceHandlers) UpdateMaintenanceConfig(c *gin.Context) { - var config maintenance.MaintenanceConfig - if err := c.ShouldBind(&config); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - err := h.updateMaintenanceConfig(&config) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.Redirect(http.StatusSeeOther, "/maintenance/config") -} - -// Helper methods that delegate to AdminServer - -func (h *MaintenanceHandlers) getMaintenanceQueueData() (*maintenance.MaintenanceQueueData, error) { - tasks, err := h.getMaintenanceTasks() - if err != nil { - return nil, err - } - - workers, err := h.getMaintenanceWorkers() - if err != nil { - return nil, err - } - - stats, err := h.getMaintenanceQueueStats() - if err != nil { - return nil, err - } - - data := &maintenance.MaintenanceQueueData{ - Tasks: tasks, - Workers: workers, - Stats: stats, - LastUpdated: time.Now(), - } - - return data, nil -} - -func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStats, error) { - // Use the exported method from AdminServer - return h.adminServer.GetMaintenanceQueueStats() -} - -func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) { - // Call the maintenance manager directly to get recent tasks (limit for performance) - if h.adminServer == nil { - return []*maintenance.MaintenanceTask{}, nil - } - - manager := h.adminServer.GetMaintenanceManager() - if manager == nil { - return []*maintenance.MaintenanceTask{}, nil - } - - // Get recent tasks only (last 100) to prevent slow page loads - // Users can view more tasks via pagination if needed - allTasks := manager.GetTasks("", "", 100) - return allTasks, nil -} - -func (h *MaintenanceHandlers) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) { - // Get workers from the admin server's maintenance manager - if h.adminServer == nil { - return []*maintenance.MaintenanceWorker{}, nil - } - - if h.adminServer.GetMaintenanceManager() == nil { - return []*maintenance.MaintenanceWorker{}, nil - } - - // Get workers from the maintenance manager - workers := h.adminServer.GetMaintenanceManager().GetWorkers() - return workers, nil -} - -func (h *MaintenanceHandlers) getMaintenanceConfig() (*maintenance.MaintenanceConfigData, error) { - // Delegate to AdminServer's real persistence method - return h.adminServer.GetMaintenanceConfigData() -} - -func (h *MaintenanceHandlers) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error { - // Delegate to AdminServer's real persistence method - return h.adminServer.UpdateMaintenanceConfigData(config) -} - -// saveTaskConfigToProtobuf saves task configuration to protobuf file -func (h *MaintenanceHandlers) saveTaskConfigToProtobuf(taskType types.TaskType, config TaskConfig) error { - configPersistence := h.adminServer.GetConfigPersistence() - if configPersistence == nil { - return fmt.Errorf("config persistence not available") - } - - // Use the new ToTaskPolicy method - much simpler and more maintainable! - taskPolicy := config.ToTaskPolicy() - - // Save using task-specific methods - switch taskType { - case types.TaskTypeVacuum: - return configPersistence.SaveVacuumTaskPolicy(taskPolicy) - case types.TaskTypeErasureCoding: - return configPersistence.SaveErasureCodingTaskPolicy(taskPolicy) - case types.TaskTypeBalance: - return configPersistence.SaveBalanceTaskPolicy(taskPolicy) - default: - return fmt.Errorf("unsupported task type for protobuf persistence: %s", taskType) - } -} diff --git a/weed/admin/handlers/maintenance_handlers_test.go b/weed/admin/handlers/maintenance_handlers_test.go deleted file mode 100644 index fa5a365f1..000000000 --- a/weed/admin/handlers/maintenance_handlers_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package handlers - -import ( - "net/url" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/base" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" -) - -func TestParseTaskConfigFromForm_WithEmbeddedStruct(t *testing.T) { - // Create a maintenance handlers instance for testing - h := &MaintenanceHandlers{} - - // Test with balance config - t.Run("Balance Config", func(t *testing.T) { - // Simulate form data - formData := url.Values{ - "enabled": {"on"}, // checkbox field - "scan_interval_seconds_value": {"30"}, // interval field - "scan_interval_seconds_unit": {"minutes"}, // interval unit - "max_concurrent": {"2"}, // number field - "imbalance_threshold": {"0.15"}, // float field - "min_server_count": {"3"}, // number field - } - - // Get schema - schema := tasks.GetTaskConfigSchema("balance") - if schema == nil { - t.Fatal("Failed to get balance schema") - } - - // Create config instance - config := &balance.Config{} - - // Parse form data - err := h.parseTaskConfigFromForm(formData, schema, config) - if err != nil { - t.Fatalf("Failed to parse form data: %v", err) - } - - // Verify embedded struct fields were set correctly - if !config.Enabled { - t.Errorf("Expected Enabled=true, got %v", config.Enabled) - } - - if config.ScanIntervalSeconds != 1800 { // 30 minutes * 60 - t.Errorf("Expected ScanIntervalSeconds=1800, got %v", config.ScanIntervalSeconds) - } - - if config.MaxConcurrent != 2 { - t.Errorf("Expected MaxConcurrent=2, got %v", config.MaxConcurrent) - } - - // Verify balance-specific fields were set correctly - if config.ImbalanceThreshold != 0.15 { - t.Errorf("Expected ImbalanceThreshold=0.15, got %v", config.ImbalanceThreshold) - } - - if config.MinServerCount != 3 { - t.Errorf("Expected MinServerCount=3, got %v", config.MinServerCount) - } - }) - - // Test with vacuum config - t.Run("Vacuum Config", func(t *testing.T) { - // Simulate form data - formData := url.Values{ - // "enabled" field omitted to simulate unchecked checkbox - "scan_interval_seconds_value": {"4"}, // interval field - "scan_interval_seconds_unit": {"hours"}, // interval unit - "max_concurrent": {"3"}, // number field - "garbage_threshold": {"0.4"}, // float field - "min_volume_age_seconds_value": {"2"}, // interval field - "min_volume_age_seconds_unit": {"days"}, // interval unit - "min_interval_seconds_value": {"1"}, // interval field - "min_interval_seconds_unit": {"days"}, // interval unit - } - - // Get schema - schema := tasks.GetTaskConfigSchema("vacuum") - if schema == nil { - t.Fatal("Failed to get vacuum schema") - } - - // Create config instance - config := &vacuum.Config{} - - // Parse form data - err := h.parseTaskConfigFromForm(formData, schema, config) - if err != nil { - t.Fatalf("Failed to parse form data: %v", err) - } - - // Verify embedded struct fields were set correctly - if config.Enabled { - t.Errorf("Expected Enabled=false, got %v", config.Enabled) - } - - if config.ScanIntervalSeconds != 14400 { // 4 hours * 3600 - t.Errorf("Expected ScanIntervalSeconds=14400, got %v", config.ScanIntervalSeconds) - } - - if config.MaxConcurrent != 3 { - t.Errorf("Expected MaxConcurrent=3, got %v", config.MaxConcurrent) - } - - // Verify vacuum-specific fields were set correctly - if config.GarbageThreshold != 0.4 { - t.Errorf("Expected GarbageThreshold=0.4, got %v", config.GarbageThreshold) - } - - if config.MinVolumeAgeSeconds != 172800 { // 2 days * 86400 - t.Errorf("Expected MinVolumeAgeSeconds=172800, got %v", config.MinVolumeAgeSeconds) - } - - if config.MinIntervalSeconds != 86400 { // 1 day * 86400 - t.Errorf("Expected MinIntervalSeconds=86400, got %v", config.MinIntervalSeconds) - } - }) - - // Test with erasure coding config - t.Run("Erasure Coding Config", func(t *testing.T) { - // Simulate form data - formData := url.Values{ - "enabled": {"on"}, // checkbox field - "scan_interval_seconds_value": {"2"}, // interval field - "scan_interval_seconds_unit": {"hours"}, // interval unit - "max_concurrent": {"1"}, // number field - "quiet_for_seconds_value": {"10"}, // interval field - "quiet_for_seconds_unit": {"minutes"}, // interval unit - "fullness_ratio": {"0.85"}, // float field - "collection_filter": {"test_collection"}, // string field - "min_size_mb": {"50"}, // number field - } - - // Get schema - schema := tasks.GetTaskConfigSchema("erasure_coding") - if schema == nil { - t.Fatal("Failed to get erasure_coding schema") - } - - // Create config instance - config := &erasure_coding.Config{} - - // Parse form data - err := h.parseTaskConfigFromForm(formData, schema, config) - if err != nil { - t.Fatalf("Failed to parse form data: %v", err) - } - - // Verify embedded struct fields were set correctly - if !config.Enabled { - t.Errorf("Expected Enabled=true, got %v", config.Enabled) - } - - if config.ScanIntervalSeconds != 7200 { // 2 hours * 3600 - t.Errorf("Expected ScanIntervalSeconds=7200, got %v", config.ScanIntervalSeconds) - } - - if config.MaxConcurrent != 1 { - t.Errorf("Expected MaxConcurrent=1, got %v", config.MaxConcurrent) - } - - // Verify erasure coding-specific fields were set correctly - if config.QuietForSeconds != 600 { // 10 minutes * 60 - t.Errorf("Expected QuietForSeconds=600, got %v", config.QuietForSeconds) - } - - if config.FullnessRatio != 0.85 { - t.Errorf("Expected FullnessRatio=0.85, got %v", config.FullnessRatio) - } - - if config.CollectionFilter != "test_collection" { - t.Errorf("Expected CollectionFilter='test_collection', got %v", config.CollectionFilter) - } - - if config.MinSizeMB != 50 { - t.Errorf("Expected MinSizeMB=50, got %v", config.MinSizeMB) - } - }) -} - -func TestConfigurationValidation(t *testing.T) { - // Test that config structs can be validated and converted to protobuf format - taskTypes := []struct { - name string - config interface{} - }{ - { - "balance", - &balance.Config{ - BaseConfig: base.BaseConfig{ - Enabled: true, - ScanIntervalSeconds: 2400, - MaxConcurrent: 3, - }, - ImbalanceThreshold: 0.18, - MinServerCount: 4, - }, - }, - { - "vacuum", - &vacuum.Config{ - BaseConfig: base.BaseConfig{ - Enabled: false, - ScanIntervalSeconds: 7200, - MaxConcurrent: 2, - }, - GarbageThreshold: 0.35, - MinVolumeAgeSeconds: 86400, - MinIntervalSeconds: 604800, - }, - }, - { - "erasure_coding", - &erasure_coding.Config{ - BaseConfig: base.BaseConfig{ - Enabled: true, - ScanIntervalSeconds: 3600, - MaxConcurrent: 1, - }, - QuietForSeconds: 900, - FullnessRatio: 0.9, - CollectionFilter: "important", - MinSizeMB: 100, - }, - }, - } - - for _, test := range taskTypes { - t.Run(test.name, func(t *testing.T) { - // Test that configs can be converted to protobuf TaskPolicy - switch cfg := test.config.(type) { - case *balance.Config: - policy := cfg.ToTaskPolicy() - if policy == nil { - t.Fatal("ToTaskPolicy returned nil") - } - if policy.Enabled != cfg.Enabled { - t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled) - } - if policy.MaxConcurrent != int32(cfg.MaxConcurrent) { - t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent) - } - case *vacuum.Config: - policy := cfg.ToTaskPolicy() - if policy == nil { - t.Fatal("ToTaskPolicy returned nil") - } - if policy.Enabled != cfg.Enabled { - t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled) - } - if policy.MaxConcurrent != int32(cfg.MaxConcurrent) { - t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent) - } - case *erasure_coding.Config: - policy := cfg.ToTaskPolicy() - if policy == nil { - t.Fatal("ToTaskPolicy returned nil") - } - if policy.Enabled != cfg.Enabled { - t.Errorf("Expected Enabled=%v, got %v", cfg.Enabled, policy.Enabled) - } - if policy.MaxConcurrent != int32(cfg.MaxConcurrent) { - t.Errorf("Expected MaxConcurrent=%v, got %v", cfg.MaxConcurrent, policy.MaxConcurrent) - } - default: - t.Fatalf("Unknown config type: %T", test.config) - } - - // Test that configs can be validated - switch cfg := test.config.(type) { - case *balance.Config: - if err := cfg.Validate(); err != nil { - t.Errorf("Validation failed: %v", err) - } - case *vacuum.Config: - if err := cfg.Validate(); err != nil { - t.Errorf("Validation failed: %v", err) - } - case *erasure_coding.Config: - if err := cfg.Validate(); err != nil { - t.Errorf("Validation failed: %v", err) - } - } - }) - } -} - -func TestParseFieldFromForm_EdgeCases(t *testing.T) { - h := &MaintenanceHandlers{} - - // Test checkbox parsing (boolean fields) - t.Run("Checkbox Fields", func(t *testing.T) { - tests := []struct { - name string - formData url.Values - expectedValue bool - }{ - {"Checked checkbox", url.Values{"test_field": {"on"}}, true}, - {"Unchecked checkbox", url.Values{}, false}, - {"Empty value checkbox", url.Values{"test_field": {""}}, true}, // Present but empty means checked - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - schema := &tasks.TaskConfigSchema{ - Schema: config.Schema{ - Fields: []*config.Field{ - { - JSONName: "test_field", - Type: config.FieldTypeBool, - InputType: "checkbox", - }, - }, - }, - } - - type TestConfig struct { - TestField bool `json:"test_field"` - } - - config := &TestConfig{} - err := h.parseTaskConfigFromForm(test.formData, schema, config) - if err != nil { - t.Fatalf("parseTaskConfigFromForm failed: %v", err) - } - - if config.TestField != test.expectedValue { - t.Errorf("Expected %v, got %v", test.expectedValue, config.TestField) - } - }) - } - }) - - // Test interval parsing - t.Run("Interval Fields", func(t *testing.T) { - tests := []struct { - name string - value string - unit string - expectedSecs int - }{ - {"Minutes", "30", "minutes", 1800}, - {"Hours", "2", "hours", 7200}, - {"Days", "1", "days", 86400}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - formData := url.Values{ - "test_field_value": {test.value}, - "test_field_unit": {test.unit}, - } - - schema := &tasks.TaskConfigSchema{ - Schema: config.Schema{ - Fields: []*config.Field{ - { - JSONName: "test_field", - Type: config.FieldTypeInterval, - InputType: "interval", - }, - }, - }, - } - - type TestConfig struct { - TestField int `json:"test_field"` - } - - config := &TestConfig{} - err := h.parseTaskConfigFromForm(formData, schema, config) - if err != nil { - t.Fatalf("parseTaskConfigFromForm failed: %v", err) - } - - if config.TestField != test.expectedSecs { - t.Errorf("Expected %d seconds, got %d", test.expectedSecs, config.TestField) - } - }) - } - }) -} diff --git a/weed/admin/handlers/mq_handlers.go b/weed/admin/handlers/mq_handlers.go deleted file mode 100644 index 8508998e6..000000000 --- a/weed/admin/handlers/mq_handlers.go +++ /dev/null @@ -1,238 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" -) - -// MessageQueueHandlers contains all the HTTP handlers for message queue management -type MessageQueueHandlers struct { - adminServer *dash.AdminServer -} - -// NewMessageQueueHandlers creates a new instance of MessageQueueHandlers -func NewMessageQueueHandlers(adminServer *dash.AdminServer) *MessageQueueHandlers { - return &MessageQueueHandlers{ - adminServer: adminServer, - } -} - -// ShowBrokers renders the message queue brokers page -func (h *MessageQueueHandlers) ShowBrokers(c *gin.Context) { - // Get cluster brokers data - brokersData, err := h.adminServer.GetClusterBrokers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster brokers: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - brokersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - brokersComponent := app.ClusterBrokers(*brokersData) - layoutComponent := layout.Layout(c, brokersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowTopics renders the message queue topics page -func (h *MessageQueueHandlers) ShowTopics(c *gin.Context) { - // Get topics data - topicsData, err := h.adminServer.GetTopics() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get topics: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - topicsData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - topicsComponent := app.Topics(*topicsData) - layoutComponent := layout.Layout(c, topicsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowSubscribers renders the message queue subscribers page -func (h *MessageQueueHandlers) ShowSubscribers(c *gin.Context) { - // Get subscribers data - subscribersData, err := h.adminServer.GetSubscribers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get subscribers: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - subscribersData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - subscribersComponent := app.Subscribers(*subscribersData) - layoutComponent := layout.Layout(c, subscribersComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// ShowTopicDetails renders the topic details page -func (h *MessageQueueHandlers) ShowTopicDetails(c *gin.Context) { - // Get topic parameters from URL - namespace := c.Param("namespace") - topicName := c.Param("topic") - - if namespace == "" || topicName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Missing namespace or topic name"}) - return - } - - // Get topic details data - topicDetailsData, err := h.adminServer.GetTopicDetails(namespace, topicName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get topic details: " + err.Error()}) - return - } - - // Set username - username := c.GetString("username") - if username == "" { - username = "admin" - } - topicDetailsData.Username = username - - // Render HTML template - c.Header("Content-Type", "text/html") - topicDetailsComponent := app.TopicDetails(*topicDetailsData) - layoutComponent := layout.Layout(c, topicDetailsComponent) - err = layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// GetTopicDetailsAPI returns topic details as JSON for AJAX calls -func (h *MessageQueueHandlers) GetTopicDetailsAPI(c *gin.Context) { - // Get topic parameters from URL - namespace := c.Param("namespace") - topicName := c.Param("topic") - - if namespace == "" || topicName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Missing namespace or topic name"}) - return - } - - // Get topic details data - topicDetailsData, err := h.adminServer.GetTopicDetails(namespace, topicName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get topic details: " + err.Error()}) - return - } - - // Return JSON data - c.JSON(http.StatusOK, topicDetailsData) -} - -// CreateTopicAPI creates a new topic with retention configuration -func (h *MessageQueueHandlers) CreateTopicAPI(c *gin.Context) { - var req struct { - Namespace string `json:"namespace" binding:"required"` - Name string `json:"name" binding:"required"` - PartitionCount int32 `json:"partition_count" binding:"required"` - Retention struct { - Enabled bool `json:"enabled"` - RetentionSeconds int64 `json:"retention_seconds"` - } `json:"retention"` - } - - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Validate inputs - if req.PartitionCount < 1 || req.PartitionCount > 100 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Partition count must be between 1 and 100"}) - return - } - - if req.Retention.Enabled && req.Retention.RetentionSeconds <= 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Retention seconds must be positive when retention is enabled"}) - return - } - - // Create the topic via admin server - err := h.adminServer.CreateTopicWithRetention(req.Namespace, req.Name, req.PartitionCount, req.Retention.Enabled, req.Retention.RetentionSeconds) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create topic: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Topic created successfully", - "topic": fmt.Sprintf("%s.%s", req.Namespace, req.Name), - }) -} - -type UpdateTopicRetentionRequest struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - Retention struct { - Enabled bool `json:"enabled"` - RetentionSeconds int64 `json:"retention_seconds"` - } `json:"retention"` -} - -func (h *MessageQueueHandlers) UpdateTopicRetentionAPI(c *gin.Context) { - var request UpdateTopicRetentionRequest - if err := c.ShouldBindJSON(&request); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // Validate required fields - if request.Namespace == "" || request.Name == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "namespace and name are required"}) - return - } - - // Update the topic retention - err := h.adminServer.UpdateTopicRetention(request.Namespace, request.Name, request.Retention.Enabled, request.Retention.RetentionSeconds) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Topic retention updated successfully", - "topic": request.Namespace + "." + request.Name, - }) -} diff --git a/weed/admin/handlers/policy_handlers.go b/weed/admin/handlers/policy_handlers.go deleted file mode 100644 index c9850b219..000000000 --- a/weed/admin/handlers/policy_handlers.go +++ /dev/null @@ -1,273 +0,0 @@ -package handlers - -import ( - "fmt" - "net/http" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" -) - -// PolicyHandlers contains all the HTTP handlers for policy management -type PolicyHandlers struct { - adminServer *dash.AdminServer -} - -// NewPolicyHandlers creates a new instance of PolicyHandlers -func NewPolicyHandlers(adminServer *dash.AdminServer) *PolicyHandlers { - return &PolicyHandlers{ - adminServer: adminServer, - } -} - -// ShowPolicies renders the policies management page -func (h *PolicyHandlers) ShowPolicies(c *gin.Context) { - // Get policies data from the server - policiesData := h.getPoliciesData(c) - - // Render HTML template - c.Header("Content-Type", "text/html") - policiesComponent := app.Policies(policiesData) - layoutComponent := layout.Layout(c, policiesComponent) - err := layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// GetPolicies returns the list of policies as JSON -func (h *PolicyHandlers) GetPolicies(c *gin.Context) { - policies, err := h.adminServer.GetPolicies() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get policies: " + err.Error()}) - return - } - c.JSON(http.StatusOK, gin.H{"policies": policies}) -} - -// CreatePolicy handles policy creation -func (h *PolicyHandlers) CreatePolicy(c *gin.Context) { - var req dash.CreatePolicyRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Validate policy name - if req.Name == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy name is required"}) - return - } - - // Check if policy already exists - existingPolicy, err := h.adminServer.GetPolicy(req.Name) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to check existing policy: " + err.Error()}) - return - } - if existingPolicy != nil { - c.JSON(http.StatusConflict, gin.H{"error": "Policy with this name already exists"}) - return - } - - // Create the policy - err = h.adminServer.CreatePolicy(req.Name, req.Document) - if err != nil { - glog.Errorf("Failed to create policy %s: %v", req.Name, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create policy: " + err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{ - "success": true, - "message": "Policy created successfully", - "policy": req.Name, - }) -} - -// GetPolicy returns a specific policy -func (h *PolicyHandlers) GetPolicy(c *gin.Context) { - policyName := c.Param("name") - if policyName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy name is required"}) - return - } - - policy, err := h.adminServer.GetPolicy(policyName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get policy: " + err.Error()}) - return - } - - if policy == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Policy not found"}) - return - } - - c.JSON(http.StatusOK, policy) -} - -// UpdatePolicy handles policy updates -func (h *PolicyHandlers) UpdatePolicy(c *gin.Context) { - policyName := c.Param("name") - if policyName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy name is required"}) - return - } - - var req dash.UpdatePolicyRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Check if policy exists - existingPolicy, err := h.adminServer.GetPolicy(policyName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to check existing policy: " + err.Error()}) - return - } - if existingPolicy == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Policy not found"}) - return - } - - // Update the policy - err = h.adminServer.UpdatePolicy(policyName, req.Document) - if err != nil { - glog.Errorf("Failed to update policy %s: %v", policyName, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update policy: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "success": true, - "message": "Policy updated successfully", - "policy": policyName, - }) -} - -// DeletePolicy handles policy deletion -func (h *PolicyHandlers) DeletePolicy(c *gin.Context) { - policyName := c.Param("name") - if policyName == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy name is required"}) - return - } - - // Check if policy exists - existingPolicy, err := h.adminServer.GetPolicy(policyName) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to check existing policy: " + err.Error()}) - return - } - if existingPolicy == nil { - c.JSON(http.StatusNotFound, gin.H{"error": "Policy not found"}) - return - } - - // Delete the policy - err = h.adminServer.DeletePolicy(policyName) - if err != nil { - glog.Errorf("Failed to delete policy %s: %v", policyName, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete policy: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "success": true, - "message": "Policy deleted successfully", - "policy": policyName, - }) -} - -// ValidatePolicy validates a policy document without saving it -func (h *PolicyHandlers) ValidatePolicy(c *gin.Context) { - var req struct { - Document policy_engine.PolicyDocument `json:"document" binding:"required"` - } - - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Basic validation - if req.Document.Version == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy version is required"}) - return - } - - if len(req.Document.Statement) == 0 { - c.JSON(http.StatusBadRequest, gin.H{"error": "Policy must have at least one statement"}) - return - } - - // Validate each statement - for i, statement := range req.Document.Statement { - if statement.Effect != "Allow" && statement.Effect != "Deny" { - c.JSON(http.StatusBadRequest, gin.H{ - "error": fmt.Sprintf("Statement %d: Effect must be 'Allow' or 'Deny'", i+1), - }) - return - } - - if len(statement.Action.Strings()) == 0 { - c.JSON(http.StatusBadRequest, gin.H{ - "error": fmt.Sprintf("Statement %d: Action is required", i+1), - }) - return - } - - if len(statement.Resource.Strings()) == 0 { - c.JSON(http.StatusBadRequest, gin.H{ - "error": fmt.Sprintf("Statement %d: Resource is required", i+1), - }) - return - } - } - - c.JSON(http.StatusOK, gin.H{ - "valid": true, - "message": "Policy document is valid", - }) -} - -// getPoliciesData retrieves policies data from the server -func (h *PolicyHandlers) getPoliciesData(c *gin.Context) dash.PoliciesData { - username := c.GetString("username") - if username == "" { - username = "admin" - } - - // Get policies - policies, err := h.adminServer.GetPolicies() - if err != nil { - glog.Errorf("Failed to get policies: %v", err) - // Return empty data on error - return dash.PoliciesData{ - Username: username, - Policies: []dash.IAMPolicy{}, - TotalPolicies: 0, - LastUpdated: time.Now(), - } - } - - // Ensure policies is never nil - if policies == nil { - policies = []dash.IAMPolicy{} - } - - return dash.PoliciesData{ - Username: username, - Policies: policies, - TotalPolicies: len(policies), - LastUpdated: time.Now(), - } -} diff --git a/weed/admin/handlers/task_config_interface.go b/weed/admin/handlers/task_config_interface.go deleted file mode 100644 index dd22c5250..000000000 --- a/weed/admin/handlers/task_config_interface.go +++ /dev/null @@ -1,25 +0,0 @@ -package handlers - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" -) - -// TaskConfig defines the interface that all task configuration types must implement -type TaskConfig interface { - config.ConfigWithDefaults // Extends ConfigWithDefaults for type-safe schema operations - - // Common methods from BaseConfig - IsEnabled() bool - SetEnabled(enabled bool) - - // Protobuf serialization methods - no more map[string]interface{}! - ToTaskPolicy() *worker_pb.TaskPolicy - FromTaskPolicy(policy *worker_pb.TaskPolicy) error -} - -// TaskConfigProvider defines the interface for creating specific task config types -type TaskConfigProvider interface { - NewConfig() TaskConfig - GetTaskType() string -} diff --git a/weed/admin/handlers/user_handlers.go b/weed/admin/handlers/user_handlers.go deleted file mode 100644 index 9f36848c0..000000000 --- a/weed/admin/handlers/user_handlers.go +++ /dev/null @@ -1,255 +0,0 @@ -package handlers - -import ( - "net/http" - "time" - - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/view/app" - "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// UserHandlers contains all the HTTP handlers for user management -type UserHandlers struct { - adminServer *dash.AdminServer -} - -// NewUserHandlers creates a new instance of UserHandlers -func NewUserHandlers(adminServer *dash.AdminServer) *UserHandlers { - return &UserHandlers{ - adminServer: adminServer, - } -} - -// ShowObjectStoreUsers renders the object store users management page -func (h *UserHandlers) ShowObjectStoreUsers(c *gin.Context) { - // Get object store users data from the server - usersData := h.getObjectStoreUsersData(c) - - // Render HTML template - c.Header("Content-Type", "text/html") - usersComponent := app.ObjectStoreUsers(usersData) - layoutComponent := layout.Layout(c, usersComponent) - err := layoutComponent.Render(c.Request.Context(), c.Writer) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) - return - } -} - -// GetUsers returns the list of users as JSON -func (h *UserHandlers) GetUsers(c *gin.Context) { - users, err := h.adminServer.GetObjectStoreUsers() - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get users: " + err.Error()}) - return - } - c.JSON(http.StatusOK, gin.H{"users": users}) -} - -// CreateUser handles user creation -func (h *UserHandlers) CreateUser(c *gin.Context) { - var req dash.CreateUserRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - // Validate required fields - if req.Username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - user, err := h.adminServer.CreateObjectStoreUser(req) - if err != nil { - glog.Errorf("Failed to create user %s: %v", req.Username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create user: " + err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{ - "message": "User created successfully", - "user": user, - }) -} - -// UpdateUser handles user updates -func (h *UserHandlers) UpdateUser(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - var req dash.UpdateUserRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - user, err := h.adminServer.UpdateObjectStoreUser(username, req) - if err != nil { - glog.Errorf("Failed to update user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update user: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "User updated successfully", - "user": user, - }) -} - -// DeleteUser handles user deletion -func (h *UserHandlers) DeleteUser(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - err := h.adminServer.DeleteObjectStoreUser(username) - if err != nil { - glog.Errorf("Failed to delete user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete user: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "User deleted successfully", - }) -} - -// GetUserDetails returns detailed information about a specific user -func (h *UserHandlers) GetUserDetails(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - user, err := h.adminServer.GetObjectStoreUserDetails(username) - if err != nil { - c.JSON(http.StatusNotFound, gin.H{"error": "User not found: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, user) -} - -// CreateAccessKey creates a new access key for a user -func (h *UserHandlers) CreateAccessKey(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - accessKey, err := h.adminServer.CreateAccessKey(username) - if err != nil { - glog.Errorf("Failed to create access key for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create access key: " + err.Error()}) - return - } - - c.JSON(http.StatusCreated, gin.H{ - "message": "Access key created successfully", - "access_key": accessKey, - }) -} - -// DeleteAccessKey deletes an access key for a user -func (h *UserHandlers) DeleteAccessKey(c *gin.Context) { - username := c.Param("username") - accessKeyId := c.Param("accessKeyId") - - if username == "" || accessKeyId == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username and access key ID are required"}) - return - } - - err := h.adminServer.DeleteAccessKey(username, accessKeyId) - if err != nil { - glog.Errorf("Failed to delete access key %s for user %s: %v", accessKeyId, username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete access key: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "Access key deleted successfully", - }) -} - -// GetUserPolicies returns the policies for a user -func (h *UserHandlers) GetUserPolicies(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - policies, err := h.adminServer.GetUserPolicies(username) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get user policies: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"policies": policies}) -} - -// UpdateUserPolicies updates the policies for a user -func (h *UserHandlers) UpdateUserPolicies(c *gin.Context) { - username := c.Param("username") - if username == "" { - c.JSON(http.StatusBadRequest, gin.H{"error": "Username is required"}) - return - } - - var req dash.UpdateUserPoliciesRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) - return - } - - err := h.adminServer.UpdateUserPolicies(username, req.Actions) - if err != nil { - glog.Errorf("Failed to update policies for user %s: %v", username, err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update user policies: " + err.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{ - "message": "User policies updated successfully", - }) -} - -// getObjectStoreUsersData retrieves object store users data from the server -func (h *UserHandlers) getObjectStoreUsersData(c *gin.Context) dash.ObjectStoreUsersData { - username := c.GetString("username") - if username == "" { - username = "admin" - } - - // Get object store users - users, err := h.adminServer.GetObjectStoreUsers() - if err != nil { - glog.Errorf("Failed to get object store users: %v", err) - // Return empty data on error - return dash.ObjectStoreUsersData{ - Username: username, - Users: []dash.ObjectStoreUser{}, - TotalUsers: 0, - LastUpdated: time.Now(), - } - } - - return dash.ObjectStoreUsersData{ - Username: username, - Users: users, - TotalUsers: len(users), - LastUpdated: time.Now(), - } -} diff --git a/weed/admin/maintenance/config_schema.go b/weed/admin/maintenance/config_schema.go deleted file mode 100644 index c911ad59c..000000000 --- a/weed/admin/maintenance/config_schema.go +++ /dev/null @@ -1,190 +0,0 @@ -package maintenance - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/config" -) - -// Type aliases for backward compatibility -type ConfigFieldType = config.FieldType -type ConfigFieldUnit = config.FieldUnit -type ConfigField = config.Field - -// Constant aliases for backward compatibility -const ( - FieldTypeBool = config.FieldTypeBool - FieldTypeInt = config.FieldTypeInt - FieldTypeDuration = config.FieldTypeDuration - FieldTypeInterval = config.FieldTypeInterval - FieldTypeString = config.FieldTypeString - FieldTypeFloat = config.FieldTypeFloat -) - -const ( - UnitSeconds = config.UnitSeconds - UnitMinutes = config.UnitMinutes - UnitHours = config.UnitHours - UnitDays = config.UnitDays - UnitCount = config.UnitCount - UnitNone = config.UnitNone -) - -// Function aliases for backward compatibility -var ( - SecondsToIntervalValueUnit = config.SecondsToIntervalValueUnit - IntervalValueUnitToSeconds = config.IntervalValueUnitToSeconds -) - -// MaintenanceConfigSchema defines the schema for maintenance configuration -type MaintenanceConfigSchema struct { - config.Schema // Embed common schema functionality -} - -// GetMaintenanceConfigSchema returns the schema for maintenance configuration -func GetMaintenanceConfigSchema() *MaintenanceConfigSchema { - return &MaintenanceConfigSchema{ - Schema: config.Schema{ - Fields: []*config.Field{ - { - Name: "enabled", - JSONName: "enabled", - Type: config.FieldTypeBool, - DefaultValue: true, - Required: false, - DisplayName: "Enable Maintenance System", - Description: "When enabled, the system will automatically scan for and execute maintenance tasks", - HelpText: "Toggle this to enable or disable the entire maintenance system", - InputType: "checkbox", - CSSClasses: "form-check-input", - }, - { - Name: "scan_interval_seconds", - JSONName: "scan_interval_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 30 * 60, // 30 minutes in seconds - MinValue: 1 * 60, // 1 minute - MaxValue: 24 * 60 * 60, // 24 hours - Required: true, - DisplayName: "Scan Interval", - Description: "How often to scan for maintenance tasks", - HelpText: "The system will check for new maintenance tasks at this interval", - Placeholder: "30", - Unit: config.UnitMinutes, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "worker_timeout_seconds", - JSONName: "worker_timeout_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 5 * 60, // 5 minutes - MinValue: 1 * 60, // 1 minute - MaxValue: 60 * 60, // 1 hour - Required: true, - DisplayName: "Worker Timeout", - Description: "How long to wait for worker heartbeat before considering it inactive", - HelpText: "Workers that don't send heartbeats within this time are considered offline", - Placeholder: "5", - Unit: config.UnitMinutes, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "task_timeout_seconds", - JSONName: "task_timeout_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 2 * 60 * 60, // 2 hours - MinValue: 10 * 60, // 10 minutes - MaxValue: 24 * 60 * 60, // 24 hours - Required: true, - DisplayName: "Task Timeout", - Description: "Maximum time allowed for a task to complete", - HelpText: "Tasks that exceed this duration will be marked as failed", - Placeholder: "2", - Unit: config.UnitHours, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "retry_delay_seconds", - JSONName: "retry_delay_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 15 * 60, // 15 minutes - MinValue: 1 * 60, // 1 minute - MaxValue: 24 * 60 * 60, // 24 hours - Required: true, - DisplayName: "Retry Delay", - Description: "How long to wait before retrying a failed task", - HelpText: "Failed tasks will be retried after this delay", - Placeholder: "15", - Unit: config.UnitMinutes, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "max_retries", - JSONName: "max_retries", - Type: config.FieldTypeInt, - DefaultValue: 3, - MinValue: 0, - MaxValue: 10, - Required: true, - DisplayName: "Max Retries", - Description: "Maximum number of times to retry a failed task", - HelpText: "Tasks that fail more than this many times will be marked as permanently failed", - Placeholder: "3", - Unit: config.UnitCount, - InputType: "number", - CSSClasses: "form-control", - }, - { - Name: "cleanup_interval_seconds", - JSONName: "cleanup_interval_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 24 * 60 * 60, // 24 hours - MinValue: 1 * 60 * 60, // 1 hour - MaxValue: 7 * 24 * 60 * 60, // 7 days - Required: true, - DisplayName: "Cleanup Interval", - Description: "How often to run maintenance cleanup operations", - HelpText: "Removes old task records and temporary files at this interval", - Placeholder: "24", - Unit: config.UnitHours, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "task_retention_seconds", - JSONName: "task_retention_seconds", - Type: config.FieldTypeInterval, - DefaultValue: 7 * 24 * 60 * 60, // 7 days - MinValue: 1 * 24 * 60 * 60, // 1 day - MaxValue: 30 * 24 * 60 * 60, // 30 days - Required: true, - DisplayName: "Task Retention", - Description: "How long to keep completed task records", - HelpText: "Task history older than this duration will be automatically deleted", - Placeholder: "7", - Unit: config.UnitDays, - InputType: "interval", - CSSClasses: "form-control", - }, - { - Name: "global_max_concurrent", - JSONName: "global_max_concurrent", - Type: config.FieldTypeInt, - DefaultValue: 10, - MinValue: 1, - MaxValue: 100, - Required: true, - DisplayName: "Global Max Concurrent Tasks", - Description: "Maximum number of maintenance tasks that can run simultaneously across all workers", - HelpText: "Limits the total number of maintenance operations to control system load", - Placeholder: "10", - Unit: config.UnitCount, - InputType: "number", - CSSClasses: "form-control", - }, - }, - }, - } -} diff --git a/weed/admin/maintenance/config_verification.go b/weed/admin/maintenance/config_verification.go deleted file mode 100644 index 0ac40aad1..000000000 --- a/weed/admin/maintenance/config_verification.go +++ /dev/null @@ -1,124 +0,0 @@ -package maintenance - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" -) - -// VerifyProtobufConfig demonstrates that the protobuf configuration system is working -func VerifyProtobufConfig() error { - // Create configuration manager - configManager := NewMaintenanceConfigManager() - config := configManager.GetConfig() - - // Verify basic configuration - if !config.Enabled { - return fmt.Errorf("expected config to be enabled by default") - } - - if config.ScanIntervalSeconds != 30*60 { - return fmt.Errorf("expected scan interval to be 1800 seconds, got %d", config.ScanIntervalSeconds) - } - - // Verify policy configuration - if config.Policy == nil { - return fmt.Errorf("expected policy to be configured") - } - - if config.Policy.GlobalMaxConcurrent != 4 { - return fmt.Errorf("expected global max concurrent to be 4, got %d", config.Policy.GlobalMaxConcurrent) - } - - // Verify task policies - vacuumPolicy := config.Policy.TaskPolicies["vacuum"] - if vacuumPolicy == nil { - return fmt.Errorf("expected vacuum policy to be configured") - } - - if !vacuumPolicy.Enabled { - return fmt.Errorf("expected vacuum policy to be enabled") - } - - // Verify typed configuration access - vacuumConfig := vacuumPolicy.GetVacuumConfig() - if vacuumConfig == nil { - return fmt.Errorf("expected vacuum config to be accessible") - } - - if vacuumConfig.GarbageThreshold != 0.3 { - return fmt.Errorf("expected garbage threshold to be 0.3, got %f", vacuumConfig.GarbageThreshold) - } - - // Verify helper functions work - if !IsTaskEnabled(config.Policy, "vacuum") { - return fmt.Errorf("expected vacuum task to be enabled via helper function") - } - - maxConcurrent := GetMaxConcurrent(config.Policy, "vacuum") - if maxConcurrent != 2 { - return fmt.Errorf("expected vacuum max concurrent to be 2, got %d", maxConcurrent) - } - - // Verify erasure coding configuration - ecPolicy := config.Policy.TaskPolicies["erasure_coding"] - if ecPolicy == nil { - return fmt.Errorf("expected EC policy to be configured") - } - - ecConfig := ecPolicy.GetErasureCodingConfig() - if ecConfig == nil { - return fmt.Errorf("expected EC config to be accessible") - } - - // Verify configurable EC fields only - if ecConfig.FullnessRatio <= 0 || ecConfig.FullnessRatio > 1 { - return fmt.Errorf("expected EC config to have valid fullness ratio (0-1), got %f", ecConfig.FullnessRatio) - } - - return nil -} - -// GetProtobufConfigSummary returns a summary of the current protobuf configuration -func GetProtobufConfigSummary() string { - configManager := NewMaintenanceConfigManager() - config := configManager.GetConfig() - - summary := fmt.Sprintf("SeaweedFS Protobuf Maintenance Configuration:\n") - summary += fmt.Sprintf(" Enabled: %v\n", config.Enabled) - summary += fmt.Sprintf(" Scan Interval: %d seconds\n", config.ScanIntervalSeconds) - summary += fmt.Sprintf(" Max Retries: %d\n", config.MaxRetries) - summary += fmt.Sprintf(" Global Max Concurrent: %d\n", config.Policy.GlobalMaxConcurrent) - summary += fmt.Sprintf(" Task Policies: %d configured\n", len(config.Policy.TaskPolicies)) - - for taskType, policy := range config.Policy.TaskPolicies { - summary += fmt.Sprintf(" %s: enabled=%v, max_concurrent=%d\n", - taskType, policy.Enabled, policy.MaxConcurrent) - } - - return summary -} - -// CreateCustomConfig demonstrates creating a custom protobuf configuration -func CreateCustomConfig() *worker_pb.MaintenanceConfig { - return &worker_pb.MaintenanceConfig{ - Enabled: true, - ScanIntervalSeconds: 60 * 60, // 1 hour - MaxRetries: 5, - Policy: &worker_pb.MaintenancePolicy{ - GlobalMaxConcurrent: 8, - TaskPolicies: map[string]*worker_pb.TaskPolicy{ - "custom_vacuum": { - Enabled: true, - MaxConcurrent: 4, - TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: &worker_pb.VacuumTaskConfig{ - GarbageThreshold: 0.5, - MinVolumeAgeHours: 48, - }, - }, - }, - }, - }, - } -} diff --git a/weed/admin/maintenance/maintenance_config_proto.go b/weed/admin/maintenance/maintenance_config_proto.go deleted file mode 100644 index 67a6b74be..000000000 --- a/weed/admin/maintenance/maintenance_config_proto.go +++ /dev/null @@ -1,287 +0,0 @@ -package maintenance - -import ( - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" -) - -// MaintenanceConfigManager handles protobuf-based configuration -type MaintenanceConfigManager struct { - config *worker_pb.MaintenanceConfig -} - -// NewMaintenanceConfigManager creates a new config manager with defaults -func NewMaintenanceConfigManager() *MaintenanceConfigManager { - return &MaintenanceConfigManager{ - config: DefaultMaintenanceConfigProto(), - } -} - -// DefaultMaintenanceConfigProto returns default configuration as protobuf -func DefaultMaintenanceConfigProto() *worker_pb.MaintenanceConfig { - return &worker_pb.MaintenanceConfig{ - Enabled: true, - ScanIntervalSeconds: 30 * 60, // 30 minutes - WorkerTimeoutSeconds: 5 * 60, // 5 minutes - TaskTimeoutSeconds: 2 * 60 * 60, // 2 hours - RetryDelaySeconds: 15 * 60, // 15 minutes - MaxRetries: 3, - CleanupIntervalSeconds: 24 * 60 * 60, // 24 hours - TaskRetentionSeconds: 7 * 24 * 60 * 60, // 7 days - // Policy field will be populated dynamically from separate task configuration files - Policy: nil, - } -} - -// GetConfig returns the current configuration -func (mcm *MaintenanceConfigManager) GetConfig() *worker_pb.MaintenanceConfig { - return mcm.config -} - -// Type-safe configuration accessors - -// GetVacuumConfig returns vacuum-specific configuration for a task type -func (mcm *MaintenanceConfigManager) GetVacuumConfig(taskType string) *worker_pb.VacuumTaskConfig { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - if vacuumConfig := policy.GetVacuumConfig(); vacuumConfig != nil { - return vacuumConfig - } - } - // Return defaults if not configured - return &worker_pb.VacuumTaskConfig{ - GarbageThreshold: 0.3, - MinVolumeAgeHours: 24, - MinIntervalSeconds: 7 * 24 * 60 * 60, // 7 days - } -} - -// GetErasureCodingConfig returns EC-specific configuration for a task type -func (mcm *MaintenanceConfigManager) GetErasureCodingConfig(taskType string) *worker_pb.ErasureCodingTaskConfig { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - if ecConfig := policy.GetErasureCodingConfig(); ecConfig != nil { - return ecConfig - } - } - // Return defaults if not configured - return &worker_pb.ErasureCodingTaskConfig{ - FullnessRatio: 0.95, - QuietForSeconds: 3600, - MinVolumeSizeMb: 100, - CollectionFilter: "", - } -} - -// GetBalanceConfig returns balance-specific configuration for a task type -func (mcm *MaintenanceConfigManager) GetBalanceConfig(taskType string) *worker_pb.BalanceTaskConfig { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - if balanceConfig := policy.GetBalanceConfig(); balanceConfig != nil { - return balanceConfig - } - } - // Return defaults if not configured - return &worker_pb.BalanceTaskConfig{ - ImbalanceThreshold: 0.2, - MinServerCount: 2, - } -} - -// GetReplicationConfig returns replication-specific configuration for a task type -func (mcm *MaintenanceConfigManager) GetReplicationConfig(taskType string) *worker_pb.ReplicationTaskConfig { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - if replicationConfig := policy.GetReplicationConfig(); replicationConfig != nil { - return replicationConfig - } - } - // Return defaults if not configured - return &worker_pb.ReplicationTaskConfig{ - TargetReplicaCount: 2, - } -} - -// Typed convenience methods for getting task configurations - -// GetVacuumTaskConfigForType returns vacuum configuration for a specific task type -func (mcm *MaintenanceConfigManager) GetVacuumTaskConfigForType(taskType string) *worker_pb.VacuumTaskConfig { - return GetVacuumTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType)) -} - -// GetErasureCodingTaskConfigForType returns erasure coding configuration for a specific task type -func (mcm *MaintenanceConfigManager) GetErasureCodingTaskConfigForType(taskType string) *worker_pb.ErasureCodingTaskConfig { - return GetErasureCodingTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType)) -} - -// GetBalanceTaskConfigForType returns balance configuration for a specific task type -func (mcm *MaintenanceConfigManager) GetBalanceTaskConfigForType(taskType string) *worker_pb.BalanceTaskConfig { - return GetBalanceTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType)) -} - -// GetReplicationTaskConfigForType returns replication configuration for a specific task type -func (mcm *MaintenanceConfigManager) GetReplicationTaskConfigForType(taskType string) *worker_pb.ReplicationTaskConfig { - return GetReplicationTaskConfig(mcm.config.Policy, MaintenanceTaskType(taskType)) -} - -// Helper methods - -func (mcm *MaintenanceConfigManager) getTaskPolicy(taskType string) *worker_pb.TaskPolicy { - if mcm.config.Policy != nil && mcm.config.Policy.TaskPolicies != nil { - return mcm.config.Policy.TaskPolicies[taskType] - } - return nil -} - -// IsTaskEnabled returns whether a task type is enabled -func (mcm *MaintenanceConfigManager) IsTaskEnabled(taskType string) bool { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - return policy.Enabled - } - return false -} - -// GetMaxConcurrent returns the max concurrent limit for a task type -func (mcm *MaintenanceConfigManager) GetMaxConcurrent(taskType string) int32 { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - return policy.MaxConcurrent - } - return 1 // Default -} - -// GetRepeatInterval returns the repeat interval for a task type in seconds -func (mcm *MaintenanceConfigManager) GetRepeatInterval(taskType string) int32 { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - return policy.RepeatIntervalSeconds - } - return mcm.config.Policy.DefaultRepeatIntervalSeconds -} - -// GetCheckInterval returns the check interval for a task type in seconds -func (mcm *MaintenanceConfigManager) GetCheckInterval(taskType string) int32 { - if policy := mcm.getTaskPolicy(taskType); policy != nil { - return policy.CheckIntervalSeconds - } - return mcm.config.Policy.DefaultCheckIntervalSeconds -} - -// Duration accessor methods - -// GetScanInterval returns the scan interval as a time.Duration -func (mcm *MaintenanceConfigManager) GetScanInterval() time.Duration { - return time.Duration(mcm.config.ScanIntervalSeconds) * time.Second -} - -// GetWorkerTimeout returns the worker timeout as a time.Duration -func (mcm *MaintenanceConfigManager) GetWorkerTimeout() time.Duration { - return time.Duration(mcm.config.WorkerTimeoutSeconds) * time.Second -} - -// GetTaskTimeout returns the task timeout as a time.Duration -func (mcm *MaintenanceConfigManager) GetTaskTimeout() time.Duration { - return time.Duration(mcm.config.TaskTimeoutSeconds) * time.Second -} - -// GetRetryDelay returns the retry delay as a time.Duration -func (mcm *MaintenanceConfigManager) GetRetryDelay() time.Duration { - return time.Duration(mcm.config.RetryDelaySeconds) * time.Second -} - -// GetCleanupInterval returns the cleanup interval as a time.Duration -func (mcm *MaintenanceConfigManager) GetCleanupInterval() time.Duration { - return time.Duration(mcm.config.CleanupIntervalSeconds) * time.Second -} - -// GetTaskRetention returns the task retention period as a time.Duration -func (mcm *MaintenanceConfigManager) GetTaskRetention() time.Duration { - return time.Duration(mcm.config.TaskRetentionSeconds) * time.Second -} - -// ValidateMaintenanceConfigWithSchema validates protobuf maintenance configuration using ConfigField rules -func ValidateMaintenanceConfigWithSchema(config *worker_pb.MaintenanceConfig) error { - if config == nil { - return fmt.Errorf("configuration cannot be nil") - } - - // Get the schema to access field validation rules - schema := GetMaintenanceConfigSchema() - - // Validate each field individually using the ConfigField rules - if err := validateFieldWithSchema(schema, "enabled", config.Enabled); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "scan_interval_seconds", int(config.ScanIntervalSeconds)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "worker_timeout_seconds", int(config.WorkerTimeoutSeconds)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "task_timeout_seconds", int(config.TaskTimeoutSeconds)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "retry_delay_seconds", int(config.RetryDelaySeconds)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "max_retries", int(config.MaxRetries)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "cleanup_interval_seconds", int(config.CleanupIntervalSeconds)); err != nil { - return err - } - - if err := validateFieldWithSchema(schema, "task_retention_seconds", int(config.TaskRetentionSeconds)); err != nil { - return err - } - - // Validate policy fields if present - if config.Policy != nil { - // Note: These field names might need to be adjusted based on the actual schema - if err := validatePolicyField("global_max_concurrent", int(config.Policy.GlobalMaxConcurrent)); err != nil { - return err - } - - if err := validatePolicyField("default_repeat_interval_seconds", int(config.Policy.DefaultRepeatIntervalSeconds)); err != nil { - return err - } - - if err := validatePolicyField("default_check_interval_seconds", int(config.Policy.DefaultCheckIntervalSeconds)); err != nil { - return err - } - } - - return nil -} - -// validateFieldWithSchema validates a single field using its ConfigField definition -func validateFieldWithSchema(schema *MaintenanceConfigSchema, fieldName string, value interface{}) error { - field := schema.GetFieldByName(fieldName) - if field == nil { - // Field not in schema, skip validation - return nil - } - - return field.ValidateValue(value) -} - -// validatePolicyField validates policy fields (simplified validation for now) -func validatePolicyField(fieldName string, value int) error { - switch fieldName { - case "global_max_concurrent": - if value < 1 || value > 20 { - return fmt.Errorf("Global Max Concurrent must be between 1 and 20, got %d", value) - } - case "default_repeat_interval": - if value < 1 || value > 168 { - return fmt.Errorf("Default Repeat Interval must be between 1 and 168 hours, got %d", value) - } - case "default_check_interval": - if value < 1 || value > 168 { - return fmt.Errorf("Default Check Interval must be between 1 and 168 hours, got %d", value) - } - } - return nil -} diff --git a/weed/admin/maintenance/maintenance_integration.go b/weed/admin/maintenance/maintenance_integration.go deleted file mode 100644 index 20f1ea97d..000000000 --- a/weed/admin/maintenance/maintenance_integration.go +++ /dev/null @@ -1,480 +0,0 @@ -package maintenance - -import ( - "time" - - "github.com/seaweedfs/seaweedfs/weed/admin/topology" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -// MaintenanceIntegration bridges the task system with existing maintenance -type MaintenanceIntegration struct { - taskRegistry *types.TaskRegistry - uiRegistry *types.UIRegistry - - // Bridge to existing system - maintenanceQueue *MaintenanceQueue - maintenancePolicy *MaintenancePolicy - - // Pending operations tracker - pendingOperations *PendingOperations - - // Active topology for task detection and target selection - activeTopology *topology.ActiveTopology - - // Type conversion maps - taskTypeMap map[types.TaskType]MaintenanceTaskType - revTaskTypeMap map[MaintenanceTaskType]types.TaskType - priorityMap map[types.TaskPriority]MaintenanceTaskPriority - revPriorityMap map[MaintenanceTaskPriority]types.TaskPriority -} - -// NewMaintenanceIntegration creates the integration bridge -func NewMaintenanceIntegration(queue *MaintenanceQueue, policy *MaintenancePolicy) *MaintenanceIntegration { - integration := &MaintenanceIntegration{ - taskRegistry: tasks.GetGlobalTypesRegistry(), // Use global types registry with auto-registered tasks - uiRegistry: tasks.GetGlobalUIRegistry(), // Use global UI registry with auto-registered UI providers - maintenanceQueue: queue, - maintenancePolicy: policy, - pendingOperations: NewPendingOperations(), - } - - // Initialize active topology with 10 second recent task window - integration.activeTopology = topology.NewActiveTopology(10) - - // Initialize type conversion maps - integration.initializeTypeMaps() - - // Register all tasks - integration.registerAllTasks() - - return integration -} - -// initializeTypeMaps creates the type conversion maps for dynamic conversion -func (s *MaintenanceIntegration) initializeTypeMaps() { - // Initialize empty maps - s.taskTypeMap = make(map[types.TaskType]MaintenanceTaskType) - s.revTaskTypeMap = make(map[MaintenanceTaskType]types.TaskType) - - // Build task type mappings dynamically from registered tasks after registration - // This will be called from registerAllTasks() after all tasks are registered - - // Priority mappings (these are static and don't depend on registered tasks) - s.priorityMap = map[types.TaskPriority]MaintenanceTaskPriority{ - types.TaskPriorityLow: PriorityLow, - types.TaskPriorityNormal: PriorityNormal, - types.TaskPriorityHigh: PriorityHigh, - } - - // Reverse priority mappings - s.revPriorityMap = map[MaintenanceTaskPriority]types.TaskPriority{ - PriorityLow: types.TaskPriorityLow, - PriorityNormal: types.TaskPriorityNormal, - PriorityHigh: types.TaskPriorityHigh, - PriorityCritical: types.TaskPriorityHigh, // Map critical to high - } -} - -// buildTaskTypeMappings dynamically builds task type mappings from registered tasks -func (s *MaintenanceIntegration) buildTaskTypeMappings() { - // Clear existing mappings - s.taskTypeMap = make(map[types.TaskType]MaintenanceTaskType) - s.revTaskTypeMap = make(map[MaintenanceTaskType]types.TaskType) - - // Build mappings from registered detectors - for workerTaskType := range s.taskRegistry.GetAllDetectors() { - // Convert types.TaskType to MaintenanceTaskType by string conversion - maintenanceTaskType := MaintenanceTaskType(string(workerTaskType)) - - s.taskTypeMap[workerTaskType] = maintenanceTaskType - s.revTaskTypeMap[maintenanceTaskType] = workerTaskType - - glog.V(3).Infof("Dynamically mapped task type: %s <-> %s", workerTaskType, maintenanceTaskType) - } - - glog.V(2).Infof("Built %d dynamic task type mappings", len(s.taskTypeMap)) -} - -// registerAllTasks registers all available tasks -func (s *MaintenanceIntegration) registerAllTasks() { - // Tasks are already auto-registered via import statements - // No manual registration needed - - // Build dynamic type mappings from registered tasks - s.buildTaskTypeMappings() - - // Configure tasks from policy - s.ConfigureTasksFromPolicy() - - registeredTaskTypes := make([]string, 0, len(s.taskTypeMap)) - for _, maintenanceTaskType := range s.taskTypeMap { - registeredTaskTypes = append(registeredTaskTypes, string(maintenanceTaskType)) - } - glog.V(1).Infof("Registered tasks: %v", registeredTaskTypes) -} - -// ConfigureTasksFromPolicy dynamically configures all registered tasks based on the maintenance policy -func (s *MaintenanceIntegration) ConfigureTasksFromPolicy() { - if s.maintenancePolicy == nil { - return - } - - // Configure all registered detectors and schedulers dynamically using policy configuration - configuredCount := 0 - - // Get all registered task types from the registry - for taskType, detector := range s.taskRegistry.GetAllDetectors() { - // Configure detector using policy-based configuration - s.configureDetectorFromPolicy(taskType, detector) - configuredCount++ - } - - for taskType, scheduler := range s.taskRegistry.GetAllSchedulers() { - // Configure scheduler using policy-based configuration - s.configureSchedulerFromPolicy(taskType, scheduler) - } - - glog.V(1).Infof("Dynamically configured %d task types from maintenance policy", configuredCount) -} - -// configureDetectorFromPolicy configures a detector using policy-based configuration -func (s *MaintenanceIntegration) configureDetectorFromPolicy(taskType types.TaskType, detector types.TaskDetector) { - // Try to configure using PolicyConfigurableDetector interface if supported - if configurableDetector, ok := detector.(types.PolicyConfigurableDetector); ok { - configurableDetector.ConfigureFromPolicy(s.maintenancePolicy) - glog.V(2).Infof("Configured detector %s using policy interface", taskType) - return - } - - // Apply basic configuration that all detectors should support - if basicDetector, ok := detector.(interface{ SetEnabled(bool) }); ok { - // Convert task system type to maintenance task type for policy lookup - maintenanceTaskType, exists := s.taskTypeMap[taskType] - if exists { - enabled := IsTaskEnabled(s.maintenancePolicy, maintenanceTaskType) - basicDetector.SetEnabled(enabled) - glog.V(3).Infof("Set enabled=%v for detector %s", enabled, taskType) - } - } - - // For detectors that don't implement PolicyConfigurableDetector interface, - // they should be updated to implement it for full policy-based configuration - glog.V(2).Infof("Detector %s should implement PolicyConfigurableDetector interface for full policy support", taskType) -} - -// configureSchedulerFromPolicy configures a scheduler using policy-based configuration -func (s *MaintenanceIntegration) configureSchedulerFromPolicy(taskType types.TaskType, scheduler types.TaskScheduler) { - // Try to configure using PolicyConfigurableScheduler interface if supported - if configurableScheduler, ok := scheduler.(types.PolicyConfigurableScheduler); ok { - configurableScheduler.ConfigureFromPolicy(s.maintenancePolicy) - glog.V(2).Infof("Configured scheduler %s using policy interface", taskType) - return - } - - // Apply basic configuration that all schedulers should support - maintenanceTaskType, exists := s.taskTypeMap[taskType] - if !exists { - glog.V(3).Infof("No maintenance task type mapping for %s, skipping configuration", taskType) - return - } - - // Set enabled status if scheduler supports it - if enableableScheduler, ok := scheduler.(interface{ SetEnabled(bool) }); ok { - enabled := IsTaskEnabled(s.maintenancePolicy, maintenanceTaskType) - enableableScheduler.SetEnabled(enabled) - glog.V(3).Infof("Set enabled=%v for scheduler %s", enabled, taskType) - } - - // Set max concurrent if scheduler supports it - if concurrentScheduler, ok := scheduler.(interface{ SetMaxConcurrent(int) }); ok { - maxConcurrent := GetMaxConcurrent(s.maintenancePolicy, maintenanceTaskType) - if maxConcurrent > 0 { - concurrentScheduler.SetMaxConcurrent(maxConcurrent) - glog.V(3).Infof("Set max concurrent=%d for scheduler %s", maxConcurrent, taskType) - } - } - - // For schedulers that don't implement PolicyConfigurableScheduler interface, - // they should be updated to implement it for full policy-based configuration - glog.V(2).Infof("Scheduler %s should implement PolicyConfigurableScheduler interface for full policy support", taskType) -} - -// ScanWithTaskDetectors performs a scan using the task system -func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.VolumeHealthMetrics) ([]*TaskDetectionResult, error) { - // Note: ActiveTopology gets updated from topology info instead of volume metrics - glog.V(2).Infof("Processed %d volume metrics for task detection", len(volumeMetrics)) - - // Filter out volumes with pending operations to avoid duplicates - filteredMetrics := s.pendingOperations.FilterVolumeMetricsExcludingPending(volumeMetrics) - - glog.V(1).Infof("Scanning %d volumes (filtered from %d) excluding pending operations", - len(filteredMetrics), len(volumeMetrics)) - - var allResults []*TaskDetectionResult - - // Create cluster info - clusterInfo := &types.ClusterInfo{ - TotalVolumes: len(filteredMetrics), - LastUpdated: time.Now(), - ActiveTopology: s.activeTopology, // Provide ActiveTopology for destination planning - } - - // Run detection for each registered task type - for taskType, detector := range s.taskRegistry.GetAllDetectors() { - if !detector.IsEnabled() { - continue - } - - glog.V(2).Infof("Running detection for task type: %s", taskType) - - results, err := detector.ScanForTasks(filteredMetrics, clusterInfo) - if err != nil { - glog.Errorf("Failed to scan for %s tasks: %v", taskType, err) - continue - } - - // Convert results to existing system format and check for conflicts - for _, result := range results { - existingResult := s.convertToExistingFormat(result) - if existingResult != nil { - // Double-check for conflicts with pending operations - opType := s.mapMaintenanceTaskTypeToPendingOperationType(existingResult.TaskType) - if !s.pendingOperations.WouldConflictWithPending(existingResult.VolumeID, opType) { - // All task types should now have TypedParams populated during detection phase - if existingResult.TypedParams == nil { - glog.Warningf("Task %s for volume %d has no typed parameters - skipping (task parameter creation may have failed)", - existingResult.TaskType, existingResult.VolumeID) - continue - } - allResults = append(allResults, existingResult) - } else { - glog.V(2).Infof("Skipping task %s for volume %d due to conflict with pending operation", - existingResult.TaskType, existingResult.VolumeID) - } - } - } - - glog.V(2).Infof("Found %d %s tasks", len(results), taskType) - } - - return allResults, nil -} - -// UpdateTopologyInfo updates the volume shard tracker with topology information for empty servers -func (s *MaintenanceIntegration) UpdateTopologyInfo(topologyInfo *master_pb.TopologyInfo) error { - return s.activeTopology.UpdateTopology(topologyInfo) -} - -// convertToExistingFormat converts task results to existing system format using dynamic mapping -func (s *MaintenanceIntegration) convertToExistingFormat(result *types.TaskDetectionResult) *TaskDetectionResult { - // Convert types using mapping tables - existingType, exists := s.taskTypeMap[result.TaskType] - if !exists { - glog.Warningf("Unknown task type %s, skipping conversion", result.TaskType) - // Return nil to indicate conversion failed - caller should handle this - return nil - } - - existingPriority, exists := s.priorityMap[result.Priority] - if !exists { - glog.Warningf("Unknown priority %s, defaulting to normal", result.Priority) - existingPriority = PriorityNormal - } - - return &TaskDetectionResult{ - TaskType: existingType, - VolumeID: result.VolumeID, - Server: result.Server, - Collection: result.Collection, - Priority: existingPriority, - Reason: result.Reason, - TypedParams: result.TypedParams, - ScheduleAt: result.ScheduleAt, - } -} - -// CanScheduleWithTaskSchedulers determines if a task can be scheduled using task schedulers with dynamic type conversion -func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *MaintenanceTask, runningTasks []*MaintenanceTask, availableWorkers []*MaintenanceWorker) bool { - - // Convert existing types to task types using mapping - taskType, exists := s.revTaskTypeMap[task.Type] - if !exists { - return false // Fallback to existing logic for unknown types - } - - - // Convert task objects - taskObject := s.convertTaskToTaskSystem(task) - if taskObject == nil { - return false - } - - - runningTaskObjects := s.convertTasksToTaskSystem(runningTasks) - workerObjects := s.convertWorkersToTaskSystem(availableWorkers) - - - // Get the appropriate scheduler - scheduler := s.taskRegistry.GetScheduler(taskType) - if scheduler == nil { - return false - } - - - canSchedule := scheduler.CanScheduleNow(taskObject, runningTaskObjects, workerObjects) - - return canSchedule -} - -// convertTaskToTaskSystem converts existing task to task system format using dynamic mapping -func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask) *types.TaskInput { - // Convert task type using mapping - taskType, exists := s.revTaskTypeMap[task.Type] - if !exists { - glog.Errorf("Unknown task type %s in conversion, cannot convert task", task.Type) - // Return nil to indicate conversion failed - return nil - } - - // Convert priority using mapping - priority, exists := s.revPriorityMap[task.Priority] - if !exists { - glog.Warningf("Unknown priority %d in conversion, defaulting to normal", task.Priority) - priority = types.TaskPriorityNormal - } - - return &types.TaskInput{ - ID: task.ID, - Type: taskType, - Priority: priority, - VolumeID: task.VolumeID, - Server: task.Server, - Collection: task.Collection, - TypedParams: task.TypedParams, - CreatedAt: task.CreatedAt, - } -} - -// convertTasksToTaskSystem converts multiple tasks -func (s *MaintenanceIntegration) convertTasksToTaskSystem(tasks []*MaintenanceTask) []*types.TaskInput { - var result []*types.TaskInput - for _, task := range tasks { - converted := s.convertTaskToTaskSystem(task) - if converted != nil { - result = append(result, converted) - } - } - return result -} - -// convertWorkersToTaskSystem converts workers to task system format using dynamic mapping -func (s *MaintenanceIntegration) convertWorkersToTaskSystem(workers []*MaintenanceWorker) []*types.WorkerData { - var result []*types.WorkerData - for _, worker := range workers { - capabilities := make([]types.TaskType, 0, len(worker.Capabilities)) - for _, cap := range worker.Capabilities { - // Convert capability using mapping - taskType, exists := s.revTaskTypeMap[cap] - if exists { - capabilities = append(capabilities, taskType) - } else { - glog.V(3).Infof("Unknown capability %s for worker %s, skipping", cap, worker.ID) - } - } - - result = append(result, &types.WorkerData{ - ID: worker.ID, - Address: worker.Address, - Capabilities: capabilities, - MaxConcurrent: worker.MaxConcurrent, - CurrentLoad: worker.CurrentLoad, - }) - } - return result -} - -// GetTaskScheduler returns the scheduler for a task type using dynamic mapping -func (s *MaintenanceIntegration) GetTaskScheduler(taskType MaintenanceTaskType) types.TaskScheduler { - // Convert task type using mapping - taskSystemType, exists := s.revTaskTypeMap[taskType] - if !exists { - glog.V(3).Infof("Unknown task type %s for scheduler", taskType) - return nil - } - - return s.taskRegistry.GetScheduler(taskSystemType) -} - -// GetUIProvider returns the UI provider for a task type using dynamic mapping -func (s *MaintenanceIntegration) GetUIProvider(taskType MaintenanceTaskType) types.TaskUIProvider { - // Convert task type using mapping - taskSystemType, exists := s.revTaskTypeMap[taskType] - if !exists { - glog.V(3).Infof("Unknown task type %s for UI provider", taskType) - return nil - } - - return s.uiRegistry.GetProvider(taskSystemType) -} - -// GetAllTaskStats returns stats for all registered tasks -func (s *MaintenanceIntegration) GetAllTaskStats() []*types.TaskStats { - var stats []*types.TaskStats - - for taskType, detector := range s.taskRegistry.GetAllDetectors() { - uiProvider := s.uiRegistry.GetProvider(taskType) - if uiProvider == nil { - continue - } - - stat := &types.TaskStats{ - TaskType: taskType, - DisplayName: uiProvider.GetDisplayName(), - Enabled: detector.IsEnabled(), - LastScan: time.Now().Add(-detector.ScanInterval()), - NextScan: time.Now().Add(detector.ScanInterval()), - ScanInterval: detector.ScanInterval(), - MaxConcurrent: s.taskRegistry.GetScheduler(taskType).GetMaxConcurrent(), - // Would need to get these from actual queue/stats - PendingTasks: 0, - RunningTasks: 0, - CompletedToday: 0, - FailedToday: 0, - } - - stats = append(stats, stat) - } - - return stats -} - -// mapMaintenanceTaskTypeToPendingOperationType converts a maintenance task type to a pending operation type -func (s *MaintenanceIntegration) mapMaintenanceTaskTypeToPendingOperationType(taskType MaintenanceTaskType) PendingOperationType { - switch taskType { - case MaintenanceTaskType("balance"): - return OpTypeVolumeBalance - case MaintenanceTaskType("erasure_coding"): - return OpTypeErasureCoding - case MaintenanceTaskType("vacuum"): - return OpTypeVacuum - case MaintenanceTaskType("replication"): - return OpTypeReplication - default: - // For other task types, assume they're volume operations - return OpTypeVolumeMove - } -} - -// GetPendingOperations returns the pending operations tracker -func (s *MaintenanceIntegration) GetPendingOperations() *PendingOperations { - return s.pendingOperations -} - -// GetActiveTopology returns the active topology for task detection -func (s *MaintenanceIntegration) GetActiveTopology() *topology.ActiveTopology { - return s.activeTopology -} diff --git a/weed/admin/maintenance/maintenance_manager.go b/weed/admin/maintenance/maintenance_manager.go deleted file mode 100644 index 4aab137e0..000000000 --- a/weed/admin/maintenance/maintenance_manager.go +++ /dev/null @@ -1,568 +0,0 @@ -package maintenance - -import ( - "fmt" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" -) - -// buildPolicyFromTaskConfigs loads task configurations from separate files and builds a MaintenancePolicy -func buildPolicyFromTaskConfigs() *worker_pb.MaintenancePolicy { - policy := &worker_pb.MaintenancePolicy{ - GlobalMaxConcurrent: 4, - DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds - DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds - TaskPolicies: make(map[string]*worker_pb.TaskPolicy), - } - - // Load vacuum task configuration - if vacuumConfig := vacuum.LoadConfigFromPersistence(nil); vacuumConfig != nil { - policy.TaskPolicies["vacuum"] = &worker_pb.TaskPolicy{ - Enabled: vacuumConfig.Enabled, - MaxConcurrent: int32(vacuumConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(vacuumConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: &worker_pb.VacuumTaskConfig{ - GarbageThreshold: float64(vacuumConfig.GarbageThreshold), - MinVolumeAgeHours: int32(vacuumConfig.MinVolumeAgeSeconds / 3600), // Convert seconds to hours - MinIntervalSeconds: int32(vacuumConfig.MinIntervalSeconds), - }, - }, - } - } - - // Load erasure coding task configuration - if ecConfig := erasure_coding.LoadConfigFromPersistence(nil); ecConfig != nil { - policy.TaskPolicies["erasure_coding"] = &worker_pb.TaskPolicy{ - Enabled: ecConfig.Enabled, - MaxConcurrent: int32(ecConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(ecConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(ecConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_ErasureCodingConfig{ - ErasureCodingConfig: &worker_pb.ErasureCodingTaskConfig{ - FullnessRatio: float64(ecConfig.FullnessRatio), - QuietForSeconds: int32(ecConfig.QuietForSeconds), - MinVolumeSizeMb: int32(ecConfig.MinSizeMB), - CollectionFilter: ecConfig.CollectionFilter, - }, - }, - } - } - - // Load balance task configuration - if balanceConfig := balance.LoadConfigFromPersistence(nil); balanceConfig != nil { - policy.TaskPolicies["balance"] = &worker_pb.TaskPolicy{ - Enabled: balanceConfig.Enabled, - MaxConcurrent: int32(balanceConfig.MaxConcurrent), - RepeatIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds), - CheckIntervalSeconds: int32(balanceConfig.ScanIntervalSeconds), - TaskConfig: &worker_pb.TaskPolicy_BalanceConfig{ - BalanceConfig: &worker_pb.BalanceTaskConfig{ - ImbalanceThreshold: float64(balanceConfig.ImbalanceThreshold), - MinServerCount: int32(balanceConfig.MinServerCount), - }, - }, - } - } - - glog.V(1).Infof("Built maintenance policy from separate task configs - %d task policies loaded", len(policy.TaskPolicies)) - return policy -} - -// MaintenanceManager coordinates the maintenance system -type MaintenanceManager struct { - config *MaintenanceConfig - scanner *MaintenanceScanner - queue *MaintenanceQueue - adminClient AdminClient - running bool - stopChan chan struct{} - // Error handling and backoff - errorCount int - lastError error - lastErrorTime time.Time - backoffDelay time.Duration - mutex sync.RWMutex - scanInProgress bool -} - -// NewMaintenanceManager creates a new maintenance manager -func NewMaintenanceManager(adminClient AdminClient, config *MaintenanceConfig) *MaintenanceManager { - if config == nil { - config = DefaultMaintenanceConfig() - } - - // Use the policy from the config (which is populated from separate task files in LoadMaintenanceConfig) - policy := config.Policy - if policy == nil { - // Fallback: build policy from separate task configuration files if not already populated - policy = buildPolicyFromTaskConfigs() - } - - queue := NewMaintenanceQueue(policy) - scanner := NewMaintenanceScanner(adminClient, policy, queue) - - return &MaintenanceManager{ - config: config, - scanner: scanner, - queue: queue, - adminClient: adminClient, - stopChan: make(chan struct{}), - backoffDelay: time.Second, // Start with 1 second backoff - } -} - -// Start begins the maintenance manager -func (mm *MaintenanceManager) Start() error { - if !mm.config.Enabled { - glog.V(1).Infof("Maintenance system is disabled") - return nil - } - - // Validate configuration durations to prevent ticker panics - if err := mm.validateConfig(); err != nil { - return fmt.Errorf("invalid maintenance configuration: %w", err) - } - - mm.running = true - - // Start background processes - go mm.scanLoop() - go mm.cleanupLoop() - - glog.Infof("Maintenance manager started with scan interval %ds", mm.config.ScanIntervalSeconds) - return nil -} - -// validateConfig validates the maintenance configuration durations -func (mm *MaintenanceManager) validateConfig() error { - if mm.config.ScanIntervalSeconds <= 0 { - glog.Warningf("Invalid scan interval %ds, using default 30m", mm.config.ScanIntervalSeconds) - mm.config.ScanIntervalSeconds = 30 * 60 // 30 minutes in seconds - } - - if mm.config.CleanupIntervalSeconds <= 0 { - glog.Warningf("Invalid cleanup interval %ds, using default 24h", mm.config.CleanupIntervalSeconds) - mm.config.CleanupIntervalSeconds = 24 * 60 * 60 // 24 hours in seconds - } - - if mm.config.WorkerTimeoutSeconds <= 0 { - glog.Warningf("Invalid worker timeout %ds, using default 5m", mm.config.WorkerTimeoutSeconds) - mm.config.WorkerTimeoutSeconds = 5 * 60 // 5 minutes in seconds - } - - if mm.config.TaskTimeoutSeconds <= 0 { - glog.Warningf("Invalid task timeout %ds, using default 2h", mm.config.TaskTimeoutSeconds) - mm.config.TaskTimeoutSeconds = 2 * 60 * 60 // 2 hours in seconds - } - - if mm.config.RetryDelaySeconds <= 0 { - glog.Warningf("Invalid retry delay %ds, using default 15m", mm.config.RetryDelaySeconds) - mm.config.RetryDelaySeconds = 15 * 60 // 15 minutes in seconds - } - - if mm.config.TaskRetentionSeconds <= 0 { - glog.Warningf("Invalid task retention %ds, using default 168h", mm.config.TaskRetentionSeconds) - mm.config.TaskRetentionSeconds = 7 * 24 * 60 * 60 // 7 days in seconds - } - - return nil -} - -// IsRunning returns whether the maintenance manager is currently running -func (mm *MaintenanceManager) IsRunning() bool { - return mm.running -} - -// Stop terminates the maintenance manager -func (mm *MaintenanceManager) Stop() { - mm.running = false - close(mm.stopChan) - glog.Infof("Maintenance manager stopped") -} - -// scanLoop periodically scans for maintenance tasks with adaptive timing -func (mm *MaintenanceManager) scanLoop() { - scanInterval := time.Duration(mm.config.ScanIntervalSeconds) * time.Second - ticker := time.NewTicker(scanInterval) - defer ticker.Stop() - - for mm.running { - select { - case <-mm.stopChan: - return - case <-ticker.C: - glog.V(1).Infof("Performing maintenance scan every %v", scanInterval) - - // Use the same synchronization as TriggerScan to prevent concurrent scans - if err := mm.triggerScanInternal(false); err != nil { - glog.V(1).Infof("Scheduled scan skipped: %v", err) - } - - // Adjust ticker interval based on error state (read error state safely) - currentInterval := mm.getScanInterval(scanInterval) - - // Reset ticker with new interval if needed - if currentInterval != scanInterval { - ticker.Stop() - ticker = time.NewTicker(currentInterval) - } - } - } -} - -// getScanInterval safely reads the current scan interval with error backoff -func (mm *MaintenanceManager) getScanInterval(baseInterval time.Duration) time.Duration { - mm.mutex.RLock() - defer mm.mutex.RUnlock() - - if mm.errorCount > 0 { - // Use backoff delay when there are errors - currentInterval := mm.backoffDelay - if currentInterval > baseInterval { - // Don't make it longer than the configured interval * 10 - maxInterval := baseInterval * 10 - if currentInterval > maxInterval { - currentInterval = maxInterval - } - } - return currentInterval - } - return baseInterval -} - -// cleanupLoop periodically cleans up old tasks and stale workers -func (mm *MaintenanceManager) cleanupLoop() { - cleanupInterval := time.Duration(mm.config.CleanupIntervalSeconds) * time.Second - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for mm.running { - select { - case <-mm.stopChan: - return - case <-ticker.C: - mm.performCleanup() - } - } -} - -// performScan executes a maintenance scan with error handling and backoff -func (mm *MaintenanceManager) performScan() { - defer func() { - // Always reset scan in progress flag when done - mm.mutex.Lock() - mm.scanInProgress = false - mm.mutex.Unlock() - }() - - glog.Infof("Starting maintenance scan...") - - results, err := mm.scanner.ScanForMaintenanceTasks() - if err != nil { - // Handle scan error - mm.mutex.Lock() - mm.handleScanError(err) - mm.mutex.Unlock() - glog.Warningf("Maintenance scan failed: %v", err) - return - } - - // Scan succeeded - update state and process results - mm.handleScanSuccess(results) -} - -// handleScanSuccess processes successful scan results with proper lock management -func (mm *MaintenanceManager) handleScanSuccess(results []*TaskDetectionResult) { - // Update manager state first - mm.mutex.Lock() - mm.resetErrorTracking() - taskCount := len(results) - mm.mutex.Unlock() - - if taskCount > 0 { - // Count tasks by type for logging (outside of lock) - taskCounts := make(map[MaintenanceTaskType]int) - for _, result := range results { - taskCounts[result.TaskType]++ - } - - // Add tasks to queue (no manager lock held) - mm.queue.AddTasksFromResults(results) - - // Log detailed scan results - glog.Infof("Maintenance scan completed: found %d tasks", taskCount) - for taskType, count := range taskCounts { - glog.Infof(" - %s: %d tasks", taskType, count) - } - } else { - glog.Infof("Maintenance scan completed: no maintenance tasks needed") - } -} - -// handleScanError handles scan errors with exponential backoff and reduced logging -func (mm *MaintenanceManager) handleScanError(err error) { - now := time.Now() - mm.errorCount++ - mm.lastError = err - mm.lastErrorTime = now - - // Use exponential backoff with jitter - if mm.errorCount > 1 { - mm.backoffDelay = mm.backoffDelay * 2 - if mm.backoffDelay > 5*time.Minute { - mm.backoffDelay = 5 * time.Minute // Cap at 5 minutes - } - } - - // Reduce log frequency based on error count and time - shouldLog := false - if mm.errorCount <= 3 { - // Log first 3 errors immediately - shouldLog = true - } else if mm.errorCount <= 10 && mm.errorCount%3 == 0 { - // Log every 3rd error for errors 4-10 - shouldLog = true - } else if mm.errorCount%10 == 0 { - // Log every 10th error after that - shouldLog = true - } - - if shouldLog { - // Check if it's a connection error to provide better messaging - if isConnectionError(err) { - if mm.errorCount == 1 { - glog.Errorf("Maintenance scan failed: %v (will retry with backoff)", err) - } else { - glog.Errorf("Maintenance scan still failing after %d attempts: %v (backoff: %v)", - mm.errorCount, err, mm.backoffDelay) - } - } else { - glog.Errorf("Maintenance scan failed: %v", err) - } - } else { - // Use debug level for suppressed errors - glog.V(3).Infof("Maintenance scan failed (error #%d, suppressed): %v", mm.errorCount, err) - } -} - -// resetErrorTracking resets error tracking when scan succeeds -func (mm *MaintenanceManager) resetErrorTracking() { - if mm.errorCount > 0 { - glog.V(1).Infof("Maintenance scan recovered after %d failed attempts", mm.errorCount) - mm.errorCount = 0 - mm.lastError = nil - mm.backoffDelay = time.Second // Reset to initial delay - } -} - -// isConnectionError checks if the error is a connection-related error -func isConnectionError(err error) bool { - if err == nil { - return false - } - errStr := err.Error() - return strings.Contains(errStr, "connection refused") || - strings.Contains(errStr, "connection error") || - strings.Contains(errStr, "dial tcp") || - strings.Contains(errStr, "connection timeout") || - strings.Contains(errStr, "no route to host") || - strings.Contains(errStr, "network unreachable") -} - -// performCleanup cleans up old tasks and stale workers -func (mm *MaintenanceManager) performCleanup() { - glog.V(2).Infof("Starting maintenance cleanup") - - taskRetention := time.Duration(mm.config.TaskRetentionSeconds) * time.Second - workerTimeout := time.Duration(mm.config.WorkerTimeoutSeconds) * time.Second - - removedTasks := mm.queue.CleanupOldTasks(taskRetention) - removedWorkers := mm.queue.RemoveStaleWorkers(workerTimeout) - - // Clean up stale pending operations (operations running for more than 4 hours) - staleOperationTimeout := 4 * time.Hour - removedOperations := 0 - if mm.scanner != nil && mm.scanner.integration != nil { - pendingOps := mm.scanner.integration.GetPendingOperations() - if pendingOps != nil { - removedOperations = pendingOps.CleanupStaleOperations(staleOperationTimeout) - } - } - - if removedTasks > 0 || removedWorkers > 0 || removedOperations > 0 { - glog.V(1).Infof("Cleanup completed: removed %d old tasks, %d stale workers, and %d stale operations", - removedTasks, removedWorkers, removedOperations) - } -} - -// GetQueue returns the maintenance queue -func (mm *MaintenanceManager) GetQueue() *MaintenanceQueue { - return mm.queue -} - -// GetConfig returns the maintenance configuration -func (mm *MaintenanceManager) GetConfig() *MaintenanceConfig { - return mm.config -} - -// GetStats returns maintenance statistics -func (mm *MaintenanceManager) GetStats() *MaintenanceStats { - stats := mm.queue.GetStats() - - mm.mutex.RLock() - defer mm.mutex.RUnlock() - - stats.LastScanTime = time.Now() // Would need to track this properly - - // Calculate next scan time based on current error state - scanInterval := time.Duration(mm.config.ScanIntervalSeconds) * time.Second - nextScanInterval := scanInterval - if mm.errorCount > 0 { - nextScanInterval = mm.backoffDelay - maxInterval := scanInterval * 10 - if nextScanInterval > maxInterval { - nextScanInterval = maxInterval - } - } - stats.NextScanTime = time.Now().Add(nextScanInterval) - - return stats -} - -// ReloadTaskConfigurations reloads task configurations from the current policy -func (mm *MaintenanceManager) ReloadTaskConfigurations() error { - mm.mutex.Lock() - defer mm.mutex.Unlock() - - // Trigger configuration reload in the integration layer - if mm.scanner != nil && mm.scanner.integration != nil { - mm.scanner.integration.ConfigureTasksFromPolicy() - glog.V(1).Infof("Task configurations reloaded from policy") - return nil - } - - return fmt.Errorf("integration not available for configuration reload") -} - -// GetErrorState returns the current error state for monitoring -func (mm *MaintenanceManager) GetErrorState() (errorCount int, lastError error, backoffDelay time.Duration) { - mm.mutex.RLock() - defer mm.mutex.RUnlock() - return mm.errorCount, mm.lastError, mm.backoffDelay -} - -// GetTasks returns tasks with filtering -func (mm *MaintenanceManager) GetTasks(status MaintenanceTaskStatus, taskType MaintenanceTaskType, limit int) []*MaintenanceTask { - return mm.queue.GetTasks(status, taskType, limit) -} - -// GetWorkers returns all registered workers -func (mm *MaintenanceManager) GetWorkers() []*MaintenanceWorker { - return mm.queue.GetWorkers() -} - -// TriggerScan manually triggers a maintenance scan -func (mm *MaintenanceManager) TriggerScan() error { - return mm.triggerScanInternal(true) -} - -// triggerScanInternal handles both manual and automatic scan triggers -func (mm *MaintenanceManager) triggerScanInternal(isManual bool) error { - if !mm.running { - return fmt.Errorf("maintenance manager is not running") - } - - // Prevent multiple concurrent scans - mm.mutex.Lock() - if mm.scanInProgress { - mm.mutex.Unlock() - if isManual { - glog.V(1).Infof("Manual scan already in progress, ignoring trigger request") - } else { - glog.V(2).Infof("Automatic scan already in progress, ignoring scheduled scan") - } - return fmt.Errorf("scan already in progress") - } - mm.scanInProgress = true - mm.mutex.Unlock() - - go mm.performScan() - return nil -} - -// UpdateConfig updates the maintenance configuration -func (mm *MaintenanceManager) UpdateConfig(config *MaintenanceConfig) error { - if config == nil { - return fmt.Errorf("config cannot be nil") - } - - mm.config = config - mm.queue.policy = config.Policy - mm.scanner.policy = config.Policy - - glog.V(1).Infof("Maintenance configuration updated") - return nil -} - -// CancelTask cancels a pending task -func (mm *MaintenanceManager) CancelTask(taskID string) error { - mm.queue.mutex.Lock() - defer mm.queue.mutex.Unlock() - - task, exists := mm.queue.tasks[taskID] - if !exists { - return fmt.Errorf("task %s not found", taskID) - } - - if task.Status == TaskStatusPending { - task.Status = TaskStatusCancelled - task.CompletedAt = &[]time.Time{time.Now()}[0] - - // Remove from pending tasks - for i, pendingTask := range mm.queue.pendingTasks { - if pendingTask.ID == taskID { - mm.queue.pendingTasks = append(mm.queue.pendingTasks[:i], mm.queue.pendingTasks[i+1:]...) - break - } - } - - glog.V(2).Infof("Cancelled task %s", taskID) - return nil - } - - return fmt.Errorf("task %s cannot be cancelled (status: %s)", taskID, task.Status) -} - -// RegisterWorker registers a new worker -func (mm *MaintenanceManager) RegisterWorker(worker *MaintenanceWorker) { - mm.queue.RegisterWorker(worker) -} - -// GetNextTask returns the next task for a worker -func (mm *MaintenanceManager) GetNextTask(workerID string, capabilities []MaintenanceTaskType) *MaintenanceTask { - return mm.queue.GetNextTask(workerID, capabilities) -} - -// CompleteTask marks a task as completed -func (mm *MaintenanceManager) CompleteTask(taskID string, error string) { - mm.queue.CompleteTask(taskID, error) -} - -// UpdateTaskProgress updates task progress -func (mm *MaintenanceManager) UpdateTaskProgress(taskID string, progress float64) { - mm.queue.UpdateTaskProgress(taskID, progress) -} - -// UpdateWorkerHeartbeat updates worker heartbeat -func (mm *MaintenanceManager) UpdateWorkerHeartbeat(workerID string) { - mm.queue.UpdateWorkerHeartbeat(workerID) -} diff --git a/weed/admin/maintenance/maintenance_manager_test.go b/weed/admin/maintenance/maintenance_manager_test.go deleted file mode 100644 index 243a88f5e..000000000 --- a/weed/admin/maintenance/maintenance_manager_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package maintenance - -import ( - "errors" - "testing" - "time" -) - -func TestMaintenanceManager_ErrorHandling(t *testing.T) { - config := DefaultMaintenanceConfig() - config.ScanIntervalSeconds = 1 // Short interval for testing (1 second) - - manager := NewMaintenanceManager(nil, config) - - // Test initial state - if manager.errorCount != 0 { - t.Errorf("Expected initial error count to be 0, got %d", manager.errorCount) - } - - if manager.backoffDelay != time.Second { - t.Errorf("Expected initial backoff delay to be 1s, got %v", manager.backoffDelay) - } - - // Test error handling - err := errors.New("dial tcp [::1]:19333: connect: connection refused") - manager.handleScanError(err) - - if manager.errorCount != 1 { - t.Errorf("Expected error count to be 1, got %d", manager.errorCount) - } - - if manager.lastError != err { - t.Errorf("Expected last error to be set") - } - - // Test exponential backoff - initialDelay := manager.backoffDelay - manager.handleScanError(err) - - if manager.backoffDelay != initialDelay*2 { - t.Errorf("Expected backoff delay to double, got %v", manager.backoffDelay) - } - - if manager.errorCount != 2 { - t.Errorf("Expected error count to be 2, got %d", manager.errorCount) - } - - // Test backoff cap - for i := 0; i < 10; i++ { - manager.handleScanError(err) - } - - if manager.backoffDelay > 5*time.Minute { - t.Errorf("Expected backoff delay to be capped at 5 minutes, got %v", manager.backoffDelay) - } - - // Test error reset - manager.resetErrorTracking() - - if manager.errorCount != 0 { - t.Errorf("Expected error count to be reset to 0, got %d", manager.errorCount) - } - - if manager.backoffDelay != time.Second { - t.Errorf("Expected backoff delay to be reset to 1s, got %v", manager.backoffDelay) - } - - if manager.lastError != nil { - t.Errorf("Expected last error to be reset to nil") - } -} - -func TestIsConnectionError(t *testing.T) { - tests := []struct { - err error - expected bool - }{ - {nil, false}, - {errors.New("connection refused"), true}, - {errors.New("dial tcp [::1]:19333: connect: connection refused"), true}, - {errors.New("connection error: desc = \"transport: Error while dialing\""), true}, - {errors.New("connection timeout"), true}, - {errors.New("no route to host"), true}, - {errors.New("network unreachable"), true}, - {errors.New("some other error"), false}, - {errors.New("invalid argument"), false}, - } - - for _, test := range tests { - result := isConnectionError(test.err) - if result != test.expected { - t.Errorf("For error %v, expected %v, got %v", test.err, test.expected, result) - } - } -} - -func TestMaintenanceManager_GetErrorState(t *testing.T) { - config := DefaultMaintenanceConfig() - manager := NewMaintenanceManager(nil, config) - - // Test initial state - errorCount, lastError, backoffDelay := manager.GetErrorState() - if errorCount != 0 || lastError != nil || backoffDelay != time.Second { - t.Errorf("Expected initial state to be clean") - } - - // Add some errors - err := errors.New("test error") - manager.handleScanError(err) - manager.handleScanError(err) - - errorCount, lastError, backoffDelay = manager.GetErrorState() - if errorCount != 2 || lastError != err || backoffDelay != 2*time.Second { - t.Errorf("Expected error state to be tracked correctly: count=%d, err=%v, delay=%v", - errorCount, lastError, backoffDelay) - } -} - -func TestMaintenanceManager_LogThrottling(t *testing.T) { - config := DefaultMaintenanceConfig() - manager := NewMaintenanceManager(nil, config) - - // This is a basic test to ensure the error handling doesn't panic - // In practice, you'd want to capture log output to verify throttling - err := errors.New("test error") - - // Generate many errors to test throttling - for i := 0; i < 25; i++ { - manager.handleScanError(err) - } - - // Should not panic and should have capped backoff - if manager.backoffDelay > 5*time.Minute { - t.Errorf("Expected backoff to be capped at 5 minutes") - } - - if manager.errorCount != 25 { - t.Errorf("Expected error count to be 25, got %d", manager.errorCount) - } -} diff --git a/weed/admin/maintenance/maintenance_queue.go b/weed/admin/maintenance/maintenance_queue.go deleted file mode 100644 index d39c96a30..000000000 --- a/weed/admin/maintenance/maintenance_queue.go +++ /dev/null @@ -1,936 +0,0 @@ -package maintenance - -import ( - "crypto/rand" - "fmt" - "sort" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// NewMaintenanceQueue creates a new maintenance queue -func NewMaintenanceQueue(policy *MaintenancePolicy) *MaintenanceQueue { - queue := &MaintenanceQueue{ - tasks: make(map[string]*MaintenanceTask), - workers: make(map[string]*MaintenanceWorker), - pendingTasks: make([]*MaintenanceTask, 0), - policy: policy, - } - return queue -} - -// SetIntegration sets the integration reference -func (mq *MaintenanceQueue) SetIntegration(integration *MaintenanceIntegration) { - mq.integration = integration - glog.V(1).Infof("Maintenance queue configured with integration") -} - -// SetPersistence sets the task persistence interface -func (mq *MaintenanceQueue) SetPersistence(persistence TaskPersistence) { - mq.persistence = persistence - glog.V(1).Infof("Maintenance queue configured with task persistence") -} - -// LoadTasksFromPersistence loads tasks from persistent storage on startup -func (mq *MaintenanceQueue) LoadTasksFromPersistence() error { - if mq.persistence == nil { - glog.V(1).Infof("No task persistence configured, skipping task loading") - return nil - } - - mq.mutex.Lock() - defer mq.mutex.Unlock() - - glog.Infof("Loading tasks from persistence...") - - tasks, err := mq.persistence.LoadAllTaskStates() - if err != nil { - return fmt.Errorf("failed to load task states: %w", err) - } - - glog.Infof("DEBUG LoadTasksFromPersistence: Found %d tasks in persistence", len(tasks)) - - // Reset task maps - mq.tasks = make(map[string]*MaintenanceTask) - mq.pendingTasks = make([]*MaintenanceTask, 0) - - // Load tasks by status - for _, task := range tasks { - glog.Infof("DEBUG LoadTasksFromPersistence: Loading task %s (type: %s, status: %s, scheduled: %v)", task.ID, task.Type, task.Status, task.ScheduledAt) - mq.tasks[task.ID] = task - - switch task.Status { - case TaskStatusPending: - glog.Infof("DEBUG LoadTasksFromPersistence: Adding task %s to pending queue", task.ID) - mq.pendingTasks = append(mq.pendingTasks, task) - case TaskStatusAssigned, TaskStatusInProgress: - // For assigned/in-progress tasks, we need to check if the worker is still available - // If not, we should fail them and make them eligible for retry - if task.WorkerID != "" { - if _, exists := mq.workers[task.WorkerID]; !exists { - glog.Warningf("Task %s was assigned to unavailable worker %s, marking as failed", task.ID, task.WorkerID) - task.Status = TaskStatusFailed - task.Error = "Worker unavailable after restart" - completedTime := time.Now() - task.CompletedAt = &completedTime - - // Check if it should be retried - if task.RetryCount < task.MaxRetries { - task.RetryCount++ - task.Status = TaskStatusPending - task.WorkerID = "" - task.StartedAt = nil - task.CompletedAt = nil - task.Error = "" - task.ScheduledAt = time.Now().Add(1 * time.Minute) // Retry after restart delay - glog.Infof("DEBUG LoadTasksFromPersistence: Retrying task %s, adding to pending queue", task.ID) - mq.pendingTasks = append(mq.pendingTasks, task) - } - } - } - } - } - - // Sort pending tasks by priority and schedule time - sort.Slice(mq.pendingTasks, func(i, j int) bool { - if mq.pendingTasks[i].Priority != mq.pendingTasks[j].Priority { - return mq.pendingTasks[i].Priority > mq.pendingTasks[j].Priority - } - return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt) - }) - - glog.Infof("Loaded %d tasks from persistence (%d pending)", len(tasks), len(mq.pendingTasks)) - return nil -} - -// saveTaskState saves a task to persistent storage -func (mq *MaintenanceQueue) saveTaskState(task *MaintenanceTask) { - if mq.persistence != nil { - if err := mq.persistence.SaveTaskState(task); err != nil { - glog.Errorf("Failed to save task state for %s: %v", task.ID, err) - } - } -} - -// cleanupCompletedTasks removes old completed tasks beyond the retention limit -func (mq *MaintenanceQueue) cleanupCompletedTasks() { - if mq.persistence != nil { - if err := mq.persistence.CleanupCompletedTasks(); err != nil { - glog.Errorf("Failed to cleanup completed tasks: %v", err) - } - } -} - -// AddTask adds a new maintenance task to the queue with deduplication -func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - // Check for duplicate tasks (same type + volume + not completed) - if mq.hasDuplicateTask(task) { - glog.V(1).Infof("Task skipped (duplicate): %s for volume %d on %s (already queued or running)", - task.Type, task.VolumeID, task.Server) - return - } - - task.ID = generateTaskID() - task.Status = TaskStatusPending - task.CreatedAt = time.Now() - task.MaxRetries = 3 // Default retry count - - // Initialize assignment history and set creation context - task.AssignmentHistory = make([]*TaskAssignmentRecord, 0) - if task.CreatedBy == "" { - task.CreatedBy = "maintenance-system" - } - if task.CreationContext == "" { - task.CreationContext = "Automatic task creation based on system monitoring" - } - if task.Tags == nil { - task.Tags = make(map[string]string) - } - - mq.tasks[task.ID] = task - mq.pendingTasks = append(mq.pendingTasks, task) - - // Sort pending tasks by priority and schedule time - sort.Slice(mq.pendingTasks, func(i, j int) bool { - if mq.pendingTasks[i].Priority != mq.pendingTasks[j].Priority { - return mq.pendingTasks[i].Priority > mq.pendingTasks[j].Priority - } - return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt) - }) - - // Save task state to persistence - mq.saveTaskState(task) - - scheduleInfo := "" - if !task.ScheduledAt.IsZero() && time.Until(task.ScheduledAt) > time.Minute { - scheduleInfo = fmt.Sprintf(", scheduled for %v", task.ScheduledAt.Format("15:04:05")) - } - - glog.Infof("Task queued: %s (%s) volume %d on %s, priority %d%s, reason: %s", - task.ID, task.Type, task.VolumeID, task.Server, task.Priority, scheduleInfo, task.Reason) -} - -// hasDuplicateTask checks if a similar task already exists (same type, volume, and not completed) -func (mq *MaintenanceQueue) hasDuplicateTask(newTask *MaintenanceTask) bool { - for _, existingTask := range mq.tasks { - if existingTask.Type == newTask.Type && - existingTask.VolumeID == newTask.VolumeID && - existingTask.Server == newTask.Server && - (existingTask.Status == TaskStatusPending || - existingTask.Status == TaskStatusAssigned || - existingTask.Status == TaskStatusInProgress) { - return true - } - } - return false -} - -// AddTasksFromResults converts detection results to tasks and adds them to the queue -func (mq *MaintenanceQueue) AddTasksFromResults(results []*TaskDetectionResult) { - for _, result := range results { - // Validate that task has proper typed parameters - if result.TypedParams == nil { - glog.Warningf("Rejecting invalid task: %s for volume %d on %s - no typed parameters (insufficient destinations or planning failed)", - result.TaskType, result.VolumeID, result.Server) - continue - } - - task := &MaintenanceTask{ - Type: result.TaskType, - Priority: result.Priority, - VolumeID: result.VolumeID, - Server: result.Server, - Collection: result.Collection, - // Copy typed protobuf parameters - TypedParams: result.TypedParams, - Reason: result.Reason, - ScheduledAt: result.ScheduleAt, - } - mq.AddTask(task) - } -} - -// GetNextTask returns the next available task for a worker -func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []MaintenanceTaskType) *MaintenanceTask { - // Use read lock for initial checks and search - mq.mutex.RLock() - - worker, exists := mq.workers[workerID] - if !exists { - mq.mutex.RUnlock() - glog.V(2).Infof("Task assignment failed for worker %s: worker not registered", workerID) - return nil - } - - // Check if worker has capacity - if worker.CurrentLoad >= worker.MaxConcurrent { - mq.mutex.RUnlock() - glog.V(2).Infof("Task assignment failed for worker %s: at capacity (%d/%d)", workerID, worker.CurrentLoad, worker.MaxConcurrent) - return nil - } - - now := time.Now() - var selectedTask *MaintenanceTask - var selectedIndex int = -1 - - // Find the next suitable task (using read lock) - for i, task := range mq.pendingTasks { - // Check if it's time to execute the task - if task.ScheduledAt.After(now) { - glog.V(3).Infof("Task %s skipped for worker %s: scheduled for future (%v)", task.ID, workerID, task.ScheduledAt) - continue - } - - // Check if worker can handle this task type - if !mq.workerCanHandle(task.Type, capabilities) { - glog.V(3).Infof("Task %s (%s) skipped for worker %s: capability mismatch (worker has: %v)", task.ID, task.Type, workerID, capabilities) - continue - } - - // Check if this task type needs a cooldown period - if !mq.canScheduleTaskNow(task) { - // Add detailed diagnostic information - runningCount := mq.GetRunningTaskCount(task.Type) - maxConcurrent := mq.getMaxConcurrentForTaskType(task.Type) - glog.V(2).Infof("Task %s (%s) skipped for worker %s: scheduling constraints not met (running: %d, max: %d)", - task.ID, task.Type, workerID, runningCount, maxConcurrent) - continue - } - - // Found a suitable task - selectedTask = task - selectedIndex = i - break - } - - // Release read lock - mq.mutex.RUnlock() - - // If no task found, return nil - if selectedTask == nil { - glog.V(2).Infof("No suitable tasks available for worker %s (checked %d pending tasks)", workerID, len(mq.pendingTasks)) - return nil - } - - // Now acquire write lock to actually assign the task - mq.mutex.Lock() - defer mq.mutex.Unlock() - - // Re-check that the task is still available (it might have been assigned to another worker) - if selectedIndex >= len(mq.pendingTasks) || mq.pendingTasks[selectedIndex].ID != selectedTask.ID { - glog.V(2).Infof("Task %s no longer available for worker %s: assigned to another worker", selectedTask.ID, workerID) - return nil - } - - // Record assignment history - workerAddress := "" - if worker, exists := mq.workers[workerID]; exists { - workerAddress = worker.Address - } - - // Create assignment record - assignmentRecord := &TaskAssignmentRecord{ - WorkerID: workerID, - WorkerAddress: workerAddress, - AssignedAt: now, - Reason: "Task assigned to available worker", - } - - // Initialize assignment history if nil - if selectedTask.AssignmentHistory == nil { - selectedTask.AssignmentHistory = make([]*TaskAssignmentRecord, 0) - } - selectedTask.AssignmentHistory = append(selectedTask.AssignmentHistory, assignmentRecord) - - // Assign the task - selectedTask.Status = TaskStatusAssigned - selectedTask.WorkerID = workerID - selectedTask.StartedAt = &now - - // Remove from pending tasks - mq.pendingTasks = append(mq.pendingTasks[:selectedIndex], mq.pendingTasks[selectedIndex+1:]...) - - // Update worker load - if worker, exists := mq.workers[workerID]; exists { - worker.CurrentLoad++ - } - - // Track pending operation - mq.trackPendingOperation(selectedTask) - - // Save task state after assignment - mq.saveTaskState(selectedTask) - - glog.Infof("Task assigned: %s (%s) โ†’ worker %s (volume %d, server %s)", - selectedTask.ID, selectedTask.Type, workerID, selectedTask.VolumeID, selectedTask.Server) - - return selectedTask -} - -// CompleteTask marks a task as completed -func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - task, exists := mq.tasks[taskID] - if !exists { - glog.Warningf("Attempted to complete non-existent task: %s", taskID) - return - } - - completedTime := time.Now() - task.CompletedAt = &completedTime - - // Calculate task duration - var duration time.Duration - if task.StartedAt != nil { - duration = completedTime.Sub(*task.StartedAt) - } - - if error != "" { - task.Status = TaskStatusFailed - task.Error = error - - // Check if task should be retried - if task.RetryCount < task.MaxRetries { - // Record unassignment due to failure/retry - if task.WorkerID != "" && len(task.AssignmentHistory) > 0 { - lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1] - if lastAssignment.UnassignedAt == nil { - unassignedTime := completedTime - lastAssignment.UnassignedAt = &unassignedTime - lastAssignment.Reason = fmt.Sprintf("Task failed, scheduling retry (attempt %d/%d): %s", - task.RetryCount+1, task.MaxRetries, error) - } - } - - task.RetryCount++ - task.Status = TaskStatusPending - task.WorkerID = "" - task.StartedAt = nil - task.CompletedAt = nil - task.Error = "" - task.ScheduledAt = time.Now().Add(15 * time.Minute) // Retry delay - - mq.pendingTasks = append(mq.pendingTasks, task) - // Save task state after retry setup - mq.saveTaskState(task) - glog.Warningf("Task failed, scheduling retry: %s (%s) attempt %d/%d, worker %s, duration %v, error: %s", - taskID, task.Type, task.RetryCount, task.MaxRetries, task.WorkerID, duration, error) - } else { - // Record unassignment due to permanent failure - if task.WorkerID != "" && len(task.AssignmentHistory) > 0 { - lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1] - if lastAssignment.UnassignedAt == nil { - unassignedTime := completedTime - lastAssignment.UnassignedAt = &unassignedTime - lastAssignment.Reason = fmt.Sprintf("Task failed permanently after %d retries: %s", task.MaxRetries, error) - } - } - - // Save task state after permanent failure - mq.saveTaskState(task) - glog.Errorf("Task failed permanently: %s (%s) worker %s, duration %v, after %d retries: %s", - taskID, task.Type, task.WorkerID, duration, task.MaxRetries, error) - } - } else { - task.Status = TaskStatusCompleted - task.Progress = 100 - // Save task state after successful completion - mq.saveTaskState(task) - glog.Infof("Task completed: %s (%s) worker %s, duration %v, volume %d", - taskID, task.Type, task.WorkerID, duration, task.VolumeID) - } - - // Update worker - if task.WorkerID != "" { - if worker, exists := mq.workers[task.WorkerID]; exists { - worker.CurrentTask = nil - worker.CurrentLoad-- - if worker.CurrentLoad == 0 { - worker.Status = "active" - } - } - } - - // Remove pending operation (unless it's being retried) - if task.Status != TaskStatusPending { - mq.removePendingOperation(taskID) - } - - // Periodically cleanup old completed tasks (every 10th completion) - if task.Status == TaskStatusCompleted { - // Simple counter-based trigger for cleanup - if len(mq.tasks)%10 == 0 { - go mq.cleanupCompletedTasks() - } - } -} - -// UpdateTaskProgress updates the progress of a running task -func (mq *MaintenanceQueue) UpdateTaskProgress(taskID string, progress float64) { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - if task, exists := mq.tasks[taskID]; exists { - oldProgress := task.Progress - task.Progress = progress - task.Status = TaskStatusInProgress - - // Update pending operation status - mq.updatePendingOperationStatus(taskID, "in_progress") - - // Log progress at significant milestones or changes - if progress == 0 { - glog.V(1).Infof("Task started: %s (%s) worker %s, volume %d", - taskID, task.Type, task.WorkerID, task.VolumeID) - } else if progress >= 100 { - glog.V(1).Infof("Task progress: %s (%s) worker %s, %.1f%% complete", - taskID, task.Type, task.WorkerID, progress) - } else if progress-oldProgress >= 25 { // Log every 25% increment - glog.V(1).Infof("Task progress: %s (%s) worker %s, %.1f%% complete", - taskID, task.Type, task.WorkerID, progress) - } - - // Save task state after progress update - if progress == 0 || progress >= 100 || progress-oldProgress >= 10 { - mq.saveTaskState(task) - } - } else { - glog.V(2).Infof("Progress update for unknown task: %s (%.1f%%)", taskID, progress) - } -} - -// RegisterWorker registers a new worker -func (mq *MaintenanceQueue) RegisterWorker(worker *MaintenanceWorker) { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - isNewWorker := true - if existingWorker, exists := mq.workers[worker.ID]; exists { - isNewWorker = false - glog.Infof("Worker reconnected: %s at %s (capabilities: %v, max concurrent: %d)", - worker.ID, worker.Address, worker.Capabilities, worker.MaxConcurrent) - - // Preserve current load when reconnecting - worker.CurrentLoad = existingWorker.CurrentLoad - } else { - glog.Infof("Worker registered: %s at %s (capabilities: %v, max concurrent: %d)", - worker.ID, worker.Address, worker.Capabilities, worker.MaxConcurrent) - } - - worker.LastHeartbeat = time.Now() - worker.Status = "active" - if isNewWorker { - worker.CurrentLoad = 0 - } - mq.workers[worker.ID] = worker -} - -// UpdateWorkerHeartbeat updates worker heartbeat -func (mq *MaintenanceQueue) UpdateWorkerHeartbeat(workerID string) { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - if worker, exists := mq.workers[workerID]; exists { - lastSeen := worker.LastHeartbeat - worker.LastHeartbeat = time.Now() - - // Log if worker was offline for a while - if time.Since(lastSeen) > 2*time.Minute { - glog.Infof("Worker %s heartbeat resumed after %v", workerID, time.Since(lastSeen)) - } - } else { - glog.V(2).Infof("Heartbeat from unknown worker: %s", workerID) - } -} - -// GetRunningTaskCount returns the number of running tasks of a specific type -func (mq *MaintenanceQueue) GetRunningTaskCount(taskType MaintenanceTaskType) int { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - count := 0 - for _, task := range mq.tasks { - if task.Type == taskType && (task.Status == TaskStatusAssigned || task.Status == TaskStatusInProgress) { - count++ - } - } - return count -} - -// WasTaskRecentlyCompleted checks if a similar task was recently completed -func (mq *MaintenanceQueue) WasTaskRecentlyCompleted(taskType MaintenanceTaskType, volumeID uint32, server string, now time.Time) bool { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - // Get the repeat prevention interval for this task type - interval := mq.getRepeatPreventionInterval(taskType) - cutoff := now.Add(-interval) - - for _, task := range mq.tasks { - if task.Type == taskType && - task.VolumeID == volumeID && - task.Server == server && - task.Status == TaskStatusCompleted && - task.CompletedAt != nil && - task.CompletedAt.After(cutoff) { - return true - } - } - return false -} - -// getRepeatPreventionInterval returns the interval for preventing task repetition -func (mq *MaintenanceQueue) getRepeatPreventionInterval(taskType MaintenanceTaskType) time.Duration { - // First try to get default from task scheduler - if mq.integration != nil { - if scheduler := mq.integration.GetTaskScheduler(taskType); scheduler != nil { - defaultInterval := scheduler.GetDefaultRepeatInterval() - if defaultInterval > 0 { - glog.V(3).Infof("Using task scheduler default repeat interval for %s: %v", taskType, defaultInterval) - return defaultInterval - } - } - } - - // Fallback to policy configuration if no scheduler available or scheduler doesn't provide default - if mq.policy != nil { - repeatIntervalHours := GetRepeatInterval(mq.policy, taskType) - if repeatIntervalHours > 0 { - interval := time.Duration(repeatIntervalHours) * time.Hour - glog.V(3).Infof("Using policy configuration repeat interval for %s: %v", taskType, interval) - return interval - } - } - - // Ultimate fallback - but avoid hardcoded values where possible - glog.V(2).Infof("No scheduler or policy configuration found for task type %s, using minimal default: 1h", taskType) - return time.Hour // Minimal safe default -} - -// GetTasks returns tasks with optional filtering -func (mq *MaintenanceQueue) GetTasks(status MaintenanceTaskStatus, taskType MaintenanceTaskType, limit int) []*MaintenanceTask { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - var tasks []*MaintenanceTask - for _, task := range mq.tasks { - if status != "" && task.Status != status { - continue - } - if taskType != "" && task.Type != taskType { - continue - } - tasks = append(tasks, task) - if limit > 0 && len(tasks) >= limit { - break - } - } - - // Sort by creation time (newest first) - sort.Slice(tasks, func(i, j int) bool { - return tasks[i].CreatedAt.After(tasks[j].CreatedAt) - }) - - return tasks -} - -// GetWorkers returns all registered workers -func (mq *MaintenanceQueue) GetWorkers() []*MaintenanceWorker { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - var workers []*MaintenanceWorker - for _, worker := range mq.workers { - workers = append(workers, worker) - } - return workers -} - -// generateTaskID generates a unique ID for tasks -func generateTaskID() string { - const charset = "abcdefghijklmnopqrstuvwxyz0123456789" - b := make([]byte, 8) - randBytes := make([]byte, 8) - - // Generate random bytes - if _, err := rand.Read(randBytes); err != nil { - // Fallback to timestamp-based ID if crypto/rand fails - timestamp := time.Now().UnixNano() - return fmt.Sprintf("task-%d", timestamp) - } - - // Convert random bytes to charset - for i := range b { - b[i] = charset[int(randBytes[i])%len(charset)] - } - - // Add timestamp suffix to ensure uniqueness - timestamp := time.Now().Unix() % 10000 // last 4 digits of timestamp - return fmt.Sprintf("%s-%04d", string(b), timestamp) -} - -// CleanupOldTasks removes old completed and failed tasks -func (mq *MaintenanceQueue) CleanupOldTasks(retention time.Duration) int { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - cutoff := time.Now().Add(-retention) - removed := 0 - - for id, task := range mq.tasks { - if (task.Status == TaskStatusCompleted || task.Status == TaskStatusFailed) && - task.CompletedAt != nil && - task.CompletedAt.Before(cutoff) { - delete(mq.tasks, id) - removed++ - } - } - - glog.V(2).Infof("Cleaned up %d old maintenance tasks", removed) - return removed -} - -// RemoveStaleWorkers removes workers that haven't sent heartbeat recently -func (mq *MaintenanceQueue) RemoveStaleWorkers(timeout time.Duration) int { - mq.mutex.Lock() - defer mq.mutex.Unlock() - - cutoff := time.Now().Add(-timeout) - removed := 0 - - for id, worker := range mq.workers { - if worker.LastHeartbeat.Before(cutoff) { - // Mark any assigned tasks as failed and record unassignment - for _, task := range mq.tasks { - if task.WorkerID == id && (task.Status == TaskStatusAssigned || task.Status == TaskStatusInProgress) { - // Record unassignment due to worker becoming unavailable - if len(task.AssignmentHistory) > 0 { - lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1] - if lastAssignment.UnassignedAt == nil { - unassignedTime := time.Now() - lastAssignment.UnassignedAt = &unassignedTime - lastAssignment.Reason = "Worker became unavailable (stale heartbeat)" - } - } - - task.Status = TaskStatusFailed - task.Error = "Worker became unavailable" - completedTime := time.Now() - task.CompletedAt = &completedTime - } - } - - delete(mq.workers, id) - removed++ - glog.Warningf("Removed stale maintenance worker %s", id) - } - } - - return removed -} - -// GetStats returns maintenance statistics -func (mq *MaintenanceQueue) GetStats() *MaintenanceStats { - mq.mutex.RLock() - defer mq.mutex.RUnlock() - - stats := &MaintenanceStats{ - TotalTasks: len(mq.tasks), - TasksByStatus: make(map[MaintenanceTaskStatus]int), - TasksByType: make(map[MaintenanceTaskType]int), - ActiveWorkers: 0, - } - - today := time.Now().Truncate(24 * time.Hour) - var totalDuration time.Duration - var completedTasks int - - for _, task := range mq.tasks { - stats.TasksByStatus[task.Status]++ - stats.TasksByType[task.Type]++ - - if task.CompletedAt != nil && task.CompletedAt.After(today) { - if task.Status == TaskStatusCompleted { - stats.CompletedToday++ - } else if task.Status == TaskStatusFailed { - stats.FailedToday++ - } - - if task.StartedAt != nil { - duration := task.CompletedAt.Sub(*task.StartedAt) - totalDuration += duration - completedTasks++ - } - } - } - - for _, worker := range mq.workers { - if worker.Status == "active" || worker.Status == "busy" { - stats.ActiveWorkers++ - } - } - - if completedTasks > 0 { - stats.AverageTaskTime = totalDuration / time.Duration(completedTasks) - } - - return stats -} - -// workerCanHandle checks if a worker can handle a specific task type -func (mq *MaintenanceQueue) workerCanHandle(taskType MaintenanceTaskType, capabilities []MaintenanceTaskType) bool { - for _, capability := range capabilities { - if capability == taskType { - return true - } - } - return false -} - -// canScheduleTaskNow determines if a task can be scheduled using task schedulers or fallback logic -func (mq *MaintenanceQueue) canScheduleTaskNow(task *MaintenanceTask) bool { - glog.V(2).Infof("Checking if task %s (type: %s) can be scheduled", task.ID, task.Type) - - // TEMPORARY FIX: Skip integration task scheduler which is being overly restrictive - // Use fallback logic directly for now - glog.V(2).Infof("Using fallback logic for task scheduling") - canExecute := mq.canExecuteTaskType(task.Type) - glog.V(2).Infof("Fallback decision for task %s: %v", task.ID, canExecute) - return canExecute - - // NOTE: Original integration code disabled temporarily - // Try task scheduling logic first - /* - if mq.integration != nil { - glog.Infof("DEBUG canScheduleTaskNow: Using integration task scheduler") - // Get all running tasks and available workers - runningTasks := mq.getRunningTasks() - availableWorkers := mq.getAvailableWorkers() - - glog.Infof("DEBUG canScheduleTaskNow: Running tasks: %d, Available workers: %d", len(runningTasks), len(availableWorkers)) - - canSchedule := mq.integration.CanScheduleWithTaskSchedulers(task, runningTasks, availableWorkers) - glog.Infof("DEBUG canScheduleTaskNow: Task scheduler decision for task %s (%s): %v", task.ID, task.Type, canSchedule) - return canSchedule - } - */ -} - -// canExecuteTaskType checks if we can execute more tasks of this type (concurrency limits) - fallback logic -func (mq *MaintenanceQueue) canExecuteTaskType(taskType MaintenanceTaskType) bool { - runningCount := mq.GetRunningTaskCount(taskType) - maxConcurrent := mq.getMaxConcurrentForTaskType(taskType) - - canExecute := runningCount < maxConcurrent - glog.V(3).Infof("canExecuteTaskType for %s: running=%d, max=%d, canExecute=%v", taskType, runningCount, maxConcurrent, canExecute) - - return canExecute -} - -// getMaxConcurrentForTaskType returns the maximum concurrent tasks allowed for a task type -func (mq *MaintenanceQueue) getMaxConcurrentForTaskType(taskType MaintenanceTaskType) int { - // First try to get default from task scheduler - if mq.integration != nil { - if scheduler := mq.integration.GetTaskScheduler(taskType); scheduler != nil { - maxConcurrent := scheduler.GetMaxConcurrent() - if maxConcurrent > 0 { - glog.V(3).Infof("Using task scheduler max concurrent for %s: %d", taskType, maxConcurrent) - return maxConcurrent - } - } - } - - // Fallback to policy configuration if no scheduler available or scheduler doesn't provide default - if mq.policy != nil { - maxConcurrent := GetMaxConcurrent(mq.policy, taskType) - if maxConcurrent > 0 { - glog.V(3).Infof("Using policy configuration max concurrent for %s: %d", taskType, maxConcurrent) - return maxConcurrent - } - } - - // Ultimate fallback - minimal safe default - glog.V(2).Infof("No scheduler or policy configuration found for task type %s, using minimal default: 1", taskType) - return 1 -} - -// getRunningTasks returns all currently running tasks -func (mq *MaintenanceQueue) getRunningTasks() []*MaintenanceTask { - var runningTasks []*MaintenanceTask - for _, task := range mq.tasks { - if task.Status == TaskStatusAssigned || task.Status == TaskStatusInProgress { - runningTasks = append(runningTasks, task) - } - } - return runningTasks -} - -// getAvailableWorkers returns all workers that can take more work -func (mq *MaintenanceQueue) getAvailableWorkers() []*MaintenanceWorker { - var availableWorkers []*MaintenanceWorker - for _, worker := range mq.workers { - if worker.Status == "active" && worker.CurrentLoad < worker.MaxConcurrent { - availableWorkers = append(availableWorkers, worker) - } - } - return availableWorkers -} - -// trackPendingOperation adds a task to the pending operations tracker -func (mq *MaintenanceQueue) trackPendingOperation(task *MaintenanceTask) { - if mq.integration == nil { - return - } - - pendingOps := mq.integration.GetPendingOperations() - if pendingOps == nil { - return - } - - // Skip tracking for tasks without proper typed parameters - if task.TypedParams == nil { - glog.V(2).Infof("Skipping pending operation tracking for task %s - no typed parameters", task.ID) - return - } - - // Map maintenance task type to pending operation type - var opType PendingOperationType - switch task.Type { - case MaintenanceTaskType("balance"): - opType = OpTypeVolumeBalance - case MaintenanceTaskType("erasure_coding"): - opType = OpTypeErasureCoding - case MaintenanceTaskType("vacuum"): - opType = OpTypeVacuum - case MaintenanceTaskType("replication"): - opType = OpTypeReplication - default: - opType = OpTypeVolumeMove - } - - // Determine destination node and estimated size from unified targets - destNode := "" - estimatedSize := uint64(1024 * 1024 * 1024) // Default 1GB estimate - - // Use unified targets array - the only source of truth - if len(task.TypedParams.Targets) > 0 { - destNode = task.TypedParams.Targets[0].Node - if task.TypedParams.Targets[0].EstimatedSize > 0 { - estimatedSize = task.TypedParams.Targets[0].EstimatedSize - } - } - - // Determine source node from unified sources - sourceNode := "" - if len(task.TypedParams.Sources) > 0 { - sourceNode = task.TypedParams.Sources[0].Node - } - - operation := &PendingOperation{ - VolumeID: task.VolumeID, - OperationType: opType, - SourceNode: sourceNode, - DestNode: destNode, - TaskID: task.ID, - StartTime: time.Now(), - EstimatedSize: estimatedSize, - Collection: task.Collection, - Status: "assigned", - } - - pendingOps.AddOperation(operation) -} - -// removePendingOperation removes a task from the pending operations tracker -func (mq *MaintenanceQueue) removePendingOperation(taskID string) { - if mq.integration == nil { - return - } - - pendingOps := mq.integration.GetPendingOperations() - if pendingOps == nil { - return - } - - pendingOps.RemoveOperation(taskID) -} - -// updatePendingOperationStatus updates the status of a pending operation -func (mq *MaintenanceQueue) updatePendingOperationStatus(taskID string, status string) { - if mq.integration == nil { - return - } - - pendingOps := mq.integration.GetPendingOperations() - if pendingOps == nil { - return - } - - pendingOps.UpdateOperationStatus(taskID, status) -} diff --git a/weed/admin/maintenance/maintenance_queue_test.go b/weed/admin/maintenance/maintenance_queue_test.go deleted file mode 100644 index 2c38471a0..000000000 --- a/weed/admin/maintenance/maintenance_queue_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package maintenance - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" -) - -// Test suite for canScheduleTaskNow() function and related scheduling logic -// -// This test suite ensures that: -// 1. The fallback scheduling logic works correctly when no integration is present -// 2. Task concurrency limits are properly enforced per task type -// 3. Different task types don't interfere with each other's concurrency limits -// 4. Custom policies with higher concurrency limits work correctly -// 5. Edge cases (nil tasks, empty task types) are handled gracefully -// 6. Helper functions (GetRunningTaskCount, canExecuteTaskType, etc.) work correctly -// -// Background: The canScheduleTaskNow() function is critical for task assignment. -// It was previously failing due to an overly restrictive integration scheduler, -// so we implemented a temporary fix that bypasses the integration and uses -// fallback logic based on simple concurrency limits per task type. - -func TestCanScheduleTaskNow_FallbackLogic(t *testing.T) { - // Test the current implementation which uses fallback logic - mq := &MaintenanceQueue{ - tasks: make(map[string]*MaintenanceTask), - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, // No policy for default behavior - integration: nil, // No integration to force fallback - } - - task := &MaintenanceTask{ - ID: "test-task-1", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusPending, - } - - // Should return true with fallback logic (no running tasks, default max concurrent = 1) - result := mq.canScheduleTaskNow(task) - if !result { - t.Errorf("Expected canScheduleTaskNow to return true with fallback logic, got false") - } -} - -func TestCanScheduleTaskNow_FallbackWithRunningTasks(t *testing.T) { - // Test fallback logic when there are already running tasks - mq := &MaintenanceQueue{ - tasks: map[string]*MaintenanceTask{ - "running-task": { - ID: "running-task", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - }, - }, - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, - integration: nil, - } - - task := &MaintenanceTask{ - ID: "test-task-2", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusPending, - } - - // Should return false because max concurrent is 1 and we have 1 running task - result := mq.canScheduleTaskNow(task) - if result { - t.Errorf("Expected canScheduleTaskNow to return false when at capacity, got true") - } -} - -func TestCanScheduleTaskNow_DifferentTaskTypes(t *testing.T) { - // Test that different task types don't interfere with each other - mq := &MaintenanceQueue{ - tasks: map[string]*MaintenanceTask{ - "running-ec-task": { - ID: "running-ec-task", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - }, - }, - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, - integration: nil, - } - - // Test vacuum task when EC task is running - vacuumTask := &MaintenanceTask{ - ID: "vacuum-task", - Type: MaintenanceTaskType("vacuum"), - Status: TaskStatusPending, - } - - // Should return true because vacuum and erasure_coding are different task types - result := mq.canScheduleTaskNow(vacuumTask) - if !result { - t.Errorf("Expected canScheduleTaskNow to return true for different task type, got false") - } - - // Test another EC task when one is already running - ecTask := &MaintenanceTask{ - ID: "ec-task", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusPending, - } - - // Should return false because max concurrent for EC is 1 and we have 1 running - result = mq.canScheduleTaskNow(ecTask) - if result { - t.Errorf("Expected canScheduleTaskNow to return false for same task type at capacity, got true") - } -} - -func TestCanScheduleTaskNow_WithIntegration(t *testing.T) { - // Test with a real MaintenanceIntegration (will use fallback logic in current implementation) - policy := &MaintenancePolicy{ - TaskPolicies: make(map[string]*worker_pb.TaskPolicy), - GlobalMaxConcurrent: 10, - DefaultRepeatIntervalSeconds: 24 * 60 * 60, // 24 hours in seconds - DefaultCheckIntervalSeconds: 60 * 60, // 1 hour in seconds - } - mq := NewMaintenanceQueue(policy) - - // Create a basic integration (this would normally be more complex) - integration := NewMaintenanceIntegration(mq, policy) - mq.SetIntegration(integration) - - task := &MaintenanceTask{ - ID: "test-task-3", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusPending, - } - - // With our current implementation (fallback logic), this should return true - result := mq.canScheduleTaskNow(task) - if !result { - t.Errorf("Expected canScheduleTaskNow to return true with fallback logic, got false") - } -} - -func TestGetRunningTaskCount(t *testing.T) { - // Test the helper function used by fallback logic - mq := &MaintenanceQueue{ - tasks: map[string]*MaintenanceTask{ - "task1": { - ID: "task1", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - }, - "task2": { - ID: "task2", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusAssigned, - }, - "task3": { - ID: "task3", - Type: MaintenanceTaskType("vacuum"), - Status: TaskStatusInProgress, - }, - "task4": { - ID: "task4", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusCompleted, - }, - }, - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - } - - // Should count 2 running EC tasks (in_progress + assigned) - ecCount := mq.GetRunningTaskCount(MaintenanceTaskType("erasure_coding")) - if ecCount != 2 { - t.Errorf("Expected 2 running EC tasks, got %d", ecCount) - } - - // Should count 1 running vacuum task - vacuumCount := mq.GetRunningTaskCount(MaintenanceTaskType("vacuum")) - if vacuumCount != 1 { - t.Errorf("Expected 1 running vacuum task, got %d", vacuumCount) - } - - // Should count 0 running balance tasks - balanceCount := mq.GetRunningTaskCount(MaintenanceTaskType("balance")) - if balanceCount != 0 { - t.Errorf("Expected 0 running balance tasks, got %d", balanceCount) - } -} - -func TestCanExecuteTaskType(t *testing.T) { - // Test the fallback logic helper function - mq := &MaintenanceQueue{ - tasks: map[string]*MaintenanceTask{ - "running-task": { - ID: "running-task", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - }, - }, - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, // Will use default max concurrent = 1 - integration: nil, - } - - // Should return false for EC (1 running, max = 1) - result := mq.canExecuteTaskType(MaintenanceTaskType("erasure_coding")) - if result { - t.Errorf("Expected canExecuteTaskType to return false for EC at capacity, got true") - } - - // Should return true for vacuum (0 running, max = 1) - result = mq.canExecuteTaskType(MaintenanceTaskType("vacuum")) - if !result { - t.Errorf("Expected canExecuteTaskType to return true for vacuum, got false") - } -} - -func TestGetMaxConcurrentForTaskType_DefaultBehavior(t *testing.T) { - // Test the default behavior when no policy or integration is set - mq := &MaintenanceQueue{ - tasks: make(map[string]*MaintenanceTask), - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, - integration: nil, - } - - // Should return default value of 1 - maxConcurrent := mq.getMaxConcurrentForTaskType(MaintenanceTaskType("erasure_coding")) - if maxConcurrent != 1 { - t.Errorf("Expected default max concurrent to be 1, got %d", maxConcurrent) - } - - maxConcurrent = mq.getMaxConcurrentForTaskType(MaintenanceTaskType("vacuum")) - if maxConcurrent != 1 { - t.Errorf("Expected default max concurrent to be 1, got %d", maxConcurrent) - } -} - -// Test edge cases and error conditions -func TestCanScheduleTaskNow_NilTask(t *testing.T) { - mq := &MaintenanceQueue{ - tasks: make(map[string]*MaintenanceTask), - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, - integration: nil, - } - - // This should panic with a nil task, so we expect and catch the panic - defer func() { - if r := recover(); r == nil { - t.Errorf("Expected canScheduleTaskNow to panic with nil task, but it didn't") - } - }() - - // This should panic - mq.canScheduleTaskNow(nil) -} - -func TestCanScheduleTaskNow_EmptyTaskType(t *testing.T) { - mq := &MaintenanceQueue{ - tasks: make(map[string]*MaintenanceTask), - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: nil, - integration: nil, - } - - task := &MaintenanceTask{ - ID: "empty-type-task", - Type: MaintenanceTaskType(""), // Empty task type - Status: TaskStatusPending, - } - - // Should handle empty task type gracefully - result := mq.canScheduleTaskNow(task) - if !result { - t.Errorf("Expected canScheduleTaskNow to handle empty task type, got false") - } -} - -func TestCanScheduleTaskNow_WithPolicy(t *testing.T) { - // Test with a policy that allows higher concurrency - policy := &MaintenancePolicy{ - TaskPolicies: map[string]*worker_pb.TaskPolicy{ - string(MaintenanceTaskType("erasure_coding")): { - Enabled: true, - MaxConcurrent: 3, - RepeatIntervalSeconds: 60 * 60, // 1 hour - CheckIntervalSeconds: 60 * 60, // 1 hour - }, - string(MaintenanceTaskType("vacuum")): { - Enabled: true, - MaxConcurrent: 2, - RepeatIntervalSeconds: 60 * 60, // 1 hour - CheckIntervalSeconds: 60 * 60, // 1 hour - }, - }, - GlobalMaxConcurrent: 10, - DefaultRepeatIntervalSeconds: 24 * 60 * 60, // 24 hours in seconds - DefaultCheckIntervalSeconds: 60 * 60, // 1 hour in seconds - } - - mq := &MaintenanceQueue{ - tasks: map[string]*MaintenanceTask{ - "running-task-1": { - ID: "running-task-1", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - }, - "running-task-2": { - ID: "running-task-2", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusAssigned, - }, - }, - pendingTasks: []*MaintenanceTask{}, - workers: make(map[string]*MaintenanceWorker), - policy: policy, - integration: nil, - } - - task := &MaintenanceTask{ - ID: "test-task-policy", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusPending, - } - - // Should return true because we have 2 running EC tasks but max is 3 - result := mq.canScheduleTaskNow(task) - if !result { - t.Errorf("Expected canScheduleTaskNow to return true with policy allowing 3 concurrent, got false") - } - - // Add one more running task to reach the limit - mq.tasks["running-task-3"] = &MaintenanceTask{ - ID: "running-task-3", - Type: MaintenanceTaskType("erasure_coding"), - Status: TaskStatusInProgress, - } - - // Should return false because we now have 3 running EC tasks (at limit) - result = mq.canScheduleTaskNow(task) - if result { - t.Errorf("Expected canScheduleTaskNow to return false when at policy limit, got true") - } -} diff --git a/weed/admin/maintenance/maintenance_scanner.go b/weed/admin/maintenance/maintenance_scanner.go deleted file mode 100644 index 6f3b46be2..000000000 --- a/weed/admin/maintenance/maintenance_scanner.go +++ /dev/null @@ -1,231 +0,0 @@ -package maintenance - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -// NewMaintenanceScanner creates a new maintenance scanner -func NewMaintenanceScanner(adminClient AdminClient, policy *MaintenancePolicy, queue *MaintenanceQueue) *MaintenanceScanner { - scanner := &MaintenanceScanner{ - adminClient: adminClient, - policy: policy, - queue: queue, - lastScan: make(map[MaintenanceTaskType]time.Time), - } - - // Initialize integration - scanner.integration = NewMaintenanceIntegration(queue, policy) - - // Set up bidirectional relationship - queue.SetIntegration(scanner.integration) - - glog.V(1).Infof("Initialized maintenance scanner with task system") - - return scanner -} - -// ScanForMaintenanceTasks analyzes the cluster and generates maintenance tasks -func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult, error) { - // Get volume health metrics - volumeMetrics, err := ms.getVolumeHealthMetrics() - if err != nil { - return nil, fmt.Errorf("failed to get volume health metrics: %w", err) - } - - // Use task system for all task types - if ms.integration != nil { - // Convert metrics to task system format - taskMetrics := ms.convertToTaskMetrics(volumeMetrics) - - // Update topology information for complete cluster view (including empty servers) - // This must happen before task detection to ensure EC placement can consider all servers - if ms.lastTopologyInfo != nil { - if err := ms.integration.UpdateTopologyInfo(ms.lastTopologyInfo); err != nil { - glog.Errorf("Failed to update topology info for empty servers: %v", err) - // Don't fail the scan - continue with just volume-bearing servers - } else { - glog.V(1).Infof("Updated topology info for complete cluster view including empty servers") - } - } - - // Use task detection system with complete cluster information - results, err := ms.integration.ScanWithTaskDetectors(taskMetrics) - if err != nil { - glog.Errorf("Task scanning failed: %v", err) - return nil, err - } - - glog.V(1).Infof("Maintenance scan completed: found %d tasks", len(results)) - return results, nil - } - - // No integration available - glog.Warningf("No integration available, no tasks will be scheduled") - return []*TaskDetectionResult{}, nil -} - -// getVolumeHealthMetrics collects health information for all volumes -func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics, error) { - var metrics []*VolumeHealthMetrics - - glog.V(1).Infof("Collecting volume health metrics from master") - err := ms.adminClient.WithMasterClient(func(client master_pb.SeaweedClient) error { - - resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) - if err != nil { - return err - } - - if resp.TopologyInfo == nil { - glog.Warningf("No topology info received from master") - return nil - } - - volumeSizeLimitBytes := uint64(resp.VolumeSizeLimitMb) * 1024 * 1024 // Convert MB to bytes - - // Track all nodes discovered in topology - var allNodesInTopology []string - var nodesWithVolumes []string - var nodesWithoutVolumes []string - - for _, dc := range resp.TopologyInfo.DataCenterInfos { - glog.V(2).Infof("Processing datacenter: %s", dc.Id) - for _, rack := range dc.RackInfos { - glog.V(2).Infof("Processing rack: %s in datacenter: %s", rack.Id, dc.Id) - for _, node := range rack.DataNodeInfos { - allNodesInTopology = append(allNodesInTopology, node.Id) - glog.V(2).Infof("Found volume server in topology: %s (disks: %d)", node.Id, len(node.DiskInfos)) - - hasVolumes := false - // Process each disk on this node - for diskType, diskInfo := range node.DiskInfos { - if len(diskInfo.VolumeInfos) > 0 { - hasVolumes = true - glog.V(2).Infof("Volume server %s disk %s has %d volumes", node.Id, diskType, len(diskInfo.VolumeInfos)) - } - - // Process volumes on this specific disk - for _, volInfo := range diskInfo.VolumeInfos { - metric := &VolumeHealthMetrics{ - VolumeID: volInfo.Id, - Server: node.Id, - DiskType: diskType, // Track which disk this volume is on - DiskId: volInfo.DiskId, // Use disk ID from volume info - DataCenter: dc.Id, // Data center from current loop - Rack: rack.Id, // Rack from current loop - Collection: volInfo.Collection, - Size: volInfo.Size, - DeletedBytes: volInfo.DeletedByteCount, - LastModified: time.Unix(int64(volInfo.ModifiedAtSecond), 0), - IsReadOnly: volInfo.ReadOnly, - IsECVolume: false, // Will be determined from volume structure - ReplicaCount: 1, // Will be counted - ExpectedReplicas: int(volInfo.ReplicaPlacement), - } - - // Calculate derived metrics - if metric.Size > 0 { - metric.GarbageRatio = float64(metric.DeletedBytes) / float64(metric.Size) - // Calculate fullness ratio using actual volume size limit from master - metric.FullnessRatio = float64(metric.Size) / float64(volumeSizeLimitBytes) - } - metric.Age = time.Since(metric.LastModified) - - glog.V(3).Infof("Volume %d on %s:%s (ID %d): size=%d, limit=%d, fullness=%.2f", - metric.VolumeID, metric.Server, metric.DiskType, metric.DiskId, metric.Size, volumeSizeLimitBytes, metric.FullnessRatio) - - metrics = append(metrics, metric) - } - } - - if hasVolumes { - nodesWithVolumes = append(nodesWithVolumes, node.Id) - } else { - nodesWithoutVolumes = append(nodesWithoutVolumes, node.Id) - glog.V(1).Infof("Volume server %s found in topology but has no volumes", node.Id) - } - } - } - } - - glog.Infof("Topology discovery complete:") - glog.Infof(" - Total volume servers in topology: %d (%v)", len(allNodesInTopology), allNodesInTopology) - glog.Infof(" - Volume servers with volumes: %d (%v)", len(nodesWithVolumes), nodesWithVolumes) - glog.Infof(" - Volume servers without volumes: %d (%v)", len(nodesWithoutVolumes), nodesWithoutVolumes) - - // Store topology info for volume shard tracker - ms.lastTopologyInfo = resp.TopologyInfo - - return nil - }) - - if err != nil { - glog.Errorf("Failed to get volume health metrics: %v", err) - return nil, err - } - - glog.V(1).Infof("Successfully collected metrics for %d actual volumes with disk ID information", len(metrics)) - - // Count actual replicas and identify EC volumes - ms.enrichVolumeMetrics(metrics) - - return metrics, nil -} - -// enrichVolumeMetrics adds additional information like replica counts -func (ms *MaintenanceScanner) enrichVolumeMetrics(metrics []*VolumeHealthMetrics) { - // Group volumes by ID to count replicas - volumeGroups := make(map[uint32][]*VolumeHealthMetrics) - for _, metric := range metrics { - volumeGroups[metric.VolumeID] = append(volumeGroups[metric.VolumeID], metric) - } - - // Update replica counts for actual volumes - for volumeID, replicas := range volumeGroups { - replicaCount := len(replicas) - for _, replica := range replicas { - replica.ReplicaCount = replicaCount - } - glog.V(3).Infof("Volume %d has %d replicas", volumeID, replicaCount) - } - - // TODO: Identify EC volumes by checking volume structure - // This would require querying volume servers for EC shard information -} - -// convertToTaskMetrics converts existing volume metrics to task system format -func (ms *MaintenanceScanner) convertToTaskMetrics(metrics []*VolumeHealthMetrics) []*types.VolumeHealthMetrics { - var simplified []*types.VolumeHealthMetrics - - for _, metric := range metrics { - simplified = append(simplified, &types.VolumeHealthMetrics{ - VolumeID: metric.VolumeID, - Server: metric.Server, - DiskType: metric.DiskType, - DiskId: metric.DiskId, - DataCenter: metric.DataCenter, - Rack: metric.Rack, - Collection: metric.Collection, - Size: metric.Size, - DeletedBytes: metric.DeletedBytes, - GarbageRatio: metric.GarbageRatio, - LastModified: metric.LastModified, - Age: metric.Age, - ReplicaCount: metric.ReplicaCount, - ExpectedReplicas: metric.ExpectedReplicas, - IsReadOnly: metric.IsReadOnly, - HasRemoteCopy: metric.HasRemoteCopy, - IsECVolume: metric.IsECVolume, - FullnessRatio: metric.FullnessRatio, - }) - } - - glog.V(2).Infof("Converted %d volume metrics with disk ID information for task detection", len(simplified)) - return simplified -} diff --git a/weed/admin/maintenance/maintenance_types.go b/weed/admin/maintenance/maintenance_types.go deleted file mode 100644 index fe5d5fa55..000000000 --- a/weed/admin/maintenance/maintenance_types.go +++ /dev/null @@ -1,650 +0,0 @@ -package maintenance - -import ( - "html/template" - "sort" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -// AdminClient interface defines what the maintenance system needs from the admin server -type AdminClient interface { - WithMasterClient(fn func(client master_pb.SeaweedClient) error) error -} - -// MaintenanceTaskType represents different types of maintenance operations -type MaintenanceTaskType string - -// GetRegisteredMaintenanceTaskTypes returns all registered task types as MaintenanceTaskType values -// sorted alphabetically for consistent menu ordering -func GetRegisteredMaintenanceTaskTypes() []MaintenanceTaskType { - typesRegistry := tasks.GetGlobalTypesRegistry() - var taskTypes []MaintenanceTaskType - - for workerTaskType := range typesRegistry.GetAllDetectors() { - maintenanceTaskType := MaintenanceTaskType(string(workerTaskType)) - taskTypes = append(taskTypes, maintenanceTaskType) - } - - // Sort task types alphabetically to ensure consistent menu ordering - sort.Slice(taskTypes, func(i, j int) bool { - return string(taskTypes[i]) < string(taskTypes[j]) - }) - - return taskTypes -} - -// GetMaintenanceTaskType returns a specific task type if it's registered, or empty string if not found -func GetMaintenanceTaskType(taskTypeName string) MaintenanceTaskType { - typesRegistry := tasks.GetGlobalTypesRegistry() - - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == taskTypeName { - return MaintenanceTaskType(taskTypeName) - } - } - - return MaintenanceTaskType("") -} - -// IsMaintenanceTaskTypeRegistered checks if a task type is registered -func IsMaintenanceTaskTypeRegistered(taskType MaintenanceTaskType) bool { - typesRegistry := tasks.GetGlobalTypesRegistry() - - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - return true - } - } - - return false -} - -// MaintenanceTaskPriority represents task execution priority -type MaintenanceTaskPriority int - -const ( - PriorityLow MaintenanceTaskPriority = iota - PriorityNormal - PriorityHigh - PriorityCritical -) - -// MaintenanceTaskStatus represents the current status of a task -type MaintenanceTaskStatus string - -const ( - TaskStatusPending MaintenanceTaskStatus = "pending" - TaskStatusAssigned MaintenanceTaskStatus = "assigned" - TaskStatusInProgress MaintenanceTaskStatus = "in_progress" - TaskStatusCompleted MaintenanceTaskStatus = "completed" - TaskStatusFailed MaintenanceTaskStatus = "failed" - TaskStatusCancelled MaintenanceTaskStatus = "cancelled" -) - -// MaintenanceTask represents a single maintenance operation -type MaintenanceTask struct { - ID string `json:"id"` - Type MaintenanceTaskType `json:"type"` - Priority MaintenanceTaskPriority `json:"priority"` - Status MaintenanceTaskStatus `json:"status"` - VolumeID uint32 `json:"volume_id,omitempty"` - Server string `json:"server,omitempty"` - Collection string `json:"collection,omitempty"` - TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"` - Reason string `json:"reason"` - CreatedAt time.Time `json:"created_at"` - ScheduledAt time.Time `json:"scheduled_at"` - StartedAt *time.Time `json:"started_at,omitempty"` - CompletedAt *time.Time `json:"completed_at,omitempty"` - WorkerID string `json:"worker_id,omitempty"` - Error string `json:"error,omitempty"` - Progress float64 `json:"progress"` // 0-100 - RetryCount int `json:"retry_count"` - MaxRetries int `json:"max_retries"` - - // Enhanced fields for detailed task tracking - CreatedBy string `json:"created_by,omitempty"` // Who/what created this task - CreationContext string `json:"creation_context,omitempty"` // Additional context about creation - AssignmentHistory []*TaskAssignmentRecord `json:"assignment_history,omitempty"` // History of worker assignments - DetailedReason string `json:"detailed_reason,omitempty"` // More detailed explanation than Reason - Tags map[string]string `json:"tags,omitempty"` // Additional metadata tags -} - -// TaskAssignmentRecord tracks when a task was assigned to a worker -type TaskAssignmentRecord struct { - WorkerID string `json:"worker_id"` - WorkerAddress string `json:"worker_address"` - AssignedAt time.Time `json:"assigned_at"` - UnassignedAt *time.Time `json:"unassigned_at,omitempty"` - Reason string `json:"reason"` // Why was it assigned/unassigned -} - -// TaskExecutionLog represents a log entry from task execution -type TaskExecutionLog struct { - Timestamp time.Time `json:"timestamp"` - Level string `json:"level"` // "info", "warn", "error", "debug" - Message string `json:"message"` - Source string `json:"source"` // Which component logged this - TaskID string `json:"task_id"` - WorkerID string `json:"worker_id"` - // Optional structured fields carried from worker logs - Fields map[string]string `json:"fields,omitempty"` - // Optional progress/status carried from worker logs - Progress *float64 `json:"progress,omitempty"` - Status string `json:"status,omitempty"` -} - -// TaskDetailData represents comprehensive information about a task for the detail view -type TaskDetailData struct { - Task *MaintenanceTask `json:"task"` - AssignmentHistory []*TaskAssignmentRecord `json:"assignment_history"` - ExecutionLogs []*TaskExecutionLog `json:"execution_logs"` - RelatedTasks []*MaintenanceTask `json:"related_tasks,omitempty"` // Other tasks on same volume/server - WorkerInfo *MaintenanceWorker `json:"worker_info,omitempty"` // Current or last assigned worker - CreationMetrics *TaskCreationMetrics `json:"creation_metrics,omitempty"` // Metrics that led to task creation - LastUpdated time.Time `json:"last_updated"` -} - -// TaskCreationMetrics holds metrics that led to the task being created -type TaskCreationMetrics struct { - TriggerMetric string `json:"trigger_metric"` // What metric triggered this task - MetricValue float64 `json:"metric_value"` // Value of the trigger metric - Threshold float64 `json:"threshold"` // Threshold that was exceeded - VolumeMetrics *VolumeHealthMetrics `json:"volume_metrics,omitempty"` - AdditionalData map[string]interface{} `json:"additional_data,omitempty"` -} - -// MaintenanceConfig holds configuration for the maintenance system -// DEPRECATED: Use worker_pb.MaintenanceConfig instead -type MaintenanceConfig = worker_pb.MaintenanceConfig - -// MaintenancePolicy defines policies for maintenance operations -// DEPRECATED: Use worker_pb.MaintenancePolicy instead -type MaintenancePolicy = worker_pb.MaintenancePolicy - -// TaskPolicy represents configuration for a specific task type -// DEPRECATED: Use worker_pb.TaskPolicy instead -type TaskPolicy = worker_pb.TaskPolicy - -// TaskPersistence interface for task state persistence -type TaskPersistence interface { - SaveTaskState(task *MaintenanceTask) error - LoadTaskState(taskID string) (*MaintenanceTask, error) - LoadAllTaskStates() ([]*MaintenanceTask, error) - DeleteTaskState(taskID string) error - CleanupCompletedTasks() error -} - -// Default configuration values -func DefaultMaintenanceConfig() *MaintenanceConfig { - return DefaultMaintenanceConfigProto() -} - -// Policy helper functions (since we can't add methods to type aliases) - -// GetTaskPolicy returns the policy for a specific task type -func GetTaskPolicy(mp *MaintenancePolicy, taskType MaintenanceTaskType) *TaskPolicy { - if mp.TaskPolicies == nil { - return nil - } - return mp.TaskPolicies[string(taskType)] -} - -// SetTaskPolicy sets the policy for a specific task type -func SetTaskPolicy(mp *MaintenancePolicy, taskType MaintenanceTaskType, policy *TaskPolicy) { - if mp.TaskPolicies == nil { - mp.TaskPolicies = make(map[string]*TaskPolicy) - } - mp.TaskPolicies[string(taskType)] = policy -} - -// IsTaskEnabled returns whether a task type is enabled -func IsTaskEnabled(mp *MaintenancePolicy, taskType MaintenanceTaskType) bool { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return false - } - return policy.Enabled -} - -// GetMaxConcurrent returns the max concurrent limit for a task type -func GetMaxConcurrent(mp *MaintenancePolicy, taskType MaintenanceTaskType) int { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return 1 - } - return int(policy.MaxConcurrent) -} - -// GetRepeatInterval returns the repeat interval for a task type -func GetRepeatInterval(mp *MaintenancePolicy, taskType MaintenanceTaskType) int { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return int(mp.DefaultRepeatIntervalSeconds) - } - return int(policy.RepeatIntervalSeconds) -} - -// GetVacuumTaskConfig returns the vacuum task configuration -func GetVacuumTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.VacuumTaskConfig { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return nil - } - return policy.GetVacuumConfig() -} - -// GetErasureCodingTaskConfig returns the erasure coding task configuration -func GetErasureCodingTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.ErasureCodingTaskConfig { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return nil - } - return policy.GetErasureCodingConfig() -} - -// GetBalanceTaskConfig returns the balance task configuration -func GetBalanceTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.BalanceTaskConfig { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return nil - } - return policy.GetBalanceConfig() -} - -// GetReplicationTaskConfig returns the replication task configuration -func GetReplicationTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType) *worker_pb.ReplicationTaskConfig { - policy := GetTaskPolicy(mp, taskType) - if policy == nil { - return nil - } - return policy.GetReplicationConfig() -} - -// Note: GetTaskConfig was removed - use typed getters: GetVacuumTaskConfig, GetErasureCodingTaskConfig, GetBalanceTaskConfig, or GetReplicationTaskConfig - -// SetVacuumTaskConfig sets the vacuum task configuration -func SetVacuumTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.VacuumTaskConfig) { - policy := GetTaskPolicy(mp, taskType) - if policy != nil { - policy.TaskConfig = &worker_pb.TaskPolicy_VacuumConfig{ - VacuumConfig: config, - } - } -} - -// SetErasureCodingTaskConfig sets the erasure coding task configuration -func SetErasureCodingTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.ErasureCodingTaskConfig) { - policy := GetTaskPolicy(mp, taskType) - if policy != nil { - policy.TaskConfig = &worker_pb.TaskPolicy_ErasureCodingConfig{ - ErasureCodingConfig: config, - } - } -} - -// SetBalanceTaskConfig sets the balance task configuration -func SetBalanceTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.BalanceTaskConfig) { - policy := GetTaskPolicy(mp, taskType) - if policy != nil { - policy.TaskConfig = &worker_pb.TaskPolicy_BalanceConfig{ - BalanceConfig: config, - } - } -} - -// SetReplicationTaskConfig sets the replication task configuration -func SetReplicationTaskConfig(mp *MaintenancePolicy, taskType MaintenanceTaskType, config *worker_pb.ReplicationTaskConfig) { - policy := GetTaskPolicy(mp, taskType) - if policy != nil { - policy.TaskConfig = &worker_pb.TaskPolicy_ReplicationConfig{ - ReplicationConfig: config, - } - } -} - -// SetTaskConfig sets a configuration value for a task type (legacy method - use typed setters above) -// Note: SetTaskConfig was removed - use typed setters: SetVacuumTaskConfig, SetErasureCodingTaskConfig, SetBalanceTaskConfig, or SetReplicationTaskConfig - -// MaintenanceWorker represents a worker instance -type MaintenanceWorker struct { - ID string `json:"id"` - Address string `json:"address"` - LastHeartbeat time.Time `json:"last_heartbeat"` - Status string `json:"status"` // active, inactive, busy - CurrentTask *MaintenanceTask `json:"current_task,omitempty"` - Capabilities []MaintenanceTaskType `json:"capabilities"` - MaxConcurrent int `json:"max_concurrent"` - CurrentLoad int `json:"current_load"` -} - -// MaintenanceQueue manages the task queue and worker coordination -type MaintenanceQueue struct { - tasks map[string]*MaintenanceTask - workers map[string]*MaintenanceWorker - pendingTasks []*MaintenanceTask - mutex sync.RWMutex - policy *MaintenancePolicy - integration *MaintenanceIntegration - persistence TaskPersistence // Interface for task persistence -} - -// MaintenanceScanner analyzes the cluster and generates maintenance tasks -type MaintenanceScanner struct { - adminClient AdminClient - policy *MaintenancePolicy - queue *MaintenanceQueue - lastScan map[MaintenanceTaskType]time.Time - integration *MaintenanceIntegration - lastTopologyInfo *master_pb.TopologyInfo -} - -// TaskDetectionResult represents the result of scanning for maintenance needs -type TaskDetectionResult struct { - TaskType MaintenanceTaskType `json:"task_type"` - VolumeID uint32 `json:"volume_id,omitempty"` - Server string `json:"server,omitempty"` - Collection string `json:"collection,omitempty"` - Priority MaintenanceTaskPriority `json:"priority"` - Reason string `json:"reason"` - TypedParams *worker_pb.TaskParams `json:"typed_params,omitempty"` - ScheduleAt time.Time `json:"schedule_at"` -} - -// VolumeHealthMetrics represents the health metrics for a volume -type VolumeHealthMetrics struct { - VolumeID uint32 `json:"volume_id"` - Server string `json:"server"` - DiskType string `json:"disk_type"` // Disk type (e.g., "hdd", "ssd") or disk path (e.g., "/data1") - DiskId uint32 `json:"disk_id"` // ID of the disk in Store.Locations array - DataCenter string `json:"data_center"` // Data center of the server - Rack string `json:"rack"` // Rack of the server - Collection string `json:"collection"` - Size uint64 `json:"size"` - DeletedBytes uint64 `json:"deleted_bytes"` - GarbageRatio float64 `json:"garbage_ratio"` - LastModified time.Time `json:"last_modified"` - Age time.Duration `json:"age"` - ReplicaCount int `json:"replica_count"` - ExpectedReplicas int `json:"expected_replicas"` - IsReadOnly bool `json:"is_read_only"` - HasRemoteCopy bool `json:"has_remote_copy"` - IsECVolume bool `json:"is_ec_volume"` - FullnessRatio float64 `json:"fullness_ratio"` -} - -// MaintenanceStats provides statistics about maintenance operations -type MaintenanceStats struct { - TotalTasks int `json:"total_tasks"` - TasksByStatus map[MaintenanceTaskStatus]int `json:"tasks_by_status"` - TasksByType map[MaintenanceTaskType]int `json:"tasks_by_type"` - ActiveWorkers int `json:"active_workers"` - CompletedToday int `json:"completed_today"` - FailedToday int `json:"failed_today"` - AverageTaskTime time.Duration `json:"average_task_time"` - LastScanTime time.Time `json:"last_scan_time"` - NextScanTime time.Time `json:"next_scan_time"` -} - -// MaintenanceQueueData represents data for the queue visualization UI -type MaintenanceQueueData struct { - Tasks []*MaintenanceTask `json:"tasks"` - Workers []*MaintenanceWorker `json:"workers"` - Stats *QueueStats `json:"stats"` - LastUpdated time.Time `json:"last_updated"` -} - -// QueueStats provides statistics for the queue UI -type QueueStats struct { - PendingTasks int `json:"pending_tasks"` - RunningTasks int `json:"running_tasks"` - CompletedToday int `json:"completed_today"` - FailedToday int `json:"failed_today"` - TotalTasks int `json:"total_tasks"` -} - -// MaintenanceConfigData represents configuration data for the UI -type MaintenanceConfigData struct { - Config *MaintenanceConfig `json:"config"` - IsEnabled bool `json:"is_enabled"` - LastScanTime time.Time `json:"last_scan_time"` - NextScanTime time.Time `json:"next_scan_time"` - SystemStats *MaintenanceStats `json:"system_stats"` - MenuItems []*MaintenanceMenuItem `json:"menu_items"` -} - -// MaintenanceMenuItem represents a menu item for task configuration -type MaintenanceMenuItem struct { - TaskType MaintenanceTaskType `json:"task_type"` - DisplayName string `json:"display_name"` - Description string `json:"description"` - Icon string `json:"icon"` - IsEnabled bool `json:"is_enabled"` - Path string `json:"path"` -} - -// WorkerDetailsData represents detailed worker information -type WorkerDetailsData struct { - Worker *MaintenanceWorker `json:"worker"` - CurrentTasks []*MaintenanceTask `json:"current_tasks"` - RecentTasks []*MaintenanceTask `json:"recent_tasks"` - Performance *WorkerPerformance `json:"performance"` - LastUpdated time.Time `json:"last_updated"` -} - -// WorkerPerformance tracks worker performance metrics -type WorkerPerformance struct { - TasksCompleted int `json:"tasks_completed"` - TasksFailed int `json:"tasks_failed"` - AverageTaskTime time.Duration `json:"average_task_time"` - Uptime time.Duration `json:"uptime"` - SuccessRate float64 `json:"success_rate"` -} - -// TaskConfigData represents data for individual task configuration page -type TaskConfigData struct { - TaskType MaintenanceTaskType `json:"task_type"` - TaskName string `json:"task_name"` - TaskIcon string `json:"task_icon"` - Description string `json:"description"` - ConfigFormHTML template.HTML `json:"config_form_html"` -} - -// ClusterReplicationTask represents a cluster replication task parameters -type ClusterReplicationTask struct { - SourcePath string `json:"source_path"` - TargetCluster string `json:"target_cluster"` - TargetPath string `json:"target_path"` - ReplicationMode string `json:"replication_mode"` // "sync", "async", "backup" - Priority int `json:"priority"` - Checksum string `json:"checksum,omitempty"` - FileSize int64 `json:"file_size"` - CreatedAt time.Time `json:"created_at"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// BuildMaintenancePolicyFromTasks creates a maintenance policy with configurations -// from all registered tasks using their UI providers -func BuildMaintenancePolicyFromTasks() *MaintenancePolicy { - policy := &MaintenancePolicy{ - TaskPolicies: make(map[string]*TaskPolicy), - GlobalMaxConcurrent: 4, - DefaultRepeatIntervalSeconds: 6 * 3600, // 6 hours in seconds - DefaultCheckIntervalSeconds: 12 * 3600, // 12 hours in seconds - } - - // Get all registered task types from the UI registry - uiRegistry := tasks.GetGlobalUIRegistry() - typesRegistry := tasks.GetGlobalTypesRegistry() - - for taskType, provider := range uiRegistry.GetAllProviders() { - // Convert task type to maintenance task type - maintenanceTaskType := MaintenanceTaskType(string(taskType)) - - // Get the default configuration from the UI provider - defaultConfig := provider.GetCurrentConfig() - - // Create task policy from UI configuration - taskPolicy := &TaskPolicy{ - Enabled: true, // Default enabled - MaxConcurrent: 2, // Default concurrency - RepeatIntervalSeconds: policy.DefaultRepeatIntervalSeconds, - CheckIntervalSeconds: policy.DefaultCheckIntervalSeconds, - } - - // Extract configuration using TaskConfig interface - no more map conversions! - if taskConfig, ok := defaultConfig.(interface{ ToTaskPolicy() *worker_pb.TaskPolicy }); ok { - // Use protobuf directly for clean, type-safe config extraction - pbTaskPolicy := taskConfig.ToTaskPolicy() - taskPolicy.Enabled = pbTaskPolicy.Enabled - taskPolicy.MaxConcurrent = pbTaskPolicy.MaxConcurrent - if pbTaskPolicy.RepeatIntervalSeconds > 0 { - taskPolicy.RepeatIntervalSeconds = pbTaskPolicy.RepeatIntervalSeconds - } - if pbTaskPolicy.CheckIntervalSeconds > 0 { - taskPolicy.CheckIntervalSeconds = pbTaskPolicy.CheckIntervalSeconds - } - } - - // Also get defaults from scheduler if available (using types.TaskScheduler explicitly) - var scheduler types.TaskScheduler = typesRegistry.GetScheduler(taskType) - if scheduler != nil { - if taskPolicy.MaxConcurrent <= 0 { - taskPolicy.MaxConcurrent = int32(scheduler.GetMaxConcurrent()) - } - // Convert default repeat interval to seconds - if repeatInterval := scheduler.GetDefaultRepeatInterval(); repeatInterval > 0 { - taskPolicy.RepeatIntervalSeconds = int32(repeatInterval.Seconds()) - } - } - - // Also get defaults from detector if available (using types.TaskDetector explicitly) - var detector types.TaskDetector = typesRegistry.GetDetector(taskType) - if detector != nil { - // Convert scan interval to check interval (seconds) - if scanInterval := detector.ScanInterval(); scanInterval > 0 { - taskPolicy.CheckIntervalSeconds = int32(scanInterval.Seconds()) - } - } - - policy.TaskPolicies[string(maintenanceTaskType)] = taskPolicy - glog.V(3).Infof("Built policy for task type %s: enabled=%v, max_concurrent=%d", - maintenanceTaskType, taskPolicy.Enabled, taskPolicy.MaxConcurrent) - } - - glog.V(2).Infof("Built maintenance policy with %d task configurations", len(policy.TaskPolicies)) - return policy -} - -// SetPolicyFromTasks sets the maintenance policy from registered tasks -func SetPolicyFromTasks(policy *MaintenancePolicy) { - if policy == nil { - return - } - - // Build new policy from tasks - newPolicy := BuildMaintenancePolicyFromTasks() - - // Copy task policies - policy.TaskPolicies = newPolicy.TaskPolicies - - glog.V(1).Infof("Updated maintenance policy with %d task configurations from registered tasks", len(policy.TaskPolicies)) -} - -// GetTaskIcon returns the icon CSS class for a task type from its UI provider -func GetTaskIcon(taskType MaintenanceTaskType) string { - typesRegistry := tasks.GetGlobalTypesRegistry() - uiRegistry := tasks.GetGlobalUIRegistry() - - // Convert MaintenanceTaskType to TaskType - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - // Get the UI provider for this task type - provider := uiRegistry.GetProvider(workerTaskType) - if provider != nil { - return provider.GetIcon() - } - break - } - } - - // Default icon if no UI provider found - return "fas fa-cog text-muted" -} - -// GetTaskDisplayName returns the display name for a task type from its UI provider -func GetTaskDisplayName(taskType MaintenanceTaskType) string { - typesRegistry := tasks.GetGlobalTypesRegistry() - uiRegistry := tasks.GetGlobalUIRegistry() - - // Convert MaintenanceTaskType to TaskType - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - // Get the UI provider for this task type - provider := uiRegistry.GetProvider(workerTaskType) - if provider != nil { - return provider.GetDisplayName() - } - break - } - } - - // Fallback to the task type string - return string(taskType) -} - -// GetTaskDescription returns the description for a task type from its UI provider -func GetTaskDescription(taskType MaintenanceTaskType) string { - typesRegistry := tasks.GetGlobalTypesRegistry() - uiRegistry := tasks.GetGlobalUIRegistry() - - // Convert MaintenanceTaskType to TaskType - for workerTaskType := range typesRegistry.GetAllDetectors() { - if string(workerTaskType) == string(taskType) { - // Get the UI provider for this task type - provider := uiRegistry.GetProvider(workerTaskType) - if provider != nil { - return provider.GetDescription() - } - break - } - } - - // Fallback to a generic description - return "Configure detailed settings for " + string(taskType) + " tasks." -} - -// BuildMaintenanceMenuItems creates menu items for all registered task types -func BuildMaintenanceMenuItems() []*MaintenanceMenuItem { - var menuItems []*MaintenanceMenuItem - - // Get all registered task types - registeredTypes := GetRegisteredMaintenanceTaskTypes() - - for _, taskType := range registeredTypes { - menuItem := &MaintenanceMenuItem{ - TaskType: taskType, - DisplayName: GetTaskDisplayName(taskType), - Description: GetTaskDescription(taskType), - Icon: GetTaskIcon(taskType), - IsEnabled: IsMaintenanceTaskTypeRegistered(taskType), - Path: "/maintenance/config/" + string(taskType), - } - - menuItems = append(menuItems, menuItem) - } - - return menuItems -} - -// Helper functions to extract configuration fields - -// Note: Removed getVacuumConfigField, getErasureCodingConfigField, getBalanceConfigField, getReplicationConfigField -// These were orphaned after removing GetTaskConfig - use typed getters instead diff --git a/weed/admin/maintenance/maintenance_worker.go b/weed/admin/maintenance/maintenance_worker.go deleted file mode 100644 index e4a6b4cf6..000000000 --- a/weed/admin/maintenance/maintenance_worker.go +++ /dev/null @@ -1,421 +0,0 @@ -package maintenance - -import ( - "context" - "fmt" - "os" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/worker" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/types" - - // Import task packages to trigger their auto-registration - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" -) - -// MaintenanceWorkerService manages maintenance task execution -// TaskExecutor defines the function signature for task execution -type TaskExecutor func(*MaintenanceWorkerService, *MaintenanceTask) error - -// TaskExecutorFactory creates a task executor for a given worker service -type TaskExecutorFactory func() TaskExecutor - -// Global registry for task executor factories -var taskExecutorFactories = make(map[MaintenanceTaskType]TaskExecutorFactory) -var executorRegistryMutex sync.RWMutex -var executorRegistryInitOnce sync.Once - -// initializeExecutorFactories dynamically registers executor factories for all auto-registered task types -func initializeExecutorFactories() { - executorRegistryInitOnce.Do(func() { - // Get all registered task types from the global registry - typesRegistry := tasks.GetGlobalTypesRegistry() - - var taskTypes []MaintenanceTaskType - for workerTaskType := range typesRegistry.GetAllDetectors() { - // Convert types.TaskType to MaintenanceTaskType by string conversion - maintenanceTaskType := MaintenanceTaskType(string(workerTaskType)) - taskTypes = append(taskTypes, maintenanceTaskType) - } - - // Register generic executor for all task types - for _, taskType := range taskTypes { - RegisterTaskExecutorFactory(taskType, createGenericTaskExecutor) - } - - glog.V(1).Infof("Dynamically registered generic task executor for %d task types: %v", len(taskTypes), taskTypes) - }) -} - -// RegisterTaskExecutorFactory registers a factory function for creating task executors -func RegisterTaskExecutorFactory(taskType MaintenanceTaskType, factory TaskExecutorFactory) { - executorRegistryMutex.Lock() - defer executorRegistryMutex.Unlock() - taskExecutorFactories[taskType] = factory - glog.V(2).Infof("Registered executor factory for task type: %s", taskType) -} - -// GetTaskExecutorFactory returns the factory for a task type -func GetTaskExecutorFactory(taskType MaintenanceTaskType) (TaskExecutorFactory, bool) { - // Ensure executor factories are initialized - initializeExecutorFactories() - - executorRegistryMutex.RLock() - defer executorRegistryMutex.RUnlock() - factory, exists := taskExecutorFactories[taskType] - return factory, exists -} - -// GetSupportedExecutorTaskTypes returns all task types with registered executor factories -func GetSupportedExecutorTaskTypes() []MaintenanceTaskType { - // Ensure executor factories are initialized - initializeExecutorFactories() - - executorRegistryMutex.RLock() - defer executorRegistryMutex.RUnlock() - - taskTypes := make([]MaintenanceTaskType, 0, len(taskExecutorFactories)) - for taskType := range taskExecutorFactories { - taskTypes = append(taskTypes, taskType) - } - return taskTypes -} - -// createGenericTaskExecutor creates a generic task executor that uses the task registry -func createGenericTaskExecutor() TaskExecutor { - return func(mws *MaintenanceWorkerService, task *MaintenanceTask) error { - return mws.executeGenericTask(task) - } -} - -// init does minimal initialization - actual registration happens lazily -func init() { - // Executor factory registration will happen lazily when first accessed - glog.V(1).Infof("Maintenance worker initialized - executor factories will be registered on first access") -} - -type MaintenanceWorkerService struct { - workerID string - address string - adminServer string - capabilities []MaintenanceTaskType - maxConcurrent int - currentTasks map[string]*MaintenanceTask - queue *MaintenanceQueue - adminClient AdminClient - running bool - stopChan chan struct{} - - // Task execution registry - taskExecutors map[MaintenanceTaskType]TaskExecutor - - // Task registry for creating task instances - taskRegistry *tasks.TaskRegistry -} - -// NewMaintenanceWorkerService creates a new maintenance worker service -func NewMaintenanceWorkerService(workerID, address, adminServer string) *MaintenanceWorkerService { - // Get all registered maintenance task types dynamically - capabilities := GetRegisteredMaintenanceTaskTypes() - - worker := &MaintenanceWorkerService{ - workerID: workerID, - address: address, - adminServer: adminServer, - capabilities: capabilities, - maxConcurrent: 2, // Default concurrent task limit - currentTasks: make(map[string]*MaintenanceTask), - stopChan: make(chan struct{}), - taskExecutors: make(map[MaintenanceTaskType]TaskExecutor), - taskRegistry: tasks.GetGlobalTaskRegistry(), // Use global registry with auto-registered tasks - } - - // Initialize task executor registry - worker.initializeTaskExecutors() - - glog.V(1).Infof("Created maintenance worker with %d registered task types", len(worker.taskRegistry.GetAll())) - - return worker -} - -// executeGenericTask executes a task using the task registry instead of hardcoded methods -func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) error { - glog.V(2).Infof("Executing generic task %s: %s for volume %d", task.ID, task.Type, task.VolumeID) - - // Validate that task has proper typed parameters - if task.TypedParams == nil { - return fmt.Errorf("task %s has no typed parameters - task was not properly planned (insufficient destinations)", task.ID) - } - - // Convert MaintenanceTask to types.TaskType - taskType := types.TaskType(string(task.Type)) - - // Create task instance using the registry - taskInstance, err := mws.taskRegistry.Get(taskType).Create(task.TypedParams) - if err != nil { - return fmt.Errorf("failed to create task instance: %w", err) - } - - // Update progress to show task has started - mws.updateTaskProgress(task.ID, 5) - - // Execute the task - err = taskInstance.Execute(context.Background(), task.TypedParams) - if err != nil { - return fmt.Errorf("task execution failed: %w", err) - } - - // Update progress to show completion - mws.updateTaskProgress(task.ID, 100) - - glog.V(2).Infof("Generic task %s completed successfully", task.ID) - return nil -} - -// initializeTaskExecutors sets up the task execution registry dynamically -func (mws *MaintenanceWorkerService) initializeTaskExecutors() { - mws.taskExecutors = make(map[MaintenanceTaskType]TaskExecutor) - - // Get all registered executor factories and create executors - executorRegistryMutex.RLock() - defer executorRegistryMutex.RUnlock() - - for taskType, factory := range taskExecutorFactories { - executor := factory() - mws.taskExecutors[taskType] = executor - glog.V(3).Infof("Initialized executor for task type: %s", taskType) - } - - glog.V(2).Infof("Initialized %d task executors", len(mws.taskExecutors)) -} - -// RegisterTaskExecutor allows dynamic registration of new task executors -func (mws *MaintenanceWorkerService) RegisterTaskExecutor(taskType MaintenanceTaskType, executor TaskExecutor) { - if mws.taskExecutors == nil { - mws.taskExecutors = make(map[MaintenanceTaskType]TaskExecutor) - } - mws.taskExecutors[taskType] = executor - glog.V(1).Infof("Registered executor for task type: %s", taskType) -} - -// GetSupportedTaskTypes returns all task types that this worker can execute -func (mws *MaintenanceWorkerService) GetSupportedTaskTypes() []MaintenanceTaskType { - return GetSupportedExecutorTaskTypes() -} - -// Start begins the worker service -func (mws *MaintenanceWorkerService) Start() error { - mws.running = true - - // Register with admin server - worker := &MaintenanceWorker{ - ID: mws.workerID, - Address: mws.address, - Capabilities: mws.capabilities, - MaxConcurrent: mws.maxConcurrent, - } - - if mws.queue != nil { - mws.queue.RegisterWorker(worker) - } - - // Start worker loop - go mws.workerLoop() - - glog.Infof("Maintenance worker %s started at %s", mws.workerID, mws.address) - return nil -} - -// Stop terminates the worker service -func (mws *MaintenanceWorkerService) Stop() { - mws.running = false - close(mws.stopChan) - - // Wait for current tasks to complete or timeout - timeout := time.NewTimer(30 * time.Second) - defer timeout.Stop() - - for len(mws.currentTasks) > 0 { - select { - case <-timeout.C: - glog.Warningf("Worker %s stopping with %d tasks still running", mws.workerID, len(mws.currentTasks)) - return - case <-time.After(time.Second): - // Check again - } - } - - glog.Infof("Maintenance worker %s stopped", mws.workerID) -} - -// workerLoop is the main worker event loop -func (mws *MaintenanceWorkerService) workerLoop() { - heartbeatTicker := time.NewTicker(30 * time.Second) - defer heartbeatTicker.Stop() - - taskRequestTicker := time.NewTicker(5 * time.Second) - defer taskRequestTicker.Stop() - - for mws.running { - select { - case <-mws.stopChan: - return - case <-heartbeatTicker.C: - mws.sendHeartbeat() - case <-taskRequestTicker.C: - mws.requestTasks() - } - } -} - -// sendHeartbeat sends heartbeat to admin server -func (mws *MaintenanceWorkerService) sendHeartbeat() { - if mws.queue != nil { - mws.queue.UpdateWorkerHeartbeat(mws.workerID) - } -} - -// requestTasks requests new tasks from the admin server -func (mws *MaintenanceWorkerService) requestTasks() { - if len(mws.currentTasks) >= mws.maxConcurrent { - return // Already at capacity - } - - if mws.queue != nil { - task := mws.queue.GetNextTask(mws.workerID, mws.capabilities) - if task != nil { - mws.executeTask(task) - } - } -} - -// executeTask executes a maintenance task -func (mws *MaintenanceWorkerService) executeTask(task *MaintenanceTask) { - mws.currentTasks[task.ID] = task - - go func() { - defer func() { - delete(mws.currentTasks, task.ID) - }() - - glog.Infof("Worker %s executing task %s: %s", mws.workerID, task.ID, task.Type) - - // Execute task using dynamic executor registry - var err error - if executor, exists := mws.taskExecutors[task.Type]; exists { - err = executor(mws, task) - } else { - err = fmt.Errorf("unsupported task type: %s", task.Type) - glog.Errorf("No executor registered for task type: %s", task.Type) - } - - // Report task completion - if mws.queue != nil { - errorMsg := "" - if err != nil { - errorMsg = err.Error() - } - mws.queue.CompleteTask(task.ID, errorMsg) - } - - if err != nil { - glog.Errorf("Worker %s failed to execute task %s: %v", mws.workerID, task.ID, err) - } else { - glog.Infof("Worker %s completed task %s successfully", mws.workerID, task.ID) - } - }() -} - -// updateTaskProgress updates the progress of a task -func (mws *MaintenanceWorkerService) updateTaskProgress(taskID string, progress float64) { - if mws.queue != nil { - mws.queue.UpdateTaskProgress(taskID, progress) - } -} - -// GetStatus returns the current status of the worker -func (mws *MaintenanceWorkerService) GetStatus() map[string]interface{} { - return map[string]interface{}{ - "worker_id": mws.workerID, - "address": mws.address, - "running": mws.running, - "capabilities": mws.capabilities, - "max_concurrent": mws.maxConcurrent, - "current_tasks": len(mws.currentTasks), - "task_details": mws.currentTasks, - } -} - -// SetQueue sets the maintenance queue for the worker -func (mws *MaintenanceWorkerService) SetQueue(queue *MaintenanceQueue) { - mws.queue = queue -} - -// SetAdminClient sets the admin client for the worker -func (mws *MaintenanceWorkerService) SetAdminClient(client AdminClient) { - mws.adminClient = client -} - -// SetCapabilities sets the worker capabilities -func (mws *MaintenanceWorkerService) SetCapabilities(capabilities []MaintenanceTaskType) { - mws.capabilities = capabilities -} - -// SetMaxConcurrent sets the maximum concurrent tasks -func (mws *MaintenanceWorkerService) SetMaxConcurrent(max int) { - mws.maxConcurrent = max -} - -// SetHeartbeatInterval sets the heartbeat interval (placeholder for future use) -func (mws *MaintenanceWorkerService) SetHeartbeatInterval(interval time.Duration) { - // Future implementation for configurable heartbeat -} - -// SetTaskRequestInterval sets the task request interval (placeholder for future use) -func (mws *MaintenanceWorkerService) SetTaskRequestInterval(interval time.Duration) { - // Future implementation for configurable task requests -} - -// MaintenanceWorkerCommand represents a standalone maintenance worker command -type MaintenanceWorkerCommand struct { - workerService *MaintenanceWorkerService -} - -// NewMaintenanceWorkerCommand creates a new worker command -func NewMaintenanceWorkerCommand(workerID, address, adminServer string) *MaintenanceWorkerCommand { - return &MaintenanceWorkerCommand{ - workerService: NewMaintenanceWorkerService(workerID, address, adminServer), - } -} - -// Run starts the maintenance worker as a standalone service -func (mwc *MaintenanceWorkerCommand) Run() error { - // Generate or load persistent worker ID if not provided - if mwc.workerService.workerID == "" { - // Get current working directory for worker ID persistence - wd, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get working directory: %w", err) - } - - workerID, err := worker.GenerateOrLoadWorkerID(wd) - if err != nil { - return fmt.Errorf("failed to generate or load worker ID: %w", err) - } - mwc.workerService.workerID = workerID - } - - // Start the worker service - err := mwc.workerService.Start() - if err != nil { - return fmt.Errorf("failed to start maintenance worker: %w", err) - } - - // Wait for interrupt signal - select {} -} diff --git a/weed/admin/maintenance/pending_operations.go b/weed/admin/maintenance/pending_operations.go deleted file mode 100644 index 16130b4c9..000000000 --- a/weed/admin/maintenance/pending_operations.go +++ /dev/null @@ -1,311 +0,0 @@ -package maintenance - -import ( - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -// PendingOperationType represents the type of pending operation -type PendingOperationType string - -const ( - OpTypeVolumeMove PendingOperationType = "volume_move" - OpTypeVolumeBalance PendingOperationType = "volume_balance" - OpTypeErasureCoding PendingOperationType = "erasure_coding" - OpTypeVacuum PendingOperationType = "vacuum" - OpTypeReplication PendingOperationType = "replication" -) - -// PendingOperation represents a pending volume/shard operation -type PendingOperation struct { - VolumeID uint32 `json:"volume_id"` - OperationType PendingOperationType `json:"operation_type"` - SourceNode string `json:"source_node"` - DestNode string `json:"dest_node,omitempty"` // Empty for non-movement operations - TaskID string `json:"task_id"` - StartTime time.Time `json:"start_time"` - EstimatedSize uint64 `json:"estimated_size"` // Bytes - Collection string `json:"collection"` - Status string `json:"status"` // "assigned", "in_progress", "completing" -} - -// PendingOperations tracks all pending volume/shard operations -type PendingOperations struct { - // Operations by volume ID for conflict detection - byVolumeID map[uint32]*PendingOperation - - // Operations by task ID for updates - byTaskID map[string]*PendingOperation - - // Operations by node for capacity calculations - bySourceNode map[string][]*PendingOperation - byDestNode map[string][]*PendingOperation - - mutex sync.RWMutex -} - -// NewPendingOperations creates a new pending operations tracker -func NewPendingOperations() *PendingOperations { - return &PendingOperations{ - byVolumeID: make(map[uint32]*PendingOperation), - byTaskID: make(map[string]*PendingOperation), - bySourceNode: make(map[string][]*PendingOperation), - byDestNode: make(map[string][]*PendingOperation), - } -} - -// AddOperation adds a pending operation -func (po *PendingOperations) AddOperation(op *PendingOperation) { - po.mutex.Lock() - defer po.mutex.Unlock() - - // Check for existing operation on this volume - if existing, exists := po.byVolumeID[op.VolumeID]; exists { - glog.V(1).Infof("Replacing existing pending operation on volume %d: %s -> %s", - op.VolumeID, existing.TaskID, op.TaskID) - po.removeOperationUnlocked(existing) - } - - // Add new operation - po.byVolumeID[op.VolumeID] = op - po.byTaskID[op.TaskID] = op - - // Add to node indexes - po.bySourceNode[op.SourceNode] = append(po.bySourceNode[op.SourceNode], op) - if op.DestNode != "" { - po.byDestNode[op.DestNode] = append(po.byDestNode[op.DestNode], op) - } - - glog.V(2).Infof("Added pending operation: volume %d, type %s, task %s, %s -> %s", - op.VolumeID, op.OperationType, op.TaskID, op.SourceNode, op.DestNode) -} - -// RemoveOperation removes a completed operation -func (po *PendingOperations) RemoveOperation(taskID string) { - po.mutex.Lock() - defer po.mutex.Unlock() - - if op, exists := po.byTaskID[taskID]; exists { - po.removeOperationUnlocked(op) - glog.V(2).Infof("Removed completed operation: volume %d, task %s", op.VolumeID, taskID) - } -} - -// removeOperationUnlocked removes an operation (must hold lock) -func (po *PendingOperations) removeOperationUnlocked(op *PendingOperation) { - delete(po.byVolumeID, op.VolumeID) - delete(po.byTaskID, op.TaskID) - - // Remove from source node list - if ops, exists := po.bySourceNode[op.SourceNode]; exists { - for i, other := range ops { - if other.TaskID == op.TaskID { - po.bySourceNode[op.SourceNode] = append(ops[:i], ops[i+1:]...) - break - } - } - } - - // Remove from dest node list - if op.DestNode != "" { - if ops, exists := po.byDestNode[op.DestNode]; exists { - for i, other := range ops { - if other.TaskID == op.TaskID { - po.byDestNode[op.DestNode] = append(ops[:i], ops[i+1:]...) - break - } - } - } - } -} - -// HasPendingOperationOnVolume checks if a volume has a pending operation -func (po *PendingOperations) HasPendingOperationOnVolume(volumeID uint32) bool { - po.mutex.RLock() - defer po.mutex.RUnlock() - - _, exists := po.byVolumeID[volumeID] - return exists -} - -// GetPendingOperationOnVolume returns the pending operation on a volume -func (po *PendingOperations) GetPendingOperationOnVolume(volumeID uint32) *PendingOperation { - po.mutex.RLock() - defer po.mutex.RUnlock() - - return po.byVolumeID[volumeID] -} - -// WouldConflictWithPending checks if a new operation would conflict with pending ones -func (po *PendingOperations) WouldConflictWithPending(volumeID uint32, opType PendingOperationType) bool { - po.mutex.RLock() - defer po.mutex.RUnlock() - - if existing, exists := po.byVolumeID[volumeID]; exists { - // Volume already has a pending operation - glog.V(3).Infof("Volume %d conflict: already has %s operation (task %s)", - volumeID, existing.OperationType, existing.TaskID) - return true - } - - return false -} - -// GetPendingCapacityImpactForNode calculates pending capacity changes for a node -func (po *PendingOperations) GetPendingCapacityImpactForNode(nodeID string) (incoming uint64, outgoing uint64) { - po.mutex.RLock() - defer po.mutex.RUnlock() - - // Calculate outgoing capacity (volumes leaving this node) - if ops, exists := po.bySourceNode[nodeID]; exists { - for _, op := range ops { - // Only count movement operations - if op.DestNode != "" { - outgoing += op.EstimatedSize - } - } - } - - // Calculate incoming capacity (volumes coming to this node) - if ops, exists := po.byDestNode[nodeID]; exists { - for _, op := range ops { - incoming += op.EstimatedSize - } - } - - return incoming, outgoing -} - -// FilterVolumeMetricsExcludingPending filters out volumes with pending operations -func (po *PendingOperations) FilterVolumeMetricsExcludingPending(metrics []*types.VolumeHealthMetrics) []*types.VolumeHealthMetrics { - po.mutex.RLock() - defer po.mutex.RUnlock() - - var filtered []*types.VolumeHealthMetrics - excludedCount := 0 - - for _, metric := range metrics { - if _, hasPending := po.byVolumeID[metric.VolumeID]; !hasPending { - filtered = append(filtered, metric) - } else { - excludedCount++ - glog.V(3).Infof("Excluding volume %d from scan due to pending operation", metric.VolumeID) - } - } - - if excludedCount > 0 { - glog.V(1).Infof("Filtered out %d volumes with pending operations from %d total volumes", - excludedCount, len(metrics)) - } - - return filtered -} - -// GetNodeCapacityProjection calculates projected capacity for a node -func (po *PendingOperations) GetNodeCapacityProjection(nodeID string, currentUsed uint64, totalCapacity uint64) NodeCapacityProjection { - incoming, outgoing := po.GetPendingCapacityImpactForNode(nodeID) - - projectedUsed := currentUsed + incoming - outgoing - projectedFree := totalCapacity - projectedUsed - - return NodeCapacityProjection{ - NodeID: nodeID, - CurrentUsed: currentUsed, - TotalCapacity: totalCapacity, - PendingIncoming: incoming, - PendingOutgoing: outgoing, - ProjectedUsed: projectedUsed, - ProjectedFree: projectedFree, - } -} - -// GetAllPendingOperations returns all pending operations -func (po *PendingOperations) GetAllPendingOperations() []*PendingOperation { - po.mutex.RLock() - defer po.mutex.RUnlock() - - var operations []*PendingOperation - for _, op := range po.byVolumeID { - operations = append(operations, op) - } - - return operations -} - -// UpdateOperationStatus updates the status of a pending operation -func (po *PendingOperations) UpdateOperationStatus(taskID string, status string) { - po.mutex.Lock() - defer po.mutex.Unlock() - - if op, exists := po.byTaskID[taskID]; exists { - op.Status = status - glog.V(3).Infof("Updated operation status: task %s, volume %d -> %s", taskID, op.VolumeID, status) - } -} - -// CleanupStaleOperations removes operations that have been running too long -func (po *PendingOperations) CleanupStaleOperations(maxAge time.Duration) int { - po.mutex.Lock() - defer po.mutex.Unlock() - - cutoff := time.Now().Add(-maxAge) - var staleOps []*PendingOperation - - for _, op := range po.byVolumeID { - if op.StartTime.Before(cutoff) { - staleOps = append(staleOps, op) - } - } - - for _, op := range staleOps { - po.removeOperationUnlocked(op) - glog.Warningf("Removed stale pending operation: volume %d, task %s, age %v", - op.VolumeID, op.TaskID, time.Since(op.StartTime)) - } - - return len(staleOps) -} - -// NodeCapacityProjection represents projected capacity for a node -type NodeCapacityProjection struct { - NodeID string `json:"node_id"` - CurrentUsed uint64 `json:"current_used"` - TotalCapacity uint64 `json:"total_capacity"` - PendingIncoming uint64 `json:"pending_incoming"` - PendingOutgoing uint64 `json:"pending_outgoing"` - ProjectedUsed uint64 `json:"projected_used"` - ProjectedFree uint64 `json:"projected_free"` -} - -// GetStats returns statistics about pending operations -func (po *PendingOperations) GetStats() PendingOperationsStats { - po.mutex.RLock() - defer po.mutex.RUnlock() - - stats := PendingOperationsStats{ - TotalOperations: len(po.byVolumeID), - ByType: make(map[PendingOperationType]int), - ByStatus: make(map[string]int), - } - - var totalSize uint64 - for _, op := range po.byVolumeID { - stats.ByType[op.OperationType]++ - stats.ByStatus[op.Status]++ - totalSize += op.EstimatedSize - } - - stats.TotalEstimatedSize = totalSize - return stats -} - -// PendingOperationsStats provides statistics about pending operations -type PendingOperationsStats struct { - TotalOperations int `json:"total_operations"` - ByType map[PendingOperationType]int `json:"by_type"` - ByStatus map[string]int `json:"by_status"` - TotalEstimatedSize uint64 `json:"total_estimated_size"` -} diff --git a/weed/admin/maintenance/pending_operations_test.go b/weed/admin/maintenance/pending_operations_test.go deleted file mode 100644 index 64bb591fb..000000000 --- a/weed/admin/maintenance/pending_operations_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package maintenance - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/worker/types" -) - -func TestPendingOperations_ConflictDetection(t *testing.T) { - pendingOps := NewPendingOperations() - - // Add a pending erasure coding operation on volume 123 - op := &PendingOperation{ - VolumeID: 123, - OperationType: OpTypeErasureCoding, - SourceNode: "node1", - TaskID: "task-001", - StartTime: time.Now(), - EstimatedSize: 1024 * 1024 * 1024, // 1GB - Collection: "test", - Status: "assigned", - } - - pendingOps.AddOperation(op) - - // Test conflict detection - if !pendingOps.HasPendingOperationOnVolume(123) { - t.Errorf("Expected volume 123 to have pending operation") - } - - if !pendingOps.WouldConflictWithPending(123, OpTypeVacuum) { - t.Errorf("Expected conflict when trying to add vacuum operation on volume 123") - } - - if pendingOps.HasPendingOperationOnVolume(124) { - t.Errorf("Expected volume 124 to have no pending operation") - } - - if pendingOps.WouldConflictWithPending(124, OpTypeVacuum) { - t.Errorf("Expected no conflict for volume 124") - } -} - -func TestPendingOperations_CapacityProjection(t *testing.T) { - pendingOps := NewPendingOperations() - - // Add operation moving volume from node1 to node2 - op1 := &PendingOperation{ - VolumeID: 100, - OperationType: OpTypeVolumeMove, - SourceNode: "node1", - DestNode: "node2", - TaskID: "task-001", - StartTime: time.Now(), - EstimatedSize: 2 * 1024 * 1024 * 1024, // 2GB - Collection: "test", - Status: "in_progress", - } - - // Add operation moving volume from node3 to node1 - op2 := &PendingOperation{ - VolumeID: 101, - OperationType: OpTypeVolumeMove, - SourceNode: "node3", - DestNode: "node1", - TaskID: "task-002", - StartTime: time.Now(), - EstimatedSize: 1 * 1024 * 1024 * 1024, // 1GB - Collection: "test", - Status: "assigned", - } - - pendingOps.AddOperation(op1) - pendingOps.AddOperation(op2) - - // Test capacity impact for node1 - incoming, outgoing := pendingOps.GetPendingCapacityImpactForNode("node1") - expectedIncoming := uint64(1 * 1024 * 1024 * 1024) // 1GB incoming - expectedOutgoing := uint64(2 * 1024 * 1024 * 1024) // 2GB outgoing - - if incoming != expectedIncoming { - t.Errorf("Expected incoming capacity %d, got %d", expectedIncoming, incoming) - } - - if outgoing != expectedOutgoing { - t.Errorf("Expected outgoing capacity %d, got %d", expectedOutgoing, outgoing) - } - - // Test projection for node1 - currentUsed := uint64(10 * 1024 * 1024 * 1024) // 10GB current - totalCapacity := uint64(50 * 1024 * 1024 * 1024) // 50GB total - - projection := pendingOps.GetNodeCapacityProjection("node1", currentUsed, totalCapacity) - - expectedProjectedUsed := currentUsed + incoming - outgoing // 10 + 1 - 2 = 9GB - expectedProjectedFree := totalCapacity - expectedProjectedUsed // 50 - 9 = 41GB - - if projection.ProjectedUsed != expectedProjectedUsed { - t.Errorf("Expected projected used %d, got %d", expectedProjectedUsed, projection.ProjectedUsed) - } - - if projection.ProjectedFree != expectedProjectedFree { - t.Errorf("Expected projected free %d, got %d", expectedProjectedFree, projection.ProjectedFree) - } -} - -func TestPendingOperations_VolumeFiltering(t *testing.T) { - pendingOps := NewPendingOperations() - - // Create volume metrics - metrics := []*types.VolumeHealthMetrics{ - {VolumeID: 100, Server: "node1"}, - {VolumeID: 101, Server: "node2"}, - {VolumeID: 102, Server: "node3"}, - {VolumeID: 103, Server: "node1"}, - } - - // Add pending operations on volumes 101 and 103 - op1 := &PendingOperation{ - VolumeID: 101, - OperationType: OpTypeVacuum, - SourceNode: "node2", - TaskID: "task-001", - StartTime: time.Now(), - EstimatedSize: 1024 * 1024 * 1024, - Status: "in_progress", - } - - op2 := &PendingOperation{ - VolumeID: 103, - OperationType: OpTypeErasureCoding, - SourceNode: "node1", - TaskID: "task-002", - StartTime: time.Now(), - EstimatedSize: 2 * 1024 * 1024 * 1024, - Status: "assigned", - } - - pendingOps.AddOperation(op1) - pendingOps.AddOperation(op2) - - // Filter metrics - filtered := pendingOps.FilterVolumeMetricsExcludingPending(metrics) - - // Should only have volumes 100 and 102 (101 and 103 are filtered out) - if len(filtered) != 2 { - t.Errorf("Expected 2 filtered metrics, got %d", len(filtered)) - } - - // Check that correct volumes remain - foundVolumes := make(map[uint32]bool) - for _, metric := range filtered { - foundVolumes[metric.VolumeID] = true - } - - if !foundVolumes[100] || !foundVolumes[102] { - t.Errorf("Expected volumes 100 and 102 to remain after filtering") - } - - if foundVolumes[101] || foundVolumes[103] { - t.Errorf("Expected volumes 101 and 103 to be filtered out") - } -} - -func TestPendingOperations_OperationLifecycle(t *testing.T) { - pendingOps := NewPendingOperations() - - // Add operation - op := &PendingOperation{ - VolumeID: 200, - OperationType: OpTypeVolumeBalance, - SourceNode: "node1", - DestNode: "node2", - TaskID: "task-balance-001", - StartTime: time.Now(), - EstimatedSize: 1024 * 1024 * 1024, - Status: "assigned", - } - - pendingOps.AddOperation(op) - - // Check it exists - if !pendingOps.HasPendingOperationOnVolume(200) { - t.Errorf("Expected volume 200 to have pending operation") - } - - // Update status - pendingOps.UpdateOperationStatus("task-balance-001", "in_progress") - - retrievedOp := pendingOps.GetPendingOperationOnVolume(200) - if retrievedOp == nil { - t.Errorf("Expected to retrieve pending operation for volume 200") - } else if retrievedOp.Status != "in_progress" { - t.Errorf("Expected operation status to be 'in_progress', got '%s'", retrievedOp.Status) - } - - // Complete operation - pendingOps.RemoveOperation("task-balance-001") - - if pendingOps.HasPendingOperationOnVolume(200) { - t.Errorf("Expected volume 200 to have no pending operation after removal") - } -} - -func TestPendingOperations_StaleCleanup(t *testing.T) { - pendingOps := NewPendingOperations() - - // Add recent operation - recentOp := &PendingOperation{ - VolumeID: 300, - OperationType: OpTypeVacuum, - SourceNode: "node1", - TaskID: "task-recent", - StartTime: time.Now(), - EstimatedSize: 1024 * 1024 * 1024, - Status: "in_progress", - } - - // Add stale operation (24 hours ago) - staleOp := &PendingOperation{ - VolumeID: 301, - OperationType: OpTypeErasureCoding, - SourceNode: "node2", - TaskID: "task-stale", - StartTime: time.Now().Add(-24 * time.Hour), - EstimatedSize: 2 * 1024 * 1024 * 1024, - Status: "in_progress", - } - - pendingOps.AddOperation(recentOp) - pendingOps.AddOperation(staleOp) - - // Clean up operations older than 1 hour - removedCount := pendingOps.CleanupStaleOperations(1 * time.Hour) - - if removedCount != 1 { - t.Errorf("Expected to remove 1 stale operation, removed %d", removedCount) - } - - // Recent operation should still exist - if !pendingOps.HasPendingOperationOnVolume(300) { - t.Errorf("Expected recent operation on volume 300 to still exist") - } - - // Stale operation should be removed - if pendingOps.HasPendingOperationOnVolume(301) { - t.Errorf("Expected stale operation on volume 301 to be removed") - } -} diff --git a/weed/admin/static/css/admin.css b/weed/admin/static/css/admin.css deleted file mode 100644 index a945d320e..000000000 --- a/weed/admin/static/css/admin.css +++ /dev/null @@ -1,218 +0,0 @@ -/* SeaweedFS Dashboard Custom Styles */ - -/* Sidebar Styles */ -.sidebar { - position: fixed; - top: 56px; - bottom: 0; - left: 0; - z-index: 100; - padding: 48px 0 0; - box-shadow: inset -1px 0 0 rgba(0, 0, 0, .1); - overflow-y: auto; -} - -.sidebar-heading { - font-size: .75rem; - text-transform: uppercase; -} - -.sidebar .nav-link { - font-weight: 500; - color: #333; -} - -.sidebar .nav-link:hover { - color: #007bff; -} - -.sidebar .nav-link.active { - color: #007bff; -} - -.sidebar .nav-link:hover .feather, -.sidebar .nav-link.active .feather { - color: inherit; -} - -/* Main content area */ -main { - margin-left: 240px; -} - -@media (max-width: 767.98px) { - .sidebar { - top: 5rem; - } - main { - margin-left: 0; - } -} - -/* Custom card styles */ -.border-left-primary { - border-left: 0.25rem solid #4e73df !important; -} - -.border-left-success { - border-left: 0.25rem solid #1cc88a !important; -} - -.border-left-info { - border-left: 0.25rem solid #36b9cc !important; -} - -.border-left-warning { - border-left: 0.25rem solid #f6c23e !important; -} - -.border-left-danger { - border-left: 0.25rem solid #e74a3b !important; -} - -/* Status badges */ -.badge { - font-size: 0.875em; -} - -/* Progress bars */ -.progress { - background-color: #f8f9fc; - border: 1px solid #e3e6f0; -} - -.progress-bar { - font-size: 0.75rem; - font-weight: 700; - color: #fff; - text-align: center; -} - -/* Tables */ -.table { - color: #5a5c69; -} - -.table thead th { - vertical-align: bottom; - border-bottom: 1px solid #e3e6f0; - font-weight: 700; - color: #5a5c69; - background-color: #f8f9fc; -} - -.table-bordered { - border: 1px solid #e3e6f0; -} - -.table-bordered th, -.table-bordered td { - border: 1px solid #e3e6f0; -} - -/* Cards */ -.card { - box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; - border: 1px solid #e3e6f0; -} - -.card-header { - background-color: #f8f9fc; - border-bottom: 1px solid #e3e6f0; -} - -/* Buttons */ -.btn-primary { - background-color: #4e73df; - border-color: #4e73df; -} - -.btn-primary:hover { - background-color: #2e59d9; - border-color: #2653d4; -} - -/* Text utilities */ -.text-gray-800 { - color: #5a5c69 !important; -} - -.text-gray-300 { - color: #dddfeb !important; -} - -/* Animation for HTMX updates */ -.htmx-indicator { - opacity: 0; - transition: opacity 500ms ease-in; -} - -.htmx-request .htmx-indicator { - opacity: 1; -} - -.htmx-request.htmx-indicator { - opacity: 1; -} - -/* Loading spinner */ -.spinner-border-sm { - width: 1rem; - height: 1rem; -} - -/* Custom utilities */ -.bg-gradient-primary { - background: linear-gradient(180deg, #4e73df 10%, #224abe 100%); -} - -.shadow { - box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; -} - -/* Collapsible menu styles */ -.nav-link[data-bs-toggle="collapse"] { - position: relative; -} - -.nav-link[data-bs-toggle="collapse"] .fa-chevron-down { - transition: transform 0.2s ease; -} - -.nav-link[data-bs-toggle="collapse"][aria-expanded="true"] .fa-chevron-down { - transform: rotate(180deg); -} - -.nav-link[data-bs-toggle="collapse"]:not(.collapsed) { - color: #007bff; -} - -.nav-link[data-bs-toggle="collapse"]:not(.collapsed) .fa-chevron-down { - color: #007bff; -} - -/* Submenu styles */ -.nav .nav { - border-left: 1px solid #e3e6f0; - margin-left: 0.5rem; -} - -.nav .nav .nav-link { - font-size: 0.875rem; - padding-left: 1rem; -} - -.nav .nav .nav-link:hover { - background-color: #f8f9fc; -} - -/* Responsive adjustments */ -@media (max-width: 576px) { - .card-body { - padding: 1rem; - } - - .h5 { - font-size: 1rem; - } -} \ No newline at end of file diff --git a/weed/admin/static/css/bootstrap.min.css b/weed/admin/static/css/bootstrap.min.css deleted file mode 100644 index f5910accb..000000000 --- a/weed/admin/static/css/bootstrap.min.css +++ /dev/null @@ -1,6 +0,0 @@ -@charset "UTF-8";/*! - * Bootstrap v5.3.2 (https://getbootstrap.com/) - * Copyright 2011-2023 The Bootstrap Authors - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */:root,[data-bs-theme=light]{--bs-blue:#0d6efd;--bs-indigo:#6610f2;--bs-purple:#6f42c1;--bs-pink:#d63384;--bs-red:#dc3545;--bs-orange:#fd7e14;--bs-yellow:#ffc107;--bs-green:#198754;--bs-teal:#20c997;--bs-cyan:#0dcaf0;--bs-black:#000;--bs-white:#fff;--bs-gray:#6c757d;--bs-gray-dark:#343a40;--bs-gray-100:#f8f9fa;--bs-gray-200:#e9ecef;--bs-gray-300:#dee2e6;--bs-gray-400:#ced4da;--bs-gray-500:#adb5bd;--bs-gray-600:#6c757d;--bs-gray-700:#495057;--bs-gray-800:#343a40;--bs-gray-900:#212529;--bs-primary:#0d6efd;--bs-secondary:#6c757d;--bs-success:#198754;--bs-info:#0dcaf0;--bs-warning:#ffc107;--bs-danger:#dc3545;--bs-light:#f8f9fa;--bs-dark:#212529;--bs-primary-rgb:13,110,253;--bs-secondary-rgb:108,117,125;--bs-success-rgb:25,135,84;--bs-info-rgb:13,202,240;--bs-warning-rgb:255,193,7;--bs-danger-rgb:220,53,69;--bs-light-rgb:248,249,250;--bs-dark-rgb:33,37,41;--bs-primary-text-emphasis:#052c65;--bs-secondary-text-emphasis:#2b2f32;--bs-success-text-emphasis:#0a3622;--bs-info-text-emphasis:#055160;--bs-warning-text-emphasis:#664d03;--bs-danger-text-emphasis:#58151c;--bs-light-text-emphasis:#495057;--bs-dark-text-emphasis:#495057;--bs-primary-bg-subtle:#cfe2ff;--bs-secondary-bg-subtle:#e2e3e5;--bs-success-bg-subtle:#d1e7dd;--bs-info-bg-subtle:#cff4fc;--bs-warning-bg-subtle:#fff3cd;--bs-danger-bg-subtle:#f8d7da;--bs-light-bg-subtle:#fcfcfd;--bs-dark-bg-subtle:#ced4da;--bs-primary-border-subtle:#9ec5fe;--bs-secondary-border-subtle:#c4c8cb;--bs-success-border-subtle:#a3cfbb;--bs-info-border-subtle:#9eeaf9;--bs-warning-border-subtle:#ffe69c;--bs-danger-border-subtle:#f1aeb5;--bs-light-border-subtle:#e9ecef;--bs-dark-border-subtle:#adb5bd;--bs-white-rgb:255,255,255;--bs-black-rgb:0,0,0;--bs-font-sans-serif:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue","Noto Sans","Liberation Sans",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--bs-font-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--bs-gradient:linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0));--bs-body-font-family:var(--bs-font-sans-serif);--bs-body-font-size:1rem;--bs-body-font-weight:400;--bs-body-line-height:1.5;--bs-body-color:#212529;--bs-body-color-rgb:33,37,41;--bs-body-bg:#fff;--bs-body-bg-rgb:255,255,255;--bs-emphasis-color:#000;--bs-emphasis-color-rgb:0,0,0;--bs-secondary-color:rgba(33, 37, 41, 0.75);--bs-secondary-color-rgb:33,37,41;--bs-secondary-bg:#e9ecef;--bs-secondary-bg-rgb:233,236,239;--bs-tertiary-color:rgba(33, 37, 41, 0.5);--bs-tertiary-color-rgb:33,37,41;--bs-tertiary-bg:#f8f9fa;--bs-tertiary-bg-rgb:248,249,250;--bs-heading-color:inherit;--bs-link-color:#0d6efd;--bs-link-color-rgb:13,110,253;--bs-link-decoration:underline;--bs-link-hover-color:#0a58ca;--bs-link-hover-color-rgb:10,88,202;--bs-code-color:#d63384;--bs-highlight-color:#212529;--bs-highlight-bg:#fff3cd;--bs-border-width:1px;--bs-border-style:solid;--bs-border-color:#dee2e6;--bs-border-color-translucent:rgba(0, 0, 0, 0.175);--bs-border-radius:0.375rem;--bs-border-radius-sm:0.25rem;--bs-border-radius-lg:0.5rem;--bs-border-radius-xl:1rem;--bs-border-radius-xxl:2rem;--bs-border-radius-2xl:var(--bs-border-radius-xxl);--bs-border-radius-pill:50rem;--bs-box-shadow:0 0.5rem 1rem rgba(0, 0, 0, 0.15);--bs-box-shadow-sm:0 0.125rem 0.25rem rgba(0, 0, 0, 0.075);--bs-box-shadow-lg:0 1rem 3rem rgba(0, 0, 0, 0.175);--bs-box-shadow-inset:inset 0 1px 2px rgba(0, 0, 0, 0.075);--bs-focus-ring-width:0.25rem;--bs-focus-ring-opacity:0.25;--bs-focus-ring-color:rgba(13, 110, 253, 0.25);--bs-form-valid-color:#198754;--bs-form-valid-border-color:#198754;--bs-form-invalid-color:#dc3545;--bs-form-invalid-border-color:#dc3545}[data-bs-theme=dark]{color-scheme:dark;--bs-body-color:#dee2e6;--bs-body-color-rgb:222,226,230;--bs-body-bg:#212529;--bs-body-bg-rgb:33,37,41;--bs-emphasis-color:#fff;--bs-emphasis-color-rgb:255,255,255;--bs-secondary-color:rgba(222, 226, 230, 0.75);--bs-secondary-color-rgb:222,226,230;--bs-secondary-bg:#343a40;--bs-secondary-bg-rgb:52,58,64;--bs-tertiary-color:rgba(222, 226, 230, 0.5);--bs-tertiary-color-rgb:222,226,230;--bs-tertiary-bg:#2b3035;--bs-tertiary-bg-rgb:43,48,53;--bs-primary-text-emphasis:#6ea8fe;--bs-secondary-text-emphasis:#a7acb1;--bs-success-text-emphasis:#75b798;--bs-info-text-emphasis:#6edff6;--bs-warning-text-emphasis:#ffda6a;--bs-danger-text-emphasis:#ea868f;--bs-light-text-emphasis:#f8f9fa;--bs-dark-text-emphasis:#dee2e6;--bs-primary-bg-subtle:#031633;--bs-secondary-bg-subtle:#161719;--bs-success-bg-subtle:#051b11;--bs-info-bg-subtle:#032830;--bs-warning-bg-subtle:#332701;--bs-danger-bg-subtle:#2c0b0e;--bs-light-bg-subtle:#343a40;--bs-dark-bg-subtle:#1a1d20;--bs-primary-border-subtle:#084298;--bs-secondary-border-subtle:#41464b;--bs-success-border-subtle:#0f5132;--bs-info-border-subtle:#087990;--bs-warning-border-subtle:#997404;--bs-danger-border-subtle:#842029;--bs-light-border-subtle:#495057;--bs-dark-border-subtle:#343a40;--bs-heading-color:inherit;--bs-link-color:#6ea8fe;--bs-link-hover-color:#8bb9fe;--bs-link-color-rgb:110,168,254;--bs-link-hover-color-rgb:139,185,254;--bs-code-color:#e685b5;--bs-highlight-color:#dee2e6;--bs-highlight-bg:#664d03;--bs-border-color:#495057;--bs-border-color-translucent:rgba(255, 255, 255, 0.15);--bs-form-valid-color:#75b798;--bs-form-valid-border-color:#75b798;--bs-form-invalid-color:#ea868f;--bs-form-invalid-border-color:#ea868f}*,::after,::before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{margin:0;font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);color:var(--bs-body-color);text-align:var(--bs-body-text-align);background-color:var(--bs-body-bg);-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}hr{margin:1rem 0;color:inherit;border:0;border-top:var(--bs-border-width) solid;opacity:.25}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem;font-weight:500;line-height:1.2;color:var(--bs-heading-color)}.h1,h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){.h1,h1{font-size:2.5rem}}.h2,h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){.h2,h2{font-size:2rem}}.h3,h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){.h3,h3{font-size:1.75rem}}.h4,h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){.h4,h4{font-size:1.5rem}}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}p{margin-top:0;margin-bottom:1rem}abbr[title]{-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul{padding-left:2rem}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}.small,small{font-size:.875em}.mark,mark{padding:.1875em;color:var(--bs-highlight-color);background-color:var(--bs-highlight-bg)}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:rgba(var(--bs-link-color-rgb),var(--bs-link-opacity,1));text-decoration:underline}a:hover{--bs-link-color-rgb:var(--bs-link-hover-color-rgb)}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:var(--bs-font-monospace);font-size:1em}pre{display:block;margin-top:0;margin-bottom:1rem;overflow:auto;font-size:.875em}pre code{font-size:inherit;color:inherit;word-break:normal}code{font-size:.875em;color:var(--bs-code-color);word-wrap:break-word}a>code{color:inherit}kbd{padding:.1875rem .375rem;font-size:.875em;color:var(--bs-body-bg);background-color:var(--bs-body-color);border-radius:.25rem}kbd kbd{padding:0;font-size:1em}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{caption-side:bottom;border-collapse:collapse}caption{padding-top:.5rem;padding-bottom:.5rem;color:var(--bs-secondary-color);text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border-color:inherit;border-style:solid;border-width:0}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]:not([type=date]):not([type=datetime-local]):not([type=month]):not([type=week]):not([type=time])::-webkit-calendar-picker-indicator{display:none!important}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{padding:0;border-style:none}textarea{resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{float:left;width:100%;padding:0;margin-bottom:.5rem;font-size:calc(1.275rem + .3vw);line-height:inherit}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}::file-selector-button{font:inherit;-webkit-appearance:button}output{display:inline-block}iframe{border:0}summary{display:list-item;cursor:pointer}progress{vertical-align:baseline}[hidden]{display:none!important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-6{font-size:2.5rem}}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{margin-top:-1rem;margin-bottom:1rem;font-size:.875em;color:#6c757d}.blockquote-footer::before{content:"โ€”ย "}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:var(--bs-body-bg);border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:.875em;color:var(--bs-secondary-color)}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{--bs-gutter-x:1.5rem;--bs-gutter-y:0;width:100%;padding-right:calc(var(--bs-gutter-x) * .5);padding-left:calc(var(--bs-gutter-x) * .5);margin-right:auto;margin-left:auto}@media (min-width:576px){.container,.container-sm{max-width:540px}}@media (min-width:768px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:992px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1140px}}@media (min-width:1400px){.container,.container-lg,.container-md,.container-sm,.container-xl,.container-xxl{max-width:1320px}}:root{--bs-breakpoint-xs:0;--bs-breakpoint-sm:576px;--bs-breakpoint-md:768px;--bs-breakpoint-lg:992px;--bs-breakpoint-xl:1200px;--bs-breakpoint-xxl:1400px}.row{--bs-gutter-x:1.5rem;--bs-gutter-y:0;display:flex;flex-wrap:wrap;margin-top:calc(-1 * var(--bs-gutter-y));margin-right:calc(-.5 * var(--bs-gutter-x));margin-left:calc(-.5 * var(--bs-gutter-x))}.row>*{flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--bs-gutter-x) * .5);padding-left:calc(var(--bs-gutter-x) * .5);margin-top:var(--bs-gutter-y)}.col{flex:1 0 0%}.row-cols-auto>*{flex:0 0 auto;width:auto}.row-cols-1>*{flex:0 0 auto;width:100%}.row-cols-2>*{flex:0 0 auto;width:50%}.row-cols-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-4>*{flex:0 0 auto;width:25%}.row-cols-5>*{flex:0 0 auto;width:20%}.row-cols-6>*{flex:0 0 auto;width:16.66666667%}.col-auto{flex:0 0 auto;width:auto}.col-1{flex:0 0 auto;width:8.33333333%}.col-2{flex:0 0 auto;width:16.66666667%}.col-3{flex:0 0 auto;width:25%}.col-4{flex:0 0 auto;width:33.33333333%}.col-5{flex:0 0 auto;width:41.66666667%}.col-6{flex:0 0 auto;width:50%}.col-7{flex:0 0 auto;width:58.33333333%}.col-8{flex:0 0 auto;width:66.66666667%}.col-9{flex:0 0 auto;width:75%}.col-10{flex:0 0 auto;width:83.33333333%}.col-11{flex:0 0 auto;width:91.66666667%}.col-12{flex:0 0 auto;width:100%}.offset-1{margin-left:8.33333333%}.offset-2{margin-left:16.66666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333333%}.offset-5{margin-left:41.66666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333333%}.offset-8{margin-left:66.66666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333333%}.offset-11{margin-left:91.66666667%}.g-0,.gx-0{--bs-gutter-x:0}.g-0,.gy-0{--bs-gutter-y:0}.g-1,.gx-1{--bs-gutter-x:0.25rem}.g-1,.gy-1{--bs-gutter-y:0.25rem}.g-2,.gx-2{--bs-gutter-x:0.5rem}.g-2,.gy-2{--bs-gutter-y:0.5rem}.g-3,.gx-3{--bs-gutter-x:1rem}.g-3,.gy-3{--bs-gutter-y:1rem}.g-4,.gx-4{--bs-gutter-x:1.5rem}.g-4,.gy-4{--bs-gutter-y:1.5rem}.g-5,.gx-5{--bs-gutter-x:3rem}.g-5,.gy-5{--bs-gutter-y:3rem}@media (min-width:576px){.col-sm{flex:1 0 0%}.row-cols-sm-auto>*{flex:0 0 auto;width:auto}.row-cols-sm-1>*{flex:0 0 auto;width:100%}.row-cols-sm-2>*{flex:0 0 auto;width:50%}.row-cols-sm-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-sm-4>*{flex:0 0 auto;width:25%}.row-cols-sm-5>*{flex:0 0 auto;width:20%}.row-cols-sm-6>*{flex:0 0 auto;width:16.66666667%}.col-sm-auto{flex:0 0 auto;width:auto}.col-sm-1{flex:0 0 auto;width:8.33333333%}.col-sm-2{flex:0 0 auto;width:16.66666667%}.col-sm-3{flex:0 0 auto;width:25%}.col-sm-4{flex:0 0 auto;width:33.33333333%}.col-sm-5{flex:0 0 auto;width:41.66666667%}.col-sm-6{flex:0 0 auto;width:50%}.col-sm-7{flex:0 0 auto;width:58.33333333%}.col-sm-8{flex:0 0 auto;width:66.66666667%}.col-sm-9{flex:0 0 auto;width:75%}.col-sm-10{flex:0 0 auto;width:83.33333333%}.col-sm-11{flex:0 0 auto;width:91.66666667%}.col-sm-12{flex:0 0 auto;width:100%}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333333%}.offset-sm-2{margin-left:16.66666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333333%}.offset-sm-5{margin-left:41.66666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333333%}.offset-sm-8{margin-left:66.66666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333333%}.offset-sm-11{margin-left:91.66666667%}.g-sm-0,.gx-sm-0{--bs-gutter-x:0}.g-sm-0,.gy-sm-0{--bs-gutter-y:0}.g-sm-1,.gx-sm-1{--bs-gutter-x:0.25rem}.g-sm-1,.gy-sm-1{--bs-gutter-y:0.25rem}.g-sm-2,.gx-sm-2{--bs-gutter-x:0.5rem}.g-sm-2,.gy-sm-2{--bs-gutter-y:0.5rem}.g-sm-3,.gx-sm-3{--bs-gutter-x:1rem}.g-sm-3,.gy-sm-3{--bs-gutter-y:1rem}.g-sm-4,.gx-sm-4{--bs-gutter-x:1.5rem}.g-sm-4,.gy-sm-4{--bs-gutter-y:1.5rem}.g-sm-5,.gx-sm-5{--bs-gutter-x:3rem}.g-sm-5,.gy-sm-5{--bs-gutter-y:3rem}}@media (min-width:768px){.col-md{flex:1 0 0%}.row-cols-md-auto>*{flex:0 0 auto;width:auto}.row-cols-md-1>*{flex:0 0 auto;width:100%}.row-cols-md-2>*{flex:0 0 auto;width:50%}.row-cols-md-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-md-4>*{flex:0 0 auto;width:25%}.row-cols-md-5>*{flex:0 0 auto;width:20%}.row-cols-md-6>*{flex:0 0 auto;width:16.66666667%}.col-md-auto{flex:0 0 auto;width:auto}.col-md-1{flex:0 0 auto;width:8.33333333%}.col-md-2{flex:0 0 auto;width:16.66666667%}.col-md-3{flex:0 0 auto;width:25%}.col-md-4{flex:0 0 auto;width:33.33333333%}.col-md-5{flex:0 0 auto;width:41.66666667%}.col-md-6{flex:0 0 auto;width:50%}.col-md-7{flex:0 0 auto;width:58.33333333%}.col-md-8{flex:0 0 auto;width:66.66666667%}.col-md-9{flex:0 0 auto;width:75%}.col-md-10{flex:0 0 auto;width:83.33333333%}.col-md-11{flex:0 0 auto;width:91.66666667%}.col-md-12{flex:0 0 auto;width:100%}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333333%}.offset-md-2{margin-left:16.66666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333333%}.offset-md-5{margin-left:41.66666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333333%}.offset-md-8{margin-left:66.66666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333333%}.offset-md-11{margin-left:91.66666667%}.g-md-0,.gx-md-0{--bs-gutter-x:0}.g-md-0,.gy-md-0{--bs-gutter-y:0}.g-md-1,.gx-md-1{--bs-gutter-x:0.25rem}.g-md-1,.gy-md-1{--bs-gutter-y:0.25rem}.g-md-2,.gx-md-2{--bs-gutter-x:0.5rem}.g-md-2,.gy-md-2{--bs-gutter-y:0.5rem}.g-md-3,.gx-md-3{--bs-gutter-x:1rem}.g-md-3,.gy-md-3{--bs-gutter-y:1rem}.g-md-4,.gx-md-4{--bs-gutter-x:1.5rem}.g-md-4,.gy-md-4{--bs-gutter-y:1.5rem}.g-md-5,.gx-md-5{--bs-gutter-x:3rem}.g-md-5,.gy-md-5{--bs-gutter-y:3rem}}@media (min-width:992px){.col-lg{flex:1 0 0%}.row-cols-lg-auto>*{flex:0 0 auto;width:auto}.row-cols-lg-1>*{flex:0 0 auto;width:100%}.row-cols-lg-2>*{flex:0 0 auto;width:50%}.row-cols-lg-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-lg-4>*{flex:0 0 auto;width:25%}.row-cols-lg-5>*{flex:0 0 auto;width:20%}.row-cols-lg-6>*{flex:0 0 auto;width:16.66666667%}.col-lg-auto{flex:0 0 auto;width:auto}.col-lg-1{flex:0 0 auto;width:8.33333333%}.col-lg-2{flex:0 0 auto;width:16.66666667%}.col-lg-3{flex:0 0 auto;width:25%}.col-lg-4{flex:0 0 auto;width:33.33333333%}.col-lg-5{flex:0 0 auto;width:41.66666667%}.col-lg-6{flex:0 0 auto;width:50%}.col-lg-7{flex:0 0 auto;width:58.33333333%}.col-lg-8{flex:0 0 auto;width:66.66666667%}.col-lg-9{flex:0 0 auto;width:75%}.col-lg-10{flex:0 0 auto;width:83.33333333%}.col-lg-11{flex:0 0 auto;width:91.66666667%}.col-lg-12{flex:0 0 auto;width:100%}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333333%}.offset-lg-2{margin-left:16.66666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333333%}.offset-lg-5{margin-left:41.66666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333333%}.offset-lg-8{margin-left:66.66666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333333%}.offset-lg-11{margin-left:91.66666667%}.g-lg-0,.gx-lg-0{--bs-gutter-x:0}.g-lg-0,.gy-lg-0{--bs-gutter-y:0}.g-lg-1,.gx-lg-1{--bs-gutter-x:0.25rem}.g-lg-1,.gy-lg-1{--bs-gutter-y:0.25rem}.g-lg-2,.gx-lg-2{--bs-gutter-x:0.5rem}.g-lg-2,.gy-lg-2{--bs-gutter-y:0.5rem}.g-lg-3,.gx-lg-3{--bs-gutter-x:1rem}.g-lg-3,.gy-lg-3{--bs-gutter-y:1rem}.g-lg-4,.gx-lg-4{--bs-gutter-x:1.5rem}.g-lg-4,.gy-lg-4{--bs-gutter-y:1.5rem}.g-lg-5,.gx-lg-5{--bs-gutter-x:3rem}.g-lg-5,.gy-lg-5{--bs-gutter-y:3rem}}@media (min-width:1200px){.col-xl{flex:1 0 0%}.row-cols-xl-auto>*{flex:0 0 auto;width:auto}.row-cols-xl-1>*{flex:0 0 auto;width:100%}.row-cols-xl-2>*{flex:0 0 auto;width:50%}.row-cols-xl-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-xl-4>*{flex:0 0 auto;width:25%}.row-cols-xl-5>*{flex:0 0 auto;width:20%}.row-cols-xl-6>*{flex:0 0 auto;width:16.66666667%}.col-xl-auto{flex:0 0 auto;width:auto}.col-xl-1{flex:0 0 auto;width:8.33333333%}.col-xl-2{flex:0 0 auto;width:16.66666667%}.col-xl-3{flex:0 0 auto;width:25%}.col-xl-4{flex:0 0 auto;width:33.33333333%}.col-xl-5{flex:0 0 auto;width:41.66666667%}.col-xl-6{flex:0 0 auto;width:50%}.col-xl-7{flex:0 0 auto;width:58.33333333%}.col-xl-8{flex:0 0 auto;width:66.66666667%}.col-xl-9{flex:0 0 auto;width:75%}.col-xl-10{flex:0 0 auto;width:83.33333333%}.col-xl-11{flex:0 0 auto;width:91.66666667%}.col-xl-12{flex:0 0 auto;width:100%}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333333%}.offset-xl-2{margin-left:16.66666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333333%}.offset-xl-5{margin-left:41.66666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333333%}.offset-xl-8{margin-left:66.66666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333333%}.offset-xl-11{margin-left:91.66666667%}.g-xl-0,.gx-xl-0{--bs-gutter-x:0}.g-xl-0,.gy-xl-0{--bs-gutter-y:0}.g-xl-1,.gx-xl-1{--bs-gutter-x:0.25rem}.g-xl-1,.gy-xl-1{--bs-gutter-y:0.25rem}.g-xl-2,.gx-xl-2{--bs-gutter-x:0.5rem}.g-xl-2,.gy-xl-2{--bs-gutter-y:0.5rem}.g-xl-3,.gx-xl-3{--bs-gutter-x:1rem}.g-xl-3,.gy-xl-3{--bs-gutter-y:1rem}.g-xl-4,.gx-xl-4{--bs-gutter-x:1.5rem}.g-xl-4,.gy-xl-4{--bs-gutter-y:1.5rem}.g-xl-5,.gx-xl-5{--bs-gutter-x:3rem}.g-xl-5,.gy-xl-5{--bs-gutter-y:3rem}}@media (min-width:1400px){.col-xxl{flex:1 0 0%}.row-cols-xxl-auto>*{flex:0 0 auto;width:auto}.row-cols-xxl-1>*{flex:0 0 auto;width:100%}.row-cols-xxl-2>*{flex:0 0 auto;width:50%}.row-cols-xxl-3>*{flex:0 0 auto;width:33.33333333%}.row-cols-xxl-4>*{flex:0 0 auto;width:25%}.row-cols-xxl-5>*{flex:0 0 auto;width:20%}.row-cols-xxl-6>*{flex:0 0 auto;width:16.66666667%}.col-xxl-auto{flex:0 0 auto;width:auto}.col-xxl-1{flex:0 0 auto;width:8.33333333%}.col-xxl-2{flex:0 0 auto;width:16.66666667%}.col-xxl-3{flex:0 0 auto;width:25%}.col-xxl-4{flex:0 0 auto;width:33.33333333%}.col-xxl-5{flex:0 0 auto;width:41.66666667%}.col-xxl-6{flex:0 0 auto;width:50%}.col-xxl-7{flex:0 0 auto;width:58.33333333%}.col-xxl-8{flex:0 0 auto;width:66.66666667%}.col-xxl-9{flex:0 0 auto;width:75%}.col-xxl-10{flex:0 0 auto;width:83.33333333%}.col-xxl-11{flex:0 0 auto;width:91.66666667%}.col-xxl-12{flex:0 0 auto;width:100%}.offset-xxl-0{margin-left:0}.offset-xxl-1{margin-left:8.33333333%}.offset-xxl-2{margin-left:16.66666667%}.offset-xxl-3{margin-left:25%}.offset-xxl-4{margin-left:33.33333333%}.offset-xxl-5{margin-left:41.66666667%}.offset-xxl-6{margin-left:50%}.offset-xxl-7{margin-left:58.33333333%}.offset-xxl-8{margin-left:66.66666667%}.offset-xxl-9{margin-left:75%}.offset-xxl-10{margin-left:83.33333333%}.offset-xxl-11{margin-left:91.66666667%}.g-xxl-0,.gx-xxl-0{--bs-gutter-x:0}.g-xxl-0,.gy-xxl-0{--bs-gutter-y:0}.g-xxl-1,.gx-xxl-1{--bs-gutter-x:0.25rem}.g-xxl-1,.gy-xxl-1{--bs-gutter-y:0.25rem}.g-xxl-2,.gx-xxl-2{--bs-gutter-x:0.5rem}.g-xxl-2,.gy-xxl-2{--bs-gutter-y:0.5rem}.g-xxl-3,.gx-xxl-3{--bs-gutter-x:1rem}.g-xxl-3,.gy-xxl-3{--bs-gutter-y:1rem}.g-xxl-4,.gx-xxl-4{--bs-gutter-x:1.5rem}.g-xxl-4,.gy-xxl-4{--bs-gutter-y:1.5rem}.g-xxl-5,.gx-xxl-5{--bs-gutter-x:3rem}.g-xxl-5,.gy-xxl-5{--bs-gutter-y:3rem}}.table{--bs-table-color-type:initial;--bs-table-bg-type:initial;--bs-table-color-state:initial;--bs-table-bg-state:initial;--bs-table-color:var(--bs-emphasis-color);--bs-table-bg:var(--bs-body-bg);--bs-table-border-color:var(--bs-border-color);--bs-table-accent-bg:transparent;--bs-table-striped-color:var(--bs-emphasis-color);--bs-table-striped-bg:rgba(var(--bs-emphasis-color-rgb), 0.05);--bs-table-active-color:var(--bs-emphasis-color);--bs-table-active-bg:rgba(var(--bs-emphasis-color-rgb), 0.1);--bs-table-hover-color:var(--bs-emphasis-color);--bs-table-hover-bg:rgba(var(--bs-emphasis-color-rgb), 0.075);width:100%;margin-bottom:1rem;vertical-align:top;border-color:var(--bs-table-border-color)}.table>:not(caption)>*>*{padding:.5rem .5rem;color:var(--bs-table-color-state,var(--bs-table-color-type,var(--bs-table-color)));background-color:var(--bs-table-bg);border-bottom-width:var(--bs-border-width);box-shadow:inset 0 0 0 9999px var(--bs-table-bg-state,var(--bs-table-bg-type,var(--bs-table-accent-bg)))}.table>tbody{vertical-align:inherit}.table>thead{vertical-align:bottom}.table-group-divider{border-top:calc(var(--bs-border-width) * 2) solid currentcolor}.caption-top{caption-side:top}.table-sm>:not(caption)>*>*{padding:.25rem .25rem}.table-bordered>:not(caption)>*{border-width:var(--bs-border-width) 0}.table-bordered>:not(caption)>*>*{border-width:0 var(--bs-border-width)}.table-borderless>:not(caption)>*>*{border-bottom-width:0}.table-borderless>:not(:first-child){border-top-width:0}.table-striped>tbody>tr:nth-of-type(odd)>*{--bs-table-color-type:var(--bs-table-striped-color);--bs-table-bg-type:var(--bs-table-striped-bg)}.table-striped-columns>:not(caption)>tr>:nth-child(2n){--bs-table-color-type:var(--bs-table-striped-color);--bs-table-bg-type:var(--bs-table-striped-bg)}.table-active{--bs-table-color-state:var(--bs-table-active-color);--bs-table-bg-state:var(--bs-table-active-bg)}.table-hover>tbody>tr:hover>*{--bs-table-color-state:var(--bs-table-hover-color);--bs-table-bg-state:var(--bs-table-hover-bg)}.table-primary{--bs-table-color:#000;--bs-table-bg:#cfe2ff;--bs-table-border-color:#a6b5cc;--bs-table-striped-bg:#c5d7f2;--bs-table-striped-color:#000;--bs-table-active-bg:#bacbe6;--bs-table-active-color:#000;--bs-table-hover-bg:#bfd1ec;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-secondary{--bs-table-color:#000;--bs-table-bg:#e2e3e5;--bs-table-border-color:#b5b6b7;--bs-table-striped-bg:#d7d8da;--bs-table-striped-color:#000;--bs-table-active-bg:#cbccce;--bs-table-active-color:#000;--bs-table-hover-bg:#d1d2d4;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-success{--bs-table-color:#000;--bs-table-bg:#d1e7dd;--bs-table-border-color:#a7b9b1;--bs-table-striped-bg:#c7dbd2;--bs-table-striped-color:#000;--bs-table-active-bg:#bcd0c7;--bs-table-active-color:#000;--bs-table-hover-bg:#c1d6cc;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-info{--bs-table-color:#000;--bs-table-bg:#cff4fc;--bs-table-border-color:#a6c3ca;--bs-table-striped-bg:#c5e8ef;--bs-table-striped-color:#000;--bs-table-active-bg:#badce3;--bs-table-active-color:#000;--bs-table-hover-bg:#bfe2e9;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-warning{--bs-table-color:#000;--bs-table-bg:#fff3cd;--bs-table-border-color:#ccc2a4;--bs-table-striped-bg:#f2e7c3;--bs-table-striped-color:#000;--bs-table-active-bg:#e6dbb9;--bs-table-active-color:#000;--bs-table-hover-bg:#ece1be;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-danger{--bs-table-color:#000;--bs-table-bg:#f8d7da;--bs-table-border-color:#c6acae;--bs-table-striped-bg:#eccccf;--bs-table-striped-color:#000;--bs-table-active-bg:#dfc2c4;--bs-table-active-color:#000;--bs-table-hover-bg:#e5c7ca;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-light{--bs-table-color:#000;--bs-table-bg:#f8f9fa;--bs-table-border-color:#c6c7c8;--bs-table-striped-bg:#ecedee;--bs-table-striped-color:#000;--bs-table-active-bg:#dfe0e1;--bs-table-active-color:#000;--bs-table-hover-bg:#e5e6e7;--bs-table-hover-color:#000;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-dark{--bs-table-color:#fff;--bs-table-bg:#212529;--bs-table-border-color:#4d5154;--bs-table-striped-bg:#2c3034;--bs-table-striped-color:#fff;--bs-table-active-bg:#373b3e;--bs-table-active-color:#fff;--bs-table-hover-bg:#323539;--bs-table-hover-color:#fff;color:var(--bs-table-color);border-color:var(--bs-table-border-color)}.table-responsive{overflow-x:auto;-webkit-overflow-scrolling:touch}@media (max-width:575.98px){.table-responsive-sm{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:767.98px){.table-responsive-md{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:991.98px){.table-responsive-lg{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1199.98px){.table-responsive-xl{overflow-x:auto;-webkit-overflow-scrolling:touch}}@media (max-width:1399.98px){.table-responsive-xxl{overflow-x:auto;-webkit-overflow-scrolling:touch}}.form-label{margin-bottom:.5rem}.col-form-label{padding-top:calc(.375rem + var(--bs-border-width));padding-bottom:calc(.375rem + var(--bs-border-width));margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + var(--bs-border-width));padding-bottom:calc(.5rem + var(--bs-border-width));font-size:1.25rem}.col-form-label-sm{padding-top:calc(.25rem + var(--bs-border-width));padding-bottom:calc(.25rem + var(--bs-border-width));font-size:.875rem}.form-text{margin-top:.25rem;font-size:.875em;color:var(--bs-secondary-color)}.form-control{display:block;width:100%;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:var(--bs-body-bg);background-clip:padding-box;border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control[type=file]{overflow:hidden}.form-control[type=file]:not(:disabled):not([readonly]){cursor:pointer}.form-control:focus{color:var(--bs-body-color);background-color:var(--bs-body-bg);border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-control::-webkit-date-and-time-value{min-width:85px;height:1.5em;margin:0}.form-control::-webkit-datetime-edit{display:block;padding:0}.form-control::-moz-placeholder{color:var(--bs-secondary-color);opacity:1}.form-control::placeholder{color:var(--bs-secondary-color);opacity:1}.form-control:disabled{background-color:var(--bs-secondary-bg);opacity:1}.form-control::-webkit-file-upload-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:var(--bs-body-color);background-color:var(--bs-tertiary-bg);pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:var(--bs-border-width);border-radius:0;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}.form-control::file-selector-button{padding:.375rem .75rem;margin:-.375rem -.75rem;-webkit-margin-end:.75rem;margin-inline-end:.75rem;color:var(--bs-body-color);background-color:var(--bs-tertiary-bg);pointer-events:none;border-color:inherit;border-style:solid;border-width:0;border-inline-end-width:var(--bs-border-width);border-radius:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control::-webkit-file-upload-button{-webkit-transition:none;transition:none}.form-control::file-selector-button{transition:none}}.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button{background-color:var(--bs-secondary-bg)}.form-control:hover:not(:disabled):not([readonly])::file-selector-button{background-color:var(--bs-secondary-bg)}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;line-height:1.5;color:var(--bs-body-color);background-color:transparent;border:solid transparent;border-width:var(--bs-border-width) 0}.form-control-plaintext:focus{outline:0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{min-height:calc(1.5em + .5rem + calc(var(--bs-border-width) * 2));padding:.25rem .5rem;font-size:.875rem;border-radius:var(--bs-border-radius-sm)}.form-control-sm::-webkit-file-upload-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-sm::file-selector-button{padding:.25rem .5rem;margin:-.25rem -.5rem;-webkit-margin-end:.5rem;margin-inline-end:.5rem}.form-control-lg{min-height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2));padding:.5rem 1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}.form-control-lg::-webkit-file-upload-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}.form-control-lg::file-selector-button{padding:.5rem 1rem;margin:-.5rem -1rem;-webkit-margin-end:1rem;margin-inline-end:1rem}textarea.form-control{min-height:calc(1.5em + .75rem + calc(var(--bs-border-width) * 2))}textarea.form-control-sm{min-height:calc(1.5em + .5rem + calc(var(--bs-border-width) * 2))}textarea.form-control-lg{min-height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2))}.form-control-color{width:3rem;height:calc(1.5em + .75rem + calc(var(--bs-border-width) * 2));padding:.375rem}.form-control-color:not(:disabled):not([readonly]){cursor:pointer}.form-control-color::-moz-color-swatch{border:0!important;border-radius:var(--bs-border-radius)}.form-control-color::-webkit-color-swatch{border:0!important;border-radius:var(--bs-border-radius)}.form-control-color.form-control-sm{height:calc(1.5em + .5rem + calc(var(--bs-border-width) * 2))}.form-control-color.form-control-lg{height:calc(1.5em + 1rem + calc(var(--bs-border-width) * 2))}.form-select{--bs-form-select-bg-img:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e");display:block;width:100%;padding:.375rem 2.25rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:var(--bs-body-bg);background-image:var(--bs-form-select-bg-img),var(--bs-form-select-bg-icon,none);background-repeat:no-repeat;background-position:right .75rem center;background-size:16px 12px;border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius);transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-select{transition:none}}.form-select:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-select[multiple],.form-select[size]:not([size="1"]){padding-right:.75rem;background-image:none}.form-select:disabled{background-color:var(--bs-secondary-bg)}.form-select:-moz-focusring{color:transparent;text-shadow:0 0 0 var(--bs-body-color)}.form-select-sm{padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem;border-radius:var(--bs-border-radius-sm)}.form-select-lg{padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}[data-bs-theme=dark] .form-select{--bs-form-select-bg-img:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23dee2e6' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e")}.form-check{display:block;min-height:1.5rem;padding-left:1.5em;margin-bottom:.125rem}.form-check .form-check-input{float:left;margin-left:-1.5em}.form-check-reverse{padding-right:1.5em;padding-left:0;text-align:right}.form-check-reverse .form-check-input{float:right;margin-right:-1.5em;margin-left:0}.form-check-input{--bs-form-check-bg:var(--bs-body-bg);flex-shrink:0;width:1em;height:1em;margin-top:.25em;vertical-align:top;-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:var(--bs-form-check-bg);background-image:var(--bs-form-check-bg-image);background-repeat:no-repeat;background-position:center;background-size:contain;border:var(--bs-border-width) solid var(--bs-border-color);-webkit-print-color-adjust:exact;color-adjust:exact;print-color-adjust:exact}.form-check-input[type=checkbox]{border-radius:.25em}.form-check-input[type=radio]{border-radius:50%}.form-check-input:active{filter:brightness(90%)}.form-check-input:focus{border-color:#86b7fe;outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.form-check-input:checked{background-color:#0d6efd;border-color:#0d6efd}.form-check-input:checked[type=checkbox]{--bs-form-check-bg-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='m6 10 3 3 6-6'/%3e%3c/svg%3e")}.form-check-input:checked[type=radio]{--bs-form-check-bg-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e")}.form-check-input[type=checkbox]:indeterminate{background-color:#0d6efd;border-color:#0d6efd;--bs-form-check-bg-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e")}.form-check-input:disabled{pointer-events:none;filter:none;opacity:.5}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{cursor:default;opacity:.5}.form-switch{padding-left:2.5em}.form-switch .form-check-input{--bs-form-switch-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e");width:2em;margin-left:-2.5em;background-image:var(--bs-form-switch-bg);background-position:left center;border-radius:2em;transition:background-position .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-switch .form-check-input{transition:none}}.form-switch .form-check-input:focus{--bs-form-switch-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%2386b7fe'/%3e%3c/svg%3e")}.form-switch .form-check-input:checked{background-position:right center;--bs-form-switch-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.form-switch.form-check-reverse{padding-right:2.5em;padding-left:0}.form-switch.form-check-reverse .form-check-input{margin-right:-2.5em;margin-left:0}.form-check-inline{display:inline-block;margin-right:1rem}.btn-check{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.btn-check:disabled+.btn,.btn-check[disabled]+.btn{pointer-events:none;filter:none;opacity:.65}[data-bs-theme=dark] .form-switch .form-check-input:not(:checked):not(:focus){--bs-form-switch-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%28255, 255, 255, 0.25%29'/%3e%3c/svg%3e")}.form-range{width:100%;height:1.5rem;padding:0;-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:transparent}.form-range:focus{outline:0}.form-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .25rem rgba(13,110,253,.25)}.form-range::-moz-focus-outer{border:0}.form-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;-webkit-appearance:none;appearance:none;background-color:#0d6efd;border:0;border-radius:1rem;-webkit-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.form-range::-webkit-slider-thumb:active{background-color:#b6d4fe}.form-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:var(--bs-secondary-bg);border-color:transparent;border-radius:1rem}.form-range::-moz-range-thumb{width:1rem;height:1rem;-moz-appearance:none;appearance:none;background-color:#0d6efd;border:0;border-radius:1rem;-moz-transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-range::-moz-range-thumb{-moz-transition:none;transition:none}}.form-range::-moz-range-thumb:active{background-color:#b6d4fe}.form-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:var(--bs-secondary-bg);border-color:transparent;border-radius:1rem}.form-range:disabled{pointer-events:none}.form-range:disabled::-webkit-slider-thumb{background-color:var(--bs-secondary-color)}.form-range:disabled::-moz-range-thumb{background-color:var(--bs-secondary-color)}.form-floating{position:relative}.form-floating>.form-control,.form-floating>.form-control-plaintext,.form-floating>.form-select{height:calc(3.5rem + calc(var(--bs-border-width) * 2));min-height:calc(3.5rem + calc(var(--bs-border-width) * 2));line-height:1.25}.form-floating>label{position:absolute;top:0;left:0;z-index:2;height:100%;padding:1rem .75rem;overflow:hidden;text-align:start;text-overflow:ellipsis;white-space:nowrap;pointer-events:none;border:var(--bs-border-width) solid transparent;transform-origin:0 0;transition:opacity .1s ease-in-out,transform .1s ease-in-out}@media (prefers-reduced-motion:reduce){.form-floating>label{transition:none}}.form-floating>.form-control,.form-floating>.form-control-plaintext{padding:1rem .75rem}.form-floating>.form-control-plaintext::-moz-placeholder,.form-floating>.form-control::-moz-placeholder{color:transparent}.form-floating>.form-control-plaintext::placeholder,.form-floating>.form-control::placeholder{color:transparent}.form-floating>.form-control-plaintext:not(:-moz-placeholder-shown),.form-floating>.form-control:not(:-moz-placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control-plaintext:focus,.form-floating>.form-control-plaintext:not(:placeholder-shown),.form-floating>.form-control:focus,.form-floating>.form-control:not(:placeholder-shown){padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control-plaintext:-webkit-autofill,.form-floating>.form-control:-webkit-autofill{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-select{padding-top:1.625rem;padding-bottom:.625rem}.form-floating>.form-control:not(:-moz-placeholder-shown)~label{color:rgba(var(--bs-body-color-rgb),.65);transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control-plaintext~label,.form-floating>.form-control:focus~label,.form-floating>.form-control:not(:placeholder-shown)~label,.form-floating>.form-select~label{color:rgba(var(--bs-body-color-rgb),.65);transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control:not(:-moz-placeholder-shown)~label::after{position:absolute;inset:1rem 0.375rem;z-index:-1;height:1.5em;content:"";background-color:var(--bs-body-bg);border-radius:var(--bs-border-radius)}.form-floating>.form-control-plaintext~label::after,.form-floating>.form-control:focus~label::after,.form-floating>.form-control:not(:placeholder-shown)~label::after,.form-floating>.form-select~label::after{position:absolute;inset:1rem 0.375rem;z-index:-1;height:1.5em;content:"";background-color:var(--bs-body-bg);border-radius:var(--bs-border-radius)}.form-floating>.form-control:-webkit-autofill~label{color:rgba(var(--bs-body-color-rgb),.65);transform:scale(.85) translateY(-.5rem) translateX(.15rem)}.form-floating>.form-control-plaintext~label{border-width:var(--bs-border-width) 0}.form-floating>.form-control:disabled~label,.form-floating>:disabled~label{color:#6c757d}.form-floating>.form-control:disabled~label::after,.form-floating>:disabled~label::after{background-color:var(--bs-secondary-bg)}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-floating,.input-group>.form-select{position:relative;flex:1 1 auto;width:1%;min-width:0}.input-group>.form-control:focus,.input-group>.form-floating:focus-within,.input-group>.form-select:focus{z-index:5}.input-group .btn{position:relative;z-index:2}.input-group .btn:focus{z-index:5}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:var(--bs-body-color);text-align:center;white-space:nowrap;background-color:var(--bs-tertiary-bg);border:var(--bs-border-width) solid var(--bs-border-color);border-radius:var(--bs-border-radius)}.input-group-lg>.btn,.input-group-lg>.form-control,.input-group-lg>.form-select,.input-group-lg>.input-group-text{padding:.5rem 1rem;font-size:1.25rem;border-radius:var(--bs-border-radius-lg)}.input-group-sm>.btn,.input-group-sm>.form-control,.input-group-sm>.form-select,.input-group-sm>.input-group-text{padding:.25rem .5rem;font-size:.875rem;border-radius:var(--bs-border-radius-sm)}.input-group-lg>.form-select,.input-group-sm>.form-select{padding-right:3rem}.input-group:not(.has-validation)>.dropdown-toggle:nth-last-child(n+3),.input-group:not(.has-validation)>.form-floating:not(:last-child)>.form-control,.input-group:not(.has-validation)>.form-floating:not(:last-child)>.form-select,.input-group:not(.has-validation)>:not(:last-child):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating){border-top-right-radius:0;border-bottom-right-radius:0}.input-group.has-validation>.dropdown-toggle:nth-last-child(n+4),.input-group.has-validation>.form-floating:nth-last-child(n+3)>.form-control,.input-group.has-validation>.form-floating:nth-last-child(n+3)>.form-select,.input-group.has-validation>:nth-last-child(n+3):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>:not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback){margin-left:calc(var(--bs-border-width) * -1);border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.form-floating:not(:first-child)>.form-control,.input-group>.form-floating:not(:first-child)>.form-select{border-top-left-radius:0;border-bottom-left-radius:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:var(--bs-form-valid-color)}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:var(--bs-success);border-radius:var(--bs-border-radius)}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:var(--bs-form-valid-border-color);padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:var(--bs-form-valid-border-color);box-shadow:0 0 0 .25rem rgba(var(--bs-success-rgb),.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-valid,.was-validated .form-select:valid{border-color:var(--bs-form-valid-border-color)}.form-select.is-valid:not([multiple]):not([size]),.form-select.is-valid:not([multiple])[size="1"],.was-validated .form-select:valid:not([multiple]):not([size]),.was-validated .form-select:valid:not([multiple])[size="1"]{--bs-form-select-bg-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23198754' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-valid:focus,.was-validated .form-select:valid:focus{border-color:var(--bs-form-valid-border-color);box-shadow:0 0 0 .25rem rgba(var(--bs-success-rgb),.25)}.form-control-color.is-valid,.was-validated .form-control-color:valid{width:calc(3rem + calc(1.5em + .75rem))}.form-check-input.is-valid,.was-validated .form-check-input:valid{border-color:var(--bs-form-valid-border-color)}.form-check-input.is-valid:checked,.was-validated .form-check-input:valid:checked{background-color:var(--bs-form-valid-color)}.form-check-input.is-valid:focus,.was-validated .form-check-input:valid:focus{box-shadow:0 0 0 .25rem rgba(var(--bs-success-rgb),.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:var(--bs-form-valid-color)}.form-check-inline .form-check-input~.valid-feedback{margin-left:.5em}.input-group>.form-control:not(:focus).is-valid,.input-group>.form-floating:not(:focus-within).is-valid,.input-group>.form-select:not(:focus).is-valid,.was-validated .input-group>.form-control:not(:focus):valid,.was-validated .input-group>.form-floating:not(:focus-within):valid,.was-validated .input-group>.form-select:not(:focus):valid{z-index:3}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:.875em;color:var(--bs-form-invalid-color)}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;color:#fff;background-color:var(--bs-danger);border-radius:var(--bs-border-radius)}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:var(--bs-form-invalid-border-color);padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:var(--bs-form-invalid-border-color);box-shadow:0 0 0 .25rem rgba(var(--bs-danger-rgb),.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.form-select.is-invalid,.was-validated .form-select:invalid{border-color:var(--bs-form-invalid-border-color)}.form-select.is-invalid:not([multiple]):not([size]),.form-select.is-invalid:not([multiple])[size="1"],.was-validated .form-select:invalid:not([multiple]):not([size]),.was-validated .form-select:invalid:not([multiple])[size="1"]{--bs-form-select-bg-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23dc3545'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3e%3c/svg%3e");padding-right:4.125rem;background-position:right .75rem center,center right 2.25rem;background-size:16px 12px,calc(.75em + .375rem) calc(.75em + .375rem)}.form-select.is-invalid:focus,.was-validated .form-select:invalid:focus{border-color:var(--bs-form-invalid-border-color);box-shadow:0 0 0 .25rem rgba(var(--bs-danger-rgb),.25)}.form-control-color.is-invalid,.was-validated .form-control-color:invalid{width:calc(3rem + calc(1.5em + .75rem))}.form-check-input.is-invalid,.was-validated .form-check-input:invalid{border-color:var(--bs-form-invalid-border-color)}.form-check-input.is-invalid:checked,.was-validated .form-check-input:invalid:checked{background-color:var(--bs-form-invalid-color)}.form-check-input.is-invalid:focus,.was-validated .form-check-input:invalid:focus{box-shadow:0 0 0 .25rem rgba(var(--bs-danger-rgb),.25)}.form-check-input.is-invalid~.form-check-label,.was-validated .form-check-input:invalid~.form-check-label{color:var(--bs-form-invalid-color)}.form-check-inline .form-check-input~.invalid-feedback{margin-left:.5em}.input-group>.form-control:not(:focus).is-invalid,.input-group>.form-floating:not(:focus-within).is-invalid,.input-group>.form-select:not(:focus).is-invalid,.was-validated .input-group>.form-control:not(:focus):invalid,.was-validated .input-group>.form-floating:not(:focus-within):invalid,.was-validated .input-group>.form-select:not(:focus):invalid{z-index:4}.btn{--bs-btn-padding-x:0.75rem;--bs-btn-padding-y:0.375rem;--bs-btn-font-family: ;--bs-btn-font-size:1rem;--bs-btn-font-weight:400;--bs-btn-line-height:1.5;--bs-btn-color:var(--bs-body-color);--bs-btn-bg:transparent;--bs-btn-border-width:var(--bs-border-width);--bs-btn-border-color:transparent;--bs-btn-border-radius:var(--bs-border-radius);--bs-btn-hover-border-color:transparent;--bs-btn-box-shadow:inset 0 1px 0 rgba(255, 255, 255, 0.15),0 1px 1px rgba(0, 0, 0, 0.075);--bs-btn-disabled-opacity:0.65;--bs-btn-focus-box-shadow:0 0 0 0.25rem rgba(var(--bs-btn-focus-shadow-rgb), .5);display:inline-block;padding:var(--bs-btn-padding-y) var(--bs-btn-padding-x);font-family:var(--bs-btn-font-family);font-size:var(--bs-btn-font-size);font-weight:var(--bs-btn-font-weight);line-height:var(--bs-btn-line-height);color:var(--bs-btn-color);text-align:center;text-decoration:none;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;border:var(--bs-btn-border-width) solid var(--bs-btn-border-color);border-radius:var(--bs-btn-border-radius);background-color:var(--bs-btn-bg);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color)}.btn-check+.btn:hover{color:var(--bs-btn-color);background-color:var(--bs-btn-bg);border-color:var(--bs-btn-border-color)}.btn:focus-visible{color:var(--bs-btn-hover-color);background-color:var(--bs-btn-hover-bg);border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:focus-visible+.btn{border-color:var(--bs-btn-hover-border-color);outline:0;box-shadow:var(--bs-btn-focus-box-shadow)}.btn-check:checked+.btn,.btn.active,.btn.show,.btn:first-child:active,:not(.btn-check)+.btn:active{color:var(--bs-btn-active-color);background-color:var(--bs-btn-active-bg);border-color:var(--bs-btn-active-border-color)}.btn-check:checked+.btn:focus-visible,.btn.active:focus-visible,.btn.show:focus-visible,.btn:first-child:active:focus-visible,:not(.btn-check)+.btn:active:focus-visible{box-shadow:var(--bs-btn-focus-box-shadow)}.btn.disabled,.btn:disabled,fieldset:disabled .btn{color:var(--bs-btn-disabled-color);pointer-events:none;background-color:var(--bs-btn-disabled-bg);border-color:var(--bs-btn-disabled-border-color);opacity:var(--bs-btn-disabled-opacity)}.btn-primary{--bs-btn-color:#fff;--bs-btn-bg:#0d6efd;--bs-btn-border-color:#0d6efd;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#0b5ed7;--bs-btn-hover-border-color:#0a58ca;--bs-btn-focus-shadow-rgb:49,132,253;--bs-btn-active-color:#fff;--bs-btn-active-bg:#0a58ca;--bs-btn-active-border-color:#0a53be;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#fff;--bs-btn-disabled-bg:#0d6efd;--bs-btn-disabled-border-color:#0d6efd}.btn-secondary{--bs-btn-color:#fff;--bs-btn-bg:#6c757d;--bs-btn-border-color:#6c757d;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#5c636a;--bs-btn-hover-border-color:#565e64;--bs-btn-focus-shadow-rgb:130,138,145;--bs-btn-active-color:#fff;--bs-btn-active-bg:#565e64;--bs-btn-active-border-color:#51585e;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#fff;--bs-btn-disabled-bg:#6c757d;--bs-btn-disabled-border-color:#6c757d}.btn-success{--bs-btn-color:#fff;--bs-btn-bg:#198754;--bs-btn-border-color:#198754;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#157347;--bs-btn-hover-border-color:#146c43;--bs-btn-focus-shadow-rgb:60,153,110;--bs-btn-active-color:#fff;--bs-btn-active-bg:#146c43;--bs-btn-active-border-color:#13653f;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#fff;--bs-btn-disabled-bg:#198754;--bs-btn-disabled-border-color:#198754}.btn-info{--bs-btn-color:#000;--bs-btn-bg:#0dcaf0;--bs-btn-border-color:#0dcaf0;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#31d2f2;--bs-btn-hover-border-color:#25cff2;--bs-btn-focus-shadow-rgb:11,172,204;--bs-btn-active-color:#000;--bs-btn-active-bg:#3dd5f3;--bs-btn-active-border-color:#25cff2;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#000;--bs-btn-disabled-bg:#0dcaf0;--bs-btn-disabled-border-color:#0dcaf0}.btn-warning{--bs-btn-color:#000;--bs-btn-bg:#ffc107;--bs-btn-border-color:#ffc107;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#ffca2c;--bs-btn-hover-border-color:#ffc720;--bs-btn-focus-shadow-rgb:217,164,6;--bs-btn-active-color:#000;--bs-btn-active-bg:#ffcd39;--bs-btn-active-border-color:#ffc720;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#000;--bs-btn-disabled-bg:#ffc107;--bs-btn-disabled-border-color:#ffc107}.btn-danger{--bs-btn-color:#fff;--bs-btn-bg:#dc3545;--bs-btn-border-color:#dc3545;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#bb2d3b;--bs-btn-hover-border-color:#b02a37;--bs-btn-focus-shadow-rgb:225,83,97;--bs-btn-active-color:#fff;--bs-btn-active-bg:#b02a37;--bs-btn-active-border-color:#a52834;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#fff;--bs-btn-disabled-bg:#dc3545;--bs-btn-disabled-border-color:#dc3545}.btn-light{--bs-btn-color:#000;--bs-btn-bg:#f8f9fa;--bs-btn-border-color:#f8f9fa;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#d3d4d5;--bs-btn-hover-border-color:#c6c7c8;--bs-btn-focus-shadow-rgb:211,212,213;--bs-btn-active-color:#000;--bs-btn-active-bg:#c6c7c8;--bs-btn-active-border-color:#babbbc;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#000;--bs-btn-disabled-bg:#f8f9fa;--bs-btn-disabled-border-color:#f8f9fa}.btn-dark{--bs-btn-color:#fff;--bs-btn-bg:#212529;--bs-btn-border-color:#212529;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#424649;--bs-btn-hover-border-color:#373b3e;--bs-btn-focus-shadow-rgb:66,70,73;--bs-btn-active-color:#fff;--bs-btn-active-bg:#4d5154;--bs-btn-active-border-color:#373b3e;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#fff;--bs-btn-disabled-bg:#212529;--bs-btn-disabled-border-color:#212529}.btn-outline-primary{--bs-btn-color:#0d6efd;--bs-btn-border-color:#0d6efd;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#0d6efd;--bs-btn-hover-border-color:#0d6efd;--bs-btn-focus-shadow-rgb:13,110,253;--bs-btn-active-color:#fff;--bs-btn-active-bg:#0d6efd;--bs-btn-active-border-color:#0d6efd;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#0d6efd;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#0d6efd;--bs-gradient:none}.btn-outline-secondary{--bs-btn-color:#6c757d;--bs-btn-border-color:#6c757d;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#6c757d;--bs-btn-hover-border-color:#6c757d;--bs-btn-focus-shadow-rgb:108,117,125;--bs-btn-active-color:#fff;--bs-btn-active-bg:#6c757d;--bs-btn-active-border-color:#6c757d;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#6c757d;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#6c757d;--bs-gradient:none}.btn-outline-success{--bs-btn-color:#198754;--bs-btn-border-color:#198754;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#198754;--bs-btn-hover-border-color:#198754;--bs-btn-focus-shadow-rgb:25,135,84;--bs-btn-active-color:#fff;--bs-btn-active-bg:#198754;--bs-btn-active-border-color:#198754;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#198754;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#198754;--bs-gradient:none}.btn-outline-info{--bs-btn-color:#0dcaf0;--bs-btn-border-color:#0dcaf0;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#0dcaf0;--bs-btn-hover-border-color:#0dcaf0;--bs-btn-focus-shadow-rgb:13,202,240;--bs-btn-active-color:#000;--bs-btn-active-bg:#0dcaf0;--bs-btn-active-border-color:#0dcaf0;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#0dcaf0;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#0dcaf0;--bs-gradient:none}.btn-outline-warning{--bs-btn-color:#ffc107;--bs-btn-border-color:#ffc107;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#ffc107;--bs-btn-hover-border-color:#ffc107;--bs-btn-focus-shadow-rgb:255,193,7;--bs-btn-active-color:#000;--bs-btn-active-bg:#ffc107;--bs-btn-active-border-color:#ffc107;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#ffc107;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#ffc107;--bs-gradient:none}.btn-outline-danger{--bs-btn-color:#dc3545;--bs-btn-border-color:#dc3545;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#dc3545;--bs-btn-hover-border-color:#dc3545;--bs-btn-focus-shadow-rgb:220,53,69;--bs-btn-active-color:#fff;--bs-btn-active-bg:#dc3545;--bs-btn-active-border-color:#dc3545;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#dc3545;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#dc3545;--bs-gradient:none}.btn-outline-light{--bs-btn-color:#f8f9fa;--bs-btn-border-color:#f8f9fa;--bs-btn-hover-color:#000;--bs-btn-hover-bg:#f8f9fa;--bs-btn-hover-border-color:#f8f9fa;--bs-btn-focus-shadow-rgb:248,249,250;--bs-btn-active-color:#000;--bs-btn-active-bg:#f8f9fa;--bs-btn-active-border-color:#f8f9fa;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#f8f9fa;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#f8f9fa;--bs-gradient:none}.btn-outline-dark{--bs-btn-color:#212529;--bs-btn-border-color:#212529;--bs-btn-hover-color:#fff;--bs-btn-hover-bg:#212529;--bs-btn-hover-border-color:#212529;--bs-btn-focus-shadow-rgb:33,37,41;--bs-btn-active-color:#fff;--bs-btn-active-bg:#212529;--bs-btn-active-border-color:#212529;--bs-btn-active-shadow:inset 0 3px 5px rgba(0, 0, 0, 0.125);--bs-btn-disabled-color:#212529;--bs-btn-disabled-bg:transparent;--bs-btn-disabled-border-color:#212529;--bs-gradient:none}.btn-link{--bs-btn-font-weight:400;--bs-btn-color:var(--bs-link-color);--bs-btn-bg:transparent;--bs-btn-border-color:transparent;--bs-btn-hover-color:var(--bs-link-hover-color);--bs-btn-hover-border-color:transparent;--bs-btn-active-color:var(--bs-link-hover-color);--bs-btn-active-border-color:transparent;--bs-btn-disabled-color:#6c757d;--bs-btn-disabled-border-color:transparent;--bs-btn-box-shadow:0 0 0 #000;--bs-btn-focus-shadow-rgb:49,132,253;text-decoration:underline}.btn-link:focus-visible{color:var(--bs-btn-color)}.btn-link:hover{color:var(--bs-btn-hover-color)}.btn-group-lg>.btn,.btn-lg{--bs-btn-padding-y:0.5rem;--bs-btn-padding-x:1rem;--bs-btn-font-size:1.25rem;--bs-btn-border-radius:var(--bs-border-radius-lg)}.btn-group-sm>.btn,.btn-sm{--bs-btn-padding-y:0.25rem;--bs-btn-padding-x:0.5rem;--bs-btn-font-size:0.875rem;--bs-btn-border-radius:var(--bs-border-radius-sm)}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{height:0;overflow:hidden;transition:height .35s ease}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.collapsing.collapse-horizontal{width:0;height:auto;transition:width .35s ease}@media (prefers-reduced-motion:reduce){.collapsing.collapse-horizontal{transition:none}}.dropdown,.dropdown-center,.dropend,.dropstart,.dropup,.dropup-center{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{--bs-dropdown-zindex:1000;--bs-dropdown-min-width:10rem;--bs-dropdown-padding-x:0;--bs-dropdown-padding-y:0.5rem;--bs-dropdown-spacer:0.125rem;--bs-dropdown-font-size:1rem;--bs-dropdown-color:var(--bs-body-color);--bs-dropdown-bg:var(--bs-body-bg);--bs-dropdown-border-color:var(--bs-border-color-translucent);--bs-dropdown-border-radius:var(--bs-border-radius);--bs-dropdown-border-width:var(--bs-border-width);--bs-dropdown-inner-border-radius:calc(var(--bs-border-radius) - var(--bs-border-width));--bs-dropdown-divider-bg:var(--bs-border-color-translucent);--bs-dropdown-divider-margin-y:0.5rem;--bs-dropdown-box-shadow:var(--bs-box-shadow);--bs-dropdown-link-color:var(--bs-body-color);--bs-dropdown-link-hover-color:var(--bs-body-color);--bs-dropdown-link-hover-bg:var(--bs-tertiary-bg);--bs-dropdown-link-active-color:#fff;--bs-dropdown-link-active-bg:#0d6efd;--bs-dropdown-link-disabled-color:var(--bs-tertiary-color);--bs-dropdown-item-padding-x:1rem;--bs-dropdown-item-padding-y:0.25rem;--bs-dropdown-header-color:#6c757d;--bs-dropdown-header-padding-x:1rem;--bs-dropdown-header-padding-y:0.5rem;position:absolute;z-index:var(--bs-dropdown-zindex);display:none;min-width:var(--bs-dropdown-min-width);padding:var(--bs-dropdown-padding-y) var(--bs-dropdown-padding-x);margin:0;font-size:var(--bs-dropdown-font-size);color:var(--bs-dropdown-color);text-align:left;list-style:none;background-color:var(--bs-dropdown-bg);background-clip:padding-box;border:var(--bs-dropdown-border-width) solid var(--bs-dropdown-border-color);border-radius:var(--bs-dropdown-border-radius)}.dropdown-menu[data-bs-popper]{top:100%;left:0;margin-top:var(--bs-dropdown-spacer)}.dropdown-menu-start{--bs-position:start}.dropdown-menu-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-end{--bs-position:end}.dropdown-menu-end[data-bs-popper]{right:0;left:auto}@media (min-width:576px){.dropdown-menu-sm-start{--bs-position:start}.dropdown-menu-sm-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-sm-end{--bs-position:end}.dropdown-menu-sm-end[data-bs-popper]{right:0;left:auto}}@media (min-width:768px){.dropdown-menu-md-start{--bs-position:start}.dropdown-menu-md-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-md-end{--bs-position:end}.dropdown-menu-md-end[data-bs-popper]{right:0;left:auto}}@media (min-width:992px){.dropdown-menu-lg-start{--bs-position:start}.dropdown-menu-lg-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-lg-end{--bs-position:end}.dropdown-menu-lg-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1200px){.dropdown-menu-xl-start{--bs-position:start}.dropdown-menu-xl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xl-end{--bs-position:end}.dropdown-menu-xl-end[data-bs-popper]{right:0;left:auto}}@media (min-width:1400px){.dropdown-menu-xxl-start{--bs-position:start}.dropdown-menu-xxl-start[data-bs-popper]{right:auto;left:0}.dropdown-menu-xxl-end{--bs-position:end}.dropdown-menu-xxl-end[data-bs-popper]{right:0;left:auto}}.dropup .dropdown-menu[data-bs-popper]{top:auto;bottom:100%;margin-top:0;margin-bottom:var(--bs-dropdown-spacer)}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-menu[data-bs-popper]{top:0;right:auto;left:100%;margin-top:0;margin-left:var(--bs-dropdown-spacer)}.dropend .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropend .dropdown-toggle:empty::after{margin-left:0}.dropend .dropdown-toggle::after{vertical-align:0}.dropstart .dropdown-menu[data-bs-popper]{top:0;right:100%;left:auto;margin-top:0;margin-right:var(--bs-dropdown-spacer)}.dropstart .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropstart .dropdown-toggle::after{display:none}.dropstart .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropstart .dropdown-toggle:empty::after{margin-left:0}.dropstart .dropdown-toggle::before{vertical-align:0}.dropdown-divider{height:0;margin:var(--bs-dropdown-divider-margin-y) 0;overflow:hidden;border-top:1px solid var(--bs-dropdown-divider-bg);opacity:1}.dropdown-item{display:block;width:100%;padding:var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x);clear:both;font-weight:400;color:var(--bs-dropdown-link-color);text-align:inherit;text-decoration:none;white-space:nowrap;background-color:transparent;border:0;border-radius:var(--bs-dropdown-item-border-radius,0)}.dropdown-item:focus,.dropdown-item:hover{color:var(--bs-dropdown-link-hover-color);background-color:var(--bs-dropdown-link-hover-bg)}.dropdown-item.active,.dropdown-item:active{color:var(--bs-dropdown-link-active-color);text-decoration:none;background-color:var(--bs-dropdown-link-active-bg)}.dropdown-item.disabled,.dropdown-item:disabled{color:var(--bs-dropdown-link-disabled-color);pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:var(--bs-dropdown-header-padding-y) var(--bs-dropdown-header-padding-x);margin-bottom:0;font-size:.875rem;color:var(--bs-dropdown-header-color);white-space:nowrap}.dropdown-item-text{display:block;padding:var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x);color:var(--bs-dropdown-link-color)}.dropdown-menu-dark{--bs-dropdown-color:#dee2e6;--bs-dropdown-bg:#343a40;--bs-dropdown-border-color:var(--bs-border-color-translucent);--bs-dropdown-box-shadow: ;--bs-dropdown-link-color:#dee2e6;--bs-dropdown-link-hover-color:#fff;--bs-dropdown-divider-bg:var(--bs-border-color-translucent);--bs-dropdown-link-hover-bg:rgba(255, 255, 255, 0.15);--bs-dropdown-link-active-color:#fff;--bs-dropdown-link-active-bg:#0d6efd;--bs-dropdown-link-disabled-color:#adb5bd;--bs-dropdown-header-color:#adb5bd}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn-check:checked+.btn,.btn-group-vertical>.btn-check:focus+.btn,.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn-check:checked+.btn,.btn-group>.btn-check:focus+.btn,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group{border-radius:var(--bs-border-radius)}.btn-group>.btn-group:not(:first-child),.btn-group>:not(.btn-check:first-child)+.btn{margin-left:calc(var(--bs-border-width) * -1)}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn.dropdown-toggle-split:first-child,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:nth-child(n+3),.btn-group>:not(.btn-check)+.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropend .dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after{margin-left:0}.dropstart .dropdown-toggle-split::before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:calc(var(--bs-border-width) * -1)}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn~.btn{border-top-left-radius:0;border-top-right-radius:0}.nav{--bs-nav-link-padding-x:1rem;--bs-nav-link-padding-y:0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color:var(--bs-link-color);--bs-nav-link-hover-color:var(--bs-link-hover-color);--bs-nav-link-disabled-color:var(--bs-secondary-color);display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:var(--bs-nav-link-padding-y) var(--bs-nav-link-padding-x);font-size:var(--bs-nav-link-font-size);font-weight:var(--bs-nav-link-font-weight);color:var(--bs-nav-link-color);text-decoration:none;background:0 0;border:0;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out}@media (prefers-reduced-motion:reduce){.nav-link{transition:none}}.nav-link:focus,.nav-link:hover{color:var(--bs-nav-link-hover-color)}.nav-link:focus-visible{outline:0;box-shadow:0 0 0 .25rem rgba(13,110,253,.25)}.nav-link.disabled,.nav-link:disabled{color:var(--bs-nav-link-disabled-color);pointer-events:none;cursor:default}.nav-tabs{--bs-nav-tabs-border-width:var(--bs-border-width);--bs-nav-tabs-border-color:var(--bs-border-color);--bs-nav-tabs-border-radius:var(--bs-border-radius);--bs-nav-tabs-link-hover-border-color:var(--bs-secondary-bg) var(--bs-secondary-bg) var(--bs-border-color);--bs-nav-tabs-link-active-color:var(--bs-emphasis-color);--bs-nav-tabs-link-active-bg:var(--bs-body-bg);--bs-nav-tabs-link-active-border-color:var(--bs-border-color) var(--bs-border-color) var(--bs-body-bg);border-bottom:var(--bs-nav-tabs-border-width) solid var(--bs-nav-tabs-border-color)}.nav-tabs .nav-link{margin-bottom:calc(-1 * var(--bs-nav-tabs-border-width));border:var(--bs-nav-tabs-border-width) solid transparent;border-top-left-radius:var(--bs-nav-tabs-border-radius);border-top-right-radius:var(--bs-nav-tabs-border-radius)}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{isolation:isolate;border-color:var(--bs-nav-tabs-link-hover-border-color)}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:var(--bs-nav-tabs-link-active-color);background-color:var(--bs-nav-tabs-link-active-bg);border-color:var(--bs-nav-tabs-link-active-border-color)}.nav-tabs .dropdown-menu{margin-top:calc(-1 * var(--bs-nav-tabs-border-width));border-top-left-radius:0;border-top-right-radius:0}.nav-pills{--bs-nav-pills-border-radius:var(--bs-border-radius);--bs-nav-pills-link-active-color:#fff;--bs-nav-pills-link-active-bg:#0d6efd}.nav-pills .nav-link{border-radius:var(--bs-nav-pills-border-radius)}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:var(--bs-nav-pills-link-active-color);background-color:var(--bs-nav-pills-link-active-bg)}.nav-underline{--bs-nav-underline-gap:1rem;--bs-nav-underline-border-width:0.125rem;--bs-nav-underline-link-active-color:var(--bs-emphasis-color);gap:var(--bs-nav-underline-gap)}.nav-underline .nav-link{padding-right:0;padding-left:0;border-bottom:var(--bs-nav-underline-border-width) solid transparent}.nav-underline .nav-link:focus,.nav-underline .nav-link:hover{border-bottom-color:currentcolor}.nav-underline .nav-link.active,.nav-underline .show>.nav-link{font-weight:700;color:var(--bs-nav-underline-link-active-color);border-bottom-color:currentcolor}.nav-fill .nav-item,.nav-fill>.nav-link{flex:1 1 auto;text-align:center}.nav-justified .nav-item,.nav-justified>.nav-link{flex-basis:0;flex-grow:1;text-align:center}.nav-fill .nav-item .nav-link,.nav-justified .nav-item .nav-link{width:100%}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{--bs-navbar-padding-x:0;--bs-navbar-padding-y:0.5rem;--bs-navbar-color:rgba(var(--bs-emphasis-color-rgb), 0.65);--bs-navbar-hover-color:rgba(var(--bs-emphasis-color-rgb), 0.8);--bs-navbar-disabled-color:rgba(var(--bs-emphasis-color-rgb), 0.3);--bs-navbar-active-color:rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-brand-padding-y:0.3125rem;--bs-navbar-brand-margin-end:1rem;--bs-navbar-brand-font-size:1.25rem;--bs-navbar-brand-color:rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-brand-hover-color:rgba(var(--bs-emphasis-color-rgb), 1);--bs-navbar-nav-link-padding-x:0.5rem;--bs-navbar-toggler-padding-y:0.25rem;--bs-navbar-toggler-padding-x:0.75rem;--bs-navbar-toggler-font-size:1.25rem;--bs-navbar-toggler-icon-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%2833, 37, 41, 0.75%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e");--bs-navbar-toggler-border-color:rgba(var(--bs-emphasis-color-rgb), 0.15);--bs-navbar-toggler-border-radius:var(--bs-border-radius);--bs-navbar-toggler-focus-width:0.25rem;--bs-navbar-toggler-transition:box-shadow 0.15s ease-in-out;position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding:var(--bs-navbar-padding-y) var(--bs-navbar-padding-x)}.navbar>.container,.navbar>.container-fluid,.navbar>.container-lg,.navbar>.container-md,.navbar>.container-sm,.navbar>.container-xl,.navbar>.container-xxl{display:flex;flex-wrap:inherit;align-items:center;justify-content:space-between}.navbar-brand{padding-top:var(--bs-navbar-brand-padding-y);padding-bottom:var(--bs-navbar-brand-padding-y);margin-right:var(--bs-navbar-brand-margin-end);font-size:var(--bs-navbar-brand-font-size);color:var(--bs-navbar-brand-color);text-decoration:none;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{color:var(--bs-navbar-brand-hover-color)}.navbar-nav{--bs-nav-link-padding-x:0;--bs-nav-link-padding-y:0.5rem;--bs-nav-link-font-weight: ;--bs-nav-link-color:var(--bs-navbar-color);--bs-nav-link-hover-color:var(--bs-navbar-hover-color);--bs-nav-link-disabled-color:var(--bs-navbar-disabled-color);display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link.active,.navbar-nav .nav-link.show{color:var(--bs-navbar-active-color)}.navbar-nav .dropdown-menu{position:static}.navbar-text{padding-top:.5rem;padding-bottom:.5rem;color:var(--bs-navbar-color)}.navbar-text a,.navbar-text a:focus,.navbar-text a:hover{color:var(--bs-navbar-active-color)}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:var(--bs-navbar-toggler-padding-y) var(--bs-navbar-toggler-padding-x);font-size:var(--bs-navbar-toggler-font-size);line-height:1;color:var(--bs-navbar-color);background-color:transparent;border:var(--bs-border-width) solid var(--bs-navbar-toggler-border-color);border-radius:var(--bs-navbar-toggler-border-radius);transition:var(--bs-navbar-toggler-transition)}@media (prefers-reduced-motion:reduce){.navbar-toggler{transition:none}}.navbar-toggler:hover{text-decoration:none}.navbar-toggler:focus{text-decoration:none;outline:0;box-shadow:0 0 0 var(--bs-navbar-toggler-focus-width)}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;background-image:var(--bs-navbar-toggler-icon-bg);background-repeat:no-repeat;background-position:center;background-size:100%}.navbar-nav-scroll{max-height:var(--bs-scroll-height,75vh);overflow-y:auto}@media (min-width:576px){.navbar-expand-sm{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-sm .navbar-nav-scroll{overflow:visible}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}.navbar-expand-sm .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand-sm .offcanvas .offcanvas-header{display:none}.navbar-expand-sm .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:768px){.navbar-expand-md{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-md .navbar-nav-scroll{overflow:visible}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}.navbar-expand-md .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand-md .offcanvas .offcanvas-header{display:none}.navbar-expand-md .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:992px){.navbar-expand-lg{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-lg .navbar-nav-scroll{overflow:visible}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}.navbar-expand-lg .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand-lg .offcanvas .offcanvas-header{display:none}.navbar-expand-lg .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1200px){.navbar-expand-xl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-xl .navbar-nav-scroll{overflow:visible}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}.navbar-expand-xl .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand-xl .offcanvas .offcanvas-header{display:none}.navbar-expand-xl .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}@media (min-width:1400px){.navbar-expand-xxl{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand-xxl .navbar-nav{flex-direction:row}.navbar-expand-xxl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xxl .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand-xxl .navbar-nav-scroll{overflow:visible}.navbar-expand-xxl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xxl .navbar-toggler{display:none}.navbar-expand-xxl .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand-xxl .offcanvas .offcanvas-header{display:none}.navbar-expand-xxl .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}}.navbar-expand{flex-wrap:nowrap;justify-content:flex-start}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:var(--bs-navbar-nav-link-padding-x);padding-left:var(--bs-navbar-nav-link-padding-x)}.navbar-expand .navbar-nav-scroll{overflow:visible}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-expand .offcanvas{position:static;z-index:auto;flex-grow:1;width:auto!important;height:auto!important;visibility:visible!important;background-color:transparent!important;border:0!important;transform:none!important;transition:none}.navbar-expand .offcanvas .offcanvas-header{display:none}.navbar-expand .offcanvas .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible}.navbar-dark,.navbar[data-bs-theme=dark]{--bs-navbar-color:rgba(255, 255, 255, 0.55);--bs-navbar-hover-color:rgba(255, 255, 255, 0.75);--bs-navbar-disabled-color:rgba(255, 255, 255, 0.25);--bs-navbar-active-color:#fff;--bs-navbar-brand-color:#fff;--bs-navbar-brand-hover-color:#fff;--bs-navbar-toggler-border-color:rgba(255, 255, 255, 0.1);--bs-navbar-toggler-icon-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}[data-bs-theme=dark] .navbar-toggler-icon{--bs-navbar-toggler-icon-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.card{--bs-card-spacer-y:1rem;--bs-card-spacer-x:1rem;--bs-card-title-spacer-y:0.5rem;--bs-card-title-color: ;--bs-card-subtitle-color: ;--bs-card-border-width:var(--bs-border-width);--bs-card-border-color:var(--bs-border-color-translucent);--bs-card-border-radius:var(--bs-border-radius);--bs-card-box-shadow: ;--bs-card-inner-border-radius:calc(var(--bs-border-radius) - (var(--bs-border-width)));--bs-card-cap-padding-y:0.5rem;--bs-card-cap-padding-x:1rem;--bs-card-cap-bg:rgba(var(--bs-body-color-rgb), 0.03);--bs-card-cap-color: ;--bs-card-height: ;--bs-card-color: ;--bs-card-bg:var(--bs-body-bg);--bs-card-img-overlay-padding:1rem;--bs-card-group-margin:0.75rem;position:relative;display:flex;flex-direction:column;min-width:0;height:var(--bs-card-height);color:var(--bs-body-color);word-wrap:break-word;background-color:var(--bs-card-bg);background-clip:border-box;border:var(--bs-card-border-width) solid var(--bs-card-border-color);border-radius:var(--bs-card-border-radius)}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:var(--bs-card-inner-border-radius);border-top-right-radius:var(--bs-card-inner-border-radius)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:var(--bs-card-inner-border-radius);border-bottom-left-radius:var(--bs-card-inner-border-radius)}.card>.card-header+.list-group,.card>.list-group+.card-footer{border-top:0}.card-body{flex:1 1 auto;padding:var(--bs-card-spacer-y) var(--bs-card-spacer-x);color:var(--bs-card-color)}.card-title{margin-bottom:var(--bs-card-title-spacer-y);color:var(--bs-card-title-color)}.card-subtitle{margin-top:calc(-.5 * var(--bs-card-title-spacer-y));margin-bottom:0;color:var(--bs-card-subtitle-color)}.card-text:last-child{margin-bottom:0}.card-link+.card-link{margin-left:var(--bs-card-spacer-x)}.card-header{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);margin-bottom:0;color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-bottom:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-header:first-child{border-radius:var(--bs-card-inner-border-radius) var(--bs-card-inner-border-radius) 0 0}.card-footer{padding:var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x);color:var(--bs-card-cap-color);background-color:var(--bs-card-cap-bg);border-top:var(--bs-card-border-width) solid var(--bs-card-border-color)}.card-footer:last-child{border-radius:0 0 var(--bs-card-inner-border-radius) var(--bs-card-inner-border-radius)}.card-header-tabs{margin-right:calc(-.5 * var(--bs-card-cap-padding-x));margin-bottom:calc(-1 * var(--bs-card-cap-padding-y));margin-left:calc(-.5 * var(--bs-card-cap-padding-x));border-bottom:0}.card-header-tabs .nav-link.active{background-color:var(--bs-card-bg);border-bottom-color:var(--bs-card-bg)}.card-header-pills{margin-right:calc(-.5 * var(--bs-card-cap-padding-x));margin-left:calc(-.5 * var(--bs-card-cap-padding-x))}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:var(--bs-card-img-overlay-padding);border-radius:var(--bs-card-inner-border-radius)}.card-img,.card-img-bottom,.card-img-top{width:100%}.card-img,.card-img-top{border-top-left-radius:var(--bs-card-inner-border-radius);border-top-right-radius:var(--bs-card-inner-border-radius)}.card-img,.card-img-bottom{border-bottom-right-radius:var(--bs-card-inner-border-radius);border-bottom-left-radius:var(--bs-card-inner-border-radius)}.card-group>.card{margin-bottom:var(--bs-card-group-margin)}@media (min-width:576px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.accordion{--bs-accordion-color:var(--bs-body-color);--bs-accordion-bg:var(--bs-body-bg);--bs-accordion-transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out,border-radius 0.15s ease;--bs-accordion-border-color:var(--bs-border-color);--bs-accordion-border-width:var(--bs-border-width);--bs-accordion-border-radius:var(--bs-border-radius);--bs-accordion-inner-border-radius:calc(var(--bs-border-radius) - (var(--bs-border-width)));--bs-accordion-btn-padding-x:1.25rem;--bs-accordion-btn-padding-y:1rem;--bs-accordion-btn-color:var(--bs-body-color);--bs-accordion-btn-bg:var(--bs-accordion-bg);--bs-accordion-btn-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-icon-width:1.25rem;--bs-accordion-btn-icon-transform:rotate(-180deg);--bs-accordion-btn-icon-transition:transform 0.2s ease-in-out;--bs-accordion-btn-active-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23052c65'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-focus-border-color:#86b7fe;--bs-accordion-btn-focus-box-shadow:0 0 0 0.25rem rgba(13, 110, 253, 0.25);--bs-accordion-body-padding-x:1.25rem;--bs-accordion-body-padding-y:1rem;--bs-accordion-active-color:var(--bs-primary-text-emphasis);--bs-accordion-active-bg:var(--bs-primary-bg-subtle)}.accordion-button{position:relative;display:flex;align-items:center;width:100%;padding:var(--bs-accordion-btn-padding-y) var(--bs-accordion-btn-padding-x);font-size:1rem;color:var(--bs-accordion-btn-color);text-align:left;background-color:var(--bs-accordion-btn-bg);border:0;border-radius:0;overflow-anchor:none;transition:var(--bs-accordion-transition)}@media (prefers-reduced-motion:reduce){.accordion-button{transition:none}}.accordion-button:not(.collapsed){color:var(--bs-accordion-active-color);background-color:var(--bs-accordion-active-bg);box-shadow:inset 0 calc(-1 * var(--bs-accordion-border-width)) 0 var(--bs-accordion-border-color)}.accordion-button:not(.collapsed)::after{background-image:var(--bs-accordion-btn-active-icon);transform:var(--bs-accordion-btn-icon-transform)}.accordion-button::after{flex-shrink:0;width:var(--bs-accordion-btn-icon-width);height:var(--bs-accordion-btn-icon-width);margin-left:auto;content:"";background-image:var(--bs-accordion-btn-icon);background-repeat:no-repeat;background-size:var(--bs-accordion-btn-icon-width);transition:var(--bs-accordion-btn-icon-transition)}@media (prefers-reduced-motion:reduce){.accordion-button::after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{z-index:3;border-color:var(--bs-accordion-btn-focus-border-color);outline:0;box-shadow:var(--bs-accordion-btn-focus-box-shadow)}.accordion-header{margin-bottom:0}.accordion-item{color:var(--bs-accordion-color);background-color:var(--bs-accordion-bg);border:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color)}.accordion-item:first-of-type{border-top-left-radius:var(--bs-accordion-border-radius);border-top-right-radius:var(--bs-accordion-border-radius)}.accordion-item:first-of-type .accordion-button{border-top-left-radius:var(--bs-accordion-inner-border-radius);border-top-right-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-right-radius:var(--bs-accordion-border-radius);border-bottom-left-radius:var(--bs-accordion-border-radius)}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-right-radius:var(--bs-accordion-inner-border-radius);border-bottom-left-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:last-of-type .accordion-collapse{border-bottom-right-radius:var(--bs-accordion-border-radius);border-bottom-left-radius:var(--bs-accordion-border-radius)}.accordion-body{padding:var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x)}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-right:0;border-left:0;border-radius:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button,.accordion-flush .accordion-item .accordion-button.collapsed{border-radius:0}[data-bs-theme=dark] .accordion-button::after{--bs-accordion-btn-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e");--bs-accordion-btn-active-icon:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%236ea8fe'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.breadcrumb{--bs-breadcrumb-padding-x:0;--bs-breadcrumb-padding-y:0;--bs-breadcrumb-margin-bottom:1rem;--bs-breadcrumb-bg: ;--bs-breadcrumb-border-radius: ;--bs-breadcrumb-divider-color:var(--bs-secondary-color);--bs-breadcrumb-item-padding-x:0.5rem;--bs-breadcrumb-item-active-color:var(--bs-secondary-color);display:flex;flex-wrap:wrap;padding:var(--bs-breadcrumb-padding-y) var(--bs-breadcrumb-padding-x);margin-bottom:var(--bs-breadcrumb-margin-bottom);font-size:var(--bs-breadcrumb-font-size);list-style:none;background-color:var(--bs-breadcrumb-bg);border-radius:var(--bs-breadcrumb-border-radius)}.breadcrumb-item+.breadcrumb-item{padding-left:var(--bs-breadcrumb-item-padding-x)}.breadcrumb-item+.breadcrumb-item::before{float:left;padding-right:var(--bs-breadcrumb-item-padding-x);color:var(--bs-breadcrumb-divider-color);content:var(--bs-breadcrumb-divider, "/")}.breadcrumb-item.active{color:var(--bs-breadcrumb-item-active-color)}.pagination{--bs-pagination-padding-x:0.75rem;--bs-pagination-padding-y:0.375rem;--bs-pagination-font-size:1rem;--bs-pagination-color:var(--bs-link-color);--bs-pagination-bg:var(--bs-body-bg);--bs-pagination-border-width:var(--bs-border-width);--bs-pagination-border-color:var(--bs-border-color);--bs-pagination-border-radius:var(--bs-border-radius);--bs-pagination-hover-color:var(--bs-link-hover-color);--bs-pagination-hover-bg:var(--bs-tertiary-bg);--bs-pagination-hover-border-color:var(--bs-border-color);--bs-pagination-focus-color:var(--bs-link-hover-color);--bs-pagination-focus-bg:var(--bs-secondary-bg);--bs-pagination-focus-box-shadow:0 0 0 0.25rem rgba(13, 110, 253, 0.25);--bs-pagination-active-color:#fff;--bs-pagination-active-bg:#0d6efd;--bs-pagination-active-border-color:#0d6efd;--bs-pagination-disabled-color:var(--bs-secondary-color);--bs-pagination-disabled-bg:var(--bs-secondary-bg);--bs-pagination-disabled-border-color:var(--bs-border-color);display:flex;padding-left:0;list-style:none}.page-link{position:relative;display:block;padding:var(--bs-pagination-padding-y) var(--bs-pagination-padding-x);font-size:var(--bs-pagination-font-size);color:var(--bs-pagination-color);text-decoration:none;background-color:var(--bs-pagination-bg);border:var(--bs-pagination-border-width) solid var(--bs-pagination-border-color);transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.page-link{transition:none}}.page-link:hover{z-index:2;color:var(--bs-pagination-hover-color);background-color:var(--bs-pagination-hover-bg);border-color:var(--bs-pagination-hover-border-color)}.page-link:focus{z-index:3;color:var(--bs-pagination-focus-color);background-color:var(--bs-pagination-focus-bg);outline:0;box-shadow:var(--bs-pagination-focus-box-shadow)}.active>.page-link,.page-link.active{z-index:3;color:var(--bs-pagination-active-color);background-color:var(--bs-pagination-active-bg);border-color:var(--bs-pagination-active-border-color)}.disabled>.page-link,.page-link.disabled{color:var(--bs-pagination-disabled-color);pointer-events:none;background-color:var(--bs-pagination-disabled-bg);border-color:var(--bs-pagination-disabled-border-color)}.page-item:not(:first-child) .page-link{margin-left:calc(var(--bs-border-width) * -1)}.page-item:first-child .page-link{border-top-left-radius:var(--bs-pagination-border-radius);border-bottom-left-radius:var(--bs-pagination-border-radius)}.page-item:last-child .page-link{border-top-right-radius:var(--bs-pagination-border-radius);border-bottom-right-radius:var(--bs-pagination-border-radius)}.pagination-lg{--bs-pagination-padding-x:1.5rem;--bs-pagination-padding-y:0.75rem;--bs-pagination-font-size:1.25rem;--bs-pagination-border-radius:var(--bs-border-radius-lg)}.pagination-sm{--bs-pagination-padding-x:0.5rem;--bs-pagination-padding-y:0.25rem;--bs-pagination-font-size:0.875rem;--bs-pagination-border-radius:var(--bs-border-radius-sm)}.badge{--bs-badge-padding-x:0.65em;--bs-badge-padding-y:0.35em;--bs-badge-font-size:0.75em;--bs-badge-font-weight:700;--bs-badge-color:#fff;--bs-badge-border-radius:var(--bs-border-radius);display:inline-block;padding:var(--bs-badge-padding-y) var(--bs-badge-padding-x);font-size:var(--bs-badge-font-size);font-weight:var(--bs-badge-font-weight);line-height:1;color:var(--bs-badge-color);text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:var(--bs-badge-border-radius)}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{--bs-alert-bg:transparent;--bs-alert-padding-x:1rem;--bs-alert-padding-y:1rem;--bs-alert-margin-bottom:1rem;--bs-alert-color:inherit;--bs-alert-border-color:transparent;--bs-alert-border:var(--bs-border-width) solid var(--bs-alert-border-color);--bs-alert-border-radius:var(--bs-border-radius);--bs-alert-link-color:inherit;position:relative;padding:var(--bs-alert-padding-y) var(--bs-alert-padding-x);margin-bottom:var(--bs-alert-margin-bottom);color:var(--bs-alert-color);background-color:var(--bs-alert-bg);border:var(--bs-alert-border);border-radius:var(--bs-alert-border-radius)}.alert-heading{color:inherit}.alert-link{font-weight:700;color:var(--bs-alert-link-color)}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{position:absolute;top:0;right:0;z-index:2;padding:1.25rem 1rem}.alert-primary{--bs-alert-color:var(--bs-primary-text-emphasis);--bs-alert-bg:var(--bs-primary-bg-subtle);--bs-alert-border-color:var(--bs-primary-border-subtle);--bs-alert-link-color:var(--bs-primary-text-emphasis)}.alert-secondary{--bs-alert-color:var(--bs-secondary-text-emphasis);--bs-alert-bg:var(--bs-secondary-bg-subtle);--bs-alert-border-color:var(--bs-secondary-border-subtle);--bs-alert-link-color:var(--bs-secondary-text-emphasis)}.alert-success{--bs-alert-color:var(--bs-success-text-emphasis);--bs-alert-bg:var(--bs-success-bg-subtle);--bs-alert-border-color:var(--bs-success-border-subtle);--bs-alert-link-color:var(--bs-success-text-emphasis)}.alert-info{--bs-alert-color:var(--bs-info-text-emphasis);--bs-alert-bg:var(--bs-info-bg-subtle);--bs-alert-border-color:var(--bs-info-border-subtle);--bs-alert-link-color:var(--bs-info-text-emphasis)}.alert-warning{--bs-alert-color:var(--bs-warning-text-emphasis);--bs-alert-bg:var(--bs-warning-bg-subtle);--bs-alert-border-color:var(--bs-warning-border-subtle);--bs-alert-link-color:var(--bs-warning-text-emphasis)}.alert-danger{--bs-alert-color:var(--bs-danger-text-emphasis);--bs-alert-bg:var(--bs-danger-bg-subtle);--bs-alert-border-color:var(--bs-danger-border-subtle);--bs-alert-link-color:var(--bs-danger-text-emphasis)}.alert-light{--bs-alert-color:var(--bs-light-text-emphasis);--bs-alert-bg:var(--bs-light-bg-subtle);--bs-alert-border-color:var(--bs-light-border-subtle);--bs-alert-link-color:var(--bs-light-text-emphasis)}.alert-dark{--bs-alert-color:var(--bs-dark-text-emphasis);--bs-alert-bg:var(--bs-dark-bg-subtle);--bs-alert-border-color:var(--bs-dark-border-subtle);--bs-alert-link-color:var(--bs-dark-text-emphasis)}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress,.progress-stacked{--bs-progress-height:1rem;--bs-progress-font-size:0.75rem;--bs-progress-bg:var(--bs-secondary-bg);--bs-progress-border-radius:var(--bs-border-radius);--bs-progress-box-shadow:var(--bs-box-shadow-inset);--bs-progress-bar-color:#fff;--bs-progress-bar-bg:#0d6efd;--bs-progress-bar-transition:width 0.6s ease;display:flex;height:var(--bs-progress-height);overflow:hidden;font-size:var(--bs-progress-font-size);background-color:var(--bs-progress-bg);border-radius:var(--bs-progress-border-radius)}.progress-bar{display:flex;flex-direction:column;justify-content:center;overflow:hidden;color:var(--bs-progress-bar-color);text-align:center;white-space:nowrap;background-color:var(--bs-progress-bar-bg);transition:var(--bs-progress-bar-transition)}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:var(--bs-progress-height) var(--bs-progress-height)}.progress-stacked>.progress{overflow:visible}.progress-stacked>.progress>.progress-bar{width:100%}.progress-bar-animated{animation:1s linear infinite progress-bar-stripes}@media (prefers-reduced-motion:reduce){.progress-bar-animated{animation:none}}.list-group{--bs-list-group-color:var(--bs-body-color);--bs-list-group-bg:var(--bs-body-bg);--bs-list-group-border-color:var(--bs-border-color);--bs-list-group-border-width:var(--bs-border-width);--bs-list-group-border-radius:var(--bs-border-radius);--bs-list-group-item-padding-x:1rem;--bs-list-group-item-padding-y:0.5rem;--bs-list-group-action-color:var(--bs-secondary-color);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-tertiary-bg);--bs-list-group-action-active-color:var(--bs-body-color);--bs-list-group-action-active-bg:var(--bs-secondary-bg);--bs-list-group-disabled-color:var(--bs-secondary-color);--bs-list-group-disabled-bg:var(--bs-body-bg);--bs-list-group-active-color:#fff;--bs-list-group-active-bg:#0d6efd;--bs-list-group-active-border-color:#0d6efd;display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:var(--bs-list-group-border-radius)}.list-group-numbered{list-style-type:none;counter-reset:section}.list-group-numbered>.list-group-item::before{content:counters(section, ".") ". ";counter-increment:section}.list-group-item-action{width:100%;color:var(--bs-list-group-action-color);text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:var(--bs-list-group-action-hover-color);text-decoration:none;background-color:var(--bs-list-group-action-hover-bg)}.list-group-item-action:active{color:var(--bs-list-group-action-active-color);background-color:var(--bs-list-group-action-active-bg)}.list-group-item{position:relative;display:block;padding:var(--bs-list-group-item-padding-y) var(--bs-list-group-item-padding-x);color:var(--bs-list-group-color);text-decoration:none;background-color:var(--bs-list-group-bg);border:var(--bs-list-group-border-width) solid var(--bs-list-group-border-color)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:var(--bs-list-group-disabled-color);pointer-events:none;background-color:var(--bs-list-group-disabled-bg)}.list-group-item.active{z-index:2;color:var(--bs-list-group-active-color);background-color:var(--bs-list-group-active-bg);border-color:var(--bs-list-group-active-border-color)}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:calc(-1 * var(--bs-list-group-border-width));border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}@media (min-width:576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media (min-width:768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media (min-width:992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media (min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}@media (min-width:1400px){.list-group-horizontal-xxl{flex-direction:row}.list-group-horizontal-xxl>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-xxl>.list-group-item:last-child:not(:first-child){border-top-right-radius:var(--bs-list-group-border-radius);border-bottom-left-radius:0}.list-group-horizontal-xxl>.list-group-item.active{margin-top:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item{border-top-width:var(--bs-list-group-border-width);border-left-width:0}.list-group-horizontal-xxl>.list-group-item+.list-group-item.active{margin-left:calc(-1 * var(--bs-list-group-border-width));border-left-width:var(--bs-list-group-border-width)}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 var(--bs-list-group-border-width)}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{--bs-list-group-color:var(--bs-primary-text-emphasis);--bs-list-group-bg:var(--bs-primary-bg-subtle);--bs-list-group-border-color:var(--bs-primary-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-primary-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-primary-border-subtle);--bs-list-group-active-color:var(--bs-primary-bg-subtle);--bs-list-group-active-bg:var(--bs-primary-text-emphasis);--bs-list-group-active-border-color:var(--bs-primary-text-emphasis)}.list-group-item-secondary{--bs-list-group-color:var(--bs-secondary-text-emphasis);--bs-list-group-bg:var(--bs-secondary-bg-subtle);--bs-list-group-border-color:var(--bs-secondary-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-secondary-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-secondary-border-subtle);--bs-list-group-active-color:var(--bs-secondary-bg-subtle);--bs-list-group-active-bg:var(--bs-secondary-text-emphasis);--bs-list-group-active-border-color:var(--bs-secondary-text-emphasis)}.list-group-item-success{--bs-list-group-color:var(--bs-success-text-emphasis);--bs-list-group-bg:var(--bs-success-bg-subtle);--bs-list-group-border-color:var(--bs-success-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-success-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-success-border-subtle);--bs-list-group-active-color:var(--bs-success-bg-subtle);--bs-list-group-active-bg:var(--bs-success-text-emphasis);--bs-list-group-active-border-color:var(--bs-success-text-emphasis)}.list-group-item-info{--bs-list-group-color:var(--bs-info-text-emphasis);--bs-list-group-bg:var(--bs-info-bg-subtle);--bs-list-group-border-color:var(--bs-info-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-info-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-info-border-subtle);--bs-list-group-active-color:var(--bs-info-bg-subtle);--bs-list-group-active-bg:var(--bs-info-text-emphasis);--bs-list-group-active-border-color:var(--bs-info-text-emphasis)}.list-group-item-warning{--bs-list-group-color:var(--bs-warning-text-emphasis);--bs-list-group-bg:var(--bs-warning-bg-subtle);--bs-list-group-border-color:var(--bs-warning-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-warning-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-warning-border-subtle);--bs-list-group-active-color:var(--bs-warning-bg-subtle);--bs-list-group-active-bg:var(--bs-warning-text-emphasis);--bs-list-group-active-border-color:var(--bs-warning-text-emphasis)}.list-group-item-danger{--bs-list-group-color:var(--bs-danger-text-emphasis);--bs-list-group-bg:var(--bs-danger-bg-subtle);--bs-list-group-border-color:var(--bs-danger-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-danger-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-danger-border-subtle);--bs-list-group-active-color:var(--bs-danger-bg-subtle);--bs-list-group-active-bg:var(--bs-danger-text-emphasis);--bs-list-group-active-border-color:var(--bs-danger-text-emphasis)}.list-group-item-light{--bs-list-group-color:var(--bs-light-text-emphasis);--bs-list-group-bg:var(--bs-light-bg-subtle);--bs-list-group-border-color:var(--bs-light-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-light-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-light-border-subtle);--bs-list-group-active-color:var(--bs-light-bg-subtle);--bs-list-group-active-bg:var(--bs-light-text-emphasis);--bs-list-group-active-border-color:var(--bs-light-text-emphasis)}.list-group-item-dark{--bs-list-group-color:var(--bs-dark-text-emphasis);--bs-list-group-bg:var(--bs-dark-bg-subtle);--bs-list-group-border-color:var(--bs-dark-border-subtle);--bs-list-group-action-hover-color:var(--bs-emphasis-color);--bs-list-group-action-hover-bg:var(--bs-dark-border-subtle);--bs-list-group-action-active-color:var(--bs-emphasis-color);--bs-list-group-action-active-bg:var(--bs-dark-border-subtle);--bs-list-group-active-color:var(--bs-dark-bg-subtle);--bs-list-group-active-bg:var(--bs-dark-text-emphasis);--bs-list-group-active-border-color:var(--bs-dark-text-emphasis)}.btn-close{--bs-btn-close-color:#000;--bs-btn-close-bg:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414z'/%3e%3c/svg%3e");--bs-btn-close-opacity:0.5;--bs-btn-close-hover-opacity:0.75;--bs-btn-close-focus-shadow:0 0 0 0.25rem rgba(13, 110, 253, 0.25);--bs-btn-close-focus-opacity:1;--bs-btn-close-disabled-opacity:0.25;--bs-btn-close-white-filter:invert(1) grayscale(100%) brightness(200%);box-sizing:content-box;width:1em;height:1em;padding:.25em .25em;color:var(--bs-btn-close-color);background:transparent var(--bs-btn-close-bg) center/1em auto no-repeat;border:0;border-radius:.375rem;opacity:var(--bs-btn-close-opacity)}.btn-close:hover{color:var(--bs-btn-close-color);text-decoration:none;opacity:var(--bs-btn-close-hover-opacity)}.btn-close:focus{outline:0;box-shadow:var(--bs-btn-close-focus-shadow);opacity:var(--bs-btn-close-focus-opacity)}.btn-close.disabled,.btn-close:disabled{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;opacity:var(--bs-btn-close-disabled-opacity)}.btn-close-white{filter:var(--bs-btn-close-white-filter)}[data-bs-theme=dark] .btn-close{filter:var(--bs-btn-close-white-filter)}.toast{--bs-toast-zindex:1090;--bs-toast-padding-x:0.75rem;--bs-toast-padding-y:0.5rem;--bs-toast-spacing:1.5rem;--bs-toast-max-width:350px;--bs-toast-font-size:0.875rem;--bs-toast-color: ;--bs-toast-bg:rgba(var(--bs-body-bg-rgb), 0.85);--bs-toast-border-width:var(--bs-border-width);--bs-toast-border-color:var(--bs-border-color-translucent);--bs-toast-border-radius:var(--bs-border-radius);--bs-toast-box-shadow:var(--bs-box-shadow);--bs-toast-header-color:var(--bs-secondary-color);--bs-toast-header-bg:rgba(var(--bs-body-bg-rgb), 0.85);--bs-toast-header-border-color:var(--bs-border-color-translucent);width:var(--bs-toast-max-width);max-width:100%;font-size:var(--bs-toast-font-size);color:var(--bs-toast-color);pointer-events:auto;background-color:var(--bs-toast-bg);background-clip:padding-box;border:var(--bs-toast-border-width) solid var(--bs-toast-border-color);box-shadow:var(--bs-toast-box-shadow);border-radius:var(--bs-toast-border-radius)}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{--bs-toast-zindex:1090;position:absolute;z-index:var(--bs-toast-zindex);width:-webkit-max-content;width:-moz-max-content;width:max-content;max-width:100%;pointer-events:none}.toast-container>:not(:last-child){margin-bottom:var(--bs-toast-spacing)}.toast-header{display:flex;align-items:center;padding:var(--bs-toast-padding-y) var(--bs-toast-padding-x);color:var(--bs-toast-header-color);background-color:var(--bs-toast-header-bg);background-clip:padding-box;border-bottom:var(--bs-toast-border-width) solid var(--bs-toast-header-border-color);border-top-left-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width));border-top-right-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width))}.toast-header .btn-close{margin-right:calc(-.5 * var(--bs-toast-padding-x));margin-left:var(--bs-toast-padding-x)}.toast-body{padding:var(--bs-toast-padding-x);word-wrap:break-word}.modal{--bs-modal-zindex:1055;--bs-modal-width:500px;--bs-modal-padding:1rem;--bs-modal-margin:0.5rem;--bs-modal-color: ;--bs-modal-bg:var(--bs-body-bg);--bs-modal-border-color:var(--bs-border-color-translucent);--bs-modal-border-width:var(--bs-border-width);--bs-modal-border-radius:var(--bs-border-radius-lg);--bs-modal-box-shadow:var(--bs-box-shadow-sm);--bs-modal-inner-border-radius:calc(var(--bs-border-radius-lg) - (var(--bs-border-width)));--bs-modal-header-padding-x:1rem;--bs-modal-header-padding-y:1rem;--bs-modal-header-padding:1rem 1rem;--bs-modal-header-border-color:var(--bs-border-color);--bs-modal-header-border-width:var(--bs-border-width);--bs-modal-title-line-height:1.5;--bs-modal-footer-gap:0.5rem;--bs-modal-footer-bg: ;--bs-modal-footer-border-color:var(--bs-border-color);--bs-modal-footer-border-width:var(--bs-border-width);position:fixed;top:0;left:0;z-index:var(--bs-modal-zindex);display:none;width:100%;height:100%;overflow-x:hidden;overflow-y:auto;outline:0}.modal-dialog{position:relative;width:auto;margin:var(--bs-modal-margin);pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translate(0,-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - var(--bs-modal-margin) * 2)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - var(--bs-modal-margin) * 2)}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;color:var(--bs-modal-color);pointer-events:auto;background-color:var(--bs-modal-bg);background-clip:padding-box;border:var(--bs-modal-border-width) solid var(--bs-modal-border-color);border-radius:var(--bs-modal-border-radius);outline:0}.modal-backdrop{--bs-backdrop-zindex:1050;--bs-backdrop-bg:#000;--bs-backdrop-opacity:0.5;position:fixed;top:0;left:0;z-index:var(--bs-backdrop-zindex);width:100vw;height:100vh;background-color:var(--bs-backdrop-bg)}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:var(--bs-backdrop-opacity)}.modal-header{display:flex;flex-shrink:0;align-items:center;justify-content:space-between;padding:var(--bs-modal-header-padding);border-bottom:var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color);border-top-left-radius:var(--bs-modal-inner-border-radius);border-top-right-radius:var(--bs-modal-inner-border-radius)}.modal-header .btn-close{padding:calc(var(--bs-modal-header-padding-y) * .5) calc(var(--bs-modal-header-padding-x) * .5);margin:calc(-.5 * var(--bs-modal-header-padding-y)) calc(-.5 * var(--bs-modal-header-padding-x)) calc(-.5 * var(--bs-modal-header-padding-y)) auto}.modal-title{margin-bottom:0;line-height:var(--bs-modal-title-line-height)}.modal-body{position:relative;flex:1 1 auto;padding:var(--bs-modal-padding)}.modal-footer{display:flex;flex-shrink:0;flex-wrap:wrap;align-items:center;justify-content:flex-end;padding:calc(var(--bs-modal-padding) - var(--bs-modal-footer-gap) * .5);background-color:var(--bs-modal-footer-bg);border-top:var(--bs-modal-footer-border-width) solid var(--bs-modal-footer-border-color);border-bottom-right-radius:var(--bs-modal-inner-border-radius);border-bottom-left-radius:var(--bs-modal-inner-border-radius)}.modal-footer>*{margin:calc(var(--bs-modal-footer-gap) * .5)}@media (min-width:576px){.modal{--bs-modal-margin:1.75rem;--bs-modal-box-shadow:var(--bs-box-shadow)}.modal-dialog{max-width:var(--bs-modal-width);margin-right:auto;margin-left:auto}.modal-sm{--bs-modal-width:300px}}@media (min-width:992px){.modal-lg,.modal-xl{--bs-modal-width:800px}}@media (min-width:1200px){.modal-xl{--bs-modal-width:1140px}}.modal-fullscreen{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen .modal-footer,.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}@media (max-width:575.98px){.modal-fullscreen-sm-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-sm-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-sm-down .modal-footer,.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}}@media (max-width:767.98px){.modal-fullscreen-md-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-md-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-md-down .modal-footer,.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}}@media (max-width:991.98px){.modal-fullscreen-lg-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-lg-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-lg-down .modal-footer,.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}}@media (max-width:1199.98px){.modal-fullscreen-xl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xl-down .modal-footer,.modal-fullscreen-xl-down .modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}}@media (max-width:1399.98px){.modal-fullscreen-xxl-down{width:100vw;max-width:none;height:100%;margin:0}.modal-fullscreen-xxl-down .modal-content{height:100%;border:0;border-radius:0}.modal-fullscreen-xxl-down .modal-footer,.modal-fullscreen-xxl-down .modal-header{border-radius:0}.modal-fullscreen-xxl-down .modal-body{overflow-y:auto}}.tooltip{--bs-tooltip-zindex:1080;--bs-tooltip-max-width:200px;--bs-tooltip-padding-x:0.5rem;--bs-tooltip-padding-y:0.25rem;--bs-tooltip-margin: ;--bs-tooltip-font-size:0.875rem;--bs-tooltip-color:var(--bs-body-bg);--bs-tooltip-bg:var(--bs-emphasis-color);--bs-tooltip-border-radius:var(--bs-border-radius);--bs-tooltip-opacity:0.9;--bs-tooltip-arrow-width:0.8rem;--bs-tooltip-arrow-height:0.4rem;z-index:var(--bs-tooltip-zindex);display:block;margin:var(--bs-tooltip-margin);font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-tooltip-font-size);word-wrap:break-word;opacity:0}.tooltip.show{opacity:var(--bs-tooltip-opacity)}.tooltip .tooltip-arrow{display:block;width:var(--bs-tooltip-arrow-width);height:var(--bs-tooltip-arrow-height)}.tooltip .tooltip-arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow,.bs-tooltip-top .tooltip-arrow{bottom:calc(-1 * var(--bs-tooltip-arrow-height))}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow::before,.bs-tooltip-top .tooltip-arrow::before{top:-1px;border-width:var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width) * .5) 0;border-top-color:var(--bs-tooltip-bg)}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow,.bs-tooltip-end .tooltip-arrow{left:calc(-1 * var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow::before,.bs-tooltip-end .tooltip-arrow::before{right:-1px;border-width:calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width) * .5) 0;border-right-color:var(--bs-tooltip-bg)}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow,.bs-tooltip-bottom .tooltip-arrow{top:calc(-1 * var(--bs-tooltip-arrow-height))}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow::before,.bs-tooltip-bottom .tooltip-arrow::before{bottom:-1px;border-width:0 calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height);border-bottom-color:var(--bs-tooltip-bg)}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow,.bs-tooltip-start .tooltip-arrow{right:calc(-1 * var(--bs-tooltip-arrow-height));width:var(--bs-tooltip-arrow-height);height:var(--bs-tooltip-arrow-width)}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow::before,.bs-tooltip-start .tooltip-arrow::before{left:-1px;border-width:calc(var(--bs-tooltip-arrow-width) * .5) 0 calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height);border-left-color:var(--bs-tooltip-bg)}.tooltip-inner{max-width:var(--bs-tooltip-max-width);padding:var(--bs-tooltip-padding-y) var(--bs-tooltip-padding-x);color:var(--bs-tooltip-color);text-align:center;background-color:var(--bs-tooltip-bg);border-radius:var(--bs-tooltip-border-radius)}.popover{--bs-popover-zindex:1070;--bs-popover-max-width:276px;--bs-popover-font-size:0.875rem;--bs-popover-bg:var(--bs-body-bg);--bs-popover-border-width:var(--bs-border-width);--bs-popover-border-color:var(--bs-border-color-translucent);--bs-popover-border-radius:var(--bs-border-radius-lg);--bs-popover-inner-border-radius:calc(var(--bs-border-radius-lg) - var(--bs-border-width));--bs-popover-box-shadow:var(--bs-box-shadow);--bs-popover-header-padding-x:1rem;--bs-popover-header-padding-y:0.5rem;--bs-popover-header-font-size:1rem;--bs-popover-header-color:inherit;--bs-popover-header-bg:var(--bs-secondary-bg);--bs-popover-body-padding-x:1rem;--bs-popover-body-padding-y:1rem;--bs-popover-body-color:var(--bs-body-color);--bs-popover-arrow-width:1rem;--bs-popover-arrow-height:0.5rem;--bs-popover-arrow-border:var(--bs-popover-border-color);z-index:var(--bs-popover-zindex);display:block;max-width:var(--bs-popover-max-width);font-family:var(--bs-font-sans-serif);font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;white-space:normal;word-spacing:normal;line-break:auto;font-size:var(--bs-popover-font-size);word-wrap:break-word;background-color:var(--bs-popover-bg);background-clip:padding-box;border:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-radius:var(--bs-popover-border-radius)}.popover .popover-arrow{display:block;width:var(--bs-popover-arrow-width);height:var(--bs-popover-arrow-height)}.popover .popover-arrow::after,.popover .popover-arrow::before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid;border-width:0}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow,.bs-popover-top>.popover-arrow{bottom:calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::after,.bs-popover-top>.popover-arrow::before{border-width:var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width) * .5) 0}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::before,.bs-popover-top>.popover-arrow::before{bottom:0;border-top-color:var(--bs-popover-arrow-border)}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow::after,.bs-popover-top>.popover-arrow::after{bottom:var(--bs-popover-border-width);border-top-color:var(--bs-popover-bg)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow,.bs-popover-end>.popover-arrow{left:calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::after,.bs-popover-end>.popover-arrow::before{border-width:calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width) * .5) 0}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::before,.bs-popover-end>.popover-arrow::before{left:0;border-right-color:var(--bs-popover-arrow-border)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow::after,.bs-popover-end>.popover-arrow::after{left:var(--bs-popover-border-width);border-right-color:var(--bs-popover-bg)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow,.bs-popover-bottom>.popover-arrow{top:calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width))}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::after,.bs-popover-bottom>.popover-arrow::before{border-width:0 calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::before,.bs-popover-bottom>.popover-arrow::before{top:0;border-bottom-color:var(--bs-popover-arrow-border)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow::after,.bs-popover-bottom>.popover-arrow::after{top:var(--bs-popover-border-width);border-bottom-color:var(--bs-popover-bg)}.bs-popover-auto[data-popper-placement^=bottom] .popover-header::before,.bs-popover-bottom .popover-header::before{position:absolute;top:0;left:50%;display:block;width:var(--bs-popover-arrow-width);margin-left:calc(-.5 * var(--bs-popover-arrow-width));content:"";border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-header-bg)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow,.bs-popover-start>.popover-arrow{right:calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height);height:var(--bs-popover-arrow-width)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::after,.bs-popover-start>.popover-arrow::before{border-width:calc(var(--bs-popover-arrow-width) * .5) 0 calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::before,.bs-popover-start>.popover-arrow::before{right:0;border-left-color:var(--bs-popover-arrow-border)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow::after,.bs-popover-start>.popover-arrow::after{right:var(--bs-popover-border-width);border-left-color:var(--bs-popover-bg)}.popover-header{padding:var(--bs-popover-header-padding-y) var(--bs-popover-header-padding-x);margin-bottom:0;font-size:var(--bs-popover-header-font-size);color:var(--bs-popover-header-color);background-color:var(--bs-popover-header-bg);border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-top-left-radius:var(--bs-popover-inner-border-radius);border-top-right-radius:var(--bs-popover-inner-border-radius)}.popover-header:empty{display:none}.popover-body{padding:var(--bs-popover-body-padding-y) var(--bs-popover-body-padding-x);color:var(--bs-popover-body-color)}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-end,.carousel-item-next:not(.carousel-item-start){transform:translateX(100%)}.active.carousel-item-start,.carousel-item-prev:not(.carousel-item-end){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;padding:0;color:#fff;text-align:center;background:0 0;border:0;opacity:.5;transition:opacity .15s ease}@media (prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:2rem;height:2rem;background-repeat:no-repeat;background-position:50%;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:2;display:flex;justify-content:center;padding:0;margin-right:15%;margin-bottom:1rem;margin-left:15%}.carousel-indicators [data-bs-target]{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;padding:0;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border:0;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media (prefers-reduced-motion:reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:1.25rem;left:15%;padding-top:1.25rem;padding-bottom:1.25rem;color:#fff;text-align:center}.carousel-dark .carousel-control-next-icon,.carousel-dark .carousel-control-prev-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}[data-bs-theme=dark] .carousel .carousel-control-next-icon,[data-bs-theme=dark] .carousel .carousel-control-prev-icon,[data-bs-theme=dark].carousel .carousel-control-next-icon,[data-bs-theme=dark].carousel .carousel-control-prev-icon{filter:invert(1) grayscale(100)}[data-bs-theme=dark] .carousel .carousel-indicators [data-bs-target],[data-bs-theme=dark].carousel .carousel-indicators [data-bs-target]{background-color:#000}[data-bs-theme=dark] .carousel .carousel-caption,[data-bs-theme=dark].carousel .carousel-caption{color:#000}.spinner-border,.spinner-grow{display:inline-block;width:var(--bs-spinner-width);height:var(--bs-spinner-height);vertical-align:var(--bs-spinner-vertical-align);border-radius:50%;animation:var(--bs-spinner-animation-speed) linear infinite var(--bs-spinner-animation-name)}@keyframes spinner-border{to{transform:rotate(360deg)}}.spinner-border{--bs-spinner-width:2rem;--bs-spinner-height:2rem;--bs-spinner-vertical-align:-0.125em;--bs-spinner-border-width:0.25em;--bs-spinner-animation-speed:0.75s;--bs-spinner-animation-name:spinner-border;border:var(--bs-spinner-border-width) solid currentcolor;border-right-color:transparent}.spinner-border-sm{--bs-spinner-width:1rem;--bs-spinner-height:1rem;--bs-spinner-border-width:0.2em}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{--bs-spinner-width:2rem;--bs-spinner-height:2rem;--bs-spinner-vertical-align:-0.125em;--bs-spinner-animation-speed:0.75s;--bs-spinner-animation-name:spinner-grow;background-color:currentcolor;opacity:0}.spinner-grow-sm{--bs-spinner-width:1rem;--bs-spinner-height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{--bs-spinner-animation-speed:1.5s}}.offcanvas,.offcanvas-lg,.offcanvas-md,.offcanvas-sm,.offcanvas-xl,.offcanvas-xxl{--bs-offcanvas-zindex:1045;--bs-offcanvas-width:400px;--bs-offcanvas-height:30vh;--bs-offcanvas-padding-x:1rem;--bs-offcanvas-padding-y:1rem;--bs-offcanvas-color:var(--bs-body-color);--bs-offcanvas-bg:var(--bs-body-bg);--bs-offcanvas-border-width:var(--bs-border-width);--bs-offcanvas-border-color:var(--bs-border-color-translucent);--bs-offcanvas-box-shadow:var(--bs-box-shadow-sm);--bs-offcanvas-transition:transform 0.3s ease-in-out;--bs-offcanvas-title-line-height:1.5}@media (max-width:575.98px){.offcanvas-sm{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media (max-width:575.98px) and (prefers-reduced-motion:reduce){.offcanvas-sm{transition:none}}@media (max-width:575.98px){.offcanvas-sm.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-sm.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-sm.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-sm.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-sm.show:not(.hiding),.offcanvas-sm.showing{transform:none}.offcanvas-sm.hiding,.offcanvas-sm.show,.offcanvas-sm.showing{visibility:visible}}@media (min-width:576px){.offcanvas-sm{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-sm .offcanvas-header{display:none}.offcanvas-sm .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:transparent!important}}@media (max-width:767.98px){.offcanvas-md{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media (max-width:767.98px) and (prefers-reduced-motion:reduce){.offcanvas-md{transition:none}}@media (max-width:767.98px){.offcanvas-md.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-md.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-md.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-md.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-md.show:not(.hiding),.offcanvas-md.showing{transform:none}.offcanvas-md.hiding,.offcanvas-md.show,.offcanvas-md.showing{visibility:visible}}@media (min-width:768px){.offcanvas-md{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-md .offcanvas-header{display:none}.offcanvas-md .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:transparent!important}}@media (max-width:991.98px){.offcanvas-lg{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media (max-width:991.98px) and (prefers-reduced-motion:reduce){.offcanvas-lg{transition:none}}@media (max-width:991.98px){.offcanvas-lg.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-lg.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-lg.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-lg.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-lg.show:not(.hiding),.offcanvas-lg.showing{transform:none}.offcanvas-lg.hiding,.offcanvas-lg.show,.offcanvas-lg.showing{visibility:visible}}@media (min-width:992px){.offcanvas-lg{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-lg .offcanvas-header{display:none}.offcanvas-lg .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:transparent!important}}@media (max-width:1199.98px){.offcanvas-xl{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media (max-width:1199.98px) and (prefers-reduced-motion:reduce){.offcanvas-xl{transition:none}}@media (max-width:1199.98px){.offcanvas-xl.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-xl.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-xl.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-xl.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xl.show:not(.hiding),.offcanvas-xl.showing{transform:none}.offcanvas-xl.hiding,.offcanvas-xl.show,.offcanvas-xl.showing{visibility:visible}}@media (min-width:1200px){.offcanvas-xl{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-xl .offcanvas-header{display:none}.offcanvas-xl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:transparent!important}}@media (max-width:1399.98px){.offcanvas-xxl{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}}@media (max-width:1399.98px) and (prefers-reduced-motion:reduce){.offcanvas-xxl{transition:none}}@media (max-width:1399.98px){.offcanvas-xxl.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas-xxl.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas-xxl.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas-xxl.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xxl.show:not(.hiding),.offcanvas-xxl.showing{transform:none}.offcanvas-xxl.hiding,.offcanvas-xxl.show,.offcanvas-xxl.showing{visibility:visible}}@media (min-width:1400px){.offcanvas-xxl{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-xxl .offcanvas-header{display:none}.offcanvas-xxl .offcanvas-body{display:flex;flex-grow:0;padding:0;overflow-y:visible;background-color:transparent!important}}.offcanvas{position:fixed;bottom:0;z-index:var(--bs-offcanvas-zindex);display:flex;flex-direction:column;max-width:100%;color:var(--bs-offcanvas-color);visibility:hidden;background-color:var(--bs-offcanvas-bg);background-clip:padding-box;outline:0;transition:var(--bs-offcanvas-transition)}@media (prefers-reduced-motion:reduce){.offcanvas{transition:none}}.offcanvas.offcanvas-start{top:0;left:0;width:var(--bs-offcanvas-width);border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(-100%)}.offcanvas.offcanvas-end{top:0;right:0;width:var(--bs-offcanvas-width);border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateX(100%)}.offcanvas.offcanvas-top{top:0;right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(-100%)}.offcanvas.offcanvas-bottom{right:0;left:0;height:var(--bs-offcanvas-height);max-height:100%;border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas.show:not(.hiding),.offcanvas.showing{transform:none}.offcanvas.hiding,.offcanvas.show,.offcanvas.showing{visibility:visible}.offcanvas-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{display:flex;align-items:center;justify-content:space-between;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x)}.offcanvas-header .btn-close{padding:calc(var(--bs-offcanvas-padding-y) * .5) calc(var(--bs-offcanvas-padding-x) * .5);margin-top:calc(-.5 * var(--bs-offcanvas-padding-y));margin-right:calc(-.5 * var(--bs-offcanvas-padding-x));margin-bottom:calc(-.5 * var(--bs-offcanvas-padding-y))}.offcanvas-title{margin-bottom:0;line-height:var(--bs-offcanvas-title-line-height)}.offcanvas-body{flex-grow:1;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x);overflow-y:auto}.placeholder{display:inline-block;min-height:1em;vertical-align:middle;cursor:wait;background-color:currentcolor;opacity:.5}.placeholder.btn::before{display:inline-block;content:""}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow .placeholder{animation:placeholder-glow 2s ease-in-out infinite}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{-webkit-mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,0.8) 75%,#000 95%);mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,0.8) 75%,#000 95%);-webkit-mask-size:200% 100%;mask-size:200% 100%;animation:placeholder-wave 2s linear infinite}@keyframes placeholder-wave{100%{-webkit-mask-position:-200% 0%;mask-position:-200% 0%}}.clearfix::after{display:block;clear:both;content:""}.text-bg-primary{color:#fff!important;background-color:RGBA(var(--bs-primary-rgb),var(--bs-bg-opacity,1))!important}.text-bg-secondary{color:#fff!important;background-color:RGBA(var(--bs-secondary-rgb),var(--bs-bg-opacity,1))!important}.text-bg-success{color:#fff!important;background-color:RGBA(var(--bs-success-rgb),var(--bs-bg-opacity,1))!important}.text-bg-info{color:#000!important;background-color:RGBA(var(--bs-info-rgb),var(--bs-bg-opacity,1))!important}.text-bg-warning{color:#000!important;background-color:RGBA(var(--bs-warning-rgb),var(--bs-bg-opacity,1))!important}.text-bg-danger{color:#fff!important;background-color:RGBA(var(--bs-danger-rgb),var(--bs-bg-opacity,1))!important}.text-bg-light{color:#000!important;background-color:RGBA(var(--bs-light-rgb),var(--bs-bg-opacity,1))!important}.text-bg-dark{color:#fff!important;background-color:RGBA(var(--bs-dark-rgb),var(--bs-bg-opacity,1))!important}.link-primary{color:RGBA(var(--bs-primary-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-primary-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-primary-rgb),var(--bs-link-underline-opacity,1))!important}.link-primary:focus,.link-primary:hover{color:RGBA(10,88,202,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(10,88,202,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(10,88,202,var(--bs-link-underline-opacity,1))!important}.link-secondary{color:RGBA(var(--bs-secondary-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-secondary-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-secondary-rgb),var(--bs-link-underline-opacity,1))!important}.link-secondary:focus,.link-secondary:hover{color:RGBA(86,94,100,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(86,94,100,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(86,94,100,var(--bs-link-underline-opacity,1))!important}.link-success{color:RGBA(var(--bs-success-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-success-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-success-rgb),var(--bs-link-underline-opacity,1))!important}.link-success:focus,.link-success:hover{color:RGBA(20,108,67,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(20,108,67,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(20,108,67,var(--bs-link-underline-opacity,1))!important}.link-info{color:RGBA(var(--bs-info-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-info-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-info-rgb),var(--bs-link-underline-opacity,1))!important}.link-info:focus,.link-info:hover{color:RGBA(61,213,243,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(61,213,243,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(61,213,243,var(--bs-link-underline-opacity,1))!important}.link-warning{color:RGBA(var(--bs-warning-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-warning-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-warning-rgb),var(--bs-link-underline-opacity,1))!important}.link-warning:focus,.link-warning:hover{color:RGBA(255,205,57,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(255,205,57,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(255,205,57,var(--bs-link-underline-opacity,1))!important}.link-danger{color:RGBA(var(--bs-danger-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-danger-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-danger-rgb),var(--bs-link-underline-opacity,1))!important}.link-danger:focus,.link-danger:hover{color:RGBA(176,42,55,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(176,42,55,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(176,42,55,var(--bs-link-underline-opacity,1))!important}.link-light{color:RGBA(var(--bs-light-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-light-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-light-rgb),var(--bs-link-underline-opacity,1))!important}.link-light:focus,.link-light:hover{color:RGBA(249,250,251,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(249,250,251,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(249,250,251,var(--bs-link-underline-opacity,1))!important}.link-dark{color:RGBA(var(--bs-dark-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-dark-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-dark-rgb),var(--bs-link-underline-opacity,1))!important}.link-dark:focus,.link-dark:hover{color:RGBA(26,30,33,var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(26,30,33,var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(26,30,33,var(--bs-link-underline-opacity,1))!important}.link-body-emphasis{color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-opacity,1))!important;-webkit-text-decoration-color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-underline-opacity,1))!important}.link-body-emphasis:focus,.link-body-emphasis:hover{color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-opacity,.75))!important;-webkit-text-decoration-color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-underline-opacity,0.75))!important;text-decoration-color:RGBA(var(--bs-emphasis-color-rgb),var(--bs-link-underline-opacity,0.75))!important}.focus-ring:focus{outline:0;box-shadow:var(--bs-focus-ring-x,0) var(--bs-focus-ring-y,0) var(--bs-focus-ring-blur,0) var(--bs-focus-ring-width) var(--bs-focus-ring-color)}.icon-link{display:inline-flex;gap:.375rem;align-items:center;-webkit-text-decoration-color:rgba(var(--bs-link-color-rgb),var(--bs-link-opacity,0.5));text-decoration-color:rgba(var(--bs-link-color-rgb),var(--bs-link-opacity,0.5));text-underline-offset:0.25em;-webkit-backface-visibility:hidden;backface-visibility:hidden}.icon-link>.bi{flex-shrink:0;width:1em;height:1em;fill:currentcolor;transition:.2s ease-in-out transform}@media (prefers-reduced-motion:reduce){.icon-link>.bi{transition:none}}.icon-link-hover:focus-visible>.bi,.icon-link-hover:hover>.bi{transform:var(--bs-icon-link-transform,translate3d(.25em,0,0))}.ratio{position:relative;width:100%}.ratio::before{display:block;padding-top:var(--bs-aspect-ratio);content:""}.ratio>*{position:absolute;top:0;left:0;width:100%;height:100%}.ratio-1x1{--bs-aspect-ratio:100%}.ratio-4x3{--bs-aspect-ratio:75%}.ratio-16x9{--bs-aspect-ratio:56.25%}.ratio-21x9{--bs-aspect-ratio:42.8571428571%}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}@media (min-width:576px){.sticky-sm-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-sm-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}}@media (min-width:768px){.sticky-md-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-md-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}}@media (min-width:992px){.sticky-lg-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-lg-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}}@media (min-width:1200px){.sticky-xl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-xl-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}}@media (min-width:1400px){.sticky-xxl-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}.sticky-xxl-bottom{position:-webkit-sticky;position:sticky;bottom:0;z-index:1020}}.hstack{display:flex;flex-direction:row;align-items:center;align-self:stretch}.vstack{display:flex;flex:1 1 auto;flex-direction:column;align-self:stretch}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){width:1px!important;height:1px!important;padding:0!important;margin:-1px!important;overflow:hidden!important;clip:rect(0,0,0,0)!important;white-space:nowrap!important;border:0!important}.visually-hidden-focusable:not(:focus):not(:focus-within):not(caption),.visually-hidden:not(caption){position:absolute!important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{display:inline-block;align-self:stretch;width:var(--bs-border-width);min-height:1em;background-color:currentcolor;opacity:.25}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.float-start{float:left!important}.float-end{float:right!important}.float-none{float:none!important}.object-fit-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-none{-o-object-fit:none!important;object-fit:none!important}.opacity-0{opacity:0!important}.opacity-25{opacity:.25!important}.opacity-50{opacity:.5!important}.opacity-75{opacity:.75!important}.opacity-100{opacity:1!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.overflow-visible{overflow:visible!important}.overflow-scroll{overflow:scroll!important}.overflow-x-auto{overflow-x:auto!important}.overflow-x-hidden{overflow-x:hidden!important}.overflow-x-visible{overflow-x:visible!important}.overflow-x-scroll{overflow-x:scroll!important}.overflow-y-auto{overflow-y:auto!important}.overflow-y-hidden{overflow-y:hidden!important}.overflow-y-visible{overflow-y:visible!important}.overflow-y-scroll{overflow-y:scroll!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-inline-grid{display:inline-grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.shadow{box-shadow:var(--bs-box-shadow)!important}.shadow-sm{box-shadow:var(--bs-box-shadow-sm)!important}.shadow-lg{box-shadow:var(--bs-box-shadow-lg)!important}.shadow-none{box-shadow:none!important}.focus-ring-primary{--bs-focus-ring-color:rgba(var(--bs-primary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-secondary{--bs-focus-ring-color:rgba(var(--bs-secondary-rgb), var(--bs-focus-ring-opacity))}.focus-ring-success{--bs-focus-ring-color:rgba(var(--bs-success-rgb), var(--bs-focus-ring-opacity))}.focus-ring-info{--bs-focus-ring-color:rgba(var(--bs-info-rgb), var(--bs-focus-ring-opacity))}.focus-ring-warning{--bs-focus-ring-color:rgba(var(--bs-warning-rgb), var(--bs-focus-ring-opacity))}.focus-ring-danger{--bs-focus-ring-color:rgba(var(--bs-danger-rgb), var(--bs-focus-ring-opacity))}.focus-ring-light{--bs-focus-ring-color:rgba(var(--bs-light-rgb), var(--bs-focus-ring-opacity))}.focus-ring-dark{--bs-focus-ring-color:rgba(var(--bs-dark-rgb), var(--bs-focus-ring-opacity))}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:-webkit-sticky!important;position:sticky!important}.top-0{top:0!important}.top-50{top:50%!important}.top-100{top:100%!important}.bottom-0{bottom:0!important}.bottom-50{bottom:50%!important}.bottom-100{bottom:100%!important}.start-0{left:0!important}.start-50{left:50%!important}.start-100{left:100%!important}.end-0{right:0!important}.end-50{right:50%!important}.end-100{right:100%!important}.translate-middle{transform:translate(-50%,-50%)!important}.translate-middle-x{transform:translateX(-50%)!important}.translate-middle-y{transform:translateY(-50%)!important}.border{border:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-0{border:0!important}.border-top{border-top:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-top-0{border-top:0!important}.border-end{border-right:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-end-0{border-right:0!important}.border-bottom{border-bottom:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-bottom-0{border-bottom:0!important}.border-start{border-left:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-start-0{border-left:0!important}.border-primary{--bs-border-opacity:1;border-color:rgba(var(--bs-primary-rgb),var(--bs-border-opacity))!important}.border-secondary{--bs-border-opacity:1;border-color:rgba(var(--bs-secondary-rgb),var(--bs-border-opacity))!important}.border-success{--bs-border-opacity:1;border-color:rgba(var(--bs-success-rgb),var(--bs-border-opacity))!important}.border-info{--bs-border-opacity:1;border-color:rgba(var(--bs-info-rgb),var(--bs-border-opacity))!important}.border-warning{--bs-border-opacity:1;border-color:rgba(var(--bs-warning-rgb),var(--bs-border-opacity))!important}.border-danger{--bs-border-opacity:1;border-color:rgba(var(--bs-danger-rgb),var(--bs-border-opacity))!important}.border-light{--bs-border-opacity:1;border-color:rgba(var(--bs-light-rgb),var(--bs-border-opacity))!important}.border-dark{--bs-border-opacity:1;border-color:rgba(var(--bs-dark-rgb),var(--bs-border-opacity))!important}.border-black{--bs-border-opacity:1;border-color:rgba(var(--bs-black-rgb),var(--bs-border-opacity))!important}.border-white{--bs-border-opacity:1;border-color:rgba(var(--bs-white-rgb),var(--bs-border-opacity))!important}.border-primary-subtle{border-color:var(--bs-primary-border-subtle)!important}.border-secondary-subtle{border-color:var(--bs-secondary-border-subtle)!important}.border-success-subtle{border-color:var(--bs-success-border-subtle)!important}.border-info-subtle{border-color:var(--bs-info-border-subtle)!important}.border-warning-subtle{border-color:var(--bs-warning-border-subtle)!important}.border-danger-subtle{border-color:var(--bs-danger-border-subtle)!important}.border-light-subtle{border-color:var(--bs-light-border-subtle)!important}.border-dark-subtle{border-color:var(--bs-dark-border-subtle)!important}.border-1{border-width:1px!important}.border-2{border-width:2px!important}.border-3{border-width:3px!important}.border-4{border-width:4px!important}.border-5{border-width:5px!important}.border-opacity-10{--bs-border-opacity:0.1}.border-opacity-25{--bs-border-opacity:0.25}.border-opacity-50{--bs-border-opacity:0.5}.border-opacity-75{--bs-border-opacity:0.75}.border-opacity-100{--bs-border-opacity:1}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.mw-100{max-width:100%!important}.vw-100{width:100vw!important}.min-vw-100{min-width:100vw!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mh-100{max-height:100%!important}.vh-100{height:100vh!important}.min-vh-100{min-height:100vh!important}.flex-fill{flex:1 1 auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!important}.mx-0{margin-right:0!important;margin-left:0!important}.mx-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-3{margin-right:1rem!important;margin-left:1rem!important}.mx-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-5{margin-right:3rem!important;margin-left:3rem!important}.mx-auto{margin-right:auto!important;margin-left:auto!important}.my-0{margin-top:0!important;margin-bottom:0!important}.my-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-right:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-right:0!important;padding-left:0!important}.px-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-3{padding-right:1rem!important;padding-left:1rem!important}.px-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-5{padding-right:3rem!important;padding-left:3rem!important}.py-0{padding-top:0!important;padding-bottom:0!important}.py-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5rem!important}.ps-5{padding-left:3rem!important}.gap-0{gap:0!important}.gap-1{gap:.25rem!important}.gap-2{gap:.5rem!important}.gap-3{gap:1rem!important}.gap-4{gap:1.5rem!important}.gap-5{gap:3rem!important}.row-gap-0{row-gap:0!important}.row-gap-1{row-gap:.25rem!important}.row-gap-2{row-gap:.5rem!important}.row-gap-3{row-gap:1rem!important}.row-gap-4{row-gap:1.5rem!important}.row-gap-5{row-gap:3rem!important}.column-gap-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.font-monospace{font-family:var(--bs-font-monospace)!important}.fs-1{font-size:calc(1.375rem + 1.5vw)!important}.fs-2{font-size:calc(1.325rem + .9vw)!important}.fs-3{font-size:calc(1.3rem + .6vw)!important}.fs-4{font-size:calc(1.275rem + .3vw)!important}.fs-5{font-size:1.25rem!important}.fs-6{font-size:1rem!important}.fst-italic{font-style:italic!important}.fst-normal{font-style:normal!important}.fw-lighter{font-weight:lighter!important}.fw-light{font-weight:300!important}.fw-normal{font-weight:400!important}.fw-medium{font-weight:500!important}.fw-semibold{font-weight:600!important}.fw-bold{font-weight:700!important}.fw-bolder{font-weight:bolder!important}.lh-1{line-height:1!important}.lh-sm{line-height:1.25!important}.lh-base{line-height:1.5!important}.lh-lg{line-height:2!important}.text-start{text-align:left!important}.text-end{text-align:right!important}.text-center{text-align:center!important}.text-decoration-none{text-decoration:none!important}.text-decoration-underline{text-decoration:underline!important}.text-decoration-line-through{text-decoration:line-through!important}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-break{word-wrap:break-word!important;word-break:break-word!important}.text-primary{--bs-text-opacity:1;color:rgba(var(--bs-primary-rgb),var(--bs-text-opacity))!important}.text-secondary{--bs-text-opacity:1;color:rgba(var(--bs-secondary-rgb),var(--bs-text-opacity))!important}.text-success{--bs-text-opacity:1;color:rgba(var(--bs-success-rgb),var(--bs-text-opacity))!important}.text-info{--bs-text-opacity:1;color:rgba(var(--bs-info-rgb),var(--bs-text-opacity))!important}.text-warning{--bs-text-opacity:1;color:rgba(var(--bs-warning-rgb),var(--bs-text-opacity))!important}.text-danger{--bs-text-opacity:1;color:rgba(var(--bs-danger-rgb),var(--bs-text-opacity))!important}.text-light{--bs-text-opacity:1;color:rgba(var(--bs-light-rgb),var(--bs-text-opacity))!important}.text-dark{--bs-text-opacity:1;color:rgba(var(--bs-dark-rgb),var(--bs-text-opacity))!important}.text-black{--bs-text-opacity:1;color:rgba(var(--bs-black-rgb),var(--bs-text-opacity))!important}.text-white{--bs-text-opacity:1;color:rgba(var(--bs-white-rgb),var(--bs-text-opacity))!important}.text-body{--bs-text-opacity:1;color:rgba(var(--bs-body-color-rgb),var(--bs-text-opacity))!important}.text-muted{--bs-text-opacity:1;color:var(--bs-secondary-color)!important}.text-black-50{--bs-text-opacity:1;color:rgba(0,0,0,.5)!important}.text-white-50{--bs-text-opacity:1;color:rgba(255,255,255,.5)!important}.text-body-secondary{--bs-text-opacity:1;color:var(--bs-secondary-color)!important}.text-body-tertiary{--bs-text-opacity:1;color:var(--bs-tertiary-color)!important}.text-body-emphasis{--bs-text-opacity:1;color:var(--bs-emphasis-color)!important}.text-reset{--bs-text-opacity:1;color:inherit!important}.text-opacity-25{--bs-text-opacity:0.25}.text-opacity-50{--bs-text-opacity:0.5}.text-opacity-75{--bs-text-opacity:0.75}.text-opacity-100{--bs-text-opacity:1}.text-primary-emphasis{color:var(--bs-primary-text-emphasis)!important}.text-secondary-emphasis{color:var(--bs-secondary-text-emphasis)!important}.text-success-emphasis{color:var(--bs-success-text-emphasis)!important}.text-info-emphasis{color:var(--bs-info-text-emphasis)!important}.text-warning-emphasis{color:var(--bs-warning-text-emphasis)!important}.text-danger-emphasis{color:var(--bs-danger-text-emphasis)!important}.text-light-emphasis{color:var(--bs-light-text-emphasis)!important}.text-dark-emphasis{color:var(--bs-dark-text-emphasis)!important}.link-opacity-10{--bs-link-opacity:0.1}.link-opacity-10-hover:hover{--bs-link-opacity:0.1}.link-opacity-25{--bs-link-opacity:0.25}.link-opacity-25-hover:hover{--bs-link-opacity:0.25}.link-opacity-50{--bs-link-opacity:0.5}.link-opacity-50-hover:hover{--bs-link-opacity:0.5}.link-opacity-75{--bs-link-opacity:0.75}.link-opacity-75-hover:hover{--bs-link-opacity:0.75}.link-opacity-100{--bs-link-opacity:1}.link-opacity-100-hover:hover{--bs-link-opacity:1}.link-offset-1{text-underline-offset:0.125em!important}.link-offset-1-hover:hover{text-underline-offset:0.125em!important}.link-offset-2{text-underline-offset:0.25em!important}.link-offset-2-hover:hover{text-underline-offset:0.25em!important}.link-offset-3{text-underline-offset:0.375em!important}.link-offset-3-hover:hover{text-underline-offset:0.375em!important}.link-underline-primary{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-primary-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-primary-rgb),var(--bs-link-underline-opacity))!important}.link-underline-secondary{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-secondary-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-secondary-rgb),var(--bs-link-underline-opacity))!important}.link-underline-success{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-success-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-success-rgb),var(--bs-link-underline-opacity))!important}.link-underline-info{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-info-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-info-rgb),var(--bs-link-underline-opacity))!important}.link-underline-warning{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-warning-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-warning-rgb),var(--bs-link-underline-opacity))!important}.link-underline-danger{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-danger-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-danger-rgb),var(--bs-link-underline-opacity))!important}.link-underline-light{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-light-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-light-rgb),var(--bs-link-underline-opacity))!important}.link-underline-dark{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-dark-rgb),var(--bs-link-underline-opacity))!important;text-decoration-color:rgba(var(--bs-dark-rgb),var(--bs-link-underline-opacity))!important}.link-underline{--bs-link-underline-opacity:1;-webkit-text-decoration-color:rgba(var(--bs-link-color-rgb),var(--bs-link-underline-opacity,1))!important;text-decoration-color:rgba(var(--bs-link-color-rgb),var(--bs-link-underline-opacity,1))!important}.link-underline-opacity-0{--bs-link-underline-opacity:0}.link-underline-opacity-0-hover:hover{--bs-link-underline-opacity:0}.link-underline-opacity-10{--bs-link-underline-opacity:0.1}.link-underline-opacity-10-hover:hover{--bs-link-underline-opacity:0.1}.link-underline-opacity-25{--bs-link-underline-opacity:0.25}.link-underline-opacity-25-hover:hover{--bs-link-underline-opacity:0.25}.link-underline-opacity-50{--bs-link-underline-opacity:0.5}.link-underline-opacity-50-hover:hover{--bs-link-underline-opacity:0.5}.link-underline-opacity-75{--bs-link-underline-opacity:0.75}.link-underline-opacity-75-hover:hover{--bs-link-underline-opacity:0.75}.link-underline-opacity-100{--bs-link-underline-opacity:1}.link-underline-opacity-100-hover:hover{--bs-link-underline-opacity:1}.bg-primary{--bs-bg-opacity:1;background-color:rgba(var(--bs-primary-rgb),var(--bs-bg-opacity))!important}.bg-secondary{--bs-bg-opacity:1;background-color:rgba(var(--bs-secondary-rgb),var(--bs-bg-opacity))!important}.bg-success{--bs-bg-opacity:1;background-color:rgba(var(--bs-success-rgb),var(--bs-bg-opacity))!important}.bg-info{--bs-bg-opacity:1;background-color:rgba(var(--bs-info-rgb),var(--bs-bg-opacity))!important}.bg-warning{--bs-bg-opacity:1;background-color:rgba(var(--bs-warning-rgb),var(--bs-bg-opacity))!important}.bg-danger{--bs-bg-opacity:1;background-color:rgba(var(--bs-danger-rgb),var(--bs-bg-opacity))!important}.bg-light{--bs-bg-opacity:1;background-color:rgba(var(--bs-light-rgb),var(--bs-bg-opacity))!important}.bg-dark{--bs-bg-opacity:1;background-color:rgba(var(--bs-dark-rgb),var(--bs-bg-opacity))!important}.bg-black{--bs-bg-opacity:1;background-color:rgba(var(--bs-black-rgb),var(--bs-bg-opacity))!important}.bg-white{--bs-bg-opacity:1;background-color:rgba(var(--bs-white-rgb),var(--bs-bg-opacity))!important}.bg-body{--bs-bg-opacity:1;background-color:rgba(var(--bs-body-bg-rgb),var(--bs-bg-opacity))!important}.bg-transparent{--bs-bg-opacity:1;background-color:transparent!important}.bg-body-secondary{--bs-bg-opacity:1;background-color:rgba(var(--bs-secondary-bg-rgb),var(--bs-bg-opacity))!important}.bg-body-tertiary{--bs-bg-opacity:1;background-color:rgba(var(--bs-tertiary-bg-rgb),var(--bs-bg-opacity))!important}.bg-opacity-10{--bs-bg-opacity:0.1}.bg-opacity-25{--bs-bg-opacity:0.25}.bg-opacity-50{--bs-bg-opacity:0.5}.bg-opacity-75{--bs-bg-opacity:0.75}.bg-opacity-100{--bs-bg-opacity:1}.bg-primary-subtle{background-color:var(--bs-primary-bg-subtle)!important}.bg-secondary-subtle{background-color:var(--bs-secondary-bg-subtle)!important}.bg-success-subtle{background-color:var(--bs-success-bg-subtle)!important}.bg-info-subtle{background-color:var(--bs-info-bg-subtle)!important}.bg-warning-subtle{background-color:var(--bs-warning-bg-subtle)!important}.bg-danger-subtle{background-color:var(--bs-danger-bg-subtle)!important}.bg-light-subtle{background-color:var(--bs-light-bg-subtle)!important}.bg-dark-subtle{background-color:var(--bs-dark-bg-subtle)!important}.bg-gradient{background-image:var(--bs-gradient)!important}.user-select-all{-webkit-user-select:all!important;-moz-user-select:all!important;user-select:all!important}.user-select-auto{-webkit-user-select:auto!important;-moz-user-select:auto!important;user-select:auto!important}.user-select-none{-webkit-user-select:none!important;-moz-user-select:none!important;user-select:none!important}.pe-none{pointer-events:none!important}.pe-auto{pointer-events:auto!important}.rounded{border-radius:var(--bs-border-radius)!important}.rounded-0{border-radius:0!important}.rounded-1{border-radius:var(--bs-border-radius-sm)!important}.rounded-2{border-radius:var(--bs-border-radius)!important}.rounded-3{border-radius:var(--bs-border-radius-lg)!important}.rounded-4{border-radius:var(--bs-border-radius-xl)!important}.rounded-5{border-radius:var(--bs-border-radius-xxl)!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:var(--bs-border-radius-pill)!important}.rounded-top{border-top-left-radius:var(--bs-border-radius)!important;border-top-right-radius:var(--bs-border-radius)!important}.rounded-top-0{border-top-left-radius:0!important;border-top-right-radius:0!important}.rounded-top-1{border-top-left-radius:var(--bs-border-radius-sm)!important;border-top-right-radius:var(--bs-border-radius-sm)!important}.rounded-top-2{border-top-left-radius:var(--bs-border-radius)!important;border-top-right-radius:var(--bs-border-radius)!important}.rounded-top-3{border-top-left-radius:var(--bs-border-radius-lg)!important;border-top-right-radius:var(--bs-border-radius-lg)!important}.rounded-top-4{border-top-left-radius:var(--bs-border-radius-xl)!important;border-top-right-radius:var(--bs-border-radius-xl)!important}.rounded-top-5{border-top-left-radius:var(--bs-border-radius-xxl)!important;border-top-right-radius:var(--bs-border-radius-xxl)!important}.rounded-top-circle{border-top-left-radius:50%!important;border-top-right-radius:50%!important}.rounded-top-pill{border-top-left-radius:var(--bs-border-radius-pill)!important;border-top-right-radius:var(--bs-border-radius-pill)!important}.rounded-end{border-top-right-radius:var(--bs-border-radius)!important;border-bottom-right-radius:var(--bs-border-radius)!important}.rounded-end-0{border-top-right-radius:0!important;border-bottom-right-radius:0!important}.rounded-end-1{border-top-right-radius:var(--bs-border-radius-sm)!important;border-bottom-right-radius:var(--bs-border-radius-sm)!important}.rounded-end-2{border-top-right-radius:var(--bs-border-radius)!important;border-bottom-right-radius:var(--bs-border-radius)!important}.rounded-end-3{border-top-right-radius:var(--bs-border-radius-lg)!important;border-bottom-right-radius:var(--bs-border-radius-lg)!important}.rounded-end-4{border-top-right-radius:var(--bs-border-radius-xl)!important;border-bottom-right-radius:var(--bs-border-radius-xl)!important}.rounded-end-5{border-top-right-radius:var(--bs-border-radius-xxl)!important;border-bottom-right-radius:var(--bs-border-radius-xxl)!important}.rounded-end-circle{border-top-right-radius:50%!important;border-bottom-right-radius:50%!important}.rounded-end-pill{border-top-right-radius:var(--bs-border-radius-pill)!important;border-bottom-right-radius:var(--bs-border-radius-pill)!important}.rounded-bottom{border-bottom-right-radius:var(--bs-border-radius)!important;border-bottom-left-radius:var(--bs-border-radius)!important}.rounded-bottom-0{border-bottom-right-radius:0!important;border-bottom-left-radius:0!important}.rounded-bottom-1{border-bottom-right-radius:var(--bs-border-radius-sm)!important;border-bottom-left-radius:var(--bs-border-radius-sm)!important}.rounded-bottom-2{border-bottom-right-radius:var(--bs-border-radius)!important;border-bottom-left-radius:var(--bs-border-radius)!important}.rounded-bottom-3{border-bottom-right-radius:var(--bs-border-radius-lg)!important;border-bottom-left-radius:var(--bs-border-radius-lg)!important}.rounded-bottom-4{border-bottom-right-radius:var(--bs-border-radius-xl)!important;border-bottom-left-radius:var(--bs-border-radius-xl)!important}.rounded-bottom-5{border-bottom-right-radius:var(--bs-border-radius-xxl)!important;border-bottom-left-radius:var(--bs-border-radius-xxl)!important}.rounded-bottom-circle{border-bottom-right-radius:50%!important;border-bottom-left-radius:50%!important}.rounded-bottom-pill{border-bottom-right-radius:var(--bs-border-radius-pill)!important;border-bottom-left-radius:var(--bs-border-radius-pill)!important}.rounded-start{border-bottom-left-radius:var(--bs-border-radius)!important;border-top-left-radius:var(--bs-border-radius)!important}.rounded-start-0{border-bottom-left-radius:0!important;border-top-left-radius:0!important}.rounded-start-1{border-bottom-left-radius:var(--bs-border-radius-sm)!important;border-top-left-radius:var(--bs-border-radius-sm)!important}.rounded-start-2{border-bottom-left-radius:var(--bs-border-radius)!important;border-top-left-radius:var(--bs-border-radius)!important}.rounded-start-3{border-bottom-left-radius:var(--bs-border-radius-lg)!important;border-top-left-radius:var(--bs-border-radius-lg)!important}.rounded-start-4{border-bottom-left-radius:var(--bs-border-radius-xl)!important;border-top-left-radius:var(--bs-border-radius-xl)!important}.rounded-start-5{border-bottom-left-radius:var(--bs-border-radius-xxl)!important;border-top-left-radius:var(--bs-border-radius-xxl)!important}.rounded-start-circle{border-bottom-left-radius:50%!important;border-top-left-radius:50%!important}.rounded-start-pill{border-bottom-left-radius:var(--bs-border-radius-pill)!important;border-top-left-radius:var(--bs-border-radius-pill)!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}.z-n1{z-index:-1!important}.z-0{z-index:0!important}.z-1{z-index:1!important}.z-2{z-index:2!important}.z-3{z-index:3!important}@media (min-width:576px){.float-sm-start{float:left!important}.float-sm-end{float:right!important}.float-sm-none{float:none!important}.object-fit-sm-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-sm-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-sm-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-sm-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-sm-none{-o-object-fit:none!important;object-fit:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-inline-grid{display:inline-grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-right:0!important;margin-left:0!important}.mx-sm-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-sm-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-sm-3{margin-right:1rem!important;margin-left:1rem!important}.mx-sm-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-sm-5{margin-right:3rem!important;margin-left:3rem!important}.mx-sm-auto{margin-right:auto!important;margin-left:auto!important}.my-sm-0{margin-top:0!important;margin-bottom:0!important}.my-sm-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-sm-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-sm-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-sm-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-sm-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-sm-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!important}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!important}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-right:0!important;padding-left:0!important}.px-sm-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-sm-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-sm-3{padding-right:1rem!important;padding-left:1rem!important}.px-sm-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-sm-5{padding-right:3rem!important;padding-left:3rem!important}.py-sm-0{padding-top:0!important;padding-bottom:0!important}.py-sm-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-sm-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-sm-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-sm-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-sm-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}.gap-sm-0{gap:0!important}.gap-sm-1{gap:.25rem!important}.gap-sm-2{gap:.5rem!important}.gap-sm-3{gap:1rem!important}.gap-sm-4{gap:1.5rem!important}.gap-sm-5{gap:3rem!important}.row-gap-sm-0{row-gap:0!important}.row-gap-sm-1{row-gap:.25rem!important}.row-gap-sm-2{row-gap:.5rem!important}.row-gap-sm-3{row-gap:1rem!important}.row-gap-sm-4{row-gap:1.5rem!important}.row-gap-sm-5{row-gap:3rem!important}.column-gap-sm-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-sm-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-sm-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-sm-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-sm-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-sm-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.text-sm-start{text-align:left!important}.text-sm-end{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:768px){.float-md-start{float:left!important}.float-md-end{float:right!important}.float-md-none{float:none!important}.object-fit-md-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-md-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-md-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-md-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-md-none{-o-object-fit:none!important;object-fit:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-inline-grid{display:inline-grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-right:0!important;margin-left:0!important}.mx-md-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-md-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-md-3{margin-right:1rem!important;margin-left:1rem!important}.mx-md-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-md-5{margin-right:3rem!important;margin-left:3rem!important}.mx-md-auto{margin-right:auto!important;margin-left:auto!important}.my-md-0{margin-top:0!important;margin-bottom:0!important}.my-md-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-md-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-md-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-md-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-md-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-md-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!important}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!important}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-right:0!important;padding-left:0!important}.px-md-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-md-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-md-3{padding-right:1rem!important;padding-left:1rem!important}.px-md-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-md-5{padding-right:3rem!important;padding-left:3rem!important}.py-md-0{padding-top:0!important;padding-bottom:0!important}.py-md-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-md-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-md-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-md-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-md-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}.gap-md-0{gap:0!important}.gap-md-1{gap:.25rem!important}.gap-md-2{gap:.5rem!important}.gap-md-3{gap:1rem!important}.gap-md-4{gap:1.5rem!important}.gap-md-5{gap:3rem!important}.row-gap-md-0{row-gap:0!important}.row-gap-md-1{row-gap:.25rem!important}.row-gap-md-2{row-gap:.5rem!important}.row-gap-md-3{row-gap:1rem!important}.row-gap-md-4{row-gap:1.5rem!important}.row-gap-md-5{row-gap:3rem!important}.column-gap-md-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-md-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-md-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-md-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-md-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-md-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.text-md-start{text-align:left!important}.text-md-end{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:992px){.float-lg-start{float:left!important}.float-lg-end{float:right!important}.float-lg-none{float:none!important}.object-fit-lg-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-lg-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-lg-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-lg-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-lg-none{-o-object-fit:none!important;object-fit:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-inline-grid{display:inline-grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-right:0!important;margin-left:0!important}.mx-lg-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-lg-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-lg-3{margin-right:1rem!important;margin-left:1rem!important}.mx-lg-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-lg-5{margin-right:3rem!important;margin-left:3rem!important}.mx-lg-auto{margin-right:auto!important;margin-left:auto!important}.my-lg-0{margin-top:0!important;margin-bottom:0!important}.my-lg-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-lg-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-lg-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-lg-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-lg-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-lg-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!important}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!important}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-right:0!important;padding-left:0!important}.px-lg-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-lg-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-lg-3{padding-right:1rem!important;padding-left:1rem!important}.px-lg-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-lg-5{padding-right:3rem!important;padding-left:3rem!important}.py-lg-0{padding-top:0!important;padding-bottom:0!important}.py-lg-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-lg-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-lg-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-lg-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-lg-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}.gap-lg-0{gap:0!important}.gap-lg-1{gap:.25rem!important}.gap-lg-2{gap:.5rem!important}.gap-lg-3{gap:1rem!important}.gap-lg-4{gap:1.5rem!important}.gap-lg-5{gap:3rem!important}.row-gap-lg-0{row-gap:0!important}.row-gap-lg-1{row-gap:.25rem!important}.row-gap-lg-2{row-gap:.5rem!important}.row-gap-lg-3{row-gap:1rem!important}.row-gap-lg-4{row-gap:1.5rem!important}.row-gap-lg-5{row-gap:3rem!important}.column-gap-lg-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-lg-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-lg-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-lg-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-lg-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-lg-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.text-lg-start{text-align:left!important}.text-lg-end{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.float-xl-start{float:left!important}.float-xl-end{float:right!important}.float-xl-none{float:none!important}.object-fit-xl-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-xl-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-xl-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-xl-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-xl-none{-o-object-fit:none!important;object-fit:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-inline-grid{display:inline-grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-right:0!important;margin-left:0!important}.mx-xl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xl-auto{margin-right:auto!important;margin-left:auto!important}.my-xl-0{margin-top:0!important;margin-bottom:0!important}.my-xl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!important}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!important}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-right:0!important;padding-left:0!important}.px-xl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xl-0{padding-top:0!important;padding-bottom:0!important}.py-xl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}.gap-xl-0{gap:0!important}.gap-xl-1{gap:.25rem!important}.gap-xl-2{gap:.5rem!important}.gap-xl-3{gap:1rem!important}.gap-xl-4{gap:1.5rem!important}.gap-xl-5{gap:3rem!important}.row-gap-xl-0{row-gap:0!important}.row-gap-xl-1{row-gap:.25rem!important}.row-gap-xl-2{row-gap:.5rem!important}.row-gap-xl-3{row-gap:1rem!important}.row-gap-xl-4{row-gap:1.5rem!important}.row-gap-xl-5{row-gap:3rem!important}.column-gap-xl-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-xl-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-xl-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-xl-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-xl-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-xl-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.text-xl-start{text-align:left!important}.text-xl-end{text-align:right!important}.text-xl-center{text-align:center!important}}@media (min-width:1400px){.float-xxl-start{float:left!important}.float-xxl-end{float:right!important}.float-xxl-none{float:none!important}.object-fit-xxl-contain{-o-object-fit:contain!important;object-fit:contain!important}.object-fit-xxl-cover{-o-object-fit:cover!important;object-fit:cover!important}.object-fit-xxl-fill{-o-object-fit:fill!important;object-fit:fill!important}.object-fit-xxl-scale{-o-object-fit:scale-down!important;object-fit:scale-down!important}.object-fit-xxl-none{-o-object-fit:none!important;object-fit:none!important}.d-xxl-inline{display:inline!important}.d-xxl-inline-block{display:inline-block!important}.d-xxl-block{display:block!important}.d-xxl-grid{display:grid!important}.d-xxl-inline-grid{display:inline-grid!important}.d-xxl-table{display:table!important}.d-xxl-table-row{display:table-row!important}.d-xxl-table-cell{display:table-cell!important}.d-xxl-flex{display:flex!important}.d-xxl-inline-flex{display:inline-flex!important}.d-xxl-none{display:none!important}.flex-xxl-fill{flex:1 1 auto!important}.flex-xxl-row{flex-direction:row!important}.flex-xxl-column{flex-direction:column!important}.flex-xxl-row-reverse{flex-direction:row-reverse!important}.flex-xxl-column-reverse{flex-direction:column-reverse!important}.flex-xxl-grow-0{flex-grow:0!important}.flex-xxl-grow-1{flex-grow:1!important}.flex-xxl-shrink-0{flex-shrink:0!important}.flex-xxl-shrink-1{flex-shrink:1!important}.flex-xxl-wrap{flex-wrap:wrap!important}.flex-xxl-nowrap{flex-wrap:nowrap!important}.flex-xxl-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-xxl-start{justify-content:flex-start!important}.justify-content-xxl-end{justify-content:flex-end!important}.justify-content-xxl-center{justify-content:center!important}.justify-content-xxl-between{justify-content:space-between!important}.justify-content-xxl-around{justify-content:space-around!important}.justify-content-xxl-evenly{justify-content:space-evenly!important}.align-items-xxl-start{align-items:flex-start!important}.align-items-xxl-end{align-items:flex-end!important}.align-items-xxl-center{align-items:center!important}.align-items-xxl-baseline{align-items:baseline!important}.align-items-xxl-stretch{align-items:stretch!important}.align-content-xxl-start{align-content:flex-start!important}.align-content-xxl-end{align-content:flex-end!important}.align-content-xxl-center{align-content:center!important}.align-content-xxl-between{align-content:space-between!important}.align-content-xxl-around{align-content:space-around!important}.align-content-xxl-stretch{align-content:stretch!important}.align-self-xxl-auto{align-self:auto!important}.align-self-xxl-start{align-self:flex-start!important}.align-self-xxl-end{align-self:flex-end!important}.align-self-xxl-center{align-self:center!important}.align-self-xxl-baseline{align-self:baseline!important}.align-self-xxl-stretch{align-self:stretch!important}.order-xxl-first{order:-1!important}.order-xxl-0{order:0!important}.order-xxl-1{order:1!important}.order-xxl-2{order:2!important}.order-xxl-3{order:3!important}.order-xxl-4{order:4!important}.order-xxl-5{order:5!important}.order-xxl-last{order:6!important}.m-xxl-0{margin:0!important}.m-xxl-1{margin:.25rem!important}.m-xxl-2{margin:.5rem!important}.m-xxl-3{margin:1rem!important}.m-xxl-4{margin:1.5rem!important}.m-xxl-5{margin:3rem!important}.m-xxl-auto{margin:auto!important}.mx-xxl-0{margin-right:0!important;margin-left:0!important}.mx-xxl-1{margin-right:.25rem!important;margin-left:.25rem!important}.mx-xxl-2{margin-right:.5rem!important;margin-left:.5rem!important}.mx-xxl-3{margin-right:1rem!important;margin-left:1rem!important}.mx-xxl-4{margin-right:1.5rem!important;margin-left:1.5rem!important}.mx-xxl-5{margin-right:3rem!important;margin-left:3rem!important}.mx-xxl-auto{margin-right:auto!important;margin-left:auto!important}.my-xxl-0{margin-top:0!important;margin-bottom:0!important}.my-xxl-1{margin-top:.25rem!important;margin-bottom:.25rem!important}.my-xxl-2{margin-top:.5rem!important;margin-bottom:.5rem!important}.my-xxl-3{margin-top:1rem!important;margin-bottom:1rem!important}.my-xxl-4{margin-top:1.5rem!important;margin-bottom:1.5rem!important}.my-xxl-5{margin-top:3rem!important;margin-bottom:3rem!important}.my-xxl-auto{margin-top:auto!important;margin-bottom:auto!important}.mt-xxl-0{margin-top:0!important}.mt-xxl-1{margin-top:.25rem!important}.mt-xxl-2{margin-top:.5rem!important}.mt-xxl-3{margin-top:1rem!important}.mt-xxl-4{margin-top:1.5rem!important}.mt-xxl-5{margin-top:3rem!important}.mt-xxl-auto{margin-top:auto!important}.me-xxl-0{margin-right:0!important}.me-xxl-1{margin-right:.25rem!important}.me-xxl-2{margin-right:.5rem!important}.me-xxl-3{margin-right:1rem!important}.me-xxl-4{margin-right:1.5rem!important}.me-xxl-5{margin-right:3rem!important}.me-xxl-auto{margin-right:auto!important}.mb-xxl-0{margin-bottom:0!important}.mb-xxl-1{margin-bottom:.25rem!important}.mb-xxl-2{margin-bottom:.5rem!important}.mb-xxl-3{margin-bottom:1rem!important}.mb-xxl-4{margin-bottom:1.5rem!important}.mb-xxl-5{margin-bottom:3rem!important}.mb-xxl-auto{margin-bottom:auto!important}.ms-xxl-0{margin-left:0!important}.ms-xxl-1{margin-left:.25rem!important}.ms-xxl-2{margin-left:.5rem!important}.ms-xxl-3{margin-left:1rem!important}.ms-xxl-4{margin-left:1.5rem!important}.ms-xxl-5{margin-left:3rem!important}.ms-xxl-auto{margin-left:auto!important}.p-xxl-0{padding:0!important}.p-xxl-1{padding:.25rem!important}.p-xxl-2{padding:.5rem!important}.p-xxl-3{padding:1rem!important}.p-xxl-4{padding:1.5rem!important}.p-xxl-5{padding:3rem!important}.px-xxl-0{padding-right:0!important;padding-left:0!important}.px-xxl-1{padding-right:.25rem!important;padding-left:.25rem!important}.px-xxl-2{padding-right:.5rem!important;padding-left:.5rem!important}.px-xxl-3{padding-right:1rem!important;padding-left:1rem!important}.px-xxl-4{padding-right:1.5rem!important;padding-left:1.5rem!important}.px-xxl-5{padding-right:3rem!important;padding-left:3rem!important}.py-xxl-0{padding-top:0!important;padding-bottom:0!important}.py-xxl-1{padding-top:.25rem!important;padding-bottom:.25rem!important}.py-xxl-2{padding-top:.5rem!important;padding-bottom:.5rem!important}.py-xxl-3{padding-top:1rem!important;padding-bottom:1rem!important}.py-xxl-4{padding-top:1.5rem!important;padding-bottom:1.5rem!important}.py-xxl-5{padding-top:3rem!important;padding-bottom:3rem!important}.pt-xxl-0{padding-top:0!important}.pt-xxl-1{padding-top:.25rem!important}.pt-xxl-2{padding-top:.5rem!important}.pt-xxl-3{padding-top:1rem!important}.pt-xxl-4{padding-top:1.5rem!important}.pt-xxl-5{padding-top:3rem!important}.pe-xxl-0{padding-right:0!important}.pe-xxl-1{padding-right:.25rem!important}.pe-xxl-2{padding-right:.5rem!important}.pe-xxl-3{padding-right:1rem!important}.pe-xxl-4{padding-right:1.5rem!important}.pe-xxl-5{padding-right:3rem!important}.pb-xxl-0{padding-bottom:0!important}.pb-xxl-1{padding-bottom:.25rem!important}.pb-xxl-2{padding-bottom:.5rem!important}.pb-xxl-3{padding-bottom:1rem!important}.pb-xxl-4{padding-bottom:1.5rem!important}.pb-xxl-5{padding-bottom:3rem!important}.ps-xxl-0{padding-left:0!important}.ps-xxl-1{padding-left:.25rem!important}.ps-xxl-2{padding-left:.5rem!important}.ps-xxl-3{padding-left:1rem!important}.ps-xxl-4{padding-left:1.5rem!important}.ps-xxl-5{padding-left:3rem!important}.gap-xxl-0{gap:0!important}.gap-xxl-1{gap:.25rem!important}.gap-xxl-2{gap:.5rem!important}.gap-xxl-3{gap:1rem!important}.gap-xxl-4{gap:1.5rem!important}.gap-xxl-5{gap:3rem!important}.row-gap-xxl-0{row-gap:0!important}.row-gap-xxl-1{row-gap:.25rem!important}.row-gap-xxl-2{row-gap:.5rem!important}.row-gap-xxl-3{row-gap:1rem!important}.row-gap-xxl-4{row-gap:1.5rem!important}.row-gap-xxl-5{row-gap:3rem!important}.column-gap-xxl-0{-moz-column-gap:0!important;column-gap:0!important}.column-gap-xxl-1{-moz-column-gap:0.25rem!important;column-gap:.25rem!important}.column-gap-xxl-2{-moz-column-gap:0.5rem!important;column-gap:.5rem!important}.column-gap-xxl-3{-moz-column-gap:1rem!important;column-gap:1rem!important}.column-gap-xxl-4{-moz-column-gap:1.5rem!important;column-gap:1.5rem!important}.column-gap-xxl-5{-moz-column-gap:3rem!important;column-gap:3rem!important}.text-xxl-start{text-align:left!important}.text-xxl-end{text-align:right!important}.text-xxl-center{text-align:center!important}}@media (min-width:1200px){.fs-1{font-size:2.5rem!important}.fs-2{font-size:2rem!important}.fs-3{font-size:1.75rem!important}.fs-4{font-size:1.5rem!important}}@media print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-inline-grid{display:inline-grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}} -/*# sourceMappingURL=bootstrap.min.css.map */ \ No newline at end of file diff --git a/weed/admin/static/css/fontawesome.min.css b/weed/admin/static/css/fontawesome.min.css deleted file mode 100644 index 1f367c1fa..000000000 --- a/weed/admin/static/css/fontawesome.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Font Awesome Free 6.4.0 by @fontawesome - https://fontawesome.com - * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) - * Copyright 2023 Fonticons, Inc. - */ -.fa{font-family:var(--fa-style-family,"Font Awesome 6 Free");font-weight:var(--fa-style,900)}.fa,.fa-brands,.fa-classic,.fa-regular,.fa-sharp,.fa-solid,.fab,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:var(--fa-display,inline-block);font-style:normal;font-variant:normal;line-height:1;text-rendering:auto}.fa-classic,.fa-regular,.fa-solid,.far,.fas{font-family:"Font Awesome 6 Free"}.fa-brands,.fab{font-family:"Font Awesome 6 Brands"}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-2xs{font-size:.625em;line-height:.1em;vertical-align:.225em}.fa-xs{font-size:.75em;line-height:.08333em;vertical-align:.125em}.fa-sm{font-size:.875em;line-height:.07143em;vertical-align:.05357em}.fa-lg{font-size:1.25em;line-height:.05em;vertical-align:-.075em}.fa-xl{font-size:1.5em;line-height:.04167em;vertical-align:-.125em}.fa-2xl{font-size:2em;line-height:.03125em;vertical-align:-.1875em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:var(--fa-li-margin,2.5em);padding-left:0}.fa-ul>li{position:relative}.fa-li{left:calc(var(--fa-li-width, 2em)*-1);position:absolute;text-align:center;width:var(--fa-li-width,2em);line-height:inherit}.fa-border{border-radius:var(--fa-border-radius,.1em);border:var(--fa-border-width,.08em) var(--fa-border-style,solid) var(--fa-border-color,#eee);padding:var(--fa-border-padding,.2em .25em .15em)}.fa-pull-left{float:left;margin-right:var(--fa-pull-margin,.3em)}.fa-pull-right{float:right;margin-left:var(--fa-pull-margin,.3em)}.fa-beat{-webkit-animation-name:fa-beat;animation-name:fa-beat;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-bounce{-webkit-animation-name:fa-bounce;animation-name:fa-bounce;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1))}.fa-fade{-webkit-animation-name:fa-fade;animation-name:fa-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-beat-fade,.fa-fade{-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s)}.fa-beat-fade{-webkit-animation-name:fa-beat-fade;animation-name:fa-beat-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-flip{-webkit-animation-name:fa-flip;animation-name:fa-flip;-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-shake{-webkit-animation-name:fa-shake;animation-name:fa-shake;-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-shake,.fa-spin{-webkit-animation-delay:var(--fa-animation-delay,0s);animation-delay:var(--fa-animation-delay,0s);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal)}.fa-spin{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-duration:var(--fa-animation-duration,2s);animation-duration:var(--fa-animation-duration,2s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin-reverse{--fa-animation-direction:reverse}.fa-pulse,.fa-spin-pulse{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,steps(8));animation-timing-function:var(--fa-animation-timing,steps(8))}@media (prefers-reduced-motion:reduce){.fa-beat,.fa-beat-fade,.fa-bounce,.fa-fade,.fa-flip,.fa-pulse,.fa-shake,.fa-spin,.fa-spin-pulse{-webkit-animation-delay:-1ms;animation-delay:-1ms;-webkit-animation-duration:1ms;animation-duration:1ms;-webkit-animation-iteration-count:1;animation-iteration-count:1;-webkit-transition-delay:0s;transition-delay:0s;-webkit-transition-duration:0s;transition-duration:0s}}@-webkit-keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@-webkit-keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@-webkit-keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@-webkit-keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@-webkit-keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@-webkit-keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}.fa-rotate-by{-webkit-transform:rotate(var(--fa-rotate-angle,none));transform:rotate(var(--fa-rotate-angle,none))}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%;z-index:var(--fa-stack-z-index,auto)}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:var(--fa-inverse,#fff)} - -.fa-0:before{content:"\30"}.fa-1:before{content:"\31"}.fa-2:before{content:"\32"}.fa-3:before{content:"\33"}.fa-4:before{content:"\34"}.fa-5:before{content:"\35"}.fa-6:before{content:"\36"}.fa-7:before{content:"\37"}.fa-8:before{content:"\38"}.fa-9:before{content:"\39"}.fa-fill-drip:before{content:"\f576"}.fa-arrows-to-circle:before{content:"\e4bd"}.fa-chevron-circle-right:before,.fa-circle-chevron-right:before{content:"\f138"}.fa-at:before{content:"\40"}.fa-trash-alt:before,.fa-trash-can:before{content:"\f2ed"}.fa-text-height:before{content:"\f034"}.fa-user-times:before,.fa-user-xmark:before{content:"\f235"}.fa-stethoscope:before{content:"\f0f1"}.fa-comment-alt:before,.fa-message:before{content:"\f27a"}.fa-info:before{content:"\f129"}.fa-compress-alt:before,.fa-down-left-and-up-right-to-center:before{content:"\f422"}.fa-explosion:before{content:"\e4e9"}.fa-file-alt:before,.fa-file-lines:before,.fa-file-text:before{content:"\f15c"}.fa-wave-square:before{content:"\f83e"}.fa-ring:before{content:"\f70b"}.fa-building-un:before{content:"\e4d9"}.fa-dice-three:before{content:"\f527"}.fa-calendar-alt:before,.fa-calendar-days:before{content:"\f073"}.fa-anchor-circle-check:before{content:"\e4aa"}.fa-building-circle-arrow-right:before{content:"\e4d1"}.fa-volleyball-ball:before,.fa-volleyball:before{content:"\f45f"}.fa-arrows-up-to-line:before{content:"\e4c2"}.fa-sort-desc:before,.fa-sort-down:before{content:"\f0dd"}.fa-circle-minus:before,.fa-minus-circle:before{content:"\f056"}.fa-door-open:before{content:"\f52b"}.fa-right-from-bracket:before,.fa-sign-out-alt:before{content:"\f2f5"}.fa-atom:before{content:"\f5d2"}.fa-soap:before{content:"\e06e"}.fa-heart-music-camera-bolt:before,.fa-icons:before{content:"\f86d"}.fa-microphone-alt-slash:before,.fa-microphone-lines-slash:before{content:"\f539"}.fa-bridge-circle-check:before{content:"\e4c9"}.fa-pump-medical:before{content:"\e06a"}.fa-fingerprint:before{content:"\f577"}.fa-hand-point-right:before{content:"\f0a4"}.fa-magnifying-glass-location:before,.fa-search-location:before{content:"\f689"}.fa-forward-step:before,.fa-step-forward:before{content:"\f051"}.fa-face-smile-beam:before,.fa-smile-beam:before{content:"\f5b8"}.fa-flag-checkered:before{content:"\f11e"}.fa-football-ball:before,.fa-football:before{content:"\f44e"}.fa-school-circle-exclamation:before{content:"\e56c"}.fa-crop:before{content:"\f125"}.fa-angle-double-down:before,.fa-angles-down:before{content:"\f103"}.fa-users-rectangle:before{content:"\e594"}.fa-people-roof:before{content:"\e537"}.fa-people-line:before{content:"\e534"}.fa-beer-mug-empty:before,.fa-beer:before{content:"\f0fc"}.fa-diagram-predecessor:before{content:"\e477"}.fa-arrow-up-long:before,.fa-long-arrow-up:before{content:"\f176"}.fa-burn:before,.fa-fire-flame-simple:before{content:"\f46a"}.fa-male:before,.fa-person:before{content:"\f183"}.fa-laptop:before{content:"\f109"}.fa-file-csv:before{content:"\f6dd"}.fa-menorah:before{content:"\f676"}.fa-truck-plane:before{content:"\e58f"}.fa-record-vinyl:before{content:"\f8d9"}.fa-face-grin-stars:before,.fa-grin-stars:before{content:"\f587"}.fa-bong:before{content:"\f55c"}.fa-pastafarianism:before,.fa-spaghetti-monster-flying:before{content:"\f67b"}.fa-arrow-down-up-across-line:before{content:"\e4af"}.fa-spoon:before,.fa-utensil-spoon:before{content:"\f2e5"}.fa-jar-wheat:before{content:"\e517"}.fa-envelopes-bulk:before,.fa-mail-bulk:before{content:"\f674"}.fa-file-circle-exclamation:before{content:"\e4eb"}.fa-circle-h:before,.fa-hospital-symbol:before{content:"\f47e"}.fa-pager:before{content:"\f815"}.fa-address-book:before,.fa-contact-book:before{content:"\f2b9"}.fa-strikethrough:before{content:"\f0cc"}.fa-k:before{content:"\4b"}.fa-landmark-flag:before{content:"\e51c"}.fa-pencil-alt:before,.fa-pencil:before{content:"\f303"}.fa-backward:before{content:"\f04a"}.fa-caret-right:before{content:"\f0da"}.fa-comments:before{content:"\f086"}.fa-file-clipboard:before,.fa-paste:before{content:"\f0ea"}.fa-code-pull-request:before{content:"\e13c"}.fa-clipboard-list:before{content:"\f46d"}.fa-truck-loading:before,.fa-truck-ramp-box:before{content:"\f4de"}.fa-user-check:before{content:"\f4fc"}.fa-vial-virus:before{content:"\e597"}.fa-sheet-plastic:before{content:"\e571"}.fa-blog:before{content:"\f781"}.fa-user-ninja:before{content:"\f504"}.fa-person-arrow-up-from-line:before{content:"\e539"}.fa-scroll-torah:before,.fa-torah:before{content:"\f6a0"}.fa-broom-ball:before,.fa-quidditch-broom-ball:before,.fa-quidditch:before{content:"\f458"}.fa-toggle-off:before{content:"\f204"}.fa-archive:before,.fa-box-archive:before{content:"\f187"}.fa-person-drowning:before{content:"\e545"}.fa-arrow-down-9-1:before,.fa-sort-numeric-desc:before,.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-face-grin-tongue-squint:before,.fa-grin-tongue-squint:before{content:"\f58a"}.fa-spray-can:before{content:"\f5bd"}.fa-truck-monster:before{content:"\f63b"}.fa-w:before{content:"\57"}.fa-earth-africa:before,.fa-globe-africa:before{content:"\f57c"}.fa-rainbow:before{content:"\f75b"}.fa-circle-notch:before{content:"\f1ce"}.fa-tablet-alt:before,.fa-tablet-screen-button:before{content:"\f3fa"}.fa-paw:before{content:"\f1b0"}.fa-cloud:before{content:"\f0c2"}.fa-trowel-bricks:before{content:"\e58a"}.fa-face-flushed:before,.fa-flushed:before{content:"\f579"}.fa-hospital-user:before{content:"\f80d"}.fa-tent-arrow-left-right:before{content:"\e57f"}.fa-gavel:before,.fa-legal:before{content:"\f0e3"}.fa-binoculars:before{content:"\f1e5"}.fa-microphone-slash:before{content:"\f131"}.fa-box-tissue:before{content:"\e05b"}.fa-motorcycle:before{content:"\f21c"}.fa-bell-concierge:before,.fa-concierge-bell:before{content:"\f562"}.fa-pen-ruler:before,.fa-pencil-ruler:before{content:"\f5ae"}.fa-people-arrows-left-right:before,.fa-people-arrows:before{content:"\e068"}.fa-mars-and-venus-burst:before{content:"\e523"}.fa-caret-square-right:before,.fa-square-caret-right:before{content:"\f152"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-sun-plant-wilt:before{content:"\e57a"}.fa-toilets-portable:before{content:"\e584"}.fa-hockey-puck:before{content:"\f453"}.fa-table:before{content:"\f0ce"}.fa-magnifying-glass-arrow-right:before{content:"\e521"}.fa-digital-tachograph:before,.fa-tachograph-digital:before{content:"\f566"}.fa-users-slash:before{content:"\e073"}.fa-clover:before{content:"\e139"}.fa-mail-reply:before,.fa-reply:before{content:"\f3e5"}.fa-star-and-crescent:before{content:"\f699"}.fa-house-fire:before{content:"\e50c"}.fa-minus-square:before,.fa-square-minus:before{content:"\f146"}.fa-helicopter:before{content:"\f533"}.fa-compass:before{content:"\f14e"}.fa-caret-square-down:before,.fa-square-caret-down:before{content:"\f150"}.fa-file-circle-question:before{content:"\e4ef"}.fa-laptop-code:before{content:"\f5fc"}.fa-swatchbook:before{content:"\f5c3"}.fa-prescription-bottle:before{content:"\f485"}.fa-bars:before,.fa-navicon:before{content:"\f0c9"}.fa-people-group:before{content:"\e533"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-heart-broken:before,.fa-heart-crack:before{content:"\f7a9"}.fa-external-link-square-alt:before,.fa-square-up-right:before{content:"\f360"}.fa-face-kiss-beam:before,.fa-kiss-beam:before{content:"\f597"}.fa-film:before{content:"\f008"}.fa-ruler-horizontal:before{content:"\f547"}.fa-people-robbery:before{content:"\e536"}.fa-lightbulb:before{content:"\f0eb"}.fa-caret-left:before{content:"\f0d9"}.fa-circle-exclamation:before,.fa-exclamation-circle:before{content:"\f06a"}.fa-school-circle-xmark:before{content:"\e56d"}.fa-arrow-right-from-bracket:before,.fa-sign-out:before{content:"\f08b"}.fa-chevron-circle-down:before,.fa-circle-chevron-down:before{content:"\f13a"}.fa-unlock-alt:before,.fa-unlock-keyhole:before{content:"\f13e"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-headphones-alt:before,.fa-headphones-simple:before{content:"\f58f"}.fa-sitemap:before{content:"\f0e8"}.fa-circle-dollar-to-slot:before,.fa-donate:before{content:"\f4b9"}.fa-memory:before{content:"\f538"}.fa-road-spikes:before{content:"\e568"}.fa-fire-burner:before{content:"\e4f1"}.fa-flag:before{content:"\f024"}.fa-hanukiah:before{content:"\f6e6"}.fa-feather:before{content:"\f52d"}.fa-volume-down:before,.fa-volume-low:before{content:"\f027"}.fa-comment-slash:before{content:"\f4b3"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-compress:before{content:"\f066"}.fa-wheat-alt:before,.fa-wheat-awn:before{content:"\e2cd"}.fa-ankh:before{content:"\f644"}.fa-hands-holding-child:before{content:"\e4fa"}.fa-asterisk:before{content:"\2a"}.fa-check-square:before,.fa-square-check:before{content:"\f14a"}.fa-peseta-sign:before{content:"\e221"}.fa-header:before,.fa-heading:before{content:"\f1dc"}.fa-ghost:before{content:"\f6e2"}.fa-list-squares:before,.fa-list:before{content:"\f03a"}.fa-phone-square-alt:before,.fa-square-phone-flip:before{content:"\f87b"}.fa-cart-plus:before{content:"\f217"}.fa-gamepad:before{content:"\f11b"}.fa-circle-dot:before,.fa-dot-circle:before{content:"\f192"}.fa-dizzy:before,.fa-face-dizzy:before{content:"\f567"}.fa-egg:before{content:"\f7fb"}.fa-house-medical-circle-xmark:before{content:"\e513"}.fa-campground:before{content:"\f6bb"}.fa-folder-plus:before{content:"\f65e"}.fa-futbol-ball:before,.fa-futbol:before,.fa-soccer-ball:before{content:"\f1e3"}.fa-paint-brush:before,.fa-paintbrush:before{content:"\f1fc"}.fa-lock:before{content:"\f023"}.fa-gas-pump:before{content:"\f52f"}.fa-hot-tub-person:before,.fa-hot-tub:before{content:"\f593"}.fa-map-location:before,.fa-map-marked:before{content:"\f59f"}.fa-house-flood-water:before{content:"\e50e"}.fa-tree:before{content:"\f1bb"}.fa-bridge-lock:before{content:"\e4cc"}.fa-sack-dollar:before{content:"\f81d"}.fa-edit:before,.fa-pen-to-square:before{content:"\f044"}.fa-car-side:before{content:"\f5e4"}.fa-share-alt:before,.fa-share-nodes:before{content:"\f1e0"}.fa-heart-circle-minus:before{content:"\e4ff"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-microscope:before{content:"\f610"}.fa-sink:before{content:"\e06d"}.fa-bag-shopping:before,.fa-shopping-bag:before{content:"\f290"}.fa-arrow-down-z-a:before,.fa-sort-alpha-desc:before,.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-mitten:before{content:"\f7b5"}.fa-person-rays:before{content:"\e54d"}.fa-users:before{content:"\f0c0"}.fa-eye-slash:before{content:"\f070"}.fa-flask-vial:before{content:"\e4f3"}.fa-hand-paper:before,.fa-hand:before{content:"\f256"}.fa-om:before{content:"\f679"}.fa-worm:before{content:"\e599"}.fa-house-circle-xmark:before{content:"\e50b"}.fa-plug:before{content:"\f1e6"}.fa-chevron-up:before{content:"\f077"}.fa-hand-spock:before{content:"\f259"}.fa-stopwatch:before{content:"\f2f2"}.fa-face-kiss:before,.fa-kiss:before{content:"\f596"}.fa-bridge-circle-xmark:before{content:"\e4cb"}.fa-face-grin-tongue:before,.fa-grin-tongue:before{content:"\f589"}.fa-chess-bishop:before{content:"\f43a"}.fa-face-grin-wink:before,.fa-grin-wink:before{content:"\f58c"}.fa-deaf:before,.fa-deafness:before,.fa-ear-deaf:before,.fa-hard-of-hearing:before{content:"\f2a4"}.fa-road-circle-check:before{content:"\e564"}.fa-dice-five:before{content:"\f523"}.fa-rss-square:before,.fa-square-rss:before{content:"\f143"}.fa-land-mine-on:before{content:"\e51b"}.fa-i-cursor:before{content:"\f246"}.fa-stamp:before{content:"\f5bf"}.fa-stairs:before{content:"\e289"}.fa-i:before{content:"\49"}.fa-hryvnia-sign:before,.fa-hryvnia:before{content:"\f6f2"}.fa-pills:before{content:"\f484"}.fa-face-grin-wide:before,.fa-grin-alt:before{content:"\f581"}.fa-tooth:before{content:"\f5c9"}.fa-v:before{content:"\56"}.fa-bangladeshi-taka-sign:before{content:"\e2e6"}.fa-bicycle:before{content:"\f206"}.fa-rod-asclepius:before,.fa-rod-snake:before,.fa-staff-aesculapius:before,.fa-staff-snake:before{content:"\e579"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-ambulance:before,.fa-truck-medical:before{content:"\f0f9"}.fa-wheat-awn-circle-exclamation:before{content:"\e598"}.fa-snowman:before{content:"\f7d0"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-road-barrier:before{content:"\e562"}.fa-school:before{content:"\f549"}.fa-igloo:before{content:"\f7ae"}.fa-joint:before{content:"\f595"}.fa-angle-right:before{content:"\f105"}.fa-horse:before{content:"\f6f0"}.fa-q:before{content:"\51"}.fa-g:before{content:"\47"}.fa-notes-medical:before{content:"\f481"}.fa-temperature-2:before,.fa-temperature-half:before,.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-dong-sign:before{content:"\e169"}.fa-capsules:before{content:"\f46b"}.fa-poo-bolt:before,.fa-poo-storm:before{content:"\f75a"}.fa-face-frown-open:before,.fa-frown-open:before{content:"\f57a"}.fa-hand-point-up:before{content:"\f0a6"}.fa-money-bill:before{content:"\f0d6"}.fa-bookmark:before{content:"\f02e"}.fa-align-justify:before{content:"\f039"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-helmet-un:before{content:"\e503"}.fa-bullseye:before{content:"\f140"}.fa-bacon:before{content:"\f7e5"}.fa-hand-point-down:before{content:"\f0a7"}.fa-arrow-up-from-bracket:before{content:"\e09a"}.fa-folder-blank:before,.fa-folder:before{content:"\f07b"}.fa-file-medical-alt:before,.fa-file-waveform:before{content:"\f478"}.fa-radiation:before{content:"\f7b9"}.fa-chart-simple:before{content:"\e473"}.fa-mars-stroke:before{content:"\f229"}.fa-vial:before{content:"\f492"}.fa-dashboard:before,.fa-gauge-med:before,.fa-gauge:before,.fa-tachometer-alt-average:before{content:"\f624"}.fa-magic-wand-sparkles:before,.fa-wand-magic-sparkles:before{content:"\e2ca"}.fa-e:before{content:"\45"}.fa-pen-alt:before,.fa-pen-clip:before{content:"\f305"}.fa-bridge-circle-exclamation:before{content:"\e4ca"}.fa-user:before{content:"\f007"}.fa-school-circle-check:before{content:"\e56b"}.fa-dumpster:before{content:"\f793"}.fa-shuttle-van:before,.fa-van-shuttle:before{content:"\f5b6"}.fa-building-user:before{content:"\e4da"}.fa-caret-square-left:before,.fa-square-caret-left:before{content:"\f191"}.fa-highlighter:before{content:"\f591"}.fa-key:before{content:"\f084"}.fa-bullhorn:before{content:"\f0a1"}.fa-globe:before{content:"\f0ac"}.fa-synagogue:before{content:"\f69b"}.fa-person-half-dress:before{content:"\e548"}.fa-road-bridge:before{content:"\e563"}.fa-location-arrow:before{content:"\f124"}.fa-c:before{content:"\43"}.fa-tablet-button:before{content:"\f10a"}.fa-building-lock:before{content:"\e4d6"}.fa-pizza-slice:before{content:"\f818"}.fa-money-bill-wave:before{content:"\f53a"}.fa-area-chart:before,.fa-chart-area:before{content:"\f1fe"}.fa-house-flag:before{content:"\e50d"}.fa-person-circle-minus:before{content:"\e540"}.fa-ban:before,.fa-cancel:before{content:"\f05e"}.fa-camera-rotate:before{content:"\e0d8"}.fa-air-freshener:before,.fa-spray-can-sparkles:before{content:"\f5d0"}.fa-star:before{content:"\f005"}.fa-repeat:before{content:"\f363"}.fa-cross:before{content:"\f654"}.fa-box:before{content:"\f466"}.fa-venus-mars:before{content:"\f228"}.fa-arrow-pointer:before,.fa-mouse-pointer:before{content:"\f245"}.fa-expand-arrows-alt:before,.fa-maximize:before{content:"\f31e"}.fa-charging-station:before{content:"\f5e7"}.fa-shapes:before,.fa-triangle-circle-square:before{content:"\f61f"}.fa-random:before,.fa-shuffle:before{content:"\f074"}.fa-person-running:before,.fa-running:before{content:"\f70c"}.fa-mobile-retro:before{content:"\e527"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-spider:before{content:"\f717"}.fa-hands-bound:before{content:"\e4f9"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-plane-circle-exclamation:before{content:"\e556"}.fa-x-ray:before{content:"\f497"}.fa-spell-check:before{content:"\f891"}.fa-slash:before{content:"\f715"}.fa-computer-mouse:before,.fa-mouse:before{content:"\f8cc"}.fa-arrow-right-to-bracket:before,.fa-sign-in:before{content:"\f090"}.fa-shop-slash:before,.fa-store-alt-slash:before{content:"\e070"}.fa-server:before{content:"\f233"}.fa-virus-covid-slash:before{content:"\e4a9"}.fa-shop-lock:before{content:"\e4a5"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-blender-phone:before{content:"\f6b6"}.fa-building-wheat:before{content:"\e4db"}.fa-person-breastfeeding:before{content:"\e53a"}.fa-right-to-bracket:before,.fa-sign-in-alt:before{content:"\f2f6"}.fa-venus:before{content:"\f221"}.fa-passport:before{content:"\f5ab"}.fa-heart-pulse:before,.fa-heartbeat:before{content:"\f21e"}.fa-people-carry-box:before,.fa-people-carry:before{content:"\f4ce"}.fa-temperature-high:before{content:"\f769"}.fa-microchip:before{content:"\f2db"}.fa-crown:before{content:"\f521"}.fa-weight-hanging:before{content:"\f5cd"}.fa-xmarks-lines:before{content:"\e59a"}.fa-file-prescription:before{content:"\f572"}.fa-weight-scale:before,.fa-weight:before{content:"\f496"}.fa-user-friends:before,.fa-user-group:before{content:"\f500"}.fa-arrow-up-a-z:before,.fa-sort-alpha-up:before{content:"\f15e"}.fa-chess-knight:before{content:"\f441"}.fa-face-laugh-squint:before,.fa-laugh-squint:before{content:"\f59b"}.fa-wheelchair:before{content:"\f193"}.fa-arrow-circle-up:before,.fa-circle-arrow-up:before{content:"\f0aa"}.fa-toggle-on:before{content:"\f205"}.fa-person-walking:before,.fa-walking:before{content:"\f554"}.fa-l:before{content:"\4c"}.fa-fire:before{content:"\f06d"}.fa-bed-pulse:before,.fa-procedures:before{content:"\f487"}.fa-shuttle-space:before,.fa-space-shuttle:before{content:"\f197"}.fa-face-laugh:before,.fa-laugh:before{content:"\f599"}.fa-folder-open:before{content:"\f07c"}.fa-heart-circle-plus:before{content:"\e500"}.fa-code-fork:before{content:"\e13b"}.fa-city:before{content:"\f64f"}.fa-microphone-alt:before,.fa-microphone-lines:before{content:"\f3c9"}.fa-pepper-hot:before{content:"\f816"}.fa-unlock:before{content:"\f09c"}.fa-colon-sign:before{content:"\e140"}.fa-headset:before{content:"\f590"}.fa-store-slash:before{content:"\e071"}.fa-road-circle-xmark:before{content:"\e566"}.fa-user-minus:before{content:"\f503"}.fa-mars-stroke-up:before,.fa-mars-stroke-v:before{content:"\f22a"}.fa-champagne-glasses:before,.fa-glass-cheers:before{content:"\f79f"}.fa-clipboard:before{content:"\f328"}.fa-house-circle-exclamation:before{content:"\e50a"}.fa-file-arrow-up:before,.fa-file-upload:before{content:"\f574"}.fa-wifi-3:before,.fa-wifi-strong:before,.fa-wifi:before{content:"\f1eb"}.fa-bath:before,.fa-bathtub:before{content:"\f2cd"}.fa-underline:before{content:"\f0cd"}.fa-user-edit:before,.fa-user-pen:before{content:"\f4ff"}.fa-signature:before{content:"\f5b7"}.fa-stroopwafel:before{content:"\f551"}.fa-bold:before{content:"\f032"}.fa-anchor-lock:before{content:"\e4ad"}.fa-building-ngo:before{content:"\e4d7"}.fa-manat-sign:before{content:"\e1d5"}.fa-not-equal:before{content:"\f53e"}.fa-border-style:before,.fa-border-top-left:before{content:"\f853"}.fa-map-location-dot:before,.fa-map-marked-alt:before{content:"\f5a0"}.fa-jedi:before{content:"\f669"}.fa-poll:before,.fa-square-poll-vertical:before{content:"\f681"}.fa-mug-hot:before{content:"\f7b6"}.fa-battery-car:before,.fa-car-battery:before{content:"\f5df"}.fa-gift:before{content:"\f06b"}.fa-dice-two:before{content:"\f528"}.fa-chess-queen:before{content:"\f445"}.fa-glasses:before{content:"\f530"}.fa-chess-board:before{content:"\f43c"}.fa-building-circle-check:before{content:"\e4d2"}.fa-person-chalkboard:before{content:"\e53d"}.fa-mars-stroke-h:before,.fa-mars-stroke-right:before{content:"\f22b"}.fa-hand-back-fist:before,.fa-hand-rock:before{content:"\f255"}.fa-caret-square-up:before,.fa-square-caret-up:before{content:"\f151"}.fa-cloud-showers-water:before{content:"\e4e4"}.fa-bar-chart:before,.fa-chart-bar:before{content:"\f080"}.fa-hands-bubbles:before,.fa-hands-wash:before{content:"\e05e"}.fa-less-than-equal:before{content:"\f537"}.fa-train:before{content:"\f238"}.fa-eye-low-vision:before,.fa-low-vision:before{content:"\f2a8"}.fa-crow:before{content:"\f520"}.fa-sailboat:before{content:"\e445"}.fa-window-restore:before{content:"\f2d2"}.fa-plus-square:before,.fa-square-plus:before{content:"\f0fe"}.fa-torii-gate:before{content:"\f6a1"}.fa-frog:before{content:"\f52e"}.fa-bucket:before{content:"\e4cf"}.fa-image:before{content:"\f03e"}.fa-microphone:before{content:"\f130"}.fa-cow:before{content:"\f6c8"}.fa-caret-up:before{content:"\f0d8"}.fa-screwdriver:before{content:"\f54a"}.fa-folder-closed:before{content:"\e185"}.fa-house-tsunami:before{content:"\e515"}.fa-square-nfi:before{content:"\e576"}.fa-arrow-up-from-ground-water:before{content:"\e4b5"}.fa-glass-martini-alt:before,.fa-martini-glass:before{content:"\f57b"}.fa-rotate-back:before,.fa-rotate-backward:before,.fa-rotate-left:before,.fa-undo-alt:before{content:"\f2ea"}.fa-columns:before,.fa-table-columns:before{content:"\f0db"}.fa-lemon:before{content:"\f094"}.fa-head-side-mask:before{content:"\e063"}.fa-handshake:before{content:"\f2b5"}.fa-gem:before{content:"\f3a5"}.fa-dolly-box:before,.fa-dolly:before{content:"\f472"}.fa-smoking:before{content:"\f48d"}.fa-compress-arrows-alt:before,.fa-minimize:before{content:"\f78c"}.fa-monument:before{content:"\f5a6"}.fa-snowplow:before{content:"\f7d2"}.fa-angle-double-right:before,.fa-angles-right:before{content:"\f101"}.fa-cannabis:before{content:"\f55f"}.fa-circle-play:before,.fa-play-circle:before{content:"\f144"}.fa-tablets:before{content:"\f490"}.fa-ethernet:before{content:"\f796"}.fa-eur:before,.fa-euro-sign:before,.fa-euro:before{content:"\f153"}.fa-chair:before{content:"\f6c0"}.fa-check-circle:before,.fa-circle-check:before{content:"\f058"}.fa-circle-stop:before,.fa-stop-circle:before{content:"\f28d"}.fa-compass-drafting:before,.fa-drafting-compass:before{content:"\f568"}.fa-plate-wheat:before{content:"\e55a"}.fa-icicles:before{content:"\f7ad"}.fa-person-shelter:before{content:"\e54f"}.fa-neuter:before{content:"\f22c"}.fa-id-badge:before{content:"\f2c1"}.fa-marker:before{content:"\f5a1"}.fa-face-laugh-beam:before,.fa-laugh-beam:before{content:"\f59a"}.fa-helicopter-symbol:before{content:"\e502"}.fa-universal-access:before{content:"\f29a"}.fa-chevron-circle-up:before,.fa-circle-chevron-up:before{content:"\f139"}.fa-lari-sign:before{content:"\e1c8"}.fa-volcano:before{content:"\f770"}.fa-person-walking-dashed-line-arrow-right:before{content:"\e553"}.fa-gbp:before,.fa-pound-sign:before,.fa-sterling-sign:before{content:"\f154"}.fa-viruses:before{content:"\e076"}.fa-square-person-confined:before{content:"\e577"}.fa-user-tie:before{content:"\f508"}.fa-arrow-down-long:before,.fa-long-arrow-down:before{content:"\f175"}.fa-tent-arrow-down-to-line:before{content:"\e57e"}.fa-certificate:before{content:"\f0a3"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-suitcase:before{content:"\f0f2"}.fa-person-skating:before,.fa-skating:before{content:"\f7c5"}.fa-filter-circle-dollar:before,.fa-funnel-dollar:before{content:"\f662"}.fa-camera-retro:before{content:"\f083"}.fa-arrow-circle-down:before,.fa-circle-arrow-down:before{content:"\f0ab"}.fa-arrow-right-to-file:before,.fa-file-import:before{content:"\f56f"}.fa-external-link-square:before,.fa-square-arrow-up-right:before{content:"\f14c"}.fa-box-open:before{content:"\f49e"}.fa-scroll:before{content:"\f70e"}.fa-spa:before{content:"\f5bb"}.fa-location-pin-lock:before{content:"\e51f"}.fa-pause:before{content:"\f04c"}.fa-hill-avalanche:before{content:"\e507"}.fa-temperature-0:before,.fa-temperature-empty:before,.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-bomb:before{content:"\f1e2"}.fa-registered:before{content:"\f25d"}.fa-address-card:before,.fa-contact-card:before,.fa-vcard:before{content:"\f2bb"}.fa-balance-scale-right:before,.fa-scale-unbalanced-flip:before{content:"\f516"}.fa-subscript:before{content:"\f12c"}.fa-diamond-turn-right:before,.fa-directions:before{content:"\f5eb"}.fa-burst:before{content:"\e4dc"}.fa-house-laptop:before,.fa-laptop-house:before{content:"\e066"}.fa-face-tired:before,.fa-tired:before{content:"\f5c8"}.fa-money-bills:before{content:"\e1f3"}.fa-smog:before{content:"\f75f"}.fa-crutch:before{content:"\f7f7"}.fa-cloud-arrow-up:before,.fa-cloud-upload-alt:before,.fa-cloud-upload:before{content:"\f0ee"}.fa-palette:before{content:"\f53f"}.fa-arrows-turn-right:before{content:"\e4c0"}.fa-vest:before{content:"\e085"}.fa-ferry:before{content:"\e4ea"}.fa-arrows-down-to-people:before{content:"\e4b9"}.fa-seedling:before,.fa-sprout:before{content:"\f4d8"}.fa-arrows-alt-h:before,.fa-left-right:before{content:"\f337"}.fa-boxes-packing:before{content:"\e4c7"}.fa-arrow-circle-left:before,.fa-circle-arrow-left:before{content:"\f0a8"}.fa-group-arrows-rotate:before{content:"\e4f6"}.fa-bowl-food:before{content:"\e4c6"}.fa-candy-cane:before{content:"\f786"}.fa-arrow-down-wide-short:before,.fa-sort-amount-asc:before,.fa-sort-amount-down:before{content:"\f160"}.fa-cloud-bolt:before,.fa-thunderstorm:before{content:"\f76c"}.fa-remove-format:before,.fa-text-slash:before{content:"\f87d"}.fa-face-smile-wink:before,.fa-smile-wink:before{content:"\f4da"}.fa-file-word:before{content:"\f1c2"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-arrows-h:before,.fa-arrows-left-right:before{content:"\f07e"}.fa-house-lock:before{content:"\e510"}.fa-cloud-arrow-down:before,.fa-cloud-download-alt:before,.fa-cloud-download:before{content:"\f0ed"}.fa-children:before{content:"\e4e1"}.fa-blackboard:before,.fa-chalkboard:before{content:"\f51b"}.fa-user-alt-slash:before,.fa-user-large-slash:before{content:"\f4fa"}.fa-envelope-open:before{content:"\f2b6"}.fa-handshake-alt-slash:before,.fa-handshake-simple-slash:before{content:"\e05f"}.fa-mattress-pillow:before{content:"\e525"}.fa-guarani-sign:before{content:"\e19a"}.fa-arrows-rotate:before,.fa-refresh:before,.fa-sync:before{content:"\f021"}.fa-fire-extinguisher:before{content:"\f134"}.fa-cruzeiro-sign:before{content:"\e152"}.fa-greater-than-equal:before{content:"\f532"}.fa-shield-alt:before,.fa-shield-halved:before{content:"\f3ed"}.fa-atlas:before,.fa-book-atlas:before{content:"\f558"}.fa-virus:before{content:"\e074"}.fa-envelope-circle-check:before{content:"\e4e8"}.fa-layer-group:before{content:"\f5fd"}.fa-arrows-to-dot:before{content:"\e4be"}.fa-archway:before{content:"\f557"}.fa-heart-circle-check:before{content:"\e4fd"}.fa-house-chimney-crack:before,.fa-house-damage:before{content:"\f6f1"}.fa-file-archive:before,.fa-file-zipper:before{content:"\f1c6"}.fa-square:before{content:"\f0c8"}.fa-glass-martini:before,.fa-martini-glass-empty:before{content:"\f000"}.fa-couch:before{content:"\f4b8"}.fa-cedi-sign:before{content:"\e0df"}.fa-italic:before{content:"\f033"}.fa-church:before{content:"\f51d"}.fa-comments-dollar:before{content:"\f653"}.fa-democrat:before{content:"\f747"}.fa-z:before{content:"\5a"}.fa-person-skiing:before,.fa-skiing:before{content:"\f7c9"}.fa-road-lock:before{content:"\e567"}.fa-a:before{content:"\41"}.fa-temperature-arrow-down:before,.fa-temperature-down:before{content:"\e03f"}.fa-feather-alt:before,.fa-feather-pointed:before{content:"\f56b"}.fa-p:before{content:"\50"}.fa-snowflake:before{content:"\f2dc"}.fa-newspaper:before{content:"\f1ea"}.fa-ad:before,.fa-rectangle-ad:before{content:"\f641"}.fa-arrow-circle-right:before,.fa-circle-arrow-right:before{content:"\f0a9"}.fa-filter-circle-xmark:before{content:"\e17b"}.fa-locust:before{content:"\e520"}.fa-sort:before,.fa-unsorted:before{content:"\f0dc"}.fa-list-1-2:before,.fa-list-numeric:before,.fa-list-ol:before{content:"\f0cb"}.fa-person-dress-burst:before{content:"\e544"}.fa-money-check-alt:before,.fa-money-check-dollar:before{content:"\f53d"}.fa-vector-square:before{content:"\f5cb"}.fa-bread-slice:before{content:"\f7ec"}.fa-language:before{content:"\f1ab"}.fa-face-kiss-wink-heart:before,.fa-kiss-wink-heart:before{content:"\f598"}.fa-filter:before{content:"\f0b0"}.fa-question:before{content:"\3f"}.fa-file-signature:before{content:"\f573"}.fa-arrows-alt:before,.fa-up-down-left-right:before{content:"\f0b2"}.fa-house-chimney-user:before{content:"\e065"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-puzzle-piece:before{content:"\f12e"}.fa-money-check:before{content:"\f53c"}.fa-star-half-alt:before,.fa-star-half-stroke:before{content:"\f5c0"}.fa-code:before{content:"\f121"}.fa-glass-whiskey:before,.fa-whiskey-glass:before{content:"\f7a0"}.fa-building-circle-exclamation:before{content:"\e4d3"}.fa-magnifying-glass-chart:before{content:"\e522"}.fa-arrow-up-right-from-square:before,.fa-external-link:before{content:"\f08e"}.fa-cubes-stacked:before{content:"\e4e6"}.fa-krw:before,.fa-won-sign:before,.fa-won:before{content:"\f159"}.fa-virus-covid:before{content:"\e4a8"}.fa-austral-sign:before{content:"\e0a9"}.fa-f:before{content:"\46"}.fa-leaf:before{content:"\f06c"}.fa-road:before{content:"\f018"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-person-circle-plus:before{content:"\e541"}.fa-chart-pie:before,.fa-pie-chart:before{content:"\f200"}.fa-bolt-lightning:before{content:"\e0b7"}.fa-sack-xmark:before{content:"\e56a"}.fa-file-excel:before{content:"\f1c3"}.fa-file-contract:before{content:"\f56c"}.fa-fish-fins:before{content:"\e4f2"}.fa-building-flag:before{content:"\e4d5"}.fa-face-grin-beam:before,.fa-grin-beam:before{content:"\f582"}.fa-object-ungroup:before{content:"\f248"}.fa-poop:before{content:"\f619"}.fa-location-pin:before,.fa-map-marker:before{content:"\f041"}.fa-kaaba:before{content:"\f66b"}.fa-toilet-paper:before{content:"\f71e"}.fa-hard-hat:before,.fa-hat-hard:before,.fa-helmet-safety:before{content:"\f807"}.fa-eject:before{content:"\f052"}.fa-arrow-alt-circle-right:before,.fa-circle-right:before{content:"\f35a"}.fa-plane-circle-check:before{content:"\e555"}.fa-face-rolling-eyes:before,.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-object-group:before{content:"\f247"}.fa-chart-line:before,.fa-line-chart:before{content:"\f201"}.fa-mask-ventilator:before{content:"\e524"}.fa-arrow-right:before{content:"\f061"}.fa-map-signs:before,.fa-signs-post:before{content:"\f277"}.fa-cash-register:before{content:"\f788"}.fa-person-circle-question:before{content:"\e542"}.fa-h:before{content:"\48"}.fa-tarp:before{content:"\e57b"}.fa-screwdriver-wrench:before,.fa-tools:before{content:"\f7d9"}.fa-arrows-to-eye:before{content:"\e4bf"}.fa-plug-circle-bolt:before{content:"\e55b"}.fa-heart:before{content:"\f004"}.fa-mars-and-venus:before{content:"\f224"}.fa-home-user:before,.fa-house-user:before{content:"\e1b0"}.fa-dumpster-fire:before{content:"\f794"}.fa-house-crack:before{content:"\e3b1"}.fa-cocktail:before,.fa-martini-glass-citrus:before{content:"\f561"}.fa-face-surprise:before,.fa-surprise:before{content:"\f5c2"}.fa-bottle-water:before{content:"\e4c5"}.fa-circle-pause:before,.fa-pause-circle:before{content:"\f28b"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-apple-alt:before,.fa-apple-whole:before{content:"\f5d1"}.fa-kitchen-set:before{content:"\e51a"}.fa-r:before{content:"\52"}.fa-temperature-1:before,.fa-temperature-quarter:before,.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-cube:before{content:"\f1b2"}.fa-bitcoin-sign:before{content:"\e0b4"}.fa-shield-dog:before{content:"\e573"}.fa-solar-panel:before{content:"\f5ba"}.fa-lock-open:before{content:"\f3c1"}.fa-elevator:before{content:"\e16d"}.fa-money-bill-transfer:before{content:"\e528"}.fa-money-bill-trend-up:before{content:"\e529"}.fa-house-flood-water-circle-arrow-right:before{content:"\e50f"}.fa-poll-h:before,.fa-square-poll-horizontal:before{content:"\f682"}.fa-circle:before{content:"\f111"}.fa-backward-fast:before,.fa-fast-backward:before{content:"\f049"}.fa-recycle:before{content:"\f1b8"}.fa-user-astronaut:before{content:"\f4fb"}.fa-plane-slash:before{content:"\e069"}.fa-trademark:before{content:"\f25c"}.fa-basketball-ball:before,.fa-basketball:before{content:"\f434"}.fa-satellite-dish:before{content:"\f7c0"}.fa-arrow-alt-circle-up:before,.fa-circle-up:before{content:"\f35b"}.fa-mobile-alt:before,.fa-mobile-screen-button:before{content:"\f3cd"}.fa-volume-high:before,.fa-volume-up:before{content:"\f028"}.fa-users-rays:before{content:"\e593"}.fa-wallet:before{content:"\f555"}.fa-clipboard-check:before{content:"\f46c"}.fa-file-audio:before{content:"\f1c7"}.fa-burger:before,.fa-hamburger:before{content:"\f805"}.fa-wrench:before{content:"\f0ad"}.fa-bugs:before{content:"\e4d0"}.fa-rupee-sign:before,.fa-rupee:before{content:"\f156"}.fa-file-image:before{content:"\f1c5"}.fa-circle-question:before,.fa-question-circle:before{content:"\f059"}.fa-plane-departure:before{content:"\f5b0"}.fa-handshake-slash:before{content:"\e060"}.fa-book-bookmark:before{content:"\e0bb"}.fa-code-branch:before{content:"\f126"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-bridge:before{content:"\e4c8"}.fa-phone-alt:before,.fa-phone-flip:before{content:"\f879"}.fa-truck-front:before{content:"\e2b7"}.fa-cat:before{content:"\f6be"}.fa-anchor-circle-exclamation:before{content:"\e4ab"}.fa-truck-field:before{content:"\e58d"}.fa-route:before{content:"\f4d7"}.fa-clipboard-question:before{content:"\e4e3"}.fa-panorama:before{content:"\e209"}.fa-comment-medical:before{content:"\f7f5"}.fa-teeth-open:before{content:"\f62f"}.fa-file-circle-minus:before{content:"\e4ed"}.fa-tags:before{content:"\f02c"}.fa-wine-glass:before{content:"\f4e3"}.fa-fast-forward:before,.fa-forward-fast:before{content:"\f050"}.fa-face-meh-blank:before,.fa-meh-blank:before{content:"\f5a4"}.fa-parking:before,.fa-square-parking:before{content:"\f540"}.fa-house-signal:before{content:"\e012"}.fa-bars-progress:before,.fa-tasks-alt:before{content:"\f828"}.fa-faucet-drip:before{content:"\e006"}.fa-cart-flatbed:before,.fa-dolly-flatbed:before{content:"\f474"}.fa-ban-smoking:before,.fa-smoking-ban:before{content:"\f54d"}.fa-terminal:before{content:"\f120"}.fa-mobile-button:before{content:"\f10b"}.fa-house-medical-flag:before{content:"\e514"}.fa-basket-shopping:before,.fa-shopping-basket:before{content:"\f291"}.fa-tape:before{content:"\f4db"}.fa-bus-alt:before,.fa-bus-simple:before{content:"\f55e"}.fa-eye:before{content:"\f06e"}.fa-face-sad-cry:before,.fa-sad-cry:before{content:"\f5b3"}.fa-audio-description:before{content:"\f29e"}.fa-person-military-to-person:before{content:"\e54c"}.fa-file-shield:before{content:"\e4f0"}.fa-user-slash:before{content:"\f506"}.fa-pen:before{content:"\f304"}.fa-tower-observation:before{content:"\e586"}.fa-file-code:before{content:"\f1c9"}.fa-signal-5:before,.fa-signal-perfect:before,.fa-signal:before{content:"\f012"}.fa-bus:before{content:"\f207"}.fa-heart-circle-xmark:before{content:"\e501"}.fa-home-lg:before,.fa-house-chimney:before{content:"\e3af"}.fa-window-maximize:before{content:"\f2d0"}.fa-face-frown:before,.fa-frown:before{content:"\f119"}.fa-prescription:before{content:"\f5b1"}.fa-shop:before,.fa-store-alt:before{content:"\f54f"}.fa-floppy-disk:before,.fa-save:before{content:"\f0c7"}.fa-vihara:before{content:"\f6a7"}.fa-balance-scale-left:before,.fa-scale-unbalanced:before{content:"\f515"}.fa-sort-asc:before,.fa-sort-up:before{content:"\f0de"}.fa-comment-dots:before,.fa-commenting:before{content:"\f4ad"}.fa-plant-wilt:before{content:"\e5aa"}.fa-diamond:before{content:"\f219"}.fa-face-grin-squint:before,.fa-grin-squint:before{content:"\f585"}.fa-hand-holding-dollar:before,.fa-hand-holding-usd:before{content:"\f4c0"}.fa-bacterium:before{content:"\e05a"}.fa-hand-pointer:before{content:"\f25a"}.fa-drum-steelpan:before{content:"\f56a"}.fa-hand-scissors:before{content:"\f257"}.fa-hands-praying:before,.fa-praying-hands:before{content:"\f684"}.fa-arrow-right-rotate:before,.fa-arrow-rotate-forward:before,.fa-arrow-rotate-right:before,.fa-redo:before{content:"\f01e"}.fa-biohazard:before{content:"\f780"}.fa-location-crosshairs:before,.fa-location:before{content:"\f601"}.fa-mars-double:before{content:"\f227"}.fa-child-dress:before{content:"\e59c"}.fa-users-between-lines:before{content:"\e591"}.fa-lungs-virus:before{content:"\e067"}.fa-face-grin-tears:before,.fa-grin-tears:before{content:"\f588"}.fa-phone:before{content:"\f095"}.fa-calendar-times:before,.fa-calendar-xmark:before{content:"\f273"}.fa-child-reaching:before{content:"\e59d"}.fa-head-side-virus:before{content:"\e064"}.fa-user-cog:before,.fa-user-gear:before{content:"\f4fe"}.fa-arrow-up-1-9:before,.fa-sort-numeric-up:before{content:"\f163"}.fa-door-closed:before{content:"\f52a"}.fa-shield-virus:before{content:"\e06c"}.fa-dice-six:before{content:"\f526"}.fa-mosquito-net:before{content:"\e52c"}.fa-bridge-water:before{content:"\e4ce"}.fa-person-booth:before{content:"\f756"}.fa-text-width:before{content:"\f035"}.fa-hat-wizard:before{content:"\f6e8"}.fa-pen-fancy:before{content:"\f5ac"}.fa-digging:before,.fa-person-digging:before{content:"\f85e"}.fa-trash:before{content:"\f1f8"}.fa-gauge-simple-med:before,.fa-gauge-simple:before,.fa-tachometer-average:before{content:"\f629"}.fa-book-medical:before{content:"\f7e6"}.fa-poo:before{content:"\f2fe"}.fa-quote-right-alt:before,.fa-quote-right:before{content:"\f10e"}.fa-shirt:before,.fa-t-shirt:before,.fa-tshirt:before{content:"\f553"}.fa-cubes:before{content:"\f1b3"}.fa-divide:before{content:"\f529"}.fa-tenge-sign:before,.fa-tenge:before{content:"\f7d7"}.fa-headphones:before{content:"\f025"}.fa-hands-holding:before{content:"\f4c2"}.fa-hands-clapping:before{content:"\e1a8"}.fa-republican:before{content:"\f75e"}.fa-arrow-left:before{content:"\f060"}.fa-person-circle-xmark:before{content:"\e543"}.fa-ruler:before{content:"\f545"}.fa-align-left:before{content:"\f036"}.fa-dice-d6:before{content:"\f6d1"}.fa-restroom:before{content:"\f7bd"}.fa-j:before{content:"\4a"}.fa-users-viewfinder:before{content:"\e595"}.fa-file-video:before{content:"\f1c8"}.fa-external-link-alt:before,.fa-up-right-from-square:before{content:"\f35d"}.fa-table-cells:before,.fa-th:before{content:"\f00a"}.fa-file-pdf:before{content:"\f1c1"}.fa-bible:before,.fa-book-bible:before{content:"\f647"}.fa-o:before{content:"\4f"}.fa-medkit:before,.fa-suitcase-medical:before{content:"\f0fa"}.fa-user-secret:before{content:"\f21b"}.fa-otter:before{content:"\f700"}.fa-female:before,.fa-person-dress:before{content:"\f182"}.fa-comment-dollar:before{content:"\f651"}.fa-briefcase-clock:before,.fa-business-time:before{content:"\f64a"}.fa-table-cells-large:before,.fa-th-large:before{content:"\f009"}.fa-book-tanakh:before,.fa-tanakh:before{content:"\f827"}.fa-phone-volume:before,.fa-volume-control-phone:before{content:"\f2a0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-clipboard-user:before{content:"\f7f3"}.fa-child:before{content:"\f1ae"}.fa-lira-sign:before{content:"\f195"}.fa-satellite:before{content:"\f7bf"}.fa-plane-lock:before{content:"\e558"}.fa-tag:before{content:"\f02b"}.fa-comment:before{content:"\f075"}.fa-birthday-cake:before,.fa-cake-candles:before,.fa-cake:before{content:"\f1fd"}.fa-envelope:before{content:"\f0e0"}.fa-angle-double-up:before,.fa-angles-up:before{content:"\f102"}.fa-paperclip:before{content:"\f0c6"}.fa-arrow-right-to-city:before{content:"\e4b3"}.fa-ribbon:before{content:"\f4d6"}.fa-lungs:before{content:"\f604"}.fa-arrow-up-9-1:before,.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-litecoin-sign:before{content:"\e1d3"}.fa-border-none:before{content:"\f850"}.fa-circle-nodes:before{content:"\e4e2"}.fa-parachute-box:before{content:"\f4cd"}.fa-indent:before{content:"\f03c"}.fa-truck-field-un:before{content:"\e58e"}.fa-hourglass-empty:before,.fa-hourglass:before{content:"\f254"}.fa-mountain:before{content:"\f6fc"}.fa-user-doctor:before,.fa-user-md:before{content:"\f0f0"}.fa-circle-info:before,.fa-info-circle:before{content:"\f05a"}.fa-cloud-meatball:before{content:"\f73b"}.fa-camera-alt:before,.fa-camera:before{content:"\f030"}.fa-square-virus:before{content:"\e578"}.fa-meteor:before{content:"\f753"}.fa-car-on:before{content:"\e4dd"}.fa-sleigh:before{content:"\f7cc"}.fa-arrow-down-1-9:before,.fa-sort-numeric-asc:before,.fa-sort-numeric-down:before{content:"\f162"}.fa-hand-holding-droplet:before,.fa-hand-holding-water:before{content:"\f4c1"}.fa-water:before{content:"\f773"}.fa-calendar-check:before{content:"\f274"}.fa-braille:before{content:"\f2a1"}.fa-prescription-bottle-alt:before,.fa-prescription-bottle-medical:before{content:"\f486"}.fa-landmark:before{content:"\f66f"}.fa-truck:before{content:"\f0d1"}.fa-crosshairs:before{content:"\f05b"}.fa-person-cane:before{content:"\e53c"}.fa-tent:before{content:"\e57d"}.fa-vest-patches:before{content:"\e086"}.fa-check-double:before{content:"\f560"}.fa-arrow-down-a-z:before,.fa-sort-alpha-asc:before,.fa-sort-alpha-down:before{content:"\f15d"}.fa-money-bill-wheat:before{content:"\e52a"}.fa-cookie:before{content:"\f563"}.fa-arrow-left-rotate:before,.fa-arrow-rotate-back:before,.fa-arrow-rotate-backward:before,.fa-arrow-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-hard-drive:before,.fa-hdd:before{content:"\f0a0"}.fa-face-grin-squint-tears:before,.fa-grin-squint-tears:before{content:"\f586"}.fa-dumbbell:before{content:"\f44b"}.fa-list-alt:before,.fa-rectangle-list:before{content:"\f022"}.fa-tarp-droplet:before{content:"\e57c"}.fa-house-medical-circle-check:before{content:"\e511"}.fa-person-skiing-nordic:before,.fa-skiing-nordic:before{content:"\f7ca"}.fa-calendar-plus:before{content:"\f271"}.fa-plane-arrival:before{content:"\f5af"}.fa-arrow-alt-circle-left:before,.fa-circle-left:before{content:"\f359"}.fa-subway:before,.fa-train-subway:before{content:"\f239"}.fa-chart-gantt:before{content:"\e0e4"}.fa-indian-rupee-sign:before,.fa-indian-rupee:before,.fa-inr:before{content:"\e1bc"}.fa-crop-alt:before,.fa-crop-simple:before{content:"\f565"}.fa-money-bill-1:before,.fa-money-bill-alt:before{content:"\f3d1"}.fa-left-long:before,.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-dna:before{content:"\f471"}.fa-virus-slash:before{content:"\e075"}.fa-minus:before,.fa-subtract:before{content:"\f068"}.fa-chess:before{content:"\f439"}.fa-arrow-left-long:before,.fa-long-arrow-left:before{content:"\f177"}.fa-plug-circle-check:before{content:"\e55c"}.fa-street-view:before{content:"\f21d"}.fa-franc-sign:before{content:"\e18f"}.fa-volume-off:before{content:"\f026"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before,.fa-hands-american-sign-language-interpreting:before,.fa-hands-asl-interpreting:before{content:"\f2a3"}.fa-cog:before,.fa-gear:before{content:"\f013"}.fa-droplet-slash:before,.fa-tint-slash:before{content:"\f5c7"}.fa-mosque:before{content:"\f678"}.fa-mosquito:before{content:"\e52b"}.fa-star-of-david:before{content:"\f69a"}.fa-person-military-rifle:before{content:"\e54b"}.fa-cart-shopping:before,.fa-shopping-cart:before{content:"\f07a"}.fa-vials:before{content:"\f493"}.fa-plug-circle-plus:before{content:"\e55f"}.fa-place-of-worship:before{content:"\f67f"}.fa-grip-vertical:before{content:"\f58e"}.fa-arrow-turn-up:before,.fa-level-up:before{content:"\f148"}.fa-u:before{content:"\55"}.fa-square-root-alt:before,.fa-square-root-variable:before{content:"\f698"}.fa-clock-four:before,.fa-clock:before{content:"\f017"}.fa-backward-step:before,.fa-step-backward:before{content:"\f048"}.fa-pallet:before{content:"\f482"}.fa-faucet:before{content:"\e005"}.fa-baseball-bat-ball:before{content:"\f432"}.fa-s:before{content:"\53"}.fa-timeline:before{content:"\e29c"}.fa-keyboard:before{content:"\f11c"}.fa-caret-down:before{content:"\f0d7"}.fa-clinic-medical:before,.fa-house-chimney-medical:before{content:"\f7f2"}.fa-temperature-3:before,.fa-temperature-three-quarters:before,.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-mobile-android-alt:before,.fa-mobile-screen:before{content:"\f3cf"}.fa-plane-up:before{content:"\e22d"}.fa-piggy-bank:before{content:"\f4d3"}.fa-battery-3:before,.fa-battery-half:before{content:"\f242"}.fa-mountain-city:before{content:"\e52e"}.fa-coins:before{content:"\f51e"}.fa-khanda:before{content:"\f66d"}.fa-sliders-h:before,.fa-sliders:before{content:"\f1de"}.fa-folder-tree:before{content:"\f802"}.fa-network-wired:before{content:"\f6ff"}.fa-map-pin:before{content:"\f276"}.fa-hamsa:before{content:"\f665"}.fa-cent-sign:before{content:"\e3f5"}.fa-flask:before{content:"\f0c3"}.fa-person-pregnant:before{content:"\e31e"}.fa-wand-sparkles:before{content:"\f72b"}.fa-ellipsis-v:before,.fa-ellipsis-vertical:before{content:"\f142"}.fa-ticket:before{content:"\f145"}.fa-power-off:before{content:"\f011"}.fa-long-arrow-alt-right:before,.fa-right-long:before{content:"\f30b"}.fa-flag-usa:before{content:"\f74d"}.fa-laptop-file:before{content:"\e51d"}.fa-teletype:before,.fa-tty:before{content:"\f1e4"}.fa-diagram-next:before{content:"\e476"}.fa-person-rifle:before{content:"\e54e"}.fa-house-medical-circle-exclamation:before{content:"\e512"}.fa-closed-captioning:before{content:"\f20a"}.fa-hiking:before,.fa-person-hiking:before{content:"\f6ec"}.fa-venus-double:before{content:"\f226"}.fa-images:before{content:"\f302"}.fa-calculator:before{content:"\f1ec"}.fa-people-pulling:before{content:"\e535"}.fa-n:before{content:"\4e"}.fa-cable-car:before,.fa-tram:before{content:"\f7da"}.fa-cloud-rain:before{content:"\f73d"}.fa-building-circle-xmark:before{content:"\e4d4"}.fa-ship:before{content:"\f21a"}.fa-arrows-down-to-line:before{content:"\e4b8"}.fa-download:before{content:"\f019"}.fa-face-grin:before,.fa-grin:before{content:"\f580"}.fa-backspace:before,.fa-delete-left:before{content:"\f55a"}.fa-eye-dropper-empty:before,.fa-eye-dropper:before,.fa-eyedropper:before{content:"\f1fb"}.fa-file-circle-check:before{content:"\e5a0"}.fa-forward:before{content:"\f04e"}.fa-mobile-android:before,.fa-mobile-phone:before,.fa-mobile:before{content:"\f3ce"}.fa-face-meh:before,.fa-meh:before{content:"\f11a"}.fa-align-center:before{content:"\f037"}.fa-book-dead:before,.fa-book-skull:before{content:"\f6b7"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-heart-circle-exclamation:before{content:"\e4fe"}.fa-home-alt:before,.fa-home-lg-alt:before,.fa-home:before,.fa-house:before{content:"\f015"}.fa-calendar-week:before{content:"\f784"}.fa-laptop-medical:before{content:"\f812"}.fa-b:before{content:"\42"}.fa-file-medical:before{content:"\f477"}.fa-dice-one:before{content:"\f525"}.fa-kiwi-bird:before{content:"\f535"}.fa-arrow-right-arrow-left:before,.fa-exchange:before{content:"\f0ec"}.fa-redo-alt:before,.fa-rotate-forward:before,.fa-rotate-right:before{content:"\f2f9"}.fa-cutlery:before,.fa-utensils:before{content:"\f2e7"}.fa-arrow-up-wide-short:before,.fa-sort-amount-up:before{content:"\f161"}.fa-mill-sign:before{content:"\e1ed"}.fa-bowl-rice:before{content:"\e2eb"}.fa-skull:before{content:"\f54c"}.fa-broadcast-tower:before,.fa-tower-broadcast:before{content:"\f519"}.fa-truck-pickup:before{content:"\f63c"}.fa-long-arrow-alt-up:before,.fa-up-long:before{content:"\f30c"}.fa-stop:before{content:"\f04d"}.fa-code-merge:before{content:"\f387"}.fa-upload:before{content:"\f093"}.fa-hurricane:before{content:"\f751"}.fa-mound:before{content:"\e52d"}.fa-toilet-portable:before{content:"\e583"}.fa-compact-disc:before{content:"\f51f"}.fa-file-arrow-down:before,.fa-file-download:before{content:"\f56d"}.fa-caravan:before{content:"\f8ff"}.fa-shield-cat:before{content:"\e572"}.fa-bolt:before,.fa-zap:before{content:"\f0e7"}.fa-glass-water:before{content:"\e4f4"}.fa-oil-well:before{content:"\e532"}.fa-vault:before{content:"\e2c5"}.fa-mars:before{content:"\f222"}.fa-toilet:before{content:"\f7d8"}.fa-plane-circle-xmark:before{content:"\e557"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen-sign:before,.fa-yen:before{content:"\f157"}.fa-rouble:before,.fa-rub:before,.fa-ruble-sign:before,.fa-ruble:before{content:"\f158"}.fa-sun:before{content:"\f185"}.fa-guitar:before{content:"\f7a6"}.fa-face-laugh-wink:before,.fa-laugh-wink:before{content:"\f59c"}.fa-horse-head:before{content:"\f7ab"}.fa-bore-hole:before{content:"\e4c3"}.fa-industry:before{content:"\f275"}.fa-arrow-alt-circle-down:before,.fa-circle-down:before{content:"\f358"}.fa-arrows-turn-to-dots:before{content:"\e4c1"}.fa-florin-sign:before{content:"\e184"}.fa-arrow-down-short-wide:before,.fa-sort-amount-desc:before,.fa-sort-amount-down-alt:before{content:"\f884"}.fa-less-than:before{content:"\3c"}.fa-angle-down:before{content:"\f107"}.fa-car-tunnel:before{content:"\e4de"}.fa-head-side-cough:before{content:"\e061"}.fa-grip-lines:before{content:"\f7a4"}.fa-thumbs-down:before{content:"\f165"}.fa-user-lock:before{content:"\f502"}.fa-arrow-right-long:before,.fa-long-arrow-right:before{content:"\f178"}.fa-anchor-circle-xmark:before{content:"\e4ac"}.fa-ellipsis-h:before,.fa-ellipsis:before{content:"\f141"}.fa-chess-pawn:before{content:"\f443"}.fa-first-aid:before,.fa-kit-medical:before{content:"\f479"}.fa-person-through-window:before{content:"\e5a9"}.fa-toolbox:before{content:"\f552"}.fa-hands-holding-circle:before{content:"\e4fb"}.fa-bug:before{content:"\f188"}.fa-credit-card-alt:before,.fa-credit-card:before{content:"\f09d"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-hand-holding-hand:before{content:"\e4f7"}.fa-book-open-reader:before,.fa-book-reader:before{content:"\f5da"}.fa-mountain-sun:before{content:"\e52f"}.fa-arrows-left-right-to-line:before{content:"\e4ba"}.fa-dice-d20:before{content:"\f6cf"}.fa-truck-droplet:before{content:"\e58c"}.fa-file-circle-xmark:before{content:"\e5a1"}.fa-temperature-arrow-up:before,.fa-temperature-up:before{content:"\e040"}.fa-medal:before{content:"\f5a2"}.fa-bed:before{content:"\f236"}.fa-h-square:before,.fa-square-h:before{content:"\f0fd"}.fa-podcast:before{content:"\f2ce"}.fa-temperature-4:before,.fa-temperature-full:before,.fa-thermometer-4:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-bell:before{content:"\f0f3"}.fa-superscript:before{content:"\f12b"}.fa-plug-circle-xmark:before{content:"\e560"}.fa-star-of-life:before{content:"\f621"}.fa-phone-slash:before{content:"\f3dd"}.fa-paint-roller:before{content:"\f5aa"}.fa-hands-helping:before,.fa-handshake-angle:before{content:"\f4c4"}.fa-location-dot:before,.fa-map-marker-alt:before{content:"\f3c5"}.fa-file:before{content:"\f15b"}.fa-greater-than:before{content:"\3e"}.fa-person-swimming:before,.fa-swimmer:before{content:"\f5c4"}.fa-arrow-down:before{content:"\f063"}.fa-droplet:before,.fa-tint:before{content:"\f043"}.fa-eraser:before{content:"\f12d"}.fa-earth-america:before,.fa-earth-americas:before,.fa-earth:before,.fa-globe-americas:before{content:"\f57d"}.fa-person-burst:before{content:"\e53b"}.fa-dove:before{content:"\f4ba"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-socks:before{content:"\f696"}.fa-inbox:before{content:"\f01c"}.fa-section:before{content:"\e447"}.fa-gauge-high:before,.fa-tachometer-alt-fast:before,.fa-tachometer-alt:before{content:"\f625"}.fa-envelope-open-text:before{content:"\f658"}.fa-hospital-alt:before,.fa-hospital-wide:before,.fa-hospital:before{content:"\f0f8"}.fa-wine-bottle:before{content:"\f72f"}.fa-chess-rook:before{content:"\f447"}.fa-bars-staggered:before,.fa-reorder:before,.fa-stream:before{content:"\f550"}.fa-dharmachakra:before{content:"\f655"}.fa-hotdog:before{content:"\f80f"}.fa-blind:before,.fa-person-walking-with-cane:before{content:"\f29d"}.fa-drum:before{content:"\f569"}.fa-ice-cream:before{content:"\f810"}.fa-heart-circle-bolt:before{content:"\e4fc"}.fa-fax:before{content:"\f1ac"}.fa-paragraph:before{content:"\f1dd"}.fa-check-to-slot:before,.fa-vote-yea:before{content:"\f772"}.fa-star-half:before{content:"\f089"}.fa-boxes-alt:before,.fa-boxes-stacked:before,.fa-boxes:before{content:"\f468"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-assistive-listening-systems:before,.fa-ear-listen:before{content:"\f2a2"}.fa-tree-city:before{content:"\e587"}.fa-play:before{content:"\f04b"}.fa-font:before{content:"\f031"}.fa-rupiah-sign:before{content:"\e23d"}.fa-magnifying-glass:before,.fa-search:before{content:"\f002"}.fa-ping-pong-paddle-ball:before,.fa-table-tennis-paddle-ball:before,.fa-table-tennis:before{content:"\f45d"}.fa-diagnoses:before,.fa-person-dots-from-line:before{content:"\f470"}.fa-trash-can-arrow-up:before,.fa-trash-restore-alt:before{content:"\f82a"}.fa-naira-sign:before{content:"\e1f6"}.fa-cart-arrow-down:before{content:"\f218"}.fa-walkie-talkie:before{content:"\f8ef"}.fa-file-edit:before,.fa-file-pen:before{content:"\f31c"}.fa-receipt:before{content:"\f543"}.fa-pen-square:before,.fa-pencil-square:before,.fa-square-pen:before{content:"\f14b"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-person-circle-exclamation:before{content:"\e53f"}.fa-chevron-down:before{content:"\f078"}.fa-battery-5:before,.fa-battery-full:before,.fa-battery:before{content:"\f240"}.fa-skull-crossbones:before{content:"\f714"}.fa-code-compare:before{content:"\e13a"}.fa-list-dots:before,.fa-list-ul:before{content:"\f0ca"}.fa-school-lock:before{content:"\e56f"}.fa-tower-cell:before{content:"\e585"}.fa-down-long:before,.fa-long-arrow-alt-down:before{content:"\f309"}.fa-ranking-star:before{content:"\e561"}.fa-chess-king:before{content:"\f43f"}.fa-person-harassing:before{content:"\e549"}.fa-brazilian-real-sign:before{content:"\e46c"}.fa-landmark-alt:before,.fa-landmark-dome:before{content:"\f752"}.fa-arrow-up:before{content:"\f062"}.fa-television:before,.fa-tv-alt:before,.fa-tv:before{content:"\f26c"}.fa-shrimp:before{content:"\e448"}.fa-list-check:before,.fa-tasks:before{content:"\f0ae"}.fa-jug-detergent:before{content:"\e519"}.fa-circle-user:before,.fa-user-circle:before{content:"\f2bd"}.fa-user-shield:before{content:"\f505"}.fa-wind:before{content:"\f72e"}.fa-car-burst:before,.fa-car-crash:before{content:"\f5e1"}.fa-y:before{content:"\59"}.fa-person-snowboarding:before,.fa-snowboarding:before{content:"\f7ce"}.fa-shipping-fast:before,.fa-truck-fast:before{content:"\f48b"}.fa-fish:before{content:"\f578"}.fa-user-graduate:before{content:"\f501"}.fa-adjust:before,.fa-circle-half-stroke:before{content:"\f042"}.fa-clapperboard:before{content:"\e131"}.fa-circle-radiation:before,.fa-radiation-alt:before{content:"\f7ba"}.fa-baseball-ball:before,.fa-baseball:before{content:"\f433"}.fa-jet-fighter-up:before{content:"\e518"}.fa-diagram-project:before,.fa-project-diagram:before{content:"\f542"}.fa-copy:before{content:"\f0c5"}.fa-volume-mute:before,.fa-volume-times:before,.fa-volume-xmark:before{content:"\f6a9"}.fa-hand-sparkles:before{content:"\e05d"}.fa-grip-horizontal:before,.fa-grip:before{content:"\f58d"}.fa-share-from-square:before,.fa-share-square:before{content:"\f14d"}.fa-child-combatant:before,.fa-child-rifle:before{content:"\e4e0"}.fa-gun:before{content:"\e19b"}.fa-phone-square:before,.fa-square-phone:before{content:"\f098"}.fa-add:before,.fa-plus:before{content:"\2b"}.fa-expand:before{content:"\f065"}.fa-computer:before{content:"\e4e5"}.fa-close:before,.fa-multiply:before,.fa-remove:before,.fa-times:before,.fa-xmark:before{content:"\f00d"}.fa-arrows-up-down-left-right:before,.fa-arrows:before{content:"\f047"}.fa-chalkboard-teacher:before,.fa-chalkboard-user:before{content:"\f51c"}.fa-peso-sign:before{content:"\e222"}.fa-building-shield:before{content:"\e4d8"}.fa-baby:before{content:"\f77c"}.fa-users-line:before{content:"\e592"}.fa-quote-left-alt:before,.fa-quote-left:before{content:"\f10d"}.fa-tractor:before{content:"\f722"}.fa-trash-arrow-up:before,.fa-trash-restore:before{content:"\f829"}.fa-arrow-down-up-lock:before{content:"\e4b0"}.fa-lines-leaning:before{content:"\e51e"}.fa-ruler-combined:before{content:"\f546"}.fa-copyright:before{content:"\f1f9"}.fa-equals:before{content:"\3d"}.fa-blender:before{content:"\f517"}.fa-teeth:before{content:"\f62e"}.fa-ils:before,.fa-shekel-sign:before,.fa-shekel:before,.fa-sheqel-sign:before,.fa-sheqel:before{content:"\f20b"}.fa-map:before{content:"\f279"}.fa-rocket:before{content:"\f135"}.fa-photo-film:before,.fa-photo-video:before{content:"\f87c"}.fa-folder-minus:before{content:"\f65d"}.fa-store:before{content:"\f54e"}.fa-arrow-trend-up:before{content:"\e098"}.fa-plug-circle-minus:before{content:"\e55e"}.fa-sign-hanging:before,.fa-sign:before{content:"\f4d9"}.fa-bezier-curve:before{content:"\f55b"}.fa-bell-slash:before{content:"\f1f6"}.fa-tablet-android:before,.fa-tablet:before{content:"\f3fb"}.fa-school-flag:before{content:"\e56e"}.fa-fill:before{content:"\f575"}.fa-angle-up:before{content:"\f106"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-holly-berry:before{content:"\f7aa"}.fa-chevron-left:before{content:"\f053"}.fa-bacteria:before{content:"\e059"}.fa-hand-lizard:before{content:"\f258"}.fa-notdef:before{content:"\e1fe"}.fa-disease:before{content:"\f7fa"}.fa-briefcase-medical:before{content:"\f469"}.fa-genderless:before{content:"\f22d"}.fa-chevron-right:before{content:"\f054"}.fa-retweet:before{content:"\f079"}.fa-car-alt:before,.fa-car-rear:before{content:"\f5de"}.fa-pump-soap:before{content:"\e06b"}.fa-video-slash:before{content:"\f4e2"}.fa-battery-2:before,.fa-battery-quarter:before{content:"\f243"}.fa-radio:before{content:"\f8d7"}.fa-baby-carriage:before,.fa-carriage-baby:before{content:"\f77d"}.fa-traffic-light:before{content:"\f637"}.fa-thermometer:before{content:"\f491"}.fa-vr-cardboard:before{content:"\f729"}.fa-hand-middle-finger:before{content:"\f806"}.fa-percent:before,.fa-percentage:before{content:"\25"}.fa-truck-moving:before{content:"\f4df"}.fa-glass-water-droplet:before{content:"\e4f5"}.fa-display:before{content:"\e163"}.fa-face-smile:before,.fa-smile:before{content:"\f118"}.fa-thumb-tack:before,.fa-thumbtack:before{content:"\f08d"}.fa-trophy:before{content:"\f091"}.fa-person-praying:before,.fa-pray:before{content:"\f683"}.fa-hammer:before{content:"\f6e3"}.fa-hand-peace:before{content:"\f25b"}.fa-rotate:before,.fa-sync-alt:before{content:"\f2f1"}.fa-spinner:before{content:"\f110"}.fa-robot:before{content:"\f544"}.fa-peace:before{content:"\f67c"}.fa-cogs:before,.fa-gears:before{content:"\f085"}.fa-warehouse:before{content:"\f494"}.fa-arrow-up-right-dots:before{content:"\e4b7"}.fa-splotch:before{content:"\f5bc"}.fa-face-grin-hearts:before,.fa-grin-hearts:before{content:"\f584"}.fa-dice-four:before{content:"\f524"}.fa-sim-card:before{content:"\f7c4"}.fa-transgender-alt:before,.fa-transgender:before{content:"\f225"}.fa-mercury:before{content:"\f223"}.fa-arrow-turn-down:before,.fa-level-down:before{content:"\f149"}.fa-person-falling-burst:before{content:"\e547"}.fa-award:before{content:"\f559"}.fa-ticket-alt:before,.fa-ticket-simple:before{content:"\f3ff"}.fa-building:before{content:"\f1ad"}.fa-angle-double-left:before,.fa-angles-left:before{content:"\f100"}.fa-qrcode:before{content:"\f029"}.fa-clock-rotate-left:before,.fa-history:before{content:"\f1da"}.fa-face-grin-beam-sweat:before,.fa-grin-beam-sweat:before{content:"\f583"}.fa-arrow-right-from-file:before,.fa-file-export:before{content:"\f56e"}.fa-shield-blank:before,.fa-shield:before{content:"\f132"}.fa-arrow-up-short-wide:before,.fa-sort-amount-up-alt:before{content:"\f885"}.fa-house-medical:before{content:"\e3b2"}.fa-golf-ball-tee:before,.fa-golf-ball:before{content:"\f450"}.fa-chevron-circle-left:before,.fa-circle-chevron-left:before{content:"\f137"}.fa-house-chimney-window:before{content:"\e00d"}.fa-pen-nib:before{content:"\f5ad"}.fa-tent-arrow-turn-left:before{content:"\e580"}.fa-tents:before{content:"\e582"}.fa-magic:before,.fa-wand-magic:before{content:"\f0d0"}.fa-dog:before{content:"\f6d3"}.fa-carrot:before{content:"\f787"}.fa-moon:before{content:"\f186"}.fa-wine-glass-alt:before,.fa-wine-glass-empty:before{content:"\f5ce"}.fa-cheese:before{content:"\f7ef"}.fa-yin-yang:before{content:"\f6ad"}.fa-music:before{content:"\f001"}.fa-code-commit:before{content:"\f386"}.fa-temperature-low:before{content:"\f76b"}.fa-biking:before,.fa-person-biking:before{content:"\f84a"}.fa-broom:before{content:"\f51a"}.fa-shield-heart:before{content:"\e574"}.fa-gopuram:before{content:"\f664"}.fa-earth-oceania:before,.fa-globe-oceania:before{content:"\e47b"}.fa-square-xmark:before,.fa-times-square:before,.fa-xmark-square:before{content:"\f2d3"}.fa-hashtag:before{content:"\23"}.fa-expand-alt:before,.fa-up-right-and-down-left-from-center:before{content:"\f424"}.fa-oil-can:before{content:"\f613"}.fa-t:before{content:"\54"}.fa-hippo:before{content:"\f6ed"}.fa-chart-column:before{content:"\e0e3"}.fa-infinity:before{content:"\f534"}.fa-vial-circle-check:before{content:"\e596"}.fa-person-arrow-down-to-line:before{content:"\e538"}.fa-voicemail:before{content:"\f897"}.fa-fan:before{content:"\f863"}.fa-person-walking-luggage:before{content:"\e554"}.fa-arrows-alt-v:before,.fa-up-down:before{content:"\f338"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-calendar:before{content:"\f133"}.fa-trailer:before{content:"\e041"}.fa-bahai:before,.fa-haykal:before{content:"\f666"}.fa-sd-card:before{content:"\f7c2"}.fa-dragon:before{content:"\f6d5"}.fa-shoe-prints:before{content:"\f54b"}.fa-circle-plus:before,.fa-plus-circle:before{content:"\f055"}.fa-face-grin-tongue-wink:before,.fa-grin-tongue-wink:before{content:"\f58b"}.fa-hand-holding:before{content:"\f4bd"}.fa-plug-circle-exclamation:before{content:"\e55d"}.fa-chain-broken:before,.fa-chain-slash:before,.fa-link-slash:before,.fa-unlink:before{content:"\f127"}.fa-clone:before{content:"\f24d"}.fa-person-walking-arrow-loop-left:before{content:"\e551"}.fa-arrow-up-z-a:before,.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-fire-alt:before,.fa-fire-flame-curved:before{content:"\f7e4"}.fa-tornado:before{content:"\f76f"}.fa-file-circle-plus:before{content:"\e494"}.fa-book-quran:before,.fa-quran:before{content:"\f687"}.fa-anchor:before{content:"\f13d"}.fa-border-all:before{content:"\f84c"}.fa-angry:before,.fa-face-angry:before{content:"\f556"}.fa-cookie-bite:before{content:"\f564"}.fa-arrow-trend-down:before{content:"\e097"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-draw-polygon:before{content:"\f5ee"}.fa-balance-scale:before,.fa-scale-balanced:before{content:"\f24e"}.fa-gauge-simple-high:before,.fa-tachometer-fast:before,.fa-tachometer:before{content:"\f62a"}.fa-shower:before{content:"\f2cc"}.fa-desktop-alt:before,.fa-desktop:before{content:"\f390"}.fa-m:before{content:"\4d"}.fa-table-list:before,.fa-th-list:before{content:"\f00b"}.fa-comment-sms:before,.fa-sms:before{content:"\f7cd"}.fa-book:before{content:"\f02d"}.fa-user-plus:before{content:"\f234"}.fa-check:before{content:"\f00c"}.fa-battery-4:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-house-circle-check:before{content:"\e509"}.fa-angle-left:before{content:"\f104"}.fa-diagram-successor:before{content:"\e47a"}.fa-truck-arrow-right:before{content:"\e58b"}.fa-arrows-split-up-and-left:before{content:"\e4bc"}.fa-fist-raised:before,.fa-hand-fist:before{content:"\f6de"}.fa-cloud-moon:before{content:"\f6c3"}.fa-briefcase:before{content:"\f0b1"}.fa-person-falling:before{content:"\e546"}.fa-image-portrait:before,.fa-portrait:before{content:"\f3e0"}.fa-user-tag:before{content:"\f507"}.fa-rug:before{content:"\e569"}.fa-earth-europe:before,.fa-globe-europe:before{content:"\f7a2"}.fa-cart-flatbed-suitcase:before,.fa-luggage-cart:before{content:"\f59d"}.fa-rectangle-times:before,.fa-rectangle-xmark:before,.fa-times-rectangle:before,.fa-window-close:before{content:"\f410"}.fa-baht-sign:before{content:"\e0ac"}.fa-book-open:before{content:"\f518"}.fa-book-journal-whills:before,.fa-journal-whills:before{content:"\f66a"}.fa-handcuffs:before{content:"\e4f8"}.fa-exclamation-triangle:before,.fa-triangle-exclamation:before,.fa-warning:before{content:"\f071"}.fa-database:before{content:"\f1c0"}.fa-arrow-turn-right:before,.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-bottle-droplet:before{content:"\e4c4"}.fa-mask-face:before{content:"\e1d7"}.fa-hill-rockslide:before{content:"\e508"}.fa-exchange-alt:before,.fa-right-left:before{content:"\f362"}.fa-paper-plane:before{content:"\f1d8"}.fa-road-circle-exclamation:before{content:"\e565"}.fa-dungeon:before{content:"\f6d9"}.fa-align-right:before{content:"\f038"}.fa-money-bill-1-wave:before,.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-life-ring:before{content:"\f1cd"}.fa-hands:before,.fa-sign-language:before,.fa-signing:before{content:"\f2a7"}.fa-calendar-day:before{content:"\f783"}.fa-ladder-water:before,.fa-swimming-pool:before,.fa-water-ladder:before{content:"\f5c5"}.fa-arrows-up-down:before,.fa-arrows-v:before{content:"\f07d"}.fa-face-grimace:before,.fa-grimace:before{content:"\f57f"}.fa-wheelchair-alt:before,.fa-wheelchair-move:before{content:"\e2ce"}.fa-level-down-alt:before,.fa-turn-down:before{content:"\f3be"}.fa-person-walking-arrow-right:before{content:"\e552"}.fa-envelope-square:before,.fa-square-envelope:before{content:"\f199"}.fa-dice:before{content:"\f522"}.fa-bowling-ball:before{content:"\f436"}.fa-brain:before{content:"\f5dc"}.fa-band-aid:before,.fa-bandage:before{content:"\f462"}.fa-calendar-minus:before{content:"\f272"}.fa-circle-xmark:before,.fa-times-circle:before,.fa-xmark-circle:before{content:"\f057"}.fa-gifts:before{content:"\f79c"}.fa-hotel:before{content:"\f594"}.fa-earth-asia:before,.fa-globe-asia:before{content:"\f57e"}.fa-id-card-alt:before,.fa-id-card-clip:before{content:"\f47f"}.fa-magnifying-glass-plus:before,.fa-search-plus:before{content:"\f00e"}.fa-thumbs-up:before{content:"\f164"}.fa-user-clock:before{content:"\f4fd"}.fa-allergies:before,.fa-hand-dots:before{content:"\f461"}.fa-file-invoice:before{content:"\f570"}.fa-window-minimize:before{content:"\f2d1"}.fa-coffee:before,.fa-mug-saucer:before{content:"\f0f4"}.fa-brush:before{content:"\f55d"}.fa-mask:before{content:"\f6fa"}.fa-magnifying-glass-minus:before,.fa-search-minus:before{content:"\f010"}.fa-ruler-vertical:before{content:"\f548"}.fa-user-alt:before,.fa-user-large:before{content:"\f406"}.fa-train-tram:before{content:"\e5b4"}.fa-user-nurse:before{content:"\f82f"}.fa-syringe:before{content:"\f48e"}.fa-cloud-sun:before{content:"\f6c4"}.fa-stopwatch-20:before{content:"\e06f"}.fa-square-full:before{content:"\f45c"}.fa-magnet:before{content:"\f076"}.fa-jar:before{content:"\e516"}.fa-note-sticky:before,.fa-sticky-note:before{content:"\f249"}.fa-bug-slash:before{content:"\e490"}.fa-arrow-up-from-water-pump:before{content:"\e4b6"}.fa-bone:before{content:"\f5d7"}.fa-user-injured:before{content:"\f728"}.fa-face-sad-tear:before,.fa-sad-tear:before{content:"\f5b4"}.fa-plane:before{content:"\f072"}.fa-tent-arrows-down:before{content:"\e581"}.fa-exclamation:before{content:"\21"}.fa-arrows-spin:before{content:"\e4bb"}.fa-print:before{content:"\f02f"}.fa-try:before,.fa-turkish-lira-sign:before,.fa-turkish-lira:before{content:"\e2bb"}.fa-dollar-sign:before,.fa-dollar:before,.fa-usd:before{content:"\24"}.fa-x:before{content:"\58"}.fa-magnifying-glass-dollar:before,.fa-search-dollar:before{content:"\f688"}.fa-users-cog:before,.fa-users-gear:before{content:"\f509"}.fa-person-military-pointing:before{content:"\e54a"}.fa-bank:before,.fa-building-columns:before,.fa-institution:before,.fa-museum:before,.fa-university:before{content:"\f19c"}.fa-umbrella:before{content:"\f0e9"}.fa-trowel:before{content:"\e589"}.fa-d:before{content:"\44"}.fa-stapler:before{content:"\e5af"}.fa-masks-theater:before,.fa-theater-masks:before{content:"\f630"}.fa-kip-sign:before{content:"\e1c4"}.fa-hand-point-left:before{content:"\f0a5"}.fa-handshake-alt:before,.fa-handshake-simple:before{content:"\f4c6"}.fa-fighter-jet:before,.fa-jet-fighter:before{content:"\f0fb"}.fa-share-alt-square:before,.fa-square-share-nodes:before{content:"\f1e1"}.fa-barcode:before{content:"\f02a"}.fa-plus-minus:before{content:"\e43c"}.fa-video-camera:before,.fa-video:before{content:"\f03d"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\f19d"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-person-circle-check:before{content:"\e53e"}.fa-level-up-alt:before,.fa-turn-up:before{content:"\f3bf"} -.fa-sr-only,.fa-sr-only-focusable:not(:focus),.sr-only,.sr-only-focusable:not(:focus){position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}:host,:root{--fa-style-family-brands:"Font Awesome 6 Brands";--fa-font-brands:normal 400 1em/1 "Font Awesome 6 Brands"}@font-face{font-family:"Font Awesome 6 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}.fa-brands,.fab{font-weight:400}.fa-monero:before{content:"\f3d0"}.fa-hooli:before{content:"\f427"}.fa-yelp:before{content:"\f1e9"}.fa-cc-visa:before{content:"\f1f0"}.fa-lastfm:before{content:"\f202"}.fa-shopware:before{content:"\f5b5"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-aws:before{content:"\f375"}.fa-redhat:before{content:"\f7bc"}.fa-yoast:before{content:"\f2b1"}.fa-cloudflare:before{content:"\e07d"}.fa-ups:before{content:"\f7e0"}.fa-wpexplorer:before{content:"\f2de"}.fa-dyalog:before{content:"\f399"}.fa-bity:before{content:"\f37a"}.fa-stackpath:before{content:"\f842"}.fa-buysellads:before{content:"\f20d"}.fa-first-order:before{content:"\f2b0"}.fa-modx:before{content:"\f285"}.fa-guilded:before{content:"\e07e"}.fa-vnv:before{content:"\f40b"}.fa-js-square:before,.fa-square-js:before{content:"\f3b9"}.fa-microsoft:before{content:"\f3ca"}.fa-qq:before{content:"\f1d6"}.fa-orcid:before{content:"\f8d2"}.fa-java:before{content:"\f4e4"}.fa-invision:before{content:"\f7b0"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-centercode:before{content:"\f380"}.fa-glide-g:before{content:"\f2a6"}.fa-drupal:before{content:"\f1a9"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-unity:before{content:"\e049"}.fa-whmcs:before{content:"\f40d"}.fa-rocketchat:before{content:"\f3e8"}.fa-vk:before{content:"\f189"}.fa-untappd:before{content:"\f405"}.fa-mailchimp:before{content:"\f59e"}.fa-css3-alt:before{content:"\f38b"}.fa-reddit-square:before,.fa-square-reddit:before{content:"\f1a2"}.fa-vimeo-v:before{content:"\f27d"}.fa-contao:before{content:"\f26d"}.fa-square-font-awesome:before{content:"\e5ad"}.fa-deskpro:before{content:"\f38f"}.fa-sistrix:before{content:"\f3ee"}.fa-instagram-square:before,.fa-square-instagram:before{content:"\e055"}.fa-battle-net:before{content:"\f835"}.fa-the-red-yeti:before{content:"\f69d"}.fa-hacker-news-square:before,.fa-square-hacker-news:before{content:"\f3af"}.fa-edge:before{content:"\f282"}.fa-napster:before{content:"\f3d2"}.fa-snapchat-square:before,.fa-square-snapchat:before{content:"\f2ad"}.fa-google-plus-g:before{content:"\f0d5"}.fa-artstation:before{content:"\f77a"}.fa-markdown:before{content:"\f60f"}.fa-sourcetree:before{content:"\f7d3"}.fa-google-plus:before{content:"\f2b3"}.fa-diaspora:before{content:"\f791"}.fa-foursquare:before{content:"\f180"}.fa-stack-overflow:before{content:"\f16c"}.fa-github-alt:before{content:"\f113"}.fa-phoenix-squadron:before{content:"\f511"}.fa-pagelines:before{content:"\f18c"}.fa-algolia:before{content:"\f36c"}.fa-red-river:before{content:"\f3e3"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-safari:before{content:"\f267"}.fa-google:before{content:"\f1a0"}.fa-font-awesome-alt:before,.fa-square-font-awesome-stroke:before{content:"\f35c"}.fa-atlassian:before{content:"\f77b"}.fa-linkedin-in:before{content:"\f0e1"}.fa-digital-ocean:before{content:"\f391"}.fa-nimblr:before{content:"\f5a8"}.fa-chromecast:before{content:"\f838"}.fa-evernote:before{content:"\f839"}.fa-hacker-news:before{content:"\f1d4"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-adversal:before{content:"\f36a"}.fa-creative-commons:before{content:"\f25e"}.fa-watchman-monitoring:before{content:"\e087"}.fa-fonticons:before{content:"\f280"}.fa-weixin:before{content:"\f1d7"}.fa-shirtsinbulk:before{content:"\f214"}.fa-codepen:before{content:"\f1cb"}.fa-git-alt:before{content:"\f841"}.fa-lyft:before{content:"\f3c3"}.fa-rev:before{content:"\f5b2"}.fa-windows:before{content:"\f17a"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-square-viadeo:before,.fa-viadeo-square:before{content:"\f2aa"}.fa-meetup:before{content:"\f2e0"}.fa-centos:before{content:"\f789"}.fa-adn:before{content:"\f170"}.fa-cloudsmith:before{content:"\f384"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-dribbble-square:before,.fa-square-dribbble:before{content:"\f397"}.fa-codiepie:before{content:"\f284"}.fa-node:before{content:"\f419"}.fa-mix:before{content:"\f3cb"}.fa-steam:before{content:"\f1b6"}.fa-cc-apple-pay:before{content:"\f416"}.fa-scribd:before{content:"\f28a"}.fa-openid:before{content:"\f19b"}.fa-instalod:before{content:"\e081"}.fa-expeditedssl:before{content:"\f23e"}.fa-sellcast:before{content:"\f2da"}.fa-square-twitter:before,.fa-twitter-square:before{content:"\f081"}.fa-r-project:before{content:"\f4f7"}.fa-delicious:before{content:"\f1a5"}.fa-freebsd:before{content:"\f3a4"}.fa-vuejs:before{content:"\f41f"}.fa-accusoft:before{content:"\f369"}.fa-ioxhost:before{content:"\f208"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-app-store:before{content:"\f36f"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-itunes-note:before{content:"\f3b5"}.fa-golang:before{content:"\e40f"}.fa-kickstarter:before{content:"\f3bb"}.fa-grav:before{content:"\f2d6"}.fa-weibo:before{content:"\f18a"}.fa-uncharted:before{content:"\e084"}.fa-firstdraft:before{content:"\f3a1"}.fa-square-youtube:before,.fa-youtube-square:before{content:"\f431"}.fa-wikipedia-w:before{content:"\f266"}.fa-rendact:before,.fa-wpressr:before{content:"\f3e4"}.fa-angellist:before{content:"\f209"}.fa-galactic-republic:before{content:"\f50c"}.fa-nfc-directional:before{content:"\e530"}.fa-skype:before{content:"\f17e"}.fa-joget:before{content:"\f3b7"}.fa-fedora:before{content:"\f798"}.fa-stripe-s:before{content:"\f42a"}.fa-meta:before{content:"\e49b"}.fa-laravel:before{content:"\f3bd"}.fa-hotjar:before{content:"\f3b1"}.fa-bluetooth-b:before{content:"\f294"}.fa-sticker-mule:before{content:"\f3f7"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-hips:before{content:"\f452"}.fa-behance:before{content:"\f1b4"}.fa-reddit:before{content:"\f1a1"}.fa-discord:before{content:"\f392"}.fa-chrome:before{content:"\f268"}.fa-app-store-ios:before{content:"\f370"}.fa-cc-discover:before{content:"\f1f2"}.fa-wpbeginner:before{content:"\f297"}.fa-confluence:before{content:"\f78d"}.fa-mdb:before{content:"\f8ca"}.fa-dochub:before{content:"\f394"}.fa-accessible-icon:before{content:"\f368"}.fa-ebay:before{content:"\f4f4"}.fa-amazon:before{content:"\f270"}.fa-unsplash:before{content:"\e07c"}.fa-yarn:before{content:"\f7e3"}.fa-square-steam:before,.fa-steam-square:before{content:"\f1b7"}.fa-500px:before{content:"\f26e"}.fa-square-vimeo:before,.fa-vimeo-square:before{content:"\f194"}.fa-asymmetrik:before{content:"\f372"}.fa-font-awesome-flag:before,.fa-font-awesome-logo-full:before,.fa-font-awesome:before{content:"\f2b4"}.fa-gratipay:before{content:"\f184"}.fa-apple:before{content:"\f179"}.fa-hive:before{content:"\e07f"}.fa-gitkraken:before{content:"\f3a6"}.fa-keybase:before{content:"\f4f5"}.fa-apple-pay:before{content:"\f415"}.fa-padlet:before{content:"\e4a0"}.fa-amazon-pay:before{content:"\f42c"}.fa-github-square:before,.fa-square-github:before{content:"\f092"}.fa-stumbleupon:before{content:"\f1a4"}.fa-fedex:before{content:"\f797"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-shopify:before{content:"\e057"}.fa-neos:before{content:"\f612"}.fa-hackerrank:before{content:"\f5f7"}.fa-researchgate:before{content:"\f4f8"}.fa-swift:before{content:"\f8e1"}.fa-angular:before{content:"\f420"}.fa-speakap:before{content:"\f3f3"}.fa-angrycreative:before{content:"\f36e"}.fa-y-combinator:before{content:"\f23b"}.fa-empire:before{content:"\f1d1"}.fa-envira:before{content:"\f299"}.fa-gitlab-square:before,.fa-square-gitlab:before{content:"\e5ae"}.fa-studiovinari:before{content:"\f3f8"}.fa-pied-piper:before{content:"\f2ae"}.fa-wordpress:before{content:"\f19a"}.fa-product-hunt:before{content:"\f288"}.fa-firefox:before{content:"\f269"}.fa-linode:before{content:"\f2b8"}.fa-goodreads:before{content:"\f3a8"}.fa-odnoklassniki-square:before,.fa-square-odnoklassniki:before{content:"\f264"}.fa-jsfiddle:before{content:"\f1cc"}.fa-sith:before{content:"\f512"}.fa-themeisle:before{content:"\f2b2"}.fa-page4:before{content:"\f3d7"}.fa-hashnode:before{content:"\e499"}.fa-react:before{content:"\f41b"}.fa-cc-paypal:before{content:"\f1f4"}.fa-squarespace:before{content:"\f5be"}.fa-cc-stripe:before{content:"\f1f5"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-bitcoin:before{content:"\f379"}.fa-keycdn:before{content:"\f3ba"}.fa-opera:before{content:"\f26a"}.fa-itch-io:before{content:"\f83a"}.fa-umbraco:before{content:"\f8e8"}.fa-galactic-senate:before{content:"\f50d"}.fa-ubuntu:before{content:"\f7df"}.fa-draft2digital:before{content:"\f396"}.fa-stripe:before{content:"\f429"}.fa-houzz:before{content:"\f27c"}.fa-gg:before{content:"\f260"}.fa-dhl:before{content:"\f790"}.fa-pinterest-square:before,.fa-square-pinterest:before{content:"\f0d3"}.fa-xing:before{content:"\f168"}.fa-blackberry:before{content:"\f37b"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-playstation:before{content:"\f3df"}.fa-quinscape:before{content:"\f459"}.fa-less:before{content:"\f41d"}.fa-blogger-b:before{content:"\f37d"}.fa-opencart:before{content:"\f23d"}.fa-vine:before{content:"\f1ca"}.fa-paypal:before{content:"\f1ed"}.fa-gitlab:before{content:"\f296"}.fa-typo3:before{content:"\f42b"}.fa-reddit-alien:before{content:"\f281"}.fa-yahoo:before{content:"\f19e"}.fa-dailymotion:before{content:"\e052"}.fa-affiliatetheme:before{content:"\f36b"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-bootstrap:before{content:"\f836"}.fa-odnoklassniki:before{content:"\f263"}.fa-nfc-symbol:before{content:"\e531"}.fa-ethereum:before{content:"\f42e"}.fa-speaker-deck:before{content:"\f83c"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-patreon:before{content:"\f3d9"}.fa-avianex:before{content:"\f374"}.fa-ello:before{content:"\f5f1"}.fa-gofore:before{content:"\f3a7"}.fa-bimobject:before{content:"\f378"}.fa-facebook-f:before{content:"\f39e"}.fa-google-plus-square:before,.fa-square-google-plus:before{content:"\f0d4"}.fa-mandalorian:before{content:"\f50f"}.fa-first-order-alt:before{content:"\f50a"}.fa-osi:before{content:"\f41a"}.fa-google-wallet:before{content:"\f1ee"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-periscope:before{content:"\f3da"}.fa-fulcrum:before{content:"\f50b"}.fa-cloudscale:before{content:"\f383"}.fa-forumbee:before{content:"\f211"}.fa-mizuni:before{content:"\f3cc"}.fa-schlix:before{content:"\f3ea"}.fa-square-xing:before,.fa-xing-square:before{content:"\f169"}.fa-bandcamp:before{content:"\f2d5"}.fa-wpforms:before{content:"\f298"}.fa-cloudversify:before{content:"\f385"}.fa-usps:before{content:"\f7e1"}.fa-megaport:before{content:"\f5a3"}.fa-magento:before{content:"\f3c4"}.fa-spotify:before{content:"\f1bc"}.fa-optin-monster:before{content:"\f23c"}.fa-fly:before{content:"\f417"}.fa-aviato:before{content:"\f421"}.fa-itunes:before{content:"\f3b4"}.fa-cuttlefish:before{content:"\f38c"}.fa-blogger:before{content:"\f37c"}.fa-flickr:before{content:"\f16e"}.fa-viber:before{content:"\f409"}.fa-soundcloud:before{content:"\f1be"}.fa-digg:before{content:"\f1a6"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-symfony:before{content:"\f83d"}.fa-maxcdn:before{content:"\f136"}.fa-etsy:before{content:"\f2d7"}.fa-facebook-messenger:before{content:"\f39f"}.fa-audible:before{content:"\f373"}.fa-think-peaks:before{content:"\f731"}.fa-bilibili:before{content:"\e3d9"}.fa-erlang:before{content:"\f39d"}.fa-cotton-bureau:before{content:"\f89e"}.fa-dashcube:before{content:"\f210"}.fa-42-group:before,.fa-innosoft:before{content:"\e080"}.fa-stack-exchange:before{content:"\f18d"}.fa-elementor:before{content:"\f430"}.fa-pied-piper-square:before,.fa-square-pied-piper:before{content:"\e01e"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-palfed:before{content:"\f3d8"}.fa-superpowers:before{content:"\f2dd"}.fa-resolving:before{content:"\f3e7"}.fa-xbox:before{content:"\f412"}.fa-searchengin:before{content:"\f3eb"}.fa-tiktok:before{content:"\e07b"}.fa-facebook-square:before,.fa-square-facebook:before{content:"\f082"}.fa-renren:before{content:"\f18b"}.fa-linux:before{content:"\f17c"}.fa-glide:before{content:"\f2a5"}.fa-linkedin:before{content:"\f08c"}.fa-hubspot:before{content:"\f3b2"}.fa-deploydog:before{content:"\f38e"}.fa-twitch:before{content:"\f1e8"}.fa-ravelry:before{content:"\f2d9"}.fa-mixer:before{content:"\e056"}.fa-lastfm-square:before,.fa-square-lastfm:before{content:"\f203"}.fa-vimeo:before{content:"\f40a"}.fa-mendeley:before{content:"\f7b3"}.fa-uniregistry:before{content:"\f404"}.fa-figma:before{content:"\f799"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-dropbox:before{content:"\f16b"}.fa-instagram:before{content:"\f16d"}.fa-cmplid:before{content:"\e360"}.fa-facebook:before{content:"\f09a"}.fa-gripfire:before{content:"\f3ac"}.fa-jedi-order:before{content:"\f50e"}.fa-uikit:before{content:"\f403"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-phabricator:before{content:"\f3db"}.fa-ussunnah:before{content:"\f407"}.fa-earlybirds:before{content:"\f39a"}.fa-trade-federation:before{content:"\f513"}.fa-autoprefixer:before{content:"\f41c"}.fa-whatsapp:before{content:"\f232"}.fa-slideshare:before{content:"\f1e7"}.fa-google-play:before{content:"\f3ab"}.fa-viadeo:before{content:"\f2a9"}.fa-line:before{content:"\f3c0"}.fa-google-drive:before{content:"\f3aa"}.fa-servicestack:before{content:"\f3ec"}.fa-simplybuilt:before{content:"\f215"}.fa-bitbucket:before{content:"\f171"}.fa-imdb:before{content:"\f2d8"}.fa-deezer:before{content:"\e077"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-jira:before{content:"\f7b1"}.fa-docker:before{content:"\f395"}.fa-screenpal:before{content:"\e570"}.fa-bluetooth:before{content:"\f293"}.fa-gitter:before{content:"\f426"}.fa-d-and-d:before{content:"\f38d"}.fa-microblog:before{content:"\e01a"}.fa-cc-diners-club:before{content:"\f24c"}.fa-gg-circle:before{content:"\f261"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-yandex:before{content:"\f413"}.fa-readme:before{content:"\f4d5"}.fa-html5:before{content:"\f13b"}.fa-sellsy:before{content:"\f213"}.fa-sass:before{content:"\f41e"}.fa-wirsindhandwerk:before,.fa-wsh:before{content:"\e2d0"}.fa-buromobelexperte:before{content:"\f37f"}.fa-salesforce:before{content:"\f83b"}.fa-octopus-deploy:before{content:"\e082"}.fa-medapps:before{content:"\f3c6"}.fa-ns8:before{content:"\f3d5"}.fa-pinterest-p:before{content:"\f231"}.fa-apper:before{content:"\f371"}.fa-fort-awesome:before{content:"\f286"}.fa-waze:before{content:"\f83f"}.fa-cc-jcb:before{content:"\f24b"}.fa-snapchat-ghost:before,.fa-snapchat:before{content:"\f2ab"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-rust:before{content:"\e07a"}.fa-wix:before{content:"\f5cf"}.fa-behance-square:before,.fa-square-behance:before{content:"\f1b5"}.fa-supple:before{content:"\f3f9"}.fa-rebel:before{content:"\f1d0"}.fa-css3:before{content:"\f13c"}.fa-staylinked:before{content:"\f3f5"}.fa-kaggle:before{content:"\f5fa"}.fa-space-awesome:before{content:"\e5ac"}.fa-deviantart:before{content:"\f1bd"}.fa-cpanel:before{content:"\f388"}.fa-goodreads-g:before{content:"\f3a9"}.fa-git-square:before,.fa-square-git:before{content:"\f1d2"}.fa-square-tumblr:before,.fa-tumblr-square:before{content:"\f174"}.fa-trello:before{content:"\f181"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-get-pocket:before{content:"\f265"}.fa-perbyte:before{content:"\e083"}.fa-grunt:before{content:"\f3ad"}.fa-weebly:before{content:"\f5cc"}.fa-connectdevelop:before{content:"\f20e"}.fa-leanpub:before{content:"\f212"}.fa-black-tie:before{content:"\f27e"}.fa-themeco:before{content:"\f5c6"}.fa-python:before{content:"\f3e2"}.fa-android:before{content:"\f17b"}.fa-bots:before{content:"\e340"}.fa-free-code-camp:before{content:"\f2c5"}.fa-hornbill:before{content:"\f592"}.fa-js:before{content:"\f3b8"}.fa-ideal:before{content:"\e013"}.fa-git:before{content:"\f1d3"}.fa-dev:before{content:"\f6cc"}.fa-sketch:before{content:"\f7c6"}.fa-yandex-international:before{content:"\f414"}.fa-cc-amex:before{content:"\f1f3"}.fa-uber:before{content:"\f402"}.fa-github:before{content:"\f09b"}.fa-php:before{content:"\f457"}.fa-alipay:before{content:"\f642"}.fa-youtube:before{content:"\f167"}.fa-skyatlas:before{content:"\f216"}.fa-firefox-browser:before{content:"\e007"}.fa-replyd:before{content:"\f3e6"}.fa-suse:before{content:"\f7d6"}.fa-jenkins:before{content:"\f3b6"}.fa-twitter:before{content:"\f099"}.fa-rockrms:before{content:"\f3e9"}.fa-pinterest:before{content:"\f0d2"}.fa-buffer:before{content:"\f837"}.fa-npm:before{content:"\f3d4"}.fa-yammer:before{content:"\f840"}.fa-btc:before{content:"\f15a"}.fa-dribbble:before{content:"\f17d"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-internet-explorer:before{content:"\f26b"}.fa-stubber:before{content:"\e5c7"}.fa-telegram-plane:before,.fa-telegram:before{content:"\f2c6"}.fa-old-republic:before{content:"\f510"}.fa-odysee:before{content:"\e5c6"}.fa-square-whatsapp:before,.fa-whatsapp-square:before{content:"\f40c"}.fa-node-js:before{content:"\f3d3"}.fa-edge-legacy:before{content:"\e078"}.fa-slack-hash:before,.fa-slack:before{content:"\f198"}.fa-medrt:before{content:"\f3c8"}.fa-usb:before{content:"\f287"}.fa-tumblr:before{content:"\f173"}.fa-vaadin:before{content:"\f408"}.fa-quora:before{content:"\f2c4"}.fa-reacteurope:before{content:"\f75d"}.fa-medium-m:before,.fa-medium:before{content:"\f23a"}.fa-amilia:before{content:"\f36d"}.fa-mixcloud:before{content:"\f289"}.fa-flipboard:before{content:"\f44d"}.fa-viacoin:before{content:"\f237"}.fa-critical-role:before{content:"\f6c9"}.fa-sitrox:before{content:"\e44a"}.fa-discourse:before{content:"\f393"}.fa-joomla:before{content:"\f1aa"}.fa-mastodon:before{content:"\f4f6"}.fa-airbnb:before{content:"\f834"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-buy-n-large:before{content:"\f8a6"}.fa-gulp:before{content:"\f3ae"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-strava:before{content:"\f428"}.fa-ember:before{content:"\f423"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-teamspeak:before{content:"\f4f9"}.fa-pushed:before{content:"\f3e1"}.fa-wordpress-simple:before{content:"\f411"}.fa-nutritionix:before{content:"\f3d6"}.fa-wodu:before{content:"\e088"}.fa-google-pay:before{content:"\e079"}.fa-intercom:before{content:"\f7af"}.fa-zhihu:before{content:"\f63f"}.fa-korvue:before{content:"\f42f"}.fa-pix:before{content:"\e43a"}.fa-steam-symbol:before{content:"\f3f6"}:host,:root{--fa-font-regular:normal 400 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}.fa-regular,.far{font-weight:400}:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-weight:900}@font-face{font-family:"Font Awesome 5 Brands";font-display:block;font-weight:400;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:900;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:400;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype");unicode-range:u+f003,u+f006,u+f014,u+f016-f017,u+f01a-f01b,u+f01d,u+f022,u+f03e,u+f044,u+f046,u+f05c-f05d,u+f06e,u+f070,u+f087-f088,u+f08a,u+f094,u+f096-f097,u+f09d,u+f0a0,u+f0a2,u+f0a4-f0a7,u+f0c5,u+f0c7,u+f0e5-f0e6,u+f0eb,u+f0f6-f0f8,u+f10c,u+f114-f115,u+f118-f11a,u+f11c-f11d,u+f133,u+f147,u+f14e,u+f150-f152,u+f185-f186,u+f18e,u+f190-f192,u+f196,u+f1c1-f1c9,u+f1d9,u+f1db,u+f1e3,u+f1ea,u+f1f7,u+f1f9,u+f20a,u+f247-f248,u+f24a,u+f24d,u+f255-f25b,u+f25d,u+f271-f274,u+f278,u+f27b,u+f28c,u+f28e,u+f29c,u+f2b5,u+f2b7,u+f2ba,u+f2bc,u+f2be,u+f2c0-f2c1,u+f2c3,u+f2d0,u+f2d2,u+f2d4,u+f2dc}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-v4compatibility.woff2) format("woff2"),url(../webfonts/fa-v4compatibility.ttf) format("truetype");unicode-range:u+f041,u+f047,u+f065-f066,u+f07d-f07e,u+f080,u+f08b,u+f08e,u+f090,u+f09a,u+f0ac,u+f0ae,u+f0b2,u+f0d0,u+f0d6,u+f0e4,u+f0ec,u+f10a-f10b,u+f123,u+f13e,u+f148-f149,u+f14c,u+f156,u+f15e,u+f160-f161,u+f163,u+f175-f178,u+f195,u+f1f8,u+f219,u+f27a} \ No newline at end of file diff --git a/weed/admin/static/favicon.ico b/weed/admin/static/favicon.ico deleted file mode 100644 index 1059a4099..000000000 Binary files a/weed/admin/static/favicon.ico and /dev/null differ diff --git a/weed/admin/static/js/admin.js b/weed/admin/static/js/admin.js deleted file mode 100644 index 1758cde82..000000000 --- a/weed/admin/static/js/admin.js +++ /dev/null @@ -1,2533 +0,0 @@ -// SeaweedFS Dashboard JavaScript - -// Global variables -let bucketToDelete = ''; - -// Initialize dashboard when DOM is loaded -document.addEventListener('DOMContentLoaded', function() { - initializeDashboard(); - initializeEventHandlers(); - setupFormValidation(); - setupFileManagerEventHandlers(); - - // Initialize delete button visibility on file browser page - if (window.location.pathname === '/files') { - updateDeleteSelectedButton(); - } -}); - -function initializeDashboard() { - // Set up HTMX event listeners - setupHTMXListeners(); - - // Initialize tooltips - initializeTooltips(); - - // Set up periodic refresh - setupAutoRefresh(); - - // Set active navigation - setActiveNavigation(); - - // Set up submenu behavior - setupSubmenuBehavior(); -} - -// HTMX event listeners -function setupHTMXListeners() { - // Show loading indicator on requests - document.body.addEventListener('htmx:beforeRequest', function(evt) { - showLoadingIndicator(); - }); - - // Hide loading indicator on completion - document.body.addEventListener('htmx:afterRequest', function(evt) { - hideLoadingIndicator(); - }); - - // Handle errors - document.body.addEventListener('htmx:responseError', function(evt) { - handleHTMXError(evt); - }); -} - -// Initialize Bootstrap tooltips -function initializeTooltips() { - var tooltipTriggerList = [].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')); - var tooltipList = tooltipTriggerList.map(function (tooltipTriggerEl) { - return new bootstrap.Tooltip(tooltipTriggerEl); - }); -} - -// Set up auto-refresh for dashboard data -function setupAutoRefresh() { - // Refresh dashboard data every 30 seconds - setInterval(function() { - if (window.location.pathname === '/dashboard') { - htmx.trigger('#dashboard-content', 'refresh'); - } - }, 30000); -} - -// Set active navigation item -function setActiveNavigation() { - const currentPath = window.location.pathname; - const navLinks = document.querySelectorAll('.sidebar .nav-link'); - - navLinks.forEach(function(link) { - const href = link.getAttribute('href'); - let isActive = false; - - if (href === currentPath) { - isActive = true; - } else if (currentPath === '/' && href === '/admin') { - isActive = true; - } else if (currentPath.startsWith('/s3/') && href === '/s3/buckets') { - isActive = true; - } - // Note: Removed the problematic cluster condition that was highlighting all submenu items - - if (isActive) { - link.classList.add('active'); - } else { - link.classList.remove('active'); - } - }); -} - -// Set up submenu behavior -function setupSubmenuBehavior() { - const currentPath = window.location.pathname; - - // If we're on a cluster page, expand the cluster submenu - if (currentPath.startsWith('/cluster/')) { - const clusterSubmenu = document.getElementById('clusterSubmenu'); - if (clusterSubmenu) { - clusterSubmenu.classList.add('show'); - - // Update the parent toggle button state - const toggleButton = document.querySelector('[data-bs-target="#clusterSubmenu"]'); - if (toggleButton) { - toggleButton.classList.remove('collapsed'); - toggleButton.setAttribute('aria-expanded', 'true'); - } - } - } - - // If we're on an object store page, expand the object store submenu - if (currentPath.startsWith('/object-store/')) { - const objectStoreSubmenu = document.getElementById('objectStoreSubmenu'); - if (objectStoreSubmenu) { - objectStoreSubmenu.classList.add('show'); - - // Update the parent toggle button state - const toggleButton = document.querySelector('[data-bs-target="#objectStoreSubmenu"]'); - if (toggleButton) { - toggleButton.classList.remove('collapsed'); - toggleButton.setAttribute('aria-expanded', 'true'); - } - } - } - - // If we're on a maintenance page, expand the maintenance submenu - if (currentPath.startsWith('/maintenance')) { - const maintenanceSubmenu = document.getElementById('maintenanceSubmenu'); - if (maintenanceSubmenu) { - maintenanceSubmenu.classList.add('show'); - - // Update the parent toggle button state - const toggleButton = document.querySelector('[data-bs-target="#maintenanceSubmenu"]'); - if (toggleButton) { - toggleButton.classList.remove('collapsed'); - toggleButton.setAttribute('aria-expanded', 'true'); - } - } - } - - // Prevent submenu from collapsing when clicking on submenu items - const clusterSubmenuLinks = document.querySelectorAll('#clusterSubmenu .nav-link'); - clusterSubmenuLinks.forEach(function(link) { - link.addEventListener('click', function(e) { - // Don't prevent the navigation, just stop the collapse behavior - e.stopPropagation(); - }); - }); - - const objectStoreSubmenuLinks = document.querySelectorAll('#objectStoreSubmenu .nav-link'); - objectStoreSubmenuLinks.forEach(function(link) { - link.addEventListener('click', function(e) { - // Don't prevent the navigation, just stop the collapse behavior - e.stopPropagation(); - }); - }); - - const maintenanceSubmenuLinks = document.querySelectorAll('#maintenanceSubmenu .nav-link'); - maintenanceSubmenuLinks.forEach(function(link) { - link.addEventListener('click', function(e) { - // Don't prevent the navigation, just stop the collapse behavior - e.stopPropagation(); - }); - }); - - // Handle the main cluster toggle - const clusterToggle = document.querySelector('[data-bs-target="#clusterSubmenu"]'); - if (clusterToggle) { - clusterToggle.addEventListener('click', function(e) { - e.preventDefault(); - - const submenu = document.getElementById('clusterSubmenu'); - const isExpanded = submenu.classList.contains('show'); - - if (isExpanded) { - // Collapse - submenu.classList.remove('show'); - this.classList.add('collapsed'); - this.setAttribute('aria-expanded', 'false'); - } else { - // Expand - submenu.classList.add('show'); - this.classList.remove('collapsed'); - this.setAttribute('aria-expanded', 'true'); - } - }); - } - - // Handle the main object store toggle - const objectStoreToggle = document.querySelector('[data-bs-target="#objectStoreSubmenu"]'); - if (objectStoreToggle) { - objectStoreToggle.addEventListener('click', function(e) { - e.preventDefault(); - - const submenu = document.getElementById('objectStoreSubmenu'); - const isExpanded = submenu.classList.contains('show'); - - if (isExpanded) { - // Collapse - submenu.classList.remove('show'); - this.classList.add('collapsed'); - this.setAttribute('aria-expanded', 'false'); - } else { - // Expand - submenu.classList.add('show'); - this.classList.remove('collapsed'); - this.setAttribute('aria-expanded', 'true'); - } - }); - } - - // Handle the main maintenance toggle - const maintenanceToggle = document.querySelector('[data-bs-target="#maintenanceSubmenu"]'); - if (maintenanceToggle) { - maintenanceToggle.addEventListener('click', function(e) { - e.preventDefault(); - - const submenu = document.getElementById('maintenanceSubmenu'); - const isExpanded = submenu.classList.contains('show'); - - if (isExpanded) { - // Collapse - submenu.classList.remove('show'); - this.classList.add('collapsed'); - this.setAttribute('aria-expanded', 'false'); - } else { - // Expand - submenu.classList.add('show'); - this.classList.remove('collapsed'); - this.setAttribute('aria-expanded', 'true'); - } - }); - } -} - -// Loading indicator functions -function showLoadingIndicator() { - const indicator = document.getElementById('loading-indicator'); - if (indicator) { - indicator.style.display = 'block'; - } - - // Add loading class to body - document.body.classList.add('loading'); -} - -function hideLoadingIndicator() { - const indicator = document.getElementById('loading-indicator'); - if (indicator) { - indicator.style.display = 'none'; - } - - // Remove loading class from body - document.body.classList.remove('loading'); -} - -// Handle HTMX errors -function handleHTMXError(evt) { - console.error('HTMX Request Error:', evt.detail); - - // Show error toast or message - showErrorMessage('Request failed. Please try again.'); - - hideLoadingIndicator(); -} - -// Utility functions -function showErrorMessage(message) { - // Create toast element - const toast = document.createElement('div'); - toast.className = 'toast align-items-center text-white bg-danger border-0'; - toast.setAttribute('role', 'alert'); - toast.setAttribute('aria-live', 'assertive'); - toast.setAttribute('aria-atomic', 'true'); - - toast.innerHTML = ` -
-
- - ${message} -
- -
- `; - - // Add to toast container or create one - let toastContainer = document.getElementById('toast-container'); - if (!toastContainer) { - toastContainer = document.createElement('div'); - toastContainer.id = 'toast-container'; - toastContainer.className = 'toast-container position-fixed top-0 end-0 p-3'; - toastContainer.style.zIndex = '1055'; - document.body.appendChild(toastContainer); - } - - toastContainer.appendChild(toast); - - // Show toast - const bsToast = new bootstrap.Toast(toast); - bsToast.show(); - - // Remove toast element after it's hidden - toast.addEventListener('hidden.bs.toast', function() { - toast.remove(); - }); -} - -function showSuccessMessage(message) { - // Similar to showErrorMessage but with success styling - const toast = document.createElement('div'); - toast.className = 'toast align-items-center text-white bg-success border-0'; - toast.setAttribute('role', 'alert'); - toast.setAttribute('aria-live', 'assertive'); - toast.setAttribute('aria-atomic', 'true'); - - toast.innerHTML = ` -
-
- - ${message} -
- -
- `; - - let toastContainer = document.getElementById('toast-container'); - if (!toastContainer) { - toastContainer = document.createElement('div'); - toastContainer.id = 'toast-container'; - toastContainer.className = 'toast-container position-fixed top-0 end-0 p-3'; - toastContainer.style.zIndex = '1055'; - document.body.appendChild(toastContainer); - } - - toastContainer.appendChild(toast); - - const bsToast = new bootstrap.Toast(toast); - bsToast.show(); - - toast.addEventListener('hidden.bs.toast', function() { - toast.remove(); - }); -} - -// Format bytes for display -function formatBytes(bytes, decimals = 2) { - if (bytes === 0) return '0 Bytes'; - - const k = 1024; - const dm = decimals < 0 ? 0 : decimals; - const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']; - - const i = Math.floor(Math.log(bytes) / Math.log(k)); - - return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i]; -} - -// Format numbers with commas -function formatNumber(num) { - return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); -} - -// Helper function to format disk types for CSV export -function formatDiskTypes(diskTypesText) { - // Remove any HTML tags and clean up the text - return diskTypesText.replace(/<[^>]*>/g, '').replace(/\s+/g, ' ').trim(); -} - -// Confirm action dialogs -function confirmAction(message, callback) { - if (confirm(message)) { - callback(); - } -} - -// Global error handler -window.addEventListener('error', function(e) { - console.error('Global error:', e.error); - showErrorMessage('An unexpected error occurred.'); -}); - -// Export functions for global use -window.Dashboard = { - showErrorMessage, - showSuccessMessage, - formatBytes, - formatNumber, - confirmAction -}; - -// Initialize event handlers -function initializeEventHandlers() { - // S3 Bucket Management - const createBucketForm = document.getElementById('createBucketForm'); - if (createBucketForm) { - createBucketForm.addEventListener('submit', handleCreateBucket); - } - - // Delete bucket buttons - document.addEventListener('click', function(e) { - if (e.target.closest('.delete-bucket-btn')) { - const button = e.target.closest('.delete-bucket-btn'); - const bucketName = button.getAttribute('data-bucket-name'); - confirmDeleteBucket(bucketName); - } - - // Quota management buttons - if (e.target.closest('.quota-btn')) { - const button = e.target.closest('.quota-btn'); - const bucketName = button.getAttribute('data-bucket-name'); - const currentQuota = parseInt(button.getAttribute('data-current-quota')) || 0; - const quotaEnabled = button.getAttribute('data-quota-enabled') === 'true'; - showQuotaModal(bucketName, currentQuota, quotaEnabled); - } - }); - - // Quota form submission - const quotaForm = document.getElementById('quotaForm'); - if (quotaForm) { - quotaForm.addEventListener('submit', handleUpdateQuota); - } - - // Enable quota checkbox for create bucket form - const enableQuotaCheckbox = document.getElementById('enableQuota'); - if (enableQuotaCheckbox) { - enableQuotaCheckbox.addEventListener('change', function() { - const quotaSettings = document.getElementById('quotaSettings'); - if (this.checked) { - quotaSettings.style.display = 'block'; - } else { - quotaSettings.style.display = 'none'; - } - }); - } - - // Enable quota checkbox for quota modal - const quotaEnabledCheckbox = document.getElementById('quotaEnabled'); - if (quotaEnabledCheckbox) { - quotaEnabledCheckbox.addEventListener('change', function() { - const quotaSizeSettings = document.getElementById('quotaSizeSettings'); - if (this.checked) { - quotaSizeSettings.style.display = 'block'; - } else { - quotaSizeSettings.style.display = 'none'; - } - }); - } -} - -// Setup form validation -function setupFormValidation() { - // Bucket name validation - const bucketNameInput = document.getElementById('bucketName'); - if (bucketNameInput) { - bucketNameInput.addEventListener('input', validateBucketName); - } -} - -// S3 Bucket Management Functions - -// Handle create bucket form submission -async function handleCreateBucket(event) { - event.preventDefault(); - - const form = event.target; - const formData = new FormData(form); - const bucketData = { - name: formData.get('name'), - region: formData.get('region') || 'us-east-1', - quota_enabled: formData.get('quota_enabled') === 'on', - quota_size: parseInt(formData.get('quota_size')) || 0, - quota_unit: formData.get('quota_unit') || 'MB' - }; - - try { - const response = await fetch('/api/s3/buckets', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify(bucketData) - }); - - const result = await response.json(); - - if (response.ok) { - // Success - showAlert('success', `Bucket "${bucketData.name}" created successfully!`); - - // Close modal - const modal = bootstrap.Modal.getInstance(document.getElementById('createBucketModal')); - modal.hide(); - - // Reset form - form.reset(); - - // Refresh the page after a short delay - setTimeout(() => { - location.reload(); - }, 1500); - } else { - // Error - showAlert('danger', result.error || 'Failed to create bucket'); - } - } catch (error) { - console.error('Error creating bucket:', error); - showAlert('danger', 'Network error occurred while creating bucket'); - } -} - -// Validate bucket name input -function validateBucketName(event) { - const input = event.target; - const value = input.value; - const isValid = /^[a-z0-9.-]+$/.test(value) && value.length >= 3 && value.length <= 63; - - if (value.length > 0 && !isValid) { - input.setCustomValidity('Bucket name must contain only lowercase letters, numbers, dots, and hyphens (3-63 characters)'); - } else { - input.setCustomValidity(''); - } -} - -// Confirm bucket deletion -function confirmDeleteBucket(bucketName) { - bucketToDelete = bucketName; - document.getElementById('deleteBucketName').textContent = bucketName; - - const modal = new bootstrap.Modal(document.getElementById('deleteBucketModal')); - modal.show(); -} - -// Delete bucket -async function deleteBucket() { - if (!bucketToDelete) { - return; - } - - try { - const response = await fetch(`/api/s3/buckets/${bucketToDelete}`, { - method: 'DELETE' - }); - - const result = await response.json(); - - if (response.ok) { - // Success - showAlert('success', `Bucket "${bucketToDelete}" deleted successfully!`); - - // Close modal - const modal = bootstrap.Modal.getInstance(document.getElementById('deleteBucketModal')); - modal.hide(); - - // Refresh the page after a short delay - setTimeout(() => { - location.reload(); - }, 1500); - } else { - // Error - showAlert('danger', result.error || 'Failed to delete bucket'); - } - } catch (error) { - console.error('Error deleting bucket:', error); - showAlert('danger', 'Network error occurred while deleting bucket'); - } - - bucketToDelete = ''; -} - -// Refresh buckets list -function refreshBuckets() { - location.reload(); -} - -// Export bucket list -function exportBucketList() { - // Get table data - const table = document.getElementById('bucketsTable'); - if (!table) return; - - const rows = Array.from(table.querySelectorAll('tbody tr')); - const data = rows.map(row => { - const cells = row.querySelectorAll('td'); - if (cells.length < 5) return null; // Skip empty state row - - return { - name: cells[0].textContent.trim(), - created: cells[1].textContent.trim(), - objects: cells[2].textContent.trim(), - size: cells[3].textContent.trim(), - quota: cells[4].textContent.trim() - }; - }).filter(item => item !== null); - - // Convert to CSV - const csv = [ - ['Name', 'Created', 'Objects', 'Size', 'Quota'].join(','), - ...data.map(row => [ - row.name, - row.created, - row.objects, - row.size, - row.quota - ].join(',')) - ].join('\n'); - - // Download CSV - const blob = new Blob([csv], { type: 'text/csv' }); - const url = window.URL.createObjectURL(blob); - const a = document.createElement('a'); - a.href = url; - a.download = `seaweedfs-buckets-${new Date().toISOString().split('T')[0]}.csv`; - document.body.appendChild(a); - a.click(); - document.body.removeChild(a); - window.URL.revokeObjectURL(url); -} - -// Show alert message -function showAlert(type, message) { - // Remove existing alerts - const existingAlerts = document.querySelectorAll('.alert-floating'); - existingAlerts.forEach(alert => alert.remove()); - - // Create new alert - const alert = document.createElement('div'); - alert.className = `alert alert-${type} alert-dismissible fade show alert-floating`; - alert.style.cssText = ` - position: fixed; - top: 20px; - right: 20px; - z-index: 9999; - min-width: 300px; - box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); - `; - - alert.innerHTML = ` - ${message} - - `; - - document.body.appendChild(alert); - - // Auto-remove after 5 seconds - setTimeout(() => { - if (alert.parentNode) { - alert.remove(); - } - }, 5000); -} - -// Format date for display -function formatDate(date) { - return new Date(date).toLocaleString(); -} - -// Copy text to clipboard -function copyToClipboard(text) { - navigator.clipboard.writeText(text).then(() => { - showAlert('success', 'Copied to clipboard!'); - }).catch(err => { - console.error('Failed to copy text: ', err); - showAlert('danger', 'Failed to copy to clipboard'); - }); -} - -// Dashboard refresh functionality -function refreshDashboard() { - location.reload(); -} - -// Cluster management functions - -// Export volume servers data as CSV -function exportVolumeServers() { - const table = document.getElementById('hostsTable'); - if (!table) { - showErrorMessage('No volume servers data to export'); - return; - } - - let csv = 'Server ID,Address,Data Center,Rack,Volumes,Capacity,Usage\n'; - - const rows = table.querySelectorAll('tbody tr'); - rows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 7) { - const rowData = [ - cells[0].textContent.trim(), - cells[1].textContent.trim(), - cells[2].textContent.trim(), - cells[3].textContent.trim(), - cells[4].textContent.trim(), - cells[5].textContent.trim(), - cells[6].textContent.trim() - ]; - csv += rowData.join(',') + '\n'; - } - }); - - downloadCSV(csv, 'seaweedfs-volume-servers.csv'); -} - -// Export volumes data as CSV -function exportVolumes() { - const table = document.getElementById('volumesTable'); - if (!table) { - showErrorMessage('No volumes data to export'); - return; - } - - // Get headers from the table (dynamically handles conditional columns) - const headerCells = table.querySelectorAll('thead th'); - const headers = []; - headerCells.forEach((cell, index) => { - // Skip the Actions column (last column) - if (index < headerCells.length - 1) { - headers.push(cell.textContent.trim()); - } - }); - - let csv = headers.join(',') + '\n'; - - const rows = table.querySelectorAll('tbody tr'); - rows.forEach(row => { - const cells = row.querySelectorAll('td'); - const rowData = []; - // Export all cells except the Actions column (last column) - for (let i = 0; i < cells.length - 1; i++) { - rowData.push(`"${cells[i].textContent.trim().replace(/"/g, '""')}"`); - } - csv += rowData.join(',') + '\n'; - }); - - downloadCSV(csv, 'seaweedfs-volumes.csv'); -} - -// Export collections data as CSV -function exportCollections() { - const table = document.getElementById('collectionsTable'); - if (!table) { - showAlert('error', 'Collections table not found'); - return; - } - - const headers = ['Collection Name', 'Volumes', 'Files', 'Size', 'Disk Types']; - const rows = []; - - // Get table rows - const tableRows = table.querySelectorAll('tbody tr'); - tableRows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 5) { - rows.push([ - cells[0].textContent.trim(), - cells[1].textContent.trim(), - cells[2].textContent.trim(), - cells[3].textContent.trim(), - formatDiskTypes(cells[4].textContent.trim()) - ]); - } - }); - - // Generate CSV - const csvContent = [headers, ...rows] - .map(row => row.map(cell => `"${cell}"`).join(',')) - .join('\n'); - - // Download - const filename = `seaweedfs-collections-${new Date().toISOString().split('T')[0]}.csv`; - downloadCSV(csvContent, filename); -} - -// Export Masters to CSV -function exportMasters() { - const table = document.getElementById('mastersTable'); - if (!table) { - showAlert('error', 'Masters table not found'); - return; - } - - const headers = ['Address', 'Role', 'Suffrage']; - const rows = []; - - // Get table rows - const tableRows = table.querySelectorAll('tbody tr'); - tableRows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 3) { - rows.push([ - cells[0].textContent.trim(), - cells[1].textContent.trim(), - cells[2].textContent.trim() - ]); - } - }); - - // Generate CSV - const csvContent = [headers, ...rows] - .map(row => row.map(cell => `"${cell}"`).join(',')) - .join('\n'); - - // Download - const filename = `seaweedfs-masters-${new Date().toISOString().split('T')[0]}.csv`; - downloadCSV(csvContent, filename); -} - -// Export Filers to CSV -function exportFilers() { - const table = document.getElementById('filersTable'); - if (!table) { - showAlert('error', 'Filers table not found'); - return; - } - - const headers = ['Address', 'Version', 'Data Center', 'Rack', 'Created At']; - const rows = []; - - // Get table rows - const tableRows = table.querySelectorAll('tbody tr'); - tableRows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 5) { - rows.push([ - cells[0].textContent.trim(), - cells[1].textContent.trim(), - cells[2].textContent.trim(), - cells[3].textContent.trim(), - cells[4].textContent.trim() - ]); - } - }); - - // Generate CSV - const csvContent = [headers, ...rows] - .map(row => row.map(cell => `"${cell}"`).join(',')) - .join('\n'); - - // Download - const filename = `seaweedfs-filers-${new Date().toISOString().split('T')[0]}.csv`; - downloadCSV(csvContent, filename); -} - -// Export Users to CSV -function exportUsers() { - const table = document.getElementById('usersTable'); - if (!table) { - showAlert('error', 'Users table not found'); - return; - } - - const rows = table.querySelectorAll('tbody tr'); - if (rows.length === 0) { - showErrorMessage('No users to export'); - return; - } - - let csvContent = 'Username,Email,Access Key,Status,Created,Last Login\n'; - - rows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 6) { - const username = cells[0].textContent.trim(); - const email = cells[1].textContent.trim(); - const accessKey = cells[2].textContent.trim(); - const status = cells[3].textContent.trim(); - const created = cells[4].textContent.trim(); - const lastLogin = cells[5].textContent.trim(); - - csvContent += `"${username}","${email}","${accessKey}","${status}","${created}","${lastLogin}"\n`; - } - }); - - downloadCSV(csvContent, 'seaweedfs-users.csv'); -} - -// Confirm delete collection -function confirmDeleteCollection(button) { - const collectionName = button.getAttribute('data-collection-name'); - document.getElementById('deleteCollectionName').textContent = collectionName; - - const modal = new bootstrap.Modal(document.getElementById('deleteCollectionModal')); - modal.show(); - - // Set up confirm button - document.getElementById('confirmDeleteCollection').onclick = function() { - deleteCollection(collectionName); - }; -} - -// Delete collection -async function deleteCollection(collectionName) { - try { - const response = await fetch(`/api/collections/${collectionName}`, { - method: 'DELETE', - headers: { - 'Content-Type': 'application/json', - } - }); - - if (response.ok) { - showSuccessMessage(`Collection "${collectionName}" deleted successfully`); - // Hide modal - const modal = bootstrap.Modal.getInstance(document.getElementById('deleteCollectionModal')); - modal.hide(); - // Refresh page - setTimeout(() => { - window.location.reload(); - }, 1000); - } else { - const error = await response.json(); - showErrorMessage(`Failed to delete collection: ${error.error || 'Unknown error'}`); - } - } catch (error) { - console.error('Error deleting collection:', error); - showErrorMessage('Failed to delete collection. Please try again.'); - } -} - - - -// Download CSV utility function -function downloadCSV(csvContent, filename) { - const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); - const link = document.createElement('a'); - - if (link.download !== undefined) { - const url = URL.createObjectURL(blob); - link.setAttribute('href', url); - link.setAttribute('download', filename); - link.style.visibility = 'hidden'; - document.body.appendChild(link); - link.click(); - document.body.removeChild(link); - } -} - -// File Browser Functions - -// Toggle select all checkboxes -function toggleSelectAll() { - const selectAll = document.getElementById('selectAll'); - const checkboxes = document.querySelectorAll('.file-checkbox'); - - checkboxes.forEach(checkbox => { - checkbox.checked = selectAll.checked; - }); - - updateDeleteSelectedButton(); -} - -// Update visibility of delete selected button based on selection -function updateDeleteSelectedButton() { - const checkboxes = document.querySelectorAll('.file-checkbox:checked'); - const deleteBtn = document.getElementById('deleteSelectedBtn'); - - if (deleteBtn) { - if (checkboxes.length > 0) { - deleteBtn.style.display = 'inline-block'; - deleteBtn.innerHTML = `Delete Selected (${checkboxes.length})`; - } else { - deleteBtn.style.display = 'none'; - } - } -} - -// Update select all checkbox state based on individual selections -function updateSelectAllCheckbox() { - const selectAll = document.getElementById('selectAll'); - const allCheckboxes = document.querySelectorAll('.file-checkbox'); - const checkedCheckboxes = document.querySelectorAll('.file-checkbox:checked'); - - if (selectAll && allCheckboxes.length > 0) { - if (checkedCheckboxes.length === 0) { - selectAll.checked = false; - selectAll.indeterminate = false; - } else if (checkedCheckboxes.length === allCheckboxes.length) { - selectAll.checked = true; - selectAll.indeterminate = false; - } else { - selectAll.checked = false; - selectAll.indeterminate = true; - } - } -} - -// Get selected file paths -function getSelectedFilePaths() { - const checkboxes = document.querySelectorAll('.file-checkbox:checked'); - return Array.from(checkboxes).map(cb => cb.value); -} - -// Confirm delete selected files -function confirmDeleteSelected() { - const selectedPaths = getSelectedFilePaths(); - - if (selectedPaths.length === 0) { - showAlert('warning', 'No files selected'); - return; - } - - const fileNames = selectedPaths.map(path => path.split('/').pop()).join(', '); - const message = selectedPaths.length === 1 - ? `Are you sure you want to delete "${fileNames}"?` - : `Are you sure you want to delete ${selectedPaths.length} selected items?\n\n${fileNames.substring(0, 200)}${fileNames.length > 200 ? '...' : ''}`; - - if (confirm(message)) { - deleteSelectedFiles(selectedPaths); - } -} - -// Delete multiple selected files -async function deleteSelectedFiles(filePaths) { - if (!filePaths || filePaths.length === 0) { - showAlert('warning', 'No files selected'); - return; - } - - // Disable the delete button during operation - const deleteBtn = document.getElementById('deleteSelectedBtn'); - const originalText = deleteBtn.innerHTML; - deleteBtn.disabled = true; - deleteBtn.innerHTML = 'Deleting...'; - - try { - const response = await fetch('/api/files/delete-multiple', { - method: 'DELETE', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ paths: filePaths }) - }); - - if (response.ok) { - const result = await response.json(); - - if (result.deleted > 0) { - if (result.failed === 0) { - showAlert('success', `Successfully deleted ${result.deleted} item(s)`); - } else { - showAlert('warning', `Deleted ${result.deleted} item(s), failed to delete ${result.failed} item(s)`); - if (result.errors && result.errors.length > 0) { - console.warn('Deletion errors:', result.errors); - } - } - - // Reload the page to update the file list - setTimeout(() => { - window.location.reload(); - }, 1000); - } else { - let errorMessage = result.message || 'Failed to delete all selected items'; - if (result.errors && result.errors.length > 0) { - errorMessage += ': ' + result.errors.join(', '); - } - showAlert('error', errorMessage); - } - } else { - const error = await response.json(); - showAlert('error', `Failed to delete files: ${error.error || 'Unknown error'}`); - } - } catch (error) { - console.error('Delete error:', error); - showAlert('error', 'Failed to delete files'); - } finally { - // Re-enable the button - deleteBtn.disabled = false; - deleteBtn.innerHTML = originalText; - } -} - -// Create new folder -function createFolder() { - const modal = new bootstrap.Modal(document.getElementById('createFolderModal')); - modal.show(); -} - -// Upload file -function uploadFile() { - const modal = new bootstrap.Modal(document.getElementById('uploadFileModal')); - modal.show(); -} - -// Submit create folder form -async function submitCreateFolder() { - const folderName = document.getElementById('folderName').value.trim(); - const currentPath = document.getElementById('currentPath').value; - - if (!folderName) { - showErrorMessage('Please enter a folder name'); - return; - } - - // Validate folder name - if (folderName.includes('/') || folderName.includes('\\')) { - showErrorMessage('Folder names cannot contain / or \\ characters'); - return; - } - - // Additional validation for reserved names - const reservedNames = ['.', '..', 'CON', 'PRN', 'AUX', 'NUL']; - if (reservedNames.includes(folderName.toUpperCase())) { - showErrorMessage('This folder name is reserved and cannot be used'); - return; - } - - // Disable the button to prevent double submission - const submitButton = document.querySelector('#createFolderModal .btn-primary'); - const originalText = submitButton.innerHTML; - submitButton.disabled = true; - submitButton.innerHTML = 'Creating...'; - - try { - const response = await fetch('/api/files/create-folder', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - path: currentPath, - folder_name: folderName - }) - }); - - if (response.ok) { - showSuccessMessage(`Folder "${folderName}" created successfully`); - // Hide modal - const modal = bootstrap.Modal.getInstance(document.getElementById('createFolderModal')); - modal.hide(); - // Clear form - document.getElementById('folderName').value = ''; - // Refresh page - setTimeout(() => { - window.location.reload(); - }, 1000); - } else { - const error = await response.json(); - showErrorMessage(`Failed to create folder: ${error.error || 'Unknown error'}`); - } - } catch (error) { - console.error('Create folder error:', error); - showErrorMessage('Failed to create folder. Please try again.'); - } finally { - // Re-enable the button - submitButton.disabled = false; - submitButton.innerHTML = originalText; - } -} - -// Submit upload file form -async function submitUploadFile() { - const fileInput = document.getElementById('fileInput'); - const currentPath = document.getElementById('uploadPath').value; - - if (!fileInput.files || fileInput.files.length === 0) { - showErrorMessage('Please select at least one file to upload'); - return; - } - - const files = Array.from(fileInput.files); - const totalSize = files.reduce((sum, file) => sum + file.size, 0); - - // Validate total file size (limit to 500MB for admin interface) - const maxSize = 500 * 1024 * 1024; // 500MB total - if (totalSize > maxSize) { - showErrorMessage('Total file size exceeds 500MB limit. Please select fewer or smaller files.'); - return; - } - - // Individual file size validation removed - no limit per file - - const formData = new FormData(); - files.forEach(file => { - formData.append('files', file); - }); - formData.append('path', currentPath); - - // Show progress bar and disable button - const progressContainer = document.getElementById('uploadProgress'); - const progressBar = progressContainer.querySelector('.progress-bar'); - const uploadStatus = document.getElementById('uploadStatus'); - const submitButton = document.querySelector('#uploadFileModal .btn-primary'); - const originalText = submitButton.innerHTML; - - progressContainer.style.display = 'block'; - progressBar.style.width = '0%'; - progressBar.setAttribute('aria-valuenow', '0'); - progressBar.textContent = '0%'; - uploadStatus.textContent = `Uploading ${files.length} file(s)...`; - submitButton.disabled = true; - submitButton.innerHTML = 'Uploading...'; - - try { - const xhr = new XMLHttpRequest(); - - // Handle progress - xhr.upload.addEventListener('progress', function(e) { - if (e.lengthComputable) { - const percentComplete = Math.round((e.loaded / e.total) * 100); - progressBar.style.width = percentComplete + '%'; - progressBar.setAttribute('aria-valuenow', percentComplete); - progressBar.textContent = percentComplete + '%'; - uploadStatus.textContent = `Uploading ${files.length} file(s)... ${percentComplete}%`; - } - }); - - // Handle completion - xhr.addEventListener('load', function() { - if (xhr.status === 200) { - try { - const response = JSON.parse(xhr.responseText); - - if (response.uploaded > 0) { - if (response.failed === 0) { - showSuccessMessage(`Successfully uploaded ${response.uploaded} file(s)`); - } else { - showSuccessMessage(response.message); - // Show details of failed uploads - if (response.errors && response.errors.length > 0) { - console.warn('Upload errors:', response.errors); - } - } - - // Hide modal and refresh page - const modal = bootstrap.Modal.getInstance(document.getElementById('uploadFileModal')); - modal.hide(); - setTimeout(() => { - window.location.reload(); - }, 1000); - } else { - let errorMessage = response.message || 'All file uploads failed'; - if (response.errors && response.errors.length > 0) { - errorMessage += ': ' + response.errors.join(', '); - } - showErrorMessage(errorMessage); - } - } catch (e) { - showErrorMessage('Upload completed but response format was unexpected'); - } - progressContainer.style.display = 'none'; - } else { - let errorMessage = 'Unknown error'; - try { - const error = JSON.parse(xhr.responseText); - errorMessage = error.error || error.message || errorMessage; - } catch (e) { - errorMessage = `Server returned status ${xhr.status}`; - } - showErrorMessage(`Failed to upload files: ${errorMessage}`); - progressContainer.style.display = 'none'; - } - }); - - // Handle errors - xhr.addEventListener('error', function() { - showErrorMessage('Failed to upload files. Please check your connection and try again.'); - progressContainer.style.display = 'none'; - }); - - // Handle abort - xhr.addEventListener('abort', function() { - showErrorMessage('File upload was cancelled.'); - progressContainer.style.display = 'none'; - }); - - // Send request - xhr.open('POST', '/api/files/upload'); - xhr.send(formData); - - } catch (error) { - console.error('Upload error:', error); - showErrorMessage('Failed to upload files. Please try again.'); - progressContainer.style.display = 'none'; - } finally { - // Re-enable the button - submitButton.disabled = false; - submitButton.innerHTML = originalText; - } -} - -// Export file list to CSV -function exportFileList() { - const table = document.getElementById('fileTable'); - if (!table) { - showAlert('error', 'File table not found'); - return; - } - - const headers = ['Name', 'Size', 'Type', 'Modified', 'Permissions']; - const rows = []; - - // Get table rows - const tableRows = table.querySelectorAll('tbody tr'); - tableRows.forEach(row => { - const cells = row.querySelectorAll('td'); - if (cells.length >= 6) { - rows.push([ - cells[1].textContent.trim(), // Name - cells[2].textContent.trim(), // Size - cells[3].textContent.trim(), // Type - cells[4].textContent.trim(), // Modified - cells[5].textContent.trim() // Permissions - ]); - } - }); - - // Generate CSV - const csvContent = [headers, ...rows] - .map(row => row.map(cell => `"${cell}"`).join(',')) - .join('\n'); - - // Download - const filename = `seaweedfs-files-${new Date().toISOString().split('T')[0]}.csv`; - downloadCSV(csvContent, filename); -} - -// Download file -function downloadFile(filePath) { - // Create download link using admin API - const downloadUrl = `/api/files/download?path=${encodeURIComponent(filePath)}`; - window.open(downloadUrl, '_blank'); -} - -// View file -async function viewFile(filePath) { - try { - const response = await fetch(`/api/files/view?path=${encodeURIComponent(filePath)}`); - - if (!response.ok) { - const error = await response.json(); - showAlert('error', `Failed to view file: ${error.error || 'Unknown error'}`); - return; - } - - const data = await response.json(); - showFileViewer(data); - - } catch (error) { - console.error('View file error:', error); - showAlert('error', 'Failed to view file'); - } -} - -// Show file properties -async function showProperties(filePath) { - try { - const response = await fetch(`/api/files/properties?path=${encodeURIComponent(filePath)}`); - - if (!response.ok) { - const error = await response.json(); - showAlert('error', `Failed to get file properties: ${error.error || 'Unknown error'}`); - return; - } - - const properties = await response.json(); - showPropertiesModal(properties); - - } catch (error) { - console.error('Properties error:', error); - showAlert('error', 'Failed to get file properties'); - } -} - -// Confirm delete file/folder -function confirmDelete(filePath) { - if (confirm(`Are you sure you want to delete "${filePath}"?`)) { - deleteFile(filePath); - } -} - -// Delete file/folder -async function deleteFile(filePath) { - try { - const response = await fetch('/api/files/delete', { - method: 'DELETE', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ path: filePath }) - }); - - if (response.ok) { - showAlert('success', `Successfully deleted "${filePath}"`); - // Reload the page to update the file list - window.location.reload(); - } else { - const error = await response.json(); - showAlert('error', `Failed to delete file: ${error.error || 'Unknown error'}`); - } - } catch (error) { - console.error('Delete error:', error); - showAlert('error', 'Failed to delete file'); - } -} - -// Setup file manager specific event handlers -function setupFileManagerEventHandlers() { - // Handle Enter key in folder name input - const folderNameInput = document.getElementById('folderName'); - if (folderNameInput) { - folderNameInput.addEventListener('keypress', function(e) { - if (e.key === 'Enter') { - e.preventDefault(); - submitCreateFolder(); - } - }); - } - - // Handle file selection change to show preview - const fileInput = document.getElementById('fileInput'); - if (fileInput) { - fileInput.addEventListener('change', function(e) { - updateFileListPreview(); - }); - } - - // Setup checkbox event listeners for file selection - const checkboxes = document.querySelectorAll('.file-checkbox'); - checkboxes.forEach(checkbox => { - checkbox.addEventListener('change', function() { - updateDeleteSelectedButton(); - updateSelectAllCheckbox(); - }); - }); - - // Setup drag and drop for file uploads - setupDragAndDrop(); - - // Clear form when modals are hidden - const createFolderModal = document.getElementById('createFolderModal'); - if (createFolderModal) { - createFolderModal.addEventListener('hidden.bs.modal', function() { - document.getElementById('folderName').value = ''; - }); - } - - const uploadFileModal = document.getElementById('uploadFileModal'); - if (uploadFileModal) { - uploadFileModal.addEventListener('hidden.bs.modal', function() { - const fileInput = document.getElementById('fileInput'); - const progressContainer = document.getElementById('uploadProgress'); - const fileListPreview = document.getElementById('fileListPreview'); - fileInput.value = ''; - progressContainer.style.display = 'none'; - fileListPreview.style.display = 'none'; - }); - } -} - -// Setup drag and drop functionality -function setupDragAndDrop() { - const dropZone = document.querySelector('.card-body'); // Main file listing area - const uploadModal = document.getElementById('uploadFileModal'); - - if (!dropZone || !uploadModal) return; - - // Prevent default drag behaviors - ['dragenter', 'dragover', 'dragleave', 'drop'].forEach(eventName => { - dropZone.addEventListener(eventName, preventDefaults, false); - document.body.addEventListener(eventName, preventDefaults, false); - }); - - // Highlight drop zone when item is dragged over it - ['dragenter', 'dragover'].forEach(eventName => { - dropZone.addEventListener(eventName, highlight, false); - }); - - ['dragleave', 'drop'].forEach(eventName => { - dropZone.addEventListener(eventName, unhighlight, false); - }); - - // Handle dropped files - dropZone.addEventListener('drop', handleDrop, false); - - function preventDefaults(e) { - e.preventDefault(); - e.stopPropagation(); - } - - function highlight(e) { - dropZone.classList.add('drag-over'); - // Add some visual feedback - if (!dropZone.querySelector('.drag-overlay')) { - const overlay = document.createElement('div'); - overlay.className = 'drag-overlay'; - overlay.innerHTML = ` -
- -
Drop files here to upload
-

Release to upload files to this directory

-
- `; - overlay.style.cssText = ` - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: rgba(255, 255, 255, 0.9); - border: 2px dashed #007bff; - border-radius: 0.375rem; - z-index: 1000; - display: flex; - align-items: center; - justify-content: center; - `; - dropZone.style.position = 'relative'; - dropZone.appendChild(overlay); - } - } - - function unhighlight(e) { - dropZone.classList.remove('drag-over'); - const overlay = dropZone.querySelector('.drag-overlay'); - if (overlay) { - overlay.remove(); - } - } - - function handleDrop(e) { - const dt = e.dataTransfer; - const files = dt.files; - - if (files.length > 0) { - // Open upload modal and set files - const fileInput = document.getElementById('fileInput'); - if (fileInput) { - // Create a new FileList-like object - const fileArray = Array.from(files); - - // Set files to input (this is a bit tricky with file inputs) - const dataTransfer = new DataTransfer(); - fileArray.forEach(file => dataTransfer.items.add(file)); - fileInput.files = dataTransfer.files; - - // Update preview and show modal - updateFileListPreview(); - const modal = new bootstrap.Modal(uploadModal); - modal.show(); - } - } - } -} - -// Update file list preview when files are selected -function updateFileListPreview() { - const fileInput = document.getElementById('fileInput'); - const fileListPreview = document.getElementById('fileListPreview'); - const selectedFilesList = document.getElementById('selectedFilesList'); - - if (!fileInput.files || fileInput.files.length === 0) { - fileListPreview.style.display = 'none'; - return; - } - - const files = Array.from(fileInput.files); - const totalSize = files.reduce((sum, file) => sum + file.size, 0); - - let html = `
- ${files.length} file(s) selected - Total: ${formatBytes(totalSize)} -
`; - - files.forEach((file, index) => { - const fileIcon = getFileIconByName(file.name); - html += `
-
- - ${file.name} -
- ${formatBytes(file.size)} -
`; - }); - - selectedFilesList.innerHTML = html; - fileListPreview.style.display = 'block'; -} - -// Get file icon based on file name/extension -function getFileIconByName(fileName) { - const ext = fileName.split('.').pop().toLowerCase(); - - switch (ext) { - case 'jpg': - case 'jpeg': - case 'png': - case 'gif': - case 'bmp': - case 'svg': - return 'fa-image'; - case 'mp4': - case 'avi': - case 'mov': - case 'wmv': - case 'flv': - return 'fa-video'; - case 'mp3': - case 'wav': - case 'flac': - case 'aac': - return 'fa-music'; - case 'pdf': - return 'fa-file-pdf'; - case 'doc': - case 'docx': - return 'fa-file-word'; - case 'xls': - case 'xlsx': - return 'fa-file-excel'; - case 'ppt': - case 'pptx': - return 'fa-file-powerpoint'; - case 'txt': - case 'md': - return 'fa-file-text'; - case 'zip': - case 'rar': - case '7z': - case 'tar': - case 'gz': - return 'fa-file-archive'; - case 'js': - case 'ts': - case 'html': - case 'css': - case 'json': - case 'xml': - return 'fa-file-code'; - default: - return 'fa-file'; - } -} - -// Quota Management Functions - -// Show quota management modal -function showQuotaModal(bucketName, currentQuotaMB, quotaEnabled) { - document.getElementById('quotaBucketName').value = bucketName; - document.getElementById('quotaEnabled').checked = quotaEnabled; - - // Convert quota to appropriate unit and set values - const quotaBytes = currentQuotaMB * 1024 * 1024; // Convert MB to bytes - const { size, unit } = convertBytesToBestUnit(quotaBytes); - - document.getElementById('quotaSizeMB').value = size; - document.getElementById('quotaUnitMB').value = unit; - - // Show/hide quota size settings based on enabled state - const quotaSizeSettings = document.getElementById('quotaSizeSettings'); - if (quotaEnabled) { - quotaSizeSettings.style.display = 'block'; - } else { - quotaSizeSettings.style.display = 'none'; - } - - const modal = new bootstrap.Modal(document.getElementById('manageQuotaModal')); - modal.show(); -} - -// Convert bytes to the best unit (TB, GB, or MB) -function convertBytesToBestUnit(bytes) { - if (bytes === 0) { - return { size: 0, unit: 'MB' }; - } - - // Check if it's a clean TB value - if (bytes >= 1024 * 1024 * 1024 * 1024 && bytes % (1024 * 1024 * 1024 * 1024) === 0) { - return { size: bytes / (1024 * 1024 * 1024 * 1024), unit: 'TB' }; - } - - // Check if it's a clean GB value - if (bytes >= 1024 * 1024 * 1024 && bytes % (1024 * 1024 * 1024) === 0) { - return { size: bytes / (1024 * 1024 * 1024), unit: 'GB' }; - } - - // Default to MB - return { size: bytes / (1024 * 1024), unit: 'MB' }; -} - -// Handle quota update form submission -async function handleUpdateQuota(event) { - event.preventDefault(); - - const form = event.target; - const formData = new FormData(form); - const bucketName = document.getElementById('quotaBucketName').value; - - const quotaData = { - quota_enabled: formData.get('quota_enabled') === 'on', - quota_size: parseInt(formData.get('quota_size')) || 0, - quota_unit: formData.get('quota_unit') || 'MB' - }; - - try { - const response = await fetch(`/api/s3/buckets/${bucketName}/quota`, { - method: 'PUT', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify(quotaData) - }); - - const result = await response.json(); - - if (response.ok) { - // Success - showAlert('success', `Quota for bucket "${bucketName}" updated successfully!`); - - // Close modal - const modal = bootstrap.Modal.getInstance(document.getElementById('manageQuotaModal')); - modal.hide(); - - // Refresh the page after a short delay - setTimeout(() => { - location.reload(); - }, 1500); - } else { - // Error - showAlert('danger', result.error || 'Failed to update bucket quota'); - } - } catch (error) { - console.error('Error updating bucket quota:', error); - showAlert('danger', 'Network error occurred while updating bucket quota'); - } -} - -// Show file viewer modal -function showFileViewer(data) { - const file = data.file; - const content = data.content || ''; - const viewable = data.viewable !== false; - - // Create modal HTML - const modalHtml = ` - - `; - - // Remove existing modal if any - const existingModal = document.getElementById('fileViewerModal'); - if (existingModal) { - existingModal.remove(); - } - - // Add modal to DOM - document.body.insertAdjacentHTML('beforeend', modalHtml); - - // Show modal - const modal = new bootstrap.Modal(document.getElementById('fileViewerModal')); - modal.show(); - - // Clean up when modal is hidden - document.getElementById('fileViewerModal').addEventListener('hidden.bs.modal', function () { - this.remove(); - }); -} - -// Create file viewer content based on file type -function createFileViewerContent(file, content) { - if (file.mime.startsWith('image/')) { - return ` -
- ${file.name} -
- `; - } else if (file.mime.startsWith('text/') || file.mime === 'application/json' || file.mime === 'application/javascript') { - const language = getLanguageFromMime(file.mime, file.name); - return ` -
- - - Size: ${formatBytes(file.size)} | Type: ${file.mime} - -
-
${escapeHtml(content)}
- `; - } else if (file.mime === 'application/pdf') { - return ` -
- -
- `; - } else { - return createNonViewableContent('This file type cannot be previewed in the browser.'); - } -} - -// Create non-viewable content message -function createNonViewableContent(reason) { - return ` -
- -
Cannot preview file
-

${reason}

-
- `; -} - -// Get language for syntax highlighting -function getLanguageFromMime(mime, filename) { - // First check MIME type - switch (mime) { - case 'application/json': return 'json'; - case 'application/javascript': return 'javascript'; - case 'text/html': return 'html'; - case 'text/css': return 'css'; - case 'application/xml': return 'xml'; - case 'text/typescript': return 'typescript'; - case 'text/x-python': return 'python'; - case 'text/x-go': return 'go'; - case 'text/x-java': return 'java'; - case 'text/x-c': return 'c'; - case 'text/x-c++': return 'cpp'; - case 'text/x-c-header': return 'c'; - case 'text/x-shellscript': return 'bash'; - case 'text/x-php': return 'php'; - case 'text/x-ruby': return 'ruby'; - case 'text/x-perl': return 'perl'; - case 'text/x-rust': return 'rust'; - case 'text/x-swift': return 'swift'; - case 'text/x-kotlin': return 'kotlin'; - case 'text/x-scala': return 'scala'; - case 'text/x-dockerfile': return 'dockerfile'; - case 'text/yaml': return 'yaml'; - case 'text/csv': return 'csv'; - case 'text/sql': return 'sql'; - case 'text/markdown': return 'markdown'; - } - - // Fallback to file extension - const ext = filename.split('.').pop().toLowerCase(); - switch (ext) { - case 'js': case 'mjs': return 'javascript'; - case 'ts': return 'typescript'; - case 'py': return 'python'; - case 'go': return 'go'; - case 'java': return 'java'; - case 'cpp': case 'cc': case 'cxx': case 'c++': return 'cpp'; - case 'c': return 'c'; - case 'h': case 'hpp': return 'c'; - case 'sh': case 'bash': case 'zsh': case 'fish': return 'bash'; - case 'php': return 'php'; - case 'rb': return 'ruby'; - case 'pl': return 'perl'; - case 'rs': return 'rust'; - case 'swift': return 'swift'; - case 'kt': return 'kotlin'; - case 'scala': return 'scala'; - case 'yml': case 'yaml': return 'yaml'; - case 'md': case 'markdown': return 'markdown'; - case 'sql': return 'sql'; - case 'csv': return 'csv'; - case 'dockerfile': return 'dockerfile'; - case 'gitignore': case 'gitattributes': return 'text'; - case 'env': return 'bash'; - case 'cfg': case 'conf': case 'ini': case 'properties': return 'ini'; - default: return 'text'; - } -} - -// Show properties modal -function showPropertiesModal(properties) { - // Create modal HTML - const modalHtml = ` - - `; - - // Remove existing modal if any - const existingModal = document.getElementById('propertiesModal'); - if (existingModal) { - existingModal.remove(); - } - - // Add modal to DOM - document.body.insertAdjacentHTML('beforeend', modalHtml); - - // Show modal - const modal = new bootstrap.Modal(document.getElementById('propertiesModal')); - modal.show(); - - // Clean up when modal is hidden - document.getElementById('propertiesModal').addEventListener('hidden.bs.modal', function () { - this.remove(); - }); -} - -// Create properties content -function createPropertiesContent(properties) { - let html = ` -
-
-
Basic Information
- - - - - `; - - if (!properties.is_directory) { - html += ` - - - `; - } - - html += ` -
Name:${properties.name}
Full Path:${properties.full_path}
Type:${properties.is_directory ? 'Directory' : 'File'}
Size:${properties.size_formatted || formatBytes(properties.size || 0)}
MIME Type:${properties.mime_type || 'Unknown'}
-
-
-
Timestamps
- - `; - - if (properties.modified_time) { - html += ``; - } - if (properties.created_time) { - html += ``; - } - - html += ` -
Modified:${properties.modified_time}
Created:${properties.created_time}
- -
Permissions
- - - - -
Mode:${properties.file_mode_formatted || properties.file_mode}
UID:${properties.uid || 'N/A'}
GID:${properties.gid || 'N/A'}
-
-
- `; - - // Add TTL information if available - if (properties.ttl_seconds && properties.ttl_seconds > 0) { - html += ` -
-
-
TTL (Time To Live)
- - -
TTL:${properties.ttl_formatted || properties.ttl_seconds + ' seconds'}
-
-
- `; - } - - // Add chunk information if available - if (properties.chunks && properties.chunks.length > 0) { - html += ` -
-
-
Chunks (${properties.chunk_count})
-
- - - - - - - - - - - `; - - properties.chunks.forEach(chunk => { - html += ` - - - - - - - `; - }); - - html += ` - -
File IDOffsetSizeETag
${chunk.file_id}${formatBytes(chunk.offset)}${formatBytes(chunk.size)}${chunk.e_tag || 'N/A'}
-
-
-
- `; - } - - // Add extended attributes if available - if (properties.extended && Object.keys(properties.extended).length > 0) { - html += ` -
-
-
Extended Attributes
- - `; - - Object.entries(properties.extended).forEach(([key, value]) => { - html += ``; - }); - - html += ` -
${key}:${value}
-
-
- `; - } - - return html; -} - -// Utility function to escape HTML -function escapeHtml(text) { - var map = { - '&': '&', - '<': '<', - '>': '>', - '"': '"', - "'": ''' - }; - return text.replace(/[&<>"']/g, function(m) { return map[m]; }); -} - -// ============================================================================ -// USER MANAGEMENT FUNCTIONS -// ============================================================================ - -// Global variables for user management -let currentEditingUser = ''; -let currentAccessKeysUser = ''; - -// User Management Functions - -async function handleCreateUser() { - const form = document.getElementById('createUserForm'); - const formData = new FormData(form); - - // Get selected actions - const actionsSelect = document.getElementById('actions'); - const selectedActions = Array.from(actionsSelect.selectedOptions).map(option => option.value); - - const userData = { - username: formData.get('username'), - email: formData.get('email'), - actions: selectedActions, - generate_key: formData.get('generateKey') === 'on' - }; - - try { - const response = await fetch('/api/users', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify(userData) - }); - - if (response.ok) { - const result = await response.json(); - showSuccessMessage('User created successfully'); - - // Show the created access key if generated - if (result.user && result.user.access_key) { - showNewAccessKeyModal(result.user); - } - - // Close modal and refresh page - const modal = bootstrap.Modal.getInstance(document.getElementById('createUserModal')); - modal.hide(); - form.reset(); - setTimeout(() => window.location.reload(), 1000); - } else { - const error = await response.json(); - showErrorMessage('Failed to create user: ' + (error.error || 'Unknown error')); - } - } catch (error) { - console.error('Error creating user:', error); - showErrorMessage('Failed to create user: ' + error.message); - } -} - -async function editUser(username) { - currentEditingUser = username; - - try { - const response = await fetch(`/api/users/${username}`); - if (response.ok) { - const user = await response.json(); - - // Populate edit form - document.getElementById('editUsername').value = username; - document.getElementById('editEmail').value = user.email || ''; - - // Set selected actions - const actionsSelect = document.getElementById('editActions'); - Array.from(actionsSelect.options).forEach(option => { - option.selected = user.actions && user.actions.includes(option.value); - }); - - // Show modal - const modal = new bootstrap.Modal(document.getElementById('editUserModal')); - modal.show(); - } else { - showErrorMessage('Failed to load user details'); - } - } catch (error) { - console.error('Error loading user:', error); - showErrorMessage('Failed to load user details'); - } -} - -async function handleUpdateUser() { - const form = document.getElementById('editUserForm'); - const formData = new FormData(form); - - // Get selected actions - const actionsSelect = document.getElementById('editActions'); - const selectedActions = Array.from(actionsSelect.selectedOptions).map(option => option.value); - - const userData = { - email: formData.get('email'), - actions: selectedActions - }; - - try { - const response = await fetch(`/api/users/${currentEditingUser}`, { - method: 'PUT', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify(userData) - }); - - if (response.ok) { - showSuccessMessage('User updated successfully'); - - // Close modal and refresh page - const modal = bootstrap.Modal.getInstance(document.getElementById('editUserModal')); - modal.hide(); - setTimeout(() => window.location.reload(), 1000); - } else { - const error = await response.json(); - showErrorMessage('Failed to update user: ' + (error.error || 'Unknown error')); - } - } catch (error) { - console.error('Error updating user:', error); - showErrorMessage('Failed to update user: ' + error.message); - } -} - -function confirmDeleteUser(username) { - confirmAction( - `Are you sure you want to delete user "${username}"? This action cannot be undone.`, - () => deleteUserConfirmed(username) - ); -} - -function deleteUser(username) { - confirmDeleteUser(username); -} - -async function deleteUserConfirmed(username) { - try { - const response = await fetch(`/api/users/${username}`, { - method: 'DELETE' - }); - - if (response.ok) { - showSuccessMessage('User deleted successfully'); - setTimeout(() => window.location.reload(), 1000); - } else { - const error = await response.json(); - showErrorMessage('Failed to delete user: ' + (error.error || 'Unknown error')); - } - } catch (error) { - console.error('Error deleting user:', error); - showErrorMessage('Failed to delete user: ' + error.message); - } -} - -async function showUserDetails(username) { - try { - const response = await fetch(`/api/users/${username}`); - if (response.ok) { - const user = await response.json(); - - const content = createUserDetailsContent(user); - document.getElementById('userDetailsContent').innerHTML = content; - - const modal = new bootstrap.Modal(document.getElementById('userDetailsModal')); - modal.show(); - } else { - showErrorMessage('Failed to load user details'); - } - } catch (error) { - console.error('Error loading user details:', error); - showErrorMessage('Failed to load user details'); - } -} - -function createUserDetailsContent(user) { - return ` -
-
-
Basic Information
- - - - - - - - - -
Username:${escapeHtml(user.username)}
Email:${escapeHtml(user.email || 'Not set')}
-
-
-
Permissions
-
- ${user.actions && user.actions.length > 0 ? - user.actions.map(action => `${action}`).join('') : - 'No permissions assigned' - } -
- -
Access Keys
- ${user.access_keys && user.access_keys.length > 0 ? - createAccessKeysTable(user.access_keys) : - '

No access keys

' - } -
-
- `; -} - -function createAccessKeysTable(accessKeys) { - return ` -
- - - - - - - - - ${accessKeys.map(key => ` - - - - - `).join('')} - -
Access KeyCreated
${key.access_key}${new Date(key.created_at).toLocaleDateString()}
-
- `; -} - -async function manageAccessKeys(username) { - currentAccessKeysUser = username; - document.getElementById('accessKeysUsername').textContent = username; - - await loadAccessKeys(username); - - const modal = new bootstrap.Modal(document.getElementById('accessKeysModal')); - modal.show(); -} - -async function loadAccessKeys(username) { - try { - const response = await fetch(`/api/users/${username}`); - if (response.ok) { - const user = await response.json(); - - const content = createAccessKeysManagementContent(user.access_keys || []); - document.getElementById('accessKeysContent').innerHTML = content; - } else { - document.getElementById('accessKeysContent').innerHTML = '

Failed to load access keys

'; - } - } catch (error) { - console.error('Error loading access keys:', error); - document.getElementById('accessKeysContent').innerHTML = '

Error loading access keys

'; - } -} - -function createAccessKeysManagementContent(accessKeys) { - if (accessKeys.length === 0) { - return '

No access keys found. Create one to get started.

'; - } - - return ` -
- - - - - - - - - - - ${accessKeys.map(key => ` - - - - - - - `).join('')} - -
Access KeySecret KeyCreatedActions
- ${key.access_key} - - - โ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ขโ€ข - - ${new Date(key.created_at).toLocaleDateString()} - -
-
- `; -} - -async function createAccessKey() { - if (!currentAccessKeysUser) { - showErrorMessage('No user selected'); - return; - } - - try { - const response = await fetch(`/api/users/${currentAccessKeysUser}/access-keys`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - } - }); - - if (response.ok) { - const result = await response.json(); - showSuccessMessage('Access key created successfully'); - - // Show the new access key - showNewAccessKeyModal(result.access_key); - - // Reload access keys - await loadAccessKeys(currentAccessKeysUser); - } else { - const error = await response.json(); - showErrorMessage('Failed to create access key: ' + (error.error || 'Unknown error')); - } - } catch (error) { - console.error('Error creating access key:', error); - showErrorMessage('Failed to create access key: ' + error.message); - } -} - -function confirmDeleteAccessKey(accessKeyId) { - confirmAction( - `Are you sure you want to delete access key "${accessKeyId}"? This action cannot be undone.`, - () => deleteAccessKeyConfirmed(accessKeyId) - ); -} - -async function deleteAccessKeyConfirmed(accessKeyId) { - try { - const response = await fetch(`/api/users/${currentAccessKeysUser}/access-keys/${accessKeyId}`, { - method: 'DELETE' - }); - - if (response.ok) { - showSuccessMessage('Access key deleted successfully'); - - // Reload access keys - await loadAccessKeys(currentAccessKeysUser); - } else { - const error = await response.json(); - showErrorMessage('Failed to delete access key: ' + (error.error || 'Unknown error')); - } - } catch (error) { - console.error('Error deleting access key:', error); - showErrorMessage('Failed to delete access key: ' + error.message); - } -} - -function showSecretKey(accessKey, secretKey) { - const content = ` -
- - Access Key Details: These credentials provide access to your object storage. Keep them secure and don't share them. -
-
- -
- - -
-
-
- -
- - -
-
- `; - - showModal('Access Key Details', content); -} - -function showNewAccessKeyModal(accessKeyData) { - const content = ` -
- - Success! Your new access key has been created. -
-
- - Important: These credentials provide access to your object storage. Keep them secure and don't share them. You can view them again through the user management interface if needed. -
-
- -
- - -
-
-
- -
- - -
-
- `; - - showModal('New Access Key Created', content); -} - -function showModal(title, content) { - // Create a dynamic modal - const modalId = 'dynamicModal_' + Date.now(); - const modalHtml = ` - - `; - - // Add modal to body - document.body.insertAdjacentHTML('beforeend', modalHtml); - - // Show modal - const modal = new bootstrap.Modal(document.getElementById(modalId)); - modal.show(); - - // Remove modal from DOM when hidden - document.getElementById(modalId).addEventListener('hidden.bs.modal', function() { - this.remove(); - }); -} - - - - \ No newline at end of file diff --git a/weed/admin/static/js/bootstrap.bundle.min.js b/weed/admin/static/js/bootstrap.bundle.min.js deleted file mode 100644 index b1999d9a9..000000000 --- a/weed/admin/static/js/bootstrap.bundle.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * Bootstrap v5.3.2 (https://getbootstrap.com/) - * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) - */ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap=e()}(this,(function(){"use strict";const t=new Map,e={set(e,i,n){t.has(e)||t.set(e,new Map);const s=t.get(e);s.has(i)||0===s.size?s.set(i,n):console.error(`Bootstrap doesn't allow more than one instance per element. Bound instance: ${Array.from(s.keys())[0]}.`)},get:(e,i)=>t.has(e)&&t.get(e).get(i)||null,remove(e,i){if(!t.has(e))return;const n=t.get(e);n.delete(i),0===n.size&&t.delete(e)}},i="transitionend",n=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),s=t=>{t.dispatchEvent(new Event(i))},o=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),r=t=>o(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(n(t)):null,a=t=>{if(!o(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},l=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),c=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?c(t.parentNode):null},h=()=>{},d=t=>{t.offsetHeight},u=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,f=[],p=()=>"rtl"===document.documentElement.dir,m=t=>{var e;e=()=>{const e=u();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(f.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of f)t()})),f.push(e)):e()},g=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,_=(t,e,n=!0)=>{if(!n)return void g(t);const o=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let r=!1;const a=({target:n})=>{n===e&&(r=!0,e.removeEventListener(i,a),g(t))};e.addEventListener(i,a),setTimeout((()=>{r||s(e)}),o)},b=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},v=/[^.]*(?=\..*)\.|.*/,y=/\..*/,w=/::\d+$/,A={};let E=1;const T={mouseenter:"mouseover",mouseleave:"mouseout"},C=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function O(t,e){return e&&`${e}::${E++}`||t.uidEvent||E++}function x(t){const e=O(t);return t.uidEvent=e,A[e]=A[e]||{},A[e]}function k(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function L(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=I(t);return C.has(o)||(o=t),[n,s,o]}function S(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=L(e,i,n);if(e in T){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=x(t),c=l[a]||(l[a]={}),h=k(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=O(r,e.replace(v,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return P(s,{delegateTarget:r}),n.oneOff&&N.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return P(n,{delegateTarget:t}),i.oneOff&&N.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function D(t,e,i,n,s){const o=k(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function $(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&D(t,e,i,r.callable,r.delegationSelector)}function I(t){return t=t.replace(y,""),T[t]||t}const N={on(t,e,i,n){S(t,e,i,n,!1)},one(t,e,i,n){S(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=L(e,i,n),a=r!==e,l=x(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))$(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(w,"");a&&!e.includes(s)||D(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;D(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=u();let s=null,o=!0,r=!0,a=!1;e!==I(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=P(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function P(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function M(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function j(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const F={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${j(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${j(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=M(t.dataset[n])}return e},getDataAttribute:(t,e)=>M(t.getAttribute(`data-bs-${j(e)}`))};class H{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=o(e)?F.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...o(e)?F.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],r=o(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(r))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${r}" but expected type "${s}".`)}var i}}class W extends H{constructor(t,i){super(),(t=r(t))&&(this._element=t,this._config=this._getConfig(i),e.set(this._element,this.constructor.DATA_KEY,this))}dispose(){e.remove(this._element,this.constructor.DATA_KEY),N.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){_(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return e.get(r(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.2"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const B=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?n(i.trim()):null}return e},z={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!l(t)&&a(t)))},getSelectorFromElement(t){const e=B(t);return e&&z.findOne(e)?e:null},getElementFromSelector(t){const e=B(t);return e?z.findOne(e):null},getMultipleElementsFromSelector(t){const e=B(t);return e?z.find(e):[]}},R=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;N.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),l(this))return;const s=z.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},q=".bs.alert",V=`close${q}`,K=`closed${q}`;class Q extends W{static get NAME(){return"alert"}close(){if(N.trigger(this._element,V).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),N.trigger(this._element,K),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Q.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}R(Q,"close"),m(Q);const X='[data-bs-toggle="button"]';class Y extends W{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=Y.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}N.on(document,"click.bs.button.data-api",X,(t=>{t.preventDefault();const e=t.target.closest(X);Y.getOrCreateInstance(e).toggle()})),m(Y);const U=".bs.swipe",G=`touchstart${U}`,J=`touchmove${U}`,Z=`touchend${U}`,tt=`pointerdown${U}`,et=`pointerup${U}`,it={endCallback:null,leftCallback:null,rightCallback:null},nt={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class st extends H{constructor(t,e){super(),this._element=t,t&&st.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return it}static get DefaultType(){return nt}static get NAME(){return"swipe"}dispose(){N.off(this._element,U)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),g(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&g(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(N.on(this._element,tt,(t=>this._start(t))),N.on(this._element,et,(t=>this._end(t))),this._element.classList.add("pointer-event")):(N.on(this._element,G,(t=>this._start(t))),N.on(this._element,J,(t=>this._move(t))),N.on(this._element,Z,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const ot=".bs.carousel",rt=".data-api",at="next",lt="prev",ct="left",ht="right",dt=`slide${ot}`,ut=`slid${ot}`,ft=`keydown${ot}`,pt=`mouseenter${ot}`,mt=`mouseleave${ot}`,gt=`dragstart${ot}`,_t=`load${ot}${rt}`,bt=`click${ot}${rt}`,vt="carousel",yt="active",wt=".active",At=".carousel-item",Et=wt+At,Tt={ArrowLeft:ht,ArrowRight:ct},Ct={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},Ot={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class xt extends W{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=z.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===vt&&this.cycle()}static get Default(){return Ct}static get DefaultType(){return Ot}static get NAME(){return"carousel"}next(){this._slide(at)}nextWhenVisible(){!document.hidden&&a(this._element)&&this.next()}prev(){this._slide(lt)}pause(){this._isSliding&&s(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?N.one(this._element,ut,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void N.one(this._element,ut,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?at:lt;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&N.on(this._element,ft,(t=>this._keydown(t))),"hover"===this._config.pause&&(N.on(this._element,pt,(()=>this.pause())),N.on(this._element,mt,(()=>this._maybeEnableCycle()))),this._config.touch&&st.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of z.find(".carousel-item img",this._element))N.on(t,gt,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ct)),rightCallback:()=>this._slide(this._directionToOrder(ht)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new st(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=Tt[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=z.findOne(wt,this._indicatorsElement);e.classList.remove(yt),e.removeAttribute("aria-current");const i=z.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(yt),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===at,s=e||b(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>N.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(dt).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),d(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(yt),i.classList.remove(yt,c,l),this._isSliding=!1,r(ut)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return z.findOne(Et,this._element)}_getItems(){return z.find(At,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return p()?t===ct?lt:at:t===ct?at:lt}_orderToDirection(t){return p()?t===lt?ct:ht:t===lt?ht:ct}static jQueryInterface(t){return this.each((function(){const e=xt.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}N.on(document,bt,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=z.getElementFromSelector(this);if(!e||!e.classList.contains(vt))return;t.preventDefault();const i=xt.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===F.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),N.on(window,_t,(()=>{const t=z.find('[data-bs-ride="carousel"]');for(const e of t)xt.getOrCreateInstance(e)})),m(xt);const kt=".bs.collapse",Lt=`show${kt}`,St=`shown${kt}`,Dt=`hide${kt}`,$t=`hidden${kt}`,It=`click${kt}.data-api`,Nt="show",Pt="collapse",Mt="collapsing",jt=`:scope .${Pt} .${Pt}`,Ft='[data-bs-toggle="collapse"]',Ht={parent:null,toggle:!0},Wt={parent:"(null|element)",toggle:"boolean"};class Bt extends W{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=z.find(Ft);for(const t of i){const e=z.getSelectorFromElement(t),i=z.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return Ht}static get DefaultType(){return Wt}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>Bt.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(N.trigger(this._element,Lt).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(Pt),this._element.classList.add(Mt),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(Mt),this._element.classList.add(Pt,Nt),this._element.style[e]="",N.trigger(this._element,St)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(N.trigger(this._element,Dt).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,d(this._element),this._element.classList.add(Mt),this._element.classList.remove(Pt,Nt);for(const t of this._triggerArray){const e=z.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(Mt),this._element.classList.add(Pt),N.trigger(this._element,$t)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(Nt)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=r(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(Ft);for(const e of t){const t=z.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=z.find(jt,this._config.parent);return z.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Bt.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}N.on(document,It,Ft,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of z.getMultipleElementsFromSelector(this))Bt.getOrCreateInstance(t,{toggle:!1}).toggle()})),m(Bt);var zt="top",Rt="bottom",qt="right",Vt="left",Kt="auto",Qt=[zt,Rt,qt,Vt],Xt="start",Yt="end",Ut="clippingParents",Gt="viewport",Jt="popper",Zt="reference",te=Qt.reduce((function(t,e){return t.concat([e+"-"+Xt,e+"-"+Yt])}),[]),ee=[].concat(Qt,[Kt]).reduce((function(t,e){return t.concat([e,e+"-"+Xt,e+"-"+Yt])}),[]),ie="beforeRead",ne="read",se="afterRead",oe="beforeMain",re="main",ae="afterMain",le="beforeWrite",ce="write",he="afterWrite",de=[ie,ne,se,oe,re,ae,le,ce,he];function ue(t){return t?(t.nodeName||"").toLowerCase():null}function fe(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function pe(t){return t instanceof fe(t).Element||t instanceof Element}function me(t){return t instanceof fe(t).HTMLElement||t instanceof HTMLElement}function ge(t){return"undefined"!=typeof ShadowRoot&&(t instanceof fe(t).ShadowRoot||t instanceof ShadowRoot)}const _e={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];me(s)&&ue(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});me(n)&&ue(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function be(t){return t.split("-")[0]}var ve=Math.max,ye=Math.min,we=Math.round;function Ae(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function Ee(){return!/^((?!chrome|android).)*safari/i.test(Ae())}function Te(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&me(t)&&(s=t.offsetWidth>0&&we(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&we(n.height)/t.offsetHeight||1);var r=(pe(t)?fe(t):window).visualViewport,a=!Ee()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function Ce(t){var e=Te(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function Oe(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&ge(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function xe(t){return fe(t).getComputedStyle(t)}function ke(t){return["table","td","th"].indexOf(ue(t))>=0}function Le(t){return((pe(t)?t.ownerDocument:t.document)||window.document).documentElement}function Se(t){return"html"===ue(t)?t:t.assignedSlot||t.parentNode||(ge(t)?t.host:null)||Le(t)}function De(t){return me(t)&&"fixed"!==xe(t).position?t.offsetParent:null}function $e(t){for(var e=fe(t),i=De(t);i&&ke(i)&&"static"===xe(i).position;)i=De(i);return i&&("html"===ue(i)||"body"===ue(i)&&"static"===xe(i).position)?e:i||function(t){var e=/firefox/i.test(Ae());if(/Trident/i.test(Ae())&&me(t)&&"fixed"===xe(t).position)return null;var i=Se(t);for(ge(i)&&(i=i.host);me(i)&&["html","body"].indexOf(ue(i))<0;){var n=xe(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Ie(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function Ne(t,e,i){return ve(t,ye(e,i))}function Pe(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function Me(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const je={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,s=t.options,o=i.elements.arrow,r=i.modifiersData.popperOffsets,a=be(i.placement),l=Ie(a),c=[Vt,qt].indexOf(a)>=0?"height":"width";if(o&&r){var h=function(t,e){return Pe("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:Me(t,Qt))}(s.padding,i),d=Ce(o),u="y"===l?zt:Vt,f="y"===l?Rt:qt,p=i.rects.reference[c]+i.rects.reference[l]-r[l]-i.rects.popper[c],m=r[l]-i.rects.reference[l],g=$e(o),_=g?"y"===l?g.clientHeight||0:g.clientWidth||0:0,b=p/2-m/2,v=h[u],y=_-d[c]-h[f],w=_/2-d[c]/2+b,A=Ne(v,w,y),E=l;i.modifiersData[n]=((e={})[E]=A,e.centerOffset=A-w,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&Oe(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Fe(t){return t.split("-")[1]}var He={top:"auto",right:"auto",bottom:"auto",left:"auto"};function We(t){var e,i=t.popper,n=t.popperRect,s=t.placement,o=t.variation,r=t.offsets,a=t.position,l=t.gpuAcceleration,c=t.adaptive,h=t.roundOffsets,d=t.isFixed,u=r.x,f=void 0===u?0:u,p=r.y,m=void 0===p?0:p,g="function"==typeof h?h({x:f,y:m}):{x:f,y:m};f=g.x,m=g.y;var _=r.hasOwnProperty("x"),b=r.hasOwnProperty("y"),v=Vt,y=zt,w=window;if(c){var A=$e(i),E="clientHeight",T="clientWidth";A===fe(i)&&"static"!==xe(A=Le(i)).position&&"absolute"===a&&(E="scrollHeight",T="scrollWidth"),(s===zt||(s===Vt||s===qt)&&o===Yt)&&(y=Rt,m-=(d&&A===w&&w.visualViewport?w.visualViewport.height:A[E])-n.height,m*=l?1:-1),s!==Vt&&(s!==zt&&s!==Rt||o!==Yt)||(v=qt,f-=(d&&A===w&&w.visualViewport?w.visualViewport.width:A[T])-n.width,f*=l?1:-1)}var C,O=Object.assign({position:a},c&&He),x=!0===h?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:we(i*s)/s||0,y:we(n*s)/s||0}}({x:f,y:m},fe(i)):{x:f,y:m};return f=x.x,m=x.y,l?Object.assign({},O,((C={})[y]=b?"0":"",C[v]=_?"0":"",C.transform=(w.devicePixelRatio||1)<=1?"translate("+f+"px, "+m+"px)":"translate3d("+f+"px, "+m+"px, 0)",C)):Object.assign({},O,((e={})[y]=b?m+"px":"",e[v]=_?f+"px":"",e.transform="",e))}const Be={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:be(e.placement),variation:Fe(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,We(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,We(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var ze={passive:!0};const Re={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=fe(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,ze)})),a&&l.addEventListener("resize",i.update,ze),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,ze)})),a&&l.removeEventListener("resize",i.update,ze)}},data:{}};var qe={left:"right",right:"left",bottom:"top",top:"bottom"};function Ve(t){return t.replace(/left|right|bottom|top/g,(function(t){return qe[t]}))}var Ke={start:"end",end:"start"};function Qe(t){return t.replace(/start|end/g,(function(t){return Ke[t]}))}function Xe(t){var e=fe(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function Ye(t){return Te(Le(t)).left+Xe(t).scrollLeft}function Ue(t){var e=xe(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function Ge(t){return["html","body","#document"].indexOf(ue(t))>=0?t.ownerDocument.body:me(t)&&Ue(t)?t:Ge(Se(t))}function Je(t,e){var i;void 0===e&&(e=[]);var n=Ge(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=fe(n),r=s?[o].concat(o.visualViewport||[],Ue(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(Je(Se(r)))}function Ze(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function ti(t,e,i){return e===Gt?Ze(function(t,e){var i=fe(t),n=Le(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=Ee();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+Ye(t),y:l}}(t,i)):pe(e)?function(t,e){var i=Te(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):Ze(function(t){var e,i=Le(t),n=Xe(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=ve(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=ve(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+Ye(t),l=-n.scrollTop;return"rtl"===xe(s||i).direction&&(a+=ve(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(Le(t)))}function ei(t){var e,i=t.reference,n=t.element,s=t.placement,o=s?be(s):null,r=s?Fe(s):null,a=i.x+i.width/2-n.width/2,l=i.y+i.height/2-n.height/2;switch(o){case zt:e={x:a,y:i.y-n.height};break;case Rt:e={x:a,y:i.y+i.height};break;case qt:e={x:i.x+i.width,y:l};break;case Vt:e={x:i.x-n.width,y:l};break;default:e={x:i.x,y:i.y}}var c=o?Ie(o):null;if(null!=c){var h="y"===c?"height":"width";switch(r){case Xt:e[c]=e[c]-(i[h]/2-n[h]/2);break;case Yt:e[c]=e[c]+(i[h]/2-n[h]/2)}}return e}function ii(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=void 0===n?t.placement:n,o=i.strategy,r=void 0===o?t.strategy:o,a=i.boundary,l=void 0===a?Ut:a,c=i.rootBoundary,h=void 0===c?Gt:c,d=i.elementContext,u=void 0===d?Jt:d,f=i.altBoundary,p=void 0!==f&&f,m=i.padding,g=void 0===m?0:m,_=Pe("number"!=typeof g?g:Me(g,Qt)),b=u===Jt?Zt:Jt,v=t.rects.popper,y=t.elements[p?b:u],w=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=Je(Se(t)),i=["absolute","fixed"].indexOf(xe(t).position)>=0&&me(t)?$e(t):t;return pe(i)?e.filter((function(t){return pe(t)&&Oe(t,i)&&"body"!==ue(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=ti(t,i,n);return e.top=ve(s.top,e.top),e.right=ye(s.right,e.right),e.bottom=ye(s.bottom,e.bottom),e.left=ve(s.left,e.left),e}),ti(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(pe(y)?y:y.contextElement||Le(t.elements.popper),l,h,r),A=Te(t.elements.reference),E=ei({reference:A,element:v,strategy:"absolute",placement:s}),T=Ze(Object.assign({},v,E)),C=u===Jt?T:A,O={top:w.top-C.top+_.top,bottom:C.bottom-w.bottom+_.bottom,left:w.left-C.left+_.left,right:C.right-w.right+_.right},x=t.modifiersData.offset;if(u===Jt&&x){var k=x[s];Object.keys(O).forEach((function(t){var e=[qt,Rt].indexOf(t)>=0?1:-1,i=[zt,Rt].indexOf(t)>=0?"y":"x";O[t]+=k[i]*e}))}return O}function ni(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,c=void 0===l?ee:l,h=Fe(n),d=h?a?te:te.filter((function(t){return Fe(t)===h})):Qt,u=d.filter((function(t){return c.indexOf(t)>=0}));0===u.length&&(u=d);var f=u.reduce((function(e,i){return e[i]=ii(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[be(i)],e}),{});return Object.keys(f).sort((function(t,e){return f[t]-f[e]}))}const si={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0===r||r,l=i.fallbackPlacements,c=i.padding,h=i.boundary,d=i.rootBoundary,u=i.altBoundary,f=i.flipVariations,p=void 0===f||f,m=i.allowedAutoPlacements,g=e.options.placement,_=be(g),b=l||(_!==g&&p?function(t){if(be(t)===Kt)return[];var e=Ve(t);return[Qe(t),e,Qe(e)]}(g):[Ve(g)]),v=[g].concat(b).reduce((function(t,i){return t.concat(be(i)===Kt?ni(e,{placement:i,boundary:h,rootBoundary:d,padding:c,flipVariations:p,allowedAutoPlacements:m}):i)}),[]),y=e.rects.reference,w=e.rects.popper,A=new Map,E=!0,T=v[0],C=0;C=0,S=L?"width":"height",D=ii(e,{placement:O,boundary:h,rootBoundary:d,altBoundary:u,padding:c}),$=L?k?qt:Vt:k?Rt:zt;y[S]>w[S]&&($=Ve($));var I=Ve($),N=[];if(o&&N.push(D[x]<=0),a&&N.push(D[$]<=0,D[I]<=0),N.every((function(t){return t}))){T=O,E=!1;break}A.set(O,N)}if(E)for(var P=function(t){var e=v.find((function(e){var i=A.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return T=e,"break"},M=p?3:1;M>0&&"break"!==P(M);M--);e.placement!==T&&(e.modifiersData[n]._skip=!0,e.placement=T,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function oi(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function ri(t){return[zt,qt,Rt,Vt].some((function(e){return t[e]>=0}))}const ai={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=ii(e,{elementContext:"reference"}),a=ii(e,{altBoundary:!0}),l=oi(r,n),c=oi(a,s,o),h=ri(l),d=ri(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},li={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.offset,o=void 0===s?[0,0]:s,r=ee.reduce((function(t,i){return t[i]=function(t,e,i){var n=be(t),s=[Vt,zt].indexOf(n)>=0?-1:1,o="function"==typeof i?i(Object.assign({},e,{placement:t})):i,r=o[0],a=o[1];return r=r||0,a=(a||0)*s,[Vt,qt].indexOf(n)>=0?{x:a,y:r}:{x:r,y:a}}(i,e.rects,o),t}),{}),a=r[e.placement],l=a.x,c=a.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=l,e.modifiersData.popperOffsets.y+=c),e.modifiersData[n]=r}},ci={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=ei({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},hi={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,s=i.mainAxis,o=void 0===s||s,r=i.altAxis,a=void 0!==r&&r,l=i.boundary,c=i.rootBoundary,h=i.altBoundary,d=i.padding,u=i.tether,f=void 0===u||u,p=i.tetherOffset,m=void 0===p?0:p,g=ii(e,{boundary:l,rootBoundary:c,padding:d,altBoundary:h}),_=be(e.placement),b=Fe(e.placement),v=!b,y=Ie(_),w="x"===y?"y":"x",A=e.modifiersData.popperOffsets,E=e.rects.reference,T=e.rects.popper,C="function"==typeof m?m(Object.assign({},e.rects,{placement:e.placement})):m,O="number"==typeof C?{mainAxis:C,altAxis:C}:Object.assign({mainAxis:0,altAxis:0},C),x=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,k={x:0,y:0};if(A){if(o){var L,S="y"===y?zt:Vt,D="y"===y?Rt:qt,$="y"===y?"height":"width",I=A[y],N=I+g[S],P=I-g[D],M=f?-T[$]/2:0,j=b===Xt?E[$]:T[$],F=b===Xt?-T[$]:-E[$],H=e.elements.arrow,W=f&&H?Ce(H):{width:0,height:0},B=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},z=B[S],R=B[D],q=Ne(0,E[$],W[$]),V=v?E[$]/2-M-q-z-O.mainAxis:j-q-z-O.mainAxis,K=v?-E[$]/2+M+q+R+O.mainAxis:F+q+R+O.mainAxis,Q=e.elements.arrow&&$e(e.elements.arrow),X=Q?"y"===y?Q.clientTop||0:Q.clientLeft||0:0,Y=null!=(L=null==x?void 0:x[y])?L:0,U=I+K-Y,G=Ne(f?ye(N,I+V-Y-X):N,I,f?ve(P,U):P);A[y]=G,k[y]=G-I}if(a){var J,Z="x"===y?zt:Vt,tt="x"===y?Rt:qt,et=A[w],it="y"===w?"height":"width",nt=et+g[Z],st=et-g[tt],ot=-1!==[zt,Vt].indexOf(_),rt=null!=(J=null==x?void 0:x[w])?J:0,at=ot?nt:et-E[it]-T[it]-rt+O.altAxis,lt=ot?et+E[it]+T[it]-rt-O.altAxis:st,ct=f&&ot?function(t,e,i){var n=Ne(t,e,i);return n>i?i:n}(at,et,lt):Ne(f?at:nt,et,f?lt:st);A[w]=ct,k[w]=ct-et}e.modifiersData[n]=k}},requiresIfExists:["offset"]};function di(t,e,i){void 0===i&&(i=!1);var n,s,o=me(e),r=me(e)&&function(t){var e=t.getBoundingClientRect(),i=we(e.width)/t.offsetWidth||1,n=we(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=Le(e),l=Te(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==ue(e)||Ue(a))&&(c=(n=e)!==fe(n)&&me(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:Xe(n)),me(e)?((h=Te(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=Ye(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function ui(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var fi={placement:"bottom",modifiers:[],strategy:"absolute"};function pi(){for(var t=arguments.length,e=new Array(t),i=0;iNumber.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(F.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...g(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=z.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>a(t)));i.length&&b(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=qi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=z.find(Ni);for(const i of e){const e=qi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ei,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ii)?this:z.prev(this,Ii)[0]||z.next(this,Ii)[0]||z.findOne(Ii,t.delegateTarget.parentNode),o=qi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}N.on(document,Si,Ii,qi.dataApiKeydownHandler),N.on(document,Si,Pi,qi.dataApiKeydownHandler),N.on(document,Li,qi.clearMenus),N.on(document,Di,qi.clearMenus),N.on(document,Li,Ii,(function(t){t.preventDefault(),qi.getOrCreateInstance(this).toggle()})),m(qi);const Vi="backdrop",Ki="show",Qi=`mousedown.bs.${Vi}`,Xi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Yi={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Ui extends H{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Xi}static get DefaultType(){return Yi}static get NAME(){return Vi}show(t){if(!this._config.isVisible)return void g(t);this._append();const e=this._getElement();this._config.isAnimated&&d(e),e.classList.add(Ki),this._emulateAnimation((()=>{g(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Ki),this._emulateAnimation((()=>{this.dispose(),g(t)}))):g(t)}dispose(){this._isAppended&&(N.off(this._element,Qi),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=r(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),N.on(t,Qi,(()=>{g(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){_(t,this._getElement(),this._config.isAnimated)}}const Gi=".bs.focustrap",Ji=`focusin${Gi}`,Zi=`keydown.tab${Gi}`,tn="backward",en={autofocus:!0,trapElement:null},nn={autofocus:"boolean",trapElement:"element"};class sn extends H{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return en}static get DefaultType(){return nn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),N.off(document,Gi),N.on(document,Ji,(t=>this._handleFocusin(t))),N.on(document,Zi,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,N.off(document,Gi))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=z.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===tn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?tn:"forward")}}const on=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",rn=".sticky-top",an="padding-right",ln="margin-right";class cn{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,an,(e=>e+t)),this._setElementAttributes(on,an,(e=>e+t)),this._setElementAttributes(rn,ln,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,an),this._resetElementAttributes(on,an),this._resetElementAttributes(rn,ln)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&F.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=F.getDataAttribute(t,e);null!==i?(F.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(o(t))e(t);else for(const i of z.find(t,this._element))e(i)}}const hn=".bs.modal",dn=`hide${hn}`,un=`hidePrevented${hn}`,fn=`hidden${hn}`,pn=`show${hn}`,mn=`shown${hn}`,gn=`resize${hn}`,_n=`click.dismiss${hn}`,bn=`mousedown.dismiss${hn}`,vn=`keydown.dismiss${hn}`,yn=`click${hn}.data-api`,wn="modal-open",An="show",En="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},Cn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class On extends W{constructor(t,e){super(t,e),this._dialog=z.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new cn,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return Cn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||N.trigger(this._element,pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(wn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(N.trigger(this._element,dn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(An),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){N.off(window,hn),N.off(this._dialog,hn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Ui({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=z.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),d(this._element),this._element.classList.add(An),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,N.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){N.on(this._element,vn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),N.on(window,gn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),N.on(this._element,bn,(t=>{N.one(this._element,_n,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(wn),this._resetAdjustments(),this._scrollBar.reset(),N.trigger(this._element,fn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(N.trigger(this._element,un).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(En)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(En),this._queueCallback((()=>{this._element.classList.remove(En),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=p()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=p()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=On.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}N.on(document,yn,'[data-bs-toggle="modal"]',(function(t){const e=z.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),N.one(e,pn,(t=>{t.defaultPrevented||N.one(e,fn,(()=>{a(this)&&this.focus()}))}));const i=z.findOne(".modal.show");i&&On.getInstance(i).hide(),On.getOrCreateInstance(e).toggle(this)})),R(On),m(On);const xn=".bs.offcanvas",kn=".data-api",Ln=`load${xn}${kn}`,Sn="show",Dn="showing",$n="hiding",In=".offcanvas.show",Nn=`show${xn}`,Pn=`shown${xn}`,Mn=`hide${xn}`,jn=`hidePrevented${xn}`,Fn=`hidden${xn}`,Hn=`resize${xn}`,Wn=`click${xn}${kn}`,Bn=`keydown.dismiss${xn}`,zn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class qn extends W{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return zn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||N.trigger(this._element,Nn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new cn).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Dn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(Sn),this._element.classList.remove(Dn),N.trigger(this._element,Pn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(N.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add($n),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(Sn,$n),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new cn).reset(),N.trigger(this._element,Fn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Ui({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():N.trigger(this._element,jn)}:null})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_addEventListeners(){N.on(this._element,Bn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():N.trigger(this._element,jn))}))}static jQueryInterface(t){return this.each((function(){const e=qn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}N.on(document,Wn,'[data-bs-toggle="offcanvas"]',(function(t){const e=z.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this))return;N.one(e,Fn,(()=>{a(this)&&this.focus()}));const i=z.findOne(In);i&&i!==e&&qn.getInstance(i).hide(),qn.getOrCreateInstance(e).toggle(this)})),N.on(window,Ln,(()=>{for(const t of z.find(In))qn.getOrCreateInstance(t).show()})),N.on(window,Hn,(()=>{for(const t of z.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&qn.getOrCreateInstance(t).hide()})),R(qn),m(qn);const Vn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Kn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Qn=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Xn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Kn.has(i)||Boolean(Qn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Yn={allowList:Vn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Un={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Gn={entry:"(string|element|function|null)",selector:"(string|element)"};class Jn extends H{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Yn}static get DefaultType(){return Un}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Gn)}_setContent(t,e,i){const n=z.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?o(e)?this._putElementInTemplate(r(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Xn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return g(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const Zn=new Set(["sanitize","allowList","sanitizeFn"]),ts="fade",es="show",is=".modal",ns="hide.bs.modal",ss="hover",os="focus",rs={AUTO:"auto",TOP:"top",RIGHT:p()?"left":"right",BOTTOM:"bottom",LEFT:p()?"right":"left"},as={allowList:Vn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},ls={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class cs extends W{constructor(t,e){if(void 0===vi)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,e),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return as}static get DefaultType(){return ls}static get NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),N.off(this._element.closest(is),ns,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=N.trigger(this._element,this.constructor.eventName("show")),e=(c(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),N.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))N.on(t,"mouseover",h);this._queueCallback((()=>{N.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!N.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))N.off(t,"mouseover",h);this._activeTrigger.click=!1,this._activeTrigger[os]=!1,this._activeTrigger[ss]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),N.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ts,es),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ts),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Jn({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ts)}_isShown(){return this.tip&&this.tip.classList.contains(es)}_createPopper(t){const e=g(this._config.placement,[this,t,this._element]),i=rs[e.toUpperCase()];return bi(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return g(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...g(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)N.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ss?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ss?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");N.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?os:ss]=!0,e._enter()})),N.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?os:ss]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},N.on(this._element.closest(is),ns,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=F.getDataAttributes(this._element);for(const t of Object.keys(e))Zn.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:r(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=cs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(cs);const hs={...cs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},ds={...cs.DefaultType,content:"(null|string|element|function)"};class us extends cs{static get Default(){return hs}static get DefaultType(){return ds}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=us.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}m(us);const fs=".bs.scrollspy",ps=`activate${fs}`,ms=`click${fs}`,gs=`load${fs}.data-api`,_s="active",bs="[href]",vs=".nav-link",ys=`${vs}, .nav-item > ${vs}, .list-group-item`,ws={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},As={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Es extends W{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return ws}static get DefaultType(){return As}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=r(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(N.off(this._config.target,ms),N.on(this._config.target,ms,bs,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=z.find(bs,this._config.target);for(const e of t){if(!e.hash||l(e))continue;const t=z.findOne(decodeURI(e.hash),this._element);a(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(_s),this._activateParents(t),N.trigger(this._element,ps,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))z.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(_s);else for(const e of z.parents(t,".nav, .list-group"))for(const t of z.prev(e,ys))t.classList.add(_s)}_clearActiveClass(t){t.classList.remove(_s);const e=z.find(`${bs}.${_s}`,t);for(const t of e)t.classList.remove(_s)}static jQueryInterface(t){return this.each((function(){const e=Es.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}N.on(window,gs,(()=>{for(const t of z.find('[data-bs-spy="scroll"]'))Es.getOrCreateInstance(t)})),m(Es);const Ts=".bs.tab",Cs=`hide${Ts}`,Os=`hidden${Ts}`,xs=`show${Ts}`,ks=`shown${Ts}`,Ls=`click${Ts}`,Ss=`keydown${Ts}`,Ds=`load${Ts}`,$s="ArrowLeft",Is="ArrowRight",Ns="ArrowUp",Ps="ArrowDown",Ms="Home",js="End",Fs="active",Hs="fade",Ws="show",Bs=".dropdown-toggle",zs=`:not(${Bs})`,Rs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',qs=`.nav-link${zs}, .list-group-item${zs}, [role="tab"]${zs}, ${Rs}`,Vs=`.${Fs}[data-bs-toggle="tab"], .${Fs}[data-bs-toggle="pill"], .${Fs}[data-bs-toggle="list"]`;class Ks extends W{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),N.on(this._element,Ss,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?N.trigger(e,Cs,{relatedTarget:t}):null;N.trigger(t,xs,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Fs),this._activate(z.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),N.trigger(t,ks,{relatedTarget:e})):t.classList.add(Ws)}),t,t.classList.contains(Hs)))}_deactivate(t,e){t&&(t.classList.remove(Fs),t.blur(),this._deactivate(z.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),N.trigger(t,Os,{relatedTarget:e})):t.classList.remove(Ws)}),t,t.classList.contains(Hs)))}_keydown(t){if(![$s,Is,Ns,Ps,Ms,js].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!l(t)));let i;if([Ms,js].includes(t.key))i=e[t.key===Ms?0:e.length-1];else{const n=[Is,Ps].includes(t.key);i=b(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Ks.getOrCreateInstance(i).show())}_getChildren(){return z.find(qs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=z.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=z.findOne(t,i);s&&s.classList.toggle(n,e)};n(Bs,Fs),n(".dropdown-menu",Ws),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Fs)}_getInnerElement(t){return t.matches(qs)?t:z.findOne(qs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Ks.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}N.on(document,Ls,Rs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),l(this)||Ks.getOrCreateInstance(this).show()})),N.on(window,Ds,(()=>{for(const t of z.find(Vs))Ks.getOrCreateInstance(t)})),m(Ks);const Qs=".bs.toast",Xs=`mouseover${Qs}`,Ys=`mouseout${Qs}`,Us=`focusin${Qs}`,Gs=`focusout${Qs}`,Js=`hide${Qs}`,Zs=`hidden${Qs}`,to=`show${Qs}`,eo=`shown${Qs}`,io="hide",no="show",so="showing",oo={animation:"boolean",autohide:"boolean",delay:"number"},ro={animation:!0,autohide:!0,delay:5e3};class ao extends W{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return ro}static get DefaultType(){return oo}static get NAME(){return"toast"}show(){N.trigger(this._element,to).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(io),d(this._element),this._element.classList.add(no,so),this._queueCallback((()=>{this._element.classList.remove(so),N.trigger(this._element,eo),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(N.trigger(this._element,Js).defaultPrevented||(this._element.classList.add(so),this._queueCallback((()=>{this._element.classList.add(io),this._element.classList.remove(so,no),N.trigger(this._element,Zs)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(no),super.dispose()}isShown(){return this._element.classList.contains(no)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){N.on(this._element,Xs,(t=>this._onInteraction(t,!0))),N.on(this._element,Ys,(t=>this._onInteraction(t,!1))),N.on(this._element,Us,(t=>this._onInteraction(t,!0))),N.on(this._element,Gs,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=ao.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}return R(ao),m(ao),{Alert:Q,Button:Y,Carousel:xt,Collapse:Bt,Dropdown:qi,Modal:On,Offcanvas:qn,Popover:us,ScrollSpy:Es,Tab:Ks,Toast:ao,Tooltip:cs}})); -//# sourceMappingURL=bootstrap.bundle.min.js.map \ No newline at end of file diff --git a/weed/admin/static/js/htmx.min.js b/weed/admin/static/js/htmx.min.js deleted file mode 100644 index 4091536a6..000000000 --- a/weed/admin/static/js/htmx.min.js +++ /dev/null @@ -1 +0,0 @@ -(function(e,t){if(typeof define==="function"&&define.amd){define([],t)}else if(typeof module==="object"&&module.exports){module.exports=t()}else{e.htmx=e.htmx||t()}})(typeof self!=="undefined"?self:this,function(){return function(){"use strict";var Y={onLoad:t,process:Dt,on:Z,off:K,trigger:fe,ajax:Cr,find:E,findAll:f,closest:d,values:function(e,t){var r=or(e,t||"post");return r.values},remove:B,addClass:F,removeClass:n,toggleClass:V,takeClass:j,defineExtension:Ar,removeExtension:Nr,logAll:X,logNone:U,logger:null,config:{historyEnabled:true,historyCacheSize:10,refreshOnHistoryMiss:false,defaultSwapStyle:"innerHTML",defaultSwapDelay:0,defaultSettleDelay:20,includeIndicatorStyles:true,indicatorClass:"htmx-indicator",requestClass:"htmx-request",addedClass:"htmx-added",settlingClass:"htmx-settling",swappingClass:"htmx-swapping",allowEval:true,allowScriptTags:true,inlineScriptNonce:"",attributesToSettle:["class","style","width","height"],withCredentials:false,timeout:0,wsReconnectDelay:"full-jitter",wsBinaryType:"blob",disableSelector:"[hx-disable], [data-hx-disable]",useTemplateFragments:false,scrollBehavior:"smooth",defaultFocusScroll:false,getCacheBusterParam:false,globalViewTransitions:false,methodsThatUseUrlParams:["get"],selfRequestsOnly:false,scrollIntoViewOnBoost:true},parseInterval:v,_:e,createEventSource:function(e){return new EventSource(e,{withCredentials:true})},createWebSocket:function(e){var t=new WebSocket(e,[]);t.binaryType=Y.config.wsBinaryType;return t},version:"1.9.8"};var r={addTriggerHandler:St,bodyContains:oe,canAccessLocalStorage:M,findThisElement:ve,filterValues:cr,hasAttribute:o,getAttributeValue:ee,getClosestAttributeValue:re,getClosestMatch:c,getExpressionVars:wr,getHeaders:fr,getInputValues:or,getInternalData:ie,getSwapSpecification:dr,getTriggerSpecs:Ze,getTarget:ge,makeFragment:l,mergeObjects:se,makeSettleInfo:T,oobSwap:ye,querySelectorExt:le,selectAndSwap:Ue,settleImmediately:Jt,shouldCancel:tt,triggerEvent:fe,triggerErrorEvent:ue,withExtensions:C};var b=["get","post","put","delete","patch"];var w=b.map(function(e){return"[hx-"+e+"], [data-hx-"+e+"]"}).join(", ");function v(e){if(e==undefined){return undefined}if(e.slice(-2)=="ms"){return parseFloat(e.slice(0,-2))||undefined}if(e.slice(-1)=="s"){return parseFloat(e.slice(0,-1))*1e3||undefined}if(e.slice(-1)=="m"){return parseFloat(e.slice(0,-1))*1e3*60||undefined}return parseFloat(e)||undefined}function Q(e,t){return e.getAttribute&&e.getAttribute(t)}function o(e,t){return e.hasAttribute&&(e.hasAttribute(t)||e.hasAttribute("data-"+t))}function ee(e,t){return Q(e,t)||Q(e,"data-"+t)}function u(e){return e.parentElement}function te(){return document}function c(e,t){while(e&&!t(e)){e=u(e)}return e?e:null}function R(e,t,r){var n=ee(t,r);var i=ee(t,"hx-disinherit");if(e!==t&&i&&(i==="*"||i.split(" ").indexOf(r)>=0)){return"unset"}else{return n}}function re(t,r){var n=null;c(t,function(e){return n=R(t,e,r)});if(n!=="unset"){return n}}function h(e,t){var r=e.matches||e.matchesSelector||e.msMatchesSelector||e.mozMatchesSelector||e.webkitMatchesSelector||e.oMatchesSelector;return r&&r.call(e,t)}function q(e){var t=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i;var r=t.exec(e);if(r){return r[1].toLowerCase()}else{return""}}function i(e,t){var r=new DOMParser;var n=r.parseFromString(e,"text/html");var i=n.body;while(t>0){t--;i=i.firstChild}if(i==null){i=te().createDocumentFragment()}return i}function H(e){return e.match(/",0);return r.querySelector("template").content}else{var n=q(e);switch(n){case"thead":case"tbody":case"tfoot":case"colgroup":case"caption":return i(""+e+"
",1);case"col":return i(""+e+"
",2);case"tr":return i(""+e+"
",2);case"td":case"th":return i(""+e+"
",3);case"script":case"style":return i("
"+e+"
",1);default:return i(e,0)}}}function ne(e){if(e){e()}}function L(e,t){return Object.prototype.toString.call(e)==="[object "+t+"]"}function A(e){return L(e,"Function")}function N(e){return L(e,"Object")}function ie(e){var t="htmx-internal-data";var r=e[t];if(!r){r=e[t]={}}return r}function I(e){var t=[];if(e){for(var r=0;r=0}function oe(e){if(e.getRootNode&&e.getRootNode()instanceof window.ShadowRoot){return te().body.contains(e.getRootNode().host)}else{return te().body.contains(e)}}function P(e){return e.trim().split(/\s+/)}function se(e,t){for(var r in t){if(t.hasOwnProperty(r)){e[r]=t[r]}}return e}function S(e){try{return JSON.parse(e)}catch(e){y(e);return null}}function M(){var e="htmx:localStorageTest";try{localStorage.setItem(e,e);localStorage.removeItem(e);return true}catch(e){return false}}function D(t){try{var e=new URL(t);if(e){t=e.pathname+e.search}if(!t.match("^/$")){t=t.replace(/\/+$/,"")}return t}catch(e){return t}}function e(e){return xr(te().body,function(){return eval(e)})}function t(t){var e=Y.on("htmx:load",function(e){t(e.detail.elt)});return e}function X(){Y.logger=function(e,t,r){if(console){console.log(t,e,r)}}}function U(){Y.logger=null}function E(e,t){if(t){return e.querySelector(t)}else{return E(te(),e)}}function f(e,t){if(t){return e.querySelectorAll(t)}else{return f(te(),e)}}function B(e,t){e=s(e);if(t){setTimeout(function(){B(e);e=null},t)}else{e.parentElement.removeChild(e)}}function F(e,t,r){e=s(e);if(r){setTimeout(function(){F(e,t);e=null},r)}else{e.classList&&e.classList.add(t)}}function n(e,t,r){e=s(e);if(r){setTimeout(function(){n(e,t);e=null},r)}else{if(e.classList){e.classList.remove(t);if(e.classList.length===0){e.removeAttribute("class")}}}}function V(e,t){e=s(e);e.classList.toggle(t)}function j(e,t){e=s(e);ae(e.parentElement.children,function(e){n(e,t)});F(e,t)}function d(e,t){e=s(e);if(e.closest){return e.closest(t)}else{do{if(e==null||h(e,t)){return e}}while(e=e&&u(e));return null}}function g(e,t){return e.substring(0,t.length)===t}function _(e,t){return e.substring(e.length-t.length)===t}function z(e){var t=e.trim();if(g(t,"<")&&_(t,"/>")){return t.substring(1,t.length-2)}else{return t}}function W(e,t){if(t.indexOf("closest ")===0){return[d(e,z(t.substr(8)))]}else if(t.indexOf("find ")===0){return[E(e,z(t.substr(5)))]}else if(t==="next"){return[e.nextElementSibling]}else if(t.indexOf("next ")===0){return[$(e,z(t.substr(5)))]}else if(t==="previous"){return[e.previousElementSibling]}else if(t.indexOf("previous ")===0){return[G(e,z(t.substr(9)))]}else if(t==="document"){return[document]}else if(t==="window"){return[window]}else if(t==="body"){return[document.body]}else{return te().querySelectorAll(z(t))}}var $=function(e,t){var r=te().querySelectorAll(t);for(var n=0;n=0;n--){var i=r[n];if(i.compareDocumentPosition(e)===Node.DOCUMENT_POSITION_FOLLOWING){return i}}};function le(e,t){if(t){return W(e,t)[0]}else{return W(te().body,e)[0]}}function s(e){if(L(e,"String")){return E(e)}else{return e}}function J(e,t,r){if(A(t)){return{target:te().body,event:e,listener:t}}else{return{target:s(e),event:t,listener:r}}}function Z(t,r,n){Pr(function(){var e=J(t,r,n);e.target.addEventListener(e.event,e.listener)});var e=A(r);return e?r:n}function K(t,r,n){Pr(function(){var e=J(t,r,n);e.target.removeEventListener(e.event,e.listener)});return A(r)?r:n}var he=te().createElement("output");function de(e,t){var r=re(e,t);if(r){if(r==="this"){return[ve(e,t)]}else{var n=W(e,r);if(n.length===0){y('The selector "'+r+'" on '+t+" returned no matches!");return[he]}else{return n}}}}function ve(e,t){return c(e,function(e){return ee(e,t)!=null})}function ge(e){var t=re(e,"hx-target");if(t){if(t==="this"){return ve(e,"hx-target")}else{return le(e,t)}}else{var r=ie(e);if(r.boosted){return te().body}else{return e}}}function me(e){var t=Y.config.attributesToSettle;for(var r=0;r0){o=e.substr(0,e.indexOf(":"));t=e.substr(e.indexOf(":")+1,e.length)}else{o=e}var r=te().querySelectorAll(t);if(r){ae(r,function(e){var t;var r=i.cloneNode(true);t=te().createDocumentFragment();t.appendChild(r);if(!xe(o,e)){t=r}var n={shouldSwap:true,target:e,fragment:t};if(!fe(e,"htmx:oobBeforeSwap",n))return;e=n.target;if(n["shouldSwap"]){De(o,e,e,t,a)}ae(a.elts,function(e){fe(e,"htmx:oobAfterSwap",n)})});i.parentNode.removeChild(i)}else{i.parentNode.removeChild(i);ue(te().body,"htmx:oobErrorNoTarget",{content:i})}return e}function be(e,t,r){var n=re(e,"hx-select-oob");if(n){var i=n.split(",");for(let e=0;e0){var r=t.replace("'","\\'");var n=e.tagName.replace(":","\\:");var i=o.querySelector(n+"[id='"+r+"']");if(i&&i!==o){var a=e.cloneNode();pe(e,i);s.tasks.push(function(){pe(e,a)})}}})}function Ee(e){return function(){n(e,Y.config.addedClass);Dt(e);Ct(e);Ce(e);fe(e,"htmx:load")}}function Ce(e){var t="[autofocus]";var r=h(e,t)?e:e.querySelector(t);if(r!=null){r.focus()}}function a(e,t,r,n){Se(e,r,n);while(r.childNodes.length>0){var i=r.firstChild;F(i,Y.config.addedClass);e.insertBefore(i,t);if(i.nodeType!==Node.TEXT_NODE&&i.nodeType!==Node.COMMENT_NODE){n.tasks.push(Ee(i))}}}function Te(e,t){var r=0;while(r-1){var t=e.replace(/]*>|>)([\s\S]*?)<\/svg>/gim,"");var r=t.match(/]*>|>)([\s\S]*?)<\/title>/im);if(r){return r[2]}}}function Ue(e,t,r,n,i,a){i.title=Xe(n);var o=l(n);if(o){be(r,o,i);o=Me(r,o,a);we(o);return De(e,r,t,o,i)}}function Be(e,t,r){var n=e.getResponseHeader(t);if(n.indexOf("{")===0){var i=S(n);for(var a in i){if(i.hasOwnProperty(a)){var o=i[a];if(!N(o)){o={value:o}}fe(r,a,o)}}}else{var s=n.split(",");for(var l=0;l0){var o=t[0];if(o==="]"){n--;if(n===0){if(a===null){i=i+"true"}t.shift();i+=")})";try{var s=xr(e,function(){return Function(i)()},function(){return true});s.source=i;return s}catch(e){ue(te().body,"htmx:syntax:error",{error:e,source:i});return null}}}else if(o==="["){n++}if($e(o,a,r)){i+="(("+r+"."+o+") ? ("+r+"."+o+") : (window."+o+"))"}else{i=i+o}a=t.shift()}}}function x(e,t){var r="";while(e.length>0&&!e[0].match(t)){r+=e.shift()}return r}var Je="input, textarea, select";function Ze(e){var t=ee(e,"hx-trigger");var r=[];if(t){var n=We(t);do{x(n,ze);var i=n.length;var a=x(n,/[,\[\s]/);if(a!==""){if(a==="every"){var o={trigger:"every"};x(n,ze);o.pollInterval=v(x(n,/[,\[\s]/));x(n,ze);var s=Ge(e,n,"event");if(s){o.eventFilter=s}r.push(o)}else if(a.indexOf("sse:")===0){r.push({trigger:"sse",sseEvent:a.substr(4)})}else{var l={trigger:a};var s=Ge(e,n,"event");if(s){l.eventFilter=s}while(n.length>0&&n[0]!==","){x(n,ze);var u=n.shift();if(u==="changed"){l.changed=true}else if(u==="once"){l.once=true}else if(u==="consume"){l.consume=true}else if(u==="delay"&&n[0]===":"){n.shift();l.delay=v(x(n,p))}else if(u==="from"&&n[0]===":"){n.shift();var f=x(n,p);if(f==="closest"||f==="find"||f==="next"||f==="previous"){n.shift();var c=x(n,p);if(c.length>0){f+=" "+c}}l.from=f}else if(u==="target"&&n[0]===":"){n.shift();l.target=x(n,p)}else if(u==="throttle"&&n[0]===":"){n.shift();l.throttle=v(x(n,p))}else if(u==="queue"&&n[0]===":"){n.shift();l.queue=x(n,p)}else if((u==="root"||u==="threshold")&&n[0]===":"){n.shift();l[u]=x(n,p)}else{ue(e,"htmx:syntax:error",{token:n.shift()})}}r.push(l)}}if(n.length===i){ue(e,"htmx:syntax:error",{token:n.shift()})}x(n,ze)}while(n[0]===","&&n.shift())}if(r.length>0){return r}else if(h(e,"form")){return[{trigger:"submit"}]}else if(h(e,'input[type="button"], input[type="submit"]')){return[{trigger:"click"}]}else if(h(e,Je)){return[{trigger:"change"}]}else{return[{trigger:"click"}]}}function Ke(e){ie(e).cancelled=true}function Ye(e,t,r){var n=ie(e);n.timeout=setTimeout(function(){if(oe(e)&&n.cancelled!==true){if(!nt(r,e,Ut("hx:poll:trigger",{triggerSpec:r,target:e}))){t(e)}Ye(e,t,r)}},r.pollInterval)}function Qe(e){return location.hostname===e.hostname&&Q(e,"href")&&Q(e,"href").indexOf("#")!==0}function et(t,r,e){if(t.tagName==="A"&&Qe(t)&&(t.target===""||t.target==="_self")||t.tagName==="FORM"){r.boosted=true;var n,i;if(t.tagName==="A"){n="get";i=Q(t,"href")}else{var a=Q(t,"method");n=a?a.toLowerCase():"get";if(n==="get"){}i=Q(t,"action")}e.forEach(function(e){it(t,function(e,t){if(d(e,Y.config.disableSelector)){m(e);return}ce(n,i,e,t)},r,e,true)})}}function tt(e,t){if(e.type==="submit"||e.type==="click"){if(t.tagName==="FORM"){return true}if(h(t,'input[type="submit"], button')&&d(t,"form")!==null){return true}if(t.tagName==="A"&&t.href&&(t.getAttribute("href")==="#"||t.getAttribute("href").indexOf("#")!==0)){return true}}return false}function rt(e,t){return ie(e).boosted&&e.tagName==="A"&&t.type==="click"&&(t.ctrlKey||t.metaKey)}function nt(e,t,r){var n=e.eventFilter;if(n){try{return n.call(t,r)!==true}catch(e){ue(te().body,"htmx:eventFilter:error",{error:e,source:n.source});return true}}return false}function it(a,o,e,s,l){var u=ie(a);var t;if(s.from){t=W(a,s.from)}else{t=[a]}if(s.changed){t.forEach(function(e){var t=ie(e);t.lastValue=e.value})}ae(t,function(n){var i=function(e){if(!oe(a)){n.removeEventListener(s.trigger,i);return}if(rt(a,e)){return}if(l||tt(e,a)){e.preventDefault()}if(nt(s,a,e)){return}var t=ie(e);t.triggerSpec=s;if(t.handledFor==null){t.handledFor=[]}if(t.handledFor.indexOf(a)<0){t.handledFor.push(a);if(s.consume){e.stopPropagation()}if(s.target&&e.target){if(!h(e.target,s.target)){return}}if(s.once){if(u.triggeredOnce){return}else{u.triggeredOnce=true}}if(s.changed){var r=ie(n);if(r.lastValue===n.value){return}r.lastValue=n.value}if(u.delayed){clearTimeout(u.delayed)}if(u.throttle){return}if(s.throttle){if(!u.throttle){o(a,e);u.throttle=setTimeout(function(){u.throttle=null},s.throttle)}}else if(s.delay){u.delayed=setTimeout(function(){o(a,e)},s.delay)}else{fe(a,"htmx:trigger");o(a,e)}}};if(e.listenerInfos==null){e.listenerInfos=[]}e.listenerInfos.push({trigger:s.trigger,listener:i,on:n});n.addEventListener(s.trigger,i)})}var at=false;var ot=null;function st(){if(!ot){ot=function(){at=true};window.addEventListener("scroll",ot);setInterval(function(){if(at){at=false;ae(te().querySelectorAll("[hx-trigger='revealed'],[data-hx-trigger='revealed']"),function(e){lt(e)})}},200)}}function lt(t){if(!o(t,"data-hx-revealed")&&k(t)){t.setAttribute("data-hx-revealed","true");var e=ie(t);if(e.initHash){fe(t,"revealed")}else{t.addEventListener("htmx:afterProcessNode",function(e){fe(t,"revealed")},{once:true})}}}function ut(e,t,r){var n=P(r);for(var i=0;i=0){var t=dt(n);setTimeout(function(){ft(s,r,n+1)},t)}};t.onopen=function(e){n=0};ie(s).webSocket=t;t.addEventListener("message",function(e){if(ct(s)){return}var t=e.data;C(s,function(e){t=e.transformResponse(t,null,s)});var r=T(s);var n=l(t);var i=I(n.children);for(var a=0;a0){fe(u,"htmx:validation:halted",i);return}t.send(JSON.stringify(l));if(tt(e,u)){e.preventDefault()}})}else{ue(u,"htmx:noWebSocketSourceError")}}function dt(e){var t=Y.config.wsReconnectDelay;if(typeof t==="function"){return t(e)}if(t==="full-jitter"){var r=Math.min(e,6);var n=1e3*Math.pow(2,r);return n*Math.random()}y('htmx.config.wsReconnectDelay must either be a function or the string "full-jitter"')}function vt(e,t,r){var n=P(r);for(var i=0;i0){var o=n.shift();var s=o.match(/^\s*([a-zA-Z:\-\.]+:)(.*)/);if(a===0&&s){o.split(":");i=s[1].slice(0,-1);r[i]=s[2]}else{r[i]+=o}a+=Nt(o)}for(var l in r){It(e,l,r[l])}}}function Pt(t){Re(t);for(var e=0;eY.config.historyCacheSize){i.shift()}while(i.length>0){try{localStorage.setItem("htmx-history-cache",JSON.stringify(i));break}catch(e){ue(te().body,"htmx:historyCacheError",{cause:e,cache:i});i.shift()}}}function _t(e){if(!M()){return null}e=D(e);var t=S(localStorage.getItem("htmx-history-cache"))||[];for(var r=0;r=200&&this.status<400){fe(te().body,"htmx:historyCacheMissLoad",o);var e=l(this.response);e=e.querySelector("[hx-history-elt],[data-hx-history-elt]")||e;var t=Vt();var r=T(t);var n=Xe(this.response);if(n){var i=E("title");if(i){i.innerHTML=n}else{window.document.title=n}}Pe(t,e,r);Jt(r.tasks);Ft=a;fe(te().body,"htmx:historyRestore",{path:a,cacheMiss:true,serverResponse:this.response})}else{ue(te().body,"htmx:historyCacheMissLoadError",o)}};e.send()}function Kt(e){Wt();e=e||location.pathname+location.search;var t=_t(e);if(t){var r=l(t.content);var n=Vt();var i=T(n);Pe(n,r,i);Jt(i.tasks);document.title=t.title;setTimeout(function(){window.scrollTo(0,t.scroll)},0);Ft=e;fe(te().body,"htmx:historyRestore",{path:e,item:t})}else{if(Y.config.refreshOnHistoryMiss){window.location.reload(true)}else{Zt(e)}}}function Yt(e){var t=de(e,"hx-indicator");if(t==null){t=[e]}ae(t,function(e){var t=ie(e);t.requestCount=(t.requestCount||0)+1;e.classList["add"].call(e.classList,Y.config.requestClass)});return t}function Qt(e){var t=de(e,"hx-disabled-elt");if(t==null){t=[]}ae(t,function(e){var t=ie(e);t.requestCount=(t.requestCount||0)+1;e.setAttribute("disabled","")});return t}function er(e,t){ae(e,function(e){var t=ie(e);t.requestCount=(t.requestCount||0)-1;if(t.requestCount===0){e.classList["remove"].call(e.classList,Y.config.requestClass)}});ae(t,function(e){var t=ie(e);t.requestCount=(t.requestCount||0)-1;if(t.requestCount===0){e.removeAttribute("disabled")}})}function tr(e,t){for(var r=0;r=0}function dr(e,t){var r=t?t:re(e,"hx-swap");var n={swapStyle:ie(e).boosted?"innerHTML":Y.config.defaultSwapStyle,swapDelay:Y.config.defaultSwapDelay,settleDelay:Y.config.defaultSettleDelay};if(Y.config.scrollIntoViewOnBoost&&ie(e).boosted&&!hr(e)){n["show"]="top"}if(r){var i=P(r);if(i.length>0){for(var a=0;a0?l.join(":"):null;n["scroll"]=u;n["scrollTarget"]=f}else if(o.indexOf("show:")===0){var c=o.substr(5);var l=c.split(":");var h=l.pop();var f=l.length>0?l.join(":"):null;n["show"]=h;n["showTarget"]=f}else if(o.indexOf("focus-scroll:")===0){var d=o.substr("focus-scroll:".length);n["focusScroll"]=d=="true"}else if(a==0){n["swapStyle"]=o}else{y("Unknown modifier in hx-swap: "+o)}}}}return n}function vr(e){return re(e,"hx-encoding")==="multipart/form-data"||h(e,"form")&&Q(e,"enctype")==="multipart/form-data"}function gr(t,r,n){var i=null;C(r,function(e){if(i==null){i=e.encodeParameters(t,n,r)}});if(i!=null){return i}else{if(vr(r)){return ur(n)}else{return lr(n)}}}function T(e){return{tasks:[],elts:[e]}}function mr(e,t){var r=e[0];var n=e[e.length-1];if(t.scroll){var i=null;if(t.scrollTarget){i=le(r,t.scrollTarget)}if(t.scroll==="top"&&(r||i)){i=i||r;i.scrollTop=0}if(t.scroll==="bottom"&&(n||i)){i=i||n;i.scrollTop=i.scrollHeight}}if(t.show){var i=null;if(t.showTarget){var a=t.showTarget;if(t.showTarget==="window"){a="body"}i=le(r,a)}if(t.show==="top"&&(r||i)){i=i||r;i.scrollIntoView({block:"start",behavior:Y.config.scrollBehavior})}if(t.show==="bottom"&&(n||i)){i=i||n;i.scrollIntoView({block:"end",behavior:Y.config.scrollBehavior})}}}function pr(e,t,r,n){if(n==null){n={}}if(e==null){return n}var i=ee(e,t);if(i){var a=i.trim();var o=r;if(a==="unset"){return null}if(a.indexOf("javascript:")===0){a=a.substr(11);o=true}else if(a.indexOf("js:")===0){a=a.substr(3);o=true}if(a.indexOf("{")!==0){a="{"+a+"}"}var s;if(o){s=xr(e,function(){return Function("return ("+a+")")()},{})}else{s=S(a)}for(var l in s){if(s.hasOwnProperty(l)){if(n[l]==null){n[l]=s[l]}}}}return pr(u(e),t,r,n)}function xr(e,t,r){if(Y.config.allowEval){return t()}else{ue(e,"htmx:evalDisallowedError");return r}}function yr(e,t){return pr(e,"hx-vars",true,t)}function br(e,t){return pr(e,"hx-vals",false,t)}function wr(e){return se(yr(e),br(e))}function Sr(t,r,n){if(n!==null){try{t.setRequestHeader(r,n)}catch(e){t.setRequestHeader(r,encodeURIComponent(n));t.setRequestHeader(r+"-URI-AutoEncoded","true")}}}function Er(t){if(t.responseURL&&typeof URL!=="undefined"){try{var e=new URL(t.responseURL);return e.pathname+e.search}catch(e){ue(te().body,"htmx:badResponseUrl",{url:t.responseURL})}}}function O(e,t){return e.getAllResponseHeaders().match(t)}function Cr(e,t,r){e=e.toLowerCase();if(r){if(r instanceof Element||L(r,"String")){return ce(e,t,null,null,{targetOverride:s(r),returnPromise:true})}else{return ce(e,t,s(r.source),r.event,{handler:r.handler,headers:r.headers,values:r.values,targetOverride:s(r.target),swapOverride:r.swap,returnPromise:true})}}else{return ce(e,t,null,null,{returnPromise:true})}}function Tr(e){var t=[];while(e){t.push(e);e=e.parentElement}return t}function Or(e,t,r){var n;var i;if(typeof URL==="function"){i=new URL(t,document.location.href);var a=document.location.origin;n=a===i.origin}else{i=t;n=g(t,document.location.origin)}if(Y.config.selfRequestsOnly){if(!n){return false}}return fe(e,"htmx:validateUrl",se({url:i,sameHost:n},r))}function ce(t,r,n,i,a,e){var o=null;var s=null;a=a!=null?a:{};if(a.returnPromise&&typeof Promise!=="undefined"){var l=new Promise(function(e,t){o=e;s=t})}if(n==null){n=te().body}var M=a.handler||qr;if(!oe(n)){ne(o);return l}var u=a.targetOverride||ge(n);if(u==null||u==he){ue(n,"htmx:targetError",{target:ee(n,"hx-target")});ne(s);return l}var f=ie(n);var c=f.lastButtonClicked;if(c){var h=Q(c,"formaction");if(h!=null){r=h}var d=Q(c,"formmethod");if(d!=null){if(d.toLowerCase()!=="dialog"){t=d}}}var v=re(n,"hx-confirm");if(e===undefined){var D=function(e){return ce(t,r,n,i,a,!!e)};var X={target:u,elt:n,path:r,verb:t,triggeringEvent:i,etc:a,issueRequest:D,question:v};if(fe(n,"htmx:confirm",X)===false){ne(o);return l}}var g=n;var m=re(n,"hx-sync");var p=null;var x=false;if(m){var U=m.split(":");var B=U[0].trim();if(B==="this"){g=ve(n,"hx-sync")}else{g=le(n,B)}m=(U[1]||"drop").trim();f=ie(g);if(m==="drop"&&f.xhr&&f.abortable!==true){ne(o);return l}else if(m==="abort"){if(f.xhr){ne(o);return l}else{x=true}}else if(m==="replace"){fe(g,"htmx:abort")}else if(m.indexOf("queue")===0){var F=m.split(" ");p=(F[1]||"last").trim()}}if(f.xhr){if(f.abortable){fe(g,"htmx:abort")}else{if(p==null){if(i){var y=ie(i);if(y&&y.triggerSpec&&y.triggerSpec.queue){p=y.triggerSpec.queue}}if(p==null){p="last"}}if(f.queuedRequests==null){f.queuedRequests=[]}if(p==="first"&&f.queuedRequests.length===0){f.queuedRequests.push(function(){ce(t,r,n,i,a)})}else if(p==="all"){f.queuedRequests.push(function(){ce(t,r,n,i,a)})}else if(p==="last"){f.queuedRequests=[];f.queuedRequests.push(function(){ce(t,r,n,i,a)})}ne(o);return l}}var b=new XMLHttpRequest;f.xhr=b;f.abortable=x;var w=function(){f.xhr=null;f.abortable=false;if(f.queuedRequests!=null&&f.queuedRequests.length>0){var e=f.queuedRequests.shift();e()}};var V=re(n,"hx-prompt");if(V){var S=prompt(V);if(S===null||!fe(n,"htmx:prompt",{prompt:S,target:u})){ne(o);w();return l}}if(v&&!e){if(!confirm(v)){ne(o);w();return l}}var E=fr(n,u,S);if(a.headers){E=se(E,a.headers)}var j=or(n,t);var C=j.errors;var T=j.values;if(a.values){T=se(T,a.values)}var _=wr(n);var z=se(T,_);var O=cr(z,n);if(t!=="get"&&!vr(n)){E["Content-Type"]="application/x-www-form-urlencoded"}if(Y.config.getCacheBusterParam&&t==="get"){O["org.htmx.cache-buster"]=Q(u,"id")||"true"}if(r==null||r===""){r=te().location.href}var R=pr(n,"hx-request");var W=ie(n).boosted;var q=Y.config.methodsThatUseUrlParams.indexOf(t)>=0;var H={boosted:W,useUrlParams:q,parameters:O,unfilteredParameters:z,headers:E,target:u,verb:t,errors:C,withCredentials:a.credentials||R.credentials||Y.config.withCredentials,timeout:a.timeout||R.timeout||Y.config.timeout,path:r,triggeringEvent:i};if(!fe(n,"htmx:configRequest",H)){ne(o);w();return l}r=H.path;t=H.verb;E=H.headers;O=H.parameters;C=H.errors;q=H.useUrlParams;if(C&&C.length>0){fe(n,"htmx:validation:halted",H);ne(o);w();return l}var $=r.split("#");var G=$[0];var L=$[1];var A=r;if(q){A=G;var J=Object.keys(O).length!==0;if(J){if(A.indexOf("?")<0){A+="?"}else{A+="&"}A+=lr(O);if(L){A+="#"+L}}}if(!Or(n,A,H)){ue(n,"htmx:invalidPath",H);ne(s);return l}b.open(t.toUpperCase(),A,true);b.overrideMimeType("text/html");b.withCredentials=H.withCredentials;b.timeout=H.timeout;if(R.noHeaders){}else{for(var N in E){if(E.hasOwnProperty(N)){var Z=E[N];Sr(b,N,Z)}}}var I={xhr:b,target:u,requestConfig:H,etc:a,boosted:W,pathInfo:{requestPath:r,finalRequestPath:A,anchor:L}};b.onload=function(){try{var e=Tr(n);I.pathInfo.responsePath=Er(b);M(n,I);er(k,P);fe(n,"htmx:afterRequest",I);fe(n,"htmx:afterOnLoad",I);if(!oe(n)){var t=null;while(e.length>0&&t==null){var r=e.shift();if(oe(r)){t=r}}if(t){fe(t,"htmx:afterRequest",I);fe(t,"htmx:afterOnLoad",I)}}ne(o);w()}catch(e){ue(n,"htmx:onLoadError",se({error:e},I));throw e}};b.onerror=function(){er(k,P);ue(n,"htmx:afterRequest",I);ue(n,"htmx:sendError",I);ne(s);w()};b.onabort=function(){er(k,P);ue(n,"htmx:afterRequest",I);ue(n,"htmx:sendAbort",I);ne(s);w()};b.ontimeout=function(){er(k,P);ue(n,"htmx:afterRequest",I);ue(n,"htmx:timeout",I);ne(s);w()};if(!fe(n,"htmx:beforeRequest",I)){ne(o);w();return l}var k=Yt(n);var P=Qt(n);ae(["loadstart","loadend","progress","abort"],function(t){ae([b,b.upload],function(e){e.addEventListener(t,function(e){fe(n,"htmx:xhr:"+t,{lengthComputable:e.lengthComputable,loaded:e.loaded,total:e.total})})})});fe(n,"htmx:beforeSend",I);var K=q?null:gr(b,n,O);b.send(K);return l}function Rr(e,t){var r=t.xhr;var n=null;var i=null;if(O(r,/HX-Push:/i)){n=r.getResponseHeader("HX-Push");i="push"}else if(O(r,/HX-Push-Url:/i)){n=r.getResponseHeader("HX-Push-Url");i="push"}else if(O(r,/HX-Replace-Url:/i)){n=r.getResponseHeader("HX-Replace-Url");i="replace"}if(n){if(n==="false"){return{}}else{return{type:i,path:n}}}var a=t.pathInfo.finalRequestPath;var o=t.pathInfo.responsePath;var s=re(e,"hx-push-url");var l=re(e,"hx-replace-url");var u=ie(e).boosted;var f=null;var c=null;if(s){f="push";c=s}else if(l){f="replace";c=l}else if(u){f="push";c=o||a}if(c){if(c==="false"){return{}}if(c==="true"){c=o||a}if(t.pathInfo.anchor&&c.indexOf("#")===-1){c=c+"#"+t.pathInfo.anchor}return{type:f,path:c}}else{return{}}}function qr(l,u){var f=u.xhr;var c=u.target;var e=u.etc;var t=u.requestConfig;if(!fe(l,"htmx:beforeOnLoad",u))return;if(O(f,/HX-Trigger:/i)){Be(f,"HX-Trigger",l)}if(O(f,/HX-Location:/i)){Wt();var r=f.getResponseHeader("HX-Location");var h;if(r.indexOf("{")===0){h=S(r);r=h["path"];delete h["path"]}Cr("GET",r,h).then(function(){$t(r)});return}var n=O(f,/HX-Refresh:/i)&&"true"===f.getResponseHeader("HX-Refresh");if(O(f,/HX-Redirect:/i)){location.href=f.getResponseHeader("HX-Redirect");n&&location.reload();return}if(n){location.reload();return}if(O(f,/HX-Retarget:/i)){u.target=te().querySelector(f.getResponseHeader("HX-Retarget"))}var d=Rr(l,u);var i=f.status>=200&&f.status<400&&f.status!==204;var v=f.response;var a=f.status>=400;var g=Y.config.ignoreTitle;var o=se({shouldSwap:i,serverResponse:v,isError:a,ignoreTitle:g},u);if(!fe(c,"htmx:beforeSwap",o))return;c=o.target;v=o.serverResponse;a=o.isError;g=o.ignoreTitle;u.target=c;u.failed=a;u.successful=!a;if(o.shouldSwap){if(f.status===286){Ke(l)}C(l,function(e){v=e.transformResponse(v,f,l)});if(d.type){Wt()}var s=e.swapOverride;if(O(f,/HX-Reswap:/i)){s=f.getResponseHeader("HX-Reswap")}var h=dr(l,s);if(h.hasOwnProperty("ignoreTitle")){g=h.ignoreTitle}c.classList.add(Y.config.swappingClass);var m=null;var p=null;var x=function(){try{var e=document.activeElement;var t={};try{t={elt:e,start:e?e.selectionStart:null,end:e?e.selectionEnd:null}}catch(e){}var r;if(O(f,/HX-Reselect:/i)){r=f.getResponseHeader("HX-Reselect")}var n=T(c);Ue(h.swapStyle,c,l,v,n,r);if(t.elt&&!oe(t.elt)&&Q(t.elt,"id")){var i=document.getElementById(Q(t.elt,"id"));var a={preventScroll:h.focusScroll!==undefined?!h.focusScroll:!Y.config.defaultFocusScroll};if(i){if(t.start&&i.setSelectionRange){try{i.setSelectionRange(t.start,t.end)}catch(e){}}i.focus(a)}}c.classList.remove(Y.config.swappingClass);ae(n.elts,function(e){if(e.classList){e.classList.add(Y.config.settlingClass)}fe(e,"htmx:afterSwap",u)});if(O(f,/HX-Trigger-After-Swap:/i)){var o=l;if(!oe(l)){o=te().body}Be(f,"HX-Trigger-After-Swap",o)}var s=function(){ae(n.tasks,function(e){e.call()});ae(n.elts,function(e){if(e.classList){e.classList.remove(Y.config.settlingClass)}fe(e,"htmx:afterSettle",u)});if(d.type){fe(te().body,"htmx:beforeHistoryUpdate",se({history:d},u));if(d.type==="push"){$t(d.path);fe(te().body,"htmx:pushedIntoHistory",{path:d.path})}else{Gt(d.path);fe(te().body,"htmx:replacedInHistory",{path:d.path})}}if(u.pathInfo.anchor){var e=te().getElementById(u.pathInfo.anchor);if(e){e.scrollIntoView({block:"start",behavior:"auto"})}}if(n.title&&!g){var t=E("title");if(t){t.innerHTML=n.title}else{window.document.title=n.title}}mr(n.elts,h);if(O(f,/HX-Trigger-After-Settle:/i)){var r=l;if(!oe(l)){r=te().body}Be(f,"HX-Trigger-After-Settle",r)}ne(m)};if(h.settleDelay>0){setTimeout(s,h.settleDelay)}else{s()}}catch(e){ue(l,"htmx:swapError",u);ne(p);throw e}};var y=Y.config.globalViewTransitions;if(h.hasOwnProperty("transition")){y=h.transition}if(y&&fe(l,"htmx:beforeTransition",u)&&typeof Promise!=="undefined"&&document.startViewTransition){var b=new Promise(function(e,t){m=e;p=t});var w=x;x=function(){document.startViewTransition(function(){w();return b})}}if(h.swapDelay>0){setTimeout(x,h.swapDelay)}else{x()}}if(a){ue(l,"htmx:responseError",se({error:"Response Status Error Code "+f.status+" from "+u.pathInfo.requestPath},u))}}var Hr={};function Lr(){return{init:function(e){return null},onEvent:function(e,t){return true},transformResponse:function(e,t,r){return e},isInlineSwap:function(e){return false},handleSwap:function(e,t,r,n){return false},encodeParameters:function(e,t,r){return null}}}function Ar(e,t){if(t.init){t.init(r)}Hr[e]=se(Lr(),t)}function Nr(e){delete Hr[e]}function Ir(e,r,n){if(e==undefined){return r}if(r==undefined){r=[]}if(n==undefined){n=[]}var t=ee(e,"hx-ext");if(t){ae(t.split(","),function(e){e=e.replace(/ /g,"");if(e.slice(0,7)=="ignore:"){n.push(e.slice(7));return}if(n.indexOf(e)<0){var t=Hr[e];if(t&&r.indexOf(t)<0){r.push(t)}}})}return Ir(u(e),r,n)}var kr=false;te().addEventListener("DOMContentLoaded",function(){kr=true});function Pr(e){if(kr||te().readyState==="complete"){e()}else{te().addEventListener("DOMContentLoaded",e)}}function Mr(){if(Y.config.includeIndicatorStyles!==false){te().head.insertAdjacentHTML("beforeend","")}}function Dr(){var e=te().querySelector('meta[name="htmx-config"]');if(e){return S(e.content)}else{return null}}function Xr(){var e=Dr();if(e){Y.config=se(Y.config,e)}}Pr(function(){Xr();Mr();var e=te().body;Dt(e);var t=te().querySelectorAll("[hx-trigger='restored'],[data-hx-trigger='restored']");e.addEventListener("htmx:abort",function(e){var t=e.target;var r=ie(t);if(r&&r.xhr){r.xhr.abort()}});var r=window.onpopstate;window.onpopstate=function(e){if(e.state&&e.state.htmx){Kt();ae(t,function(e){fe(e,"htmx:restored",{document:te(),triggerEvent:fe})})}else{if(r){r(e)}}};setTimeout(function(){fe(e,"htmx:load",{});e=null},0)});return Y}()}); \ No newline at end of file diff --git a/weed/admin/static/webfonts/fa-brands-400.ttf b/weed/admin/static/webfonts/fa-brands-400.ttf deleted file mode 100644 index 774d51ac4..000000000 Binary files a/weed/admin/static/webfonts/fa-brands-400.ttf and /dev/null differ diff --git a/weed/admin/static/webfonts/fa-brands-400.woff2 b/weed/admin/static/webfonts/fa-brands-400.woff2 deleted file mode 100644 index 71e318526..000000000 Binary files a/weed/admin/static/webfonts/fa-brands-400.woff2 and /dev/null differ diff --git a/weed/admin/static/webfonts/fa-regular-400.ttf b/weed/admin/static/webfonts/fa-regular-400.ttf deleted file mode 100644 index 8a9d6344d..000000000 Binary files a/weed/admin/static/webfonts/fa-regular-400.ttf and /dev/null differ diff --git a/weed/admin/static/webfonts/fa-regular-400.woff2 b/weed/admin/static/webfonts/fa-regular-400.woff2 deleted file mode 100644 index 7f021680b..000000000 Binary files a/weed/admin/static/webfonts/fa-regular-400.woff2 and /dev/null differ diff --git a/weed/admin/static/webfonts/fa-solid-900.ttf b/weed/admin/static/webfonts/fa-solid-900.ttf deleted file mode 100644 index 993dbe1f9..000000000 Binary files a/weed/admin/static/webfonts/fa-solid-900.ttf and /dev/null differ diff --git a/weed/admin/static/webfonts/fa-solid-900.woff2 b/weed/admin/static/webfonts/fa-solid-900.woff2 deleted file mode 100644 index 5c16cd3e8..000000000 Binary files a/weed/admin/static/webfonts/fa-solid-900.woff2 and /dev/null differ diff --git a/weed/admin/static_embed.go b/weed/admin/static_embed.go deleted file mode 100644 index 1910afd86..000000000 --- a/weed/admin/static_embed.go +++ /dev/null @@ -1,14 +0,0 @@ -package admin - -import ( - "embed" - "io/fs" -) - -//go:embed static/* -var StaticFS embed.FS - -// GetStaticFS returns the embedded static filesystem -func GetStaticFS() (fs.FS, error) { - return fs.Sub(StaticFS, "static") -} diff --git a/weed/admin/topology/active_topology.go b/weed/admin/topology/active_topology.go deleted file mode 100644 index e4ef5c5d0..000000000 --- a/weed/admin/topology/active_topology.go +++ /dev/null @@ -1,19 +0,0 @@ -package topology - -// NewActiveTopology creates a new ActiveTopology instance -func NewActiveTopology(recentTaskWindowSeconds int) *ActiveTopology { - if recentTaskWindowSeconds <= 0 { - recentTaskWindowSeconds = 10 // Default 10 seconds - } - - return &ActiveTopology{ - nodes: make(map[string]*activeNode), - disks: make(map[string]*activeDisk), - volumeIndex: make(map[uint32][]string), - ecShardIndex: make(map[uint32][]string), - pendingTasks: make(map[string]*taskState), - assignedTasks: make(map[string]*taskState), - recentTasks: make(map[string]*taskState), - recentTaskWindowSeconds: recentTaskWindowSeconds, - } -} diff --git a/weed/admin/topology/active_topology_test.go b/weed/admin/topology/active_topology_test.go deleted file mode 100644 index 9b0990f21..000000000 --- a/weed/admin/topology/active_topology_test.go +++ /dev/null @@ -1,607 +0,0 @@ -package topology - -import ( - "fmt" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Helper function to find a disk by ID for testing - reduces code duplication -func findDiskByID(disks []*DiskInfo, diskID uint32) *DiskInfo { - for _, disk := range disks { - if disk.DiskID == diskID { - return disk - } - } - return nil -} - -// TestActiveTopologyBasicOperations tests basic topology management -func TestActiveTopologyBasicOperations(t *testing.T) { - topology := NewActiveTopology(10) - assert.NotNil(t, topology) - assert.Equal(t, 10, topology.recentTaskWindowSeconds) - - // Test empty topology - assert.Equal(t, 0, len(topology.nodes)) - assert.Equal(t, 0, len(topology.disks)) - assert.Equal(t, 0, len(topology.pendingTasks)) -} - -// TestActiveTopologyUpdate tests topology updates from master -func TestActiveTopologyUpdate(t *testing.T) { - topology := NewActiveTopology(10) - - // Create sample topology info - topologyInfo := createSampleTopology() - - err := topology.UpdateTopology(topologyInfo) - require.NoError(t, err) - - // Verify topology structure - assert.Equal(t, 2, len(topology.nodes)) // 2 nodes - assert.Equal(t, 4, len(topology.disks)) // 4 disks total (2 per node) - - // Verify node structure - node1, exists := topology.nodes["10.0.0.1:8080"] - require.True(t, exists) - assert.Equal(t, "dc1", node1.dataCenter) - assert.Equal(t, "rack1", node1.rack) - assert.Equal(t, 2, len(node1.disks)) - - // Verify disk structure - disk1, exists := topology.disks["10.0.0.1:8080:0"] - require.True(t, exists) - assert.Equal(t, uint32(0), disk1.DiskID) - assert.Equal(t, "hdd", disk1.DiskType) - assert.Equal(t, "dc1", disk1.DataCenter) -} - -// TestTaskLifecycle tests the complete task lifecycle -func TestTaskLifecycle(t *testing.T) { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - taskID := "balance-001" - - // 1. Add pending task - err := topology.AddPendingTask(TaskSpec{ - TaskID: taskID, - TaskType: TaskTypeBalance, - VolumeID: 1001, - VolumeSize: 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 1}, - }, - }) - assert.NoError(t, err, "Should add pending task successfully") - - // Verify pending state - assert.Equal(t, 1, len(topology.pendingTasks)) - assert.Equal(t, 0, len(topology.assignedTasks)) - assert.Equal(t, 0, len(topology.recentTasks)) - - task := topology.pendingTasks[taskID] - assert.Equal(t, TaskStatusPending, task.Status) - assert.Equal(t, uint32(1001), task.VolumeID) - - // Verify task assigned to disks - sourceDisk := topology.disks["10.0.0.1:8080:0"] - targetDisk := topology.disks["10.0.0.2:8080:1"] - assert.Equal(t, 1, len(sourceDisk.pendingTasks)) - assert.Equal(t, 1, len(targetDisk.pendingTasks)) - - // 2. Assign task - err = topology.AssignTask(taskID) - require.NoError(t, err) - - // Verify assigned state - assert.Equal(t, 0, len(topology.pendingTasks)) - assert.Equal(t, 1, len(topology.assignedTasks)) - assert.Equal(t, 0, len(topology.recentTasks)) - - task = topology.assignedTasks[taskID] - assert.Equal(t, TaskStatusInProgress, task.Status) - - // Verify task moved to assigned on disks - assert.Equal(t, 0, len(sourceDisk.pendingTasks)) - assert.Equal(t, 1, len(sourceDisk.assignedTasks)) - assert.Equal(t, 0, len(targetDisk.pendingTasks)) - assert.Equal(t, 1, len(targetDisk.assignedTasks)) - - // 3. Complete task - err = topology.CompleteTask(taskID) - require.NoError(t, err) - - // Verify completed state - assert.Equal(t, 0, len(topology.pendingTasks)) - assert.Equal(t, 0, len(topology.assignedTasks)) - assert.Equal(t, 1, len(topology.recentTasks)) - - task = topology.recentTasks[taskID] - assert.Equal(t, TaskStatusCompleted, task.Status) - assert.False(t, task.CompletedAt.IsZero()) -} - -// TestTaskDetectionScenarios tests various task detection scenarios -func TestTaskDetectionScenarios(t *testing.T) { - tests := []struct { - name string - scenario func() *ActiveTopology - expectedTasks map[string]bool // taskType -> shouldDetect - }{ - { - name: "Empty cluster - no tasks needed", - scenario: func() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createEmptyTopology()) - return topology - }, - expectedTasks: map[string]bool{ - "balance": false, - "vacuum": false, - "ec": false, - }, - }, - { - name: "Unbalanced cluster - balance task needed", - scenario: func() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createUnbalancedTopology()) - return topology - }, - expectedTasks: map[string]bool{ - "balance": true, - "vacuum": false, - "ec": false, - }, - }, - { - name: "High garbage ratio - vacuum task needed", - scenario: func() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createHighGarbageTopology()) - return topology - }, - expectedTasks: map[string]bool{ - "balance": false, - "vacuum": true, - "ec": false, - }, - }, - { - name: "Large volumes - EC task needed", - scenario: func() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createLargeVolumeTopology()) - return topology - }, - expectedTasks: map[string]bool{ - "balance": false, - "vacuum": false, - "ec": true, - }, - }, - { - name: "Recent tasks - no immediate re-detection", - scenario: func() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createUnbalancedTopology()) - // Add recent balance task - topology.recentTasks["recent-balance"] = &taskState{ - VolumeID: 1001, - TaskType: TaskTypeBalance, - Status: TaskStatusCompleted, - CompletedAt: time.Now().Add(-5 * time.Second), // 5 seconds ago - } - return topology - }, - expectedTasks: map[string]bool{ - "balance": false, // Should not detect due to recent task - "vacuum": false, - "ec": false, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - topology := tt.scenario() - - // Test balance task detection - shouldDetectBalance := tt.expectedTasks["balance"] - actualDetectBalance := !topology.HasRecentTaskForVolume(1001, TaskTypeBalance) - if shouldDetectBalance { - assert.True(t, actualDetectBalance, "Should detect balance task") - } else { - // Note: In real implementation, task detection would be more sophisticated - // This is a simplified test of the recent task prevention mechanism - } - - // Test that recent tasks prevent re-detection - if len(topology.recentTasks) > 0 { - for _, task := range topology.recentTasks { - hasRecent := topology.HasRecentTaskForVolume(task.VolumeID, task.TaskType) - assert.True(t, hasRecent, "Should find recent task for volume %d", task.VolumeID) - } - } - }) - } -} - -// TestTargetSelectionScenarios tests target selection for different task types -func TestTargetSelectionScenarios(t *testing.T) { - tests := []struct { - name string - topology *ActiveTopology - taskType TaskType - excludeNode string - expectedTargets int - expectedBestTarget string - }{ - { - name: "Balance task - find least loaded disk", - topology: createTopologyWithLoad(), - taskType: TaskTypeBalance, - excludeNode: "10.0.0.1:8080", // Exclude source node - expectedTargets: 2, // 2 disks on other node - }, - { - name: "EC task - find multiple available disks", - topology: createTopologyForEC(), - taskType: TaskTypeErasureCoding, - excludeNode: "", // Don't exclude any nodes - expectedTargets: 4, // All 4 disks available - }, - { - name: "Vacuum task - avoid conflicting disks", - topology: createTopologyWithConflicts(), - taskType: TaskTypeVacuum, - excludeNode: "", - expectedTargets: 1, // Only 1 disk without conflicts (conflicts exclude more disks) - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - availableDisks := tt.topology.GetAvailableDisks(tt.taskType, tt.excludeNode) - assert.Equal(t, tt.expectedTargets, len(availableDisks), - "Expected %d available disks, got %d", tt.expectedTargets, len(availableDisks)) - - // Verify disks are actually available - for _, disk := range availableDisks { - assert.NotEqual(t, tt.excludeNode, disk.NodeID, - "Available disk should not be on excluded node") - - assert.Less(t, disk.LoadCount, 2, "Disk load should be less than 2") - } - }) - } -} - -// TestDiskLoadCalculation tests disk load calculation -func TestDiskLoadCalculation(t *testing.T) { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Initially no load - disks := topology.GetNodeDisks("10.0.0.1:8080") - targetDisk := findDiskByID(disks, 0) - require.NotNil(t, targetDisk, "Should find disk with ID 0") - assert.Equal(t, 0, targetDisk.LoadCount) - - // Add pending task - err := topology.AddPendingTask(TaskSpec{ - TaskID: "task1", - TaskType: TaskTypeBalance, - VolumeID: 1001, - VolumeSize: 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 1}, - }, - }) - assert.NoError(t, err, "Should add pending task successfully") - - // Check load increased - disks = topology.GetNodeDisks("10.0.0.1:8080") - targetDisk = findDiskByID(disks, 0) - assert.Equal(t, 1, targetDisk.LoadCount) - - // Add another task to same disk - err = topology.AddPendingTask(TaskSpec{ - TaskID: "task2", - TaskType: TaskTypeVacuum, - VolumeID: 1002, - VolumeSize: 0, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "", DiskID: 0}, // Vacuum doesn't have a destination - }, - }) - assert.NoError(t, err, "Should add vacuum task successfully") - - disks = topology.GetNodeDisks("10.0.0.1:8080") - targetDisk = findDiskByID(disks, 0) - assert.Equal(t, 2, targetDisk.LoadCount) - - // Move one task to assigned - topology.AssignTask("task1") - - // Load should still be 2 (1 pending + 1 assigned) - disks = topology.GetNodeDisks("10.0.0.1:8080") - targetDisk = findDiskByID(disks, 0) - assert.Equal(t, 2, targetDisk.LoadCount) - - // Complete one task - topology.CompleteTask("task1") - - // Load should decrease to 1 - disks = topology.GetNodeDisks("10.0.0.1:8080") - targetDisk = findDiskByID(disks, 0) - assert.Equal(t, 1, targetDisk.LoadCount) -} - -// TestTaskConflictDetection tests task conflict detection -func TestTaskConflictDetection(t *testing.T) { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Add a balance task - err := topology.AddPendingTask(TaskSpec{ - TaskID: "balance1", - TaskType: TaskTypeBalance, - VolumeID: 1001, - VolumeSize: 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 1}, - }, - }) - assert.NoError(t, err, "Should add balance task successfully") - topology.AssignTask("balance1") - - // Try to get available disks for vacuum (conflicts with balance) - availableDisks := topology.GetAvailableDisks(TaskTypeVacuum, "") - - // Source disk should not be available due to conflict - sourceDiskAvailable := false - for _, disk := range availableDisks { - if disk.NodeID == "10.0.0.1:8080" && disk.DiskID == 0 { - sourceDiskAvailable = true - break - } - } - assert.False(t, sourceDiskAvailable, "Source disk should not be available due to task conflict") -} - -// TestPublicInterfaces tests the public interface methods -func TestPublicInterfaces(t *testing.T) { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Test GetAllNodes - nodes := topology.GetAllNodes() - assert.Equal(t, 2, len(nodes)) - assert.Contains(t, nodes, "10.0.0.1:8080") - assert.Contains(t, nodes, "10.0.0.2:8080") - - // Test GetNodeDisks - disks := topology.GetNodeDisks("10.0.0.1:8080") - assert.Equal(t, 2, len(disks)) - - // Test with non-existent node - disks = topology.GetNodeDisks("non-existent") - assert.Nil(t, disks) -} - -// Helper functions to create test topologies - -func createSampleTopology() *master_pb.TopologyInfo { - return &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, VolumeCount: 10, MaxVolumeCount: 100}, - "ssd": {DiskId: 1, VolumeCount: 5, MaxVolumeCount: 50}, - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, VolumeCount: 8, MaxVolumeCount: 100}, - "ssd": {DiskId: 1, VolumeCount: 3, MaxVolumeCount: 50}, - }, - }, - }, - }, - }, - }, - }, - } -} - -func createEmptyTopology() *master_pb.TopologyInfo { - return &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, VolumeCount: 0, MaxVolumeCount: 100}, - }, - }, - }, - }, - }, - }, - }, - } -} - -func createUnbalancedTopology() *master_pb.TopologyInfo { - return &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, VolumeCount: 90, MaxVolumeCount: 100}, // Very loaded - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, VolumeCount: 10, MaxVolumeCount: 100}, // Lightly loaded - }, - }, - }, - }, - }, - }, - }, - } -} - -func createHighGarbageTopology() *master_pb.TopologyInfo { - // In a real implementation, this would include volume-level garbage metrics - return createSampleTopology() -} - -func createLargeVolumeTopology() *master_pb.TopologyInfo { - // In a real implementation, this would include volume-level size metrics - return createSampleTopology() -} - -func createTopologyWithLoad() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Add some existing tasks to create load - err := topology.AddPendingTask(TaskSpec{ - TaskID: "existing1", - TaskType: TaskTypeVacuum, - VolumeID: 2001, - VolumeSize: 0, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "", DiskID: 0}, // Vacuum doesn't have a destination - }, - }) - if err != nil { - // In test helper function, just log error instead of failing - fmt.Printf("Warning: Failed to add existing task: %v\n", err) - } - topology.AssignTask("existing1") - - return topology -} - -func createTopologyForEC() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - return topology -} - -func createTopologyWithConflicts() *ActiveTopology { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Add conflicting tasks - err := topology.AddPendingTask(TaskSpec{ - TaskID: "balance1", - TaskType: TaskTypeBalance, - VolumeID: 3001, - VolumeSize: 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 0}, - }, - }) - if err != nil { - fmt.Printf("Warning: Failed to add balance task: %v\n", err) - } - topology.AssignTask("balance1") - - err = topology.AddPendingTask(TaskSpec{ - TaskID: "ec1", - TaskType: TaskTypeErasureCoding, - VolumeID: 3002, - VolumeSize: 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 1}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "", DiskID: 0}, // EC doesn't have single destination - }, - }) - if err != nil { - fmt.Printf("Warning: Failed to add EC task: %v\n", err) - } - topology.AssignTask("ec1") - - return topology -} - -// TestDestinationPlanning tests that the public interface works correctly -// NOTE: Destination planning is now done in task detection phase, not in ActiveTopology -func TestDestinationPlanning(t *testing.T) { - topology := NewActiveTopology(10) - topology.UpdateTopology(createSampleTopology()) - - // Test that GetAvailableDisks works for destination planning - t.Run("GetAvailableDisks functionality", func(t *testing.T) { - availableDisks := topology.GetAvailableDisks(TaskTypeBalance, "10.0.0.1:8080") - assert.Greater(t, len(availableDisks), 0) - - // Should exclude the source node - for _, disk := range availableDisks { - assert.NotEqual(t, "10.0.0.1:8080", disk.NodeID) - } - }) - - // Test that topology state can be used for planning - t.Run("Topology provides planning information", func(t *testing.T) { - topologyInfo := topology.GetTopologyInfo() - assert.NotNil(t, topologyInfo) - assert.Greater(t, len(topologyInfo.DataCenterInfos), 0) - - // Test getting node disks - disks := topology.GetNodeDisks("10.0.0.1:8080") - assert.Greater(t, len(disks), 0) - }) -} diff --git a/weed/admin/topology/capacity.go b/weed/admin/topology/capacity.go deleted file mode 100644 index a595ed369..000000000 --- a/weed/admin/topology/capacity.go +++ /dev/null @@ -1,300 +0,0 @@ -package topology - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// GetEffectiveAvailableCapacity returns the effective available capacity for a disk -// This considers BOTH pending and assigned tasks for capacity reservation. -// -// Formula: BaseAvailable - (VolumeSlots + ShardSlots/ShardsPerVolumeSlot) from all tasks -// -// The calculation includes: -// - Pending tasks: Reserve capacity immediately when added -// - Assigned tasks: Continue to reserve capacity during execution -// - Recently completed tasks are NOT counted against capacity -func (at *ActiveTopology) GetEffectiveAvailableCapacity(nodeID string, diskID uint32) int64 { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKey := fmt.Sprintf("%s:%d", nodeID, diskID) - disk, exists := at.disks[diskKey] - if !exists { - return 0 - } - - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return 0 - } - - // Use the same logic as getEffectiveAvailableCapacityUnsafe but with locking - capacity := at.getEffectiveAvailableCapacityUnsafe(disk) - return int64(capacity.VolumeSlots) -} - -// GetEffectiveAvailableCapacityDetailed returns detailed available capacity as StorageSlotChange -// This provides granular information about available volume slots and shard slots -func (at *ActiveTopology) GetEffectiveAvailableCapacityDetailed(nodeID string, diskID uint32) StorageSlotChange { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKey := fmt.Sprintf("%s:%d", nodeID, diskID) - disk, exists := at.disks[diskKey] - if !exists { - return StorageSlotChange{} - } - - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return StorageSlotChange{} - } - - return at.getEffectiveAvailableCapacityUnsafe(disk) -} - -// GetEffectiveCapacityImpact returns the StorageSlotChange impact for a disk -// This shows the net impact from all pending and assigned tasks -func (at *ActiveTopology) GetEffectiveCapacityImpact(nodeID string, diskID uint32) StorageSlotChange { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKey := fmt.Sprintf("%s:%d", nodeID, diskID) - disk, exists := at.disks[diskKey] - if !exists { - return StorageSlotChange{} - } - - return at.getEffectiveCapacityUnsafe(disk) -} - -// GetDisksWithEffectiveCapacity returns disks with sufficient effective capacity -// This method considers BOTH pending and assigned tasks for capacity reservation using StorageSlotChange. -// -// Parameters: -// - taskType: type of task to check compatibility for -// - excludeNodeID: node to exclude from results -// - minCapacity: minimum effective capacity required (in volume slots) -// -// Returns: DiskInfo objects where VolumeCount reflects capacity reserved by all tasks -func (at *ActiveTopology) GetDisksWithEffectiveCapacity(taskType TaskType, excludeNodeID string, minCapacity int64) []*DiskInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - - var available []*DiskInfo - - for _, disk := range at.disks { - if disk.NodeID == excludeNodeID { - continue // Skip excluded node - } - - if at.isDiskAvailable(disk, taskType) { - effectiveCapacity := at.getEffectiveAvailableCapacityUnsafe(disk) - - // Only include disks that meet minimum capacity requirement - if int64(effectiveCapacity.VolumeSlots) >= minCapacity { - // Create a new DiskInfo with current capacity information - diskCopy := DiskInfo{ - NodeID: disk.DiskInfo.NodeID, - DiskID: disk.DiskInfo.DiskID, - DiskType: disk.DiskInfo.DiskType, - DataCenter: disk.DiskInfo.DataCenter, - Rack: disk.DiskInfo.Rack, - LoadCount: len(disk.pendingTasks) + len(disk.assignedTasks), // Count all tasks - } - - // Create a new protobuf DiskInfo to avoid modifying the original - diskInfoCopy := &master_pb.DiskInfo{ - DiskId: disk.DiskInfo.DiskInfo.DiskId, - MaxVolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount, - VolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount - int64(effectiveCapacity.VolumeSlots), - VolumeInfos: disk.DiskInfo.DiskInfo.VolumeInfos, - EcShardInfos: disk.DiskInfo.DiskInfo.EcShardInfos, - RemoteVolumeCount: disk.DiskInfo.DiskInfo.RemoteVolumeCount, - ActiveVolumeCount: disk.DiskInfo.DiskInfo.ActiveVolumeCount, - FreeVolumeCount: disk.DiskInfo.DiskInfo.FreeVolumeCount, - } - diskCopy.DiskInfo = diskInfoCopy - - available = append(available, &diskCopy) - } - } - } - - return available -} - -// GetDisksForPlanning returns disks considering both active and pending tasks for planning decisions -// This helps avoid over-scheduling tasks to the same disk -func (at *ActiveTopology) GetDisksForPlanning(taskType TaskType, excludeNodeID string, minCapacity int64) []*DiskInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - - var available []*DiskInfo - - for _, disk := range at.disks { - if disk.NodeID == excludeNodeID { - continue // Skip excluded node - } - - // Consider both pending and active tasks for scheduling decisions - if at.isDiskAvailableForPlanning(disk, taskType) { - // Check if disk can accommodate new task considering pending tasks - planningCapacity := at.getPlanningCapacityUnsafe(disk) - - if int64(planningCapacity.VolumeSlots) >= minCapacity { - // Create a new DiskInfo with planning information - diskCopy := DiskInfo{ - NodeID: disk.DiskInfo.NodeID, - DiskID: disk.DiskInfo.DiskID, - DiskType: disk.DiskInfo.DiskType, - DataCenter: disk.DiskInfo.DataCenter, - Rack: disk.DiskInfo.Rack, - LoadCount: len(disk.pendingTasks) + len(disk.assignedTasks), - } - - // Create a new protobuf DiskInfo to avoid modifying the original - diskInfoCopy := &master_pb.DiskInfo{ - DiskId: disk.DiskInfo.DiskInfo.DiskId, - MaxVolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount, - VolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount - int64(planningCapacity.VolumeSlots), - VolumeInfos: disk.DiskInfo.DiskInfo.VolumeInfos, - EcShardInfos: disk.DiskInfo.DiskInfo.EcShardInfos, - RemoteVolumeCount: disk.DiskInfo.DiskInfo.RemoteVolumeCount, - ActiveVolumeCount: disk.DiskInfo.DiskInfo.ActiveVolumeCount, - FreeVolumeCount: disk.DiskInfo.DiskInfo.FreeVolumeCount, - } - diskCopy.DiskInfo = diskInfoCopy - - available = append(available, &diskCopy) - } - } - } - - return available -} - -// CanAccommodateTask checks if a disk can accommodate a new task considering all constraints -func (at *ActiveTopology) CanAccommodateTask(nodeID string, diskID uint32, taskType TaskType, volumesNeeded int64) bool { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKey := fmt.Sprintf("%s:%d", nodeID, diskID) - disk, exists := at.disks[diskKey] - if !exists { - return false - } - - // Check basic availability - if !at.isDiskAvailable(disk, taskType) { - return false - } - - // Check effective capacity - effectiveCapacity := at.getEffectiveAvailableCapacityUnsafe(disk) - return int64(effectiveCapacity.VolumeSlots) >= volumesNeeded -} - -// getPlanningCapacityUnsafe considers both pending and active tasks for planning -func (at *ActiveTopology) getPlanningCapacityUnsafe(disk *activeDisk) StorageSlotChange { - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return StorageSlotChange{} - } - - baseAvailableVolumes := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount - - // Use the centralized helper function to calculate task storage impact - totalImpact := at.calculateTaskStorageImpact(disk) - - // Calculate available capacity considering impact (negative impact reduces availability) - availableVolumeSlots := baseAvailableVolumes - totalImpact.ToVolumeSlots() - if availableVolumeSlots < 0 { - availableVolumeSlots = 0 - } - - // Return detailed capacity information - return StorageSlotChange{ - VolumeSlots: int32(availableVolumeSlots), - ShardSlots: -totalImpact.ShardSlots, // Available shard capacity (negative impact becomes positive availability) - } -} - -// isDiskAvailableForPlanning checks if disk can accept new tasks considering pending load -func (at *ActiveTopology) isDiskAvailableForPlanning(disk *activeDisk, taskType TaskType) bool { - // Check total load including pending tasks - totalLoad := len(disk.pendingTasks) + len(disk.assignedTasks) - if totalLoad >= MaxTotalTaskLoadPerDisk { - return false - } - - // Check for conflicting task types in active tasks only - for _, task := range disk.assignedTasks { - if at.areTaskTypesConflicting(task.TaskType, taskType) { - return false - } - } - - return true -} - -// calculateTaskStorageImpact is a helper function that calculates the total storage impact -// from all tasks (pending and assigned) on a given disk. This eliminates code duplication -// between multiple capacity calculation functions. -func (at *ActiveTopology) calculateTaskStorageImpact(disk *activeDisk) StorageSlotChange { - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return StorageSlotChange{} - } - - totalImpact := StorageSlotChange{} - - // Process both pending and assigned tasks with identical logic - taskLists := [][]*taskState{disk.pendingTasks, disk.assignedTasks} - - for _, taskList := range taskLists { - for _, task := range taskList { - // Calculate impact for all source locations - for _, source := range task.Sources { - if source.SourceServer == disk.NodeID && source.SourceDisk == disk.DiskID { - totalImpact.AddInPlace(source.StorageChange) - } - } - - // Calculate impact for all destination locations - for _, dest := range task.Destinations { - if dest.TargetServer == disk.NodeID && dest.TargetDisk == disk.DiskID { - totalImpact.AddInPlace(dest.StorageChange) - } - } - } - } - - return totalImpact -} - -// getEffectiveCapacityUnsafe returns effective capacity impact without locking (for internal use) -// Returns StorageSlotChange representing the net impact from all tasks -func (at *ActiveTopology) getEffectiveCapacityUnsafe(disk *activeDisk) StorageSlotChange { - return at.calculateTaskStorageImpact(disk) -} - -// getEffectiveAvailableCapacityUnsafe returns detailed available capacity as StorageSlotChange -func (at *ActiveTopology) getEffectiveAvailableCapacityUnsafe(disk *activeDisk) StorageSlotChange { - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return StorageSlotChange{} - } - - baseAvailable := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount - netImpact := at.getEffectiveCapacityUnsafe(disk) - - // Calculate available volume slots (negative impact reduces availability) - availableVolumeSlots := baseAvailable - netImpact.ToVolumeSlots() - if availableVolumeSlots < 0 { - availableVolumeSlots = 0 - } - - // Return detailed capacity information - return StorageSlotChange{ - VolumeSlots: int32(availableVolumeSlots), - ShardSlots: -netImpact.ShardSlots, // Available shard capacity (negative impact becomes positive availability) - } -} diff --git a/weed/admin/topology/internal.go b/weed/admin/topology/internal.go deleted file mode 100644 index 72e37f6c1..000000000 --- a/weed/admin/topology/internal.go +++ /dev/null @@ -1,114 +0,0 @@ -package topology - -import ( - "fmt" - "time" -) - -// reassignTaskStates assigns tasks to the appropriate disks -func (at *ActiveTopology) reassignTaskStates() { - // Clear existing task assignments - for _, disk := range at.disks { - disk.pendingTasks = nil - disk.assignedTasks = nil - disk.recentTasks = nil - } - - // Reassign pending tasks - for _, task := range at.pendingTasks { - at.assignTaskToDisk(task) - } - - // Reassign assigned tasks - for _, task := range at.assignedTasks { - at.assignTaskToDisk(task) - } - - // Reassign recent tasks - for _, task := range at.recentTasks { - at.assignTaskToDisk(task) - } -} - -// assignTaskToDisk assigns a task to the appropriate disk(s) -func (at *ActiveTopology) assignTaskToDisk(task *taskState) { - addedDisks := make(map[string]bool) - - // Local helper function to assign task to a disk and avoid code duplication - assign := func(server string, diskID uint32) { - key := fmt.Sprintf("%s:%d", server, diskID) - if server == "" || addedDisks[key] { - return - } - if disk, exists := at.disks[key]; exists { - switch task.Status { - case TaskStatusPending: - disk.pendingTasks = append(disk.pendingTasks, task) - case TaskStatusInProgress: - disk.assignedTasks = append(disk.assignedTasks, task) - case TaskStatusCompleted: - disk.recentTasks = append(disk.recentTasks, task) - } - addedDisks[key] = true - } - } - - // Assign to all source disks - for _, source := range task.Sources { - assign(source.SourceServer, source.SourceDisk) - } - - // Assign to all destination disks (duplicates automatically avoided by helper) - for _, dest := range task.Destinations { - assign(dest.TargetServer, dest.TargetDisk) - } -} - -// isDiskAvailable checks if a disk can accept new tasks -func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool { - // Check if disk has too many pending and active tasks - activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks) - if activeLoad >= MaxConcurrentTasksPerDisk { - return false - } - - // Check for conflicting task types - for _, task := range disk.assignedTasks { - if at.areTaskTypesConflicting(task.TaskType, taskType) { - return false - } - } - - return true -} - -// areTaskTypesConflicting checks if two task types conflict -func (at *ActiveTopology) areTaskTypesConflicting(existing, new TaskType) bool { - // Examples of conflicting task types - conflictMap := map[TaskType][]TaskType{ - TaskTypeVacuum: {TaskTypeBalance, TaskTypeErasureCoding}, - TaskTypeBalance: {TaskTypeVacuum, TaskTypeErasureCoding}, - TaskTypeErasureCoding: {TaskTypeVacuum, TaskTypeBalance}, - } - - if conflicts, exists := conflictMap[existing]; exists { - for _, conflictType := range conflicts { - if conflictType == new { - return true - } - } - } - - return false -} - -// cleanupRecentTasks removes old recent tasks -func (at *ActiveTopology) cleanupRecentTasks() { - cutoff := time.Now().Add(-time.Duration(at.recentTaskWindowSeconds) * time.Second) - - for taskID, task := range at.recentTasks { - if task.CompletedAt.Before(cutoff) { - delete(at.recentTasks, taskID) - } - } -} diff --git a/weed/admin/topology/storage_impact.go b/weed/admin/topology/storage_impact.go deleted file mode 100644 index e325fc9cf..000000000 --- a/weed/admin/topology/storage_impact.go +++ /dev/null @@ -1,50 +0,0 @@ -package topology - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -// CalculateTaskStorageImpact calculates storage impact for different task types -func CalculateTaskStorageImpact(taskType TaskType, volumeSize int64) (sourceChange, targetChange StorageSlotChange) { - switch taskType { - case TaskTypeErasureCoding: - // EC task: distributes shards to MULTIPLE targets, source reserves with zero impact - // Source reserves capacity but with zero StorageSlotChange (no actual capacity consumption during planning) - // WARNING: EC has multiple targets! Use AddPendingTask with multiple destinations for proper multi-target handling - // This simplified function returns zero impact; real EC requires specialized multi-destination calculation - return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0} - - case TaskTypeBalance: - // Balance task: moves volume from source to target - // Source loses 1 volume, target gains 1 volume - return StorageSlotChange{VolumeSlots: -1, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0} - - case TaskTypeVacuum: - // Vacuum task: frees space by removing deleted entries, no slot change - return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0} - - case TaskTypeReplication: - // Replication task: creates new replica on target - return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0} - - default: - // Unknown task type, assume minimal impact - glog.Warningf("unhandled task type %s in CalculateTaskStorageImpact, assuming default impact", taskType) - return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0} - } -} - -// CalculateECShardStorageImpact calculates storage impact for EC shards specifically -func CalculateECShardStorageImpact(shardCount int32, expectedShardSize int64) StorageSlotChange { - // EC shards are typically much smaller than full volumes - // Use shard-level tracking for granular capacity planning - return StorageSlotChange{VolumeSlots: 0, ShardSlots: shardCount} -} - -// CalculateECShardCleanupImpact calculates storage impact for cleaning up existing EC shards -func CalculateECShardCleanupImpact(originalVolumeSize int64) StorageSlotChange { - // Cleaning up existing EC shards frees shard slots - // Use the actual EC configuration constants for accurate shard count - return StorageSlotChange{VolumeSlots: 0, ShardSlots: -int32(erasure_coding.TotalShardsCount)} // Negative = freed capacity -} diff --git a/weed/admin/topology/storage_slot_test.go b/weed/admin/topology/storage_slot_test.go deleted file mode 100644 index 5a0ed3ce5..000000000 --- a/weed/admin/topology/storage_slot_test.go +++ /dev/null @@ -1,1004 +0,0 @@ -package topology - -import ( - "fmt" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" - "github.com/stretchr/testify/assert" -) - -// NOTE: These tests are designed to work with any value of erasure_coding.DataShardsCount. -// This ensures compatibility with custom erasure coding configurations where DataShardsCount -// might be changed from the default value of 10. All shard-to-volume conversion calculations -// are done dynamically using the actual constant value. - -// testGetDiskStorageImpact is a test helper that provides the same interface as the removed -// GetDiskStorageImpact method. For simplicity, it returns the total impact as "planned" -// and zeros for "reserved" since the distinction is not critical for most test scenarios. -func testGetDiskStorageImpact(at *ActiveTopology, nodeID string, diskID uint32) (plannedVolumeSlots, reservedVolumeSlots int64, plannedShardSlots, reservedShardSlots int32, estimatedSize int64) { - impact := at.GetEffectiveCapacityImpact(nodeID, diskID) - // Return total impact as "planned" for test compatibility - return int64(impact.VolumeSlots), 0, impact.ShardSlots, 0, 0 -} - -// TestStorageSlotChangeArithmetic tests the arithmetic operations on StorageSlotChange -func TestStorageSlotChangeArithmetic(t *testing.T) { - // Test basic arithmetic operations - a := StorageSlotChange{VolumeSlots: 5, ShardSlots: 10} - b := StorageSlotChange{VolumeSlots: 3, ShardSlots: 8} - - // Test Add - sum := a.Add(b) - assert.Equal(t, StorageSlotChange{VolumeSlots: 8, ShardSlots: 18}, sum, "Add should work correctly") - - // Test Subtract - diff := a.Subtract(b) - assert.Equal(t, StorageSlotChange{VolumeSlots: 2, ShardSlots: 2}, diff, "Subtract should work correctly") - - // Test AddInPlace - c := StorageSlotChange{VolumeSlots: 1, ShardSlots: 2} - c.AddInPlace(b) - assert.Equal(t, StorageSlotChange{VolumeSlots: 4, ShardSlots: 10}, c, "AddInPlace should modify in place") - - // Test SubtractInPlace - d := StorageSlotChange{VolumeSlots: 10, ShardSlots: 20} - d.SubtractInPlace(b) - assert.Equal(t, StorageSlotChange{VolumeSlots: 7, ShardSlots: 12}, d, "SubtractInPlace should modify in place") - - // Test IsZero - zero := StorageSlotChange{VolumeSlots: 0, ShardSlots: 0} - nonZero := StorageSlotChange{VolumeSlots: 1, ShardSlots: 0} - assert.True(t, zero.IsZero(), "Zero struct should return true for IsZero") - assert.False(t, nonZero.IsZero(), "Non-zero struct should return false for IsZero") - - // Test ToVolumeSlots conversion - impact1 := StorageSlotChange{VolumeSlots: 5, ShardSlots: 10} - assert.Equal(t, int64(6), impact1.ToVolumeSlots(), fmt.Sprintf("ToVolumeSlots should be 5 + 10/%d = 6", erasure_coding.DataShardsCount)) - - impact2 := StorageSlotChange{VolumeSlots: -2, ShardSlots: 25} - assert.Equal(t, int64(0), impact2.ToVolumeSlots(), fmt.Sprintf("ToVolumeSlots should be -2 + 25/%d = 0", erasure_coding.DataShardsCount)) - - impact3 := StorageSlotChange{VolumeSlots: 3, ShardSlots: 7} - assert.Equal(t, int64(3), impact3.ToVolumeSlots(), fmt.Sprintf("ToVolumeSlots should be 3 + 7/%d = 3 (integer division)", erasure_coding.DataShardsCount)) -} - -// TestStorageSlotChange tests the new dual-level storage slot tracking -func TestStorageSlotChange(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Create test topology - topologyInfo := &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": { - DiskId: 0, - Type: "hdd", - VolumeCount: 5, - MaxVolumeCount: 20, - }, - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": { - DiskId: 0, - Type: "hdd", - VolumeCount: 8, - MaxVolumeCount: 15, - }, - }, - }, - }, - }, - }, - }, - }, - } - - activeTopology.UpdateTopology(topologyInfo) - - // Test 1: Basic storage slot calculation - ecSourceChange, ecTargetChange := CalculateTaskStorageImpact(TaskTypeErasureCoding, 1024*1024*1024) - assert.Equal(t, int32(0), ecSourceChange.VolumeSlots, "EC source reserves with zero StorageSlotChange impact") - assert.Equal(t, int32(0), ecSourceChange.ShardSlots, "EC source should have zero shard impact") - assert.Equal(t, int32(0), ecTargetChange.VolumeSlots, "EC should not directly impact target volume slots") - assert.Equal(t, int32(0), ecTargetChange.ShardSlots, "EC target should have zero shard impact from this simplified function") - - balSourceChange, balTargetChange := CalculateTaskStorageImpact(TaskTypeBalance, 1024*1024*1024) - assert.Equal(t, int32(-1), balSourceChange.VolumeSlots, "Balance should free 1 volume slot on source") - assert.Equal(t, int32(1), balTargetChange.VolumeSlots, "Balance should consume 1 volume slot on target") - - // Test 2: EC shard impact calculation - shardImpact := CalculateECShardStorageImpact(3, 100*1024*1024) // 3 shards, 100MB each - assert.Equal(t, int32(0), shardImpact.VolumeSlots, "EC shards should not impact volume slots") - assert.Equal(t, int32(3), shardImpact.ShardSlots, "EC should impact 3 shard slots") - - // Test 3: Add EC task with shard-level tracking - sourceServer := "10.0.0.1:8080" - sourceDisk := uint32(0) - shardDestinations := []string{"10.0.0.2:8080", "10.0.0.2:8080"} - shardDiskIDs := []uint32{0, 0} - - expectedShardSize := int64(50 * 1024 * 1024) // 50MB per shard - originalVolumeSize := int64(1024 * 1024 * 1024) // 1GB original - - // Create source specs (single replica in this test) - sources := []TaskSourceSpec{ - {ServerID: sourceServer, DiskID: sourceDisk, CleanupType: CleanupVolumeReplica}, - } - - // Create destination specs - destinations := make([]TaskDestinationSpec, len(shardDestinations)) - shardImpact = CalculateECShardStorageImpact(1, expectedShardSize) - for i, dest := range shardDestinations { - destinations[i] = TaskDestinationSpec{ - ServerID: dest, - DiskID: shardDiskIDs[i], - StorageImpact: &shardImpact, - EstimatedSize: &expectedShardSize, - } - } - - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "ec_test", - TaskType: TaskTypeErasureCoding, - VolumeID: 100, - VolumeSize: originalVolumeSize, - Sources: sources, - Destinations: destinations, - }) - assert.NoError(t, err, "Should add EC shard task successfully") - - // Test 4: Check storage impact on source (EC reserves with zero impact) - sourceImpact := activeTopology.GetEffectiveCapacityImpact("10.0.0.1:8080", 0) - assert.Equal(t, int32(0), sourceImpact.VolumeSlots, "Source should show 0 volume slot impact (EC reserves with zero impact)") - assert.Equal(t, int32(0), sourceImpact.ShardSlots, "Source should show 0 shard slot impact") - - // Test 5: Check storage impact on target (should gain shards) - targetImpact := activeTopology.GetEffectiveCapacityImpact("10.0.0.2:8080", 0) - assert.Equal(t, int32(0), targetImpact.VolumeSlots, "Target should show 0 volume slot impact (EC shards don't use volume slots)") - assert.Equal(t, int32(2), targetImpact.ShardSlots, "Target should show 2 shard slot impact") - - // Test 6: Check effective capacity calculation (EC source reserves with zero StorageSlotChange) - sourceCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - targetCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - - // Source: 15 original available (EC source reserves with zero StorageSlotChange impact) - assert.Equal(t, int64(15), sourceCapacity, "Source should have 15 available slots (EC source has zero StorageSlotChange impact)") - - // Target: 7 original available - (2 shards / 10) = 7 (since 2/10 rounds down to 0) - assert.Equal(t, int64(7), targetCapacity, "Target should have 7 available slots (minimal shard impact)") - - // Test 7: Add traditional balance task for comparison - err = activeTopology.AddPendingTask(TaskSpec{ - TaskID: "balance_test", - TaskType: TaskTypeBalance, - VolumeID: 101, - VolumeSize: 512 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 0}, - }, - }) - assert.NoError(t, err, "Should add balance task successfully") - - // Check updated impacts after adding balance task - finalSourceImpact := activeTopology.GetEffectiveCapacityImpact("10.0.0.1:8080", 0) - finalTargetImpact := activeTopology.GetEffectiveCapacityImpact("10.0.0.2:8080", 0) - - assert.Equal(t, int32(-1), finalSourceImpact.VolumeSlots, "Source should show -1 volume slot impact (EC: 0, Balance: -1)") - assert.Equal(t, int32(1), finalTargetImpact.VolumeSlots, "Target should show 1 volume slot impact (Balance: +1)") - assert.Equal(t, int32(2), finalTargetImpact.ShardSlots, "Target should still show 2 shard slot impact (EC shards)") -} - -// TestStorageSlotChangeCapacityCalculation tests the capacity calculation with mixed slot types -func TestStorageSlotChangeCapacityCalculation(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Create simple topology - topologyInfo := &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": { - DiskId: 0, - Type: "hdd", - VolumeCount: 10, - MaxVolumeCount: 100, // Large capacity for testing - }, - }, - }, - }, - }, - }, - }, - }, - } - - activeTopology.UpdateTopology(topologyInfo) - - // Initial capacity - initialCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - assert.Equal(t, int64(90), initialCapacity, "Should start with 90 available slots") - - // Add tasks with different shard slot impacts - targetImpact1 := StorageSlotChange{VolumeSlots: 0, ShardSlots: 5} // Target gains 5 shards - estimatedSize1 := int64(100 * 1024 * 1024) - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "shard_test_1", - TaskType: TaskTypeErasureCoding, - VolumeID: 100, - VolumeSize: estimatedSize1, - Sources: []TaskSourceSpec{ - {ServerID: "", DiskID: 0}, // Source not applicable here - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, StorageImpact: &targetImpact1, EstimatedSize: &estimatedSize1}, - }, - }) - assert.NoError(t, err, "Should add shard test 1 successfully") - - // Capacity should be reduced by pending tasks via StorageSlotChange - capacityAfterShards := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - // Dynamic calculation: 5 shards < DataShardsCount, so no volume impact - expectedImpact5 := int64(5 / erasure_coding.DataShardsCount) // Should be 0 for any reasonable DataShardsCount - assert.Equal(t, int64(90-expectedImpact5), capacityAfterShards, fmt.Sprintf("5 shard slots should consume %d volume slot equivalent (5/%d = %d)", expectedImpact5, erasure_coding.DataShardsCount, expectedImpact5)) - - // Add more shards to reach threshold - additionalShards := int32(erasure_coding.DataShardsCount) // Add exactly one volume worth of shards - targetImpact2 := StorageSlotChange{VolumeSlots: 0, ShardSlots: additionalShards} // Target gains additional shards - estimatedSize2 := int64(100 * 1024 * 1024) - err = activeTopology.AddPendingTask(TaskSpec{ - TaskID: "shard_test_2", - TaskType: TaskTypeErasureCoding, - VolumeID: 101, - VolumeSize: estimatedSize2, - Sources: []TaskSourceSpec{ - {ServerID: "", DiskID: 0}, // Source not applicable here - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, StorageImpact: &targetImpact2, EstimatedSize: &estimatedSize2}, - }, - }) - assert.NoError(t, err, "Should add shard test 2 successfully") - - // Dynamic calculation: (5 + DataShardsCount) shards should consume 1 volume slot - totalShards := 5 + erasure_coding.DataShardsCount - expectedImpact15 := int64(totalShards / erasure_coding.DataShardsCount) // Should be 1 - capacityAfterMoreShards := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - assert.Equal(t, int64(90-expectedImpact15), capacityAfterMoreShards, fmt.Sprintf("%d shard slots should consume %d volume slot equivalent (%d/%d = %d)", totalShards, expectedImpact15, totalShards, erasure_coding.DataShardsCount, expectedImpact15)) - - // Add a full volume task - targetImpact3 := StorageSlotChange{VolumeSlots: 1, ShardSlots: 0} // Target gains 1 volume - estimatedSize3 := int64(1024 * 1024 * 1024) - err = activeTopology.AddPendingTask(TaskSpec{ - TaskID: "volume_test", - TaskType: TaskTypeBalance, - VolumeID: 102, - VolumeSize: estimatedSize3, - Sources: []TaskSourceSpec{ - {ServerID: "", DiskID: 0}, // Source not applicable here - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, StorageImpact: &targetImpact3, EstimatedSize: &estimatedSize3}, - }, - }) - assert.NoError(t, err, "Should add volume test successfully") - - // Capacity should be reduced by 1 more volume slot - finalCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - assert.Equal(t, int64(88), finalCapacity, "1 volume + 15 shard slots should consume 2 volume slots total") - - // Verify the detailed storage impact - plannedVol, reservedVol, plannedShard, reservedShard, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.1:8080", 0) - assert.Equal(t, int64(1), plannedVol, "Should show 1 planned volume slot") - assert.Equal(t, int64(0), reservedVol, "Should show 0 reserved volume slots") - assert.Equal(t, int32(15), plannedShard, "Should show 15 planned shard slots") - assert.Equal(t, int32(0), reservedShard, "Should show 0 reserved shard slots") -} - -// TestECMultipleTargets demonstrates proper handling of EC operations with multiple targets -func TestECMultipleTargets(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Create test topology with multiple target nodes - topologyInfo := &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", // Source - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 10, MaxVolumeCount: 50}, - }, - }, - { - Id: "10.0.0.2:8080", // Target 1 - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 5, MaxVolumeCount: 30}, - }, - }, - { - Id: "10.0.0.3:8080", // Target 2 - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 8, MaxVolumeCount: 40}, - }, - }, - { - Id: "10.0.0.4:8080", // Target 3 - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 12, MaxVolumeCount: 35}, - }, - }, - }, - }, - }, - }, - }, - } - - activeTopology.UpdateTopology(topologyInfo) - - // Demonstrate why CalculateTaskStorageImpact is insufficient for EC - sourceChange, targetChange := CalculateTaskStorageImpact(TaskTypeErasureCoding, 1*1024*1024*1024) - assert.Equal(t, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, sourceChange, "Source reserves with zero StorageSlotChange") - assert.Equal(t, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, targetChange, "Target has zero impact from simplified function - insufficient for multi-target EC") - - // Proper way: Use AddPendingTask for multiple targets - sourceServer := "10.0.0.1:8080" - sourceDisk := uint32(0) - - // EC typically distributes shards across multiple targets - shardDestinations := []string{ - "10.0.0.2:8080", "10.0.0.2:8080", "10.0.0.2:8080", "10.0.0.2:8080", "10.0.0.2:8080", // 5 shards to target 1 - "10.0.0.3:8080", "10.0.0.3:8080", "10.0.0.3:8080", "10.0.0.3:8080", "10.0.0.3:8080", // 5 shards to target 2 - "10.0.0.4:8080", "10.0.0.4:8080", "10.0.0.4:8080", "10.0.0.4:8080", // 4 shards to target 3 - } - shardDiskIDs := make([]uint32, len(shardDestinations)) - for i := range shardDiskIDs { - shardDiskIDs[i] = 0 - } - - // Create source specs (single replica in this test) - sources := []TaskSourceSpec{ - {ServerID: sourceServer, DiskID: sourceDisk, CleanupType: CleanupVolumeReplica}, - } - - // Create destination specs - destinations := make([]TaskDestinationSpec, len(shardDestinations)) - expectedShardSize := int64(50 * 1024 * 1024) - shardImpact := CalculateECShardStorageImpact(1, expectedShardSize) - for i, dest := range shardDestinations { - destinations[i] = TaskDestinationSpec{ - ServerID: dest, - DiskID: shardDiskIDs[i], - StorageImpact: &shardImpact, - EstimatedSize: &expectedShardSize, - } - } - - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "ec_multi_target", - TaskType: TaskTypeErasureCoding, - VolumeID: 200, - VolumeSize: 1 * 1024 * 1024 * 1024, - Sources: sources, - Destinations: destinations, - }) - assert.NoError(t, err, "Should add multi-target EC task successfully") - - // Verify source impact (EC reserves with zero StorageSlotChange) - sourcePlannedVol, sourceReservedVol, sourcePlannedShard, sourceReservedShard, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.1:8080", 0) - assert.Equal(t, int64(0), sourcePlannedVol, "Source should reserve with zero volume slot impact") - assert.Equal(t, int64(0), sourceReservedVol, "Source should not have reserved capacity yet") - assert.Equal(t, int32(0), sourcePlannedShard, "Source should not have planned shard impact") - assert.Equal(t, int32(0), sourceReservedShard, "Source should not have reserved shard impact") - // Note: EstimatedSize tracking is no longer exposed via public API - - // Verify target impacts (planned, not yet reserved) - target1PlannedVol, target1ReservedVol, target1PlannedShard, target1ReservedShard, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.2:8080", 0) - target2PlannedVol, target2ReservedVol, target2PlannedShard, target2ReservedShard, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.3:8080", 0) - target3PlannedVol, target3ReservedVol, target3PlannedShard, target3ReservedShard, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.4:8080", 0) - - assert.Equal(t, int64(0), target1PlannedVol, "Target 1 should not have planned volume impact") - assert.Equal(t, int32(5), target1PlannedShard, "Target 1 should plan to receive 5 shards") - assert.Equal(t, int64(0), target1ReservedVol, "Target 1 should not have reserved capacity yet") - assert.Equal(t, int32(0), target1ReservedShard, "Target 1 should not have reserved shards yet") - - assert.Equal(t, int64(0), target2PlannedVol, "Target 2 should not have planned volume impact") - assert.Equal(t, int32(5), target2PlannedShard, "Target 2 should plan to receive 5 shards") - assert.Equal(t, int64(0), target2ReservedVol, "Target 2 should not have reserved capacity yet") - assert.Equal(t, int32(0), target2ReservedShard, "Target 2 should not have reserved shards yet") - - assert.Equal(t, int64(0), target3PlannedVol, "Target 3 should not have planned volume impact") - assert.Equal(t, int32(4), target3PlannedShard, "Target 3 should plan to receive 4 shards") - assert.Equal(t, int64(0), target3ReservedVol, "Target 3 should not have reserved capacity yet") - assert.Equal(t, int32(0), target3ReservedShard, "Target 3 should not have reserved shards yet") - - // Verify effective capacity (considers both pending and active tasks via StorageSlotChange) - sourceCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - target1Capacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - target2Capacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.3:8080", 0) - target3Capacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.4:8080", 0) - - // Dynamic capacity calculations based on actual DataShardsCount - expectedTarget1Impact := int64(5 / erasure_coding.DataShardsCount) // 5 shards impact - expectedTarget2Impact := int64(5 / erasure_coding.DataShardsCount) // 5 shards impact - expectedTarget3Impact := int64(4 / erasure_coding.DataShardsCount) // 4 shards impact - - assert.Equal(t, int64(40), sourceCapacity, "Source: 40 (EC source reserves with zero StorageSlotChange impact)") - assert.Equal(t, int64(25-expectedTarget1Impact), target1Capacity, fmt.Sprintf("Target 1: 25 - %d (5 shards/%d = %d impact) = %d", expectedTarget1Impact, erasure_coding.DataShardsCount, expectedTarget1Impact, 25-expectedTarget1Impact)) - assert.Equal(t, int64(32-expectedTarget2Impact), target2Capacity, fmt.Sprintf("Target 2: 32 - %d (5 shards/%d = %d impact) = %d", expectedTarget2Impact, erasure_coding.DataShardsCount, expectedTarget2Impact, 32-expectedTarget2Impact)) - assert.Equal(t, int64(23-expectedTarget3Impact), target3Capacity, fmt.Sprintf("Target 3: 23 - %d (4 shards/%d = %d impact) = %d", expectedTarget3Impact, erasure_coding.DataShardsCount, expectedTarget3Impact, 23-expectedTarget3Impact)) - - t.Logf("EC operation distributed %d shards across %d targets", len(shardDestinations), 3) - t.Logf("Capacity impacts: EC source reserves with zero impact, Targets minimal (shards < %d)", erasure_coding.DataShardsCount) -} - -// TestCapacityReservationCycle demonstrates the complete task lifecycle and capacity management -func TestCapacityReservationCycle(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Create test topology - topologyInfo := &master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 10, MaxVolumeCount: 20}, - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "hdd": {DiskId: 0, Type: "hdd", VolumeCount: 5, MaxVolumeCount: 15}, - }, - }, - }, - }, - }, - }, - }, - } - activeTopology.UpdateTopology(topologyInfo) - - // Initial capacity - sourceCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - targetCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - assert.Equal(t, int64(10), sourceCapacity, "Source initial capacity") - assert.Equal(t, int64(10), targetCapacity, "Target initial capacity") - - // Step 1: Add pending task (should reserve capacity via StorageSlotChange) - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "balance_test", - TaskType: TaskTypeBalance, - VolumeID: 123, - VolumeSize: 1 * 1024 * 1024 * 1024, - Sources: []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0}, - }, - Destinations: []TaskDestinationSpec{ - {ServerID: "10.0.0.2:8080", DiskID: 0}, - }, - }) - assert.NoError(t, err, "Should add balance test successfully") - - sourceCapacityAfterPending := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - targetCapacityAfterPending := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - assert.Equal(t, int64(11), sourceCapacityAfterPending, "Source should gain capacity from pending balance task (balance source frees 1 slot)") - assert.Equal(t, int64(9), targetCapacityAfterPending, "Target should consume capacity from pending task (balance reserves 1 slot)") - - // Verify planning capacity considers the same pending tasks - planningDisks := activeTopology.GetDisksForPlanning(TaskTypeBalance, "", 1) - assert.Len(t, planningDisks, 2, "Both disks should be available for planning") - - // Step 2: Assign task (capacity already reserved by pending task) - err = activeTopology.AssignTask("balance_test") - assert.NoError(t, err, "Should assign task successfully") - - sourceCapacityAfterAssign := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - targetCapacityAfterAssign := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - - assert.Equal(t, int64(11), sourceCapacityAfterAssign, "Source capacity should remain same (already accounted by pending)") - assert.Equal(t, int64(9), targetCapacityAfterAssign, "Target capacity should remain same (already accounted by pending)") - - // Note: Detailed task state tracking (planned vs reserved) is no longer exposed via public API - // The important functionality is that capacity calculations remain consistent - - // Step 3: Complete task (should release reserved capacity) - err = activeTopology.CompleteTask("balance_test") - assert.NoError(t, err, "Should complete task successfully") - - sourceCapacityAfterComplete := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - targetCapacityAfterComplete := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - assert.Equal(t, int64(10), sourceCapacityAfterComplete, "Source should return to original capacity") - assert.Equal(t, int64(10), targetCapacityAfterComplete, "Target should return to original capacity") - - // Step 4: Apply actual storage change (simulates master topology update) - activeTopology.ApplyActualStorageChange("10.0.0.1:8080", 0, -1) // Source loses 1 volume - activeTopology.ApplyActualStorageChange("10.0.0.2:8080", 0, 1) // Target gains 1 volume - - // Final capacity should reflect actual topology changes - finalSourceCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - finalTargetCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - assert.Equal(t, int64(11), finalSourceCapacity, "Source: (20-9) = 11 after losing 1 volume") - assert.Equal(t, int64(9), finalTargetCapacity, "Target: (15-6) = 9 after gaining 1 volume") - - t.Logf("Capacity lifecycle with StorageSlotChange: Pending -> Assigned -> Released -> Applied") - t.Logf("Source: 10 -> 11 -> 11 -> 10 -> 11 (freed by pending balance, then applied)") - t.Logf("Target: 10 -> 9 -> 9 -> 10 -> 9 (reserved by pending, then applied)") -} - -// TestReplicatedVolumeECOperations tests EC operations on replicated volumes -func TestReplicatedVolumeECOperations(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Setup cluster with multiple servers for replicated volumes - activeTopology.UpdateTopology(&master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 10}, - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 5}, - }, - }, - { - Id: "10.0.0.3:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 3}, - }, - }, - { - Id: "10.0.0.4:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 15}, - }, - }, - { - Id: "10.0.0.5:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 20}, - }, - }, - { - Id: "10.0.0.6:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 25}, - }, - }, - }, - }, - }, - }, - }, - }) - - // Test: EC operation on replicated volume (3 replicas) - volumeID := uint32(300) - originalVolumeSize := int64(1024 * 1024 * 1024) // 1GB - - // Create source specs for replicated volume (3 replicas) - sources := []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, // Replica 1 - {ServerID: "10.0.0.2:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, // Replica 2 - {ServerID: "10.0.0.3:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, // Replica 3 - } - - // EC destinations (shards distributed across different servers than sources) - shardDestinations := []string{ - "10.0.0.4:8080", "10.0.0.4:8080", "10.0.0.4:8080", "10.0.0.4:8080", "10.0.0.4:8080", // 5 shards - "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", // 5 shards - "10.0.0.6:8080", "10.0.0.6:8080", "10.0.0.6:8080", "10.0.0.6:8080", // 4 shards - } - shardDiskIDs := make([]uint32, len(shardDestinations)) - for i := range shardDiskIDs { - shardDiskIDs[i] = 0 - } - - expectedShardSize := int64(50 * 1024 * 1024) // 50MB per shard - - // Create destination specs - destinations := make([]TaskDestinationSpec, len(shardDestinations)) - shardImpact := CalculateECShardStorageImpact(1, expectedShardSize) - for i, dest := range shardDestinations { - destinations[i] = TaskDestinationSpec{ - ServerID: dest, - DiskID: shardDiskIDs[i], - StorageImpact: &shardImpact, - EstimatedSize: &expectedShardSize, - } - } - - // Create EC task for replicated volume - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "ec_replicated", - TaskType: TaskTypeErasureCoding, - VolumeID: volumeID, - VolumeSize: originalVolumeSize, - Sources: sources, - Destinations: destinations, - }) - assert.NoError(t, err, "Should successfully create EC task for replicated volume") - - // Verify capacity impact on all source replicas (each should reserve with zero impact) - for i, source := range sources { - plannedVol, reservedVol, plannedShard, reservedShard, _ := testGetDiskStorageImpact(activeTopology, source.ServerID, source.DiskID) - assert.Equal(t, int64(0), plannedVol, fmt.Sprintf("Source replica %d should reserve with zero volume slot impact", i+1)) - assert.Equal(t, int64(0), reservedVol, fmt.Sprintf("Source replica %d should have no active volume slots", i+1)) - assert.Equal(t, int32(0), plannedShard, fmt.Sprintf("Source replica %d should have no planned shard slots", i+1)) - assert.Equal(t, int32(0), reservedShard, fmt.Sprintf("Source replica %d should have no active shard slots", i+1)) - // Note: EstimatedSize tracking is no longer exposed via public API - } - - // Verify capacity impact on EC destinations - destinationCounts := make(map[string]int) - for _, dest := range shardDestinations { - destinationCounts[dest]++ - } - - for serverID, expectedShards := range destinationCounts { - plannedVol, _, plannedShard, _, _ := testGetDiskStorageImpact(activeTopology, serverID, 0) - assert.Equal(t, int64(0), plannedVol, fmt.Sprintf("Destination %s should have no planned volume slots", serverID)) - assert.Equal(t, int32(expectedShards), plannedShard, fmt.Sprintf("Destination %s should plan to receive %d shards", serverID, expectedShards)) - } - - // Verify effective capacity calculation for sources (should have zero EC impact) - sourceCapacity1 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - sourceCapacity2 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.2:8080", 0) - sourceCapacity3 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.3:8080", 0) - - // All sources should have same capacity as baseline (EC source reserves with zero impact) - assert.Equal(t, int64(90), sourceCapacity1, "Source 1: 100 - 10 (current) - 0 (EC source impact) = 90") - assert.Equal(t, int64(95), sourceCapacity2, "Source 2: 100 - 5 (current) - 0 (EC source impact) = 95") - assert.Equal(t, int64(97), sourceCapacity3, "Source 3: 100 - 3 (current) - 0 (EC source impact) = 97") - - // Verify effective capacity calculation for destinations (should be reduced by shard slots) - destCapacity4 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.4:8080", 0) - destCapacity5 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.5:8080", 0) - destCapacity6 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.6:8080", 0) - - // Dynamic shard impact calculations - dest4ShardImpact := int64(5 / erasure_coding.DataShardsCount) // 5 shards impact - dest5ShardImpact := int64(5 / erasure_coding.DataShardsCount) // 5 shards impact - dest6ShardImpact := int64(4 / erasure_coding.DataShardsCount) // 4 shards impact - - // Destinations should be reduced by shard impact - assert.Equal(t, int64(85-dest4ShardImpact), destCapacity4, fmt.Sprintf("Dest 4: 100 - 15 (current) - %d (5 shards/%d = %d impact) = %d", dest4ShardImpact, erasure_coding.DataShardsCount, dest4ShardImpact, 85-dest4ShardImpact)) - assert.Equal(t, int64(80-dest5ShardImpact), destCapacity5, fmt.Sprintf("Dest 5: 100 - 20 (current) - %d (5 shards/%d = %d impact) = %d", dest5ShardImpact, erasure_coding.DataShardsCount, dest5ShardImpact, 80-dest5ShardImpact)) - assert.Equal(t, int64(75-dest6ShardImpact), destCapacity6, fmt.Sprintf("Dest 6: 100 - 25 (current) - %d (4 shards/%d = %d impact) = %d", dest6ShardImpact, erasure_coding.DataShardsCount, dest6ShardImpact, 75-dest6ShardImpact)) - - t.Logf("Replicated volume EC operation: %d source replicas, %d EC shards distributed across %d destinations", - len(sources), len(shardDestinations), len(destinationCounts)) - t.Logf("Each source replica reserves with zero capacity impact, destinations receive EC shards") -} - -// TestECWithOldShardCleanup tests EC operations that need to clean up old shards from previous failed attempts -func TestECWithOldShardCleanup(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Setup cluster with servers - activeTopology.UpdateTopology(&master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 10}, - }, - }, - { - Id: "10.0.0.2:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 5}, - }, - }, - { - Id: "10.0.0.3:8080", // Had old EC shards from previous failed attempt - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 3}, - }, - }, - { - Id: "10.0.0.4:8080", // Had old EC shards from previous failed attempt - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 7}, - }, - }, - { - Id: "10.0.0.5:8080", // New EC destination - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 20}, - }, - }, - { - Id: "10.0.0.6:8080", // New EC destination - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 25}, - }, - }, - }, - }, - }, - }, - }, - }) - - // Test: EC operation that needs to clean up both volume replicas AND old EC shards - volumeID := uint32(400) - originalVolumeSize := int64(1024 * 1024 * 1024) // 1GB - - // Create source specs: volume replicas + old EC shard locations - sources := []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, // Volume replica 1 - {ServerID: "10.0.0.2:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, // Volume replica 2 - {ServerID: "10.0.0.3:8080", DiskID: 0, CleanupType: CleanupECShards}, // Old EC shards from failed attempt - {ServerID: "10.0.0.4:8080", DiskID: 0, CleanupType: CleanupECShards}, // Old EC shards from failed attempt - } - - // EC destinations (new complete set of shards) - shardDestinations := []string{ - "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", // 5 shards - "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", "10.0.0.5:8080", // 4 more shards (9 total) - "10.0.0.6:8080", "10.0.0.6:8080", "10.0.0.6:8080", "10.0.0.6:8080", "10.0.0.6:8080", // 5 shards - } - shardDiskIDs := make([]uint32, len(shardDestinations)) - for i := range shardDiskIDs { - shardDiskIDs[i] = 0 - } - - expectedShardSize := int64(50 * 1024 * 1024) // 50MB per shard - - // Create destination specs - destinations := make([]TaskDestinationSpec, len(shardDestinations)) - shardImpact := CalculateECShardStorageImpact(1, expectedShardSize) - for i, dest := range shardDestinations { - destinations[i] = TaskDestinationSpec{ - ServerID: dest, - DiskID: shardDiskIDs[i], - StorageImpact: &shardImpact, - EstimatedSize: &expectedShardSize, - } - } - - // Create EC task that cleans up both volume replicas and old EC shards - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "ec_cleanup", - TaskType: TaskTypeErasureCoding, - VolumeID: volumeID, - VolumeSize: originalVolumeSize, - Sources: sources, - Destinations: destinations, - }) - assert.NoError(t, err, "Should successfully create EC task with mixed cleanup types") - - // Verify capacity impact on volume replica sources (zero impact for EC) - for i := 0; i < 2; i++ { - source := sources[i] - plannedVol, _, plannedShard, _, _ := testGetDiskStorageImpact(activeTopology, source.ServerID, source.DiskID) - assert.Equal(t, int64(0), plannedVol, fmt.Sprintf("Volume replica source %d should have zero volume slot impact", i+1)) - assert.Equal(t, int32(0), plannedShard, fmt.Sprintf("Volume replica source %d should have zero shard slot impact", i+1)) - // Note: EstimatedSize tracking is no longer exposed via public API - } - - // Verify capacity impact on old EC shard sources (should free shard slots) - for i := 2; i < 4; i++ { - source := sources[i] - plannedVol, _, plannedShard, _, _ := testGetDiskStorageImpact(activeTopology, source.ServerID, source.DiskID) - assert.Equal(t, int64(0), plannedVol, fmt.Sprintf("EC shard source %d should have zero volume slot impact", i+1)) - assert.Equal(t, int32(-erasure_coding.TotalShardsCount), plannedShard, fmt.Sprintf("EC shard source %d should free %d shard slots", i+1, erasure_coding.TotalShardsCount)) - // Note: EstimatedSize tracking is no longer exposed via public API - } - - // Verify capacity impact on new EC destinations - destPlan5, _, destShard5, _, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.5:8080", 0) - destPlan6, _, destShard6, _, _ := testGetDiskStorageImpact(activeTopology, "10.0.0.6:8080", 0) - - assert.Equal(t, int64(0), destPlan5, "New EC destination 5 should have no planned volume slots") - assert.Equal(t, int32(9), destShard5, "New EC destination 5 should plan to receive 9 shards") - assert.Equal(t, int64(0), destPlan6, "New EC destination 6 should have no planned volume slots") - assert.Equal(t, int32(5), destShard6, "New EC destination 6 should plan to receive 5 shards") - - // Verify effective capacity calculation shows proper impact - capacity3 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.3:8080", 0) // Freeing old EC shards - capacity4 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.4:8080", 0) // Freeing old EC shards - capacity5 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.5:8080", 0) // Receiving new EC shards - capacity6 := activeTopology.GetEffectiveAvailableCapacity("10.0.0.6:8080", 0) // Receiving new EC shards - - // Servers freeing old EC shards should have INCREASED capacity (freed shard slots provide capacity) - assert.Equal(t, int64(98), capacity3, fmt.Sprintf("Server 3: 100 - 3 (current) + 1 (freeing %d shards) = 98", erasure_coding.TotalShardsCount)) - assert.Equal(t, int64(94), capacity4, fmt.Sprintf("Server 4: 100 - 7 (current) + 1 (freeing %d shards) = 94", erasure_coding.TotalShardsCount)) - - // Servers receiving new EC shards should have slightly reduced capacity - server5ShardImpact := int64(9 / erasure_coding.DataShardsCount) // 9 shards impact - server6ShardImpact := int64(5 / erasure_coding.DataShardsCount) // 5 shards impact - - assert.Equal(t, int64(80-server5ShardImpact), capacity5, fmt.Sprintf("Server 5: 100 - 20 (current) - %d (9 shards/%d = %d impact) = %d", server5ShardImpact, erasure_coding.DataShardsCount, server5ShardImpact, 80-server5ShardImpact)) - assert.Equal(t, int64(75-server6ShardImpact), capacity6, fmt.Sprintf("Server 6: 100 - 25 (current) - %d (5 shards/%d = %d impact) = %d", server6ShardImpact, erasure_coding.DataShardsCount, server6ShardImpact, 75-server6ShardImpact)) - - t.Logf("EC operation with cleanup: %d volume replicas + %d old EC shard locations โ†’ %d new EC shards", - 2, 2, len(shardDestinations)) - t.Logf("Volume sources have zero impact, old EC shard sources free capacity, new destinations consume shard slots") -} - -// TestDetailedCapacityCalculations tests the new StorageSlotChange-based capacity calculation functions -func TestDetailedCapacityCalculations(t *testing.T) { - activeTopology := NewActiveTopology(10) - - // Setup cluster - activeTopology.UpdateTopology(&master_pb.TopologyInfo{ - DataCenterInfos: []*master_pb.DataCenterInfo{ - { - Id: "dc1", - RackInfos: []*master_pb.RackInfo{ - { - Id: "rack1", - DataNodeInfos: []*master_pb.DataNodeInfo{ - { - Id: "10.0.0.1:8080", - DiskInfos: map[string]*master_pb.DiskInfo{ - "0": {DiskId: 0, Type: "hdd", MaxVolumeCount: 100, VolumeCount: 20}, - }, - }, - }, - }, - }, - }, - }, - }) - - // Test: Add an EC task and check detailed capacity - sources := []TaskSourceSpec{ - {ServerID: "10.0.0.1:8080", DiskID: 0, CleanupType: CleanupVolumeReplica}, - } - - shardDestinations := []string{"10.0.0.1:8080", "10.0.0.1:8080", "10.0.0.1:8080", "10.0.0.1:8080", "10.0.0.1:8080"} - shardDiskIDs := []uint32{0, 0, 0, 0, 0} - - // Create destination specs - destinations := make([]TaskDestinationSpec, len(shardDestinations)) - expectedShardSize := int64(50 * 1024 * 1024) - shardImpact := CalculateECShardStorageImpact(1, expectedShardSize) - for i, dest := range shardDestinations { - destinations[i] = TaskDestinationSpec{ - ServerID: dest, - DiskID: shardDiskIDs[i], - StorageImpact: &shardImpact, - EstimatedSize: &expectedShardSize, - } - } - - err := activeTopology.AddPendingTask(TaskSpec{ - TaskID: "detailed_test", - TaskType: TaskTypeErasureCoding, - VolumeID: 500, - VolumeSize: 1024 * 1024 * 1024, - Sources: sources, - Destinations: destinations, - }) - assert.NoError(t, err, "Should add EC task successfully") - - // Test the new detailed capacity function - detailedCapacity := activeTopology.GetEffectiveAvailableCapacityDetailed("10.0.0.1:8080", 0) - simpleCapacity := activeTopology.GetEffectiveAvailableCapacity("10.0.0.1:8080", 0) - - // The simple capacity should match the volume slots from detailed capacity - assert.Equal(t, int64(detailedCapacity.VolumeSlots), simpleCapacity, "Simple capacity should match detailed volume slots") - - // Verify detailed capacity has both volume and shard information - assert.Equal(t, int32(80), detailedCapacity.VolumeSlots, "Should have 80 available volume slots (100 - 20 current, no volume impact from EC)") - assert.Equal(t, int32(-5), detailedCapacity.ShardSlots, "Should show -5 available shard slots (5 destination shards)") - - // Verify capacity impact - capacityImpact := activeTopology.GetEffectiveCapacityImpact("10.0.0.1:8080", 0) - assert.Equal(t, int32(0), capacityImpact.VolumeSlots, "EC source should have zero volume slot impact") - assert.Equal(t, int32(5), capacityImpact.ShardSlots, "Should have positive shard slot impact (consuming 5 shards)") - - t.Logf("Detailed capacity calculation: VolumeSlots=%d, ShardSlots=%d", - detailedCapacity.VolumeSlots, detailedCapacity.ShardSlots) - t.Logf("Capacity impact: VolumeSlots=%d, ShardSlots=%d", - capacityImpact.VolumeSlots, capacityImpact.ShardSlots) - t.Logf("Simple capacity (backward compatible): %d", simpleCapacity) -} - -// TestStorageSlotChangeConversions tests the conversion and accommodation methods for StorageSlotChange -// This test is designed to work with any value of erasure_coding.DataShardsCount, making it -// compatible with custom erasure coding configurations. -func TestStorageSlotChangeConversions(t *testing.T) { - // Get the actual erasure coding constants for dynamic testing - dataShards := int32(erasure_coding.DataShardsCount) - - // Test conversion constants - assert.Equal(t, int(dataShards), ShardsPerVolumeSlot, fmt.Sprintf("Should use erasure_coding.DataShardsCount (%d) shards per volume slot", dataShards)) - - // Test basic conversions using dynamic values - volumeOnly := StorageSlotChange{VolumeSlots: 5, ShardSlots: 0} - shardOnly := StorageSlotChange{VolumeSlots: 0, ShardSlots: 2 * dataShards} // 2 volume equivalents in shards - mixed := StorageSlotChange{VolumeSlots: 2, ShardSlots: dataShards + 5} // 2 volumes + 1.5 volume equivalent in shards - - // Test ToVolumeSlots conversion - these should work regardless of DataShardsCount value - assert.Equal(t, int64(5), volumeOnly.ToVolumeSlots(), "5 volume slots = 5 volume slots") - assert.Equal(t, int64(2), shardOnly.ToVolumeSlots(), fmt.Sprintf("%d shard slots = 2 volume slots", 2*dataShards)) - expectedMixedVolumes := int64(2 + (dataShards+5)/dataShards) // 2 + floor((DataShardsCount+5)/DataShardsCount) - assert.Equal(t, expectedMixedVolumes, mixed.ToVolumeSlots(), fmt.Sprintf("2 volume + %d shards = %d volume slots", dataShards+5, expectedMixedVolumes)) - - // Test ToShardSlots conversion - expectedVolumeShards := int32(5 * dataShards) - assert.Equal(t, expectedVolumeShards, volumeOnly.ToShardSlots(), fmt.Sprintf("5 volume slots = %d shard slots", expectedVolumeShards)) - assert.Equal(t, 2*dataShards, shardOnly.ToShardSlots(), fmt.Sprintf("%d shard slots = %d shard slots", 2*dataShards, 2*dataShards)) - expectedMixedShards := int32(2*dataShards + dataShards + 5) - assert.Equal(t, expectedMixedShards, mixed.ToShardSlots(), fmt.Sprintf("2 volume + %d shards = %d shard slots", dataShards+5, expectedMixedShards)) - - // Test capacity accommodation checks using shard-based comparison - availableVolumes := int32(10) - available := StorageSlotChange{VolumeSlots: availableVolumes, ShardSlots: 0} // availableVolumes * dataShards shard slots available - - smallVolumeRequest := StorageSlotChange{VolumeSlots: 3, ShardSlots: 0} // Needs 3 * dataShards shard slots - largeVolumeRequest := StorageSlotChange{VolumeSlots: availableVolumes + 5, ShardSlots: 0} // Needs more than available - shardRequest := StorageSlotChange{VolumeSlots: 0, ShardSlots: 5 * dataShards} // Needs 5 volume equivalents in shards - mixedRequest := StorageSlotChange{VolumeSlots: 8, ShardSlots: 3 * dataShards} // Needs 11 volume equivalents total - - smallShardsNeeded := 3 * dataShards - availableShards := availableVolumes * dataShards - largeShardsNeeded := (availableVolumes + 5) * dataShards - shardShardsNeeded := 5 * dataShards - mixedShardsNeeded := 8*dataShards + 3*dataShards - - assert.True(t, available.CanAccommodate(smallVolumeRequest), fmt.Sprintf("Should accommodate small volume request (%d <= %d shards)", smallShardsNeeded, availableShards)) - assert.False(t, available.CanAccommodate(largeVolumeRequest), fmt.Sprintf("Should NOT accommodate large volume request (%d > %d shards)", largeShardsNeeded, availableShards)) - assert.True(t, available.CanAccommodate(shardRequest), fmt.Sprintf("Should accommodate shard request (%d <= %d shards)", shardShardsNeeded, availableShards)) - assert.False(t, available.CanAccommodate(mixedRequest), fmt.Sprintf("Should NOT accommodate mixed request (%d > %d shards)", mixedShardsNeeded, availableShards)) - - t.Logf("Conversion tests passed: %d shards = 1 volume slot", ShardsPerVolumeSlot) - t.Logf("Mixed capacity (%d volumes + %d shards) = %d equivalent volume slots", - mixed.VolumeSlots, mixed.ShardSlots, mixed.ToVolumeSlots()) - t.Logf("Available capacity (%d volumes) = %d total shard slots", - available.VolumeSlots, available.ToShardSlots()) - t.Logf("NOTE: This test adapts automatically to erasure_coding.DataShardsCount = %d", erasure_coding.DataShardsCount) -} diff --git a/weed/admin/topology/structs.go b/weed/admin/topology/structs.go deleted file mode 100644 index 103ee5abe..000000000 --- a/weed/admin/topology/structs.go +++ /dev/null @@ -1,121 +0,0 @@ -package topology - -import ( - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// TaskSource represents a single source in a multi-source task (for replicated volume cleanup) -type TaskSource struct { - SourceServer string `json:"source_server"` - SourceDisk uint32 `json:"source_disk"` - StorageChange StorageSlotChange `json:"storage_change"` // Storage impact on this source - EstimatedSize int64 `json:"estimated_size"` // Estimated size for this source -} - -// TaskDestination represents a single destination in a multi-destination task -type TaskDestination struct { - TargetServer string `json:"target_server"` - TargetDisk uint32 `json:"target_disk"` - StorageChange StorageSlotChange `json:"storage_change"` // Storage impact on this destination - EstimatedSize int64 `json:"estimated_size"` // Estimated size for this destination -} - -// taskState represents the current state of tasks affecting the topology (internal) -// Uses unified multi-source/multi-destination design: -// - Single-source tasks (balance, vacuum, replication): 1 source, 1 destination -// - Multi-source EC tasks (replicated volumes): N sources, M destinations -type taskState struct { - VolumeID uint32 `json:"volume_id"` - TaskType TaskType `json:"task_type"` - Status TaskStatus `json:"status"` - StartedAt time.Time `json:"started_at"` - CompletedAt time.Time `json:"completed_at,omitempty"` - EstimatedSize int64 `json:"estimated_size"` // Total estimated size of task - - // Unified source and destination arrays (always used) - Sources []TaskSource `json:"sources"` // Source locations (1+ for all task types) - Destinations []TaskDestination `json:"destinations"` // Destination locations (1+ for all task types) -} - -// DiskInfo represents a disk with its current state and ongoing tasks (public for external access) -type DiskInfo struct { - NodeID string `json:"node_id"` - DiskID uint32 `json:"disk_id"` - DiskType string `json:"disk_type"` - DataCenter string `json:"data_center"` - Rack string `json:"rack"` - DiskInfo *master_pb.DiskInfo `json:"disk_info"` - LoadCount int `json:"load_count"` // Number of active tasks -} - -// activeDisk represents internal disk state (private) -type activeDisk struct { - *DiskInfo - pendingTasks []*taskState - assignedTasks []*taskState - recentTasks []*taskState // Completed in last N seconds -} - -// activeNode represents a node with its disks (private) -type activeNode struct { - nodeID string - dataCenter string - rack string - nodeInfo *master_pb.DataNodeInfo - disks map[uint32]*activeDisk // DiskID -> activeDisk -} - -// ActiveTopology provides a real-time view of cluster state with task awareness -type ActiveTopology struct { - // Core topology from master - topologyInfo *master_pb.TopologyInfo - lastUpdated time.Time - - // Structured topology for easy access (private) - nodes map[string]*activeNode // NodeID -> activeNode - disks map[string]*activeDisk // "NodeID:DiskID" -> activeDisk - - // Performance indexes for O(1) lookups (private) - volumeIndex map[uint32][]string // VolumeID -> list of "NodeID:DiskID" where volume replicas exist - ecShardIndex map[uint32][]string // VolumeID -> list of "NodeID:DiskID" where EC shards exist - - // Task states affecting the topology (private) - pendingTasks map[string]*taskState - assignedTasks map[string]*taskState - recentTasks map[string]*taskState - - // Configuration - recentTaskWindowSeconds int - - // Synchronization - mutex sync.RWMutex -} - -// DestinationPlan represents a planned destination for a volume/shard operation -type DestinationPlan struct { - TargetNode string `json:"target_node"` - TargetDisk uint32 `json:"target_disk"` - TargetRack string `json:"target_rack"` - TargetDC string `json:"target_dc"` - ExpectedSize uint64 `json:"expected_size"` - PlacementScore float64 `json:"placement_score"` -} - -// MultiDestinationPlan represents multiple planned destinations for operations like EC -type MultiDestinationPlan struct { - Plans []*DestinationPlan `json:"plans"` - TotalShards int `json:"total_shards"` - SuccessfulRack int `json:"successful_racks"` - SuccessfulDCs int `json:"successful_dcs"` -} - -// VolumeReplica represents a replica location with server and disk information -type VolumeReplica struct { - ServerID string `json:"server_id"` - DiskID uint32 `json:"disk_id"` - DataCenter string `json:"data_center"` - Rack string `json:"rack"` -} diff --git a/weed/admin/topology/task_management.go b/weed/admin/topology/task_management.go deleted file mode 100644 index ada60248b..000000000 --- a/weed/admin/topology/task_management.go +++ /dev/null @@ -1,259 +0,0 @@ -package topology - -import ( - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// AssignTask moves a task from pending to assigned and reserves capacity -func (at *ActiveTopology) AssignTask(taskID string) error { - at.mutex.Lock() - defer at.mutex.Unlock() - - task, exists := at.pendingTasks[taskID] - if !exists { - return fmt.Errorf("pending task %s not found", taskID) - } - - // Check if all destination disks have sufficient capacity to reserve - for _, dest := range task.Destinations { - targetKey := fmt.Sprintf("%s:%d", dest.TargetServer, dest.TargetDisk) - if targetDisk, exists := at.disks[targetKey]; exists { - availableCapacity := at.getEffectiveAvailableCapacityUnsafe(targetDisk) - - // Check if we have enough total capacity using the improved unified comparison - if !availableCapacity.CanAccommodate(dest.StorageChange) { - return fmt.Errorf("insufficient capacity on target disk %s:%d. Available: %+v, Required: %+v", - dest.TargetServer, dest.TargetDisk, availableCapacity, dest.StorageChange) - } - } else if dest.TargetServer != "" { - // Fail fast if destination disk is not found in topology - return fmt.Errorf("destination disk %s not found in topology", targetKey) - } - } - - // Move task to assigned and reserve capacity - delete(at.pendingTasks, taskID) - task.Status = TaskStatusInProgress - at.assignedTasks[taskID] = task - at.reassignTaskStates() - - // Log capacity reservation information for all sources and destinations - totalSourceImpact := StorageSlotChange{} - totalDestImpact := StorageSlotChange{} - for _, source := range task.Sources { - totalSourceImpact.AddInPlace(source.StorageChange) - } - for _, dest := range task.Destinations { - totalDestImpact.AddInPlace(dest.StorageChange) - } - - glog.V(2).Infof("Task %s assigned and capacity reserved: %d sources (VolumeSlots:%d, ShardSlots:%d), %d destinations (VolumeSlots:%d, ShardSlots:%d)", - taskID, len(task.Sources), totalSourceImpact.VolumeSlots, totalSourceImpact.ShardSlots, - len(task.Destinations), totalDestImpact.VolumeSlots, totalDestImpact.ShardSlots) - - return nil -} - -// CompleteTask moves a task from assigned to recent and releases reserved capacity -// NOTE: This only releases the reserved capacity. The actual topology update (VolumeCount changes) -// should be handled by the master when it receives the task completion notification. -func (at *ActiveTopology) CompleteTask(taskID string) error { - at.mutex.Lock() - defer at.mutex.Unlock() - - task, exists := at.assignedTasks[taskID] - if !exists { - return fmt.Errorf("assigned task %s not found", taskID) - } - - // Release reserved capacity by moving task to completed state - delete(at.assignedTasks, taskID) - task.Status = TaskStatusCompleted - task.CompletedAt = time.Now() - at.recentTasks[taskID] = task - at.reassignTaskStates() - - // Log capacity release information for all sources and destinations - totalSourceImpact := StorageSlotChange{} - totalDestImpact := StorageSlotChange{} - for _, source := range task.Sources { - totalSourceImpact.AddInPlace(source.StorageChange) - } - for _, dest := range task.Destinations { - totalDestImpact.AddInPlace(dest.StorageChange) - } - - glog.V(2).Infof("Task %s completed and capacity released: %d sources (VolumeSlots:%d, ShardSlots:%d), %d destinations (VolumeSlots:%d, ShardSlots:%d)", - taskID, len(task.Sources), totalSourceImpact.VolumeSlots, totalSourceImpact.ShardSlots, - len(task.Destinations), totalDestImpact.VolumeSlots, totalDestImpact.ShardSlots) - - // Clean up old recent tasks - at.cleanupRecentTasks() - - return nil -} - -// ApplyActualStorageChange updates the topology to reflect actual storage changes after task completion -// This should be called when the master updates the topology with new VolumeCount information -func (at *ActiveTopology) ApplyActualStorageChange(nodeID string, diskID uint32, volumeCountChange int64) { - at.mutex.Lock() - defer at.mutex.Unlock() - - diskKey := fmt.Sprintf("%s:%d", nodeID, diskID) - if disk, exists := at.disks[diskKey]; exists && disk.DiskInfo != nil && disk.DiskInfo.DiskInfo != nil { - oldCount := disk.DiskInfo.DiskInfo.VolumeCount - disk.DiskInfo.DiskInfo.VolumeCount += volumeCountChange - - glog.V(2).Infof("Applied actual storage change on disk %s: volume_count %d -> %d (change: %+d)", - diskKey, oldCount, disk.DiskInfo.DiskInfo.VolumeCount, volumeCountChange) - } -} - -// AddPendingTask is the unified function that handles both simple and complex task creation -func (at *ActiveTopology) AddPendingTask(spec TaskSpec) error { - // Validation - if len(spec.Sources) == 0 { - return fmt.Errorf("at least one source is required") - } - if len(spec.Destinations) == 0 { - return fmt.Errorf("at least one destination is required") - } - - at.mutex.Lock() - defer at.mutex.Unlock() - - // Build sources array - sources := make([]TaskSource, len(spec.Sources)) - for i, sourceSpec := range spec.Sources { - var storageImpact StorageSlotChange - var estimatedSize int64 - - if sourceSpec.StorageImpact != nil { - // Use manually specified impact - storageImpact = *sourceSpec.StorageImpact - } else { - // Auto-calculate based on task type and cleanup type - storageImpact = at.calculateSourceStorageImpact(spec.TaskType, sourceSpec.CleanupType, spec.VolumeSize) - } - - if sourceSpec.EstimatedSize != nil { - estimatedSize = *sourceSpec.EstimatedSize - } else { - estimatedSize = spec.VolumeSize // Default to volume size - } - - sources[i] = TaskSource{ - SourceServer: sourceSpec.ServerID, - SourceDisk: sourceSpec.DiskID, - StorageChange: storageImpact, - EstimatedSize: estimatedSize, - } - } - - // Build destinations array - destinations := make([]TaskDestination, len(spec.Destinations)) - for i, destSpec := range spec.Destinations { - var storageImpact StorageSlotChange - var estimatedSize int64 - - if destSpec.StorageImpact != nil { - // Use manually specified impact - storageImpact = *destSpec.StorageImpact - } else { - // Auto-calculate based on task type - _, storageImpact = CalculateTaskStorageImpact(spec.TaskType, spec.VolumeSize) - } - - if destSpec.EstimatedSize != nil { - estimatedSize = *destSpec.EstimatedSize - } else { - estimatedSize = spec.VolumeSize // Default to volume size - } - - destinations[i] = TaskDestination{ - TargetServer: destSpec.ServerID, - TargetDisk: destSpec.DiskID, - StorageChange: storageImpact, - EstimatedSize: estimatedSize, - } - } - - // Create the task - task := &taskState{ - VolumeID: spec.VolumeID, - TaskType: spec.TaskType, - Status: TaskStatusPending, - StartedAt: time.Now(), - EstimatedSize: spec.VolumeSize, - Sources: sources, - Destinations: destinations, - } - - at.pendingTasks[spec.TaskID] = task - at.assignTaskToDisk(task) - - glog.V(2).Infof("Added pending %s task %s: volume %d, %d sources, %d destinations", - spec.TaskType, spec.TaskID, spec.VolumeID, len(sources), len(destinations)) - - return nil -} - -// calculateSourceStorageImpact calculates storage impact for sources based on task type and cleanup type -func (at *ActiveTopology) calculateSourceStorageImpact(taskType TaskType, cleanupType SourceCleanupType, volumeSize int64) StorageSlotChange { - switch taskType { - case TaskTypeErasureCoding: - switch cleanupType { - case CleanupVolumeReplica: - impact, _ := CalculateTaskStorageImpact(TaskTypeErasureCoding, volumeSize) - return impact - case CleanupECShards: - return CalculateECShardCleanupImpact(volumeSize) - default: - impact, _ := CalculateTaskStorageImpact(TaskTypeErasureCoding, volumeSize) - return impact - } - default: - impact, _ := CalculateTaskStorageImpact(taskType, volumeSize) - return impact - } -} - -// SourceCleanupType indicates what type of data needs to be cleaned up from a source -type SourceCleanupType int - -const ( - CleanupVolumeReplica SourceCleanupType = iota // Clean up volume replica (frees volume slots) - CleanupECShards // Clean up existing EC shards (frees shard slots) -) - -// TaskSourceSpec represents a source specification for task creation -type TaskSourceSpec struct { - ServerID string - DiskID uint32 - DataCenter string // Data center of the source server - Rack string // Rack of the source server - CleanupType SourceCleanupType // For EC: volume replica vs existing shards - StorageImpact *StorageSlotChange // Optional: manual override - EstimatedSize *int64 // Optional: manual override -} - -// TaskDestinationSpec represents a destination specification for task creation -type TaskDestinationSpec struct { - ServerID string - DiskID uint32 - StorageImpact *StorageSlotChange // Optional: manual override - EstimatedSize *int64 // Optional: manual override -} - -// TaskSpec represents a complete task specification -type TaskSpec struct { - TaskID string - TaskType TaskType - VolumeID uint32 - VolumeSize int64 // Used for auto-calculation when manual impacts not provided - Sources []TaskSourceSpec // Can be single or multiple - Destinations []TaskDestinationSpec // Can be single or multiple -} diff --git a/weed/admin/topology/topology_management.go b/weed/admin/topology/topology_management.go deleted file mode 100644 index 65b7dfe7e..000000000 --- a/weed/admin/topology/topology_management.go +++ /dev/null @@ -1,257 +0,0 @@ -package topology - -import ( - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -// UpdateTopology updates the topology information from master -func (at *ActiveTopology) UpdateTopology(topologyInfo *master_pb.TopologyInfo) error { - at.mutex.Lock() - defer at.mutex.Unlock() - - at.topologyInfo = topologyInfo - at.lastUpdated = time.Now() - - // Rebuild structured topology - at.nodes = make(map[string]*activeNode) - at.disks = make(map[string]*activeDisk) - - for _, dc := range topologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, nodeInfo := range rack.DataNodeInfos { - node := &activeNode{ - nodeID: nodeInfo.Id, - dataCenter: dc.Id, - rack: rack.Id, - nodeInfo: nodeInfo, - disks: make(map[uint32]*activeDisk), - } - - // Add disks for this node - for diskType, diskInfo := range nodeInfo.DiskInfos { - disk := &activeDisk{ - DiskInfo: &DiskInfo{ - NodeID: nodeInfo.Id, - DiskID: diskInfo.DiskId, - DiskType: diskType, - DataCenter: dc.Id, - Rack: rack.Id, - DiskInfo: diskInfo, - }, - } - - diskKey := fmt.Sprintf("%s:%d", nodeInfo.Id, diskInfo.DiskId) - node.disks[diskInfo.DiskId] = disk - at.disks[diskKey] = disk - } - - at.nodes[nodeInfo.Id] = node - } - } - } - - // Rebuild performance indexes for O(1) lookups - at.rebuildIndexes() - - // Reassign task states to updated topology - at.reassignTaskStates() - - glog.V(1).Infof("ActiveTopology updated: %d nodes, %d disks, %d volume entries, %d EC shard entries", - len(at.nodes), len(at.disks), len(at.volumeIndex), len(at.ecShardIndex)) - return nil -} - -// GetAvailableDisks returns disks that can accept new tasks of the given type -// NOTE: For capacity-aware operations, prefer GetDisksWithEffectiveCapacity -func (at *ActiveTopology) GetAvailableDisks(taskType TaskType, excludeNodeID string) []*DiskInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - - var available []*DiskInfo - - for _, disk := range at.disks { - if disk.NodeID == excludeNodeID { - continue // Skip excluded node - } - - if at.isDiskAvailable(disk, taskType) { - // Create a copy with current load count and effective capacity - diskCopy := *disk.DiskInfo - diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks) - available = append(available, &diskCopy) - } - } - - return available -} - -// HasRecentTaskForVolume checks if a volume had a recent task (to avoid immediate re-detection) -func (at *ActiveTopology) HasRecentTaskForVolume(volumeID uint32, taskType TaskType) bool { - at.mutex.RLock() - defer at.mutex.RUnlock() - - for _, task := range at.recentTasks { - if task.VolumeID == volumeID && task.TaskType == taskType { - return true - } - } - - return false -} - -// GetAllNodes returns information about all nodes (public interface) -func (at *ActiveTopology) GetAllNodes() map[string]*master_pb.DataNodeInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - - result := make(map[string]*master_pb.DataNodeInfo) - for nodeID, node := range at.nodes { - result[nodeID] = node.nodeInfo - } - return result -} - -// GetTopologyInfo returns the current topology information (read-only access) -func (at *ActiveTopology) GetTopologyInfo() *master_pb.TopologyInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - return at.topologyInfo -} - -// GetNodeDisks returns all disks for a specific node -func (at *ActiveTopology) GetNodeDisks(nodeID string) []*DiskInfo { - at.mutex.RLock() - defer at.mutex.RUnlock() - - node, exists := at.nodes[nodeID] - if !exists { - return nil - } - - var disks []*DiskInfo - for _, disk := range node.disks { - diskCopy := *disk.DiskInfo - diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks) - disks = append(disks, &diskCopy) - } - - return disks -} - -// rebuildIndexes rebuilds the volume and EC shard indexes for O(1) lookups -func (at *ActiveTopology) rebuildIndexes() { - // Clear existing indexes - at.volumeIndex = make(map[uint32][]string) - at.ecShardIndex = make(map[uint32][]string) - - // Rebuild indexes from current topology - for _, dc := range at.topologyInfo.DataCenterInfos { - for _, rack := range dc.RackInfos { - for _, nodeInfo := range rack.DataNodeInfos { - for _, diskInfo := range nodeInfo.DiskInfos { - diskKey := fmt.Sprintf("%s:%d", nodeInfo.Id, diskInfo.DiskId) - - // Index volumes - for _, volumeInfo := range diskInfo.VolumeInfos { - volumeID := volumeInfo.Id - at.volumeIndex[volumeID] = append(at.volumeIndex[volumeID], diskKey) - } - - // Index EC shards - for _, ecShardInfo := range diskInfo.EcShardInfos { - volumeID := ecShardInfo.Id - at.ecShardIndex[volumeID] = append(at.ecShardIndex[volumeID], diskKey) - } - } - } - } - } -} - -// GetVolumeLocations returns the disk locations for a volume using O(1) lookup -func (at *ActiveTopology) GetVolumeLocations(volumeID uint32, collection string) []VolumeReplica { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKeys, exists := at.volumeIndex[volumeID] - if !exists { - return []VolumeReplica{} - } - - var replicas []VolumeReplica - for _, diskKey := range diskKeys { - if disk, diskExists := at.disks[diskKey]; diskExists { - // Verify collection matches (since index doesn't include collection) - if at.volumeMatchesCollection(disk, volumeID, collection) { - replicas = append(replicas, VolumeReplica{ - ServerID: disk.NodeID, - DiskID: disk.DiskID, - DataCenter: disk.DataCenter, - Rack: disk.Rack, - }) - } - } - } - - return replicas -} - -// GetECShardLocations returns the disk locations for EC shards using O(1) lookup -func (at *ActiveTopology) GetECShardLocations(volumeID uint32, collection string) []VolumeReplica { - at.mutex.RLock() - defer at.mutex.RUnlock() - - diskKeys, exists := at.ecShardIndex[volumeID] - if !exists { - return []VolumeReplica{} - } - - var ecShards []VolumeReplica - for _, diskKey := range diskKeys { - if disk, diskExists := at.disks[diskKey]; diskExists { - // Verify collection matches (since index doesn't include collection) - if at.ecShardMatchesCollection(disk, volumeID, collection) { - ecShards = append(ecShards, VolumeReplica{ - ServerID: disk.NodeID, - DiskID: disk.DiskID, - DataCenter: disk.DataCenter, - Rack: disk.Rack, - }) - } - } - } - - return ecShards -} - -// volumeMatchesCollection checks if a volume on a disk matches the given collection -func (at *ActiveTopology) volumeMatchesCollection(disk *activeDisk, volumeID uint32, collection string) bool { - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return false - } - - for _, volumeInfo := range disk.DiskInfo.DiskInfo.VolumeInfos { - if volumeInfo.Id == volumeID && volumeInfo.Collection == collection { - return true - } - } - return false -} - -// ecShardMatchesCollection checks if EC shards on a disk match the given collection -func (at *ActiveTopology) ecShardMatchesCollection(disk *activeDisk, volumeID uint32, collection string) bool { - if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil { - return false - } - - for _, ecShardInfo := range disk.DiskInfo.DiskInfo.EcShardInfos { - if ecShardInfo.Id == volumeID && ecShardInfo.Collection == collection { - return true - } - } - return false -} diff --git a/weed/admin/topology/types.go b/weed/admin/topology/types.go deleted file mode 100644 index df0103529..000000000 --- a/weed/admin/topology/types.go +++ /dev/null @@ -1,97 +0,0 @@ -package topology - -import "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" - -// TaskType represents different types of maintenance operations -type TaskType string - -// TaskStatus represents the current status of a task -type TaskStatus string - -// Common task type constants -const ( - TaskTypeVacuum TaskType = "vacuum" - TaskTypeBalance TaskType = "balance" - TaskTypeErasureCoding TaskType = "erasure_coding" - TaskTypeReplication TaskType = "replication" -) - -// Common task status constants -const ( - TaskStatusPending TaskStatus = "pending" - TaskStatusInProgress TaskStatus = "in_progress" - TaskStatusCompleted TaskStatus = "completed" -) - -// Task and capacity management configuration constants -const ( - // MaxConcurrentTasksPerDisk defines the maximum number of concurrent tasks per disk - // This prevents overloading a single disk with too many simultaneous operations - MaxConcurrentTasksPerDisk = 2 - - // MaxTotalTaskLoadPerDisk defines the maximum total task load (pending + active) per disk - // This allows more tasks to be queued but limits the total pipeline depth - MaxTotalTaskLoadPerDisk = 3 - - // MaxTaskLoadForECPlacement defines the maximum task load to consider a disk for EC placement - // This threshold ensures disks aren't overloaded when planning EC operations - MaxTaskLoadForECPlacement = 10 -) - -// StorageSlotChange represents storage impact at both volume and shard levels -type StorageSlotChange struct { - VolumeSlots int32 `json:"volume_slots"` // Volume-level slot changes (full volumes) - ShardSlots int32 `json:"shard_slots"` // Shard-level slot changes (EC shards, fractional capacity) -} - -// Add returns a new StorageSlotChange with the sum of this and other -func (s StorageSlotChange) Add(other StorageSlotChange) StorageSlotChange { - return StorageSlotChange{ - VolumeSlots: s.VolumeSlots + other.VolumeSlots, - ShardSlots: s.ShardSlots + other.ShardSlots, - } -} - -// Subtract returns a new StorageSlotChange with other subtracted from this -func (s StorageSlotChange) Subtract(other StorageSlotChange) StorageSlotChange { - return StorageSlotChange{ - VolumeSlots: s.VolumeSlots - other.VolumeSlots, - ShardSlots: s.ShardSlots - other.ShardSlots, - } -} - -// AddInPlace adds other to this StorageSlotChange in-place -func (s *StorageSlotChange) AddInPlace(other StorageSlotChange) { - s.VolumeSlots += other.VolumeSlots - s.ShardSlots += other.ShardSlots -} - -// SubtractInPlace subtracts other from this StorageSlotChange in-place -func (s *StorageSlotChange) SubtractInPlace(other StorageSlotChange) { - s.VolumeSlots -= other.VolumeSlots - s.ShardSlots -= other.ShardSlots -} - -// IsZero returns true if both VolumeSlots and ShardSlots are zero -func (s StorageSlotChange) IsZero() bool { - return s.VolumeSlots == 0 && s.ShardSlots == 0 -} - -// ShardsPerVolumeSlot defines how many EC shards are equivalent to one volume slot -const ShardsPerVolumeSlot = erasure_coding.DataShardsCount - -// ToVolumeSlots converts the entire StorageSlotChange to equivalent volume slots -func (s StorageSlotChange) ToVolumeSlots() int64 { - return int64(s.VolumeSlots) + int64(s.ShardSlots)/ShardsPerVolumeSlot -} - -// ToShardSlots converts the entire StorageSlotChange to equivalent shard slots -func (s StorageSlotChange) ToShardSlots() int32 { - return s.ShardSlots + s.VolumeSlots*ShardsPerVolumeSlot -} - -// CanAccommodate checks if this StorageSlotChange can accommodate the required StorageSlotChange -// Both are converted to shard slots for a more precise comparison -func (s StorageSlotChange) CanAccommodate(required StorageSlotChange) bool { - return s.ToShardSlots() >= required.ToShardSlots() -} diff --git a/weed/admin/view/app/admin.templ b/weed/admin/view/app/admin.templ deleted file mode 100644 index 568db59d7..000000000 --- a/weed/admin/view/app/admin.templ +++ /dev/null @@ -1,392 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ Admin(data dash.AdminData) { -
-

- Dashboard -

- -
- -
- -
-
-
-
-
-
-
- Total Volumes -
-
- {fmt.Sprintf("%d", data.TotalVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Files -
-
- {formatNumber(data.TotalFiles)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Size -
-
- {formatBytes(data.TotalSize)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Volume Size Limit -
-
- {fmt.Sprintf("%d MB", data.VolumeSizeLimitMB)} -
-
-
- -
-
-
-
-
-
- - -
-
-
-
-
-
-
- EC Volumes -
-
- {fmt.Sprintf("%d", data.TotalEcVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- EC Shards -
-
- {fmt.Sprintf("%d", data.TotalEcShards)} -
-
-
- -
-
-
-
-
- - -
-
-
- - -
-
-
-
-
- Master Nodes -
-
-
-
- - - - - - - - - for _, master := range data.MasterNodes { - - - - - } - -
AddressRole
{master.Address} - if master.IsLeader { - Leader - } else { - Follower - } -
-
-
-
-
- - -
-
-
-
- Cluster -
-
-
-
-
-
-
-
{fmt.Sprintf("%d", len(data.MasterNodes))}
- Masters -
-
-
-
-
-
-
{fmt.Sprintf("%d", len(data.VolumeServers))}
- Volume Servers -
-
-
-
-
-
-
{fmt.Sprintf("%d", len(data.FilerNodes))}
- Filers -
-
-
-
-
-
-
{fmt.Sprintf("%d", len(data.MessageBrokers))}
- Message Brokers -
-
-
-
-
-
-
-
- - -
-
-
-
-
- Volume Servers -
- -
-
-
- - - - - - - - - - - - - - for _, vs := range data.VolumeServers { - - - - - - - - - - } - if len(data.VolumeServers) == 0 { - - - - } - -
IDAddressData CenterRackVolumesEC ShardsCapacity
{vs.ID} - - {vs.Address} - - - {vs.DataCenter}{vs.Rack} -
-
- {fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)} -
-
-
- if vs.EcShards > 0 { - {fmt.Sprintf("%d", vs.EcShards)} - if vs.EcVolumes > 0 { - ({fmt.Sprintf("%d vol", vs.EcVolumes)}) - } - } else { - - - } - {formatBytes(vs.DiskUsage)} / {formatBytes(vs.DiskCapacity)}
- - No volume servers found -
-
-
-
-
-
- - -
-
-
-
-
- Filer Nodes -
- -
-
-
- - - - - - - - - - - for _, filer := range data.FilerNodes { - - - - - - - } - if len(data.FilerNodes) == 0 { - - - - } - -
AddressData CenterRackLast Updated
- - {filer.Address} - - - {filer.DataCenter}{filer.Rack}{filer.LastUpdated.Format("2006-01-02 15:04:05")}
- - No filer nodes found -
-
-
-
-
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
-} \ No newline at end of file diff --git a/weed/admin/view/app/admin_templ.go b/weed/admin/view/app/admin_templ.go deleted file mode 100644 index 11b4c7271..000000000 --- a/weed/admin/view/app/admin_templ.go +++ /dev/null @@ -1,487 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func Admin(data dash.AdminData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "
Total Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 35, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Files
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(formatNumber(data.TotalFiles)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 55, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Total Size
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 75, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Volume Size Limit
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d MB", data.VolumeSizeLimitMB)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 95, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
EC Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 118, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
EC Shards
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 138, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Master Nodes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, master := range data.MasterNodes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
AddressRole
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 175, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if master.IsLeader { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Leader") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "Follower") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
Cluster
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 205, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
Masters
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 213, Col: 87} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Volume Servers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 221, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
Filers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MessageBrokers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 229, Col: 88} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Message Brokers
Volume Servers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, vs := range data.VolumeServers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.VolumeServers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
IDAddressData CenterRackVolumesEC ShardsCapacity
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 276, Col: 54} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 279, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 283, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 284, Col: 56} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 289, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if vs.EcShards > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", vs.EcShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 295, Col: 127} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if vs.EcVolumes > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "(") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d vol", vs.EcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 297, Col: 119} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, ")") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " / ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 303, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
No volume servers found
Filer Nodes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, filer := range data.FilerNodes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.FilerNodes) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
AddressData CenterRackLast Updated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 357, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 361, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 362, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 363, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "
No filer nodes found
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 387, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_brokers.templ b/weed/admin/view/app/cluster_brokers.templ deleted file mode 100644 index d80a14c69..000000000 --- a/weed/admin/view/app/cluster_brokers.templ +++ /dev/null @@ -1,144 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterBrokers(data dash.ClusterBrokersData) { -
-

- Message Brokers -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Message Brokers -
-
- { fmt.Sprintf("%d", data.TotalBrokers) } -
-
-
- -
-
-
-
-
-
- - -
-
-
- Message Brokers -
-
-
- if len(data.Brokers) > 0 { -
- - - - - - - - - - - - for _, broker := range data.Brokers { - - - - - - - - } - -
AddressVersionData CenterRackCreated At
- { broker.Address } - - { broker.Version } - - { broker.DataCenter } - - { broker.Rack } - - if !broker.CreatedAt.IsZero() { - { broker.CreatedAt.Format("2006-01-02 15:04:05") } - } else { - N/A - } -
-
- } else { -
- -
No Message Brokers Found
-

No message broker servers are currently available in the cluster.

-
- } -
-
- - -
-
- - - Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } - -
-
-
- - -} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_brokers_templ.go b/weed/admin/view/app/cluster_brokers_templ.go deleted file mode 100644 index 18b5b0c34..000000000 --- a/weed/admin/view/app/cluster_brokers_templ.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterBrokers(data dash.ClusterBrokersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Message Brokers

Total Message Brokers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalBrokers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 34, Col: 47} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Message Brokers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Brokers) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, broker := range data.Brokers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
AddressVersionData CenterRackCreated At
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(broker.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 70, Col: 27} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(broker.Version) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 73, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(broker.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 76, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(broker.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 79, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !broker.CreatedAt.IsZero() { - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(broker.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 83, Col: 60} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "N/A") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
No Message Brokers Found

No message broker servers are currently available in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_brokers.templ`, Line: 108, Col: 67} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_collections.templ b/weed/admin/view/app/cluster_collections.templ deleted file mode 100644 index d4765ea86..000000000 --- a/weed/admin/view/app/cluster_collections.templ +++ /dev/null @@ -1,449 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterCollections(data dash.ClusterCollectionsData) { -
-

- Cluster Collections -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Collections -
-
- {fmt.Sprintf("%d", data.TotalCollections)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Regular Volumes -
-
- {fmt.Sprintf("%d", data.TotalVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- EC Volumes -
-
- {fmt.Sprintf("%d", data.TotalEcVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Files -
-
- {fmt.Sprintf("%d", data.TotalFiles)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Storage Size (Logical) -
-
- {formatBytes(data.TotalSize)} -
-
-
- -
-
-
-
-
-
- - -
-
-
- Collection Details -
-
-
- if len(data.Collections) > 0 { -
- - - - - - - - - - - - - - for _, collection := range data.Collections { - - - - - - - - - - } - -
Collection NameRegular VolumesEC VolumesFilesSize (Logical)Disk TypesActions
- - {collection.Name} - - - -
- - if collection.VolumeCount > 0 { - {fmt.Sprintf("%d", collection.VolumeCount)} - } else { - 0 - } -
-
-
- -
- - if collection.EcVolumeCount > 0 { - {fmt.Sprintf("%d", collection.EcVolumeCount)} - } else { - 0 - } -
-
-
-
- - {fmt.Sprintf("%d", collection.FileCount)} -
-
-
- - {formatBytes(collection.TotalSize)} -
-
- for i, diskType := range collection.DiskTypes { - if i > 0 { - - } - {diskType} - } - if len(collection.DiskTypes) == 0 { - Unknown - } - - -
-
- } else { -
- -
No Collections Found
-

No collections are currently configured in the cluster.

-
- } -
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
- - - - - - - -} - -func getDiskTypeColor(diskType string) string { - switch diskType { - case "ssd": - return "primary" - case "hdd", "": - return "secondary" - default: - return "info" - } -} - -func formatDiskTypes(diskTypes []string) string { - if len(diskTypes) == 0 { - return "Unknown" - } - if len(diskTypes) == 1 { - return diskTypes[0] - } - // For multiple disk types, join with comma - result := "" - for i, diskType := range diskTypes { - if i > 0 { - result += ", " - } - result += diskType - } - return result -} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_collections_templ.go b/weed/admin/view/app/cluster_collections_templ.go deleted file mode 100644 index e3630d7a6..000000000 --- a/weed/admin/view/app/cluster_collections_templ.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterCollections(data dash.ClusterCollectionsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Cluster Collections

Total Collections
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalCollections)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 34, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Regular Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 54, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
EC Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 74, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Total Files
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 94, Col: 71} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Total Storage Size (Logical)
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 114, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Collection Details
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Collections) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, collection := range data.Collections { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
Collection NameRegular VolumesEC VolumesFilesSize (Logical)Disk TypesActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 153, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if collection.VolumeCount > 0 { - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 161, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "0") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if collection.EcVolumeCount > 0 { - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.EcVolumeCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 173, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "0") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 183, Col: 88} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(collection.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 189, Col: 82} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, diskType := range collection.DiskTypes { - if i > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 = []any{fmt.Sprintf("badge bg-%s me-1", getDiskTypeColor(diskType))} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(diskType) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 197, Col: 131} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(collection.DiskTypes) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "Unknown") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
No Collections Found

No collections are currently configured in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 238, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func getDiskTypeColor(diskType string) string { - switch diskType { - case "ssd": - return "primary" - case "hdd", "": - return "secondary" - default: - return "info" - } -} - -func formatDiskTypes(diskTypes []string) string { - if len(diskTypes) == 0 { - return "Unknown" - } - if len(diskTypes) == 1 { - return diskTypes[0] - } - // For multiple disk types, join with comma - result := "" - for i, diskType := range diskTypes { - if i > 0 { - result += ", " - } - result += diskType - } - return result -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_ec_shards.templ b/weed/admin/view/app/cluster_ec_shards.templ deleted file mode 100644 index a3e8fc0ec..000000000 --- a/weed/admin/view/app/cluster_ec_shards.templ +++ /dev/null @@ -1,455 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterEcShards(data dash.ClusterEcShardsData) { -
-
-

- EC Shards -

- if data.FilterCollection != "" { -
- if data.FilterCollection == "default" { - - Collection: default - - } else { - - Collection: {data.FilterCollection} - - } - - Clear Filter - -
- } -
-
-
- - -
-
-
- - -
-
-
-
-
-
-
Total Shards
-

{fmt.Sprintf("%d", data.TotalShards)}

-
-
- -
-
-
-
-
-
-
-
-
-
-
EC Volumes
-

{fmt.Sprintf("%d", data.TotalVolumes)}

-
-
- -
-
-
-
-
-
-
-
-
-
-
Healthy Volumes
-

{fmt.Sprintf("%d", data.VolumesWithAllShards)}

- Complete (14/14 shards) -
-
- -
-
-
-
-
-
-
-
-
-
-
Degraded Volumes
-

{fmt.Sprintf("%d", data.VolumesWithMissingShards)}

- Incomplete/Critical -
-
- -
-
-
-
-
-
- - -
- - - - - - if data.ShowCollectionColumn { - - } - - if data.ShowDataCenterColumn { - - } - if data.ShowRackColumn { - - } - - - - - - - for _, shard := range data.EcShards { - - - if data.ShowCollectionColumn { - - } - - if data.ShowDataCenterColumn { - - } - if data.ShowRackColumn { - - } - - - - - } - -
- - Volume ID - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Collection - if data.SortBy == "collection" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Server - if data.SortBy == "server" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Data Center - if data.SortBy == "datacenter" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Rack - if data.SortBy == "rack" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - DistributionStatusActions
- {fmt.Sprintf("%d", shard.VolumeID)} - - if shard.Collection != "" { - - {shard.Collection} - - } else { - - default - - } - - {shard.Server} - - {shard.DataCenter} - - {shard.Rack} - - @displayShardDistribution(shard, data.EcShards) - - @displayVolumeStatus(shard) - -
- - if !shard.IsComplete { - - } -
-
-
- - - if data.TotalPages > 1 { - - } - - - - -} - -// displayShardDistribution shows the distribution summary for a volume's shards -templ displayShardDistribution(shard dash.EcShardWithInfo, allShards []dash.EcShardWithInfo) { -
- - { calculateDistributionSummary(shard.VolumeID, allShards) } -
-} - -// displayVolumeStatus shows an improved status display -templ displayVolumeStatus(shard dash.EcShardWithInfo) { - if shard.IsComplete { - Complete - } else { - if len(shard.MissingShards) > 10 { - Critical ({fmt.Sprintf("%d", len(shard.MissingShards))} missing) - } else if len(shard.MissingShards) > 6 { - Degraded ({fmt.Sprintf("%d", len(shard.MissingShards))} missing) - } else if len(shard.MissingShards) > 2 { - Incomplete ({fmt.Sprintf("%d", len(shard.MissingShards))} missing) - } else { - Minor Issues ({fmt.Sprintf("%d", len(shard.MissingShards))} missing) - } - } -} - -// calculateDistributionSummary calculates and formats the distribution summary -func calculateDistributionSummary(volumeID uint32, allShards []dash.EcShardWithInfo) string { - dataCenters := make(map[string]bool) - racks := make(map[string]bool) - servers := make(map[string]bool) - - for _, s := range allShards { - if s.VolumeID == volumeID { - dataCenters[s.DataCenter] = true - racks[s.Rack] = true - servers[s.Server] = true - } - } - - return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), len(racks), len(servers)) -} - diff --git a/weed/admin/view/app/cluster_ec_shards_templ.go b/weed/admin/view/app/cluster_ec_shards_templ.go deleted file mode 100644 index f995e5ef4..000000000 --- a/weed/admin/view/app/cluster_ec_shards_templ.go +++ /dev/null @@ -1,840 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterEcShards(data dash.ClusterEcShardsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

EC Shards

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.FilterCollection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.FilterCollection == "default" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "Collection: default ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "Collection: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.FilterCollection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 22, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Clear Filter
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Total Shards

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 54, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "

EC Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 69, Col: 82} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "

Healthy Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumesWithAllShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 84, Col: 90} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "

Complete (14/14 shards)
Degraded Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumesWithMissingShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 100, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "

Incomplete/Critical
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowRackColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, shard := range data.EcShards { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowRackColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "
Volume ID ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "Collection ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "collection" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "Server ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "server" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "Data Center ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "datacenter" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "Rack ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "rack" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "DistributionStatusActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shard.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 203, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if shard.Collection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 209, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "default") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 219, Col: 61} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 223, Col: 88} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 228, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = displayShardDistribution(shard, data.EcShards).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = displayVolumeStatus(shard).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !shard.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.TotalPages > 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// displayShardDistribution shows the distribution summary for a volume's shards -func displayShardDistribution(shard dash.EcShardWithInfo, allShards []dash.EcShardWithInfo) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var23 := templ.GetChildren(ctx) - if templ_7745c5c3_Var23 == nil { - templ_7745c5c3_Var23 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(calculateDistributionSummary(shard.VolumeID, allShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 418, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// displayVolumeStatus shows an improved status display -func displayVolumeStatus(shard dash.EcShardWithInfo) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var25 := templ.GetChildren(ctx) - if templ_7745c5c3_Var25 == nil { - templ_7745c5c3_Var25 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if shard.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "Complete") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - if len(shard.MissingShards) > 10 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "Critical (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 428, Col: 129} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if len(shard.MissingShards) > 6 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "Degraded (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 430, Col: 145} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if len(shard.MissingShards) > 2 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "Incomplete (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 432, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "Minor Issues (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(shard.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_shards.templ`, Line: 434, Col: 137} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - return nil - }) -} - -// calculateDistributionSummary calculates and formats the distribution summary -func calculateDistributionSummary(volumeID uint32, allShards []dash.EcShardWithInfo) string { - dataCenters := make(map[string]bool) - racks := make(map[string]bool) - servers := make(map[string]bool) - - for _, s := range allShards { - if s.VolumeID == volumeID { - dataCenters[s.DataCenter] = true - racks[s.Rack] = true - servers[s.Server] = true - } - } - - return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), len(racks), len(servers)) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_ec_volumes.templ b/weed/admin/view/app/cluster_ec_volumes.templ deleted file mode 100644 index c84da45ca..000000000 --- a/weed/admin/view/app/cluster_ec_volumes.templ +++ /dev/null @@ -1,776 +0,0 @@ -package app - -import ( - "fmt" - "strings" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -templ ClusterEcVolumes(data dash.ClusterEcVolumesData) { - - - - EC Volumes - SeaweedFS - - - - - - -
-
-
-

- EC Volumes - ({fmt.Sprintf("%d", data.TotalVolumes)} volumes) -

-
-
- - -
-
-
-
-
-
-
Total Volumes
-

{fmt.Sprintf("%d", data.TotalVolumes)}

- EC encoded volumes -
-
- -
-
-
-
-
-
-
-
-
-
-
Total Shards
-

{fmt.Sprintf("%d", data.TotalShards)}

- Distributed shards -
-
- -
-
-
-
-
-
-
-
-
-
-
Complete Volumes
-

{fmt.Sprintf("%d", data.CompleteVolumes)}

- All shards present -
-
- -
-
-
-
-
-
-
-
-
-
-
Incomplete Volumes
-

{fmt.Sprintf("%d", data.IncompleteVolumes)}

- Missing shards -
-
- -
-
-
-
-
-
- - - - - -
-
- - Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int { - end := data.Page * data.PageSize - if end > data.TotalVolumes { - return data.TotalVolumes - } - return end - }())} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes - - -
- - - per page -
-
- - if data.Collection != "" { -
- if data.Collection == "default" { - Collection: default - } else { - Collection: {data.Collection} - } - Clear Filter -
- } -
- -
- - - - - if data.ShowCollectionColumn { - - } - - - - - if data.ShowDataCenterColumn { - - } - - - - - for _, volume := range data.EcVolumes { - - - if data.ShowCollectionColumn { - - } - - - - - if data.ShowDataCenterColumn { - - } - - - } - -
- - Volume ID - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Collection - if data.SortBy == "collection" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Shard Count - if data.SortBy == "total_shards" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - Shard SizeShard Locations - - Status - if data.SortBy == "completeness" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - Data CentersActions
- {fmt.Sprintf("%d", volume.VolumeID)} - - if volume.Collection != "" { - - {volume.Collection} - - } else { - - default - - } - - {fmt.Sprintf("%d/14", volume.TotalShards)} - - @displayShardSizes(volume.ShardSizes) - - @displayVolumeDistribution(volume) - - @displayEcVolumeStatus(volume) - - for i, dc := range volume.DataCenters { - if i > 0 { - , - } - {dc} - } - -
- - if !volume.IsComplete { - - } -
-
-
- - - if data.TotalPages > 1 { - - } -
- - - - - -} - -// displayShardLocationsHTML renders shard locations as proper HTML -templ displayShardLocationsHTML(shardLocations map[int]string) { - if len(shardLocations) == 0 { - No shards - } else { - for i, serverInfo := range groupShardsByServer(shardLocations) { - if i > 0 { -
- } - - - { serverInfo.Server } - : - { serverInfo.ShardRanges } - } - } -} - -// displayShardSizes renders shard sizes in a compact format -templ displayShardSizes(shardSizes map[int]int64) { - if len(shardSizes) == 0 { - - - } else { - @renderShardSizesContent(shardSizes) - } -} - -// renderShardSizesContent renders the content of shard sizes -templ renderShardSizesContent(shardSizes map[int]int64) { - if areAllShardSizesSame(shardSizes) { - // All shards have the same size, show just the common size - {getCommonShardSize(shardSizes)} - } else { - // Shards have different sizes, show individual sizes -
- { formatIndividualShardSizes(shardSizes) } -
- } -} - -// ServerShardInfo represents server and its shard ranges with sizes -type ServerShardInfo struct { - Server string - ShardRanges string -} - -// groupShardsByServer groups shards by server and formats ranges -func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo { - if len(shardLocations) == 0 { - return []ServerShardInfo{} - } - - // Group shards by server - serverShards := make(map[string][]int) - for shardId, server := range shardLocations { - serverShards[server] = append(serverShards[server], shardId) - } - - var serverInfos []ServerShardInfo - for server, shards := range serverShards { - // Sort shards for each server - for i := 0; i < len(shards); i++ { - for j := i + 1; j < len(shards); j++ { - if shards[i] > shards[j] { - shards[i], shards[j] = shards[j], shards[i] - } - } - } - - // Format shard ranges compactly - shardRanges := formatShardRanges(shards) - serverInfos = append(serverInfos, ServerShardInfo{ - Server: server, - ShardRanges: shardRanges, - }) - } - - // Sort by server name - for i := 0; i < len(serverInfos); i++ { - for j := i + 1; j < len(serverInfos); j++ { - if serverInfos[i].Server > serverInfos[j].Server { - serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i] - } - } - } - - return serverInfos -} - -// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes -func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo { - if len(shardLocations) == 0 { - return []ServerShardInfo{} - } - - // Group shards by server - serverShards := make(map[string][]int) - for shardId, server := range shardLocations { - serverShards[server] = append(serverShards[server], shardId) - } - - var serverInfos []ServerShardInfo - for server, shards := range serverShards { - // Sort shards for each server - for i := 0; i < len(shards); i++ { - for j := i + 1; j < len(shards); j++ { - if shards[i] > shards[j] { - shards[i], shards[j] = shards[j], shards[i] - } - } - } - - // Format shard ranges compactly with sizes - shardRanges := formatShardRangesWithSizes(shards, shardSizes) - serverInfos = append(serverInfos, ServerShardInfo{ - Server: server, - ShardRanges: shardRanges, - }) - } - - // Sort by server name - for i := 0; i < len(serverInfos); i++ { - for j := i + 1; j < len(serverInfos); j++ { - if serverInfos[i].Server > serverInfos[j].Server { - serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i] - } - } - } - - return serverInfos -} - -// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11") -func formatShardRanges(shards []int) string { - if len(shards) == 0 { - return "" - } - - var ranges []string - start := shards[0] - end := shards[0] - - for i := 1; i < len(shards); i++ { - if shards[i] == end+1 { - end = shards[i] - } else { - if start == end { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - start = shards[i] - end = shards[i] - } - } - - // Add the last range - if start == end { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - - return strings.Join(ranges, ",") -} - -// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)") -func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string { - if len(shards) == 0 { - return "" - } - - var ranges []string - start := shards[0] - end := shards[0] - var totalSize int64 - - for i := 1; i < len(shards); i++ { - if shards[i] == end+1 { - end = shards[i] - totalSize += shardSizes[shards[i]] - } else { - // Add current range with size - if start == end { - size := shardSizes[start] - if size > 0 { - ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size))) - } else { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } - } else { - // Calculate total size for the range - rangeSize := shardSizes[start] - for j := start + 1; j <= end; j++ { - rangeSize += shardSizes[j] - } - if rangeSize > 0 { - ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize))) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - } - start = shards[i] - end = shards[i] - totalSize = shardSizes[shards[i]] - } - } - - // Add the last range - if start == end { - size := shardSizes[start] - if size > 0 { - ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size))) - } else { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } - } else { - // Calculate total size for the range - rangeSize := shardSizes[start] - for j := start + 1; j <= end; j++ { - rangeSize += shardSizes[j] - } - if rangeSize > 0 { - ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize))) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - } - - return strings.Join(ranges, ",") -} - -// Helper function to convert bytes to human readable format -func bytesToHumanReadable(bytes int64) string { - const unit = 1024 - if bytes < unit { - return fmt.Sprintf("%dB", bytes) - } - div, exp := int64(unit), 0 - for n := bytes / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp]) -} - -// Helper function to format missing shards -func formatMissingShards(missingShards []int) string { - if len(missingShards) == 0 { - return "" - } - - var shardStrs []string - for _, shard := range missingShards { - shardStrs = append(shardStrs, fmt.Sprintf("%d", shard)) - } - - return strings.Join(shardStrs, ", ") -} - -// Helper function to check if all shard sizes are the same -func areAllShardSizesSame(shardSizes map[int]int64) bool { - if len(shardSizes) <= 1 { - return true - } - - var firstSize int64 = -1 - for _, size := range shardSizes { - if firstSize == -1 { - firstSize = size - } else if size != firstSize { - return false - } - } - return true -} - -// Helper function to get the common shard size (when all shards are the same size) -func getCommonShardSize(shardSizes map[int]int64) string { - for _, size := range shardSizes { - return bytesToHumanReadable(size) - } - return "-" -} - -// Helper function to format individual shard sizes -func formatIndividualShardSizes(shardSizes map[int]int64) string { - if len(shardSizes) == 0 { - return "" - } - - // Group shards by size for more compact display - sizeGroups := make(map[int64][]int) - for shardId, size := range shardSizes { - sizeGroups[size] = append(sizeGroups[size], shardId) - } - - // If there are only 1-2 different sizes, show them grouped - if len(sizeGroups) <= 3 { - var groupStrs []string - for size, shardIds := range sizeGroups { - // Sort shard IDs - for i := 0; i < len(shardIds); i++ { - for j := i + 1; j < len(shardIds); j++ { - if shardIds[i] > shardIds[j] { - shardIds[i], shardIds[j] = shardIds[j], shardIds[i] - } - } - } - - var idRanges []string - if len(shardIds) <= erasure_coding.ParityShardsCount { - // Show individual IDs if few shards - for _, id := range shardIds { - idRanges = append(idRanges, fmt.Sprintf("%d", id)) - } - } else { - // Show count if many shards - idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds))) - } - groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size))) - } - return strings.Join(groupStrs, " | ") - } - - // If too many different sizes, show summary - return fmt.Sprintf("%d different sizes", len(sizeGroups)) -} - -// displayVolumeDistribution shows the distribution summary for a volume -templ displayVolumeDistribution(volume dash.EcVolumeWithShards) { -
- - { calculateVolumeDistributionSummary(volume) } -
-} - -// displayEcVolumeStatus shows an improved status display for EC volumes -templ displayEcVolumeStatus(volume dash.EcVolumeWithShards) { - if volume.IsComplete { - Complete - } else { - if len(volume.MissingShards) > erasure_coding.DataShardsCount { - Critical ({fmt.Sprintf("%d", len(volume.MissingShards))} missing) - } else if len(volume.MissingShards) > (erasure_coding.DataShardsCount/2) { - Degraded ({fmt.Sprintf("%d", len(volume.MissingShards))} missing) - } else if len(volume.MissingShards) > (erasure_coding.ParityShardsCount/2) { - Incomplete ({fmt.Sprintf("%d", len(volume.MissingShards))} missing) - } else { - Minor Issues ({fmt.Sprintf("%d", len(volume.MissingShards))} missing) - } - } -} - -// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume -func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string { - dataCenters := make(map[string]bool) - racks := make(map[string]bool) - servers := make(map[string]bool) - - // Count unique servers from shard locations - for _, server := range volume.ShardLocations { - servers[server] = true - } - - // Use the DataCenters field if available - for _, dc := range volume.DataCenters { - dataCenters[dc] = true - } - - // Use the Servers field if available - for _, server := range volume.Servers { - servers[server] = true - } - - // Use the Racks field if available - for _, rack := range volume.Racks { - racks[rack] = true - } - - // If we don't have rack information, estimate it from servers as fallback - rackCount := len(racks) - if rackCount == 0 { - // Fallback estimation - assume each server might be in a different rack - rackCount = len(servers) - if len(dataCenters) > 0 { - // More conservative estimate if we have DC info - rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters) - if rackCount == 0 { - rackCount = 1 - } - } - } - - return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers)) -} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_ec_volumes_templ.go b/weed/admin/view/app/cluster_ec_volumes_templ.go deleted file mode 100644 index 3220c057f..000000000 --- a/weed/admin/view/app/cluster_ec_volumes_templ.go +++ /dev/null @@ -1,1366 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" - "strings" -) - -func ClusterEcVolumes(data dash.ClusterEcVolumesData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "EC Volumes - SeaweedFS

EC Volumes (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 26, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " volumes)

Total Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 39, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

EC encoded volumes
Total Shards

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 55, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Distributed shards
Complete Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CompleteVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 71, Col: 89} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

All shards present
Incomplete Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.IncompleteVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 87, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "

Missing shards
EC Storage Note: EC volumes use erasure coding (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 103, Col: 131} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, ") which stores data across ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.TotalShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 103, Col: 212} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " shards with redundancy. Physical storage is approximately ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1fx", float64(erasure_coding.TotalShardsCount)/float64(erasure_coding.DataShardsCount))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 104, Col: 150} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " the original logical data size due to ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.ParityShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 104, Col: 244} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " parity shards.
Showing ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 111, Col: 79} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " to ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int { - end := data.Page * data.PageSize - if end > data.TotalVolumes { - return data.TotalVolumes - } - return end - }())) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 117, Col: 24} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " of ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 117, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, " volumes
per page
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Collection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Collection == "default" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "Collection: default ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "Collection: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 138, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "Clear Filter
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, volume := range data.EcVolumes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "
Volume ID ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "Collection ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "collection" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "Shard Count ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "total_shards" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "Shard SizeShard LocationsStatus ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "completeness" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "Data CentersActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 219, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if volume.Collection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 225, Col: 101} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "default") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", volume.TotalShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 235, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = displayShardSizes(volume.ShardSizes).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = displayVolumeDistribution(volume).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = displayEcVolumeStatus(volume).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, dc := range volume.DataCenters { - if i > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, ", ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(dc) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 252, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !volume.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.TotalPages > 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// displayShardLocationsHTML renders shard locations as proper HTML -func displayShardLocationsHTML(shardLocations map[int]string) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var27 := templ.GetChildren(ctx) - if templ_7745c5c3_Var27 == nil { - templ_7745c5c3_Var27 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if len(shardLocations) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "No shards") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - for i, serverInfo := range groupShardsByServer(shardLocations) { - if i > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 392, Col: 24} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, ": ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var30 string - templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(serverInfo.ShardRanges) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 394, Col: 37} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - return nil - }) -} - -// displayShardSizes renders shard sizes in a compact format -func displayShardSizes(shardSizes map[int]int64) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var31 := templ.GetChildren(ctx) - if templ_7745c5c3_Var31 == nil { - templ_7745c5c3_Var31 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if len(shardSizes) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = renderShardSizesContent(shardSizes).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -// renderShardSizesContent renders the content of shard sizes -func renderShardSizesContent(shardSizes map[int]int64) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var32 := templ.GetChildren(ctx) - if templ_7745c5c3_Var32 == nil { - templ_7745c5c3_Var32 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if areAllShardSizesSame(shardSizes) { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var33 string - templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(getCommonShardSize(shardSizes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 412, Col: 60} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var34 string - templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(formatIndividualShardSizes(shardSizes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 416, Col: 43} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -// ServerShardInfo represents server and its shard ranges with sizes -type ServerShardInfo struct { - Server string - ShardRanges string -} - -// groupShardsByServer groups shards by server and formats ranges -func groupShardsByServer(shardLocations map[int]string) []ServerShardInfo { - if len(shardLocations) == 0 { - return []ServerShardInfo{} - } - - // Group shards by server - serverShards := make(map[string][]int) - for shardId, server := range shardLocations { - serverShards[server] = append(serverShards[server], shardId) - } - - var serverInfos []ServerShardInfo - for server, shards := range serverShards { - // Sort shards for each server - for i := 0; i < len(shards); i++ { - for j := i + 1; j < len(shards); j++ { - if shards[i] > shards[j] { - shards[i], shards[j] = shards[j], shards[i] - } - } - } - - // Format shard ranges compactly - shardRanges := formatShardRanges(shards) - serverInfos = append(serverInfos, ServerShardInfo{ - Server: server, - ShardRanges: shardRanges, - }) - } - - // Sort by server name - for i := 0; i < len(serverInfos); i++ { - for j := i + 1; j < len(serverInfos); j++ { - if serverInfos[i].Server > serverInfos[j].Server { - serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i] - } - } - } - - return serverInfos -} - -// groupShardsByServerWithSizes groups shards by server and formats ranges with sizes -func groupShardsByServerWithSizes(shardLocations map[int]string, shardSizes map[int]int64) []ServerShardInfo { - if len(shardLocations) == 0 { - return []ServerShardInfo{} - } - - // Group shards by server - serverShards := make(map[string][]int) - for shardId, server := range shardLocations { - serverShards[server] = append(serverShards[server], shardId) - } - - var serverInfos []ServerShardInfo - for server, shards := range serverShards { - // Sort shards for each server - for i := 0; i < len(shards); i++ { - for j := i + 1; j < len(shards); j++ { - if shards[i] > shards[j] { - shards[i], shards[j] = shards[j], shards[i] - } - } - } - - // Format shard ranges compactly with sizes - shardRanges := formatShardRangesWithSizes(shards, shardSizes) - serverInfos = append(serverInfos, ServerShardInfo{ - Server: server, - ShardRanges: shardRanges, - }) - } - - // Sort by server name - for i := 0; i < len(serverInfos); i++ { - for j := i + 1; j < len(serverInfos); j++ { - if serverInfos[i].Server > serverInfos[j].Server { - serverInfos[i], serverInfos[j] = serverInfos[j], serverInfos[i] - } - } - } - - return serverInfos -} - -// Helper function to format shard ranges compactly (e.g., "0-3,7,9-11") -func formatShardRanges(shards []int) string { - if len(shards) == 0 { - return "" - } - - var ranges []string - start := shards[0] - end := shards[0] - - for i := 1; i < len(shards); i++ { - if shards[i] == end+1 { - end = shards[i] - } else { - if start == end { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - start = shards[i] - end = shards[i] - } - } - - // Add the last range - if start == end { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - - return strings.Join(ranges, ",") -} - -// Helper function to format shard ranges with sizes (e.g., "0(1.2MB),1-3(2.5MB),7(800KB)") -func formatShardRangesWithSizes(shards []int, shardSizes map[int]int64) string { - if len(shards) == 0 { - return "" - } - - var ranges []string - start := shards[0] - end := shards[0] - var totalSize int64 - - for i := 1; i < len(shards); i++ { - if shards[i] == end+1 { - end = shards[i] - totalSize += shardSizes[shards[i]] - } else { - // Add current range with size - if start == end { - size := shardSizes[start] - if size > 0 { - ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size))) - } else { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } - } else { - // Calculate total size for the range - rangeSize := shardSizes[start] - for j := start + 1; j <= end; j++ { - rangeSize += shardSizes[j] - } - if rangeSize > 0 { - ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize))) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - } - start = shards[i] - end = shards[i] - totalSize = shardSizes[shards[i]] - } - } - - // Add the last range - if start == end { - size := shardSizes[start] - if size > 0 { - ranges = append(ranges, fmt.Sprintf("%d(%s)", start, bytesToHumanReadable(size))) - } else { - ranges = append(ranges, fmt.Sprintf("%d", start)) - } - } else { - // Calculate total size for the range - rangeSize := shardSizes[start] - for j := start + 1; j <= end; j++ { - rangeSize += shardSizes[j] - } - if rangeSize > 0 { - ranges = append(ranges, fmt.Sprintf("%d-%d(%s)", start, end, bytesToHumanReadable(rangeSize))) - } else { - ranges = append(ranges, fmt.Sprintf("%d-%d", start, end)) - } - } - - return strings.Join(ranges, ",") -} - -// Helper function to convert bytes to human readable format -func bytesToHumanReadable(bytes int64) string { - const unit = 1024 - if bytes < unit { - return fmt.Sprintf("%dB", bytes) - } - div, exp := int64(unit), 0 - for n := bytes / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp]) -} - -// Helper function to format missing shards -func formatMissingShards(missingShards []int) string { - if len(missingShards) == 0 { - return "" - } - - var shardStrs []string - for _, shard := range missingShards { - shardStrs = append(shardStrs, fmt.Sprintf("%d", shard)) - } - - return strings.Join(shardStrs, ", ") -} - -// Helper function to check if all shard sizes are the same -func areAllShardSizesSame(shardSizes map[int]int64) bool { - if len(shardSizes) <= 1 { - return true - } - - var firstSize int64 = -1 - for _, size := range shardSizes { - if firstSize == -1 { - firstSize = size - } else if size != firstSize { - return false - } - } - return true -} - -// Helper function to get the common shard size (when all shards are the same size) -func getCommonShardSize(shardSizes map[int]int64) string { - for _, size := range shardSizes { - return bytesToHumanReadable(size) - } - return "-" -} - -// Helper function to format individual shard sizes -func formatIndividualShardSizes(shardSizes map[int]int64) string { - if len(shardSizes) == 0 { - return "" - } - - // Group shards by size for more compact display - sizeGroups := make(map[int64][]int) - for shardId, size := range shardSizes { - sizeGroups[size] = append(sizeGroups[size], shardId) - } - - // If there are only 1-2 different sizes, show them grouped - if len(sizeGroups) <= 3 { - var groupStrs []string - for size, shardIds := range sizeGroups { - // Sort shard IDs - for i := 0; i < len(shardIds); i++ { - for j := i + 1; j < len(shardIds); j++ { - if shardIds[i] > shardIds[j] { - shardIds[i], shardIds[j] = shardIds[j], shardIds[i] - } - } - } - - var idRanges []string - if len(shardIds) <= erasure_coding.ParityShardsCount { - // Show individual IDs if few shards - for _, id := range shardIds { - idRanges = append(idRanges, fmt.Sprintf("%d", id)) - } - } else { - // Show count if many shards - idRanges = append(idRanges, fmt.Sprintf("%d shards", len(shardIds))) - } - groupStrs = append(groupStrs, fmt.Sprintf("%s: %s", strings.Join(idRanges, ","), bytesToHumanReadable(size))) - } - return strings.Join(groupStrs, " | ") - } - - // If too many different sizes, show summary - return fmt.Sprintf("%d different sizes", len(sizeGroups)) -} - -// displayVolumeDistribution shows the distribution summary for a volume -func displayVolumeDistribution(volume dash.EcVolumeWithShards) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var35 := templ.GetChildren(ctx) - if templ_7745c5c3_Var35 == nil { - templ_7745c5c3_Var35 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var36 string - templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(calculateVolumeDistributionSummary(volume)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 714, Col: 52} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// displayEcVolumeStatus shows an improved status display for EC volumes -func displayEcVolumeStatus(volume dash.EcVolumeWithShards) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var37 := templ.GetChildren(ctx) - if templ_7745c5c3_Var37 == nil { - templ_7745c5c3_Var37 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if volume.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "Complete") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - if len(volume.MissingShards) > erasure_coding.DataShardsCount { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "Critical (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var38 string - templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 724, Col: 130} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if len(volume.MissingShards) > (erasure_coding.DataShardsCount / 2) { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "Degraded (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 726, Col: 146} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if len(volume.MissingShards) > (erasure_coding.ParityShardsCount / 2) { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, "Incomplete (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var40 string - templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 728, Col: 139} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, "Minor Issues (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var41 string - templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(volume.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_ec_volumes.templ`, Line: 730, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, " missing)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - return nil - }) -} - -// calculateVolumeDistributionSummary calculates and formats the distribution summary for a volume -func calculateVolumeDistributionSummary(volume dash.EcVolumeWithShards) string { - dataCenters := make(map[string]bool) - racks := make(map[string]bool) - servers := make(map[string]bool) - - // Count unique servers from shard locations - for _, server := range volume.ShardLocations { - servers[server] = true - } - - // Use the DataCenters field if available - for _, dc := range volume.DataCenters { - dataCenters[dc] = true - } - - // Use the Servers field if available - for _, server := range volume.Servers { - servers[server] = true - } - - // Use the Racks field if available - for _, rack := range volume.Racks { - racks[rack] = true - } - - // If we don't have rack information, estimate it from servers as fallback - rackCount := len(racks) - if rackCount == 0 { - // Fallback estimation - assume each server might be in a different rack - rackCount = len(servers) - if len(dataCenters) > 0 { - // More conservative estimate if we have DC info - rackCount = (len(servers) + len(dataCenters) - 1) / len(dataCenters) - if rackCount == 0 { - rackCount = 1 - } - } - } - - return fmt.Sprintf("%d DCs, %d racks, %d servers", len(dataCenters), rackCount, len(servers)) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_filers.templ b/weed/admin/view/app/cluster_filers.templ deleted file mode 100644 index 023fd4478..000000000 --- a/weed/admin/view/app/cluster_filers.templ +++ /dev/null @@ -1,182 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterFilers(data dash.ClusterFilersData) { -
-

- Filers -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Filers -
-
- { fmt.Sprintf("%d", data.TotalFilers) } -
-
-
- -
-
-
-
-
-
- - -
-
-
- Filers -
-
-
- if len(data.Filers) > 0 { -
- - - - - - - - - - - - - for _, filer := range data.Filers { - - - - - - - - - } - -
AddressVersionData CenterRackCreated AtActions
- - { filer.Address } - - - - { filer.Version } - - { filer.DataCenter } - - { filer.Rack } - - if !filer.CreatedAt.IsZero() { - { filer.CreatedAt.Format("2006-01-02 15:04:05") } - } else { - N/A - } - -
- -
-
-
- } else { -
- -
No Filers Found
-

No filer servers are currently available in the cluster.

-
- } -
-
- - -
-
- - - Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } - -
-
-
- - - -} - - \ No newline at end of file diff --git a/weed/admin/view/app/cluster_filers_templ.go b/weed/admin/view/app/cluster_filers_templ.go deleted file mode 100644 index c61c218fc..000000000 --- a/weed/admin/view/app/cluster_filers_templ.go +++ /dev/null @@ -1,194 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterFilers(data dash.ClusterFilersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Filers

Total Filers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFilers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 34, Col: 46} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Filers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Filers) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, filer := range data.Filers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
AddressVersionData CenterRackCreated AtActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 72, Col: 27} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Version) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 77, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 80, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 83, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !filer.CreatedAt.IsZero() { - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(filer.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 87, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "N/A") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
No Filers Found

No filer servers are currently available in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 119, Col: 67} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_masters.templ b/weed/admin/view/app/cluster_masters.templ deleted file mode 100644 index 6a53c5493..000000000 --- a/weed/admin/view/app/cluster_masters.templ +++ /dev/null @@ -1,282 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterMasters(data dash.ClusterMastersData) { -
-

- Masters -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Masters -
-
- { fmt.Sprintf("%d", data.TotalMasters) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Leaders -
-
- { fmt.Sprintf("%d", data.LeaderCount) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Cluster Health -
-
- if data.LeaderCount > 0 { - Healthy - } else { - Warning - } -
-
-
- -
-
-
-
-
-
- - -
-
-
- Masters -
-
-
- if len(data.Masters) > 0 { -
- - - - - - - - - - - for _, master := range data.Masters { - - - - - - - } - -
AddressRoleSuffrageActions
- - { master.Address } - - - - if master.IsLeader { - - Leader - - } else { - - Follower - - } - - if master.Suffrage != "" { - - { master.Suffrage } - - } else { - - - } - - -
-
- } else { -
- -
No Masters Found
-

No master servers are currently available in the cluster.

-
- } -
-
- - -
-
- - - Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } - -
-
-
- - - -} - - \ No newline at end of file diff --git a/weed/admin/view/app/cluster_masters_templ.go b/weed/admin/view/app/cluster_masters_templ.go deleted file mode 100644 index b10881bc0..000000000 --- a/weed/admin/view/app/cluster_masters_templ.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterMasters(data dash.ClusterMastersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Masters

Total Masters
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalMasters)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 34, Col: 47} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Leaders
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.LeaderCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 54, Col: 46} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Cluster Health
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.LeaderCount > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "Healthy") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "Warning") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Masters
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Masters) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, master := range data.Masters { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
AddressRoleSuffrageActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 114, Col: 28} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if master.IsLeader { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "Leader") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "Follower") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if master.Suffrage != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(master.Suffrage) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 132, Col: 30} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
No Masters Found

No master servers are currently available in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 169, Col: 67} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_volume_servers.templ b/weed/admin/view/app/cluster_volume_servers.templ deleted file mode 100644 index 14b952dce..000000000 --- a/weed/admin/view/app/cluster_volume_servers.templ +++ /dev/null @@ -1,384 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { -
-

- Volume Servers -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Volume Servers -
-
- {fmt.Sprintf("%d", data.TotalVolumeServers)} -
-
-
- -
-
-
-
-
- - - -
-
-
-
-
-
- Total Volumes -
-
- {fmt.Sprintf("%d", data.TotalVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Capacity -
-
- {formatBytes(data.TotalCapacity)} -
-
-
- -
-
-
-
-
-
- - -
-
-
- Volume Servers -
-
-
- if len(data.VolumeServers) > 0 { -
- - - - - - - - - - - - - - - - for _, host := range data.VolumeServers { - - - - - - - - - - - - } - -
AddressData CenterRackVolumesMax VolumesEC ShardsCapacityUsageActions
- - {host.Address} - - - - {host.DataCenter} - - {host.Rack} - -
-
-
-
-
- {fmt.Sprintf("%d", host.Volumes)} -
-
- {fmt.Sprintf("%d", host.MaxVolumes)} - - if host.EcShards > 0 { -
- - {fmt.Sprintf("%d", host.EcShards)} - shards -
- if host.EcVolumes > 0 { -
- {fmt.Sprintf("%d EC volumes", host.EcVolumes)} -
- } - } else { - - - } -
{formatBytes(host.DiskCapacity)} -
-
-
-
-
- {formatBytes(host.DiskUsage)} -
-
- -
-
- } else { -
- -
No Volume Servers Found
-

No volume servers are currently available in the cluster.

-
- } -
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
- - - -} - - \ No newline at end of file diff --git a/weed/admin/view/app/cluster_volume_servers_templ.go b/weed/admin/view/app/cluster_volume_servers_templ.go deleted file mode 100644 index f2293562f..000000000 --- a/weed/admin/view/app/cluster_volume_servers_templ.go +++ /dev/null @@ -1,455 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Volume Servers

Total Volume Servers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumeServers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 34, Col: 79} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 56, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Total Capacity
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalCapacity)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 76, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Volume Servers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.VolumeServers) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, host := range data.VolumeServers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
AddressData CenterRackVolumesMax VolumesEC ShardsCapacityUsageActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(host.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 117, Col: 61} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(host.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 122, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(host.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 125, Col: 93} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.Volumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 134, Col: 111} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.MaxVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 138, Col: 112} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if host.EcShards > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", host.EcShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 144, Col: 129} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " shards
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if host.EcVolumes > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d EC volumes", host.EcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 149, Col: 127} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskCapacity)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 156, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskUsage)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 164, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
No Volume Servers Found

No volume servers are currently available in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 207, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_volumes.templ b/weed/admin/view/app/cluster_volumes.templ deleted file mode 100644 index 1d84ad0cb..000000000 --- a/weed/admin/view/app/cluster_volumes.templ +++ /dev/null @@ -1,726 +0,0 @@ -package app - -import ( - "fmt" - "strings" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ClusterVolumes(data dash.ClusterVolumesData) { -
-
-

- Cluster Volumes -

- if data.FilterCollection != "" { -
- - Collection: {data.FilterCollection} - - - Clear Filter - -
- } -
-
-
- - -
-
-
- -
- -
-
-
-
-
-
-
- Total Volumes -
-
- {fmt.Sprintf("%d", data.TotalVolumes)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- if data.CollectionCount == 1 { - Collection - } else { - Collections - } -
-
- if data.CollectionCount == 1 { - {data.SingleCollection} - } else { - {fmt.Sprintf("%d", data.CollectionCount)} - } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- if data.DataCenterCount == 1 { - Data Center - } else { - Data Centers - } -
-
- if data.DataCenterCount == 1 { - {data.SingleDataCenter} - } else { - {fmt.Sprintf("%d", data.DataCenterCount)} - } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- if data.RackCount == 1 { - Rack - } else { - Racks - } -
-
- if data.RackCount == 1 { - {data.SingleRack} - } else { - {fmt.Sprintf("%d", data.RackCount)} - } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- if data.DiskTypeCount == 1 { - Disk Type - } else { - Disk Types - } -
-
- if data.DiskTypeCount == 1 { - {data.SingleDiskType} - } else { - {strings.Join(data.AllDiskTypes, ", ")} - } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- if data.VersionCount == 1 { - Version - } else { - Versions - } -
-
- if data.VersionCount == 1 { - {data.SingleVersion} - } else { - {strings.Join(data.AllVersions, ", ")} - } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Size -
-
- {formatBytes(data.TotalSize)} -
-
-
- -
-
-
-
-
-
- - -
-
-
- Volume Details -
-
-
- if len(data.Volumes) > 0 { -
- - - - - - if data.ShowDataCenterColumn { - - } - if data.ShowRackColumn { - - } - if data.ShowCollectionColumn { - - } - - - - - if data.ShowDiskTypeColumn { - - } - if data.ShowVersionColumn { - - } - - - - - for _, volume := range data.Volumes { - - - - if data.ShowDataCenterColumn { - - } - if data.ShowRackColumn { - - } - if data.ShowCollectionColumn { - - } - - - - - if data.ShowDiskTypeColumn { - - } - if data.ShowVersionColumn { - - } - - - } - -
- - Volume ID - @getSortIcon("id", data.SortBy, data.SortOrder) - - - - Server - @getSortIcon("server", data.SortBy, data.SortOrder) - - - - Data Center - @getSortIcon("datacenter", data.SortBy, data.SortOrder) - - - - Rack - @getSortIcon("rack", data.SortBy, data.SortOrder) - - - - Collection - @getSortIcon("collection", data.SortBy, data.SortOrder) - - - - Size - @getSortIcon("size", data.SortBy, data.SortOrder) - - Volume Utilization - - File Count - @getSortIcon("filecount", data.SortBy, data.SortOrder) - - - - Replication - @getSortIcon("replication", data.SortBy, data.SortOrder) - - - - Disk Type - @getSortIcon("disktype", data.SortBy, data.SortOrder) - - - - Version - @getSortIcon("version", data.SortBy, data.SortOrder) - - Actions
- - {fmt.Sprintf("%d", volume.Id)} - - - - {volume.Server} - - - - {volume.DataCenter} - - {volume.Rack} - - if volume.Collection == "" { - - default - - } else { - - {volume.Collection} - - } - {formatBytes(int64(volume.Size))} -
-
- -
0 { - activePct := float64(volume.Size - volume.DeletedByteCount) / float64(volume.Size) * 100 - if data.VolumeSizeLimit > 0 { - return activePct * float64(volume.Size) / float64(data.VolumeSizeLimit) * 100 - } - return activePct - } - return 0 - }())} - title={fmt.Sprintf("Active: %s", formatBytes(int64(volume.Size - volume.DeletedByteCount)))}> -
- -
0 && volume.DeletedByteCount > 0 { - garbagePct := float64(volume.DeletedByteCount) / float64(volume.Size) * 100 - if data.VolumeSizeLimit > 0 { - return garbagePct * float64(volume.Size) / float64(data.VolumeSizeLimit) * 100 - } - return garbagePct - } - return 0 - }())} - title={fmt.Sprintf("Garbage: %s", formatBytes(int64(volume.DeletedByteCount)))}> -
-
- - {func() string { - if data.VolumeSizeLimit > 0 { - return fmt.Sprintf("%.0f%%", float64(volume.Size)/float64(data.VolumeSizeLimit)*100) - } - return "N/A" - }()} - -
-
{fmt.Sprintf("%d", volume.FileCount)} - {fmt.Sprintf("%03d", volume.ReplicaPlacement)} - - {volume.DiskType} - - {fmt.Sprintf("v%d", volume.Version)} - -
- - -
-
-
- - -
-
- - Showing {fmt.Sprintf("%d", (data.CurrentPage-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", minInt(data.CurrentPage*data.PageSize, data.TotalVolumes))} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes - -
- if data.TotalPages > 1 { -
- - Page {fmt.Sprintf("%d", data.CurrentPage)} of {fmt.Sprintf("%d", data.TotalPages)} - -
- } -
- - - if data.TotalPages > 1 { -
- -
- } - } else { -
- -
No Volumes Found
-

No volumes are currently available in the cluster.

-
- } -
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
- - - -} - -func countActiveVolumes(volumes []dash.VolumeWithTopology) int { - // Since we removed status tracking, consider all volumes as active - return len(volumes) -} - -func countUniqueDataCenters(volumes []dash.VolumeWithTopology) int { - dcMap := make(map[string]bool) - for _, volume := range volumes { - dcMap[volume.DataCenter] = true - } - return len(dcMap) -} - -func countUniqueRacks(volumes []dash.VolumeWithTopology) int { - rackMap := make(map[string]bool) - for _, volume := range volumes { - if volume.Rack != "" { - rackMap[volume.Rack] = true - } - } - return len(rackMap) -} - -func countUniqueDiskTypes(volumes []dash.VolumeWithTopology) int { - diskTypeMap := make(map[string]bool) - for _, volume := range volumes { - diskType := volume.DiskType - if diskType == "" { - diskType = "hdd" - } - diskTypeMap[diskType] = true - } - return len(diskTypeMap) -} - - - -templ getSortIcon(column, currentSort, currentOrder string) { - if column != currentSort { - - } else if currentOrder == "asc" { - - } else { - - } -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_volumes_templ.go b/weed/admin/view/app/cluster_volumes_templ.go deleted file mode 100644 index c029a229c..000000000 --- a/weed/admin/view/app/cluster_volumes_templ.go +++ /dev/null @@ -1,1138 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "strings" -) - -func ClusterVolumes(data dash.ClusterVolumesData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Cluster Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.FilterCollection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Collection: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.FilterCollection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 18, Col: 92} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, " Clear Filter
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Total Volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 53, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.CollectionCount == 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "Collection") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "Collections") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.CollectionCount == 1 { - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.SingleCollection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 78, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CollectionCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 80, Col: 80} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.DataCenterCount == 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "Data Center") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "Data Centers") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.DataCenterCount == 1 { - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.SingleDataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 106, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.DataCenterCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 108, Col: 80} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.RackCount == 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Rack") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "Racks") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.RackCount == 1 { - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.SingleRack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 134, Col: 56} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.RackCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 136, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.DiskTypeCount == 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "Disk Type") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "Disk Types") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.DiskTypeCount == 1 { - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.SingleDiskType) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 162, Col: 60} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(strings.Join(data.AllDiskTypes, ", ")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 164, Col: 78} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.VersionCount == 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "Version") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "Versions") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.VersionCount == 1 { - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.SingleVersion) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 190, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(strings.Join(data.AllVersions, ", ")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 192, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
Total Size
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 213, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
Volume Details
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Volumes) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowRackColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDiskTypeColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowVersionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, volume := range data.Volumes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDataCenterColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowRackColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowCollectionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ShowDiskTypeColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.ShowVersionColumn { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "
Volume ID") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("id", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "Server") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("server", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "Data Center") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("datacenter", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "Rack") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("rack", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "Collection") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("collection", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "Size") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("size", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "Volume UtilizationFile Count") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("filecount", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "Replication") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("replication", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "Disk Type") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("disktype", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "Version") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = getSortIcon("version", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "Actions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.Id)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 319, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 324, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(volume.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 330, Col: 105} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 335, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if volume.Collection == "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "default") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 346, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(volume.Size))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 351, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "
0 { - activePct := float64(volume.Size-volume.DeletedByteCount) / float64(volume.Size) * 100 - if data.VolumeSizeLimit > 0 { - return activePct * float64(volume.Size) / float64(data.VolumeSizeLimit) * 100 - } - return activePct - } - return 0 - }())) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 367, Col: 49} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "\" title=\"") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Active: %s", formatBytes(int64(volume.Size-volume.DeletedByteCount)))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 368, Col: 132} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "\">
0 && volume.DeletedByteCount > 0 { - garbagePct := float64(volume.DeletedByteCount) / float64(volume.Size) * 100 - if data.VolumeSizeLimit > 0 { - return garbagePct * float64(volume.Size) / float64(data.VolumeSizeLimit) * 100 - } - return garbagePct - } - return 0 - }())) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 382, Col: 49} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "\" title=\"") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("Garbage: %s", formatBytes(int64(volume.DeletedByteCount)))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 383, Col: 119} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "\">
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(func() string { - if data.VolumeSizeLimit > 0 { - return fmt.Sprintf("%.0f%%", float64(volume.Size)/float64(data.VolumeSizeLimit)*100) - } - return "N/A" - }()) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 392, Col: 39} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var30 string - templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 396, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var31 string - templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%03d", volume.ReplicaPlacement)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 398, Col: 101} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var32 string - templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(volume.DiskType) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 402, Col: 95} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var33 string - templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("v%d", volume.Version)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 407, Col: 111} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "
Showing ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var37 string - templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.CurrentPage-1)*data.PageSize+1)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 434, Col: 98} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, " to ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var38 string - templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", minInt(data.CurrentPage*data.PageSize, data.TotalVolumes))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 434, Col: 180} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, " of ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 434, Col: 222} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, " volumes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.TotalPages > 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "
Page ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var40 string - templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 440, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, " of ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var41 string - templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 440, Col: 117} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.TotalPages > 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 109, "
No Volumes Found

No volumes are currently available in the cluster.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 110, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var47 string - templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 512, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 111, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func countActiveVolumes(volumes []dash.VolumeWithTopology) int { - // Since we removed status tracking, consider all volumes as active - return len(volumes) -} - -func countUniqueDataCenters(volumes []dash.VolumeWithTopology) int { - dcMap := make(map[string]bool) - for _, volume := range volumes { - dcMap[volume.DataCenter] = true - } - return len(dcMap) -} - -func countUniqueRacks(volumes []dash.VolumeWithTopology) int { - rackMap := make(map[string]bool) - for _, volume := range volumes { - if volume.Rack != "" { - rackMap[volume.Rack] = true - } - } - return len(rackMap) -} - -func countUniqueDiskTypes(volumes []dash.VolumeWithTopology) int { - diskTypeMap := make(map[string]bool) - for _, volume := range volumes { - diskType := volume.DiskType - if diskType == "" { - diskType = "hdd" - } - diskTypeMap[diskType] = true - } - return len(diskTypeMap) -} - -func getSortIcon(column, currentSort, currentOrder string) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var48 := templ.GetChildren(ctx) - if templ_7745c5c3_Var48 == nil { - templ_7745c5c3_Var48 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if column != currentSort { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 112, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if currentOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 113, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 114, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/collection_details.templ b/weed/admin/view/app/collection_details.templ deleted file mode 100644 index b5c86ba18..000000000 --- a/weed/admin/view/app/collection_details.templ +++ /dev/null @@ -1,381 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -templ CollectionDetails(data dash.CollectionDetailsData) { -
-
-

- Collection Details: {data.CollectionName} -

- -
-
-
- - -
-
-
- - -
-
-
-
-
-
-
Regular Volumes
-

{fmt.Sprintf("%d", data.TotalVolumes)}

- Traditional volumes -
-
- -
-
-
-
-
-
-
-
-
-
-
EC Volumes
-

{fmt.Sprintf("%d", data.TotalEcVolumes)}

- Erasure coded volumes -
-
- -
-
-
-
-
-
-
-
-
-
-
Total Files
-

{fmt.Sprintf("%d", data.TotalFiles)}

- Files stored -
-
- -
-
-
-
-
-
-
-
-
-
-
Total Size (Logical)
-

{util.BytesToHumanReadable(uint64(data.TotalSize))}

- Data stored (regular volumes only) -
-
- -
-
-
-
-
-
- - - - - -
-
- - Showing {fmt.Sprintf("%d", (data.Page-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", func() int { - end := data.Page * data.PageSize - totalItems := data.TotalVolumes + data.TotalEcVolumes - if end > totalItems { - return totalItems - } - return end - }())} of {fmt.Sprintf("%d", data.TotalVolumes + data.TotalEcVolumes)} items - - -
- - - per page -
-
-
- - -
- - - - - - - - - - - - - // Display regular volumes - for _, volume := range data.RegularVolumes { - - - - - - - - - } - - // Display EC volumes - for _, ecVolume := range data.EcVolumes { - - - - - - - - - } - - // Show message when no volumes found - if len(data.RegularVolumes) == 0 && len(data.EcVolumes) == 0 { - - - - } - -
- - Volume ID - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - - - Type - if data.SortBy == "type" { - if data.SortOrder == "asc" { - - } else { - - } - } else { - - } - - Logical Size / Shard CountFilesStatusActions
- {fmt.Sprintf("%d", volume.Id)} - - - Regular - - - {util.BytesToHumanReadable(volume.Size)} - - {fmt.Sprintf("%d", volume.FileCount)} - - if volume.ReadOnly { - Read Only - } else { - Read/Write - } - -
- -
-
- {fmt.Sprintf("%d", ecVolume.VolumeID)} - - - EC - - - {fmt.Sprintf("%d/14", ecVolume.TotalShards)} - - - - - if ecVolume.IsComplete { - - Complete - - } else { - - - Missing {fmt.Sprintf("%d", len(ecVolume.MissingShards))} shards - - } - -
- - if !ecVolume.IsComplete { - - } -
-
- - No volumes found for collection "{data.CollectionName}" -
-
- - - if data.TotalPages > 1 { - - } - - -} \ No newline at end of file diff --git a/weed/admin/view/app/collection_details_templ.go b/weed/admin/view/app/collection_details_templ.go deleted file mode 100644 index a0e781637..000000000 --- a/weed/admin/view/app/collection_details_templ.go +++ /dev/null @@ -1,586 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func CollectionDetails(data dash.CollectionDetailsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Collection Details: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.CollectionName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 13, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

Regular Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 43, Col: 61} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Traditional volumes
EC Volumes

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 59, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

Erasure coded volumes
Total Files

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 75, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "

Files stored
Total Size (Logical)

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(uint64(data.TotalSize))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 91, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "

Data stored (regular volumes only)
Size Information: Logical size represents the actual data stored (regular volumes only). EC volumes show shard counts instead of size - physical storage for EC volumes is approximately 1.4x the original data due to erasure coding redundancy.
Showing ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.Page-1)*data.PageSize+1)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 115, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " to ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", func() int { - end := data.Page * data.PageSize - totalItems := data.TotalVolumes + data.TotalEcVolumes - if end > totalItems { - return totalItems - } - return end - }())) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 122, Col: 8} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " of ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes+data.TotalEcVolumes)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 122, Col: 72} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " items
per page
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, volume := range data.RegularVolumes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - for _, ecVolume := range data.EcVolumes { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.RegularVolumes) == 0 && len(data.EcVolumes) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "
Volume ID ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "volume_id" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Type ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "type" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "Logical Size / Shard CountFilesStatusActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.Id)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 182, Col: 44} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "Regular") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(volume.Size)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 190, Col: 46} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 193, Col: 43} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if volume.ReadOnly { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "Read Only") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "Read/Write") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", ecVolume.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 220, Col: 52} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "EC") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/14", ecVolume.TotalShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 228, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if ecVolume.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "Complete") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " Missing ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(ecVolume.MissingShards))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 241, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, " shards") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !ecVolume.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
No volumes found for collection \"") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(data.CollectionName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/collection_details.templ`, Line: 271, Col: 60} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "\"
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.TotalPages > 1 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/ec_volume_details.templ b/weed/admin/view/app/ec_volume_details.templ deleted file mode 100644 index caf506d0f..000000000 --- a/weed/admin/view/app/ec_volume_details.templ +++ /dev/null @@ -1,313 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ EcVolumeDetails(data dash.EcVolumeDetailsData) { -
-
-

- EC Volume Details -

- -
-
-
- - -
-
-
- - -
-
-
-
-
- Volume Information -
-
-
- - - - - - - - - - - - - - if !data.IsComplete { - - - - - } - - - - - - - - - - - - -
Volume ID:{fmt.Sprintf("%d", data.VolumeID)}
Collection: - if data.Collection != "" { - {data.Collection} - } else { - default - } -
Status: - if data.IsComplete { - - Complete ({data.TotalShards}/14 shards) - - } else { - - Incomplete ({data.TotalShards}/14 shards) - - } -
Missing Shards: - for i, shardID := range data.MissingShards { - if i > 0 { - , - } - {fmt.Sprintf("%02d", shardID)} - } -
Data Centers: - for i, dc := range data.DataCenters { - if i > 0 { - , - } - {dc} - } -
Servers: - {fmt.Sprintf("%d servers", len(data.Servers))} -
Last Updated: - {data.LastUpdated.Format("2006-01-02 15:04:05")} -
-
-
-
- -
-
-
-
- Shard Distribution -
-
-
-
-
-
-

{fmt.Sprintf("%d", data.TotalShards)}

- Total Shards -
-
-
-
-

{fmt.Sprintf("%d", len(data.DataCenters))}

- Data Centers -
-
-
-
-

{fmt.Sprintf("%d", len(data.Servers))}

- Servers -
-
-
- - -
-
Present Shards:
-
- for _, shard := range data.Shards { - {fmt.Sprintf("%02d", shard.ShardID)} - } -
- if len(data.MissingShards) > 0 { -
Missing Shards:
-
- for _, shardID := range data.MissingShards { - {fmt.Sprintf("%02d", shardID)} - } -
- } -
-
-
-
-
- - -
-
-
- Shard Details -
-
-
- if len(data.Shards) > 0 { - - } else { -
- -
No EC shards found
-

This volume may not be EC encoded yet.

-
- } -
-
- - -} - -// Helper function to convert bytes to human readable format (uint64 version) -func bytesToHumanReadableUint64(bytes uint64) string { - const unit = 1024 - if bytes < unit { - return fmt.Sprintf("%dB", bytes) - } - div, exp := uint64(unit), 0 - for n := bytes / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp]) -} \ No newline at end of file diff --git a/weed/admin/view/app/ec_volume_details_templ.go b/weed/admin/view/app/ec_volume_details_templ.go deleted file mode 100644 index a062998bd..000000000 --- a/weed/admin/view/app/ec_volume_details_templ.go +++ /dev/null @@ -1,560 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func EcVolumeDetails(data dash.EcVolumeDetailsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

EC Volume Details

Volume Information
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !data.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
Volume ID:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 47, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Collection:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Collection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 53, Col: 80} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "default") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Status:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.IsComplete { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "Complete (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 64, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "/14 shards)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Incomplete (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.TotalShards) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 68, Col: 117} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "/14 shards)") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
Missing Shards:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, shardID := range data.MissingShards { - if i > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, ", ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 81, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Data Centers:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, dc := range data.DataCenters { - if i > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, ", ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(dc) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 93, Col: 70} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
Servers:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d servers", len(data.Servers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 100, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Last Updated:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 106, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
Shard Distribution

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalShards)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 125, Col: 98} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "

Total Shards

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.DataCenters))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 131, Col: 103} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "

Data Centers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Servers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 137, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "

Servers
Present Shards:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, shard := range data.Shards { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 148, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.MissingShards) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "
Missing Shards:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, shardID := range data.MissingShards { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shardID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 155, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
Shard Details
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Shards) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, shard := range data.Shards { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "
Shard ID ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "shard_id" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "Server ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "server" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "Data Center ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "data_center" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "Rack ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.SortBy == "rack" { - if data.SortOrder == "asc" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "Disk TypeShard SizeActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%02d", shard.ShardID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 243, Col: 110} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 247, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 251, Col: 103} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(shard.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 254, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(shard.DiskType) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 257, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(bytesToHumanReadableUint64(shard.Size)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/ec_volume_details.templ`, Line: 260, Col: 110} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "Volume Server
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "
No EC shards found

This volume may not be EC encoded yet.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// Helper function to convert bytes to human readable format (uint64 version) -func bytesToHumanReadableUint64(bytes uint64) string { - const unit = 1024 - if bytes < unit { - return fmt.Sprintf("%dB", bytes) - } - div, exp := uint64(unit), 0 - for n := bytes / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f%cB", float64(bytes)/float64(div), "KMGTPE"[exp]) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/file_browser.templ b/weed/admin/view/app/file_browser.templ deleted file mode 100644 index 83db7df0f..000000000 --- a/weed/admin/view/app/file_browser.templ +++ /dev/null @@ -1,812 +0,0 @@ -package app - -import ( - "fmt" - "path/filepath" - "strings" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ FileBrowser(data dash.FileBrowserData) { -
-

- if data.IsBucketPath && data.BucketName != "" { - S3 Bucket: {data.BucketName} - } else { - File Browser - } -

-
-
- if data.IsBucketPath && data.BucketName != "" { - - Back to Buckets - - } - - - - -
-
-
- - - - - -
-
-
-
-
-
-
- Total Entries -
-
- { fmt.Sprintf("%d", data.TotalEntries) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Directories -
-
- { fmt.Sprintf("%d", countDirectories(data.Entries)) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Files -
-
- { fmt.Sprintf("%d", countFiles(data.Entries)) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Size -
-
- { formatBytes(data.TotalSize) } -
-
-
- -
-
-
-
-
-
- - -
-
-
- - if data.CurrentPath == "/" { - Root Directory - } else if data.CurrentPath == "/buckets" { - Object Store Buckets Directory - - Manage Buckets - - } else { - { filepath.Base(data.CurrentPath) } - } -
- if data.ParentPath != data.CurrentPath { - - Up - - } -
-
- if len(data.Entries) > 0 { -
- - - - - - - - - - - - - - for _, entry := range data.Entries { - - - - - - - - - - } - -
- - NameSizeTypeModifiedPermissionsActions
- - -
- if entry.IsDirectory { - - - { entry.Name } - - } else { - - { entry.Name } - } -
-
- if entry.IsDirectory { - โ€” - } else { - { formatBytes(entry.Size) } - } - - - if entry.IsDirectory { - Directory - } else { - { getMimeDisplayName(entry.Mime) } - } - - - if !entry.ModTime.IsZero() { - { entry.ModTime.Format("2006-01-02 15:04") } - } else { - โ€” - } - - { entry.Mode } - -
- if !entry.IsDirectory { - - - } - - -
-
-
- } else { -
- -
Empty Directory
-

This directory contains no files or subdirectories.

-
- } -
-
- - -
-
- - - Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } - -
-
- - - - - - - - - -} - -func countDirectories(entries []dash.FileEntry) int { - count := 0 - for _, entry := range entries { - if entry.IsDirectory { - count++ - } - } - return count -} - -func countFiles(entries []dash.FileEntry) int { - count := 0 - for _, entry := range entries { - if !entry.IsDirectory { - count++ - } - } - return count -} - -func getFileIcon(mime string) string { - switch { - case strings.HasPrefix(mime, "image/"): - return "fa-image" - case strings.HasPrefix(mime, "video/"): - return "fa-video" - case strings.HasPrefix(mime, "audio/"): - return "fa-music" - case strings.HasPrefix(mime, "text/"): - return "fa-file-text" - case mime == "application/pdf": - return "fa-file-pdf" - case mime == "application/zip" || strings.Contains(mime, "archive"): - return "fa-file-archive" - case mime == "application/json": - return "fa-file-code" - case strings.Contains(mime, "script") || strings.Contains(mime, "javascript"): - return "fa-file-code" - default: - return "fa-file" - } -} - -func getMimeDisplayName(mime string) string { - switch mime { - case "text/plain": - return "Text" - case "text/html": - return "HTML" - case "application/json": - return "JSON" - case "application/pdf": - return "PDF" - case "image/jpeg": - return "JPEG" - case "image/png": - return "PNG" - case "image/gif": - return "GIF" - case "video/mp4": - return "MP4" - case "audio/mpeg": - return "MP3" - case "application/zip": - return "ZIP" - default: - if strings.HasPrefix(mime, "image/") { - return "Image" - } else if strings.HasPrefix(mime, "video/") { - return "Video" - } else if strings.HasPrefix(mime, "audio/") { - return "Audio" - } else if strings.HasPrefix(mime, "text/") { - return "Text" - } - return "File" - } -} \ No newline at end of file diff --git a/weed/admin/view/app/file_browser_templ.go b/weed/admin/view/app/file_browser_templ.go deleted file mode 100644 index 8bfdedc84..000000000 --- a/weed/admin/view/app/file_browser_templ.go +++ /dev/null @@ -1,637 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "path/filepath" - "strings" -) - -func FileBrowser(data dash.FileBrowserData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.IsBucketPath && data.BucketName != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "S3 Bucket: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.BucketName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 14, Col: 63} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "File Browser") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.IsBucketPath && data.BucketName != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "Back to Buckets ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Total Entries
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEntries)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 77, Col: 46} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Directories
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countDirectories(data.Entries))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 97, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
Files
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countFiles(data.Entries))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 117, Col: 53} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Total Size
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 137, Col: 37} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.CurrentPath == "/" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "Root Directory") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.CurrentPath == "/buckets" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "Object Store Buckets Directory Manage Buckets") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(filepath.Base(data.CurrentPath)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 162, Col: 37} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.ParentPath != data.CurrentPath { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Up") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Entries) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, entry := range data.Entries { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "
NameSizeTypeModifiedPermissionsActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if entry.IsDirectory { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 199, Col: 25} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var15 = []any{fmt.Sprintf("fas %s text-muted me-2", getFileIcon(entry.Mime))} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 203, Col: 30} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if entry.IsDirectory { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "โ€”") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(entry.Size)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 211, Col: 36} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if entry.IsDirectory { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "Directory") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(getMimeDisplayName(entry.Mime)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 219, Col: 44} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !entry.ModTime.IsZero() { - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(entry.ModTime.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 225, Col: 53} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "โ€”") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Mode) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 231, Col: 146} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !entry.IsDirectory { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "
Empty Directory

This directory contains no files or subdirectories.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 271, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "
Create New Folder
Folder names cannot contain / or \\ characters.
Upload Files
Choose one or more files to upload to the current directory. You can select multiple files by holding Ctrl (Cmd on Mac) while clicking.
0%
Preparing upload...
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func countDirectories(entries []dash.FileEntry) int { - count := 0 - for _, entry := range entries { - if entry.IsDirectory { - count++ - } - } - return count -} - -func countFiles(entries []dash.FileEntry) int { - count := 0 - for _, entry := range entries { - if !entry.IsDirectory { - count++ - } - } - return count -} - -func getFileIcon(mime string) string { - switch { - case strings.HasPrefix(mime, "image/"): - return "fa-image" - case strings.HasPrefix(mime, "video/"): - return "fa-video" - case strings.HasPrefix(mime, "audio/"): - return "fa-music" - case strings.HasPrefix(mime, "text/"): - return "fa-file-text" - case mime == "application/pdf": - return "fa-file-pdf" - case mime == "application/zip" || strings.Contains(mime, "archive"): - return "fa-file-archive" - case mime == "application/json": - return "fa-file-code" - case strings.Contains(mime, "script") || strings.Contains(mime, "javascript"): - return "fa-file-code" - default: - return "fa-file" - } -} - -func getMimeDisplayName(mime string) string { - switch mime { - case "text/plain": - return "Text" - case "text/html": - return "HTML" - case "application/json": - return "JSON" - case "application/pdf": - return "PDF" - case "image/jpeg": - return "JPEG" - case "image/png": - return "PNG" - case "image/gif": - return "GIF" - case "video/mp4": - return "MP4" - case "audio/mpeg": - return "MP3" - case "application/zip": - return "ZIP" - default: - if strings.HasPrefix(mime, "image/") { - return "Image" - } else if strings.HasPrefix(mime, "video/") { - return "Video" - } else if strings.HasPrefix(mime, "audio/") { - return "Audio" - } else if strings.HasPrefix(mime, "text/") { - return "Text" - } - return "File" - } -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/maintenance_config.templ b/weed/admin/view/app/maintenance_config.templ deleted file mode 100644 index 65ef565af..000000000 --- a/weed/admin/view/app/maintenance_config.templ +++ /dev/null @@ -1,267 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" -) - -templ MaintenanceConfig(data *maintenance.MaintenanceConfigData) { -
-
-
-
-

- - Maintenance Configuration -

- -
-
-
- -
-
-
-
-
System Settings
-
-
-
-
-
- - -
- - When enabled, the system will automatically scan for and execute maintenance tasks. - -
- -
- - - - How often to scan for maintenance tasks (1-1440 minutes). Default: 30 minutes - -
- -
- - - - How long to wait for worker heartbeat before considering it inactive (1-60 minutes). Default: 5 minutes - -
- -
- - - - Maximum time allowed for a single task to complete (1-24 hours). Default: 2 hours - -
- -
- - - - Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). Default: 4 - -
- -
- - - - Default number of times to retry failed tasks (0-10). Default: 3 - -
- -
- - - - Time to wait before retrying failed tasks (1-120 minutes). Default: 15 minutes - -
- -
- - - - How long to keep completed/failed task records (1-30 days). Default: 7 days - -
- -
- - -
-
-
-
-
-
- - -
-
-
-
-
- - Task Configuration -
-
-
-

Configure specific settings for each maintenance task type.

- -
-
-
-
- - -
-
-
-
-
System Statistics
-
-
-
-
-
-
Last Scan
-

{data.LastScanTime.Format("2006-01-02 15:04:05")}

-
-
-
-
-
Next Scan
-

{data.NextScanTime.Format("2006-01-02 15:04:05")}

-
-
-
-
-
Total Tasks
-

{fmt.Sprintf("%d", data.SystemStats.TotalTasks)}

-
-
-
-
-
Active Workers
-

{fmt.Sprintf("%d", data.SystemStats.ActiveWorkers)}

-
-
-
-
-
-
-
-
- - -} \ No newline at end of file diff --git a/weed/admin/view/app/maintenance_config_schema.templ b/weed/admin/view/app/maintenance_config_schema.templ deleted file mode 100644 index ee89cab64..000000000 --- a/weed/admin/view/app/maintenance_config_schema.templ +++ /dev/null @@ -1,381 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" -) - -templ MaintenanceConfigSchema(data *maintenance.MaintenanceConfigData, schema *maintenance.MaintenanceConfigSchema) { -
-
-
-
-

- - Maintenance Configuration -

- -
-
-
- -
-
-
-
-
System Settings
-
-
-
- - for _, field := range schema.Fields { - @ConfigField(field, data.Config) - } - -
- - -
-
-
-
-
-
- - -
-
-
-
-
- - Volume Vacuum -
-
-
-

Reclaims disk space by removing deleted files from volumes.

- Configure -
-
-
-
-
-
-
- - Volume Balance -
-
-
-

Redistributes volumes across servers to optimize storage utilization.

- Configure -
-
-
-
-
-
-
- - Erasure Coding -
-
-
-

Converts volumes to erasure coded format for improved durability.

- Configure -
-
-
-
-
- - -} - -// ConfigField renders a single configuration field based on schema with typed value lookup -templ ConfigField(field *config.Field, config *maintenance.MaintenanceConfig) { - if field.InputType == "interval" { - -
- -
- - -
- if field.Description != "" { -
{ field.Description }
- } -
- } else if field.InputType == "checkbox" { - -
-
- - -
- if field.Description != "" { -
{ field.Description }
- } -
- } else { - -
- - - if field.Description != "" { -
{ field.Description }
- } -
- } -} - -// Helper functions for form field types - -func getNumberStep(field *config.Field) string { - if field.Type == config.FieldTypeFloat { - return "0.01" - } - return "1" -} - -// Typed field getters for MaintenanceConfig - no interface{} needed -func getMaintenanceInt32Field(config *maintenance.MaintenanceConfig, fieldName string) int32 { - if config == nil { - return 0 - } - - switch fieldName { - case "scan_interval_seconds": - return config.ScanIntervalSeconds - case "worker_timeout_seconds": - return config.WorkerTimeoutSeconds - case "task_timeout_seconds": - return config.TaskTimeoutSeconds - case "retry_delay_seconds": - return config.RetryDelaySeconds - case "max_retries": - return config.MaxRetries - case "cleanup_interval_seconds": - return config.CleanupIntervalSeconds - case "task_retention_seconds": - return config.TaskRetentionSeconds - case "global_max_concurrent": - if config.Policy != nil { - return config.Policy.GlobalMaxConcurrent - } - return 0 - default: - return 0 - } -} - -func getMaintenanceBoolField(config *maintenance.MaintenanceConfig, fieldName string) bool { - if config == nil { - return false - } - - switch fieldName { - case "enabled": - return config.Enabled - default: - return false - } -} - -// Helper function to convert schema to JSON for JavaScript -templ schemaToJSON(schema *maintenance.MaintenanceConfigSchema) { - {`{}`} -} \ No newline at end of file diff --git a/weed/admin/view/app/maintenance_config_schema_templ.go b/weed/admin/view/app/maintenance_config_schema_templ.go deleted file mode 100644 index b7046f3f9..000000000 --- a/weed/admin/view/app/maintenance_config_schema_templ.go +++ /dev/null @@ -1,622 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" -) - -func MaintenanceConfigSchema(data *maintenance.MaintenanceConfigData, schema *maintenance.MaintenanceConfigSchema) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Maintenance Configuration

System Settings
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, field := range schema.Fields { - templ_7745c5c3_Err = ConfigField(field, data.Config).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Volume Vacuum

Reclaims disk space by removing deleted files from volumes.

Configure
Volume Balance

Redistributes volumes across servers to optimize storage utilization.

Configure
Erasure Coding

Converts volumes to erasure coded format for improved durability.

Configure
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// ConfigField renders a single configuration field based on schema with typed value lookup -func ConfigField(field *config.Field, config *maintenance.MaintenanceConfig) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var2 := templ.GetChildren(ctx) - if templ_7745c5c3_Var2 == nil { - templ_7745c5c3_Var2 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if field.InputType == "interval" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 267, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if field.InputType == "checkbox" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 288, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 319, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -// Helper functions for form field types - -func getNumberStep(field *config.Field) string { - if field.Type == config.FieldTypeFloat { - return "0.01" - } - return "1" -} - -// Typed field getters for MaintenanceConfig - no interface{} needed -func getMaintenanceInt32Field(config *maintenance.MaintenanceConfig, fieldName string) int32 { - if config == nil { - return 0 - } - - switch fieldName { - case "scan_interval_seconds": - return config.ScanIntervalSeconds - case "worker_timeout_seconds": - return config.WorkerTimeoutSeconds - case "task_timeout_seconds": - return config.TaskTimeoutSeconds - case "retry_delay_seconds": - return config.RetryDelaySeconds - case "max_retries": - return config.MaxRetries - case "cleanup_interval_seconds": - return config.CleanupIntervalSeconds - case "task_retention_seconds": - return config.TaskRetentionSeconds - case "global_max_concurrent": - if config.Policy != nil { - return config.Policy.GlobalMaxConcurrent - } - return 0 - default: - return 0 - } -} - -func getMaintenanceBoolField(config *maintenance.MaintenanceConfig, fieldName string) bool { - if config == nil { - return false - } - - switch fieldName { - case "enabled": - return config.Enabled - default: - return false - } -} - -// Helper function to convert schema to JSON for JavaScript -func schemaToJSON(schema *maintenance.MaintenanceConfigSchema) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var26 := templ.GetChildren(ctx) - if templ_7745c5c3_Var26 == nil { - templ_7745c5c3_Var26 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(`{}`) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config_schema.templ`, Line: 380, Col: 9} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/maintenance_config_templ.go b/weed/admin/view/app/maintenance_config_templ.go deleted file mode 100644 index 45e9b8ef1..000000000 --- a/weed/admin/view/app/maintenance_config_templ.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" -) - -func MaintenanceConfig(data *maintenance.MaintenanceConfigData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Maintenance Configuration

System Settings
When enabled, the system will automatically scan for and execute maintenance tasks.
How often to scan for maintenance tasks (1-1440 minutes). Default: 30 minutes
How long to wait for worker heartbeat before considering it inactive (1-60 minutes). Default: 5 minutes
Maximum time allowed for a single task to complete (1-24 hours). Default: 2 hours
Maximum number of maintenance tasks that can run simultaneously across all workers (1-20). Default: 4
Default number of times to retry failed tasks (0-10). Default: 3
Time to wait before retrying failed tasks (1-120 minutes). Default: 15 minutes
How long to keep completed/failed task records (1-30 days). Default: 7 days
Task Configuration

Configure specific settings for each maintenance task type.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, menuItem := range data.MenuItems { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 = []any{menuItem.Icon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var10...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.DisplayName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 151, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if menuItem.IsEnabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "Enabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "Disabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(menuItem.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 159, Col: 90} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
System Statistics
Last Scan

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastScanTime.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 180, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "

Next Scan

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(data.NextScanTime.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 186, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "

Total Tasks

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.TotalTasks)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 192, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "

Active Workers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.SystemStats.ActiveWorkers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_config.templ`, Line: 198, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/maintenance_queue.templ b/weed/admin/view/app/maintenance_queue.templ deleted file mode 100644 index 74540f285..000000000 --- a/weed/admin/view/app/maintenance_queue.templ +++ /dev/null @@ -1,429 +0,0 @@ -package app - -import ( - "fmt" - "time" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" -) - -templ MaintenanceQueue(data *maintenance.MaintenanceQueueData) { -
- -
-
-
-

- - Maintenance Queue -

-
- - -
-
-
-
- - -
-
-
-
- -

{fmt.Sprintf("%d", data.Stats.PendingTasks)}

-

Pending Tasks

-
-
-
-
-
-
- -

{fmt.Sprintf("%d", data.Stats.RunningTasks)}

-

Running Tasks

-
-
-
-
-
-
- -

{fmt.Sprintf("%d", data.Stats.CompletedToday)}

-

Completed Today

-
-
-
-
-
-
- -

{fmt.Sprintf("%d", data.Stats.FailedToday)}

-

Failed Today

-
-
-
-
- - -
-
-
-
-
- - Completed Tasks -
-
-
- if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 { -
- -

No completed maintenance tasks today

- Completed tasks will appear here after workers finish processing them -
- } else { -
- - - - - - - - - - - - - for _, task := range data.Tasks { - if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" { - if string(task.Status) == "failed" { - - - - - - - - - } else { - - - - - - - - - } - } - } - -
TypeStatusVolumeWorkerDurationCompleted
- @TaskTypeIcon(task.Type) - {string(task.Type)} - @StatusBadge(task.Status){fmt.Sprintf("%d", task.VolumeID)} - if task.WorkerID != "" { - {task.WorkerID} - } else { - - - } - - if task.StartedAt != nil && task.CompletedAt != nil { - {formatDuration(task.CompletedAt.Sub(*task.StartedAt))} - } else { - - - } - - if task.CompletedAt != nil { - {task.CompletedAt.Format("2006-01-02 15:04")} - } else { - - - } -
- @TaskTypeIcon(task.Type) - {string(task.Type)} - @StatusBadge(task.Status){fmt.Sprintf("%d", task.VolumeID)} - if task.WorkerID != "" { - {task.WorkerID} - } else { - - - } - - if task.StartedAt != nil && task.CompletedAt != nil { - {formatDuration(task.CompletedAt.Sub(*task.StartedAt))} - } else { - - - } - - if task.CompletedAt != nil { - {task.CompletedAt.Format("2006-01-02 15:04")} - } else { - - - } -
-
- } -
-
-
-
- - -
-
-
-
-
- - Pending Tasks -
-
-
- if data.Stats.PendingTasks == 0 { -
- -

No pending maintenance tasks

- Pending tasks will appear here when the system detects maintenance needs -
- } else { -
- - - - - - - - - - - - - for _, task := range data.Tasks { - if string(task.Status) == "pending" { - - - - - - - - - } - } - -
TypePriorityVolumeServerReasonCreated
- @TaskTypeIcon(task.Type) - {string(task.Type)} - @PriorityBadge(task.Priority){fmt.Sprintf("%d", task.VolumeID)}{task.Server}{task.Reason}{task.CreatedAt.Format("2006-01-02 15:04")}
-
- } -
-
-
-
- - -
-
-
-
-
- - Active Tasks -
-
-
- if data.Stats.RunningTasks == 0 { -
- -

No active maintenance tasks

- Active tasks will appear here when workers start processing them -
- } else { -
- - - - - - - - - - - - - for _, task := range data.Tasks { - if string(task.Status) == "assigned" || string(task.Status) == "in_progress" { - - - - - - - - - } - } - -
TypeStatusProgressVolumeWorkerStarted
- @TaskTypeIcon(task.Type) - {string(task.Type)} - @StatusBadge(task.Status)@ProgressBar(task.Progress, task.Status){fmt.Sprintf("%d", task.VolumeID)} - if task.WorkerID != "" { - {task.WorkerID} - } else { - - - } - - if task.StartedAt != nil { - {task.StartedAt.Format("2006-01-02 15:04")} - } else { - - - } -
-
- } -
-
-
-
-
- - -} - -// Helper components -templ TaskTypeIcon(taskType maintenance.MaintenanceTaskType) { - -} - -templ PriorityBadge(priority maintenance.MaintenanceTaskPriority) { - switch priority { - case maintenance.PriorityCritical: - Critical - case maintenance.PriorityHigh: - High - case maintenance.PriorityNormal: - Normal - case maintenance.PriorityLow: - Low - default: - Unknown - } -} - -templ StatusBadge(status maintenance.MaintenanceTaskStatus) { - switch status { - case maintenance.TaskStatusPending: - Pending - case maintenance.TaskStatusAssigned: - Assigned - case maintenance.TaskStatusInProgress: - Running - case maintenance.TaskStatusCompleted: - Completed - case maintenance.TaskStatusFailed: - Failed - case maintenance.TaskStatusCancelled: - Cancelled - default: - Unknown - } -} - -templ ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) { - if status == maintenance.TaskStatusInProgress || status == maintenance.TaskStatusAssigned { -
-
-
-
- {fmt.Sprintf("%.1f%%", progress)} - } else if status == maintenance.TaskStatusCompleted { -
-
-
-
- 100% - } else { - - - } -} - -func formatDuration(d time.Duration) string { - if d < time.Minute { - return fmt.Sprintf("%.0fs", d.Seconds()) - } else if d < time.Hour { - return fmt.Sprintf("%.1fm", d.Minutes()) - } else { - return fmt.Sprintf("%.1fh", d.Hours()) - } -} - -func formatTimeAgo(t time.Time) string { - duration := time.Since(t) - if duration < time.Minute { - return "just now" - } else if duration < time.Hour { - minutes := int(duration.Minutes()) - return fmt.Sprintf("%dm ago", minutes) - } else if duration < 24*time.Hour { - hours := int(duration.Hours()) - return fmt.Sprintf("%dh ago", hours) - } else { - days := int(duration.Hours() / 24) - return fmt.Sprintf("%dd ago", days) - } -} \ No newline at end of file diff --git a/weed/admin/view/app/maintenance_queue_templ.go b/weed/admin/view/app/maintenance_queue_templ.go deleted file mode 100644 index 05ecfbef8..000000000 --- a/weed/admin/view/app/maintenance_queue_templ.go +++ /dev/null @@ -1,876 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "time" -) - -func MaintenanceQueue(data *maintenance.MaintenanceQueueData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Maintenance Queue

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.PendingTasks)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 39, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

Pending Tasks

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.RunningTasks)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 48, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

Running Tasks

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.CompletedToday)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 57, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Completed Today

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Stats.FailedToday)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 66, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

Failed Today

Completed Tasks
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Stats.CompletedToday == 0 && data.Stats.FailedToday == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "

No completed maintenance tasks today

Completed tasks will appear here after workers finish processing them
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, task := range data.Tasks { - if string(task.Status) == "completed" || string(task.Status) == "failed" || string(task.Status) == "cancelled" { - if string(task.Status) == "failed" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
TypeStatusVolumeWorkerDurationCompleted
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 110, Col: 78} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 113, Col: 93} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.WorkerID != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 116, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.StartedAt != nil && task.CompletedAt != nil { - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 123, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.CompletedAt != nil { - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 130, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 140, Col: 78} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 143, Col: 93} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.WorkerID != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 146, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.StartedAt != nil && task.CompletedAt != nil { - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(formatDuration(task.CompletedAt.Sub(*task.StartedAt))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 153, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.CompletedAt != nil { - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(task.CompletedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 160, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
Pending Tasks
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Stats.PendingTasks == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "

No pending maintenance tasks

Pending tasks will appear here when the system detects maintenance needs
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, task := range data.Tasks { - if string(task.Status) == "pending" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
TypePriorityVolumeServerReasonCreated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 214, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = PriorityBadge(task.Priority).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 217, Col: 89} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(task.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 218, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(task.Reason) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 219, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(task.CreatedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 220, Col: 98} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "
Active Tasks
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Stats.RunningTasks == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "

No active maintenance tasks

Active tasks will appear here when workers start processing them
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, task := range data.Tasks { - if string(task.Status) == "assigned" || string(task.Status) == "in_progress" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "
TypeStatusProgressVolumeWorkerStarted
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = TaskTypeIcon(task.Type).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(string(task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 269, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = StatusBadge(task.Status).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = ProgressBar(task.Progress, task.Status).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", task.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 273, Col: 89} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.WorkerID != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(task.WorkerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 276, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if task.StartedAt != nil { - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(task.StartedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 283, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// Helper components -func TaskTypeIcon(taskType maintenance.MaintenanceTaskType) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var29 := templ.GetChildren(ctx) - if templ_7745c5c3_Var29 == nil { - templ_7745c5c3_Var29 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - var templ_7745c5c3_Var30 = []any{maintenance.GetTaskIcon(taskType) + " me-1"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var30...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func PriorityBadge(priority maintenance.MaintenanceTaskPriority) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var32 := templ.GetChildren(ctx) - if templ_7745c5c3_Var32 == nil { - templ_7745c5c3_Var32 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - switch priority { - case maintenance.PriorityCritical: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "Critical") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.PriorityHigh: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "High") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.PriorityNormal: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "Normal") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.PriorityLow: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "Low") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - default: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "Unknown") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -func StatusBadge(status maintenance.MaintenanceTaskStatus) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var33 := templ.GetChildren(ctx) - if templ_7745c5c3_Var33 == nil { - templ_7745c5c3_Var33 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - switch status { - case maintenance.TaskStatusPending: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "Pending") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.TaskStatusAssigned: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "Assigned") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.TaskStatusInProgress: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "Running") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.TaskStatusCompleted: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "Completed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.TaskStatusFailed: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "Failed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case maintenance.TaskStatusCancelled: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "Cancelled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - default: - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "Unknown") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -func ProgressBar(progress float64, status maintenance.MaintenanceTaskStatus) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var34 := templ.GetChildren(ctx) - if templ_7745c5c3_Var34 == nil { - templ_7745c5c3_Var34 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if status == maintenance.TaskStatusInProgress || status == maintenance.TaskStatusAssigned { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var36 string - templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", progress)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_queue.templ`, Line: 393, Col: 66} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if status == maintenance.TaskStatusCompleted { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "
100%") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -func formatDuration(d time.Duration) string { - if d < time.Minute { - return fmt.Sprintf("%.0fs", d.Seconds()) - } else if d < time.Hour { - return fmt.Sprintf("%.1fm", d.Minutes()) - } else { - return fmt.Sprintf("%.1fh", d.Hours()) - } -} - -func formatTimeAgo(t time.Time) string { - duration := time.Since(t) - if duration < time.Minute { - return "just now" - } else if duration < time.Hour { - minutes := int(duration.Minutes()) - return fmt.Sprintf("%dm ago", minutes) - } else if duration < 24*time.Hour { - hours := int(duration.Hours()) - return fmt.Sprintf("%dh ago", hours) - } else { - days := int(duration.Hours() / 24) - return fmt.Sprintf("%dd ago", days) - } -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/maintenance_workers.templ b/weed/admin/view/app/maintenance_workers.templ deleted file mode 100644 index 00748e550..000000000 --- a/weed/admin/view/app/maintenance_workers.templ +++ /dev/null @@ -1,338 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "time" -) - -templ MaintenanceWorkers(data *dash.MaintenanceWorkersData) { -
-
-
-
-
-

Maintenance Workers

-

Monitor and manage maintenance workers

-
-
- Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } -
-
-
-
- - -
-
-
-
-
-
-
- Total Workers -
-
{ fmt.Sprintf("%d", len(data.Workers)) }
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Active Workers -
-
- { fmt.Sprintf("%d", data.ActiveWorkers) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Busy Workers -
-
- { fmt.Sprintf("%d", data.BusyWorkers) } -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Load -
-
- { fmt.Sprintf("%d", data.TotalLoad) } -
-
-
- -
-
-
-
-
-
- - -
-
-
-
-
Worker Details
-
-
- if len(data.Workers) == 0 { -
- -
No Workers Found
-

No maintenance workers are currently registered.

-
- Tip: To start a worker, run: -
weed worker -admin=<admin_server> -capabilities=vacuum,ec,replication -
-
- } else { -
- - - - - - - - - - - - - - - - for _, worker := range data.Workers { - - - - - - - - - - - - } - -
Worker IDAddressStatusCapabilitiesLoadCurrent TasksPerformanceLast HeartbeatActions
- { worker.Worker.ID } - - { worker.Worker.Address } - - if worker.Worker.Status == "active" { - Active - } else if worker.Worker.Status == "busy" { - Busy - } else { - Inactive - } - -
- for _, capability := range worker.Worker.Capabilities { - { string(capability) } - } -
-
-
- if worker.Worker.MaxConcurrent > 0 { -
- { fmt.Sprintf("%d/%d", worker.Worker.CurrentLoad, worker.Worker.MaxConcurrent) } -
- } else { -
0/0
- } -
-
- { fmt.Sprintf("%d", len(worker.CurrentTasks)) } - - -
Completed: { fmt.Sprintf("%d", worker.Performance.TasksCompleted) }
-
Failed: { fmt.Sprintf("%d", worker.Performance.TasksFailed) }
-
Success Rate: { fmt.Sprintf("%.1f%%", worker.Performance.SuccessRate) }
-
-
- if time.Since(worker.Worker.LastHeartbeat) < 2*time.Minute { - - - { worker.Worker.LastHeartbeat.Format("15:04:05") } - - } else { - - - { worker.Worker.LastHeartbeat.Format("15:04:05") } - - } - -
- - if worker.Worker.Status == "active" { - - } -
-
-
- } -
-
-
-
-
- - - - - -} \ No newline at end of file diff --git a/weed/admin/view/app/maintenance_workers_templ.go b/weed/admin/view/app/maintenance_workers_templ.go deleted file mode 100644 index f1fd13ebb..000000000 --- a/weed/admin/view/app/maintenance_workers_templ.go +++ /dev/null @@ -1,401 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "time" -) - -func MaintenanceWorkers(data *dash.MaintenanceWorkersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Maintenance Workers

Monitor and manage maintenance workers

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 19, Col: 112} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Workers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Workers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 35, Col: 122} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Active Workers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.ActiveWorkers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 54, Col: 75} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Busy Workers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.BusyWorkers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 74, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Total Load
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalLoad)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 94, Col: 71} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Worker Details
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Workers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
No Workers Found

No maintenance workers are currently registered.

Tip: To start a worker, run:
weed worker -admin=<admin_server> -capabilities=vacuum,ec,replication
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, worker := range data.Workers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "
Worker IDAddressStatusCapabilitiesLoadCurrent TasksPerformanceLast HeartbeatActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Worker.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 144, Col: 76} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Worker.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 147, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if worker.Worker.Status == "active" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "Active") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if worker.Worker.Status == "busy" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "Busy") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "Inactive") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, capability := range worker.Worker.Capabilities { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(string(capability)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 161, Col: 126} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if worker.Worker.MaxConcurrent > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", worker.Worker.CurrentLoad, worker.Worker.MaxConcurrent)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 173, Col: 142} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
0/0
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(worker.CurrentTasks))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 181, Col: 97} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
Completed: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", worker.Performance.TasksCompleted)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 185, Col: 122} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
Failed: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", worker.Performance.TasksFailed)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 186, Col: 116} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
Success Rate: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", worker.Performance.SuccessRate)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 187, Col: 126} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if time.Since(worker.Worker.LastHeartbeat) < 2*time.Minute { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Worker.LastHeartbeat.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 194, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(worker.Worker.LastHeartbeat.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/maintenance_workers.templ`, Line: 199, Col: 108} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if worker.Worker.Status == "active" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "
Worker Details
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/object_store_users.templ b/weed/admin/view/app/object_store_users.templ deleted file mode 100644 index 686f57e1c..000000000 --- a/weed/admin/view/app/object_store_users.templ +++ /dev/null @@ -1,691 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ ObjectStoreUsers(data dash.ObjectStoreUsersData) { -
- -
-
-

- Object Store Users -

-

Manage S3 API users and their access credentials

-
-
- -
-
- - -
-
-
-
-
-
-
- Total Users -
-
- {fmt.Sprintf("%d", data.TotalUsers)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Users -
-
- {fmt.Sprintf("%d", len(data.Users))} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Last Updated -
-
- {data.LastUpdated.Format("15:04")} -
-
-
- -
-
-
-
-
-
- - -
-
-
-
-
- Object Store Users -
- -
-
-
- - - - - - - - - - - for _, user := range data.Users { - - - - - - - } - if len(data.Users) == 0 { - - - - } - -
UsernameEmailAccess KeyActions
-
- - {user.Username} -
-
{user.Email} - {user.AccessKey} - -
- - - - -
-
- -
-
No users found
-

Create your first object store user to get started.

-
-
-
-
-
-
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
- - - - - - - - - - - - - - - -} - -// Helper functions for template - \ No newline at end of file diff --git a/weed/admin/view/app/object_store_users_templ.go b/weed/admin/view/app/object_store_users_templ.go deleted file mode 100644 index 249ee1efc..000000000 --- a/weed/admin/view/app/object_store_users_templ.go +++ /dev/null @@ -1,205 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func ObjectStoreUsers(data dash.ObjectStoreUsersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Object Store Users

Manage S3 API users and their access credentials

Total Users
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalUsers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 38, Col: 71} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Users
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Users))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 58, Col: 71} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Last Updated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 78, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Object Store Users
Actions:
Export List
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, user := range data.Users { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.Users) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
UsernameEmailAccess KeyActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(user.Username) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 127, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(user.Email) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 130, Col: 59} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(user.AccessKey) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 132, Col: 88} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
No users found

Create your first object store user to get started.

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 180, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Create New User
Hold Ctrl/Cmd to select multiple permissions
Edit User
User Details
Manage Access Keys
Access Keys for
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// Helper functions for template -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/policies.templ b/weed/admin/view/app/policies.templ deleted file mode 100644 index e613d535e..000000000 --- a/weed/admin/view/app/policies.templ +++ /dev/null @@ -1,658 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ Policies(data dash.PoliciesData) { -
-

- IAM Policies -

-
-
- -
-
-
- -
- -
-
-
-
-
-
-
- Total Policies -
-
- {fmt.Sprintf("%d", data.TotalPolicies)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Active Policies -
-
- {fmt.Sprintf("%d", data.TotalPolicies)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Last Updated -
-
- {data.LastUpdated.Format("15:04")} -
-
-
- -
-
-
-
-
-
- - -
-
-
-
-
- IAM Policies -
- -
-
-
- - - - - - - - - - - - - for _, policy := range data.Policies { - - - - - - - - - } - if len(data.Policies) == 0 { - - - - } - -
Policy NameVersionStatementsCreatedUpdatedActions
- {policy.Name} - - {policy.Document.Version} - - {fmt.Sprintf("%d statements", len(policy.Document.Statement))} - - {policy.CreatedAt.Format("2006-01-02 15:04")} - - {policy.UpdatedAt.Format("2006-01-02 15:04")} - -
- - - -
-
- -
-
No IAM policies found
-

Create your first policy to manage access permissions.

- -
-
-
-
-
-
-
-
- - - - - - - - - - - - -} \ No newline at end of file diff --git a/weed/admin/view/app/policies_templ.go b/weed/admin/view/app/policies_templ.go deleted file mode 100644 index 89aa83db5..000000000 --- a/weed/admin/view/app/policies_templ.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func Policies(data dash.PoliciesData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

IAM Policies

Total Policies
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPolicies)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 34, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Policies
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPolicies)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 54, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Last Updated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 74, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
IAM Policies
Actions:
Export List
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, policy := range data.Policies { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.Policies) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Policy NameVersionStatementsCreatedUpdatedActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(policy.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 123, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(policy.Document.Version) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 126, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d statements", len(policy.Document.Statement))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 129, Col: 142} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(policy.CreatedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 132, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(policy.UpdatedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/policies.templ`, Line: 135, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
No IAM policies found

Create your first policy to manage access permissions.

Create IAM Policy
Enter a unique name for this policy (alphanumeric and underscores only)
Enter the policy document in AWS IAM JSON format
View IAM Policy
Loading...

Loading policy...

Edit IAM Policy
Policy name cannot be changed
Edit the policy document in AWS IAM JSON format
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/s3_buckets.templ b/weed/admin/view/app/s3_buckets.templ deleted file mode 100644 index 14117ba9f..000000000 --- a/weed/admin/view/app/s3_buckets.templ +++ /dev/null @@ -1,962 +0,0 @@ -package app - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ S3Buckets(data dash.S3BucketsData) { -
-

- Object Store Buckets -

-
-
- - -
-
-
- -
- -
-
-
-
-
-
-
- Total Buckets -
-
- {fmt.Sprintf("%d", data.TotalBuckets)} -
-
-
- -
-
-
-
-
- -
-
-
-
-
-
- Total Storage -
-
- {formatBytes(data.TotalSize)} -
-
-
- -
-
-
-
-
- - -
-
-
-
-
-
- Last Updated -
-
- {data.LastUpdated.Format("15:04:05")} -
-
-
- -
-
-
-
-
-
- - -
-
-
-
-
- Object Store Buckets -
- -
-
-
- - - - - - - - - - - - - - - for _, bucket := range data.Buckets { - - - - - - - - - - - } - if len(data.Buckets) == 0 { - - - - } - -
NameCreatedObjectsSizeQuotaVersioningObject LockActions
- - - {bucket.Name} - - {bucket.CreatedAt.Format("2006-01-02 15:04")}{fmt.Sprintf("%d", bucket.ObjectCount)}{formatBytes(bucket.Size)} - if bucket.Quota > 0 { -
- - {formatBytes(bucket.Quota)} - - if bucket.QuotaEnabled { -
- {fmt.Sprintf("%.1f%% used", float64(bucket.Size)/float64(bucket.Quota)*100)} -
- } else { -
Disabled
- } -
- } else { - No quota - } -
- if bucket.VersioningEnabled { - - Enabled - - } else { - - Disabled - - } - - if bucket.ObjectLockEnabled { -
- - Enabled - -
- {bucket.ObjectLockMode} โ€ข {fmt.Sprintf("%d days", bucket.ObjectLockDuration)} -
-
- } else { - - Disabled - - } -
-
- - - - - - -
-
- -
-
No Object Store buckets found
-

Create your first bucket to get started with S3 storage.

- -
-
-
-
-
-
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
-
- - - - - - - - - - - - - - - -} - -// Helper functions for template -func getQuotaStatusColor(used, quota int64, enabled bool) string { - if !enabled || quota <= 0 { - return "secondary" - } - - percentage := float64(used) / float64(quota) * 100 - if percentage >= 90 { - return "danger" - } else if percentage >= 75 { - return "warning" - } else { - return "success" - } -} - -func getQuotaInMB(quotaBytes int64) int64 { - if quotaBytes < 0 { - quotaBytes = -quotaBytes // Handle disabled quotas (negative values) - } - return quotaBytes / (1024 * 1024) -} \ No newline at end of file diff --git a/weed/admin/view/app/s3_buckets_templ.go b/weed/admin/view/app/s3_buckets_templ.go deleted file mode 100644 index 02d605db7..000000000 --- a/weed/admin/view/app/s3_buckets_templ.go +++ /dev/null @@ -1,413 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -func S3Buckets(data dash.S3BucketsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Object Store Buckets

Total Buckets
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalBuckets)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 37, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Storage
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 57, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Last Updated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 78, Col: 72} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Object Store Buckets
Actions:
Export List
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, bucket := range data.Buckets { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if len(data.Buckets) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
NameCreatedObjectsSizeQuotaVersioningObject LockActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 132, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.CreatedAt.Format("2006-01-02 15:04")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 135, Col: 92} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", bucket.ObjectCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 136, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(bucket.Size)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 137, Col: 73} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if bucket.Quota > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 = []any{fmt.Sprintf("badge bg-%s", getQuotaStatusColor(bucket.Size, bucket.Quota, bucket.QuotaEnabled))} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var10...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(bucket.Quota)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 142, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if bucket.QuotaEnabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%% used", float64(bucket.Size)/float64(bucket.Quota)*100)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 146, Col: 139} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Disabled
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "No quota") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if bucket.VersioningEnabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "Enabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Disabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if bucket.ObjectLockEnabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
Enabled
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.ObjectLockMode) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 174, Col: 82} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, " โ€ข ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d days", bucket.ObjectLockDuration)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 174, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "Disabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
No Object Store buckets found

Create your first bucket to get started with S3 storage.

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 243, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
Create New S3 Bucket
Bucket names must be between 3 and 63 characters, contain only lowercase letters, numbers, dots, and hyphens.
Set the maximum storage size for this bucket.
Keep multiple versions of objects in this bucket.
Prevent objects from being deleted or overwritten for a specified period. Automatically enables versioning.
Governance allows override with special permissions, Compliance is immutable.
Apply default retention to all new objects in this bucket.
Default retention period for new objects (1-36500 days).
Delete Bucket

Are you sure you want to delete the bucket ?

Warning: This action cannot be undone. All objects in the bucket will be permanently deleted.
Manage Bucket Quota
Set the maximum storage size for this bucket. Set to 0 to remove quota.
Bucket Details
Loading...
Loading bucket details...
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// Helper functions for template -func getQuotaStatusColor(used, quota int64, enabled bool) string { - if !enabled || quota <= 0 { - return "secondary" - } - - percentage := float64(used) / float64(quota) * 100 - if percentage >= 90 { - return "danger" - } else if percentage >= 75 { - return "warning" - } else { - return "success" - } -} - -func getQuotaInMB(quotaBytes int64) int64 { - if quotaBytes < 0 { - quotaBytes = -quotaBytes // Handle disabled quotas (negative values) - } - return quotaBytes / (1024 * 1024) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/subscribers.templ b/weed/admin/view/app/subscribers.templ deleted file mode 100644 index edcaf8a7b..000000000 --- a/weed/admin/view/app/subscribers.templ +++ /dev/null @@ -1,151 +0,0 @@ -package app - -import "fmt" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" - -templ Subscribers(data dash.SubscribersData) { -
-
-
-
-

Message Queue Subscribers

- Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} -
- - -
-
-
-
-
Total Subscribers
-

{fmt.Sprintf("%d", data.TotalSubscribers)}

-
-
-
-
-
-
-
Active Subscribers
-

{fmt.Sprintf("%d", data.ActiveSubscribers)}

-
-
-
-
-
-
-
Inactive Subscribers
-

{fmt.Sprintf("%d", data.TotalSubscribers - data.ActiveSubscribers)}

-
-
-
-
- - -
-
-
Subscribers
-
- -
-
-
- if len(data.Subscribers) == 0 { -
- -
No Subscribers Found
-

No message queue subscribers are currently active.

-
- } else { -
- - - - - - - - - - - - - - for _, subscriber := range data.Subscribers { - - - - - - - - - - } - -
Subscriber NameTopicConsumer GroupStatusMessages ProcessedLast SeenCreated
- {subscriber.Name} - - {subscriber.Topic} - {subscriber.ConsumerGroup} - if subscriber.Status == "active" { - Active - } else if subscriber.Status == "inactive" { - Inactive - } else { - {subscriber.Status} - } - {fmt.Sprintf("%d", subscriber.MessageCount)} - if !subscriber.LastSeen.IsZero() { - {subscriber.LastSeen.Format("2006-01-02 15:04:05")} - } else { - Never - } - - {subscriber.CreatedAt.Format("2006-01-02 15:04:05")} -
-
- } -
-
-
-
-
- - -} \ No newline at end of file diff --git a/weed/admin/view/app/subscribers_templ.go b/weed/admin/view/app/subscribers_templ.go deleted file mode 100644 index 32b743da6..000000000 --- a/weed/admin/view/app/subscribers_templ.go +++ /dev/null @@ -1,246 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import "fmt" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" - -func Subscribers(data dash.SubscribersData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Message Queue Subscribers

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 12, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Subscribers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalSubscribers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 21, Col: 98} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

Active Subscribers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.ActiveSubscribers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 29, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Inactive Subscribers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalSubscribers-data.ActiveSubscribers)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 37, Col: 123} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

Subscribers
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Subscribers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
No Subscribers Found

No message queue subscribers are currently active.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, subscriber := range data.Subscribers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Subscriber NameTopicConsumer GroupStatusMessages ProcessedLast SeenCreated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 78, Col: 76} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.Topic) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 81, Col: 97} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.ConsumerGroup) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 83, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if subscriber.Status == "active" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "Active") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if subscriber.Status == "inactive" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "Inactive") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.Status) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 90, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", subscriber.MessageCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 93, Col: 95} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !subscriber.LastSeen.IsZero() { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.LastSeen.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 96, Col: 131} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "Never") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/subscribers.templ`, Line: 102, Col: 128} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/task_config.templ b/weed/admin/view/app/task_config.templ deleted file mode 100644 index 81e089de6..000000000 --- a/weed/admin/view/app/task_config.templ +++ /dev/null @@ -1,160 +0,0 @@ -package app - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" -) - -templ TaskConfig(data *maintenance.TaskConfigData) { -
-
-
-
-

- - {data.TaskName} Configuration -

- -
-
-
- -
-
-
-
-
- - {data.TaskName} Settings -
-
-
-

{data.Description}

- - -
-
- @templ.Raw(string(data.ConfigFormHTML)) -
- -
- -
- - - - - Cancel - -
-
-
-
-
-
- - -
-
-
-
-
- - Task Information -
-
-
-
-
-
Task Type
-

- {string(data.TaskType)} -

-
-
-
Display Name
-

{data.TaskName}

-
-
-
-
-
Description
-

{data.Description}

-
-
-
-
-
-
-
- - -} \ No newline at end of file diff --git a/weed/admin/view/app/task_config_schema.templ b/weed/admin/view/app/task_config_schema.templ deleted file mode 100644 index bc2f29661..000000000 --- a/weed/admin/view/app/task_config_schema.templ +++ /dev/null @@ -1,487 +0,0 @@ -package app - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "reflect" - "strings" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -// Helper function to convert task schema to JSON string -func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string { - if schema == nil { - return "{}" - } - - data := map[string]interface{}{ - "fields": schema.Fields, - } - - jsonBytes, err := json.Marshal(data) - if err != nil { - return "{}" - } - - return string(jsonBytes) -} - -// Helper function to base64 encode the JSON to avoid HTML escaping issues -func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string { - jsonStr := taskSchemaToJSON(schema) - return base64.StdEncoding.EncodeToString([]byte(jsonStr)) -} - -templ TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) { -
-
-
-
-

- - {schema.DisplayName} Configuration -

- -
-
-
- - -
-
-
-
-
- - Task Configuration -
-

{schema.Description}

-
-
-
- - for _, field := range schema.Fields { - @TaskConfigField(field, config) - } - -
- - -
-
-
-
-
-
- - -
-
-
-
-
- - Important Notes -
-
-
- -
-
-
-
-
- - - - -
- - -} - -// TaskConfigField renders a single task configuration field based on schema with typed field lookup -templ TaskConfigField(field *config.Field, config interface{}) { - if field.InputType == "interval" { - -
- -
- - -
- if field.Description != "" { -
{ field.Description }
- } -
- } else if field.InputType == "checkbox" { - -
-
- - -
- if field.Description != "" { -
{ field.Description }
- } -
- } else if field.InputType == "text" { - -
- - - if field.Description != "" { -
{ field.Description }
- } -
- } else { - -
- - - if field.Description != "" { -
{ field.Description }
- } -
- } -} - -// Typed field getters for task configs - avoiding interface{} where possible -func getTaskConfigBoolField(config interface{}, fieldName string) bool { - switch fieldName { - case "enabled": - // Use reflection only for the common 'enabled' field in BaseConfig - if value := getTaskFieldValue(config, fieldName); value != nil { - if boolVal, ok := value.(bool); ok { - return boolVal - } - } - return false - default: - // For other boolean fields, use reflection - if value := getTaskFieldValue(config, fieldName); value != nil { - if boolVal, ok := value.(bool); ok { - return boolVal - } - } - return false - } -} - -func getTaskConfigInt32Field(config interface{}, fieldName string) int32 { - switch fieldName { - case "scan_interval_seconds", "max_concurrent": - // Common fields that should be int/int32 - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case int32: - return v - case int: - return int32(v) - case int64: - return int32(v) - } - } - return 0 - default: - // For other int fields, use reflection - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case int32: - return v - case int: - return int32(v) - case int64: - return int32(v) - case float64: - return int32(v) - } - } - return 0 - } -} - -func getTaskConfigFloatField(config interface{}, fieldName string) float64 { - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case float64: - return v - case float32: - return float64(v) - case int: - return float64(v) - case int32: - return float64(v) - case int64: - return float64(v) - } - } - return 0.0 -} - -func getTaskConfigStringField(config interface{}, fieldName string) string { - if value := getTaskFieldValue(config, fieldName); value != nil { - if strVal, ok := value.(string); ok { - return strVal - } - // Convert numbers to strings for form display - switch v := value.(type) { - case int: - return fmt.Sprintf("%d", v) - case int32: - return fmt.Sprintf("%d", v) - case int64: - return fmt.Sprintf("%d", v) - case float64: - return fmt.Sprintf("%.6g", v) - case float32: - return fmt.Sprintf("%.6g", v) - } - } - return "" -} - -func getTaskNumberStep(field *config.Field) string { - if field.Type == config.FieldTypeFloat { - return "0.01" - } - return "1" -} - -func getTaskFieldValue(config interface{}, fieldName string) interface{} { - if config == nil { - return nil - } - - // Use reflection to get the field value from the config struct - configValue := reflect.ValueOf(config) - if configValue.Kind() == reflect.Ptr { - configValue = configValue.Elem() - } - - if configValue.Kind() != reflect.Struct { - return nil - } - - configType := configValue.Type() - - for i := 0; i < configValue.NumField(); i++ { - field := configValue.Field(i) - fieldType := configType.Field(i) - - // Handle embedded structs recursively (before JSON tag check) - if field.Kind() == reflect.Struct && fieldType.Anonymous { - if value := getTaskFieldValue(field.Interface(), fieldName); value != nil { - return value - } - continue - } - - // Get JSON tag name - jsonTag := fieldType.Tag.Get("json") - if jsonTag == "" { - continue - } - - // Remove options like ",omitempty" - if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 { - jsonTag = jsonTag[:commaIdx] - } - - // Check if this is the field we're looking for - if jsonTag == fieldName { - return field.Interface() - } - } - - return nil -} - - \ No newline at end of file diff --git a/weed/admin/view/app/task_config_schema_templ.go b/weed/admin/view/app/task_config_schema_templ.go deleted file mode 100644 index e28490b2a..000000000 --- a/weed/admin/view/app/task_config_schema_templ.go +++ /dev/null @@ -1,948 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/config" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "reflect" - "strings" -) - -// Helper function to convert task schema to JSON string -func taskSchemaToJSON(schema *tasks.TaskConfigSchema) string { - if schema == nil { - return "{}" - } - - data := map[string]interface{}{ - "fields": schema.Fields, - } - - jsonBytes, err := json.Marshal(data) - if err != nil { - return "{}" - } - - return string(jsonBytes) -} - -// Helper function to base64 encode the JSON to avoid HTML escaping issues -func taskSchemaToBase64JSON(schema *tasks.TaskConfigSchema) string { - jsonStr := taskSchemaToJSON(schema) - return base64.StdEncoding.EncodeToString([]byte(jsonStr)) -} - -func TaskConfigSchema(data *maintenance.TaskConfigData, schema *tasks.TaskConfigSchema, config interface{}) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 = []any{schema.Icon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(schema.DisplayName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 47, Col: 43} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration

Task Configuration

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(schema.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 68, Col: 76} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, field := range schema.Fields { - templ_7745c5c3_Err = TaskConfigField(field, config).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Important Notes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if schema.TaskName == "vacuum" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Vacuum Operations:

Performance: Vacuum operations are I/O intensive and may impact cluster performance.

Safety: Only volumes meeting age and garbage thresholds will be processed.

Recommendation: Monitor cluster load and adjust concurrent limits accordingly.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if schema.TaskName == "balance" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
Balance Operations:

Performance: Volume balancing involves data movement and can impact cluster performance.

Safety: Requires adequate server count to ensure data safety during moves.

Recommendation: Run during off-peak hours to minimize impact on production workloads.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if schema.TaskName == "erasure_coding" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "
Erasure Coding Operations:

Performance: Erasure coding is CPU and I/O intensive. Consider running during off-peak hours.

Durability: With ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d+%d", erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 118, Col: 170} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " configuration, can tolerate up to ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", erasure_coding.ParityShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 118, Col: 260} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " shard failures.

Configuration: Fullness ratio should be between 0.5 and 1.0 (e.g., 0.90 for 90%).

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// TaskConfigField renders a single task configuration field based on schema with typed field lookup -func TaskConfigField(field *config.Field, config interface{}) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var9 := templ.GetChildren(ctx) - if templ_7745c5c3_Var9 == nil { - templ_7745c5c3_Var9 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - if field.InputType == "interval" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 253, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if field.InputType == "checkbox" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 275, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if field.InputType == "text" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 299, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(field.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_schema.templ`, Line: 330, Col: 69} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - return nil - }) -} - -// Typed field getters for task configs - avoiding interface{} where possible -func getTaskConfigBoolField(config interface{}, fieldName string) bool { - switch fieldName { - case "enabled": - // Use reflection only for the common 'enabled' field in BaseConfig - if value := getTaskFieldValue(config, fieldName); value != nil { - if boolVal, ok := value.(bool); ok { - return boolVal - } - } - return false - default: - // For other boolean fields, use reflection - if value := getTaskFieldValue(config, fieldName); value != nil { - if boolVal, ok := value.(bool); ok { - return boolVal - } - } - return false - } -} - -func getTaskConfigInt32Field(config interface{}, fieldName string) int32 { - switch fieldName { - case "scan_interval_seconds", "max_concurrent": - // Common fields that should be int/int32 - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case int32: - return v - case int: - return int32(v) - case int64: - return int32(v) - } - } - return 0 - default: - // For other int fields, use reflection - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case int32: - return v - case int: - return int32(v) - case int64: - return int32(v) - case float64: - return int32(v) - } - } - return 0 - } -} - -func getTaskConfigFloatField(config interface{}, fieldName string) float64 { - if value := getTaskFieldValue(config, fieldName); value != nil { - switch v := value.(type) { - case float64: - return v - case float32: - return float64(v) - case int: - return float64(v) - case int32: - return float64(v) - case int64: - return float64(v) - } - } - return 0.0 -} - -func getTaskConfigStringField(config interface{}, fieldName string) string { - if value := getTaskFieldValue(config, fieldName); value != nil { - if strVal, ok := value.(string); ok { - return strVal - } - // Convert numbers to strings for form display - switch v := value.(type) { - case int: - return fmt.Sprintf("%d", v) - case int32: - return fmt.Sprintf("%d", v) - case int64: - return fmt.Sprintf("%d", v) - case float64: - return fmt.Sprintf("%.6g", v) - case float32: - return fmt.Sprintf("%.6g", v) - } - } - return "" -} - -func getTaskNumberStep(field *config.Field) string { - if field.Type == config.FieldTypeFloat { - return "0.01" - } - return "1" -} - -func getTaskFieldValue(config interface{}, fieldName string) interface{} { - if config == nil { - return nil - } - - // Use reflection to get the field value from the config struct - configValue := reflect.ValueOf(config) - if configValue.Kind() == reflect.Ptr { - configValue = configValue.Elem() - } - - if configValue.Kind() != reflect.Struct { - return nil - } - - configType := configValue.Type() - - for i := 0; i < configValue.NumField(); i++ { - field := configValue.Field(i) - fieldType := configType.Field(i) - - // Handle embedded structs recursively (before JSON tag check) - if field.Kind() == reflect.Struct && fieldType.Anonymous { - if value := getTaskFieldValue(field.Interface(), fieldName); value != nil { - return value - } - continue - } - - // Get JSON tag name - jsonTag := fieldType.Tag.Get("json") - if jsonTag == "" { - continue - } - - // Remove options like ",omitempty" - if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 { - jsonTag = jsonTag[:commaIdx] - } - - // Check if this is the field we're looking for - if jsonTag == fieldName { - return field.Interface() - } - } - - return nil -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/task_config_schema_test.go b/weed/admin/view/app/task_config_schema_test.go deleted file mode 100644 index a4e2a8bc4..000000000 --- a/weed/admin/view/app/task_config_schema_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package app - -import ( - "testing" -) - -// Test structs that mirror the actual configuration structure -type TestBaseConfigForTemplate struct { - Enabled bool `json:"enabled"` - ScanIntervalSeconds int `json:"scan_interval_seconds"` - MaxConcurrent int `json:"max_concurrent"` -} - -type TestTaskConfigForTemplate struct { - TestBaseConfigForTemplate - TaskSpecificField float64 `json:"task_specific_field"` - AnotherSpecificField string `json:"another_specific_field"` -} - -func TestGetTaskFieldValue_EmbeddedStructFields(t *testing.T) { - config := &TestTaskConfigForTemplate{ - TestBaseConfigForTemplate: TestBaseConfigForTemplate{ - Enabled: true, - ScanIntervalSeconds: 2400, - MaxConcurrent: 5, - }, - TaskSpecificField: 0.18, - AnotherSpecificField: "test_value", - } - - // Test embedded struct fields - tests := []struct { - fieldName string - expectedValue interface{} - description string - }{ - {"enabled", true, "BaseConfig boolean field"}, - {"scan_interval_seconds", 2400, "BaseConfig integer field"}, - {"max_concurrent", 5, "BaseConfig integer field"}, - {"task_specific_field", 0.18, "Task-specific float field"}, - {"another_specific_field", "test_value", "Task-specific string field"}, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - result := getTaskFieldValue(config, test.fieldName) - - if result != test.expectedValue { - t.Errorf("Field %s: expected %v (%T), got %v (%T)", - test.fieldName, test.expectedValue, test.expectedValue, result, result) - } - }) - } -} - -func TestGetTaskFieldValue_NonExistentField(t *testing.T) { - config := &TestTaskConfigForTemplate{ - TestBaseConfigForTemplate: TestBaseConfigForTemplate{ - Enabled: true, - ScanIntervalSeconds: 1800, - MaxConcurrent: 3, - }, - } - - result := getTaskFieldValue(config, "non_existent_field") - - if result != nil { - t.Errorf("Expected nil for non-existent field, got %v", result) - } -} - -func TestGetTaskFieldValue_NilConfig(t *testing.T) { - var config *TestTaskConfigForTemplate = nil - - result := getTaskFieldValue(config, "enabled") - - if result != nil { - t.Errorf("Expected nil for nil config, got %v", result) - } -} - -func TestGetTaskFieldValue_EmptyStruct(t *testing.T) { - config := &TestTaskConfigForTemplate{} - - // Test that we can extract zero values - tests := []struct { - fieldName string - expectedValue interface{} - description string - }{ - {"enabled", false, "Zero value boolean"}, - {"scan_interval_seconds", 0, "Zero value integer"}, - {"max_concurrent", 0, "Zero value integer"}, - {"task_specific_field", 0.0, "Zero value float"}, - {"another_specific_field", "", "Zero value string"}, - } - - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - result := getTaskFieldValue(config, test.fieldName) - - if result != test.expectedValue { - t.Errorf("Field %s: expected %v (%T), got %v (%T)", - test.fieldName, test.expectedValue, test.expectedValue, result, result) - } - }) - } -} - -func TestGetTaskFieldValue_NonStructConfig(t *testing.T) { - var config interface{} = "not a struct" - - result := getTaskFieldValue(config, "enabled") - - if result != nil { - t.Errorf("Expected nil for non-struct config, got %v", result) - } -} - -func TestGetTaskFieldValue_PointerToStruct(t *testing.T) { - config := &TestTaskConfigForTemplate{ - TestBaseConfigForTemplate: TestBaseConfigForTemplate{ - Enabled: false, - ScanIntervalSeconds: 900, - MaxConcurrent: 2, - }, - TaskSpecificField: 0.35, - } - - // Test that pointers are handled correctly - enabledResult := getTaskFieldValue(config, "enabled") - if enabledResult != false { - t.Errorf("Expected false for enabled field, got %v", enabledResult) - } - - intervalResult := getTaskFieldValue(config, "scan_interval_seconds") - if intervalResult != 900 { - t.Errorf("Expected 900 for scan_interval_seconds field, got %v", intervalResult) - } -} - -func TestGetTaskFieldValue_FieldsWithJSONOmitempty(t *testing.T) { - // Test struct with omitempty tags - type TestConfigWithOmitempty struct { - TestBaseConfigForTemplate - OptionalField string `json:"optional_field,omitempty"` - } - - config := &TestConfigWithOmitempty{ - TestBaseConfigForTemplate: TestBaseConfigForTemplate{ - Enabled: true, - ScanIntervalSeconds: 1200, - MaxConcurrent: 4, - }, - OptionalField: "optional_value", - } - - // Test that fields with omitempty are still found - result := getTaskFieldValue(config, "optional_field") - if result != "optional_value" { - t.Errorf("Expected 'optional_value' for optional_field, got %v", result) - } - - // Test embedded fields still work - enabledResult := getTaskFieldValue(config, "enabled") - if enabledResult != true { - t.Errorf("Expected true for enabled field, got %v", enabledResult) - } -} - -func TestGetTaskFieldValue_DeepEmbedding(t *testing.T) { - // Test with multiple levels of embedding - type DeepBaseConfig struct { - DeepField string `json:"deep_field"` - } - - type MiddleConfig struct { - DeepBaseConfig - MiddleField int `json:"middle_field"` - } - - type TopConfig struct { - MiddleConfig - TopField bool `json:"top_field"` - } - - config := &TopConfig{ - MiddleConfig: MiddleConfig{ - DeepBaseConfig: DeepBaseConfig{ - DeepField: "deep_value", - }, - MiddleField: 123, - }, - TopField: true, - } - - // Test that deeply embedded fields are found - deepResult := getTaskFieldValue(config, "deep_field") - if deepResult != "deep_value" { - t.Errorf("Expected 'deep_value' for deep_field, got %v", deepResult) - } - - middleResult := getTaskFieldValue(config, "middle_field") - if middleResult != 123 { - t.Errorf("Expected 123 for middle_field, got %v", middleResult) - } - - topResult := getTaskFieldValue(config, "top_field") - if topResult != true { - t.Errorf("Expected true for top_field, got %v", topResult) - } -} - -// Benchmark to ensure performance is reasonable -func BenchmarkGetTaskFieldValue(b *testing.B) { - config := &TestTaskConfigForTemplate{ - TestBaseConfigForTemplate: TestBaseConfigForTemplate{ - Enabled: true, - ScanIntervalSeconds: 1800, - MaxConcurrent: 3, - }, - TaskSpecificField: 0.25, - AnotherSpecificField: "benchmark_test", - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Test both embedded and regular fields - _ = getTaskFieldValue(config, "enabled") - _ = getTaskFieldValue(config, "task_specific_field") - } -} diff --git a/weed/admin/view/app/task_config_templ.go b/weed/admin/view/app/task_config_templ.go deleted file mode 100644 index 59a56d30b..000000000 --- a/weed/admin/view/app/task_config_templ.go +++ /dev/null @@ -1,174 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" -) - -func TaskConfig(data *maintenance.TaskConfigData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 = []any{data.TaskIcon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 14, Col: 38} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 = []any{data.TaskIcon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var5...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 36, Col: 42} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " Settings

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 40, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templ.Raw(string(data.ConfigFormHTML)).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "

Cancel
Task Information
Task Type

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(string(data.TaskType)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 85, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "

Display Name

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 90, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "

Description

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config.templ`, Line: 96, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/task_config_templ.templ b/weed/admin/view/app/task_config_templ.templ deleted file mode 100644 index 010f5782c..000000000 --- a/weed/admin/view/app/task_config_templ.templ +++ /dev/null @@ -1,160 +0,0 @@ -package app - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" -) - -// TaskConfigTemplData represents data for templ-based task configuration -type TaskConfigTemplData struct { - TaskType maintenance.MaintenanceTaskType - TaskName string - TaskIcon string - Description string - ConfigSections []components.ConfigSectionData -} - -templ TaskConfigTempl(data *TaskConfigTemplData) { -
-
-
-
-

- - {data.TaskName} Configuration -

- -
-
-
- -
-
- -
-
- -
- - for _, section := range data.ConfigSections { - @components.ConfigSection(section) - } - - -
-
-
-
-
-
- - -
-
- -
-
-
-
-
-
-
-
- - -} \ No newline at end of file diff --git a/weed/admin/view/app/task_config_templ_templ.go b/weed/admin/view/app/task_config_templ_templ.go deleted file mode 100644 index e037eb1cf..000000000 --- a/weed/admin/view/app/task_config_templ_templ.go +++ /dev/null @@ -1,112 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/admin/view/components" -) - -// TaskConfigTemplData represents data for templ-based task configuration -type TaskConfigTemplData struct { - TaskType maintenance.MaintenanceTaskType - TaskName string - TaskIcon string - Description string - ConfigSections []components.ConfigSectionData -} - -func TaskConfigTempl(data *TaskConfigTemplData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 = []any{data.TaskIcon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.TaskName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_templ.templ`, Line: 24, Col: 38} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " Configuration

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_config_templ.templ`, Line: 44, Col: 37} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, section := range data.ConfigSections { - templ_7745c5c3_Err = components.ConfigSection(section).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/task_detail.templ b/weed/admin/view/app/task_detail.templ deleted file mode 100644 index 6045a5301..000000000 --- a/weed/admin/view/app/task_detail.templ +++ /dev/null @@ -1,1118 +0,0 @@ -package app - -import ( - "fmt" - "sort" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" -) - -// sortedKeys returns the sorted keys for a string map -func sortedKeys(m map[string]string) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -templ TaskDetail(data *maintenance.TaskDetailData) { -
- -
-
-
-
- -

- - Task Detail: {data.Task.ID} -

-
-
- - -
-
-
-
- - -
-
-
-
-
- - Task Overview -
-
-
-
-
-
-
Task ID:
-
{data.Task.ID}
- -
Type:
-
- {string(data.Task.Type)} -
- -
Status:
-
- if data.Task.Status == maintenance.TaskStatusPending { - Pending - } else if data.Task.Status == maintenance.TaskStatusAssigned { - Assigned - } else if data.Task.Status == maintenance.TaskStatusInProgress { - In Progress - } else if data.Task.Status == maintenance.TaskStatusCompleted { - Completed - } else if data.Task.Status == maintenance.TaskStatusFailed { - Failed - } else if data.Task.Status == maintenance.TaskStatusCancelled { - Cancelled - } -
- -
Priority:
-
- if data.Task.Priority == maintenance.PriorityHigh { - High - } else if data.Task.Priority == maintenance.PriorityCritical { - Critical - } else if data.Task.Priority == maintenance.PriorityNormal { - Normal - } else { - Low - } -
- - if data.Task.Reason != "" { -
Reason:
-
- {data.Task.Reason} -
- } -
-
-
- -
-
- Task Timeline -
-
-
-
-
- -
-
-
- Created - {data.Task.CreatedAt.Format("01-02 15:04:05")} -
-
- -
-
- -
- if data.Task.StartedAt != nil { -
- } else { -
- } -
- Scheduled - {data.Task.ScheduledAt.Format("01-02 15:04:05")} -
-
- -
- if data.Task.StartedAt != nil { -
- -
- } else { -
- -
- } - if data.Task.CompletedAt != nil { -
- } else { -
- } -
- Started - - if data.Task.StartedAt != nil { - {data.Task.StartedAt.Format("01-02 15:04:05")} - } else { - โ€” - } - -
-
- -
- if data.Task.CompletedAt != nil { -
- if data.Task.Status == maintenance.TaskStatusCompleted { - - } else if data.Task.Status == maintenance.TaskStatusFailed { - - } else { - - } -
- } else { -
- -
- } -
- - if data.Task.Status == maintenance.TaskStatusCompleted { - Completed - } else if data.Task.Status == maintenance.TaskStatusFailed { - Failed - } else if data.Task.Status == maintenance.TaskStatusCancelled { - Cancelled - } else { - Pending - } - - - if data.Task.CompletedAt != nil { - {data.Task.CompletedAt.Format("01-02 15:04:05")} - } else { - โ€” - } - -
-
-
-
-
- - - if data.Task.WorkerID != "" { -
-
Worker:
-
{data.Task.WorkerID}
-
- } - -
- if data.Task.TypedParams != nil && data.Task.TypedParams.VolumeSize > 0 { -
Volume Size:
-
- {formatBytes(int64(data.Task.TypedParams.VolumeSize))} -
- } - - if data.Task.TypedParams != nil && data.Task.TypedParams.Collection != "" { -
Collection:
-
- {data.Task.TypedParams.Collection} -
- } - - if data.Task.TypedParams != nil && data.Task.TypedParams.DataCenter != "" { -
Data Center:
-
- {data.Task.TypedParams.DataCenter} -
- } - - if data.Task.Progress > 0 { -
Progress:
-
-
-
- {fmt.Sprintf("%.1f%%", data.Task.Progress)} -
-
-
- } -
-
-
- - - - if data.Task.DetailedReason != "" { -
-
-
Detailed Reason:
-

{data.Task.DetailedReason}

-
-
- } - - if data.Task.Error != "" { -
-
-
Error:
-
- {data.Task.Error} -
-
-
- } -
-
-
-
- - - if data.Task.TypedParams != nil { -
-
-
-
-
- - Task Configuration -
-
-
- - if len(data.Task.TypedParams.Sources) > 0 { -
-
- - Source Servers - {fmt.Sprintf("%d", len(data.Task.TypedParams.Sources))} -
-
-
- for i, source := range data.Task.TypedParams.Sources { -
- {fmt.Sprintf("#%d", i+1)} - {source.Node} -
- if source.DataCenter != "" { - - {source.DataCenter} - - } -
-
- if source.Rack != "" { - - {source.Rack} - - } -
-
- if source.VolumeId > 0 { - - Vol:{fmt.Sprintf("%d", source.VolumeId)} - - } -
-
- if len(source.ShardIds) > 0 { - - Shards: - for j, shardId := range source.ShardIds { - if j > 0 { - , - } - if shardId < erasure_coding.DataShardsCount { - {fmt.Sprintf("%d", shardId)} - } else { - {fmt.Sprintf("P%d", shardId-erasure_coding.DataShardsCount)} - } - } - - } -
-
- } -
-
-
- } - - - if len(data.Task.TypedParams.Sources) > 0 || len(data.Task.TypedParams.Targets) > 0 { -
- -
- Task: {string(data.Task.Type)} -
- } - - - if len(data.Task.TypedParams.Targets) > 0 { -
-
- - Target Servers - {fmt.Sprintf("%d", len(data.Task.TypedParams.Targets))} -
-
-
- for i, target := range data.Task.TypedParams.Targets { -
- {fmt.Sprintf("#%d", i+1)} - {target.Node} -
- if target.DataCenter != "" { - - {target.DataCenter} - - } -
-
- if target.Rack != "" { - - {target.Rack} - - } -
-
- if target.VolumeId > 0 { - - Vol:{fmt.Sprintf("%d", target.VolumeId)} - - } -
-
- if len(target.ShardIds) > 0 { - - Shards: - for j, shardId := range target.ShardIds { - if j > 0 { - , - } - if shardId < erasure_coding.DataShardsCount { - {fmt.Sprintf("%d", shardId)} - } else { - {fmt.Sprintf("P%d", shardId-erasure_coding.DataShardsCount)} - } - } - - } -
-
- } -
-
-
- } -
-
-
-
- } - - - if data.WorkerInfo != nil { -
-
-
-
-
- - Worker Information -
-
-
-
-
-
-
Worker ID:
-
{data.WorkerInfo.ID}
- -
Address:
-
{data.WorkerInfo.Address}
- -
Status:
-
- if data.WorkerInfo.Status == "active" { - Active - } else if data.WorkerInfo.Status == "busy" { - Busy - } else { - Inactive - } -
-
-
-
-
-
Last Heartbeat:
-
{data.WorkerInfo.LastHeartbeat.Format("2006-01-02 15:04:05")}
- -
Current Load:
-
{fmt.Sprintf("%d/%d", data.WorkerInfo.CurrentLoad, data.WorkerInfo.MaxConcurrent)}
- -
Capabilities:
-
- for _, capability := range data.WorkerInfo.Capabilities { - {string(capability)} - } -
-
-
-
-
-
-
-
- } - - - if len(data.AssignmentHistory) > 0 { -
-
-
-
-
- - Assignment History -
-
-
-
- - - - - - - - - - - - for _, assignment := range data.AssignmentHistory { - - - - - - - - } - -
Worker IDWorker AddressAssigned AtUnassigned AtReason
{assignment.WorkerID}{assignment.WorkerAddress}{assignment.AssignedAt.Format("2006-01-02 15:04:05")} - if assignment.UnassignedAt != nil { - {assignment.UnassignedAt.Format("2006-01-02 15:04:05")} - } else { - โ€” - } - {assignment.Reason}
-
-
-
-
-
- } - - - if len(data.ExecutionLogs) > 0 { -
-
-
-
-
- - Execution Logs -
-
-
-
- - - - - - - - - - - for _, log := range data.ExecutionLogs { - - - - - - - } - -
TimestampLevelMessageDetails
{log.Timestamp.Format("15:04:05")} - if log.Level == "error" { - {log.Level} - } else if log.Level == "warn" { - {log.Level} - } else if log.Level == "info" { - {log.Level} - } else { - {log.Level} - } - {log.Message} - if log.Fields != nil && len(log.Fields) > 0 { - - for _, k := range sortedKeys(log.Fields) { - {k}={log.Fields[k]} - } - - } else if log.Progress != nil || log.Status != "" { - - if log.Progress != nil { - progress={fmt.Sprintf("%.0f%%", *log.Progress)} - } - if log.Status != "" { - status={log.Status} - } - - } else { - - - } -
-
-
-
-
-
- } - - - if len(data.RelatedTasks) > 0 { -
-
-
-
-
- - Related Tasks -
-
-
-
- - - - - - - - - - - - - for _, relatedTask := range data.RelatedTasks { - - - - - - - - - } - -
Task IDTypeStatusVolume IDServerCreated
- - {relatedTask.ID} - - {string(relatedTask.Type)} - if relatedTask.Status == maintenance.TaskStatusCompleted { - Completed - } else if relatedTask.Status == maintenance.TaskStatusFailed { - Failed - } else if relatedTask.Status == maintenance.TaskStatusInProgress { - In Progress - } else { - {string(relatedTask.Status)} - } - - if relatedTask.VolumeID != 0 { - {fmt.Sprintf("%d", relatedTask.VolumeID)} - } else { - - - } - - if relatedTask.Server != "" { - {relatedTask.Server} - } else { - - - } - {relatedTask.CreatedAt.Format("2006-01-02 15:04:05")}
-
-
-
-
-
- } - - -
-
-
-
-
- - Actions -
-
-
- if data.Task.Status == maintenance.TaskStatusPending || data.Task.Status == maintenance.TaskStatusAssigned { - - } - if data.Task.WorkerID != "" { - - } - -
-
-
-
-
- - - - - - - -} diff --git a/weed/admin/view/app/task_detail_templ.go b/weed/admin/view/app/task_detail_templ.go deleted file mode 100644 index eec5ba29c..000000000 --- a/weed/admin/view/app/task_detail_templ.go +++ /dev/null @@ -1,1628 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" - "sort" -) - -// sortedKeys returns the sorted keys for a string map -func sortedKeys(m map[string]string) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -func TaskDetail(data *maintenance.TaskDetailData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Task Detail: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 35, Col: 54} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

Task Overview
Task ID:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 67, Col: 76} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Type:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(string(data.Task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 71, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Status:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Status == maintenance.TaskStatusPending { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "Pending") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusAssigned { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Assigned") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusInProgress { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "In Progress") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusCompleted { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "Completed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusFailed { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "Failed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusCancelled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Cancelled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
Priority:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Priority == maintenance.PriorityHigh { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "High") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Priority == maintenance.PriorityCritical { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "Critical") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Priority == maintenance.PriorityNormal { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "Normal") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "Low") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Reason != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Reason:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.Reason) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 107, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
Task Timeline
Created ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.CreatedAt.Format("01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 127, Col: 131} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.StartedAt != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Scheduled ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.ScheduledAt.Format("01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 142, Col: 133} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.StartedAt != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.CompletedAt != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "
Started ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.StartedAt != nil { - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.StartedAt.Format("01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 165, Col: 105} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "โ€”") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.CompletedAt != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Status == maintenance.TaskStatusCompleted { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusFailed { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Status == maintenance.TaskStatusCompleted { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "Completed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusFailed { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "Failed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.Task.Status == maintenance.TaskStatusCancelled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "Cancelled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "Pending") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.CompletedAt != nil { - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.CompletedAt.Format("01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 203, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "โ€”") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.WorkerID != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
Worker:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.WorkerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 218, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.TypedParams != nil && data.Task.TypedParams.VolumeSize > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "
Volume Size:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(data.Task.TypedParams.VolumeSize))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 226, Col: 128} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.TypedParams != nil && data.Task.TypedParams.Collection != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "
Collection:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.TypedParams.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 233, Col: 139} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.TypedParams != nil && data.Task.TypedParams.DataCenter != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "
Data Center:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.TypedParams.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 240, Col: 146} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.Progress > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "
Progress:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", data.Task.Progress)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 252, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.DetailedReason != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
Detailed Reason:

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.DetailedReason) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 267, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.Error != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "
Error:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(data.Task.Error) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 277, Col: 62} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.TypedParams != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "
Task Configuration
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Task.TypedParams.Sources) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "
Source Servers ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Task.TypedParams.Sources))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 305, Col: 127} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, source := range data.Task.TypedParams.Sources { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("#%d", i+1)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 311, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(source.Node) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 312, Col: 54} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if source.DataCenter != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(source.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 316, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if source.Rack != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(source.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 323, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if source.VolumeId > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "Vol:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", source.VolumeId)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 330, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(source.ShardIds) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "Shards: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for j, shardId := range source.ShardIds { - if j > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, ", ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if shardId < erasure_coding.DataShardsCount { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shardId)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 343, Col: 202} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("P%d", shardId-erasure_coding.DataShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 345, Col: 246} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Task.TypedParams.Sources) > 0 || len(data.Task.TypedParams.Targets) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "

Task: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(string(data.Task.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 363, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Task.TypedParams.Targets) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "
Target Servers ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var30 string - templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Task.TypedParams.Targets))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 373, Col: 130} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for i, target := range data.Task.TypedParams.Targets { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var31 string - templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("#%d", i+1)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 379, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var32 string - templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(target.Node) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 380, Col: 54} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if target.DataCenter != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var33 string - templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(target.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 384, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if target.Rack != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var34 string - templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(target.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 391, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if target.VolumeId > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, "Vol:") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var35 string - templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", target.VolumeId)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 398, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 109, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(target.ShardIds) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 110, "Shards: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for j, shardId := range target.ShardIds { - if j > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 111, ", ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 112, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if shardId < erasure_coding.DataShardsCount { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 113, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var37 string - templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", shardId)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 411, Col: 202} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 115, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 116, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("P%d", shardId-erasure_coding.DataShardsCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 413, Col: 246} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 118, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 119, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 120, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 121, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 122, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 123, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.WorkerInfo != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 124, "
Worker Information
Worker ID:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var40 string - templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(data.WorkerInfo.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 447, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 125, "
Address:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var41 string - templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(data.WorkerInfo.Address) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 450, Col: 91} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 126, "
Status:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.WorkerInfo.Status == "active" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 127, "Active") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if data.WorkerInfo.Status == "busy" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 128, "Busy") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 129, "Inactive") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 130, "
Last Heartbeat:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var42 string - templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(data.WorkerInfo.LastHeartbeat.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 467, Col: 121} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 131, "
Current Load:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var43 string - templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", data.WorkerInfo.CurrentLoad, data.WorkerInfo.MaxConcurrent)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 470, Col: 142} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 132, "
Capabilities:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, capability := range data.WorkerInfo.Capabilities { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 133, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var44 string - templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(string(capability)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 475, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 134, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 135, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 136, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.AssignmentHistory) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 137, "
Assignment History
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, assignment := range data.AssignmentHistory { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 138, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 145, "
Worker IDWorker AddressAssigned AtUnassigned AtReason
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var45 string - templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(assignment.WorkerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 513, Col: 78} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 139, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var46 string - templ_7745c5c3_Var46, templ_7745c5c3_Err = templ.JoinStringErrs(assignment.WorkerAddress) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 514, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var46)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 140, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var47 string - templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(assignment.AssignedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 515, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 141, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if assignment.UnassignedAt != nil { - var templ_7745c5c3_Var48 string - templ_7745c5c3_Var48, templ_7745c5c3_Err = templ.JoinStringErrs(assignment.UnassignedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 518, Col: 110} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var48)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 142, "โ€”") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 143, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var49 string - templ_7745c5c3_Var49, templ_7745c5c3_Err = templ.JoinStringErrs(assignment.Reason) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 523, Col: 70} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var49)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 144, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 146, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.ExecutionLogs) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 147, "
Execution Logs
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, log := range data.ExecutionLogs { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 148, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 173, "
TimestampLevelMessageDetails
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var50 string - templ_7745c5c3_Var50, templ_7745c5c3_Err = templ.JoinStringErrs(log.Timestamp.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 560, Col: 92} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var50)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 149, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if log.Level == "error" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 150, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var51 string - templ_7745c5c3_Var51, templ_7745c5c3_Err = templ.JoinStringErrs(log.Level) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 563, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var51)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 151, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if log.Level == "warn" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 152, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var52 string - templ_7745c5c3_Var52, templ_7745c5c3_Err = templ.JoinStringErrs(log.Level) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 565, Col: 97} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var52)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 153, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if log.Level == "info" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 154, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var53 string - templ_7745c5c3_Var53, templ_7745c5c3_Err = templ.JoinStringErrs(log.Level) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 567, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var53)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 155, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 156, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var54 string - templ_7745c5c3_Var54, templ_7745c5c3_Err = templ.JoinStringErrs(log.Level) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 569, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var54)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 157, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 158, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var55 string - templ_7745c5c3_Var55, templ_7745c5c3_Err = templ.JoinStringErrs(log.Message) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 572, Col: 70} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var55)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 159, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if log.Fields != nil && len(log.Fields) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 160, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, k := range sortedKeys(log.Fields) { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 161, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var56 string - templ_7745c5c3_Var56, templ_7745c5c3_Err = templ.JoinStringErrs(k) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 577, Col: 110} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var56)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 162, "=") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var57 string - templ_7745c5c3_Var57, templ_7745c5c3_Err = templ.JoinStringErrs(log.Fields[k]) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 577, Col: 129} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var57)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 163, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 164, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if log.Progress != nil || log.Status != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 165, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if log.Progress != nil { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 166, "progress=") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var58 string - templ_7745c5c3_Var58, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.0f%%", *log.Progress)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 583, Col: 151} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var58)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 167, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if log.Status != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 168, "status=") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var59 string - templ_7745c5c3_Var59, templ_7745c5c3_Err = templ.JoinStringErrs(log.Status) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 586, Col: 118} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var59)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 169, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 170, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 171, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 172, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 174, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.RelatedTasks) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 175, "
Related Tasks
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, relatedTask := range data.RelatedTasks { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 176, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 193, "
Task IDTypeStatusVolume IDServerCreated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var61 string - templ_7745c5c3_Var61, templ_7745c5c3_Err = templ.JoinStringErrs(relatedTask.ID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 633, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var61)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 178, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var62 string - templ_7745c5c3_Var62, templ_7745c5c3_Err = templ.JoinStringErrs(string(relatedTask.Type)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 636, Col: 105} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var62)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 179, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if relatedTask.Status == maintenance.TaskStatusCompleted { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 180, "Completed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if relatedTask.Status == maintenance.TaskStatusFailed { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 181, "Failed") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else if relatedTask.Status == maintenance.TaskStatusInProgress { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 182, "In Progress") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 183, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var63 string - templ_7745c5c3_Var63, templ_7745c5c3_Err = templ.JoinStringErrs(string(relatedTask.Status)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 645, Col: 116} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var63)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 184, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 185, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if relatedTask.VolumeID != 0 { - var templ_7745c5c3_Var64 string - templ_7745c5c3_Var64, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", relatedTask.VolumeID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 650, Col: 96} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var64)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 186, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 187, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if relatedTask.Server != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 188, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var65 string - templ_7745c5c3_Var65, templ_7745c5c3_Err = templ.JoinStringErrs(relatedTask.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 657, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var65)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 189, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 190, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 191, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var66 string - templ_7745c5c3_Var66, templ_7745c5c3_Err = templ.JoinStringErrs(relatedTask.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/task_detail.templ`, Line: 662, Col: 111} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var66)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 192, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 194, "
Actions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Task.Status == maintenance.TaskStatusPending || data.Task.Status == maintenance.TaskStatusAssigned { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 195, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if data.Task.WorkerID != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 197, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 200, "
Task Logs
Loading logs...

Fetching logs from worker...

Task: | Worker: | Entries:
Log Entries (Last 100) Newest entries first
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/template_helpers.go b/weed/admin/view/app/template_helpers.go deleted file mode 100644 index eca78f136..000000000 --- a/weed/admin/view/app/template_helpers.go +++ /dev/null @@ -1,68 +0,0 @@ -package app - -import ( - "fmt" - "strconv" -) - -// getStatusColor returns Bootstrap color class for status -func getStatusColor(status string) string { - switch status { - case "active", "healthy": - return "success" - case "warning": - return "warning" - case "critical", "unreachable": - return "danger" - default: - return "secondary" - } -} - -// formatBytes converts bytes to human readable format -func formatBytes(bytes int64) string { - if bytes == 0 { - return "0 B" - } - - units := []string{"B", "KB", "MB", "GB", "TB", "PB"} - var i int - value := float64(bytes) - - for value >= 1024 && i < len(units)-1 { - value /= 1024 - i++ - } - - if i == 0 { - return fmt.Sprintf("%.0f %s", value, units[i]) - } - return fmt.Sprintf("%.1f %s", value, units[i]) -} - -// formatNumber formats large numbers with commas -func formatNumber(num int64) string { - if num == 0 { - return "0" - } - - str := strconv.FormatInt(num, 10) - result := "" - - for i, char := range str { - if i > 0 && (len(str)-i)%3 == 0 { - result += "," - } - result += string(char) - } - - return result -} - -// calculatePercent calculates percentage for progress bars -func calculatePercent(current, max int) int { - if max == 0 { - return 0 - } - return (current * 100) / max -} diff --git a/weed/admin/view/app/topic_details.templ b/weed/admin/view/app/topic_details.templ deleted file mode 100644 index 03a8af488..000000000 --- a/weed/admin/view/app/topic_details.templ +++ /dev/null @@ -1,693 +0,0 @@ -package app - -import "fmt" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" -import "github.com/seaweedfs/seaweedfs/weed/util" - -templ TopicDetails(data dash.TopicDetailsData) { -
-
-
- -
-
- -

Topic Details: {data.TopicName}

-
- Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} -
- - -
-
-
-
-
Partitions
-

{fmt.Sprintf("%d", len(data.Partitions))}

-
-
-
-
-
-
-
Schema Fields
-

{fmt.Sprintf("%d", len(data.KeySchema) + len(data.ValueSchema))}

-
-
-
-
-
-
-
Total Messages
-

{fmt.Sprintf("%d", data.MessageCount)}

-
-
-
-
-
-
-
Total Size
-

{util.BytesToHumanReadable(uint64(data.TotalSize))}

-
-
-
-
-
-
-
Publishers
-

{fmt.Sprintf("%d", len(data.Publishers))}

-
-
-
-
-
-
-
Subscribers
-

{fmt.Sprintf("%d", len(data.Subscribers))}

-
-
-
-
- - -
-
-
-
-
Consumer Group Offsets
-

{fmt.Sprintf("%d", len(data.ConsumerGroupOffsets))}

-

Saved consumer progress checkpoints

-
-
-
-
- - -
-
-
-
-
Topic Information
-
-
-
-
Namespace:
-
{data.Namespace}
-
Name:
-
{data.Name}
-
Full Name:
-
{data.TopicName}
-
Created:
-
{data.CreatedAt.Format("2006-01-02 15:04:05")}
-
-
-
-
-
-
-
-
- Retention Policy -
- -
-
-
-
Status:
-
- if data.Retention.Enabled { - Enabled - } else { - Disabled - } -
-
Duration:
-
- if data.Retention.Enabled { - - {fmt.Sprintf("%d", data.Retention.DisplayValue)} {data.Retention.DisplayUnit} - - } else { - No retention configured - } -
-
-
-
-
-
- - -
-
-
-
-
Schema Definition
-
-
- if len(data.KeySchema) == 0 && len(data.ValueSchema) == 0 { -

No schema information available

- } else { -
- - - - - - - - - - - for _, field := range data.KeySchema { - - - - - - - } - for _, field := range data.ValueSchema { - - - - - - - } - -
FieldTypeRequiredSchema Part
{field.Name}{field.Type} - if field.Required { - - } else { - - } - Key
{field.Name}{field.Type} - if field.Required { - - } else { - - } - Value
-
- } -
-
-
-
- - -
-
-
Partitions
-
- -
-
-
- if len(data.Partitions) == 0 { -
- -
No Partitions Found
-

No partitions are configured for this topic.

-
- } else { -
- - - - - - - - - - - - - - for _, partition := range data.Partitions { - - - - - - - - - - } - -
Partition IDLeader BrokerFollower BrokerMessagesSizeLast Data TimeCreated
- {fmt.Sprintf("%d", partition.ID)} - - {partition.LeaderBroker} - - if partition.FollowerBroker != "" { - {partition.FollowerBroker} - } else { - None - } - {fmt.Sprintf("%d", partition.MessageCount)}{util.BytesToHumanReadable(uint64(partition.TotalSize))} - if !partition.LastDataTime.IsZero() { - {partition.LastDataTime.Format("2006-01-02 15:04:05")} - } else { - Never - } - - {partition.CreatedAt.Format("2006-01-02 15:04:05")} -
-
- } -
-
- - -
-
-
-
-
Active Publishers {fmt.Sprintf("%d", len(data.Publishers))}
-
-
- if len(data.Publishers) == 0 { -
- No active publishers found for this topic. -
- } else { -
- - - - - - - - - - - - - - for _, publisher := range data.Publishers { - - - - - - - - - - } - -
PublisherPartitionBrokerStatusPublishedAcknowledgedLast Seen
{publisher.PublisherName}{fmt.Sprintf("%d", publisher.PartitionID)}{publisher.Broker} - if publisher.IsActive { - Active - } else { - Inactive - } - - if publisher.LastPublishedOffset > 0 { - {fmt.Sprintf("%d", publisher.LastPublishedOffset)} - } else { - - - } - - if publisher.LastAckedOffset > 0 { - {fmt.Sprintf("%d", publisher.LastAckedOffset)} - } else { - - - } - - if !publisher.LastSeenTime.IsZero() { - {publisher.LastSeenTime.Format("15:04:05")} - } else { - - - } -
-
- } -
-
-
-
- -
-
-
-
-
Active Subscribers {fmt.Sprintf("%d", len(data.Subscribers))}
-
-
- if len(data.Subscribers) == 0 { -
- No active subscribers found for this topic. -
- } else { -
- - - - - - - - - - - - - - - for _, subscriber := range data.Subscribers { - - - - - - - - - - - } - -
Consumer GroupConsumer IDPartitionBrokerStatusReceivedAcknowledgedLast Seen
{subscriber.ConsumerGroup}{subscriber.ConsumerID}{fmt.Sprintf("%d", subscriber.PartitionID)}{subscriber.Broker} - if subscriber.IsActive { - Active - } else { - Inactive - } - - if subscriber.LastReceivedOffset > 0 { - {fmt.Sprintf("%d", subscriber.LastReceivedOffset)} - } else { - - - } - - if subscriber.CurrentOffset > 0 { - {fmt.Sprintf("%d", subscriber.CurrentOffset)} - } else { - - - } - - if !subscriber.LastSeenTime.IsZero() { - {subscriber.LastSeenTime.Format("15:04:05")} - } else { - - - } -
-
- } -
-
-
-
- - -
-
-
-
-
Consumer Group Offsets {fmt.Sprintf("%d", len(data.ConsumerGroupOffsets))}
-
-
- if len(data.ConsumerGroupOffsets) == 0 { -
- No consumer group offsets found for this topic. -
- } else { -
- - - - - - - - - - - for _, offset := range data.ConsumerGroupOffsets { - - - - - - - } - -
Consumer GroupPartitionOffsetLast Updated
- {offset.ConsumerGroup} - - {fmt.Sprintf("%d", offset.PartitionID)} - - {fmt.Sprintf("%d", offset.Offset)} - - {offset.LastUpdated.Format("2006-01-02 15:04:05")} -
-
- } -
-
-
-
-
-
-
- - - - - -} \ No newline at end of file diff --git a/weed/admin/view/app/topic_details_templ.go b/weed/admin/view/app/topic_details_templ.go deleted file mode 100644 index a3e48f581..000000000 --- a/weed/admin/view/app/topic_details_templ.go +++ /dev/null @@ -1,996 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import "fmt" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" -import "github.com/seaweedfs/seaweedfs/weed/util" - -func TopicDetails(data dash.TopicDetailsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Topic Details: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(data.TopicName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 20, Col: 74} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 22, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Partitions

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Partitions))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 31, Col: 97} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

Schema Fields

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.KeySchema)+len(data.ValueSchema))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 39, Col: 117} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "

Total Messages

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.MessageCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 47, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "

Total Size

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(uint64(data.TotalSize))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 55, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "

Publishers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Publishers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 63, Col: 97} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "

Subscribers

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Subscribers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 71, Col: 95} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "

Consumer Group Offsets

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.ConsumerGroupOffsets))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 83, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "

Saved consumer progress checkpoints

Topic Information
Namespace:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.Namespace) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 100, Col: 72} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
Name:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(data.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 102, Col: 67} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
Full Name:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.TopicName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 104, Col: 72} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
Created:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(data.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 106, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Retention Policy
Status:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Retention.Enabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "Enabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "Disabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
Duration:
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Retention.Enabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Retention.DisplayValue)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 135, Col: 95} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(data.Retention.DisplayUnit) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 135, Col: 124} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "No retention configured") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Schema Definition
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.KeySchema) == 0 && len(data.ValueSchema) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "

No schema information available

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, field := range data.KeySchema { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - for _, field := range data.ValueSchema { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
FieldTypeRequiredSchema Part
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(field.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 171, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(field.Type) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 172, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Required { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "Key
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(field.Name) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 185, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(field.Type) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 186, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if field.Required { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "Value
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "
Partitions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Partitions) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "
No Partitions Found

No partitions are configured for this topic.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, partition := range data.Partitions { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "
Partition IDLeader BrokerFollower BrokerMessagesSizeLast Data TimeCreated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", partition.ID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 241, Col: 115} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(partition.LeaderBroker) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 244, Col: 83} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if partition.FollowerBroker != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(partition.FollowerBroker) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 248, Col: 106} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "None") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var25 string - templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", partition.MessageCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 253, Col: 94} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(util.BytesToHumanReadable(uint64(partition.TotalSize))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 254, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !partition.LastDataTime.IsZero() { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(partition.LastDataTime.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 257, Col: 134} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "Never") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var28 string - templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(partition.CreatedAt.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 263, Col: 127} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "
Active Publishers ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Publishers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 279, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Publishers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "
No active publishers found for this topic.
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, publisher := range data.Publishers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "
PublisherPartitionBrokerStatusPublishedAcknowledgedLast Seen
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var30 string - templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(publisher.PublisherName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 303, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var31 string - templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", publisher.PartitionID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 304, Col: 132} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var32 string - templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(publisher.Broker) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 305, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if publisher.IsActive { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "Active") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "Inactive") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if publisher.LastPublishedOffset > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var33 string - templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", publisher.LastPublishedOffset)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 315, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if publisher.LastAckedOffset > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var34 string - templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", publisher.LastAckedOffset)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 322, Col: 134} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !publisher.LastSeenTime.IsZero() { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var35 string - templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(publisher.LastSeenTime.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 329, Col: 131} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 81, "
Active Subscribers ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var36 string - templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Subscribers))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 349, Col: 137} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 82, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Subscribers) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 83, "
No active subscribers found for this topic.
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 84, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, subscriber := range data.Subscribers { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 85, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 105, "
Consumer GroupConsumer IDPartitionBrokerStatusReceivedAcknowledgedLast Seen
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var37 string - templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.ConsumerGroup) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 374, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 86, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var38 string - templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.ConsumerID) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 375, Col: 82} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 87, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", subscriber.PartitionID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 376, Col: 133} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 88, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var40 string - templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.Broker) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 377, Col: 78} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 89, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if subscriber.IsActive { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 90, "Active") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 91, "Inactive") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 92, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if subscriber.LastReceivedOffset > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 93, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var41 string - templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", subscriber.LastReceivedOffset)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 387, Col: 138} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 94, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 95, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 96, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if subscriber.CurrentOffset > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 97, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var42 string - templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", subscriber.CurrentOffset)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 394, Col: 133} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !subscriber.LastSeenTime.IsZero() { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var43 string - templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(subscriber.LastSeenTime.Format("15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 401, Col: 132} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 102, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 103, "-") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 104, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 106, "
Consumer Group Offsets ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var44 string - templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.ConsumerGroupOffsets))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 422, Col: 153} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 107, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.ConsumerGroupOffsets) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 108, "
No consumer group offsets found for this topic.
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 109, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, offset := range data.ConsumerGroupOffsets { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 110, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 115, "
Consumer GroupPartitionOffsetLast Updated
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var45 string - templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(offset.ConsumerGroup) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 444, Col: 114} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 111, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var46 string - templ_7745c5c3_Var46, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", offset.PartitionID)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 447, Col: 129} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var46)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 112, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var47 string - templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", offset.Offset)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 450, Col: 101} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 113, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var48 string - templ_7745c5c3_Var48, templ_7745c5c3_Err = templ.JoinStringErrs(offset.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topic_details.templ`, Line: 453, Col: 134} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var48)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 114, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 116, "
Edit Retention Policy
Retention Configuration
Data older than this duration will be automatically purged to save storage space.
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/topics.templ b/weed/admin/view/app/topics.templ deleted file mode 100644 index 4a69b0c54..000000000 --- a/weed/admin/view/app/topics.templ +++ /dev/null @@ -1,388 +0,0 @@ -package app - -import "fmt" -import "strings" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" - -templ Topics(data dash.TopicsData) { -
-
-
-
-

Message Queue Topics

- Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} -
- - -
-
-
-
-
Total Topics
-

{fmt.Sprintf("%d", data.TotalTopics)}

-
-
-
-
-
-
-
Available Topics
-

{fmt.Sprintf("%d", len(data.Topics))}

-
-
-
-
- - -
-
-
Topics
-
- - -
-
-
- if len(data.Topics) == 0 { -
- -
No Topics Found
-

No message queue topics are currently configured.

-
- } else { -
- - - - - - - - - - - - for _, topic := range data.Topics { - - - - - - - - - - - } - -
NamespaceTopic NamePartitionsRetentionActions
- {func() string { - idx := strings.LastIndex(topic.Name, ".") - if idx == -1 { - return "default" - } - return topic.Name[:idx] - }()} - - {func() string { - idx := strings.LastIndex(topic.Name, ".") - if idx == -1 { - return topic.Name - } - return topic.Name[idx+1:] - }()} - - {fmt.Sprintf("%d", topic.Partitions)} - - if topic.Retention.Enabled { - - - {fmt.Sprintf("%d %s", topic.Retention.DisplayValue, topic.Retention.DisplayUnit)} - - } else { - - Disabled - - } - - -
-
- } -
-
-
-
-
- - - - - -} \ No newline at end of file diff --git a/weed/admin/view/app/topics_templ.go b/weed/admin/view/app/topics_templ.go deleted file mode 100644 index 6920a2e53..000000000 --- a/weed/admin/view/app/topics_templ.go +++ /dev/null @@ -1,226 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import "fmt" -import "strings" -import "github.com/seaweedfs/seaweedfs/weed/admin/dash" - -func Topics(data dash.TopicsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Message Queue Topics

Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var2 string - templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 13, Col: 107} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Topics

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalTopics)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 22, Col: 93} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

Available Topics

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.Topics))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 30, Col: 90} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Topics
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Topics) == 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
No Topics Found

No message queue topics are currently configured.

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, topic := range data.Topics { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
NamespaceTopic NamePartitionsRetentionActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(func() string { - idx := strings.LastIndex(topic.Name, ".") - if idx == -1 { - return "default" - } - return topic.Name[:idx] - }()) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 78, Col: 55} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(func() string { - idx := strings.LastIndex(topic.Name, ".") - if idx == -1 { - return topic.Name - } - return topic.Name[idx+1:] - }()) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 87, Col: 55} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", topic.Partitions)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 90, Col: 116} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if topic.Retention.Enabled { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d %s", topic.Retention.DisplayValue, topic.Retention.DisplayUnit)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/topics.templ`, Line: 96, Col: 140} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "Disabled") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Loading topic details...
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
Create New Topic
Retention Policy
Data older than this duration will be automatically purged to save storage space.
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/volume_details.templ b/weed/admin/view/app/volume_details.templ deleted file mode 100644 index 0c0359e92..000000000 --- a/weed/admin/view/app/volume_details.templ +++ /dev/null @@ -1,497 +0,0 @@ -package app - -import ( - "fmt" - "time" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" -) - -templ VolumeDetails(data dash.VolumeDetailsData) { -
-
-

- Volume Details -

- -
-
-
- - -
-
-
- -
- -
-
-
-
- Volume Information -
-
-
-
-
-
- -
{fmt.Sprintf("%d", data.Volume.Id)}
-
- -
- -
{data.Volume.DataCenter}
-
-
- -
{data.Volume.Rack}
-
-
-
-
- -
- if data.Volume.Collection == "" { - - default - - } else { - - {data.Volume.Collection} - - } -
-
-
- -
{fmt.Sprintf("%03d", data.Volume.ReplicaPlacement)}
-
-
- -
- - if data.Volume.DiskType == "" { - hdd - } else { - {data.Volume.DiskType} - } - -
-
-
- -
{fmt.Sprintf("v%d", data.Volume.Version)}
-
-
-
-
-
-
- - -
- -
-
-
- Volume Statistics & Health -
-
-
- -
-
-
-
- {formatBytes(int64(data.Volume.Size - data.Volume.DeletedByteCount))} -
- Active Bytes -
-
-
-
-
- {formatBytes(int64(data.Volume.DeletedByteCount))} -
- Deleted Bytes -
-
-
- - - - -
-
-
-
- {fmt.Sprintf("%d", data.Volume.FileCount)} -
- Active Files -
-
-
-
-
- {fmt.Sprintf("%d", data.Volume.DeleteCount)} -
- Deleted Files -
-
-
- - - - - if data.Volume.FileCount > 0 && data.Volume.Size > 0 { -
-
- Storage Efficiency - - {fmt.Sprintf("%.1f%%", float64(data.Volume.Size-data.Volume.DeletedByteCount)/float64(data.Volume.Size)*100)} - -
-
-
-
-
-
- } - -
- - -
-
-
- if data.Volume.ReadOnly { - - Read Only - - if data.Volume.Size >= data.VolumeSizeLimit { -
- Size limit exceeded -
- } - } else if data.VolumeSizeLimit > data.Volume.Size { - - Read/Write - - } else { - - Size Limit Reached - - } -
-
-
- - -
-
-
-
- #{fmt.Sprintf("%d", data.Volume.CompactRevision)} -
- Vacuum Revision -
-
-
-
-
- if data.Volume.ModifiedAtSecond > 0 { - {formatTimestamp(data.Volume.ModifiedAtSecond)} - } else { - Never modified - } -
- Last Modified -
-
-
- - - if data.Volume.Ttl > 0 { -
- - {formatTTL(data.Volume.Ttl)} - -
- Time To Live -
-
- } - - - if data.Volume.RemoteStorageName != "" { -
-
-
-
- {data.Volume.RemoteStorageName} -
- Remote Storage -
-
- if data.Volume.RemoteStorageKey != "" { -
-
- {data.Volume.RemoteStorageKey} -
- Storage Key -
- } - } -
-
-
-
- - - if len(data.Replicas) > 0 { -
-
-
-
-
- Replicas ({fmt.Sprintf("%d", data.ReplicationCount)}) -
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - for _, replica := range data.Replicas { - - - - - - - - - - } - -
ServerData CenterRackSizeFile CountStatusActions
- - - {data.Volume.Server} - - - - Primary - {data.Volume.DataCenter}{data.Volume.Rack}{formatBytes(int64(data.Volume.Size))}{fmt.Sprintf("%d", data.Volume.FileCount)}Active - Current Volume -
- - {replica.Server} - - - {replica.DataCenter}{replica.Rack}{formatBytes(int64(replica.Size))}{fmt.Sprintf("%d", replica.FileCount)}Replica - - View - -
-
-
-
-
-
- } - - -
-
-
-
-
- Actions -
-
-
-
- -
-
- - - Use these actions to perform maintenance operations on the volume. - -
-
-
-
-
- - -
-
- - - Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} - -
-
- - - -} - -func formatTimestamp(unixTimestamp int64) string { - if unixTimestamp <= 0 { - return "Never" - } - t := time.Unix(unixTimestamp, 0) - return t.Format("2006-01-02 15:04:05") -} - -func formatTTL(ttlSeconds uint32) string { - if ttlSeconds == 0 { - return "No TTL" - } - - duration := time.Duration(ttlSeconds) * time.Second - - // Convert to human readable format - days := int(duration.Hours()) / 24 - hours := int(duration.Hours()) % 24 - minutes := int(duration.Minutes()) % 60 - - if days > 0 { - if hours > 0 { - return fmt.Sprintf("%dd %dh", days, hours) - } - return fmt.Sprintf("%d days", days) - } else if hours > 0 { - if minutes > 0 { - return fmt.Sprintf("%dh %dm", hours, minutes) - } - return fmt.Sprintf("%d hours", hours) - } else if minutes > 0 { - return fmt.Sprintf("%d minutes", minutes) - } else { - return fmt.Sprintf("%d seconds", int(duration.Seconds())) - } -} \ No newline at end of file diff --git a/weed/admin/view/app/volume_details_templ.go b/weed/admin/view/app/volume_details_templ.go deleted file mode 100644 index 921f20fbb..000000000 --- a/weed/admin/view/app/volume_details_templ.go +++ /dev/null @@ -1,740 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package app - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "time" -) - -func VolumeDetails(data dash.VolumeDetailsData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Volume Details

Volume Information
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var3 string - templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Volume.Id)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 49, Col: 90} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var6 string - templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 62, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var7 string - templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 66, Col: 93} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.Collection == "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "default") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 string - templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.Collection) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 79, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var11 string - templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%03d", data.Volume.ReplicaPlacement)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 86, Col: 115} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.DiskType == "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "hdd") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.DiskType) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 95, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var13 string - templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("v%d", data.Volume.Version)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 102, Col: 105} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Volume Statistics & Health
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var14 string - templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(data.Volume.Size - data.Volume.DeletedByteCount))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 125, Col: 104} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
Active Bytes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(data.Volume.DeletedByteCount))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 133, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
Deleted Bytes
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Volume.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 147, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
Active Files
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var17 string - templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Volume.DeleteCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 155, Col: 79} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
Deleted Files
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.FileCount > 0 && data.Volume.Size > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
Storage Efficiency ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f%%", float64(data.Volume.Size-data.Volume.DeletedByteCount)/float64(data.Volume.Size)*100)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 170, Col: 144} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.ReadOnly { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "Read Only ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.Size >= data.VolumeSizeLimit { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
Size limit exceeded
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } else if data.VolumeSizeLimit > data.Volume.Size { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "Read/Write") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "Size Limit Reached") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "
#") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var21 string - templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Volume.CompactRevision)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 216, Col: 84} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "
Vacuum Revision
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.ModifiedAtSecond > 0 { - var templ_7745c5c3_Var22 string - templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(formatTimestamp(data.Volume.ModifiedAtSecond)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 225, Col: 86} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "Never modified") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
Last Modified
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.Ttl > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var23 string - templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(formatTTL(data.Volume.Ttl)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 239, Col: 92} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
Time To Live
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.RemoteStorageName != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.RemoteStorageName) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 253, Col: 99} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "
Remote Storage
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Volume.RemoteStorageKey != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var26 string - templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.RemoteStorageKey) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 261, Col: 65} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "
Storage Key
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if len(data.Replicas) > 0 { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
Replicas (") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var27 string - templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.ReplicationCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 279, Col: 111} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, ")
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, replica := range data.Replicas { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
ServerData CenterRackSizeFile CountStatusActions
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var29 string - templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 302, Col: 71} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, " Primary") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var30 string - templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 308, Col: 106} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var31 string - templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(data.Volume.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 309, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var32 string - templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(data.Volume.Size))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 310, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var33 string - templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.Volume.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 311, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "ActiveCurrent Volume
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var35 string - templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(replica.Server) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 322, Col: 67} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var36 string - templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(replica.DataCenter) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 326, Col: 106} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var37 string - templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(replica.Rack) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 327, Col: 100} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var38 string - templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(int64(replica.Size))) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 328, Col: 81} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var39 string - templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", replica.FileCount)) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 329, Col: 85} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "ReplicaView
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
Actions
Use these actions to perform maintenance operations on the volume.
Last updated: ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var43 string - templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/volume_details.templ`, Line: 381, Col: 77} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func formatTimestamp(unixTimestamp int64) string { - if unixTimestamp <= 0 { - return "Never" - } - t := time.Unix(unixTimestamp, 0) - return t.Format("2006-01-02 15:04:05") -} - -func formatTTL(ttlSeconds uint32) string { - if ttlSeconds == 0 { - return "No TTL" - } - - duration := time.Duration(ttlSeconds) * time.Second - - // Convert to human readable format - days := int(duration.Hours()) / 24 - hours := int(duration.Hours()) % 24 - minutes := int(duration.Minutes()) % 60 - - if days > 0 { - if hours > 0 { - return fmt.Sprintf("%dd %dh", days, hours) - } - return fmt.Sprintf("%d days", days) - } else if hours > 0 { - if minutes > 0 { - return fmt.Sprintf("%dh %dm", hours, minutes) - } - return fmt.Sprintf("%d hours", hours) - } else if minutes > 0 { - return fmt.Sprintf("%d minutes", minutes) - } else { - return fmt.Sprintf("%d seconds", int(duration.Seconds())) - } -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/components/config_sections.templ b/weed/admin/view/components/config_sections.templ deleted file mode 100644 index 813f4ba67..000000000 --- a/weed/admin/view/components/config_sections.templ +++ /dev/null @@ -1,83 +0,0 @@ -package components - - - -// ConfigSectionData represents data for a configuration section -type ConfigSectionData struct { - Title string - Icon string - Description string - Fields []interface{} // Will hold field data structures -} - -// InfoSectionData represents data for an informational section -type InfoSectionData struct { - Title string - Icon string - Type string // "info", "warning", "success", "danger" - Content string -} - -// ConfigSection renders a Bootstrap card for configuration settings -templ ConfigSection(data ConfigSectionData) { -
-
-
-
-
- if data.Icon != "" { - - } - { data.Title } -
- if data.Description != "" { - { data.Description } - } -
-
- for _, field := range data.Fields { - switch v := field.(type) { - case TextFieldData: - @TextField(v) - case NumberFieldData: - @NumberField(v) - case CheckboxFieldData: - @CheckboxField(v) - case SelectFieldData: - @SelectField(v) - case DurationFieldData: - @DurationField(v) - case DurationInputFieldData: - @DurationInputField(v) - } - } -
-
-
-
-} - -// InfoSection renders a Bootstrap alert section for informational content -templ InfoSection(data InfoSectionData) { -
-
-
-
-
- if data.Icon != "" { - - } - { data.Title } -
-
-
- -
-
-
-
-} - - \ No newline at end of file diff --git a/weed/admin/view/components/config_sections_templ.go b/weed/admin/view/components/config_sections_templ.go deleted file mode 100644 index ca428dccd..000000000 --- a/weed/admin/view/components/config_sections_templ.go +++ /dev/null @@ -1,257 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package components - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -// ConfigSectionData represents data for a configuration section -type ConfigSectionData struct { - Title string - Icon string - Description string - Fields []interface{} // Will hold field data structures -} - -// InfoSectionData represents data for an informational section -type InfoSectionData struct { - Title string - Icon string - Type string // "info", "warning", "success", "danger" - Content string -} - -// ConfigSection renders a Bootstrap card for configuration settings -func ConfigSection(data ConfigSectionData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Icon != "" { - var templ_7745c5c3_Var2 = []any{data.Icon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - var templ_7745c5c3_Var4 string - templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.Title) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/config_sections.templ`, Line: 31, Col: 36} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var5 string - templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/config_sections.templ`, Line: 34, Col: 68} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - for _, field := range data.Fields { - switch v := field.(type) { - case TextFieldData: - templ_7745c5c3_Err = TextField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case NumberFieldData: - templ_7745c5c3_Err = NumberField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case CheckboxFieldData: - templ_7745c5c3_Err = CheckboxField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case SelectFieldData: - templ_7745c5c3_Err = SelectField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case DurationFieldData: - templ_7745c5c3_Err = DurationField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - case DurationInputFieldData: - templ_7745c5c3_Err = DurationInputField(v).Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// InfoSection renders a Bootstrap alert section for informational content -func InfoSection(data InfoSectionData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var6 := templ.GetChildren(ctx) - if templ_7745c5c3_Var6 == nil { - templ_7745c5c3_Var6 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Icon != "" { - var templ_7745c5c3_Var7 = []any{data.Icon + " me-2"} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var7...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - var templ_7745c5c3_Var9 string - templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(data.Title) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/config_sections.templ`, Line: 70, Col: 36} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var10 = []any{"alert alert-" + data.Type} - templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var10...) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var12 string - templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(data.Content) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/config_sections.templ`, Line: 75, Col: 37} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/components/form_fields.templ b/weed/admin/view/components/form_fields.templ deleted file mode 100644 index 82d20d407..000000000 --- a/weed/admin/view/components/form_fields.templ +++ /dev/null @@ -1,424 +0,0 @@ -package components - -import "fmt" - -// FormFieldData represents common form field data -type FormFieldData struct { - Name string - Label string - Description string - Required bool -} - -// TextFieldData represents text input field data -type TextFieldData struct { - FormFieldData - Value string - Placeholder string -} - -// NumberFieldData represents number input field data -type NumberFieldData struct { - FormFieldData - Value float64 - Step string - Min *float64 - Max *float64 -} - -// CheckboxFieldData represents checkbox field data -type CheckboxFieldData struct { - FormFieldData - Checked bool -} - -// SelectFieldData represents select field data -type SelectFieldData struct { - FormFieldData - Value string - Options []SelectOption -} - -type SelectOption struct { - Value string - Label string -} - -// DurationFieldData represents duration input field data -type DurationFieldData struct { - FormFieldData - Value string - Placeholder string -} - -// DurationInputFieldData represents duration input with number + unit dropdown -type DurationInputFieldData struct { - FormFieldData - Seconds int // The duration value in seconds -} - -// TextField renders a Bootstrap text input field -templ TextField(data TextFieldData) { -
- - - if data.Description != "" { -
{ data.Description }
- } -
-} - -// NumberField renders a Bootstrap number input field -templ NumberField(data NumberFieldData) { -
- - - if data.Description != "" { -
{ data.Description }
- } -
-} - -// CheckboxField renders a Bootstrap checkbox field -templ CheckboxField(data CheckboxFieldData) { -
-
- - -
- if data.Description != "" { -
{ data.Description }
- } -
-} - -// SelectField renders a Bootstrap select field -templ SelectField(data SelectFieldData) { -
- - - if data.Description != "" { -
{ data.Description }
- } -
-} - -// DurationField renders a Bootstrap duration input field -templ DurationField(data DurationFieldData) { -
- - - if data.Description != "" { -
{ data.Description }
- } -
-} - -// DurationInputField renders a Bootstrap duration input with number + unit dropdown -templ DurationInputField(data DurationInputFieldData) { -
- -
- - -
- if data.Description != "" { -
{ data.Description }
- } -
-} - -// Helper functions for duration conversion (used by DurationInputField) - -// Typed conversion functions for protobuf int32 (most common) - EXPORTED -func ConvertInt32SecondsToDisplayValue(seconds int32) float64 { - return convertIntSecondsToDisplayValue(int(seconds)) -} - -func GetInt32DisplayUnit(seconds int32) string { - return getIntDisplayUnit(int(seconds)) -} - -// Typed conversion functions for regular int -func convertIntSecondsToDisplayValue(seconds int) float64 { - if seconds == 0 { - return 0 - } - - // Check if it's evenly divisible by days - if seconds%(24*3600) == 0 { - return float64(seconds / (24 * 3600)) - } - - // Check if it's evenly divisible by hours - if seconds%3600 == 0 { - return float64(seconds / 3600) - } - - // Default to minutes - return float64(seconds / 60) -} - -func getIntDisplayUnit(seconds int) string { - if seconds == 0 { - return "minutes" - } - - // Check if it's evenly divisible by days - if seconds%(24*3600) == 0 { - return "days" - } - - // Check if it's evenly divisible by hours - if seconds%3600 == 0 { - return "hours" - } - - // Default to minutes - return "minutes" -} - -func convertSecondsToUnit(seconds int) string { - if seconds == 0 { - return "minutes" - } - - // Try days first - if seconds%(24*3600) == 0 && seconds >= 24*3600 { - return "days" - } - - // Try hours - if seconds%3600 == 0 && seconds >= 3600 { - return "hours" - } - - // Default to minutes - return "minutes" -} - -func convertSecondsToValue(seconds int, unit string) float64 { - if seconds == 0 { - return 0 - } - - switch unit { - case "days": - return float64(seconds / (24 * 3600)) - case "hours": - return float64(seconds / 3600) - case "minutes": - return float64(seconds / 60) - default: - return float64(seconds / 60) // Default to minutes - } -} - -// IntervalFieldData represents interval input field data with separate value and unit -type IntervalFieldData struct { - FormFieldData - Seconds int // The interval value in seconds -} - -// IntervalField renders a Bootstrap interval input with number + unit dropdown (like task config) -templ IntervalField(data IntervalFieldData) { -
- -
- - -
- if data.Description != "" { -
{ data.Description }
- } -
-} \ No newline at end of file diff --git a/weed/admin/view/components/form_fields_templ.go b/weed/admin/view/components/form_fields_templ.go deleted file mode 100644 index 180147874..000000000 --- a/weed/admin/view/components/form_fields_templ.go +++ /dev/null @@ -1,1363 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package components - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import "fmt" - -// FormFieldData represents common form field data -type FormFieldData struct { - Name string - Label string - Description string - Required bool -} - -// TextFieldData represents text input field data -type TextFieldData struct { - FormFieldData - Value string - Placeholder string -} - -// NumberFieldData represents number input field data -type NumberFieldData struct { - FormFieldData - Value float64 - Step string - Min *float64 - Max *float64 -} - -// CheckboxFieldData represents checkbox field data -type CheckboxFieldData struct { - FormFieldData - Checked bool -} - -// SelectFieldData represents select field data -type SelectFieldData struct { - FormFieldData - Value string - Options []SelectOption -} - -type SelectOption struct { - Value string - Label string -} - -// DurationFieldData represents duration input field data -type DurationFieldData struct { - FormFieldData - Value string - Placeholder string -} - -// DurationInputFieldData represents duration input with number + unit dropdown -type DurationInputFieldData struct { - FormFieldData - Seconds int // The duration value in seconds -} - -// TextField renders a Bootstrap text input field -func TextField(data TextFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var8 string - templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 83, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// NumberField renders a Bootstrap number input field -func NumberField(data NumberFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var9 := templ.GetChildren(ctx) - if templ_7745c5c3_Var9 == nil { - templ_7745c5c3_Var9 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 119, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// CheckboxField renders a Bootstrap checkbox field -func CheckboxField(data CheckboxFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var19 := templ.GetChildren(ctx) - if templ_7745c5c3_Var19 == nil { - templ_7745c5c3_Var19 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var24 string - templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 142, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// SelectField renders a Bootstrap select field -func SelectField(data SelectFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var25 := templ.GetChildren(ctx) - if templ_7745c5c3_Var25 == nil { - templ_7745c5c3_Var25 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var32 string - templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 176, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// DurationField renders a Bootstrap duration input field -func DurationField(data DurationFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var33 := templ.GetChildren(ctx) - if templ_7745c5c3_Var33 == nil { - templ_7745c5c3_Var33 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var40 string - templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 206, Col: 64} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 79, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// DurationInputField renders a Bootstrap duration input with number + unit dropdown -func DurationInputField(data DurationInputFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var41 := templ.GetChildren(ctx) - if templ_7745c5c3_Var41 == nil { - templ_7745c5c3_Var41 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 80, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 98, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var49 string - templ_7745c5c3_Var49, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 266, Col: 55} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var49)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 99, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 100, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -// Helper functions for duration conversion (used by DurationInputField) - -// Typed conversion functions for protobuf int32 (most common) - EXPORTED -func ConvertInt32SecondsToDisplayValue(seconds int32) float64 { - return convertIntSecondsToDisplayValue(int(seconds)) -} - -func GetInt32DisplayUnit(seconds int32) string { - return getIntDisplayUnit(int(seconds)) -} - -// Typed conversion functions for regular int -func convertIntSecondsToDisplayValue(seconds int) float64 { - if seconds == 0 { - return 0 - } - - // Check if it's evenly divisible by days - if seconds%(24*3600) == 0 { - return float64(seconds / (24 * 3600)) - } - - // Check if it's evenly divisible by hours - if seconds%3600 == 0 { - return float64(seconds / 3600) - } - - // Default to minutes - return float64(seconds / 60) -} - -func getIntDisplayUnit(seconds int) string { - if seconds == 0 { - return "minutes" - } - - // Check if it's evenly divisible by days - if seconds%(24*3600) == 0 { - return "days" - } - - // Check if it's evenly divisible by hours - if seconds%3600 == 0 { - return "hours" - } - - // Default to minutes - return "minutes" -} - -func convertSecondsToUnit(seconds int) string { - if seconds == 0 { - return "minutes" - } - - // Try days first - if seconds%(24*3600) == 0 && seconds >= 24*3600 { - return "days" - } - - // Try hours - if seconds%3600 == 0 && seconds >= 3600 { - return "hours" - } - - // Default to minutes - return "minutes" -} - -func convertSecondsToValue(seconds int, unit string) float64 { - if seconds == 0 { - return 0 - } - - switch unit { - case "days": - return float64(seconds / (24 * 3600)) - case "hours": - return float64(seconds / 3600) - case "minutes": - return float64(seconds / 60) - default: - return float64(seconds / 60) // Default to minutes - } -} - -// IntervalFieldData represents interval input field data with separate value and unit -type IntervalFieldData struct { - FormFieldData - Seconds int // The interval value in seconds -} - -// IntervalField renders a Bootstrap interval input with number + unit dropdown (like task config) -func IntervalField(data IntervalFieldData) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var50 := templ.GetChildren(ctx) - if templ_7745c5c3_Var50 == nil { - templ_7745c5c3_Var50 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 101, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if data.Description != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 121, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var58 string - templ_7745c5c3_Var58, templ_7745c5c3_Err = templ.JoinStringErrs(data.Description) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/components/form_fields.templ`, Line: 421, Col: 55} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var58)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 122, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 123, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/layout/layout.templ b/weed/admin/view/layout/layout.templ deleted file mode 100644 index cd192fa44..000000000 --- a/weed/admin/view/layout/layout.templ +++ /dev/null @@ -1,406 +0,0 @@ -package layout - -import ( - "fmt" - "strings" - "time" - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/util/version" -) - -templ Layout(c *gin.Context, content templ.Component) { -{{ - username := c.GetString("username") - if username == "" { - username = "admin" - } - - // Detect if we're on a configuration page to keep submenu expanded - currentPath := c.Request.URL.Path - isConfigPage := strings.HasPrefix(currentPath, "/maintenance/config") || currentPath == "/config" - - // Detect if we're on a message queue page to keep submenu expanded - isMQPage := strings.HasPrefix(currentPath, "/mq/") -}} - - - - - SeaweedFS Admin - - - - - - - - - - - - - -
- - - -
- - - - -
-
- @content -
-
-
-
- - -
-
- - © {fmt.Sprintf("%d", time.Now().Year())} SeaweedFS Admin v{version.VERSION_NUMBER} - if !strings.Contains(version.VERSION, "enterprise") { - โ€ข - - Enterprise Version Available - - } - -
-
- - - - - - - -} - -templ LoginForm(c *gin.Context, title string, errorMessage string) { - - - - - {title} - Login - - - - - - -
-
-
-
-
-
- -

{title}

-

Please sign in to continue

-
- - if errorMessage != "" { - - } - -
-
- -
- - - - -
-
- -
- -
- - - - -
-
- - -
-
-
-
-
-
- - - - -} \ No newline at end of file diff --git a/weed/admin/view/layout/layout_templ.go b/weed/admin/view/layout/layout_templ.go deleted file mode 100644 index 8572ae6d6..000000000 --- a/weed/admin/view/layout/layout_templ.go +++ /dev/null @@ -1,486 +0,0 @@ -// Code generated by templ - DO NOT EDIT. - -// templ: version: v0.3.960 -package layout - -//lint:file-ignore SA4006 This context is only used if a nested component is present. - -import "github.com/a-h/templ" -import templruntime "github.com/a-h/templ/runtime" - -import ( - "fmt" - "github.com/gin-gonic/gin" - "github.com/seaweedfs/seaweedfs/weed/util/version" - "strings" - "time" -) - -func Layout(c *gin.Context, content templ.Component) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var1 := templ.GetChildren(ctx) - if templ_7745c5c3_Var1 == nil { - templ_7745c5c3_Var1 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - username := c.GetString("username") - if username == "" { - username = "admin" - } - - // Detect if we're on a configuration page to keep submenu expanded - currentPath := c.Request.URL.Path - isConfigPage := strings.HasPrefix(currentPath, "/maintenance/config") || currentPath == "/config" - - // Detect if we're on a message queue page to keep submenu expanded - isMQPage := strings.HasPrefix(currentPath, "/mq/") - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "SeaweedFS Admin
MAIN
MANAGEMENT
  • File Browser
  • Object Store
  • ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if isMQPage { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "Message Queue ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "Message Queue ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - if isMQPage { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
    • ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if currentPath == "/mq/brokers" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Brokers") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "Brokers") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
    • ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if currentPath == "/mq/topics" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "Topics") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Topics") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
    ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } else { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
  • Metrics
  • Logs
SYSTEM
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = content.Render(ctx, templ_7745c5c3_Buffer) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
© ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var15 string - templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", time.Now().Year())) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 323, Col: 60} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, " SeaweedFS Admin v") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var16 string - templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(version.VERSION_NUMBER) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 323, Col: 102} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, " ") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if !strings.Contains(version.VERSION, "enterprise") { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "โ€ข Enterprise Version Available") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -func LoginForm(c *gin.Context, title string, errorMessage string) templ.Component { - return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { - templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context - if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { - return templ_7745c5c3_CtxErr - } - templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) - if !templ_7745c5c3_IsBuffer { - defer func() { - templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) - if templ_7745c5c3_Err == nil { - templ_7745c5c3_Err = templ_7745c5c3_BufErr - } - }() - } - ctx = templ.InitializeContext(ctx) - templ_7745c5c3_Var17 := templ.GetChildren(ctx) - if templ_7745c5c3_Var17 == nil { - templ_7745c5c3_Var17 = templ.NopComponent - } - ctx = templ.ClearChildren(ctx) - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var18 string - templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(title) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 347, Col: 17} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, " - Login

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var19 string - templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(title) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 361, Col: 57} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "

Please sign in to continue

") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - if errorMessage != "" { - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - var templ_7745c5c3_Var20 string - templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(errorMessage) - if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 368, Col: 45} - } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - } - templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "
") - if templ_7745c5c3_Err != nil { - return templ_7745c5c3_Err - } - return nil - }) -} - -var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/layout/menu_helper.go b/weed/admin/view/layout/menu_helper.go deleted file mode 100644 index fc8954423..000000000 --- a/weed/admin/view/layout/menu_helper.go +++ /dev/null @@ -1,47 +0,0 @@ -package layout - -import ( - "github.com/seaweedfs/seaweedfs/weed/admin/maintenance" - - // Import task packages to trigger their auto-registration - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" -) - -// MenuItemData represents a menu item -type MenuItemData struct { - Name string - URL string - Icon string - Description string -} - -// GetConfigurationMenuItems returns the dynamic configuration menu items -func GetConfigurationMenuItems() []*MenuItemData { - var menuItems []*MenuItemData - - // Add system configuration item - menuItems = append(menuItems, &MenuItemData{ - Name: "System", - URL: "/maintenance/config", - Icon: "fas fa-cogs", - Description: "System-level configuration", - }) - - // Get all registered task types and add them as submenu items - registeredTypes := maintenance.GetRegisteredMaintenanceTaskTypes() - - for _, taskType := range registeredTypes { - menuItem := &MenuItemData{ - Name: maintenance.GetTaskDisplayName(taskType), - URL: "/maintenance/config/" + string(taskType), - Icon: maintenance.GetTaskIcon(taskType), - Description: maintenance.GetTaskDescription(taskType), - } - - menuItems = append(menuItems, menuItem) - } - - return menuItems -} diff --git a/weed/cluster/cluster.go b/weed/cluster/cluster.go index 52d32f697..ad6e6b879 100644 --- a/weed/cluster/cluster.go +++ b/weed/cluster/cluster.go @@ -1,8 +1,9 @@ package cluster import ( - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "math" "sync" "time" ) @@ -14,138 +15,303 @@ const ( BrokerType = "broker" ) -type FilerGroupName string -type DataCenter string -type Rack string +type FilerGroup string +type Filers struct { + filers map[pb.ServerAddress]*ClusterNode + leaders *Leaders +} +type Leaders struct { + leaders [3]pb.ServerAddress +} type ClusterNode struct { - Address pb.ServerAddress - Version string - counter int - CreatedTs time.Time - DataCenter DataCenter - Rack Rack + Address pb.ServerAddress + Version string + counter int + CreatedTs time.Time } -type ClusterNodeGroups struct { - groupMembers map[FilerGroupName]*GroupMembers - sync.RWMutex -} type Cluster struct { - filerGroups *ClusterNodeGroups - brokerGroups *ClusterNodeGroups -} - -func newClusterNodeGroups() *ClusterNodeGroups { - return &ClusterNodeGroups{ - groupMembers: map[FilerGroupName]*GroupMembers{}, - } -} -func (g *ClusterNodeGroups) getGroupMembers(filerGroup FilerGroupName, createIfNotFound bool) *GroupMembers { - members, found := g.groupMembers[filerGroup] - if !found && createIfNotFound { - members = newGroupMembers() - g.groupMembers[filerGroup] = members - } - return members -} - -func (g *ClusterNodeGroups) AddClusterNode(filerGroup FilerGroupName, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse { - g.Lock() - defer g.Unlock() - m := g.getGroupMembers(filerGroup, true) - if t := m.addMember(dataCenter, rack, address, version); t != nil { - return buildClusterNodeUpdateMessage(true, filerGroup, nodeType, address) - } - return nil -} -func (g *ClusterNodeGroups) RemoveClusterNode(filerGroup FilerGroupName, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse { - g.Lock() - defer g.Unlock() - m := g.getGroupMembers(filerGroup, false) - if m == nil { - return nil - } - if m.removeMember(address) { - return buildClusterNodeUpdateMessage(false, filerGroup, nodeType, address) - } - return nil -} -func (g *ClusterNodeGroups) ListClusterNode(filerGroup FilerGroupName) (nodes []*ClusterNode) { - g.Lock() - defer g.Unlock() - m := g.getGroupMembers(filerGroup, false) - if m == nil { - return nil - } - for _, node := range m.members { - nodes = append(nodes, node) - } - return + filerGroup2filers map[FilerGroup]*Filers + filersLock sync.RWMutex + brokers map[pb.ServerAddress]*ClusterNode + brokersLock sync.RWMutex } func NewCluster() *Cluster { return &Cluster{ - filerGroups: newClusterNodeGroups(), - brokerGroups: newClusterNodeGroups(), + filerGroup2filers: make(map[FilerGroup]*Filers), + brokers: make(map[pb.ServerAddress]*ClusterNode), } } -func (cluster *Cluster) getGroupMembers(filerGroup FilerGroupName, nodeType string, createIfNotFound bool) *GroupMembers { - switch nodeType { - case FilerType: - return cluster.filerGroups.getGroupMembers(filerGroup, createIfNotFound) - case BrokerType: - return cluster.brokerGroups.getGroupMembers(filerGroup, createIfNotFound) +func (cluster *Cluster) getFilers(filerGroup FilerGroup, createIfNotFound bool) *Filers { + filers, found := cluster.filerGroup2filers[filerGroup] + if !found && createIfNotFound { + filers = &Filers{ + filers: make(map[pb.ServerAddress]*ClusterNode), + leaders: &Leaders{}, + } + cluster.filerGroup2filers[filerGroup] = filers } - return nil + return filers } -func (cluster *Cluster) AddClusterNode(ns, nodeType string, dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse { - filerGroup := FilerGroupName(ns) +func (cluster *Cluster) AddClusterNode(ns, nodeType string, address pb.ServerAddress, version string) []*master_pb.KeepConnectedResponse { + filerGroup := FilerGroup(ns) switch nodeType { case FilerType: - return cluster.filerGroups.AddClusterNode(filerGroup, nodeType, dataCenter, rack, address, version) + cluster.filersLock.Lock() + defer cluster.filersLock.Unlock() + filers := cluster.getFilers(filerGroup, true) + if existingNode, found := filers.filers[address]; found { + existingNode.counter++ + return nil + } + filers.filers[address] = &ClusterNode{ + Address: address, + Version: version, + counter: 1, + CreatedTs: time.Now(), + } + return cluster.ensureFilerLeaders(filers, true, filerGroup, nodeType, address) case BrokerType: - return cluster.brokerGroups.AddClusterNode(filerGroup, nodeType, dataCenter, rack, address, version) + cluster.brokersLock.Lock() + defer cluster.brokersLock.Unlock() + if existingNode, found := cluster.brokers[address]; found { + existingNode.counter++ + return nil + } + cluster.brokers[address] = &ClusterNode{ + Address: address, + Version: version, + counter: 1, + CreatedTs: time.Now(), + } + return []*master_pb.KeepConnectedResponse{ + { + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + NodeType: nodeType, + Address: string(address), + IsAdd: true, + }, + }, + } case MasterType: - return buildClusterNodeUpdateMessage(true, filerGroup, nodeType, address) + return []*master_pb.KeepConnectedResponse{ + { + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + NodeType: nodeType, + Address: string(address), + IsAdd: true, + }, + }, + } } return nil } func (cluster *Cluster) RemoveClusterNode(ns string, nodeType string, address pb.ServerAddress) []*master_pb.KeepConnectedResponse { - filerGroup := FilerGroupName(ns) + filerGroup := FilerGroup(ns) switch nodeType { case FilerType: - return cluster.filerGroups.RemoveClusterNode(filerGroup, nodeType, address) + cluster.filersLock.Lock() + defer cluster.filersLock.Unlock() + filers := cluster.getFilers(filerGroup, false) + if filers == nil { + return nil + } + if existingNode, found := filers.filers[address]; !found { + return nil + } else { + existingNode.counter-- + if existingNode.counter <= 0 { + delete(filers.filers, address) + return cluster.ensureFilerLeaders(filers, false, filerGroup, nodeType, address) + } + } case BrokerType: - return cluster.brokerGroups.RemoveClusterNode(filerGroup, nodeType, address) + cluster.brokersLock.Lock() + defer cluster.brokersLock.Unlock() + if existingNode, found := cluster.brokers[address]; !found { + return nil + } else { + existingNode.counter-- + if existingNode.counter <= 0 { + delete(cluster.brokers, address) + return []*master_pb.KeepConnectedResponse{ + { + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + NodeType: nodeType, + Address: string(address), + IsAdd: false, + }, + }, + } + } + } case MasterType: - return buildClusterNodeUpdateMessage(false, filerGroup, nodeType, address) + return []*master_pb.KeepConnectedResponse{ + { + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + NodeType: nodeType, + Address: string(address), + IsAdd: false, + }, + }, + } } return nil } -func (cluster *Cluster) ListClusterNode(filerGroup FilerGroupName, nodeType string) (nodes []*ClusterNode) { +func (cluster *Cluster) ListClusterNode(filerGroup FilerGroup, nodeType string) (nodes []*ClusterNode) { switch nodeType { case FilerType: - return cluster.filerGroups.ListClusterNode(filerGroup) + cluster.filersLock.RLock() + defer cluster.filersLock.RUnlock() + filers := cluster.getFilers(filerGroup, false) + if filers == nil { + return + } + for _, node := range filers.filers { + nodes = append(nodes, node) + } case BrokerType: - return cluster.brokerGroups.ListClusterNode(filerGroup) + cluster.brokersLock.RLock() + defer cluster.brokersLock.RUnlock() + for _, node := range cluster.brokers { + nodes = append(nodes, node) + } case MasterType: } return } -func buildClusterNodeUpdateMessage(isAdd bool, filerGroup FilerGroupName, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) { - result = append(result, &master_pb.KeepConnectedResponse{ - ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ - FilerGroup: string(filerGroup), - NodeType: nodeType, - Address: string(address), - IsAdd: isAdd, - }, - }) +func (cluster *Cluster) IsOneLeader(filerGroup FilerGroup, address pb.ServerAddress) bool { + filers := cluster.getFilers(filerGroup, false) + if filers == nil { + return false + } + return filers.leaders.isOneLeader(address) +} + +func (cluster *Cluster) ensureFilerLeaders(filers *Filers, isAdd bool, filerGroup FilerGroup, nodeType string, address pb.ServerAddress) (result []*master_pb.KeepConnectedResponse) { + if isAdd { + if filers.leaders.addLeaderIfVacant(address) { + // has added the address as one leader + result = append(result, &master_pb.KeepConnectedResponse{ + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + FilerGroup: string(filerGroup), + NodeType: nodeType, + Address: string(address), + IsLeader: true, + IsAdd: true, + }, + }) + } else { + result = append(result, &master_pb.KeepConnectedResponse{ + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + FilerGroup: string(filerGroup), + NodeType: nodeType, + Address: string(address), + IsLeader: false, + IsAdd: true, + }, + }) + } + } else { + if filers.leaders.removeLeaderIfExists(address) { + + result = append(result, &master_pb.KeepConnectedResponse{ + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + FilerGroup: string(filerGroup), + NodeType: nodeType, + Address: string(address), + IsLeader: true, + IsAdd: false, + }, + }) + + // pick the freshest one, since it is less likely to go away + var shortestDuration int64 = math.MaxInt64 + now := time.Now() + var candidateAddress pb.ServerAddress + for _, node := range filers.filers { + if filers.leaders.isOneLeader(node.Address) { + continue + } + duration := now.Sub(node.CreatedTs).Nanoseconds() + if duration < shortestDuration { + shortestDuration = duration + candidateAddress = node.Address + } + } + if candidateAddress != "" { + filers.leaders.addLeaderIfVacant(candidateAddress) + // added a new leader + result = append(result, &master_pb.KeepConnectedResponse{ + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + NodeType: nodeType, + Address: string(candidateAddress), + IsLeader: true, + IsAdd: true, + }, + }) + } + } else { + result = append(result, &master_pb.KeepConnectedResponse{ + ClusterNodeUpdate: &master_pb.ClusterNodeUpdate{ + FilerGroup: string(filerGroup), + NodeType: nodeType, + Address: string(address), + IsLeader: false, + IsAdd: false, + }, + }) + } + } + return +} + +func (leaders *Leaders) addLeaderIfVacant(address pb.ServerAddress) (hasChanged bool) { + if leaders.isOneLeader(address) { + return + } + for i := 0; i < len(leaders.leaders); i++ { + if leaders.leaders[i] == "" { + leaders.leaders[i] = address + hasChanged = true + return + } + } + return +} +func (leaders *Leaders) removeLeaderIfExists(address pb.ServerAddress) (hasChanged bool) { + if !leaders.isOneLeader(address) { + return + } + for i := 0; i < len(leaders.leaders); i++ { + if leaders.leaders[i] == address { + leaders.leaders[i] = "" + hasChanged = true + return + } + } + return +} +func (leaders *Leaders) isOneLeader(address pb.ServerAddress) bool { + for i := 0; i < len(leaders.leaders); i++ { + if leaders.leaders[i] == address { + return true + } + } + return false +} +func (leaders *Leaders) GetLeaders() (addresses []pb.ServerAddress) { + for i := 0; i < len(leaders.leaders); i++ { + if leaders.leaders[i] != "" { + addresses = append(addresses, leaders.leaders[i]) + } + } return } diff --git a/weed/cluster/cluster_test.go b/weed/cluster/cluster_test.go index db8384d88..1187642de 100644 --- a/weed/cluster/cluster_test.go +++ b/weed/cluster/cluster_test.go @@ -1,12 +1,53 @@ package cluster import ( - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/stretchr/testify/assert" "strconv" "sync" "testing" ) +func TestClusterAddRemoveNodes(t *testing.T) { + c := NewCluster() + + c.AddClusterNode("", "filer", pb.ServerAddress("111:1"), "23.45") + c.AddClusterNode("", "filer", pb.ServerAddress("111:2"), "23.45") + assert.Equal(t, []pb.ServerAddress{ + pb.ServerAddress("111:1"), + pb.ServerAddress("111:2"), + }, c.getFilers("", false).leaders.GetLeaders()) + + c.AddClusterNode("", "filer", pb.ServerAddress("111:3"), "23.45") + c.AddClusterNode("", "filer", pb.ServerAddress("111:4"), "23.45") + assert.Equal(t, []pb.ServerAddress{ + pb.ServerAddress("111:1"), + pb.ServerAddress("111:2"), + pb.ServerAddress("111:3"), + }, c.getFilers("", false).leaders.GetLeaders()) + + c.AddClusterNode("", "filer", pb.ServerAddress("111:5"), "23.45") + c.AddClusterNode("", "filer", pb.ServerAddress("111:6"), "23.45") + c.RemoveClusterNode("", "filer", pb.ServerAddress("111:4")) + assert.Equal(t, []pb.ServerAddress{ + pb.ServerAddress("111:1"), + pb.ServerAddress("111:2"), + pb.ServerAddress("111:3"), + }, c.getFilers("", false).leaders.GetLeaders()) + + // remove oldest + c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1")) + assert.Equal(t, []pb.ServerAddress{ + pb.ServerAddress("111:6"), + pb.ServerAddress("111:2"), + pb.ServerAddress("111:3"), + }, c.getFilers("", false).leaders.GetLeaders()) + + // remove oldest + c.RemoveClusterNode("", "filer", pb.ServerAddress("111:1")) + +} + func TestConcurrentAddRemoveNodes(t *testing.T) { c := NewCluster() var wg sync.WaitGroup @@ -15,7 +56,7 @@ func TestConcurrentAddRemoveNodes(t *testing.T) { go func(i int) { defer wg.Done() address := strconv.Itoa(i) - c.AddClusterNode("", "filer", "", "", pb.ServerAddress(address), "23.45") + c.AddClusterNode("", "filer", pb.ServerAddress(address), "23.45") }(i) } wg.Wait() diff --git a/weed/cluster/group_members.go b/weed/cluster/group_members.go deleted file mode 100644 index 79bd78790..000000000 --- a/weed/cluster/group_members.go +++ /dev/null @@ -1,52 +0,0 @@ -package cluster - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb" - "time" -) - -type GroupMembers struct { - members map[pb.ServerAddress]*ClusterNode -} - -func newGroupMembers() *GroupMembers { - return &GroupMembers{ - members: make(map[pb.ServerAddress]*ClusterNode), - } -} - -func (m *GroupMembers) addMember(dataCenter DataCenter, rack Rack, address pb.ServerAddress, version string) *ClusterNode { - if existingNode, found := m.members[address]; found { - existingNode.counter++ - return nil - } - t := &ClusterNode{ - Address: address, - Version: version, - counter: 1, - CreatedTs: time.Now(), - DataCenter: dataCenter, - Rack: rack, - } - m.members[address] = t - return t -} -func (m *GroupMembers) removeMember(address pb.ServerAddress) bool { - if existingNode, found := m.members[address]; !found { - return false - } else { - existingNode.counter-- - if existingNode.counter <= 0 { - delete(m.members, address) - return true - } - } - return false -} - -func (m *GroupMembers) GetMembers() (addresses []pb.ServerAddress) { - for k := range m.members { - addresses = append(addresses, k) - } - return -} diff --git a/weed/cluster/lock_client.go b/weed/cluster/lock_client.go deleted file mode 100644 index 63d93ed54..000000000 --- a/weed/cluster/lock_client.go +++ /dev/null @@ -1,230 +0,0 @@ -package cluster - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" -) - -type LockClient struct { - grpcDialOption grpc.DialOption - maxLockDuration time.Duration - sleepDuration time.Duration - seedFiler pb.ServerAddress -} - -func NewLockClient(grpcDialOption grpc.DialOption, seedFiler pb.ServerAddress) *LockClient { - return &LockClient{ - grpcDialOption: grpcDialOption, - maxLockDuration: 5 * time.Second, - sleepDuration: 2473 * time.Millisecond, - seedFiler: seedFiler, - } -} - -type LiveLock struct { - key string - renewToken string - expireAtNs int64 - hostFiler pb.ServerAddress - cancelCh chan struct{} - grpcDialOption grpc.DialOption - isLocked bool - self string - lc *LockClient - owner string -} - -// NewShortLivedLock creates a lock with a 5-second duration -func (lc *LockClient) NewShortLivedLock(key string, owner string) (lock *LiveLock) { - lock = &LiveLock{ - key: key, - hostFiler: lc.seedFiler, - cancelCh: make(chan struct{}), - expireAtNs: time.Now().Add(5 * time.Second).UnixNano(), - grpcDialOption: lc.grpcDialOption, - self: owner, - lc: lc, - } - lock.retryUntilLocked(5 * time.Second) - return -} - -// StartLongLivedLock starts a goroutine to lock the key and returns immediately. -func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerChange func(newLockOwner string)) (lock *LiveLock) { - lock = &LiveLock{ - key: key, - hostFiler: lc.seedFiler, - cancelCh: make(chan struct{}), - expireAtNs: time.Now().Add(lock_manager.LiveLockTTL).UnixNano(), - grpcDialOption: lc.grpcDialOption, - self: owner, - lc: lc, - } - go func() { - isLocked := false - lockOwner := "" - for { - // Check for cancellation BEFORE attempting to lock to avoid race condition - // where Stop() is called after sleep but before lock attempt - select { - case <-lock.cancelCh: - return - default: - } - - if isLocked { - if err := lock.AttemptToLock(lock_manager.LiveLockTTL); err != nil { - glog.V(0).Infof("Lost lock %s: %v", key, err) - isLocked = false - } - } else { - if err := lock.AttemptToLock(lock_manager.LiveLockTTL); err == nil { - isLocked = true - } - } - if lockOwner != lock.LockOwner() && lock.LockOwner() != "" { - glog.V(0).Infof("Lock owner changed from %s to %s", lockOwner, lock.LockOwner()) - onLockOwnerChange(lock.LockOwner()) - lockOwner = lock.LockOwner() - } - select { - case <-lock.cancelCh: - return - default: - time.Sleep(lock_manager.RenewInterval) - } - } - }() - return -} - -func (lock *LiveLock) retryUntilLocked(lockDuration time.Duration) { - util.RetryUntil("create lock:"+lock.key, func() error { - return lock.AttemptToLock(lockDuration) - }, func(err error) (shouldContinue bool) { - if err != nil { - glog.Warningf("create lock %s: %s", lock.key, err) - } - return lock.renewToken == "" - }) -} - -func (lock *LiveLock) AttemptToLock(lockDuration time.Duration) error { - glog.V(4).Infof("LOCK: AttemptToLock key=%s owner=%s", lock.key, lock.self) - errorMessage, err := lock.doLock(lockDuration) - if err != nil { - glog.V(1).Infof("LOCK: doLock failed for key=%s: %v", lock.key, err) - time.Sleep(time.Second) - return err - } - if errorMessage != "" { - glog.V(1).Infof("LOCK: doLock returned error message for key=%s: %s", lock.key, errorMessage) - time.Sleep(time.Second) - return fmt.Errorf("%v", errorMessage) - } - if !lock.isLocked { - // Only log when transitioning from unlocked to locked - glog.V(1).Infof("LOCK: Successfully acquired key=%s owner=%s", lock.key, lock.self) - } - lock.isLocked = true - return nil -} - -func (lock *LiveLock) StopShortLivedLock() error { - if !lock.isLocked { - return nil - } - defer func() { - lock.isLocked = false - }() - return pb.WithFilerClient(false, 0, lock.hostFiler, lock.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - _, err := client.DistributedUnlock(context.Background(), &filer_pb.UnlockRequest{ - Name: lock.key, - RenewToken: lock.renewToken, - }) - return err - }) -} - -// Stop stops a long-lived lock by closing the cancel channel and releasing the lock -func (lock *LiveLock) Stop() error { - // Close the cancel channel to stop the long-lived lock goroutine - select { - case <-lock.cancelCh: - // Already closed - default: - close(lock.cancelCh) - } - - // Wait a brief moment for the goroutine to see the closed channel - // This reduces the race condition window where the goroutine might - // attempt one more lock operation after we've released the lock - time.Sleep(10 * time.Millisecond) - - // Also release the lock if held - // Note: We intentionally don't clear renewToken here because - // StopShortLivedLock needs it to properly unlock - return lock.StopShortLivedLock() -} - -func (lock *LiveLock) doLock(lockDuration time.Duration) (errorMessage string, err error) { - glog.V(4).Infof("LOCK: doLock calling DistributedLock - key=%s filer=%s owner=%s", - lock.key, lock.hostFiler, lock.self) - - previousHostFiler := lock.hostFiler - previousOwner := lock.owner - - err = pb.WithFilerClient(false, 0, lock.hostFiler, lock.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.DistributedLock(context.Background(), &filer_pb.LockRequest{ - Name: lock.key, - SecondsToLock: int64(lockDuration.Seconds()), - RenewToken: lock.renewToken, - IsMoved: false, - Owner: lock.self, - }) - glog.V(4).Infof("LOCK: DistributedLock response - key=%s err=%v", lock.key, err) - if err == nil && resp != nil { - lock.renewToken = resp.RenewToken - glog.V(4).Infof("LOCK: Got renewToken for key=%s", lock.key) - } else { - //this can be retried. Need to remember the last valid renewToken - lock.renewToken = "" - glog.V(1).Infof("LOCK: Cleared renewToken for key=%s (err=%v)", lock.key, err) - } - if resp != nil { - errorMessage = resp.Error - if resp.LockHostMovedTo != "" && resp.LockHostMovedTo != string(previousHostFiler) { - // Only log if the host actually changed - glog.V(1).Infof("LOCK: Host changed from %s to %s for key=%s", previousHostFiler, resp.LockHostMovedTo, lock.key) - lock.hostFiler = pb.ServerAddress(resp.LockHostMovedTo) - lock.lc.seedFiler = lock.hostFiler - } else if resp.LockHostMovedTo != "" { - lock.hostFiler = pb.ServerAddress(resp.LockHostMovedTo) - } - if resp.LockOwner != "" && resp.LockOwner != previousOwner { - // Only log if the owner actually changed - glog.V(1).Infof("LOCK: Owner changed from %s to %s for key=%s", previousOwner, resp.LockOwner, lock.key) - lock.owner = resp.LockOwner - } else if resp.LockOwner != "" { - lock.owner = resp.LockOwner - } else if previousOwner != "" { - glog.V(1).Infof("LOCK: Owner cleared for key=%s", lock.key) - lock.owner = "" - } - } - return err - }) - return -} - -func (lock *LiveLock) LockOwner() string { - return lock.owner -} diff --git a/weed/cluster/lock_manager/distributed_lock_manager.go b/weed/cluster/lock_manager/distributed_lock_manager.go deleted file mode 100644 index 7de78410f..000000000 --- a/weed/cluster/lock_manager/distributed_lock_manager.go +++ /dev/null @@ -1,103 +0,0 @@ -package lock_manager - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "time" -) - -const RenewInterval = time.Second * 3 -const LiveLockTTL = time.Second * 7 - -var NoLockServerError = fmt.Errorf("no lock server found") - -type DistributedLockManager struct { - lockManager *LockManager - LockRing *LockRing - Host pb.ServerAddress -} - -func NewDistributedLockManager(host pb.ServerAddress) *DistributedLockManager { - return &DistributedLockManager{ - lockManager: NewLockManager(), - LockRing: NewLockRing(time.Second * 5), - Host: host, - } -} - -func (dlm *DistributedLockManager) LockWithTimeout(key string, expiredAtNs int64, token string, owner string) (lockOwner string, renewToken string, movedTo pb.ServerAddress, err error) { - movedTo, err = dlm.findLockOwningFiler(key) - if err != nil { - return - } - if movedTo != dlm.Host { - return - } - lockOwner, renewToken, err = dlm.lockManager.Lock(key, expiredAtNs, token, owner) - return -} - -func (dlm *DistributedLockManager) findLockOwningFiler(key string) (movedTo pb.ServerAddress, err error) { - servers := dlm.LockRing.GetSnapshot() - if servers == nil { - err = NoLockServerError - return - } - - movedTo = hashKeyToServer(key, servers) - return -} - -func (dlm *DistributedLockManager) FindLockOwner(key string) (owner string, movedTo pb.ServerAddress, err error) { - movedTo, err = dlm.findLockOwningFiler(key) - if err != nil { - return - } - if movedTo != dlm.Host { - servers := dlm.LockRing.GetSnapshot() - glog.V(0).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers) - return - } - owner, err = dlm.lockManager.GetLockOwner(key) - return -} - -func (dlm *DistributedLockManager) Unlock(key string, token string) (movedTo pb.ServerAddress, err error) { - servers := dlm.LockRing.GetSnapshot() - if servers == nil { - err = NoLockServerError - return - } - - server := hashKeyToServer(key, servers) - if server != dlm.Host { - movedTo = server - return - } - _, err = dlm.lockManager.Unlock(key, token) - return -} - -// InsertLock is used to insert a lock to a server unconditionally -// It is used when a server is down and the lock is moved to another server -func (dlm *DistributedLockManager) InsertLock(key string, expiredAtNs int64, token string, owner string) { - dlm.lockManager.InsertLock(key, expiredAtNs, token, owner) -} -func (dlm *DistributedLockManager) SelectNotOwnedLocks(servers []pb.ServerAddress) (locks []*Lock) { - return dlm.lockManager.SelectLocks(func(key string) bool { - server := hashKeyToServer(key, servers) - return server != dlm.Host - }) -} -func (dlm *DistributedLockManager) CalculateTargetServer(key string, servers []pb.ServerAddress) pb.ServerAddress { - return hashKeyToServer(key, servers) -} - -func (dlm *DistributedLockManager) IsLocal(key string) bool { - servers := dlm.LockRing.GetSnapshot() - if len(servers) <= 1 { - return true - } - return hashKeyToServer(key, servers) == dlm.Host -} diff --git a/weed/cluster/lock_manager/lock_manager.go b/weed/cluster/lock_manager/lock_manager.go deleted file mode 100644 index ebc9dfeaa..000000000 --- a/weed/cluster/lock_manager/lock_manager.go +++ /dev/null @@ -1,182 +0,0 @@ -package lock_manager - -import ( - "fmt" - "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" - "sync" - "time" -) - -var LockErrorNonEmptyTokenOnNewLock = fmt.Errorf("lock: non-empty token on a new lock") -var LockErrorNonEmptyTokenOnExpiredLock = fmt.Errorf("lock: non-empty token on an expired lock") -var LockErrorTokenMismatch = fmt.Errorf("lock: token mismatch") -var UnlockErrorTokenMismatch = fmt.Errorf("unlock: token mismatch") -var LockNotFound = fmt.Errorf("lock not found") - -// LockManager local lock manager, used by distributed lock manager -type LockManager struct { - locks map[string]*Lock - accessLock sync.RWMutex -} -type Lock struct { - Token string - ExpiredAtNs int64 - Key string // only used for moving locks - Owner string -} - -func NewLockManager() *LockManager { - t := &LockManager{ - locks: make(map[string]*Lock), - } - go t.CleanUp() - return t -} - -func (lm *LockManager) Lock(path string, expiredAtNs int64, token string, owner string) (lockOwner, renewToken string, err error) { - lm.accessLock.Lock() - defer lm.accessLock.Unlock() - - glog.V(4).Infof("lock %s %v %v %v", path, time.Unix(0, expiredAtNs), token, owner) - - if oldValue, found := lm.locks[path]; found { - if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < time.Now().UnixNano() { - // lock is expired, set to a new lock - if token != "" { - glog.V(4).Infof("lock expired key %s non-empty token %v owner %v ts %s", path, token, owner, time.Unix(0, oldValue.ExpiredAtNs)) - err = LockErrorNonEmptyTokenOnExpiredLock - return - } else { - // new lock - renewToken = uuid.New().String() - glog.V(4).Infof("key %s new token %v owner %v", path, renewToken, owner) - lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} - return - } - } - // not expired - lockOwner = oldValue.Owner - if oldValue.Token == token { - // token matches, renew the lock - renewToken = uuid.New().String() - glog.V(4).Infof("key %s old token %v owner %v => %v owner %v", path, oldValue.Token, oldValue.Owner, renewToken, owner) - lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} - return - } else { - if token == "" { - // new lock - glog.V(4).Infof("key %s locked by %v", path, oldValue.Owner) - err = fmt.Errorf("lock already owned by %v", oldValue.Owner) - return - } - glog.V(4).Infof("key %s expected token %v owner %v received %v from %v", path, oldValue.Token, oldValue.Owner, token, owner) - err = fmt.Errorf("lock: token mismatch") - return - } - } else { - glog.V(4).Infof("key %s no lock owner %v", path, owner) - if token == "" { - // new lock - glog.V(4).Infof("key %s new token %v owner %v", path, token, owner) - renewToken = uuid.New().String() - lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} - return - } else { - glog.V(4).Infof("key %s non-empty token %v owner %v", path, token, owner) - err = LockErrorNonEmptyTokenOnNewLock - return - } - } -} - -func (lm *LockManager) Unlock(path string, token string) (isUnlocked bool, err error) { - lm.accessLock.Lock() - defer lm.accessLock.Unlock() - - if oldValue, found := lm.locks[path]; found { - now := time.Now() - if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < now.UnixNano() { - // lock is expired, delete it - isUnlocked = true - glog.V(4).Infof("key %s expired at %v", path, time.Unix(0, oldValue.ExpiredAtNs)) - delete(lm.locks, path) - return - } - if oldValue.Token == token { - isUnlocked = true - glog.V(4).Infof("key %s unlocked with %v", path, token) - delete(lm.locks, path) - return - } else { - isUnlocked = false - err = UnlockErrorTokenMismatch - return - } - } - err = LockNotFound - return -} - -func (lm *LockManager) CleanUp() { - - for { - time.Sleep(1 * time.Minute) - now := time.Now().UnixNano() - - lm.accessLock.Lock() - for key, value := range lm.locks { - if value == nil { - continue - } - if now > value.ExpiredAtNs { - glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, value.ExpiredAtNs)) - delete(lm.locks, key) - } - } - lm.accessLock.Unlock() - } -} - -// SelectLocks takes out locks by key -// if keyFn return true, the lock will be taken out -func (lm *LockManager) SelectLocks(selectFn func(key string) bool) (locks []*Lock) { - lm.accessLock.RLock() - defer lm.accessLock.RUnlock() - - now := time.Now().UnixNano() - - for key, lock := range lm.locks { - if now > lock.ExpiredAtNs { - glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, lock.ExpiredAtNs)) - delete(lm.locks, key) - continue - } - if selectFn(key) { - glog.V(4).Infof("key %s selected and deleted", key) - delete(lm.locks, key) - lock.Key = key - locks = append(locks, lock) - } - } - return -} - -// InsertLock inserts a lock unconditionally -func (lm *LockManager) InsertLock(path string, expiredAtNs int64, token string, owner string) { - lm.accessLock.Lock() - defer lm.accessLock.Unlock() - - lm.locks[path] = &Lock{Token: token, ExpiredAtNs: expiredAtNs, Owner: owner} -} - -func (lm *LockManager) GetLockOwner(key string) (owner string, err error) { - lm.accessLock.RLock() - defer lm.accessLock.RUnlock() - - if lock, found := lm.locks[key]; found { - return lock.Owner, nil - } - err = LockNotFound - return -} diff --git a/weed/cluster/lock_manager/lock_ring.go b/weed/cluster/lock_manager/lock_ring.go deleted file mode 100644 index 398f26153..000000000 --- a/weed/cluster/lock_manager/lock_ring.go +++ /dev/null @@ -1,203 +0,0 @@ -package lock_manager - -import ( - "sort" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -type LockRingSnapshot struct { - servers []pb.ServerAddress - ts time.Time -} - -type LockRing struct { - sync.RWMutex - snapshots []*LockRingSnapshot - candidateServers map[pb.ServerAddress]struct{} - lastUpdateTime time.Time - lastCompactTime time.Time - snapshotInterval time.Duration - onTakeSnapshot func(snapshot []pb.ServerAddress) - cleanupWg sync.WaitGroup -} - -func NewLockRing(snapshotInterval time.Duration) *LockRing { - return &LockRing{ - snapshotInterval: snapshotInterval, - candidateServers: make(map[pb.ServerAddress]struct{}), - } -} - -func (r *LockRing) SetTakeSnapshotCallback(onTakeSnapshot func(snapshot []pb.ServerAddress)) { - r.Lock() - defer r.Unlock() - r.onTakeSnapshot = onTakeSnapshot -} - -// AddServer adds a server to the ring -// if the previous snapshot passed the snapshot interval, create a new snapshot -func (r *LockRing) AddServer(server pb.ServerAddress) { - glog.V(0).Infof("add server %v", server) - r.Lock() - - if _, found := r.candidateServers[server]; found { - glog.V(0).Infof("add server: already exists %v", server) - r.Unlock() - return - } - r.lastUpdateTime = time.Now() - r.candidateServers[server] = struct{}{} - r.Unlock() - - r.takeSnapshotWithDelayedCompaction() -} - -func (r *LockRing) RemoveServer(server pb.ServerAddress) { - glog.V(0).Infof("remove server %v", server) - - r.Lock() - - if _, found := r.candidateServers[server]; !found { - r.Unlock() - return - } - r.lastUpdateTime = time.Now() - delete(r.candidateServers, server) - r.Unlock() - - r.takeSnapshotWithDelayedCompaction() -} - -func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) { - - sort.Slice(servers, func(i, j int) bool { - return servers[i] < servers[j] - }) - - r.Lock() - r.lastUpdateTime = time.Now() - // init candidateServers - for _, server := range servers { - r.candidateServers[server] = struct{}{} - } - r.Unlock() - - r.addOneSnapshot(servers) - - r.cleanupWg.Add(1) - go func() { - defer r.cleanupWg.Done() - <-time.After(r.snapshotInterval) - r.compactSnapshots() - }() -} - -func (r *LockRing) takeSnapshotWithDelayedCompaction() { - r.doTakeSnapshot() - - r.cleanupWg.Add(1) - go func() { - defer r.cleanupWg.Done() - <-time.After(r.snapshotInterval) - r.compactSnapshots() - }() -} - -func (r *LockRing) doTakeSnapshot() { - servers := r.getSortedServers() - - r.addOneSnapshot(servers) -} - -func (r *LockRing) addOneSnapshot(servers []pb.ServerAddress) { - r.Lock() - defer r.Unlock() - - ts := time.Now() - t := &LockRingSnapshot{ - servers: servers, - ts: ts, - } - r.snapshots = append(r.snapshots, t) - for i := len(r.snapshots) - 2; i >= 0; i-- { - r.snapshots[i+1] = r.snapshots[i] - } - r.snapshots[0] = t - - if r.onTakeSnapshot != nil { - r.onTakeSnapshot(t.servers) - } -} - -func (r *LockRing) compactSnapshots() { - r.Lock() - defer r.Unlock() - - // Always attempt compaction when called, regardless of lastCompactTime - // This ensures proper cleanup even with multiple concurrent compaction requests - - ts := time.Now() - // remove old snapshots - recentSnapshotIndex := 1 - for ; recentSnapshotIndex < len(r.snapshots); recentSnapshotIndex++ { - if ts.Sub(r.snapshots[recentSnapshotIndex].ts) > r.snapshotInterval { - break - } - } - // keep the one that has been running for a while - if recentSnapshotIndex+1 <= len(r.snapshots) { - r.snapshots = r.snapshots[:recentSnapshotIndex+1] - } - r.lastCompactTime = ts -} - -func (r *LockRing) getSortedServers() []pb.ServerAddress { - sortedServers := make([]pb.ServerAddress, 0, len(r.candidateServers)) - for server := range r.candidateServers { - sortedServers = append(sortedServers, server) - } - sort.Slice(sortedServers, func(i, j int) bool { - return sortedServers[i] < sortedServers[j] - }) - return sortedServers -} - -func (r *LockRing) GetSnapshot() (servers []pb.ServerAddress) { - r.RLock() - defer r.RUnlock() - - if len(r.snapshots) == 0 { - return - } - return r.snapshots[0].servers -} - -// WaitForCleanup waits for all pending cleanup operations to complete -// This is useful for testing to ensure deterministic behavior -func (r *LockRing) WaitForCleanup() { - r.cleanupWg.Wait() -} - -// GetSnapshotCount safely returns the number of snapshots for testing -func (r *LockRing) GetSnapshotCount() int { - r.RLock() - defer r.RUnlock() - return len(r.snapshots) -} - -func hashKeyToServer(key string, servers []pb.ServerAddress) pb.ServerAddress { - if len(servers) == 0 { - return "" - } - x := util.HashStringToLong(key) - if x < 0 { - x = -x - } - x = x % int64(len(servers)) - return servers[x] -} diff --git a/weed/cluster/lock_manager/lock_ring_test.go b/weed/cluster/lock_manager/lock_ring_test.go deleted file mode 100644 index f82a5ffe4..000000000 --- a/weed/cluster/lock_manager/lock_ring_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package lock_manager - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/stretchr/testify/assert" -) - -func TestAddServer(t *testing.T) { - r := NewLockRing(100 * time.Millisecond) - - // Add servers - r.AddServer("localhost:8080") - r.AddServer("localhost:8081") - r.AddServer("localhost:8082") - r.AddServer("localhost:8083") - r.AddServer("localhost:8084") - - // Verify all servers are present - servers := r.GetSnapshot() - assert.Equal(t, 5, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8080")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8081")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8082")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8083")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8084")) - - // Remove servers - r.RemoveServer("localhost:8084") - r.RemoveServer("localhost:8082") - r.RemoveServer("localhost:8080") - - // Wait for all cleanup operations to complete - r.WaitForCleanup() - - // Verify only 2 servers remain (localhost:8081 and localhost:8083) - servers = r.GetSnapshot() - assert.Equal(t, 2, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8081")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8083")) - - // Verify cleanup has happened - wait for snapshot interval and check snapshots are compacted - time.Sleep(110 * time.Millisecond) - r.WaitForCleanup() - - // Verify snapshot history is cleaned up properly (should have at most 2 snapshots after compaction) - snapshotCount := r.GetSnapshotCount() - assert.LessOrEqual(t, snapshotCount, 2, "Snapshot history should be compacted") -} - -func TestLockRing(t *testing.T) { - r := NewLockRing(100 * time.Millisecond) - - // Test initial snapshot - r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081"}) - assert.Equal(t, 1, r.GetSnapshotCount()) - servers := r.GetSnapshot() - assert.Equal(t, 2, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8080")) - assert.Contains(t, servers, pb.ServerAddress("localhost:8081")) - - // Add another server - r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082"}) - assert.Equal(t, 2, r.GetSnapshotCount()) - servers = r.GetSnapshot() - assert.Equal(t, 3, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8082")) - - // Wait for cleanup interval and add another server - time.Sleep(110 * time.Millisecond) - r.WaitForCleanup() - r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083"}) - assert.LessOrEqual(t, r.GetSnapshotCount(), 3) - servers = r.GetSnapshot() - assert.Equal(t, 4, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8083")) - - // Wait for cleanup and verify compaction - time.Sleep(110 * time.Millisecond) - r.WaitForCleanup() - assert.LessOrEqual(t, r.GetSnapshotCount(), 2, "Snapshots should be compacted") - - // Add final server - r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083", "localhost:8084"}) - servers = r.GetSnapshot() - assert.Equal(t, 5, len(servers)) - assert.Contains(t, servers, pb.ServerAddress("localhost:8084")) - assert.LessOrEqual(t, r.GetSnapshotCount(), 3) -} diff --git a/weed/cluster/master_client.go b/weed/cluster/master_client.go deleted file mode 100644 index bab2360fe..000000000 --- a/weed/cluster/master_client.go +++ /dev/null @@ -1,34 +0,0 @@ -package cluster - -import ( - "context" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "google.golang.org/grpc" -) - -func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOption, filerGroup string, clientType string) (existingNodes []*master_pb.ClusterNodeUpdate) { - - if grpcErr := pb.WithMasterClient(false, master, grpcDialOption, false, func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: clientType, - FilerGroup: filerGroup, - }) - - glog.V(0).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType) - for _, node := range resp.ClusterNodes { - existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{ - NodeType: FilerType, - Address: node.Address, - IsAdd: true, - CreatedAtNs: node.CreatedAtNs, - }) - } - return err - }); grpcErr != nil { - glog.V(0).Infof("connect to %s: %v", master, grpcErr) - } - return -} diff --git a/weed/command/admin.go b/weed/command/admin.go deleted file mode 100644 index 8321aad80..000000000 --- a/weed/command/admin.go +++ /dev/null @@ -1,376 +0,0 @@ -package command - -import ( - "context" - "crypto/rand" - "fmt" - "log" - "net/http" - "os" - "os/signal" - "os/user" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/gin-contrib/sessions" - "github.com/gin-contrib/sessions/cookie" - "github.com/gin-gonic/gin" - "github.com/spf13/viper" - - "github.com/seaweedfs/seaweedfs/weed/admin" - "github.com/seaweedfs/seaweedfs/weed/admin/dash" - "github.com/seaweedfs/seaweedfs/weed/admin/handlers" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - a AdminOptions -) - -type AdminOptions struct { - port *int - grpcPort *int - masters *string - adminUser *string - adminPassword *string - dataDir *string -} - -func init() { - cmdAdmin.Run = runAdmin // break init cycle - a.port = cmdAdmin.Flag.Int("port", 23646, "admin server port") - a.grpcPort = cmdAdmin.Flag.Int("port.grpc", 0, "gRPC server port for worker connections (default: http port + 10000)") - a.masters = cmdAdmin.Flag.String("masters", "localhost:9333", "comma-separated master servers") - a.dataDir = cmdAdmin.Flag.String("dataDir", "", "directory to store admin configuration and data files") - - a.adminUser = cmdAdmin.Flag.String("adminUser", "admin", "admin interface username") - a.adminPassword = cmdAdmin.Flag.String("adminPassword", "", "admin interface password (if empty, auth is disabled)") -} - -var cmdAdmin = &Command{ - UsageLine: "admin -port=23646 -masters=localhost:9333 [-port.grpc=33646] [-dataDir=/path/to/data]", - Short: "start SeaweedFS web admin interface", - Long: `Start a web admin interface for SeaweedFS cluster management. - - The admin interface provides a modern web interface for: - - Cluster topology visualization and monitoring - - Volume management and operations - - File browser and management - - System metrics and performance monitoring - - Configuration management - - Maintenance operations - - The admin interface automatically discovers filers from the master servers. - A gRPC server for worker connections runs on the configured gRPC port (default: HTTP port + 10000). - - Example Usage: - weed admin -port=23646 -masters="master1:9333,master2:9333" - weed admin -port=23646 -masters="localhost:9333" -dataDir="/var/lib/seaweedfs-admin" - weed admin -port=23646 -port.grpc=33646 -masters="localhost:9333" -dataDir="~/seaweedfs-admin" - weed admin -port=9900 -port.grpc=19900 -masters="localhost:9333" - - Data Directory: - - If dataDir is specified, admin configuration and maintenance data is persisted - - The directory will be created if it doesn't exist - - Configuration files are stored in JSON format for easy editing - - Without dataDir, all configuration is kept in memory only - - Authentication: - - If adminPassword is not set, the admin interface runs without authentication - - If adminPassword is set, users must login with adminUser/adminPassword - - Sessions are secured with auto-generated session keys - - Security Configuration: - - The admin server reads TLS configuration from security.toml - - Configure [https.admin] section in security.toml for HTTPS support - - If https.admin.key is set, the server will start in TLS mode - - If https.admin.ca is set, mutual TLS authentication is enabled - - Set strong adminPassword for production deployments - - Configure firewall rules to restrict admin interface access - - security.toml Example: - [https.admin] - cert = "/etc/ssl/admin.crt" - key = "/etc/ssl/admin.key" - ca = "/etc/ssl/ca.crt" # optional, for mutual TLS - - Worker Communication: - - Workers connect via gRPC on HTTP port + 10000 - - Workers use [grpc.admin] configuration from security.toml - - TLS is automatically used if certificates are configured - - Workers fall back to insecure connections if TLS is unavailable - - Configuration File: - - The security.toml file is read from ".", "$HOME/.seaweedfs/", - "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order - - Generate example security.toml: weed scaffold -config=security - -`, -} - -func runAdmin(cmd *Command, args []string) bool { - // Load security configuration - util.LoadSecurityConfiguration() - - // Validate required parameters - if *a.masters == "" { - fmt.Println("Error: masters parameter is required") - fmt.Println("Usage: weed admin -masters=master1:9333,master2:9333") - return false - } - - // Validate that masters string can be parsed - masterAddresses := pb.ServerAddresses(*a.masters).ToAddresses() - if len(masterAddresses) == 0 { - fmt.Println("Error: no valid master addresses found") - fmt.Println("Usage: weed admin -masters=master1:9333,master2:9333") - return false - } - - // Set default gRPC port if not specified - if *a.grpcPort == 0 { - *a.grpcPort = *a.port + 10000 - } - - // Security warnings - if *a.adminPassword == "" { - fmt.Println("WARNING: Admin interface is running without authentication!") - fmt.Println(" Set -adminPassword for production use") - } - - fmt.Printf("Starting SeaweedFS Admin Interface on port %d\n", *a.port) - fmt.Printf("Worker gRPC server will run on port %d\n", *a.grpcPort) - fmt.Printf("Masters: %s\n", *a.masters) - fmt.Printf("Filers will be discovered automatically from masters\n") - if *a.dataDir != "" { - fmt.Printf("Data Directory: %s\n", *a.dataDir) - } else { - fmt.Printf("Data Directory: Not specified (configuration will be in-memory only)\n") - } - if *a.adminPassword != "" { - fmt.Printf("Authentication: Enabled (user: %s)\n", *a.adminUser) - } else { - fmt.Printf("Authentication: Disabled\n") - } - - // Set up graceful shutdown - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Handle interrupt signals - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - sig := <-sigChan - fmt.Printf("\nReceived signal %v, shutting down gracefully...\n", sig) - cancel() - }() - - // Start the admin server with all masters - err := startAdminServer(ctx, a) - if err != nil { - fmt.Printf("Admin server error: %v\n", err) - return false - } - - fmt.Println("Admin server stopped") - return true -} - -// startAdminServer starts the actual admin server -func startAdminServer(ctx context.Context, options AdminOptions) error { - // Set Gin mode - gin.SetMode(gin.ReleaseMode) - - // Create router - r := gin.New() - r.Use(gin.Logger(), gin.Recovery()) - - // Session store - always auto-generate session key - sessionKeyBytes := make([]byte, 32) - _, err := rand.Read(sessionKeyBytes) - if err != nil { - return fmt.Errorf("failed to generate session key: %w", err) - } - store := cookie.NewStore(sessionKeyBytes) - - // Configure session options to ensure cookies are properly saved - store.Options(sessions.Options{ - Path: "/", - MaxAge: 3600 * 24, // 24 hours - }) - - r.Use(sessions.Sessions("admin-session", store)) - - // Static files - serve from embedded filesystem - staticFS, err := admin.GetStaticFS() - if err != nil { - log.Printf("Warning: Failed to load embedded static files: %v", err) - } else { - r.StaticFS("/static", http.FS(staticFS)) - } - - // Create data directory if specified - var dataDir string - if *options.dataDir != "" { - // Expand tilde (~) to home directory - expandedDir, err := expandHomeDir(*options.dataDir) - if err != nil { - return fmt.Errorf("failed to expand dataDir path %s: %v", *options.dataDir, err) - } - dataDir = expandedDir - - // Show path expansion if it occurred - if dataDir != *options.dataDir { - fmt.Printf("Expanded dataDir: %s -> %s\n", *options.dataDir, dataDir) - } - - if err := os.MkdirAll(dataDir, 0755); err != nil { - return fmt.Errorf("failed to create data directory %s: %v", dataDir, err) - } - fmt.Printf("Data directory created/verified: %s\n", dataDir) - } - - // Create admin server - adminServer := dash.NewAdminServer(*options.masters, nil, dataDir) - - // Show discovered filers - filers := adminServer.GetAllFilers() - if len(filers) > 0 { - fmt.Printf("Discovered filers: %s\n", strings.Join(filers, ", ")) - } else { - fmt.Printf("No filers discovered from masters\n") - } - - // Start worker gRPC server for worker connections - err = adminServer.StartWorkerGrpcServer(*options.grpcPort) - if err != nil { - return fmt.Errorf("failed to start worker gRPC server: %w", err) - } - - // Set up cleanup for gRPC server - defer func() { - if stopErr := adminServer.StopWorkerGrpcServer(); stopErr != nil { - log.Printf("Error stopping worker gRPC server: %v", stopErr) - } - }() - - // Create handlers and setup routes - adminHandlers := handlers.NewAdminHandlers(adminServer) - adminHandlers.SetupRoutes(r, *options.adminPassword != "", *options.adminUser, *options.adminPassword) - - // Server configuration - addr := fmt.Sprintf(":%d", *options.port) - server := &http.Server{ - Addr: addr, - Handler: r, - } - - // Start server - go func() { - log.Printf("Starting SeaweedFS Admin Server on port %d", *options.port) - - // start http or https server with security.toml - var ( - clientCertFile, - certFile, - keyFile string - ) - useTLS := false - useMTLS := false - - if viper.GetString("https.admin.key") != "" { - useTLS = true - certFile = viper.GetString("https.admin.cert") - keyFile = viper.GetString("https.admin.key") - } - - if viper.GetString("https.admin.ca") != "" { - useMTLS = true - clientCertFile = viper.GetString("https.admin.ca") - } - - if useMTLS { - server.TLSConfig = security.LoadClientTLSHTTP(clientCertFile) - } - - if useTLS { - log.Printf("Starting SeaweedFS Admin Server with TLS on port %d", *options.port) - err = server.ListenAndServeTLS(certFile, keyFile) - } else { - err = server.ListenAndServe() - } - - if err != nil && err != http.ErrServerClosed { - log.Printf("Failed to start server: %v", err) - } - }() - - // Wait for context cancellation - <-ctx.Done() - - // Graceful shutdown - log.Println("Shutting down admin server...") - shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - if err := server.Shutdown(shutdownCtx); err != nil { - return fmt.Errorf("admin server forced to shutdown: %w", err) - } - - return nil -} - -// GetAdminOptions returns the admin command options for testing -func GetAdminOptions() *AdminOptions { - return &AdminOptions{} -} - -// expandHomeDir expands the tilde (~) in a path to the user's home directory -func expandHomeDir(path string) (string, error) { - if path == "" { - return path, nil - } - - if !strings.HasPrefix(path, "~") { - return path, nil - } - - // Get current user - currentUser, err := user.Current() - if err != nil { - return "", fmt.Errorf("failed to get current user: %w", err) - } - - // Handle different tilde patterns - if path == "~" { - return currentUser.HomeDir, nil - } - - if strings.HasPrefix(path, "~/") { - return filepath.Join(currentUser.HomeDir, path[2:]), nil - } - - // Handle ~username/ patterns - if strings.HasPrefix(path, "~") { - parts := strings.SplitN(path[1:], "/", 2) - username := parts[0] - - targetUser, err := user.Lookup(username) - if err != nil { - return "", fmt.Errorf("user %s not found: %v", username, err) - } - - if len(parts) == 1 { - return targetUser.HomeDir, nil - } - return filepath.Join(targetUser.HomeDir, parts[1]), nil - } - - return path, nil -} diff --git a/weed/command/autocomplete.go b/weed/command/autocomplete.go index f63c8df41..955ce4006 100644 --- a/weed/command/autocomplete.go +++ b/weed/command/autocomplete.go @@ -2,9 +2,9 @@ package command import ( "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "github.com/posener/complete" completeinstall "github.com/posener/complete/cmd/install" - flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" "runtime" ) diff --git a/weed/command/backup.go b/weed/command/backup.go index d5599372e..c43b0d351 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -1,18 +1,16 @@ package command import ( - "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/storage" ) var ( @@ -66,7 +64,7 @@ var cmdBackup = &Command{ func runBackup(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if *s.volumeId == -1 { @@ -75,7 +73,7 @@ func runBackup(cmd *Command, args []string) bool { vid := needle.VolumeId(*s.volumeId) // find volume location, replication, ttl info - lookup, err := operation.LookupVolumeId(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String()) + lookup, err := operation.LookupVolumeId(func() pb.ServerAddress { return pb.ServerAddress(*s.master) }, grpcDialOption, vid.String()) if err != nil { fmt.Printf("Error looking up volume %d: %v\n", vid, err) return true @@ -115,10 +113,7 @@ func runBackup(cmd *Command, args []string) bool { return true } } - - ver := needle.Version(stats.Version) - - v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0) + v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true @@ -141,11 +136,9 @@ func runBackup(cmd *Command, args []string) bool { if datSize > stats.TailOffset { // remove the old data - if err := v.Destroy(false); err != nil { - fmt.Printf("Error destroying volume: %v\n", err) - } + v.Destroy() // recreate an empty volume - v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0) + v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index e0cb31437..9f18cc5b9 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -2,10 +2,8 @@ package command import ( "bufio" - "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/chrislusf/seaweedfs/weed/pb" "io" "math" "math/rand" @@ -18,12 +16,11 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) type BenchmarkOptions struct { @@ -44,6 +41,7 @@ type BenchmarkOptions struct { grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient fsync *bool + useTcp *bool } var ( @@ -70,6 +68,7 @@ func init() { b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write") + b.useTcp = cmdBenchmark.Flag.Bool("useTcp", false, "send data via tcp") sharedBytes = make([]byte, 1024) } @@ -113,10 +112,10 @@ var ( func runBenchmark(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - fmt.Printf("This is SeaweedFS version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { *b.maxCpu = runtime.NumCPU() } @@ -130,10 +129,9 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", "", *pb.ServerAddresses(*b.masters).ToServiceDiscovery()) - ctx := context.Background() - go b.masterClient.KeepConnectedToMaster(ctx) - b.masterClient.WaitUntilConnected(ctx) + b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "", "client", "", "", pb.ServerAddresses(*b.masters).ToAddressMap()) + go b.masterClient.KeepConnectedToMaster() + b.masterClient.WaitUntilConnected() if *b.write { benchWrite() @@ -214,9 +212,9 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { } var jwtAuthorization security.EncodedJwt if isSecure { - jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(context.Background()), b.grpcDialOption, df.fp.Fid) + jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), b.grpcDialOption, df.fp.Fid) } - if e := util_http.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { + if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { s.completed++ } else { s.failed++ @@ -227,6 +225,8 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { random := rand.New(rand.NewSource(time.Now().UnixNano())) + volumeTcpClient := wdclient.NewVolumeTcpClient() + for id := range idChan { start := time.Now() fileSize := int64(*b.fileSize + random.Intn(64)) @@ -242,12 +242,20 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { Replication: *b.replication, DiskType: *b.diskType, } - if assignResult, err := operation.Assign(context.Background(), b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil { - fp.Server, fp.Fid, fp.Pref.Collection = assignResult.Url, assignResult.Fid, *b.collection + if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil { + fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil { + if *b.useTcp { + if uploadByTcp(volumeTcpClient, fp) { + fileIdLineChan <- fp.Fid + s.completed++ + s.transferred += fileSize + } else { + s.failed++ + } + } else if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -289,7 +297,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { start := time.Now() var bytesRead int var err error - urls, err := b.masterClient.LookupFileId(context.Background(), fid) + urls, err := b.masterClient.LookupFileId(fid) if err != nil { s.failed++ println("!!!! ", fid, " location not found!!!!!") @@ -297,7 +305,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { } var bytes []byte for _, url := range urls { - bytes, _, err = util_http.Get(url) + bytes, _, err = util.Get(url) if err == nil { break } @@ -333,6 +341,17 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b } } +func uploadByTcp(volumeTcpClient *wdclient.VolumeTcpClient, fp *operation.FilePart) bool { + + err := volumeTcpClient.PutFileChunk(fp.Server, fp.Fid, uint32(fp.FileSize), fp.Reader) + if err != nil { + glog.Errorf("upload chunk err: %v", err) + return false + } + + return true +} + func readFileIds(fileName string, fileIdLineChan chan string) { file, err := os.Open(fileName) // For read access. if err != nil { diff --git a/weed/command/command.go b/weed/command/command.go index e4695a199..7635405dc 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -5,11 +5,10 @@ import ( "os" "strings" - flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" ) var Commands = []*Command{ - cmdAdmin, cmdAutocomplete, cmdUnautocomplete, cmdBackup, @@ -29,26 +28,20 @@ var Commands = []*Command{ cmdFilerSynchronize, cmdFix, cmdFuse, - cmdIam, cmdMaster, cmdMasterFollower, cmdMount, - cmdMqAgent, - cmdMqBroker, - cmdMqKafkaGateway, - cmdDB, cmdS3, + cmdIam, + cmdMsgBroker, cmdScaffold, cmdServer, cmdShell, - cmdSql, cmdUpdate, cmdUpload, cmdVersion, cmdVolume, cmdWebDav, - cmdSftp, - cmdWorker, } type Command struct { diff --git a/weed/command/compact.go b/weed/command/compact.go index 59e69bc74..6df28440a 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -1,10 +1,10 @@ package command import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -28,7 +28,7 @@ var ( compactVolumePath = cmdCompact.Flag.String("dir", ".", "data directory to store files") compactVolumeCollection = cmdCompact.Flag.String("collection", "", "volume collection name") compactVolumeId = cmdCompact.Flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir.") - compactMethod = cmdCompact.Flag.Int("method", 0, "option to choose which compact method. use 0 (default) or 1.") + compactMethod = cmdCompact.Flag.Int("method", 0, "option to choose which compact method. use 0 or 1.") compactVolumePreallocate = cmdCompact.Flag.Int64("preallocateMB", 0, "preallocate volume disk space") ) @@ -41,7 +41,7 @@ func runCompact(cmd *Command, args []string) bool { preallocate := *compactVolumePreallocate * (1 << 20) vid := needle.VolumeId(*compactVolumeId) - v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, needle.GetCurrentVersion(), 0, 0) + v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0) if err != nil { glog.Fatalf("Load Volume [ERROR] %s\n", err) } diff --git a/weed/command/db.go b/weed/command/db.go deleted file mode 100644 index a521da093..000000000 --- a/weed/command/db.go +++ /dev/null @@ -1,404 +0,0 @@ -package command - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/seaweedfs/seaweedfs/weed/server/postgres" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - dbOptions DBOptions -) - -type DBOptions struct { - host *string - port *int - masterAddr *string - authMethod *string - users *string - database *string - maxConns *int - idleTimeout *string - tlsCert *string - tlsKey *string -} - -func init() { - cmdDB.Run = runDB // break init cycle - dbOptions.host = cmdDB.Flag.String("host", "localhost", "Database server host") - dbOptions.port = cmdDB.Flag.Int("port", 5432, "Database server port") - dbOptions.masterAddr = cmdDB.Flag.String("master", "localhost:9333", "SeaweedFS master server address") - dbOptions.authMethod = cmdDB.Flag.String("auth", "trust", "Authentication method: trust, password, md5") - dbOptions.users = cmdDB.Flag.String("users", "", "User credentials for auth (JSON format '{\"user1\":\"pass1\",\"user2\":\"pass2\"}' or file '@/path/to/users.json')") - dbOptions.database = cmdDB.Flag.String("database", "default", "Default database name") - dbOptions.maxConns = cmdDB.Flag.Int("max-connections", 100, "Maximum concurrent connections per server") - dbOptions.idleTimeout = cmdDB.Flag.String("idle-timeout", "1h", "Connection idle timeout") - dbOptions.tlsCert = cmdDB.Flag.String("tls-cert", "", "TLS certificate file path") - dbOptions.tlsKey = cmdDB.Flag.String("tls-key", "", "TLS private key file path") -} - -var cmdDB = &Command{ - UsageLine: "db -port=5432 -master=", - Short: "start a PostgreSQL-compatible database server for SQL queries", - Long: `Start a PostgreSQL wire protocol compatible database server that provides SQL query access to SeaweedFS. - -This database server enables any PostgreSQL client, tool, or application to connect to SeaweedFS -and execute SQL queries against MQ topics. It implements the PostgreSQL wire protocol for maximum -compatibility with the existing PostgreSQL ecosystem. - -Examples: - - # Start database server on default port 5432 - weed db - - # Start with MD5 authentication using JSON format (recommended) - weed db -auth=md5 -users='{"admin":"secret","readonly":"view123"}' - - # Start with complex passwords using JSON format - weed db -auth=md5 -users='{"admin":"pass;with;semicolons","user":"password:with:colons"}' - - # Start with credentials from JSON file (most secure) - weed db -auth=md5 -users="@/etc/seaweedfs/users.json" - - # Start with custom port and master - weed db -port=5433 -master=master1:9333 - - # Allow connections from any host - weed db -host=0.0.0.0 -port=5432 - - # Start with TLS encryption - weed db -tls-cert=server.crt -tls-key=server.key - -Client Connection Examples: - - # psql command line client - psql "host=localhost port=5432 dbname=default user=seaweedfs" - psql -h localhost -p 5432 -U seaweedfs -d default - - # With password - PGPASSWORD=secret psql -h localhost -p 5432 -U admin -d default - - # Connection string - psql "postgresql://admin:secret@localhost:5432/default" - -Programming Language Examples: - - # Python (psycopg2) - import psycopg2 - conn = psycopg2.connect( - host="localhost", port=5432, - user="seaweedfs", database="default" - ) - - # Java JDBC - String url = "jdbc:postgresql://localhost:5432/default"; - Connection conn = DriverManager.getConnection(url, "seaweedfs", ""); - - # Go (lib/pq) - db, err := sql.Open("postgres", "host=localhost port=5432 user=seaweedfs dbname=default sslmode=disable") - - # Node.js (pg) - const client = new Client({ - host: 'localhost', port: 5432, - user: 'seaweedfs', database: 'default' - }); - -Supported SQL Operations: - - SELECT queries on MQ topics - - DESCRIBE/DESC table_name commands - - EXPLAIN query execution plans - - SHOW DATABASES/TABLES commands - - Aggregation functions (COUNT, SUM, AVG, MIN, MAX) - - WHERE clauses with filtering - - System columns (_timestamp_ns, _key, _source) - - Basic PostgreSQL system queries (version(), current_database(), current_user) - -Authentication Methods: - - trust: No authentication required (default) - - password: Clear text password authentication - - md5: MD5 password authentication - -User Credential Formats: - - JSON format: '{"user1":"pass1","user2":"pass2"}' (supports any special characters) - - File format: "@/path/to/users.json" (JSON file) - - Note: JSON format supports passwords with semicolons, colons, and any other special characters. - File format is recommended for production to keep credentials secure. - -Compatible Tools: - - psql (PostgreSQL command line client) - - Any PostgreSQL JDBC/ODBC compatible tool - -Security Features: - - Multiple authentication methods - - TLS encryption support - - Read-only access (no data modification) - -Performance Features: - - Fast path aggregation optimization (COUNT, MIN, MAX without WHERE clauses) - - Hybrid data scanning (parquet files + live logs) - - PostgreSQL wire protocol - - Query result streaming - -`, -} - -func runDB(cmd *Command, args []string) bool { - - util.LoadConfiguration("security", false) - - // Validate options - if *dbOptions.masterAddr == "" { - fmt.Fprintf(os.Stderr, "Error: master address is required\n") - return false - } - - // Parse authentication method - authMethod, err := parseAuthMethod(*dbOptions.authMethod) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - return false - } - - // Parse user credentials - users, err := parseUsers(*dbOptions.users, authMethod) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - return false - } - - // Parse idle timeout - idleTimeout, err := time.ParseDuration(*dbOptions.idleTimeout) - if err != nil { - fmt.Fprintf(os.Stderr, "Error parsing idle timeout: %v\n", err) - return false - } - - // Validate port number - if err := validatePortNumber(*dbOptions.port); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - return false - } - - // Setup TLS if requested - var tlsConfig *tls.Config - if *dbOptions.tlsCert != "" && *dbOptions.tlsKey != "" { - cert, err := tls.LoadX509KeyPair(*dbOptions.tlsCert, *dbOptions.tlsKey) - if err != nil { - fmt.Fprintf(os.Stderr, "Error loading TLS certificates: %v\n", err) - return false - } - tlsConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - } - } - - // Create server configuration - config := &postgres.PostgreSQLServerConfig{ - Host: *dbOptions.host, - Port: *dbOptions.port, - AuthMethod: authMethod, - Users: users, - Database: *dbOptions.database, - MaxConns: *dbOptions.maxConns, - IdleTimeout: idleTimeout, - TLSConfig: tlsConfig, - } - - // Create database server - dbServer, err := postgres.NewPostgreSQLServer(config, *dbOptions.masterAddr) - if err != nil { - fmt.Fprintf(os.Stderr, "Error creating database server: %v\n", err) - return false - } - - // Print startup information - fmt.Printf("Starting SeaweedFS Database Server...\n") - fmt.Printf("Host: %s\n", *dbOptions.host) - fmt.Printf("Port: %d\n", *dbOptions.port) - fmt.Printf("Master: %s\n", *dbOptions.masterAddr) - fmt.Printf("Database: %s\n", *dbOptions.database) - fmt.Printf("Auth Method: %s\n", *dbOptions.authMethod) - fmt.Printf("Max Connections: %d\n", *dbOptions.maxConns) - fmt.Printf("Idle Timeout: %s\n", *dbOptions.idleTimeout) - if tlsConfig != nil { - fmt.Printf("TLS: Enabled\n") - } else { - fmt.Printf("TLS: Disabled\n") - } - if len(users) > 0 { - fmt.Printf("Users: %d configured\n", len(users)) - } - - fmt.Printf("\nDatabase Connection Examples:\n") - fmt.Printf(" psql -h %s -p %d -U seaweedfs -d %s\n", *dbOptions.host, *dbOptions.port, *dbOptions.database) - if len(users) > 0 { - // Show first user as example - for username := range users { - fmt.Printf(" psql -h %s -p %d -U %s -d %s\n", *dbOptions.host, *dbOptions.port, username, *dbOptions.database) - break - } - } - fmt.Printf(" postgresql://%s:%d/%s\n", *dbOptions.host, *dbOptions.port, *dbOptions.database) - - fmt.Printf("\nSupported Operations:\n") - fmt.Printf(" - SELECT queries on MQ topics\n") - fmt.Printf(" - DESCRIBE/DESC table_name\n") - fmt.Printf(" - EXPLAIN query execution plans\n") - fmt.Printf(" - SHOW DATABASES/TABLES\n") - fmt.Printf(" - Aggregations: COUNT, SUM, AVG, MIN, MAX\n") - fmt.Printf(" - System columns: _timestamp_ns, _key, _source\n") - fmt.Printf(" - Basic PostgreSQL system queries\n") - - fmt.Printf("\nReady for database connections!\n\n") - - // Start the server - err = dbServer.Start() - if err != nil { - fmt.Fprintf(os.Stderr, "Error starting database server: %v\n", err) - return false - } - - // Set up signal handling for graceful shutdown - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - // Wait for shutdown signal - <-sigChan - fmt.Printf("\nReceived shutdown signal, stopping database server...\n") - - // Create context with timeout for graceful shutdown - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // Stop the server with timeout - done := make(chan error, 1) - go func() { - done <- dbServer.Stop() - }() - - select { - case err := <-done: - if err != nil { - fmt.Fprintf(os.Stderr, "Error stopping database server: %v\n", err) - return false - } - fmt.Printf("Database server stopped successfully\n") - case <-ctx.Done(): - fmt.Fprintf(os.Stderr, "Timeout waiting for database server to stop\n") - return false - } - - return true -} - -// parseAuthMethod parses the authentication method string -func parseAuthMethod(method string) (postgres.AuthMethod, error) { - switch strings.ToLower(method) { - case "trust": - return postgres.AuthTrust, nil - case "password": - return postgres.AuthPassword, nil - case "md5": - return postgres.AuthMD5, nil - default: - return postgres.AuthTrust, fmt.Errorf("unsupported auth method '%s'. Supported: trust, password, md5", method) - } -} - -// parseUsers parses the user credentials string with support for secure formats only -// Supported formats: -// 1. JSON format: {"username":"password","username2":"password2"} -// 2. File format: /path/to/users.json or @/path/to/users.json -func parseUsers(usersStr string, authMethod postgres.AuthMethod) (map[string]string, error) { - users := make(map[string]string) - - if usersStr == "" { - // No users specified - if authMethod != postgres.AuthTrust { - return nil, fmt.Errorf("users must be specified when auth method is not 'trust'") - } - return users, nil - } - - // Trim whitespace - usersStr = strings.TrimSpace(usersStr) - - // Determine format and parse accordingly - if strings.HasPrefix(usersStr, "{") && strings.HasSuffix(usersStr, "}") { - // JSON format - return parseUsersJSON(usersStr, authMethod) - } - - // Check if it's a file path (with or without @ prefix) before declaring invalid format - filePath := strings.TrimPrefix(usersStr, "@") - if _, err := os.Stat(filePath); err == nil { - // File format - return parseUsersFile(usersStr, authMethod) // Pass original string to preserve @ handling - } - - // Invalid format - return nil, fmt.Errorf("invalid user credentials format. Use JSON format '{\"user\":\"pass\"}' or file format '@/path/to/users.json' or 'path/to/users.json'. Legacy semicolon-separated format is no longer supported") -} - -// parseUsersJSON parses user credentials from JSON format -func parseUsersJSON(jsonStr string, authMethod postgres.AuthMethod) (map[string]string, error) { - var users map[string]string - if err := json.Unmarshal([]byte(jsonStr), &users); err != nil { - return nil, fmt.Errorf("invalid JSON format for users: %v", err) - } - - // Validate users - for username, password := range users { - if username == "" { - return nil, fmt.Errorf("empty username in JSON user specification") - } - if authMethod != postgres.AuthTrust && password == "" { - return nil, fmt.Errorf("empty password for user '%s' with auth method", username) - } - } - - return users, nil -} - -// parseUsersFile parses user credentials from a JSON file -func parseUsersFile(filePath string, authMethod postgres.AuthMethod) (map[string]string, error) { - // Remove @ prefix if present - filePath = strings.TrimPrefix(filePath, "@") - - // Read file content - content, err := os.ReadFile(filePath) - if err != nil { - return nil, fmt.Errorf("failed to read users file '%s': %v", filePath, err) - } - - contentStr := strings.TrimSpace(string(content)) - - // File must contain JSON format - if !strings.HasPrefix(contentStr, "{") || !strings.HasSuffix(contentStr, "}") { - return nil, fmt.Errorf("users file '%s' must contain JSON format: {\"user\":\"pass\"}. Legacy formats are no longer supported", filePath) - } - - // Parse as JSON - return parseUsersJSON(contentStr, authMethod) -} - -// validatePortNumber validates that the port number is reasonable -func validatePortNumber(port int) error { - if port < 1 || port > 65535 { - return fmt.Errorf("port number must be between 1 and 65535, got %d", port) - } - if port < 1024 { - fmt.Fprintf(os.Stderr, "Warning: port number %d may require root privileges\n", port) - } - return nil -} diff --git a/weed/command/download.go b/weed/command/download.go index 1b7098824..a3c05b53d 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -1,7 +1,6 @@ package command import ( - "context" "fmt" "io" "net/http" @@ -11,11 +10,10 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -48,11 +46,11 @@ var cmdDownload = &Command{ } func runDownload(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") for _, fid := range args { - if e := downloadToFile(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil { + if e := downloadToFile(func() pb.ServerAddress { return pb.ServerAddress(*d.server) }, grpcDialOption, fid, util.ResolvePath(*d.dir)); e != nil { fmt.Println("Download Error: ", fid, e) } } @@ -64,11 +62,11 @@ func downloadToFile(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpti if lookupError != nil { return lookupError } - filename, _, rc, err := util_http.DownloadFile(fileUrl, jwt) + filename, _, rc, err := util.DownloadFile(fileUrl, jwt) if err != nil { return err } - defer util_http.CloseResponse(rc) + defer util.CloseResponse(rc) if filename == "" { filename = fileId } @@ -117,10 +115,10 @@ func fetchContent(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption return "", nil, lookupError } var rc *http.Response - if filename, _, rc, e = util_http.DownloadFile(fileUrl, jwt); e != nil { + if filename, _, rc, e = util.DownloadFile(fileUrl, jwt); e != nil { return "", nil, e } - defer util_http.CloseResponse(rc) + defer util.CloseResponse(rc) content, e = io.ReadAll(rc.Body) return } diff --git a/weed/command/export.go b/weed/command/export.go index e09d57056..1c32e1050 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -13,13 +13,13 @@ import ( "text/template" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( diff --git a/weed/command/filer.go b/weed/command/filer.go index 053c5a147..7e0e92d4a 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -1,9 +1,6 @@ package command import ( - "context" - "crypto/tls" - "crypto/x509" "fmt" "net" "net/http" @@ -13,20 +10,16 @@ import ( "strings" "time" - "github.com/spf13/viper" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/credentials/tls/certprovider/pemfile" "google.golang.org/grpc/reflection" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -37,12 +30,10 @@ var ( filerWebDavOptions WebDavOption filerStartIam *bool filerIamOptions IamOptions - filerStartSftp *bool - filerSftpOptions SftpOptions ) type FilerOptions struct { - masters *pb.ServerDiscovery + masters map[string]pb.ServerAddress mastersString *string ip *string bindIp *string @@ -61,7 +52,6 @@ type FilerOptions struct { disableHttp *bool cipher *bool metricsHttpPort *int - metricsHttpIp *string saveToFilerLimit *int defaultLevelDbDirectory *string concurrentUploadLimitMB *int @@ -69,16 +59,11 @@ type FilerOptions struct { debugPort *int localSocket *string showUIDirectoryDelete *bool - downloadMaxMBps *int - diskType *string - allowedOrigins *string - exposeDirectoryData *bool - certProvider certprovider.Provider } func init() { cmdFiler.Run = runFiler // break init cycle - f.mastersString = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers or a single DNS SRV record of at least 1 master server, prepended with dnssrv+") + f.mastersString = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers") f.filerGroup = cmdFiler.Flag.String("filerGroup", "", "share metadata with other filers in the same filerGroup") f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection") f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address") @@ -95,7 +80,6 @@ func init() { f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - f.metricsHttpIp = cmdFiler.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store") f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory") f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size") @@ -103,30 +87,18 @@ func init() { f.debugPort = cmdFiler.Flag.Int("debug.port", 6060, "http port for debugging") f.localSocket = cmdFiler.Flag.String("localSocket", "", "default to /tmp/seaweedfs-filer-.sock") f.showUIDirectoryDelete = cmdFiler.Flag.Bool("ui.deleteDir", true, "enable filer UI show delete directory button") - f.downloadMaxMBps = cmdFiler.Flag.Int("downloadMaxMBps", 0, "download max speed for each download request, in MB per second") - f.diskType = cmdFiler.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") - f.allowedOrigins = cmdFiler.Flag.String("allowedOrigins", "*", "comma separated list of allowed origins") - f.exposeDirectoryData = cmdFiler.Flag.Bool("exposeDirectoryData", true, "whether to return directory metadata and content in Filer UI") // start s3 on filer filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway") filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port") - filerS3Options.portHttps = cmdFiler.Flag.Int("s3.port.https", 0, "s3 server https listen port") filerS3Options.portGrpc = cmdFiler.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port") filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") - filerS3Options.allowedOrigins = cmdFiler.Flag.String("s3.allowedOrigins", "*", "comma separated list of allowed origins") - filerS3Options.dataCenter = cmdFiler.Flag.String("s3.dataCenter", "", "prefer to read and write to volumes in this data center") filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file") filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file") filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file") filerS3Options.auditLogConfig = cmdFiler.Flag.String("s3.auditLogConfig", "", "path to the audit log config file") filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", true, "allow empty folders") filerS3Options.allowDeleteBucketNotEmpty = cmdFiler.Flag.Bool("s3.allowDeleteBucketNotEmpty", true, "allow recursive deleting all entries along with bucket") - filerS3Options.localSocket = cmdFiler.Flag.String("s3.localSocket", "", "default to /tmp/seaweedfs-s3-.sock") - filerS3Options.tlsCACertificate = cmdFiler.Flag.String("s3.cacert.file", "", "path to the TLS CA certificate file") - filerS3Options.tlsVerifyClientCert = cmdFiler.Flag.Bool("s3.tlsVerifyClientCert", false, "whether to verify the client's certificate") - filerS3Options.bindIp = cmdFiler.Flag.String("s3.ip.bind", "", "ip address to bind to. If empty, default to same as -ip.bind option.") - filerS3Options.idleTimeout = cmdFiler.Flag.Int("s3.idleTimeout", 10, "connection idle seconds") // start webdav on filer filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway") @@ -138,28 +110,11 @@ func init() { filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB") - filerWebDavOptions.maxMB = cmdFiler.Flag.Int("webdav.maxMB", 4, "split files larger than the limit") - filerWebDavOptions.filerRootPath = cmdFiler.Flag.String("webdav.filer.path", "/", "use this remote path from filer server") // start iam on filer filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service") filerIamOptions.ip = cmdFiler.Flag.String("iam.ip", *f.ip, "iam server http listen ip address") filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port") - - filerStartSftp = cmdFiler.Flag.Bool("sftp", false, "whether to start the SFTP server") - filerSftpOptions.port = cmdFiler.Flag.Int("sftp.port", 2022, "SFTP server listen port") - filerSftpOptions.sshPrivateKey = cmdFiler.Flag.String("sftp.sshPrivateKey", "", "path to the SSH private key file for host authentication") - filerSftpOptions.hostKeysFolder = cmdFiler.Flag.String("sftp.hostKeysFolder", "", "path to folder containing SSH private key files for host authentication") - filerSftpOptions.authMethods = cmdFiler.Flag.String("sftp.authMethods", "password,publickey", "comma-separated list of allowed auth methods: password, publickey, keyboard-interactive") - filerSftpOptions.maxAuthTries = cmdFiler.Flag.Int("sftp.maxAuthTries", 6, "maximum number of authentication attempts per connection") - filerSftpOptions.bannerMessage = cmdFiler.Flag.String("sftp.bannerMessage", "SeaweedFS SFTP Server - Unauthorized access is prohibited", "message displayed before authentication") - filerSftpOptions.loginGraceTime = cmdFiler.Flag.Duration("sftp.loginGraceTime", 2*time.Minute, "timeout for authentication") - filerSftpOptions.clientAliveInterval = cmdFiler.Flag.Duration("sftp.clientAliveInterval", 5*time.Second, "interval for sending keep-alive messages") - filerSftpOptions.clientAliveCountMax = cmdFiler.Flag.Int("sftp.clientAliveCountMax", 3, "maximum number of missed keep-alive messages before disconnecting") - filerSftpOptions.userStoreFile = cmdFiler.Flag.String("sftp.userStoreFile", "", "path to JSON file containing user credentials and permissions") - filerSftpOptions.dataCenter = cmdFiler.Flag.String("sftp.dataCenter", "", "prefer to read and write to volumes in this data center") - filerSftpOptions.bindIp = cmdFiler.Flag.String("sftp.ip.bind", "", "ip address to bind to. If empty, default to same as -ip.bind option.") - filerSftpOptions.localSocket = cmdFiler.Flag.String("sftp.localSocket", "", "default to /tmp/seaweedfs-sftp-.sock") } func filerLongDesc() string { @@ -202,91 +157,48 @@ func runFiler(cmd *Command, args []string) bool { go http.ListenAndServe(fmt.Sprintf(":%d", *f.debugPort), nil) } - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) - switch { - case *f.metricsHttpIp != "": - // noting to do, use f.metricsHttpIp - case *f.bindIp != "": - *f.metricsHttpIp = *f.bindIp - case *f.ip != "": - *f.metricsHttpIp = *f.ip - } - go stats_collect.StartMetricsServer(*f.metricsHttpIp, *f.metricsHttpPort) + go stats_collect.StartMetricsServer(*f.metricsHttpPort) - filerAddress := pb.NewServerAddress(*f.ip, *f.port, *f.portGrpc).String() + filerAddress := util.JoinHostPort(*f.ip, *f.port) startDelay := time.Duration(2) if *filerStartS3 { filerS3Options.filer = &filerAddress - if *filerS3Options.bindIp == "" { - filerS3Options.bindIp = f.bindIp - } + filerS3Options.bindIp = f.bindIp filerS3Options.localFilerSocket = f.localSocket - if *f.dataCenter != "" && *filerS3Options.dataCenter == "" { - filerS3Options.dataCenter = f.dataCenter - } - go func(delay time.Duration) { - time.Sleep(delay * time.Second) + go func() { + time.Sleep(startDelay * time.Second) filerS3Options.startS3Server() - }(startDelay) + }() startDelay++ } if *filerStartWebDav { filerWebDavOptions.filer = &filerAddress - filerWebDavOptions.ipBind = f.bindIp - - if *filerWebDavOptions.disk == "" { - filerWebDavOptions.disk = f.diskType - } - - go func(delay time.Duration) { - time.Sleep(delay * time.Second) + go func() { + time.Sleep(startDelay * time.Second) filerWebDavOptions.startWebDav() - }(startDelay) + }() startDelay++ } if *filerStartIam { filerIamOptions.filer = &filerAddress filerIamOptions.masters = f.mastersString - go func(delay time.Duration) { - time.Sleep(delay * time.Second) + go func() { + time.Sleep(startDelay * time.Second) filerIamOptions.startIamServer() - }(startDelay) - startDelay++ + }() } - if *filerStartSftp { - filerSftpOptions.filer = &filerAddress - if *filerSftpOptions.bindIp == "" { - filerSftpOptions.bindIp = f.bindIp - } - if *f.dataCenter != "" && *filerSftpOptions.dataCenter == "" { - filerSftpOptions.dataCenter = f.dataCenter - } - go func(delay time.Duration) { - time.Sleep(delay * time.Second) - filerSftpOptions.startSftpServer() - }(startDelay) - } - - f.masters = pb.ServerAddresses(*f.mastersString).ToServiceDiscovery() + f.masters = pb.ServerAddresses(*f.mastersString).ToAddressMap() f.startFiler() return true } -// GetCertificateWithUpdate Auto refreshing TSL certificate -func (fo *FilerOptions) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) { - certs, err := fo.certProvider.KeyMaterial(context.Background()) - if certs == nil { - return nil, err - } - return &certs.Certs[0], err -} - func (fo *FilerOptions) startFiler() { defaultMux := http.NewServeMux() @@ -301,9 +213,6 @@ func (fo *FilerOptions) startFiler() { if *fo.bindIp == "" { *fo.bindIp = *fo.ip } - if *fo.allowedOrigins == "" { - *fo.allowedOrigins = "*" - } defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2") @@ -326,9 +235,6 @@ func (fo *FilerOptions) startFiler() { SaveToFilerLimit: int64(*fo.saveToFilerLimit), ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024, ShowUIDirectoryDelete: *fo.showUIDirectoryDelete, - DownloadMaxBytesPs: int64(*fo.downloadMaxMBps) * 1024 * 1024, - DiskType: *fo.diskType, - AllowedOrigins: strings.Split(*fo.allowedOrigins, ","), }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) @@ -336,8 +242,8 @@ func (fo *FilerOptions) startFiler() { if *fo.publicPort != 0 { publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort) - glog.V(0).Infoln("Start Seaweed filer server", version.Version(), "public at", publicListeningAddress) - publicListener, localPublicListener, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0) + glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress) + publicListener, localPublicListner, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0) if e != nil { glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e) } @@ -346,16 +252,16 @@ func (fo *FilerOptions) startFiler() { glog.Fatalf("Volume server fail to serve public: %v", e) } }() - if localPublicListener != nil { + if localPublicListner != nil { go func() { - if e := http.Serve(localPublicListener, publicVolumeMux); e != nil { + if e := http.Serve(localPublicListner, publicVolumeMux); e != nil { glog.Errorf("Volume server fail to serve public: %v", e) } }() } } - glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", version.Version(), *fo.ip, *fo.port) + glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port) filerListener, filerLocalListener, e := util.NewIpAndLocalListeners( *fo.bindIp, *fo.port, time.Duration(10)*time.Second, @@ -378,81 +284,32 @@ func (fo *FilerOptions) startFiler() { } go grpcS.Serve(grpcL) + httpS := &http.Server{Handler: defaultMux} if runtime.GOOS != "windows" { - localSocket := *fo.localSocket - if localSocket == "" { - localSocket = fmt.Sprintf("/tmp/seaweedfs-filer-%d.sock", *fo.port) + if *fo.localSocket == "" { + *fo.localSocket = fmt.Sprintf("/tmp/seaweefs-filer-%d.sock", *fo.port) } - if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) + if err := os.Remove(*fo.localSocket); err != nil && !os.IsNotExist(err) { + glog.Fatalf("Failed to remove %s, error: %s", *fo.localSocket, err.Error()) } go func() { // start on local unix socket - filerSocketListener, err := net.Listen("unix", localSocket) + filerSocketListener, err := net.Listen("unix", *fo.localSocket) if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) + glog.Fatalf("Failed to listen on %s: %v", *fo.localSocket, err) } - newHttpServer(defaultMux, nil).Serve(filerSocketListener) + httpS.Serve(filerSocketListener) }() } - - if viper.GetString("https.filer.key") != "" { - certFile := viper.GetString("https.filer.cert") - keyFile := viper.GetString("https.filer.key") - caCertFile := viper.GetString("https.filer.ca") - disbaleTlsVerifyClientCert := viper.GetBool("https.filer.disable_tls_verify_client_cert") - - pemfileOptions := pemfile.Options{ - CertFile: certFile, - KeyFile: keyFile, - RefreshDuration: security.CredRefreshingInterval, - } - if fo.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil { - glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) - } - - caCertPool := x509.NewCertPool() - if caCertFile != "" { - caCertFile, err := os.ReadFile(caCertFile) - if err != nil { - glog.Fatalf("error reading CA certificate: %v", err) + if filerLocalListener != nil { + go func() { + if err := httpS.Serve(filerLocalListener); err != nil { + glog.Errorf("Filer Fail to serve: %v", e) } - caCertPool.AppendCertsFromPEM(caCertFile) - } - - clientAuth := tls.NoClientCert - if !disbaleTlsVerifyClientCert { - clientAuth = tls.RequireAndVerifyClientCert - } - - tlsConfig := &tls.Config{ - GetCertificate: fo.GetCertificateWithUpdate, - ClientAuth: clientAuth, - ClientCAs: caCertPool, - } - - security.FixTlsConfig(util.GetViper(), tlsConfig) - - if filerLocalListener != nil { - go func() { - if err := newHttpServer(defaultMux, tlsConfig).ServeTLS(filerLocalListener, "", ""); err != nil { - glog.Errorf("Filer Fail to serve: %v", e) - } - }() - } - if err := newHttpServer(defaultMux, tlsConfig).ServeTLS(filerListener, "", ""); err != nil { - glog.Fatalf("Filer Fail to serve: %v", e) - } - } else { - if filerLocalListener != nil { - go func() { - if err := newHttpServer(defaultMux, nil).Serve(filerLocalListener); err != nil { - glog.Errorf("Filer Fail to serve: %v", e) - } - }() - } - if err := newHttpServer(defaultMux, nil).Serve(filerListener); err != nil { - glog.Fatalf("Filer Fail to serve: %v", e) - } + }() } + if err := httpS.Serve(filerListener); err != nil { + glog.Fatalf("Filer Fail to serve: %v", e) + } + } diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go index 380540fd9..d191c693b 100644 --- a/weed/command/filer_backup.go +++ b/weed/command/filer_backup.go @@ -1,34 +1,23 @@ package command import ( - "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" - "regexp" - "strings" "time" ) type FilerBackupOptions struct { - isActivePassive *bool - filer *string - path *string - excludePaths *string - excludeFileName *string - debug *bool - proxyByFiler *bool - doDeleteFiles *bool - disableErrorRetry *bool - ignore404Error *bool - timeAgo *time.Duration - retentionDays *int + isActivePassive *bool + filer *string + path *string + debug *bool + proxyByFiler *bool + timeAgo *time.Duration } var ( @@ -39,15 +28,9 @@ func init() { cmdFilerBackup.Run = runFilerBackup // break init cycle filerBackupOptions.filer = cmdFilerBackup.Flag.String("filer", "localhost:8888", "filer of one SeaweedFS cluster") filerBackupOptions.path = cmdFilerBackup.Flag.String("filerPath", "/", "directory to sync on filer") - filerBackupOptions.excludePaths = cmdFilerBackup.Flag.String("filerExcludePaths", "", "exclude directories to sync on filer") - filerBackupOptions.excludeFileName = cmdFilerBackup.Flag.String("filerExcludeFileName", "", "exclude file names that match the regexp to sync on filer") filerBackupOptions.proxyByFiler = cmdFilerBackup.Flag.Bool("filerProxy", false, "read and write file chunks by filer instead of volume servers") - filerBackupOptions.doDeleteFiles = cmdFilerBackup.Flag.Bool("doDeleteFiles", false, "delete files on the destination") filerBackupOptions.debug = cmdFilerBackup.Flag.Bool("debug", false, "debug mode to print out received files") filerBackupOptions.timeAgo = cmdFilerBackup.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"ยตs\"), \"ms\", \"s\", \"m\", \"h\"") - filerBackupOptions.retentionDays = cmdFilerBackup.Flag.Int("retentionDays", 0, "incremental backup retention days") - filerBackupOptions.disableErrorRetry = cmdFilerBackup.Flag.Bool("disableErrorRetry", false, "disables errors retry, only logs will print") - filerBackupOptions.ignore404Error = cmdFilerBackup.Flag.Bool("ignore404Error", true, "ignore 404 errors from filer") } var cmdFilerBackup = &Command{ @@ -66,17 +49,15 @@ var cmdFilerBackup = &Command{ func runFilerBackup(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") clientId := util.RandomInt32() - var clientEpoch int32 for { - clientEpoch++ - err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId, clientEpoch) + err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId) if err != nil { glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err) time.Sleep(1747 * time.Millisecond) @@ -90,24 +71,17 @@ const ( BackupKeyPrefix = "backup." ) -func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions, clientId int32, clientEpoch int32) error { +func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions, clientId int32) error { // find data sink - dataSink := findSink(util.GetViper()) + config := util.GetViper() + dataSink := findSink(config) if dataSink == nil { return fmt.Errorf("no data sink configured in replication.toml") } sourceFiler := pb.ServerAddress(*backupOption.filer) sourcePath := *backupOption.path - excludePaths := util.StringSplit(*backupOption.excludePaths, ",") - var reExcludeFileName *regexp.Regexp - if *backupOption.excludeFileName != "" { - var err error - if reExcludeFileName, err = regexp.Compile(*backupOption.excludeFileName); err != nil { - return fmt.Errorf("error compile regexp %v for exclude file name: %+v", *backupOption.excludeFileName, err) - } - } timeAgo := *backupOption.timeAgo targetPath := dataSink.GetSinkToDirectory() debug := *backupOption.debug @@ -130,71 +104,16 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti // create filer sink filerSource := &source.FilerSource{} - filerSource.DoInitialize( - sourceFiler.ToHttpAddress(), - sourceFiler.ToGrpcAddress(), - sourcePath, - *backupOption.proxyByFiler) + filerSource.DoInitialize(sourceFiler.ToHttpAddress(), sourceFiler.ToGrpcAddress(), sourcePath, *backupOption.proxyByFiler) dataSink.SetSourceFiler(filerSource) - var processEventFn func(*filer_pb.SubscribeMetadataResponse) error - if *backupOption.ignore404Error { - processEventFnGenerated := genProcessFunction(sourcePath, targetPath, excludePaths, reExcludeFileName, dataSink, *backupOption.doDeleteFiles, debug) - processEventFn = func(resp *filer_pb.SubscribeMetadataResponse) error { - err := processEventFnGenerated(resp) - if err == nil { - return nil - } - if errors.Is(err, http.ErrNotFound) { - glog.V(0).Infof("got 404 error, ignore it: %s", err.Error()) - return nil - } - return err - } - } else { - processEventFn = genProcessFunction(sourcePath, targetPath, excludePaths, reExcludeFileName, dataSink, *backupOption.doDeleteFiles, debug) - } + processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug) processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error { glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3)) return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs) }) - if dataSink.IsIncremental() && *filerBackupOptions.retentionDays > 0 { - go func() { - for { - now := time.Now() - time.Sleep(time.Hour * 24) - key := util.Join(targetPath, now.Add(-1*time.Hour*24*time.Duration(*filerBackupOptions.retentionDays)).Format("2006-01-02")) - _ = dataSink.DeleteEntry(util.Join(targetPath, key), true, true, nil) - glog.V(0).Infof("incremental backup delete directory:%s", key) - } - }() - } - - prefix := sourcePath - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - eventErrorType := pb.RetryForeverOnError - if *backupOption.disableErrorRetry { - eventErrorType = pb.TrivialOnError - } - - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "backup_" + dataSink.GetName(), - ClientId: clientId, - ClientEpoch: clientEpoch, - SelfSignature: 0, - PathPrefix: prefix, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: nil, - StartTsNs: startFrom.UnixNano(), - StopTsNs: 0, - EventErrorType: eventErrorType, - } - - return pb.FollowMetadata(sourceFiler, grpcDialOption, metadataFollowOption, processEventFnWithOffset) + return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(), clientId, sourcePath, nil, startFrom.UnixNano(), 0, 0, processEventFnWithOffset, pb.TrivialOnError) } diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go index 7f2ac12d6..ada843dea 100644 --- a/weed/command/filer_cat.go +++ b/weed/command/filer_cat.go @@ -3,18 +3,17 @@ package command import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" "net/url" "os" "strings" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -29,9 +28,9 @@ type FilerCatOptions struct { } func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType { - return func(ctx context.Context, fileId string) (targetUrls []string, err error) { + return func(fileId string) (targetUrls []string, err error) { vid := filer.VolumeId(fileId) - resp, err := fco.filerClient.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -60,7 +59,7 @@ var cmdFilerCat = &Command{ func runFilerCat(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) if len(args) == 0 { return false @@ -97,13 +96,13 @@ func runFilerCat(cmd *Command, args []string) bool { writer = f } - pb.WithFilerClient(false, util.RandomInt32(), filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + pb.WithFilerClient(false, filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := filer_pb.LookupEntry(context.Background(), client, request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } @@ -115,7 +114,7 @@ func runFilerCat(cmd *Command, args []string) bool { filerCat.filerClient = client - return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry))) + return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry))) }) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 38e4eb7b9..f20ae99bf 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -14,14 +14,15 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) var ( @@ -30,21 +31,21 @@ var ( ) type CopyOptions struct { - include *string - replication *string - collection *string - ttl *string - diskType *string - maxMB *int - concurrentFiles *int - concurrentChunks *int - grpcDialOption grpc.DialOption - masters []string - cipher bool - ttlSec int32 - checkSize *bool - verbose *bool - volumeServerAccess *string + include *string + replication *string + collection *string + ttl *string + diskType *string + maxMB *int + masterClient *wdclient.MasterClient + concurrenctFiles *int + concurrenctChunks *int + grpcDialOption grpc.DialOption + masters []string + cipher bool + ttlSec int32 + checkSize *bool + verbose *bool } func init() { @@ -56,11 +57,10 @@ func init() { copy.ttl = cmdFilerCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") copy.diskType = cmdFilerCopy.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") copy.maxMB = cmdFilerCopy.Flag.Int("maxMB", 4, "split files larger than the limit") - copy.concurrentFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines") - copy.concurrentChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") + copy.concurrenctFiles = cmdFilerCopy.Flag.Int("c", 8, "concurrent file copy goroutines") + copy.concurrenctChunks = cmdFilerCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") copy.checkSize = cmdFilerCopy.Flag.Bool("check.size", false, "copy when the target file size is different from the source file") copy.verbose = cmdFilerCopy.Flag.Bool("verbose", false, "print out details during copying") - copy.volumeServerAccess = cmdFilerCopy.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl]") } var cmdFilerCopy = &Command{ @@ -81,7 +81,7 @@ var cmdFilerCopy = &Command{ func runCopy(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) if len(args) <= 1 { return false @@ -141,7 +141,7 @@ func runCopy(cmd *Command, args []string) bool { grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") } - fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrentFiles) + fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles) go func() { defer close(fileCopyTaskChan) @@ -152,14 +152,13 @@ func runCopy(cmd *Command, args []string) bool { } } }() - for i := 0; i < *copy.concurrentFiles; i++ { + for i := 0; i < *copy.concurrenctFiles; i++ { waitGroup.Add(1) go func() { defer waitGroup.Done() worker := FileCopyWorker{ options: ©, filerAddress: filerAddress, - signature: util.RandomInt32(), } if err := worker.copyFiles(fileCopyTaskChan); err != nil { fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) @@ -173,7 +172,7 @@ func runCopy(cmd *Command, args []string) bool { } func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress pb.ServerAddress) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) { - err = pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(false, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) @@ -226,7 +225,6 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi type FileCopyWorker struct { options *CopyOptions filerAddress pb.ServerAddress - signature int32 } func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error { @@ -268,7 +266,7 @@ func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error { } if shouldCopy, err := worker.checkExistingFileFirst(task, f); err != nil { - return fmt.Errorf("check existing file: %w", err) + return fmt.Errorf("check existing file: %v", err) } else if !shouldCopy { if *worker.options.verbose { fmt.Printf("skipping copied file: %v\n", f.Name()) @@ -304,7 +302,7 @@ func (worker *FileCopyWorker) checkExistingFileFirst(task FileCopyTask, f *os.Fi return } - err = pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: task.destinationUrlPath, @@ -333,6 +331,8 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err var mimeType string var chunks []*filer_pb.FileChunk + var assignResult *filer_pb.AssignVolumeResponse + var assignError error if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 { @@ -342,40 +342,69 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err return err } - uploader, uploaderErr := operation.NewUploader() - if uploaderErr != nil { - return uploaderErr - } + err = util.Retry("upload", func() error { + // assign a volume + assignErr := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry( - worker, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - TtlSec: worker.options.ttlSec, - DiskType: *worker.options.diskType, - Path: task.destinationUrlPath, - }, - &operation.UploadOption{ + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + DiskType: *worker.options.diskType, + Path: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + if assignResult.Location.Url == "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult) + } + return nil + }) + if assignErr != nil { + return assignErr + } + + // upload data + targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId + uploadOption := &operation.UploadOption{ + UploadUrl: targetUrl, Filename: fileName, Cipher: worker.options.cipher, IsInputCompressed: false, MimeType: mimeType, PairMap: nil, - }, - func(host, fileId string) string { - return fmt.Sprintf("http://%s/%s", host, fileId) - }, - util.NewBytesReader(data), - ) - if flushErr != nil { - return flushErr + Jwt: security.EncodedJwt(assignResult.Auth), + } + uploadResult, err := operation.UploadData(data, uploadOption) + if err != nil { + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) + } + if uploadResult.Error != "" { + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) + } + if *worker.options.verbose { + fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) + } + + fmt.Printf("copied %s => http://%s%s%s\n", f.Name(), worker.filerAddress.ToHttpAddress(), task.destinationUrlPath, fileName) + chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0)) + + return nil + }) + if err != nil { + return fmt.Errorf("upload %v: %v\n", fileName, err) } - chunks = append(chunks, uploadResult.ToPbFileChunk(finalFileId, 0, time.Now().UnixNano())) + } - if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -394,8 +423,8 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err }, } - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { - return fmt.Errorf("update fh: %w", err) + if err := filer_pb.CreateEntry(client, request); err != nil { + return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { @@ -412,9 +441,10 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunksChan := make(chan *filer_pb.FileChunk, chunkCount) - concurrentChunks := make(chan struct{}, *worker.options.concurrentChunks) + concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks) var wg sync.WaitGroup var uploadError error + var collection, replication string fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount) for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ { @@ -425,47 +455,64 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, wg.Done() <-concurrentChunks }() + // assign a volume + var assignResult *filer_pb.AssignVolumeResponse + var assignError error + err := util.Retry("assignVolume", func() error { + return pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + DiskType: *worker.options.diskType, + Path: task.destinationUrlPath + fileName, + } - uploader, err := operation.NewUploader() + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil + }) + }) if err != nil { - uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err) + uploadError = fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err) return } - fileId, uploadResult, err, _ := uploader.UploadWithRetry( - worker, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - TtlSec: worker.options.ttlSec, - DiskType: *worker.options.diskType, - Path: task.destinationUrlPath + fileName, - }, - &operation.UploadOption{ - Filename: fileName + "-" + strconv.FormatInt(i+1, 10), - Cipher: worker.options.cipher, - IsInputCompressed: false, - MimeType: "", - PairMap: nil, - }, - func(host, fileId string) string { - return fmt.Sprintf("http://%s/%s", host, fileId) - }, - io.NewSectionReader(f, i*chunkSize, chunkSize), - ) + targetUrl := "http://" + assignResult.Location.Url + "/" + assignResult.FileId + if collection == "" { + collection = assignResult.Collection + } + if replication == "" { + replication = assignResult.Replication + } + uploadOption := &operation.UploadOption{ + UploadUrl: targetUrl, + Filename: fileName + "-" + strconv.FormatInt(i+1, 10), + Cipher: worker.options.cipher, + IsInputCompressed: false, + MimeType: "", + PairMap: nil, + Jwt: security.EncodedJwt(assignResult.Auth), + } + uploadResult, err, _ := operation.Upload(io.NewSectionReader(f, i*chunkSize, chunkSize), uploadOption) if err != nil { - uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err) + uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) return } if uploadResult.Error != "" { - uploadError = fmt.Errorf("upload %v result: %v\n", fileName, uploadResult.Error) + uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) return } - chunksChan <- uploadResult.ToPbFileChunk(fileId, i*chunkSize, time.Now().UnixNano()) + chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize) - fmt.Printf("uploaded %s-%d [%d,%d)\n", fileName, i+1, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) + fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) } wg.Wait() @@ -481,7 +528,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, for _, chunk := range chunks { fileIds = append(fileIds, chunk.FileId) } - operation.DeleteFileIds(func(_ context.Context) pb.ServerAddress { + operation.DeleteFiles(func() pb.ServerAddress { return pb.ServerAddress(copy.masters[0]) }, false, worker.options.grpcDialOption, fileIds) return uploadError @@ -489,10 +536,10 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, manifestedChunks, manifestErr := filer.MaybeManifestize(worker.saveDataAsChunk, chunks) if manifestErr != nil { - return fmt.Errorf("create manifest: %w", manifestErr) + return fmt.Errorf("create manifest: %v", manifestErr) } - if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -511,8 +558,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, }, } - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { - return fmt.Errorf("update fh: %w", err) + if err := filer_pb.CreateEntry(client, request); err != nil { + return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { @@ -543,64 +590,62 @@ func detectMimeType(f *os.File) string { return mimeType } -func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { - uploader, uploaderErr := operation.NewUploader() - if uploaderErr != nil { - return nil, fmt.Errorf("upload data: %w", uploaderErr) +func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) { + + var fileId, host string + var auth security.EncodedJwt + + if flushErr := pb.WithGrpcFilerClient(false, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx := context.Background() + + assignErr := util.Retry("assignVolume", func() error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + DiskType: *worker.options.diskType, + Path: name, + } + + resp, err := client.AssignVolume(ctx, request) + if err != nil { + return fmt.Errorf("assign volume failure %v: %v", request, err) + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, host, auth = resp.FileId, resp.Location.Url, security.EncodedJwt(resp.Auth) + collection, replication = resp.Collection, resp.Replication + + return nil + }) + if assignErr != nil { + return assignErr + } + + return nil + }); flushErr != nil { + return nil, collection, replication, fmt.Errorf("filerGrpcAddress assign volume: %v", flushErr) } - finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry( - worker, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - TtlSec: worker.options.ttlSec, - DiskType: *worker.options.diskType, - Path: name, - }, - &operation.UploadOption{ - Filename: name, - Cipher: worker.options.cipher, - IsInputCompressed: false, - MimeType: "", - PairMap: nil, - }, - func(host, fileId string) string { - return fmt.Sprintf("http://%s/%s", host, fileId) - }, - reader, - ) - + uploadOption := &operation.UploadOption{ + UploadUrl: fmt.Sprintf("http://%s/%s", host, fileId), + Filename: name, + Cipher: worker.options.cipher, + IsInputCompressed: false, + MimeType: "", + PairMap: nil, + Jwt: auth, + } + uploadResult, flushErr, _ := operation.Upload(reader, uploadOption) if flushErr != nil { - return nil, fmt.Errorf("upload data: %w", flushErr) + return nil, collection, replication, fmt.Errorf("upload data: %v", flushErr) } if uploadResult.Error != "" { - return nil, fmt.Errorf("upload result: %v", uploadResult.Error) + return nil, collection, replication, fmt.Errorf("upload result: %v", uploadResult.Error) } - return uploadResult.ToPbFileChunk(finalFileId, offset, tsNs), nil -} - -var _ = filer_pb.FilerClient(&FileCopyWorker{}) - -func (worker *FileCopyWorker) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) (err error) { - - filerGrpcAddress := worker.filerAddress.ToGrpcAddress() - err = pb.WithGrpcClient(streamingMode, worker.signature, func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, filerGrpcAddress, false, worker.options.grpcDialOption) - - return -} - -func (worker *FileCopyWorker) AdjustedUrl(location *filer_pb.Location) string { - if *worker.options.volumeServerAccess == "publicUrl" { - return location.PublicUrl - } - return location.Url -} - -func (worker *FileCopyWorker) GetDataCenter() string { - return "" + return uploadResult.ToPbFileChunk(fileId, offset), collection, replication, nil } diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go index f77f758ab..cf679885d 100644 --- a/weed/command/filer_meta_backup.go +++ b/weed/command/filer_meta_backup.go @@ -3,18 +3,17 @@ package command import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/spf13/viper" "google.golang.org/grpc" "reflect" - "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -28,9 +27,8 @@ type FilerMetaBackupOptions struct { restart *bool backupFilerConfig *string - store filer.FilerStore - clientId int32 - clientEpoch int32 + store filer.FilerStore + clientId int32 } func init() { @@ -56,7 +54,7 @@ The backup writes to another filer store specified in a backup_filer.toml. func runFilerMetaBackup(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") // load backup_filer.toml @@ -64,9 +62,9 @@ func runFilerMetaBackup(cmd *Command, args []string) bool { v.SetConfigFile(*metaBackup.backupFilerConfig) if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file - glog.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+ + glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", - *metaBackup.backupFilerConfig, err, "backup_filer", "filer") + *metaBackup.backupFilerConfig, "backup_filer", "filer") } if err := metaBackup.initStore(v); err != nil { @@ -133,14 +131,14 @@ func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) { println("+", parentPath.Child(entry.Name)) if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil { - saveErr = fmt.Errorf("insert entry error: %w\n", err) + saveErr = fmt.Errorf("insert entry error: %v\n", err) return } }) if traverseErr != nil { - return fmt.Errorf("traverse: %w", traverseErr) + return fmt.Errorf("traverse: %v", traverseErr) } return saveErr } @@ -196,26 +194,8 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error { return metaBackup.setOffset(lastTime) }) - metaBackup.clientEpoch++ - - prefix := *metaBackup.filerDirectory - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "meta_backup", - ClientId: metaBackup.clientId, - ClientEpoch: metaBackup.clientEpoch, - SelfSignature: 0, - PathPrefix: prefix, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: nil, - StartTsNs: startTime.UnixNano(), - StopTsNs: 0, - EventErrorType: pb.RetryForeverOnError, - } - - return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, metadataFollowOption, processEventFnWithOffset) + return pb.FollowMetadata(pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, "meta_backup", metaBackup.clientId, + *metaBackup.filerDirectory, nil, startTime.UnixNano(), 0, 0, processEventFnWithOffset, pb.TrivialOnError) } @@ -243,7 +223,7 @@ var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{}) func (metaBackup *FilerMetaBackupOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithFilerClient(streamingMode, metaBackup.clientId, pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithFilerClient(streamingMode, pb.ServerAddress(*metaBackup.filerAddress), metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { return fn(client) }) @@ -252,7 +232,3 @@ func (metaBackup *FilerMetaBackupOptions) WithFilerClient(streamingMode bool, fn func (metaBackup *FilerMetaBackupOptions) AdjustedUrl(location *filer_pb.Location) string { return location.Url } - -func (metaBackup *FilerMetaBackupOptions) GetDataCenter() string { - return "" -} diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go index d7a169535..66a87c3d9 100644 --- a/weed/command/filer_meta_tail.go +++ b/weed/command/filer_meta_tail.go @@ -2,16 +2,16 @@ package command import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/golang/protobuf/jsonpb" "os" "path/filepath" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -45,7 +45,7 @@ var ( func runFilerMetaTail(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") clientId := util.RandomInt32() @@ -88,8 +88,11 @@ func runFilerMetaTail(cmd *Command, args []string) bool { return false } + jsonpbMarshaler := jsonpb.Marshaler{ + EmitDefaults: false, + } eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { - filer.ProtoToText(os.Stdout, resp) + jsonpbMarshaler.Marshal(os.Stdout, resp) fmt.Fprintln(os.Stdout) return nil } @@ -107,28 +110,16 @@ func runFilerMetaTail(cmd *Command, args []string) bool { untilTsNs = time.Now().Add(-*tailStop).UnixNano() } - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "tail", - ClientId: clientId, - ClientEpoch: 0, - SelfSignature: 0, - PathPrefix: *tailTarget, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: nil, - StartTsNs: time.Now().Add(-*tailStart).UnixNano(), - StopTsNs: untilTsNs, - EventErrorType: pb.TrivialOnError, - } - - tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpcDialOption, metadataFollowOption, func(resp *filer_pb.SubscribeMetadataResponse) error { - if !shouldPrint(resp) { + tailErr := pb.FollowMetadata(pb.ServerAddress(*tailFiler), grpcDialOption, "tail", clientId, *tailTarget, nil, + time.Now().Add(-*tailStart).UnixNano(), untilTsNs, 0, func(resp *filer_pb.SubscribeMetadataResponse) error { + if !shouldPrint(resp) { + return nil + } + if err := eachEntryFunc(resp); err != nil { + return err + } return nil - } - if err := eachEntryFunc(resp); err != nil { - return err - } - return nil - }) + }, pb.TrivialOnError) if tailErr != nil { fmt.Printf("tail %s: %v\n", *tailFiler, tailErr) diff --git a/weed/command/filer_meta_tail_elastic.go b/weed/command/filer_meta_tail_elastic.go index a72f88902..5776c4f97 100644 --- a/weed/command/filer_meta_tail_elastic.go +++ b/weed/command/filer_meta_tail_elastic.go @@ -5,10 +5,10 @@ package command import ( "context" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" "strings" ) diff --git a/weed/command/filer_meta_tail_non_elastic.go b/weed/command/filer_meta_tail_non_elastic.go index 989e32bec..f78f3ee09 100644 --- a/weed/command/filer_meta_tail_non_elastic.go +++ b/weed/command/filer_meta_tail_non_elastic.go @@ -4,7 +4,7 @@ package command import ( - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) { diff --git a/weed/command/filer_remote_gateway.go b/weed/command/filer_remote_gateway.go index 3e52e8d3f..33454f378 100644 --- a/weed/command/filer_remote_gateway.go +++ b/weed/command/filer_remote_gateway.go @@ -3,13 +3,13 @@ package command import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" "os" "time" @@ -29,13 +29,12 @@ type RemoteGatewayOptions struct { remoteConfs map[string]*remote_pb.RemoteConf bucketsDir string clientId int32 - clientEpoch int32 } var _ = filer_pb.FilerClient(&RemoteGatewayOptions{}) func (option *RemoteGatewayOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithFilerClient(streamingMode, option.clientId, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithFilerClient(streamingMode, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { return fn(client) }) } @@ -43,10 +42,6 @@ func (option *RemoteGatewayOptions) AdjustedUrl(location *filer_pb.Location) str return location.Url } -func (option *RemoteGatewayOptions) GetDataCenter() string { - return "" -} - var ( remoteGatewayOptions RemoteGatewayOptions ) @@ -71,14 +66,14 @@ var cmdFilerRemoteGateway = &Command{ filer.remote.gateway listens on filer local buckets update events. If any bucket is created, deleted, or updated, it will mirror the changes to remote object store. - weed filer.remote.gateway -createBucketAt=cloud1 + weed filer.remote.sync -createBucketAt=cloud1 `, } func runFilerRemoteGateway(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") remoteGatewayOptions.grpcDialOption = grpcDialOption @@ -111,7 +106,7 @@ func runFilerRemoteGateway(cmd *Command, args []string) bool { // synchronize /buckets folder fmt.Printf("synchronize buckets in %s ...\n", remoteGatewayOptions.bucketsDir) - util.RetryUntil("filer.remote.sync buckets", func() error { + util.RetryForever("filer.remote.sync buckets", func() error { return remoteGatewayOptions.followBucketUpdatesAndUploadToRemote(filerSource) }, func(err error) bool { if err != nil { diff --git a/weed/command/filer_remote_gateway_buckets.go b/weed/command/filer_remote_gateway_buckets.go index 5c7e0ae21..9fe0e29df 100644 --- a/weed/command/filer_remote_gateway_buckets.go +++ b/weed/command/filer_remote_gateway_buckets.go @@ -1,17 +1,17 @@ package command import ( - "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" "math" "math/rand" "path/filepath" @@ -23,7 +23,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo // read filer remote storage mount mappings if detectErr := option.collectRemoteStorageConf(); detectErr != nil { - return fmt.Errorf("read mount info: %w", detectErr) + return fmt.Errorf("read mount info: %v", detectErr) } eachEntryFunc, err := option.makeBucketedEventProcessor(filerSource) @@ -31,40 +31,16 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo return err } - lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, *option.timeAgo) - processor := NewMetadataProcessor(eachEntryFunc, 128, lastOffsetTs.UnixNano()) - - var lastLogTsNs = time.Now().UnixNano() - processEventFnWithOffset := pb.AddOffsetFunc(func(resp *filer_pb.SubscribeMetadataResponse) error { - processor.AddSyncJob(resp) - return nil - }, 3*time.Second, func(counter int64, lastTsNs int64) error { - offsetTsNs := processor.processedTsWatermark.Load() - if offsetTsNs == 0 { - return nil - } - now := time.Now().UnixNano() - glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) - lastLogTsNs = now - return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, offsetTsNs) + processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error { + lastTime := time.Unix(0, lastTsNs) + glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3)) + return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, lastTsNs) }) - option.clientEpoch++ + lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, *option.timeAgo) - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "filer.remote.sync", - ClientId: option.clientId, - ClientEpoch: option.clientEpoch, - SelfSignature: 0, - PathPrefix: option.bucketsDir + "/", - AdditionalPathPrefixes: []string{filer.DirectoryEtcRemote}, - DirectoriesToWatch: nil, - StartTsNs: lastOffsetTs.UnixNano(), - StopTsNs: 0, - EventErrorType: pb.RetryForeverOnError, - } - - return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset) + return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync", option.clientId, + option.bucketsDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, 0, processEventFnWithOffset, pb.TrivialOnError) } func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) { @@ -168,7 +144,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE { newMappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content) if readErr != nil { - return fmt.Errorf("unmarshal mappings: %w", readErr) + return fmt.Errorf("unmarshal mappings: %v", readErr) } option.mappings = newMappings } @@ -206,7 +182,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour if message.NewParentPath == option.bucketsDir { return handleCreateBucket(message.NewEntry) } - if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) { + if strings.HasPrefix(message.NewParentPath, option.bucketsDir) && strings.Contains(message.NewParentPath, "/"+s3_constants.MultipartUploadsFolder+"/") { return nil } if !filer.HasData(message.NewEntry) { @@ -235,7 +211,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour if writeErr != nil { return writeErr } - return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry) + return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry) } if filer_pb.IsDelete(resp) { if resp.Directory == option.bucketsDir { @@ -272,9 +248,6 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour } } } - if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) { - return nil - } oldBucket, oldRemoteStorageMountLocation, oldRemoteStorage, oldOk := option.detectBucketInfo(resp.Directory) newBucket, newRemoteStorageMountLocation, newRemoteStorage, newOk := option.detectBucketInfo(message.NewParentPath) if oldOk && newOk { @@ -302,7 +275,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour if writeErr != nil { return writeErr } - return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry) + return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry) } } } @@ -339,7 +312,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour if writeErr != nil { return writeErr } - return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry) + return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry) } } @@ -399,9 +372,6 @@ func extractBucketPath(bucketsDir, dir string) (util.FullPath, bool) { func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) { if mappings, err := filer.ReadMountMappings(option.grpcDialOption, pb.ServerAddress(*option.filerAddress)); err != nil { - if err == filer_pb.ErrNotFound { - return fmt.Errorf("remote storage is not configured in filer server") - } return err } else { option.mappings = mappings @@ -409,7 +379,7 @@ func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) { option.remoteConfs = make(map[string]*remote_pb.RemoteConf) var lastConfName string - err = filer_pb.List(context.Background(), option, filer.DirectoryEtcRemote, "", func(entry *filer_pb.Entry, isLast bool) error { + err = filer_pb.List(option, filer.DirectoryEtcRemote, "", func(entry *filer_pb.Entry, isLast bool) error { if !strings.HasSuffix(entry.Name, filer.REMOTE_STORAGE_CONF_SUFFIX) { return nil } diff --git a/weed/command/filer_remote_sync.go b/weed/command/filer_remote_sync.go index 77dd95134..d6ccf7b79 100644 --- a/weed/command/filer_remote_sync.go +++ b/weed/command/filer_remote_sync.go @@ -2,31 +2,29 @@ package command import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" "time" ) type RemoteSyncOptions struct { filerAddress *string - storageClass *string grpcDialOption grpc.DialOption readChunkFromFiler *bool timeAgo *time.Duration dir *string clientId int32 - clientEpoch int32 } var _ = filer_pb.FilerClient(&RemoteSyncOptions{}) func (option *RemoteSyncOptions) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithFilerClient(streamingMode, option.clientId, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithFilerClient(streamingMode, pb.ServerAddress(*option.filerAddress), option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { return fn(client) }) } @@ -34,10 +32,6 @@ func (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string return location.Url } -func (option *RemoteSyncOptions) GetDataCenter() string { - return "" -} - var ( remoteSyncOptions RemoteSyncOptions ) @@ -46,7 +40,6 @@ func init() { cmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize // break init cycle remoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String("filer", "localhost:8888", "filer of the SeaweedFS cluster") remoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String("dir", "", "a mounted directory on filer") - remoteSyncOptions.storageClass = cmdFilerRemoteSynchronize.Flag.String("storageClass", "", "override amz storage class, empty to delete") remoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool("filerProxy", false, "read file chunks from filer instead of volume servers") remoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration("timeAgo", 0, "start time before now, skipping previous metadata changes. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"ยตs\"), \"ms\", \"s\", \"m\", \"h\"") remoteSyncOptions.clientId = util.RandomInt32() @@ -73,7 +66,7 @@ var cmdFilerRemoteSynchronize = &Command{ func runFilerRemoteSynchronize(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") remoteSyncOptions.grpcDialOption = grpcDialOption @@ -90,7 +83,7 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool { if dir != "" { fmt.Printf("synchronize %s to remote storage...\n", dir) - util.RetryUntil("filer.remote.sync "+dir, func() error { + util.RetryForever("filer.remote.sync "+dir, func() error { return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir) }, func(err error) bool { if err != nil { diff --git a/weed/command/filer_remote_sync_dir.go b/weed/command/filer_remote_sync_dir.go index 5011ca36e..5fc20be9a 100644 --- a/weed/command/filer_remote_sync_dir.go +++ b/weed/command/filer_remote_sync_dir.go @@ -3,21 +3,20 @@ package command import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "os" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" ) func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error { @@ -25,67 +24,27 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour // read filer remote storage mount mappings _, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir) if detectErr != nil { - return fmt.Errorf("read mount info: %w", detectErr) + return fmt.Errorf("read mount info: %v", detectErr) } - eachEntryFunc, err := option.makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource) + eachEntryFunc, err := makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource) if err != nil { return err } - lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, *option.timeAgo) - processor := NewMetadataProcessor(eachEntryFunc, 128, lastOffsetTs.UnixNano()) - - var lastLogTsNs = time.Now().UnixNano() - processEventFnWithOffset := pb.AddOffsetFunc(func(resp *filer_pb.SubscribeMetadataResponse) error { - if resp.EventNotification.NewEntry != nil { - if *option.storageClass == "" { - if _, ok := resp.EventNotification.NewEntry.Extended[s3_constants.AmzStorageClass]; ok { - delete(resp.EventNotification.NewEntry.Extended, s3_constants.AmzStorageClass) - } - } else { - resp.EventNotification.NewEntry.Extended[s3_constants.AmzStorageClass] = []byte(*option.storageClass) - } - } - - processor.AddSyncJob(resp) - return nil - }, 3*time.Second, func(counter int64, lastTsNs int64) error { - offsetTsNs := processor.processedTsWatermark.Load() - if offsetTsNs == 0 { - return nil - } - // use processor.processedTsWatermark instead of the lastTsNs from the most recent job - now := time.Now().UnixNano() - glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) - lastLogTsNs = now - return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, offsetTsNs) + processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error { + lastTime := time.Unix(0, lastTsNs) + glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, lastTime, float64(counter)/float64(3)) + return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, lastTsNs) }) - option.clientEpoch++ + lastOffsetTs := collectLastSyncOffset(option, option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, *option.timeAgo) - prefix := mountedDir - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "filer.remote.sync", - ClientId: option.clientId, - ClientEpoch: option.clientEpoch, - SelfSignature: 0, - PathPrefix: prefix, - AdditionalPathPrefixes: []string{filer.DirectoryEtcRemote}, - DirectoriesToWatch: nil, - StartTsNs: lastOffsetTs.UnixNano(), - StopTsNs: 0, - EventErrorType: pb.RetryForeverOnError, - } - - return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, metadataFollowOption, processEventFnWithOffset) + return pb.FollowMetadata(pb.ServerAddress(*option.filerAddress), option.grpcDialOption, "filer.remote.sync", option.clientId, + mountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, 0, processEventFnWithOffset, pb.TrivialOnError) } -func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) { +func makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) { client, err := remote_storage.GetRemoteStorage(remoteStorage) if err != nil { return nil, err @@ -99,7 +58,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE { mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content) if readErr != nil { - return fmt.Errorf("unmarshal mappings: %w", readErr) + return fmt.Errorf("unmarshal mappings: %v", readErr) } if remoteLoc, found := mappings.Mappings[mountedDir]; found { if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path { @@ -136,9 +95,6 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem return nil } if filer_pb.IsCreate(resp) { - if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) { - return nil - } if !filer.HasData(message.NewEntry) { return nil } @@ -157,7 +113,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem if writeErr != nil { return writeErr } - return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry) + return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry) } if filer_pb.IsDelete(resp) { glog.V(2).Infof("delete: %+v", resp) @@ -170,9 +126,6 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem return client.DeleteFile(dest) } if message.OldEntry != nil && message.NewEntry != nil { - if isMultipartUploadFile(message.NewParentPath, message.NewEntry.Name) { - return nil - } oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation) dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation) if !shouldSendToRemote(message.NewEntry) { @@ -191,15 +144,13 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem glog.V(2).Infof("update: %+v", resp) glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest)) if err := client.DeleteFile(oldDest); err != nil { - if isMultipartUploadFile(resp.Directory, message.OldEntry.Name) { - return nil - } + return err } remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest) if writeErr != nil { return writeErr } - return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry) + return updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry) } return nil @@ -230,7 +181,7 @@ func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc // 3. directory creation time var lastOffsetTs time.Time if timeAgo == 0 { - mountedDirEntry, err := filer_pb.GetEntry(context.Background(), filerClient, util.FullPath(mountedDir)) + mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir)) if err != nil { glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err) return time.Now() @@ -284,12 +235,3 @@ func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer return err }) } - -func isMultipartUploadFile(dir string, name string) bool { - return isMultipartUploadDir(dir) && strings.HasSuffix(name, ".part") -} - -func isMultipartUploadDir(dir string) bool { - return strings.HasPrefix(dir, "/buckets/") && - strings.Contains(dir, "/"+s3_constants.MultipartUploadsFolder+"/") -} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index f53fdfb48..bf0a3e140 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -4,11 +4,11 @@ import ( "context" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/replication" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/sub" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/replication" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/sub" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -30,7 +30,7 @@ var cmdFilerReplicate = &Command{ func runFilerReplicate(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) config := util.GetViper() @@ -99,7 +99,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { if m.OldEntry != nil && m.NewEntry == nil { glog.V(1).Infof("delete: %s", key) } else if m.OldEntry == nil && m.NewEntry != nil { - glog.V(1).Infof("add: %s", key) + glog.V(1).Infof(" add: %s", key) } else { glog.V(1).Infof("modify: %s", key) } diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go index 9b489297c..1550d155a 100644 --- a/weed/command/filer_sync.go +++ b/weed/command/filer_sync.go @@ -4,24 +4,21 @@ import ( "context" "errors" "fmt" - "os" - "regexp" - "strings" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/sink/filersink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/security" - statsCollect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + statsCollect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" "google.golang.org/grpc" + "os" + "strings" + "time" ) type SyncOptions struct { @@ -29,9 +26,7 @@ type SyncOptions struct { filerA *string filerB *string aPath *string - aExcludePaths *string bPath *string - bExcludePaths *string aReplication *string bReplication *string aCollection *string @@ -46,20 +41,10 @@ type SyncOptions struct { bFromTsMs *int64 aProxyByFiler *bool bProxyByFiler *bool - metricsHttpIp *string metricsHttpPort *int - concurrency *int - aDoDeleteFiles *bool - bDoDeleteFiles *bool clientId int32 - clientEpoch atomic.Int32 } -const ( - SyncKeyPrefix = "sync." - DefaultConcurrencyLimit = 32 -) - var ( syncOptions SyncOptions syncCpuProfile *string @@ -72,9 +57,7 @@ func init() { syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster") syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster") syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A") - syncOptions.aExcludePaths = cmdFilerSynchronize.Flag.String("a.excludePaths", "", "exclude directories to sync on filer A") syncOptions.bPath = cmdFilerSynchronize.Flag.String("b.path", "/", "directory to sync on filer B") - syncOptions.bExcludePaths = cmdFilerSynchronize.Flag.String("b.excludePaths", "", "exclude directories to sync on filer B") syncOptions.aReplication = cmdFilerSynchronize.Flag.String("a.replication", "", "replication on filer A") syncOptions.bReplication = cmdFilerSynchronize.Flag.String("b.replication", "", "replication on filer B") syncOptions.aCollection = cmdFilerSynchronize.Flag.String("a.collection", "", "collection on filer A") @@ -89,13 +72,9 @@ func init() { syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files") syncOptions.aFromTsMs = cmdFilerSynchronize.Flag.Int64("a.fromTsMs", 0, "synchronization from timestamp on filer A. The unit is millisecond") syncOptions.bFromTsMs = cmdFilerSynchronize.Flag.Int64("b.fromTsMs", 0, "synchronization from timestamp on filer B. The unit is millisecond") - syncOptions.concurrency = cmdFilerSynchronize.Flag.Int("concurrency", DefaultConcurrencyLimit, "The maximum number of files that will be synced concurrently.") syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file") syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory profile output file") - syncOptions.metricsHttpIp = cmdFilerSynchronize.Flag.String("metricsIp", "", "metrics listen ip") syncOptions.metricsHttpPort = cmdFilerSynchronize.Flag.Int("metricsPort", 0, "metrics listen port") - syncOptions.aDoDeleteFiles = cmdFilerSynchronize.Flag.Bool("a.doDeleteFiles", true, "delete and update files when synchronizing on filer A") - syncOptions.bDoDeleteFiles = cmdFilerSynchronize.Flag.Bool("b.doDeleteFiles", true, "delete and update files when synchronizing on filer B") syncOptions.clientId = util.RandomInt32() } @@ -119,7 +98,7 @@ var cmdFilerSynchronize = &Command{ func runFilerSynchronize(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") grace.SetupProfiling(*syncCpuProfile, *syncMemProfile) @@ -128,7 +107,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool { filerB := pb.ServerAddress(*syncOptions.filerB) // start filer.sync metrics server - go statsCollect.StartMetricsServer(*syncOptions.metricsHttpIp, *syncOptions.metricsHttpPort) + go statsCollect.StartMetricsServer(*syncOptions.metricsHttpPort) // read a filer signature aFilerSignature, aFilerErr := replication.ReadFilerSignature(grpcDialOption, filerA) @@ -146,33 +125,15 @@ func runFilerSynchronize(cmd *Command, args []string) bool { go func() { // a->b // set synchronization start timestamp to offset - initOffsetError := initOffsetFromTsMs(grpcDialOption, filerB, aFilerSignature, *syncOptions.bFromTsMs, getSignaturePrefixByPath(*syncOptions.aPath)) + initOffsetError := initOffsetFromTsMs(grpcDialOption, filerB, aFilerSignature, *syncOptions.bFromTsMs) if initOffsetError != nil { glog.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.bFromTsMs, *syncOptions.filerA, *syncOptions.filerB, initOffsetError) os.Exit(2) } for { - syncOptions.clientEpoch.Add(1) - err := doSubscribeFilerMetaChanges( - syncOptions.clientId, - syncOptions.clientEpoch.Load(), - grpcDialOption, - filerA, - *syncOptions.aPath, - util.StringSplit(*syncOptions.aExcludePaths, ","), - *syncOptions.aProxyByFiler, - filerB, - *syncOptions.bPath, - *syncOptions.bReplication, - *syncOptions.bCollection, - *syncOptions.bTtlSec, - *syncOptions.bProxyByFiler, - *syncOptions.bDiskType, - *syncOptions.bDebug, - *syncOptions.concurrency, - *syncOptions.bDoDeleteFiles, - aFilerSignature, - bFilerSignature) + err := doSubscribeFilerMetaChanges(syncOptions.clientId, grpcDialOption, filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, filerB, + *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, + *syncOptions.bDebug, aFilerSignature, bFilerSignature) if err != nil { glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err) time.Sleep(1747 * time.Millisecond) @@ -183,34 +144,16 @@ func runFilerSynchronize(cmd *Command, args []string) bool { if !*syncOptions.isActivePassive { // b->a // set synchronization start timestamp to offset - initOffsetError := initOffsetFromTsMs(grpcDialOption, filerA, bFilerSignature, *syncOptions.aFromTsMs, getSignaturePrefixByPath(*syncOptions.bPath)) + initOffsetError := initOffsetFromTsMs(grpcDialOption, filerA, bFilerSignature, *syncOptions.aFromTsMs) if initOffsetError != nil { glog.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.aFromTsMs, *syncOptions.filerB, *syncOptions.filerA, initOffsetError) os.Exit(2) } go func() { for { - syncOptions.clientEpoch.Add(1) - err := doSubscribeFilerMetaChanges( - syncOptions.clientId, - syncOptions.clientEpoch.Load(), - grpcDialOption, - filerB, - *syncOptions.bPath, - util.StringSplit(*syncOptions.bExcludePaths, ","), - *syncOptions.bProxyByFiler, - filerA, - *syncOptions.aPath, - *syncOptions.aReplication, - *syncOptions.aCollection, - *syncOptions.aTtlSec, - *syncOptions.aProxyByFiler, - *syncOptions.aDiskType, - *syncOptions.aDebug, - *syncOptions.concurrency, - *syncOptions.aDoDeleteFiles, - bFilerSignature, - aFilerSignature) + err := doSubscribeFilerMetaChanges(syncOptions.clientId, grpcDialOption, filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, filerA, + *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, + *syncOptions.aDebug, bFilerSignature, aFilerSignature) if err != nil { glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err) time.Sleep(2147 * time.Millisecond) @@ -225,14 +168,14 @@ func runFilerSynchronize(cmd *Command, args []string) bool { } // initOffsetFromTsMs Initialize offset -func initOffsetFromTsMs(grpcDialOption grpc.DialOption, targetFiler pb.ServerAddress, sourceFilerSignature int32, fromTsMs int64, signaturePrefix string) error { +func initOffsetFromTsMs(grpcDialOption grpc.DialOption, targetFiler pb.ServerAddress, sourceFilerSignature int32, fromTsMs int64) error { if fromTsMs <= 0 { return nil } // convert to nanosecond fromTsNs := fromTsMs * 1000_000 // If not successful, exit the program. - setOffsetErr := setOffset(grpcDialOption, targetFiler, signaturePrefix, sourceFilerSignature, fromTsNs) + setOffsetErr := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, fromTsNs) if setOffsetErr != nil { return setOffsetErr } @@ -240,8 +183,8 @@ func initOffsetFromTsMs(grpcDialOption grpc.DialOption, targetFiler pb.ServerAdd return nil } -func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOption grpc.DialOption, sourceFiler pb.ServerAddress, sourcePath string, sourceExcludePaths []string, sourceReadChunkFromFiler bool, targetFiler pb.ServerAddress, targetPath string, - replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool, concurrency int, doDeleteFiles bool, sourceFilerSignature int32, targetFilerSignature int32) error { +func doSubscribeFilerMetaChanges(clientId int32, grpcDialOption grpc.DialOption, sourceFiler pb.ServerAddress, sourcePath string, sourceReadChunkFromFiler bool, targetFiler pb.ServerAddress, targetPath string, + replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool, sourceFilerSignature int32, targetFilerSignature int32) error { // if first time, start from now // if has previously synced, resume from that point of time @@ -259,7 +202,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti filerSink.DoInitialize(targetFiler.ToHttpAddress(), targetFiler.ToGrpcAddress(), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, sinkWriteChunkByFiler) filerSink.SetSourceFiler(filerSource) - persistEventFn := genProcessFunction(sourcePath, targetPath, sourceExcludePaths, nil, filerSink, doDeleteFiles, debug) + persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug) processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { message := resp.EventNotification @@ -272,53 +215,26 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti return persistEventFn(resp) } - if concurrency < 0 || concurrency > 1024 { - glog.Warningf("invalid concurrency value, using default: %d", DefaultConcurrencyLimit) - concurrency = DefaultConcurrencyLimit - } - processor := NewMetadataProcessor(processEventFn, concurrency, sourceFilerOffsetTsNs) - var lastLogTsNs = time.Now().UnixNano() var clientName = fmt.Sprintf("syncFrom_%s_To_%s", string(sourceFiler), string(targetFiler)) - processEventFnWithOffset := pb.AddOffsetFunc(func(resp *filer_pb.SubscribeMetadataResponse) error { - processor.AddSyncJob(resp) - return nil - }, 3*time.Second, func(counter int64, lastTsNs int64) error { - offsetTsNs := processor.processedTsWatermark.Load() - if offsetTsNs == 0 { - return nil - } - // use processor.processedTsWatermark instead of the lastTsNs from the most recent job + processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error { now := time.Now().UnixNano() - glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) + glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, lastTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) lastLogTsNs = now // collect synchronous offset - statsCollect.FilerSyncOffsetGauge.WithLabelValues(sourceFiler.String(), targetFiler.String(), clientName, sourcePath).Set(float64(offsetTsNs)) - return setOffset(grpcDialOption, targetFiler, getSignaturePrefixByPath(sourcePath), sourceFilerSignature, offsetTsNs) + statsCollect.FilerSyncOffsetGauge.WithLabelValues(sourceFiler.String(), targetFiler.String(), clientName, sourcePath).Set(float64(lastTsNs)) + return setOffset(grpcDialOption, targetFiler, getSignaturePrefixByPath(sourcePath), sourceFilerSignature, lastTsNs) }) - prefix := sourcePath - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: clientName, - ClientId: clientId, - ClientEpoch: clientEpoch, - SelfSignature: targetFilerSignature, - PathPrefix: prefix, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: nil, - StartTsNs: sourceFilerOffsetTsNs, - StopTsNs: 0, - EventErrorType: pb.RetryForeverOnError, - } - - return pb.FollowMetadata(sourceFiler, grpcDialOption, metadataFollowOption, processEventFnWithOffset) + return pb.FollowMetadata(sourceFiler, grpcDialOption, clientName, clientId, + sourcePath, nil, sourceFilerOffsetTsNs, 0, targetFilerSignature, processEventFnWithOffset, pb.RetryForeverOnError) } +const ( + SyncKeyPrefix = "sync." +) + // When each business is distinguished according to path, and offsets need to be maintained separately. func getSignaturePrefixByPath(path string) string { // compatible historical version @@ -331,7 +247,7 @@ func getSignaturePrefixByPath(path string) string { func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) { - readErr = pb.WithFilerClient(false, signature, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + readErr = pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { syncKey := []byte(signaturePrefix + "____") util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) @@ -357,7 +273,7 @@ func getOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signature } func setOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signaturePrefix string, signature int32, offsetTsNs int64) error { - return pb.WithFilerClient(false, signature, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { syncKey := []byte(signaturePrefix + "____") util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) @@ -383,7 +299,7 @@ func setOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, signature } -func genProcessFunction(sourcePath string, targetPath string, excludePaths []string, reExcludeFileName *regexp.Regexp, dataSink sink.ReplicationSink, doDeleteFiles bool, debug bool) func(resp *filer_pb.SubscribeMetadataResponse) error { +func genProcessFunction(sourcePath string, targetPath string, dataSink sink.ReplicationSink, debug bool) func(resp *filer_pb.SubscribeMetadataResponse) error { // process function processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { message := resp.EventNotification @@ -400,34 +316,20 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str glog.V(0).Infof("received %v", resp) } - if isMultipartUploadDir(resp.Directory + "/") { + if !strings.HasPrefix(resp.Directory, sourcePath) { return nil } - if !strings.HasPrefix(resp.Directory+"/", sourcePath) { - return nil - } - for _, excludePath := range excludePaths { - if strings.HasPrefix(resp.Directory+"/", excludePath) { - return nil - } - } - if reExcludeFileName != nil && reExcludeFileName.MatchString(message.NewEntry.Name) { - return nil - } - if dataSink.IsIncremental() { - doDeleteFiles = false - } // handle deletions if filer_pb.IsDelete(resp) { - if !doDeleteFiles { - return nil - } if !strings.HasPrefix(string(sourceOldKey), sourcePath) { return nil } key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) - return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + if !dataSink.IsIncremental() { + return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + } + return nil } // handle new entries @@ -436,11 +338,7 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str return nil } key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) - if err := dataSink.CreateEntry(key, message.NewEntry, message.Signatures); err != nil { - return fmt.Errorf("create entry1 : %w", err) - } else { - return nil - } + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) } // this is something special? @@ -453,13 +351,9 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str // old key is in the watched directory if strings.HasPrefix(string(sourceNewKey), sourcePath) { // new key is also in the watched directory - if doDeleteFiles { + if !dataSink.IsIncremental() { oldKey := util.Join(targetPath, string(sourceOldKey)[len(sourcePath):]) - if strings.HasSuffix(sourcePath, "/") { - message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath)-1:]) - } else { - message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath):]) - } + message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath):]) foundExisting, err := dataSink.UpdateEntry(string(oldKey), message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) if foundExisting { return err @@ -467,36 +361,28 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str // not able to find old entry if err = dataSink.DeleteEntry(string(oldKey), message.OldEntry.IsDirectory, false, message.Signatures); err != nil { - return fmt.Errorf("delete old entry %v: %w", oldKey, err) + return fmt.Errorf("delete old entry %v: %v", oldKey, err) } } // create the new entry newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) - if err := dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures); err != nil { - return fmt.Errorf("create entry2 : %w", err) - } else { - return nil - } + return dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures) } else { - // new key is outside the watched directory - if doDeleteFiles { + // new key is outside of the watched directory + if !dataSink.IsIncremental() { key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) } } } else { - // old key is outside the watched directory + // old key is outside of the watched directory if strings.HasPrefix(string(sourceNewKey), sourcePath) { // new key is in the watched directory key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) - if err := dataSink.CreateEntry(key, message.NewEntry, message.Signatures); err != nil { - return fmt.Errorf("create entry3 : %w", err) - } else { - return nil - } + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) } else { - // new key is also outside the watched directory + // new key is also outside of the watched directory // skip } } diff --git a/weed/command/filer_sync_jobs.go b/weed/command/filer_sync_jobs.go deleted file mode 100644 index d49031b98..000000000 --- a/weed/command/filer_sync_jobs.go +++ /dev/null @@ -1,148 +0,0 @@ -package command - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "sync" - "sync/atomic" -) - -type MetadataProcessor struct { - activeJobs map[int64]*filer_pb.SubscribeMetadataResponse - activeJobsLock sync.Mutex - activeJobsCond *sync.Cond - concurrencyLimit int - fn pb.ProcessMetadataFunc - processedTsWatermark atomic.Int64 -} - -func NewMetadataProcessor(fn pb.ProcessMetadataFunc, concurrency int, offsetTsNs int64) *MetadataProcessor { - t := &MetadataProcessor{ - fn: fn, - activeJobs: make(map[int64]*filer_pb.SubscribeMetadataResponse), - concurrencyLimit: concurrency, - } - t.processedTsWatermark.Store(offsetTsNs) - t.activeJobsCond = sync.NewCond(&t.activeJobsLock) - return t -} - -func (t *MetadataProcessor) AddSyncJob(resp *filer_pb.SubscribeMetadataResponse) { - if filer_pb.IsEmpty(resp) { - return - } - - t.activeJobsLock.Lock() - defer t.activeJobsLock.Unlock() - - for len(t.activeJobs) >= t.concurrencyLimit || t.conflictsWith(resp) { - t.activeJobsCond.Wait() - } - t.activeJobs[resp.TsNs] = resp - go func() { - - if err := util.Retry("metadata processor", func() error { - return t.fn(resp) - }); err != nil { - glog.Errorf("process %v: %v", resp, err) - } - - t.activeJobsLock.Lock() - defer t.activeJobsLock.Unlock() - - delete(t.activeJobs, resp.TsNs) - - // if is the oldest job, write down the watermark - isOldest := true - for t := range t.activeJobs { - if resp.TsNs > t { - isOldest = false - break - } - } - if isOldest { - t.processedTsWatermark.Store(resp.TsNs) - } - t.activeJobsCond.Signal() - }() -} - -func (t *MetadataProcessor) conflictsWith(resp *filer_pb.SubscribeMetadataResponse) bool { - for _, r := range t.activeJobs { - if shouldWaitFor(resp, r) { - return true - } - } - return false -} - -// a is one possible job to schedule -// b is one existing active job -func shouldWaitFor(a *filer_pb.SubscribeMetadataResponse, b *filer_pb.SubscribeMetadataResponse) bool { - aPath, aNewPath, aIsDirectory := extractPathsFromMetadata(a) - bPath, bNewPath, bIsDirectory := extractPathsFromMetadata(b) - - if pairShouldWaitFor(aPath, bPath, aIsDirectory, bIsDirectory) { - return true - } - if aNewPath != "" { - if pairShouldWaitFor(aNewPath, bPath, aIsDirectory, bIsDirectory) { - return true - } - } - if bNewPath != "" { - if pairShouldWaitFor(aPath, bNewPath, aIsDirectory, bIsDirectory) { - return true - } - } - if aNewPath != "" && bNewPath != "" { - if pairShouldWaitFor(aNewPath, bNewPath, aIsDirectory, bIsDirectory) { - return true - } - } - return false -} - -func pairShouldWaitFor(aPath, bPath util.FullPath, aIsDirectory, bIsDirectory bool) bool { - if bIsDirectory { - if aIsDirectory { - return aPath.IsUnder(bPath) || bPath.IsUnder(aPath) - } else { - return aPath.IsUnder(bPath) - } - } else { - if aIsDirectory { - return bPath.IsUnder(aPath) - } else { - return aPath == bPath - } - } -} - -func extractPathsFromMetadata(resp *filer_pb.SubscribeMetadataResponse) (path, newPath util.FullPath, isDirectory bool) { - oldEntry := resp.EventNotification.OldEntry - newEntry := resp.EventNotification.NewEntry - // create - if filer_pb.IsCreate(resp) { - path = util.FullPath(resp.Directory).Child(newEntry.Name) - isDirectory = newEntry.IsDirectory - return - } - if filer_pb.IsDelete(resp) { - path = util.FullPath(resp.Directory).Child(oldEntry.Name) - isDirectory = oldEntry.IsDirectory - return - } - if filer_pb.IsUpdate(resp) { - path = util.FullPath(resp.Directory).Child(newEntry.Name) - isDirectory = newEntry.IsDirectory - return - } - // renaming - path = util.FullPath(resp.Directory).Child(oldEntry.Name) - isDirectory = oldEntry.IsDirectory - newPath = util.FullPath(resp.EventNotification.NewParentPath).Child(newEntry.Name) - return -} diff --git a/weed/command/fix.go b/weed/command/fix.go index 34dee3732..d19496a79 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -8,14 +8,13 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" - "github.com/seaweedfs/seaweedfs/weed/storage/super_block" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -23,52 +22,39 @@ func init() { } var cmdFix = &Command{ - UsageLine: "fix [-remoteFile=false] [-volumeId=234] [-collection=bigData] /tmp", + UsageLine: "fix [-volumeId=234] [-collection=bigData] /tmp", Short: "run weed tool fix on files or whole folders to recreate index file(s) if corrupted", - Long: `Fix runs the SeaweedFS fix command on local dat files ( or remote files) or whole folders to re-create the index .idx file. If fixing remote files, you need to synchronize master.toml to the same directory on the current node as on the master node. - You Need to stop the volume server when running this command. -`, + Long: `Fix runs the SeaweedFS fix command on dat files or whole folders to re-create the index .idx file. + `, } var ( fixVolumeCollection = cmdFix.Flag.String("collection", "", "an optional volume collection name, if specified only it will be processed") fixVolumeId = cmdFix.Flag.Int64("volumeId", 0, "an optional volume id, if not 0 (default) only it will be processed") - fixIncludeDeleted = cmdFix.Flag.Bool("includeDeleted", true, "include deleted entries in the index file") - fixIgnoreError = cmdFix.Flag.Bool("ignoreError", false, "an optional, if true will be processed despite errors") - fixRemoteFile = cmdFix.Flag.Bool("remoteFile", false, "an optional, if true will not try to load the local .dat file, but only the remote file") ) type VolumeFileScanner4Fix struct { - version needle.Version - nm *needle_map.MemDb - nmDeleted *needle_map.MemDb - includeDeleted bool + version needle.Version + nm *needle_map.MemDb } func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error { scanner.version = superBlock.Version return nil -} +} func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { return false } func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - glog.V(2).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) + glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) if n.Size.IsValid() { - if pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size); pe != nil { - return fmt.Errorf("saved %d with error %v", n.Size, pe) - } + pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) + glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { - if scanner.includeDeleted { - if pe := scanner.nmDeleted.Set(n.Id, types.ToOffset(offset), types.TombstoneFileSize); pe != nil { - return fmt.Errorf("saved deleted %d with error %v", n.Size, pe) - } - } else { - glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id) - } + glog.V(2).Infof("skipping deleted file ...") + return scanner.nm.Delete(n.Id) } return nil } @@ -76,10 +62,6 @@ func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64 func runFix(cmd *Command, args []string) bool { for _, arg := range args { basePath, f := path.Split(util.ResolvePath(arg)) - if util.FolderExists(arg) { - basePath = arg - f = "" - } files := []fs.DirEntry{} if f == "" { @@ -90,7 +72,7 @@ func runFix(cmd *Command, args []string) bool { } files = fileInfo } else { - fileInfo, err := os.Stat(arg) + fileInfo, err := os.Stat(basePath + f) if err != nil { fmt.Println(err) return false @@ -98,15 +80,8 @@ func runFix(cmd *Command, args []string) bool { files = []fs.DirEntry{fs.FileInfoToDirEntry(fileInfo)} } - ext := ".dat" - if *fixRemoteFile { - ext = ".idx" - util.LoadConfiguration("master", false) - backend.LoadConfiguration(util.GetViper()) - } - for _, file := range files { - if !strings.HasSuffix(file.Name(), ext) { + if !strings.HasSuffix(file.Name(), ".dat") { continue } if *fixVolumeCollection != "" { @@ -128,75 +103,31 @@ func runFix(cmd *Command, args []string) bool { if *fixVolumeId != 0 && *fixVolumeId != volumeId { continue } - doFixOneVolume(basePath, baseFileName, collection, volumeId, *fixIncludeDeleted) + doFixOneVolume(basePath, baseFileName, collection, volumeId) } } return true } -func SaveToIdx(scaner *VolumeFileScanner4Fix, idxName string) (ret error) { - idxFile, err := os.OpenFile(idxName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return - } - defer func() { - idxFile.Close() - }() +func doFixOneVolume(basepath string, baseFileName string, collection string, volumeId int64) { - return scaner.nm.AscendingVisit(func(value needle_map.NeedleValue) error { - _, err := idxFile.Write(value.ToBytes()) - if scaner.includeDeleted && err == nil { - if deleted, ok := scaner.nmDeleted.Get(value.Key); ok { - _, err = idxFile.Write(deleted.ToBytes()) - } - } - return err - }) -} - -func doFixOneVolume(basepath string, baseFileName string, collection string, volumeId int64, fixIncludeDeleted bool) { indexFileName := path.Join(basepath, baseFileName+".idx") nm := needle_map.NewMemDb() - nmDeleted := needle_map.NewMemDb() defer nm.Close() - defer nmDeleted.Close() - // Validate volumeId range before converting to uint32 - if volumeId < 0 || volumeId > 0xFFFFFFFF { - err := fmt.Errorf("volume ID out of range: %d", volumeId) - if *fixIgnoreError { - glog.Error(err) - return - } else { - glog.Fatal(err) - } - } - // lgtm[go/incorrect-integer-conversion] - // Safe conversion: volumeId has been validated to be in range [0, 0xFFFFFFFF] above vid := needle.VolumeId(volumeId) scanner := &VolumeFileScanner4Fix{ - nm: nm, - nmDeleted: nmDeleted, - includeDeleted: fixIncludeDeleted, + nm: nm, } if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil { - err := fmt.Errorf("scan .dat File: %w", err) - if *fixIgnoreError { - glog.Error(err) - } else { - glog.Fatal(err) - } + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) } - if err := SaveToIdx(scanner, indexFileName); err != nil { - err := fmt.Errorf("save to .idx File: %w", err) - if *fixIgnoreError { - glog.Error(err) - } else { - os.Remove(indexFileName) - glog.Fatal(err) - } + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) + os.Remove(indexFileName) } } diff --git a/weed/command/fuse.go b/weed/command/fuse.go index a3b7fb81e..a0dcaa86c 100644 --- a/weed/command/fuse.go +++ b/weed/command/fuse.go @@ -1,9 +1,239 @@ package command +import ( + "fmt" + "os" + "strconv" + "strings" + "time" +) + func init() { cmdFuse.Run = runFuse // break init cycle } +type parameter struct { + name string + value string +} + +func runFuse(cmd *Command, args []string) bool { + rawArgs := strings.Join(args, " ") + rawArgsLen := len(rawArgs) + option := strings.Builder{} + options := []parameter{} + masterProcess := true + fusermountPath := "" + + // first parameter + i := 0 + for i = 0; i < rawArgsLen && rawArgs[i] != ' '; i++ { + option.WriteByte(rawArgs[i]) + } + options = append(options, parameter{"arg0", option.String()}) + option.Reset() + + for i++; i < rawArgsLen; i++ { + + // space separator check for filled option + if rawArgs[i] == ' ' { + if option.Len() > 0 { + options = append(options, parameter{option.String(), "true"}) + option.Reset() + } + + // dash separator read option until next space + } else if rawArgs[i] == '-' { + for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ { + option.WriteByte(rawArgs[i]) + } + options = append(options, parameter{option.String(), "true"}) + option.Reset() + + // equal separator start option with pending value + } else if rawArgs[i] == '=' { + name := option.String() + option.Reset() + + for i++; i < rawArgsLen && rawArgs[i] != ',' && rawArgs[i] != ' '; i++ { + // double quote separator read option until next double quote + if rawArgs[i] == '"' { + for i++; i < rawArgsLen && rawArgs[i] != '"'; i++ { + option.WriteByte(rawArgs[i]) + } + + // single quote separator read option until next single quote + } else if rawArgs[i] == '\'' { + for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ { + option.WriteByte(rawArgs[i]) + } + + // add chars before comma + } else if rawArgs[i] != ' ' { + option.WriteByte(rawArgs[i]) + } + } + + options = append(options, parameter{name, option.String()}) + option.Reset() + + // comma separator just read current option + } else if rawArgs[i] == ',' { + options = append(options, parameter{option.String(), "true"}) + option.Reset() + + // what is not a separator fill option buffer + } else { + option.WriteByte(rawArgs[i]) + } + } + + // get residual option data + if option.Len() > 0 { + // add value to pending option + options = append(options, parameter{option.String(), "true"}) + option.Reset() + } + + // scan each parameter + for i := 0; i < len(options); i++ { + parameter := options[i] + + switch parameter.name { + case "child": + masterProcess = false + case "arg0": + mountOptions.dir = ¶meter.value + case "filer": + mountOptions.filer = ¶meter.value + case "filer.path": + mountOptions.filerMountRootPath = ¶meter.value + case "dirAutoCreate": + if parsed, err := strconv.ParseBool(parameter.value); err == nil { + mountOptions.dirAutoCreate = &parsed + } else { + panic(fmt.Errorf("dirAutoCreate: %s", err)) + } + case "collection": + mountOptions.collection = ¶meter.value + case "replication": + mountOptions.replication = ¶meter.value + case "disk": + mountOptions.diskType = ¶meter.value + case "ttl": + if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { + intValue := int(parsed) + mountOptions.ttlSec = &intValue + } else { + panic(fmt.Errorf("ttl: %s", err)) + } + case "chunkSizeLimitMB": + if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { + intValue := int(parsed) + mountOptions.chunkSizeLimitMB = &intValue + } else { + panic(fmt.Errorf("chunkSizeLimitMB: %s", err)) + } + case "concurrentWriters": + i++ + if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { + intValue := int(parsed) + mountOptions.concurrentWriters = &intValue + } else { + panic(fmt.Errorf("concurrentWriters: %s", err)) + } + case "cacheDir": + mountOptions.cacheDir = ¶meter.value + case "cacheCapacityMB": + if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err == nil { + mountOptions.cacheSizeMB = &parsed + } else { + panic(fmt.Errorf("cacheCapacityMB: %s", err)) + } + case "dataCenter": + mountOptions.dataCenter = ¶meter.value + case "allowOthers": + if parsed, err := strconv.ParseBool(parameter.value); err == nil { + mountOptions.allowOthers = &parsed + } else { + panic(fmt.Errorf("allowOthers: %s", err)) + } + case "umask": + mountOptions.umaskString = ¶meter.value + case "nonempty": + if parsed, err := strconv.ParseBool(parameter.value); err == nil { + mountOptions.nonempty = &parsed + } else { + panic(fmt.Errorf("nonempty: %s", err)) + } + case "volumeServerAccess": + mountOptions.volumeServerAccess = ¶meter.value + case "map.uid": + mountOptions.uidMap = ¶meter.value + case "map.gid": + mountOptions.gidMap = ¶meter.value + case "readOnly": + if parsed, err := strconv.ParseBool(parameter.value); err == nil { + mountOptions.readOnly = &parsed + } else { + panic(fmt.Errorf("readOnly: %s", err)) + } + case "cpuprofile": + mountCpuProfile = ¶meter.value + case "memprofile": + mountMemProfile = ¶meter.value + case "readRetryTime": + if parsed, err := time.ParseDuration(parameter.value); err == nil { + mountReadRetryTime = &parsed + } else { + panic(fmt.Errorf("readRetryTime: %s", err)) + } + case "fusermount.path": + fusermountPath = parameter.value + } + } + + // the master start the child, release it then finish himself + if masterProcess { + arg0, err := os.Executable() + if err != nil { + panic(err) + } + + argv := append(os.Args, "-o", "child") + + attr := os.ProcAttr{} + attr.Env = os.Environ() + + child, err := os.StartProcess(arg0, argv, &attr) + + if err != nil { + panic(fmt.Errorf("master process can not start child process: %s", err)) + } + + err = child.Release() + + if err != nil { + panic(fmt.Errorf("master process can not release child process: %s", err)) + } + + return true + } + + if fusermountPath != "" { + if err := os.Setenv("PATH", fusermountPath); err != nil { + panic(fmt.Errorf("setenv: %s", err)) + } + } else if os.Getenv("PATH") == "" { + if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin"); err != nil { + panic(fmt.Errorf("setenv: %s", err)) + } + } + + // just call "weed mount" command + return runMount(cmdMount, []string{}) +} + var cmdFuse = &Command{ UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"", Short: "Allow use weed with linux's mount command", diff --git a/weed/command/fuse_notsupported.go b/weed/command/fuse_notsupported.go deleted file mode 100644 index dc47bd566..000000000 --- a/weed/command/fuse_notsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !linux && !darwin -// +build !linux,!darwin - -package command - -import ( - "fmt" - "runtime" -) - -func runFuse(cmd *Command, args []string) bool { - fmt.Printf("Fuse is not supported on %s %s\n", runtime.GOOS, runtime.GOARCH) - - return true -} diff --git a/weed/command/fuse_std.go b/weed/command/fuse_std.go deleted file mode 100644 index b2839aaf8..000000000 --- a/weed/command/fuse_std.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build linux || darwin -// +build linux darwin - -package command - -import ( - "fmt" - "math" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" -) - -type parameter struct { - name string - value string -} - -func runFuse(cmd *Command, args []string) bool { - rawArgs := strings.Join(args, " ") - rawArgsLen := len(rawArgs) - option := strings.Builder{} - options := []parameter{} - masterProcess := true - fusermountPath := "" - - // first parameter - i := 0 - for i = 0; i < rawArgsLen && rawArgs[i] != ' '; i++ { - option.WriteByte(rawArgs[i]) - } - options = append(options, parameter{"arg0", option.String()}) - option.Reset() - - for i++; i < rawArgsLen; i++ { - - // space separator check for filled option - if rawArgs[i] == ' ' { - if option.Len() > 0 { - options = append(options, parameter{option.String(), "true"}) - option.Reset() - } - - // dash separator read option until next space - } else if rawArgs[i] == '-' { - for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ { - option.WriteByte(rawArgs[i]) - } - // ignore "-o" - if option.String() != "o" { - options = append(options, parameter{option.String(), "true"}) - } - option.Reset() - - // equal separator start option with pending value - } else if rawArgs[i] == '=' { - name := option.String() - option.Reset() - - for i++; i < rawArgsLen && rawArgs[i] != ',' && rawArgs[i] != ' '; i++ { - // double quote separator read option until next double quote - if rawArgs[i] == '"' { - for i++; i < rawArgsLen && rawArgs[i] != '"'; i++ { - option.WriteByte(rawArgs[i]) - } - - // single quote separator read option until next single quote - } else if rawArgs[i] == '\'' { - for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ { - option.WriteByte(rawArgs[i]) - } - - // add chars before comma - } else if rawArgs[i] != ' ' { - option.WriteByte(rawArgs[i]) - } - } - - options = append(options, parameter{name, option.String()}) - option.Reset() - - // comma separator just read current option - } else if rawArgs[i] == ',' { - options = append(options, parameter{option.String(), "true"}) - option.Reset() - - // what is not a separator fill option buffer - } else { - option.WriteByte(rawArgs[i]) - } - } - - // get residual option data - if option.Len() > 0 { - // add value to pending option - options = append(options, parameter{option.String(), "true"}) - option.Reset() - } - - // scan each parameter - for i := 0; i < len(options); i++ { - parameter := options[i] - - switch parameter.name { - case "child": - masterProcess = false - if parsed, err := strconv.ParseInt(parameter.value, 10, 64); err == nil { - if parsed > math.MaxInt || parsed <= 0 { - panic(fmt.Errorf("parent PID %d is invalid", parsed)) - } - mountOptions.fuseCommandPid = int(parsed) - } else { - panic(fmt.Errorf("parent PID %s is invalid: %w", parameter.value, err)) - } - case "arg0": - mountOptions.dir = ¶meter.value - case "filer": - mountOptions.filer = ¶meter.value - case "filer.path": - mountOptions.filerMountRootPath = ¶meter.value - case "dirAutoCreate": - if parsed, err := strconv.ParseBool(parameter.value); err == nil { - mountOptions.dirAutoCreate = &parsed - } else { - panic(fmt.Errorf("dirAutoCreate: %s", err)) - } - case "collection": - mountOptions.collection = ¶meter.value - case "replication": - mountOptions.replication = ¶meter.value - case "disk": - mountOptions.diskType = ¶meter.value - case "ttl": - if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { - intValue := int(parsed) - mountOptions.ttlSec = &intValue - } else { - panic(fmt.Errorf("ttl: %s", err)) - } - case "chunkSizeLimitMB": - if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { - intValue := int(parsed) - mountOptions.chunkSizeLimitMB = &intValue - } else { - panic(fmt.Errorf("chunkSizeLimitMB: %s", err)) - } - case "concurrentWriters": - i++ - if parsed, err := strconv.ParseInt(parameter.value, 0, 32); err == nil { - intValue := int(parsed) - mountOptions.concurrentWriters = &intValue - } else { - panic(fmt.Errorf("concurrentWriters: %s", err)) - } - case "cacheDir": - mountOptions.cacheDirForRead = ¶meter.value - case "cacheCapacityMB": - if parsed, err := strconv.ParseInt(parameter.value, 0, 64); err == nil { - mountOptions.cacheSizeMBForRead = &parsed - } else { - panic(fmt.Errorf("cacheCapacityMB: %s", err)) - } - case "cacheDirWrite": - mountOptions.cacheDirForWrite = ¶meter.value - case "dataCenter": - mountOptions.dataCenter = ¶meter.value - case "allowOthers": - if parsed, err := strconv.ParseBool(parameter.value); err == nil { - mountOptions.allowOthers = &parsed - } else { - panic(fmt.Errorf("allowOthers: %s", err)) - } - case "umask": - mountOptions.umaskString = ¶meter.value - case "nonempty": - if parsed, err := strconv.ParseBool(parameter.value); err == nil { - mountOptions.nonempty = &parsed - } else { - panic(fmt.Errorf("nonempty: %s", err)) - } - case "volumeServerAccess": - mountOptions.volumeServerAccess = ¶meter.value - case "map.uid": - mountOptions.uidMap = ¶meter.value - case "map.gid": - mountOptions.gidMap = ¶meter.value - case "readOnly": - if parsed, err := strconv.ParseBool(parameter.value); err == nil { - mountOptions.readOnly = &parsed - } else { - panic(fmt.Errorf("readOnly: %s", err)) - } - case "disableXAttr": - if parsed, err := strconv.ParseBool(parameter.value); err == nil { - - mountOptions.disableXAttr = &parsed - } else { - panic(fmt.Errorf("disableXAttr: %s", err)) - } - case "cpuprofile": - mountCpuProfile = ¶meter.value - case "memprofile": - mountMemProfile = ¶meter.value - case "readRetryTime": - if parsed, err := time.ParseDuration(parameter.value); err == nil { - mountReadRetryTime = &parsed - } else { - panic(fmt.Errorf("readRetryTime: %s", err)) - } - case "fusermount.path": - fusermountPath = parameter.value - default: - t := parameter.name - if parameter.value != "true" { - t = fmt.Sprintf("%s=%s", parameter.name, parameter.value) - } - mountOptions.extraOptions = append(mountOptions.extraOptions, t) - } - } - - // the master start the child, release it then finish himself - if masterProcess { - arg0, err := os.Executable() - if err != nil { - panic(err) - } - - // pass our PID to the child process - pid := os.Getpid() - argv := append(os.Args, "-o", "child="+strconv.Itoa(pid)) - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM) - - attr := os.ProcAttr{} - attr.Env = os.Environ() - - child, err := os.StartProcess(arg0, argv, &attr) - - if err != nil { - panic(fmt.Errorf("master process can not start child process: %s", err)) - } - - err = child.Release() - - if err != nil { - panic(fmt.Errorf("master process can not release child process: %s", err)) - } - - select { - case <-c: - return true - } - } - - if fusermountPath != "" { - if err := os.Setenv("PATH", fusermountPath); err != nil { - panic(fmt.Errorf("setenv: %s", err)) - } - } else if os.Getenv("PATH") == "" { - if err := os.Setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin"); err != nil { - panic(fmt.Errorf("setenv: %s", err)) - } - } - - // just call "weed mount" command - return runMount(cmdMount, []string{}) -} diff --git a/weed/command/iam.go b/weed/command/iam.go index c484ed18d..968d23095 100644 --- a/weed/command/iam.go +++ b/weed/command/iam.go @@ -3,23 +3,16 @@ package command import ( "context" "fmt" + "net/http" - "github.com/seaweedfs/seaweedfs/weed/util/version" - - "time" - + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/iamapi" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iamapi" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - - // Import credential stores to register them - _ "github.com/seaweedfs/seaweedfs/weed/credential/filer_etc" - _ "github.com/seaweedfs/seaweedfs/weed/credential/memory" - _ "github.com/seaweedfs/seaweedfs/weed/credential/postgres" + "time" ) var ( @@ -42,7 +35,7 @@ func init() { } var cmdIam = &Command{ - UsageLine: "iam [-port=8111] [-filer=] [-master=,]", + UsageLine: "iam [-port=8111] [-filer=] [-masters=,]", Short: "start a iam API compatible server", Long: "start a iam API compatible server.", } @@ -54,10 +47,10 @@ func runIam(cmd *Command, args []string) bool { func (iamopt *IamOptions) startIamServer() bool { filerAddress := pb.ServerAddress(*iamopt.filer) - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") for { - err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) @@ -87,21 +80,23 @@ func (iamopt *IamOptions) startIamServer() bool { glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err) } + httpS := &http.Server{Handler: router} + listenAddress := fmt.Sprintf(":%d", *iamopt.port) iamApiListener, iamApiLocalListener, err := util.NewIpAndLocalListeners(*iamopt.ip, *iamopt.port, time.Duration(10)*time.Second) if err != nil { glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) } - glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", version.Version(), *iamopt.port) + glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) if iamApiLocalListener != nil { go func() { - if err = newHttpServer(router, nil).Serve(iamApiLocalListener); err != nil { + if err = httpS.Serve(iamApiLocalListener); err != nil { glog.Errorf("IAM API Server Fail to serve: %v", err) } }() } - if err = newHttpServer(router, nil).Serve(iamApiListener); err != nil { + if err = httpS.Serve(iamApiListener); err != nil { glog.Fatalf("IAM API Server Fail to serve: %v", err) } diff --git a/weed/command/imports.go b/weed/command/imports.go index d3cefc703..afdbc5a10 100644 --- a/weed/command/imports.go +++ b/weed/command/imports.go @@ -3,35 +3,34 @@ package command import ( _ "net/http/pprof" - _ "github.com/seaweedfs/seaweedfs/weed/remote_storage/azure" - _ "github.com/seaweedfs/seaweedfs/weed/remote_storage/gcs" - _ "github.com/seaweedfs/seaweedfs/weed/remote_storage/s3" + _ "github.com/chrislusf/seaweedfs/weed/remote_storage/azure" + _ "github.com/chrislusf/seaweedfs/weed/remote_storage/gcs" + _ "github.com/chrislusf/seaweedfs/weed/remote_storage/s3" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/azuresink" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/b2sink" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/filersink" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/gcssink" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/localsink" - _ "github.com/seaweedfs/seaweedfs/weed/replication/sink/s3sink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" - _ "github.com/seaweedfs/seaweedfs/weed/filer/arangodb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/cassandra" - _ "github.com/seaweedfs/seaweedfs/weed/filer/elastic/v7" - _ "github.com/seaweedfs/seaweedfs/weed/filer/etcd" - _ "github.com/seaweedfs/seaweedfs/weed/filer/hbase" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb3" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mongodb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mysql" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mysql2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/postgres" - _ "github.com/seaweedfs/seaweedfs/weed/filer/postgres2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis3" - _ "github.com/seaweedfs/seaweedfs/weed/filer/sqlite" - _ "github.com/seaweedfs/seaweedfs/weed/filer/tarantool" - _ "github.com/seaweedfs/seaweedfs/weed/filer/tikv" - _ "github.com/seaweedfs/seaweedfs/weed/filer/ydb" + _ "github.com/chrislusf/seaweedfs/weed/filer/arangodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis3" + _ "github.com/chrislusf/seaweedfs/weed/filer/sqlite" + _ "github.com/chrislusf/seaweedfs/weed/filer/tikv" + _ "github.com/chrislusf/seaweedfs/weed/filer/ydb" ) diff --git a/weed/command/master.go b/weed/command/master.go index 8e10d25a2..ab8466d47 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -1,37 +1,29 @@ package command import ( - "context" - "crypto/tls" "fmt" + "golang.org/x/exp/slices" "net/http" "os" "path" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/util/version" - - hashicorpRaft "github.com/hashicorp/raft" - - "slices" - + "github.com/chrislusf/raft/protobuf" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" "github.com/gorilla/mux" - "github.com/seaweedfs/raft/protobuf" "github.com/spf13/viper" "google.golang.org/grpc/reflection" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/util/grace" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -39,15 +31,14 @@ var ( ) type MasterOptions struct { - port *int - portGrpc *int - ip *string - ipBind *string - metaFolder *string - peers *string - volumeSizeLimitMB *uint - volumePreallocate *bool - maxParallelVacuumPerServer *int + port *int + portGrpc *int + ip *string + ipBind *string + metaFolder *string + peers *string + volumeSizeLimitMB *uint + volumePreallocate *bool // pulseSeconds *int defaultReplication *string garbageThreshold *float64 @@ -57,13 +48,10 @@ type MasterOptions struct { metricsIntervalSec *int raftResumeState *bool metricsHttpPort *int - metricsHttpIp *string heartbeatInterval *time.Duration electionTimeout *time.Duration raftHashicorp *bool raftBootstrap *bool - telemetryUrl *string - telemetryEnabled *bool } func init() { @@ -76,7 +64,6 @@ func init() { m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095") m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") - m.maxParallelVacuumPerServer = cmdMaster.Flag.Int("maxParallelVacuumPerServer", 1, "maximum number of volumes to vacuum in parallel per volume server") // m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "", "Default replication type if not specified.") m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") @@ -85,14 +72,11 @@ func init() { m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address :") m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") m.metricsHttpPort = cmdMaster.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - m.metricsHttpIp = cmdMaster.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state on start master server") m.heartbeatInterval = cmdMaster.Flag.Duration("heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)") m.electionTimeout = cmdMaster.Flag.Duration("electionTimeout", 10*time.Second, "election timeout of master servers") m.raftHashicorp = cmdMaster.Flag.Bool("raftHashicorp", false, "use hashicorp raft") m.raftBootstrap = cmdMaster.Flag.Bool("raftBootstrap", false, "Whether to bootstrap the Raft cluster") - m.telemetryUrl = cmdMaster.Flag.String("telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics") - m.telemetryEnabled = cmdMaster.Flag.Bool("telemetry", false, "enable telemetry reporting") } var cmdMaster = &Command{ @@ -114,14 +98,9 @@ var ( func runMaster(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - // bind viper configuration to command line flags - if v := util.GetViper().GetString("master.mdir"); v != "" { - *m.metaFolder = v - } - grace.SetupProfiling(*masterCpuProfile, *masterMemProfile) parent, _ := util.FullPath(*m.metaFolder).DirAndName() @@ -132,22 +111,17 @@ func runMaster(cmd *Command, args []string) bool { glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } - masterWhiteList := util.StringSplit(*m.whiteList, ",") + var masterWhiteList []string + if *m.whiteList != "" { + masterWhiteList = strings.Split(*m.whiteList, ",") + } if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") } - switch { - case *m.metricsHttpIp != "": - // noting to do, use m.metricsHttpIp - case *m.ipBind != "": - *m.metricsHttpIp = *m.ipBind - case *m.ip != "": - *m.metricsHttpIp = *m.ip - } - go stats_collect.StartMetricsServer(*m.metricsHttpIp, *m.metricsHttpPort) - go stats_collect.LoopPushingMetric("masterServer", util.JoinHostPort(*m.ip, *m.port), *m.metricsAddress, *m.metricsIntervalSec) + go stats_collect.StartMetricsServer(*m.metricsHttpPort) startMaster(m, masterWhiteList) + return true } @@ -172,8 +146,8 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), masterPeers) listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", version.Version(), listeningAddress) - masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOption.ipBind, *masterOption.port, 0) + glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) + masterListener, masterLocalListner, e := util.NewIpAndLocalListeners(*masterOption.ipBind, *masterOption.port, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } @@ -189,25 +163,24 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { RaftResumeState: *masterOption.raftResumeState, HeartbeatInterval: *masterOption.heartbeatInterval, ElectionTimeout: *masterOption.electionTimeout, - RaftBootstrap: *masterOption.raftBootstrap, + RaftBootstrap: *m.raftBootstrap, } var raftServer *weed_server.RaftServer var err error - if *masterOption.raftHashicorp { + if *m.raftHashicorp { if raftServer, err = weed_server.NewHashicorpRaftServer(raftServerOption); err != nil { glog.Fatalf("NewHashicorpRaftServer: %s", err) } } else { raftServer, err = weed_server.NewRaftServer(raftServerOption) if raftServer == nil { - glog.Fatalf("please verify %s is writable, see https://github.com/seaweedfs/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) + glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) } } ms.SetRaftServer(raftServer) - r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods(http.MethodGet, http.MethodHead) - r.HandleFunc("/cluster/healthz", raftServer.HealthzHandler).Methods(http.MethodGet, http.MethodHead) - if *masterOption.raftHashicorp { - r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods(http.MethodGet) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") + if *m.raftHashicorp { + r.HandleFunc("/raft/stats", raftServer.StatsRaftHandler).Methods("GET") } // starting grpc server grpcPort := *masterOption.portGrpc @@ -217,33 +190,31 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) - if *masterOption.raftHashicorp { + if *m.raftHashicorp { raftServer.TransportManager.Register(grpcS) } else { protobuf.RegisterRaftServer(grpcS, raftServer) } reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", version.Version(), *masterOption.ipBind, grpcPort) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) if grpcLocalL != nil { go grpcS.Serve(grpcLocalL) } go grpcS.Serve(grpcL) timeSleep := 1500 * time.Millisecond - if !*masterOption.raftHashicorp { + if !*m.raftHashicorp { go func() { time.Sleep(timeSleep) - - ms.Topo.RaftServerAccessLock.RLock() - isEmptyMaster := ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() - if isEmptyMaster && isTheFirstOne(myMasterAddress, peers) && ms.MasterClient.FindLeaderFromOtherPeers(myMasterAddress) == "" { - raftServer.DoJoinCommand() + if ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() && isTheFirstOne(myMasterAddress, peers) { + if ms.MasterClient.FindLeaderFromOtherPeers(myMasterAddress) == "" { + raftServer.DoJoinCommand() + } } - ms.Topo.RaftServerAccessLock.RUnlock() }() } - go ms.MasterClient.KeepConnectedToMaster(context.Background()) + go ms.MasterClient.KeepConnectedToMaster() // start http server var ( @@ -265,29 +236,21 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { clientCertFile = viper.GetString("https.master.ca") } - if masterLocalListener != nil { - go newHttpServer(r, nil).Serve(masterLocalListener) + httpS := &http.Server{Handler: r} + if masterLocalListner != nil { + go httpS.Serve(masterLocalListner) } - var tlsConfig *tls.Config if useMTLS { - tlsConfig = security.LoadClientTLSHTTP(clientCertFile) - security.FixTlsConfig(util.GetViper(), tlsConfig) + httpS.TLSConfig = security.LoadClientTLSHTTP(clientCertFile) } if useTLS { - go newHttpServer(r, tlsConfig).ServeTLS(masterListener, certFile, keyFile) + go httpS.ServeTLS(masterListener, certFile, keyFile) } else { - go newHttpServer(r, nil).Serve(masterListener) + go httpS.Serve(masterListener) } - grace.OnInterrupt(ms.Shutdown) - grace.OnInterrupt(grpcS.Stop) - grace.OnReload(func() { - if ms.Topo.HashicorpRaft != nil && ms.Topo.HashicorpRaft.State() == hashicorpRaft.Leader { - ms.Topo.HashicorpRaft.LeadershipTransfer() - } - }) select {} } @@ -314,8 +277,8 @@ func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers strin } func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool { - slices.SortFunc(peers, func(a, b pb.ServerAddress) int { - return strings.Compare(string(a), string(b)) + slices.SortFunc(peers, func(a, b pb.ServerAddress) bool { + return strings.Compare(string(a), string(b)) < 0 }) if len(peers) <= 0 { return true @@ -326,11 +289,10 @@ func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool { func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption { masterAddress := pb.NewServerAddress(*m.ip, *m.port, *m.portGrpc) return &weed_server.MasterOption{ - Master: masterAddress, - MetaFolder: *m.metaFolder, - VolumeSizeLimitMB: uint32(*m.volumeSizeLimitMB), - VolumePreallocate: *m.volumePreallocate, - MaxParallelVacuumPerServer: *m.maxParallelVacuumPerServer, + Master: masterAddress, + MetaFolder: *m.metaFolder, + VolumeSizeLimitMB: uint32(*m.volumeSizeLimitMB), + VolumePreallocate: *m.volumePreallocate, // PulseSeconds: *m.pulseSeconds, DefaultReplicaPlacement: *m.defaultReplication, GarbageThreshold: *m.garbageThreshold, @@ -338,7 +300,5 @@ func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOp DisableHttp: *m.disableHttp, MetricsAddress: *m.metricsAddress, MetricsIntervalSec: *m.metricsIntervalSec, - TelemetryUrl: *m.telemetryUrl, - TelemetryEnabled: *m.telemetryEnabled, } } diff --git a/weed/command/master_follower.go b/weed/command/master_follower.go index 55b046092..ec7d2758f 100644 --- a/weed/command/master_follower.go +++ b/weed/command/master_follower.go @@ -3,19 +3,18 @@ package command import ( "context" "fmt" + "net/http" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" "google.golang.org/grpc/reflection" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/version" ) var ( @@ -53,7 +52,7 @@ var cmdMasterFollower = &Command{ In most cases, the master follower is not needed. In big data centers with thousands of volume servers. In theory, the master may have trouble to keep up with the write requests and read requests. - The master follower can relieve the master from read requests, which only needs to + The master follower can relieve the master from from read requests, which only needs to lookup a fileId or volumeId. The master follower currently can handle fileId lookup requests: @@ -69,7 +68,7 @@ var cmdMasterFollower = &Command{ func runMasterFollower(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) if *mf.portGrpc == 0 { @@ -92,7 +91,7 @@ func startMasterFollower(masterOptions MasterOptions) { err = pb.WithOneOfGrpcMasterClients(false, masters, grpcDialOption, func(client master_pb.SeaweedClient) error { resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) if err != nil { - return fmt.Errorf("get master grpc address %v configuration: %w", masters, err) + return fmt.Errorf("get master grpc address %v configuration: %v", masters, err) } masterOptions.defaultReplication = &resp.DefaultReplication masterOptions.volumeSizeLimitMB = aws.Uint(uint(resp.VolumeSizeLimitMB)) @@ -120,8 +119,8 @@ func startMasterFollower(masterOptions MasterOptions) { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, option, masters) listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", version.Version(), listeningAddress) - masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOptions.ipBind, *masterOptions.port, 0) + glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) + masterListener, masterLocalListner, e := util.NewIpAndLocalListeners(*masterOptions.ipBind, *masterOptions.port, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } @@ -135,19 +134,20 @@ func startMasterFollower(masterOptions MasterOptions) { grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", version.Version(), *masterOptions.ip, grpcPort) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort) if grpcLocalL != nil { go grpcS.Serve(grpcLocalL) } go grpcS.Serve(grpcL) - go ms.MasterClient.KeepConnectedToMaster(context.Background()) + go ms.MasterClient.KeepConnectedToMaster() // start http server - if masterLocalListener != nil { - go newHttpServer(r, nil).Serve(masterLocalListener) + httpS := &http.Server{Handler: r} + if masterLocalListner != nil { + go httpS.Serve(masterLocalListner) } - go newHttpServer(r, nil).Serve(masterListener) + go httpS.Serve(masterListener) select {} } diff --git a/weed/command/mount.go b/weed/command/mount.go index 98f139c6f..0046ca03d 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -17,10 +17,8 @@ type MountOptions struct { ttlSec *int chunkSizeLimitMB *int concurrentWriters *int - cacheMetaTtlSec *int - cacheDirForRead *string - cacheDirForWrite *string - cacheSizeMBForRead *int64 + cacheDir *string + cacheSizeMB *int64 dataCenter *string allowOthers *bool umaskString *string @@ -33,16 +31,6 @@ type MountOptions struct { debugPort *int localSocket *string disableXAttr *bool - extraOptions []string - fuseCommandPid int - - // RDMA acceleration options - rdmaEnabled *bool - rdmaSidecarAddr *string - rdmaFallback *bool - rdmaReadOnly *bool - rdmaMaxConcurrent *int - rdmaTimeoutMs *int } var ( @@ -64,11 +52,9 @@ func init() { mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files") - mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers") - mountOptions.cacheDirForRead = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data") - mountOptions.cacheSizeMBForRead = cmdMount.Flag.Int64("cacheCapacityMB", 128, "file chunk read cache capacity in MB") - mountOptions.cacheDirForWrite = cmdMount.Flag.String("cacheDirWrite", "", "buffer writes mostly for large files") - mountOptions.cacheMetaTtlSec = cmdMount.Flag.Int("cacheMetaTtlSec", 60, "metadata cache validity seconds") + mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0") + mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data") + mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 0, "local file chunk cache capacity in MB") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") @@ -81,15 +67,6 @@ func init() { mountOptions.debugPort = cmdMount.Flag.Int("debug.port", 6061, "http port for debugging") mountOptions.localSocket = cmdMount.Flag.String("localSocket", "", "default to /tmp/seaweedfs-mount-.sock") mountOptions.disableXAttr = cmdMount.Flag.Bool("disableXAttr", false, "disable xattr") - mountOptions.fuseCommandPid = 0 - - // RDMA acceleration flags - mountOptions.rdmaEnabled = cmdMount.Flag.Bool("rdma.enabled", false, "enable RDMA acceleration for reads") - mountOptions.rdmaSidecarAddr = cmdMount.Flag.String("rdma.sidecar", "", "RDMA sidecar address (e.g., localhost:8081)") - mountOptions.rdmaFallback = cmdMount.Flag.Bool("rdma.fallback", true, "fallback to HTTP when RDMA fails") - mountOptions.rdmaReadOnly = cmdMount.Flag.Bool("rdma.readOnly", false, "use RDMA for reads only (writes use HTTP)") - mountOptions.rdmaMaxConcurrent = cmdMount.Flag.Int("rdma.maxConcurrent", 64, "max concurrent RDMA operations") - mountOptions.rdmaTimeoutMs = cmdMount.Flag.Int("rdma.timeoutMs", 5000, "RDMA operation timeout in milliseconds") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") @@ -111,18 +88,5 @@ var cmdMount = &Command{ On OS X, it requires OSXFUSE (https://osxfuse.github.io/). - RDMA Acceleration: - For ultra-fast reads, enable RDMA acceleration with an RDMA sidecar: - weed mount -filer=localhost:8888 -dir=/mnt/seaweedfs \ - -rdma.enabled=true -rdma.sidecar=localhost:8081 - - RDMA Options: - -rdma.enabled=false Enable RDMA acceleration for reads - -rdma.sidecar="" RDMA sidecar address (required if enabled) - -rdma.fallback=true Fallback to HTTP when RDMA fails - -rdma.readOnly=false Use RDMA for reads only (writes use HTTP) - -rdma.maxConcurrent=64 Max concurrent RDMA operations - -rdma.timeoutMs=5000 RDMA operation timeout in milliseconds - `, } diff --git a/weed/command/mount_freebsd.go b/weed/command/mount_freebsd.go deleted file mode 100644 index 05d6a1bc4..000000000 --- a/weed/command/mount_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package command - -func checkMountPointAvailable(dir string) bool { - return true -} diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 1d1727519..aebb14e61 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -3,7 +3,6 @@ package command import ( "bufio" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" "io" "os" "strings" @@ -143,9 +142,6 @@ func checkMountPointAvailable(dir string) bool { } if mounted, err := mounted(mountPoint); err != nil || mounted { - if err != nil { - glog.Errorf("check %s: %v", mountPoint, err) - } return false } diff --git a/weed/command/mount_notsupported.go b/weed/command/mount_notsupported.go index 1e5c9f53d..894c8e313 100644 --- a/weed/command/mount_notsupported.go +++ b/weed/command/mount_notsupported.go @@ -1,5 +1,5 @@ -//go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd +//go:build !linux && !darwin +// +build !linux,!darwin package command diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 53b09589d..1aff3c5bb 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -1,11 +1,22 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd +//go:build linux || darwin +// +build linux darwin package command import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/mount" + "github.com/chrislusf/seaweedfs/weed/mount/meta_cache" + "github.com/chrislusf/seaweedfs/weed/mount/unmount" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/mount_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/hanwen/go-fuse/v2/fuse" + "google.golang.org/grpc/reflection" "net" "net/http" "os" @@ -13,25 +24,10 @@ import ( "runtime" "strconv" "strings" - "syscall" "time" - "github.com/seaweedfs/seaweedfs/weed/util/version" - - "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount" - "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" - "github.com/seaweedfs/seaweedfs/weed/mount/unmount" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mount_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "google.golang.org/grpc/reflection" - - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" ) func runMount(cmd *Command, args []string) bool { @@ -64,13 +60,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { // basic checks chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB if chunkSizeLimitMB <= 0 { - fmt.Printf("Please specify a reasonable buffer size.\n") + fmt.Printf("Please specify a reasonable buffer size.") return false } // try to connect to filer filerAddresses := pb.ServerAddresses(*option.filer).ToAddresses() - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") var cipher bool var err error @@ -78,7 +74,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { err = pb.WithOneOfGrpcFilerClients(false, filerAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { - return fmt.Errorf("get filer grpc address %v configuration: %w", filerAddresses, err) + return fmt.Errorf("get filer grpc address %v configuration: %v", filerAddresses, err) } cipher = resp.Cipher return nil @@ -111,7 +107,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { if mountDirHash < 0 { mountDirHash = -mountDirHash } - *option.localSocket = fmt.Sprintf("/tmp/seaweedfs-mount-%d.sock", mountDirHash) + *option.localSocket = fmt.Sprintf("/tmp/seaweefs-mount-%d.sock", mountDirHash) } if err := os.Remove(*option.localSocket); err != nil && !os.IsNotExist(err) { glog.Fatalf("Failed to remove %s, error: %s", *option.localSocket, err.Error()) @@ -161,7 +157,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { // Ensure target mount point availability if isValid := checkMountPointAvailable(dir); !isValid { - glog.Fatalf("Target mount point is not available: %s, please check!", dir) + glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) return true } @@ -170,7 +166,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { // mount fuse fuseMountOptions := &fuse.MountOptions{ AllowOther: *option.allowOthers, - Options: option.extraOptions, + Options: nil, MaxBackground: 128, MaxWrite: 1024 * 1024 * 2, MaxReadAhead: 1024 * 1024 * 2, @@ -186,7 +182,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { DirectMount: true, DirectMountFlags: 0, //SyncRead: false, // set to false to enable the FUSE_CAP_ASYNC_READ capability - EnableAcl: true, + //EnableAcl: true, } if *option.nonempty { fuseMountOptions.Options = append(fuseMountOptions.Options, "nonempty") @@ -205,9 +201,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { ioSizeMB *= 2 } fuseMountOptions.Options = append(fuseMountOptions.Options, "daemon_timeout=600") - if runtime.GOARCH == "amd64" { - fuseMountOptions.Options = append(fuseMountOptions.Options, "noapplexattr") - } + fuseMountOptions.Options = append(fuseMountOptions.Options, "noapplexattr") // fuseMountOptions.Options = append(fuseMountOptions.Options, "novncache") // need to test effectiveness fuseMountOptions.Options = append(fuseMountOptions.Options, "slow_statfs") fuseMountOptions.Options = append(fuseMountOptions.Options, "volname="+serverFriendlyName) @@ -220,11 +214,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { mountRoot = mountRoot[0 : len(mountRoot)-1] } - cacheDirForWrite := *option.cacheDirForWrite - if cacheDirForWrite == "" { - cacheDirForWrite = *option.cacheDirForRead - } - seaweedFileSystem := mount.NewSeaweedFileSystem(&mount.Option{ MountDirectory: dir, FilerAddresses: filerAddresses, @@ -236,10 +225,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { DiskType: types.ToDiskType(*option.diskType), ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, ConcurrentWriters: *option.concurrentWriters, - CacheDirForRead: *option.cacheDirForRead, - CacheSizeMBForRead: *option.cacheSizeMBForRead, - CacheDirForWrite: cacheDirForWrite, - CacheMetaTTlSec: *option.cacheMetaTtlSec, + CacheDir: *option.cacheDir, + CacheSizeMB: *option.cacheSizeMB, DataCenter: *option.dataCenter, Quota: int64(*option.collectionQuota) * 1024 * 1024, MountUid: uid, @@ -252,24 +239,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { Cipher: cipher, UidGidMapper: uidGidMapper, DisableXAttr: *option.disableXAttr, - IsMacOs: runtime.GOOS == "darwin", - // RDMA acceleration options - RdmaEnabled: *option.rdmaEnabled, - RdmaSidecarAddr: *option.rdmaSidecarAddr, - RdmaFallback: *option.rdmaFallback, - RdmaReadOnly: *option.rdmaReadOnly, - RdmaMaxConcurrent: *option.rdmaMaxConcurrent, - RdmaTimeoutMs: *option.rdmaTimeoutMs, }) - // create mount root - mountRootPath := util.FullPath(mountRoot) - mountRootParent, mountDir := mountRootPath.DirAndName() - if err = filer_pb.Mkdir(context.Background(), seaweedFileSystem, mountRootParent, mountDir, nil); err != nil { - fmt.Printf("failed to create dir %s on filer %s: %v\n", mountRoot, filerAddresses, err) - return false - } - server, err := fuse.NewServer(seaweedFileSystem, dir, fuseMountOptions) if err != nil { glog.Fatalf("Mount fail: %v", err) @@ -278,32 +249,16 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { unmount.Unmount(dir) }) - if mountOptions.fuseCommandPid != 0 { - // send a signal to the parent process to notify that the mount is ready - err = syscall.Kill(mountOptions.fuseCommandPid, syscall.SIGTERM) - if err != nil { - fmt.Printf("failed to notify parent process: %v\n", err) - return false - } - } - grpcS := pb.NewGrpcServer() mount_pb.RegisterSeaweedMountServer(grpcS, seaweedFileSystem) reflection.Register(grpcS) go grpcS.Serve(montSocketListener) - err = seaweedFileSystem.StartBackgroundTasks() - if err != nil { - fmt.Printf("failed to start background tasks: %v\n", err) - return false - } + seaweedFileSystem.StartBackgroundTasks() - glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir) - glog.V(0).Infof("This is SeaweedFS version %s %s %s", version.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) server.Serve() - seaweedFileSystem.ClearCacheDir() - return true } diff --git a/weed/command/mq_agent.go b/weed/command/mq_agent.go deleted file mode 100644 index d9c6170a3..000000000 --- a/weed/command/mq_agent.go +++ /dev/null @@ -1,91 +0,0 @@ -package command - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/agent" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "google.golang.org/grpc/reflection" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - mqAgentOptions MessageQueueAgentOptions -) - -type MessageQueueAgentOptions struct { - brokers []pb.ServerAddress - brokersString *string - filerGroup *string - ip *string - port *int -} - -func init() { - cmdMqAgent.Run = runMqAgent // break init cycle - mqAgentOptions.brokersString = cmdMqAgent.Flag.String("broker", "localhost:17777", "comma-separated message queue brokers") - mqAgentOptions.ip = cmdMqAgent.Flag.String("ip", "", "message queue agent host address") - mqAgentOptions.port = cmdMqAgent.Flag.Int("port", 16777, "message queue agent gRPC server port") -} - -var cmdMqAgent = &Command{ - UsageLine: "mq.agent [-port=16777] [-broker=]", - Short: " start a message queue agent", - Long: `start a message queue agent - - The agent runs on local server to accept gRPC calls to write or read messages. - The messages are sent to message queue brokers. - -`, -} - -func runMqAgent(cmd *Command, args []string) bool { - - util.LoadSecurityConfiguration() - - mqAgentOptions.brokers = pb.ServerAddresses(*mqAgentOptions.brokersString).ToAddresses() - - return mqAgentOptions.startQueueAgent() - -} - -func (mqAgentOpt *MessageQueueAgentOptions) startQueueAgent() bool { - - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_agent") - - agentServer := agent.NewMessageQueueAgent(&agent.MessageQueueAgentOptions{ - SeedBrokers: mqAgentOpt.brokers, - }, grpcDialOption) - - // start grpc listener - grpcL, localL, err := util.NewIpAndLocalListeners(*mqAgentOpt.ip, *mqAgentOpt.port, 0) - if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", *mqAgentOpt.port, err) - } - - // Create main gRPC server - grpcS := pb.NewGrpcServer() - mq_agent_pb.RegisterSeaweedMessagingAgentServer(grpcS, agentServer) - reflection.Register(grpcS) - - // Start localhost listener if available - if localL != nil { - localGrpcS := pb.NewGrpcServer() - mq_agent_pb.RegisterSeaweedMessagingAgentServer(localGrpcS, agentServer) - reflection.Register(localGrpcS) - go func() { - glog.V(0).Infof("MQ Agent listening on localhost:%d", *mqAgentOpt.port) - if err := localGrpcS.Serve(localL); err != nil { - glog.Errorf("MQ Agent localhost listener error: %v", err) - } - }() - } - - glog.Infof("Start Seaweed Message Queue Agent on %s:%d", *mqAgentOpt.ip, *mqAgentOpt.port) - grpcS.Serve(grpcL) - - return true - -} diff --git a/weed/command/mq_broker.go b/weed/command/mq_broker.go deleted file mode 100644 index 8ea7f96a4..000000000 --- a/weed/command/mq_broker.go +++ /dev/null @@ -1,135 +0,0 @@ -package command - -import ( - "fmt" - "net/http" - _ "net/http/pprof" - - "google.golang.org/grpc/reflection" - - "github.com/seaweedfs/seaweedfs/weed/util/grace" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/broker" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - mqBrokerStandaloneOptions MessageQueueBrokerOptions -) - -type MessageQueueBrokerOptions struct { - masters map[string]pb.ServerAddress - mastersString *string - filerGroup *string - ip *string - port *int - pprofPort *int - dataCenter *string - rack *string - cpuprofile *string - memprofile *string - logFlushInterval *int -} - -func init() { - cmdMqBroker.Run = runMqBroker // break init cycle - mqBrokerStandaloneOptions.mastersString = cmdMqBroker.Flag.String("master", "localhost:9333", "comma-separated master servers") - mqBrokerStandaloneOptions.filerGroup = cmdMqBroker.Flag.String("filerGroup", "", "share metadata with other filers in the same filerGroup") - mqBrokerStandaloneOptions.ip = cmdMqBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address") - mqBrokerStandaloneOptions.port = cmdMqBroker.Flag.Int("port", 17777, "broker gRPC listen port") - mqBrokerStandaloneOptions.pprofPort = cmdMqBroker.Flag.Int("port.pprof", 0, "HTTP profiling port (0 to disable)") - mqBrokerStandaloneOptions.dataCenter = cmdMqBroker.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center") - mqBrokerStandaloneOptions.rack = cmdMqBroker.Flag.String("rack", "", "prefer to write to volumes in this rack") - mqBrokerStandaloneOptions.cpuprofile = cmdMqBroker.Flag.String("cpuprofile", "", "cpu profile output file") - mqBrokerStandaloneOptions.memprofile = cmdMqBroker.Flag.String("memprofile", "", "memory profile output file") - mqBrokerStandaloneOptions.logFlushInterval = cmdMqBroker.Flag.Int("logFlushInterval", 5, "log buffer flush interval in seconds") -} - -var cmdMqBroker = &Command{ - UsageLine: "mq.broker [-port=17777] [-master=]", - Short: " start a message queue broker", - Long: `start a message queue broker - - The broker can accept gRPC calls to write or read messages. The messages are stored via filer. - The brokers are stateless. To scale up, just add more brokers. - -`, -} - -func runMqBroker(cmd *Command, args []string) bool { - - util.LoadSecurityConfiguration() - - mqBrokerStandaloneOptions.masters = pb.ServerAddresses(*mqBrokerStandaloneOptions.mastersString).ToAddressMap() - - return mqBrokerStandaloneOptions.startQueueServer() - -} - -func (mqBrokerOpt *MessageQueueBrokerOptions) startQueueServer() bool { - - grace.SetupProfiling(*mqBrokerStandaloneOptions.cpuprofile, *mqBrokerStandaloneOptions.memprofile) - - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker") - - qs, err := broker.NewMessageBroker(&broker.MessageQueueBrokerOption{ - Masters: mqBrokerOpt.masters, - FilerGroup: *mqBrokerOpt.filerGroup, - DataCenter: *mqBrokerOpt.dataCenter, - Rack: *mqBrokerOpt.rack, - DefaultReplication: "", - MaxMB: 0, - Ip: *mqBrokerOpt.ip, - Port: *mqBrokerOpt.port, - LogFlushInterval: *mqBrokerOpt.logFlushInterval, - }, grpcDialOption) - if err != nil { - glog.Fatalf("failed to create new message broker for queue server: %v", err) - } - - // start grpc listener - grpcL, localL, err := util.NewIpAndLocalListeners("", *mqBrokerOpt.port, 0) - if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", *mqBrokerOpt.port, err) - } - - // Create main gRPC server - grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) - mq_pb.RegisterSeaweedMessagingServer(grpcS, qs) - reflection.Register(grpcS) - - // Start localhost listener if available - if localL != nil { - localGrpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) - mq_pb.RegisterSeaweedMessagingServer(localGrpcS, qs) - reflection.Register(localGrpcS) - go func() { - glog.V(0).Infof("MQ Broker listening on localhost:%d", *mqBrokerOpt.port) - if err := localGrpcS.Serve(localL); err != nil { - glog.Errorf("MQ Broker localhost listener error: %v", err) - } - }() - } - - // Start HTTP profiling server if enabled - if mqBrokerOpt.pprofPort != nil && *mqBrokerOpt.pprofPort > 0 { - go func() { - pprofAddr := fmt.Sprintf(":%d", *mqBrokerOpt.pprofPort) - glog.V(0).Infof("MQ Broker pprof server listening on %s", pprofAddr) - glog.V(0).Infof("Access profiling at: http://localhost:%d/debug/pprof/", *mqBrokerOpt.pprofPort) - if err := http.ListenAndServe(pprofAddr, nil); err != nil { - glog.Errorf("pprof server error: %v", err) - } - }() - } - - glog.V(0).Infof("MQ Broker listening on %s:%d", *mqBrokerOpt.ip, *mqBrokerOpt.port) - grpcS.Serve(grpcL) - - return true - -} diff --git a/weed/command/mq_kafka_gateway.go b/weed/command/mq_kafka_gateway.go deleted file mode 100644 index 614f03e9c..000000000 --- a/weed/command/mq_kafka_gateway.go +++ /dev/null @@ -1,143 +0,0 @@ -package command - -import ( - "fmt" - "net/http" - _ "net/http/pprof" - "os" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/gateway" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - mqKafkaGatewayOptions mqKafkaGatewayOpts -) - -type mqKafkaGatewayOpts struct { - ip *string - ipBind *string - port *int - pprofPort *int - master *string - filerGroup *string - schemaRegistryURL *string - defaultPartitions *int -} - -func init() { - cmdMqKafkaGateway.Run = runMqKafkaGateway - mqKafkaGatewayOptions.ip = cmdMqKafkaGateway.Flag.String("ip", util.DetectedHostAddress(), "Kafka gateway advertised host address") - mqKafkaGatewayOptions.ipBind = cmdMqKafkaGateway.Flag.String("ip.bind", "", "Kafka gateway bind address (default: same as -ip)") - mqKafkaGatewayOptions.port = cmdMqKafkaGateway.Flag.Int("port", 9092, "Kafka gateway listen port") - mqKafkaGatewayOptions.pprofPort = cmdMqKafkaGateway.Flag.Int("port.pprof", 0, "HTTP profiling port (0 to disable)") - mqKafkaGatewayOptions.master = cmdMqKafkaGateway.Flag.String("master", "localhost:9333", "comma-separated SeaweedFS master servers") - mqKafkaGatewayOptions.filerGroup = cmdMqKafkaGateway.Flag.String("filerGroup", "", "filer group name") - mqKafkaGatewayOptions.schemaRegistryURL = cmdMqKafkaGateway.Flag.String("schema-registry-url", "", "Schema Registry URL (required for schema management)") - mqKafkaGatewayOptions.defaultPartitions = cmdMqKafkaGateway.Flag.Int("default-partitions", 4, "Default number of partitions for auto-created topics") -} - -var cmdMqKafkaGateway = &Command{ - UsageLine: "mq.kafka.gateway [-ip=] [-ip.bind=] [-port=9092] [-master=] [-filerGroup=] [-default-partitions=4] -schema-registry-url=", - Short: "start a Kafka wire-protocol gateway for SeaweedMQ with schema management", - Long: `Start a Kafka wire-protocol gateway translating Kafka client requests to SeaweedMQ. - -Connects to SeaweedFS master servers to discover available brokers and integrates with -Schema Registry for schema-aware topic management. - -Options: - -ip Advertised host address that clients should connect to (default: auto-detected) - -ip.bind Bind address for the gateway to listen on (default: same as -ip) - Use 0.0.0.0 to bind to all interfaces while advertising specific IP - -port Listen port (default: 9092) - -default-partitions Default number of partitions for auto-created topics (default: 4) - -schema-registry-url Schema Registry URL (REQUIRED for schema management) - -Examples: - weed mq.kafka.gateway -port=9092 -master=localhost:9333 -schema-registry-url=http://localhost:8081 - weed mq.kafka.gateway -ip=gateway1 -port=9092 -master=master1:9333,master2:9333 -schema-registry-url=http://schema-registry:8081 - weed mq.kafka.gateway -ip=external.host.com -ip.bind=0.0.0.0 -master=localhost:9333 -schema-registry-url=http://schema-registry:8081 - -This is experimental and currently supports a minimal subset for development. -`, -} - -func runMqKafkaGateway(cmd *Command, args []string) bool { - // Validate required options - if *mqKafkaGatewayOptions.master == "" { - glog.Fatalf("SeaweedFS master address is required (-master)") - return false - } - - // Schema Registry URL is required for schema management - if *mqKafkaGatewayOptions.schemaRegistryURL == "" { - glog.Fatalf("Schema Registry URL is required (-schema-registry-url)") - return false - } - - // Determine bind address - default to advertised IP if not specified - bindIP := *mqKafkaGatewayOptions.ipBind - if bindIP == "" { - bindIP = *mqKafkaGatewayOptions.ip - } - - // Construct listen address from bind IP and port - listenAddr := fmt.Sprintf("%s:%d", bindIP, *mqKafkaGatewayOptions.port) - - // Set advertised host for Kafka protocol handler - if err := os.Setenv("KAFKA_ADVERTISED_HOST", *mqKafkaGatewayOptions.ip); err != nil { - glog.Warningf("Failed to set KAFKA_ADVERTISED_HOST environment variable: %v", err) - } - - srv := gateway.NewServer(gateway.Options{ - Listen: listenAddr, - Masters: *mqKafkaGatewayOptions.master, - FilerGroup: *mqKafkaGatewayOptions.filerGroup, - SchemaRegistryURL: *mqKafkaGatewayOptions.schemaRegistryURL, - DefaultPartitions: int32(*mqKafkaGatewayOptions.defaultPartitions), - }) - - glog.Warningf("EXPERIMENTAL FEATURE: MQ Kafka Gateway is experimental and should NOT be used in production environments. It currently supports only a minimal subset of Kafka protocol for development purposes.") - - // Show bind vs advertised addresses for clarity - if bindIP != *mqKafkaGatewayOptions.ip { - glog.V(0).Infof("Starting MQ Kafka Gateway: binding to %s, advertising %s:%d to clients", - listenAddr, *mqKafkaGatewayOptions.ip, *mqKafkaGatewayOptions.port) - } else { - glog.V(0).Infof("Starting MQ Kafka Gateway on %s", listenAddr) - } - glog.V(0).Infof("Using SeaweedMQ brokers from masters: %s", *mqKafkaGatewayOptions.master) - - // Start HTTP profiling server if enabled - if *mqKafkaGatewayOptions.pprofPort > 0 { - go func() { - pprofAddr := fmt.Sprintf(":%d", *mqKafkaGatewayOptions.pprofPort) - glog.V(0).Infof("Kafka Gateway pprof server listening on %s", pprofAddr) - glog.V(0).Infof("Access profiling at: http://localhost:%d/debug/pprof/", *mqKafkaGatewayOptions.pprofPort) - if err := http.ListenAndServe(pprofAddr, nil); err != nil { - glog.Errorf("pprof server error: %v", err) - } - }() - } - - if err := srv.Start(); err != nil { - glog.Fatalf("mq kafka gateway start: %v", err) - return false - } - - // Set up graceful shutdown - defer func() { - glog.V(0).Infof("Shutting down MQ Kafka Gateway...") - if err := srv.Close(); err != nil { - glog.Errorf("mq kafka gateway close: %v", err) - } - }() - - // Serve blocks until closed - if err := srv.Wait(); err != nil { - glog.Errorf("mq kafka gateway wait: %v", err) - return false - } - return true -} diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..3274f599b --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,109 @@ +package command + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions MessageBrokerOptions +) + +type MessageBrokerOptions struct { + filer *string + ip *string + port *int + cpuprofile *string + memprofile *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port") + messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file") + messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msgBroker [-port=17777] [-filer=]", + Short: "start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. + +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool { + + grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile) + + filerAddress := pb.ServerAddress(*msgBrokerOpt.filer) + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker") + cipher := false + + for { + err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress()) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerAddress.ToGrpcAddress()) + break + } + } + + qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{ + Filers: []pb.ServerAddress{filerAddress}, + DefaultReplication: "", + MaxMB: 0, + Ip: *msgBrokerOpt.ip, + Port: *msgBrokerOpt.port, + Cipher: cipher, + }, grpcDialOption) + + // start grpc listener + grpcL, _, err := util.NewIpAndLocalListeners("", *msgBrokerOpt.port, 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/s3.go b/weed/command/s3.go index fa575b3db..42e447d90 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -2,31 +2,23 @@ package command import ( "context" - "crypto/tls" - "crypto/x509" "fmt" - "io/ioutil" - "net" - "os" - "runtime" - "strings" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "google.golang.org/grpc/reflection" + "net/http" "time" - "github.com/gorilla/mux" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/credentials/tls/certprovider/pemfile" - "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/security" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -37,26 +29,16 @@ type S3Options struct { filer *string bindIp *string port *int - portHttps *int portGrpc *int config *string - iamConfig *string domainName *string - allowedOrigins *string tlsPrivateKey *string tlsCertificate *string - tlsCACertificate *string - tlsVerifyClientCert *bool metricsHttpPort *int - metricsHttpIp *string allowEmptyFolder *bool allowDeleteBucketNotEmpty *bool auditLogConfig *string localFilerSocket *string - dataCenter *string - localSocket *string - certProvider certprovider.Provider - idleTimeout *int } func init() { @@ -64,25 +46,15 @@ func init() { s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") s3StandaloneOptions.bindIp = cmdS3.Flag.String("ip.bind", "", "ip address to bind to. Default to localhost.") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") - s3StandaloneOptions.portHttps = cmdS3.Flag.Int("port.https", 0, "s3 server https listen port") s3StandaloneOptions.portGrpc = cmdS3.Flag.Int("port.grpc", 0, "s3 server grpc listen port") s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") - s3StandaloneOptions.allowedOrigins = cmdS3.Flag.String("allowedOrigins", "*", "comma separated list of allowed origins") - s3StandaloneOptions.dataCenter = cmdS3.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center") s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") - s3StandaloneOptions.iamConfig = cmdS3.Flag.String("iam.config", "", "path to the advanced IAM config file") s3StandaloneOptions.auditLogConfig = cmdS3.Flag.String("auditLogConfig", "", "path to the audit log config file") s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") - s3StandaloneOptions.tlsCACertificate = cmdS3.Flag.String("cacert.file", "", "path to the TLS CA certificate file") - s3StandaloneOptions.tlsVerifyClientCert = cmdS3.Flag.Bool("tlsVerifyClientCert", false, "whether to verify the client's certificate") s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - s3StandaloneOptions.metricsHttpIp = cmdS3.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", true, "allow empty folders") s3StandaloneOptions.allowDeleteBucketNotEmpty = cmdS3.Flag.Bool("allowDeleteBucketNotEmpty", true, "allow recursive deleting all entries along with bucket") - s3StandaloneOptions.localFilerSocket = cmdS3.Flag.String("localFilerSocket", "", "local filer socket path") - s3StandaloneOptions.localSocket = cmdS3.Flag.String("localSocket", "", "default to /tmp/seaweedfs-s3-.sock") - s3StandaloneOptions.idleTimeout = cmdS3.Flag.Int("idleTimeout", 10, "connection idle seconds") } var cmdS3 = &Command{ @@ -162,48 +134,24 @@ var cmdS3 = &Command{ ] } - Alternatively, you can use environment variables as fallback admin credentials: - - AWS_ACCESS_KEY_ID=your_access_key AWS_SECRET_ACCESS_KEY=your_secret_key weed s3 - - Environment variables are only used when no S3 configuration file is provided - and no configuration is available from the filer. This provides a simple way - to get started without requiring configuration files. - `, } func runS3(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) - switch { - case *s3StandaloneOptions.metricsHttpIp != "": - // noting to do, use s3StandaloneOptions.metricsHttpIp - case *s3StandaloneOptions.bindIp != "": - *s3StandaloneOptions.metricsHttpIp = *s3StandaloneOptions.bindIp - } - go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpIp, *s3StandaloneOptions.metricsHttpPort) + go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort) return s3StandaloneOptions.startS3Server() } -// GetCertificateWithUpdate Auto refreshing TSL certificate -func (s3opt *S3Options) GetCertificateWithUpdate(*tls.ClientHelloInfo) (*tls.Certificate, error) { - certs, err := s3opt.certProvider.KeyMaterial(context.Background()) - if certs == nil { - return nil, err - } - return &certs.Certs[0], err -} - func (s3opt *S3Options) startS3Server() bool { filerAddress := pb.ServerAddress(*s3opt.filer) filerBucketsPath := "/buckets" - filerGroup := "" grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") @@ -212,13 +160,12 @@ func (s3opt *S3Options) startS3Server() bool { var metricsIntervalSec int for { - err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) } filerBucketsPath = resp.DirBuckets - filerGroup = resp.FilerGroup metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) return nil @@ -235,41 +182,24 @@ func (s3opt *S3Options) startS3Server() bool { go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec) router := mux.NewRouter().SkipClean(true) - var localFilerSocket string - if s3opt.localFilerSocket != nil { - localFilerSocket = *s3opt.localFilerSocket - } - var s3ApiServer *s3api.S3ApiServer - var s3ApiServer_err error - // Create S3 server with optional advanced IAM integration - var iamConfigPath string - if s3opt.iamConfig != nil && *s3opt.iamConfig != "" { - iamConfigPath = *s3opt.iamConfig - glog.V(0).Infof("Starting S3 API Server with advanced IAM integration") - } else { - glog.V(0).Infof("Starting S3 API Server with standard IAM") - } - - s3ApiServer, s3ApiServer_err = s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ + s3ApiServer, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ Filer: filerAddress, Port: *s3opt.port, Config: *s3opt.config, DomainName: *s3opt.domainName, - AllowedOrigins: strings.Split(*s3opt.allowedOrigins, ","), BucketsPath: filerBucketsPath, GrpcDialOption: grpcDialOption, AllowEmptyFolder: *s3opt.allowEmptyFolder, AllowDeleteBucketNotEmpty: *s3opt.allowDeleteBucketNotEmpty, - LocalFilerSocket: localFilerSocket, - DataCenter: *s3opt.dataCenter, - FilerGroup: filerGroup, - IamConfig: iamConfigPath, // Advanced IAM config (optional) + LocalFilerSocket: s3opt.localFilerSocket, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) } + httpS := &http.Server{Handler: router} + if *s3opt.portGrpc == 0 { *s3opt.portGrpc = 10000 + *s3opt.port } @@ -277,27 +207,8 @@ func (s3opt *S3Options) startS3Server() bool { *s3opt.bindIp = "localhost" } - if runtime.GOOS != "windows" { - localSocket := *s3opt.localSocket - if localSocket == "" { - localSocket = fmt.Sprintf("/tmp/seaweedfs-s3-%d.sock", *s3opt.port) - } - if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) - } - go func() { - // start on local unix socket - s3SocketListener, err := net.Listen("unix", localSocket) - if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) - } - newHttpServer(router, nil).Serve(s3SocketListener) - }() - } - listenAddress := fmt.Sprintf("%s:%d", *s3opt.bindIp, *s3opt.port) - s3ApiListener, s3ApiLocalListener, err := util.NewIpAndLocalListeners( - *s3opt.bindIp, *s3opt.port, time.Duration(*s3opt.idleTimeout)*time.Second) + s3ApiListener, s3ApiLocalListner, err := util.NewIpAndLocalListeners(*s3opt.bindIp, *s3opt.port, time.Duration(10)*time.Second) if err != nil { glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) } @@ -324,79 +235,27 @@ func (s3opt *S3Options) startS3Server() bool { go grpcS.Serve(grpcL) if *s3opt.tlsPrivateKey != "" { - pemfileOptions := pemfile.Options{ - CertFile: *s3opt.tlsCertificate, - KeyFile: *s3opt.tlsPrivateKey, - RefreshDuration: security.CredRefreshingInterval, - } - if s3opt.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil { - glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) - } - - caCertPool := x509.NewCertPool() - if *s3opt.tlsCACertificate != "" { - // load CA certificate file and add it to list of client CAs - caCertFile, err := ioutil.ReadFile(*s3opt.tlsCACertificate) - if err != nil { - glog.Fatalf("error reading CA certificate: %v", err) - } - caCertPool.AppendCertsFromPEM(caCertFile) - } - - clientAuth := tls.NoClientCert - if *s3opt.tlsVerifyClientCert { - clientAuth = tls.RequireAndVerifyClientCert - } - - tlsConfig := &tls.Config{ - GetCertificate: s3opt.GetCertificateWithUpdate, - ClientAuth: clientAuth, - ClientCAs: caCertPool, - } - err = security.FixTlsConfig(util.GetViper(), tlsConfig) - if err != nil { - glog.Fatalf("error with tls config: %v", err) - } - if *s3opt.portHttps == 0 { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", version.Version(), *s3opt.port) - if s3ApiLocalListener != nil { - go func() { - if err = newHttpServer(router, tlsConfig).ServeTLS(s3ApiLocalListener, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) - } - }() - } - if err = newHttpServer(router, tlsConfig).ServeTLS(s3ApiListener, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) - } - } else { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", version.Version(), *s3opt.portHttps) - s3ApiListenerHttps, s3ApiLocalListenerHttps, _ := util.NewIpAndLocalListeners( - *s3opt.bindIp, *s3opt.portHttps, time.Duration(*s3opt.idleTimeout)*time.Second) - if s3ApiLocalListenerHttps != nil { - go func() { - if err = newHttpServer(router, tlsConfig).ServeTLS(s3ApiLocalListenerHttps, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) - } - }() - } + glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) + if s3ApiLocalListner != nil { go func() { - if err = newHttpServer(router, tlsConfig).ServeTLS(s3ApiListenerHttps, "", ""); err != nil { + if err = httpS.ServeTLS(s3ApiLocalListner, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } }() } - } - if *s3opt.tlsPrivateKey == "" || *s3opt.portHttps > 0 { - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", version.Version(), *s3opt.port) - if s3ApiLocalListener != nil { + if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { + glog.Fatalf("S3 API Server Fail to serve: %v", err) + } + } else { + glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) + if s3ApiLocalListner != nil { go func() { - if err = newHttpServer(router, nil).Serve(s3ApiLocalListener); err != nil { + if err = httpS.Serve(s3ApiLocalListner); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } }() } - if err = newHttpServer(router, nil).Serve(s3ApiListener); err != nil { + if err = httpS.Serve(s3ApiListener); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } } diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 26de2e1fd..fb81f9966 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -2,11 +2,10 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "path/filepath" - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/command/scaffold" + "github.com/chrislusf/seaweedfs/weed/command/scaffold" ) func init() { @@ -14,24 +13,24 @@ func init() { } var cmdScaffold = &Command{ - UsageLine: "scaffold -config=[filer|notification|replication|security|master|shell|credential]", + UsageLine: "scaffold -config=[filer|notification|replication|security|master]", Short: "generate basic configuration files", - Long: `Generate configuration files with all possible configurations for you to customize. + Long: `Generate filer.toml with all possible configurations for you to customize. The options can also be overwritten by environment variables. For example, the filer.toml mysql password can be overwritten by environment variable export WEED_MYSQL_PASSWORD=some_password Environment variable rules: - * Prefix the variable name with "WEED_". - * Uppercase the rest of the variable name. - * Replace '.' with '_'. + * Prefix the variable name with "WEED_" + * Upppercase the reset of variable name. + * Replace '.' with '_' `, } var ( outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory") - config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master|shell|credential] the configuration file to generate") + config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate") ) func runScaffold(cmd *Command, args []string) bool { @@ -50,8 +49,6 @@ func runScaffold(cmd *Command, args []string) bool { content = scaffold.Master case "shell": content = scaffold.Shell - case "credential": - content = scaffold.Credential } if content == "" { println("need a valid -config option") diff --git a/weed/command/scaffold/credential.toml b/weed/command/scaffold/credential.toml deleted file mode 100644 index d217786d6..000000000 --- a/weed/command/scaffold/credential.toml +++ /dev/null @@ -1,47 +0,0 @@ -# Put this file to one of the location, with descending priority -# ./credential.toml -# $HOME/.seaweedfs/credential.toml -# /etc/seaweedfs/credential.toml -# this file is read by S3 API and IAM API servers - -# Choose one of the credential stores below -# Only one store can be enabled at a time - -# Filer-based credential store (default, uses existing filer storage) -[credential.filer_etc] -enabled = true -# filer address and grpc_dial_option will be automatically configured by the server - - -# PostgreSQL credential store (recommended for multi-node deployments) -[credential.postgres] -enabled = false -hostname = "localhost" -port = 5432 -username = "seaweedfs" -password = "your_password" -database = "seaweedfs" -schema = "public" -sslmode = "disable" -# Optional: table name prefix (default: "sw_") -table_prefix = "sw_" -# Connection pool settings -connection_max_idle = 10 -connection_max_open = 100 -connection_max_lifetime_seconds = 3600 - -# Memory credential store (for testing only, data is lost on restart) -[credential.memory] -enabled = false - -# Environment variable overrides: -# Any configuration value can be overridden by environment variables -# Rules: -# * Prefix with "WEED_CREDENTIAL_" -# * Convert to uppercase -# * Replace '.' with '_' -# -# Examples: -# export WEED_CREDENTIAL_POSTGRES_PASSWORD=secret -# export WEED_CREDENTIAL_POSTGRES_HOSTNAME=db.example.com -# export WEED_CREDENTIAL_FILER_ETC_ENABLED=true diff --git a/weed/command/scaffold/example.go b/weed/command/scaffold/example.go index 26d0a306c..6be6804e5 100644 --- a/weed/command/scaffold/example.go +++ b/weed/command/scaffold/example.go @@ -19,6 +19,3 @@ var Master string //go:embed shell.toml var Shell string - -//go:embed credential.toml -var Credential string diff --git a/weed/command/scaffold/filer.toml b/weed/command/scaffold/filer.toml index 080d8f78b..860d8b291 100644 --- a/weed/command/scaffold/filer.toml +++ b/weed/command/scaffold/filer.toml @@ -12,7 +12,6 @@ # with http DELETE, by default the filer would check whether a folder is empty. # recursive_delete will delete all sub folders and files, similar to "rm -Rf" recursive_delete = false -#max_file_name_length = 255 #################################################### # The following are filer store options @@ -42,22 +41,15 @@ enabled = false dbFile = "./filer.db" # sqlite db file [mysql] # or memsql, tidb -# CREATE TABLE IF NOT EXISTS `filemeta` ( -# `dirhash` BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field', -# `name` VARCHAR(766) NOT NULL COMMENT 'directory or file name', -# `directory` TEXT NOT NULL COMMENT 'full path to parent directory', -# `meta` LONGBLOB, -# PRIMARY KEY (`dirhash`, `name`) -# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +# CREATE TABLE IF NOT EXISTS filemeta ( +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) BINARY COMMENT 'directory or file name', +# directory TEXT BINARY COMMENT 'full path to parent directory', +# meta LONGBLOB, +# PRIMARY KEY (dirhash, name) +# ) DEFAULT CHARSET=utf8; enabled = false -# dsn will take priority over "hostname, port, username, password, database". -# [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] -dsn = "root@tcp(localhost:3306)/seaweedfs?collation=utf8mb4_bin" -enable_tls = false -ca_crt = "" # ca.crt dir when enable_tls set true -client_crt = "" # mysql client.crt dir when enable_tls set true -client_key = "" # mysql client.key dir when enable_tls set true hostname = "localhost" port = 3306 username = "root" @@ -69,18 +61,18 @@ connection_max_lifetime_seconds = 0 interpolateParams = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`""" +upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" [mysql2] # or memsql, tidb enabled = false createTable = """ CREATE TABLE IF NOT EXISTS `%s` ( - `dirhash` BIGINT NOT NULL, - `name` VARCHAR(766) NOT NULL, - `directory` TEXT NOT NULL, - `meta` LONGBLOB, - PRIMARY KEY (`dirhash`, `name`) - ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + dirhash BIGINT, + name VARCHAR(1000) BINARY, + directory TEXT BINARY, + meta LONGBLOB, + PRIMARY KEY (dirhash, name) + ) DEFAULT CHARSET=utf8; """ hostname = "localhost" port = 3306 @@ -93,7 +85,7 @@ connection_max_lifetime_seconds = 0 interpolateParams = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`""" +upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" [postgres] # or cockroachdb, YugabyteDB # CREATE TABLE IF NOT EXISTS filemeta ( @@ -111,26 +103,12 @@ password = "" database = "postgres" # create or use an existing database schema = "" sslmode = "disable" -# SSL certificate options for secure connections -# For sslmode=verify-full, uncomment and configure the following: -# sslcert = "/path/to/client.crt" # client certificate file -# sslkey = "/path/to/client.key" # client private key file -# sslrootcert = "/path/to/ca.crt" # CA certificate file -# sslcrl = "/path/to/client.crl" # Certificate Revocation List (CRL) (optional) connection_max_idle = 100 connection_max_open = 100 connection_max_lifetime_seconds = 0 -# Set to true when using PgBouncer connection pooler -pgbouncer_compatible = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """ - INSERT INTO "%[1]s" (dirhash, name, directory, meta) - VALUES($1, $2, $3, $4) - ON CONFLICT (dirhash, name) DO UPDATE SET - directory=EXCLUDED.directory, - meta=EXCLUDED.meta -""" +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" [postgres2] enabled = false @@ -150,34 +128,19 @@ password = "" database = "postgres" # create or use an existing database schema = "" sslmode = "disable" -# SSL certificate options for secure connections -# For sslmode=verify-full, uncomment and configure the following: -# sslcert = "/path/to/client.crt" # client certificate file -# sslkey = "/path/to/client.key" # client private key file -# sslrootcert = "/path/to/ca.crt" # CA certificate file -# sslcrl = "/path/to/client.crl" # Certificate Revocation List (CRL) (optional) connection_max_idle = 100 connection_max_open = 100 connection_max_lifetime_seconds = 0 -# Set to true when using PgBouncer connection pooler -pgbouncer_compatible = false # if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: enableUpsert = true -upsertQuery = """ - INSERT INTO "%[1]s" (dirhash, name, directory, meta) - VALUES($1, $2, $3, $4) - ON CONFLICT (dirhash, name) DO UPDATE SET - directory=EXCLUDED.directory, - meta=EXCLUDED.meta -""" +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" -[cassandra2] +[cassandra] # CREATE TABLE filemeta ( -# dirhash bigint, # directory varchar, # name varchar, # meta blob, -# PRIMARY KEY ((dirhash, directory), name) +# PRIMARY KEY (directory, name) # ) WITH CLUSTERING ORDER BY (name ASC); enabled = false keyspace = "seaweedfs" @@ -203,10 +166,6 @@ enabled = false address = "localhost:6379" password = "" database = 0 -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" # This changes the data layout. Only add new directories. Removing/Updating will cause data loss. superLargeDirectories = [] @@ -217,10 +176,6 @@ masterName = "master" username = "" password = "" database = 0 -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" [redis_cluster2] enabled = false @@ -233,10 +188,6 @@ addresses = [ "localhost:30006", ] password = "" -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" # allows reads from slave servers or the master, but all writes still go to the master readOnly = false # automatically use the closest Redis server for reads @@ -244,16 +195,11 @@ routeByLatency = false # This changes the data layout. Only add new directories. Removing/Updating will cause data loss. superLargeDirectories = [] -# The following lua redis stores uses lua to ensure atomicity [redis_lua] enabled = false address = "localhost:6379" password = "" database = 0 -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" # This changes the data layout. Only add new directories. Removing/Updating will cause data loss. superLargeDirectories = [] @@ -264,10 +210,6 @@ masterName = "master" username = "" password = "" database = 0 -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" [redis_lua_cluster] enabled = false @@ -280,10 +222,6 @@ addresses = [ "localhost:30006", ] password = "" -enable_tls = false -ca_cert_path = "" -client_cert_path = "" -client_key_path = "" # allows reads from slave servers or the master, but all writes still go to the master readOnly = false # automatically use the closest Redis server for reads @@ -291,30 +229,44 @@ routeByLatency = false # This changes the data layout. Only add new directories. Removing/Updating will cause data loss. superLargeDirectories = [] +[redis3] # beta +enabled = false +address = "localhost:6379" +password = "" +database = 0 + +[redis3_sentinel] +enabled = false +addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"] +masterName = "master" +username = "" +password = "" +database = 0 + +[redis_cluster3] # beta +enabled = false +addresses = [ + "localhost:30001", + "localhost:30002", + "localhost:30003", + "localhost:30004", + "localhost:30005", + "localhost:30006", +] +password = "" +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = false +# automatically use the closest Redis server for reads +routeByLatency = false + [etcd] enabled = false servers = "localhost:2379" -username = "" -password = "" -key_prefix = "seaweedfs." timeout = "3s" -# Set the CA certificate path -tls_ca_file="" -# Set the client certificate path -tls_client_crt_file="" -# Set the client private key path -tls_client_key_file="" [mongodb] enabled = false uri = "mongodb://localhost:27017" -username = "" -password = "" -ssl = false -ssl_ca_file = "" -ssl_cert_file = "" -ssl_key_file = "" -insecure_skip_verify = false option_pool_size = 0 database = "seaweedfs" @@ -361,8 +313,8 @@ dialTimeOut = 10 ########################## # To add path-specific filer store: # -# 1. Add a name following the store type separated by a dot ".". E.g., cassandra2.tmp -# 2. Add a location configuration. E.g., location = "/tmp/" +# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp +# 2. Add a location configuraiton. E.g., location = "/tmp/" # 3. Copy and customize all other configurations. # Make sure they are not the same if using the same store type! # 4. Set enabled to true @@ -392,13 +344,4 @@ cert_path="" # Set the private key path key_path="" # The name list used to verify the cn name -verify_cn="" - -[tarantool] -address = "localhost:3301" -user = "guest" -password = "" -timeout = "5s" -maxReconnects = 1000 - - +verify_cn="" \ No newline at end of file diff --git a/weed/command/scaffold/master.toml b/weed/command/scaffold/master.toml index 5b58992c8..10d9d1914 100644 --- a/weed/command/scaffold/master.toml +++ b/weed/command/scaffold/master.toml @@ -13,7 +13,7 @@ scripts = """ ec.balance -force volume.deleteEmpty -quietFor=24h -force volume.balance -force - volume.fix.replication -force + volume.fix.replication s3.clean.uploads -timeAgo=24h unlock """ @@ -49,8 +49,6 @@ copy_1 = 7 # create 1 x 7 = 7 actual volumes copy_2 = 6 # create 2 x 6 = 12 actual volumes copy_3 = 3 # create 3 x 3 = 9 actual volumes copy_other = 1 # create n x 1 = n actual volumes -threshold = 0.9 # create threshold -disable = false # disables volume growth if true # configuration flags for replication [master.replication] diff --git a/weed/command/scaffold/notification.toml b/weed/command/scaffold/notification.toml index 4ddb3d4f6..f35101edd 100644 --- a/weed/command/scaffold/notification.toml +++ b/weed/command/scaffold/notification.toml @@ -10,7 +10,7 @@ # send and receive filer updates for each file to an external message queue #################################################### [notification.log] -# this is only for debugging purpose and does not work with "weed filer.replicate" +# this is only for debugging perpose and does not work with "weed filer.replicate" enabled = false diff --git a/weed/command/scaffold/replication.toml b/weed/command/scaffold/replication.toml index b23a1ef46..c463c8077 100644 --- a/weed/command/scaffold/replication.toml +++ b/weed/command/scaffold/replication.toml @@ -13,8 +13,6 @@ grpcAddress = "localhost:18888" # this is not a directory on your hard drive, but on your filer. # i.e., all files with this "prefix" are sent to notification message queue. directory = "/buckets" -# files from the directory separated by space are excluded from sending notifications -excludeDirectories = "/buckets/tmp" [sink.local] enabled = false @@ -68,7 +66,6 @@ is_incremental = false enabled = false b2_account_id = "" b2_master_application_key = "" -b2_region = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory is_incremental = false diff --git a/weed/command/scaffold/security.toml b/weed/command/scaffold/security.toml index 10f472d81..e5452cdff 100644 --- a/weed/command/scaffold/security.toml +++ b/weed/command/scaffold/security.toml @@ -2,12 +2,7 @@ # ./security.toml # $HOME/.seaweedfs/security.toml # /etc/seaweedfs/security.toml -# this file is read by master, volume server, filer, and worker - -# comma separated origins allowed to make requests to the filer and s3 gateway. -# enter in this format: https://domain.com, or http://localhost:port -[cors.allowed_origins] -values = "*" +# this file is read by master, volume server, and filer # this jwt signing key is read by master and volume server, and it is used for write operations: # - the Master server generates the JWT, which can be used to write a certain file on a volume server @@ -24,13 +19,6 @@ expires_after_seconds = 10 # seconds [access] ui = false -# by default the filer UI is enabled. This can be a security risk if the filer is exposed to the public -# and the JWT for reads is not set. If you don't want the public to have access to the objects in your -# storage, and you haven't set the JWT for reads it is wise to disable access to directory metadata. -# This disables access to the Filer UI, and will no longer return directory metadata in GET requests. -[filer.expose_directory_metadata] -enabled = true - # this jwt signing key is read by master and volume server, and it is used for read operations: # - the Master server generates the JWT, which can be used to read a certain file on a volume server # - the Volume server validates the JWT on reading @@ -89,65 +77,24 @@ cert = "" key = "" allowed_commonNames = "" # comma-separated SSL certificate common names -[grpc.msg_agent] -cert = "" -key = "" -allowed_commonNames = "" # comma-separated SSL certificate common names - -[grpc.admin] -cert = "" -key = "" -allowed_commonNames = "" # comma-separated SSL certificate common names - -[grpc.worker] -cert = "" -key = "" -allowed_commonNames = "" # comma-separated SSL certificate common names - -[grpc.mq] -cert = "" -key = "" -allowed_commonNames = "" # comma-separated SSL certificate common names - # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" [grpc.client] cert = "" key = "" -# https client for master|volume|filer|etc connection -# It is necessary that the parameters [https.volume]|[https.master]|[https.filer]|[https.admin] are set -[https.client] -enabled = false -cert = "" -key = "" -ca = "" - # volume server https options +# Note: work in progress! +# this does not work with other clients, e.g., "weed filer|mount" etc, yet. +[https.client] +enabled = true + [https.volume] cert = "" key = "" ca = "" -# master server https options [https.master] cert = "" key = "" ca = "" - -# filer server https options -[https.filer] -cert = "" -key = "" -ca = "" -# disable_tls_verify_client_cert = true|false (default: false) - -# admin server https options -[https.admin] -cert = "" -key = "" -ca = "" - -# white list. It's checking request ip address. -[guard] -white_list = "" diff --git a/weed/command/server.go b/weed/command/server.go index 8f7267d3e..b1812bb9b 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -1,18 +1,18 @@ package command import ( - "crypto/tls" "fmt" "net/http" "os" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/grace" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" ) type ServerOptions struct { @@ -24,15 +24,13 @@ type ServerOptions struct { } var ( - serverOptions ServerOptions - masterOptions MasterOptions - filerOptions FilerOptions - s3Options S3Options - sftpOptions SftpOptions - iamOptions IamOptions - webdavOptions WebDavOption - mqBrokerOptions MessageQueueBrokerOptions - mqAgentServerOptions MessageQueueAgentOptions + serverOptions ServerOptions + masterOptions MasterOptions + filerOptions FilerOptions + s3Options S3Options + iamOptions IamOptions + webdavOptions WebDavOption + msgBrokerOptions MessageBrokerOptions ) func init() { @@ -68,18 +66,17 @@ var ( volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use minFreeSpace instead).") volumeMinFreeSpace = cmdServer.Flag.String("volume.minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.") serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - serverMetricsHttpIp = cmdServer.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server") isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server") isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") - isStartingSftp = cmdServer.Flag.Bool("sftp", false, "whether to start Sftp server") isStartingIam = cmdServer.Flag.Bool("iam", false, "whether to start IAM service") isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway") - isStartingMqBroker = cmdServer.Flag.Bool("mq.broker", false, "whether to start message queue broker") - isStartingMqAgent = cmdServer.Flag.Bool("mq.agent", false, "whether to start message queue agent") + isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker") + + serverWhiteList []string False = false ) @@ -96,25 +93,19 @@ func init() { masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") masterOptions.volumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") - masterOptions.maxParallelVacuumPerServer = cmdServer.Flag.Int("master.maxParallelVacuumPerServer", 1, "maximum number of volumes to vacuum in parallel on one volume server") masterOptions.defaultReplication = cmdServer.Flag.String("master.defaultReplication", "", "Default replication type if not specified.") - masterOptions.garbageThreshold = cmdServer.Flag.Float64("master.garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") - masterOptions.metricsAddress = cmdServer.Flag.String("master.metrics.address", "", "Prometheus gateway address") - masterOptions.metricsIntervalSec = cmdServer.Flag.Int("master.metrics.intervalSeconds", 15, "Prometheus push interval in seconds") - masterOptions.raftResumeState = cmdServer.Flag.Bool("master.resumeState", false, "resume previous state on start master server") - masterOptions.raftHashicorp = cmdServer.Flag.Bool("master.raftHashicorp", false, "use hashicorp raft") - masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster") + masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") + masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address") + masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + masterOptions.raftResumeState = cmdServer.Flag.Bool("resumeState", false, "resume previous state on start master server") masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)") masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers") - masterOptions.telemetryUrl = cmdServer.Flag.String("master.telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics") - masterOptions.telemetryEnabled = cmdServer.Flag.Bool("master.telemetry", false, "enable telemetry reporting") filerOptions.filerGroup = cmdServer.Flag.String("filer.filerGroup", "", "share metadata with other filers in the same filerGroup") filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.portGrpc = cmdServer.Flag.Int("filer.port.grpc", 0, "filer server grpc listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") - filerOptions.allowedOrigins = cmdServer.Flag.String("filer.allowedOrigins", "*", "comma separated list of allowed origins") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit") @@ -124,9 +115,6 @@ func init() { filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size") filerOptions.localSocket = cmdServer.Flag.String("filer.localSocket", "", "default to /tmp/seaweedfs-filer-.sock") filerOptions.showUIDirectoryDelete = cmdServer.Flag.Bool("filer.ui.deleteDir", true, "enable filer UI show delete directory button") - filerOptions.downloadMaxMBps = cmdServer.Flag.Int("filer.downloadMaxMBps", 0, "download max speed for each download request, in MB per second") - filerOptions.diskType = cmdServer.Flag.String("filer.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") - filerOptions.exposeDirectoryData = cmdServer.Flag.Bool("filer.exposeDirectoryData", true, "expose directory data via filer. If false, filer UI will be innaccessible.") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.portGrpc = cmdServer.Flag.Int("volume.port.grpc", 0, "volume server grpc listen port") @@ -137,47 +125,25 @@ func init() { serverOptions.v.readMode = cmdServer.Flag.String("volume.readMode", "proxy", "[local|proxy|redirect] how to deal with non-local volume: 'not found|read in remote node|redirect volume location'.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") - serverOptions.v.ldbTimeout = cmdServer.Flag.Int64("volume.index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.") serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.concurrentDownloadLimitMB = cmdServer.Flag.Int("volume.concurrentDownloadLimitMB", 64, "limit total concurrent download size") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile") serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") + serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, " enable tcp port") serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers") - serverOptions.v.inflightDownloadDataTimeout = cmdServer.Flag.Duration("volume.inflightDownloadDataTimeout", 60*time.Second, "inflight download data wait timeout of volume servers") - - serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", true, " if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.") - serverOptions.v.readBufferSizeMB = cmdServer.Flag.Int("volume.readBufferSizeMB", 4, " larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") - s3Options.portHttps = cmdServer.Flag.Int("s3.port.https", 0, "s3 server https listen port") s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port") s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") - s3Options.allowedOrigins = cmdServer.Flag.String("s3.allowedOrigins", "*", "comma separated list of allowed origins") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") - s3Options.tlsCACertificate = cmdServer.Flag.String("s3.cacert.file", "", "path to the TLS CA certificate file") - s3Options.tlsVerifyClientCert = cmdServer.Flag.Bool("s3.tlsVerifyClientCert", false, "whether to verify the client's certificate") s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") s3Options.auditLogConfig = cmdServer.Flag.String("s3.auditLogConfig", "", "path to the audit log config file") s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", true, "allow empty folders") s3Options.allowDeleteBucketNotEmpty = cmdServer.Flag.Bool("s3.allowDeleteBucketNotEmpty", true, "allow recursive deleting all entries along with bucket") - s3Options.localSocket = cmdServer.Flag.String("s3.localSocket", "", "default to /tmp/seaweedfs-s3-.sock") - s3Options.bindIp = cmdServer.Flag.String("s3.ip.bind", "", "ip address to bind to. If empty, default to same as -ip.bind option.") - s3Options.idleTimeout = cmdServer.Flag.Int("s3.idleTimeout", 10, "connection idle seconds") - sftpOptions.port = cmdServer.Flag.Int("sftp.port", 2022, "SFTP server listen port") - sftpOptions.sshPrivateKey = cmdServer.Flag.String("sftp.sshPrivateKey", "", "path to the SSH private key file for host authentication") - sftpOptions.hostKeysFolder = cmdServer.Flag.String("sftp.hostKeysFolder", "", "path to folder containing SSH private key files for host authentication") - sftpOptions.authMethods = cmdServer.Flag.String("sftp.authMethods", "password,publickey", "comma-separated list of allowed auth methods: password, publickey, keyboard-interactive") - sftpOptions.maxAuthTries = cmdServer.Flag.Int("sftp.maxAuthTries", 6, "maximum number of authentication attempts per connection") - sftpOptions.bannerMessage = cmdServer.Flag.String("sftp.bannerMessage", "SeaweedFS SFTP Server - Unauthorized access is prohibited", "message displayed before authentication") - sftpOptions.loginGraceTime = cmdServer.Flag.Duration("sftp.loginGraceTime", 2*time.Minute, "timeout for authentication") - sftpOptions.clientAliveInterval = cmdServer.Flag.Duration("sftp.clientAliveInterval", 5*time.Second, "interval for sending keep-alive messages") - sftpOptions.clientAliveCountMax = cmdServer.Flag.Int("sftp.clientAliveCountMax", 3, "maximum number of missed keep-alive messages before disconnecting") - sftpOptions.userStoreFile = cmdServer.Flag.String("sftp.userStoreFile", "", "path to JSON file containing user credentials and permissions") - sftpOptions.localSocket = cmdServer.Flag.String("sftp.localSocket", "", "default to /tmp/seaweedfs-sftp-.sock") iamOptions.port = cmdServer.Flag.Int("iam.port", 8111, "iam server http listen port") webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port") @@ -188,14 +154,8 @@ func init() { webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 0, "local cache capacity in MB") - webdavOptions.maxMB = cmdServer.Flag.Int("webdav.maxMB", 4, "split files larger than the limit") - webdavOptions.filerRootPath = cmdServer.Flag.String("webdav.filer.path", "/", "use this remote path from filer server") - mqBrokerOptions.port = cmdServer.Flag.Int("mq.broker.port", 17777, "message queue broker gRPC listen port") - mqBrokerOptions.logFlushInterval = cmdServer.Flag.Int("mq.broker.logFlushInterval", 5, "log buffer flush interval in seconds") - - mqAgentServerOptions.brokersString = cmdServer.Flag.String("mq.agent.brokers", "localhost:17777", "comma-separated message queue brokers") - mqAgentServerOptions.port = cmdServer.Flag.Int("mq.agent.port", 16777, "message queue agent gRPC listen port") + msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port") } @@ -205,7 +165,7 @@ func runServer(cmd *Command, args []string) bool { go http.ListenAndServe(fmt.Sprintf(":%d", *serverOptions.debugPort), nil) } - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile) @@ -213,20 +173,13 @@ func runServer(cmd *Command, args []string) bool { if *isStartingS3 { *isStartingFiler = true } - if *isStartingSftp { - *isStartingFiler = true - } if *isStartingIam { *isStartingFiler = true } if *isStartingWebDav { *isStartingFiler = true } - if *isStartingMqBroker { - *isStartingFiler = true - } - if *isStartingMqAgent { - *isStartingMqBroker = true + if *isStartingMsgBroker { *isStartingFiler = true } @@ -240,36 +193,22 @@ func runServer(cmd *Command, args []string) bool { serverBindIp = serverIp } - if *serverMetricsHttpIp == "" { - *serverMetricsHttpIp = *serverBindIp - } - // ip address masterOptions.ip = serverIp masterOptions.ipBind = serverBindIp - filerOptions.masters = pb.ServerAddresses(*masterOptions.peers).ToServiceDiscovery() + filerOptions.masters = pb.ServerAddresses(*masterOptions.peers).ToAddressMap() filerOptions.ip = serverIp filerOptions.bindIp = serverBindIp - if *s3Options.bindIp == "" { - s3Options.bindIp = serverBindIp - } - if sftpOptions.bindIp == nil || *sftpOptions.bindIp == "" { - sftpOptions.bindIp = serverBindIp - } + s3Options.bindIp = serverBindIp iamOptions.ip = serverBindIp iamOptions.masters = masterOptions.peers - webdavOptions.ipBind = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp serverOptions.v.masters = pb.ServerAddresses(*masterOptions.peers).ToAddresses() serverOptions.v.idleConnectionTimeout = serverTimeout serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack - mqBrokerOptions.ip = serverIp - mqBrokerOptions.masters = filerOptions.masters.GetInstancesAsMap() - mqBrokerOptions.filerGroup = filerOptions.filerGroup - mqAgentServerOptions.ip = serverIp - mqAgentServerOptions.brokers = pb.ServerAddresses(*mqAgentServerOptions.brokersString).ToAddresses() + msgBrokerOptions.ip = serverIp // serverOptions.v.pulseSeconds = pulseSeconds // masterOptions.pulseSeconds = pulseSeconds @@ -278,21 +217,16 @@ func runServer(cmd *Command, args []string) bool { filerOptions.dataCenter = serverDataCenter filerOptions.rack = serverRack - mqBrokerOptions.dataCenter = serverDataCenter - mqBrokerOptions.rack = serverRack - s3Options.dataCenter = serverDataCenter - sftpOptions.dataCenter = serverDataCenter filerOptions.disableHttp = serverDisableHttp masterOptions.disableHttp = serverDisableHttp filerAddress := string(pb.NewServerAddress(*serverIp, *filerOptions.port, *filerOptions.portGrpc)) s3Options.filer = &filerAddress - sftpOptions.filer = &filerAddress iamOptions.filer = &filerAddress webdavOptions.filer = &filerAddress - mqBrokerOptions.filerGroup = filerOptions.filerGroup + msgBrokerOptions.filer = &filerAddress - go stats_collect.StartMetricsServer(*serverMetricsHttpIp, *serverMetricsHttpPort) + go stats_collect.StartMetricsServer(*serverMetricsHttpPort) folders := strings.Split(*volumeDataFolders, ",") @@ -308,12 +242,11 @@ func runServer(cmd *Command, args []string) bool { } filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder - serverWhiteList := util.StringSplit(*serverWhiteListOption, ",") + if *serverWhiteListOption != "" { + serverWhiteList = strings.Split(*serverWhiteListOption, ",") + } if *isStartingFiler { - if *filerOptions.diskType == "" && *serverOptions.v.diskType != "" { - filerOptions.diskType = serverOptions.v.diskType - } go func() { time.Sleep(1 * time.Second) filerOptions.startFiler() @@ -328,14 +261,6 @@ func runServer(cmd *Command, args []string) bool { }() } - if *isStartingSftp { - go func() { - time.Sleep(2 * time.Second) - sftpOptions.localSocket = filerOptions.localSocket - sftpOptions.startSftpServer() - }() - } - if *isStartingIam { go func() { time.Sleep(2 * time.Second) @@ -351,17 +276,10 @@ func runServer(cmd *Command, args []string) bool { }() } - if *isStartingMqBroker { + if *isStartingMsgBroker { go func() { time.Sleep(2 * time.Second) - mqBrokerOptions.startQueueServer() - }() - } - - if *isStartingMqAgent { - go func() { - time.Sleep(2 * time.Second) - mqAgentServerOptions.startQueueAgent() + msgBrokerOptions.startQueueServer() }() } @@ -377,13 +295,3 @@ func runServer(cmd *Command, args []string) bool { select {} } - -func newHttpServer(h http.Handler, tlsConfig *tls.Config) *http.Server { - s := &http.Server{ - Handler: h, - } - if tlsConfig != nil { - s.TLSConfig = tlsConfig.Clone() - } - return s -} diff --git a/weed/command/sftp.go b/weed/command/sftp.go deleted file mode 100644 index ed93e44fe..000000000 --- a/weed/command/sftp.go +++ /dev/null @@ -1,194 +0,0 @@ -package command - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/version" - "net" - "os" - "runtime" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/sftpd" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - sftpOptionsStandalone SftpOptions -) - -// SftpOptions holds configuration options for the SFTP server. -type SftpOptions struct { - filer *string - bindIp *string - port *int - sshPrivateKey *string - hostKeysFolder *string - authMethods *string - maxAuthTries *int - bannerMessage *string - loginGraceTime *time.Duration - clientAliveInterval *time.Duration - clientAliveCountMax *int - userStoreFile *string - dataCenter *string - metricsHttpPort *int - metricsHttpIp *string - localSocket *string -} - -// cmdSftp defines the SFTP command similar to the S3 command. -var cmdSftp = &Command{ - UsageLine: "sftp [-port=2022] [-filer=] [-sshPrivateKey=]", - Short: "start an SFTP server that is backed by a SeaweedFS filer", - Long: `Start an SFTP server that leverages the SeaweedFS filer service to handle file operations. - -Instead of reading from or writing to a local filesystem, all file operations -are routed through the filer (filer_pb) gRPC API. This allows you to centralize -your file management in SeaweedFS. - `, -} - -func init() { - // Register the command to avoid cyclic dependencies. - cmdSftp.Run = runSftp - - sftpOptionsStandalone.filer = cmdSftp.Flag.String("filer", "localhost:8888", "filer server address (ip:port)") - sftpOptionsStandalone.bindIp = cmdSftp.Flag.String("ip.bind", "0.0.0.0", "ip address to bind SFTP server") - sftpOptionsStandalone.port = cmdSftp.Flag.Int("port", 2022, "SFTP server listen port") - sftpOptionsStandalone.sshPrivateKey = cmdSftp.Flag.String("sshPrivateKey", "", "path to the SSH private key file for host authentication") - sftpOptionsStandalone.hostKeysFolder = cmdSftp.Flag.String("hostKeysFolder", "", "path to folder containing SSH private key files for host authentication") - sftpOptionsStandalone.authMethods = cmdSftp.Flag.String("authMethods", "password,publickey", "comma-separated list of allowed auth methods: password, publickey, keyboard-interactive") - sftpOptionsStandalone.maxAuthTries = cmdSftp.Flag.Int("maxAuthTries", 6, "maximum number of authentication attempts per connection") - sftpOptionsStandalone.bannerMessage = cmdSftp.Flag.String("bannerMessage", "SeaweedFS SFTP Server - Unauthorized access is prohibited", "message displayed before authentication") - sftpOptionsStandalone.loginGraceTime = cmdSftp.Flag.Duration("loginGraceTime", 2*time.Minute, "timeout for authentication") - sftpOptionsStandalone.clientAliveInterval = cmdSftp.Flag.Duration("clientAliveInterval", 5*time.Second, "interval for sending keep-alive messages") - sftpOptionsStandalone.clientAliveCountMax = cmdSftp.Flag.Int("clientAliveCountMax", 3, "maximum number of missed keep-alive messages before disconnecting") - sftpOptionsStandalone.userStoreFile = cmdSftp.Flag.String("userStoreFile", "", "path to JSON file containing user credentials and permissions") - sftpOptionsStandalone.dataCenter = cmdSftp.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center") - sftpOptionsStandalone.metricsHttpPort = cmdSftp.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - sftpOptionsStandalone.metricsHttpIp = cmdSftp.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") - sftpOptionsStandalone.localSocket = cmdSftp.Flag.String("localSocket", "", "default to /tmp/seaweedfs-sftp-.sock") -} - -// runSftp is the command entry point. -func runSftp(cmd *Command, args []string) bool { - // Load security configuration as done in other SeaweedFS services. - util.LoadSecurityConfiguration() - - // Configure metrics - switch { - case *sftpOptionsStandalone.metricsHttpIp != "": - // nothing to do, use sftpOptionsStandalone.metricsHttpIp - case *sftpOptionsStandalone.bindIp != "": - *sftpOptionsStandalone.metricsHttpIp = *sftpOptionsStandalone.bindIp - } - go stats_collect.StartMetricsServer(*sftpOptionsStandalone.metricsHttpIp, *sftpOptionsStandalone.metricsHttpPort) - - return sftpOptionsStandalone.startSftpServer() -} - -func (sftpOpt *SftpOptions) startSftpServer() bool { - filerAddress := pb.ServerAddress(*sftpOpt.filer) - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") - - // metrics read from the filer - var metricsAddress string - var metricsIntervalSec int - var filerGroup string - - // Connect to the filer service and try to retrieve basic configuration. - for { - err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) - } - metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) - filerGroup = resp.FilerGroup - glog.V(0).Infof("SFTP read filer configuration, using filer at: %s", filerAddress) - return nil - }) - if err != nil { - glog.V(0).Infof("Waiting to connect to filer %s grpc address %s...", *sftpOpt.filer, filerAddress.ToGrpcAddress()) - time.Sleep(time.Second) - } else { - glog.V(0).Infof("Connected to filer %s grpc address %s", *sftpOpt.filer, filerAddress.ToGrpcAddress()) - break - } - } - - go stats_collect.LoopPushingMetric("sftp", stats_collect.SourceName(uint32(*sftpOpt.port)), metricsAddress, metricsIntervalSec) - - // Parse auth methods - var authMethods []string - if *sftpOpt.authMethods != "" { - authMethods = util.StringSplit(*sftpOpt.authMethods, ",") - } - - // Create a new SFTP service instance with all options - service := sftpd.NewSFTPService(&sftpd.SFTPServiceOptions{ - GrpcDialOption: grpcDialOption, - DataCenter: *sftpOpt.dataCenter, - FilerGroup: filerGroup, - Filer: filerAddress, - SshPrivateKey: *sftpOpt.sshPrivateKey, - HostKeysFolder: *sftpOpt.hostKeysFolder, - AuthMethods: authMethods, - MaxAuthTries: *sftpOpt.maxAuthTries, - BannerMessage: *sftpOpt.bannerMessage, - LoginGraceTime: *sftpOpt.loginGraceTime, - ClientAliveInterval: *sftpOpt.clientAliveInterval, - ClientAliveCountMax: *sftpOpt.clientAliveCountMax, - UserStoreFile: *sftpOpt.userStoreFile, - }) - - // Set up Unix socket if on non-Windows platforms - if runtime.GOOS != "windows" { - localSocket := *sftpOpt.localSocket - if localSocket == "" { - localSocket = fmt.Sprintf("/tmp/seaweedfs-sftp-%d.sock", *sftpOpt.port) - } - if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) - } - go func() { - // start on local unix socket - sftpSocketListener, err := net.Listen("unix", localSocket) - if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) - } - if err := service.Serve(sftpSocketListener); err != nil { - glog.Fatalf("Failed to serve SFTP on socket %s: %v", localSocket, err) - } - }() - } - - // Start the SFTP service on TCP - listenAddress := fmt.Sprintf("%s:%d", *sftpOpt.bindIp, *sftpOpt.port) - sftpListener, sftpLocalListener, err := util.NewIpAndLocalListeners(*sftpOpt.bindIp, *sftpOpt.port, time.Duration(10)*time.Second) - if err != nil { - glog.Fatalf("SFTP server listener on %s error: %v", listenAddress, err) - } - - glog.V(0).Infof("Start Seaweed SFTP Server %s at %s", version.Version(), listenAddress) - - if sftpLocalListener != nil { - go func() { - if err := service.Serve(sftpLocalListener); err != nil { - glog.Fatalf("SFTP Server failed to serve on local listener: %v", err) - } - }() - } - - if err := service.Serve(sftpListener); err != nil { - glog.Fatalf("SFTP Server failed to serve: %v", err) - } - - return true -} diff --git a/weed/command/shell.go b/weed/command/shell.go index 1e921411b..c32a8e614 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -2,11 +2,11 @@ package command import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/shell" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/shell" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -19,7 +19,7 @@ func init() { cmdShell.Run = runShell // break init cycle shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333") shellOptions.FilerGroup = cmdShell.Flag.String("filerGroup", "", "filerGroup for the filers") - shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port for initial connection, e.g. localhost:8888") + shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888") shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml") } @@ -30,36 +30,32 @@ var cmdShell = &Command{ Generate shell.toml via "weed scaffold -config=shell" -`, + `, } func runShell(command *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - shellOptions.Directory = "/" - - util.LoadConfiguration("shell", false) - viper := util.GetViper() - cluster := viper.GetString("cluster.default") - if *shellCluster != "" { - cluster = *shellCluster - } if *shellOptions.Masters == "" { + util.LoadConfiguration("shell", false) + v := util.GetViper() + cluster := v.GetString("cluster.default") + if *shellCluster != "" { + cluster = *shellCluster + } if cluster == "" { *shellOptions.Masters = "localhost:9333" } else { - *shellOptions.Masters = viper.GetString("cluster." + cluster + ".master") + *shellOptions.Masters = v.GetString("cluster." + cluster + ".master") + *shellInitialFiler = v.GetString("cluster." + cluster + ".filer") + fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler) } } - filerAddress := *shellInitialFiler - if filerAddress == "" && cluster != "" { - filerAddress = viper.GetString("cluster." + cluster + ".filer") - } - shellOptions.FilerAddress = pb.ServerAddress(filerAddress) - fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, shellOptions.FilerAddress) + shellOptions.FilerAddress = pb.ServerAddress(*shellInitialFiler) + shellOptions.Directory = "/" shell.RunShell(shellOptions) diff --git a/weed/command/sql.go b/weed/command/sql.go deleted file mode 100644 index 682c8e46d..000000000 --- a/weed/command/sql.go +++ /dev/null @@ -1,596 +0,0 @@ -package command - -import ( - "context" - "encoding/csv" - "encoding/json" - "fmt" - "io" - "os" - "path" - "strings" - "time" - - "github.com/peterh/liner" - "github.com/seaweedfs/seaweedfs/weed/query/engine" - "github.com/seaweedfs/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/util/sqlutil" -) - -func init() { - cmdSql.Run = runSql -} - -var cmdSql = &Command{ - UsageLine: "sql [-master=localhost:9333] [-interactive] [-file=query.sql] [-output=table|json|csv] [-database=dbname] [-query=\"SQL\"]", - Short: "advanced SQL query interface for SeaweedFS MQ topics with multiple execution modes", - Long: `Enhanced SQL interface for SeaweedFS Message Queue topics with multiple execution modes. - -Execution Modes: -- Interactive shell (default): weed sql -interactive -- Single query: weed sql -query "SELECT * FROM user_events" -- Batch from file: weed sql -file queries.sql -- Context switching: weed sql -database analytics -interactive - -Output Formats: -- table: ASCII table format (default for interactive) -- json: JSON format (default for non-interactive) -- csv: Comma-separated values - -Features: -- Full WHERE clause support (=, <, >, <=, >=, !=, LIKE, IN) -- Advanced pattern matching with LIKE wildcards (%, _) -- Multi-value filtering with IN operator -- Real MQ namespace and topic discovery -- Database context switching - -Examples: - weed sql -interactive - weed sql -query "SHOW DATABASES" -output json - weed sql -file batch_queries.sql -output csv - weed sql -database analytics -query "SELECT COUNT(*) FROM metrics" - weed sql -master broker1:9333 -interactive -`, -} - -var ( - sqlMaster = cmdSql.Flag.String("master", "localhost:9333", "SeaweedFS master server HTTP address") - sqlInteractive = cmdSql.Flag.Bool("interactive", false, "start interactive shell mode") - sqlFile = cmdSql.Flag.String("file", "", "execute SQL queries from file") - sqlOutput = cmdSql.Flag.String("output", "", "output format: table, json, csv (auto-detected if not specified)") - sqlDatabase = cmdSql.Flag.String("database", "", "default database context") - sqlQuery = cmdSql.Flag.String("query", "", "execute single SQL query") -) - -// OutputFormat represents different output formatting options -type OutputFormat string - -const ( - OutputTable OutputFormat = "table" - OutputJSON OutputFormat = "json" - OutputCSV OutputFormat = "csv" -) - -// SQLContext holds the execution context for SQL operations -type SQLContext struct { - engine *engine.SQLEngine - currentDatabase string - outputFormat OutputFormat - interactive bool -} - -func runSql(command *Command, args []string) bool { - // Initialize SQL engine with master address for service discovery - sqlEngine := engine.NewSQLEngine(*sqlMaster) - - // Determine execution mode and output format - interactive := *sqlInteractive || (*sqlQuery == "" && *sqlFile == "") - outputFormat := determineOutputFormat(*sqlOutput, interactive) - - // Create SQL context - ctx := &SQLContext{ - engine: sqlEngine, - currentDatabase: *sqlDatabase, - outputFormat: outputFormat, - interactive: interactive, - } - - // Set current database in SQL engine if specified via command line - if *sqlDatabase != "" { - ctx.engine.GetCatalog().SetCurrentDatabase(*sqlDatabase) - } - - // Execute based on mode - switch { - case *sqlQuery != "": - // Single query mode - return executeSingleQuery(ctx, *sqlQuery) - case *sqlFile != "": - // Batch file mode - return executeFileQueries(ctx, *sqlFile) - default: - // Interactive mode - return runInteractiveShell(ctx) - } -} - -// determineOutputFormat selects the appropriate output format -func determineOutputFormat(specified string, interactive bool) OutputFormat { - switch strings.ToLower(specified) { - case "table": - return OutputTable - case "json": - return OutputJSON - case "csv": - return OutputCSV - default: - // Auto-detect based on mode - if interactive { - return OutputTable - } - return OutputJSON - } -} - -// executeSingleQuery executes a single query and outputs the result -func executeSingleQuery(ctx *SQLContext, query string) bool { - if ctx.outputFormat != OutputTable { - // Suppress banner for non-interactive output - return executeAndDisplay(ctx, query, false) - } - - fmt.Printf("Executing query against %s...\n", *sqlMaster) - return executeAndDisplay(ctx, query, true) -} - -// executeFileQueries processes SQL queries from a file -func executeFileQueries(ctx *SQLContext, filename string) bool { - content, err := os.ReadFile(filename) - if err != nil { - fmt.Printf("Error reading file %s: %v\n", filename, err) - return false - } - - if ctx.outputFormat == OutputTable && ctx.interactive { - fmt.Printf("Executing queries from %s against %s...\n", filename, *sqlMaster) - } - - // Split file content into individual queries (robust approach) - queries := sqlutil.SplitStatements(string(content)) - - for i, query := range queries { - query = strings.TrimSpace(query) - if query == "" { - continue - } - - if ctx.outputFormat == OutputTable && len(queries) > 1 { - fmt.Printf("\n--- Query %d ---\n", i+1) - } - - if !executeAndDisplay(ctx, query, ctx.outputFormat == OutputTable) { - return false - } - } - - return true -} - -// runInteractiveShell starts the enhanced interactive shell with readline support -func runInteractiveShell(ctx *SQLContext) bool { - fmt.Println("SeaweedFS Enhanced SQL Interface") - fmt.Println("Type 'help;' for help, 'exit;' to quit") - fmt.Printf("Connected to master: %s\n", *sqlMaster) - if ctx.currentDatabase != "" { - fmt.Printf("Current database: %s\n", ctx.currentDatabase) - } - fmt.Println("Advanced WHERE operators supported: <=, >=, !=, LIKE, IN") - fmt.Println("Use up/down arrows for command history") - fmt.Println() - - // Initialize liner for readline functionality - line := liner.NewLiner() - defer line.Close() - - // Handle Ctrl+C gracefully - line.SetCtrlCAborts(true) - grace.OnInterrupt(func() { - line.Close() - }) - - // Load command history - historyPath := path.Join(os.TempDir(), "weed-sql-history") - if f, err := os.Open(historyPath); err == nil { - line.ReadHistory(f) - f.Close() - } - - // Save history on exit - defer func() { - if f, err := os.Create(historyPath); err == nil { - line.WriteHistory(f) - f.Close() - } - }() - - var queryBuffer strings.Builder - - for { - // Show prompt with current database context - var prompt string - if queryBuffer.Len() == 0 { - if ctx.currentDatabase != "" { - prompt = fmt.Sprintf("seaweedfs:%s> ", ctx.currentDatabase) - } else { - prompt = "seaweedfs> " - } - } else { - prompt = " -> " // Continuation prompt - } - - // Read line with readline support - input, err := line.Prompt(prompt) - if err != nil { - if err == liner.ErrPromptAborted { - fmt.Println("Query cancelled") - queryBuffer.Reset() - continue - } - if err != io.EOF { - fmt.Printf("Input error: %v\n", err) - } - break - } - - lineStr := strings.TrimSpace(input) - - // Handle empty lines - if lineStr == "" { - continue - } - - // Accumulate lines in query buffer - if queryBuffer.Len() > 0 { - queryBuffer.WriteString(" ") - } - queryBuffer.WriteString(lineStr) - - // Check if we have a complete statement (ends with semicolon or special command) - fullQuery := strings.TrimSpace(queryBuffer.String()) - isComplete := strings.HasSuffix(lineStr, ";") || - isSpecialCommand(fullQuery) - - if !isComplete { - continue // Continue reading more lines - } - - // Add completed command to history - line.AppendHistory(fullQuery) - - // Handle special commands (with or without semicolon) - cleanQuery := strings.TrimSuffix(fullQuery, ";") - cleanQuery = strings.TrimSpace(cleanQuery) - - if cleanQuery == "exit" || cleanQuery == "quit" || cleanQuery == "\\q" { - fmt.Println("Goodbye!") - break - } - - if cleanQuery == "help" { - showEnhancedHelp() - queryBuffer.Reset() - continue - } - - // Handle database switching - use proper SQL parser instead of manual parsing - if strings.HasPrefix(strings.ToUpper(cleanQuery), "USE ") { - // Execute USE statement through the SQL engine for proper parsing - result, err := ctx.engine.ExecuteSQL(context.Background(), cleanQuery) - if err != nil { - fmt.Printf("Error: %v\n\n", err) - } else if result.Error != nil { - fmt.Printf("Error: %v\n\n", result.Error) - } else { - // Extract the database name from the result message for CLI context - if len(result.Rows) > 0 && len(result.Rows[0]) > 0 { - message := result.Rows[0][0].ToString() - // Extract database name from "Database changed to: dbname" - if strings.HasPrefix(message, "Database changed to: ") { - ctx.currentDatabase = strings.TrimPrefix(message, "Database changed to: ") - } - fmt.Printf("%s\n\n", message) - } - } - queryBuffer.Reset() - continue - } - - // Handle output format switching - if strings.HasPrefix(strings.ToUpper(cleanQuery), "\\FORMAT ") { - format := strings.TrimSpace(strings.TrimPrefix(strings.ToUpper(cleanQuery), "\\FORMAT ")) - switch format { - case "TABLE": - ctx.outputFormat = OutputTable - fmt.Println("Output format set to: table") - case "JSON": - ctx.outputFormat = OutputJSON - fmt.Println("Output format set to: json") - case "CSV": - ctx.outputFormat = OutputCSV - fmt.Println("Output format set to: csv") - default: - fmt.Printf("Invalid format: %s. Supported: table, json, csv\n", format) - } - queryBuffer.Reset() - continue - } - - // Execute SQL query (without semicolon) - executeAndDisplay(ctx, cleanQuery, true) - - // Reset buffer for next query - queryBuffer.Reset() - } - - return true -} - -// isSpecialCommand checks if a command is a special command that doesn't require semicolon -func isSpecialCommand(query string) bool { - cleanQuery := strings.TrimSuffix(strings.TrimSpace(query), ";") - cleanQuery = strings.ToLower(cleanQuery) - - // Special commands that work with or without semicolon - specialCommands := []string{ - "exit", "quit", "\\q", "help", - } - - for _, cmd := range specialCommands { - if cleanQuery == cmd { - return true - } - } - - // Commands that are exactly specific commands (not just prefixes) - parts := strings.Fields(strings.ToUpper(cleanQuery)) - if len(parts) == 0 { - return false - } - return (parts[0] == "USE" && len(parts) >= 2) || - strings.HasPrefix(strings.ToUpper(cleanQuery), "\\FORMAT ") -} - -// executeAndDisplay executes a query and displays the result in the specified format -func executeAndDisplay(ctx *SQLContext, query string, showTiming bool) bool { - startTime := time.Now() - - // Execute the query - execCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - result, err := ctx.engine.ExecuteSQL(execCtx, query) - if err != nil { - if ctx.outputFormat == OutputJSON { - errorResult := map[string]interface{}{ - "error": err.Error(), - "query": query, - } - jsonBytes, _ := json.MarshalIndent(errorResult, "", " ") - fmt.Println(string(jsonBytes)) - } else { - fmt.Printf("Error: %v\n", err) - } - return false - } - - if result.Error != nil { - if ctx.outputFormat == OutputJSON { - errorResult := map[string]interface{}{ - "error": result.Error.Error(), - "query": query, - } - jsonBytes, _ := json.MarshalIndent(errorResult, "", " ") - fmt.Println(string(jsonBytes)) - } else { - fmt.Printf("Query Error: %v\n", result.Error) - } - return false - } - - // Display results in the specified format - switch ctx.outputFormat { - case OutputTable: - displayTableResult(result) - case OutputJSON: - displayJSONResult(result) - case OutputCSV: - displayCSVResult(result) - } - - // Show execution time for interactive/table mode - // Only show timing if there are columns or if result is truly empty - if showTiming && ctx.outputFormat == OutputTable && (len(result.Columns) > 0 || len(result.Rows) == 0) { - elapsed := time.Since(startTime) - fmt.Printf("\n(%d rows in set, %.3f sec)\n\n", len(result.Rows), elapsed.Seconds()) - } - - return true -} - -// displayTableResult formats and displays query results in ASCII table format -func displayTableResult(result *engine.QueryResult) { - if len(result.Columns) == 0 { - fmt.Println("Empty result set") - return - } - - // Calculate column widths for formatting - colWidths := make([]int, len(result.Columns)) - for i, col := range result.Columns { - colWidths[i] = len(col) - } - - // Check data for wider columns - for _, row := range result.Rows { - for i, val := range row { - if i < len(colWidths) { - valStr := val.ToString() - if len(valStr) > colWidths[i] { - colWidths[i] = len(valStr) - } - } - } - } - - // Print header separator - fmt.Print("+") - for _, width := range colWidths { - fmt.Print(strings.Repeat("-", width+2) + "+") - } - fmt.Println() - - // Print column headers - fmt.Print("|") - for i, col := range result.Columns { - fmt.Printf(" %-*s |", colWidths[i], col) - } - fmt.Println() - - // Print separator - fmt.Print("+") - for _, width := range colWidths { - fmt.Print(strings.Repeat("-", width+2) + "+") - } - fmt.Println() - - // Print data rows - for _, row := range result.Rows { - fmt.Print("|") - for i, val := range row { - if i < len(colWidths) { - fmt.Printf(" %-*s |", colWidths[i], val.ToString()) - } - } - fmt.Println() - } - - // Print bottom separator - fmt.Print("+") - for _, width := range colWidths { - fmt.Print(strings.Repeat("-", width+2) + "+") - } - fmt.Println() -} - -// displayJSONResult outputs query results in JSON format -func displayJSONResult(result *engine.QueryResult) { - // Convert result to JSON-friendly format - jsonResult := map[string]interface{}{ - "columns": result.Columns, - "rows": make([]map[string]interface{}, len(result.Rows)), - "count": len(result.Rows), - } - - // Convert rows to JSON objects - for i, row := range result.Rows { - rowObj := make(map[string]interface{}) - for j, val := range row { - if j < len(result.Columns) { - rowObj[result.Columns[j]] = val.ToString() - } - } - jsonResult["rows"].([]map[string]interface{})[i] = rowObj - } - - // Marshal and print JSON - jsonBytes, err := json.MarshalIndent(jsonResult, "", " ") - if err != nil { - fmt.Printf("Error formatting JSON: %v\n", err) - return - } - - fmt.Println(string(jsonBytes)) -} - -// displayCSVResult outputs query results in CSV format -func displayCSVResult(result *engine.QueryResult) { - // Handle execution plan results specially to avoid CSV quoting issues - if len(result.Columns) == 1 && result.Columns[0] == "Query Execution Plan" { - // For execution plans, output directly without CSV encoding to avoid quotes - for _, row := range result.Rows { - if len(row) > 0 { - fmt.Println(row[0].ToString()) - } - } - return - } - - // Standard CSV output for regular query results - writer := csv.NewWriter(os.Stdout) - defer writer.Flush() - - // Write headers - if err := writer.Write(result.Columns); err != nil { - fmt.Printf("Error writing CSV headers: %v\n", err) - return - } - - // Write data rows - for _, row := range result.Rows { - csvRow := make([]string, len(row)) - for i, val := range row { - csvRow[i] = val.ToString() - } - if err := writer.Write(csvRow); err != nil { - fmt.Printf("Error writing CSV row: %v\n", err) - return - } - } -} - -func showEnhancedHelp() { - fmt.Println(`SeaweedFS Enhanced SQL Interface Help: - -METADATA OPERATIONS: - SHOW DATABASES; - List all MQ namespaces - SHOW TABLES; - List all topics in current namespace - SHOW TABLES FROM database; - List topics in specific namespace - DESCRIBE table_name; - Show table schema - -ADVANCED QUERYING: - SELECT * FROM table_name; - Query all data - SELECT col1, col2 FROM table WHERE ...; - Column projection - SELECT * FROM table WHERE id <= 100; - Range filtering - SELECT * FROM table WHERE name LIKE 'admin%'; - Pattern matching - SELECT * FROM table WHERE status IN ('active', 'pending'); - Multi-value - SELECT COUNT(*), MAX(id), MIN(id) FROM ...; - Aggregation functions - -QUERY ANALYSIS: - EXPLAIN SELECT ...; - Show hierarchical execution plan - (data sources, optimizations, timing) - -DDL OPERATIONS: - CREATE TABLE topic (field1 INT, field2 STRING); - Create topic - Note: ALTER TABLE and DROP TABLE are not supported - -SPECIAL COMMANDS: - USE database_name; - Switch database context - \format table|json|csv - Change output format - help; - Show this help - exit; or quit; or \q - Exit interface - -EXTENDED WHERE OPERATORS: - =, <, >, <=, >= - Comparison operators - !=, <> - Not equal operators - LIKE 'pattern%' - Pattern matching (% = any chars, _ = single char) - IN (value1, value2, ...) - Multi-value matching - AND, OR - Logical operators - -EXAMPLES: - SELECT * FROM user_events WHERE user_id >= 10 AND status != 'deleted'; - SELECT username FROM users WHERE email LIKE '%@company.com'; - SELECT * FROM logs WHERE level IN ('error', 'warning') AND timestamp >= '2023-01-01'; - EXPLAIN SELECT MAX(id) FROM events; -- View execution plan - -Current Status: Full WHERE clause support + Real MQ integration`) -} diff --git a/weed/command/update.go b/weed/command/update.go index cc464dd28..2d0dc42ad 100644 --- a/weed/command/update.go +++ b/weed/command/update.go @@ -10,8 +10,8 @@ import ( "encoding/hex" "encoding/json" "fmt" - swv "github.com/seaweedfs/seaweedfs/weed/util/version" "io" + "io/ioutil" "net/http" "os" "path/filepath" @@ -19,9 +19,8 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "golang.org/x/net/context/ctxhttp" ) @@ -53,7 +52,7 @@ type githubError struct { Message string } -// default version is not full version +//default version is not full version var isFullVersion = false var ( @@ -70,15 +69,15 @@ func init() { path, _ := os.Executable() _, name := filepath.Split(path) updateOpt.dir = cmdUpdate.Flag.String("dir", filepath.Dir(path), "directory to save new weed.") - updateOpt.name = cmdUpdate.Flag.String("name", name, "name of new weed. On windows, name shouldn't be same to the original name.") + updateOpt.name = cmdUpdate.Flag.String("name", name, "name of new weed. On windows, name shouldn't be same to the orignial name.") updateOpt.Version = cmdUpdate.Flag.String("version", "0", "specific version of weed you want to download. If not specified, get the latest version.") cmdUpdate.Run = runUpdate } var cmdUpdate = &Command{ UsageLine: "update [-dir=/path/to/dir] [-name=name] [-version=x.xx]", - Short: "get latest or specific version from https://github.com/seaweedfs/seaweedfs", - Long: `get latest or specific version from https://github.com/seaweedfs/seaweedfs`, + Short: "get latest or specific version from https://github.com/chrislusf/seaweedfs", + Long: `get latest or specific version from https://github.com/chrislusf/seaweedfs`, } func runUpdate(cmd *Command, args []string) bool { @@ -102,7 +101,7 @@ func runUpdate(cmd *Command, args []string) bool { if runtime.GOOS == "windows" { if target == path { - glog.Fatalf("On windows, name of the new weed shouldn't be same to the original name.") + glog.Fatalf("On windows, name of the new weed shouldn't be same to the orignial name.") return false } } @@ -118,8 +117,8 @@ func runUpdate(cmd *Command, args []string) bool { } func downloadRelease(ctx context.Context, target string, ver string) (version string, err error) { - currentVersion := swv.VERSION_NUMBER - rel, err := GitHubLatestRelease(ctx, ver, "seaweedfs", "seaweedfs") + currentVersion := util.VERSION_NUMBER + rel, err := GitHubLatestRelease(ctx, ver, "chrislusf", "seaweedfs") if err != nil { return "", err } @@ -200,7 +199,6 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R if err != nil { return Release{}, err } - defer util_http.CloseResponse(res) if res.StatusCode != http.StatusOK { content := res.Header.Get("Content-Type") @@ -213,10 +211,17 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R } } + _ = res.Body.Close() return Release{}, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) } - buf, err := io.ReadAll(res.Body) + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + _ = res.Body.Close() + return Release{}, err + } + + err = res.Body.Close() if err != nil { return Release{}, err } @@ -260,13 +265,18 @@ func getGithubData(ctx context.Context, url string) ([]byte, error) { if err != nil { return nil, err } - defer util_http.CloseResponse(res) if res.StatusCode != http.StatusOK { return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) } - buf, err := io.ReadAll(res.Body) + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + _ = res.Body.Close() + return nil, err + } + + err = res.Body.Close() if err != nil { return nil, err } @@ -310,12 +320,7 @@ func extractToFile(buf []byte, filename, target string) error { trd := tar.NewReader(gr) hdr, terr := trd.Next() if terr != nil { - if hdr != nil { - glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr) - } else { - glog.Errorf("uncompress file is nil, failed:%s", terr) - } - + glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr) return terr } rd = trd @@ -343,7 +348,7 @@ func extractToFile(buf []byte, filename, target string) error { // Write everything to a temp file dir := filepath.Dir(target) - new, err := os.CreateTemp(dir, "weed") + new, err := ioutil.TempFile(dir, "weed") if err != nil { return err } diff --git a/weed/command/update_full.go b/weed/command/update_full.go index 95ca5fc00..185203aee 100644 --- a/weed/command/update_full.go +++ b/weed/command/update_full.go @@ -1,9 +1,9 @@ -//go:build elastic && gocdk && rclone && sqlite && tarantool && tikv && ydb -// +build elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb +//go:build elastic && ydb && gocdk && tikv +// +build elastic,ydb,gocdk,tikv package command -// set true if gtags are set +//set true if gtags are set func init() { isFullVersion = true } diff --git a/weed/command/upload.go b/weed/command/upload.go index 9f9ac1107..f2b0b7fe4 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -4,16 +4,15 @@ import ( "context" "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "google.golang.org/grpc" "os" "path/filepath" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -69,7 +68,7 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") defaultReplication, err := readMasterConfiguration(grpcDialOption, pb.ServerAddress(*upload.master)) @@ -97,14 +96,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, operation.StoragePreference{ - Replication: *upload.replication, - Collection: *upload.collection, - DataCenter: *upload.dataCenter, - Ttl: *upload.ttl, - DiskType: *upload.diskType, - MaxMB: *upload.maxMB, - }, *upload.usePublicUrl) + results, e := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -126,14 +118,7 @@ func runUpload(cmd *Command, args []string) bool { fmt.Println(e.Error()) return false } - results, err := operation.SubmitFiles(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, operation.StoragePreference{ - Replication: *upload.replication, - Collection: *upload.collection, - DataCenter: *upload.dataCenter, - Ttl: *upload.ttl, - DiskType: *upload.diskType, - MaxMB: *upload.maxMB, - }, *upload.usePublicUrl) + results, err := operation.SubmitFiles(func() pb.ServerAddress { return pb.ServerAddress(*upload.master) }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) if err != nil { fmt.Println(err.Error()) return false @@ -145,7 +130,7 @@ func runUpload(cmd *Command, args []string) bool { } func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress pb.ServerAddress) (replication string, err error) { - err = pb.WithMasterClient(false, masterAddress, grpcDialOption, false, func(client master_pb.SeaweedClient) error { + err = pb.WithMasterClient(false, masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error { resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) if err != nil { return fmt.Errorf("get master %s configuration: %v", masterAddress, err) diff --git a/weed/command/version.go b/weed/command/version.go index fdde081ad..9caf7dc4e 100644 --- a/weed/command/version.go +++ b/weed/command/version.go @@ -2,8 +2,9 @@ package command import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/version" "runtime" + + "github.com/chrislusf/seaweedfs/weed/util" ) var cmdVersion = &Command{ @@ -18,9 +19,6 @@ func runVersion(cmd *Command, args []string) bool { cmd.Usage() } - fmt.Printf("version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH) - println() - println("For enterprise users, please visit https://seaweedfs.com for SeaweedFS Enterprise Edition,") - println("which has a self-healing storage format with better data protection.") + fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) return true } diff --git a/weed/command/volume.go b/weed/command/volume.go index c18ed3222..158bdf162 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -10,23 +10,25 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/spf13/viper" "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + "google.golang.org/grpc/reflection" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - "github.com/seaweedfs/seaweedfs/weed/server/constants" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/util/httpdown" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -38,7 +40,7 @@ type VolumeServerOptions struct { portGrpc *int publicPort *int folders []string - folderMaxLimits []int32 + folderMaxLimits []int idxFolder *string ip *string publicUrl *string @@ -62,13 +64,9 @@ type VolumeServerOptions struct { pprof *bool preStopSeconds *int metricsHttpPort *int - metricsHttpIp *string // pulseSeconds *int - inflightUploadDataTimeout *time.Duration - inflightDownloadDataTimeout *time.Duration - hasSlowRead *bool - readBufferSizeMB *int - ldbTimeout *int64 + enableTcp *bool + inflightUploadDataTimeout *time.Duration } func init() { @@ -93,17 +91,13 @@ func init() { v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") - v.ldbTimeout = cmdVolume.Flag.Int64("index.leveldbTimeout", 0, "alive time for leveldb (default to 0). If leveldb of volume is not accessed in ldbTimeout hours, it will be off loaded to reduce opened files and memory consumption.") v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 256, "limit total concurrent upload size") v.concurrentDownloadLimitMB = cmdVolume.Flag.Int("concurrentDownloadLimitMB", 256, "limit total concurrent download size") v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile") v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") - v.metricsHttpIp = cmdVolume.Flag.String("metricsIp", "", "metrics listen ip. If empty, default to same as -ip.bind option.") v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") + v.enableTcp = cmdVolume.Flag.Bool("tcp", false, " enable tcp port") v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers") - v.inflightDownloadDataTimeout = cmdVolume.Flag.Duration("inflightDownloadDataTimeout", 60*time.Second, "inflight download data wait timeout of volume servers") - v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", true, " if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.") - v.readBufferSizeMB = cmdVolume.Flag.Int("readBufferSizeMB", 4, " larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally.") } var cmdVolume = &Command{ @@ -124,7 +118,7 @@ var ( func runVolume(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) // If --pprof is set we assume the caller wants to be able to collect // cpu and memory profiles via go tool pprof @@ -132,15 +126,7 @@ func runVolume(cmd *Command, args []string) bool { grace.SetupProfiling(*v.cpuProfile, *v.memProfile) } - switch { - case *v.metricsHttpIp != "": - // noting to do, use v.metricsHttpIp - case *v.bindIp != "": - *v.metricsHttpIp = *v.bindIp - case *v.ip != "": - *v.metricsHttpIp = *v.ip - } - go stats_collect.StartMetricsServer(*v.metricsHttpIp, *v.metricsHttpPort) + go stats_collect.StartMetricsServer(*v.metricsHttpPort) minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent) v.masters = pb.ServerAddresses(*v.mastersString).ToAddresses() @@ -162,8 +148,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v // set max maxCountStrings := strings.Split(maxVolumeCounts, ",") for _, maxString := range maxCountStrings { - if max, e := strconv.ParseInt(maxString, 10, 64); e == nil { - v.folderMaxLimits = append(v.folderMaxLimits, int32(max)) + if max, e := strconv.Atoi(maxString); e == nil { + v.folderMaxLimits = append(v.folderMaxLimits, max) } else { glog.Fatalf("The max specified in -max not a valid number %s", maxString) } @@ -202,7 +188,9 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } // security related white list configuration - v.whiteList = util.StringSplit(volumeWhiteListOption, ",") + if volumeWhiteListOption != "" { + v.whiteList = strings.Split(volumeWhiteListOption, ",") + } if *v.ip == "" { *v.ip = util.DetectedHostAddress() @@ -251,7 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes, *v.idxFolder, volumeNeedleMapKind, - v.masters, constants.VolumePulseSeconds, *v.dataCenter, *v.rack, + v.masters, 5, *v.dataCenter, *v.rack, v.whiteList, *v.fixJpgOrientation, *v.readMode, *v.compactionMBPerSecond, @@ -259,10 +247,6 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v int64(*v.concurrentUploadLimitMB)*1024*1024, int64(*v.concurrentDownloadLimitMB)*1024*1024, *v.inflightUploadDataTimeout, - *v.inflightDownloadDataTimeout, - *v.hasSlowRead, - *v.readBufferSizeMB, - *v.ldbTimeout, ) // starting grpc server grpcS := v.startGrpcService(volumeServer) @@ -276,12 +260,14 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } + // starting tcp server + if *v.enableTcp { + go v.startTcpService(volumeServer) + } + // starting the cluster http server clusterHttpServer := v.startClusterHttpService(volumeMux) - grace.OnReload(volumeServer.LoadNewVolumes) - grace.OnReload(volumeServer.Reload) - stopChan := make(chan bool) grace.OnInterrupt(func() { fmt.Println("volume server has been killed") @@ -351,7 +337,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort) - glog.V(0).Infoln("Start Seaweed volume server", version.Version(), "public at", publicListeningAddress) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress) publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { glog.Fatalf("Volume server listener error:%v", e) @@ -378,7 +364,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd } listeningAddress := util.JoinHostPort(*v.bindIp, *v.port) - glog.V(0).Infof("Start Seaweed volume server %s at %s", version.Version(), listeningAddress) + glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress) listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { glog.Fatalf("Volume server listener error:%v", e) @@ -394,7 +380,6 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd if viper.GetString("https.volume.ca") != "" { clientCertFile := viper.GetString("https.volume.ca") httpS.TLSConfig = security.LoadClientTLSHTTP(clientCertFile) - security.FixTlsConfig(util.GetViper(), httpS.TLSConfig) } clusterHttpServer := httpDown.Serve(httpS, listener) @@ -405,3 +390,22 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd }() return clusterHttpServer } + +func (v VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) { + listeningAddress := util.JoinHostPort(*v.bindIp, *v.port+20000) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress) + listener, e := util.NewListener(listeningAddress, 0) + if e != nil { + glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e) + } + defer listener.Close() + + for { + c, err := listener.Accept() + if err != nil { + fmt.Println(err) + return + } + go volumeServer.HandleTcpConnection(c) + } +} diff --git a/weed/command/volume_test.go b/weed/command/volume_test.go index 801041a88..7399f1248 100644 --- a/weed/command/volume_test.go +++ b/weed/command/volume_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" ) func TestXYZ(t *testing.T) { diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 5ad0ca225..689bf3c30 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -3,19 +3,18 @@ package command import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/version" "net/http" "os" "os/user" "strconv" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -24,8 +23,6 @@ var ( type WebDavOption struct { filer *string - ipBind *string - filerRootPath *string port *int collection *string replication *string @@ -34,13 +31,11 @@ type WebDavOption struct { tlsCertificate *string cacheDir *string cacheSizeMB *int64 - maxMB *int } func init() { cmdWebDav.Run = runWebDav // break init cycle webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address") - webDavStandaloneOptions.ipBind = cmdWebDav.Flag.String("ip.bind", "", "ip address to bind to. Default listen to all.") webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port") webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files") webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files") @@ -49,8 +44,6 @@ func init() { webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks") webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB") - webDavStandaloneOptions.maxMB = cmdWebDav.Flag.Int("maxMB", 4, "split files larger than the limit") - webDavStandaloneOptions.filerRootPath = cmdWebDav.Flag.String("filer.path", "/", "use this remote path from filer server") } var cmdWebDav = &Command{ @@ -63,10 +56,9 @@ var cmdWebDav = &Command{ func runWebDav(cmd *Command, args []string) bool { - util.LoadSecurityConfiguration() + util.LoadConfiguration("security", false) - listenAddress := fmt.Sprintf("%s:%d", *webDavStandaloneOptions.ipBind, *webDavStandaloneOptions.port) - glog.V(0).Infof("Starting Seaweed WebDav Server %s at %s", version.Version(), listenAddress) + glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port) return webDavStandaloneOptions.startWebDav() @@ -93,7 +85,7 @@ func (wo *WebDavOption) startWebDav() bool { var cipher bool // connect to filer for { - err := pb.WithGrpcFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithGrpcFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) @@ -112,7 +104,6 @@ func (wo *WebDavOption) startWebDav() bool { ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: filerAddress, - FilerRootPath: *wo.filerRootPath, GrpcDialOption: grpcDialOption, Collection: *wo.collection, Replication: *wo.replication, @@ -122,7 +113,6 @@ func (wo *WebDavOption) startWebDav() bool { Cipher: cipher, CacheDir: util.ResolvePath(*wo.cacheDir), CacheSizeMB: *wo.cacheSizeMB, - MaxMB: *wo.maxMB, }) if webdavServer_err != nil { glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) @@ -130,19 +120,19 @@ func (wo *WebDavOption) startWebDav() bool { httpS := &http.Server{Handler: ws.Handler} - listenAddress := fmt.Sprintf("%s:%d", *wo.ipBind, *wo.port) + listenAddress := fmt.Sprintf(":%d", *wo.port) webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) if err != nil { glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err) } if *wo.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed WebDav Server %s at https %s", version.Version(), listenAddress) + glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port) if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed WebDav Server %s at http %s", version.Version(), listenAddress) + glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port) if err = httpS.Serve(webDavListener); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } diff --git a/weed/command/worker.go b/weed/command/worker.go deleted file mode 100644 index 6e592f73f..000000000 --- a/weed/command/worker.go +++ /dev/null @@ -1,238 +0,0 @@ -package command - -import ( - "os" - "os/signal" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/worker" - "github.com/seaweedfs/seaweedfs/weed/worker/tasks" - "github.com/seaweedfs/seaweedfs/weed/worker/types" - - // Import task packages to trigger their auto-registration - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/erasure_coding" - _ "github.com/seaweedfs/seaweedfs/weed/worker/tasks/vacuum" -) - -var cmdWorker = &Command{ - UsageLine: "worker -admin= [-capabilities=] [-maxConcurrent=] [-workingDir=]", - Short: "start a maintenance worker to process cluster maintenance tasks", - Long: `Start a maintenance worker that connects to an admin server to process -maintenance tasks like vacuum, erasure coding, remote upload, and replication fixes. - -The worker ID and address are automatically generated. -The worker connects to the admin server via gRPC (admin HTTP port + 10000). - -Examples: - weed worker -admin=localhost:23646 - weed worker -admin=admin.example.com:23646 - weed worker -admin=localhost:23646 -capabilities=vacuum,replication - weed worker -admin=localhost:23646 -maxConcurrent=4 - weed worker -admin=localhost:23646 -workingDir=/tmp/worker -`, -} - -var ( - workerAdminServer = cmdWorker.Flag.String("admin", "localhost:23646", "admin server address") - workerCapabilities = cmdWorker.Flag.String("capabilities", "vacuum,ec,remote,replication,balance", "comma-separated list of task types this worker can handle") - workerMaxConcurrent = cmdWorker.Flag.Int("maxConcurrent", 2, "maximum number of concurrent tasks") - workerHeartbeatInterval = cmdWorker.Flag.Duration("heartbeat", 30*time.Second, "heartbeat interval") - workerTaskRequestInterval = cmdWorker.Flag.Duration("taskInterval", 5*time.Second, "task request interval") - workerWorkingDir = cmdWorker.Flag.String("workingDir", "", "working directory for the worker") -) - -func init() { - cmdWorker.Run = runWorker - - // Set default capabilities from registered task types - // This happens after package imports have triggered auto-registration - tasks.SetDefaultCapabilitiesFromRegistry() -} - -func runWorker(cmd *Command, args []string) bool { - util.LoadConfiguration("security", false) - - glog.Infof("Starting maintenance worker") - glog.Infof("Admin server: %s", *workerAdminServer) - glog.Infof("Capabilities: %s", *workerCapabilities) - - // Parse capabilities - capabilities := parseCapabilities(*workerCapabilities) - if len(capabilities) == 0 { - glog.Fatalf("No valid capabilities specified") - return false - } - - // Set working directory and create task-specific subdirectories - var baseWorkingDir string - if *workerWorkingDir != "" { - glog.Infof("Setting working directory to: %s", *workerWorkingDir) - if err := os.Chdir(*workerWorkingDir); err != nil { - glog.Fatalf("Failed to change working directory: %v", err) - return false - } - wd, err := os.Getwd() - if err != nil { - glog.Fatalf("Failed to get working directory: %v", err) - return false - } - baseWorkingDir = wd - glog.Infof("Current working directory: %s", baseWorkingDir) - } else { - // Use default working directory when not specified - wd, err := os.Getwd() - if err != nil { - glog.Fatalf("Failed to get current working directory: %v", err) - return false - } - baseWorkingDir = wd - glog.Infof("Using current working directory: %s", baseWorkingDir) - } - - // Create task-specific subdirectories - for _, capability := range capabilities { - taskDir := filepath.Join(baseWorkingDir, string(capability)) - if err := os.MkdirAll(taskDir, 0755); err != nil { - glog.Fatalf("Failed to create task directory %s: %v", taskDir, err) - return false - } - glog.Infof("Created task directory: %s", taskDir) - } - - // Create gRPC dial option using TLS configuration - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.worker") - - // Create worker configuration - config := &types.WorkerConfig{ - AdminServer: *workerAdminServer, - Capabilities: capabilities, - MaxConcurrent: *workerMaxConcurrent, - HeartbeatInterval: *workerHeartbeatInterval, - TaskRequestInterval: *workerTaskRequestInterval, - BaseWorkingDir: baseWorkingDir, - GrpcDialOption: grpcDialOption, - } - - // Create worker instance - workerInstance, err := worker.NewWorker(config) - if err != nil { - glog.Fatalf("Failed to create worker: %v", err) - return false - } - adminClient, err := worker.CreateAdminClient(*workerAdminServer, workerInstance.ID(), grpcDialOption) - if err != nil { - glog.Fatalf("Failed to create admin client: %v", err) - return false - } - - // Set admin client - workerInstance.SetAdminClient(adminClient) - - // Set working directory - if *workerWorkingDir != "" { - glog.Infof("Setting working directory to: %s", *workerWorkingDir) - if err := os.Chdir(*workerWorkingDir); err != nil { - glog.Fatalf("Failed to change working directory: %v", err) - return false - } - wd, err := os.Getwd() - if err != nil { - glog.Fatalf("Failed to get working directory: %v", err) - return false - } - glog.Infof("Current working directory: %s", wd) - } - - // Start the worker - err = workerInstance.Start() - if err != nil { - glog.Errorf("Failed to start worker: %v", err) - return false - } - - // Set up signal handling - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - glog.Infof("Maintenance worker %s started successfully", workerInstance.ID()) - glog.Infof("Press Ctrl+C to stop the worker") - - // Wait for shutdown signal - <-sigChan - glog.Infof("Shutdown signal received, stopping worker...") - - // Gracefully stop the worker - err = workerInstance.Stop() - if err != nil { - glog.Errorf("Error stopping worker: %v", err) - } - glog.Infof("Worker stopped") - - return true -} - -// parseCapabilities converts comma-separated capability string to task types -func parseCapabilities(capabilityStr string) []types.TaskType { - if capabilityStr == "" { - return nil - } - - capabilityMap := map[string]types.TaskType{} - - // Populate capabilityMap with registered task types - typesRegistry := tasks.GetGlobalTypesRegistry() - for taskType := range typesRegistry.GetAllDetectors() { - // Use the task type string directly as the key - capabilityMap[strings.ToLower(string(taskType))] = taskType - } - - // Add common aliases for convenience - if taskType, exists := capabilityMap["erasure_coding"]; exists { - capabilityMap["ec"] = taskType - } - if taskType, exists := capabilityMap["remote_upload"]; exists { - capabilityMap["remote"] = taskType - } - if taskType, exists := capabilityMap["fix_replication"]; exists { - capabilityMap["replication"] = taskType - } - - var capabilities []types.TaskType - parts := strings.Split(capabilityStr, ",") - - for _, part := range parts { - part = strings.TrimSpace(part) - if taskType, exists := capabilityMap[part]; exists { - capabilities = append(capabilities, taskType) - } else { - glog.Warningf("Unknown capability: %s", part) - } - } - - return capabilities -} - -// Legacy compatibility types for backward compatibility -// These will be deprecated in future versions - -// WorkerStatus represents the current status of a worker (deprecated) -type WorkerStatus struct { - WorkerID string `json:"worker_id"` - Address string `json:"address"` - Status string `json:"status"` - Capabilities []types.TaskType `json:"capabilities"` - MaxConcurrent int `json:"max_concurrent"` - CurrentLoad int `json:"current_load"` - LastHeartbeat time.Time `json:"last_heartbeat"` - CurrentTasks []types.Task `json:"current_tasks"` - Uptime time.Duration `json:"uptime"` - TasksCompleted int `json:"tasks_completed"` - TasksFailed int `json:"tasks_failed"` -} diff --git a/weed/credential/README.md b/weed/credential/README.md deleted file mode 100644 index dc3dc04c4..000000000 --- a/weed/credential/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# Credential Store Integration - -This document shows how the credential store has been integrated into SeaweedFS's S3 API and IAM API components. - -## Quick Start - -1. **Generate credential configuration:** - ```bash - weed scaffold -config=credential -output=. - ``` - -2. **Edit credential.toml** to enable your preferred store (filer_etc is enabled by default) - -3. **Start S3 API server** - it will automatically load credential.toml: - ```bash - weed s3 -filer=localhost:8888 - ``` - -## Integration Overview - -The credential store provides a pluggable backend for storing S3 identities and credentials, supporting: -- **Filer-based storage** (filer_etc) - Uses existing filer storage (default) -- **PostgreSQL** - Shared database for multiple servers -- **Memory** - In-memory storage for testing - -## Configuration - -### Using credential.toml - -Generate the configuration template: -```bash -weed scaffold -config=credential -``` - -This creates a `credential.toml` file with all available options. The filer_etc store is enabled by default: - -```toml -# Filer-based credential store (default, uses existing filer storage) -[credential.filer_etc] -enabled = true - - -# PostgreSQL credential store (recommended for multi-node deployments) -[credential.postgres] -enabled = false -hostname = "localhost" -port = 5432 -username = "seaweedfs" -password = "your_password" -database = "seaweedfs" - -# Memory credential store (for testing only, data is lost on restart) -[credential.memory] -enabled = false -``` - -The credential.toml file is automatically loaded from these locations (in priority order): -- `./credential.toml` -- `$HOME/.seaweedfs/credential.toml` -- `/etc/seaweedfs/credential.toml` - -### Server Configuration - -Both S3 API and IAM API servers automatically load credential.toml during startup. No additional configuration is required. - -## Usage Examples - -### Filer-based Store (Default) - -```toml -[credential.filer_etc] -enabled = true -``` - -This uses the existing filer storage and is compatible with current deployments. - - - -### PostgreSQL Store - -```toml -[credential.postgres] -enabled = true -hostname = "localhost" -port = 5432 -username = "seaweedfs" -password = "your_password" -database = "seaweedfs" -schema = "public" -sslmode = "disable" -table_prefix = "sw_" -connection_max_idle = 10 -connection_max_open = 100 -connection_max_lifetime_seconds = 3600 -``` - -### Memory Store (Testing) - -```toml -[credential.memory] -enabled = true -``` - -## Environment Variables - -All credential configuration can be overridden with environment variables: - -```bash -# Override PostgreSQL password -export WEED_CREDENTIAL_POSTGRES_PASSWORD=secret - - -# Override PostgreSQL hostname -export WEED_CREDENTIAL_POSTGRES_HOSTNAME=db.example.com - -# Enable/disable stores -export WEED_CREDENTIAL_FILER_ETC_ENABLED=true -``` - -Rules: -- Prefix with `WEED_CREDENTIAL_` -- Convert to uppercase -- Replace `.` with `_` - -## Implementation Details - -Components automatically load credential configuration during startup: - -```go -// Server initialization -if credConfig, err := credential.LoadCredentialConfiguration(); err == nil && credConfig != nil { - credentialManager, err := credential.NewCredentialManager( - credConfig.Store, - credConfig.Config, - credConfig.Prefix, - ) - if err != nil { - return nil, fmt.Errorf("failed to initialize credential manager: %v", err) - } - // Use credential manager for operations -} -``` - -## Benefits - -1. **Easy Configuration** - Generate template with `weed scaffold -config=credential` -2. **Pluggable Storage** - Switch between filer_etc, PostgreSQL without code changes -3. **Backward Compatibility** - Filer-based storage works with existing deployments -4. **Scalability** - Database stores support multiple concurrent servers -5. **Performance** - Database access can be faster than file-based storage -6. **Testing** - Memory store simplifies unit testing -7. **Environment Override** - All settings can be overridden with environment variables - -## Error Handling - -When a credential store is configured, it must initialize successfully or the server will fail to start: - -```go -if credConfig != nil { - credentialManager, err = credential.NewCredentialManager(...) - if err != nil { - return nil, fmt.Errorf("failed to initialize credential manager: %v", err) - } -} -``` - -This ensures explicit configuration - if you configure a credential store, it must work properly. \ No newline at end of file diff --git a/weed/credential/config_loader.go b/weed/credential/config_loader.go deleted file mode 100644 index 959f1cfb4..000000000 --- a/weed/credential/config_loader.go +++ /dev/null @@ -1,133 +0,0 @@ -package credential - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// CredentialConfig represents the credential configuration from credential.toml -type CredentialConfig struct { - Store string - Config util.Configuration - Prefix string -} - -// LoadCredentialConfiguration loads credential configuration from credential.toml -// Returns the store type, configuration, and prefix for credential management -func LoadCredentialConfiguration() (*CredentialConfig, error) { - // Try to load credential.toml configuration - loaded := util.LoadConfiguration("credential", false) - if !loaded { - glog.V(1).Info("No credential.toml found, credential store disabled") - return nil, nil - } - - viper := util.GetViper() - - // Find which credential store is enabled - var enabledStore string - var storePrefix string - - // Get available store types from registered stores - storeTypes := GetAvailableStores() - for _, storeType := range storeTypes { - key := fmt.Sprintf("credential.%s.enabled", string(storeType)) - if viper.GetBool(key) { - if enabledStore != "" { - return nil, fmt.Errorf("multiple credential stores enabled: %s and %s. Only one store can be enabled", enabledStore, string(storeType)) - } - enabledStore = string(storeType) - storePrefix = fmt.Sprintf("credential.%s.", string(storeType)) - } - } - - if enabledStore == "" { - glog.V(1).Info("No credential store enabled in credential.toml") - return nil, nil - } - - glog.V(0).Infof("Loaded credential configuration: store=%s", enabledStore) - - return &CredentialConfig{ - Store: enabledStore, - Config: viper, - Prefix: storePrefix, - }, nil -} - -// GetCredentialStoreConfig extracts credential store configuration from command line flags -// This is used when credential store is configured via command line instead of credential.toml -func GetCredentialStoreConfig(store string, config util.Configuration, prefix string) *CredentialConfig { - if store == "" { - return nil - } - - return &CredentialConfig{ - Store: store, - Config: config, - Prefix: prefix, - } -} - -// MergeCredentialConfig merges command line credential config with credential.toml config -// Command line flags take priority over credential.toml -func MergeCredentialConfig(cmdLineStore string, cmdLineConfig util.Configuration, cmdLinePrefix string) (*CredentialConfig, error) { - // If command line credential store is specified, use it - if cmdLineStore != "" { - glog.V(0).Infof("Using command line credential configuration: store=%s", cmdLineStore) - return GetCredentialStoreConfig(cmdLineStore, cmdLineConfig, cmdLinePrefix), nil - } - - // Otherwise, try to load from credential.toml - config, err := LoadCredentialConfiguration() - if err != nil { - return nil, err - } - - if config == nil { - glog.V(1).Info("No credential store configured") - } - - return config, nil -} - -// NewCredentialManagerWithDefaults creates a credential manager with fallback to defaults -// If explicitStore is provided, it will be used regardless of credential.toml -// If explicitStore is empty, it tries credential.toml first, then defaults to "filer_etc" -func NewCredentialManagerWithDefaults(explicitStore CredentialStoreTypeName) (*CredentialManager, error) { - var storeName CredentialStoreTypeName - var config util.Configuration - var prefix string - - // If explicit store is provided, use it - if explicitStore != "" { - storeName = explicitStore - config = nil - prefix = "" - glog.V(0).Infof("Using explicit credential store: %s", storeName) - } else { - // Try to load from credential.toml first - if credConfig, err := LoadCredentialConfiguration(); err == nil && credConfig != nil { - storeName = CredentialStoreTypeName(credConfig.Store) - config = credConfig.Config - prefix = credConfig.Prefix - glog.V(0).Infof("Loaded credential configuration from credential.toml: store=%s", storeName) - } else { - // Default to filer_etc store - storeName = StoreTypeFilerEtc - config = nil - prefix = "" - glog.V(1).Info("No credential.toml found, defaulting to filer_etc store") - } - } - - // Create the credential manager - credentialManager, err := NewCredentialManager(storeName, config, prefix) - if err != nil { - return nil, fmt.Errorf("failed to initialize credential manager with store '%s': %v", storeName, err) - } - - return credentialManager, nil -} diff --git a/weed/credential/credential_manager.go b/weed/credential/credential_manager.go deleted file mode 100644 index d4323e920..000000000 --- a/weed/credential/credential_manager.go +++ /dev/null @@ -1,125 +0,0 @@ -package credential - -import ( - "context" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// CredentialManager manages user credentials using a configurable store -type CredentialManager struct { - store CredentialStore -} - -// NewCredentialManager creates a new credential manager with the specified store -func NewCredentialManager(storeName CredentialStoreTypeName, configuration util.Configuration, prefix string) (*CredentialManager, error) { - var store CredentialStore - - // Find the requested store implementation - for _, s := range Stores { - if s.GetName() == storeName { - store = s - break - } - } - - if store == nil { - return nil, fmt.Errorf("credential store '%s' not found. Available stores: %s", - storeName, getAvailableStores()) - } - - // Initialize the store - if err := store.Initialize(configuration, prefix); err != nil { - return nil, fmt.Errorf("failed to initialize credential store '%s': %v", storeName, err) - } - - return &CredentialManager{ - store: store, - }, nil -} - -// GetStore returns the underlying credential store -func (cm *CredentialManager) GetStore() CredentialStore { - return cm.store -} - -// LoadConfiguration loads the S3 API configuration -func (cm *CredentialManager) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) { - return cm.store.LoadConfiguration(ctx) -} - -// SaveConfiguration saves the S3 API configuration -func (cm *CredentialManager) SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error { - return cm.store.SaveConfiguration(ctx, config) -} - -// CreateUser creates a new user -func (cm *CredentialManager) CreateUser(ctx context.Context, identity *iam_pb.Identity) error { - return cm.store.CreateUser(ctx, identity) -} - -// GetUser retrieves a user by username -func (cm *CredentialManager) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) { - return cm.store.GetUser(ctx, username) -} - -// UpdateUser updates an existing user -func (cm *CredentialManager) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error { - return cm.store.UpdateUser(ctx, username, identity) -} - -// DeleteUser removes a user -func (cm *CredentialManager) DeleteUser(ctx context.Context, username string) error { - return cm.store.DeleteUser(ctx, username) -} - -// ListUsers returns all usernames -func (cm *CredentialManager) ListUsers(ctx context.Context) ([]string, error) { - return cm.store.ListUsers(ctx) -} - -// GetUserByAccessKey retrieves a user by access key -func (cm *CredentialManager) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) { - return cm.store.GetUserByAccessKey(ctx, accessKey) -} - -// CreateAccessKey creates a new access key for a user -func (cm *CredentialManager) CreateAccessKey(ctx context.Context, username string, credential *iam_pb.Credential) error { - return cm.store.CreateAccessKey(ctx, username, credential) -} - -// DeleteAccessKey removes an access key for a user -func (cm *CredentialManager) DeleteAccessKey(ctx context.Context, username string, accessKey string) error { - return cm.store.DeleteAccessKey(ctx, username, accessKey) -} - -// Shutdown performs cleanup -func (cm *CredentialManager) Shutdown() { - if cm.store != nil { - cm.store.Shutdown() - } -} - -// getAvailableStores returns a comma-separated list of available store names -func getAvailableStores() string { - var storeNames []string - for _, store := range Stores { - storeNames = append(storeNames, string(store.GetName())) - } - return strings.Join(storeNames, ", ") -} - -// GetAvailableStores returns a list of available credential store names -func GetAvailableStores() []CredentialStoreTypeName { - var storeNames []CredentialStoreTypeName - for _, store := range Stores { - storeNames = append(storeNames, store.GetName()) - } - if storeNames == nil { - return []CredentialStoreTypeName{} - } - return storeNames -} diff --git a/weed/credential/credential_store.go b/weed/credential/credential_store.go deleted file mode 100644 index 9bcb69260..000000000 --- a/weed/credential/credential_store.go +++ /dev/null @@ -1,100 +0,0 @@ -package credential - -import ( - "context" - "errors" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -var ( - ErrUserNotFound = errors.New("user not found") - ErrUserAlreadyExists = errors.New("user already exists") - ErrAccessKeyNotFound = errors.New("access key not found") -) - -// CredentialStoreTypeName represents the type name of a credential store -type CredentialStoreTypeName string - -// Credential store name constants -const ( - StoreTypeMemory CredentialStoreTypeName = "memory" - StoreTypeFilerEtc CredentialStoreTypeName = "filer_etc" - StoreTypePostgres CredentialStoreTypeName = "postgres" -) - -// CredentialStore defines the interface for user credential storage and retrieval -type CredentialStore interface { - // GetName returns the name of the credential store implementation - GetName() CredentialStoreTypeName - - // Initialize initializes the credential store with configuration - Initialize(configuration util.Configuration, prefix string) error - - // LoadConfiguration loads the entire S3 API configuration - LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) - - // SaveConfiguration saves the entire S3 API configuration - SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error - - // CreateUser creates a new user with the given identity - CreateUser(ctx context.Context, identity *iam_pb.Identity) error - - // GetUser retrieves a user by username - GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) - - // UpdateUser updates an existing user - UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error - - // DeleteUser removes a user by username - DeleteUser(ctx context.Context, username string) error - - // ListUsers returns all usernames - ListUsers(ctx context.Context) ([]string, error) - - // GetUserByAccessKey retrieves a user by access key - GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) - - // CreateAccessKey creates a new access key for a user - CreateAccessKey(ctx context.Context, username string, credential *iam_pb.Credential) error - - // DeleteAccessKey removes an access key for a user - DeleteAccessKey(ctx context.Context, username string, accessKey string) error - - // Shutdown performs cleanup when the store is being shut down - Shutdown() -} - -// AccessKeyInfo represents access key information with metadata -type AccessKeyInfo struct { - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - Username string `json:"username"` - CreatedAt time.Time `json:"createdAt"` -} - -// UserCredentials represents a user's credentials and metadata -type UserCredentials struct { - Username string `json:"username"` - Email string `json:"email"` - Account *iam_pb.Account `json:"account,omitempty"` - Credentials []*iam_pb.Credential `json:"credentials"` - Actions []string `json:"actions"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// PolicyManager interface for managing IAM policies -type PolicyManager interface { - GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) - CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error - UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error - DeletePolicy(ctx context.Context, name string) error - GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) -} - -// Stores holds all available credential store implementations -var Stores []CredentialStore diff --git a/weed/credential/credential_test.go b/weed/credential/credential_test.go deleted file mode 100644 index dd1449fa5..000000000 --- a/weed/credential/credential_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package credential - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func TestCredentialStoreInterface(t *testing.T) { - // Note: This test may fail if run without importing store packages - // For full integration testing, see the test/ package - if len(Stores) == 0 { - t.Skip("No credential stores registered - this is expected when testing the base package without store imports") - } - - // Check that expected stores are available - storeNames := GetAvailableStores() - expectedStores := []string{string(StoreTypeFilerEtc), string(StoreTypeMemory)} - - // Add PostgreSQL if it's available (build tags dependent) - for _, storeName := range storeNames { - found := false - for _, expected := range append(expectedStores, string(StoreTypePostgres)) { - if string(storeName) == expected { - found = true - break - } - } - if !found { - t.Errorf("Unexpected store found: %s", storeName) - } - } - - // Test that filer_etc store is always available - filerEtcStoreFound := false - memoryStoreFound := false - for _, storeName := range storeNames { - if string(storeName) == string(StoreTypeFilerEtc) { - filerEtcStoreFound = true - } - if string(storeName) == string(StoreTypeMemory) { - memoryStoreFound = true - } - } - if !filerEtcStoreFound { - t.Error("FilerEtc store should always be available") - } - if !memoryStoreFound { - t.Error("Memory store should always be available") - } -} - -func TestCredentialManagerCreation(t *testing.T) { - config := util.GetViper() - - // Test creating credential manager with invalid store - _, err := NewCredentialManager(CredentialStoreTypeName("nonexistent"), config, "test.") - if err == nil { - t.Error("Expected error for nonexistent store") - } - - // Skip store-specific tests if no stores are registered - if len(Stores) == 0 { - t.Skip("No credential stores registered - skipping store-specific tests") - } - - // Test creating credential manager with available stores - availableStores := GetAvailableStores() - if len(availableStores) == 0 { - t.Skip("No stores available for testing") - } - - // Test with the first available store - storeName := availableStores[0] - cm, err := NewCredentialManager(storeName, config, "test.") - if err != nil { - t.Fatalf("Failed to create credential manager with store %s: %v", storeName, err) - } - if cm == nil { - t.Error("Credential manager should not be nil") - } - defer cm.Shutdown() - - // Test that the store is of the correct type - if cm.GetStore().GetName() != storeName { - t.Errorf("Expected %s store, got %s", storeName, cm.GetStore().GetName()) - } -} - -func TestCredentialInterface(t *testing.T) { - // Skip if no stores are registered - if len(Stores) == 0 { - t.Skip("No credential stores registered - for full testing see test/ package") - } - - // Test the interface with the first available store - availableStores := GetAvailableStores() - if len(availableStores) == 0 { - t.Skip("No stores available for testing") - } - - testCredentialInterfaceWithStore(t, availableStores[0]) -} - -func testCredentialInterfaceWithStore(t *testing.T, storeName CredentialStoreTypeName) { - // Create a test identity - testIdentity := &iam_pb.Identity{ - Name: "testuser", - Actions: []string{"Read", "Write"}, - Account: &iam_pb.Account{ - Id: "123456789012", - DisplayName: "Test User", - EmailAddress: "test@example.com", - }, - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - } - - // Test the interface methods exist (compile-time check) - config := util.GetViper() - cm, err := NewCredentialManager(storeName, config, "test.") - if err != nil { - t.Fatalf("Failed to create credential manager: %v", err) - } - defer cm.Shutdown() - - ctx := context.Background() - - // Test LoadConfiguration - _, err = cm.LoadConfiguration(ctx) - if err != nil { - t.Fatalf("LoadConfiguration failed: %v", err) - } - - // Test CreateUser - err = cm.CreateUser(ctx, testIdentity) - if err != nil { - t.Fatalf("CreateUser failed: %v", err) - } - - // Test GetUser - user, err := cm.GetUser(ctx, "testuser") - if err != nil { - t.Fatalf("GetUser failed: %v", err) - } - if user.Name != "testuser" { - t.Errorf("Expected user name 'testuser', got %s", user.Name) - } - - // Test ListUsers - users, err := cm.ListUsers(ctx) - if err != nil { - t.Fatalf("ListUsers failed: %v", err) - } - if len(users) != 1 || users[0] != "testuser" { - t.Errorf("Expected ['testuser'], got %v", users) - } - - // Test GetUserByAccessKey - userByKey, err := cm.GetUserByAccessKey(ctx, "AKIAIOSFODNN7EXAMPLE") - if err != nil { - t.Fatalf("GetUserByAccessKey failed: %v", err) - } - if userByKey.Name != "testuser" { - t.Errorf("Expected user name 'testuser', got %s", userByKey.Name) - } -} - -func TestCredentialManagerIntegration(t *testing.T) { - // Skip if no stores are registered - if len(Stores) == 0 { - t.Skip("No credential stores registered - for full testing see test/ package") - } - - // Test with the first available store - availableStores := GetAvailableStores() - if len(availableStores) == 0 { - t.Skip("No stores available for testing") - } - - storeName := availableStores[0] - config := util.GetViper() - cm, err := NewCredentialManager(storeName, config, "test.") - if err != nil { - t.Fatalf("Failed to create credential manager: %v", err) - } - defer cm.Shutdown() - - ctx := context.Background() - - // Test complete workflow - user1 := &iam_pb.Identity{ - Name: "user1", - Actions: []string{"Read"}, - Account: &iam_pb.Account{ - Id: "111111111111", - DisplayName: "User One", - EmailAddress: "user1@example.com", - }, - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAUSER1", - SecretKey: "secret1", - }, - }, - } - - user2 := &iam_pb.Identity{ - Name: "user2", - Actions: []string{"Write"}, - Account: &iam_pb.Account{ - Id: "222222222222", - DisplayName: "User Two", - EmailAddress: "user2@example.com", - }, - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAUSER2", - SecretKey: "secret2", - }, - }, - } - - // Create users - err = cm.CreateUser(ctx, user1) - if err != nil { - t.Fatalf("Failed to create user1: %v", err) - } - - err = cm.CreateUser(ctx, user2) - if err != nil { - t.Fatalf("Failed to create user2: %v", err) - } - - // List users - users, err := cm.ListUsers(ctx) - if err != nil { - t.Fatalf("Failed to list users: %v", err) - } - - if len(users) != 2 { - t.Errorf("Expected 2 users, got %d", len(users)) - } - - // Test access key lookup - foundUser, err := cm.GetUserByAccessKey(ctx, "AKIAUSER1") - if err != nil { - t.Fatalf("Failed to get user by access key: %v", err) - } - if foundUser.Name != "user1" { - t.Errorf("Expected user1, got %s", foundUser.Name) - } - - // Delete user - err = cm.DeleteUser(ctx, "user1") - if err != nil { - t.Fatalf("Failed to delete user: %v", err) - } - - // Verify user is deleted - _, err = cm.GetUser(ctx, "user1") - if err != ErrUserNotFound { - t.Errorf("Expected ErrUserNotFound, got %v", err) - } - - // Clean up - err = cm.DeleteUser(ctx, "user2") - if err != nil { - t.Fatalf("Failed to delete user2: %v", err) - } -} - -// TestErrorTypes tests that the custom error types are defined correctly -func TestErrorTypes(t *testing.T) { - // Test that error types are defined - if ErrUserNotFound == nil { - t.Error("ErrUserNotFound should be defined") - } - if ErrUserAlreadyExists == nil { - t.Error("ErrUserAlreadyExists should be defined") - } - if ErrAccessKeyNotFound == nil { - t.Error("ErrAccessKeyNotFound should be defined") - } - - // Test error messages - if ErrUserNotFound.Error() != "user not found" { - t.Errorf("Expected 'user not found', got '%s'", ErrUserNotFound.Error()) - } - if ErrUserAlreadyExists.Error() != "user already exists" { - t.Errorf("Expected 'user already exists', got '%s'", ErrUserAlreadyExists.Error()) - } - if ErrAccessKeyNotFound.Error() != "access key not found" { - t.Errorf("Expected 'access key not found', got '%s'", ErrAccessKeyNotFound.Error()) - } -} - -// TestGetAvailableStores tests the store discovery function -func TestGetAvailableStores(t *testing.T) { - stores := GetAvailableStores() - if len(stores) == 0 { - t.Skip("No stores available for testing") - } - - // Convert to strings for comparison - storeNames := make([]string, len(stores)) - for i, store := range stores { - storeNames[i] = string(store) - } - - t.Logf("Available stores: %v (count: %d)", storeNames, len(storeNames)) - - // We expect at least memory and filer_etc stores to be available - expectedStores := []string{string(StoreTypeFilerEtc), string(StoreTypeMemory)} - - // Add PostgreSQL if it's available (build tags dependent) - for _, storeName := range storeNames { - found := false - for _, expected := range append(expectedStores, string(StoreTypePostgres)) { - if storeName == expected { - found = true - break - } - } - if !found { - t.Errorf("Unexpected store found: %s", storeName) - } - } - - // Test that filer_etc store is always available - filerEtcStoreFound := false - memoryStoreFound := false - for _, storeName := range storeNames { - if storeName == string(StoreTypeFilerEtc) { - filerEtcStoreFound = true - } - if storeName == string(StoreTypeMemory) { - memoryStoreFound = true - } - } - if !filerEtcStoreFound { - t.Error("FilerEtc store should always be available") - } - if !memoryStoreFound { - t.Error("Memory store should always be available") - } -} diff --git a/weed/credential/filer_etc/filer_etc_identity.go b/weed/credential/filer_etc/filer_etc_identity.go deleted file mode 100644 index f57c7c3ac..000000000 --- a/weed/credential/filer_etc/filer_etc_identity.go +++ /dev/null @@ -1,188 +0,0 @@ -package filer_etc - -import ( - "bytes" - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" -) - -func (store *FilerEtcStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) { - s3cfg := &iam_pb.S3ApiConfiguration{} - - err := store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var buf bytes.Buffer - if err := filer.ReadEntry(nil, client, filer.IamConfigDirectory, filer.IamIdentityFile, &buf); err != nil { - if err != filer_pb.ErrNotFound { - return err - } - } - if buf.Len() > 0 { - return filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg) - } - return nil - }) - - return s3cfg, err -} - -func (store *FilerEtcStore) SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error { - return store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var buf bytes.Buffer - if err := filer.ProtoToText(&buf, config); err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) - } - return filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile, buf.Bytes()) - }) -} - -func (store *FilerEtcStore) CreateUser(ctx context.Context, identity *iam_pb.Identity) error { - // Load existing configuration - config, err := store.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - // Check if user already exists - for _, existingIdentity := range config.Identities { - if existingIdentity.Name == identity.Name { - return credential.ErrUserAlreadyExists - } - } - - // Add new identity - config.Identities = append(config.Identities, identity) - - // Save configuration - return store.SaveConfiguration(ctx, config) -} - -func (store *FilerEtcStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - for _, identity := range config.Identities { - if identity.Name == username { - return identity, nil - } - } - - return nil, credential.ErrUserNotFound -} - -func (store *FilerEtcStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - // Find and update the user - for i, existingIdentity := range config.Identities { - if existingIdentity.Name == username { - config.Identities[i] = identity - return store.SaveConfiguration(ctx, config) - } - } - - return credential.ErrUserNotFound -} - -func (store *FilerEtcStore) DeleteUser(ctx context.Context, username string) error { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - // Find and remove the user - for i, identity := range config.Identities { - if identity.Name == username { - config.Identities = append(config.Identities[:i], config.Identities[i+1:]...) - return store.SaveConfiguration(ctx, config) - } - } - - return credential.ErrUserNotFound -} - -func (store *FilerEtcStore) ListUsers(ctx context.Context) ([]string, error) { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - var usernames []string - for _, identity := range config.Identities { - usernames = append(usernames, identity.Name) - } - - return usernames, nil -} - -func (store *FilerEtcStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - for _, identity := range config.Identities { - for _, credential := range identity.Credentials { - if credential.AccessKey == accessKey { - return identity, nil - } - } - } - - return nil, credential.ErrAccessKeyNotFound -} - -func (store *FilerEtcStore) CreateAccessKey(ctx context.Context, username string, cred *iam_pb.Credential) error { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - // Find the user and add the credential - for _, identity := range config.Identities { - if identity.Name == username { - // Check if access key already exists - for _, existingCred := range identity.Credentials { - if existingCred.AccessKey == cred.AccessKey { - return fmt.Errorf("access key %s already exists", cred.AccessKey) - } - } - - identity.Credentials = append(identity.Credentials, cred) - return store.SaveConfiguration(ctx, config) - } - } - - return credential.ErrUserNotFound -} - -func (store *FilerEtcStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error { - config, err := store.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - // Find the user and remove the credential - for _, identity := range config.Identities { - if identity.Name == username { - for i, cred := range identity.Credentials { - if cred.AccessKey == accessKey { - identity.Credentials = append(identity.Credentials[:i], identity.Credentials[i+1:]...) - return store.SaveConfiguration(ctx, config) - } - } - return credential.ErrAccessKeyNotFound - } - } - - return credential.ErrUserNotFound -} diff --git a/weed/credential/filer_etc/filer_etc_policy.go b/weed/credential/filer_etc/filer_etc_policy.go deleted file mode 100644 index 8b4647cb1..000000000 --- a/weed/credential/filer_etc/filer_etc_policy.go +++ /dev/null @@ -1,114 +0,0 @@ -package filer_etc - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" -) - -type PoliciesCollection struct { - Policies map[string]policy_engine.PolicyDocument `json:"policies"` -} - -// GetPolicies retrieves all IAM policies from the filer -func (store *FilerEtcStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) { - policiesCollection := &PoliciesCollection{ - Policies: make(map[string]policy_engine.PolicyDocument), - } - - // Check if filer client is configured - if store.filerGrpcAddress == "" { - glog.V(1).Infof("Filer client not configured for policy retrieval, returning empty policies") - // Return empty policies if filer client is not configured - return policiesCollection.Policies, nil - } - - err := store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var buf bytes.Buffer - if err := filer.ReadEntry(nil, client, filer.IamConfigDirectory, filer.IamPoliciesFile, &buf); err != nil { - if err == filer_pb.ErrNotFound { - glog.V(1).Infof("Policies file not found at %s/%s, returning empty policies", filer.IamConfigDirectory, filer.IamPoliciesFile) - // If file doesn't exist, return empty collection - return nil - } - return err - } - - if buf.Len() > 0 { - return json.Unmarshal(buf.Bytes(), policiesCollection) - } - return nil - }) - - if err != nil { - return nil, err - } - - return policiesCollection.Policies, nil -} - -// CreatePolicy creates a new IAM policy in the filer -func (store *FilerEtcStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) { - policies[name] = document - }) -} - -// UpdatePolicy updates an existing IAM policy in the filer -func (store *FilerEtcStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) { - policies[name] = document - }) -} - -// DeletePolicy deletes an IAM policy from the filer -func (store *FilerEtcStore) DeletePolicy(ctx context.Context, name string) error { - return store.updatePolicies(ctx, func(policies map[string]policy_engine.PolicyDocument) { - delete(policies, name) - }) -} - -// updatePolicies is a helper method to update policies atomically -func (store *FilerEtcStore) updatePolicies(ctx context.Context, updateFunc func(map[string]policy_engine.PolicyDocument)) error { - // Load existing policies - policies, err := store.GetPolicies(ctx) - if err != nil { - return err - } - - // Apply update - updateFunc(policies) - - // Save back to filer - policiesCollection := &PoliciesCollection{ - Policies: policies, - } - - data, err := json.Marshal(policiesCollection) - if err != nil { - return err - } - - return store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - return filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamPoliciesFile, data) - }) -} - -// GetPolicy retrieves a specific IAM policy by name from the filer -func (store *FilerEtcStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) { - policies, err := store.GetPolicies(ctx) - if err != nil { - return nil, err - } - - if policy, exists := policies[name]; exists { - return &policy, nil - } - - return nil, nil // Policy not found -} diff --git a/weed/credential/filer_etc/filer_etc_store.go b/weed/credential/filer_etc/filer_etc_store.go deleted file mode 100644 index f8750cb25..000000000 --- a/weed/credential/filer_etc/filer_etc_store.go +++ /dev/null @@ -1,55 +0,0 @@ -package filer_etc - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" -) - -func init() { - credential.Stores = append(credential.Stores, &FilerEtcStore{}) -} - -// FilerEtcStore implements CredentialStore using SeaweedFS filer for storage -type FilerEtcStore struct { - filerGrpcAddress string - grpcDialOption grpc.DialOption -} - -func (store *FilerEtcStore) GetName() credential.CredentialStoreTypeName { - return credential.StoreTypeFilerEtc -} - -func (store *FilerEtcStore) Initialize(configuration util.Configuration, prefix string) error { - // Handle nil configuration gracefully - if configuration != nil { - store.filerGrpcAddress = configuration.GetString(prefix + "filer") - // TODO: Initialize grpcDialOption based on configuration - } - // Note: filerGrpcAddress can be set later via SetFilerClient method - return nil -} - -// SetFilerClient sets the filer client details for the file store -func (store *FilerEtcStore) SetFilerClient(filerAddress string, grpcDialOption grpc.DialOption) { - store.filerGrpcAddress = filerAddress - store.grpcDialOption = grpcDialOption -} - -// withFilerClient executes a function with a filer client -func (store *FilerEtcStore) withFilerClient(fn func(client filer_pb.SeaweedFilerClient) error) error { - if store.filerGrpcAddress == "" { - return fmt.Errorf("filer address not configured") - } - - // Use the pb.WithGrpcFilerClient helper similar to existing code - return pb.WithGrpcFilerClient(false, 0, pb.ServerAddress(store.filerGrpcAddress), store.grpcDialOption, fn) -} - -func (store *FilerEtcStore) Shutdown() { - // No cleanup needed for file store -} diff --git a/weed/credential/memory/memory_identity.go b/weed/credential/memory/memory_identity.go deleted file mode 100644 index 191aa5d16..000000000 --- a/weed/credential/memory/memory_identity.go +++ /dev/null @@ -1,302 +0,0 @@ -package memory - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" -) - -func (store *MemoryStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.initialized { - return nil, fmt.Errorf("store not initialized") - } - - config := &iam_pb.S3ApiConfiguration{} - - // Convert all users to identities - for _, user := range store.users { - // Deep copy the identity to avoid mutation issues - identityCopy := store.deepCopyIdentity(user) - config.Identities = append(config.Identities, identityCopy) - } - - return config, nil -} - -func (store *MemoryStore) SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - // Clear existing data - store.users = make(map[string]*iam_pb.Identity) - store.accessKeys = make(map[string]string) - - // Add all identities - for _, identity := range config.Identities { - // Deep copy to avoid mutation issues - identityCopy := store.deepCopyIdentity(identity) - store.users[identity.Name] = identityCopy - - // Index access keys - for _, credential := range identity.Credentials { - store.accessKeys[credential.AccessKey] = identity.Name - } - } - - return nil -} - -func (store *MemoryStore) CreateUser(ctx context.Context, identity *iam_pb.Identity) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - if _, exists := store.users[identity.Name]; exists { - return credential.ErrUserAlreadyExists - } - - // Check for duplicate access keys - for _, cred := range identity.Credentials { - if _, exists := store.accessKeys[cred.AccessKey]; exists { - return fmt.Errorf("access key %s already exists", cred.AccessKey) - } - } - - // Deep copy to avoid mutation issues - identityCopy := store.deepCopyIdentity(identity) - store.users[identity.Name] = identityCopy - - // Index access keys - for _, cred := range identity.Credentials { - store.accessKeys[cred.AccessKey] = identity.Name - } - - return nil -} - -func (store *MemoryStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.initialized { - return nil, fmt.Errorf("store not initialized") - } - - user, exists := store.users[username] - if !exists { - return nil, credential.ErrUserNotFound - } - - // Return a deep copy to avoid mutation issues - return store.deepCopyIdentity(user), nil -} - -func (store *MemoryStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - existingUser, exists := store.users[username] - if !exists { - return credential.ErrUserNotFound - } - - // Remove old access keys from index - for _, cred := range existingUser.Credentials { - delete(store.accessKeys, cred.AccessKey) - } - - // Check for duplicate access keys (excluding current user) - for _, cred := range identity.Credentials { - if existingUsername, exists := store.accessKeys[cred.AccessKey]; exists && existingUsername != username { - return fmt.Errorf("access key %s already exists", cred.AccessKey) - } - } - - // Deep copy to avoid mutation issues - identityCopy := store.deepCopyIdentity(identity) - store.users[username] = identityCopy - - // Re-index access keys - for _, cred := range identity.Credentials { - store.accessKeys[cred.AccessKey] = username - } - - return nil -} - -func (store *MemoryStore) DeleteUser(ctx context.Context, username string) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - user, exists := store.users[username] - if !exists { - return credential.ErrUserNotFound - } - - // Remove access keys from index - for _, cred := range user.Credentials { - delete(store.accessKeys, cred.AccessKey) - } - - // Remove user - delete(store.users, username) - - return nil -} - -func (store *MemoryStore) ListUsers(ctx context.Context) ([]string, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.initialized { - return nil, fmt.Errorf("store not initialized") - } - - var usernames []string - for username := range store.users { - usernames = append(usernames, username) - } - - return usernames, nil -} - -func (store *MemoryStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.initialized { - return nil, fmt.Errorf("store not initialized") - } - - username, exists := store.accessKeys[accessKey] - if !exists { - return nil, credential.ErrAccessKeyNotFound - } - - user, exists := store.users[username] - if !exists { - // This should not happen, but handle it gracefully - return nil, credential.ErrUserNotFound - } - - // Return a deep copy to avoid mutation issues - return store.deepCopyIdentity(user), nil -} - -func (store *MemoryStore) CreateAccessKey(ctx context.Context, username string, cred *iam_pb.Credential) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - user, exists := store.users[username] - if !exists { - return credential.ErrUserNotFound - } - - // Check if access key already exists - if _, exists := store.accessKeys[cred.AccessKey]; exists { - return fmt.Errorf("access key %s already exists", cred.AccessKey) - } - - // Add credential to user - user.Credentials = append(user.Credentials, &iam_pb.Credential{ - AccessKey: cred.AccessKey, - SecretKey: cred.SecretKey, - }) - - // Index the access key - store.accessKeys[cred.AccessKey] = username - - return nil -} - -func (store *MemoryStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - user, exists := store.users[username] - if !exists { - return credential.ErrUserNotFound - } - - // Find and remove the credential - var newCredentials []*iam_pb.Credential - found := false - for _, cred := range user.Credentials { - if cred.AccessKey == accessKey { - found = true - // Remove from access key index - delete(store.accessKeys, accessKey) - } else { - newCredentials = append(newCredentials, cred) - } - } - - if !found { - return credential.ErrAccessKeyNotFound - } - - user.Credentials = newCredentials - return nil -} - -// deepCopyIdentity creates a deep copy of an identity to avoid mutation issues -func (store *MemoryStore) deepCopyIdentity(identity *iam_pb.Identity) *iam_pb.Identity { - if identity == nil { - return nil - } - - // Use JSON marshaling/unmarshaling for deep copy - // This is simple and safe for protobuf messages - data, err := json.Marshal(identity) - if err != nil { - // Fallback to shallow copy if JSON fails - return &iam_pb.Identity{ - Name: identity.Name, - Account: identity.Account, - Credentials: identity.Credentials, - Actions: identity.Actions, - } - } - - var copy iam_pb.Identity - if err := json.Unmarshal(data, ©); err != nil { - // Fallback to shallow copy if JSON fails - return &iam_pb.Identity{ - Name: identity.Name, - Account: identity.Account, - Credentials: identity.Credentials, - Actions: identity.Actions, - } - } - - return © -} diff --git a/weed/credential/memory/memory_policy.go b/weed/credential/memory/memory_policy.go deleted file mode 100644 index 8a4700467..000000000 --- a/weed/credential/memory/memory_policy.go +++ /dev/null @@ -1,77 +0,0 @@ -package memory - -import ( - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" -) - -// GetPolicies retrieves all IAM policies from memory -func (store *MemoryStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if !store.initialized { - return nil, fmt.Errorf("store not initialized") - } - - // Create a copy of the policies map to avoid mutation issues - policies := make(map[string]policy_engine.PolicyDocument) - for name, doc := range store.policies { - policies[name] = doc - } - - return policies, nil -} - -// GetPolicy retrieves a specific IAM policy by name from memory -func (store *MemoryStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) { - store.mu.RLock() - defer store.mu.RUnlock() - - if policy, exists := store.policies[name]; exists { - return &policy, nil - } - - return nil, nil // Policy not found -} - -// CreatePolicy creates a new IAM policy in memory -func (store *MemoryStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - store.policies[name] = document - return nil -} - -// UpdatePolicy updates an existing IAM policy in memory -func (store *MemoryStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - store.policies[name] = document - return nil -} - -// DeletePolicy deletes an IAM policy from memory -func (store *MemoryStore) DeletePolicy(ctx context.Context, name string) error { - store.mu.Lock() - defer store.mu.Unlock() - - if !store.initialized { - return fmt.Errorf("store not initialized") - } - - delete(store.policies, name) - return nil -} diff --git a/weed/credential/memory/memory_store.go b/weed/credential/memory/memory_store.go deleted file mode 100644 index acd05a456..000000000 --- a/weed/credential/memory/memory_store.go +++ /dev/null @@ -1,81 +0,0 @@ -package memory - -import ( - "sync" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - credential.Stores = append(credential.Stores, &MemoryStore{}) -} - -// MemoryStore implements CredentialStore using in-memory storage -// This is primarily intended for testing purposes -type MemoryStore struct { - mu sync.RWMutex - users map[string]*iam_pb.Identity // username -> identity - accessKeys map[string]string // access_key -> username - policies map[string]policy_engine.PolicyDocument // policy_name -> policy_document - initialized bool -} - -func (store *MemoryStore) GetName() credential.CredentialStoreTypeName { - return credential.StoreTypeMemory -} - -func (store *MemoryStore) Initialize(configuration util.Configuration, prefix string) error { - store.mu.Lock() - defer store.mu.Unlock() - - if store.initialized { - return nil - } - - store.users = make(map[string]*iam_pb.Identity) - store.accessKeys = make(map[string]string) - store.policies = make(map[string]policy_engine.PolicyDocument) - store.initialized = true - - return nil -} - -func (store *MemoryStore) Shutdown() { - store.mu.Lock() - defer store.mu.Unlock() - - store.users = nil - store.accessKeys = nil - store.policies = nil - store.initialized = false -} - -// Reset clears all data in the store (useful for testing) -func (store *MemoryStore) Reset() { - store.mu.Lock() - defer store.mu.Unlock() - - if store.initialized { - store.users = make(map[string]*iam_pb.Identity) - store.accessKeys = make(map[string]string) - } -} - -// GetUserCount returns the number of users in the store (useful for testing) -func (store *MemoryStore) GetUserCount() int { - store.mu.RLock() - defer store.mu.RUnlock() - - return len(store.users) -} - -// GetAccessKeyCount returns the number of access keys in the store (useful for testing) -func (store *MemoryStore) GetAccessKeyCount() int { - store.mu.RLock() - defer store.mu.RUnlock() - - return len(store.accessKeys) -} diff --git a/weed/credential/memory/memory_store_test.go b/weed/credential/memory/memory_store_test.go deleted file mode 100644 index 567b5bb3e..000000000 --- a/weed/credential/memory/memory_store_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package memory - -import ( - "context" - "fmt" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func TestMemoryStore(t *testing.T) { - store := &MemoryStore{} - - // Test initialization - config := util.GetViper() - if err := store.Initialize(config, "credential."); err != nil { - t.Fatalf("Failed to initialize store: %v", err) - } - - ctx := context.Background() - - // Test creating a user - identity := &iam_pb.Identity{ - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access123", - SecretKey: "secret123", - }, - }, - } - - if err := store.CreateUser(ctx, identity); err != nil { - t.Fatalf("Failed to create user: %v", err) - } - - // Test getting user - retrievedUser, err := store.GetUser(ctx, "testuser") - if err != nil { - t.Fatalf("Failed to get user: %v", err) - } - - if retrievedUser.Name != "testuser" { - t.Errorf("Expected username 'testuser', got '%s'", retrievedUser.Name) - } - - if len(retrievedUser.Credentials) != 1 { - t.Errorf("Expected 1 credential, got %d", len(retrievedUser.Credentials)) - } - - // Test getting user by access key - userByAccessKey, err := store.GetUserByAccessKey(ctx, "access123") - if err != nil { - t.Fatalf("Failed to get user by access key: %v", err) - } - - if userByAccessKey.Name != "testuser" { - t.Errorf("Expected username 'testuser', got '%s'", userByAccessKey.Name) - } - - // Test listing users - users, err := store.ListUsers(ctx) - if err != nil { - t.Fatalf("Failed to list users: %v", err) - } - - if len(users) != 1 || users[0] != "testuser" { - t.Errorf("Expected ['testuser'], got %v", users) - } - - // Test creating access key - newCred := &iam_pb.Credential{ - AccessKey: "access456", - SecretKey: "secret456", - } - - if err := store.CreateAccessKey(ctx, "testuser", newCred); err != nil { - t.Fatalf("Failed to create access key: %v", err) - } - - // Verify user now has 2 credentials - updatedUser, err := store.GetUser(ctx, "testuser") - if err != nil { - t.Fatalf("Failed to get updated user: %v", err) - } - - if len(updatedUser.Credentials) != 2 { - t.Errorf("Expected 2 credentials, got %d", len(updatedUser.Credentials)) - } - - // Test deleting access key - if err := store.DeleteAccessKey(ctx, "testuser", "access456"); err != nil { - t.Fatalf("Failed to delete access key: %v", err) - } - - // Verify user now has 1 credential again - finalUser, err := store.GetUser(ctx, "testuser") - if err != nil { - t.Fatalf("Failed to get final user: %v", err) - } - - if len(finalUser.Credentials) != 1 { - t.Errorf("Expected 1 credential, got %d", len(finalUser.Credentials)) - } - - // Test deleting user - if err := store.DeleteUser(ctx, "testuser"); err != nil { - t.Fatalf("Failed to delete user: %v", err) - } - - // Verify user is gone - _, err = store.GetUser(ctx, "testuser") - if err != credential.ErrUserNotFound { - t.Errorf("Expected ErrUserNotFound, got %v", err) - } - - // Test error cases - if err := store.CreateUser(ctx, identity); err != nil { - t.Fatalf("Failed to create user for error tests: %v", err) - } - - // Try to create duplicate user - if err := store.CreateUser(ctx, identity); err != credential.ErrUserAlreadyExists { - t.Errorf("Expected ErrUserAlreadyExists, got %v", err) - } - - // Try to get non-existent user - _, err = store.GetUser(ctx, "nonexistent") - if err != credential.ErrUserNotFound { - t.Errorf("Expected ErrUserNotFound, got %v", err) - } - - // Try to get user by non-existent access key - _, err = store.GetUserByAccessKey(ctx, "nonexistent") - if err != credential.ErrAccessKeyNotFound { - t.Errorf("Expected ErrAccessKeyNotFound, got %v", err) - } -} - -func TestMemoryStoreConcurrency(t *testing.T) { - store := &MemoryStore{} - config := util.GetViper() - if err := store.Initialize(config, "credential."); err != nil { - t.Fatalf("Failed to initialize store: %v", err) - } - - ctx := context.Background() - - // Test concurrent access - done := make(chan bool, 10) - for i := 0; i < 10; i++ { - go func(i int) { - defer func() { done <- true }() - - username := fmt.Sprintf("user%d", i) - identity := &iam_pb.Identity{ - Name: username, - Credentials: []*iam_pb.Credential{ - { - AccessKey: fmt.Sprintf("access%d", i), - SecretKey: fmt.Sprintf("secret%d", i), - }, - }, - } - - if err := store.CreateUser(ctx, identity); err != nil { - t.Errorf("Failed to create user %s: %v", username, err) - return - } - - if _, err := store.GetUser(ctx, username); err != nil { - t.Errorf("Failed to get user %s: %v", username, err) - return - } - }(i) - } - - // Wait for all goroutines to complete - for i := 0; i < 10; i++ { - <-done - } - - // Verify all users were created - users, err := store.ListUsers(ctx) - if err != nil { - t.Fatalf("Failed to list users: %v", err) - } - - if len(users) != 10 { - t.Errorf("Expected 10 users, got %d", len(users)) - } -} - -func TestMemoryStoreReset(t *testing.T) { - store := &MemoryStore{} - config := util.GetViper() - if err := store.Initialize(config, "credential."); err != nil { - t.Fatalf("Failed to initialize store: %v", err) - } - - ctx := context.Background() - - // Create a user - identity := &iam_pb.Identity{ - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access123", - SecretKey: "secret123", - }, - }, - } - - if err := store.CreateUser(ctx, identity); err != nil { - t.Fatalf("Failed to create user: %v", err) - } - - // Verify user exists - if store.GetUserCount() != 1 { - t.Errorf("Expected 1 user, got %d", store.GetUserCount()) - } - - if store.GetAccessKeyCount() != 1 { - t.Errorf("Expected 1 access key, got %d", store.GetAccessKeyCount()) - } - - // Reset the store - store.Reset() - - // Verify store is empty - if store.GetUserCount() != 0 { - t.Errorf("Expected 0 users after reset, got %d", store.GetUserCount()) - } - - if store.GetAccessKeyCount() != 0 { - t.Errorf("Expected 0 access keys after reset, got %d", store.GetAccessKeyCount()) - } - - // Verify user is gone - _, err := store.GetUser(ctx, "testuser") - if err != credential.ErrUserNotFound { - t.Errorf("Expected ErrUserNotFound after reset, got %v", err) - } -} - -func TestMemoryStoreConfigurationSaveLoad(t *testing.T) { - store := &MemoryStore{} - config := util.GetViper() - if err := store.Initialize(config, "credential."); err != nil { - t.Fatalf("Failed to initialize store: %v", err) - } - - ctx := context.Background() - - // Create initial configuration - originalConfig := &iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "user1", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access1", - SecretKey: "secret1", - }, - }, - }, - { - Name: "user2", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access2", - SecretKey: "secret2", - }, - }, - }, - }, - } - - // Save configuration - if err := store.SaveConfiguration(ctx, originalConfig); err != nil { - t.Fatalf("Failed to save configuration: %v", err) - } - - // Load configuration - loadedConfig, err := store.LoadConfiguration(ctx) - if err != nil { - t.Fatalf("Failed to load configuration: %v", err) - } - - // Verify configuration matches - if len(loadedConfig.Identities) != 2 { - t.Errorf("Expected 2 identities, got %d", len(loadedConfig.Identities)) - } - - // Check users exist - user1, err := store.GetUser(ctx, "user1") - if err != nil { - t.Fatalf("Failed to get user1: %v", err) - } - - if len(user1.Credentials) != 1 || user1.Credentials[0].AccessKey != "access1" { - t.Errorf("User1 credentials not correct: %+v", user1.Credentials) - } - - user2, err := store.GetUser(ctx, "user2") - if err != nil { - t.Fatalf("Failed to get user2: %v", err) - } - - if len(user2.Credentials) != 1 || user2.Credentials[0].AccessKey != "access2" { - t.Errorf("User2 credentials not correct: %+v", user2.Credentials) - } -} diff --git a/weed/credential/migration.go b/weed/credential/migration.go deleted file mode 100644 index 41d0e3840..000000000 --- a/weed/credential/migration.go +++ /dev/null @@ -1,221 +0,0 @@ -package credential - -import ( - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// MigrateCredentials migrates credentials from one store to another -func MigrateCredentials(fromStoreName, toStoreName CredentialStoreTypeName, configuration util.Configuration, fromPrefix, toPrefix string) error { - ctx := context.Background() - - // Create source credential manager - fromCM, err := NewCredentialManager(fromStoreName, configuration, fromPrefix) - if err != nil { - return fmt.Errorf("failed to create source credential manager (%s): %v", fromStoreName, err) - } - defer fromCM.Shutdown() - - // Create destination credential manager - toCM, err := NewCredentialManager(toStoreName, configuration, toPrefix) - if err != nil { - return fmt.Errorf("failed to create destination credential manager (%s): %v", toStoreName, err) - } - defer toCM.Shutdown() - - // Load configuration from source - glog.Infof("Loading configuration from %s store...", fromStoreName) - config, err := fromCM.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration from source store: %w", err) - } - - if config == nil || len(config.Identities) == 0 { - glog.Info("No identities found in source store") - return nil - } - - glog.Infof("Found %d identities in source store", len(config.Identities)) - - // Migrate each identity - var migrated, failed int - for _, identity := range config.Identities { - glog.V(1).Infof("Migrating user: %s", identity.Name) - - // Check if user already exists in destination - existingUser, err := toCM.GetUser(ctx, identity.Name) - if err != nil && err != ErrUserNotFound { - glog.Errorf("Failed to check if user %s exists in destination: %v", identity.Name, err) - failed++ - continue - } - - if existingUser != nil { - glog.Warningf("User %s already exists in destination store, skipping", identity.Name) - continue - } - - // Create user in destination - err = toCM.CreateUser(ctx, identity) - if err != nil { - glog.Errorf("Failed to create user %s in destination store: %v", identity.Name, err) - failed++ - continue - } - - migrated++ - glog.V(1).Infof("Successfully migrated user: %s", identity.Name) - } - - glog.Infof("Migration completed: %d migrated, %d failed", migrated, failed) - - if failed > 0 { - return fmt.Errorf("migration completed with %d failures", failed) - } - - return nil -} - -// ExportCredentials exports credentials from a store to a configuration -func ExportCredentials(storeName CredentialStoreTypeName, configuration util.Configuration, prefix string) (*iam_pb.S3ApiConfiguration, error) { - ctx := context.Background() - - // Create credential manager - cm, err := NewCredentialManager(storeName, configuration, prefix) - if err != nil { - return nil, fmt.Errorf("failed to create credential manager (%s): %v", storeName, err) - } - defer cm.Shutdown() - - // Load configuration - config, err := cm.LoadConfiguration(ctx) - if err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - return config, nil -} - -// ImportCredentials imports credentials from a configuration to a store -func ImportCredentials(storeName CredentialStoreTypeName, configuration util.Configuration, prefix string, config *iam_pb.S3ApiConfiguration) error { - ctx := context.Background() - - // Create credential manager - cm, err := NewCredentialManager(storeName, configuration, prefix) - if err != nil { - return fmt.Errorf("failed to create credential manager (%s): %v", storeName, err) - } - defer cm.Shutdown() - - // Import each identity - var imported, failed int - for _, identity := range config.Identities { - glog.V(1).Infof("Importing user: %s", identity.Name) - - // Check if user already exists - existingUser, err := cm.GetUser(ctx, identity.Name) - if err != nil && err != ErrUserNotFound { - glog.Errorf("Failed to check if user %s exists: %v", identity.Name, err) - failed++ - continue - } - - if existingUser != nil { - glog.Warningf("User %s already exists, skipping", identity.Name) - continue - } - - // Create user - err = cm.CreateUser(ctx, identity) - if err != nil { - glog.Errorf("Failed to create user %s: %v", identity.Name, err) - failed++ - continue - } - - imported++ - glog.V(1).Infof("Successfully imported user: %s", identity.Name) - } - - glog.Infof("Import completed: %d imported, %d failed", imported, failed) - - if failed > 0 { - return fmt.Errorf("import completed with %d failures", failed) - } - - return nil -} - -// ValidateCredentials validates that all credentials in a store are accessible -func ValidateCredentials(storeName CredentialStoreTypeName, configuration util.Configuration, prefix string) error { - ctx := context.Background() - - // Create credential manager - cm, err := NewCredentialManager(storeName, configuration, prefix) - if err != nil { - return fmt.Errorf("failed to create credential manager (%s): %v", storeName, err) - } - defer cm.Shutdown() - - // Load configuration - config, err := cm.LoadConfiguration(ctx) - if err != nil { - return fmt.Errorf("failed to load configuration: %w", err) - } - - if config == nil || len(config.Identities) == 0 { - glog.Info("No identities found in store") - return nil - } - - glog.Infof("Validating %d identities...", len(config.Identities)) - - // Validate each identity - var validated, failed int - for _, identity := range config.Identities { - // Check if user can be retrieved - user, err := cm.GetUser(ctx, identity.Name) - if err != nil { - glog.Errorf("Failed to retrieve user %s: %v", identity.Name, err) - failed++ - continue - } - - if user == nil { - glog.Errorf("User %s not found", identity.Name) - failed++ - continue - } - - // Validate access keys - for _, credential := range identity.Credentials { - accessKeyUser, err := cm.GetUserByAccessKey(ctx, credential.AccessKey) - if err != nil { - glog.Errorf("Failed to retrieve user by access key %s: %v", credential.AccessKey, err) - failed++ - continue - } - - if accessKeyUser == nil || accessKeyUser.Name != identity.Name { - glog.Errorf("Access key %s does not map to correct user %s", credential.AccessKey, identity.Name) - failed++ - continue - } - } - - validated++ - glog.V(1).Infof("Successfully validated user: %s", identity.Name) - } - - glog.Infof("Validation completed: %d validated, %d failed", validated, failed) - - if failed > 0 { - return fmt.Errorf("validation completed with %d failures", failed) - } - - return nil -} diff --git a/weed/credential/postgres/postgres_identity.go b/weed/credential/postgres/postgres_identity.go deleted file mode 100644 index 11908b0d8..000000000 --- a/weed/credential/postgres/postgres_identity.go +++ /dev/null @@ -1,446 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" -) - -func (store *PostgresStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3ApiConfiguration, error) { - if !store.configured { - return nil, fmt.Errorf("store not configured") - } - - config := &iam_pb.S3ApiConfiguration{} - - // Query all users - rows, err := store.db.QueryContext(ctx, "SELECT username, email, account_data, actions FROM users") - if err != nil { - return nil, fmt.Errorf("failed to query users: %w", err) - } - defer rows.Close() - - for rows.Next() { - var username, email string - var accountDataJSON, actionsJSON []byte - - if err := rows.Scan(&username, &email, &accountDataJSON, &actionsJSON); err != nil { - return nil, fmt.Errorf("failed to scan user row: %w", err) - } - - identity := &iam_pb.Identity{ - Name: username, - } - - // Parse account data - if len(accountDataJSON) > 0 { - if err := json.Unmarshal(accountDataJSON, &identity.Account); err != nil { - return nil, fmt.Errorf("failed to unmarshal account data for user %s: %v", username, err) - } - } - - // Parse actions - if len(actionsJSON) > 0 { - if err := json.Unmarshal(actionsJSON, &identity.Actions); err != nil { - return nil, fmt.Errorf("failed to unmarshal actions for user %s: %v", username, err) - } - } - - // Query credentials for this user - credRows, err := store.db.QueryContext(ctx, "SELECT access_key, secret_key FROM credentials WHERE username = $1", username) - if err != nil { - return nil, fmt.Errorf("failed to query credentials for user %s: %v", username, err) - } - - for credRows.Next() { - var accessKey, secretKey string - if err := credRows.Scan(&accessKey, &secretKey); err != nil { - credRows.Close() - return nil, fmt.Errorf("failed to scan credential row for user %s: %v", username, err) - } - - identity.Credentials = append(identity.Credentials, &iam_pb.Credential{ - AccessKey: accessKey, - SecretKey: secretKey, - }) - } - credRows.Close() - - config.Identities = append(config.Identities, identity) - } - - return config, nil -} - -func (store *PostgresStore) SaveConfiguration(ctx context.Context, config *iam_pb.S3ApiConfiguration) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - // Start transaction - tx, err := store.db.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("failed to begin transaction: %w", err) - } - defer tx.Rollback() - - // Clear existing data - if _, err := tx.ExecContext(ctx, "DELETE FROM credentials"); err != nil { - return fmt.Errorf("failed to clear credentials: %w", err) - } - if _, err := tx.ExecContext(ctx, "DELETE FROM users"); err != nil { - return fmt.Errorf("failed to clear users: %w", err) - } - - // Insert all identities - for _, identity := range config.Identities { - // Marshal account data - var accountDataJSON []byte - if identity.Account != nil { - accountDataJSON, err = json.Marshal(identity.Account) - if err != nil { - return fmt.Errorf("failed to marshal account data for user %s: %v", identity.Name, err) - } - } - - // Marshal actions - var actionsJSON []byte - if identity.Actions != nil { - actionsJSON, err = json.Marshal(identity.Actions) - if err != nil { - return fmt.Errorf("failed to marshal actions for user %s: %v", identity.Name, err) - } - } - - // Insert user - _, err := tx.ExecContext(ctx, - "INSERT INTO users (username, email, account_data, actions) VALUES ($1, $2, $3, $4)", - identity.Name, "", accountDataJSON, actionsJSON) - if err != nil { - return fmt.Errorf("failed to insert user %s: %v", identity.Name, err) - } - - // Insert credentials - for _, cred := range identity.Credentials { - _, err := tx.ExecContext(ctx, - "INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)", - identity.Name, cred.AccessKey, cred.SecretKey) - if err != nil { - return fmt.Errorf("failed to insert credential for user %s: %v", identity.Name, err) - } - } - } - - return tx.Commit() -} - -func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Identity) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - // Check if user already exists - var count int - err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", identity.Name).Scan(&count) - if err != nil { - return fmt.Errorf("failed to check user existence: %w", err) - } - if count > 0 { - return credential.ErrUserAlreadyExists - } - - // Start transaction - tx, err := store.db.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("failed to begin transaction: %w", err) - } - defer tx.Rollback() - - // Marshal account data - var accountDataJSON []byte - if identity.Account != nil { - accountDataJSON, err = json.Marshal(identity.Account) - if err != nil { - return fmt.Errorf("failed to marshal account data: %w", err) - } - } - - // Marshal actions - var actionsJSON []byte - if identity.Actions != nil { - actionsJSON, err = json.Marshal(identity.Actions) - if err != nil { - return fmt.Errorf("failed to marshal actions: %w", err) - } - } - - // Insert user - _, err = tx.ExecContext(ctx, - "INSERT INTO users (username, email, account_data, actions) VALUES ($1, $2, $3, $4)", - identity.Name, "", accountDataJSON, actionsJSON) - if err != nil { - return fmt.Errorf("failed to insert user: %w", err) - } - - // Insert credentials - for _, cred := range identity.Credentials { - _, err = tx.ExecContext(ctx, - "INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)", - identity.Name, cred.AccessKey, cred.SecretKey) - if err != nil { - return fmt.Errorf("failed to insert credential: %w", err) - } - } - - return tx.Commit() -} - -func (store *PostgresStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) { - if !store.configured { - return nil, fmt.Errorf("store not configured") - } - - var email string - var accountDataJSON, actionsJSON []byte - - err := store.db.QueryRowContext(ctx, - "SELECT email, account_data, actions FROM users WHERE username = $1", - username).Scan(&email, &accountDataJSON, &actionsJSON) - if err != nil { - if err == sql.ErrNoRows { - return nil, credential.ErrUserNotFound - } - return nil, fmt.Errorf("failed to query user: %w", err) - } - - identity := &iam_pb.Identity{ - Name: username, - } - - // Parse account data - if len(accountDataJSON) > 0 { - if err := json.Unmarshal(accountDataJSON, &identity.Account); err != nil { - return nil, fmt.Errorf("failed to unmarshal account data: %w", err) - } - } - - // Parse actions - if len(actionsJSON) > 0 { - if err := json.Unmarshal(actionsJSON, &identity.Actions); err != nil { - return nil, fmt.Errorf("failed to unmarshal actions: %w", err) - } - } - - // Query credentials - rows, err := store.db.QueryContext(ctx, "SELECT access_key, secret_key FROM credentials WHERE username = $1", username) - if err != nil { - return nil, fmt.Errorf("failed to query credentials: %w", err) - } - defer rows.Close() - - for rows.Next() { - var accessKey, secretKey string - if err := rows.Scan(&accessKey, &secretKey); err != nil { - return nil, fmt.Errorf("failed to scan credential: %w", err) - } - - identity.Credentials = append(identity.Credentials, &iam_pb.Credential{ - AccessKey: accessKey, - SecretKey: secretKey, - }) - } - - return identity, nil -} - -func (store *PostgresStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - // Start transaction - tx, err := store.db.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("failed to begin transaction: %w", err) - } - defer tx.Rollback() - - // Check if user exists - var count int - err = tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count) - if err != nil { - return fmt.Errorf("failed to check user existence: %w", err) - } - if count == 0 { - return credential.ErrUserNotFound - } - - // Marshal account data - var accountDataJSON []byte - if identity.Account != nil { - accountDataJSON, err = json.Marshal(identity.Account) - if err != nil { - return fmt.Errorf("failed to marshal account data: %w", err) - } - } - - // Marshal actions - var actionsJSON []byte - if identity.Actions != nil { - actionsJSON, err = json.Marshal(identity.Actions) - if err != nil { - return fmt.Errorf("failed to marshal actions: %w", err) - } - } - - // Update user - _, err = tx.ExecContext(ctx, - "UPDATE users SET email = $2, account_data = $3, actions = $4, updated_at = CURRENT_TIMESTAMP WHERE username = $1", - username, "", accountDataJSON, actionsJSON) - if err != nil { - return fmt.Errorf("failed to update user: %w", err) - } - - // Delete existing credentials - _, err = tx.ExecContext(ctx, "DELETE FROM credentials WHERE username = $1", username) - if err != nil { - return fmt.Errorf("failed to delete existing credentials: %w", err) - } - - // Insert new credentials - for _, cred := range identity.Credentials { - _, err = tx.ExecContext(ctx, - "INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)", - username, cred.AccessKey, cred.SecretKey) - if err != nil { - return fmt.Errorf("failed to insert credential: %w", err) - } - } - - return tx.Commit() -} - -func (store *PostgresStore) DeleteUser(ctx context.Context, username string) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - result, err := store.db.ExecContext(ctx, "DELETE FROM users WHERE username = $1", username) - if err != nil { - return fmt.Errorf("failed to delete user: %w", err) - } - - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected: %w", err) - } - - if rowsAffected == 0 { - return credential.ErrUserNotFound - } - - return nil -} - -func (store *PostgresStore) ListUsers(ctx context.Context) ([]string, error) { - if !store.configured { - return nil, fmt.Errorf("store not configured") - } - - rows, err := store.db.QueryContext(ctx, "SELECT username FROM users ORDER BY username") - if err != nil { - return nil, fmt.Errorf("failed to query users: %w", err) - } - defer rows.Close() - - var usernames []string - for rows.Next() { - var username string - if err := rows.Scan(&username); err != nil { - return nil, fmt.Errorf("failed to scan username: %w", err) - } - usernames = append(usernames, username) - } - - return usernames, nil -} - -func (store *PostgresStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) { - if !store.configured { - return nil, fmt.Errorf("store not configured") - } - - var username string - err := store.db.QueryRowContext(ctx, "SELECT username FROM credentials WHERE access_key = $1", accessKey).Scan(&username) - if err != nil { - if err == sql.ErrNoRows { - return nil, credential.ErrAccessKeyNotFound - } - return nil, fmt.Errorf("failed to query access key: %w", err) - } - - return store.GetUser(ctx, username) -} - -func (store *PostgresStore) CreateAccessKey(ctx context.Context, username string, cred *iam_pb.Credential) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - // Check if user exists - var count int - err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count) - if err != nil { - return fmt.Errorf("failed to check user existence: %w", err) - } - if count == 0 { - return credential.ErrUserNotFound - } - - // Insert credential - _, err = store.db.ExecContext(ctx, - "INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)", - username, cred.AccessKey, cred.SecretKey) - if err != nil { - return fmt.Errorf("failed to insert credential: %w", err) - } - - return nil -} - -func (store *PostgresStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - result, err := store.db.ExecContext(ctx, - "DELETE FROM credentials WHERE username = $1 AND access_key = $2", - username, accessKey) - if err != nil { - return fmt.Errorf("failed to delete access key: %w", err) - } - - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected: %w", err) - } - - if rowsAffected == 0 { - // Check if user exists - var count int - err = store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count) - if err != nil { - return fmt.Errorf("failed to check user existence: %w", err) - } - if count == 0 { - return credential.ErrUserNotFound - } - return credential.ErrAccessKeyNotFound - } - - return nil -} diff --git a/weed/credential/postgres/postgres_policy.go b/weed/credential/postgres/postgres_policy.go deleted file mode 100644 index 061646f7f..000000000 --- a/weed/credential/postgres/postgres_policy.go +++ /dev/null @@ -1,130 +0,0 @@ -package postgres - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" -) - -// GetPolicies retrieves all IAM policies from PostgreSQL -func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_engine.PolicyDocument, error) { - if !store.configured { - return nil, fmt.Errorf("store not configured") - } - - policies := make(map[string]policy_engine.PolicyDocument) - - rows, err := store.db.QueryContext(ctx, "SELECT name, document FROM policies") - if err != nil { - return nil, fmt.Errorf("failed to query policies: %w", err) - } - defer rows.Close() - - for rows.Next() { - var name string - var documentJSON []byte - - if err := rows.Scan(&name, &documentJSON); err != nil { - return nil, fmt.Errorf("failed to scan policy row: %w", err) - } - - var document policy_engine.PolicyDocument - if err := json.Unmarshal(documentJSON, &document); err != nil { - return nil, fmt.Errorf("failed to unmarshal policy document for %s: %v", name, err) - } - - policies[name] = document - } - - return policies, nil -} - -// CreatePolicy creates a new IAM policy in PostgreSQL -func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - documentJSON, err := json.Marshal(document) - if err != nil { - return fmt.Errorf("failed to marshal policy document: %w", err) - } - - _, err = store.db.ExecContext(ctx, - "INSERT INTO policies (name, document) VALUES ($1, $2) ON CONFLICT (name) DO UPDATE SET document = $2, updated_at = CURRENT_TIMESTAMP", - name, documentJSON) - if err != nil { - return fmt.Errorf("failed to insert policy: %w", err) - } - - return nil -} - -// UpdatePolicy updates an existing IAM policy in PostgreSQL -func (store *PostgresStore) UpdatePolicy(ctx context.Context, name string, document policy_engine.PolicyDocument) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - documentJSON, err := json.Marshal(document) - if err != nil { - return fmt.Errorf("failed to marshal policy document: %w", err) - } - - result, err := store.db.ExecContext(ctx, - "UPDATE policies SET document = $2, updated_at = CURRENT_TIMESTAMP WHERE name = $1", - name, documentJSON) - if err != nil { - return fmt.Errorf("failed to update policy: %w", err) - } - - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected: %w", err) - } - - if rowsAffected == 0 { - return fmt.Errorf("policy %s not found", name) - } - - return nil -} - -// DeletePolicy deletes an IAM policy from PostgreSQL -func (store *PostgresStore) DeletePolicy(ctx context.Context, name string) error { - if !store.configured { - return fmt.Errorf("store not configured") - } - - result, err := store.db.ExecContext(ctx, "DELETE FROM policies WHERE name = $1", name) - if err != nil { - return fmt.Errorf("failed to delete policy: %w", err) - } - - rowsAffected, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected: %w", err) - } - - if rowsAffected == 0 { - return fmt.Errorf("policy %s not found", name) - } - - return nil -} - -// GetPolicy retrieves a specific IAM policy by name from PostgreSQL -func (store *PostgresStore) GetPolicy(ctx context.Context, name string) (*policy_engine.PolicyDocument, error) { - policies, err := store.GetPolicies(ctx) - if err != nil { - return nil, err - } - - if policy, exists := policies[name]; exists { - return &policy, nil - } - - return nil, nil // Policy not found -} diff --git a/weed/credential/postgres/postgres_store.go b/weed/credential/postgres/postgres_store.go deleted file mode 100644 index 58cb3f868..000000000 --- a/weed/credential/postgres/postgres_store.go +++ /dev/null @@ -1,148 +0,0 @@ -package postgres - -import ( - "database/sql" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/util" - - _ "github.com/jackc/pgx/v5/stdlib" -) - -func init() { - credential.Stores = append(credential.Stores, &PostgresStore{}) -} - -// PostgresStore implements CredentialStore using PostgreSQL -type PostgresStore struct { - db *sql.DB - configured bool -} - -func (store *PostgresStore) GetName() credential.CredentialStoreTypeName { - return credential.StoreTypePostgres -} - -func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) error { - if store.configured { - return nil - } - - hostname := configuration.GetString(prefix + "hostname") - port := configuration.GetInt(prefix + "port") - username := configuration.GetString(prefix + "username") - password := configuration.GetString(prefix + "password") - database := configuration.GetString(prefix + "database") - schema := configuration.GetString(prefix + "schema") - sslmode := configuration.GetString(prefix + "sslmode") - - // Set defaults - if hostname == "" { - hostname = "localhost" - } - if port == 0 { - port = 5432 - } - if schema == "" { - schema = "public" - } - if sslmode == "" { - sslmode = "disable" - } - - // Build pgx-optimized connection string - // Note: prefer_simple_protocol=true is only needed for PgBouncer, not direct PostgreSQL connections - connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s search_path=%s", - hostname, port, username, password, database, sslmode, schema) - - db, err := sql.Open("pgx", connStr) - if err != nil { - return fmt.Errorf("failed to open database: %w", err) - } - - // Test connection - if err := db.Ping(); err != nil { - db.Close() - return fmt.Errorf("failed to ping database: %w", err) - } - - // Set connection pool settings - db.SetMaxOpenConns(25) - db.SetMaxIdleConns(5) - db.SetConnMaxLifetime(5 * time.Minute) - - store.db = db - - // Create tables if they don't exist - if err := store.createTables(); err != nil { - db.Close() - return fmt.Errorf("failed to create tables: %w", err) - } - - store.configured = true - return nil -} - -func (store *PostgresStore) createTables() error { - // Create users table - usersTable := ` - CREATE TABLE IF NOT EXISTS users ( - username VARCHAR(255) PRIMARY KEY, - email VARCHAR(255), - account_data JSONB, - actions JSONB, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ); - CREATE INDEX IF NOT EXISTS idx_users_email ON users(email); - ` - - // Create credentials table - credentialsTable := ` - CREATE TABLE IF NOT EXISTS credentials ( - id SERIAL PRIMARY KEY, - username VARCHAR(255) REFERENCES users(username) ON DELETE CASCADE, - access_key VARCHAR(255) UNIQUE NOT NULL, - secret_key VARCHAR(255) NOT NULL, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ); - CREATE INDEX IF NOT EXISTS idx_credentials_username ON credentials(username); - CREATE INDEX IF NOT EXISTS idx_credentials_access_key ON credentials(access_key); - ` - - // Create policies table - policiesTable := ` - CREATE TABLE IF NOT EXISTS policies ( - name VARCHAR(255) PRIMARY KEY, - document JSONB NOT NULL, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ); - CREATE INDEX IF NOT EXISTS idx_policies_name ON policies(name); - ` - - // Execute table creation - if _, err := store.db.Exec(usersTable); err != nil { - return fmt.Errorf("failed to create users table: %w", err) - } - - if _, err := store.db.Exec(credentialsTable); err != nil { - return fmt.Errorf("failed to create credentials table: %w", err) - } - - if _, err := store.db.Exec(policiesTable); err != nil { - return fmt.Errorf("failed to create policies table: %w", err) - } - - return nil -} - -func (store *PostgresStore) Shutdown() { - if store.db != nil { - store.db.Close() - store.db = nil - } - store.configured = false -} diff --git a/weed/credential/test/integration_test.go b/weed/credential/test/integration_test.go deleted file mode 100644 index c1e55ecf8..000000000 --- a/weed/credential/test/integration_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package test - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - - // Import all store implementations to register them - _ "github.com/seaweedfs/seaweedfs/weed/credential/filer_etc" - _ "github.com/seaweedfs/seaweedfs/weed/credential/memory" - _ "github.com/seaweedfs/seaweedfs/weed/credential/postgres" -) - -func TestStoreRegistration(t *testing.T) { - // Test that stores are registered - storeNames := credential.GetAvailableStores() - if len(storeNames) == 0 { - t.Fatal("No credential stores registered") - } - - expectedStores := []string{string(credential.StoreTypeFilerEtc), string(credential.StoreTypeMemory), string(credential.StoreTypePostgres)} - - // Verify all expected stores are present - for _, expected := range expectedStores { - found := false - for _, storeName := range storeNames { - if string(storeName) == expected { - found = true - break - } - } - if !found { - t.Errorf("Expected store not found: %s", expected) - } - } - - t.Logf("Available stores: %v", storeNames) -} - -func TestMemoryStoreIntegration(t *testing.T) { - // Test creating credential manager with memory store - config := util.GetViper() - cm, err := credential.NewCredentialManager(credential.StoreTypeMemory, config, "test.") - if err != nil { - t.Fatalf("Failed to create memory credential manager: %v", err) - } - defer cm.Shutdown() - - // Test that the store is of the correct type - if cm.GetStore().GetName() != credential.StoreTypeMemory { - t.Errorf("Expected memory store, got %s", cm.GetStore().GetName()) - } - - // Test basic operations - ctx := context.Background() - - // Create test user - testUser := &iam_pb.Identity{ - Name: "testuser", - Actions: []string{"Read", "Write"}, - Account: &iam_pb.Account{ - Id: "123456789012", - DisplayName: "Test User", - EmailAddress: "test@example.com", - }, - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - } - - // Test CreateUser - err = cm.CreateUser(ctx, testUser) - if err != nil { - t.Fatalf("CreateUser failed: %v", err) - } - - // Test GetUser - user, err := cm.GetUser(ctx, "testuser") - if err != nil { - t.Fatalf("GetUser failed: %v", err) - } - if user.Name != "testuser" { - t.Errorf("Expected user name 'testuser', got %s", user.Name) - } - - // Test ListUsers - users, err := cm.ListUsers(ctx) - if err != nil { - t.Fatalf("ListUsers failed: %v", err) - } - if len(users) != 1 || users[0] != "testuser" { - t.Errorf("Expected ['testuser'], got %v", users) - } - - // Test GetUserByAccessKey - userByKey, err := cm.GetUserByAccessKey(ctx, "AKIAIOSFODNN7EXAMPLE") - if err != nil { - t.Fatalf("GetUserByAccessKey failed: %v", err) - } - if userByKey.Name != "testuser" { - t.Errorf("Expected user name 'testuser', got %s", userByKey.Name) - } - - // Test DeleteUser - err = cm.DeleteUser(ctx, "testuser") - if err != nil { - t.Fatalf("DeleteUser failed: %v", err) - } - - // Verify user was deleted - _, err = cm.GetUser(ctx, "testuser") - if err != credential.ErrUserNotFound { - t.Errorf("Expected ErrUserNotFound, got %v", err) - } -} diff --git a/weed/credential/test/policy_test.go b/weed/credential/test/policy_test.go deleted file mode 100644 index 28fa2c619..000000000 --- a/weed/credential/test/policy_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package test - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/credential/memory" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - - // Import all store implementations to register them - _ "github.com/seaweedfs/seaweedfs/weed/credential/filer_etc" - _ "github.com/seaweedfs/seaweedfs/weed/credential/memory" - _ "github.com/seaweedfs/seaweedfs/weed/credential/postgres" -) - -// TestPolicyManagement tests policy management across all credential stores -func TestPolicyManagement(t *testing.T) { - ctx := context.Background() - - // Test with memory store (easiest to test) - credentialManager, err := credential.NewCredentialManager(credential.StoreTypeMemory, nil, "") - if err != nil { - t.Fatalf("Failed to create credential manager: %v", err) - } - - // Test policy operations - testPolicyOperations(t, ctx, credentialManager) -} - -func testPolicyOperations(t *testing.T, ctx context.Context, credentialManager *credential.CredentialManager) { - store := credentialManager.GetStore() - - // Cast to memory store to access policy methods - memoryStore, ok := store.(*memory.MemoryStore) - if !ok { - t.Skip("Store is not a memory store") - } - - // Test GetPolicies (should be empty initially) - policies, err := memoryStore.GetPolicies(ctx) - if err != nil { - t.Fatalf("Failed to get policies: %v", err) - } - if len(policies) != 0 { - t.Errorf("Expected 0 policies, got %d", len(policies)) - } - - // Test CreatePolicy - testPolicy := policy_engine.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy_engine.PolicyStatement{ - { - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice("s3:GetObject"), - Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::test-bucket/*"), - }, - }, - } - - err = memoryStore.CreatePolicy(ctx, "test-policy", testPolicy) - if err != nil { - t.Fatalf("Failed to create policy: %v", err) - } - - // Test GetPolicies (should have 1 policy now) - policies, err = memoryStore.GetPolicies(ctx) - if err != nil { - t.Fatalf("Failed to get policies: %v", err) - } - if len(policies) != 1 { - t.Errorf("Expected 1 policy, got %d", len(policies)) - } - - // Verify policy content - policy, exists := policies["test-policy"] - if !exists { - t.Error("test-policy not found") - } - if policy.Version != "2012-10-17" { - t.Errorf("Expected policy version '2012-10-17', got '%s'", policy.Version) - } - if len(policy.Statement) != 1 { - t.Errorf("Expected 1 statement, got %d", len(policy.Statement)) - } - - // Test UpdatePolicy - updatedPolicy := policy_engine.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy_engine.PolicyStatement{ - { - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice("s3:GetObject", "s3:PutObject"), - Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::test-bucket/*"), - }, - }, - } - - err = memoryStore.UpdatePolicy(ctx, "test-policy", updatedPolicy) - if err != nil { - t.Fatalf("Failed to update policy: %v", err) - } - - // Verify the update - policies, err = memoryStore.GetPolicies(ctx) - if err != nil { - t.Fatalf("Failed to get policies after update: %v", err) - } - - updatedPolicyResult, exists := policies["test-policy"] - if !exists { - t.Error("test-policy not found after update") - } - if len(updatedPolicyResult.Statement) != 1 { - t.Errorf("Expected 1 statement after update, got %d", len(updatedPolicyResult.Statement)) - } - if len(updatedPolicyResult.Statement[0].Action.Strings()) != 2 { - t.Errorf("Expected 2 actions after update, got %d", len(updatedPolicyResult.Statement[0].Action.Strings())) - } - - // Test DeletePolicy - err = memoryStore.DeletePolicy(ctx, "test-policy") - if err != nil { - t.Fatalf("Failed to delete policy: %v", err) - } - - // Verify deletion - policies, err = memoryStore.GetPolicies(ctx) - if err != nil { - t.Fatalf("Failed to get policies after deletion: %v", err) - } - if len(policies) != 0 { - t.Errorf("Expected 0 policies after deletion, got %d", len(policies)) - } -} - -// TestPolicyManagementWithFilerEtc tests policy management with filer_etc store -func TestPolicyManagementWithFilerEtc(t *testing.T) { - // Skip this test if we can't connect to a filer - t.Skip("Filer connection required for filer_etc store testing") -} - -// TestPolicyManagementWithPostgres tests policy management with postgres store -func TestPolicyManagementWithPostgres(t *testing.T) { - // Skip this test if we can't connect to PostgreSQL - t.Skip("PostgreSQL connection required for postgres store testing") -} diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go index a83b33341..13268b944 100644 --- a/weed/filer/abstract_sql/abstract_sql_store.go +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -4,14 +4,12 @@ import ( "context" "database/sql" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "strings" "sync" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" - "github.com/seaweedfs/seaweedfs/weed/util" ) type SqlGenerator interface { @@ -34,8 +32,6 @@ type AbstractSqlStore struct { dbsLock sync.Mutex } -var _ filer.BucketAware = (*AbstractSqlStore)(nil) - func (store *AbstractSqlStore) CanDropWholeBucket() bool { return store.SupportBucketTable } @@ -142,8 +138,6 @@ func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.Full } } - } else { - err = fmt.Errorf("invalid bucket name %s", bucket) } return @@ -162,27 +156,34 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } - sqlInsert := "insert" + res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta) - if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") { - // now the insert failed possibly due to duplication constraints - sqlInsert = "falls back to update" - glog.V(1).InfofCtx(ctx, "insert %s %s: %v", entry.FullPath, sqlInsert, err) - res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) + if err == nil { + return } + + if !strings.Contains(strings.ToLower(err.Error()), "duplicate") { + // return fmt.Errorf("insert: %s", err) + // skip this since the error can be in a different language + } + + // now the insert failed possibly due to duplication constraints + glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err) + + res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) if err != nil { - return fmt.Errorf("%s %s: %s", sqlInsert, entry.FullPath, err) + return fmt.Errorf("upsert %s: %s", entry.FullPath, err) } _, err = res.RowsAffected() if err != nil { - return fmt.Errorf("%s %s but no rows affected: %s", sqlInsert, entry.FullPath, err) + return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err) } - return nil + } func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { @@ -278,7 +279,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat } } - glog.V(4).InfofCtx(ctx, "delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) + glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath)) if err != nil { return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) @@ -313,7 +314,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, var name string var data []byte if err = rows.Scan(&name, &data); err != nil { - glog.V(0).InfofCtx(ctx, "scan %s : %v", dirPath, err) + glog.V(0).Infof("scan %s : %v", dirPath, err) return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err) } lastFileName = name @@ -322,7 +323,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, FullPath: util.NewFullPath(string(dirPath), name), } if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { - glog.V(0).InfofCtx(ctx, "scan decode %s : %v", entry.FullPath, err) + glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) } @@ -344,9 +345,6 @@ func (store *AbstractSqlStore) Shutdown() { } func isValidBucket(bucket string) bool { - if s3bucket.VerifyS3BucketName(bucket) != nil { - return false - } return bucket != DEFAULT_TABLE && bucket != "" } diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go index ad9e6ab41..aaf1c196c 100644 --- a/weed/filer/abstract_sql/abstract_sql_store_kv.go +++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go @@ -5,9 +5,9 @@ import ( "database/sql" "encoding/base64" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "strings" ) @@ -15,7 +15,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by db, _, _, err := store.getTxOrDB(ctx, "", false) if err != nil { - return fmt.Errorf("findDB: %w", err) + return fmt.Errorf("findDB: %v", err) } dirStr, dirHash, name := GenDirAndName(key) @@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by } // now the insert failed possibly due to duplication constraints - glog.V(1).InfofCtx(ctx, "kv insert falls back to update: %s", err) + glog.V(1).Infof("kv insert falls back to update: %s", err) res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr) if err != nil { @@ -50,7 +50,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b db, _, _, err := store.getTxOrDB(ctx, "", false) if err != nil { - return nil, fmt.Errorf("findDB: %w", err) + return nil, fmt.Errorf("findDB: %v", err) } dirStr, dirHash, name := GenDirAndName(key) @@ -63,7 +63,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b } if err != nil { - return nil, fmt.Errorf("kv get: %w", err) + return nil, fmt.Errorf("kv get: %v", err) } return @@ -73,7 +73,7 @@ func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err er db, _, _, err := store.getTxOrDB(ctx, "", false) if err != nil { - return fmt.Errorf("findDB: %w", err) + return fmt.Errorf("findDB: %v", err) } dirStr, dirHash, name := GenDirAndName(key) diff --git a/weed/filer/arangodb/arangodb_store.go b/weed/filer/arangodb/arangodb_store.go index 0a3a06d16..13d14b2b0 100644 --- a/weed/filer/arangodb/arangodb_store.go +++ b/weed/filer/arangodb/arangodb_store.go @@ -11,10 +11,10 @@ import ( "github.com/arangodb/go-driver" "github.com/arangodb/go-driver/http" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -66,8 +66,8 @@ func (store *ArangodbStore) Initialize(configuration util.Configuration, prefix } func (store *ArangodbStore) connection(uris []string, user string, pass string, insecure bool) (err error) { - ctx, cn := context.WithTimeout(context.Background(), 10*time.Second) - defer cn() + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + store.connect, err = http.NewConnection(http.ConnectionConfig{ Endpoints: uris, TLSConfig: &tls.Config{ @@ -121,7 +121,7 @@ func (store *ArangodbStore) BeginTransaction(ctx context.Context) (context.Conte return nil, err } - return context.WithValue(driver.WithTransactionID(ctx, txn), transactionKey, txn), nil + return context.WithValue(ctx, transactionKey, txn), nil } func (store *ArangodbStore) CommitTransaction(ctx context.Context) error { @@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } model := &Model{ @@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } model := &Model{ @@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat if driver.IsNotFound(err) { return nil, filer_pb.ErrNotFound } - glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err) + glog.Errorf("find %s: %v", fullpath, err) return nil, filer_pb.ErrNotFound } if len(data.Meta) == 0 { @@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP } _, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath))) if err != nil && !driver.IsNotFound(err) { - glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err) + glog.Errorf("find %s: %v", fullpath, err) return fmt.Errorf("delete %s : %v", fullpath, err) } return nil @@ -274,10 +274,10 @@ func (store *ArangodbStore) DeleteFolderChildren(ctx context.Context, fullpath u for d in %s filter starts_with(d.directory, "%s/") || d.directory == "%s" remove d._key in %s`, - "`"+targetCollection.Name()+"`", + targetCollection.Name(), strings.Join(strings.Split(string(fullpath), "/"), ","), string(fullpath), - "`"+targetCollection.Name()+"`", + targetCollection.Name(), ) cur, err := store.database.Query(ctx, query, nil) if err != nil { @@ -296,7 +296,7 @@ func (store *ArangodbStore) ListDirectoryPrefixedEntries(ctx context.Context, di if err != nil { return lastFileName, err } - query := "for d in " + "`" + targetCollection.Name() + "`" + query := "for d in " + targetCollection.Name() if includeStartFile { query = query + " filter d.name >= \"" + startFileName + "\" " } else { @@ -331,7 +331,7 @@ sort d.name asc converted := arrayToBytes(data.Meta) if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } diff --git a/weed/filer/arangodb/arangodb_store_bucket.go b/weed/filer/arangodb/arangodb_store_bucket.go index 44aeeadea..810d639a7 100644 --- a/weed/filer/arangodb/arangodb_store_bucket.go +++ b/weed/filer/arangodb/arangodb_store_bucket.go @@ -5,9 +5,9 @@ import ( "github.com/arangodb/go-driver" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" ) var _ filer.BucketAware = (*ArangodbStore)(nil) @@ -34,9 +34,6 @@ func (store *ArangodbStore) OnBucketDeletion(bucket string) { glog.Errorf("bucket delete %s: %v", bucket, err) return } - store.mu.Lock() - delete(store.buckets, bucket) - store.mu.Unlock() } func (store *ArangodbStore) CanDropWholeBucket() bool { return true diff --git a/weed/filer/arangodb/arangodb_store_kv.go b/weed/filer/arangodb/arangodb_store_kv.go index ae768c7fb..c1307e78d 100644 --- a/weed/filer/arangodb/arangodb_store_kv.go +++ b/weed/filer/arangodb/arangodb_store_kv.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/arangodb/go-driver" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" ) func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -18,7 +18,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) exists, err := store.kvCollection.DocumentExists(ctx, model.Key) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } if exists { _, err = store.kvCollection.UpdateDocument(ctx, model.Key, model) @@ -26,7 +26,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) _, err = store.kvCollection.CreateDocument(ctx, model) } if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte return nil, filer.ErrKvNotFound } if err != nil { - glog.ErrorfCtx(ctx, "kv get: %s %v", string(key), err) + glog.Errorf("kv get: %s %v", string(key), err) return nil, filer.ErrKvNotFound } return arrayToBytes(model.Meta), nil @@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) { _, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key))) if err != nil { - glog.ErrorfCtx(ctx, "kv del: %v", err) + glog.Errorf("kv del: %v", err) return filer.ErrKvNotFound } return nil diff --git a/weed/filer/arangodb/helpers.go b/weed/filer/arangodb/helpers.go index 776e6d1b8..943189781 100644 --- a/weed/filer/arangodb/helpers.go +++ b/weed/filer/arangodb/helpers.go @@ -9,10 +9,10 @@ import ( "strings" "github.com/arangodb/go-driver" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" ) -// convert a string into arango-key safe hex bytes hash +//convert a string into arango-key safe hex bytes hash func hashString(dir string) string { h := md5.New() io.WriteString(h, dir) @@ -86,7 +86,7 @@ func (store *ArangodbStore) ensureBucket(ctx context.Context, bucket string) (bc store.mu.RLock() bc, ok = store.buckets[bucket] store.mu.RUnlock() - if ok && bc != nil { + if ok { return bc, nil } store.mu.Lock() @@ -98,26 +98,8 @@ func (store *ArangodbStore) ensureBucket(ctx context.Context, bucket string) (bc return store.buckets[bucket], nil } -// transform to an arango compliant name -func bucketToCollectionName(s string) string { - if len(s) == 0 { - return "" - } - // replace all "." with _ - s = strings.ReplaceAll(s, ".", "_") - - // if starts with number or '.' then add a special prefix - if (s[0] >= '0' && s[0] <= '9') || (s[0] == '.' || s[0] == '_' || s[0] == '-') { - s = "xN--" + s - } - return s -} - // creates collection if not exist, ensures indices if not exist -func (store *ArangodbStore) ensureCollection(ctx context.Context, bucket_name string) (c driver.Collection, err error) { - // convert the bucket to collection name - name := bucketToCollectionName(bucket_name) - +func (store *ArangodbStore) ensureCollection(ctx context.Context, name string) (c driver.Collection, err error) { ok, err := store.database.CollectionExists(ctx, name) if err != nil { return diff --git a/weed/filer/arangodb/readme.md b/weed/filer/arangodb/readme.md index 57a594592..e189811fb 100644 --- a/weed/filer/arangodb/readme.md +++ b/weed/filer/arangodb/readme.md @@ -22,39 +22,6 @@ i test using this dev database: `docker run -p 8529:8529 -e ARANGO_ROOT_PASSWORD=test arangodb/arangodb:3.9.0` - -## database structure - - -arangodb has a few restrictions which require the use of a few tricks in order to losslessly store the data. - -### filer store - -arangodb does not support []byte, and will store such as a uint64 array. this would be a waste of space. to counteract this, we store the data as a length prefixed uint64 byteset. - -### filer kv - -same as above - -### filer buckets - -s3 buckets are implemented through arangodb collection. this allows us to do very fast bucket deletion by simply deleting the collection - - -arangodb collection name rules is character set `azAZ09_-` with a 256 character max. however the first character must be a letter. - - -s3 bucket name rule is the set `azAZ09.-` with a 63 characters max. - -the rules for collection names is then the following: - -1. if the bucket name is a valid arangodb collection name, then nothing is done. -2. if the bucket name contains a ".", the "." is replaced with "_" -3. if the bucket name now begins with a number or "_", the prefix "xN--" is prepended to the collection name - -this allows for these collection names to be used. - - ## features i don't personally need but are missing [ ] provide tls cert to arango [ ] authentication that is not basic auth diff --git a/weed/filer/cassandra/README.txt b/weed/filer/cassandra/README.txt index 6a1c8ecb3..122c9c3f4 100644 --- a/weed/filer/cassandra/README.txt +++ b/weed/filer/cassandra/README.txt @@ -1 +1,14 @@ -Deprecated by cassandra2 \ No newline at end of file +1. create a keyspace + +CREATE KEYSPACE seaweedfs WITH replication = {'class':'SimpleStrategy', 'replication_factor' : 1}; + +2. create filemeta table + + USE seaweedfs; + + CREATE TABLE filemeta ( + directory varchar, + name varchar, + meta blob, + PRIMARY KEY (directory, name) + ) WITH CLUSTERING ORDER BY (name ASC); diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go index 0d0c17e1d..d8c094a45 100644 --- a/weed/filer/cassandra/cassandra_store.go +++ b/weed/filer/cassandra/cassandra_store.go @@ -2,16 +2,14 @@ package cassandra import ( "context" - "errors" "fmt" + "github.com/gocql/gocql" "time" - "github.com/gocql/gocql" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -102,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } @@ -131,10 +129,13 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPa if err := store.session.Query( "SELECT meta FROM filemeta WHERE directory=? AND name=?", dir, name).Scan(&data); err != nil { - if errors.Is(err, gocql.ErrNotFound) { + if err != gocql.ErrNotFound { return nil, filer_pb.ErrNotFound } - return nil, err + } + + if len(data) == 0 { + return nil, filer_pb.ErrNotFound } entry = &filer.Entry{ @@ -203,15 +204,15 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { break } } - if err = iter.Close(); err != nil { - glog.V(0).InfofCtx(ctx, "list iterator close: %v", err) + if err := iter.Close(); err != nil { + glog.V(0).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go index eea59ffbd..3e9730238 100644 --- a/weed/filer/cassandra/cassandra_store_kv.go +++ b/weed/filer/cassandra/cassandra_store_kv.go @@ -4,8 +4,8 @@ import ( "context" "encoding/base64" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/gocql/gocql" - "github.com/seaweedfs/seaweedfs/weed/filer" ) func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -44,7 +44,7 @@ func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err erro if err := store.session.Query( "DELETE FROM filemeta WHERE directory=? AND name=?", dir, name).Exec(); err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/cassandra2/README.txt b/weed/filer/cassandra2/README.txt deleted file mode 100644 index c8cf921de..000000000 --- a/weed/filer/cassandra2/README.txt +++ /dev/null @@ -1,15 +0,0 @@ -1. create a keyspace - -CREATE KEYSPACE seaweedfs WITH replication = {'class':'SimpleStrategy', 'replication_factor' : 1}; - -2. create filemeta table - - USE seaweedfs; - - CREATE TABLE filemeta ( - dirhash bigint, - directory varchar, - name varchar, - meta blob, - PRIMARY KEY ((dirhash, directory), name) - ) WITH CLUSTERING ORDER BY (name ASC); diff --git a/weed/filer/cassandra2/cassandra_store.go b/weed/filer/cassandra2/cassandra_store.go deleted file mode 100644 index 8ff7f5874..000000000 --- a/weed/filer/cassandra2/cassandra_store.go +++ /dev/null @@ -1,222 +0,0 @@ -package cassandra2 - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/gocql/gocql" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - filer.Stores = append(filer.Stores, &Cassandra2Store{}) -} - -type Cassandra2Store struct { - cluster *gocql.ClusterConfig - session *gocql.Session - superLargeDirectoryHash map[string]string -} - -func (store *Cassandra2Store) GetName() string { - return "cassandra2" -} - -func (store *Cassandra2Store) Initialize(configuration util.Configuration, prefix string) (err error) { - return store.initialize( - configuration.GetString(prefix+"keyspace"), - configuration.GetStringSlice(prefix+"hosts"), - configuration.GetString(prefix+"username"), - configuration.GetString(prefix+"password"), - configuration.GetStringSlice(prefix+"superLargeDirectories"), - configuration.GetString(prefix+"localDC"), - configuration.GetInt(prefix+"connection_timeout_millisecond"), - ) -} - -func (store *Cassandra2Store) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) { - dirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir] - return -} - -func (store *Cassandra2Store) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string, localDC string, timeout int) (err error) { - store.cluster = gocql.NewCluster(hosts...) - if username != "" && password != "" { - store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password} - } - store.cluster.Keyspace = keyspace - store.cluster.Timeout = time.Duration(timeout) * time.Millisecond - glog.V(0).Infof("timeout = %d", timeout) - fallback := gocql.RoundRobinHostPolicy() - if localDC != "" { - fallback = gocql.DCAwareRoundRobinPolicy(localDC) - } - store.cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback) - store.cluster.Consistency = gocql.LocalQuorum - - store.session, err = store.cluster.CreateSession() - if err != nil { - glog.V(0).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace) - } - - // set directory hash - store.superLargeDirectoryHash = make(map[string]string) - existingHash := make(map[string]string) - for _, dir := range superLargeDirectories { - // adding dir hash to avoid duplicated names - dirHash := util.Md5String([]byte(dir))[:4] - store.superLargeDirectoryHash[dir] = dirHash - if existingDir, found := existingHash[dirHash]; found { - glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) - } - existingHash[dirHash] = dir - } - return -} - -func (store *Cassandra2Store) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} -func (store *Cassandra2Store) CommitTransaction(ctx context.Context) error { - return nil -} -func (store *Cassandra2Store) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *Cassandra2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - if dirHash, ok := store.isSuperLargeDirectory(dir); ok { - dir, name = dirHash+name, "" - } - - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { - meta = util.MaybeGzipData(meta) - } - - if err := store.session.Query( - "INSERT INTO filemeta (dirhash,directory,name,meta) VALUES(?,?,?,?) USING TTL ? ", - util.HashStringToLong(dir), dir, name, meta, entry.TtlSec).Exec(); err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - return nil -} - -func (store *Cassandra2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *Cassandra2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { - - dir, name := fullpath.DirAndName() - if dirHash, ok := store.isSuperLargeDirectory(dir); ok { - dir, name = dirHash+name, "" - } - - var data []byte - if err := store.session.Query( - "SELECT meta FROM filemeta WHERE dirhash=? AND directory=? AND name=?", - util.HashStringToLong(dir), dir, name).Scan(&data); err != nil { - if errors.Is(err, gocql.ErrNotFound) { - return nil, filer_pb.ErrNotFound - } - return nil, err - } - - entry = &filer.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *Cassandra2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { - - dir, name := fullpath.DirAndName() - if dirHash, ok := store.isSuperLargeDirectory(dir); ok { - dir, name = dirHash+name, "" - } - - if err := store.session.Query( - "DELETE FROM filemeta WHERE dirhash=? AND directory=? AND name=?", - util.HashStringToLong(dir), dir, name).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *Cassandra2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { - if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok { - return nil // filer.ErrUnsupportedSuperLargeDirectoryListing - } - - if err := store.session.Query( - "DELETE FROM filemeta WHERE dirhash=? AND directory=?", - util.HashStringToLong(string(fullpath)), fullpath).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *Cassandra2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed -} - -func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - - if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok { - return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing - } - - cqlStr := "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND directory=? AND name>? ORDER BY NAME ASC LIMIT ?" - if includeStartFile { - cqlStr = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" - } - - var data []byte - var name string - iter := store.session.Query(cqlStr, util.HashStringToLong(string(dirPath)), string(dirPath), startFileName, limit+1).Iter() - for iter.Scan(&name, &data) { - entry := &filer.Entry{ - FullPath: util.NewFullPath(string(dirPath), name), - } - lastFileName = name - if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { - err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) - break - } - if !eachEntryFunc(entry) { - break - } - } - if err = iter.Close(); err != nil { - glog.V(0).InfofCtx(ctx, "list iterator close: %v", err) - } - - return lastFileName, err -} - -func (store *Cassandra2Store) Shutdown() { - store.session.Close() -} diff --git a/weed/filer/cassandra2/cassandra_store_kv.go b/weed/filer/cassandra2/cassandra_store_kv.go deleted file mode 100644 index e11193b8b..000000000 --- a/weed/filer/cassandra2/cassandra_store_kv.go +++ /dev/null @@ -1,63 +0,0 @@ -package cassandra2 - -import ( - "context" - "encoding/base64" - "fmt" - "github.com/gocql/gocql" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func (store *Cassandra2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { - dir, name := genDirAndName(key) - - if err := store.session.Query( - "INSERT INTO filemeta (dirhash,directory,name,meta) VALUES(?,?,?,?) USING TTL ? ", - util.HashStringToLong(dir), dir, name, value, 0).Exec(); err != nil { - return fmt.Errorf("kv insert: %s", err) - } - - return nil -} - -func (store *Cassandra2Store) KvGet(ctx context.Context, key []byte) (data []byte, err error) { - dir, name := genDirAndName(key) - - if err := store.session.Query( - "SELECT meta FROM filemeta WHERE dirhash=? AND directory=? AND name=?", - util.HashStringToLong(dir), dir, name).Scan(&data); err != nil { - if err != gocql.ErrNotFound { - return nil, filer.ErrKvNotFound - } - } - - if len(data) == 0 { - return nil, filer.ErrKvNotFound - } - - return data, nil -} - -func (store *Cassandra2Store) KvDelete(ctx context.Context, key []byte) (err error) { - dir, name := genDirAndName(key) - - if err := store.session.Query( - "DELETE FROM filemeta WHERE dirhash=? AND directory=? AND name=?", - util.HashStringToLong(dir), dir, name).Exec(); err != nil { - return fmt.Errorf("kv delete: %w", err) - } - - return nil -} - -func genDirAndName(key []byte) (dir string, name string) { - for len(key) < 8 { - key = append(key, 0) - } - - dir = base64.StdEncoding.EncodeToString(key[:8]) - name = base64.StdEncoding.EncodeToString(key[8:]) - - return -} diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go index db4af1559..85fc65d13 100644 --- a/weed/filer/configuration.go +++ b/weed/filer/configuration.go @@ -1,8 +1,8 @@ package filer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "os" "reflect" "strings" @@ -33,7 +33,7 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { if !hasDefaultStoreConfigured { println() - println("Supported filer stores are the following. If not found, check the full version.") + println("Supported filer stores are:") for _, store := range Stores { println(" " + store.GetName()) } @@ -63,11 +63,6 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { if !found { continue } - - if !config.GetBool(key + ".enabled") { - continue - } - store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) if err := store.Initialize(config, key+"."); err != nil { glog.Fatalf("Failed to initialize store for %s: %+v", key, err) diff --git a/weed/filer/elastic/v7/doc.go b/weed/filer/elastic/v7/doc.go index 70e2f105c..704bbf6de 100644 --- a/weed/filer/elastic/v7/doc.go +++ b/weed/filer/elastic/v7/doc.go @@ -1,7 +1,9 @@ /* + Package elastic is for elastic filer store. The referenced "github.com/olivere/elastic/v7" library is too big when compiled. So this is only compiled in "make full_install". + */ package elastic diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go index 5b88025e4..cb2c66f5a 100644 --- a/weed/filer/elastic/v7/elastic_store.go +++ b/weed/filer/elastic/v7/elastic_store.go @@ -9,12 +9,12 @@ import ( "math" "strings" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) var ( @@ -78,12 +78,12 @@ func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err e ctx := context.Background() store.client, err = elastic.NewClient(options...) if err != nil { - return fmt.Errorf("init elastic %w", err) + return fmt.Errorf("init elastic %v.", err) } if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok { _, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx) if err != nil { - return fmt.Errorf("create index(%s) %v", indexKV, err) + return fmt.Errorf("create index(%s) %v.", indexKV, err) } } return nil @@ -113,8 +113,8 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) } value, err := jsoniter.Marshal(esEntry) if err != nil { - glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err) - return fmt.Errorf("insert entry marshal %w", err) + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) } _, err = store.client.Index(). Index(index). @@ -123,8 +123,8 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) BodyJson(string(value)). Do(ctx) if err != nil { - glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err) - return fmt.Errorf("insert entry %w", err) + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) } return nil } @@ -152,23 +152,15 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful err := jsoniter.Unmarshal(searchResult.Source, esEntry) return esEntry.Entry, err } - glog.ErrorfCtx(ctx, "find entry(%s),%v.", string(fullpath), err) + glog.Errorf("find entry(%s),%v.", string(fullpath), err) return nil, filer_pb.ErrNotFound } func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { index := getIndex(fullpath, false) id := weed_util.Md5String([]byte(fullpath)) - strFullpath := string(fullpath) - - // A top-level subdirectory refers to an Elasticsearch index. - // If we delete an entry at the top level, we should attempt to delete the corresponding Elasticsearch index. - if strings.Count(strFullpath, "/") == 1 { - entry, err2 := store.FindEntry(ctx, fullpath) - if err2 == nil && entry.IsDirectory() { - bucketIndex := indexPrefix + strFullpath[1:] - store.deleteIndex(ctx, bucketIndex) - } + if strings.Count(string(fullpath), "/") == 1 { + return store.deleteIndex(ctx, index) } return store.deleteEntry(ctx, index, id) } @@ -178,7 +170,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) { return nil } - glog.ErrorfCtx(ctx, "delete index(%s) %v.", index, err) + glog.Errorf("delete index(%s) %v.", index, err) return err } @@ -193,14 +185,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e return nil } } - glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err) - return fmt.Errorf("delete entry %w", err) + glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) + return fmt.Errorf("delete entry %v.", err) } func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool { if err := store.DeleteEntry(ctx, entry.FullPath); err != nil { - glog.ErrorfCtx(ctx, "elastic delete %s: %v.", entry.FullPath, err) + glog.Errorf("elastic delete %s: %v.", entry.FullPath, err) return false } return true @@ -228,7 +220,7 @@ func (store *ElasticStore) listDirectoryEntries( result := &elastic.SearchResult{} if (startFileName == "" && first) || inclusive { if result, err = store.search(ctx, index, parentId); err != nil { - glog.ErrorfCtx(ctx, "search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } else { @@ -238,7 +230,7 @@ func (store *ElasticStore) listDirectoryEntries( } after := weed_util.Md5String([]byte(fullPath)) if result, err = store.searchAfter(ctx, index, parentId, after); err != nil { - glog.ErrorfCtx(ctx, "searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go index 6986ea7ef..43835c153 100644 --- a/weed/filer/elastic/v7/elastic_store_kv.go +++ b/weed/filer/elastic/v7/elastic_store_kv.go @@ -7,11 +7,11 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" - "github.com/seaweedfs/seaweedfs/weed/glog" ) func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) { @@ -25,8 +25,8 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) return nil } } - glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err) - return fmt.Errorf("delete key %w", err) + glog.Errorf("delete key(id:%s) %v.", string(key), err) + return fmt.Errorf("delete key %v.", err) } func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { @@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, return esEntry.Value, nil } } - glog.ErrorfCtx(ctx, "find key(%s),%v.", string(key), err) + glog.Errorf("find key(%s),%v.", string(key), err) return value, filer.ErrKvNotFound } @@ -52,8 +52,8 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) esEntry := &ESKVEntry{value} val, err := jsoniter.Marshal(esEntry) if err != nil { - glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err) - return fmt.Errorf("insert key %w", err) + glog.Errorf("insert key(%s) %v.", string(key), err) + return fmt.Errorf("insert key %v.", err) } _, err = store.client.Index(). Index(indexKV). @@ -62,7 +62,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) BodyJson(string(val)). Do(ctx) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil } diff --git a/weed/filer/entry.go b/weed/filer/entry.go index 5bd1a3c56..8dd00f010 100644 --- a/weed/filer/entry.go +++ b/weed/filer/entry.go @@ -4,8 +4,8 @@ import ( "os" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type Attr struct { @@ -38,16 +38,15 @@ type Entry struct { // the following is for files Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` - HardLinkId HardLinkId - HardLinkCounter int32 - Content []byte - Remote *filer_pb.RemoteEntry - Quota int64 - WORMEnforcedAtTsNs int64 + HardLinkId HardLinkId + HardLinkCounter int32 + Content []byte + Remote *filer_pb.RemoteEntry + Quota int64 } func (entry *Entry) Size() uint64 { - return maxUint64(maxUint64(TotalSize(entry.GetChunks()), entry.FileSize), uint64(len(entry.Content))) + return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content))) } func (entry *Entry) Timestamp() time.Time { @@ -92,14 +91,13 @@ func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) { } message.IsDirectory = entry.IsDirectory() message.Attributes = EntryAttributeToPb(entry) - message.Chunks = entry.GetChunks() + message.Chunks = entry.Chunks message.Extended = entry.Extended message.HardLinkId = entry.HardLinkId message.HardLinkCounter = entry.HardLinkCounter message.Content = entry.Content message.RemoteEntry = entry.Remote message.Quota = entry.Quota - message.WormEnforcedAtTsNs = entry.WORMEnforcedAtTsNs } func FromPbEntryToExistingEntry(message *filer_pb.Entry, fsEntry *Entry) { @@ -111,8 +109,6 @@ func FromPbEntryToExistingEntry(message *filer_pb.Entry, fsEntry *Entry) { fsEntry.Content = message.Content fsEntry.Remote = message.RemoteEntry fsEntry.Quota = message.Quota - fsEntry.FileSize = FileSize(message) - fsEntry.WORMEnforcedAtTsNs = message.WormEnforcedAtTsNs } func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { @@ -126,10 +122,6 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { } } -func (entry *Entry) GetChunks() []*filer_pb.FileChunk { - return entry.Chunks -} - func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry { t := &Entry{} t.FullPath = util.NewFullPath(dir, entry.Name) diff --git a/weed/filer/entry_codec.go b/weed/filer/entry_codec.go index ce9c0484b..3d29ba0b4 100644 --- a/weed/filer/entry_codec.go +++ b/weed/filer/entry_codec.go @@ -6,9 +6,9 @@ import ( "os" "time" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { @@ -21,7 +21,7 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { message := &filer_pb.Entry{} - if err := proto.Unmarshal(blob, message); err != nil { + if err := proto.UnmarshalMerge(blob, message); err != nil { return fmt.Errorf("decoding value blob for %s: %v", entry.FullPath, err) } diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go index d300a7048..0dd7dbee2 100644 --- a/weed/filer/etcd/etcd_store.go +++ b/weed/filer/etcd/etcd_store.go @@ -1,20 +1,18 @@ package etcd import ( + "bytes" "context" - "crypto/tls" "fmt" "strings" "time" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/v3" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -26,80 +24,44 @@ func init() { } type EtcdStore struct { - client *clientv3.Client - etcdKeyPrefix string - timeout time.Duration + client *clientv3.Client } func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) error { - configuration.SetDefault(prefix+"servers", "localhost:2379") - configuration.SetDefault(prefix+"timeout", "3s") - +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { servers := configuration.GetString(prefix + "servers") - - username := configuration.GetString(prefix + "username") - password := configuration.GetString(prefix + "password") - store.etcdKeyPrefix = configuration.GetString(prefix + "key_prefix") - - timeoutStr := configuration.GetString(prefix + "timeout") - timeout, err := time.ParseDuration(timeoutStr) - if err != nil { - return fmt.Errorf("parse etcd store timeout: %w", err) - } - store.timeout = timeout - - certFile := configuration.GetString(prefix + "tls_client_crt_file") - keyFile := configuration.GetString(prefix + "tls_client_key_file") - caFile := configuration.GetString(prefix + "tls_ca_file") - - var tlsConfig *tls.Config - if caFile != "" { - tlsInfo := transport.TLSInfo{ - CertFile: certFile, - KeyFile: keyFile, - TrustedCAFile: caFile, - } - var err error - tlsConfig, err = tlsInfo.ClientConfig() - if err != nil { - return fmt.Errorf("TLS client configuration error: %w", err) - } + if servers == "" { + servers = "localhost:2379" } - return store.initialize(servers, username, password, store.timeout, tlsConfig) + timeout := configuration.GetString(prefix + "timeout") + if timeout == "" { + timeout = "3s" + } + + return store.initialize(servers, timeout) } -func (store *EtcdStore) initialize(servers, username, password string, timeout time.Duration, tlsConfig *tls.Config) error { +func (store *EtcdStore) initialize(servers string, timeout string) (err error) { glog.Infof("filer store etcd: %s", servers) - client, err := clientv3.New(clientv3.Config{ + to, err := time.ParseDuration(timeout) + if err != nil { + return fmt.Errorf("parse timeout %s: %s", timeout, err) + } + + store.client, err = clientv3.New(clientv3.Config{ Endpoints: strings.Split(servers, ","), - Username: username, - Password: password, - DialTimeout: timeout, - TLS: tlsConfig, + DialTimeout: to, }) if err != nil { return fmt.Errorf("connect to etcd %s: %s", servers, err) } - ctx, cancel := context.WithTimeout(context.Background(), store.timeout) - defer cancel() - - resp, err := client.Status(ctx, client.Endpoints()[0]) - if err != nil { - client.Close() - return fmt.Errorf("error checking etcd connection: %s", err) - } - - glog.V(0).InfofCtx(ctx, "ัonnection to etcd has been successfully verified. etcd version: %s", resp.Version) - store.client = client - - return nil + return } func (store *EtcdStore) BeginTransaction(ctx context.Context) (context.Context, error) { @@ -120,11 +82,11 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = weed_util.MaybeGzipData(meta) } - if _, err := store.client.Put(ctx, store.etcdKeyPrefix+string(key), string(meta)); err != nil { + if _, err := store.client.Put(ctx, string(key), string(meta)); err != nil { return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } @@ -138,7 +100,7 @@ func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (er func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { key := genKey(fullpath.DirAndName()) - resp, err := store.client.Get(ctx, store.etcdKeyPrefix+string(key)) + resp, err := store.client.Get(ctx, string(key)) if err != nil { return nil, fmt.Errorf("get %s : %v", fullpath, err) } @@ -161,7 +123,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPa func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) - if _, err := store.client.Delete(ctx, store.etcdKeyPrefix+string(key)); err != nil { + if _, err := store.client.Delete(ctx, string(key)); err != nil { return fmt.Errorf("delete %s : %v", fullpath, err) } @@ -171,7 +133,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.Full func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - if _, err := store.client.Delete(ctx, store.etcdKeyPrefix+string(directoryPrefix), clientv3.WithPrefix()); err != nil { + if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err) } @@ -179,20 +141,26 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_ } func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix) + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + directoryPrefix := genDirectoryKeyPrefix(dirPath, "") lastFileStart := directoryPrefix if startFileName != "" { lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) } - resp, err := store.client.Get(ctx, store.etcdKeyPrefix+string(lastFileStart), - clientv3.WithRange(clientv3.GetPrefixRangeEnd(store.etcdKeyPrefix+string(directoryPrefix))), - clientv3.WithLimit(limit+1)) + resp, err := store.client.Get(ctx, string(lastFileStart), + clientv3.WithFromKey(), clientv3.WithLimit(limit+1)) if err != nil { return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) } for _, kv := range resp.Kvs { + if !bytes.HasPrefix(kv.Key, directoryPrefix) { + break + } fileName := getNameFromKey(kv.Key) if fileName == "" { continue @@ -209,7 +177,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { @@ -221,10 +189,6 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat return lastFileName, err } -func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) -} - func genKey(dirPath, fileName string) (key []byte) { key = []byte(dirPath) key = append(key, DIR_FILE_SEPARATOR) diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go index e2536ba1c..df252f46c 100644 --- a/weed/filer/etcd/etcd_store_kv.go +++ b/weed/filer/etcd/etcd_store_kv.go @@ -3,15 +3,15 @@ package etcd import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" ) func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { - _, err = store.client.Put(ctx, store.etcdKeyPrefix+string(key), string(value)) + _, err = store.client.Put(ctx, string(key), string(value)) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -19,10 +19,10 @@ func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (er func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { - resp, err := store.client.Get(ctx, store.etcdKeyPrefix+string(key)) + resp, err := store.client.Get(ctx, string(key)) if err != nil { - return nil, fmt.Errorf("kv get: %w", err) + return nil, fmt.Errorf("kv get: %v", err) } if len(resp.Kvs) == 0 { @@ -34,10 +34,10 @@ func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, er func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) { - _, err = store.client.Delete(ctx, store.etcdKeyPrefix+string(key)) + _, err = store.client.Delete(ctx, string(key)) if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/etcd/etcd_store_test.go b/weed/filer/etcd/etcd_store_test.go index 6abb74697..824c28f5a 100644 --- a/weed/filer/etcd/etcd_store_test.go +++ b/weed/filer/etcd/etcd_store_test.go @@ -1,7 +1,7 @@ package etcd import ( - "github.com/seaweedfs/seaweedfs/weed/filer/store_test" + "github.com/chrislusf/seaweedfs/weed/filer/store_test" "testing" ) @@ -10,7 +10,7 @@ func TestStore(t *testing.T) { // to set up local env if false { store := &EtcdStore{} - store.initialize("localhost:2379", "", "", 3, nil) + store.initialize("localhost:2379", "3s") store_test.TestFilerStore(t, store) } } diff --git a/weed/filer/filechunk_group.go b/weed/filer/filechunk_group.go deleted file mode 100644 index 0f449735a..000000000 --- a/weed/filer/filechunk_group.go +++ /dev/null @@ -1,164 +0,0 @@ -package filer - -import ( - "context" - "io" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" - "github.com/seaweedfs/seaweedfs/weed/wdclient" -) - -type ChunkGroup struct { - lookupFn wdclient.LookupFileIdFunctionType - sections map[SectionIndex]*FileChunkSection - sectionsLock sync.RWMutex - readerCache *ReaderCache -} - -func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) (*ChunkGroup, error) { - group := &ChunkGroup{ - lookupFn: lookupFn, - sections: make(map[SectionIndex]*FileChunkSection), - readerCache: NewReaderCache(32, chunkCache, lookupFn), - } - - err := group.SetChunks(chunks) - return group, err -} - -func (group *ChunkGroup) AddChunk(chunk *filer_pb.FileChunk) error { - - group.sectionsLock.Lock() - defer group.sectionsLock.Unlock() - - sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize) - for si := sectionIndexStart; si < sectionIndexStop+1; si++ { - section, found := group.sections[si] - if !found { - section = NewFileChunkSection(si) - group.sections[si] = section - } - section.addChunk(chunk) - } - return nil -} - -func (group *ChunkGroup) ReadDataAt(ctx context.Context, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) { - if offset >= fileSize { - return 0, 0, io.EOF - } - - group.sectionsLock.RLock() - defer group.sectionsLock.RUnlock() - - sectionIndexStart, sectionIndexStop := SectionIndex(offset/SectionSize), SectionIndex((offset+int64(len(buff)))/SectionSize) - for si := sectionIndexStart; si < sectionIndexStop+1; si++ { - section, found := group.sections[si] - rangeStart, rangeStop := max(offset, int64(si*SectionSize)), min(offset+int64(len(buff)), int64((si+1)*SectionSize)) - if rangeStart >= rangeStop { - continue - } - if !found { - rangeStop = min(rangeStop, fileSize) - for i := rangeStart; i < rangeStop; i++ { - buff[i-offset] = 0 - } - n = int(int64(n) + rangeStop - rangeStart) - continue - } - xn, xTsNs, xErr := section.readDataAt(ctx, group, fileSize, buff[rangeStart-offset:rangeStop-offset], rangeStart) - if xErr != nil { - return n + xn, max(tsNs, xTsNs), xErr - } - n += xn - tsNs = max(tsNs, xTsNs) - } - return -} - -func (group *ChunkGroup) SetChunks(chunks []*filer_pb.FileChunk) error { - group.sectionsLock.RLock() - defer group.sectionsLock.RUnlock() - - var dataChunks []*filer_pb.FileChunk - for _, chunk := range chunks { - - if !chunk.IsChunkManifest { - dataChunks = append(dataChunks, chunk) - continue - } - - resolvedChunks, err := ResolveOneChunkManifest(context.Background(), group.lookupFn, chunk) - if err != nil { - return err - } - - dataChunks = append(dataChunks, resolvedChunks...) - } - - sections := make(map[SectionIndex]*FileChunkSection) - - for _, chunk := range dataChunks { - sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize) - for si := sectionIndexStart; si < sectionIndexStop+1; si++ { - section, found := sections[si] - if !found { - section = NewFileChunkSection(si) - sections[si] = section - } - section.chunks = append(section.chunks, chunk) - } - } - - group.sections = sections - return nil -} - -const ( - // see weedfs_file_lseek.go - SEEK_DATA uint32 = 3 // seek to next data after the offset - // SEEK_HOLE uint32 = 4 // seek to next hole after the offset -) - -// FIXME: needa tests -func (group *ChunkGroup) SearchChunks(ctx context.Context, offset, fileSize int64, whence uint32) (found bool, out int64) { - group.sectionsLock.RLock() - defer group.sectionsLock.RUnlock() - - return group.doSearchChunks(ctx, offset, fileSize, whence) -} - -func (group *ChunkGroup) doSearchChunks(ctx context.Context, offset, fileSize int64, whence uint32) (found bool, out int64) { - - sectionIndex, maxSectionIndex := SectionIndex(offset/SectionSize), SectionIndex(fileSize/SectionSize) - if whence == SEEK_DATA { - for si := sectionIndex; si < maxSectionIndex+1; si++ { - section, foundSection := group.sections[si] - if !foundSection { - continue - } - sectionStart := section.DataStartOffset(ctx, group, offset, fileSize) - if sectionStart == -1 { - continue - } - return true, sectionStart - } - return false, 0 - } else { - // whence == SEEK_HOLE - for si := sectionIndex; si < maxSectionIndex; si++ { - section, foundSection := group.sections[si] - if !foundSection { - return true, offset - } - holeStart := section.NextStopOffset(ctx, group, offset, fileSize) - if holeStart%SectionSize == 0 { - continue - } - return true, holeStart - } - return true, fileSize - } -} diff --git a/weed/filer/filechunk_group_test.go b/weed/filer/filechunk_group_test.go deleted file mode 100644 index a7103ce2e..000000000 --- a/weed/filer/filechunk_group_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package filer - -import ( - "context" - "errors" - "io" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestChunkGroup_ReadDataAt_ErrorHandling(t *testing.T) { - // Test that ReadDataAt behaves correctly in various scenarios - // This indirectly verifies that our error handling fix works properly - - // Create a ChunkGroup with no sections - group := &ChunkGroup{ - sections: make(map[SectionIndex]*FileChunkSection), - } - - t.Run("should return immediately on error", func(t *testing.T) { - // This test verifies that our fix is working by checking the behavior - // We'll create a simple scenario where the fix would make a difference - - buff := make([]byte, 100) - fileSize := int64(1000) - offset := int64(0) - - // With an empty ChunkGroup, we should get no error - n, tsNs, err := group.ReadDataAt(context.Background(), fileSize, buff, offset) - - // Should return 100 (length of buffer) and no error since there are no sections - // and missing sections are filled with zeros - assert.Equal(t, 100, n) - assert.Equal(t, int64(0), tsNs) - assert.NoError(t, err) - - // Verify buffer is filled with zeros - for i, b := range buff { - assert.Equal(t, byte(0), b, "buffer[%d] should be zero", i) - } - }) - - t.Run("should handle EOF correctly", func(t *testing.T) { - buff := make([]byte, 100) - fileSize := int64(50) // File smaller than buffer - offset := int64(0) - - n, tsNs, err := group.ReadDataAt(context.Background(), fileSize, buff, offset) - - // Should return 50 (file size) and no error - assert.Equal(t, 50, n) - assert.Equal(t, int64(0), tsNs) - assert.NoError(t, err) - }) - - t.Run("should return EOF when offset exceeds file size", func(t *testing.T) { - buff := make([]byte, 100) - fileSize := int64(50) - offset := int64(100) // Offset beyond file size - - n, tsNs, err := group.ReadDataAt(context.Background(), fileSize, buff, offset) - - assert.Equal(t, 0, n) - assert.Equal(t, int64(0), tsNs) - assert.Equal(t, io.EOF, err) - }) - - t.Run("should demonstrate the GitHub issue fix - errors should not be masked", func(t *testing.T) { - // This test demonstrates the exact scenario described in GitHub issue #6991 - // where io.EOF could mask real errors if we continued processing sections - - // The issue: - // - Before the fix: if section 1 returns a real error, but section 2 returns io.EOF, - // the real error would be overwritten by io.EOF - // - After the fix: return immediately on any error, preserving the original error - - // Our fix ensures that we return immediately on ANY error (including io.EOF) - // This test verifies that the fix pattern works correctly for the most critical cases - - buff := make([]byte, 100) - fileSize := int64(1000) - - // Test 1: Normal operation with no sections (filled with zeros) - n, tsNs, err := group.ReadDataAt(context.Background(), fileSize, buff, int64(0)) - assert.Equal(t, 100, n, "should read full buffer") - assert.Equal(t, int64(0), tsNs, "timestamp should be zero for missing sections") - assert.NoError(t, err, "should not error for missing sections") - - // Test 2: Reading beyond file size should return io.EOF immediately - n, tsNs, err = group.ReadDataAt(context.Background(), fileSize, buff, fileSize+1) - assert.Equal(t, 0, n, "should not read any bytes when beyond file size") - assert.Equal(t, int64(0), tsNs, "timestamp should be zero") - assert.Equal(t, io.EOF, err, "should return io.EOF when reading beyond file size") - - // Test 3: Reading at exact file boundary - n, tsNs, err = group.ReadDataAt(context.Background(), fileSize, buff, fileSize) - assert.Equal(t, 0, n, "should not read any bytes at exact file size boundary") - assert.Equal(t, int64(0), tsNs, "timestamp should be zero") - assert.Equal(t, io.EOF, err, "should return io.EOF at file boundary") - - // The key insight: Our fix ensures that ANY error from section.readDataAt() - // causes immediate return with proper context (bytes read + timestamp + error) - // This prevents later sections from masking earlier errors, especially - // preventing io.EOF from masking network errors or other real failures. - }) - - t.Run("Context Cancellation", func(t *testing.T) { - // Test 4: Context cancellation should be properly propagated through ReadDataAt - - // This test verifies that the context parameter is properly threaded through - // the call chain and that cancellation checks are in place at the right points - - // Test with a pre-cancelled context to ensure the cancellation is detected - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - group := &ChunkGroup{ - sections: make(map[SectionIndex]*FileChunkSection), - } - - buff := make([]byte, 100) - fileSize := int64(1000) - - // Call ReadDataAt with the already cancelled context - n, tsNs, err := group.ReadDataAt(ctx, fileSize, buff, int64(0)) - - // For an empty ChunkGroup (no sections), the operation will complete successfully - // since it just fills the buffer with zeros. However, the important thing is that - // the context is properly threaded through the call chain. - // The actual cancellation would be more evident with real chunk sections that - // perform network operations. - - if err != nil { - // If an error is returned, it should be a context cancellation error - assert.True(t, - errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded), - "Expected context.Canceled or context.DeadlineExceeded, got: %v", err) - } else { - // If no error (operation completed before cancellation check), - // verify normal behavior for empty ChunkGroup - assert.Equal(t, 100, n, "should read full buffer size when no sections exist") - assert.Equal(t, int64(0), tsNs, "timestamp should be zero") - t.Log("Operation completed before context cancellation was checked - this is expected for empty ChunkGroup") - } - }) - - t.Run("Context Cancellation with Timeout", func(t *testing.T) { - // Test 5: Context with timeout should be respected - - group := &ChunkGroup{ - sections: make(map[SectionIndex]*FileChunkSection), - } - - // Create a context with a very short timeout - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) - defer cancel() - - buff := make([]byte, 100) - fileSize := int64(1000) - - // This should fail due to timeout - n, tsNs, err := group.ReadDataAt(ctx, fileSize, buff, int64(0)) - - // For this simple case with no sections, it might complete before timeout - // But if it does timeout, we should handle it properly - if err != nil { - assert.True(t, - errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded), - "Expected context.Canceled or context.DeadlineExceeded when context times out, got: %v", err) - } else { - // If no error, verify normal behavior - assert.Equal(t, 100, n, "should read full buffer size when no sections exist") - assert.Equal(t, int64(0), tsNs, "timestamp should be zero") - } - }) -} - -func TestChunkGroup_SearchChunks_Cancellation(t *testing.T) { - t.Run("Context Cancellation in SearchChunks", func(t *testing.T) { - // Test that SearchChunks properly handles context cancellation - - group := &ChunkGroup{ - sections: make(map[SectionIndex]*FileChunkSection), - } - - // Test with a pre-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - fileSize := int64(1000) - offset := int64(0) - whence := uint32(3) // SEEK_DATA - - // Call SearchChunks with cancelled context - found, resultOffset := group.SearchChunks(ctx, offset, fileSize, whence) - - // For an empty ChunkGroup, SearchChunks should complete quickly - // The main goal is to verify the context parameter is properly threaded through - // In real scenarios with actual chunk sections, context cancellation would be more meaningful - - // Verify the function completes and returns reasonable values - assert.False(t, found, "should not find data in empty chunk group") - assert.Equal(t, int64(0), resultOffset, "should return 0 offset when no data found") - - t.Log("SearchChunks completed with cancelled context - context threading verified") - }) - - t.Run("Context with Timeout in SearchChunks", func(t *testing.T) { - // Test SearchChunks with a timeout context - - group := &ChunkGroup{ - sections: make(map[SectionIndex]*FileChunkSection), - } - - // Create a context with very short timeout - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) - defer cancel() - - fileSize := int64(1000) - offset := int64(0) - whence := uint32(3) // SEEK_DATA - - // Call SearchChunks - should complete quickly for empty group - found, resultOffset := group.SearchChunks(ctx, offset, fileSize, whence) - - // Verify reasonable behavior - assert.False(t, found, "should not find data in empty chunk group") - assert.Equal(t, int64(0), resultOffset, "should return 0 offset when no data found") - }) -} - -func TestChunkGroup_doSearchChunks(t *testing.T) { - type fields struct { - sections map[SectionIndex]*FileChunkSection - } - type args struct { - offset int64 - fileSize int64 - whence uint32 - } - tests := []struct { - name string - fields fields - args args - wantFound bool - wantOut int64 - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - group := &ChunkGroup{ - sections: tt.fields.sections, - } - gotFound, gotOut := group.doSearchChunks(context.Background(), tt.args.offset, tt.args.fileSize, tt.args.whence) - assert.Equalf(t, tt.wantFound, gotFound, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence) - assert.Equalf(t, tt.wantOut, gotOut, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence) - }) - } -} diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 80a741cf5..4eb657dfa 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -2,21 +2,21 @@ package filer import ( "bytes" - "context" "fmt" "io" "math" + "net/url" + "strings" "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/wdclient" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -49,7 +49,7 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa return } -func ResolveChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) { +func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) { // TODO maybe parallel this for _, chunk := range chunks { @@ -62,14 +62,14 @@ func ResolveChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFil continue } - resolvedChunks, err := ResolveOneChunkManifest(ctx, lookupFileIdFn, chunk) + resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk) if err != nil { return dataChunks, nil, err } manifestChunks = append(manifestChunks, chunk) // recursive - subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(ctx, lookupFileIdFn, resolvedChunks, startOffset, stopOffset) + subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset) if subErr != nil { return dataChunks, nil, subErr } @@ -79,7 +79,7 @@ func ResolveChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFil return } -func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) { +func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) { if !chunk.IsChunkManifest { return } @@ -88,7 +88,7 @@ func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.Lookup bytesBuffer := bytesBufferPool.Get().(*bytes.Buffer) bytesBuffer.Reset() defer bytesBufferPool.Put(bytesBuffer) - err := fetchWholeChunk(ctx, bytesBuffer, lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed) + err := fetchWholeChunk(bytesBuffer, lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed) if err != nil { return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err) } @@ -103,104 +103,99 @@ func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.Lookup } // TODO fetch from cache for weed mount? -func fetchWholeChunk(ctx context.Context, bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error { - urlStrings, err := lookupFileIdFn(ctx, fileId) +func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error { + urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err) + glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return err } - err = retriedStreamFetchChunkData(ctx, bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0) + err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, cipherKey, isGzipped, true, 0, 0) if err != nil { return err } return nil } -func fetchChunkRange(ctx context.Context, buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) { - urlStrings, err := lookupFileIdFn(ctx, fileId) +func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) { + urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err) + glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return 0, err } - return util_http.RetriedFetchChunkData(ctx, buffer, urlStrings, cipherKey, isGzipped, false, offset, fileId) + return retriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset) } -func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) { +func retriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64) (n int, err error) { + + var shouldRetry bool + + for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 { + for _, urlString := range urlStrings { + n = 0 + if strings.Contains(urlString, "%") { + urlString = url.PathEscape(urlString) + } + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, len(buffer), func(data []byte) { + if n < len(buffer) { + x := copy(buffer[n:], data) + n += x + } + }) + if !shouldRetry { + break + } + if err != nil { + glog.V(0).Infof("read %s failed, err: %v", urlString, err) + } else { + break + } + } + if err != nil && shouldRetry { + glog.V(0).Infof("retry reading in %v", waitTime) + time.Sleep(waitTime) + } else { + break + } + } + + return n, err + +} + +func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) { var shouldRetry bool var totalWritten int for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 { - // Check for context cancellation before starting retry loop - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - retriedCnt := 0 for _, urlString := range urlStrings { - // Check for context cancellation before each volume server request - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - retriedCnt++ - var localProcessed int - var writeErr error - shouldRetry, err = util_http.ReadUrlAsStreamAuthenticated(ctx, urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { - // Check for context cancellation during data processing - select { - case <-ctx.Done(): - writeErr = ctx.Err() - return - default: - } - - if totalWritten > localProcessed { - toBeSkipped := totalWritten - localProcessed + var localProcesed int + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { + if totalWritten > localProcesed { + toBeSkipped := totalWritten - localProcesed if len(data) <= toBeSkipped { - localProcessed += len(data) + localProcesed += len(data) return // skip if already processed } data = data[toBeSkipped:] - localProcessed += toBeSkipped + localProcesed += toBeSkipped } - var writtenCount int - writtenCount, writeErr = writer.Write(data) - localProcessed += writtenCount - totalWritten += writtenCount + writer.Write(data) + localProcesed += len(data) + totalWritten += len(data) }) if !shouldRetry { break } - if writeErr != nil { - err = writeErr - break - } if err != nil { - glog.V(0).InfofCtx(ctx, "read %s failed, err: %v", urlString, err) + glog.V(0).Infof("read %s failed, err: %v", urlString, err) } else { break } } - // all nodes have tried it - if retriedCnt == len(urlStrings) { - break - } if err != nil && shouldRetry { - glog.V(0).InfofCtx(ctx, "retry reading in %v", waitTime) - // Sleep with proper context cancellation and timer cleanup - timer := time.NewTimer(waitTime) - select { - case <-ctx.Done(): - timer.Stop() - return ctx.Err() - case <-timer.C: - // Continue with retry - } + glog.V(0).Infof("retry reading in %v", waitTime) + time.Sleep(waitTime) } else { break } @@ -211,12 +206,6 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin } func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) { - // Don't manifestize SSE-encrypted chunks to preserve per-chunk metadata - for _, chunk := range inputChunks { - if chunk.GetSseType() != 0 { // Any SSE type (SSE-C or SSE-KMS) - return inputChunks, nil - } - } return doMaybeManifestize(saveFunc, inputChunks, ManifestBatch, mergeIntoManifest) } @@ -256,7 +245,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer Chunks: dataChunks, }) if serErr != nil { - return nil, fmt.Errorf("serializing manifest: %w", serErr) + return nil, fmt.Errorf("serializing manifest: %v", serErr) } minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64) @@ -269,7 +258,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer } } - manifestChunk, err = saveFunc(bytes.NewReader(data), "", 0, 0) + manifestChunk, _, _, err = saveFunc(bytes.NewReader(data), "", 0) if err != nil { return nil, err } @@ -280,4 +269,4 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer return } -type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) +type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) diff --git a/weed/filer/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go index a03f26a6d..ce12c5da6 100644 --- a/weed/filer/filechunk_manifest_test.go +++ b/weed/filer/filechunk_manifest_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func TestDoMaybeManifestize(t *testing.T) { diff --git a/weed/filer/filechunk_section.go b/weed/filer/filechunk_section.go deleted file mode 100644 index 76eb84c23..000000000 --- a/weed/filer/filechunk_section.go +++ /dev/null @@ -1,147 +0,0 @@ -package filer - -import ( - "context" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -const SectionSize = 2 * 1024 * 1024 * 32 // 64MiB -type SectionIndex int64 -type FileChunkSection struct { - sectionIndex SectionIndex - chunks []*filer_pb.FileChunk - visibleIntervals *IntervalList[*VisibleInterval] - chunkViews *IntervalList[*ChunkView] - reader *ChunkReadAt - lock sync.RWMutex - isPrepared bool -} - -func NewFileChunkSection(si SectionIndex) *FileChunkSection { - return &FileChunkSection{ - sectionIndex: si, - } -} - -func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error { - section.lock.Lock() - defer section.lock.Unlock() - - start, stop := max(int64(section.sectionIndex)*SectionSize, chunk.Offset), min(((int64(section.sectionIndex)+1)*SectionSize), chunk.Offset+int64(chunk.Size)) - - section.chunks = append(section.chunks, chunk) - - if section.visibleIntervals == nil { - section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize) - } else { - MergeIntoVisibles(section.visibleIntervals, start, stop, chunk) - garbageFileIds := FindGarbageChunks(section.visibleIntervals, start, stop) - removeGarbageChunks(section, garbageFileIds) - } - - if section.chunkViews != nil { - MergeIntoChunkViews(section.chunkViews, start, stop, chunk) - } - - return nil -} - -func removeGarbageChunks(section *FileChunkSection, garbageFileIds map[string]struct{}) { - for i := 0; i < len(section.chunks); { - t := section.chunks[i] - length := len(section.chunks) - if _, found := garbageFileIds[t.FileId]; found { - if i < length-1 { - section.chunks[i] = section.chunks[length-1] - } - section.chunks = section.chunks[:length-1] - } else { - i++ - } - } -} - -func (section *FileChunkSection) setupForRead(ctx context.Context, group *ChunkGroup, fileSize int64) { - section.lock.Lock() - defer section.lock.Unlock() - - if section.isPrepared { - section.reader.fileSize = fileSize - return - } - - if section.visibleIntervals == nil { - section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize) - section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks) - if section.reader != nil { - _ = section.reader.Close() - section.reader = nil - } - } - if section.chunkViews == nil { - section.chunkViews = ViewFromVisibleIntervals(section.visibleIntervals, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize) - } - - if section.reader == nil { - section.reader = NewChunkReaderAtFromClient(ctx, group.readerCache, section.chunkViews, min(int64(section.sectionIndex+1)*SectionSize, fileSize)) - } - - section.isPrepared = true - section.reader.fileSize = fileSize -} - -func (section *FileChunkSection) readDataAt(ctx context.Context, group *ChunkGroup, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) { - - section.setupForRead(ctx, group, fileSize) - section.lock.RLock() - defer section.lock.RUnlock() - - return section.reader.ReadAtWithTime(ctx, buff, offset) -} - -func (section *FileChunkSection) DataStartOffset(ctx context.Context, group *ChunkGroup, offset int64, fileSize int64) int64 { - - section.setupForRead(ctx, group, fileSize) - section.lock.RLock() - defer section.lock.RUnlock() - - for x := section.visibleIntervals.Front(); x != nil; x = x.Next { - visible := x.Value - if visible.stop <= offset { - continue - } - if offset < visible.start { - return offset - } - return offset - } - return -1 -} - -func (section *FileChunkSection) NextStopOffset(ctx context.Context, group *ChunkGroup, offset int64, fileSize int64) int64 { - - section.setupForRead(ctx, group, fileSize) - section.lock.RLock() - defer section.lock.RUnlock() - - isAfterOffset := false - for x := section.visibleIntervals.Front(); x != nil; x = x.Next { - visible := x.Value - if !isAfterOffset { - if visible.stop <= offset { - continue - } - isAfterOffset = true - } - if offset < visible.start { - return offset - } - // now visible.start <= offset - if offset < visible.stop { - offset = visible.stop - } - } - return offset -} diff --git a/weed/filer/filechunk_section_test.go b/weed/filer/filechunk_section_test.go deleted file mode 100644 index 7b76c8456..000000000 --- a/weed/filer/filechunk_section_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package filer - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "testing" -) - -func Test_removeGarbageChunks(t *testing.T) { - section := NewFileChunkSection(0) - section.chunks = append(section.chunks, &filer_pb.FileChunk{ - FileId: "0", - Offset: 0, - Size: 1, - ModifiedTsNs: 0, - }) - section.chunks = append(section.chunks, &filer_pb.FileChunk{ - FileId: "1", - Offset: 1, - Size: 1, - ModifiedTsNs: 1, - }) - section.chunks = append(section.chunks, &filer_pb.FileChunk{ - FileId: "2", - Offset: 2, - Size: 1, - ModifiedTsNs: 2, - }) - section.chunks = append(section.chunks, &filer_pb.FileChunk{ - FileId: "3", - Offset: 3, - Size: 1, - ModifiedTsNs: 3, - }) - section.chunks = append(section.chunks, &filer_pb.FileChunk{ - FileId: "4", - Offset: 4, - Size: 1, - ModifiedTsNs: 4, - }) - garbageFileIds := make(map[string]struct{}) - garbageFileIds["0"] = struct{}{} - garbageFileIds["2"] = struct{}{} - garbageFileIds["4"] = struct{}{} - removeGarbageChunks(section, garbageFileIds) - if len(section.chunks) != 2 { - t.Errorf("remove chunk 2 failed") - } -} diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go index 43261a970..4c732ddcd 100644 --- a/weed/filer/filechunks.go +++ b/weed/filer/filechunks.go @@ -2,13 +2,13 @@ package filer import ( "bytes" - "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "golang.org/x/exp/slices" "math" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) { @@ -31,22 +31,19 @@ func FileSize(entry *filer_pb.Entry) (size uint64) { fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize)) } } - return maxUint64(TotalSize(entry.GetChunks()), fileSize) + return maxUint64(TotalSize(entry.Chunks), fileSize) } func ETag(entry *filer_pb.Entry) (etag string) { if entry.Attributes == nil || entry.Attributes.Md5 == nil { - return ETagChunks(entry.GetChunks()) + return ETagChunks(entry.Chunks) } return fmt.Sprintf("%x", entry.Attributes.Md5) } func ETagEntry(entry *Entry) (etag string) { - if entry.IsInRemoteOnly() { - return entry.Remote.RemoteETag - } if entry.Attr.Md5 == nil { - return ETagChunks(entry.GetChunks()) + return ETagChunks(entry.Chunks) } return fmt.Sprintf("%x", entry.Attr.Md5) } @@ -62,19 +59,12 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) { return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks)) } -func CompactFileChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) { +func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) { - visibles, _ := NonOverlappingVisibleIntervals(ctx, lookupFileIdFn, chunks, 0, math.MaxInt64) + visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64) - compacted, garbage = SeparateGarbageChunks(visibles, chunks) - - return -} - -func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*filer_pb.FileChunk) (compacted []*filer_pb.FileChunk, garbage []*filer_pb.FileChunk) { fileIds := make(map[string]bool) - for x := visibles.Front(); x != nil; x = x.Next { - interval := x.Value + for _, interval := range visibles { fileIds[interval.fileId] = true } for _, chunk := range chunks { @@ -84,28 +74,17 @@ func SeparateGarbageChunks(visibles *IntervalList[*VisibleInterval], chunks []*f garbage = append(garbage, chunk) } } - return compacted, garbage -} -func FindGarbageChunks(visibles *IntervalList[*VisibleInterval], start int64, stop int64) (garbageFileIds map[string]struct{}) { - garbageFileIds = make(map[string]struct{}) - for x := visibles.Front(); x != nil; x = x.Next { - interval := x.Value - offset := interval.start - interval.offsetInChunk - if start <= offset && offset+int64(interval.chunkSize) <= stop { - garbageFileIds[interval.fileId] = struct{}{} - } - } return } -func MinusChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) { +func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) { - aData, aMeta, aErr := ResolveChunkManifest(ctx, lookupFileIdFn, as, 0, math.MaxInt64) + aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64) if aErr != nil { return nil, aErr } - bData, bMeta, bErr := ResolveChunkManifest(ctx, lookupFileIdFn, bs, 0, math.MaxInt64) + bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64) if bErr != nil { return nil, bErr } @@ -135,12 +114,9 @@ func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_p fileIds := make(map[string]bool) for _, interval := range bs { fileIds[interval.GetFileIdString()] = true - fileIds[interval.GetSourceFileId()] = true } for _, chunk := range as { - _, sourceFileIdFound := fileIds[chunk.GetSourceFileId()] - _, fileIdFound := fileIds[chunk.GetFileId()] - if !sourceFileIdFound && !fileIdFound { + if _, found := fileIds[chunk.GetSourceFileId()]; !found { delta = append(delta, chunk) } } @@ -149,50 +125,28 @@ func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_p } type ChunkView struct { - FileId string - OffsetInChunk int64 // offset within the chunk - ViewSize uint64 - ViewOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk - ChunkSize uint64 - CipherKey []byte - IsGzipped bool - ModifiedTsNs int64 -} - -func (cv *ChunkView) SetStartStop(start, stop int64) { - cv.OffsetInChunk += start - cv.ViewOffset - cv.ViewOffset = start - cv.ViewSize = uint64(stop - start) -} -func (cv *ChunkView) Clone() IntervalValue { - return &ChunkView{ - FileId: cv.FileId, - OffsetInChunk: cv.OffsetInChunk, - ViewSize: cv.ViewSize, - ViewOffset: cv.ViewOffset, - ChunkSize: cv.ChunkSize, - CipherKey: cv.CipherKey, - IsGzipped: cv.IsGzipped, - ModifiedTsNs: cv.ModifiedTsNs, - } + FileId string + Offset int64 + Size uint64 + LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk + ChunkSize uint64 + CipherKey []byte + IsGzipped bool } func (cv *ChunkView) IsFullChunk() bool { - // IsFullChunk returns true if the view covers the entire chunk from the beginning. - // This prevents bandwidth amplification when range requests happen to align - // with chunk boundaries but don't actually want the full chunk. - return cv.OffsetInChunk == 0 && cv.ViewSize == cv.ChunkSize + return cv.Size == cv.ChunkSize } -func ViewFromChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) { +func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) { - visibles, _ := NonOverlappingVisibleIntervals(ctx, lookupFileIdFn, chunks, offset, offset+size) + visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size) return ViewFromVisibleIntervals(visibles, offset, size) } -func ViewFromVisibleIntervals(visibles *IntervalList[*VisibleInterval], offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) { +func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) { stop := offset + size if size == math.MaxInt64 { @@ -202,112 +156,164 @@ func ViewFromVisibleIntervals(visibles *IntervalList[*VisibleInterval], offset i stop = math.MaxInt64 } - chunkViews = NewIntervalList[*ChunkView]() - for x := visibles.Front(); x != nil; x = x.Next { - chunk := x.Value + for _, chunk := range visibles { chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop) if chunkStart < chunkStop { - chunkView := &ChunkView{ - FileId: chunk.fileId, - OffsetInChunk: chunkStart - chunk.start + chunk.offsetInChunk, - ViewSize: uint64(chunkStop - chunkStart), - ViewOffset: chunkStart, - ChunkSize: chunk.chunkSize, - CipherKey: chunk.cipherKey, - IsGzipped: chunk.isGzipped, - ModifiedTsNs: chunk.modifiedTsNs, - } - chunkViews.AppendInterval(&Interval[*ChunkView]{ - StartOffset: chunkStart, - StopOffset: chunkStop, - TsNs: chunk.modifiedTsNs, - Value: chunkView, - Prev: nil, - Next: nil, + views = append(views, &ChunkView{ + FileId: chunk.fileId, + Offset: chunkStart - chunk.start + chunk.chunkOffset, + Size: uint64(chunkStop - chunkStart), + LogicOffset: chunkStart, + ChunkSize: chunk.chunkSize, + CipherKey: chunk.cipherKey, + IsGzipped: chunk.isGzipped, }) } } - return chunkViews + return views } -func MergeIntoVisibles(visibles *IntervalList[*VisibleInterval], start int64, stop int64, chunk *filer_pb.FileChunk) { +func logPrintf(name string, visibles []VisibleInterval) { - newV := &VisibleInterval{ - start: start, - stop: stop, - fileId: chunk.GetFileIdString(), - modifiedTsNs: chunk.ModifiedTsNs, - offsetInChunk: start - chunk.Offset, // the starting position in the chunk - chunkSize: chunk.Size, // size of the chunk - cipherKey: chunk.CipherKey, - isGzipped: chunk.IsCompressed, - } - - visibles.InsertInterval(start, stop, chunk.ModifiedTsNs, newV) + /* + glog.V(0).Infof("%s len %d", name, len(visibles)) + for _, v := range visibles { + glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset) + } + */ } -func MergeIntoChunkViews(chunkViews *IntervalList[*ChunkView], start int64, stop int64, chunk *filer_pb.FileChunk) { +func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) { - chunkView := &ChunkView{ - FileId: chunk.GetFileIdString(), - OffsetInChunk: start - chunk.Offset, - ViewSize: uint64(stop - start), - ViewOffset: start, - ChunkSize: chunk.Size, - CipherKey: chunk.CipherKey, - IsGzipped: chunk.IsCompressed, - ModifiedTsNs: chunk.ModifiedTsNs, + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed) + + length := len(visibles) + if length == 0 { + return append(visibles, newV) + } + last := visibles[length-1] + if last.stop <= chunk.Offset { + return append(visibles, newV) } - chunkViews.InsertInterval(start, stop, chunk.ModifiedTsNs, chunkView) + logPrintf(" before", visibles) + // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size) + chunkStop := chunk.Offset + int64(chunk.Size) + for _, v := range visibles { + if v.start < chunk.Offset && chunk.Offset < v.stop { + t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped) + newVisibles = append(newVisibles, t) + // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop) + } + if v.start < chunkStop && chunkStop < v.stop { + t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped) + newVisibles = append(newVisibles, t) + // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop) + } + if chunkStop <= v.start || v.stop <= chunk.Offset { + newVisibles = append(newVisibles, v) + // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop) + } + } + newVisibles = append(newVisibles, newV) + + logPrintf(" append", newVisibles) + + for i := len(newVisibles) - 1; i >= 0; i-- { + if i > 0 && newV.start < newVisibles[i-1].start { + newVisibles[i] = newVisibles[i-1] + } else { + newVisibles[i] = newV + break + } + } + logPrintf(" sorted", newVisibles) + + return newVisibles } // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory // If the file chunk content is a chunk manifest -func NonOverlappingVisibleIntervals(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) { +func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) { - chunks, _, err = ResolveChunkManifest(ctx, lookupFileIdFn, chunks, startOffset, stopOffset) + chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset) if err != nil { return } - visibles2 := readResolvedChunks(chunks, 0, math.MaxInt64) + visibles2 := readResolvedChunks(chunks) - return visibles2, err + if true { + return visibles2, err + } + slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { + if a.Mtime == b.Mtime { + filer_pb.EnsureFid(a) + filer_pb.EnsureFid(b) + if a.Fid == nil || b.Fid == nil { + return true + } + return a.Fid.FileKey < b.Fid.FileKey + } + return a.Mtime < b.Mtime + }) + for _, chunk := range chunks { + + // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size)) + visibles = MergeIntoVisibles(visibles, chunk) + + logPrintf("add", visibles) + + } + + if len(visibles) != len(visibles2) { + fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2)) + } else { + for i := 0; i < len(visibles); i++ { + checkDifference(visibles[i], visibles2[i]) + } + } + + return +} + +func checkDifference(x, y VisibleInterval) { + if x.start != y.start || + x.stop != y.stop || + x.fileId != y.fileId || + x.modifiedTime != y.modifiedTime { + fmt.Printf("different visible %+v : %+v\n", x, y) + } } // find non-overlapping visible intervals // visible interval map to one file chunk type VisibleInterval struct { - start int64 - stop int64 - modifiedTsNs int64 - fileId string - offsetInChunk int64 - chunkSize uint64 - cipherKey []byte - isGzipped bool + start int64 + stop int64 + modifiedTime int64 + fileId string + chunkOffset int64 + chunkSize uint64 + cipherKey []byte + isGzipped bool } -func (v *VisibleInterval) SetStartStop(start, stop int64) { - v.offsetInChunk += start - v.start - v.start, v.stop = start, stop -} -func (v *VisibleInterval) Clone() IntervalValue { - return &VisibleInterval{ - start: v.start, - stop: v.stop, - modifiedTsNs: v.modifiedTsNs, - fileId: v.fileId, - offsetInChunk: v.offsetInChunk, - chunkSize: v.chunkSize, - cipherKey: v.cipherKey, - isGzipped: v.isGzipped, +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval { + return VisibleInterval{ + start: start, + stop: stop, + fileId: fileId, + modifiedTime: modifiedTime, + chunkOffset: chunkOffset, // the starting position in the chunk + chunkSize: chunkSize, + cipherKey: cipherKey, + isGzipped: isGzipped, } } diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go index b735b8a27..39dec87c9 100644 --- a/weed/filer/filechunks2_test.go +++ b/weed/filer/filechunks2_test.go @@ -1,72 +1,32 @@ package filer import ( - "context" - "github.com/stretchr/testify/assert" - "log" - "slices" + "golang.org/x/exp/slices" "testing" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func TestDoMinusChunks(t *testing.T) { - // https://github.com/seaweedfs/seaweedfs/issues/3328 - - // clusterA and clusterB using filer.sync to sync file: hello.txt - // clusterA append a new line and then clusterB also append a new line - // clusterA append a new line again - chunksInA := []*filer_pb.FileChunk{ - {Offset: 0, Size: 3, FileId: "11", ModifiedTsNs: 100}, - {Offset: 3, Size: 3, FileId: "22", SourceFileId: "2", ModifiedTsNs: 200}, - {Offset: 6, Size: 3, FileId: "33", ModifiedTsNs: 300}, - } - chunksInB := []*filer_pb.FileChunk{ - {Offset: 0, Size: 3, FileId: "1", SourceFileId: "11", ModifiedTsNs: 100}, - {Offset: 3, Size: 3, FileId: "2", ModifiedTsNs: 200}, - {Offset: 6, Size: 3, FileId: "3", SourceFileId: "33", ModifiedTsNs: 300}, - } - - // clusterB using command "echo 'content' > hello.txt" to overwrite file - // clusterA will receive two evenNotification, need to empty the whole file content first and add new content - // the first one is oldEntry is chunksInB and newEntry is empty fileChunks - firstOldEntry := chunksInB - var firstNewEntry []*filer_pb.FileChunk - - // clusterA received the first one event, gonna empty the whole chunk, according the code in filer_sink 194 - // we can get the deleted chunks and newChunks - firstDeletedChunks := DoMinusChunks(firstOldEntry, firstNewEntry) - log.Println("first deleted chunks:", firstDeletedChunks) - //firstNewEntry := DoMinusChunks(firstNewEntry, firstOldEntry) - - // clusterA need to delete all chunks in firstDeletedChunks - emptiedChunksInA := DoMinusChunksBySourceFileId(chunksInA, firstDeletedChunks) - // chunksInA supposed to be empty by minus the deletedChunks but it just delete the chunk which sync from clusterB - log.Println("clusterA synced empty chunks event result:", emptiedChunksInA) - // clusterB emptied it's chunks and clusterA must sync the change and empty chunks too - assert.Equalf(t, firstNewEntry, emptiedChunksInA, "empty") -} - func TestCompactFileChunksRealCase(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, ModifiedTsNs: 5320497}, - {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, ModifiedTsNs: 5320492}, - {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, ModifiedTsNs: 5325928}, - {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, ModifiedTsNs: 5325894}, - {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, ModifiedTsNs: 5325900}, - {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, ModifiedTsNs: 5325904}, - {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, ModifiedTsNs: 5325910}, - {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, ModifiedTsNs: 5325903}, - {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, ModifiedTsNs: 5325911}, - {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, ModifiedTsNs: 5325909}, - {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, ModifiedTsNs: 5325922}, + {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497}, + {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492}, + {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928}, + {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894}, + {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900}, + {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904}, + {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910}, + {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903}, + {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911}, + {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909}, + {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922}, } printChunks("before", chunks) - compacted, garbage := CompactFileChunks(context.Background(), nil, chunks) + compacted, garbage := CompactFileChunks(nil, chunks) printChunks("compacted", compacted) printChunks("garbage", garbage) @@ -74,11 +34,11 @@ func TestCompactFileChunksRealCase(t *testing.T) { } func printChunks(name string, chunks []*filer_pb.FileChunk) { - slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) int { + slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { if a.Offset == b.Offset { - return int(a.ModifiedTsNs - b.ModifiedTsNs) + return a.Mtime < b.Mtime } - return int(a.Offset - b.Offset) + return a.Offset < b.Offset }) for _, chunk := range chunks { glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) diff --git a/weed/filer/filechunks_read.go b/weed/filer/filechunks_read.go index 756e18b34..1d0bd837a 100644 --- a/weed/filer/filechunks_read.go +++ b/weed/filer/filechunks_read.go @@ -1,91 +1,72 @@ package filer import ( - "container/list" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "slices" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "golang.org/x/exp/slices" ) -func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval]) { +func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) { var points []*Point for _, chunk := range chunks { - if chunk.IsChunkManifest { - println("This should not happen! A manifest chunk found:", chunk.GetFileIdString()) - } - start, stop := max(chunk.Offset, startOffset), min(chunk.Offset+int64(chunk.Size), stopOffset) - if start >= stop { - continue - } points = append(points, &Point{ x: chunk.Offset, - ts: chunk.ModifiedTsNs, + ts: chunk.Mtime, chunk: chunk, isStart: true, }) points = append(points, &Point{ x: chunk.Offset + int64(chunk.Size), - ts: chunk.ModifiedTsNs, + ts: chunk.Mtime, chunk: chunk, isStart: false, }) } - slices.SortFunc(points, func(a, b *Point) int { + slices.SortFunc(points, func(a, b *Point) bool { if a.x != b.x { - return int(a.x - b.x) + return a.x < b.x } if a.ts != b.ts { - return int(a.ts - b.ts) + return a.ts < b.ts } - if a.isStart { - return 1 - } - if b.isStart { - return -1 - } - return 0 + return !a.isStart }) var prevX int64 - queue := list.New() // points with higher ts are at the tail - visibles = NewIntervalList[*VisibleInterval]() - var prevPoint *Point + var queue []*Point for _, point := range points { - if queue.Len() > 0 { - prevPoint = queue.Back().Value.(*Point) - } else { - prevPoint = nil - } if point.isStart { - if prevPoint != nil { - if point.x != prevX && prevPoint.ts < point.ts { - addToVisibles(visibles, prevX, prevPoint, point) + if len(queue) > 0 { + lastIndex := len(queue) - 1 + lastPoint := queue[lastIndex] + if point.x != prevX && lastPoint.ts < point.ts { + visibles = addToVisibles(visibles, prevX, lastPoint, point) prevX = point.x } } // insert into queue - if prevPoint == nil || prevPoint.ts < point.ts { - queue.PushBack(point) - prevX = point.x - } else { - for e := queue.Front(); e != nil; e = e.Next() { - if e.Value.(*Point).ts > point.ts { - queue.InsertBefore(point, e) - break + for i := len(queue); i >= 0; i-- { + if i == 0 || queue[i-1].ts <= point.ts { + if i == len(queue) { + prevX = point.x } + queue = addToQueue(queue, i, point) + break } } } else { - isLast := true - for e := queue.Back(); e != nil; e = e.Prev() { - if e.Value.(*Point).ts == point.ts { - queue.Remove(e) + lastIndex := len(queue) - 1 + index := lastIndex + var startPoint *Point + for ; index >= 0; index-- { + startPoint = queue[index] + if startPoint.ts == point.ts { + queue = removeFromQueue(queue, index) break } - isLast = false } - if isLast && prevPoint != nil { - addToVisibles(visibles, prevX, prevPoint, point) + if index == lastIndex && startPoint != nil { + visibles = addToVisibles(visibles, prevX, startPoint, point) prevX = point.x } } @@ -94,30 +75,37 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOff return } -func addToVisibles(visibles *IntervalList[*VisibleInterval], prevX int64, startPoint *Point, point *Point) { - if prevX < point.x { - chunk := startPoint.chunk - visible := &VisibleInterval{ - start: prevX, - stop: point.x, - fileId: chunk.GetFileIdString(), - modifiedTsNs: chunk.ModifiedTsNs, - offsetInChunk: prevX - chunk.Offset, - chunkSize: chunk.Size, - cipherKey: chunk.CipherKey, - isGzipped: chunk.IsCompressed, - } - appendVisibleInterfal(visibles, visible) +func removeFromQueue(queue []*Point, index int) []*Point { + for i := index; i < len(queue)-1; i++ { + queue[i] = queue[i+1] } + queue = queue[:len(queue)-1] + return queue } -func appendVisibleInterfal(visibles *IntervalList[*VisibleInterval], visible *VisibleInterval) { - visibles.AppendInterval(&Interval[*VisibleInterval]{ - StartOffset: visible.start, - StopOffset: visible.stop, - TsNs: visible.modifiedTsNs, - Value: visible, - }) +func addToQueue(queue []*Point, index int, point *Point) []*Point { + queue = append(queue, point) + for i := len(queue) - 1; i > index; i-- { + queue[i], queue[i-1] = queue[i-1], queue[i] + } + return queue +} + +func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, point *Point) []VisibleInterval { + if prevX < point.x { + chunk := startPoint.chunk + visibles = append(visibles, VisibleInterval{ + start: prevX, + stop: point.x, + fileId: chunk.GetFileIdString(), + modifiedTime: chunk.Mtime, + chunkOffset: prevX - chunk.Offset, + chunkSize: chunk.Size, + cipherKey: chunk.CipherKey, + isGzipped: chunk.IsCompressed, + }) + } + return visibles } type Point struct { diff --git a/weed/filer/filechunks_read_test.go b/weed/filer/filechunks_read_test.go index c66a874bc..e70c66e6f 100644 --- a/weed/filer/filechunks_read_test.go +++ b/weed/filer/filechunks_read_test.go @@ -2,8 +2,7 @@ package filer import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "math" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "math/rand" "testing" ) @@ -12,70 +11,41 @@ func TestReadResolvedChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ { - FileId: "a", - Offset: 0, - Size: 100, - ModifiedTsNs: 1, + FileId: "a", + Offset: 0, + Size: 100, + Mtime: 1, }, { - FileId: "b", - Offset: 50, - Size: 100, - ModifiedTsNs: 2, + FileId: "b", + Offset: 50, + Size: 100, + Mtime: 2, }, { - FileId: "c", - Offset: 200, - Size: 50, - ModifiedTsNs: 3, + FileId: "c", + Offset: 200, + Size: 50, + Mtime: 3, }, { - FileId: "d", - Offset: 250, - Size: 50, - ModifiedTsNs: 4, + FileId: "d", + Offset: 250, + Size: 50, + Mtime: 4, }, { - FileId: "e", - Offset: 175, - Size: 100, - ModifiedTsNs: 5, + FileId: "e", + Offset: 175, + Size: 100, + Mtime: 5, }, } - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + visibles := readResolvedChunks(chunks) - fmt.Printf("resolved to %d visible intervales\n", visibles.Len()) - for x := visibles.Front(); x != nil; x = x.Next { - visible := x.Value - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) - } - -} - -func TestReadResolvedChunks2(t *testing.T) { - - chunks := []*filer_pb.FileChunk{ - { - FileId: "c", - Offset: 200, - Size: 50, - ModifiedTsNs: 3, - }, - { - FileId: "e", - Offset: 200, - Size: 25, - ModifiedTsNs: 5, - }, - } - - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - - fmt.Printf("resolved to %d visible intervales\n", visibles.Len()) - for x := visibles.Front(); x != nil; x = x.Next { - visible := x.Value - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) + for _, visible := range visibles { + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime) } } @@ -102,13 +72,12 @@ func TestRandomizedReadResolvedChunks(t *testing.T) { chunks = append(chunks, randomWrite(array, start, size, ts)) } - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + visibles := readResolvedChunks(chunks) - for x := visibles.Front(); x != nil; x = x.Next { - visible := x.Value + for _, visible := range visibles { for i := visible.start; i < visible.stop; i++ { - if array[i] != visible.modifiedTsNs { - t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs) + if array[i] != visible.modifiedTime { + t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime) } } } @@ -123,10 +92,10 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil } // fmt.Printf("write [%d,%d) %d\n", start, start+size, ts) return &filer_pb.FileChunk{ - FileId: "", - Offset: start, - Size: uint64(size), - ModifiedTsNs: ts, + FileId: "", + Offset: start, + Size: uint64(size), + Mtime: ts, } } @@ -136,16 +105,16 @@ func TestSequentialReadResolvedChunks(t *testing.T) { var chunks []*filer_pb.FileChunk for ts := int64(0); ts < 13; ts++ { chunks = append(chunks, &filer_pb.FileChunk{ - FileId: "", - Offset: chunkSize * ts, - Size: uint64(chunkSize), - ModifiedTsNs: 1, + FileId: "", + Offset: chunkSize * ts, + Size: uint64(chunkSize), + Mtime: 1, }) } - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + visibles := readResolvedChunks(chunks) - fmt.Printf("visibles %d", visibles.Len()) + fmt.Printf("visibles %d", len(visibles)) } @@ -153,128 +122,89 @@ func TestActualReadResolvedChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ { - FileId: "5,e7b96fef48", - Offset: 0, - Size: 2097152, - ModifiedTsNs: 1634447487595823000, + FileId: "5,e7b96fef48", + Offset: 0, + Size: 2097152, + Mtime: 1634447487595823000, }, { - FileId: "5,e5562640b9", - Offset: 2097152, - Size: 2097152, - ModifiedTsNs: 1634447487595826000, + FileId: "5,e5562640b9", + Offset: 2097152, + Size: 2097152, + Mtime: 1634447487595826000, }, { - FileId: "5,df033e0fe4", - Offset: 4194304, - Size: 2097152, - ModifiedTsNs: 1634447487595827000, + FileId: "5,df033e0fe4", + Offset: 4194304, + Size: 2097152, + Mtime: 1634447487595827000, }, { - FileId: "7,eb08148a9b", - Offset: 6291456, - Size: 2097152, - ModifiedTsNs: 1634447487595827000, + FileId: "7,eb08148a9b", + Offset: 6291456, + Size: 2097152, + Mtime: 1634447487595827000, }, { - FileId: "7,e0f92d1604", - Offset: 8388608, - Size: 2097152, - ModifiedTsNs: 1634447487595828000, + FileId: "7,e0f92d1604", + Offset: 8388608, + Size: 2097152, + Mtime: 1634447487595828000, }, { - FileId: "7,e33cb63262", - Offset: 10485760, - Size: 2097152, - ModifiedTsNs: 1634447487595828000, + FileId: "7,e33cb63262", + Offset: 10485760, + Size: 2097152, + Mtime: 1634447487595828000, }, { - FileId: "5,ea98e40e93", - Offset: 12582912, - Size: 2097152, - ModifiedTsNs: 1634447487595829000, + FileId: "5,ea98e40e93", + Offset: 12582912, + Size: 2097152, + Mtime: 1634447487595829000, }, { - FileId: "5,e165661172", - Offset: 14680064, - Size: 2097152, - ModifiedTsNs: 1634447487595829000, + FileId: "5,e165661172", + Offset: 14680064, + Size: 2097152, + Mtime: 1634447487595829000, }, { - FileId: "3,e692097486", - Offset: 16777216, - Size: 2097152, - ModifiedTsNs: 1634447487595830000, + FileId: "3,e692097486", + Offset: 16777216, + Size: 2097152, + Mtime: 1634447487595830000, }, { - FileId: "3,e28e2e3cbd", - Offset: 18874368, - Size: 2097152, - ModifiedTsNs: 1634447487595830000, + FileId: "3,e28e2e3cbd", + Offset: 18874368, + Size: 2097152, + Mtime: 1634447487595830000, }, { - FileId: "3,e443974d4e", - Offset: 20971520, - Size: 2097152, - ModifiedTsNs: 1634447487595830000, + FileId: "3,e443974d4e", + Offset: 20971520, + Size: 2097152, + Mtime: 1634447487595830000, }, { - FileId: "2,e815bed597", - Offset: 23068672, - Size: 2097152, - ModifiedTsNs: 1634447487595831000, + FileId: "2,e815bed597", + Offset: 23068672, + Size: 2097152, + Mtime: 1634447487595831000, }, { - FileId: "5,e94715199e", - Offset: 25165824, - Size: 1974736, - ModifiedTsNs: 1634447487595832000, + FileId: "5,e94715199e", + Offset: 25165824, + Size: 1974736, + Mtime: 1634447487595832000, }, } - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) + visibles := readResolvedChunks(chunks) - for x := visibles.Front(); x != nil; x = x.Next { - visible := x.Value - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) - } - -} - -func TestActualReadResolvedChunks2(t *testing.T) { - - chunks := []*filer_pb.FileChunk{ - { - FileId: "1,e7b96fef48", - Offset: 0, - Size: 184320, - ModifiedTsNs: 1, - }, - { - FileId: "2,22562640b9", - Offset: 184320, - Size: 4096, - ModifiedTsNs: 2, - }, - { - FileId: "2,33562640b9", - Offset: 184320, - Size: 4096, - ModifiedTsNs: 4, - }, - { - FileId: "4,df033e0fe4", - Offset: 188416, - Size: 2097152, - ModifiedTsNs: 3, - }, - } - - visibles := readResolvedChunks(chunks, 0, math.MaxInt64) - - for x := visibles.Front(); x != nil; x = x.Next { - visible := x.Value - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) + for _, visible := range visibles { + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime) } } diff --git a/weed/filer/filechunks_test.go b/weed/filer/filechunks_test.go index 4ae7d6133..b0ea20848 100644 --- a/weed/filer/filechunks_test.go +++ b/weed/filer/filechunks_test.go @@ -1,28 +1,27 @@ package filer import ( - "context" "fmt" "log" "math" - "math/rand/v2" + "math/rand" "strconv" "testing" "github.com/stretchr/testify/assert" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func TestCompactFileChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {Offset: 10, Size: 100, FileId: "abc", ModifiedTsNs: 50}, - {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100}, - {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200}, - {Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300}, + {Offset: 10, Size: 100, FileId: "abc", Mtime: 50}, + {Offset: 100, Size: 100, FileId: "def", Mtime: 100}, + {Offset: 200, Size: 100, FileId: "ghi", Mtime: 200}, + {Offset: 110, Size: 200, FileId: "jkl", Mtime: 300}, } - compacted, garbage := CompactFileChunks(context.Background(), nil, chunks) + compacted, garbage := CompactFileChunks(nil, chunks) if len(compacted) != 3 { t.Fatalf("unexpected compacted: %d", len(compacted)) @@ -36,26 +35,26 @@ func TestCompactFileChunks(t *testing.T) { func TestCompactFileChunks2(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50}, - {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100}, - {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200}, - {Offset: 0, Size: 100, FileId: "abcf", ModifiedTsNs: 300}, - {Offset: 50, Size: 100, FileId: "fhfh", ModifiedTsNs: 400}, - {Offset: 100, Size: 100, FileId: "yuyu", ModifiedTsNs: 500}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 50}, + {Offset: 100, Size: 100, FileId: "def", Mtime: 100}, + {Offset: 200, Size: 100, FileId: "ghi", Mtime: 200}, + {Offset: 0, Size: 100, FileId: "abcf", Mtime: 300}, + {Offset: 50, Size: 100, FileId: "fhfh", Mtime: 400}, + {Offset: 100, Size: 100, FileId: "yuyu", Mtime: 500}, } k := 3 for n := 0; n < k; n++ { chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n), + Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n), }) chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k), + Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k), }) } - compacted, garbage := CompactFileChunks(context.Background(), nil, chunks) + compacted, garbage := CompactFileChunks(nil, chunks) if len(compacted) != 4 { t.Fatalf("unexpected compacted: %d", len(compacted)) @@ -71,7 +70,7 @@ func TestRandomFileChunksCompact(t *testing.T) { var chunks []*filer_pb.FileChunk for i := 0; i < 15; i++ { - start, stop := rand.IntN(len(data)), rand.IntN(len(data)) + start, stop := rand.Intn(len(data)), rand.Intn(len(data)) if start > stop { start, stop = stop, start } @@ -79,11 +78,11 @@ func TestRandomFileChunksCompact(t *testing.T) { stop = start + 16 } chunk := &filer_pb.FileChunk{ - FileId: strconv.Itoa(i), - Offset: int64(start), - Size: uint64(stop - start), - ModifiedTsNs: int64(i), - Fid: &filer_pb.FileId{FileKey: uint64(i)}, + FileId: strconv.Itoa(i), + Offset: int64(start), + Size: uint64(stop - start), + Mtime: int64(i), + Fid: &filer_pb.FileId{FileKey: uint64(i)}, } chunks = append(chunks, chunk) for x := start; x < stop; x++ { @@ -91,10 +90,9 @@ func TestRandomFileChunksCompact(t *testing.T) { } } - visibles, _ := NonOverlappingVisibleIntervals(context.Background(), nil, chunks, 0, math.MaxInt64) + visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64) - for visible := visibles.Front(); visible != nil; visible = visible.Next { - v := visible.Value + for _, v := range visibles { for x := v.start; x < v.stop; x++ { assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId) } @@ -111,9 +109,9 @@ func TestIntervalMerging(t *testing.T) { // case 0: normal { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, + {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, }, Expected: []*VisibleInterval{ {start: 0, stop: 100, fileId: "abc"}, @@ -124,8 +122,8 @@ func TestIntervalMerging(t *testing.T) { // case 1: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "asdf"}, @@ -134,20 +132,20 @@ func TestIntervalMerging(t *testing.T) { // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, - {Offset: 0, Size: 70, FileId: "b", ModifiedTsNs: 134}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 70, FileId: "b", Mtime: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 70, fileId: "b"}, - {start: 70, stop: 100, fileId: "a", offsetInChunk: 70}, + {start: 70, stop: 100, fileId: "a", chunkOffset: 70}, }, }, // case 3: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, }, Expected: []*VisibleInterval{ {start: 0, stop: 50, fileId: "asdf"}, @@ -157,9 +155,9 @@ func TestIntervalMerging(t *testing.T) { // case 4: updates far away from prev chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "asdf"}, @@ -169,22 +167,22 @@ func TestIntervalMerging(t *testing.T) { // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "d", ModifiedTsNs: 184}, - {Offset: 70, Size: 150, FileId: "c", ModifiedTsNs: 143}, - {Offset: 80, Size: 100, FileId: "b", ModifiedTsNs: 134}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "d", Mtime: 184}, + {Offset: 70, Size: 150, FileId: "c", Mtime: 143}, + {Offset: 80, Size: 100, FileId: "b", Mtime: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "d"}, - {start: 200, stop: 220, fileId: "c", offsetInChunk: 130}, + {start: 200, stop: 220, fileId: "c", chunkOffset: 130}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, }, Expected: []*VisibleInterval{ {start: 0, stop: 100, fileId: "xyz"}, @@ -193,12 +191,12 @@ func TestIntervalMerging(t *testing.T) { // case 7: real updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", ModifiedTsNs: 123}, - {Offset: 0, Size: 3145728, FileId: "3,029565bf3092", ModifiedTsNs: 130}, - {Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", ModifiedTsNs: 140}, - {Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", ModifiedTsNs: 150}, - {Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", ModifiedTsNs: 160}, - {Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", ModifiedTsNs: 170}, + {Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123}, + {Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130}, + {Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140}, + {Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150}, + {Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160}, + {Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170}, }, Expected: []*VisibleInterval{ {start: 0, stop: 2097152, fileId: "3,029565bf3092"}, @@ -211,11 +209,11 @@ func TestIntervalMerging(t *testing.T) { // case 8: real bug { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 77824, FileId: "4,0b3df938e301", ModifiedTsNs: 123}, - {Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", ModifiedTsNs: 130}, - {Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", ModifiedTsNs: 140}, - {Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", ModifiedTsNs: 150}, - {Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", ModifiedTsNs: 160}, + {Offset: 0, Size: 77824, FileId: "4,0b3df938e301", Mtime: 123}, + {Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", Mtime: 130}, + {Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", Mtime: 140}, + {Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", Mtime: 150}, + {Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", Mtime: 160}, }, Expected: []*VisibleInterval{ {start: 0, stop: 77824, fileId: "4,0b3df938e301"}, @@ -229,18 +227,12 @@ func TestIntervalMerging(t *testing.T) { for i, testcase := range testcases { log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i) - intervals, _ := NonOverlappingVisibleIntervals(context.Background(), nil, testcase.Chunks, 0, math.MaxInt64) - x := -1 - for visible := intervals.Front(); visible != nil; visible = visible.Next { - x++ - interval := visible.Value - log.Printf("test case %d, interval start=%d, stop=%d, fileId=%s", - i, interval.start, interval.stop, interval.fileId) + intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64) + for x, interval := range intervals { + log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s", + i, x, interval.start, interval.stop, interval.fileId) } - x = -1 - for visible := intervals.Front(); visible != nil; visible = visible.Next { - x++ - interval := visible.Value + for x, interval := range intervals { if interval.start != testcase.Expected[x].start { t.Fatalf("failed on test case %d, interval %d, start %d, expect %d", i, x, interval.start, testcase.Expected[x].start) @@ -253,13 +245,13 @@ func TestIntervalMerging(t *testing.T) { t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s", i, x, interval.fileId, testcase.Expected[x].fileId) } - if interval.offsetInChunk != testcase.Expected[x].offsetInChunk { - t.Fatalf("failed on test case %d, interval %d, offsetInChunk %d, expect %d", - i, x, interval.offsetInChunk, testcase.Expected[x].offsetInChunk) + if interval.chunkOffset != testcase.Expected[x].chunkOffset { + t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d", + i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset) } } - if intervals.Len() != len(testcase.Expected) { - t.Fatalf("failed to compact test case %d, len %d expected %d", i, intervals.Len(), len(testcase.Expected)) + if len(intervals) != len(testcase.Expected) { + t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected)) } } @@ -277,147 +269,147 @@ func TestChunksReading(t *testing.T) { // case 0: normal { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, + {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, }, Offset: 0, Size: 250, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100}, - {OffsetInChunk: 0, ViewSize: 50, FileId: "fsad", ViewOffset: 200}, + {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, + {Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200}, }, }, // case 1: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, }, Offset: 50, Size: 100, Expected: []*ChunkView{ - {OffsetInChunk: 50, ViewSize: 100, FileId: "asdf", ViewOffset: 50}, + {Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50}, }, }, // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 3, Size: 100, FileId: "a", ModifiedTsNs: 123}, - {Offset: 10, Size: 50, FileId: "b", ModifiedTsNs: 134}, + {Offset: 3, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 10, Size: 50, FileId: "b", Mtime: 134}, }, Offset: 30, Size: 40, Expected: []*ChunkView{ - {OffsetInChunk: 20, ViewSize: 30, FileId: "b", ViewOffset: 30}, - {OffsetInChunk: 57, ViewSize: 10, FileId: "a", ViewOffset: 60}, + {Offset: 20, Size: 30, FileId: "b", LogicOffset: 30}, + {Offset: 57, Size: 10, FileId: "a", LogicOffset: 60}, }, }, // case 3: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, }, Offset: 0, Size: 200, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 50, FileId: "asdf", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 50}, + {Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0}, + {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50}, }, }, // case 4: updates far away from prev chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, }, Offset: 0, Size: 400, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 200, FileId: "asdf", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 250}, + {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0}, + {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250}, }, }, // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, - {Offset: 0, Size: 200, FileId: "c", ModifiedTsNs: 184}, - {Offset: 70, Size: 150, FileId: "b", ModifiedTsNs: 143}, - {Offset: 80, Size: 100, FileId: "xxxx", ModifiedTsNs: 134}, + {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, + {Offset: 0, Size: 200, FileId: "c", Mtime: 184}, + {Offset: 70, Size: 150, FileId: "b", Mtime: 143}, + {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, }, Offset: 0, Size: 220, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 200, FileId: "c", ViewOffset: 0}, - {OffsetInChunk: 130, ViewSize: 20, FileId: "b", ViewOffset: 200}, + {Offset: 0, Size: 200, FileId: "c", LogicOffset: 0}, + {Offset: 130, Size: 20, FileId: "b", LogicOffset: 200}, }, }, // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, - {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, }, Offset: 0, Size: 100, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 100, FileId: "xyz", ViewOffset: 0}, + {Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0}, }, }, // case 7: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, + {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, }, Offset: 0, Size: 200, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100}, + {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, // case 8: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, - {Offset: 90, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, - {Offset: 190, Size: 300, FileId: "fsad", ModifiedTsNs: 353}, + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, }, Offset: 0, Size: 300, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 90, FileId: "abc", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 90}, - {OffsetInChunk: 0, ViewSize: 110, FileId: "fsad", ViewOffset: 190}, + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, }, }, // case 9: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", ModifiedTsNs: 1}, - {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", ModifiedTsNs: 2}, - {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", ModifiedTsNs: 3}, - {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", ModifiedTsNs: 4}, - {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", ModifiedTsNs: 5}, - {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", ModifiedTsNs: 6}, + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, }, Offset: 0, Size: 153578836, Expected: []*ChunkView{ - {OffsetInChunk: 0, ViewSize: 43175936, FileId: "2,111fc2cbfac1", ViewOffset: 0}, - {OffsetInChunk: 0, ViewSize: 52981760 - 43175936, FileId: "2,112a36ea7f85", ViewOffset: 43175936}, - {OffsetInChunk: 0, ViewSize: 72564736 - 52981760, FileId: "4,112d5f31c5e7", ViewOffset: 52981760}, - {OffsetInChunk: 0, ViewSize: 133255168 - 72564736, FileId: "1,113245f0cdb6", ViewOffset: 72564736}, - {OffsetInChunk: 0, ViewSize: 137269248 - 133255168, FileId: "3,1141a70733b5", ViewOffset: 133255168}, - {OffsetInChunk: 0, ViewSize: 153578836 - 137269248, FileId: "1,114201d5bbdb", ViewOffset: 137269248}, + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, }, }, } @@ -427,32 +419,29 @@ func TestChunksReading(t *testing.T) { // continue } log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i) - chunks := ViewFromChunks(context.Background(), nil, testcase.Chunks, testcase.Offset, testcase.Size) - x := -1 - for c := chunks.Front(); c != nil; c = c.Next { - x++ - chunk := c.Value + chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size) + for x, chunk := range chunks { log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s", - i, x, chunk.OffsetInChunk, chunk.ViewSize, chunk.FileId) - if chunk.OffsetInChunk != testcase.Expected[x].OffsetInChunk { + i, x, chunk.Offset, chunk.Size, chunk.FileId) + if chunk.Offset != testcase.Expected[x].Offset { t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d", - i, chunk.FileId, chunk.OffsetInChunk, testcase.Expected[x].OffsetInChunk) + i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset) } - if chunk.ViewSize != testcase.Expected[x].ViewSize { - t.Fatalf("failed on read case %d, chunk %s, ViewSize %d, expect %d", - i, chunk.FileId, chunk.ViewSize, testcase.Expected[x].ViewSize) + if chunk.Size != testcase.Expected[x].Size { + t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d", + i, chunk.FileId, chunk.Size, testcase.Expected[x].Size) } if chunk.FileId != testcase.Expected[x].FileId { t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s", i, x, chunk.FileId, testcase.Expected[x].FileId) } - if chunk.ViewOffset != testcase.Expected[x].ViewOffset { - t.Fatalf("failed on read case %d, chunk %d, ViewOffset %d, expect %d", - i, x, chunk.ViewOffset, testcase.Expected[x].ViewOffset) + if chunk.LogicOffset != testcase.Expected[x].LogicOffset { + t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d", + i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset) } } - if chunks.Len() != len(testcase.Expected) { - t.Fatalf("failed to read test case %d, len %d expected %d", i, chunks.Len(), len(testcase.Expected)) + if len(chunks) != len(testcase.Expected) { + t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected)) } } @@ -466,106 +455,85 @@ func BenchmarkCompactFileChunks(b *testing.B) { for n := 0; n < k; n++ { chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n), + Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n), }) chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k), + Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k), }) } for n := 0; n < b.N; n++ { - CompactFileChunks(context.Background(), nil, chunks) + CompactFileChunks(nil, chunks) } } -func addVisibleInterval(visibles *IntervalList[*VisibleInterval], x *VisibleInterval) { - visibles.AppendInterval(&Interval[*VisibleInterval]{ - StartOffset: x.start, - StopOffset: x.stop, - TsNs: x.modifiedTsNs, - Value: x, - }) -} - func TestViewFromVisibleIntervals(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 0, - stop: 25, - fileId: "fid1", - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 4096, - stop: 8192, - fileId: "fid2", - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 16384, - stop: 18551, - fileId: "fid3", - }) + visibles := []VisibleInterval{ + { + start: 0, + stop: 25, + fileId: "fid1", + }, + { + start: 4096, + stop: 8192, + fileId: "fid2", + }, + { + start: 16384, + stop: 18551, + fileId: "fid3", + }, + } views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) - if views.Len() != visibles.Len() { - assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") } } func TestViewFromVisibleIntervals2(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 344064, - stop: 348160, - fileId: "fid1", - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 348160, - stop: 356352, - fileId: "fid2", - }) + visibles := []VisibleInterval{ + { + start: 344064, + stop: 348160, + fileId: "fid1", + }, + { + start: 348160, + stop: 356352, + fileId: "fid2", + }, + } views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32) - if views.Len() != visibles.Len() { - assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") } } func TestViewFromVisibleIntervals3(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 1000, - stop: 2000, - fileId: "fid1", - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 3000, - stop: 4000, - fileId: "fid2", - }) + visibles := []VisibleInterval{ + { + start: 1000, + stop: 2000, + fileId: "fid1", + }, + { + start: 3000, + stop: 4000, + fileId: "fid2", + }, + } views := ViewFromVisibleIntervals(visibles, 1700, 1500) - if views.Len() != visibles.Len() { - assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error") + if len(views) != len(visibles) { + assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error") } } - -func TestCompactFileChunks3(t *testing.T) { - chunks := []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50}, - {Offset: 100, Size: 100, FileId: "ghi", ModifiedTsNs: 50}, - {Offset: 200, Size: 100, FileId: "jkl", ModifiedTsNs: 100}, - {Offset: 300, Size: 100, FileId: "def", ModifiedTsNs: 200}, - } - - compacted, _ := CompactFileChunks(context.Background(), nil, chunks) - - if len(compacted) != 4 { - t.Fatalf("unexpected compacted: %d", len(compacted)) - } -} diff --git a/weed/filer/filer.go b/weed/filer/filer.go index 71185d3d1..15fe69116 100644 --- a/weed/filer/filer.go +++ b/weed/filer/filer.go @@ -3,26 +3,21 @@ package filer import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/cluster" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "os" "sort" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" - - "github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) const ( @@ -37,8 +32,6 @@ var ( ) type Filer struct { - UniqueFilerId int32 - UniqueFilerEpoch int32 Store VirtualFilerStore MasterClient *wdclient.MasterClient fileIdDeletionQueue *util.UnboundedQueue @@ -52,26 +45,20 @@ type Filer struct { Signature int32 FilerConf *FilerConf RemoteStorage *FilerRemoteStorage - Dlm *lock_manager.DistributedLockManager - MaxFilenameLength uint32 + UniqueFileId uint32 } -func NewFiler(masters pb.ServerDiscovery, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress, filerGroup string, collection string, replication string, dataCenter string, maxFilenameLength uint32, notifyFn func()) *Filer { +func NewFiler(masters map[string]pb.ServerAddress, grpcDialOption grpc.DialOption, filerHost pb.ServerAddress, + filerGroup string, collection string, replication string, dataCenter string, notifyFn func()) *Filer { f := &Filer{ - MasterClient: wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, "", masters), + MasterClient: wdclient.NewMasterClient(grpcDialOption, filerGroup, cluster.FilerType, filerHost, dataCenter, masters), fileIdDeletionQueue: util.NewUnboundedQueue(), GrpcDialOption: grpcDialOption, FilerConf: NewFilerConf(), RemoteStorage: NewFilerRemoteStorage(), - UniqueFilerId: util.RandomInt32(), - Dlm: lock_manager.NewDistributedLockManager(filerHost), - MaxFilenameLength: maxFilenameLength, + UniqueFileId: uint32(util.RandomInt32()), } - if f.UniqueFilerId < 0 { - f.UniqueFilerId = -f.UniqueFilerId - } - - f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, nil, notifyFn) + f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn) f.metaLogCollection = collection f.metaLogReplication = replication @@ -80,7 +67,7 @@ func NewFiler(masters pb.ServerDiscovery, grpcDialOption grpc.DialOption, filerH return f } -func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) { +func (f *Filer) MaybeBootstrapFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, snapshotTime time.Time) (err error) { if len(existingNodes) == 0 { return } @@ -92,40 +79,18 @@ func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes [ return } - glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId) - - return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error { - return f.Store.InsertEntry(context.Background(), FromPbEntry(string(parentPath), entry)) - }) - }) - + glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFileId) + err = pb.FollowMetadata(pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, "bootstrap", int32(f.UniqueFileId), "/", nil, + 0, snapshotTime.UnixNano(), f.Signature, func(resp *filer_pb.SubscribeMetadataResponse) error { + return Replay(f.Store, resp) + }, pb.FatalOnError) + return } func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*master_pb.ClusterNodeUpdate, startFrom time.Time) { - var snapshot []pb.ServerAddress - for _, node := range existingNodes { - address := pb.ServerAddress(node.Address) - snapshot = append(snapshot, address) - } - f.Dlm.LockRing.SetSnapshot(snapshot) - glog.V(0).Infof("%s aggregate from peers %+v", self, snapshot) - f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption) - f.MasterClient.SetOnPeerUpdateFn(func(update *master_pb.ClusterNodeUpdate, startFrom time.Time) { - if update.NodeType != cluster.FilerType { - return - } - address := pb.ServerAddress(update.Address) - - if update.IsAdd { - f.Dlm.LockRing.AddServer(address) - } else { - f.Dlm.LockRing.RemoveServer(address) - } - f.MetaAggregator.OnPeerUpdate(update, startFrom) - }) + f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate for _, peerUpdate := range existingNodes { f.MetaAggregator.OnPeerUpdate(peerUpdate, startFrom) @@ -133,8 +98,29 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste } -func (f *Filer) ListExistingPeerUpdates(ctx context.Context) (existingNodes []*master_pb.ClusterNodeUpdate) { - return cluster.ListExistingPeerUpdates(f.GetMaster(ctx), f.GrpcDialOption, f.MasterClient.FilerGroup, cluster.FilerType) +func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate) { + + if grpcErr := pb.WithMasterClient(false, f.MasterClient.GetMaster(), f.GrpcDialOption, func(client master_pb.SeaweedClient) error { + resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ + ClientType: cluster.FilerType, + FilerGroup: f.MasterClient.FilerGroup, + }) + + glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes)) + for _, node := range resp.ClusterNodes { + existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{ + NodeType: cluster.FilerType, + Address: node.Address, + IsLeader: node.IsLeader, + IsAdd: true, + CreatedAtNs: node.CreatedAtNs, + }) + } + return err + }); grpcErr != nil { + glog.V(0).Infof("connect to %s: %v", f.MasterClient.GetMaster(), grpcErr) + } + return } func (f *Filer) SetStore(store FilerStore) (isFresh bool) { @@ -167,12 +153,12 @@ func (f *Filer) GetStore() (store FilerStore) { return f.Store } -func (fs *Filer) GetMaster(ctx context.Context) pb.ServerAddress { - return fs.MasterClient.GetMaster(ctx) +func (fs *Filer) GetMaster() pb.ServerAddress { + return fs.MasterClient.GetMaster() } -func (fs *Filer) KeepMasterClientConnected(ctx context.Context) { - fs.MasterClient.KeepConnectedToMaster(ctx) +func (fs *Filer) KeepMasterClientConnected() { + fs.MasterClient.KeepConnectedToMaster() } func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { @@ -187,20 +173,12 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { return f.Store.RollbackTransaction(ctx) } -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32, skipCreateParentDir bool, maxFilenameLength uint32) error { +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32, skipCreateParentDir bool) error { if string(entry.FullPath) == "/" { return nil } - if entry.FullPath.IsLongerFileName(maxFilenameLength) { - return fmt.Errorf("entry name too long") - } - - if entry.IsDirectory() { - entry.Attr.TtlSec = 0 - } - oldEntry, _ := f.FindEntry(ctx, entry.FullPath) /* @@ -215,63 +193,55 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr if !skipCreateParentDir { dirParts := strings.Split(string(entry.FullPath), "/") - if err := f.ensureParentDirectoryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil { + if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil { return err } } - glog.V(4).InfofCtx(ctx, "InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) + glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) if err := f.Store.InsertEntry(ctx, entry); err != nil { - glog.ErrorfCtx(ctx, "insert entry %s: %v", entry.FullPath, err) + glog.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { if o_excl { - glog.V(3).InfofCtx(ctx, "EEXIST: entry %s already exists", entry.FullPath) + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) } - glog.V(4).InfofCtx(ctx, "UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) + glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { - glog.ErrorfCtx(ctx, "update entry %s: %v", entry.FullPath, err) + glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures) - f.deleteChunksIfNotNew(ctx, oldEntry, entry) + f.deleteChunksIfNotNew(oldEntry, entry) - glog.V(4).InfofCtx(ctx, "CreateEntry %s: created", entry.FullPath) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) return nil } -func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) { +func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) { if level == 0 { return nil } dirPath := "/" + util.Join(dirParts[:level]...) - // fmt.Printf("%d dirPath: %+v\n", level, dirPath) + // fmt.Printf("%d directory: %+v\n", i, dirPath) // check the store directly - glog.V(4).InfofCtx(ctx, "find uncached directory: %s", dirPath) + glog.V(4).Infof("find uncached directory: %s", dirPath) dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath)) // no such existing directory if dirEntry == nil { - // fmt.Printf("dirParts: %v %v %v\n", dirParts[0], dirParts[1], dirParts[2]) - // dirParts[0] == "" and dirParts[1] == "buckets" - if len(dirParts) >= 3 && dirParts[1] == "buckets" { - if err := s3bucket.VerifyS3BucketName(dirParts[2]); err != nil { - return fmt.Errorf("invalid bucket name %s: %v", dirParts[2], err) - } - } - // ensure parent directory - if err = f.ensureParentDirectoryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil { + if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil { return err } @@ -291,21 +261,19 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di }, } - glog.V(2).InfofCtx(ctx, "create directory: %s %v", dirPath, dirEntry.Mode) + glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.Store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil { - glog.V(3).InfofCtx(ctx, "mkdir %s: %v", dirPath, mkdirErr) + if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { - if !strings.HasPrefix("/"+util.Join(dirParts[:]...), SystemLogDir) { - f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil) - } + f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil) } } else if !dirEntry.IsDirectory() { - glog.ErrorfCtx(ctx, "CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) + glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -316,11 +284,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er if oldEntry != nil { entry.Attr.Crtime = oldEntry.Attr.Crtime if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.ErrorfCtx(ctx, "existing %s is a directory", oldEntry.FullPath) + glog.Errorf("existing %s is a directory", oldEntry.FullPath) return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.ErrorfCtx(ctx, "existing %s is a file", oldEntry.FullPath) + glog.Errorf("existing %s is a file", oldEntry.FullPath) return fmt.Errorf("existing %s is a file", oldEntry.FullPath) } } @@ -379,6 +347,6 @@ func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, sta } func (f *Filer) Shutdown() { - f.LocalMetaLogBuffer.ShutdownLogBuffer() + f.LocalMetaLogBuffer.Shutdown() f.Store.Shutdown() } diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go index e93279fba..32fc647d9 100644 --- a/weed/filer/filer_conf.go +++ b/weed/filer/filer_conf.go @@ -4,36 +4,35 @@ import ( "bytes" "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" "io" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/jsonpb" "github.com/viant/ptrie" - jsonpb "google.golang.org/protobuf/encoding/protojson" ) const ( - DirectoryEtcRoot = "/etc/" + DirectoryEtcRoot = "/etc" DirectoryEtcSeaweedFS = "/etc/seaweedfs" DirectoryEtcRemote = "/etc/remote" FilerConfName = "filer.conf" - IamConfigDirectory = "/etc/iam" + IamConfigDirecotry = "/etc/iam" IamIdentityFile = "identity.json" IamPoliciesFile = "policies.json" ) type FilerConf struct { - rules ptrie.Trie[*filer_pb.FilerConf_PathConf] + rules ptrie.Trie } func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) { var buf bytes.Buffer - if err := pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(false, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { if masterClient != nil { return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf) } else { @@ -56,7 +55,7 @@ func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOp func NewFilerConf() (fc *FilerConf) { fc = &FilerConf{ - rules: ptrie.New[*filer_pb.FilerConf_PathConf](), + rules: ptrie.New(), } return fc } @@ -76,7 +75,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) { return fc.LoadFromBytes(entry.Content) } - return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size()) + return fc.loadFromChunks(filer, entry.Content, entry.Chunks, entry.Size()) } func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) { @@ -94,7 +93,7 @@ func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*file func (fc *FilerConf) LoadFromBytes(data []byte) (err error) { conf := &filer_pb.FilerConf{} - if err := jsonpb.Unmarshal(data, conf); err != nil { + if err := jsonpb.Unmarshal(bytes.NewReader(data), conf); err != nil { return err } @@ -103,7 +102,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) { func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) { for _, location := range conf.Locations { - err = fc.SetLocationConf(location) + err = fc.AddLocationConf(location) if err != nil { // this is not recoverable return nil @@ -112,24 +111,7 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) { return nil } -func (fc *FilerConf) GetLocationConf(locationPrefix string) (locConf *filer_pb.FilerConf_PathConf, found bool) { - return fc.rules.Get([]byte(locationPrefix)) -} - -func (fc *FilerConf) SetLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) { - err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) - if err != nil { - glog.Errorf("put location prefix: %v", err) - } - return -} - func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) { - existingConf, found := fc.rules.Get([]byte(locConf.LocationPrefix)) - if found { - mergePathConf(existingConf, locConf) - locConf = existingConf - } err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) if err != nil { glog.Errorf("put location prefix: %v", err) @@ -138,13 +120,12 @@ func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err } func (fc *FilerConf) DeleteLocationConf(locationPrefix string) { - rules := ptrie.New[*filer_pb.FilerConf_PathConf]() - fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool { + rules := ptrie.New() + fc.rules.Walk(func(key []byte, value interface{}) bool { if string(key) == locationPrefix { return true } - key = bytes.Clone(key) - _ = rules.Put(key, value) + rules.Put(key, value) return true }) fc.rules = rules @@ -153,8 +134,9 @@ func (fc *FilerConf) DeleteLocationConf(locationPrefix string) { func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) { pathConf = &filer_pb.FilerConf_PathConf{} - fc.rules.MatchPrefix([]byte(path), func(key []byte, value *filer_pb.FilerConf_PathConf) bool { - mergePathConf(pathConf, value) + fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool { + t := value.(*filer_pb.FilerConf_PathConf) + mergePathConf(pathConf, t) return true }) return pathConf @@ -162,9 +144,10 @@ func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) { ttls = make(map[string]string) - fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool { - if value.Collection == collection { - ttls[value.LocationPrefix] = value.GetTtl() + fc.rules.Walk(func(key []byte, value interface{}) bool { + t := value.(*filer_pb.FilerConf_PathConf) + if t.Collection == collection { + ttls[t.LocationPrefix] = t.GetTtl() } return true }) @@ -182,31 +165,27 @@ func mergePathConf(a, b *filer_pb.FilerConf_PathConf) { a.VolumeGrowthCount = b.VolumeGrowthCount } a.ReadOnly = b.ReadOnly || a.ReadOnly - if b.MaxFileNameLength > 0 { - a.MaxFileNameLength = b.MaxFileNameLength - } a.DataCenter = util.Nvl(b.DataCenter, a.DataCenter) a.Rack = util.Nvl(b.Rack, a.Rack) a.DataNode = util.Nvl(b.DataNode, a.DataNode) - a.DisableChunkDeletion = b.DisableChunkDeletion || a.DisableChunkDeletion - a.Worm = b.Worm || a.Worm - if b.WormRetentionTimeSeconds > 0 { - a.WormRetentionTimeSeconds = b.WormRetentionTimeSeconds - } - if b.WormGracePeriodSeconds > 0 { - a.WormGracePeriodSeconds = b.WormGracePeriodSeconds - } } func (fc *FilerConf) ToProto() *filer_pb.FilerConf { m := &filer_pb.FilerConf{} - fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool { - m.Locations = append(m.Locations, value) + fc.rules.Walk(func(key []byte, value interface{}) bool { + pathConf := value.(*filer_pb.FilerConf_PathConf) + m.Locations = append(m.Locations, pathConf) return true }) return m } func (fc *FilerConf) ToText(writer io.Writer) error { - return ProtoToText(writer, fc.ToProto()) + + m := jsonpb.Marshaler{ + EmitDefaults: false, + Indent: " ", + } + + return m.Marshal(writer, fc.ToProto()) } diff --git a/weed/filer/filer_conf_test.go b/weed/filer/filer_conf_test.go index 02615b814..1576c7d82 100644 --- a/weed/filer/filer_conf_test.go +++ b/weed/filer/filer_conf_test.go @@ -3,7 +3,7 @@ package filer import ( "testing" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/stretchr/testify/assert" ) diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go index cfe07ec5a..0fc2f6c3c 100644 --- a/weed/filer/filer_delete_entry.go +++ b/weed/filer/filer_delete_entry.go @@ -3,11 +3,10 @@ package filer import ( "context" "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -17,7 +16,7 @@ const ( type OnChunksFunc func([]*filer_pb.FileChunk) error type OnHardLinkIdsFunc func([]HardLinkId) error -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32, ifNotModifiedAfter int64) (err error) { +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) { if p == "/" { return nil } @@ -26,60 +25,60 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR if findErr != nil { return findErr } - if ifNotModifiedAfter > 0 && entry.Attr.Mtime.Unix() > ifNotModifiedAfter { - return nil - } isDeleteCollection := f.isBucket(entry) if entry.IsDirectory() { // delete the folder children, not including the folder itself - err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(hardLinkIds []HardLinkId) error { + err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures, func(chunks []*filer_pb.FileChunk) error { + if shouldDeleteChunks && !isDeleteCollection { + f.DirectDeleteChunks(chunks) + } + return nil + }, func(hardLinkIds []HardLinkId) error { // A case not handled: // what if the chunk is in a different collection? if shouldDeleteChunks { - f.maybeDeleteHardLinks(ctx, hardLinkIds) + f.maybeDeleteHardLinks(hardLinkIds) } return nil }) if err != nil { - glog.V(2).InfofCtx(ctx, "delete directory %s: %v", p, err) + glog.V(0).Infof("delete directory %s: %v", p, err) return fmt.Errorf("delete directory %s: %v", p, err) } } + if shouldDeleteChunks && !isDeleteCollection { + f.DirectDeleteChunks(entry.Chunks) + } + // delete the file or folder err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster, signatures) if err != nil { return fmt.Errorf("delete file %s: %v", p, err) } - if shouldDeleteChunks && !isDeleteCollection { - f.DeleteChunks(ctx, p, entry.GetChunks()) - } - if isDeleteCollection { collectionName := entry.Name() - f.DoDeleteCollection(collectionName) + f.doDeleteCollection(collectionName) } return nil } -func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) { +func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32, onChunksFn OnChunksFunc, onHardLinkIdsFn OnHardLinkIdsFunc) (err error) { - //collect all the chunks of this layer and delete them together at the end - var chunksToDelete []*filer_pb.FileChunk lastFileName := "" includeLastFile := false if !isDeletingBucket || !f.Store.CanDropWholeBucket() { for { entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "") if err != nil { - glog.ErrorfCtx(ctx, "list folder %s: %v", entry.FullPath, err) + glog.Errorf("list folder %s: %v", entry.FullPath, err) return fmt.Errorf("list folder %s: %v", entry.FullPath, err) } if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for first iteration in the loop - glog.V(2).InfofCtx(ctx, "deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) + glog.V(0).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath) } @@ -87,16 +86,14 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry lastFileName = sub.Name() if sub.IsDirectory() { subIsDeletingBucket := f.isBucket(sub) - err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil, onHardLinkIdsFn) + err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil, onChunksFn, onHardLinkIdsFn) } else { f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil) if len(sub.HardLinkId) != 0 { // hard link chunk data are deleted separately err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId}) } else { - if shouldDeleteChunks { - chunksToDelete = append(chunksToDelete, sub.GetChunks()...) - } + err = onChunksFn(sub.Chunks) } } if err != nil && !ignoreRecursiveError { @@ -110,24 +107,27 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry } } - glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) + glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { - return fmt.Errorf("filer store delete: %w", storeDeletionErr) + return fmt.Errorf("filer store delete: %v", storeDeletionErr) } f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures) - f.DeleteChunks(ctx, entry.FullPath, chunksToDelete) return nil } func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) { - glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) - if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil { - return fmt.Errorf("filer store delete: %w", storeDeletionErr) + if !entry.IsDirectory() && !shouldDeleteChunks { + if storeDeletionErr := f.Store.DeleteOneEntrySkipHardlink(ctx, entry.FullPath); storeDeletionErr != nil { + return fmt.Errorf("filer store delete skip hardlink: %v", storeDeletionErr) + } + } else if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil { + return fmt.Errorf("filer store delete: %v", storeDeletionErr) } if !entry.IsDirectory() { f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures) @@ -136,7 +136,7 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou return nil } -func (f *Filer) DoDeleteCollection(collectionName string) (err error) { +func (f *Filer) doDeleteCollection(collectionName string) (err error) { return f.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ @@ -150,10 +150,10 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) { } -func (f *Filer) maybeDeleteHardLinks(ctx context.Context, hardLinkIds []HardLinkId) { +func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) { for _, hardLinkId := range hardLinkIds { - if err := f.Store.DeleteHardLink(ctx, hardLinkId); err != nil { - glog.ErrorfCtx(ctx, "delete hard link id %d : %v", hardLinkId, err) + if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil { + glog.Errorf("delete hard link id %d : %v", hardLinkId, err) } } } diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go index 6d22be600..e73f94151 100644 --- a/weed/filer/filer_deletion.go +++ b/weed/filer/filer_deletion.go @@ -1,17 +1,14 @@ package filer import ( - "context" + "math" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/storage" - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]*operation.LookupResult, error) { @@ -24,7 +21,6 @@ func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []str locations = append(locations, operation.Location{ Url: loc.Url, PublicUrl: loc.PublicUrl, - GrpcPort: loc.GrpcPort, }) } m[vid] = &operation.LookupResult{ @@ -56,13 +52,13 @@ func (f *Filer) loopProcessingDeletion() { fileIds = fileIds[:0] } deletionCount = len(toDeleteFileIds) - _, err := operation.DeleteFileIdsWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) + _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) if err != nil { - if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) { + if !strings.Contains(err.Error(), "already deleted") { glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) } } else { - glog.V(2).Infof("deleting fileIds %+v", toDeleteFileIds) + glog.V(1).Infof("deleting fileIds len=%d", deletionCount) } } }) @@ -73,27 +69,59 @@ func (f *Filer) loopProcessingDeletion() { } } -func (f *Filer) DeleteUncommittedChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { - f.doDeleteChunks(ctx, chunks) -} +func (f *Filer) doDeleteFileIds(fileIds []string) { -func (f *Filer) DeleteChunks(ctx context.Context, fullpath util.FullPath, chunks []*filer_pb.FileChunk) { - rule := f.FilerConf.MatchStorageRule(string(fullpath)) - if rule.DisableChunkDeletion { - return + lookupFunc := LookupByMasterClientFn(f.MasterClient) + DeletionBatchSize := 100000 // roughly 20 bytes cost per file id. + + for len(fileIds) > 0 { + var toDeleteFileIds []string + if len(fileIds) > DeletionBatchSize { + toDeleteFileIds = fileIds[:DeletionBatchSize] + fileIds = fileIds[DeletionBatchSize:] + } else { + toDeleteFileIds = fileIds + fileIds = fileIds[:0] + } + deletionCount := len(toDeleteFileIds) + _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) + if err != nil { + if !strings.Contains(err.Error(), "already deleted") { + glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + } + } } - f.doDeleteChunks(ctx, chunks) } -func (f *Filer) doDeleteChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { +func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) { + var fildIdsToDelete []string + for _, chunk := range chunks { + if !chunk.IsChunkManifest { + fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString()) + continue + } + dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk) + if manifestResolveErr != nil { + glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) + } + for _, dChunk := range dataChunks { + fildIdsToDelete = append(fildIdsToDelete, dChunk.GetFileIdString()) + } + fildIdsToDelete = append(fildIdsToDelete, chunk.GetFileIdString()) + } + + f.doDeleteFileIds(fildIdsToDelete) +} + +func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { if !chunk.IsChunkManifest { f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString()) continue } - dataChunks, manifestResolveErr := ResolveOneChunkManifest(ctx, f.MasterClient.LookupFileId, chunk) + dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk) if manifestResolveErr != nil { - glog.V(0).InfofCtx(ctx, "failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) + glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) } for _, dChunk := range dataChunks { f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString()) @@ -108,19 +136,48 @@ func (f *Filer) DeleteChunksNotRecursive(chunks []*filer_pb.FileChunk) { } } -func (f *Filer) deleteChunksIfNotNew(ctx context.Context, oldEntry, newEntry *Entry) { - var oldChunks, newChunks []*filer_pb.FileChunk - if oldEntry != nil { - oldChunks = oldEntry.GetChunks() +func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { + + if oldEntry == nil { + return } - if newEntry != nil { - newChunks = newEntry.GetChunks() + if newEntry == nil { + f.DeleteChunks(oldEntry.Chunks) + return } - toDelete, err := MinusChunks(ctx, f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks) + var toDelete []*filer_pb.FileChunk + newChunkIds := make(map[string]bool) + newDataChunks, newManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(), + newEntry.Chunks, 0, math.MaxInt64) if err != nil { - glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks) + glog.Errorf("Failed to resolve new entry chunks when delete old entry chunks. new: %s, old: %s", + newEntry.Chunks, oldEntry.Chunks) return } + for _, newChunk := range newDataChunks { + newChunkIds[newChunk.GetFileIdString()] = true + } + for _, newChunk := range newManifestChunks { + newChunkIds[newChunk.GetFileIdString()] = true + } + + oldDataChunks, oldManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(), + oldEntry.Chunks, 0, math.MaxInt64) + if err != nil { + glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", + newEntry.Chunks, oldEntry.Chunks) + return + } + for _, oldChunk := range oldDataChunks { + if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { + toDelete = append(toDelete, oldChunk) + } + } + for _, oldChunk := range oldManifestChunks { + if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { + toDelete = append(toDelete, oldChunk) + } + } f.DeleteChunksNotRecursive(toDelete) } diff --git a/weed/filer/filer_hardlink.go b/weed/filer/filer_hardlink.go index cad153d79..7a91602fd 100644 --- a/weed/filer/filer_hardlink.go +++ b/weed/filer/filer_hardlink.go @@ -1,7 +1,7 @@ package filer import ( - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go index 1867ccc07..4d26a695c 100644 --- a/weed/filer/filer_notify.go +++ b/weed/filer/filer_notify.go @@ -3,18 +3,17 @@ package filer import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" "io" - "regexp" + "math" "strings" "time" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) { @@ -82,11 +81,11 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica return } - f.LocalMetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs) + f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs) } -func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { +func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) { if len(buf) == 0 { return @@ -95,7 +94,7 @@ func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTim startTime, stopTime = startTime.UTC(), stopTime.UTC() targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir, - startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFilerId, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFileId, // startTime.Second(), startTime.Nanosecond(), ) @@ -109,40 +108,100 @@ func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTim } } -var ( - VolumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`) -) +func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) { -func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastTsNs int64, isDone bool, err error) { - - visitor, visitErr := f.collectPersistedLogBuffer(startPosition, stopTsNs) - if visitErr != nil { - if visitErr == io.EOF { - return - } - err = fmt.Errorf("reading from persisted logs: %w", visitErr) - return + startTime = startTime.UTC() + startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute()) + var stopDate, stopHourMinute string + if stopTsNs != 0 { + stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC() + stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day()) + stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute()) } - var logEntry *filer_pb.LogEntry - for { - logEntry, visitErr = visitor.GetNext() - if visitErr != nil { - if visitErr == io.EOF { + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "") + if listDayErr != nil { + return lastTsNs, isDone, fmt.Errorf("fail to list log by day: %v", listDayErr) + } + for _, dayEntry := range dayEntries { + if stopDate != "" { + if strings.Compare(dayEntry.Name(), stopDate) > 0 { break } - err = fmt.Errorf("read next from persisted logs: %w", visitErr) - return } - isDone, visitErr = eachLogEntryFn(logEntry) - if visitErr != nil { - err = fmt.Errorf("process persisted log entry: %w", visitErr) - return + // println("checking day", dayEntry.FullPath) + hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "") + if listHourMinuteErr != nil { + return lastTsNs, isDone, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr) } - lastTsNs = logEntry.TsNs - if isDone { - return + for _, hourMinuteEntry := range hourMinuteEntries { + // println("checking hh-mm", hourMinuteEntry.FullPath) + if dayEntry.Name() == startDate { + hourMinute := util.FileNameBase(hourMinuteEntry.Name()) + if strings.Compare(hourMinute, startHourMinute) < 0 { + continue + } + } + if dayEntry.Name() == stopDate { + hourMinute := util.FileNameBase(hourMinuteEntry.Name()) + if strings.Compare(hourMinute, stopHourMinute) > 0 { + break + } + } + // println("processing", hourMinuteEntry.FullPath) + chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks) + if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + continue + } + return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err) + } + chunkedFileReader.Close() } } - return + return lastTsNs, isDone, nil +} + +func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) { + for { + n, err := r.Read(sizeBuf) + if err != nil { + return lastTsNs, err + } + if n != 4 { + return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n) + } + size := util.BytesToUint32(sizeBuf) + // println("entry size", size) + entryData := make([]byte, size) + n, err = r.Read(entryData) + if err != nil { + return lastTsNs, err + } + if n != int(size) { + return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size) + } + logEntry := &filer_pb.LogEntry{} + if err = proto.Unmarshal(entryData, logEntry); err != nil { + return lastTsNs, err + } + if logEntry.TsNs <= startTsNs { + continue + } + if stopTsNs != 0 && logEntry.TsNs > stopTsNs { + return lastTsNs, err + } + // println("each log: ", logEntry.TsNs) + if err := eachLogEntryFn(logEntry); err != nil { + return lastTsNs, err + } else { + lastTsNs = logEntry.TsNs + } + } } diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go index 9150f92d6..25b99d0f7 100644 --- a/weed/filer/filer_notify_append.go +++ b/weed/filer/filer_notify_append.go @@ -6,9 +6,9 @@ import ( "os" "time" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (f *Filer) appendToFile(targetFile string, data []byte) error { @@ -36,14 +36,14 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error { } else if err != nil { return fmt.Errorf("find %s: %v", fullpath, err) } else { - offset = int64(TotalSize(entry.GetChunks())) + offset = int64(TotalSize(entry.Chunks)) } // append to existing chunks - entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset, time.Now().UnixNano())) + entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset)) // update the entry - err = f.CreateEntry(context.Background(), entry, false, false, nil, false, f.MaxFilenameLength) + err = f.CreateEntry(context.Background(), entry, false, false, nil, false) return err } @@ -58,9 +58,9 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi WritableVolumeCount: rule.VolumeGrowthCount, } - assignResult, err := operation.Assign(context.Background(), f.GetMaster, f.GrpcDialOption, assignRequest) + assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest) if err != nil { - return nil, nil, fmt.Errorf("AssignVolume: %w", err) + return nil, nil, fmt.Errorf("AssignVolume: %v", err) } if assignResult.Error != "" { return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error) @@ -77,13 +77,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi PairMap: nil, Jwt: assignResult.Auth, } - - uploader, err := operation.NewUploader() - if err != nil { - return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) - } - - uploadResult, err := uploader.UploadData(context.Background(), data, uploadOption) + uploadResult, err := operation.UploadData(data, uploadOption) if err != nil { return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) } diff --git a/weed/filer/filer_notify_read.go b/weed/filer/filer_notify_read.go deleted file mode 100644 index 62cede687..000000000 --- a/weed/filer/filer_notify_read.go +++ /dev/null @@ -1,371 +0,0 @@ -package filer - -import ( - "container/heap" - "context" - "fmt" - "io" - "math" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/protobuf/proto" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -type LogFileEntry struct { - TsNs int64 - FileEntry *Entry -} - -func (f *Filer) collectPersistedLogBuffer(startPosition log_buffer.MessagePosition, stopTsNs int64) (v *OrderedLogVisitor, err error) { - - if stopTsNs != 0 && startPosition.Time.UnixNano() > stopTsNs { - return nil, io.EOF - } - - startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Time.Year(), startPosition.Time.Month(), startPosition.Time.Day()) - - dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "") - if listDayErr != nil { - return nil, fmt.Errorf("fail to list log by day: %w", listDayErr) - } - - return NewOrderedLogVisitor(f, startPosition, stopTsNs, dayEntries) - -} - -func (f *Filer) HasPersistedLogFiles(startPosition log_buffer.MessagePosition) (bool, error) { - startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Time.Year(), startPosition.Time.Month(), startPosition.Time.Day()) - dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 1, "", "", "") - - if listDayErr != nil { - return false, fmt.Errorf("fail to list log by day: %w", listDayErr) - } - if len(dayEntries) == 0 { - return false, nil - } - return true, nil -} - -// ---------- -type LogEntryItem struct { - Entry *filer_pb.LogEntry - filer string -} - -// LogEntryItemPriorityQueue a priority queue for LogEntry -type LogEntryItemPriorityQueue []*LogEntryItem - -func (pq LogEntryItemPriorityQueue) Len() int { return len(pq) } -func (pq LogEntryItemPriorityQueue) Less(i, j int) bool { - return pq[i].Entry.TsNs < pq[j].Entry.TsNs -} -func (pq LogEntryItemPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } -func (pq *LogEntryItemPriorityQueue) Push(x any) { - item := x.(*LogEntryItem) - *pq = append(*pq, item) -} -func (pq *LogEntryItemPriorityQueue) Pop() any { - n := len(*pq) - item := (*pq)[n-1] - *pq = (*pq)[:n-1] - return item -} - -// ---------- - -type OrderedLogVisitor struct { - perFilerIteratorMap map[string]*LogFileQueueIterator - pq *LogEntryItemPriorityQueue - logFileEntryCollector *LogFileEntryCollector -} - -func NewOrderedLogVisitor(f *Filer, startPosition log_buffer.MessagePosition, stopTsNs int64, dayEntries []*Entry) (*OrderedLogVisitor, error) { - - perFilerQueueMap := make(map[string]*LogFileQueueIterator) - // initialize the priority queue - pq := &LogEntryItemPriorityQueue{} - heap.Init(pq) - - t := &OrderedLogVisitor{ - perFilerIteratorMap: perFilerQueueMap, - pq: pq, - logFileEntryCollector: NewLogFileEntryCollector(f, startPosition, stopTsNs, dayEntries), - } - if err := t.logFileEntryCollector.collectMore(t); err != nil && err != io.EOF { - return nil, err - } - return t, nil -} - -func (o *OrderedLogVisitor) GetNext() (logEntry *filer_pb.LogEntry, err error) { - if o.pq.Len() == 0 { - return nil, io.EOF - } - item := heap.Pop(o.pq).(*LogEntryItem) - filerId := item.filer - - // fill the pq with the next log entry from the same filer - it := o.perFilerIteratorMap[filerId] - next, nextErr := it.getNext(o) - if nextErr != nil { - if nextErr == io.EOF { - // do nothing since the filer has no more log entries - } else { - return nil, fmt.Errorf("failed to get next log entry: %w", nextErr) - } - } else { - heap.Push(o.pq, &LogEntryItem{ - Entry: next, - filer: filerId, - }) - } - return item.Entry, nil -} - -func getFilerId(name string) string { - idx := strings.LastIndex(name, ".") - if idx < 0 { - return "" - } - return name[idx+1:] -} - -// ---------- - -type LogFileEntryCollector struct { - f *Filer - startTsNs int64 - stopTsNs int64 - dayEntryQueue *util.Queue[*Entry] - startDate string - startHourMinute string - stopDate string - stopHourMinute string -} - -func NewLogFileEntryCollector(f *Filer, startPosition log_buffer.MessagePosition, stopTsNs int64, dayEntries []*Entry) *LogFileEntryCollector { - dayEntryQueue := util.NewQueue[*Entry]() - for _, dayEntry := range dayEntries { - dayEntryQueue.Enqueue(dayEntry) - // println("enqueue day entry", dayEntry.Name()) - } - - startDate := fmt.Sprintf("%04d-%02d-%02d", startPosition.Time.Year(), startPosition.Time.Month(), startPosition.Time.Day()) - startHourMinute := fmt.Sprintf("%02d-%02d", startPosition.Time.Hour(), startPosition.Time.Minute()) - var stopDate, stopHourMinute string - if stopTsNs != 0 { - stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Second)).UTC() - stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day()) - stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute()) - } - - return &LogFileEntryCollector{ - f: f, - startTsNs: startPosition.Time.UnixNano(), - stopTsNs: stopTsNs, - dayEntryQueue: dayEntryQueue, - startDate: startDate, - startHourMinute: startHourMinute, - stopDate: stopDate, - stopHourMinute: stopHourMinute, - } -} - -func (c *LogFileEntryCollector) hasMore() bool { - return c.dayEntryQueue.Len() > 0 -} - -func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) { - dayEntry := c.dayEntryQueue.Dequeue() - if dayEntry == nil { - return io.EOF - } - // println("dequeue day entry", dayEntry.Name()) - if c.stopDate != "" { - if strings.Compare(dayEntry.Name(), c.stopDate) > 0 { - return io.EOF - } - } - - hourMinuteEntries, _, listHourMinuteErr := c.f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "") - if listHourMinuteErr != nil { - return fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr) - } - freshFilerIds := make(map[string]string) - for _, hourMinuteEntry := range hourMinuteEntries { - // println("checking hh-mm", hourMinuteEntry.FullPath) - hourMinute := util.FileNameBase(hourMinuteEntry.Name()) - if dayEntry.Name() == c.startDate { - if strings.Compare(hourMinute, c.startHourMinute) < 0 { - continue - } - } - if dayEntry.Name() == c.stopDate { - if strings.Compare(hourMinute, c.stopHourMinute) > 0 { - break - } - } - - tsMinute := fmt.Sprintf("%s-%s", dayEntry.Name(), hourMinute) - // println(" enqueue", tsMinute) - t, parseErr := time.Parse("2006-01-02-15-04", tsMinute) - if parseErr != nil { - glog.Errorf("failed to parse %s: %v", tsMinute, parseErr) - continue - } - filerId := getFilerId(hourMinuteEntry.Name()) - if filerId == "" { - glog.Warningf("Invalid log file name format: %s", hourMinuteEntry.Name()) - continue // Skip files with invalid format - } - iter, found := v.perFilerIteratorMap[filerId] - if !found { - iter = newLogFileQueueIterator(c.f.MasterClient, util.NewQueue[*LogFileEntry](), c.startTsNs, c.stopTsNs) - v.perFilerIteratorMap[filerId] = iter - freshFilerIds[filerId] = hourMinuteEntry.Name() - } - iter.q.Enqueue(&LogFileEntry{ - TsNs: t.UnixNano(), - FileEntry: hourMinuteEntry, - }) - } - - // fill the pq with the next log entry if it is a new filer - for filerId, entryName := range freshFilerIds { - iter, found := v.perFilerIteratorMap[filerId] - if !found { - glog.Errorf("Unexpected! failed to find iterator for filer %s", filerId) - continue - } - next, nextErr := iter.getNext(v) - if nextErr != nil { - if nextErr == io.EOF { - // do nothing since the filer has no more log entries - } else { - return fmt.Errorf("failed to get next log entry for %v: %w", entryName, nextErr) - } - } else { - heap.Push(v.pq, &LogEntryItem{ - Entry: next, - filer: filerId, - }) - } - } - - return nil -} - -// ---------- - -type LogFileQueueIterator struct { - q *util.Queue[*LogFileEntry] - masterClient *wdclient.MasterClient - startTsNs int64 - stopTsNs int64 - currentFileIterator *LogFileIterator -} - -func newLogFileQueueIterator(masterClient *wdclient.MasterClient, q *util.Queue[*LogFileEntry], startTsNs, stopTsNs int64) *LogFileQueueIterator { - return &LogFileQueueIterator{ - q: q, - masterClient: masterClient, - startTsNs: startTsNs, - stopTsNs: stopTsNs, - } -} - -// getNext will return io.EOF when done -func (iter *LogFileQueueIterator) getNext(v *OrderedLogVisitor) (logEntry *filer_pb.LogEntry, err error) { - for { - if iter.currentFileIterator != nil { - logEntry, err = iter.currentFileIterator.getNext() - if err != io.EOF { - return - } - } - // now either iter.currentFileIterator is nil or err is io.EOF - if iter.q.Len() == 0 { - return nil, io.EOF - } - t := iter.q.Dequeue() - if t == nil { - continue - } - // skip the file if it is after the stopTsNs - if iter.stopTsNs != 0 && t.TsNs > iter.stopTsNs { - return nil, io.EOF - } - next := iter.q.Peek() - if next == nil { - if collectErr := v.logFileEntryCollector.collectMore(v); collectErr != nil && collectErr != io.EOF { - return nil, collectErr - } - next = iter.q.Peek() // Re-peek after collectMore - } - // skip the file if the next entry is before the startTsNs - if next != nil && next.TsNs <= iter.startTsNs { - continue - } - iter.currentFileIterator = newLogFileIterator(iter.masterClient, t.FileEntry, iter.startTsNs, iter.stopTsNs) - } -} - -// ---------- - -type LogFileIterator struct { - r io.Reader - sizeBuf []byte - startTsNs int64 - stopTsNs int64 -} - -func newLogFileIterator(masterClient *wdclient.MasterClient, fileEntry *Entry, startTsNs, stopTsNs int64) *LogFileIterator { - return &LogFileIterator{ - r: NewChunkStreamReaderFromFiler(context.Background(), masterClient, fileEntry.Chunks), - sizeBuf: make([]byte, 4), - startTsNs: startTsNs, - stopTsNs: stopTsNs, - } -} - -// getNext will return io.EOF when done -func (iter *LogFileIterator) getNext() (logEntry *filer_pb.LogEntry, err error) { - var n int - for { - n, err = iter.r.Read(iter.sizeBuf) - if err != nil { - return - } - if n != 4 { - return nil, fmt.Errorf("size %d bytes, expected 4 bytes", n) - } - size := util.BytesToUint32(iter.sizeBuf) - // println("entry size", size) - entryData := make([]byte, size) - n, err = iter.r.Read(entryData) - if err != nil { - return - } - if n != int(size) { - return nil, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size) - } - logEntry = &filer_pb.LogEntry{} - if err = proto.Unmarshal(entryData, logEntry); err != nil { - return - } - if logEntry.TsNs <= iter.startTsNs { - continue - } - if iter.stopTsNs != 0 && logEntry.TsNs > iter.stopTsNs { - return nil, io.EOF - } - return - } -} diff --git a/weed/filer/filer_notify_test.go b/weed/filer/filer_notify_test.go index af99d7015..6a2be8f18 100644 --- a/weed/filer/filer_notify_test.go +++ b/weed/filer/filer_notify_test.go @@ -4,13 +4,13 @@ import ( "testing" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" ) -func TestProtoMarshal(t *testing.T) { +func TestProtoMarshalText(t *testing.T) { oldEntry := &Entry{ FullPath: util.FullPath("/this/path/to"), @@ -22,11 +22,11 @@ func TestProtoMarshal(t *testing.T) { TtlSec: 25, }, Chunks: []*filer_pb.FileChunk{ - { + &filer_pb.FileChunk{ FileId: "234,2423423422", Offset: 234234, Size: 234, - ModifiedTsNs: 12312423, + Mtime: 12312423, ETag: "2342342354", SourceFileId: "23234,2342342342", }, @@ -39,15 +39,15 @@ func TestProtoMarshal(t *testing.T) { DeleteChunks: true, } - text, _ := proto.Marshal(notification) + text := proto.MarshalTextString(notification) notification2 := &filer_pb.EventNotification{} - proto.Unmarshal(text, notification2) + proto.UnmarshalText(text, notification2) - if notification2.OldEntry.GetChunks()[0].SourceFileId != notification.OldEntry.GetChunks()[0].SourceFileId { + if notification2.OldEntry.Chunks[0].SourceFileId != notification.OldEntry.Chunks[0].SourceFileId { t.Fatalf("marshal/unmarshal error: %s", text) } - println(string(text)) + println(text) } diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go index acbf4aa47..3b290deca 100644 --- a/weed/filer/filer_on_meta_event.go +++ b/weed/filer/filer_on_meta_event.go @@ -2,10 +2,9 @@ package filer import ( "bytes" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) // onMetadataChangeEvent is triggered after filer processed change events from local or remote filers @@ -17,7 +16,11 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) { message := event.EventNotification - + for _, sig := range message.Signatures { + if sig == f.Signature { + return + } + } if f.DirBucketsPath == event.Directory { if filer_pb.IsCreate(event) { if message.NewEntry.IsDirectory { @@ -61,7 +64,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk, size uint64) ([]byte, er func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) { fc := NewFilerConf() - err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry)) + err := fc.loadFromChunks(f, entry.Content, entry.Chunks, FileSize(entry)) if err != nil { glog.Errorf("read filer conf chunks: %v", err) return @@ -81,9 +84,9 @@ func (f *Filer) LoadFilerConf() { f.FilerConf = fc } -// ////////////////////////////////// +//////////////////////////////////// // load and maintain remote storages -// ////////////////////////////////// +//////////////////////////////////// func (f *Filer) LoadRemoteStorageConfAndMapping() { if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil { glog.Errorf("read remote conf and mapping: %v", err) diff --git a/weed/filer/filer_rename.go b/weed/filer/filer_rename.go index cf4a2b79f..b6f0cf6de 100644 --- a/weed/filer/filer_rename.go +++ b/weed/filer/filer_rename.go @@ -2,23 +2,16 @@ package filer import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "strings" - - "github.com/seaweedfs/seaweedfs/weed/util" ) -func (f *Filer) CanRename(source, target util.FullPath, oldName string) error { - sourcePath := source.Child(oldName) - if strings.HasPrefix(string(target), string(sourcePath)) { - return fmt.Errorf("mv: can not move directory to a subdirectory of itself") - } - +func (f *Filer) CanRename(source, target util.FullPath) error { sourceBucket := f.DetectBucket(source) targetBucket := f.DetectBucket(target) if sourceBucket != targetBucket { return fmt.Errorf("can not move across collection %s => %s", sourceBucket, targetBucket) } - return nil } diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go index 6c7ba0747..112df7984 100644 --- a/weed/filer/filer_search.go +++ b/weed/filer/filer_search.go @@ -2,7 +2,7 @@ package filer import ( "context" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "math" "path/filepath" "strings" diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go index 87e212ea5..260945b33 100644 --- a/weed/filer/filerstore.go +++ b/weed/filer/filerstore.go @@ -3,7 +3,7 @@ package filer import ( "context" "errors" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "io" ) diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go index 55ce4c9ce..ae2f5fee7 100644 --- a/weed/filer/filerstore_hardlink.go +++ b/weed/filer/filerstore_hardlink.go @@ -4,9 +4,8 @@ import ( "bytes" "context" "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry *Entry) error { @@ -32,7 +31,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry // remove old hard link if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 { - glog.V(4).InfofCtx(ctx, "handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) + glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } @@ -51,7 +50,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err return encodeErr } - glog.V(4).InfofCtx(ctx, "setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return fsw.KvPut(ctx, key, newBlob) } @@ -64,16 +63,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr value, err := fsw.KvGet(ctx, key) if err != nil { - glog.ErrorfCtx(ctx, "read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } if err = entry.DecodeAttributesAndChunks(value); err != nil { - glog.ErrorfCtx(ctx, "decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } - glog.V(4).InfofCtx(ctx, "maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return nil } @@ -95,7 +94,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har entry.HardLinkCounter-- if entry.HardLinkCounter <= 0 { - glog.V(4).InfofCtx(ctx, "DeleteHardLink KvDelete %v", key) + glog.V(4).Infof("DeleteHardLink KvDelete %v", key) return fsw.KvDelete(ctx, key) } @@ -104,7 +103,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har return encodeErr } - glog.V(4).InfofCtx(ctx, "DeleteHardLink KvPut %v", key) + glog.V(4).Infof("DeleteHardLink KvPut %v", key) return fsw.KvPut(ctx, key, newBlob) } diff --git a/weed/filer/filerstore_translate_path.go b/weed/filer/filerstore_translate_path.go index 900154fde..9e74dd41c 100644 --- a/weed/filer/filerstore_translate_path.go +++ b/weed/filer/filerstore_translate_path.go @@ -2,7 +2,7 @@ package filer import ( "context" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "math" "strings" ) diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go index ea039d444..3ece25ce6 100644 --- a/weed/filer/filerstore_wrapper.go +++ b/weed/filer/filerstore_wrapper.go @@ -2,17 +2,16 @@ package filer import ( "context" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/viant/ptrie" "io" "math" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/viant/ptrie" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -24,6 +23,7 @@ type VirtualFilerStore interface { FilerStore DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error DeleteOneEntry(ctx context.Context, entry *Entry) error + DeleteOneEntrySkipHardlink(ctx context.Context, fullpath util.FullPath) error AddPathSpecificStore(path string, storeId string, store FilerStore) OnBucketCreation(bucket string) OnBucketDeletion(bucket string) @@ -32,7 +32,7 @@ type VirtualFilerStore interface { type FilerStoreWrapper struct { defaultStore FilerStore - pathToStore ptrie.Trie[string] + pathToStore ptrie.Trie storeIdToStore map[string]FilerStore } @@ -42,7 +42,7 @@ func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { } return &FilerStoreWrapper{ defaultStore: store, - pathToStore: ptrie.New[string](), + pathToStore: ptrie.New(), storeIdToStore: make(map[string]FilerStore), } } @@ -85,12 +85,12 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) { store = fsw.defaultStore - if path == "/" || path == "//" { + if path == "/" { return } var storeId string - fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value string) bool { - storeId = value + fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool { + storeId = value.(string) return false }) if storeId != "" { @@ -112,7 +112,6 @@ func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefi } func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(entry.FullPath) stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "insert").Inc() start := time.Now() @@ -120,7 +119,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) }() - filer_pb.BeforeEntrySerialization(entry.GetChunks()) + filer_pb.BeforeEntrySerialization(entry.Chunks) if entry.Mime == "application/octet-stream" { entry.Mime = "" } @@ -134,7 +133,6 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err } func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(entry.FullPath) stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "update").Inc() start := time.Now() @@ -142,7 +140,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) }() - filer_pb.BeforeEntrySerialization(entry.GetChunks()) + filer_pb.BeforeEntrySerialization(entry.Chunks) if entry.Mime == "application/octet-stream" { entry.Mime = "" } @@ -156,7 +154,6 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err } func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(fp) stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "find").Inc() start := time.Now() @@ -167,20 +164,16 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) ( entry, err = actualStore.FindEntry(ctx, fp) // glog.V(4).Infof("FindEntry %s: %v", fp, err) if err != nil { - if fsw.CanDropWholeBucket() && strings.Contains(err.Error(), "Table") && strings.Contains(err.Error(), "doesn't exist") { - err = filer_pb.ErrNotFound - } return nil, err } fsw.maybeReadHardLink(ctx, entry) - filer_pb.AfterEntryDeserialization(entry.GetChunks()) + filer_pb.AfterEntryDeserialization(entry.Chunks) return } func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(fp) stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() start := time.Now() @@ -189,17 +182,14 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) }() existingEntry, findErr := fsw.FindEntry(ctx, fp) - if findErr == filer_pb.ErrNotFound || existingEntry == nil { + if findErr == filer_pb.ErrNotFound { return nil } if len(existingEntry.HardLinkId) != 0 { // remove hard link - op := ctx.Value("OP") - if op != "MV" { - glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath) - if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { - return err - } + glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err } } @@ -208,7 +198,6 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) } func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(existingEntry.FullPath) stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() start := time.Now() @@ -218,12 +207,9 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry if len(existingEntry.HardLinkId) != 0 { // remove hard link - op := ctx.Value("OP") - if op != "MV" { - glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath) - if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { - return err - } + glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err } } @@ -231,8 +217,19 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry return actualStore.DeleteEntry(ctx, existingEntry.FullPath) } +func (fsw *FilerStoreWrapper) DeleteOneEntrySkipHardlink(ctx context.Context, fullpath util.FullPath) (err error) { + actualStore := fsw.getActualStore(fullpath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + glog.V(4).Infof("DeleteOneEntrySkipHardlink %s", fullpath) + return actualStore.DeleteEntry(ctx, fullpath) +} + func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(fp + "/") stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Inc() start := time.Now() @@ -245,7 +242,6 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util. } func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(dirPath + "/") stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc() start := time.Now() @@ -256,13 +252,12 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) - filer_pb.AfterEntryDeserialization(entry.GetChunks()) + filer_pb.AfterEntryDeserialization(entry.Chunks) return eachEntryFunc(entry) }) } func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { - ctx = context.WithoutCancel(ctx) actualStore := fsw.getActualStore(dirPath + "/") stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc() start := time.Now() @@ -275,7 +270,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) adjustedEntryFunc := func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) - filer_pb.AfterEntryDeserialization(entry.GetChunks()) + filer_pb.AfterEntryDeserialization(entry.Chunks) return eachEntryFunc(entry) } lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc) @@ -303,8 +298,10 @@ func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath u count := int64(0) for count < limit && len(notPrefixed) > 0 { + var isLastItemHasPrefix bool for _, entry := range notPrefixed { if strings.HasPrefix(entry.Name(), prefix) { + isLastItemHasPrefix = true count++ if !eachEntryFunc(entry) { return @@ -312,9 +309,11 @@ func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath u if count >= limit { break } + } else { + isLastItemHasPrefix = false } } - if count < limit && lastFileName < prefix { + if count < limit && isLastItemHasPrefix && len(notPrefixed) == int(limit) { notPrefixed = notPrefixed[:0] lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool { notPrefixed = append(notPrefixed, entry) @@ -331,17 +330,14 @@ func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath u } func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().BeginTransaction(ctx) } func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().CommitTransaction(ctx) } func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().RollbackTransaction(ctx) } @@ -350,15 +346,12 @@ func (fsw *FilerStoreWrapper) Shutdown() { } func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().KvPut(ctx, key, value) } func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().KvGet(ctx, key) } func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) { - ctx = context.WithoutCancel(ctx) return fsw.getDefaultStore().KvDelete(ctx, key) } diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go index 8642146e6..c5d6eb48c 100644 --- a/weed/filer/hbase/hbase_store.go +++ b/weed/filer/hbase/hbase_store.go @@ -4,10 +4,10 @@ import ( "bytes" "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/tsuna/gohbase" "github.com/tsuna/gohbase/hrpc" "io" @@ -48,7 +48,7 @@ func (store *HbaseStore) initialize(zkquorum, table string) (err error) { headers := map[string][]string{store.cfMetaDir: nil} get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers)) if err != nil { - return fmt.Errorf("NewGet returned an error: %w", err) + return fmt.Errorf("NewGet returned an error: %v", err) } _, err = store.Client.Get(get) if err != gohbase.TableNotFound { @@ -75,7 +75,7 @@ func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) er if err != nil { return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { value = util.MaybeGzipData(value) } @@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa } if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/hbase/hbase_store_kv.go b/weed/filer/hbase/hbase_store_kv.go index 0939b8752..990e55a24 100644 --- a/weed/filer/hbase/hbase_store_kv.go +++ b/weed/filer/hbase/hbase_store_kv.go @@ -2,7 +2,7 @@ package hbase import ( "context" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/tsuna/gohbase/hrpc" "time" ) diff --git a/weed/filer/interval_list.go b/weed/filer/interval_list.go deleted file mode 100644 index 27720231d..000000000 --- a/weed/filer/interval_list.go +++ /dev/null @@ -1,259 +0,0 @@ -package filer - -import ( - "math" - "sync" -) - -type IntervalValue interface { - SetStartStop(start, stop int64) - Clone() IntervalValue -} - -type Interval[T IntervalValue] struct { - StartOffset int64 - StopOffset int64 - TsNs int64 - Value T - Prev *Interval[T] - Next *Interval[T] -} - -func (interval *Interval[T]) Size() int64 { - return interval.StopOffset - interval.StartOffset -} - -// IntervalList mark written intervals within one page chunk -type IntervalList[T IntervalValue] struct { - head *Interval[T] - tail *Interval[T] - Lock sync.RWMutex -} - -func NewIntervalList[T IntervalValue]() *IntervalList[T] { - list := &IntervalList[T]{ - head: &Interval[T]{ - StartOffset: -1, - StopOffset: -1, - }, - tail: &Interval[T]{ - StartOffset: math.MaxInt64, - StopOffset: math.MaxInt64, - }, - } - return list -} - -func (list *IntervalList[T]) Front() (interval *Interval[T]) { - return list.head.Next -} - -func (list *IntervalList[T]) AppendInterval(interval *Interval[T]) { - list.Lock.Lock() - defer list.Lock.Unlock() - - if list.head.Next == nil { - list.head.Next = interval - } - interval.Prev = list.tail.Prev - if list.tail.Prev != nil { - list.tail.Prev.Next = interval - } - list.tail.Prev = interval -} - -func (list *IntervalList[T]) Overlay(startOffset, stopOffset, tsNs int64, value T) { - if startOffset >= stopOffset { - return - } - interval := &Interval[T]{ - StartOffset: startOffset, - StopOffset: stopOffset, - TsNs: tsNs, - Value: value, - } - - list.Lock.Lock() - defer list.Lock.Unlock() - - list.overlayInterval(interval) -} - -func (list *IntervalList[T]) InsertInterval(startOffset, stopOffset, tsNs int64, value T) { - interval := &Interval[T]{ - StartOffset: startOffset, - StopOffset: stopOffset, - TsNs: tsNs, - Value: value, - } - - list.Lock.Lock() - defer list.Lock.Unlock() - - value.SetStartStop(startOffset, stopOffset) - list.insertInterval(interval) -} - -func (list *IntervalList[T]) insertInterval(interval *Interval[T]) { - prev := list.head - next := prev.Next - - for interval.StartOffset < interval.StopOffset { - if next == nil { - // add to the end - list.insertBetween(prev, interval, list.tail) - break - } - - // interval is ahead of the next - if interval.StopOffset <= next.StartOffset { - list.insertBetween(prev, interval, next) - break - } - - // interval is after the next - if next.StopOffset <= interval.StartOffset { - prev = next - next = next.Next - continue - } - - // intersecting next and interval - if interval.TsNs >= next.TsNs { - // interval is newer - if next.StartOffset < interval.StartOffset { - // left side of next is ahead of interval - t := &Interval[T]{ - StartOffset: next.StartOffset, - StopOffset: interval.StartOffset, - TsNs: next.TsNs, - Value: next.Value.Clone().(T), - } - t.Value.SetStartStop(t.StartOffset, t.StopOffset) - list.insertBetween(prev, t, interval) - next.StartOffset = interval.StartOffset - next.Value.SetStartStop(next.StartOffset, next.StopOffset) - prev = t - } - if interval.StopOffset < next.StopOffset { - // right side of next is after interval - next.StartOffset = interval.StopOffset - next.Value.SetStartStop(next.StartOffset, next.StopOffset) - list.insertBetween(prev, interval, next) - break - } else { - // next is covered - prev.Next = interval - next = next.Next - } - } else { - // next is newer - if interval.StartOffset < next.StartOffset { - // left side of interval is ahead of next - t := &Interval[T]{ - StartOffset: interval.StartOffset, - StopOffset: next.StartOffset, - TsNs: interval.TsNs, - Value: interval.Value.Clone().(T), - } - t.Value.SetStartStop(t.StartOffset, t.StopOffset) - list.insertBetween(prev, t, next) - interval.StartOffset = next.StartOffset - interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset) - } - if next.StopOffset < interval.StopOffset { - // right side of interval is after next - interval.StartOffset = next.StopOffset - interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset) - } else { - // interval is covered - break - } - } - - } -} - -func (list *IntervalList[T]) insertBetween(a, interval, b *Interval[T]) { - a.Next = interval - b.Prev = interval - if a != list.head { - interval.Prev = a - } - if b != list.tail { - interval.Next = b - } -} - -func (list *IntervalList[T]) overlayInterval(interval *Interval[T]) { - - //t := list.head - //for ; t.Next != nil; t = t.Next { - // if t.TsNs > interval.TsNs { - // println("writes is out of order", t.TsNs-interval.TsNs, "ns") - // } - //} - - p := list.head - for ; p.Next != nil && p.Next.StopOffset <= interval.StartOffset; p = p.Next { - } - q := list.tail - for ; q.Prev != nil && q.Prev.StartOffset >= interval.StopOffset; q = q.Prev { - } - - // left side - // interval after p.Next start - if p.Next != nil && p.Next.StartOffset < interval.StartOffset { - t := &Interval[T]{ - StartOffset: p.Next.StartOffset, - StopOffset: interval.StartOffset, - TsNs: p.Next.TsNs, - Value: p.Next.Value, - } - p.Next = t - if p != list.head { - t.Prev = p - } - t.Next = interval - interval.Prev = t - } else { - p.Next = interval - if p != list.head { - interval.Prev = p - } - } - - // right side - // interval ends before p.Prev - if q.Prev != nil && interval.StopOffset < q.Prev.StopOffset { - t := &Interval[T]{ - StartOffset: interval.StopOffset, - StopOffset: q.Prev.StopOffset, - TsNs: q.Prev.TsNs, - Value: q.Prev.Value, - } - q.Prev = t - if q != list.tail { - t.Next = q - } - interval.Next = t - t.Prev = interval - } else { - q.Prev = interval - if q != list.tail { - interval.Next = q - } - } - -} - -func (list *IntervalList[T]) Len() int { - list.Lock.RLock() - defer list.Lock.RUnlock() - - var count int - for t := list.head; t != nil; t = t.Next { - count++ - } - return count - 1 -} diff --git a/weed/filer/interval_list_test.go b/weed/filer/interval_list_test.go deleted file mode 100644 index dea510fed..000000000 --- a/weed/filer/interval_list_test.go +++ /dev/null @@ -1,327 +0,0 @@ -package filer - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "testing" -) - -type IntervalInt int - -func (i IntervalInt) SetStartStop(start, stop int64) { -} -func (i IntervalInt) Clone() IntervalValue { - return i -} - -func TestIntervalList_Overlay(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(0, 100, 1, 1) - list.Overlay(50, 150, 2, 2) - list.Overlay(200, 250, 3, 3) - list.Overlay(225, 250, 4, 4) - list.Overlay(175, 210, 5, 5) - list.Overlay(0, 25, 6, 6) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 6, list.Len()) - println() - list.Overlay(50, 150, 7, 7) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 6, list.Len()) -} - -func TestIntervalList_Overlay2(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(0, 50, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } -} - -func TestIntervalList_Overlay3(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - assert.Equal(t, 1, list.Len()) - - list.Overlay(0, 60, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_Overlay4(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(0, 100, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 1, list.Len()) -} - -func TestIntervalList_Overlay5(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(0, 110, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 1, list.Len()) -} - -func TestIntervalList_Overlay6(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(50, 110, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 1, list.Len()) -} - -func TestIntervalList_Overlay7(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(50, 90, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_Overlay8(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(60, 90, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 3, list.Len()) -} - -func TestIntervalList_Overlay9(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(60, 100, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_Overlay10(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(50, 100, 1, 1) - list.Overlay(60, 110, 2, 2) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_Overlay11(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.Overlay(0, 100, 1, 1) - list.Overlay(100, 110, 2, 2) - list.Overlay(0, 90, 3, 3) - list.Overlay(0, 80, 4, 4) - list.Overlay(0, 90, 5, 5) - list.Overlay(90, 90, 6, 6) - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 3, list.Len()) -} - -func TestIntervalList_insertInterval1(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_insertInterval2(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(0, 25, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_insertInterval3(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(0, 75, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 3, list.Len()) -} - -func TestIntervalList_insertInterval4(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(0, 225, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_insertInterval5(t *testing.T) { - list := NewIntervalList[IntervalInt]() - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(0, 225, 5, 5) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_insertInterval6(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(0, 275, 1, 1) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 5, list.Len()) -} - -func TestIntervalList_insertInterval7(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(75, 275, 1, 1) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 4, list.Len()) -} - -func TestIntervalList_insertInterval8(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(75, 275, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 4, list.Len()) -} - -func TestIntervalList_insertInterval9(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(50, 150, 2, 2) - list.InsertInterval(200, 250, 4, 4) - - list.InsertInterval(50, 150, 3, 3) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 2, list.Len()) -} - -func TestIntervalList_insertInterval10(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(50, 100, 2, 2) - - list.InsertInterval(200, 300, 4, 4) - - list.InsertInterval(100, 200, 5, 5) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 3, list.Len()) -} - -func TestIntervalList_insertInterval11(t *testing.T) { - list := NewIntervalList[IntervalInt]() - - list.InsertInterval(0, 64, 1, 1) - - list.InsertInterval(72, 136, 3, 3) - - list.InsertInterval(64, 128, 2, 2) - - list.InsertInterval(68, 72, 4, 4) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 4, list.Len()) -} - -type IntervalStruct struct { - x int - start int64 - stop int64 -} - -func newIntervalStruct(i int) IntervalStruct { - return IntervalStruct{ - x: i, - } -} - -func (i IntervalStruct) SetStartStop(start, stop int64) { - i.start, i.stop = start, stop -} -func (i IntervalStruct) Clone() IntervalValue { - return &IntervalStruct{ - x: i.x, - start: i.start, - stop: i.stop, - } -} - -func TestIntervalList_insertIntervalStruct(t *testing.T) { - list := NewIntervalList[IntervalStruct]() - - list.InsertInterval(0, 64, 1, newIntervalStruct(1)) - - list.InsertInterval(64, 72, 2, newIntervalStruct(2)) - - list.InsertInterval(72, 136, 3, newIntervalStruct(3)) - - list.InsertInterval(64, 68, 4, newIntervalStruct(4)) - - for p := list.Front(); p != nil; p = p.Next { - fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value) - } - assert.Equal(t, 4, list.Len()) -} diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go index ff1465c23..6abb37f99 100644 --- a/weed/filer/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -4,19 +4,18 @@ import ( "bytes" "context" "fmt" - "io" - "os" - "github.com/syndtr/goleveldb/leveldb" leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "io" + "os" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -45,7 +44,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre } func (store *LevelDBStore) initialize(dir string) (err error) { - glog.V(0).Infof("filer store dir: %s", dir) + glog.Infof("filer store dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -87,7 +86,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { value = weed_util.MaybeGzipData(value) } @@ -97,7 +96,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } - // println("saved", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) return nil } @@ -127,7 +126,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } - // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data)) + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) return entry, nil } @@ -206,7 +205,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go index c961b5e91..f686cbf21 100644 --- a/weed/filer/leveldb/leveldb_store_kv.go +++ b/weed/filer/leveldb/leveldb_store_kv.go @@ -3,7 +3,7 @@ package leveldb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/syndtr/goleveldb/leveldb" ) @@ -12,7 +12,7 @@ func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) err = store.db.Put(key, value, nil) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -27,7 +27,7 @@ func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, } if err != nil { - return nil, fmt.Errorf("kv get: %w", err) + return nil, fmt.Errorf("kv get: %v", err) } return @@ -38,7 +38,7 @@ func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) err = store.db.Delete(key, nil) if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go index 5676b1617..4cd8b88e8 100644 --- a/weed/filer/leveldb/leveldb_store_test.go +++ b/weed/filer/leveldb/leveldb_store_test.go @@ -3,17 +3,16 @@ package leveldb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" "os" "testing" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDBStore{} store.initialize(dir) @@ -32,7 +31,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false, testFiler.MaxFilenameLength); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -66,7 +65,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDBStore{} store.initialize(dir) @@ -88,7 +87,7 @@ func TestEmptyRoot(t *testing.T) { } func BenchmarkInsertEntry(b *testing.B) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := b.TempDir() store := &LevelDBStore{} store.initialize(dir) diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go index 1bd6fe597..d68493bd7 100644 --- a/weed/filer/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -14,10 +14,10 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -25,9 +25,8 @@ func init() { } type LevelDB2Store struct { - dbs []*leveldb.DB - dbCount int - ReadOnly bool + dbs []*leveldb.DB + dbCount int } func (store *LevelDB2Store) GetName() string { @@ -50,7 +49,6 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB Filter: filter.NewBloomFilter(8), // false positive rate 0.02 - ReadOnly: store.ReadOnly, } for d := 0; d < dbCount; d++ { @@ -90,7 +88,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { value = weed_util.MaybeGzipData(value) } @@ -100,7 +98,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } - // println("saved", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) return nil } @@ -131,7 +129,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } - // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data)) + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) return entry, nil } @@ -210,10 +208,10 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb2/leveldb2_store_kv.go b/weed/filer/leveldb2/leveldb2_store_kv.go index 3d65f98e8..b415d3c32 100644 --- a/weed/filer/leveldb2/leveldb2_store_kv.go +++ b/weed/filer/leveldb2/leveldb2_store_kv.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/syndtr/goleveldb/leveldb" ) diff --git a/weed/filer/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go index 34dc762da..1f8e33116 100644 --- a/weed/filer/leveldb2/leveldb2_store_test.go +++ b/weed/filer/leveldb2/leveldb2_store_test.go @@ -2,15 +2,14 @@ package leveldb import ( "context" - "github.com/seaweedfs/seaweedfs/weed/pb" "testing" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDB2Store{} store.initialize(dir, 2) @@ -29,7 +28,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false, testFiler.MaxFilenameLength); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -63,7 +62,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDB2Store{} store.initialize(dir, 2) diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go index eb8b4e578..d21515bd4 100644 --- a/weed/filer/leveldb3/leveldb3_store.go +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -16,10 +16,10 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -31,10 +31,9 @@ func init() { } type LevelDB3Store struct { - dir string - dbs map[string]*leveldb.DB - dbsLock sync.RWMutex - ReadOnly bool + dir string + dbs map[string]*leveldb.DB + dbsLock sync.RWMutex } func (store *LevelDB3Store) GetName() string { @@ -70,14 +69,12 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) { BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB Filter: bloom, - ReadOnly: store.ReadOnly, } if name != DEFAULT { opts = &opt.Options{ BlockCacheCapacity: 16 * 1024 * 1024, // default value is 8MiB WriteBuffer: 8 * 1024 * 1024, // default value is 4MiB Filter: bloom, - ReadOnly: store.ReadOnly, } } @@ -124,31 +121,23 @@ func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bo } store.dbsLock.RUnlock() - - db, err := store.createDB(bucket) - - return db, bucket, shortPath, err -} - -func (store *LevelDB3Store) createDB(bucket string) (*leveldb.DB, error) { - + // upgrade to write lock store.dbsLock.Lock() defer store.dbsLock.Unlock() // double check after getting the write lock if db, found := store.dbs[bucket]; found { - return db, nil + return db, bucket, shortPath, nil } // create db db, err := store.loadDB(bucket) if err != nil { - return nil, err + return nil, bucket, shortPath, err } - store.dbs[bucket] = db - return db, nil + return db, bucket, shortPath, nil } func (store *LevelDB3Store) closeDB(bucket string) { @@ -188,7 +177,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { value = weed_util.MaybeGzipData(value) } @@ -198,7 +187,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } - // println("saved", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) return nil } @@ -235,7 +224,7 @@ func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.Fu return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } - // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data)) + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) return entry, nil } @@ -339,10 +328,10 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb3/leveldb3_store_bucket.go b/weed/filer/leveldb3/leveldb3_store_bucket.go deleted file mode 100644 index 8cd3732ae..000000000 --- a/weed/filer/leveldb3/leveldb3_store_bucket.go +++ /dev/null @@ -1,23 +0,0 @@ -package leveldb - -import ( - "github.com/seaweedfs/seaweedfs/weed/filer" - "os" -) - -var _ filer.BucketAware = (*LevelDB3Store)(nil) - -func (store *LevelDB3Store) OnBucketCreation(bucket string) { - store.createDB(bucket) -} - -func (store *LevelDB3Store) OnBucketDeletion(bucket string) { - store.closeDB(bucket) - if bucket != "" { // just to make sure - os.RemoveAll(store.dir + "/" + bucket) - } -} - -func (store *LevelDB3Store) CanDropWholeBucket() bool { - return true -} diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go index 984a25b51..18d782b80 100644 --- a/weed/filer/leveldb3/leveldb3_store_kv.go +++ b/weed/filer/leveldb3/leveldb3_store_kv.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/syndtr/goleveldb/leveldb" ) @@ -13,7 +13,7 @@ func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) err = store.dbs[DEFAULT].Put(key, value, nil) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -28,7 +28,7 @@ func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte } if err != nil { - return nil, fmt.Errorf("kv get: %w", err) + return nil, fmt.Errorf("kv get: %v", err) } return @@ -39,7 +39,7 @@ func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error err = store.dbs[DEFAULT].Delete(key, nil) if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/leveldb3/leveldb3_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go index 6056e0955..823c3a1bf 100644 --- a/weed/filer/leveldb3/leveldb3_store_test.go +++ b/weed/filer/leveldb3/leveldb3_store_test.go @@ -2,15 +2,14 @@ package leveldb import ( "context" - "github.com/seaweedfs/seaweedfs/weed/pb" "testing" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDB3Store{} store.initialize(dir) @@ -29,7 +28,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false, testFiler.MaxFilenameLength); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -63,7 +62,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) + testFiler := filer.NewFiler(nil, nil, "", "", "", "", "", nil) dir := t.TempDir() store := &LevelDB3Store{} store.initialize(dir) diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go index 1ea334224..c672ce342 100644 --- a/weed/filer/meta_aggregator.go +++ b/weed/filer/meta_aggregator.go @@ -3,36 +3,33 @@ package filer import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/cluster" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" "io" - "strings" "sync" - "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - + "github.com/golang/protobuf/proto" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" ) type MetaAggregator struct { - filer *Filer - self pb.ServerAddress - isLeader bool - grpcDialOption grpc.DialOption - MetaLogBuffer *log_buffer.LogBuffer - peerChans map[pb.ServerAddress]chan struct{} - peerChansLock sync.Mutex + filer *Filer + self pb.ServerAddress + isLeader bool + grpcDialOption grpc.DialOption + MetaLogBuffer *log_buffer.LogBuffer + peerStatues map[pb.ServerAddress]int + peerStatuesLock sync.Mutex // notifying clients - ListenersLock sync.Mutex - ListenersCond *sync.Cond - ListenersWaits int64 // Atomic counter + ListenersLock sync.Mutex + ListenersCond *sync.Cond } // MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk. @@ -42,58 +39,67 @@ func NewMetaAggregator(filer *Filer, self pb.ServerAddress, grpcDialOption grpc. filer: filer, self: self, grpcDialOption: grpcDialOption, - peerChans: make(map[pb.ServerAddress]chan struct{}), + peerStatues: make(map[pb.ServerAddress]int), } t.ListenersCond = sync.NewCond(&t.ListenersLock) - t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, nil, func() { - if atomic.LoadInt64(&t.ListenersWaits) > 0 { - t.ListenersCond.Broadcast() - } + t.MetaLogBuffer = log_buffer.NewLogBuffer("aggr", LogFlushInterval, nil, func() { + t.ListenersCond.Broadcast() }) return t } func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) { - ma.peerChansLock.Lock() - defer ma.peerChansLock.Unlock() + if update.NodeType != cluster.FilerType { + return + } address := pb.ServerAddress(update.Address) if update.IsAdd { - // cancel previous subscription if any - if prevChan, found := ma.peerChans[address]; found { - close(prevChan) + // every filer should subscribe to a new filer + if ma.setActive(address, true) { + go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom) } - stopChan := make(chan struct{}) - ma.peerChans[address] = stopChan - go ma.loopSubscribeToOneFiler(ma.filer, ma.self, address, startFrom, stopChan) } else { - if prevChan, found := ma.peerChans[address]; found { - close(prevChan) - delete(ma.peerChans, address) - } + ma.setActive(address, false) } } -func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) { +func (ma *MetaAggregator) setActive(address pb.ServerAddress, isActive bool) (notDuplicated bool) { + ma.peerStatuesLock.Lock() + defer ma.peerStatuesLock.Unlock() + if isActive { + if _, found := ma.peerStatues[address]; found { + ma.peerStatues[address] += 1 + } else { + ma.peerStatues[address] = 1 + notDuplicated = true + } + } else { + if _, found := ma.peerStatues[address]; found { + delete(ma.peerStatues, address) + } + } + return +} +func (ma *MetaAggregator) isActive(address pb.ServerAddress) (isActive bool) { + ma.peerStatuesLock.Lock() + defer ma.peerStatuesLock.Unlock() + var count int + count, isActive = ma.peerStatues[address] + return count > 0 && isActive +} + +func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time) { lastTsNs := startFrom.UnixNano() for { glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs) nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs) - - // check stopChan to see if we should stop - select { - case <-stopChan: - glog.V(0).Infof("stop subscribing peer %s meta change", peer) + if !ma.isActive(peer) { + glog.V(0).Infof("stop subscribing remote %s meta change", peer) return - default: } - if err != nil { - errLvl := glog.Level(0) - if strings.Contains(err.Error(), "duplicated local subscription detected") { - errLvl = glog.Level(4) - } - glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err) + glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err) } if lastTsNs < nextLastTsNs { lastTsNs = nextLastTsNs @@ -172,28 +178,25 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, } dir := event.Directory // println("received meta change", dir, "size", len(data)) - ma.MetaLogBuffer.AddDataToBuffer([]byte(dir), data, event.TsNs) + ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs) if maybeReplicateMetadataChange != nil { maybeReplicateMetadataChange(event) } return nil } - glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId) - err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFileId) + err = pb.WithFilerClient(true, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - atomic.AddInt32(&ma.filer.UniqueFilerEpoch, 1) stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ - ClientName: "filer:" + string(self), - PathPrefix: "/", - SinceNs: lastTsNs, - ClientId: ma.filer.UniqueFilerId, - ClientEpoch: atomic.LoadInt32(&ma.filer.UniqueFilerEpoch), + ClientName: "filer:" + string(self), + PathPrefix: "/", + SinceNs: lastTsNs, + ClientId: int32(ma.filer.UniqueFileId), }) if err != nil { - glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err) - return fmt.Errorf("subscribe: %w", err) + return fmt.Errorf("subscribe: %v", err) } for { @@ -202,13 +205,11 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, return nil } if listenErr != nil { - glog.V(0).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr) return listenErr } if err := processEventFn(resp); err != nil { - glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err) - return fmt.Errorf("process %v: %w", resp, err) + return fmt.Errorf("process %v: %v", resp, err) } f.onMetadataChangeEvent(resp) @@ -219,7 +220,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, } func (ma *MetaAggregator) readFilerStoreSignature(peer pb.ServerAddress) (sig int32, err error) { - err = pb.WithFilerClient(false, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithFilerClient(false, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return err @@ -234,15 +235,10 @@ const ( MetaOffsetPrefix = "Meta" ) -func GetPeerMetaOffsetKey(peerSignature int32) []byte { - key := []byte(MetaOffsetPrefix + "xxxx") - util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) - return key -} - func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignature int32) (lastTsNs int64, err error) { - key := GetPeerMetaOffsetKey(peerSignature) + key := []byte(MetaOffsetPrefix + "xxxx") + util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) value, err := f.Store.KvGet(context.Background(), key) @@ -259,7 +255,8 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSignature int32, lastTsNs int64) (err error) { - key := GetPeerMetaOffsetKey(peerSignature) + key := []byte(MetaOffsetPrefix + "xxxx") + util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) value := make([]byte, 8) util.Uint64toBytes(value, uint64(lastTsNs)) diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go index f6b009e92..feb76278b 100644 --- a/weed/filer/meta_replay.go +++ b/weed/filer/meta_replay.go @@ -2,11 +2,10 @@ package filer import ( "context" - "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) error { @@ -36,39 +35,3 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err return nil } - -// ParallelProcessDirectoryStructure processes each entry in parallel, and also ensure parent directories are processed first. -// This also assumes the parent directories are in the entryChan already. -func ParallelProcessDirectoryStructure(entryChan chan *Entry, concurrency int, eachEntryFn func(entry *Entry) error) (firstErr error) { - - executors := util.NewLimitedConcurrentExecutor(concurrency) - - var wg sync.WaitGroup - for entry := range entryChan { - wg.Add(1) - if entry.IsDirectory() { - func() { - defer wg.Done() - if err := eachEntryFn(entry); err != nil { - if firstErr == nil { - firstErr = err - } - } - }() - } else { - executors.Execute(func() { - defer wg.Done() - if err := eachEntryFn(entry); err != nil { - if firstErr == nil { - firstErr = err - } - } - }) - } - if firstErr != nil { - break - } - } - wg.Wait() - return -} diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go index 21463dc32..83686bfe7 100644 --- a/weed/filer/mongodb/mongodb_store.go +++ b/weed/filer/mongodb/mongodb_store.go @@ -2,21 +2,16 @@ package mongodb import ( "context" - "crypto/tls" - "crypto/x509" "fmt" - "os" - "regexp" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" + "time" ) func init() { @@ -43,44 +38,17 @@ func (store *MongodbStore) Initialize(configuration util.Configuration, prefix s store.database = configuration.GetString(prefix + "database") store.collectionName = "filemeta" poolSize := configuration.GetInt(prefix + "option_pool_size") - uri := configuration.GetString(prefix + "uri") - ssl := configuration.GetBool(prefix + "ssl") - sslCAFile := configuration.GetString(prefix + "ssl_ca_file") - sslCertFile := configuration.GetString(prefix + "ssl_cert_file") - sslKeyFile := configuration.GetString(prefix + "ssl_key_file") - username := configuration.GetString(prefix + "username") - password := configuration.GetString(prefix + "password") - insecure_skip_verify := configuration.GetBool(prefix + "insecure_skip_verify") - - return store.connection(uri, uint64(poolSize), ssl, sslCAFile, sslCertFile, sslKeyFile, username, password, insecure_skip_verify) + return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize)) } -func (store *MongodbStore) connection(uri string, poolSize uint64, ssl bool, sslCAFile, sslCertFile, sslKeyFile string, username, password string, insecure bool) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - +func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) opts := options.Client().ApplyURI(uri) if poolSize > 0 { opts.SetMaxPoolSize(poolSize) } - if ssl { - tlsConfig, err := configureTLS(sslCAFile, sslCertFile, sslKeyFile, insecure) - if err != nil { - return err - } - opts.SetTLSConfig(tlsConfig) - } - - if username != "" && password != "" { - creds := options.Credential{ - Username: username, - Password: password, - } - opts.SetAuth(creds) - } - client, err := mongo.Connect(ctx, opts) if err != nil { return err @@ -88,36 +56,10 @@ func (store *MongodbStore) connection(uri string, poolSize uint64, ssl bool, ssl c := client.Database(store.database).Collection(store.collectionName) err = store.indexUnique(c) - store.connect = client return err } -func configureTLS(caFile, certFile, keyFile string, insecure bool) (*tls.Config, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, fmt.Errorf("could not load client key pair: %s", err) - } - - caCert, err := os.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate: %s", err) - } - - caCertPool := x509.NewCertPool() - if !caCertPool.AppendCertsFromPEM(caCert) { - return nil, fmt.Errorf("failed to append CA certificate") - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - InsecureSkipVerify: insecure, - } - - return tlsConfig, nil -} - func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error { _, err := c.Indexes().CreateOne(context.Background(), index, opts) return err @@ -130,7 +72,7 @@ func (store *MongodbStore) indexUnique(c *mongo.Collection) error { *unique = true index := mongo.IndexModel{ - Keys: bson.D{{Key: "directory", Value: int32(1)}, {Key: "name", Value: int32(1)}}, + Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}}, Options: &options.IndexOptions{ Unique: unique, }, @@ -152,35 +94,28 @@ func (store *MongodbStore) RollbackTransaction(ctx context.Context) error { } func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + return store.UpdateEntry(ctx, entry) + } func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + dir, name := entry.FullPath.DirAndName() - - // Validate directory and name to prevent potential injection - // Note: BSON library already provides type safety, but we validate for defense in depth - if strings.ContainsAny(dir, "\x00") || strings.ContainsAny(name, "\x00") { - return fmt.Errorf("invalid path contains null bytes: %s", entry.FullPath) - } - meta, err := entry.EncodeAttributesAndChunks() if err != nil { return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } c := store.connect.Database(store.database).Collection(store.collectionName) opts := options.Update().SetUpsert(true) - // Use BSON builders for type-safe query construction (prevents injection) - // lgtm[go/sql-injection] - // Safe: Using BSON type-safe builders (bson.D) + validated inputs (null byte check above) - filter := bson.D{{Key: "directory", Value: dir}, {Key: "name", Value: name}} - update := bson.D{{Key: "$set", Value: bson.D{{Key: "meta", Value: meta}}}} + filter := bson.D{{"directory", dir}, {"name", name}} + update := bson.D{{"$set", bson.D{{"meta", meta}}}} _, err = c.UpdateOne(ctx, filter, update, opts) @@ -192,23 +127,14 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) } func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + dir, name := fullpath.DirAndName() - - // Validate directory and name to prevent potential injection - // Note: BSON library already provides type safety, but we validate for defense in depth - if strings.ContainsAny(dir, "\x00") || strings.ContainsAny(name, "\x00") { - return nil, fmt.Errorf("invalid path contains null bytes: %s", fullpath) - } - var data Model - // Use BSON builders for type-safe query construction (prevents injection) - // lgtm[go/sql-injection] - // Safe: Using BSON type-safe builders (bson.M) + validated inputs (null byte check above) var where = bson.M{"directory": dir, "name": name} err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) if err != mongo.ErrNoDocuments && err != nil { - glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err) + glog.Errorf("find %s: %v", fullpath, err) return nil, filer_pb.ErrNotFound } @@ -229,15 +155,9 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath } func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + dir, name := fullpath.DirAndName() - // Validate directory and name to prevent potential injection - if strings.ContainsAny(dir, "\x00") || strings.ContainsAny(name, "\x00") { - return fmt.Errorf("invalid path contains null bytes: %s", fullpath) - } - - // lgtm[go/sql-injection] - // Safe: Using BSON type-safe builders (bson.M) + validated inputs (null byte check above) where := bson.M{"directory": dir, "name": name} _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where) if err != nil { @@ -248,13 +168,7 @@ func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPa } func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { - // Validate path to prevent potential injection - if strings.ContainsAny(string(fullpath), "\x00") { - return fmt.Errorf("invalid path contains null bytes: %s", fullpath) - } - // lgtm[go/sql-injection] - // Safe: Using BSON type-safe builders (bson.M) + validated inputs (null byte check above) where := bson.M{"directory": fullpath} _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where) if err != nil { @@ -265,36 +179,17 @@ func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath ut } func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - // Validate inputs to prevent potential injection - if strings.ContainsAny(string(dirPath), "\x00") || strings.ContainsAny(startFileName, "\x00") || strings.ContainsAny(prefix, "\x00") { - return "", fmt.Errorf("invalid path contains null bytes") - } + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} - // lgtm[go/sql-injection] - // Safe: Using BSON type-safe builders (bson.M) + validated inputs (null byte check above) - // Safe: regex uses regexp.QuoteMeta to escape special characters - where := bson.M{ - "directory": string(dirPath), - } +func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - nameQuery := bson.M{} - - if len(prefix) > 0 { - nameQuery["$regex"] = "^" + regexp.QuoteMeta(prefix) - } - - if len(startFileName) > 0 { - if includeStartFile { - nameQuery["$gte"] = startFileName - } else { - nameQuery["$gt"] = startFileName + var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}} + if includeStartFile { + where["name"] = bson.M{ + "$gte": startFileName, } } - - if len(nameQuery) > 0 { - where["name"] = nameQuery - } - optLimit := int64(limit) opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}} cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts) @@ -315,7 +210,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir lastFileName = data.Name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } @@ -326,18 +221,13 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } if err := cur.Close(ctx); err != nil { - glog.V(0).InfofCtx(ctx, "list iterator close: %v", err) + glog.V(0).Infof("list iterator close: %v", err) } return lastFileName, err } -func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) -} - func (store *MongodbStore) Shutdown() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) store.connect.Disconnect(ctx) } diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go index 13d2dd08c..59b8f1d93 100644 --- a/weed/filer/mongodb/mongodb_store_kv.go +++ b/weed/filer/mongodb/mongodb_store_kv.go @@ -3,9 +3,8 @@ package mongodb import ( "context" "fmt" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -24,7 +23,7 @@ func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) _, err = c.UpdateOne(ctx, filter, update, opts) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -38,7 +37,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, var where = bson.M{"directory": dir, "name": name} err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) if err != mongo.ErrNoDocuments && err != nil { - glog.ErrorfCtx(ctx, "kv get: %v", err) + glog.Errorf("kv get: %v", err) return nil, filer.ErrKvNotFound } @@ -56,7 +55,7 @@ func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) where := bson.M{"directory": dir, "name": name} _, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go index a2e07002b..93d3e3f9e 100644 --- a/weed/filer/mysql/mysql_sql_gen.go +++ b/weed/filer/mysql/mysql_sql_gen.go @@ -3,8 +3,8 @@ package mysql import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" _ "github.com/go-sql-driver/mysql" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" ) type SqlGenMysql struct { @@ -21,32 +21,32 @@ func (gen *SqlGenMysql) GetSqlInsert(tableName string) string { if gen.UpsertQueryTemplate != "" { return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) } else { - return fmt.Sprintf("INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES(?,?,?,?)", tableName) + return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName) } } func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string { - return fmt.Sprintf("UPDATE `%s` SET `meta` = ? WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName) + return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName) } func (gen *SqlGenMysql) GetSqlFind(tableName string) string { - return fmt.Sprintf("SELECT `meta` FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName) + return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) } func (gen *SqlGenMysql) GetSqlDelete(tableName string) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `name` = ? AND `directory` = ?", tableName) + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) } func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE `dirhash` = ? AND `directory` = ?", tableName) + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName) } func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string { - return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` > ? AND `directory` = ? AND `name` LIKE ? ORDER BY `name` ASC LIMIT ?", tableName) + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) } func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string { - return fmt.Sprintf("SELECT `name`, `meta` FROM `%s` WHERE `dirhash` = ? AND `name` >= ? AND `directory` = ? AND `name` LIKE ? ORDER BY `name` ASC LIMIT ?", tableName) + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) } func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string { diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go index 9dbe09069..fbaa4d5f9 100644 --- a/weed/filer/mysql/mysql_store.go +++ b/weed/filer/mysql/mysql_store.go @@ -1,25 +1,19 @@ package mysql import ( - "crypto/tls" - "crypto/x509" "database/sql" "fmt" - "github.com/go-sql-driver/mysql" - "os" - "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" _ "github.com/go-sql-driver/mysql" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/util" ) const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin" - CONNECTION_TLS_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin&tls=mysql-tls" + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" ) func init() { @@ -36,7 +30,6 @@ func (store *MysqlStore) GetName() string { func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString(prefix+"dsn"), configuration.GetString(prefix+"upsertQuery"), configuration.GetBool(prefix+"enableUpsert"), configuration.GetString(prefix+"username"), @@ -48,15 +41,11 @@ func (store *MysqlStore) Initialize(configuration util.Configuration, prefix str configuration.GetInt(prefix+"connection_max_open"), configuration.GetInt(prefix+"connection_max_lifetime_seconds"), configuration.GetBool(prefix+"interpolateParams"), - configuration.GetBool(prefix+"enable_tls"), - configuration.GetString(prefix+"ca_crt"), - configuration.GetString(prefix+"client_crt"), - configuration.GetString(prefix+"client_key"), ) } -func (store *MysqlStore) initialize(dsn string, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, - maxLifetimeSeconds int, interpolateParams bool, enableTls bool, caCrtDir string, clientCrtDir string, clientKeyDir string) (err error) { +func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, + maxLifetimeSeconds int, interpolateParams bool) (err error) { store.SupportBucketTable = false if !enableUpsert { @@ -64,57 +53,23 @@ func (store *MysqlStore) initialize(dsn string, upsertQuery string, enableUpsert } store.SqlGenerator = &SqlGenMysql{ CreateTableSqlTemplate: "", - DropTableSqlTemplate: "DROP TABLE `%s`", + DropTableSqlTemplate: "drop table `%s`", UpsertQueryTemplate: upsertQuery, } - if enableTls { - rootCertPool := x509.NewCertPool() - pem, err := os.ReadFile(caCrtDir) - if err != nil { - return err - } - if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { - return fmt.Errorf("failed to append root certificate") - } - - clientCert := make([]tls.Certificate, 0) - if cert, err := tls.LoadX509KeyPair(clientCrtDir, clientKeyDir); err == nil { - clientCert = append(clientCert, cert) - } - - tlsConfig := &tls.Config{ - RootCAs: rootCertPool, - Certificates: clientCert, - MinVersion: tls.VersionTLS12, - } - err = mysql.RegisterTLSConfig("mysql-tls", tlsConfig) - if err != nil { - return err - } - } - - if dsn == "" { - pattern := CONNECTION_URL_PATTERN - if enableTls { - pattern = CONNECTION_TLS_URL_PATTERN - } - dsn = fmt.Sprintf(pattern, user, password, hostname, port, database) - if interpolateParams { - dsn += "&interpolateParams=true" - } - } - cfg, err := mysql.ParseDSN(dsn) - if err != nil { - return fmt.Errorf("can not parse DSN error:%w", err) + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "", hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + adaptedSqlUrl += "&interpolateParams=true" } var dbErr error - store.DB, dbErr = sql.Open("mysql", dsn) + store.DB, dbErr = sql.Open("mysql", sqlUrl) if dbErr != nil { store.DB.Close() store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", strings.ReplaceAll(dsn, cfg.Passwd, ""), err) + return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err) } store.DB.SetMaxIdleConns(maxIdle) @@ -122,7 +77,7 @@ func (store *MysqlStore) initialize(dsn string, upsertQuery string, enableUpsert store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", strings.ReplaceAll(dsn, cfg.Passwd, ""), err) + return fmt.Errorf("connect to %s error:%v", sqlUrl, err) } return nil diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go index 2bce3c063..e50480150 100644 --- a/weed/filer/mysql2/mysql2_store.go +++ b/weed/filer/mysql2/mysql2_store.go @@ -7,19 +7,17 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/mysql" + "github.com/chrislusf/seaweedfs/weed/util" _ "github.com/go-sql-driver/mysql" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/filer/mysql" - "github.com/seaweedfs/seaweedfs/weed/util" ) const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?collation=utf8mb4_bin" + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" ) -var _ filer.BucketAware = (*MysqlStore2)(nil) - func init() { filer.Stores = append(filer.Stores, &MysqlStore2{}) } @@ -58,7 +56,7 @@ func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpse } store.SqlGenerator = &mysql.SqlGenMysql{ CreateTableSqlTemplate: createTable, - DropTableSqlTemplate: "DROP TABLE `%s`", + DropTableSqlTemplate: "drop table `%s`", UpsertQueryTemplate: upsertQuery, } @@ -82,7 +80,7 @@ func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpse store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", adaptedSqlUrl, err) + return fmt.Errorf("connect to %s error:%v", sqlUrl, err) } if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil && !strings.Contains(err.Error(), "table already exist") { diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go index 8832e1a45..6cee3d2da 100644 --- a/weed/filer/postgres/postgres_sql_gen.go +++ b/weed/filer/postgres/postgres_sql_gen.go @@ -3,8 +3,8 @@ package postgres import ( "fmt" - _ "github.com/jackc/pgx/v5/stdlib" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + _ "github.com/lib/pq" ) type SqlGenPostgres struct { diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go index 1c60575ee..a1e16a92a 100644 --- a/weed/filer/postgres/postgres_store.go +++ b/weed/filer/postgres/postgres_store.go @@ -1,22 +1,18 @@ -// Package postgres provides PostgreSQL filer store implementation -// Migrated from github.com/lib/pq to github.com/jackc/pgx for: -// - Active development and support -// - Better performance and PostgreSQL-specific features -// - Improved error handling (no more panics) -// - Built-in logging capabilities -// - Superior SSL certificate support package postgres import ( "database/sql" "fmt" - "strconv" "time" - _ "github.com/jackc/pgx/v5/stdlib" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" ) func init() { @@ -42,18 +38,13 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix configuration.GetString(prefix+"database"), configuration.GetString(prefix+"schema"), configuration.GetString(prefix+"sslmode"), - configuration.GetString(prefix+"sslcert"), - configuration.GetString(prefix+"sslkey"), - configuration.GetString(prefix+"sslrootcert"), - configuration.GetString(prefix+"sslcrl"), - configuration.GetBool(prefix+"pgbouncer_compatible"), configuration.GetInt(prefix+"connection_max_idle"), configuration.GetInt(prefix+"connection_max_open"), configuration.GetInt(prefix+"connection_max_lifetime_seconds"), ) } -func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode, sslcert, sslkey, sslrootcert, sslcrl string, pgbouncerCompatible bool, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { +func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { store.SupportBucketTable = false if !enableUpsert { @@ -65,38 +56,7 @@ func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, us UpsertQueryTemplate: upsertQuery, } - // pgx-optimized connection string with better timeouts and connection handling - sqlUrl := "connect_timeout=30" - - // PgBouncer compatibility: add prefer_simple_protocol=true when needed - // This avoids prepared statement issues with PgBouncer's transaction pooling mode - if pgbouncerCompatible { - sqlUrl += " prefer_simple_protocol=true" - } - - if hostname != "" { - sqlUrl += " host=" + hostname - } - if port != 0 { - sqlUrl += " port=" + strconv.Itoa(port) - } - - // SSL configuration - pgx provides better SSL support than lib/pq - if sslmode != "" { - sqlUrl += " sslmode=" + sslmode - } - if sslcert != "" { - sqlUrl += " sslcert=" + sslcert - } - if sslkey != "" { - sqlUrl += " sslkey=" + sslkey - } - if sslrootcert != "" { - sqlUrl += " sslrootcert=" + sslrootcert - } - if sslcrl != "" { - sqlUrl += " sslcrl=" + sslcrl - } + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) if user != "" { sqlUrl += " user=" + user } @@ -109,18 +69,16 @@ func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, us sqlUrl += " dbname=" + database adaptedSqlUrl += " dbname=" + database } - if schema != "" && !pgbouncerCompatible { + if schema != "" { sqlUrl += " search_path=" + schema adaptedSqlUrl += " search_path=" + schema } var dbErr error - store.DB, dbErr = sql.Open("pgx", sqlUrl) + store.DB, dbErr = sql.Open("postgres", sqlUrl) if dbErr != nil { - if store.DB != nil { - store.DB.Close() - } + store.DB.Close() store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr) + return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err) } store.DB.SetMaxIdleConns(maxIdle) @@ -128,7 +86,7 @@ func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, us store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", adaptedSqlUrl, err) + return fmt.Errorf("connect to %s error:%v", sqlUrl, err) } return nil diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go index 23d811816..0f573d8d0 100644 --- a/weed/filer/postgres2/postgres2_store.go +++ b/weed/filer/postgres2/postgres2_store.go @@ -1,27 +1,21 @@ -// Package postgres2 provides PostgreSQL filer store implementation with bucket support -// Migrated from github.com/lib/pq to github.com/jackc/pgx for: -// - Active development and support -// - Better performance and PostgreSQL-specific features -// - Improved error handling (no more panics) -// - Built-in logging capabilities -// - Superior SSL certificate support package postgres2 import ( "context" "database/sql" "fmt" - "strconv" "time" - _ "github.com/jackc/pgx/v5/stdlib" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/filer/postgres" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/postgres" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" ) -var _ filer.BucketAware = (*PostgresStore2)(nil) +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) func init() { filer.Stores = append(filer.Stores, &PostgresStore2{}) @@ -47,18 +41,13 @@ func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix configuration.GetString(prefix+"database"), configuration.GetString(prefix+"schema"), configuration.GetString(prefix+"sslmode"), - configuration.GetString(prefix+"sslcert"), - configuration.GetString(prefix+"sslkey"), - configuration.GetString(prefix+"sslrootcert"), - configuration.GetString(prefix+"sslcrl"), - configuration.GetBool(prefix+"pgbouncer_compatible"), configuration.GetInt(prefix+"connection_max_idle"), configuration.GetInt(prefix+"connection_max_open"), configuration.GetInt(prefix+"connection_max_lifetime_seconds"), ) } -func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode, sslcert, sslkey, sslrootcert, sslcrl string, pgbouncerCompatible bool, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { +func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { store.SupportBucketTable = true if !enableUpsert { @@ -70,38 +59,7 @@ func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableU UpsertQueryTemplate: upsertQuery, } - // pgx-optimized connection string with better timeouts and connection handling - sqlUrl := "connect_timeout=30" - - // PgBouncer compatibility: add prefer_simple_protocol=true when needed - // This avoids prepared statement issues with PgBouncer's transaction pooling mode - if pgbouncerCompatible { - sqlUrl += " prefer_simple_protocol=true" - } - - if hostname != "" { - sqlUrl += " host=" + hostname - } - if port != 0 { - sqlUrl += " port=" + strconv.Itoa(port) - } - - // SSL configuration - pgx provides better SSL support than lib/pq - if sslmode != "" { - sqlUrl += " sslmode=" + sslmode - } - if sslcert != "" { - sqlUrl += " sslcert=" + sslcert - } - if sslkey != "" { - sqlUrl += " sslkey=" + sslkey - } - if sslrootcert != "" { - sqlUrl += " sslrootcert=" + sslrootcert - } - if sslcrl != "" { - sqlUrl += " sslcrl=" + sslcrl - } + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) if user != "" { sqlUrl += " user=" + user } @@ -114,18 +72,16 @@ func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableU sqlUrl += " dbname=" + database adaptedSqlUrl += " dbname=" + database } - if schema != "" && !pgbouncerCompatible { + if schema != "" { sqlUrl += " search_path=" + schema adaptedSqlUrl += " search_path=" + schema } var dbErr error - store.DB, dbErr = sql.Open("pgx", sqlUrl) + store.DB, dbErr = sql.Open("postgres", sqlUrl) if dbErr != nil { - if store.DB != nil { - store.DB.Close() - } + store.DB.Close() store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, dbErr) + return fmt.Errorf("can not connect to %s error:%v", adaptedSqlUrl, err) } store.DB.SetMaxIdleConns(maxIdle) @@ -133,7 +89,7 @@ func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableU store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", adaptedSqlUrl, err) + return fmt.Errorf("connect to %s error:%v", sqlUrl, err) } if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil { diff --git a/weed/filer/read_remote.go b/weed/filer/read_remote.go index 992d1e95a..6372dac72 100644 --- a/weed/filer/read_remote.go +++ b/weed/filer/read_remote.go @@ -2,13 +2,13 @@ package filer import ( "context" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (entry *Entry) IsInRemoteOnly() bool { - return len(entry.GetChunks()) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0 + return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0 } func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation { diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go index 66f4c0bf8..8d0e6567b 100644 --- a/weed/filer/read_write.go +++ b/weed/filer/read_write.go @@ -2,11 +2,10 @@ package filer import ( "bytes" - "context" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.SeaweedFilerClient, dir, name string, byteBuffer *bytes.Buffer) error { @@ -15,7 +14,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed Directory: dir, Name: name, } - respLookupEntry, err := filer_pb.LookupEntry(context.Background(), filerClient, request) + respLookupEntry, err := filer_pb.LookupEntry(filerClient, request) if err != nil { return err } @@ -24,7 +23,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed return err } - return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.GetChunks(), 0, int64(FileSize(respLookupEntry.Entry))) + return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, int64(FileSize(respLookupEntry.Entry))) } @@ -33,7 +32,7 @@ func ReadInsideFiler(filerClient filer_pb.SeaweedFilerClient, dir, name string) Directory: dir, Name: name, } - respLookupEntry, err := filer_pb.LookupEntry(context.Background(), filerClient, request) + respLookupEntry, err := filer_pb.LookupEntry(filerClient, request) if err != nil { return } @@ -43,13 +42,13 @@ func ReadInsideFiler(filerClient filer_pb.SeaweedFilerClient, dir, name string) func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error { - resp, err := filer_pb.LookupEntry(context.Background(), client, &filer_pb.LookupDirectoryEntryRequest{ + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, Name: name, }) if err == filer_pb.ErrNotFound { - err = filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{ + err = filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, @@ -69,7 +68,7 @@ func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, conte entry.Content = content entry.Attributes.Mtime = time.Now().Unix() entry.Attributes.FileSize = uint64(len(content)) - err = filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{ + err = filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ Directory: dir, Entry: entry, }) diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go index 27d773f49..b938083d8 100644 --- a/weed/filer/reader_at.go +++ b/weed/filer/reader_at.go @@ -7,20 +7,21 @@ import ( "math/rand" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) type ChunkReadAt struct { masterClient *wdclient.MasterClient - chunkViews *IntervalList[*ChunkView] + chunkViews []*ChunkView + readerLock sync.Mutex fileSize int64 readerCache *ReaderCache readerPattern *ReaderPattern lastChunkFid string - ctx context.Context // Context used for cancellation during chunk read operations } var _ = io.ReaderAt(&ChunkReadAt{}) @@ -30,7 +31,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp vidCache := make(map[string]*filer_pb.Locations) var vicCacheLock sync.RWMutex - return func(ctx context.Context, fileId string) (targetUrls []string, err error) { + return func(fileId string) (targetUrls []string, err error) { vid := VolumeId(fileId) vicCacheLock.RLock() locations, found := vidCache[vid] @@ -39,7 +40,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp if !found { util.Retry("lookup volume "+vid, func() error { err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -48,7 +49,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp locations = resp.LocationsMap[vid] if locations == nil || len(locations.Locations) == 0 { - glog.V(0).InfofCtx(ctx, "failed to locate %s", fileId) + glog.V(0).Infof("failed to locate %s", fileId) return fmt.Errorf("failed to locate %s", fileId) } vicCacheLock.Lock() @@ -65,44 +66,31 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp return nil, err } - fcDataCenter := filerClient.GetDataCenter() - var sameDcTargetUrls, otherTargetUrls []string for _, loc := range locations.Locations { volumeServerAddress := filerClient.AdjustedUrl(loc) targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) - if fcDataCenter == "" || fcDataCenter != loc.DataCenter { - otherTargetUrls = append(otherTargetUrls, targetUrl) - } else { - sameDcTargetUrls = append(sameDcTargetUrls, targetUrl) - } + targetUrls = append(targetUrls, targetUrl) } - rand.Shuffle(len(sameDcTargetUrls), func(i, j int) { - sameDcTargetUrls[i], sameDcTargetUrls[j] = sameDcTargetUrls[j], sameDcTargetUrls[i] - }) - rand.Shuffle(len(otherTargetUrls), func(i, j int) { - otherTargetUrls[i], otherTargetUrls[j] = otherTargetUrls[j], otherTargetUrls[i] - }) - // Prefer same data center - targetUrls = append(sameDcTargetUrls, otherTargetUrls...) + + for i := len(targetUrls) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + targetUrls[i], targetUrls[j] = targetUrls[j], targetUrls[i] + } + return } } -func NewChunkReaderAtFromClient(ctx context.Context, readerCache *ReaderCache, chunkViews *IntervalList[*ChunkView], fileSize int64) *ChunkReadAt { +func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt { return &ChunkReadAt{ chunkViews: chunkViews, fileSize: fileSize, - readerCache: readerCache, + readerCache: newReaderCache(32, chunkCache, lookupFn), readerPattern: NewReaderPattern(), - ctx: ctx, } } -func (c *ChunkReadAt) Size() int64 { - return c.fileSize -} - func (c *ChunkReadAt) Close() error { c.readerCache.destroy() return nil @@ -112,58 +100,44 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { c.readerPattern.MonitorReadAt(offset, len(p)) - c.chunkViews.Lock.RLock() - defer c.chunkViews.Lock.RUnlock() + c.readerLock.Lock() + defer c.readerLock.Unlock() // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) - n, _, err = c.doReadAt(c.ctx, p, offset) - return + return c.doReadAt(p, offset) } -func (c *ChunkReadAt) ReadAtWithTime(ctx context.Context, p []byte, offset int64) (n int, ts int64, err error) { - - c.readerPattern.MonitorReadAt(offset, len(p)) - - c.chunkViews.Lock.RLock() - defer c.chunkViews.Lock.RUnlock() - - // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) - return c.doReadAt(ctx, p, offset) -} - -func (c *ChunkReadAt) doReadAt(ctx context.Context, p []byte, offset int64) (n int, ts int64, err error) { +func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { startOffset, remaining := offset, int64(len(p)) - var nextChunks *Interval[*ChunkView] - for x := c.chunkViews.Front(); x != nil; x = x.Next { - chunk := x.Value + var nextChunks []*ChunkView + for i, chunk := range c.chunkViews { if remaining <= 0 { break } - if x.Next != nil { - nextChunks = x.Next + if i+1 < len(c.chunkViews) { + nextChunks = c.chunkViews[i+1:] } - if startOffset < chunk.ViewOffset { - gap := chunk.ViewOffset - startOffset - glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) - n += zero(p, startOffset-offset, gap) - startOffset, remaining = chunk.ViewOffset, remaining-gap + if startOffset < chunk.LogicOffset { + gap := int(chunk.LogicOffset - startOffset) + glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.LogicOffset) + n += int(min(int64(gap), remaining)) + startOffset, remaining = chunk.LogicOffset, remaining-int64(gap) if remaining <= 0 { break } } - // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) - chunkStart, chunkStop := max(chunk.ViewOffset, startOffset), min(chunk.ViewOffset+int64(chunk.ViewSize), startOffset+remaining) + // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining) if chunkStart >= chunkStop { continue } - // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) - bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk - ts = chunk.ModifiedTsNs - copied, err := c.readChunkSliceAt(ctx, p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset)) + // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size)) + bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset + copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset)) if err != nil { glog.Errorf("fetching chunk %+v: %v\n", chunk, err) - return copied, ts, err + return copied, err } n += copied @@ -172,17 +146,10 @@ func (c *ChunkReadAt) doReadAt(ctx context.Context, p []byte, offset int64) (n i // glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) - // zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file) - if err == nil && remaining > 0 { - var delta int64 - if c.fileSize >= startOffset { - delta = min(remaining, c.fileSize-startOffset) - startOffset -= offset - } - if delta > 0 { - glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize) - n += zero(p, startOffset, delta) - } + if err == nil && remaining > 0 && c.fileSize > startOffset { + delta := int(min(remaining, c.fileSize-startOffset)) + glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize) + n += delta } if err == nil && offset+int64(len(p)) >= c.fileSize { @@ -194,42 +161,29 @@ func (c *ChunkReadAt) doReadAt(ctx context.Context, p []byte, offset int64) (n i } -func (c *ChunkReadAt) readChunkSliceAt(ctx context.Context, buffer []byte, chunkView *ChunkView, nextChunkViews *Interval[*ChunkView], offset uint64) (n int, err error) { +func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, nextChunkViews []*ChunkView, offset uint64) (n int, err error) { if c.readerPattern.IsRandomMode() { n, err := c.readerCache.chunkCache.ReadChunkAt(buffer, chunkView.FileId, offset) if n > 0 { return n, err } - return fetchChunkRange(ctx, buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset)) + return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset)) } - shouldCache := (uint64(chunkView.ViewOffset) + chunkView.ChunkSize) <= c.readerCache.chunkCache.GetMaxFilePartSizeInCache() - n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), shouldCache) + n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.LogicOffset == 0) if c.lastChunkFid != chunkView.FileId { - if chunkView.OffsetInChunk == 0 { // start of a new chunk + if chunkView.Offset == 0 { // start of a new chunk if c.lastChunkFid != "" { c.readerCache.UnCache(c.lastChunkFid) - } - if nextChunkViews != nil { - c.readerCache.MaybeCache(nextChunkViews) // just read the next chunk if at the very beginning + c.readerCache.MaybeCache(nextChunkViews) + } else { + if len(nextChunkViews) >= 1 { + c.readerCache.MaybeCache(nextChunkViews[:1]) // just read the next chunk if at the very beginning + } } } } c.lastChunkFid = chunkView.FileId return } - -func zero(buffer []byte, start, length int64) int { - if length <= 0 { - return 0 - } - end := min(start+length, int64(len(buffer))) - start = max(start, 0) - - // zero the bytes - for o := start; o < end; o++ { - buffer[o] = 0 - } - return int(end - start) -} diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go index 6c9041cd9..d9afb460c 100644 --- a/weed/filer/reader_at_test.go +++ b/weed/filer/reader_at_test.go @@ -1,11 +1,11 @@ package filer import ( - "bytes" - "context" + "fmt" "io" "math" "strconv" + "sync" "testing" ) @@ -32,173 +32,134 @@ func (m *mockChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64) func (m *mockChunkCache) SetChunk(fileId string, data []byte) { } -func (m *mockChunkCache) GetMaxFilePartSizeInCache() uint64 { - return 0 -} - -func (m *mockChunkCache) IsInCache(fileId string, lockNeeded bool) (answer bool) { - return false -} - func TestReaderAt(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 1, - stop: 2, - fileId: "1", - chunkSize: 9, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 3, - stop: 4, - fileId: "3", - chunkSize: 1, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 5, - stop: 6, - fileId: "5", - chunkSize: 2, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 7, - stop: 9, - fileId: "7", - chunkSize: 2, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 9, - stop: 10, - fileId: "9", - chunkSize: 2, - }) + visibles := []VisibleInterval{ + { + start: 1, + stop: 2, + fileId: "1", + chunkSize: 9, + }, + { + start: 3, + stop: 4, + fileId: "3", + chunkSize: 1, + }, + { + start: 5, + stop: 6, + fileId: "5", + chunkSize: 2, + }, + { + start: 7, + stop: 9, + fileId: "7", + chunkSize: 2, + }, + { + start: 9, + stop: 10, + fileId: "9", + chunkSize: 2, + }, + } readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + readerLock: sync.Mutex{}, fileSize: 10, - readerCache: NewReaderCache(3, &mockChunkCache{}, nil), + readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), } - testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil) - testReadAt(t, readerAt, 0, 12, 10, io.EOF, nil, nil) - testReadAt(t, readerAt, 2, 8, 8, io.EOF, nil, nil) - testReadAt(t, readerAt, 3, 6, 6, nil, nil, nil) + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 0, 12, 10, io.EOF) + testReadAt(t, readerAt, 2, 8, 8, io.EOF) + testReadAt(t, readerAt, 3, 6, 6, nil) } -func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expectedN int, expectedErr error, data, expectedData []byte) { - if data == nil { - data = make([]byte, size) - } - n, _, err := readerAt.doReadAt(context.Background(), data, offset) +func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) { + data := make([]byte, size) + n, err := readerAt.doReadAt(data, offset) - if expectedN != n { - t.Errorf("unexpected read size: %d, expect: %d", n, expectedN) + for _, d := range data { + fmt.Printf("%x", d) + } + fmt.Println() + + if expected != n { + t.Errorf("unexpected read size: %d, expect: %d", n, expected) } if err != expectedErr { t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr) } - if expectedData != nil && !bytes.Equal(data, expectedData) { - t.Errorf("unexpected read data: %v, expect: %v", data, expectedData) - } + } func TestReaderAt0(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 2, - stop: 5, - fileId: "1", - chunkSize: 9, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 7, - stop: 9, - fileId: "2", - chunkSize: 9, - }) + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + { + start: 7, + stop: 9, + fileId: "2", + chunkSize: 9, + }, + } readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + readerLock: sync.Mutex{}, fileSize: 10, - readerCache: NewReaderCache(3, &mockChunkCache{}, nil), + readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), } - testReadAt(t, readerAt, 0, 10, 10, io.EOF, nil, nil) - testReadAt(t, readerAt, 3, 16, 7, io.EOF, nil, nil) - testReadAt(t, readerAt, 3, 5, 5, nil, nil, nil) + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 3, 16, 7, io.EOF) + testReadAt(t, readerAt, 3, 5, 5, nil) - testReadAt(t, readerAt, 11, 5, 0, io.EOF, nil, nil) - testReadAt(t, readerAt, 10, 5, 0, io.EOF, nil, nil) + testReadAt(t, readerAt, 11, 5, 0, io.EOF) + testReadAt(t, readerAt, 10, 5, 0, io.EOF) } func TestReaderAt1(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 2, - stop: 5, - fileId: "1", - chunkSize: 9, - }) + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + } readerAt := &ChunkReadAt{ chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + readerLock: sync.Mutex{}, fileSize: 20, - readerCache: NewReaderCache(3, &mockChunkCache{}, nil), + readerCache: newReaderCache(3, &mockChunkCache{}, nil), readerPattern: NewReaderPattern(), } - testReadAt(t, readerAt, 0, 20, 20, io.EOF, nil, nil) - testReadAt(t, readerAt, 1, 7, 7, nil, nil, nil) - testReadAt(t, readerAt, 0, 1, 1, nil, nil, nil) - testReadAt(t, readerAt, 18, 4, 2, io.EOF, nil, nil) - testReadAt(t, readerAt, 12, 4, 4, nil, nil, nil) - testReadAt(t, readerAt, 4, 20, 16, io.EOF, nil, nil) - testReadAt(t, readerAt, 4, 10, 10, nil, nil, nil) - testReadAt(t, readerAt, 1, 10, 10, nil, nil, nil) + testReadAt(t, readerAt, 0, 20, 20, io.EOF) + testReadAt(t, readerAt, 1, 7, 7, nil) + testReadAt(t, readerAt, 0, 1, 1, nil) + testReadAt(t, readerAt, 18, 4, 2, io.EOF) + testReadAt(t, readerAt, 12, 4, 4, nil) + testReadAt(t, readerAt, 4, 20, 16, io.EOF) + testReadAt(t, readerAt, 4, 10, 10, nil) + testReadAt(t, readerAt, 1, 10, 10, nil) } - -func TestReaderAtGappedChunksDoNotLeak(t *testing.T) { - visibles := NewIntervalList[*VisibleInterval]() - addVisibleInterval(visibles, &VisibleInterval{ - start: 2, - stop: 3, - fileId: "1", - chunkSize: 5, - }) - addVisibleInterval(visibles, &VisibleInterval{ - start: 7, - stop: 9, - fileId: "1", - chunkSize: 4, - }) - - readerAt := &ChunkReadAt{ - chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), - fileSize: 9, - readerCache: NewReaderCache(3, &mockChunkCache{}, nil), - readerPattern: NewReaderPattern(), - } - - testReadAt(t, readerAt, 0, 9, 9, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 0, 1, 0, 0, 0, 0, 1, 1}) - testReadAt(t, readerAt, 1, 8, 8, io.EOF, []byte{2, 2, 2, 2, 2, 2, 2, 2}, []byte{0, 1, 0, 0, 0, 0, 1, 1}) -} - -func TestReaderAtSparseFileDoesNotLeak(t *testing.T) { - readerAt := &ChunkReadAt{ - chunkViews: ViewFromVisibleIntervals(NewIntervalList[*VisibleInterval](), 0, math.MaxInt64), - fileSize: 3, - readerCache: NewReaderCache(3, &mockChunkCache{}, nil), - readerPattern: NewReaderPattern(), - } - - testReadAt(t, readerAt, 0, 3, 3, io.EOF, []byte{2, 2, 2}, []byte{0, 0, 0}) - testReadAt(t, readerAt, 1, 2, 2, io.EOF, []byte{2, 2}, []byte{0, 0}) -} diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go index 11382bed3..4f375e764 100644 --- a/weed/filer/reader_cache.go +++ b/weed/filer/reader_cache.go @@ -1,17 +1,12 @@ package filer import ( - "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/util/mem" + "github.com/chrislusf/seaweedfs/weed/wdclient" "sync" - "sync/atomic" "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/util/mem" - "github.com/seaweedfs/seaweedfs/weed/wdclient" ) type ReaderCache struct { @@ -23,21 +18,21 @@ type ReaderCache struct { } type SingleChunkCacher struct { - completedTimeNew int64 sync.Mutex - parent *ReaderCache - chunkFileId string - data []byte - err error - cipherKey []byte - isGzipped bool - chunkSize int - shouldCache bool - wg sync.WaitGroup - cacheStartedCh chan struct{} + cond *sync.Cond + parent *ReaderCache + chunkFileId string + data []byte + err error + cipherKey []byte + isGzipped bool + chunkSize int + shouldCache bool + wg sync.WaitGroup + completedTime time.Time } -func NewReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn wdclient.LookupFileIdFunctionType) *ReaderCache { +func newReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn wdclient.LookupFileIdFunctionType) *ReaderCache { return &ReaderCache{ limit: limit, chunkCache: chunkCache, @@ -46,7 +41,7 @@ func NewReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn } } -func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { +func (rc *ReaderCache) MaybeCache(chunkViews []*ChunkView) { if rc.lookupFileIdFn == nil { return } @@ -54,31 +49,22 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { rc.Lock() defer rc.Unlock() - if len(rc.downloaders) >= rc.limit { - return - } - - for x := chunkViews; x != nil; x = x.Next { - chunkView := x.Value + for _, chunkView := range chunkViews { if _, found := rc.downloaders[chunkView.FileId]; found { continue } - if rc.chunkCache.IsInCache(chunkView.FileId, true) { - glog.V(4).Infof("%s is in cache", chunkView.FileId) - continue - } if len(rc.downloaders) >= rc.limit { - // abort when slots are filled + // if still no slots, return return } - // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) + // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.LogicOffset) // cache this chunk if not yet - shouldCache := (uint64(chunkView.ViewOffset) + chunkView.ChunkSize) <= rc.chunkCache.GetMaxFilePartSizeInCache() - cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), shouldCache) + cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false) + cacher.wg.Add(1) go cacher.startCaching() - <-cacher.cacheStartedCh + cacher.wg.Wait() rc.downloaders[chunkView.FileId] = cacher } @@ -88,28 +74,26 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byte, isGzipped bool, offset int64, chunkSize int, shouldCache bool) (int, error) { rc.Lock() - + defer rc.Unlock() if cacher, found := rc.downloaders[fileId]; found { if n, err := cacher.readChunkAt(buffer, offset); n != 0 && err == nil { - rc.Unlock() return n, err } } if shouldCache || rc.lookupFileIdFn == nil { n, err := rc.chunkCache.ReadChunkAt(buffer, fileId, uint64(offset)) if n > 0 { - rc.Unlock() return n, err } } - // clean up old downloaders if len(rc.downloaders) >= rc.limit { - oldestFid, oldestTime := "", time.Now().UnixNano() + oldestFid, oldestTime := "", time.Now() for fid, downloader := range rc.downloaders { - completedTime := atomic.LoadInt64(&downloader.completedTimeNew) - if completedTime > 0 && completedTime < oldestTime { - oldestFid, oldestTime = fid, completedTime + if !downloader.completedTime.IsZero() { + if downloader.completedTime.Before(oldestTime) { + oldestFid, oldestTime = fid, downloader.completedTime + } } } if oldestFid != "" { @@ -122,10 +106,10 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt // glog.V(4).Infof("cache1 %s", fileId) cacher := newSingleChunkCacher(rc, fileId, cipherKey, isGzipped, chunkSize, shouldCache) + cacher.wg.Add(1) go cacher.startCaching() - <-cacher.cacheStartedCh + cacher.wg.Wait() rc.downloaders[fileId] = cacher - rc.Unlock() return cacher.readChunkAt(buffer, offset) } @@ -151,26 +135,25 @@ func (rc *ReaderCache) destroy() { } func newSingleChunkCacher(parent *ReaderCache, fileId string, cipherKey []byte, isGzipped bool, chunkSize int, shouldCache bool) *SingleChunkCacher { - return &SingleChunkCacher{ - parent: parent, - chunkFileId: fileId, - cipherKey: cipherKey, - isGzipped: isGzipped, - chunkSize: chunkSize, - shouldCache: shouldCache, - cacheStartedCh: make(chan struct{}), + t := &SingleChunkCacher{ + parent: parent, + chunkFileId: fileId, + cipherKey: cipherKey, + isGzipped: isGzipped, + chunkSize: chunkSize, + shouldCache: shouldCache, } + t.cond = sync.NewCond(t) + return t } func (s *SingleChunkCacher) startCaching() { - s.wg.Add(1) - defer s.wg.Done() s.Lock() defer s.Unlock() - s.cacheStartedCh <- struct{}{} // means this has been started + s.wg.Done() // means this has been started - urlStrings, err := s.parent.lookupFileIdFn(context.Background(), s.chunkFileId) + urlStrings, err := s.parent.lookupFileIdFn(s.chunkFileId) if err != nil { s.err = fmt.Errorf("operation LookupFileId %s failed, err: %v", s.chunkFileId, err) return @@ -178,45 +161,45 @@ func (s *SingleChunkCacher) startCaching() { s.data = mem.Allocate(s.chunkSize) - _, s.err = util_http.RetriedFetchChunkData(context.Background(), s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0, s.chunkFileId) + _, s.err = retriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0) if s.err != nil { mem.Free(s.data) s.data = nil return } + s.completedTime = time.Now() if s.shouldCache { s.parent.chunkCache.SetChunk(s.chunkFileId, s.data) } - atomic.StoreInt64(&s.completedTimeNew, time.Now().UnixNano()) + s.cond.Broadcast() return } func (s *SingleChunkCacher) destroy() { - // wait for all reads to finish before destroying the data - s.wg.Wait() s.Lock() defer s.Unlock() if s.data != nil { mem.Free(s.data) s.data = nil - close(s.cacheStartedCh) } } func (s *SingleChunkCacher) readChunkAt(buf []byte, offset int64) (int, error) { - s.wg.Add(1) - defer s.wg.Done() s.Lock() defer s.Unlock() + for s.completedTime.IsZero() { + s.cond.Wait() + } + if s.err != nil { return 0, s.err } - if len(s.data) <= int(offset) { + if len(s.data) == 0 { return 0, nil } diff --git a/weed/filer/reader_pattern.go b/weed/filer/reader_pattern.go index b0906e99f..ec73c59a2 100644 --- a/weed/filer/reader_pattern.go +++ b/weed/filer/reader_pattern.go @@ -1,16 +1,10 @@ package filer -import ( - "sync/atomic" -) - type ReaderPattern struct { isSequentialCounter int64 lastReadStopOffset int64 } -const ModeChangeLimit = 3 - // For streaming read: only cache the first chunk // For random read: only fetch the requested range, instead of the whole chunk @@ -22,20 +16,14 @@ func NewReaderPattern() *ReaderPattern { } func (rp *ReaderPattern) MonitorReadAt(offset int64, size int) { - lastOffset := atomic.SwapInt64(&rp.lastReadStopOffset, offset+int64(size)) - counter := atomic.LoadInt64(&rp.isSequentialCounter) - - if lastOffset == offset { - if counter < ModeChangeLimit { - atomic.AddInt64(&rp.isSequentialCounter, 1) - } + if rp.lastReadStopOffset == offset { + rp.isSequentialCounter++ } else { - if counter > -ModeChangeLimit { - atomic.AddInt64(&rp.isSequentialCounter, -1) - } + rp.isSequentialCounter-- } + rp.lastReadStopOffset = offset + int64(size) } func (rp *ReaderPattern) IsRandomMode() bool { - return atomic.LoadInt64(&rp.isSequentialCounter) < 0 + return rp.isSequentialCounter >= 0 } diff --git a/weed/filer/redis/README.md b/weed/filer/redis/README.md deleted file mode 100644 index 5fff2dbf7..000000000 --- a/weed/filer/redis/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Deprecated by redis2. - -This implementaiton uses unsorted set. For example, add a directory child via SAdd. - -Redis2 moves to sorted set. Adding a child uses ZAddNX. \ No newline at end of file diff --git a/weed/filer/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go index be2710948..9572058a8 100644 --- a/weed/filer/redis/redis_cluster_store.go +++ b/weed/filer/redis/redis_cluster_store.go @@ -1,9 +1,9 @@ package redis import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis/redis_store.go b/weed/filer/redis/redis_store.go index 823bbf610..665352a63 100644 --- a/weed/filer/redis/redis_store.go +++ b/weed/filer/redis/redis_store.go @@ -1,9 +1,9 @@ package redis import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go index 407491a04..89684647b 100644 --- a/weed/filer/redis/universal_redis_store.go +++ b/weed/filer/redis/universal_redis_store.go @@ -3,16 +3,16 @@ package redis import ( "context" "fmt" - "slices" + "golang.org/x/exp/slices" "strings" "time" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -35,8 +35,19 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { - if err = store.doInsertEntry(ctx, entry); err != nil { - return err + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > filer.CountEntryChunksForGzip { + value = util.MaybeGzipData(value) + } + + _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } dir, name := entry.FullPath.DirAndName() @@ -50,27 +61,9 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer. return nil } -func (store *UniversalRedisStore) doInsertEntry(ctx context.Context, entry *filer.Entry) error { - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { - value = util.MaybeGzipData(value) - } - - _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() - - if err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - return nil -} - func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { - return store.doInsertEntry(ctx, entry) + return store.InsertEntry(ctx, entry) } func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { @@ -164,8 +157,8 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP } // sort - slices.SortFunc(members, func(a, b string) int { - return strings.Compare(a, b) + slices.SortFunc(members, func(a, b string) bool { + return strings.Compare(a, b) < 0 }) // limit @@ -179,7 +172,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).InfofCtx(ctx, "list %s : %v", path, err) + glog.V(0).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go index 1098b7482..ad6e389ed 100644 --- a/weed/filer/redis/universal_redis_store_kv.go +++ b/weed/filer/redis/universal_redis_store_kv.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" ) func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -13,7 +13,7 @@ func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value [ _, err = store.Client.Set(ctx, string(key), value, 0).Result() if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -35,7 +35,7 @@ func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err _, err = store.Client.Del(ctx, string(key)).Result() if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go index 6e4f11d22..22d09da25 100644 --- a/weed/filer/redis2/redis_cluster_store.go +++ b/weed/filer/redis2/redis_cluster_store.go @@ -1,9 +1,9 @@ package redis2 import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis2/redis_sentinel_store.go b/weed/filer/redis2/redis_sentinel_store.go index 5fc368fc7..802588b2b 100644 --- a/weed/filer/redis2/redis_sentinel_store.go +++ b/weed/filer/redis2/redis_sentinel_store.go @@ -1,9 +1,9 @@ package redis2 import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" "time" ) diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go index f9322be42..8eb97e374 100644 --- a/weed/filer/redis2/redis_store.go +++ b/weed/filer/redis2/redis_store.go @@ -1,15 +1,9 @@ package redis2 import ( - "crypto/tls" - "crypto/x509" - "net" - "os" - - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { @@ -30,54 +24,15 @@ func (store *Redis2Store) Initialize(configuration util.Configuration, prefix st configuration.GetString(prefix+"password"), configuration.GetInt(prefix+"database"), configuration.GetStringSlice(prefix+"superLargeDirectories"), - configuration.GetBool(prefix+"enable_mtls"), - configuration.GetString(prefix+"ca_cert_path"), - configuration.GetString(prefix+"client_cert_path"), - configuration.GetString(prefix+"client_key_path"), ) } -func (store *Redis2Store) initialize(hostPort string, password string, database int, superLargeDirectories []string, enableMtls bool, caCertPath string, clientCertPath string, clientKeyPath string) (err error) { - if enableMtls { - clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) - if err != nil { - glog.Fatalf("Error loading client certificate and key pair: %v", err) - } - - caCertBytes, err := os.ReadFile(caCertPath) - if err != nil { - glog.Fatalf("Error reading CA certificate file: %v", err) - } - - caCertPool := x509.NewCertPool() - if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok { - glog.Fatalf("Error appending CA certificate to pool") - } - - redisHost, _, err := net.SplitHostPort(hostPort) - if err != nil { - glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{clientCert}, - RootCAs: caCertPool, - ServerName: redisHost, - MinVersion: tls.VersionTLS12, - } - store.Client = redis.NewClient(&redis.Options{ - Addr: hostPort, - Password: password, - DB: database, - TLSConfig: tlsConfig, - }) - } else { - store.Client = redis.NewClient(&redis.Options{ - Addr: hostPort, - Password: password, - DB: database, - }) - } +func (store *Redis2Store) initialize(hostPort string, password string, database int, superLargeDirectories []string) (err error) { + store.Client = redis.NewClient(&redis.Options{ + Addr: hostPort, + Password: password, + DB: database, + }) store.loadSuperLargeDirectories(superLargeDirectories) return } diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go index 1fa384f29..7a34092a0 100644 --- a/weed/filer/redis2/universal_redis_store.go +++ b/weed/filer/redis2/universal_redis_store.go @@ -5,12 +5,12 @@ import ( "fmt" "time" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -47,8 +47,17 @@ func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) erro func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { - if err = store.doInsertEntry(ctx, entry); err != nil { - return err + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > filer.CountEntryChunksForGzip { + value = util.MaybeGzipData(value) + } + + if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } dir, name := entry.FullPath.DirAndName() @@ -57,7 +66,7 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer } if name != "" { - if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil { + if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil { return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) } } @@ -65,25 +74,9 @@ func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer return nil } -func (store *UniversalRedis2Store) doInsertEntry(ctx context.Context, entry *filer.Entry) error { - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { - value = util.MaybeGzipData(value) - } - - if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - return nil -} - func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { - return store.doInsertEntry(ctx, entry) + return store.InsertEntry(ctx, entry) } func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { @@ -194,7 +187,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).InfofCtx(ctx, "list %s : %v", path, err) + glog.V(0).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go index ab85ccb61..bde994dc9 100644 --- a/weed/filer/redis2/universal_redis_store_kv.go +++ b/weed/filer/redis2/universal_redis_store_kv.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" ) func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -13,7 +13,7 @@ func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value _, err = store.Client.Set(ctx, string(key), value, 0).Result() if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -35,7 +35,7 @@ func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (er _, err = store.Client.Del(ctx, string(key)).Result() if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/redis3/ItemList.go b/weed/filer/redis3/ItemList.go index 9e38089a7..af3b8ae5a 100644 --- a/weed/filer/redis3/ItemList.go +++ b/weed/filer/redis3/ItemList.go @@ -4,8 +4,8 @@ import ( "bytes" "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/util/skiplist" + "github.com/chrislusf/seaweedfs/weed/util/skiplist" + "github.com/go-redis/redis/v8" ) type ItemList struct { @@ -198,45 +198,36 @@ func (nl *ItemList) WriteName(name string) error { /* // case 1: exists in nextNode - - if nextNode != nil && nextNode.Key == name { - remove from nextNode, update nextNode - // TODO: merge with prevNode if possible? - return - } - +if nextNode != nil && nextNode.Key == name { + remove from nextNode, update nextNode + // TODO: merge with prevNode if possible? + return +} if nextNode is nil - prevNode = list.Largestnode - if prevNode == nil and nextNode.Prev != nil - prevNode = load(nextNode.Prev) // case 2: does not exist // case 2.1 - - if prevNode == nil { - return - } - +if prevNode == nil { + return +} // case 2.2 - - if prevNameBatch does not contain name { - return - } +if prevNameBatch does not contain name { + return +} // case 3 delete from prevNameBatch if prevNameBatch + nextNode < capacityList - // case 3.1 merge - else - // case 3.2 update prevNode + + */ func (nl *ItemList) DeleteName(name string) error { lookupKey := []byte(name) @@ -334,13 +325,13 @@ func (nl *ItemList) ListNames(startFrom string, visitNamesFn func(name string) b } if prevNode != nil { - if !nl.NodeScanInclusiveAfter(prevNode.Reference(), startFrom, visitNamesFn) { + if !nl.NodeScanIncluseiveAfter(prevNode.Reference(), startFrom, visitNamesFn) { return nil } } for nextNode != nil { - if !nl.NodeScanInclusiveAfter(nextNode.Reference(), startFrom, visitNamesFn) { + if !nl.NodeScanIncluseiveAfter(nextNode.Reference(), startFrom, visitNamesFn) { return nil } nextNode, err = nl.skipList.LoadElement(nextNode.Next[0]) @@ -399,9 +390,9 @@ func (nl *ItemList) NodeSize(node *skiplist.SkipListElementReference) int { func (nl *ItemList) NodeAddMember(node *skiplist.SkipListElementReference, names ...string) error { key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer) - var members []redis.Z + var members []*redis.Z for _, name := range names { - members = append(members, redis.Z{ + members = append(members, &redis.Z{ Score: 0, Member: name, }) @@ -438,7 +429,7 @@ func (nl *ItemList) NodeMin(node *skiplist.SkipListElementReference) string { return "" } -func (nl *ItemList) NodeScanInclusiveAfter(node *skiplist.SkipListElementReference, startFrom string, visitNamesFn func(name string) bool) bool { +func (nl *ItemList) NodeScanIncluseiveAfter(node *skiplist.SkipListElementReference, startFrom string, visitNamesFn func(name string) bool) bool { key := fmt.Sprintf("%s%dm", nl.prefix, node.ElementPointer) if startFrom == "" { startFrom = "-" diff --git a/weed/filer/redis3/README.md b/weed/filer/redis3/README.md deleted file mode 100644 index fc696e1d3..000000000 --- a/weed/filer/redis3/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Desuppported. - -This implementation attempts to use skip list. -Did not get any report on actual benefits. diff --git a/weed/filer/redis3/item_list_serde.go b/weed/filer/redis3/item_list_serde.go index f4410b61b..d0310ce40 100644 --- a/weed/filer/redis3/item_list_serde.go +++ b/weed/filer/redis3/item_list_serde.go @@ -1,10 +1,10 @@ package redis3 import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util/skiplist" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util/skiplist" + "github.com/go-redis/redis/v8" + "github.com/golang/protobuf/proto" ) func LoadItemList(data []byte, prefix string, client redis.UniversalClient, store skiplist.ListStore, batchSize int) *ItemList { diff --git a/weed/filer/redis3/kv_directory_children.go b/weed/filer/redis3/kv_directory_children.go index 281b01195..d92dddfe6 100644 --- a/weed/filer/redis3/kv_directory_children.go +++ b/weed/filer/redis3/kv_directory_children.go @@ -3,9 +3,8 @@ package redis3 import ( "context" "fmt" - - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/go-redis/redis/v8" ) const maxNameBatchSizeLimit = 1000000 @@ -32,7 +31,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit) if err := nameList.WriteName(name); err != nil { - glog.ErrorfCtx(ctx, "add %s %s: %v", key, name, err) + glog.Errorf("add %s %s: %v", key, name, err) return err } @@ -101,7 +100,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s if err = nameList.ListNames("", func(name string) bool { if err := onDeleteFn(name); err != nil { - glog.ErrorfCtx(ctx, "delete %s child %s: %v", key, name, err) + glog.Errorf("delete %s child %s: %v", key, name, err) return false } return true diff --git a/weed/filer/redis3/kv_directory_children_test.go b/weed/filer/redis3/kv_directory_children_test.go index 76a8dc00f..9d7acacf1 100644 --- a/weed/filer/redis3/kv_directory_children_test.go +++ b/weed/filer/redis3/kv_directory_children_test.go @@ -3,7 +3,7 @@ package redis3 import ( "context" "fmt" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" "github.com/stvp/tempredis" "strconv" "testing" @@ -116,7 +116,7 @@ func BenchmarkRedis(b *testing.B) { }) for i := 0; i < b.N; i++ { - client.ZAddNX(context.Background(), "/yyy/bin", redis.Z{Score: 0, Member: strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx"}) + client.ZAddNX(context.Background(), "/yyy/bin", &redis.Z{Score: 0, Member: strconv.Itoa(i) + "namexxxxxxxxxxxxxxxxxxx"}) } } @@ -149,7 +149,7 @@ func xTestNameListAdd(t *testing.T) { ts1 := time.Now() for i := 0; i < N; i++ { - client.ZAddNX(context.Background(), "/x", redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)}) + client.ZAddNX(context.Background(), "/x", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)}) } ts2 := time.Now() @@ -205,6 +205,6 @@ func xBenchmarkRedis(b *testing.B) { }) for i := 0; i < b.N; i++ { - client.ZAddNX(context.Background(), "/xxx/bin", redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)}) + client.ZAddNX(context.Background(), "/xxx/bin", &redis.Z{Score: 0, Member: fmt.Sprintf("name %8d", i)}) } } diff --git a/weed/filer/redis3/redis_cluster_store.go b/weed/filer/redis3/redis_cluster_store.go index cb8c1896d..73fc0dd20 100644 --- a/weed/filer/redis3/redis_cluster_store.go +++ b/weed/filer/redis3/redis_cluster_store.go @@ -1,11 +1,11 @@ package redis3 import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" "github.com/go-redsync/redsync/v4" - "github.com/go-redsync/redsync/v4/redis/goredis/v9" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/go-redsync/redsync/v4/redis/goredis/v8" ) func init() { diff --git a/weed/filer/redis3/redis_sentinel_store.go b/weed/filer/redis3/redis_sentinel_store.go index b9b0354e0..a87302167 100644 --- a/weed/filer/redis3/redis_sentinel_store.go +++ b/weed/filer/redis3/redis_sentinel_store.go @@ -3,11 +3,11 @@ package redis3 import ( "time" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" "github.com/go-redsync/redsync/v4" - "github.com/go-redsync/redsync/v4/redis/goredis/v9" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/go-redsync/redsync/v4/redis/goredis/v8" ) func init() { diff --git a/weed/filer/redis3/redis_store.go b/weed/filer/redis3/redis_store.go index 3bb0ce46f..2eec3d37a 100644 --- a/weed/filer/redis3/redis_store.go +++ b/weed/filer/redis3/redis_store.go @@ -1,17 +1,11 @@ package redis3 import ( - "crypto/tls" - "crypto/x509" - "net" - "os" - + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" "github.com/go-redsync/redsync/v4" - "github.com/go-redsync/redsync/v4/redis/goredis/v9" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/go-redsync/redsync/v4/redis/goredis/v8" ) func init() { @@ -31,54 +25,15 @@ func (store *Redis3Store) Initialize(configuration util.Configuration, prefix st configuration.GetString(prefix+"address"), configuration.GetString(prefix+"password"), configuration.GetInt(prefix+"database"), - configuration.GetBool(prefix+"enable_mtls"), - configuration.GetString(prefix+"ca_cert_path"), - configuration.GetString(prefix+"client_cert_path"), - configuration.GetString(prefix+"client_key_path"), ) } -func (store *Redis3Store) initialize(hostPort string, password string, database int, enableMtls bool, caCertPath string, clientCertPath string, clientKeyPath string) (err error) { - if enableMtls { - clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) - if err != nil { - glog.Fatalf("Error loading client certificate and key pair: %v", err) - } - - caCertBytes, err := os.ReadFile(caCertPath) - if err != nil { - glog.Fatalf("Error reading CA certificate file: %v", err) - } - - caCertPool := x509.NewCertPool() - if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok { - glog.Fatalf("Error appending CA certificate to pool") - } - - redisHost, _, err := net.SplitHostPort(hostPort) - if err != nil { - glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{clientCert}, - RootCAs: caCertPool, - ServerName: redisHost, - MinVersion: tls.VersionTLS12, - } - store.Client = redis.NewClient(&redis.Options{ - Addr: hostPort, - Password: password, - DB: database, - TLSConfig: tlsConfig, - }) - } else { - store.Client = redis.NewClient(&redis.Options{ - Addr: hostPort, - Password: password, - DB: database, - }) - } +func (store *Redis3Store) initialize(hostPort string, password string, database int) (err error) { + store.Client = redis.NewClient(&redis.Options{ + Addr: hostPort, + Password: password, + DB: database, + }) store.redsync = redsync.New(goredis.NewPool(store.Client)) return } diff --git a/weed/filer/redis3/skiplist_element_store.go b/weed/filer/redis3/skiplist_element_store.go index 46506187e..8c101d006 100644 --- a/weed/filer/redis3/skiplist_element_store.go +++ b/weed/filer/redis3/skiplist_element_store.go @@ -3,10 +3,10 @@ package redis3 import ( "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util/skiplist" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util/skiplist" + "github.com/go-redis/redis/v8" + "github.com/golang/protobuf/proto" ) type SkipListElementStore struct { diff --git a/weed/filer/redis3/universal_redis_store.go b/weed/filer/redis3/universal_redis_store.go index 699683d91..10a87e2a4 100644 --- a/weed/filer/redis3/universal_redis_store.go +++ b/weed/filer/redis3/universal_redis_store.go @@ -3,15 +3,15 @@ package redis3 import ( "context" "fmt" + "github.com/go-redsync/redsync/v4" "time" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" - redsync "github.com/go-redsync/redsync/v4" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -35,8 +35,17 @@ func (store *UniversalRedis3Store) RollbackTransaction(ctx context.Context) erro func (store *UniversalRedis3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { - if err = store.doInsertEntry(ctx, entry); err != nil { - return err + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > filer.CountEntryChunksForGzip { + value = util.MaybeGzipData(value) + } + + if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } dir, name := entry.FullPath.DirAndName() @@ -50,25 +59,9 @@ func (store *UniversalRedis3Store) InsertEntry(ctx context.Context, entry *filer return nil } -func (store *UniversalRedis3Store) doInsertEntry(ctx context.Context, entry *filer.Entry) error { - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { - value = util.MaybeGzipData(value) - } - - if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - return nil -} - func (store *UniversalRedis3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { - return store.doInsertEntry(ctx, entry) + return store.InsertEntry(ctx, entry) } func (store *UniversalRedis3Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { @@ -151,7 +144,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).InfofCtx(ctx, "list %s : %v", path, err) + glog.V(0).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { return true } diff --git a/weed/filer/redis3/universal_redis_store_kv.go b/weed/filer/redis3/universal_redis_store_kv.go index ba44a9c07..a9c440a37 100644 --- a/weed/filer/redis3/universal_redis_store_kv.go +++ b/weed/filer/redis3/universal_redis_store_kv.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" ) func (store *UniversalRedis3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -13,7 +13,7 @@ func (store *UniversalRedis3Store) KvPut(ctx context.Context, key []byte, value _, err = store.Client.Set(ctx, string(key), value, 0).Result() if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -35,7 +35,7 @@ func (store *UniversalRedis3Store) KvDelete(ctx context.Context, key []byte) (er _, err = store.Client.Del(ctx, string(key)).Result() if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/redis_lua/redis_cluster_store.go b/weed/filer/redis_lua/redis_cluster_store.go index 251aadbcd..b68d1092c 100644 --- a/weed/filer/redis_lua/redis_cluster_store.go +++ b/weed/filer/redis_lua/redis_cluster_store.go @@ -1,9 +1,9 @@ package redis_lua import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis_lua/redis_sentinel_store.go b/weed/filer/redis_lua/redis_sentinel_store.go index f22a7fa66..5530c098e 100644 --- a/weed/filer/redis_lua/redis_sentinel_store.go +++ b/weed/filer/redis_lua/redis_sentinel_store.go @@ -1,9 +1,9 @@ package redis_lua import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" "time" ) diff --git a/weed/filer/redis_lua/redis_store.go b/weed/filer/redis_lua/redis_store.go index 8574baa09..a7d11c73c 100644 --- a/weed/filer/redis_lua/redis_store.go +++ b/weed/filer/redis_lua/redis_store.go @@ -1,9 +1,9 @@ package redis_lua import ( - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis_lua/stored_procedure/init.go b/weed/filer/redis_lua/stored_procedure/init.go index 9373cc5a3..1412ceba2 100644 --- a/weed/filer/redis_lua/stored_procedure/init.go +++ b/weed/filer/redis_lua/stored_procedure/init.go @@ -2,7 +2,7 @@ package stored_procedure import ( _ "embed" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" ) func init() { diff --git a/weed/filer/redis_lua/universal_redis_store.go b/weed/filer/redis_lua/universal_redis_store.go index 20b83a2a9..0ab0f2f24 100644 --- a/weed/filer/redis_lua/universal_redis_store.go +++ b/weed/filer/redis_lua/universal_redis_store.go @@ -5,13 +5,13 @@ import ( "fmt" "time" - "github.com/redis/go-redis/v9" + "github.com/go-redis/redis/v8" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/redis_lua/stored_procedure" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/redis_lua/stored_procedure" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -53,7 +53,7 @@ func (store *UniversalRedisLuaStore) InsertEntry(ctx context.Context, entry *fil return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { value = util.MaybeGzipData(value) } @@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).InfofCtx(ctx, "list %s : %v", path, err) + glog.V(0).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis_lua/universal_redis_store_kv.go b/weed/filer/redis_lua/universal_redis_store_kv.go index 79b6495ce..3df980b66 100644 --- a/weed/filer/redis_lua/universal_redis_store_kv.go +++ b/weed/filer/redis_lua/universal_redis_store_kv.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" ) func (store *UniversalRedisLuaStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -13,7 +13,7 @@ func (store *UniversalRedisLuaStore) KvPut(ctx context.Context, key []byte, valu _, err = store.Client.Set(ctx, string(key), value, 0).Result() if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -35,7 +35,7 @@ func (store *UniversalRedisLuaStore) KvDelete(ctx context.Context, key []byte) ( _, err = store.Client.Del(ctx, string(key)).Result() if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/remote_mapping.go b/weed/filer/remote_mapping.go index cc48d859e..b0534e2ca 100644 --- a/weed/filer/remote_mapping.go +++ b/weed/filer/remote_mapping.go @@ -2,27 +2,25 @@ package filer import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/golang/protobuf/proto" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" ) func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress) (mappings *remote_pb.RemoteStorageMapping, readErr error) { var oldContent []byte - if readErr = pb.WithFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if readErr = pb.WithFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE) return readErr }); readErr != nil { - if readErr != filer_pb.ErrNotFound { - return nil, fmt.Errorf("read existing mapping: %w", readErr) - } - oldContent = nil + return nil, readErr } + mappings, readErr = UnmarshalRemoteStorageMappings(oldContent) if readErr != nil { - return nil, fmt.Errorf("unmarshal mappings: %w", readErr) + return nil, fmt.Errorf("unmarshal mappings: %v", readErr) } return @@ -38,7 +36,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor }) if err != nil { if err != filer_pb.ErrNotFound { - return fmt.Errorf("read existing mapping: %w", err) + return fmt.Errorf("read existing mapping: %v", err) } } @@ -53,7 +51,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent) }) if err != nil { - return fmt.Errorf("save mapping: %w", err) + return fmt.Errorf("save mapping: %v", err) } return nil @@ -69,7 +67,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error }) if err != nil { if err != filer_pb.ErrNotFound { - return fmt.Errorf("read existing mapping: %w", err) + return fmt.Errorf("read existing mapping: %v", err) } } @@ -84,7 +82,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent) }) if err != nil { - return fmt.Errorf("save mapping: %w", err) + return fmt.Errorf("save mapping: %v", err) } return nil @@ -100,7 +98,7 @@ func addRemoteStorageMapping(oldContent []byte, dir string, storageLocation *rem mappings.Mappings[dir] = storageLocation if newContent, err = proto.Marshal(mappings); err != nil { - return oldContent, fmt.Errorf("marshal mappings: %w", err) + return oldContent, fmt.Errorf("marshal mappings: %v", err) } return @@ -116,7 +114,7 @@ func removeRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt delete(mappings.Mappings, dir) if newContent, err = proto.Marshal(mappings); err != nil { - return oldContent, fmt.Errorf("marshal mappings: %w", err) + return oldContent, fmt.Errorf("marshal mappings: %v", err) } return diff --git a/weed/filer/remote_storage.go b/weed/filer/remote_storage.go index 3764fbac6..5362ba738 100644 --- a/weed/filer/remote_storage.go +++ b/weed/filer/remote_storage.go @@ -3,17 +3,17 @@ package filer import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" "math" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/viant/ptrie" ) @@ -21,13 +21,13 @@ const REMOTE_STORAGE_CONF_SUFFIX = ".conf" const REMOTE_STORAGE_MOUNT_FILE = "mount.mapping" type FilerRemoteStorage struct { - rules ptrie.Trie[*remote_pb.RemoteStorageLocation] + rules ptrie.Trie storageNameToConf map[string]*remote_pb.RemoteConf } func NewFilerRemoteStorage() (rs *FilerRemoteStorage) { rs = &FilerRemoteStorage{ - rules: ptrie.New[*remote_pb.RemoteStorageLocation](), + rules: ptrie.New(), storageNameToConf: make(map[string]*remote_pb.RemoteConf), } return rs @@ -82,9 +82,9 @@ func (rs *FilerRemoteStorage) mapDirectoryToRemoteStorage(dir util.FullPath, loc } func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util.FullPath, remoteLocation *remote_pb.RemoteStorageLocation) { - rs.rules.MatchPrefix([]byte(p), func(key []byte, value *remote_pb.RemoteStorageLocation) bool { + rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool { mountDir = util.FullPath(string(key[:len(key)-1])) - remoteLocation = value + remoteLocation = value.(*remote_pb.RemoteStorageLocation) return true }) return @@ -92,8 +92,8 @@ func (rs *FilerRemoteStorage) FindMountDirectory(p util.FullPath) (mountDir util func (rs *FilerRemoteStorage) FindRemoteStorageClient(p util.FullPath) (client remote_storage.RemoteStorageClient, remoteConf *remote_pb.RemoteConf, found bool) { var storageLocation *remote_pb.RemoteStorageLocation - rs.rules.MatchPrefix([]byte(p), func(key []byte, value *remote_pb.RemoteStorageLocation) bool { - storageLocation = value + rs.rules.MatchPrefix([]byte(p), func(key []byte, value interface{}) bool { + storageLocation = value.(*remote_pb.RemoteStorageLocation) return true }) @@ -133,7 +133,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.Remo func ReadRemoteStorageConf(grpcDialOption grpc.DialOption, filerAddress pb.ServerAddress, storageName string) (conf *remote_pb.RemoteConf, readErr error) { var oldContent []byte - if readErr = pb.WithFilerClient(false, 0, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if readErr = pb.WithFilerClient(false, filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { oldContent, readErr = ReadInsideFiler(client, DirectoryEtcRemote, storageName+REMOTE_STORAGE_CONF_SUFFIX) return readErr }); readErr != nil { diff --git a/weed/filer/remote_storage_test.go b/weed/filer/remote_storage_test.go index b3785db85..9f4d7af2f 100644 --- a/weed/filer/remote_storage_test.go +++ b/weed/filer/remote_storage_test.go @@ -1,7 +1,7 @@ package filer import ( - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" "github.com/stretchr/testify/assert" "testing" ) diff --git a/weed/filer/rocksdb/README.md b/weed/filer/rocksdb/README.md index 8eb9051e9..6bae6d34e 100644 --- a/weed/filer/rocksdb/README.md +++ b/weed/filer/rocksdb/README.md @@ -36,6 +36,6 @@ go get github.com/tecbot/gorocksdb # compile with rocksdb ``` -cd ~/go/src/github.com/seaweedfs/seaweedfs/weed +cd ~/go/src/github.com/chrislusf/seaweedfs/weed go install -tags rocksdb ``` diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go index 044dc1342..f48c3988c 100644 --- a/weed/filer/rocksdb/rocksdb_store.go +++ b/weed/filer/rocksdb/rocksdb_store.go @@ -1,4 +1,3 @@ -//go:build rocksdb // +build rocksdb package rocksdb @@ -13,10 +12,10 @@ import ( gorocksdb "github.com/linxGnu/grocksdb" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -108,7 +107,7 @@ func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } - // println("saved", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) return nil } @@ -123,7 +122,7 @@ func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful key := genKey(dir, name) data, err := store.db.Get(store.ro, key) - if data == nil || !data.Exists() { + if data == nil { return nil, filer_pb.ErrNotFound } defer data.Free() @@ -140,7 +139,7 @@ func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } - // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data)) + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) return entry, nil } @@ -169,7 +168,7 @@ func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath we iter := store.db.NewIterator(ro) defer iter.Close() - err = enumerate(iter, directoryPrefix, nil, false, -1, "", func(key, value []byte) bool { + err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool { batch.Delete(key) return true }) @@ -186,16 +185,23 @@ func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath we return nil } -func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, startFileName string, fn func(key, value []byte) bool) (err error) { +func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) { if len(lastKey) == 0 { iter.Seek(prefix) } else { iter.Seek(lastKey) + if !includeLastKey { + if iter.Valid() { + if bytes.Equal(iter.Key().Data(), lastKey) { + iter.Next() + } + } + } } i := int64(0) - for iter.Valid() { + for ; iter.Valid(); iter.Next() { if limit > 0 { i++ @@ -210,27 +216,16 @@ func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey break } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - if fileName == startFileName && !includeLastKey { - iter.Next() - continue - } - ret := fn(key, iter.Value().Data()) if !ret { break } - iter.Next() } if err := iter.Err(); err != nil { - return fmt.Errorf("prefix scan iterator: %w", err) + return fmt.Errorf("prefix scan iterator: %v", err) } return nil } @@ -253,7 +248,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir iter := store.db.NewIterator(ro) defer iter.Close() - err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, startFileName, func(key, value []byte) bool { + err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool { fileName := getNameFromKey(key) if fileName == "" { return true @@ -263,10 +258,10 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } lastFileName = fileName - // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil { err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) + glog.V(0).Infof("list %s : %v", entry.FullPath, err) return false } if !eachEntryFunc(entry) { diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go index 8432303b9..cf1214d5b 100644 --- a/weed/filer/rocksdb/rocksdb_store_kv.go +++ b/weed/filer/rocksdb/rocksdb_store_kv.go @@ -1,4 +1,3 @@ -//go:build rocksdb // +build rocksdb package rocksdb @@ -7,7 +6,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" ) func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -15,7 +14,7 @@ func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) err = store.db.Put(store.wo, key, value) if err != nil { - return fmt.Errorf("kv put: %w", err) + return fmt.Errorf("kv put: %v", err) } return nil @@ -30,7 +29,7 @@ func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, } if err != nil { - return nil, fmt.Errorf("kv get: %w", err) + return nil, fmt.Errorf("kv get: %v", err) } return @@ -41,7 +40,7 @@ func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) err = store.db.Delete(store.wo, key) if err != nil { - return fmt.Errorf("kv delete: %w", err) + return fmt.Errorf("kv delete: %v", err) } return nil diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go index 35181d601..faabcd341 100644 --- a/weed/filer/rocksdb/rocksdb_store_test.go +++ b/weed/filer/rocksdb/rocksdb_store_test.go @@ -10,13 +10,12 @@ import ( "testing" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", 0, "", "", "", nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir := t.TempDir() store := &RocksDBStore{} store.initialize(dir) @@ -35,7 +34,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false, testFiler.MaxFilenameLength); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } @@ -69,7 +68,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", 0, "", "", "", nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir := t.TempDir() store := &RocksDBStore{} store.initialize(dir) @@ -91,7 +90,7 @@ func TestEmptyRoot(t *testing.T) { } func BenchmarkInsertEntry(b *testing.B) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", 0, "", "", "", nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir := b.TempDir() store := &RocksDBStore{} store.initialize(dir) @@ -113,73 +112,3 @@ func BenchmarkInsertEntry(b *testing.B) { store.InsertEntry(ctx, entry) } } - -func TestListDirectoryWithPrefix(t *testing.T) { - testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil) - dir := t.TempDir() - store := &RocksDBStore{} - store.initialize(dir) - testFiler.SetStore(store) - - ctx := context.Background() - - files := []string{ - "/bucket1/test-prefix1/file1.txt", - "/bucket1/test-prefix1/file2.txt", - "/bucket1/test-prefix1-extra.txt", - } - - expected1 := []string{ - "/bucket1/test-prefix1", - "/bucket1/test-prefix1-extra.txt", - } - - expected2 := []string{ - "/bucket1/test-prefix1/file1.txt", - "/bucket1/test-prefix1/file2.txt", - } - - for _, file := range files { - fullpath := util.FullPath(file) - entry := &filer.Entry{ - FullPath: fullpath, - Attr: filer.Attr{ - Mode: 0644, - Uid: 1, - Gid: 1, - }, - } - if err := testFiler.CreateEntry(ctx, entry, false, false, nil, false, testFiler.MaxFilenameLength); err != nil { - t.Fatalf("Failed to create entry %s: %v", fullpath, err) - } - } - - prefix1 := "test-prefix1" - entries1, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/bucket1"), "", false, 100, prefix1, "", "") - if err != nil { - t.Fatalf("Failed to list entries with prefix %s: %v", prefix1, err) - } - if len(entries1) != 2 { - t.Errorf("Expected 2 entries with prefix %s, got %d", prefix1, len(entries1)) - } else { - t.Logf("Found %d entries with prefix %s", len(entries1), prefix1) - } - for i, entry := range entries1 { - if string(entry.FullPath) != expected1[i] { - t.Errorf("Expected entry %s, got %s", expected1[i], entry.FullPath) - } - } - - entries2, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/bucket1/test-prefix1"), "", false, 100, "", "", "") - if err != nil { - t.Fatalf("Failed to list entries with prefix %s: %v", prefix1, err) - } - if len(entries2) != 2 { - t.Errorf("Expected 2 entries with prefix %s, got %d", prefix1, len(entries1)) - } - for i, entry := range entries2 { - if string(entry.FullPath) != expected2[i] { - t.Errorf("Expected entry %s, got %s", expected2[i], entry.FullPath) - } - } -} diff --git a/weed/filer/rocksdb/rocksdb_ttl.go b/weed/filer/rocksdb/rocksdb_ttl.go index 08ad31061..7e9643083 100644 --- a/weed/filer/rocksdb/rocksdb_ttl.go +++ b/weed/filer/rocksdb/rocksdb_ttl.go @@ -1,5 +1,4 @@ -//go:build rocksdb -// +build rocksdb +//+build rocksdb package rocksdb @@ -8,7 +7,7 @@ import ( gorocksdb "github.com/linxGnu/grocksdb" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" ) type TTLFilter struct { diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go index 3d0a09214..d8f3c2445 100644 --- a/weed/filer/s3iam_conf.go +++ b/weed/filer/s3iam_conf.go @@ -1,20 +1,17 @@ package filer import ( + "bytes" "fmt" "io" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - jsonpb "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" ) func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) error { - options := &jsonpb.UnmarshalOptions{ - DiscardUnknown: true, - AllowPartial: true, - } - if err := options.Unmarshal(content, config); err != nil { + if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil { return err } return nil @@ -22,22 +19,12 @@ func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) er func ProtoToText(writer io.Writer, config proto.Message) error { - m := jsonpb.MarshalOptions{ - EmitUnpopulated: true, - Indent: " ", + m := jsonpb.Marshaler{ + EmitDefaults: false, + Indent: " ", } - text, marshalErr := m.Marshal(config) - if marshalErr != nil { - return fmt.Errorf("marshal proto message: %w", marshalErr) - } - - _, writeErr := writer.Write(text) - if writeErr != nil { - return fmt.Errorf("fail to write proto message: %w", writeErr) - } - - return writeErr + return m.Marshal(writer, config) } // CheckDuplicateAccessKey returns an error message when s3cfg has duplicate access keys @@ -47,7 +34,7 @@ func CheckDuplicateAccessKey(s3cfg *iam_pb.S3ApiConfiguration) error { for _, cred := range ident.Credentials { if userName, found := accessKeySet[cred.AccessKey]; !found { accessKeySet[cred.AccessKey] = ident.Name - } else if userName != ident.Name { + } else { return fmt.Errorf("duplicate accessKey[%s], already configured in user[%s]", cred.AccessKey, userName) } } diff --git a/weed/filer/s3iam_conf_test.go b/weed/filer/s3iam_conf_test.go index ab1af4bc1..bd9eb85ae 100644 --- a/weed/filer/s3iam_conf_test.go +++ b/weed/filer/s3iam_conf_test.go @@ -4,9 +4,9 @@ import ( "bytes" "testing" - . "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" "github.com/stretchr/testify/assert" ) @@ -97,41 +97,6 @@ func TestCheckDuplicateAccessKey(t *testing.T) { }, "", }, - { - &iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "some_name", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - }, - }, - Actions: []string{ - ACTION_ADMIN, - ACTION_READ, - ACTION_WRITE, - }, - }, - { - Name: "some_name", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "some_access_key1", - SecretKey: "some_secret_key1", - }, - }, - Actions: []string{ - ACTION_READ, - ACTION_TAGGING, - ACTION_LIST, - }, - }, - }, - }, - "", - }, { &iam_pb.S3ApiConfiguration{ Identities: []*iam_pb.Identity{ diff --git a/weed/filer/sqlite/doc.go b/weed/filer/sqlite/doc.go index af6d33491..833addf54 100644 --- a/weed/filer/sqlite/doc.go +++ b/weed/filer/sqlite/doc.go @@ -1,7 +1,9 @@ /* + Package sqlite is for sqlite filer store. The referenced "modernc.org/sqlite" library is too big when compiled. So this is only compiled in "make full_install". + */ package sqlite diff --git a/weed/filer/sqlite/sqlite_store.go b/weed/filer/sqlite/sqlite_store.go index 6c9ca4ecc..70a4bf390 100644 --- a/weed/filer/sqlite/sqlite_store.go +++ b/weed/filer/sqlite/sqlite_store.go @@ -11,10 +11,10 @@ import ( "database/sql" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/filer/mysql" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/mysql" + "github.com/chrislusf/seaweedfs/weed/util" _ "modernc.org/sqlite" ) @@ -63,19 +63,15 @@ func (store *SqliteStore) initialize(dbFile, createTable, upsertQuery string) (e var dbErr error store.DB, dbErr = sql.Open("sqlite", dbFile) if dbErr != nil { - if store.DB != nil { - store.DB.Close() - store.DB = nil - } - return fmt.Errorf("can not connect to %s error:%v", dbFile, dbErr) + store.DB.Close() + store.DB = nil + return fmt.Errorf("can not connect to %s error:%v", dbFile, err) } if err = store.DB.Ping(); err != nil { return fmt.Errorf("connect to %s error:%v", dbFile, err) } - store.DB.SetMaxOpenConns(1) - if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil { return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err) } diff --git a/weed/filer/store_test/test_suite.go b/weed/filer/store_test/test_suite.go index fda694f26..ad578442c 100644 --- a/weed/filer/store_test/test_suite.go +++ b/weed/filer/store_test/test_suite.go @@ -3,8 +3,8 @@ package store_test import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/stretchr/testify/assert" "os" "testing" @@ -29,29 +29,16 @@ func TestFilerStore(t *testing.T, store filer.FilerStore) { }) assert.Nil(t, err, "list directory") assert.Equal(t, 3, counter, "directory list counter") - assert.Equal(t, "f00002", lastFileName, "directory list last file") + assert.Equal(t, "f00003", lastFileName, "directory list last file") lastFileName, err = store.ListDirectoryEntries(ctx, util.FullPath("/a/b/c"), lastFileName, false, 1024, func(entry *filer.Entry) bool { counter++ return true }) assert.Nil(t, err, "list directory") assert.Equal(t, 1027, counter, "directory list counter") - assert.Equal(t, "f01026", lastFileName, "directory list last file") + assert.Equal(t, "f01027", lastFileName, "directory list last file") } - testKey := []byte("test_key") - testValue1 := []byte("test_value1") - testValue2 := []byte("test_value2") - - err := store.KvPut(ctx, testKey, testValue1) - assert.Nil(t, err, "KV put") - value, err := store.KvGet(ctx, testKey) - assert.Equal(t, value, testValue1, "KV get") - - err = store.KvPut(ctx, testKey, testValue2) - assert.Nil(t, err, "KV update") - value, err = store.KvGet(ctx, testKey) - assert.Equal(t, value, testValue2, "KV get after update") } func makeEntry(fullPath util.FullPath, isDirectory bool) *filer.Entry { diff --git a/weed/filer/stream.go b/weed/filer/stream.go index 87280d6b0..7da9fd0a0 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -2,37 +2,29 @@ package filer import ( "bytes" - "context" "fmt" + "golang.org/x/exp/slices" "io" "math" + "sort" "strings" "sync" "time" - "slices" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) -var getLookupFileIdBackoffSchedule = []time.Duration{ - 150 * time.Millisecond, - 600 * time.Millisecond, - 1800 * time.Millisecond, -} - func HasData(entry *filer_pb.Entry) bool { if len(entry.Content) > 0 { return true } - return len(entry.GetChunks()) > 0 + return len(entry.Chunks) > 0 } func IsSameData(a, b *filer_pb.Entry) bool { @@ -48,11 +40,11 @@ func isSameChunks(a, b []*filer_pb.FileChunk) bool { if len(a) != len(b) { return false } - slices.SortFunc(a, func(i, j *filer_pb.FileChunk) int { - return strings.Compare(i.ETag, j.ETag) + slices.SortFunc(a, func(i, j *filer_pb.FileChunk) bool { + return strings.Compare(i.ETag, j.ETag) < 0 }) - slices.SortFunc(b, func(i, j *filer_pb.FileChunk) int { - return strings.Compare(i.ETag, j.ETag) + slices.SortFunc(b, func(i, j *filer_pb.FileChunk) bool { + return strings.Compare(i.ETag, j.ETag) < 0 }) for i := 0; i < len(a); i++ { if a[i].ETag != b[i].ETag { @@ -66,97 +58,63 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R if len(entry.Content) > 0 { return bytes.NewReader(entry.Content) } - return NewChunkStreamReader(filerClient, entry.GetChunks()) + return NewChunkStreamReader(filerClient, entry.Chunks) } -type DoStreamContent func(writer io.Writer) error +func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { -func PrepareStreamContent(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64) (DoStreamContent, error) { - return PrepareStreamContentWithThrottler(context.Background(), masterClient, jwtFunc, chunks, offset, size, 0) -} - -type VolumeServerJwtFunction func(fileId string) string - -func noJwtFunc(string) string { - return "" -} - -func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) { - glog.V(4).InfofCtx(ctx, "prepare to stream content for chunks: %d", len(chunks)) - chunkViews := ViewFromChunks(ctx, masterClient.GetLookupFileIdFunction(), chunks, offset, size) + glog.V(4).Infof("start to stream content for chunks: %+v", chunks) + chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) fileId2Url := make(map[string][]string) - for x := chunkViews.Front(); x != nil; x = x.Next { - chunkView := x.Value - var urlStrings []string - var err error - for _, backoff := range getLookupFileIdBackoffSchedule { - urlStrings, err = masterClient.GetLookupFileIdFunction()(ctx, chunkView.FileId) - if err == nil && len(urlStrings) > 0 { - break - } - glog.V(4).InfofCtx(ctx, "waiting for chunk: %s", chunkView.FileId) - time.Sleep(backoff) - } + for _, chunkView := range chunkViews { + + urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId) if err != nil { - glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return nil, err + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err } else if len(urlStrings) == 0 { - errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) - glog.ErrorCtx(ctx, errUrlNotFound) - return nil, errUrlNotFound + glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) } fileId2Url[chunkView.FileId] = urlStrings } - return func(writer io.Writer) error { - downloadThrottler := util.NewWriteThrottler(downloadMaxBytesPs) - remaining := size - for x := chunkViews.Front(); x != nil; x = x.Next { - chunkView := x.Value - if offset < chunkView.ViewOffset { - gap := chunkView.ViewOffset - offset - remaining -= gap - glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, chunkView.ViewOffset) - err := writeZero(writer, gap) - if err != nil { - return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset) - } - offset = chunkView.ViewOffset - } - urlStrings := fileId2Url[chunkView.FileId] - start := time.Now() - jwt := jwtFunc(chunkView.FileId) - err := retriedStreamFetchChunkData(ctx, writer, urlStrings, jwt, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize)) - offset += int64(chunkView.ViewSize) - remaining -= int64(chunkView.ViewSize) - stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds()) + remaining := size + for _, chunkView := range chunkViews { + if offset < chunkView.LogicOffset { + gap := chunkView.LogicOffset - offset + remaining -= gap + glog.V(4).Infof("zero [%d,%d)", offset, chunkView.LogicOffset) + err := writeZero(writer, gap) if err != nil { - stats.FilerHandlerCounter.WithLabelValues("chunkDownloadError").Inc() - return fmt.Errorf("read chunk: %w", err) + return fmt.Errorf("write zero [%d,%d)", offset, chunkView.LogicOffset) } - stats.FilerHandlerCounter.WithLabelValues("chunkDownload").Inc() - downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize)) + offset = chunkView.LogicOffset } - if remaining > 0 { - glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, offset+remaining) - err := writeZero(writer, remaining) - if err != nil { - return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining) - } + urlStrings := fileId2Url[chunkView.FileId] + start := time.Now() + err := retriedStreamFetchChunkData(writer, urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + offset += int64(chunkView.Size) + remaining -= int64(chunkView.Size) + stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds()) + if err != nil { + stats.FilerRequestCounter.WithLabelValues("chunkDownloadError").Inc() + return fmt.Errorf("read chunk: %v", err) } - - return nil - }, nil -} - -func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { - streamFn, err := PrepareStreamContent(masterClient, noJwtFunc, chunks, offset, size) - if err != nil { - return err + stats.FilerRequestCounter.WithLabelValues("chunkDownload").Inc() } - return streamFn(writer) + if remaining > 0 { + glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining) + err := writeZero(writer, remaining) + if err != nil { + return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining) + } + } + + return nil + } // ---------------- ReadAllReader ---------------------------------- @@ -178,25 +136,24 @@ func writeZero(w io.Writer, size int64) (err error) { return } -func ReadAll(ctx context.Context, buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) error { +func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) error { - lookupFileIdFn := func(ctx context.Context, fileId string) (targetUrls []string, err error) { - return masterClient.LookupFileId(ctx, fileId) + lookupFileIdFn := func(fileId string) (targetUrls []string, err error) { + return masterClient.LookupFileId(fileId) } - chunkViews := ViewFromChunks(ctx, lookupFileIdFn, chunks, 0, int64(len(buffer))) + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, int64(len(buffer))) idx := 0 - for x := chunkViews.Front(); x != nil; x = x.Next { - chunkView := x.Value - urlStrings, err := lookupFileIdFn(ctx, chunkView.FileId) + for _, chunkView := range chunkViews { + urlStrings, err := lookupFileIdFn(chunkView.FileId) if err != nil { - glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } - n, err := util_http.RetriedFetchChunkData(ctx, buffer[idx:idx+int(chunkView.ViewSize)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, chunkView.FileId) + n, err := retriedFetchChunkData(buffer[idx:idx+int(chunkView.Size)], urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset) if err != nil { return err } @@ -207,8 +164,7 @@ func ReadAll(ctx context.Context, buffer []byte, masterClient *wdclient.MasterCl // ---------------- ChunkStreamReader ---------------------------------- type ChunkStreamReader struct { - head *Interval[*ChunkView] - chunkView *Interval[*ChunkView] + chunkViews []*ChunkView totalSize int64 logicOffset int64 buffer []byte @@ -221,38 +177,39 @@ type ChunkStreamReader struct { var _ = io.ReadSeeker(&ChunkStreamReader{}) var _ = io.ReaderAt(&ChunkStreamReader{}) -func doNewChunkStreamReader(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) *ChunkStreamReader { +func doNewChunkStreamReader(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) *ChunkStreamReader { - chunkViews := ViewFromChunks(ctx, lookupFileIdFn, chunks, 0, math.MaxInt64) + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + slices.SortFunc(chunkViews, func(a, b *ChunkView) bool { + return a.LogicOffset < b.LogicOffset + }) var totalSize int64 - for x := chunkViews.Front(); x != nil; x = x.Next { - chunk := x.Value - totalSize += int64(chunk.ViewSize) + for _, chunk := range chunkViews { + totalSize += int64(chunk.Size) } return &ChunkStreamReader{ - head: chunkViews.Front(), - chunkView: chunkViews.Front(), + chunkViews: chunkViews, lookupFileId: lookupFileIdFn, totalSize: totalSize, } } -func NewChunkStreamReaderFromFiler(ctx context.Context, masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { +func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { - lookupFileIdFn := func(ctx context.Context, fileId string) (targetUrl []string, err error) { - return masterClient.LookupFileId(ctx, fileId) + lookupFileIdFn := func(fileId string) (targetUrl []string, err error) { + return masterClient.LookupFileId(fileId) } - return doNewChunkStreamReader(ctx, lookupFileIdFn, chunks) + return doNewChunkStreamReader(lookupFileIdFn, chunks) } func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { lookupFileIdFn := LookupFn(filerClient) - return doNewChunkStreamReader(context.Background(), lookupFileIdFn, chunks) + return doNewChunkStreamReader(lookupFileIdFn, chunks) } func (c *ChunkStreamReader) ReadAt(p []byte, off int64) (n int, err error) { @@ -312,7 +269,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { } func insideChunk(offset int64, chunk *ChunkView) bool { - return chunk.ViewOffset <= offset && offset < chunk.ViewOffset+int64(chunk.ViewSize) + return chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) } func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { @@ -320,31 +277,56 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) { return nil } - // glog.V(2).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize) - // find a possible chunk view - p := c.chunkView - for p != nil { - chunk := p.Value - // glog.V(2).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) - if insideChunk(offset, chunk) { - if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset { - c.chunkView = p - return c.fetchChunkToBuffer(chunk) - } - } - if offset < c.bufferOffset { - p = p.Prev + // fmt.Printf("fetch for offset %d\n", offset) + + // need to seek to a different chunk + currentChunkIndex := sort.Search(len(c.chunkViews), func(i int) bool { + return offset < c.chunkViews[i].LogicOffset + }) + if currentChunkIndex == len(c.chunkViews) { + // not found + if insideChunk(offset, c.chunkViews[0]) { + // fmt.Printf("select0 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) + currentChunkIndex = 0 + } else if insideChunk(offset, c.chunkViews[len(c.chunkViews)-1]) { + currentChunkIndex = len(c.chunkViews) - 1 + // fmt.Printf("select last chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) } else { - p = p.Next + return io.EOF } + } else if currentChunkIndex > 0 { + if insideChunk(offset, c.chunkViews[currentChunkIndex]) { + // good hit + } else if insideChunk(offset, c.chunkViews[currentChunkIndex-1]) { + currentChunkIndex -= 1 + // fmt.Printf("select -1 chunk %d %s\n", currentChunkIndex, c.chunkViews[currentChunkIndex].FileId) + } else { + // glog.Fatalf("unexpected1 offset %d", offset) + return fmt.Errorf("unexpected1 offset %d", offset) + } + } else { + // glog.Fatalf("unexpected2 offset %d", offset) + return fmt.Errorf("unexpected2 offset %d", offset) } - return io.EOF + // positioning within the new chunk + chunk := c.chunkViews[currentChunkIndex] + if insideChunk(offset, chunk) { + if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { + if err = c.fetchChunkToBuffer(chunk); err != nil { + return + } + } + } else { + // glog.Fatalf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + return fmt.Errorf("unexpected3 offset %d in %s [%d,%d)", offset, chunk.FileId, chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + } + return } func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { - urlStrings, err := c.lookupFileId(context.Background(), chunkView.FileId) + urlStrings, err := c.lookupFileId(chunkView.FileId) if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err @@ -352,7 +334,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { var buffer bytes.Buffer var shouldRetry bool for _, urlString := range urlStrings { - shouldRetry, err = util_http.ReadUrlAsStream(context.Background(), urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.OffsetInChunk, int(chunkView.ViewSize), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { buffer.Write(data) }) if !shouldRetry { @@ -369,10 +351,10 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { return err } c.buffer = buffer.Bytes() - c.bufferOffset = chunkView.ViewOffset + c.bufferOffset = chunkView.LogicOffset c.chunk = chunkView.FileId - // glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize)) + // glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) return nil } diff --git a/weed/filer/tarantool/doc.go b/weed/filer/tarantool/doc.go deleted file mode 100644 index 3c448e8e1..000000000 --- a/weed/filer/tarantool/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Package tarantool is for Tarantool filer store. - -The referenced "github.com/tarantool/go-tarantool/v2" library is too big when compiled. -So this is only compiled in "make full_install". -*/ -package tarantool diff --git a/weed/filer/tarantool/readme.md b/weed/filer/tarantool/readme.md deleted file mode 100644 index b51241488..000000000 --- a/weed/filer/tarantool/readme.md +++ /dev/null @@ -1,11 +0,0 @@ -## Tarantool - -database: https://www.tarantool.io/ - -go driver: https://github.com/tarantool/go-tarantool/ - -To set up local env: -`make -C docker test_tarantool` - -Run tests: -`RUN_TARANTOOL_TESTS=1 go test -tags=tarantool ./weed/filer/tarantool` \ No newline at end of file diff --git a/weed/filer/tarantool/tarantool_store.go b/weed/filer/tarantool/tarantool_store.go deleted file mode 100644 index 4c9f8a600..000000000 --- a/weed/filer/tarantool/tarantool_store.go +++ /dev/null @@ -1,318 +0,0 @@ -//go:build tarantool -// +build tarantool - -package tarantool - -import ( - "context" - "fmt" - "reflect" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - weed_util "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/tarantool/go-tarantool/v2" - "github.com/tarantool/go-tarantool/v2/crud" - "github.com/tarantool/go-tarantool/v2/pool" -) - -const ( - tarantoolSpaceName = "filer_metadata" -) - -func init() { - filer.Stores = append(filer.Stores, &TarantoolStore{}) -} - -type TarantoolStore struct { - pool *pool.ConnectionPool -} - -func (store *TarantoolStore) GetName() string { - return "tarantool" -} - -func (store *TarantoolStore) Initialize(configuration weed_util.Configuration, prefix string) error { - - configuration.SetDefault(prefix+"address", "localhost:3301") - configuration.SetDefault(prefix+"user", "guest") - configuration.SetDefault(prefix+"password", "") - configuration.SetDefault(prefix+"timeout", "5s") - configuration.SetDefault(prefix+"maxReconnects", "1000") - - address := configuration.GetString(prefix + "address") - user := configuration.GetString(prefix + "user") - password := configuration.GetString(prefix + "password") - - timeoutStr := configuration.GetString(prefix + "timeout") - timeout, err := time.ParseDuration(timeoutStr) - if err != nil { - return fmt.Errorf("parse tarantool store timeout: %w", err) - } - - maxReconnects := configuration.GetInt(prefix + "maxReconnects") - if maxReconnects < 0 { - return fmt.Errorf("maxReconnects is negative") - } - - addresses := strings.Split(address, ",") - - return store.initialize(addresses, user, password, timeout, uint(maxReconnects)) -} - -func (store *TarantoolStore) initialize(addresses []string, user string, password string, timeout time.Duration, maxReconnects uint) error { - - opts := tarantool.Opts{ - Timeout: timeout, - Reconnect: time.Second, - MaxReconnects: maxReconnects, - } - - poolInstances := makePoolInstances(addresses, user, password, opts) - poolOpts := pool.Opts{ - CheckTimeout: time.Second, - } - - ctx := context.Background() - p, err := pool.ConnectWithOpts(ctx, poolInstances, poolOpts) - if err != nil { - return fmt.Errorf("Can't create connection pool: %w", err) - } - - _, err = p.Do(tarantool.NewPingRequest(), pool.ANY).Get() - if err != nil { - return err - } - - store.pool = p - - return nil -} - -func makePoolInstances(addresses []string, user string, password string, opts tarantool.Opts) []pool.Instance { - poolInstances := make([]pool.Instance, 0, len(addresses)) - for i, address := range addresses { - poolInstances = append(poolInstances, makePoolInstance(address, user, password, opts, i)) - } - return poolInstances -} - -func makePoolInstance(address string, user string, password string, opts tarantool.Opts, serial int) pool.Instance { - return pool.Instance{ - Name: fmt.Sprintf("instance%d", serial), - Dialer: tarantool.NetDialer{ - Address: address, - User: user, - Password: password, - }, - Opts: opts, - } -} - -func (store *TarantoolStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} - -func (store *TarantoolStore) CommitTransaction(ctx context.Context) error { - return nil -} - -func (store *TarantoolStore) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *TarantoolStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { - meta = util.MaybeGzipData(meta) - } - - var ttl int64 - if entry.TtlSec > 0 { - ttl = time.Now().Unix() + int64(entry.TtlSec) - } else { - ttl = 0 - } - - var operations = []crud.Operation{ - { - Operator: crud.Insert, - Field: "data", - Value: string(meta), - }, - } - - req := crud.MakeUpsertRequest(tarantoolSpaceName). - Tuple([]interface{}{dir, nil, name, ttl, string(meta)}). - Operations(operations) - - ret := crud.Result{} - - if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - return nil -} - -func (store *TarantoolStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { - return store.InsertEntry(ctx, entry) -} - -func (store *TarantoolStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { - dir, name := fullpath.DirAndName() - - findEntryGetOpts := crud.GetOpts{ - Fields: crud.MakeOptTuple([]interface{}{"data"}), - Mode: crud.MakeOptString("read"), - PreferReplica: crud.MakeOptBool(true), - Balance: crud.MakeOptBool(true), - } - - req := crud.MakeGetRequest(tarantoolSpaceName). - Key(crud.Tuple([]interface{}{dir, name})). - Opts(findEntryGetOpts) - - resp := crud.Result{} - - err = store.pool.Do(req, pool.PreferRO).GetTyped(&resp) - if err != nil { - return nil, err - } - - results, ok := resp.Rows.([]interface{}) - if !ok || len(results) != 1 { - return nil, filer_pb.ErrNotFound - } - - rows, ok := results[0].([]interface{}) - if !ok || len(rows) != 1 { - return nil, filer_pb.ErrNotFound - } - - row, ok := rows[0].(string) - if !ok { - return nil, fmt.Errorf("Can't convert rows[0] field to string. Actual type: %v, value: %v", reflect.TypeOf(rows[0]), rows[0]) - } - - entry = &filer.Entry{ - FullPath: fullpath, - } - - err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData([]byte(row))) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *TarantoolStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { - dir, name := fullpath.DirAndName() - - delOpts := crud.DeleteOpts{ - Noreturn: crud.MakeOptBool(true), - } - - req := crud.MakeDeleteRequest(tarantoolSpaceName). - Key(crud.Tuple([]interface{}{dir, name})). - Opts(delOpts) - - if _, err := store.pool.Do(req, pool.RW).Get(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *TarantoolStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { - req := tarantool.NewCallRequest("filer_metadata.delete_by_directory_idx"). - Args([]interface{}{fullpath}) - - if _, err := store.pool.Do(req, pool.RW).Get(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *TarantoolStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed -} - -func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - - req := tarantool.NewCallRequest("filer_metadata.find_by_directory_idx_and_name"). - Args([]interface{}{string(dirPath), startFileName, includeStartFile, limit}) - - results, err := store.pool.Do(req, pool.PreferRO).Get() - if err != nil { - return - } - - if len(results) < 1 { - glog.ErrorfCtx(ctx, "Can't find results, data is empty") - return - } - - rows, ok := results[0].([]interface{}) - if !ok { - glog.ErrorfCtx(ctx, "Can't convert results[0] to list") - return - } - - for _, result := range rows { - row, ok := result.([]interface{}) - if !ok { - glog.ErrorfCtx(ctx, "Can't convert result to list") - return - } - - if len(row) < 5 { - glog.ErrorfCtx(ctx, "Length of result is less than needed: %v", len(row)) - return - } - - nameRaw := row[2] - name, ok := nameRaw.(string) - if !ok { - glog.ErrorfCtx(ctx, "Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw) - return - } - - dataRaw := row[4] - data, ok := dataRaw.(string) - if !ok { - glog.ErrorfCtx(ctx, "Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw) - return - } - - entry := &filer.Entry{ - FullPath: util.NewFullPath(string(dirPath), name), - } - lastFileName = name - if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil { - err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) - break - } - if !eachEntryFunc(entry) { - break - } - } - - return lastFileName, err -} - -func (store *TarantoolStore) Shutdown() { - store.pool.Close() -} diff --git a/weed/filer/tarantool/tarantool_store_kv.go b/weed/filer/tarantool/tarantool_store_kv.go deleted file mode 100644 index e45ff778c..000000000 --- a/weed/filer/tarantool/tarantool_store_kv.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build tarantool -// +build tarantool - -package tarantool - -import ( - "context" - "fmt" - "reflect" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/tarantool/go-tarantool/v2/crud" - "github.com/tarantool/go-tarantool/v2/pool" -) - -const ( - tarantoolKVSpaceName = "key_value" -) - -func (store *TarantoolStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { - - var operations = []crud.Operation{ - { - Operator: crud.Insert, - Field: "value", - Value: string(value), - }, - } - - req := crud.MakeUpsertRequest(tarantoolKVSpaceName). - Tuple([]interface{}{string(key), nil, string(value)}). - Operations(operations) - - ret := crud.Result{} - if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil { - return fmt.Errorf("kv put: %w", err) - } - - return nil -} - -func (store *TarantoolStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { - - getOpts := crud.GetOpts{ - Fields: crud.MakeOptTuple([]interface{}{"value"}), - Mode: crud.MakeOptString("read"), - PreferReplica: crud.MakeOptBool(true), - Balance: crud.MakeOptBool(true), - } - - req := crud.MakeGetRequest(tarantoolKVSpaceName). - Key(crud.Tuple([]interface{}{string(key)})). - Opts(getOpts) - - resp := crud.Result{} - - err = store.pool.Do(req, pool.PreferRO).GetTyped(&resp) - if err != nil { - return nil, err - } - - results, ok := resp.Rows.([]interface{}) - if !ok || len(results) != 1 { - return nil, filer.ErrKvNotFound - } - - rows, ok := results[0].([]interface{}) - if !ok || len(rows) != 1 { - return nil, filer.ErrKvNotFound - } - - row, ok := rows[0].(string) - if !ok { - return nil, fmt.Errorf("Can't convert rows[0] field to string. Actual type: %v, value: %v", reflect.TypeOf(rows[0]), rows[0]) - } - - return []byte(row), nil -} - -func (store *TarantoolStore) KvDelete(ctx context.Context, key []byte) (err error) { - - delOpts := crud.DeleteOpts{ - Noreturn: crud.MakeOptBool(true), - } - - req := crud.MakeDeleteRequest(tarantoolKVSpaceName). - Key(crud.Tuple([]interface{}{string(key)})). - Opts(delOpts) - - if _, err := store.pool.Do(req, pool.RW).Get(); err != nil { - return fmt.Errorf("kv delete: %w", err) - } - - return nil -} diff --git a/weed/filer/tarantool/tarantool_store_test.go b/weed/filer/tarantool/tarantool_store_test.go deleted file mode 100644 index 500289773..000000000 --- a/weed/filer/tarantool/tarantool_store_test.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build tarantool -// +build tarantool - -package tarantool - -import ( - "os" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer/store_test" -) - -func TestStore(t *testing.T) { - // run "make test_tarantool" under docker folder. - // to set up local env - if os.Getenv("RUN_TARANTOOL_TESTS") != "1" { - t.Skip("Tarantool tests are disabled. Set RUN_TARANTOOL_TESTS=1 to enable.") - } - store := &TarantoolStore{} - addresses := []string{"127.0.1:3303"} - store.initialize(addresses, "client", "client", 5*time.Second, 1000) - store_test.TestFilerStore(t, store) -} diff --git a/weed/filer/tikv/tikv_store.go b/weed/filer/tikv/tikv_store.go index 3708ddec5..ca6794f9c 100644 --- a/weed/filer/tikv/tikv_store.go +++ b/weed/filer/tikv/tikv_store.go @@ -10,12 +10,11 @@ import ( "fmt" "io" "strings" - "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/tikv/client-go/v2/config" "github.com/tikv/client-go/v2/txnkv" ) @@ -230,64 +229,67 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat } defer iter.Close() i := int64(0) + first := true for iter.Valid() { + if first { + first = false + if !includeStartFile { + if iter.Valid() { + // Check first item is lastFileStart + if bytes.Equal(iter.Key(), lastFileStart) { + // Is lastFileStart and not include start file, just + // ignore it. + err = iter.Next() + if err != nil { + return err + } + continue + } + } + } + } + // Check for limitation + if limit > 0 { + i++ + if i > limit { + break + } + } + // Validate key prefix key := iter.Key() if !bytes.HasPrefix(key, directoryPrefix) { break } + value := iter.Value() + + // Start process fileName := getNameFromKey(key) - if fileName == "" { - if err := iter.Next(); err != nil { + if fileName != "" { + // Got file name, then generate the Entry + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), fileName), + } + // Update lastFileName + lastFileName = fileName + // Check for decode value. + if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil { + // Got error just return the error + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + return err + } + // Run for each callback if return false just break the iteration + if !eachEntryFunc(entry) { break } - continue - } - if fileName == startFileName && !includeStartFile { - if err := iter.Next(); err != nil { - break - } - continue } + // End process - // Check limit only before processing valid entries - if limit > 0 && i >= limit { - break - } - - lastFileName = fileName - entry := &filer.Entry{ - FullPath: util.NewFullPath(string(dirPath), fileName), - } - - // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) - if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil { - err = decodeErr - glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err) - break - } - - // Check TTL expiration before calling eachEntryFunc (similar to Redis stores) - if entry.TtlSec > 0 { - if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { - // Entry is expired, delete it and continue without counting toward limit - if deleteErr := store.DeleteEntry(ctx, entry.FullPath); deleteErr != nil { - glog.V(0).InfofCtx(ctx, "failed to delete expired entry %s: %v", entry.FullPath, deleteErr) - } - if err := iter.Next(); err != nil { - break - } - continue - } - } - - // Only increment counter for non-expired entries - i++ - - if err := iter.Next(); !eachEntryFunc(entry) || err != nil { - break + err = iter.Next() + if err != nil { + return err } } - return err + return nil }) if err != nil { return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err) diff --git a/weed/filer/tikv/tikv_store_kv.go b/weed/filer/tikv/tikv_store_kv.go index a2aaafb7a..1d9428c69 100644 --- a/weed/filer/tikv/tikv_store_kv.go +++ b/weed/filer/tikv/tikv_store_kv.go @@ -6,7 +6,7 @@ package tikv import ( "context" - "github.com/seaweedfs/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/tikv/client-go/v2/txnkv" ) diff --git a/weed/filer/topics.go b/weed/filer/topics.go index 707a4f878..3a2fde8c4 100644 --- a/weed/filer/topics.go +++ b/weed/filer/topics.go @@ -1,7 +1,6 @@ package filer const ( - TopicsDir = "/topics" - SystemLogDir = TopicsDir + "/.system/log" - TopicConfFile = "topic.conf" + TopicsDir = "/topics" + SystemLogDir = TopicsDir + "/.system/log" ) diff --git a/weed/filer/ydb/doc.go b/weed/filer/ydb/doc.go index b0a713145..6ade3a8d8 100644 --- a/weed/filer/ydb/doc.go +++ b/weed/filer/ydb/doc.go @@ -1,7 +1,9 @@ /* + Package ydb is for YDB filer store. The referenced "github.com/ydb-platform/ydb-go-sdk/v3" library is too big when compiled. So this is only compiled in "make full_install". + */ package ydb diff --git a/weed/filer/ydb/ydb_queries.go b/weed/filer/ydb/ydb_queries.go index baafc59a1..f1db9a143 100644 --- a/weed/filer/ydb/ydb_queries.go +++ b/weed/filer/ydb/ydb_queries.go @@ -3,7 +3,7 @@ package ydb -import asql "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" +import asql "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" const ( upsertQuery = ` @@ -22,21 +22,19 @@ const ( deleteQuery = ` PRAGMA TablePathPrefix("%v"); DECLARE $dir_hash AS int64; - DECLARE $directory AS Utf8; DECLARE $name AS Utf8; DELETE FROM ` + asql.DEFAULT_TABLE + ` - WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;` + WHERE dir_hash = $dir_hash AND name = $name;` findQuery = ` PRAGMA TablePathPrefix("%v"); DECLARE $dir_hash AS int64; - DECLARE $directory AS Utf8; DECLARE $name AS Utf8; SELECT meta FROM ` + asql.DEFAULT_TABLE + ` - WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;` + WHERE dir_hash = $dir_hash AND name = $name;` deleteFolderChildrenQuery = ` PRAGMA TablePathPrefix("%v"); diff --git a/weed/filer/ydb/ydb_store.go b/weed/filer/ydb/ydb_store.go index 90b13aa04..1e3a55a09 100644 --- a/weed/filer/ydb/ydb_store.go +++ b/weed/filer/ydb/ydb_store.go @@ -6,54 +6,44 @@ package ydb import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + environ "github.com/ydb-platform/ydb-go-sdk-auth-environ" + "github.com/ydb-platform/ydb-go-sdk/v3" + "github.com/ydb-platform/ydb-go-sdk/v3/sugar" + "github.com/ydb-platform/ydb-go-sdk/v3/table" + "github.com/ydb-platform/ydb-go-sdk/v3/table/result" + "github.com/ydb-platform/ydb-go-sdk/v3/table/result/named" + "github.com/ydb-platform/ydb-go-sdk/v3/table/types" "os" "path" "strings" "sync" "time" - - "github.com/ydb-platform/ydb-go-sdk/v3/query" - "github.com/ydb-platform/ydb-go-sdk/v3/table/options" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - environ "github.com/ydb-platform/ydb-go-sdk-auth-environ" - "github.com/ydb-platform/ydb-go-sdk/v3" - "github.com/ydb-platform/ydb-go-sdk/v3/table" - "github.com/ydb-platform/ydb-go-sdk/v3/table/types" ) const ( - defaultDialTimeOut = 10 - defaultPartitionBySizeEnabled = true - defaultPartitionSizeMb = 200 - defaultPartitionByLoadEnabled = true - defaultMinPartitionsCount = 5 - defaultMaxPartitionsCount = 1000 - defaultMaxListChunk = 2000 + defaultDialTimeOut = 10 ) var ( - roQC = query.WithTxControl(query.OnlineReadOnlyTxControl()) - rwQC = query.WithTxControl(query.DefaultTxControl()) + roTX = table.TxControl( + table.BeginTx(table.WithOnlineReadOnly()), + table.CommitTx(), + ) + rwTX = table.DefaultTxControl() ) type YdbStore struct { - DB *ydb.Driver - dirBuckets string - tablePathPrefix string - SupportBucketTable bool - partitionBySizeEnabled options.FeatureFlag - partitionSizeMb uint64 - partitionByLoadEnabled options.FeatureFlag - minPartitionsCount uint64 - maxPartitionsCount uint64 - maxListChunk int - dbs map[string]bool - dbsLock sync.Mutex + DB ydb.Connection + dirBuckets string + tablePathPrefix string + SupportBucketTable bool + dbs map[string]bool + dbsLock sync.Mutex } func init() { @@ -65,12 +55,6 @@ func (store *YdbStore) GetName() string { } func (store *YdbStore) Initialize(configuration util.Configuration, prefix string) (err error) { - configuration.SetDefault(prefix+"partitionBySizeEnabled", defaultPartitionBySizeEnabled) - configuration.SetDefault(prefix+"partitionSizeMb", defaultPartitionSizeMb) - configuration.SetDefault(prefix+"partitionByLoadEnabled", defaultPartitionByLoadEnabled) - configuration.SetDefault(prefix+"minPartitionsCount", defaultMinPartitionsCount) - configuration.SetDefault(prefix+"maxPartitionsCount", defaultMaxPartitionsCount) - configuration.SetDefault(prefix+"maxListChunk", defaultMaxListChunk) return store.initialize( configuration.GetString("filer.options.buckets_folder"), configuration.GetString(prefix+"dsn"), @@ -78,43 +62,24 @@ func (store *YdbStore) Initialize(configuration util.Configuration, prefix strin configuration.GetBool(prefix+"useBucketPrefix"), configuration.GetInt(prefix+"dialTimeOut"), configuration.GetInt(prefix+"poolSizeLimit"), - configuration.GetBool(prefix+"partitionBySizeEnabled"), - uint64(configuration.GetInt(prefix+"partitionSizeMb")), - configuration.GetBool(prefix+"partitionByLoadEnabled"), - uint64(configuration.GetInt(prefix+"minPartitionsCount")), - uint64(configuration.GetInt(prefix+"maxPartitionsCount")), - configuration.GetInt(prefix+"maxListChunk"), ) } -func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int, partitionBySizeEnabled bool, partitionSizeMb uint64, partitionByLoadEnabled bool, minPartitionsCount uint64, maxPartitionsCount uint64, maxListChunk int) (err error) { +func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int) (err error) { store.dirBuckets = dirBuckets store.SupportBucketTable = useBucketPrefix - if partitionBySizeEnabled { - store.partitionBySizeEnabled = options.FeatureEnabled - } else { - store.partitionBySizeEnabled = options.FeatureDisabled - } - if partitionByLoadEnabled { - store.partitionByLoadEnabled = options.FeatureEnabled - } else { - store.partitionByLoadEnabled = options.FeatureDisabled - } - store.partitionSizeMb = partitionSizeMb - store.minPartitionsCount = minPartitionsCount - store.maxPartitionsCount = maxPartitionsCount - store.maxListChunk = maxListChunk if store.SupportBucketTable { glog.V(0).Infof("enabled BucketPrefix") } store.dbs = make(map[string]bool) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() if dialTimeOut == 0 { dialTimeOut = defaultDialTimeOut } opts := []ydb.Option{ ydb.WithDialTimeout(time.Duration(dialTimeOut) * time.Second), - environ.WithEnvironCredentials(), + environ.WithEnvironCredentials(ctx), } if poolSizeLimit > 0 { opts = append(opts, ydb.WithSessionPoolSizeLimit(poolSizeLimit)) @@ -124,41 +89,48 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix } store.DB, err = ydb.Open(ctx, dsn, opts...) if err != nil { - return fmt.Errorf("can not connect to %s: %w", dsn, err) + if store.DB != nil { + _ = store.DB.Close(ctx) + store.DB = nil + } + return fmt.Errorf("can not connect to %s error: %v", dsn, err) } store.tablePathPrefix = path.Join(store.DB.Name(), tablePathPrefix) + if err = sugar.MakeRecursive(ctx, store.DB, store.tablePathPrefix); err != nil { + return fmt.Errorf("MakeRecursive %s : %v", store.tablePathPrefix, err) + } - if err := store.ensureTables(ctx); err != nil { - return err + if err = store.createTable(ctx, store.tablePathPrefix); err != nil { + glog.Errorf("createTable %s: %v", store.tablePathPrefix, err) } return err } -func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.QueryParameters, ts query.ExecuteOption, processResultFunc func(res query.Result) error) (err error) { - var res query.Result - if tx, ok := ctx.Value("tx").(query.Transaction); ok { - res, err = tx.Query(ctx, *q, query.WithParameters(params)) +func (store *YdbStore) doTxOrDB(ctx context.Context, query *string, params *table.QueryParameters, tc *table.TransactionControl, processResultFunc func(res result.Result) error) (err error) { + var res result.Result + if tx, ok := ctx.Value("tx").(table.Transaction); ok { + res, err = tx.Execute(ctx, *query, params) if err != nil { - return fmt.Errorf("execute transaction: %w", err) + return fmt.Errorf("execute transaction: %v", err) } } else { - err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) { - res, err = s.Query(ctx, *q, query.WithParameters(params), ts) + err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) { + _, res, err = s.Execute(ctx, tc, *query, params) if err != nil { - return fmt.Errorf("execute statement: %w", err) + return fmt.Errorf("execute statement: %v", err) } return nil - }, query.WithIdempotent()) + }) } if err != nil { return err } if res != nil { - defer func() { _ = res.Close(ctx) }() + defer func() { _ = res.Close() }() if processResultFunc != nil { if err = processResultFunc(res); err != nil { - return fmt.Errorf("process result: %w", err) + return fmt.Errorf("process result: %v", err) } } } @@ -172,12 +144,12 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - if len(entry.GetChunks()) > filer.CountEntryChunksForGzip { + if len(entry.Chunks) > filer.CountEntryChunksForGzip { meta = util.MaybeGzipData(meta) } tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) fileMeta := FileMeta{util.HashStringToLong(dir), name, *shortDir, meta} - return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwQC, nil) + return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwTX, nil) } func (store *YdbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { @@ -193,29 +165,23 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e var data []byte entryFound := false tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) - q := withPragma(tablePathPrefix, findQuery) + query := withPragma(tablePathPrefix, findQuery) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), - table.ValueParam("$directory", types.UTF8Value(*shortDir)), table.ValueParam("$name", types.UTF8Value(name))) - err = store.doTxOrDB(ctx, q, queryParams, roQC, func(res query.Result) error { - for rs, err := range res.ResultSets(ctx) { - if err != nil { - return err - } - for row, err := range rs.Rows(ctx) { - if err != nil { - return err - } - if scanErr := row.Scan(&data); scanErr != nil { - return fmt.Errorf("scan %s: %v", fullpath, scanErr) - } - entryFound = true - return nil - } + err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error { + if !res.NextResultSet(ctx) || !res.HasNextRow() { + return nil } - return nil + for res.NextRow() { + if err = res.ScanNamed(named.OptionalWithDefault("meta", &data)); err != nil { + return fmt.Errorf("scanNamed %s : %v", fullpath, err) + } + entryFound = true + return nil + } + return res.Err() }) if err != nil { return nil, err @@ -224,35 +190,36 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e return nil, filer_pb.ErrNotFound } - entry = &filer.Entry{FullPath: fullpath} - if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { - return nil, fmt.Errorf("decode %s: %v", fullpath, decodeErr) + entry = &filer.Entry{ + FullPath: fullpath, } + if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { + return nil, fmt.Errorf("decode %s : %v", fullpath, err) + } + return entry, nil } func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { dir, name := fullpath.DirAndName() tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) - q := withPragma(tablePathPrefix, deleteQuery) - glog.V(4).InfofCtx(ctx, "DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir) + query := withPragma(tablePathPrefix, deleteQuery) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), - table.ValueParam("$directory", types.UTF8Value(*shortDir)), table.ValueParam("$name", types.UTF8Value(name))) - return store.doTxOrDB(ctx, q, queryParams, rwQC, nil) + return store.doTxOrDB(ctx, query, queryParams, rwTX, nil) } func (store *YdbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { - dir := string(fullpath) + dir, _ := fullpath.DirAndName() tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) - q := withPragma(tablePathPrefix, deleteFolderChildrenQuery) + query := withPragma(tablePathPrefix, deleteFolderChildrenQuery) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), table.ValueParam("$directory", types.UTF8Value(*shortDir))) - return store.doTxOrDB(ctx, q, queryParams, rwQC, nil) + return store.doTxOrDB(ctx, query, queryParams, rwTX, nil) } func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { @@ -262,79 +229,62 @@ func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.Fu func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { dir := string(dirPath) tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) - baseInclusive := withPragma(tablePathPrefix, listInclusiveDirectoryQuery) - baseExclusive := withPragma(tablePathPrefix, listDirectoryQuery) - var entryCount int64 - var prevFetchedLessThanChunk bool - for entryCount < limit { - if prevFetchedLessThanChunk { - break + var query *string + if includeStartFile { + query = withPragma(tablePathPrefix, listInclusiveDirectoryQuery) + } else { + query = withPragma(tablePathPrefix, listDirectoryQuery) + } + truncated := true + eachEntryFuncIsNotBreake := true + entryCount := int64(0) + for truncated && eachEntryFuncIsNotBreake { + if lastFileName != "" { + startFileName = lastFileName + if includeStartFile { + query = withPragma(tablePathPrefix, listDirectoryQuery) + } } - var q *string - if entryCount == 0 && includeStartFile { - q = baseInclusive - } else { - q = baseExclusive - } - rest := limit - entryCount - chunkLimit := rest - if chunkLimit > int64(store.maxListChunk) { - chunkLimit = int64(store.maxListChunk) - } - var rowCount int64 - - params := table.NewQueryParameters( + restLimit := limit - entryCount + queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), table.ValueParam("$directory", types.UTF8Value(*shortDir)), table.ValueParam("$start_name", types.UTF8Value(startFileName)), table.ValueParam("$prefix", types.UTF8Value(prefix+"%")), - table.ValueParam("$limit", types.Uint64Value(uint64(chunkLimit))), + table.ValueParam("$limit", types.Uint64Value(uint64(restLimit))), ) - - err := store.doTxOrDB(ctx, q, params, roQC, func(res query.Result) error { - for rs, err := range res.ResultSets(ctx) { - if err != nil { - return err - } - for row, err := range rs.Rows(ctx) { - if err != nil { - return err - } - - var name string - var data []byte - if scanErr := row.Scan(&name, &data); scanErr != nil { - return fmt.Errorf("scan %s: %w", dir, scanErr) - } - - lastFileName = name - entry := &filer.Entry{FullPath: util.NewFullPath(dir, name)} - if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { - return fmt.Errorf("decode entry %s: %w", entry.FullPath, decodeErr) - } - - if !eachEntryFunc(entry) { - return nil - } - - rowCount++ - entryCount++ - startFileName = lastFileName - - if entryCount >= limit { - return nil - } - } + err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error { + var name string + var data []byte + if !res.NextResultSet(ctx) || !res.HasNextRow() { + truncated = false + return nil } - return nil + truncated = res.CurrentResultSet().Truncated() + for res.NextRow() { + if err := res.ScanNamed( + named.OptionalWithDefault("name", &name), + named.OptionalWithDefault("meta", &data)); err != nil { + return fmt.Errorf("list scanNamed %s : %v", dir, err) + } + lastFileName = name + entry := &filer.Entry{ + FullPath: util.NewFullPath(dir, name), + } + if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { + return fmt.Errorf("scan decode %s : %v", entry.FullPath, err) + } + if !eachEntryFunc(entry) { + eachEntryFuncIsNotBreake = false + break + } + entryCount += 1 + } + return res.Err() }) - if err != nil { - return lastFileName, err - } - - if rowCount < chunkLimit { - prevFetchedLessThanChunk = true - } + } + if err != nil { + return lastFileName, err } return lastFileName, nil } @@ -370,23 +320,17 @@ func (store *YdbStore) Shutdown() { _ = store.DB.Close(context.Background()) } -var _ filer.BucketAware = (*YdbStore)(nil) - func (store *YdbStore) CanDropWholeBucket() bool { return store.SupportBucketTable } func (store *YdbStore) OnBucketCreation(bucket string) { - if !store.SupportBucketTable { - return - } - prefix := path.Join(store.tablePathPrefix, bucket) - store.dbsLock.Lock() defer store.dbsLock.Unlock() - if err := store.createTable(context.Background(), prefix); err != nil { - glog.Errorf("createTable %s: %v", prefix, err) + if err := store.createTable(context.Background(), + path.Join(store.tablePathPrefix, bucket)); err != nil { + glog.Errorf("createTable %s: %v", bucket, err) } if store.dbs == nil { @@ -396,21 +340,12 @@ func (store *YdbStore) OnBucketCreation(bucket string) { } func (store *YdbStore) OnBucketDeletion(bucket string) { - if !store.SupportBucketTable { - return - } store.dbsLock.Lock() defer store.dbsLock.Unlock() - prefix := path.Join(store.tablePathPrefix, bucket) - glog.V(4).Infof("deleting table %s", prefix) - - if err := store.deleteTable(context.Background(), prefix); err != nil { - glog.Errorf("deleteTable %s: %v", prefix, err) - } - - if err := store.DB.Scheme().RemoveDirectory(context.Background(), prefix); err != nil { - glog.Errorf("remove directory %s: %v", prefix, err) + if err := store.deleteTable(context.Background(), + path.Join(store.tablePathPrefix, bucket)); err != nil { + glog.Errorf("deleteTable %s: %v", bucket, err) } if store.dbs == nil { @@ -421,7 +356,7 @@ func (store *YdbStore) OnBucketDeletion(bucket string) { func (store *YdbStore) createTable(ctx context.Context, prefix string) error { return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { - return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), store.createTableOptions()...) + return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), createTableOptions()...) }) } @@ -434,7 +369,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error { }); err != nil { return err } - glog.V(4).InfofCtx(ctx, "deleted table %s", prefix) + glog.V(4).Infof("deleted table %s", prefix) return nil } @@ -447,11 +382,9 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre } prefixBuckets := store.dirBuckets + "/" - glog.V(4).InfofCtx(ctx, "dir: %s, prefixBuckets: %s", *dir, prefixBuckets) if strings.HasPrefix(*dir, prefixBuckets) { // detect bucket bucketAndDir := (*dir)[len(prefixBuckets):] - glog.V(4).InfofCtx(ctx, "bucketAndDir: %s", bucketAndDir) var bucket string if t := strings.Index(bucketAndDir, "/"); t > 0 { bucket = bucketAndDir[:t] @@ -465,50 +398,16 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre store.dbsLock.Lock() defer store.dbsLock.Unlock() + tablePathPrefixWithBucket := path.Join(store.tablePathPrefix, bucket) if _, found := store.dbs[bucket]; !found { - glog.V(4).InfofCtx(ctx, "bucket %q not in cache, verifying existence via DescribeTable", bucket) - tablePath := path.Join(store.tablePathPrefix, bucket, abstract_sql.DEFAULT_TABLE) - err2 := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { - _, err3 := s.DescribeTable(ctx, tablePath) - return err3 - }) - if err2 != nil { - glog.V(4).InfofCtx(ctx, "bucket %q not found (DescribeTable %s failed)", bucket, tablePath) - return + if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil { + store.dbs[bucket] = true + glog.V(4).Infof("created table %s", tablePathPrefixWithBucket) + } else { + glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err) } - glog.V(4).InfofCtx(ctx, "bucket %q exists, adding to cache", bucket) - store.dbs[bucket] = true } - bucketPrefix := path.Join(store.tablePathPrefix, bucket) - tablePathPrefix = &bucketPrefix + tablePathPrefix = &tablePathPrefixWithBucket } return } - -func (store *YdbStore) ensureTables(ctx context.Context) error { - prefixFull := store.tablePathPrefix - - glog.V(4).InfofCtx(ctx, "creating base table %s", prefixFull) - baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE) - if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { - return s.CreateTable(ctx, baseTable, store.createTableOptions()...) - }); err != nil { - return fmt.Errorf("failed to create base table %s: %v", baseTable, err) - } - - glog.V(4).InfofCtx(ctx, "creating bucket tables") - if store.SupportBucketTable { - store.dbsLock.Lock() - defer store.dbsLock.Unlock() - for bucket := range store.dbs { - glog.V(4).InfofCtx(ctx, "creating bucket table %s", bucket) - bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE) - if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { - return s.CreateTable(ctx, bucketTable, store.createTableOptions()...) - }); err != nil { - glog.ErrorfCtx(ctx, "failed to create bucket table %s: %v", bucketTable, err) - } - } - } - return nil -} diff --git a/weed/filer/ydb/ydb_store_kv.go b/weed/filer/ydb/ydb_store_kv.go index 070f17e23..72bbfff42 100644 --- a/weed/filer/ydb/ydb_store_kv.go +++ b/weed/filer/ydb/ydb_store_kv.go @@ -6,57 +6,51 @@ package ydb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/ydb-platform/ydb-go-sdk/v3/query" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/ydb-platform/ydb-go-sdk/v3/table" + "github.com/ydb-platform/ydb-go-sdk/v3/table/result/named" "github.com/ydb-platform/ydb-go-sdk/v3/table/types" ) func (store *YdbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { dirStr, dirHash, name := abstract_sql.GenDirAndName(key) fileMeta := FileMeta{dirHash, name, dirStr, value} - return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) { - _, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, upsertQuery), - query.WithParameters(fileMeta.queryParameters(0)), rwQC) + return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) { + _, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, upsertQuery), + fileMeta.queryParameters(0)) if err != nil { return fmt.Errorf("kv put execute %s: %v", util.NewFullPath(dirStr, name).Name(), err) } return nil - }, query.WithIdempotent()) + }) } func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { dirStr, dirHash, name := abstract_sql.GenDirAndName(key) valueFound := false - err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) error { - res, err := s.Query(ctx, *withPragma(&store.tablePathPrefix, findQuery), - query.WithParameters(table.NewQueryParameters( + err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { + _, res, err := s.Execute(ctx, roTX, *withPragma(&store.tablePathPrefix, findQuery), + table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(dirHash)), - table.ValueParam("$directory", types.UTF8Value(dirStr)), - table.ValueParam("$name", types.UTF8Value(name)))), roQC) + table.ValueParam("$name", types.UTF8Value(name)))) if err != nil { return fmt.Errorf("kv get execute %s: %v", util.NewFullPath(dirStr, name).Name(), err) } - defer func() { _ = res.Close(ctx) }() - for rs, err := range res.ResultSets(ctx) { - if err != nil { - return err - } - for row, err := range rs.Rows(ctx) { - if err != nil { - return err - } - if err := row.Scan(&value); err != nil { - return fmt.Errorf("scan %s : %v", util.NewFullPath(dirStr, name).Name(), err) - } - valueFound = true - return nil - } + defer func() { _ = res.Close() }() + if !res.NextResultSet(ctx) || !res.HasNextRow() { + return nil } - return nil - }, query.WithIdempotent()) + for res.NextRow() { + if err := res.ScanNamed(named.OptionalWithDefault("meta", &value)); err != nil { + return fmt.Errorf("scanNamed %s : %v", util.NewFullPath(dirStr, name).Name(), err) + } + valueFound = true + return nil + } + return res.Err() + }) if !valueFound { return nil, filer.ErrKvNotFound @@ -67,16 +61,15 @@ func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err func (store *YdbStore) KvDelete(ctx context.Context, key []byte) (err error) { dirStr, dirHash, name := abstract_sql.GenDirAndName(key) - return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) { - _, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, deleteQuery), - query.WithParameters(table.NewQueryParameters( + return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) { + _, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, deleteQuery), + table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(dirHash)), - table.ValueParam("$directory", types.UTF8Value(dirStr)), - table.ValueParam("$name", types.UTF8Value(name)))), rwQC) + table.ValueParam("$name", types.UTF8Value(name)))) if err != nil { return fmt.Errorf("kv delete %s: %v", util.NewFullPath(dirStr, name).Name(), err) } return nil - }, query.WithIdempotent()) + }) } diff --git a/weed/filer/ydb/ydb_store_test.go b/weed/filer/ydb/ydb_store_test.go index 1deef465c..cb3c77018 100644 --- a/weed/filer/ydb/ydb_store_test.go +++ b/weed/filer/ydb/ydb_store_test.go @@ -4,7 +4,7 @@ package ydb import ( - "github.com/seaweedfs/seaweedfs/weed/filer/store_test" + "github.com/chrislusf/seaweedfs/weed/filer/store_test" "testing" ) @@ -13,8 +13,7 @@ func TestStore(t *testing.T) { // to set up local env if false { store := &YdbStore{} - store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50, - true, 200, true, 5, 1000, 2000) + store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50) store_test.TestFilerStore(t, store) } } diff --git a/weed/filer/ydb/ydb_types.go b/weed/filer/ydb/ydb_types.go index 05e0b7173..4e5100236 100644 --- a/weed/filer/ydb/ydb_types.go +++ b/weed/filer/ydb/ydb_types.go @@ -22,7 +22,7 @@ type FileMetas []FileMeta func (fm *FileMeta) queryParameters(ttlSec int32) *table.QueryParameters { var expireAtValue types.Value if ttlSec > 0 { - expireAtValue = types.OptionalValue(types.Uint32Value(uint32(ttlSec))) + expireAtValue = types.Uint32Value(uint32(ttlSec)) } else { expireAtValue = types.NullValue(types.TypeUint32) } @@ -30,35 +30,26 @@ func (fm *FileMeta) queryParameters(ttlSec int32) *table.QueryParameters { table.ValueParam("$dir_hash", types.Int64Value(fm.DirHash)), table.ValueParam("$directory", types.UTF8Value(fm.Directory)), table.ValueParam("$name", types.UTF8Value(fm.Name)), - table.ValueParam("$meta", types.BytesValue(fm.Meta)), + table.ValueParam("$meta", types.StringValue(fm.Meta)), table.ValueParam("$expire_at", expireAtValue)) } -func (store *YdbStore) createTableOptions() []options.CreateTableOption { +func createTableOptions() []options.CreateTableOption { columnUnit := options.TimeToLiveUnitSeconds return []options.CreateTableOption{ - options.WithColumn("dir_hash", types.TypeInt64), - options.WithColumn("directory", types.TypeUTF8), - options.WithColumn("name", types.TypeUTF8), - options.WithColumn("meta", types.TypeString), + options.WithColumn("dir_hash", types.Optional(types.TypeInt64)), + options.WithColumn("directory", types.Optional(types.TypeUTF8)), + options.WithColumn("name", types.Optional(types.TypeUTF8)), + options.WithColumn("meta", types.Optional(types.TypeString)), options.WithColumn("expire_at", types.Optional(types.TypeUint32)), - options.WithPrimaryKeyColumn("dir_hash", "directory", "name"), + options.WithPrimaryKeyColumn("dir_hash", "name"), options.WithTimeToLiveSettings(options.TimeToLiveSettings{ ColumnName: "expire_at", ColumnUnit: &columnUnit, Mode: options.TimeToLiveModeValueSinceUnixEpoch}, ), - options.WithPartitioningSettings( - options.WithPartitioningBy([]string{"dir_hash", "name"}), - options.WithPartitioningBySize(store.partitionBySizeEnabled), - options.WithPartitionSizeMb(store.partitionSizeMb), - options.WithPartitioningByLoad(store.partitionByLoadEnabled), - options.WithMinPartitionsCount(store.minPartitionsCount), - options.WithMaxPartitionsCount(store.maxPartitionsCount), - ), } } - func withPragma(prefix *string, query string) *string { queryWithPragma := fmt.Sprintf(query, *prefix) return &queryWithPragma diff --git a/weed/filer_client/filer_client_accessor.go b/weed/filer_client/filer_client_accessor.go deleted file mode 100644 index 955a295cc..000000000 --- a/weed/filer_client/filer_client_accessor.go +++ /dev/null @@ -1,244 +0,0 @@ -package filer_client - -import ( - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc" -) - -// filerHealth tracks the health status of a filer -type filerHealth struct { - address pb.ServerAddress - failureCount int32 - lastFailure time.Time - backoffUntil time.Time -} - -// isHealthy returns true if the filer is not in backoff period -func (fh *filerHealth) isHealthy() bool { - return time.Now().After(fh.backoffUntil) -} - -// recordFailure updates failure count and sets backoff time using exponential backoff -func (fh *filerHealth) recordFailure() { - count := atomic.AddInt32(&fh.failureCount, 1) - fh.lastFailure = time.Now() - - // Exponential backoff: 1s, 2s, 4s, 8s, 16s, 32s, max 30s - // Calculate 2^(count-1) but cap the result at 30 seconds - backoffSeconds := 1 << (count - 1) - if backoffSeconds > 30 { - backoffSeconds = 30 - } - fh.backoffUntil = time.Now().Add(time.Duration(backoffSeconds) * time.Second) - - glog.V(1).Infof("Filer %v failed %d times, backing off for %ds", fh.address, count, backoffSeconds) -} - -// recordSuccess resets failure count and clears backoff -func (fh *filerHealth) recordSuccess() { - atomic.StoreInt32(&fh.failureCount, 0) - fh.backoffUntil = time.Time{} -} - -type FilerClientAccessor struct { - GetGrpcDialOption func() grpc.DialOption - GetFilers func() []pb.ServerAddress // Returns multiple filer addresses for failover - - // Health tracking for smart failover - filerHealthMap sync.Map // map[pb.ServerAddress]*filerHealth -} - -// getOrCreateFilerHealth returns the health tracker for a filer, creating one if needed -func (fca *FilerClientAccessor) getOrCreateFilerHealth(address pb.ServerAddress) *filerHealth { - if health, ok := fca.filerHealthMap.Load(address); ok { - return health.(*filerHealth) - } - - newHealth := &filerHealth{ - address: address, - failureCount: 0, - backoffUntil: time.Time{}, - } - - actual, _ := fca.filerHealthMap.LoadOrStore(address, newHealth) - return actual.(*filerHealth) -} - -// partitionFilers separates filers into healthy and backoff groups -func (fca *FilerClientAccessor) partitionFilers(filers []pb.ServerAddress) (healthy, backoff []pb.ServerAddress) { - for _, filer := range filers { - health := fca.getOrCreateFilerHealth(filer) - if health.isHealthy() { - healthy = append(healthy, filer) - } else { - backoff = append(backoff, filer) - } - } - return healthy, backoff -} - -// shuffleFilers randomizes the order of filers to distribute load -func (fca *FilerClientAccessor) shuffleFilers(filers []pb.ServerAddress) []pb.ServerAddress { - if len(filers) <= 1 { - return filers - } - - shuffled := make([]pb.ServerAddress, len(filers)) - copy(shuffled, filers) - - // Fisher-Yates shuffle - for i := len(shuffled) - 1; i > 0; i-- { - j := rand.Intn(i + 1) - shuffled[i], shuffled[j] = shuffled[j], shuffled[i] - } - - return shuffled -} - -func (fca *FilerClientAccessor) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return fca.withMultipleFilers(streamingMode, fn) -} - -// withMultipleFilers tries each filer with smart failover and backoff logic -func (fca *FilerClientAccessor) withMultipleFilers(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - filers := fca.GetFilers() - if len(filers) == 0 { - return fmt.Errorf("no filer addresses available") - } - - // Partition filers into healthy and backoff groups - healthyFilers, backoffFilers := fca.partitionFilers(filers) - - // Shuffle healthy filers to distribute load evenly - healthyFilers = fca.shuffleFilers(healthyFilers) - - // Try healthy filers first - var lastErr error - for _, filerAddress := range healthyFilers { - health := fca.getOrCreateFilerHealth(filerAddress) - - err := pb.WithFilerClient(streamingMode, 0, filerAddress, fca.GetGrpcDialOption(), fn) - if err == nil { - // Success - record it and return - health.recordSuccess() - glog.V(2).Infof("Filer %v succeeded", filerAddress) - return nil - } - - // Record failure and continue to next filer - health.recordFailure() - lastErr = err - glog.V(1).Infof("Healthy filer %v failed: %v, trying next", filerAddress, err) - } - - // If all healthy filers failed, try backoff filers as last resort - if len(backoffFilers) > 0 { - glog.V(1).Infof("All healthy filers failed, trying %d backoff filers", len(backoffFilers)) - - for _, filerAddress := range backoffFilers { - health := fca.getOrCreateFilerHealth(filerAddress) - - err := pb.WithFilerClient(streamingMode, 0, filerAddress, fca.GetGrpcDialOption(), fn) - if err == nil { - // Success - record it and return - health.recordSuccess() - glog.V(1).Infof("Backoff filer %v recovered and succeeded", filerAddress) - return nil - } - - // Update failure record - health.recordFailure() - lastErr = err - glog.V(1).Infof("Backoff filer %v still failing: %v", filerAddress, err) - } - } - - return fmt.Errorf("all filer connections failed, last error: %v", lastErr) -} - -func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) error { - - glog.V(0).Infof("save conf for topic %v to filer", t) - - // save the topic configuration on filer - return fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return t.WriteConfFile(client, conf) - }) -} - -func (fca *FilerClientAccessor) ReadTopicConfFromFiler(t topic.Topic) (conf *mq_pb.ConfigureTopicResponse, err error) { - - glog.V(1).Infof("load conf for topic %v from filer", t) - - if err = fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - conf, err = t.ReadConfFile(client) - return err - }); err != nil { - return nil, err - } - - return conf, nil -} - -// ReadTopicConfFromFilerWithMetadata reads topic configuration along with file creation and modification times -func (fca *FilerClientAccessor) ReadTopicConfFromFilerWithMetadata(t topic.Topic) (conf *mq_pb.ConfigureTopicResponse, createdAtNs, modifiedAtNs int64, err error) { - - glog.V(1).Infof("load conf with metadata for topic %v from filer", t) - - if err = fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - conf, createdAtNs, modifiedAtNs, err = t.ReadConfFileWithMetadata(client) - return err - }); err != nil { - return nil, 0, 0, err - } - - return conf, createdAtNs, modifiedAtNs, nil -} - -// NewFilerClientAccessor creates a FilerClientAccessor with one or more filers -func NewFilerClientAccessor(filerAddresses []pb.ServerAddress, grpcDialOption grpc.DialOption) *FilerClientAccessor { - if len(filerAddresses) == 0 { - panic("at least one filer address is required") - } - - return &FilerClientAccessor{ - GetGrpcDialOption: func() grpc.DialOption { - return grpcDialOption - }, - GetFilers: func() []pb.ServerAddress { - return filerAddresses - }, - filerHealthMap: sync.Map{}, - } -} - -// AddFilerAddresses adds more filer addresses to the existing list -func (fca *FilerClientAccessor) AddFilerAddresses(additionalFilers []pb.ServerAddress) { - if len(additionalFilers) == 0 { - return - } - - // Get the current filers if available - var allFilers []pb.ServerAddress - if fca.GetFilers != nil { - allFilers = append(allFilers, fca.GetFilers()...) - } - - // Add the additional filers - allFilers = append(allFilers, additionalFilers...) - - // Update the filers list - fca.GetFilers = func() []pb.ServerAddress { - return allFilers - } -} diff --git a/weed/filer_client/filer_discovery.go b/weed/filer_client/filer_discovery.go deleted file mode 100644 index 49cfcd314..000000000 --- a/weed/filer_client/filer_discovery.go +++ /dev/null @@ -1,193 +0,0 @@ -package filer_client - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "google.golang.org/grpc" -) - -const ( - // FilerDiscoveryInterval is the interval for refreshing filer list from masters - FilerDiscoveryInterval = 30 * time.Second - // InitialDiscoveryInterval is the faster interval for initial discovery - InitialDiscoveryInterval = 5 * time.Second - // InitialDiscoveryRetries is the number of fast retries during startup - InitialDiscoveryRetries = 6 // 6 retries * 5 seconds = 30 seconds total -) - -// FilerDiscoveryService handles dynamic discovery and refresh of filers from masters -type FilerDiscoveryService struct { - masters []pb.ServerAddress - grpcDialOption grpc.DialOption - filers []pb.ServerAddress - filersMutex sync.RWMutex - refreshTicker *time.Ticker - stopChan chan struct{} - wg sync.WaitGroup - initialRetries int -} - -// NewFilerDiscoveryService creates a new filer discovery service -func NewFilerDiscoveryService(masters []pb.ServerAddress, grpcDialOption grpc.DialOption) *FilerDiscoveryService { - return &FilerDiscoveryService{ - masters: masters, - grpcDialOption: grpcDialOption, - filers: make([]pb.ServerAddress, 0), - stopChan: make(chan struct{}), - } -} - -// No need for convertHTTPToGRPC - pb.ServerAddress.ToGrpcAddress() already handles this - -// discoverFilersFromMaster discovers filers from a single master -func (fds *FilerDiscoveryService) discoverFilersFromMaster(masterAddr pb.ServerAddress) ([]pb.ServerAddress, error) { - // Convert HTTP master address to gRPC address (HTTP port + 10000) - grpcAddr := masterAddr.ToGrpcAddress() - - conn, err := grpc.NewClient(grpcAddr, fds.grpcDialOption) - if err != nil { - return nil, fmt.Errorf("failed to connect to master at %s: %v", grpcAddr, err) - } - defer conn.Close() - - client := master_pb.NewSeaweedClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.ListClusterNodes(ctx, &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - glog.Errorf("FILER DISCOVERY: ListClusterNodes failed for master %s: %v", masterAddr, err) - return nil, fmt.Errorf("failed to list filers from master %s: %v", masterAddr, err) - } - - var filers []pb.ServerAddress - for _, node := range resp.ClusterNodes { - // Return HTTP address (lock client will convert to gRPC when needed) - filers = append(filers, pb.ServerAddress(node.Address)) - } - - return filers, nil -} - -// refreshFilers discovers filers from all masters and updates the filer list -func (fds *FilerDiscoveryService) refreshFilers() { - glog.V(2).Info("Refreshing filer list from masters") - - var allFilers []pb.ServerAddress - var discoveryErrors []error - - // Try each master to discover filers - for _, masterAddr := range fds.masters { - filers, err := fds.discoverFilersFromMaster(masterAddr) - if err != nil { - discoveryErrors = append(discoveryErrors, err) - glog.V(1).Infof("Failed to discover filers from master %s: %v", masterAddr, err) - continue - } - - allFilers = append(allFilers, filers...) - glog.V(2).Infof("Discovered %d filers from master %s", len(filers), masterAddr) - } - - // Deduplicate filers - filerSet := make(map[pb.ServerAddress]bool) - for _, filer := range allFilers { - filerSet[filer] = true - } - - uniqueFilers := make([]pb.ServerAddress, 0, len(filerSet)) - for filer := range filerSet { - uniqueFilers = append(uniqueFilers, filer) - } - - // Update the filer list - fds.filersMutex.Lock() - oldCount := len(fds.filers) - fds.filers = uniqueFilers - newCount := len(fds.filers) - fds.filersMutex.Unlock() - - if newCount > 0 { - glog.V(1).Infof("Filer discovery successful: updated from %d to %d filers", oldCount, newCount) - } else if len(discoveryErrors) > 0 { - glog.Warningf("Failed to discover any filers from %d masters, keeping existing %d filers", len(fds.masters), oldCount) - } -} - -// GetFilers returns the current list of filers -func (fds *FilerDiscoveryService) GetFilers() []pb.ServerAddress { - fds.filersMutex.RLock() - defer fds.filersMutex.RUnlock() - - // Return a copy to avoid concurrent modification - filers := make([]pb.ServerAddress, len(fds.filers)) - copy(filers, fds.filers) - return filers -} - -// Start begins the filer discovery service -func (fds *FilerDiscoveryService) Start() error { - glog.V(1).Info("Starting filer discovery service") - - // Initial discovery - fds.refreshFilers() - - // Start with faster discovery during startup - fds.initialRetries = InitialDiscoveryRetries - interval := InitialDiscoveryInterval - if len(fds.GetFilers()) > 0 { - // If we found filers immediately, use normal interval - interval = FilerDiscoveryInterval - fds.initialRetries = 0 - } - - // Start periodic refresh - fds.refreshTicker = time.NewTicker(interval) - fds.wg.Add(1) - go func() { - defer fds.wg.Done() - for { - select { - case <-fds.refreshTicker.C: - fds.refreshFilers() - - // Switch to normal interval after initial retries - if fds.initialRetries > 0 { - fds.initialRetries-- - if fds.initialRetries == 0 || len(fds.GetFilers()) > 0 { - glog.V(1).Info("Switching to normal filer discovery interval") - fds.refreshTicker.Stop() - fds.refreshTicker = time.NewTicker(FilerDiscoveryInterval) - } - } - case <-fds.stopChan: - glog.V(1).Info("Filer discovery service stopping") - return - } - } - }() - - return nil -} - -// Stop stops the filer discovery service -func (fds *FilerDiscoveryService) Stop() error { - glog.V(1).Info("Stopping filer discovery service") - - close(fds.stopChan) - if fds.refreshTicker != nil { - fds.refreshTicker.Stop() - } - fds.wg.Wait() - - return nil -} diff --git a/weed/ftpd/ftp_server.go b/weed/ftpd/ftp_server.go new file mode 100644 index 000000000..253ff3edd --- /dev/null +++ b/weed/ftpd/ftp_server.go @@ -0,0 +1,81 @@ +package ftpd + +import ( + "crypto/tls" + "errors" + "github.com/chrislusf/seaweedfs/weed/util" + "net" + + ftpserver "github.com/fclairamb/ftpserverlib" + "google.golang.org/grpc" +) + +type FtpServerOption struct { + Filer string + IP string + IpBind string + Port int + FilerGrpcAddress string + FtpRoot string + GrpcDialOption grpc.DialOption + PassivePortStart int + PassivePortStop int +} + +type SftpServer struct { + option *FtpServerOption + ftpListener net.Listener +} + +var _ = ftpserver.MainDriver(&SftpServer{}) + +// NewServer returns a new FTP server driver +func NewFtpServer(ftpListener net.Listener, option *FtpServerOption) (*SftpServer, error) { + var err error + server := &SftpServer{ + option: option, + ftpListener: ftpListener, + } + return server, err +} + +// GetSettings returns some general settings around the server setup +func (s *SftpServer) GetSettings() (*ftpserver.Settings, error) { + var portRange *ftpserver.PortRange + if s.option.PassivePortStart > 0 && s.option.PassivePortStop > s.option.PassivePortStart { + portRange = &ftpserver.PortRange{ + Start: s.option.PassivePortStart, + End: s.option.PassivePortStop, + } + } + + return &ftpserver.Settings{ + Listener: s.ftpListener, + ListenAddr: util.JoinHostPort(s.option.IpBind, s.option.Port), + PublicHost: s.option.IP, + PassiveTransferPortRange: portRange, + ActiveTransferPortNon20: true, + IdleTimeout: -1, + ConnectionTimeout: 20, + }, nil +} + +// ClientConnected is called to send the very first welcome message +func (s *SftpServer) ClientConnected(cc ftpserver.ClientContext) (string, error) { + return "Welcome to SeaweedFS FTP Server", nil +} + +// ClientDisconnected is called when the user disconnects, even if he never authenticated +func (s *SftpServer) ClientDisconnected(cc ftpserver.ClientContext) { +} + +// AuthUser authenticates the user and selects an handling driver +func (s *SftpServer) AuthUser(cc ftpserver.ClientContext, username, password string) (ftpserver.ClientDriver, error) { + return nil, nil +} + +// GetTLSConfig returns a TLS Certificate to use +// The certificate could frequently change if we use something like "let's encrypt" +func (s *SftpServer) GetTLSConfig() (*tls.Config, error) { + return nil, errors.New("no TLS certificate configured") +} diff --git a/weed/glog/glog.go b/weed/glog/glog.go index e04df39e6..352a7e185 100644 --- a/weed/glog/glog.go +++ b/weed/glog/glog.go @@ -67,6 +67,7 @@ // "glob" pattern and N is a V level. For instance, // -vmodule=gopher*=3 // sets the V level to 3 in all Go files whose names begin "gopher". +// package glog import ( @@ -74,6 +75,7 @@ import ( "bytes" "errors" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "io" stdLog "log" "os" @@ -84,8 +86,6 @@ import ( "sync" "sync/atomic" "time" - - flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" ) // severity identifies the sort of log: info, warning etc. It also implements @@ -524,11 +524,8 @@ It returns a buffer containing the formatted header and the user's file and line The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... - where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) @@ -577,15 +574,16 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { buf.twoDigits(9, minute) buf.tmp[11] = ':' buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.Write(buf.tmp[:22]) + buf.tmp[14] = ' ' + buf.nDigits(5, 15, pid, ' ') // TODO: should be TID + buf.tmp[20] = ' ' + buf.Write(buf.tmp[:21]) buf.WriteString(file) buf.tmp[0] = ':' n := buf.someDigits(1, line) - buf.tmp[n+1] = ' ' - buf.Write(buf.tmp[:n+2]) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) return buf } @@ -691,29 +689,18 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo l.exit(err) } } - // After exit is called, don't try to write to files - if !l.exited { - switch s { - case fatalLog: - if l.file[fatalLog] != nil { - l.file[fatalLog].Write(data) - } - fallthrough - case errorLog: - if l.file[errorLog] != nil { - l.file[errorLog].Write(data) - } - fallthrough - case warningLog: - if l.file[warningLog] != nil { - l.file[warningLog].Write(data) - } - fallthrough - case infoLog: - if l.file[infoLog] != nil { - l.file[infoLog].Write(data) - } - } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) } } if s == fatalLog { @@ -826,14 +813,9 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { if sb.logger.exited { return } - // Check if Writer is nil (can happen if rotateFile failed) - if sb.Writer == nil { - return 0, errors.New("log writer is nil") - } if sb.nbytes+uint64(len(p)) >= MaxSize { if err := sb.rotateFile(time.Now()); err != nil { sb.logger.exit(err) - return 0, err } } n, err = sb.Writer.Write(p) @@ -1005,13 +987,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// // if glog.V(2) { glog.Info("log this") } -// // or -// // glog.V(2).Info("log this") -// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // diff --git a/weed/glog/glog_ctx.go b/weed/glog/glog_ctx.go deleted file mode 100644 index daae3b148..000000000 --- a/weed/glog/glog_ctx.go +++ /dev/null @@ -1,246 +0,0 @@ -package glog - -import ( - "context" - "fmt" - "sync/atomic" - - reqid "github.com/seaweedfs/seaweedfs/weed/util/request_id" -) - -const requestIDField = "request_id" - -// formatMetaTag returns a formatted request ID tag from the context, -// like "request_id:abc123". Returns an empty string if no request ID is found. -func formatMetaTag(ctx context.Context) string { - if requestID := reqid.Get(ctx); requestID != "" { - return fmt.Sprintf("%s:%s", requestIDField, requestID) - } - return "" -} - -// InfoCtx is a context-aware alternative to Verbose.Info. -// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present. -// Arguments are handled in the manner of fmt.Print. -func (v Verbose) InfoCtx(ctx context.Context, args ...interface{}) { - if !v { - return - } - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(infoLog, args...) -} - -// InfolnCtx is a context-aware alternative to Verbose.Infoln. -// Logs to the INFO log, prepending a request ID from the context if it exists. -// Arguments are handled in the manner of fmt.Println. -func (v Verbose) InfolnCtx(ctx context.Context, args ...interface{}) { - if !v { - return - } - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.println(infoLog, args...) -} - -// InfofCtx is a context-aware alternative to Verbose.Infof. -// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present. -// Arguments are handled in the manner of fmt.Printf. -func (v Verbose) InfofCtx(ctx context.Context, format string, args ...interface{}) { - if !v { - return - } - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(infoLog, format, args...) -} - -// InfofCtx logs a formatted message at info level, prepending a request ID from -// the context if it exists. This is a context-aware alternative to Infof. -func InfofCtx(ctx context.Context, format string, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(infoLog, format, args...) -} - -// InfoCtx logs a message at info level, prepending a request ID from the context -// if it exists. This is a context-aware alternative to Info. -func InfoCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(infoLog, args...) -} - -// WarningCtx logs to the WARNING and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to Warning. -func WarningCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(warningLog, args...) -} - -// WarningDepthCtx logs to the WARNING and INFO logs with a custom call depth. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to WarningDepth. -func WarningDepthCtx(ctx context.Context, depth int, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.printDepth(warningLog, depth, args...) -} - -// WarninglnCtx logs to the WARNING and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println. -// This is a context-aware alternative to Warningln. -func WarninglnCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.println(warningLog, args...) -} - -// WarningfCtx logs to the WARNING and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf. -// This is a context-aware alternative to Warningf. -func WarningfCtx(ctx context.Context, format string, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(warningLog, format, args...) -} - -// ErrorCtx logs to the ERROR, WARNING, and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to Error. -func ErrorCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(errorLog, args...) -} - -// ErrorDepthCtx logs to the ERROR, WARNING, and INFO logs with a custom call depth. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to ErrorDepth. -func ErrorDepthCtx(ctx context.Context, depth int, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.printDepth(errorLog, depth, args...) -} - -// ErrorlnCtx logs to the ERROR, WARNING, and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println. -// This is a context-aware alternative to Errorln. -func ErrorlnCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.println(errorLog, args...) -} - -// ErrorfCtx logs to the ERROR, WARNING, and INFO logs. -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf. -// This is a context-aware alternative to Errorf. -func ErrorfCtx(ctx context.Context, format string, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(errorLog, format, args...) -} - -// FatalCtx logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to Fatal. -func FatalCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(fatalLog, args...) -} - -// FatalDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to FatalDepth. -func FatalDepthCtx(ctx context.Context, depth int, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.printDepth(fatalLog, depth, args...) -} - -// FatallnCtx logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println. -// This is a context-aware alternative to Fatalln. -func FatallnCtx(ctx context.Context, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.println(fatalLog, args...) -} - -// FatalfCtx logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf. -// This is a context-aware alternative to Fatalf. -func FatalfCtx(ctx context.Context, format string, args ...interface{}) { - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(fatalLog, format, args...) -} - -// ExitCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to ExitCtx -func ExitCtx(ctx context.Context, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.print(fatalLog, args...) -} - -// ExitDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth, -// then calls os.Exit(1). Prepends a request ID from the context if it exists. -// Arguments are handled in the manner of fmt.Print. -// This is a context-aware alternative to ExitDepth. -func ExitDepthCtx(ctx context.Context, depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.printDepth(fatalLog, depth, args...) -} - -// ExitlnCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println. -// This is a context-aware alternative to Exitln. -func ExitlnCtx(ctx context.Context, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - if metaTag := formatMetaTag(ctx); metaTag != "" { - args = append([]interface{}{metaTag}, args...) - } - logging.println(fatalLog, args...) -} - -// ExitfCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf. -// This is a context-aware alternative to Exitf. -func ExitfCtx(ctx context.Context, format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - if metaTag := formatMetaTag(ctx); metaTag != "" { - format = metaTag + " " + format - } - logging.printf(fatalLog, format, args...) -} diff --git a/weed/glog/glog_file.go b/weed/glog/glog_file.go index 631a4cc99..3f700d8fc 100644 --- a/weed/glog/glog_file.go +++ b/weed/glog/glog_file.go @@ -21,11 +21,10 @@ package glog import ( "errors" "fmt" - flag "github.com/seaweedfs/seaweedfs/weed/util/fla9" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "os" "os/user" "path/filepath" - "sort" "strings" "sync" "time" @@ -33,7 +32,6 @@ import ( // MaxSize is the maximum size of a log file in bytes. var MaxSize uint64 = 1024 * 1024 * 1800 -var MaxFileCount = 5 // logDirs lists the candidate directories for new log files. var logDirs []string @@ -45,9 +43,8 @@ var logDir = flag.String("logdir", "", "If non-empty, write log files in this di func createLogDirs() { if *logDir != "" { logDirs = append(logDirs, *logDir) - } else { - logDirs = append(logDirs, os.TempDir()) } + logDirs = append(logDirs, os.TempDir()) } var ( @@ -99,15 +96,6 @@ func logName(tag string, t time.Time) (name, link string) { return name, program + "." + tag } -func prefix(tag string) string { - return fmt.Sprintf("%s.%s.%s.log.%s.", - program, - host, - userName, - tag, - ) -} - var onceLogDirs sync.Once // create creates a new log file and returns the file and its filename, which @@ -120,29 +108,8 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { return nil, "", errors.New("log: no log dirs") } name, link := logName(tag, t) - logPrefix := prefix(tag) var lastErr error for _, dir := range logDirs { - - // remove old logs - entries, _ := os.ReadDir(dir) - var previousLogs []string - for _, entry := range entries { - if strings.HasPrefix(entry.Name(), logPrefix) { - previousLogs = append(previousLogs, entry.Name()) - } - } - if len(previousLogs) >= MaxFileCount { - sort.Strings(previousLogs) - for i, entry := range previousLogs { - if i > len(previousLogs)-MaxFileCount { - break - } - os.Remove(filepath.Join(dir, entry)) - } - } - - // create new log file fname := filepath.Join(dir, name) f, err := os.Create(fname) if err == nil { @@ -153,5 +120,5 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } lastErr = err } - return nil, "", fmt.Errorf("log: cannot create log: %w", lastErr) + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } diff --git a/weed/glog/glog_test.go b/weed/glog/glog_test.go index 4a667259b..12c3acf3d 100644 --- a/weed/glog/glog_test.go +++ b/weed/glog/glog_test.go @@ -125,9 +125,9 @@ func TestInfoDepth(t *testing.T) { // pull out the line number (between : and ]) msg := m[strings.LastIndex(m, ":")+1:] - x := strings.Index(msg, " ") + x := strings.Index(msg, "]") if x < 0 { - t.Errorf("InfoDepth[%d]: missing ' ': %q", i, m) + t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) continue } line, err := strconv.Atoi(msg[:x]) @@ -180,7 +180,7 @@ func TestHeader(t *testing.T) { pid = 1234 Info("test") var line int - format := "I0102 15:04:05.067890 glog_test.go:%d test\n" + format := "I0102 15:04:05 1234 glog_test.go:%d] test\n" n, err := fmt.Sscanf(contents(infoLog), format, &line) if n != 1 || err != nil { t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) diff --git a/weed/iam/integration/cached_role_store_generic.go b/weed/iam/integration/cached_role_store_generic.go deleted file mode 100644 index 510fc147f..000000000 --- a/weed/iam/integration/cached_role_store_generic.go +++ /dev/null @@ -1,153 +0,0 @@ -package integration - -import ( - "context" - "encoding/json" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/util" -) - -// RoleStoreAdapter adapts RoleStore interface to CacheableStore[*RoleDefinition] -type RoleStoreAdapter struct { - store RoleStore -} - -// NewRoleStoreAdapter creates a new adapter for RoleStore -func NewRoleStoreAdapter(store RoleStore) *RoleStoreAdapter { - return &RoleStoreAdapter{store: store} -} - -// Get implements CacheableStore interface -func (a *RoleStoreAdapter) Get(ctx context.Context, filerAddress string, key string) (*RoleDefinition, error) { - return a.store.GetRole(ctx, filerAddress, key) -} - -// Store implements CacheableStore interface -func (a *RoleStoreAdapter) Store(ctx context.Context, filerAddress string, key string, value *RoleDefinition) error { - return a.store.StoreRole(ctx, filerAddress, key, value) -} - -// Delete implements CacheableStore interface -func (a *RoleStoreAdapter) Delete(ctx context.Context, filerAddress string, key string) error { - return a.store.DeleteRole(ctx, filerAddress, key) -} - -// List implements CacheableStore interface -func (a *RoleStoreAdapter) List(ctx context.Context, filerAddress string) ([]string, error) { - return a.store.ListRoles(ctx, filerAddress) -} - -// GenericCachedRoleStore implements RoleStore using the generic cache -type GenericCachedRoleStore struct { - *util.CachedStore[*RoleDefinition] - adapter *RoleStoreAdapter -} - -// NewGenericCachedRoleStore creates a new cached role store using generics -func NewGenericCachedRoleStore(config map[string]interface{}, filerAddressProvider func() string) (*GenericCachedRoleStore, error) { - // Create underlying filer store - filerStore, err := NewFilerRoleStore(config, filerAddressProvider) - if err != nil { - return nil, err - } - - // Parse cache configuration with defaults - cacheTTL := 5 * time.Minute - listTTL := 1 * time.Minute - maxCacheSize := int64(1000) - - if config != nil { - if ttlStr, ok := config["ttl"].(string); ok && ttlStr != "" { - if parsed, err := time.ParseDuration(ttlStr); err == nil { - cacheTTL = parsed - } - } - if listTTLStr, ok := config["listTtl"].(string); ok && listTTLStr != "" { - if parsed, err := time.ParseDuration(listTTLStr); err == nil { - listTTL = parsed - } - } - if maxSize, ok := config["maxCacheSize"].(int); ok && maxSize > 0 { - maxCacheSize = int64(maxSize) - } - } - - // Create adapter and generic cached store - adapter := NewRoleStoreAdapter(filerStore) - cachedStore := util.NewCachedStore( - adapter, - genericCopyRoleDefinition, // Copy function - util.CachedStoreConfig{ - TTL: cacheTTL, - ListTTL: listTTL, - MaxCacheSize: maxCacheSize, - }, - ) - - glog.V(2).Infof("Initialized GenericCachedRoleStore with TTL %v, List TTL %v, Max Cache Size %d", - cacheTTL, listTTL, maxCacheSize) - - return &GenericCachedRoleStore{ - CachedStore: cachedStore, - adapter: adapter, - }, nil -} - -// StoreRole implements RoleStore interface -func (c *GenericCachedRoleStore) StoreRole(ctx context.Context, filerAddress string, roleName string, role *RoleDefinition) error { - return c.Store(ctx, filerAddress, roleName, role) -} - -// GetRole implements RoleStore interface -func (c *GenericCachedRoleStore) GetRole(ctx context.Context, filerAddress string, roleName string) (*RoleDefinition, error) { - return c.Get(ctx, filerAddress, roleName) -} - -// ListRoles implements RoleStore interface -func (c *GenericCachedRoleStore) ListRoles(ctx context.Context, filerAddress string) ([]string, error) { - return c.List(ctx, filerAddress) -} - -// DeleteRole implements RoleStore interface -func (c *GenericCachedRoleStore) DeleteRole(ctx context.Context, filerAddress string, roleName string) error { - return c.Delete(ctx, filerAddress, roleName) -} - -// genericCopyRoleDefinition creates a deep copy of a RoleDefinition for the generic cache -func genericCopyRoleDefinition(role *RoleDefinition) *RoleDefinition { - if role == nil { - return nil - } - - result := &RoleDefinition{ - RoleName: role.RoleName, - RoleArn: role.RoleArn, - Description: role.Description, - } - - // Deep copy trust policy if it exists - if role.TrustPolicy != nil { - trustPolicyData, err := json.Marshal(role.TrustPolicy) - if err != nil { - glog.Errorf("Failed to marshal trust policy for deep copy: %v", err) - return nil - } - var trustPolicyCopy policy.PolicyDocument - if err := json.Unmarshal(trustPolicyData, &trustPolicyCopy); err != nil { - glog.Errorf("Failed to unmarshal trust policy for deep copy: %v", err) - return nil - } - result.TrustPolicy = &trustPolicyCopy - } - - // Deep copy attached policies slice - if role.AttachedPolicies != nil { - result.AttachedPolicies = make([]string, len(role.AttachedPolicies)) - copy(result.AttachedPolicies, role.AttachedPolicies) - } - - return result -} diff --git a/weed/iam/integration/iam_integration_test.go b/weed/iam/integration/iam_integration_test.go deleted file mode 100644 index 7684656ce..000000000 --- a/weed/iam/integration/iam_integration_test.go +++ /dev/null @@ -1,513 +0,0 @@ -package integration - -import ( - "context" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/ldap" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestFullOIDCWorkflow tests the complete OIDC โ†’ STS โ†’ Policy workflow -func TestFullOIDCWorkflow(t *testing.T) { - // Set up integrated IAM system - iamManager := setupIntegratedIAMSystem(t) - - // Create JWT tokens for testing with the correct issuer - validJWTToken := createTestJWT(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - invalidJWTToken := createTestJWT(t, "https://invalid-issuer.com", "test-user", "wrong-key") - - tests := []struct { - name string - roleArn string - sessionName string - webToken string - expectedAllow bool - testAction string - testResource string - }{ - { - name: "successful role assumption with policy validation", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - sessionName: "oidc-session", - webToken: validJWTToken, - expectedAllow: true, - testAction: "s3:GetObject", - testResource: "arn:seaweed:s3:::test-bucket/file.txt", - }, - { - name: "role assumption denied by trust policy", - roleArn: "arn:seaweed:iam::role/RestrictedRole", - sessionName: "oidc-session", - webToken: validJWTToken, - expectedAllow: false, - }, - { - name: "invalid token rejected", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - sessionName: "oidc-session", - webToken: invalidJWTToken, - expectedAllow: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - // Step 1: Attempt role assumption - assumeRequest := &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: tt.roleArn, - WebIdentityToken: tt.webToken, - RoleSessionName: tt.sessionName, - } - - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, assumeRequest) - - if !tt.expectedAllow { - assert.Error(t, err) - assert.Nil(t, response) - return - } - - // Should succeed if expectedAllow is true - require.NoError(t, err) - require.NotNil(t, response) - require.NotNil(t, response.Credentials) - - // Step 2: Test policy enforcement with assumed credentials - if tt.testAction != "" && tt.testResource != "" { - allowed, err := iamManager.IsActionAllowed(ctx, &ActionRequest{ - Principal: response.AssumedRoleUser.Arn, - Action: tt.testAction, - Resource: tt.testResource, - SessionToken: response.Credentials.SessionToken, - }) - - require.NoError(t, err) - assert.True(t, allowed, "Action should be allowed by role policy") - } - }) - } -} - -// TestFullLDAPWorkflow tests the complete LDAP โ†’ STS โ†’ Policy workflow -func TestFullLDAPWorkflow(t *testing.T) { - iamManager := setupIntegratedIAMSystem(t) - - tests := []struct { - name string - roleArn string - sessionName string - username string - password string - expectedAllow bool - testAction string - testResource string - }{ - { - name: "successful LDAP role assumption", - roleArn: "arn:seaweed:iam::role/LDAPUserRole", - sessionName: "ldap-session", - username: "testuser", - password: "testpass", - expectedAllow: true, - testAction: "filer:CreateEntry", - testResource: "arn:seaweed:filer::path/user-docs/*", - }, - { - name: "invalid LDAP credentials", - roleArn: "arn:seaweed:iam::role/LDAPUserRole", - sessionName: "ldap-session", - username: "testuser", - password: "wrongpass", - expectedAllow: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - // Step 1: Attempt role assumption with LDAP credentials - assumeRequest := &sts.AssumeRoleWithCredentialsRequest{ - RoleArn: tt.roleArn, - Username: tt.username, - Password: tt.password, - RoleSessionName: tt.sessionName, - ProviderName: "test-ldap", - } - - response, err := iamManager.AssumeRoleWithCredentials(ctx, assumeRequest) - - if !tt.expectedAllow { - assert.Error(t, err) - assert.Nil(t, response) - return - } - - require.NoError(t, err) - require.NotNil(t, response) - - // Step 2: Test policy enforcement - if tt.testAction != "" && tt.testResource != "" { - allowed, err := iamManager.IsActionAllowed(ctx, &ActionRequest{ - Principal: response.AssumedRoleUser.Arn, - Action: tt.testAction, - Resource: tt.testResource, - SessionToken: response.Credentials.SessionToken, - }) - - require.NoError(t, err) - assert.True(t, allowed) - } - }) - } -} - -// TestPolicyEnforcement tests policy evaluation for various scenarios -func TestPolicyEnforcement(t *testing.T) { - iamManager := setupIntegratedIAMSystem(t) - - // Create a valid JWT token for testing - validJWTToken := createTestJWT(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Create a session for testing - ctx := context.Background() - assumeRequest := &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "policy-test-session", - } - - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, assumeRequest) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - principal := response.AssumedRoleUser.Arn - - tests := []struct { - name string - action string - resource string - shouldAllow bool - reason string - }{ - { - name: "allow read access", - action: "s3:GetObject", - resource: "arn:seaweed:s3:::test-bucket/file.txt", - shouldAllow: true, - reason: "S3ReadOnlyRole should allow GetObject", - }, - { - name: "allow list bucket", - action: "s3:ListBucket", - resource: "arn:seaweed:s3:::test-bucket", - shouldAllow: true, - reason: "S3ReadOnlyRole should allow ListBucket", - }, - { - name: "deny write access", - action: "s3:PutObject", - resource: "arn:seaweed:s3:::test-bucket/newfile.txt", - shouldAllow: false, - reason: "S3ReadOnlyRole should deny write operations", - }, - { - name: "deny delete access", - action: "s3:DeleteObject", - resource: "arn:seaweed:s3:::test-bucket/file.txt", - shouldAllow: false, - reason: "S3ReadOnlyRole should deny delete operations", - }, - { - name: "deny filer access", - action: "filer:CreateEntry", - resource: "arn:seaweed:filer::path/test", - shouldAllow: false, - reason: "S3ReadOnlyRole should not allow filer operations", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - allowed, err := iamManager.IsActionAllowed(ctx, &ActionRequest{ - Principal: principal, - Action: tt.action, - Resource: tt.resource, - SessionToken: sessionToken, - }) - - require.NoError(t, err) - assert.Equal(t, tt.shouldAllow, allowed, tt.reason) - }) - } -} - -// TestSessionExpiration tests session expiration and cleanup -func TestSessionExpiration(t *testing.T) { - iamManager := setupIntegratedIAMSystem(t) - ctx := context.Background() - - // Create a valid JWT token for testing - validJWTToken := createTestJWT(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Create a short-lived session - assumeRequest := &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "expiration-test", - DurationSeconds: int64Ptr(900), // 15 minutes - } - - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, assumeRequest) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - - // Verify session is initially valid - allowed, err := iamManager.IsActionAllowed(ctx, &ActionRequest{ - Principal: response.AssumedRoleUser.Arn, - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::test-bucket/file.txt", - SessionToken: sessionToken, - }) - require.NoError(t, err) - assert.True(t, allowed) - - // Verify the expiration time is set correctly - assert.True(t, response.Credentials.Expiration.After(time.Now())) - assert.True(t, response.Credentials.Expiration.Before(time.Now().Add(16*time.Minute))) - - // Test session expiration behavior in stateless JWT system - // In a stateless system, manual expiration is not supported - err = iamManager.ExpireSessionForTesting(ctx, sessionToken) - require.Error(t, err, "Manual session expiration should not be supported in stateless system") - assert.Contains(t, err.Error(), "manual session expiration not supported") - - // Verify session is still valid (since it hasn't naturally expired) - allowed, err = iamManager.IsActionAllowed(ctx, &ActionRequest{ - Principal: response.AssumedRoleUser.Arn, - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::test-bucket/file.txt", - SessionToken: sessionToken, - }) - require.NoError(t, err, "Session should still be valid in stateless system") - assert.True(t, allowed, "Access should still be allowed since token hasn't naturally expired") -} - -// TestTrustPolicyValidation tests role trust policy validation -func TestTrustPolicyValidation(t *testing.T) { - iamManager := setupIntegratedIAMSystem(t) - ctx := context.Background() - - tests := []struct { - name string - roleArn string - provider string - userID string - shouldAllow bool - reason string - }{ - { - name: "OIDC user allowed by trust policy", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - provider: "oidc", - userID: "test-user-id", - shouldAllow: true, - reason: "Trust policy should allow OIDC users", - }, - { - name: "LDAP user allowed by different role", - roleArn: "arn:seaweed:iam::role/LDAPUserRole", - provider: "ldap", - userID: "testuser", - shouldAllow: true, - reason: "Trust policy should allow LDAP users for LDAP role", - }, - { - name: "Wrong provider for role", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - provider: "ldap", - userID: "testuser", - shouldAllow: false, - reason: "S3ReadOnlyRole trust policy should reject LDAP users", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This would test trust policy evaluation - // For now, we'll implement this as part of the IAM manager - result := iamManager.ValidateTrustPolicy(ctx, tt.roleArn, tt.provider, tt.userID) - assert.Equal(t, tt.shouldAllow, result, tt.reason) - }) - } -} - -// Helper functions and test setup - -// createTestJWT creates a test JWT token with the specified issuer, subject and signing key -func createTestJWT(t *testing.T, issuer, subject, signingKey string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client-id", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - // Add claims that trust policy validation expects - "idp": "test-oidc", // Identity provider claim for trust policy matching - }) - - tokenString, err := token.SignedString([]byte(signingKey)) - require.NoError(t, err) - return tokenString -} - -func setupIntegratedIAMSystem(t *testing.T) *IAMManager { - // Create IAM manager with all components - manager := NewIAMManager() - - // Configure and initialize - config := &IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", // Use memory for unit tests - }, - Roles: &RoleStoreConfig{ - StoreType: "memory", // Use memory for unit tests - }, - } - - err := manager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Set up test providers - setupTestProviders(t, manager) - - // Set up test policies and roles - setupTestPoliciesAndRoles(t, manager) - - return manager -} - -func setupTestProviders(t *testing.T, manager *IAMManager) { - // Set up OIDC provider - oidcProvider := oidc.NewMockOIDCProvider("test-oidc") - oidcConfig := &oidc.OIDCConfig{ - Issuer: "https://test-issuer.com", - ClientID: "test-client-id", - } - err := oidcProvider.Initialize(oidcConfig) - require.NoError(t, err) - oidcProvider.SetupDefaultTestData() - - // Set up LDAP mock provider (no config needed for mock) - ldapProvider := ldap.NewMockLDAPProvider("test-ldap") - err = ldapProvider.Initialize(nil) // Mock doesn't need real config - require.NoError(t, err) - ldapProvider.SetupDefaultTestData() - - // Register providers - err = manager.RegisterIdentityProvider(oidcProvider) - require.NoError(t, err) - err = manager.RegisterIdentityProvider(ldapProvider) - require.NoError(t, err) -} - -func setupTestPoliciesAndRoles(t *testing.T, manager *IAMManager) { - ctx := context.Background() - - // Create S3 read-only policy - s3ReadPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "S3ReadAccess", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } - - err := manager.CreatePolicy(ctx, "", "S3ReadOnlyPolicy", s3ReadPolicy) - require.NoError(t, err) - - // Create LDAP user policy - ldapUserPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "FilerAccess", - Effect: "Allow", - Action: []string{"filer:*"}, - Resource: []string{ - "arn:seaweed:filer::path/user-docs/*", - }, - }, - }, - } - - err = manager.CreatePolicy(ctx, "", "LDAPUserPolicy", ldapUserPolicy) - require.NoError(t, err) - - // Create roles with trust policies - err = manager.CreateRole(ctx, "", "S3ReadOnlyRole", &RoleDefinition{ - RoleName: "S3ReadOnlyRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - }) - require.NoError(t, err) - - err = manager.CreateRole(ctx, "", "LDAPUserRole", &RoleDefinition{ - RoleName: "LDAPUserRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-ldap", - }, - Action: []string{"sts:AssumeRoleWithCredentials"}, - }, - }, - }, - AttachedPolicies: []string{"LDAPUserPolicy"}, - }) - require.NoError(t, err) -} - -func int64Ptr(v int64) *int64 { - return &v -} diff --git a/weed/iam/integration/iam_manager.go b/weed/iam/integration/iam_manager.go deleted file mode 100644 index 51deb9fd6..000000000 --- a/weed/iam/integration/iam_manager.go +++ /dev/null @@ -1,662 +0,0 @@ -package integration - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/iam/utils" -) - -// IAMManager orchestrates all IAM components -type IAMManager struct { - stsService *sts.STSService - policyEngine *policy.PolicyEngine - roleStore RoleStore - filerAddressProvider func() string // Function to get current filer address - initialized bool -} - -// IAMConfig holds configuration for all IAM components -type IAMConfig struct { - // STS service configuration - STS *sts.STSConfig `json:"sts"` - - // Policy engine configuration - Policy *policy.PolicyEngineConfig `json:"policy"` - - // Role store configuration - Roles *RoleStoreConfig `json:"roleStore"` -} - -// RoleStoreConfig holds role store configuration -type RoleStoreConfig struct { - // StoreType specifies the role store backend (memory, filer, etc.) - StoreType string `json:"storeType"` - - // StoreConfig contains store-specific configuration - StoreConfig map[string]interface{} `json:"storeConfig,omitempty"` -} - -// RoleDefinition defines a role with its trust policy and attached policies -type RoleDefinition struct { - // RoleName is the name of the role - RoleName string `json:"roleName"` - - // RoleArn is the full ARN of the role - RoleArn string `json:"roleArn"` - - // TrustPolicy defines who can assume this role - TrustPolicy *policy.PolicyDocument `json:"trustPolicy"` - - // AttachedPolicies lists the policy names attached to this role - AttachedPolicies []string `json:"attachedPolicies"` - - // Description is an optional description of the role - Description string `json:"description,omitempty"` -} - -// ActionRequest represents a request to perform an action -type ActionRequest struct { - // Principal is the entity performing the action - Principal string `json:"principal"` - - // Action is the action being requested - Action string `json:"action"` - - // Resource is the resource being accessed - Resource string `json:"resource"` - - // SessionToken for temporary credential validation - SessionToken string `json:"sessionToken"` - - // RequestContext contains additional request information - RequestContext map[string]interface{} `json:"requestContext,omitempty"` -} - -// NewIAMManager creates a new IAM manager -func NewIAMManager() *IAMManager { - return &IAMManager{} -} - -// Initialize initializes the IAM manager with all components -func (m *IAMManager) Initialize(config *IAMConfig, filerAddressProvider func() string) error { - if config == nil { - return fmt.Errorf("config cannot be nil") - } - - // Store the filer address provider function - m.filerAddressProvider = filerAddressProvider - - // Initialize STS service - m.stsService = sts.NewSTSService() - if err := m.stsService.Initialize(config.STS); err != nil { - return fmt.Errorf("failed to initialize STS service: %w", err) - } - - // CRITICAL SECURITY: Set trust policy validator to ensure proper role assumption validation - m.stsService.SetTrustPolicyValidator(m) - - // Initialize policy engine - m.policyEngine = policy.NewPolicyEngine() - if err := m.policyEngine.InitializeWithProvider(config.Policy, m.filerAddressProvider); err != nil { - return fmt.Errorf("failed to initialize policy engine: %w", err) - } - - // Initialize role store - roleStore, err := m.createRoleStoreWithProvider(config.Roles, m.filerAddressProvider) - if err != nil { - return fmt.Errorf("failed to initialize role store: %w", err) - } - m.roleStore = roleStore - - m.initialized = true - return nil -} - -// getFilerAddress returns the current filer address using the provider function -func (m *IAMManager) getFilerAddress() string { - if m.filerAddressProvider != nil { - return m.filerAddressProvider() - } - return "" // Fallback to empty string if no provider is set -} - -// createRoleStore creates a role store based on configuration -func (m *IAMManager) createRoleStore(config *RoleStoreConfig) (RoleStore, error) { - if config == nil { - // Default to generic cached filer role store when no config provided - return NewGenericCachedRoleStore(nil, nil) - } - - switch config.StoreType { - case "", "filer": - // Check if caching is explicitly disabled - if config.StoreConfig != nil { - if noCache, ok := config.StoreConfig["noCache"].(bool); ok && noCache { - return NewFilerRoleStore(config.StoreConfig, nil) - } - } - // Default to generic cached filer store for better performance - return NewGenericCachedRoleStore(config.StoreConfig, nil) - case "cached-filer", "generic-cached": - return NewGenericCachedRoleStore(config.StoreConfig, nil) - case "memory": - return NewMemoryRoleStore(), nil - default: - return nil, fmt.Errorf("unsupported role store type: %s", config.StoreType) - } -} - -// createRoleStoreWithProvider creates a role store with a filer address provider function -func (m *IAMManager) createRoleStoreWithProvider(config *RoleStoreConfig, filerAddressProvider func() string) (RoleStore, error) { - if config == nil { - // Default to generic cached filer role store when no config provided - return NewGenericCachedRoleStore(nil, filerAddressProvider) - } - - switch config.StoreType { - case "", "filer": - // Check if caching is explicitly disabled - if config.StoreConfig != nil { - if noCache, ok := config.StoreConfig["noCache"].(bool); ok && noCache { - return NewFilerRoleStore(config.StoreConfig, filerAddressProvider) - } - } - // Default to generic cached filer store for better performance - return NewGenericCachedRoleStore(config.StoreConfig, filerAddressProvider) - case "cached-filer", "generic-cached": - return NewGenericCachedRoleStore(config.StoreConfig, filerAddressProvider) - case "memory": - return NewMemoryRoleStore(), nil - default: - return nil, fmt.Errorf("unsupported role store type: %s", config.StoreType) - } -} - -// RegisterIdentityProvider registers an identity provider -func (m *IAMManager) RegisterIdentityProvider(provider providers.IdentityProvider) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - return m.stsService.RegisterProvider(provider) -} - -// CreatePolicy creates a new policy -func (m *IAMManager) CreatePolicy(ctx context.Context, filerAddress string, name string, policyDoc *policy.PolicyDocument) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - return m.policyEngine.AddPolicy(filerAddress, name, policyDoc) -} - -// CreateRole creates a new role with trust policy and attached policies -func (m *IAMManager) CreateRole(ctx context.Context, filerAddress string, roleName string, roleDef *RoleDefinition) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - if roleName == "" { - return fmt.Errorf("role name cannot be empty") - } - - if roleDef == nil { - return fmt.Errorf("role definition cannot be nil") - } - - // Set role ARN if not provided - if roleDef.RoleArn == "" { - roleDef.RoleArn = fmt.Sprintf("arn:seaweed:iam::role/%s", roleName) - } - - // Validate trust policy - if roleDef.TrustPolicy != nil { - if err := policy.ValidateTrustPolicyDocument(roleDef.TrustPolicy); err != nil { - return fmt.Errorf("invalid trust policy: %w", err) - } - } - - // Store role definition - return m.roleStore.StoreRole(ctx, "", roleName, roleDef) -} - -// AssumeRoleWithWebIdentity assumes a role using web identity (OIDC) -func (m *IAMManager) AssumeRoleWithWebIdentity(ctx context.Context, request *sts.AssumeRoleWithWebIdentityRequest) (*sts.AssumeRoleResponse, error) { - if !m.initialized { - return nil, fmt.Errorf("IAM manager not initialized") - } - - // Extract role name from ARN - roleName := utils.ExtractRoleNameFromArn(request.RoleArn) - - // Get role definition - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return nil, fmt.Errorf("role not found: %s", roleName) - } - - // Validate trust policy before allowing STS to assume the role - if err := m.validateTrustPolicyForWebIdentity(ctx, roleDef, request.WebIdentityToken); err != nil { - return nil, fmt.Errorf("trust policy validation failed: %w", err) - } - - // Use STS service to assume the role - return m.stsService.AssumeRoleWithWebIdentity(ctx, request) -} - -// AssumeRoleWithCredentials assumes a role using credentials (LDAP) -func (m *IAMManager) AssumeRoleWithCredentials(ctx context.Context, request *sts.AssumeRoleWithCredentialsRequest) (*sts.AssumeRoleResponse, error) { - if !m.initialized { - return nil, fmt.Errorf("IAM manager not initialized") - } - - // Extract role name from ARN - roleName := utils.ExtractRoleNameFromArn(request.RoleArn) - - // Get role definition - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return nil, fmt.Errorf("role not found: %s", roleName) - } - - // Validate trust policy - if err := m.validateTrustPolicyForCredentials(ctx, roleDef, request); err != nil { - return nil, fmt.Errorf("trust policy validation failed: %w", err) - } - - // Use STS service to assume the role - return m.stsService.AssumeRoleWithCredentials(ctx, request) -} - -// IsActionAllowed checks if a principal is allowed to perform an action on a resource -func (m *IAMManager) IsActionAllowed(ctx context.Context, request *ActionRequest) (bool, error) { - if !m.initialized { - return false, fmt.Errorf("IAM manager not initialized") - } - - // Validate session token first (skip for OIDC tokens which are already validated) - if !isOIDCToken(request.SessionToken) { - _, err := m.stsService.ValidateSessionToken(ctx, request.SessionToken) - if err != nil { - return false, fmt.Errorf("invalid session: %w", err) - } - } - - // Extract role name from principal ARN - roleName := utils.ExtractRoleNameFromPrincipal(request.Principal) - if roleName == "" { - return false, fmt.Errorf("could not extract role from principal: %s", request.Principal) - } - - // Get role definition - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return false, fmt.Errorf("role not found: %s", roleName) - } - - // Create evaluation context - evalCtx := &policy.EvaluationContext{ - Principal: request.Principal, - Action: request.Action, - Resource: request.Resource, - RequestContext: request.RequestContext, - } - - // Evaluate policies attached to the role - result, err := m.policyEngine.Evaluate(ctx, "", evalCtx, roleDef.AttachedPolicies) - if err != nil { - return false, fmt.Errorf("policy evaluation failed: %w", err) - } - - return result.Effect == policy.EffectAllow, nil -} - -// ValidateTrustPolicy validates if a principal can assume a role (for testing) -func (m *IAMManager) ValidateTrustPolicy(ctx context.Context, roleArn, provider, userID string) bool { - roleName := utils.ExtractRoleNameFromArn(roleArn) - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return false - } - - // Simple validation based on provider in trust policy - if roleDef.TrustPolicy != nil { - for _, statement := range roleDef.TrustPolicy.Statement { - if statement.Effect == "Allow" { - if principal, ok := statement.Principal.(map[string]interface{}); ok { - if federated, ok := principal["Federated"].(string); ok { - if federated == "test-"+provider { - return true - } - } - } - } - } - } - - return false -} - -// validateTrustPolicyForWebIdentity validates trust policy for OIDC assumption -func (m *IAMManager) validateTrustPolicyForWebIdentity(ctx context.Context, roleDef *RoleDefinition, webIdentityToken string) error { - if roleDef.TrustPolicy == nil { - return fmt.Errorf("role has no trust policy") - } - - // Create evaluation context for trust policy validation - requestContext := make(map[string]interface{}) - - // Try to parse as JWT first, fallback to mock token handling - tokenClaims, err := parseJWTTokenForTrustPolicy(webIdentityToken) - if err != nil { - // If JWT parsing fails, this might be a mock token (like "valid-oidc-token") - // For mock tokens, we'll use default values that match the trust policy expectations - requestContext["seaweed:TokenIssuer"] = "test-oidc" - requestContext["seaweed:FederatedProvider"] = "test-oidc" - requestContext["seaweed:Subject"] = "mock-user" - } else { - // Add standard context values from JWT claims that trust policies might check - if idp, ok := tokenClaims["idp"].(string); ok { - requestContext["seaweed:TokenIssuer"] = idp - requestContext["seaweed:FederatedProvider"] = idp - } - if iss, ok := tokenClaims["iss"].(string); ok { - requestContext["seaweed:Issuer"] = iss - } - if sub, ok := tokenClaims["sub"].(string); ok { - requestContext["seaweed:Subject"] = sub - } - if extUid, ok := tokenClaims["ext_uid"].(string); ok { - requestContext["seaweed:ExternalUserId"] = extUid - } - } - - // Create evaluation context for trust policy - evalCtx := &policy.EvaluationContext{ - Principal: "web-identity-user", // Placeholder principal for trust policy evaluation - Action: "sts:AssumeRoleWithWebIdentity", - Resource: roleDef.RoleArn, - RequestContext: requestContext, - } - - // Evaluate the trust policy directly - if !m.evaluateTrustPolicy(roleDef.TrustPolicy, evalCtx) { - return fmt.Errorf("trust policy denies web identity assumption") - } - - return nil -} - -// validateTrustPolicyForCredentials validates trust policy for credential assumption -func (m *IAMManager) validateTrustPolicyForCredentials(ctx context.Context, roleDef *RoleDefinition, request *sts.AssumeRoleWithCredentialsRequest) error { - if roleDef.TrustPolicy == nil { - return fmt.Errorf("role has no trust policy") - } - - // Check if trust policy allows credential assumption for the specific provider - for _, statement := range roleDef.TrustPolicy.Statement { - if statement.Effect == "Allow" { - for _, action := range statement.Action { - if action == "sts:AssumeRoleWithCredentials" { - if principal, ok := statement.Principal.(map[string]interface{}); ok { - if federated, ok := principal["Federated"].(string); ok { - if federated == request.ProviderName { - return nil // Allow - } - } - } - } - } - } - } - - return fmt.Errorf("trust policy does not allow credential assumption for provider: %s", request.ProviderName) -} - -// Helper functions - -// ExpireSessionForTesting manually expires a session for testing purposes -func (m *IAMManager) ExpireSessionForTesting(ctx context.Context, sessionToken string) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - return m.stsService.ExpireSessionForTesting(ctx, sessionToken) -} - -// GetSTSService returns the STS service instance -func (m *IAMManager) GetSTSService() *sts.STSService { - return m.stsService -} - -// parseJWTTokenForTrustPolicy parses a JWT token to extract claims for trust policy evaluation -func parseJWTTokenForTrustPolicy(tokenString string) (map[string]interface{}, error) { - // Simple JWT parsing without verification (for trust policy context only) - // In production, this should use proper JWT parsing with signature verification - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, fmt.Errorf("invalid JWT format") - } - - // Decode the payload (second part) - payload := parts[1] - // Add padding if needed - for len(payload)%4 != 0 { - payload += "=" - } - - decoded, err := base64.URLEncoding.DecodeString(payload) - if err != nil { - return nil, fmt.Errorf("failed to decode JWT payload: %w", err) - } - - var claims map[string]interface{} - if err := json.Unmarshal(decoded, &claims); err != nil { - return nil, fmt.Errorf("failed to unmarshal JWT claims: %w", err) - } - - return claims, nil -} - -// evaluateTrustPolicy evaluates a trust policy against the evaluation context -func (m *IAMManager) evaluateTrustPolicy(trustPolicy *policy.PolicyDocument, evalCtx *policy.EvaluationContext) bool { - if trustPolicy == nil { - return false - } - - // Trust policies work differently from regular policies: - // - They check the Principal field to see who can assume the role - // - They check Action to see what actions are allowed - // - They may have Conditions that must be satisfied - - for _, statement := range trustPolicy.Statement { - if statement.Effect == "Allow" { - // Check if the action matches - actionMatches := false - for _, action := range statement.Action { - if action == evalCtx.Action || action == "*" { - actionMatches = true - break - } - } - if !actionMatches { - continue - } - - // Check if the principal matches - principalMatches := false - if principal, ok := statement.Principal.(map[string]interface{}); ok { - // Check for Federated principal (OIDC/SAML) - if federatedValue, ok := principal["Federated"]; ok { - principalMatches = m.evaluatePrincipalValue(federatedValue, evalCtx, "seaweed:FederatedProvider") - } - // Check for AWS principal (IAM users/roles) - if !principalMatches { - if awsValue, ok := principal["AWS"]; ok { - principalMatches = m.evaluatePrincipalValue(awsValue, evalCtx, "seaweed:AWSPrincipal") - } - } - // Check for Service principal (AWS services) - if !principalMatches { - if serviceValue, ok := principal["Service"]; ok { - principalMatches = m.evaluatePrincipalValue(serviceValue, evalCtx, "seaweed:ServicePrincipal") - } - } - } else if principalStr, ok := statement.Principal.(string); ok { - // Handle string principal - if principalStr == "*" { - principalMatches = true - } - } - - if !principalMatches { - continue - } - - // Check conditions if present - if len(statement.Condition) > 0 { - conditionsMatch := m.evaluateTrustPolicyConditions(statement.Condition, evalCtx) - if !conditionsMatch { - continue - } - } - - // All checks passed for this Allow statement - return true - } - } - - return false -} - -// evaluateTrustPolicyConditions evaluates conditions in a trust policy statement -func (m *IAMManager) evaluateTrustPolicyConditions(conditions map[string]map[string]interface{}, evalCtx *policy.EvaluationContext) bool { - for conditionType, conditionBlock := range conditions { - switch conditionType { - case "StringEquals": - if !m.policyEngine.EvaluateStringCondition(conditionBlock, evalCtx, true, false) { - return false - } - case "StringNotEquals": - if !m.policyEngine.EvaluateStringCondition(conditionBlock, evalCtx, false, false) { - return false - } - case "StringLike": - if !m.policyEngine.EvaluateStringCondition(conditionBlock, evalCtx, true, true) { - return false - } - // Add other condition types as needed - default: - // Unknown condition type - fail safe - return false - } - } - return true -} - -// evaluatePrincipalValue evaluates a principal value (string or array) against the context -func (m *IAMManager) evaluatePrincipalValue(principalValue interface{}, evalCtx *policy.EvaluationContext, contextKey string) bool { - // Get the value from evaluation context - contextValue, exists := evalCtx.RequestContext[contextKey] - if !exists { - return false - } - - contextStr, ok := contextValue.(string) - if !ok { - return false - } - - // Handle single string value - if principalStr, ok := principalValue.(string); ok { - return principalStr == contextStr || principalStr == "*" - } - - // Handle array of strings - if principalArray, ok := principalValue.([]interface{}); ok { - for _, item := range principalArray { - if itemStr, ok := item.(string); ok { - if itemStr == contextStr || itemStr == "*" { - return true - } - } - } - } - - // Handle array of strings (alternative JSON unmarshaling format) - if principalStrArray, ok := principalValue.([]string); ok { - for _, itemStr := range principalStrArray { - if itemStr == contextStr || itemStr == "*" { - return true - } - } - } - - return false -} - -// isOIDCToken checks if a token is an OIDC JWT token (vs STS session token) -func isOIDCToken(token string) bool { - // JWT tokens have three parts separated by dots and start with base64-encoded JSON - parts := strings.Split(token, ".") - if len(parts) != 3 { - return false - } - - // JWT tokens typically start with "eyJ" (base64 encoded JSON starting with "{") - return strings.HasPrefix(token, "eyJ") -} - -// TrustPolicyValidator interface implementation -// These methods allow the IAMManager to serve as the trust policy validator for the STS service - -// ValidateTrustPolicyForWebIdentity implements the TrustPolicyValidator interface -func (m *IAMManager) ValidateTrustPolicyForWebIdentity(ctx context.Context, roleArn string, webIdentityToken string) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - // Extract role name from ARN - roleName := utils.ExtractRoleNameFromArn(roleArn) - - // Get role definition - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return fmt.Errorf("role not found: %s", roleName) - } - - // Use existing trust policy validation logic - return m.validateTrustPolicyForWebIdentity(ctx, roleDef, webIdentityToken) -} - -// ValidateTrustPolicyForCredentials implements the TrustPolicyValidator interface -func (m *IAMManager) ValidateTrustPolicyForCredentials(ctx context.Context, roleArn string, identity *providers.ExternalIdentity) error { - if !m.initialized { - return fmt.Errorf("IAM manager not initialized") - } - - // Extract role name from ARN - roleName := utils.ExtractRoleNameFromArn(roleArn) - - // Get role definition - roleDef, err := m.roleStore.GetRole(ctx, m.getFilerAddress(), roleName) - if err != nil { - return fmt.Errorf("role not found: %s", roleName) - } - - // For credentials, we need to create a mock request to reuse existing validation - // This is a bit of a hack, but it allows us to reuse the existing logic - mockRequest := &sts.AssumeRoleWithCredentialsRequest{ - ProviderName: identity.Provider, // Use the provider name from the identity - } - - // Use existing trust policy validation logic - return m.validateTrustPolicyForCredentials(ctx, roleDef, mockRequest) -} diff --git a/weed/iam/integration/role_store.go b/weed/iam/integration/role_store.go deleted file mode 100644 index f2dc128c7..000000000 --- a/weed/iam/integration/role_store.go +++ /dev/null @@ -1,544 +0,0 @@ -package integration - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/karlseguin/ccache/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" -) - -// RoleStore defines the interface for storing IAM role definitions -type RoleStore interface { - // StoreRole stores a role definition (filerAddress ignored for memory stores) - StoreRole(ctx context.Context, filerAddress string, roleName string, role *RoleDefinition) error - - // GetRole retrieves a role definition (filerAddress ignored for memory stores) - GetRole(ctx context.Context, filerAddress string, roleName string) (*RoleDefinition, error) - - // ListRoles lists all role names (filerAddress ignored for memory stores) - ListRoles(ctx context.Context, filerAddress string) ([]string, error) - - // DeleteRole deletes a role definition (filerAddress ignored for memory stores) - DeleteRole(ctx context.Context, filerAddress string, roleName string) error -} - -// MemoryRoleStore implements RoleStore using in-memory storage -type MemoryRoleStore struct { - roles map[string]*RoleDefinition - mutex sync.RWMutex -} - -// NewMemoryRoleStore creates a new memory-based role store -func NewMemoryRoleStore() *MemoryRoleStore { - return &MemoryRoleStore{ - roles: make(map[string]*RoleDefinition), - } -} - -// StoreRole stores a role definition in memory (filerAddress ignored for memory store) -func (m *MemoryRoleStore) StoreRole(ctx context.Context, filerAddress string, roleName string, role *RoleDefinition) error { - if roleName == "" { - return fmt.Errorf("role name cannot be empty") - } - if role == nil { - return fmt.Errorf("role cannot be nil") - } - - m.mutex.Lock() - defer m.mutex.Unlock() - - // Deep copy the role to prevent external modifications - m.roles[roleName] = copyRoleDefinition(role) - return nil -} - -// GetRole retrieves a role definition from memory (filerAddress ignored for memory store) -func (m *MemoryRoleStore) GetRole(ctx context.Context, filerAddress string, roleName string) (*RoleDefinition, error) { - if roleName == "" { - return nil, fmt.Errorf("role name cannot be empty") - } - - m.mutex.RLock() - defer m.mutex.RUnlock() - - role, exists := m.roles[roleName] - if !exists { - return nil, fmt.Errorf("role not found: %s", roleName) - } - - // Return a copy to prevent external modifications - return copyRoleDefinition(role), nil -} - -// ListRoles lists all role names in memory (filerAddress ignored for memory store) -func (m *MemoryRoleStore) ListRoles(ctx context.Context, filerAddress string) ([]string, error) { - m.mutex.RLock() - defer m.mutex.RUnlock() - - names := make([]string, 0, len(m.roles)) - for name := range m.roles { - names = append(names, name) - } - - return names, nil -} - -// DeleteRole deletes a role definition from memory (filerAddress ignored for memory store) -func (m *MemoryRoleStore) DeleteRole(ctx context.Context, filerAddress string, roleName string) error { - if roleName == "" { - return fmt.Errorf("role name cannot be empty") - } - - m.mutex.Lock() - defer m.mutex.Unlock() - - delete(m.roles, roleName) - return nil -} - -// copyRoleDefinition creates a deep copy of a role definition -func copyRoleDefinition(original *RoleDefinition) *RoleDefinition { - if original == nil { - return nil - } - - copied := &RoleDefinition{ - RoleName: original.RoleName, - RoleArn: original.RoleArn, - Description: original.Description, - } - - // Deep copy trust policy if it exists - if original.TrustPolicy != nil { - // Use JSON marshaling for deep copy of the complex policy structure - trustPolicyData, _ := json.Marshal(original.TrustPolicy) - var trustPolicyCopy policy.PolicyDocument - json.Unmarshal(trustPolicyData, &trustPolicyCopy) - copied.TrustPolicy = &trustPolicyCopy - } - - // Copy attached policies slice - if original.AttachedPolicies != nil { - copied.AttachedPolicies = make([]string, len(original.AttachedPolicies)) - copy(copied.AttachedPolicies, original.AttachedPolicies) - } - - return copied -} - -// FilerRoleStore implements RoleStore using SeaweedFS filer -type FilerRoleStore struct { - grpcDialOption grpc.DialOption - basePath string - filerAddressProvider func() string -} - -// NewFilerRoleStore creates a new filer-based role store -func NewFilerRoleStore(config map[string]interface{}, filerAddressProvider func() string) (*FilerRoleStore, error) { - store := &FilerRoleStore{ - basePath: "/etc/iam/roles", // Default path for role storage - aligned with /etc/ convention - filerAddressProvider: filerAddressProvider, - } - - // Parse configuration - only basePath and other settings, NOT filerAddress - if config != nil { - if basePath, ok := config["basePath"].(string); ok && basePath != "" { - store.basePath = strings.TrimSuffix(basePath, "/") - } - } - - glog.V(2).Infof("Initialized FilerRoleStore with basePath %s", store.basePath) - - return store, nil -} - -// StoreRole stores a role definition in filer -func (f *FilerRoleStore) StoreRole(ctx context.Context, filerAddress string, roleName string, role *RoleDefinition) error { - // Use provider function if filerAddress is not provided - if filerAddress == "" && f.filerAddressProvider != nil { - filerAddress = f.filerAddressProvider() - } - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerRoleStore") - } - if roleName == "" { - return fmt.Errorf("role name cannot be empty") - } - if role == nil { - return fmt.Errorf("role cannot be nil") - } - - // Serialize role to JSON - roleData, err := json.MarshalIndent(role, "", " ") - if err != nil { - return fmt.Errorf("failed to serialize role: %v", err) - } - - rolePath := f.getRolePath(roleName) - - // Store in filer - return f.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.CreateEntryRequest{ - Directory: f.basePath, - Entry: &filer_pb.Entry{ - Name: f.getRoleFileName(roleName), - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0600), // Read/write for owner only - Uid: uint32(0), - Gid: uint32(0), - }, - Content: roleData, - }, - } - - glog.V(3).Infof("Storing role %s at %s", roleName, rolePath) - _, err := client.CreateEntry(ctx, request) - if err != nil { - return fmt.Errorf("failed to store role %s: %v", roleName, err) - } - - return nil - }) -} - -// GetRole retrieves a role definition from filer -func (f *FilerRoleStore) GetRole(ctx context.Context, filerAddress string, roleName string) (*RoleDefinition, error) { - // Use provider function if filerAddress is not provided - if filerAddress == "" && f.filerAddressProvider != nil { - filerAddress = f.filerAddressProvider() - } - if filerAddress == "" { - return nil, fmt.Errorf("filer address is required for FilerRoleStore") - } - if roleName == "" { - return nil, fmt.Errorf("role name cannot be empty") - } - - var roleData []byte - err := f.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: f.basePath, - Name: f.getRoleFileName(roleName), - } - - glog.V(3).Infof("Looking up role %s", roleName) - response, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - return fmt.Errorf("role not found: %v", err) - } - - if response.Entry == nil { - return fmt.Errorf("role not found") - } - - roleData = response.Entry.Content - return nil - }) - - if err != nil { - return nil, err - } - - // Deserialize role from JSON - var role RoleDefinition - if err := json.Unmarshal(roleData, &role); err != nil { - return nil, fmt.Errorf("failed to deserialize role: %v", err) - } - - return &role, nil -} - -// ListRoles lists all role names in filer -func (f *FilerRoleStore) ListRoles(ctx context.Context, filerAddress string) ([]string, error) { - // Use provider function if filerAddress is not provided - if filerAddress == "" && f.filerAddressProvider != nil { - filerAddress = f.filerAddressProvider() - } - if filerAddress == "" { - return nil, fmt.Errorf("filer address is required for FilerRoleStore") - } - - var roleNames []string - - err := f.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.ListEntriesRequest{ - Directory: f.basePath, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, // Process in batches of 1000 - } - - glog.V(3).Infof("Listing roles in %s", f.basePath) - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("failed to list roles: %v", err) - } - - for { - resp, err := stream.Recv() - if err != nil { - break // End of stream or error - } - - if resp.Entry == nil || resp.Entry.IsDirectory { - continue - } - - // Extract role name from filename - filename := resp.Entry.Name - if strings.HasSuffix(filename, ".json") { - roleName := strings.TrimSuffix(filename, ".json") - roleNames = append(roleNames, roleName) - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - return roleNames, nil -} - -// DeleteRole deletes a role definition from filer -func (f *FilerRoleStore) DeleteRole(ctx context.Context, filerAddress string, roleName string) error { - // Use provider function if filerAddress is not provided - if filerAddress == "" && f.filerAddressProvider != nil { - filerAddress = f.filerAddressProvider() - } - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerRoleStore") - } - if roleName == "" { - return fmt.Errorf("role name cannot be empty") - } - - return f.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.DeleteEntryRequest{ - Directory: f.basePath, - Name: f.getRoleFileName(roleName), - IsDeleteData: true, - } - - glog.V(3).Infof("Deleting role %s", roleName) - resp, err := client.DeleteEntry(ctx, request) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil // Idempotent: deletion of non-existent role is successful - } - return fmt.Errorf("failed to delete role %s: %v", roleName, err) - } - - if resp.Error != "" { - if strings.Contains(resp.Error, "not found") { - return nil // Idempotent: deletion of non-existent role is successful - } - return fmt.Errorf("failed to delete role %s: %s", roleName, resp.Error) - } - - return nil - }) -} - -// Helper methods for FilerRoleStore - -func (f *FilerRoleStore) getRoleFileName(roleName string) string { - return roleName + ".json" -} - -func (f *FilerRoleStore) getRolePath(roleName string) string { - return f.basePath + "/" + f.getRoleFileName(roleName) -} - -func (f *FilerRoleStore) withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error { - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerRoleStore") - } - return pb.WithGrpcFilerClient(false, 0, pb.ServerAddress(filerAddress), f.grpcDialOption, fn) -} - -// CachedFilerRoleStore implements RoleStore with TTL caching on top of FilerRoleStore -type CachedFilerRoleStore struct { - filerStore *FilerRoleStore - cache *ccache.Cache - listCache *ccache.Cache - ttl time.Duration - listTTL time.Duration -} - -// CachedFilerRoleStoreConfig holds configuration for the cached role store -type CachedFilerRoleStoreConfig struct { - BasePath string `json:"basePath,omitempty"` - TTL string `json:"ttl,omitempty"` // e.g., "5m", "1h" - ListTTL string `json:"listTtl,omitempty"` // e.g., "1m", "30s" - MaxCacheSize int `json:"maxCacheSize,omitempty"` // Maximum number of cached roles -} - -// NewCachedFilerRoleStore creates a new cached filer-based role store -func NewCachedFilerRoleStore(config map[string]interface{}) (*CachedFilerRoleStore, error) { - // Create underlying filer store - filerStore, err := NewFilerRoleStore(config, nil) - if err != nil { - return nil, fmt.Errorf("failed to create filer role store: %w", err) - } - - // Parse cache configuration with defaults - cacheTTL := 5 * time.Minute // Default 5 minutes for role cache - listTTL := 1 * time.Minute // Default 1 minute for list cache - maxCacheSize := 1000 // Default max 1000 cached roles - - if config != nil { - if ttlStr, ok := config["ttl"].(string); ok && ttlStr != "" { - if parsed, err := time.ParseDuration(ttlStr); err == nil { - cacheTTL = parsed - } - } - if listTTLStr, ok := config["listTtl"].(string); ok && listTTLStr != "" { - if parsed, err := time.ParseDuration(listTTLStr); err == nil { - listTTL = parsed - } - } - if maxSize, ok := config["maxCacheSize"].(int); ok && maxSize > 0 { - maxCacheSize = maxSize - } - } - - // Create ccache instances with appropriate configurations - pruneCount := int64(maxCacheSize) >> 3 - if pruneCount <= 0 { - pruneCount = 100 - } - - store := &CachedFilerRoleStore{ - filerStore: filerStore, - cache: ccache.New(ccache.Configure().MaxSize(int64(maxCacheSize)).ItemsToPrune(uint32(pruneCount))), - listCache: ccache.New(ccache.Configure().MaxSize(100).ItemsToPrune(10)), // Smaller cache for lists - ttl: cacheTTL, - listTTL: listTTL, - } - - glog.V(2).Infof("Initialized CachedFilerRoleStore with TTL %v, List TTL %v, Max Cache Size %d", - cacheTTL, listTTL, maxCacheSize) - - return store, nil -} - -// StoreRole stores a role definition and invalidates the cache -func (c *CachedFilerRoleStore) StoreRole(ctx context.Context, filerAddress string, roleName string, role *RoleDefinition) error { - // Store in filer - err := c.filerStore.StoreRole(ctx, filerAddress, roleName, role) - if err != nil { - return err - } - - // Invalidate cache entries - c.cache.Delete(roleName) - c.listCache.Clear() // Invalidate list cache - - glog.V(3).Infof("Stored and invalidated cache for role %s", roleName) - return nil -} - -// GetRole retrieves a role definition with caching -func (c *CachedFilerRoleStore) GetRole(ctx context.Context, filerAddress string, roleName string) (*RoleDefinition, error) { - // Try to get from cache first - item := c.cache.Get(roleName) - if item != nil { - // Cache hit - return cached role (DO NOT extend TTL) - role := item.Value().(*RoleDefinition) - glog.V(4).Infof("Cache hit for role %s", roleName) - return copyRoleDefinition(role), nil - } - - // Cache miss - fetch from filer - glog.V(4).Infof("Cache miss for role %s, fetching from filer", roleName) - role, err := c.filerStore.GetRole(ctx, filerAddress, roleName) - if err != nil { - return nil, err - } - - // Cache the result with TTL - c.cache.Set(roleName, copyRoleDefinition(role), c.ttl) - glog.V(3).Infof("Cached role %s with TTL %v", roleName, c.ttl) - return role, nil -} - -// ListRoles lists all role names with caching -func (c *CachedFilerRoleStore) ListRoles(ctx context.Context, filerAddress string) ([]string, error) { - // Use a constant key for the role list cache - const listCacheKey = "role_list" - - // Try to get from list cache first - item := c.listCache.Get(listCacheKey) - if item != nil { - // Cache hit - return cached list (DO NOT extend TTL) - roles := item.Value().([]string) - glog.V(4).Infof("List cache hit, returning %d roles", len(roles)) - return append([]string(nil), roles...), nil // Return a copy - } - - // Cache miss - fetch from filer - glog.V(4).Infof("List cache miss, fetching from filer") - roles, err := c.filerStore.ListRoles(ctx, filerAddress) - if err != nil { - return nil, err - } - - // Cache the result with TTL (store a copy) - rolesCopy := append([]string(nil), roles...) - c.listCache.Set(listCacheKey, rolesCopy, c.listTTL) - glog.V(3).Infof("Cached role list with %d entries, TTL %v", len(roles), c.listTTL) - return roles, nil -} - -// DeleteRole deletes a role definition and invalidates the cache -func (c *CachedFilerRoleStore) DeleteRole(ctx context.Context, filerAddress string, roleName string) error { - // Delete from filer - err := c.filerStore.DeleteRole(ctx, filerAddress, roleName) - if err != nil { - return err - } - - // Invalidate cache entries - c.cache.Delete(roleName) - c.listCache.Clear() // Invalidate list cache - - glog.V(3).Infof("Deleted and invalidated cache for role %s", roleName) - return nil -} - -// ClearCache clears all cached entries (for testing or manual cache invalidation) -func (c *CachedFilerRoleStore) ClearCache() { - c.cache.Clear() - c.listCache.Clear() - glog.V(2).Infof("Cleared all role cache entries") -} - -// GetCacheStats returns cache statistics -func (c *CachedFilerRoleStore) GetCacheStats() map[string]interface{} { - return map[string]interface{}{ - "roleCache": map[string]interface{}{ - "size": c.cache.ItemCount(), - "ttl": c.ttl.String(), - }, - "listCache": map[string]interface{}{ - "size": c.listCache.ItemCount(), - "ttl": c.listTTL.String(), - }, - } -} diff --git a/weed/iam/integration/role_store_test.go b/weed/iam/integration/role_store_test.go deleted file mode 100644 index 53ee339c3..000000000 --- a/weed/iam/integration/role_store_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package integration - -import ( - "context" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMemoryRoleStore(t *testing.T) { - ctx := context.Background() - store := NewMemoryRoleStore() - - // Test storing a role - roleDef := &RoleDefinition{ - RoleName: "TestRole", - RoleArn: "arn:seaweed:iam::role/TestRole", - Description: "Test role for unit testing", - AttachedPolicies: []string{"TestPolicy"}, - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - Principal: map[string]interface{}{ - "Federated": "test-provider", - }, - }, - }, - }, - } - - err := store.StoreRole(ctx, "", "TestRole", roleDef) - require.NoError(t, err) - - // Test retrieving the role - retrievedRole, err := store.GetRole(ctx, "", "TestRole") - require.NoError(t, err) - assert.Equal(t, "TestRole", retrievedRole.RoleName) - assert.Equal(t, "arn:seaweed:iam::role/TestRole", retrievedRole.RoleArn) - assert.Equal(t, "Test role for unit testing", retrievedRole.Description) - assert.Equal(t, []string{"TestPolicy"}, retrievedRole.AttachedPolicies) - - // Test listing roles - roles, err := store.ListRoles(ctx, "") - require.NoError(t, err) - assert.Contains(t, roles, "TestRole") - - // Test deleting the role - err = store.DeleteRole(ctx, "", "TestRole") - require.NoError(t, err) - - // Verify role is deleted - _, err = store.GetRole(ctx, "", "TestRole") - assert.Error(t, err) -} - -func TestRoleStoreConfiguration(t *testing.T) { - // Test memory role store creation - memoryStore, err := NewMemoryRoleStore(), error(nil) - require.NoError(t, err) - assert.NotNil(t, memoryStore) - - // Test filer role store creation without filerAddress in config - filerStore2, err := NewFilerRoleStore(map[string]interface{}{ - // filerAddress not required in config - "basePath": "/test/roles", - }, nil) - assert.NoError(t, err) - assert.NotNil(t, filerStore2) - - // Test filer role store creation with valid config - filerStore, err := NewFilerRoleStore(map[string]interface{}{ - "filerAddress": "localhost:8888", - "basePath": "/test/roles", - }, nil) - require.NoError(t, err) - assert.NotNil(t, filerStore) -} - -func TestDistributedIAMManagerWithRoleStore(t *testing.T) { - ctx := context.Background() - - // Create IAM manager with role store configuration - config := &IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Duration(3600) * time.Second}, - MaxSessionLength: sts.FlexibleDuration{time.Duration(43200) * time.Second}, - Issuer: "test-issuer", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &RoleStoreConfig{ - StoreType: "memory", - }, - } - - iamManager := NewIAMManager() - err := iamManager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Test creating a role - roleDef := &RoleDefinition{ - RoleName: "DistributedTestRole", - RoleArn: "arn:seaweed:iam::role/DistributedTestRole", - Description: "Test role for distributed IAM", - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - } - - err = iamManager.CreateRole(ctx, "", "DistributedTestRole", roleDef) - require.NoError(t, err) - - // Test that role is accessible through the IAM manager - // Note: We can't directly test GetRole as it's not exposed, - // but we can test through IsActionAllowed which internally uses the role store - assert.True(t, iamManager.initialized) -} diff --git a/weed/iam/ldap/mock_provider.go b/weed/iam/ldap/mock_provider.go deleted file mode 100644 index 080fd8bec..000000000 --- a/weed/iam/ldap/mock_provider.go +++ /dev/null @@ -1,186 +0,0 @@ -package ldap - -import ( - "context" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// MockLDAPProvider is a mock implementation for testing -// This is a standalone mock that doesn't depend on production LDAP code -type MockLDAPProvider struct { - name string - initialized bool - TestUsers map[string]*providers.ExternalIdentity - TestCredentials map[string]string // username -> password -} - -// NewMockLDAPProvider creates a mock LDAP provider for testing -func NewMockLDAPProvider(name string) *MockLDAPProvider { - return &MockLDAPProvider{ - name: name, - initialized: true, // Mock is always initialized - TestUsers: make(map[string]*providers.ExternalIdentity), - TestCredentials: make(map[string]string), - } -} - -// Name returns the provider name -func (m *MockLDAPProvider) Name() string { - return m.name -} - -// Initialize initializes the mock provider (no-op for testing) -func (m *MockLDAPProvider) Initialize(config interface{}) error { - m.initialized = true - return nil -} - -// AddTestUser adds a test user with credentials -func (m *MockLDAPProvider) AddTestUser(username, password string, identity *providers.ExternalIdentity) { - m.TestCredentials[username] = password - m.TestUsers[username] = identity -} - -// Authenticate authenticates using test data -func (m *MockLDAPProvider) Authenticate(ctx context.Context, credentials string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if credentials == "" { - return nil, fmt.Errorf("credentials cannot be empty") - } - - // Parse credentials (username:password format) - parts := strings.SplitN(credentials, ":", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid credentials format (expected username:password)") - } - - username, password := parts[0], parts[1] - - // Check test credentials - expectedPassword, userExists := m.TestCredentials[username] - if !userExists { - return nil, fmt.Errorf("user not found") - } - - if password != expectedPassword { - return nil, fmt.Errorf("invalid credentials") - } - - // Return test user identity - if identity, exists := m.TestUsers[username]; exists { - return identity, nil - } - - return nil, fmt.Errorf("user identity not found") -} - -// GetUserInfo returns test user info -func (m *MockLDAPProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if userID == "" { - return nil, fmt.Errorf("user ID cannot be empty") - } - - // Check test users - if identity, exists := m.TestUsers[userID]; exists { - return identity, nil - } - - // Return default test user if not found - return &providers.ExternalIdentity{ - UserID: userID, - Email: userID + "@test-ldap.com", - DisplayName: "Test LDAP User " + userID, - Groups: []string{"test-group"}, - Provider: m.name, - }, nil -} - -// ValidateToken validates credentials using test data -func (m *MockLDAPProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Parse credentials (username:password format) - parts := strings.SplitN(token, ":", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid token format (expected username:password)") - } - - username, password := parts[0], parts[1] - - // Check test credentials - expectedPassword, userExists := m.TestCredentials[username] - if !userExists { - return nil, fmt.Errorf("user not found") - } - - if password != expectedPassword { - return nil, fmt.Errorf("invalid credentials") - } - - // Return test claims - identity := m.TestUsers[username] - return &providers.TokenClaims{ - Subject: username, - Claims: map[string]interface{}{ - "ldap_dn": "CN=" + username + ",DC=test,DC=com", - "email": identity.Email, - "name": identity.DisplayName, - "groups": identity.Groups, - "provider": m.name, - }, - }, nil -} - -// SetupDefaultTestData configures common test data -func (m *MockLDAPProvider) SetupDefaultTestData() { - // Add default test user - m.AddTestUser("testuser", "testpass", &providers.ExternalIdentity{ - UserID: "testuser", - Email: "testuser@ldap-test.com", - DisplayName: "Test LDAP User", - Groups: []string{"developers", "users"}, - Provider: m.name, - Attributes: map[string]string{ - "department": "Engineering", - "location": "Test City", - }, - }) - - // Add admin test user - m.AddTestUser("admin", "adminpass", &providers.ExternalIdentity{ - UserID: "admin", - Email: "admin@ldap-test.com", - DisplayName: "LDAP Administrator", - Groups: []string{"admins", "users"}, - Provider: m.name, - Attributes: map[string]string{ - "department": "IT", - "role": "administrator", - }, - }) - - // Add readonly user - m.AddTestUser("readonly", "readpass", &providers.ExternalIdentity{ - UserID: "readonly", - Email: "readonly@ldap-test.com", - DisplayName: "Read Only User", - Groups: []string{"readonly"}, - Provider: m.name, - }) -} diff --git a/weed/iam/oidc/mock_provider.go b/weed/iam/oidc/mock_provider.go deleted file mode 100644 index c4ff9a401..000000000 --- a/weed/iam/oidc/mock_provider.go +++ /dev/null @@ -1,203 +0,0 @@ -// This file contains mock OIDC provider implementations for testing only. -// These should NOT be used in production environments. - -package oidc - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// MockOIDCProvider is a mock implementation for testing -type MockOIDCProvider struct { - *OIDCProvider - TestTokens map[string]*providers.TokenClaims - TestUsers map[string]*providers.ExternalIdentity -} - -// NewMockOIDCProvider creates a mock OIDC provider for testing -func NewMockOIDCProvider(name string) *MockOIDCProvider { - return &MockOIDCProvider{ - OIDCProvider: NewOIDCProvider(name), - TestTokens: make(map[string]*providers.TokenClaims), - TestUsers: make(map[string]*providers.ExternalIdentity), - } -} - -// AddTestToken adds a test token with expected claims -func (m *MockOIDCProvider) AddTestToken(token string, claims *providers.TokenClaims) { - m.TestTokens[token] = claims -} - -// AddTestUser adds a test user with expected identity -func (m *MockOIDCProvider) AddTestUser(userID string, identity *providers.ExternalIdentity) { - m.TestUsers[userID] = identity -} - -// Authenticate overrides the parent Authenticate method to use mock data -func (m *MockOIDCProvider) Authenticate(ctx context.Context, token string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Validate token using mock validation - claims, err := m.ValidateToken(ctx, token) - if err != nil { - return nil, err - } - - // Map claims to external identity - email, _ := claims.GetClaimString("email") - displayName, _ := claims.GetClaimString("name") - groups, _ := claims.GetClaimStringSlice("groups") - - return &providers.ExternalIdentity{ - UserID: claims.Subject, - Email: email, - DisplayName: displayName, - Groups: groups, - Provider: m.name, - }, nil -} - -// ValidateToken validates tokens using test data -func (m *MockOIDCProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Special test tokens - if token == "expired_token" { - return nil, fmt.Errorf("token has expired") - } - if token == "invalid_token" { - return nil, fmt.Errorf("invalid token") - } - - // Try to parse as JWT token first - if len(token) > 20 && strings.Count(token, ".") >= 2 { - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err == nil { - if jwtClaims, ok := parsedToken.Claims.(jwt.MapClaims); ok { - issuer, _ := jwtClaims["iss"].(string) - subject, _ := jwtClaims["sub"].(string) - audience, _ := jwtClaims["aud"].(string) - - // Verify the issuer matches our configuration - if issuer == m.config.Issuer && subject != "" { - // Extract expiration and issued at times - var expiresAt, issuedAt time.Time - if exp, ok := jwtClaims["exp"].(float64); ok { - expiresAt = time.Unix(int64(exp), 0) - } - if iat, ok := jwtClaims["iat"].(float64); ok { - issuedAt = time.Unix(int64(iat), 0) - } - - return &providers.TokenClaims{ - Subject: subject, - Issuer: issuer, - Audience: audience, - ExpiresAt: expiresAt, - IssuedAt: issuedAt, - Claims: map[string]interface{}{ - "email": subject + "@test-domain.com", - "name": "Test User " + subject, - }, - }, nil - } - } - } - } - - // Check test tokens - if claims, exists := m.TestTokens[token]; exists { - return claims, nil - } - - // Default test token for basic testing - if token == "valid_test_token" { - return &providers.TokenClaims{ - Subject: "test-user-id", - Issuer: m.config.Issuer, - Audience: m.config.ClientID, - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now(), - Claims: map[string]interface{}{ - "email": "test@example.com", - "name": "Test User", - "groups": []string{"developers", "users"}, - }, - }, nil - } - - return nil, fmt.Errorf("unknown test token: %s", token) -} - -// GetUserInfo returns test user info -func (m *MockOIDCProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if userID == "" { - return nil, fmt.Errorf("user ID cannot be empty") - } - - // Check test users - if identity, exists := m.TestUsers[userID]; exists { - return identity, nil - } - - // Default test user - return &providers.ExternalIdentity{ - UserID: userID, - Email: userID + "@example.com", - DisplayName: "Test User " + userID, - Provider: m.name, - }, nil -} - -// SetupDefaultTestData configures common test data -func (m *MockOIDCProvider) SetupDefaultTestData() { - // Create default token claims - defaultClaims := &providers.TokenClaims{ - Subject: "test-user-123", - Issuer: "https://test-issuer.com", - Audience: "test-client-id", - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now(), - Claims: map[string]interface{}{ - "email": "testuser@example.com", - "name": "Test User", - "groups": []string{"developers"}, - }, - } - - // Add multiple token variants for compatibility - m.AddTestToken("valid_token", defaultClaims) - m.AddTestToken("valid-oidc-token", defaultClaims) // For integration tests - m.AddTestToken("valid_test_token", defaultClaims) // For STS tests - - // Add default test users - m.AddTestUser("test-user-123", &providers.ExternalIdentity{ - UserID: "test-user-123", - Email: "testuser@example.com", - DisplayName: "Test User", - Groups: []string{"developers"}, - Provider: m.name, - }) -} diff --git a/weed/iam/oidc/mock_provider_test.go b/weed/iam/oidc/mock_provider_test.go deleted file mode 100644 index 920b2b3be..000000000 --- a/weed/iam/oidc/mock_provider_test.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build test -// +build test - -package oidc - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// MockOIDCProvider is a mock implementation for testing -type MockOIDCProvider struct { - *OIDCProvider - TestTokens map[string]*providers.TokenClaims - TestUsers map[string]*providers.ExternalIdentity -} - -// NewMockOIDCProvider creates a mock OIDC provider for testing -func NewMockOIDCProvider(name string) *MockOIDCProvider { - return &MockOIDCProvider{ - OIDCProvider: NewOIDCProvider(name), - TestTokens: make(map[string]*providers.TokenClaims), - TestUsers: make(map[string]*providers.ExternalIdentity), - } -} - -// AddTestToken adds a test token with expected claims -func (m *MockOIDCProvider) AddTestToken(token string, claims *providers.TokenClaims) { - m.TestTokens[token] = claims -} - -// AddTestUser adds a test user with expected identity -func (m *MockOIDCProvider) AddTestUser(userID string, identity *providers.ExternalIdentity) { - m.TestUsers[userID] = identity -} - -// Authenticate overrides the parent Authenticate method to use mock data -func (m *MockOIDCProvider) Authenticate(ctx context.Context, token string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Validate token using mock validation - claims, err := m.ValidateToken(ctx, token) - if err != nil { - return nil, err - } - - // Map claims to external identity - email, _ := claims.GetClaimString("email") - displayName, _ := claims.GetClaimString("name") - groups, _ := claims.GetClaimStringSlice("groups") - - return &providers.ExternalIdentity{ - UserID: claims.Subject, - Email: email, - DisplayName: displayName, - Groups: groups, - Provider: m.name, - }, nil -} - -// ValidateToken validates tokens using test data -func (m *MockOIDCProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Special test tokens - if token == "expired_token" { - return nil, fmt.Errorf("token has expired") - } - if token == "invalid_token" { - return nil, fmt.Errorf("invalid token") - } - - // Try to parse as JWT token first - if len(token) > 20 && strings.Count(token, ".") >= 2 { - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err == nil { - if jwtClaims, ok := parsedToken.Claims.(jwt.MapClaims); ok { - issuer, _ := jwtClaims["iss"].(string) - subject, _ := jwtClaims["sub"].(string) - audience, _ := jwtClaims["aud"].(string) - - // Verify the issuer matches our configuration - if issuer == m.config.Issuer && subject != "" { - // Extract expiration and issued at times - var expiresAt, issuedAt time.Time - if exp, ok := jwtClaims["exp"].(float64); ok { - expiresAt = time.Unix(int64(exp), 0) - } - if iat, ok := jwtClaims["iat"].(float64); ok { - issuedAt = time.Unix(int64(iat), 0) - } - - return &providers.TokenClaims{ - Subject: subject, - Issuer: issuer, - Audience: audience, - ExpiresAt: expiresAt, - IssuedAt: issuedAt, - Claims: map[string]interface{}{ - "email": subject + "@test-domain.com", - "name": "Test User " + subject, - }, - }, nil - } - } - } - } - - // Check test tokens - if claims, exists := m.TestTokens[token]; exists { - return claims, nil - } - - // Default test token for basic testing - if token == "valid_test_token" { - return &providers.TokenClaims{ - Subject: "test-user-id", - Issuer: m.config.Issuer, - Audience: m.config.ClientID, - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now(), - Claims: map[string]interface{}{ - "email": "test@example.com", - "name": "Test User", - "groups": []string{"developers", "users"}, - }, - }, nil - } - - return nil, fmt.Errorf("unknown test token: %s", token) -} - -// GetUserInfo returns test user info -func (m *MockOIDCProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - if !m.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if userID == "" { - return nil, fmt.Errorf("user ID cannot be empty") - } - - // Check test users - if identity, exists := m.TestUsers[userID]; exists { - return identity, nil - } - - // Default test user - return &providers.ExternalIdentity{ - UserID: userID, - Email: userID + "@example.com", - DisplayName: "Test User " + userID, - Provider: m.name, - }, nil -} - -// SetupDefaultTestData configures common test data -func (m *MockOIDCProvider) SetupDefaultTestData() { - // Create default token claims - defaultClaims := &providers.TokenClaims{ - Subject: "test-user-123", - Issuer: "https://test-issuer.com", - Audience: "test-client-id", - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now(), - Claims: map[string]interface{}{ - "email": "testuser@example.com", - "name": "Test User", - "groups": []string{"developers"}, - }, - } - - // Add multiple token variants for compatibility - m.AddTestToken("valid_token", defaultClaims) - m.AddTestToken("valid-oidc-token", defaultClaims) // For integration tests - m.AddTestToken("valid_test_token", defaultClaims) // For STS tests - - // Add default test users - m.AddTestUser("test-user-123", &providers.ExternalIdentity{ - UserID: "test-user-123", - Email: "testuser@example.com", - DisplayName: "Test User", - Groups: []string{"developers"}, - Provider: m.name, - }) -} diff --git a/weed/iam/oidc/oidc_provider.go b/weed/iam/oidc/oidc_provider.go deleted file mode 100644 index d31f322b0..000000000 --- a/weed/iam/oidc/oidc_provider.go +++ /dev/null @@ -1,670 +0,0 @@ -package oidc - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/base64" - "encoding/json" - "fmt" - "math/big" - "net/http" - "strings" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// OIDCProvider implements OpenID Connect authentication -type OIDCProvider struct { - name string - config *OIDCConfig - initialized bool - jwksCache *JWKS - httpClient *http.Client - jwksFetchedAt time.Time - jwksTTL time.Duration -} - -// OIDCConfig holds OIDC provider configuration -type OIDCConfig struct { - // Issuer is the OIDC issuer URL - Issuer string `json:"issuer"` - - // ClientID is the OAuth2 client ID - ClientID string `json:"clientId"` - - // ClientSecret is the OAuth2 client secret (optional for public clients) - ClientSecret string `json:"clientSecret,omitempty"` - - // JWKSUri is the JSON Web Key Set URI - JWKSUri string `json:"jwksUri,omitempty"` - - // UserInfoUri is the UserInfo endpoint URI - UserInfoUri string `json:"userInfoUri,omitempty"` - - // Scopes are the OAuth2 scopes to request - Scopes []string `json:"scopes,omitempty"` - - // RoleMapping defines how to map OIDC claims to roles - RoleMapping *providers.RoleMapping `json:"roleMapping,omitempty"` - - // ClaimsMapping defines how to map OIDC claims to identity attributes - ClaimsMapping map[string]string `json:"claimsMapping,omitempty"` - - // JWKSCacheTTLSeconds sets how long to cache JWKS before refresh (default 3600 seconds) - JWKSCacheTTLSeconds int `json:"jwksCacheTTLSeconds,omitempty"` -} - -// JWKS represents JSON Web Key Set -type JWKS struct { - Keys []JWK `json:"keys"` -} - -// JWK represents a JSON Web Key -type JWK struct { - Kty string `json:"kty"` // Key Type (RSA, EC, etc.) - Kid string `json:"kid"` // Key ID - Use string `json:"use"` // Usage (sig for signature) - Alg string `json:"alg"` // Algorithm (RS256, etc.) - N string `json:"n"` // RSA public key modulus - E string `json:"e"` // RSA public key exponent - X string `json:"x"` // EC public key x coordinate - Y string `json:"y"` // EC public key y coordinate - Crv string `json:"crv"` // EC curve -} - -// NewOIDCProvider creates a new OIDC provider -func NewOIDCProvider(name string) *OIDCProvider { - return &OIDCProvider{ - name: name, - httpClient: &http.Client{Timeout: 30 * time.Second}, - } -} - -// Name returns the provider name -func (p *OIDCProvider) Name() string { - return p.name -} - -// GetIssuer returns the configured issuer URL for efficient provider lookup -func (p *OIDCProvider) GetIssuer() string { - if p.config == nil { - return "" - } - return p.config.Issuer -} - -// Initialize initializes the OIDC provider with configuration -func (p *OIDCProvider) Initialize(config interface{}) error { - if config == nil { - return fmt.Errorf("config cannot be nil") - } - - oidcConfig, ok := config.(*OIDCConfig) - if !ok { - return fmt.Errorf("invalid config type for OIDC provider") - } - - if err := p.validateConfig(oidcConfig); err != nil { - return fmt.Errorf("invalid OIDC configuration: %w", err) - } - - p.config = oidcConfig - p.initialized = true - - // Configure JWKS cache TTL - if oidcConfig.JWKSCacheTTLSeconds > 0 { - p.jwksTTL = time.Duration(oidcConfig.JWKSCacheTTLSeconds) * time.Second - } else { - p.jwksTTL = time.Hour - } - - // For testing, we'll skip the actual OIDC client initialization - return nil -} - -// validateConfig validates the OIDC configuration -func (p *OIDCProvider) validateConfig(config *OIDCConfig) error { - if config.Issuer == "" { - return fmt.Errorf("issuer is required") - } - - if config.ClientID == "" { - return fmt.Errorf("client ID is required") - } - - // Basic URL validation for issuer - if config.Issuer != "" && config.Issuer != "https://accounts.google.com" && config.Issuer[0:4] != "http" { - return fmt.Errorf("invalid issuer URL format") - } - - return nil -} - -// Authenticate authenticates a user with an OIDC token -func (p *OIDCProvider) Authenticate(ctx context.Context, token string) (*providers.ExternalIdentity, error) { - if !p.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Validate token and get claims - claims, err := p.ValidateToken(ctx, token) - if err != nil { - return nil, err - } - - // Map claims to external identity - email, _ := claims.GetClaimString("email") - displayName, _ := claims.GetClaimString("name") - groups, _ := claims.GetClaimStringSlice("groups") - - // Debug: Log available claims - glog.V(3).Infof("Available claims: %+v", claims.Claims) - if rolesFromClaims, exists := claims.GetClaimStringSlice("roles"); exists { - glog.V(3).Infof("Roles claim found as string slice: %v", rolesFromClaims) - } else if roleFromClaims, exists := claims.GetClaimString("roles"); exists { - glog.V(3).Infof("Roles claim found as string: %s", roleFromClaims) - } else { - glog.V(3).Infof("No roles claim found in token") - } - - // Map claims to roles using configured role mapping - roles := p.mapClaimsToRolesWithConfig(claims) - - // Create attributes map and add roles - attributes := make(map[string]string) - if len(roles) > 0 { - // Store roles as a comma-separated string in attributes - attributes["roles"] = strings.Join(roles, ",") - } - - return &providers.ExternalIdentity{ - UserID: claims.Subject, - Email: email, - DisplayName: displayName, - Groups: groups, - Attributes: attributes, - Provider: p.name, - }, nil -} - -// GetUserInfo retrieves user information from the UserInfo endpoint -func (p *OIDCProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - if !p.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if userID == "" { - return nil, fmt.Errorf("user ID cannot be empty") - } - - // For now, we'll use a token-based approach since OIDC UserInfo typically requires a token - // In a real implementation, this would need an access token from the authentication flow - return p.getUserInfoWithToken(ctx, userID, "") -} - -// GetUserInfoWithToken retrieves user information using an access token -func (p *OIDCProvider) GetUserInfoWithToken(ctx context.Context, accessToken string) (*providers.ExternalIdentity, error) { - if !p.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if accessToken == "" { - return nil, fmt.Errorf("access token cannot be empty") - } - - return p.getUserInfoWithToken(ctx, "", accessToken) -} - -// getUserInfoWithToken is the internal implementation for UserInfo endpoint calls -func (p *OIDCProvider) getUserInfoWithToken(ctx context.Context, userID, accessToken string) (*providers.ExternalIdentity, error) { - // Determine UserInfo endpoint URL - userInfoUri := p.config.UserInfoUri - if userInfoUri == "" { - // Use standard OIDC discovery endpoint convention - userInfoUri = strings.TrimSuffix(p.config.Issuer, "/") + "/userinfo" - } - - // Create HTTP request - req, err := http.NewRequestWithContext(ctx, "GET", userInfoUri, nil) - if err != nil { - return nil, fmt.Errorf("failed to create UserInfo request: %v", err) - } - - // Set authorization header if access token is provided - if accessToken != "" { - req.Header.Set("Authorization", "Bearer "+accessToken) - } - req.Header.Set("Accept", "application/json") - - // Make HTTP request - resp, err := p.httpClient.Do(req) - if err != nil { - return nil, fmt.Errorf("failed to call UserInfo endpoint: %v", err) - } - defer resp.Body.Close() - - // Check response status - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("UserInfo endpoint returned status %d", resp.StatusCode) - } - - // Parse JSON response - var userInfo map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&userInfo); err != nil { - return nil, fmt.Errorf("failed to decode UserInfo response: %v", err) - } - - glog.V(4).Infof("Received UserInfo response: %+v", userInfo) - - // Map UserInfo claims to ExternalIdentity - identity := p.mapUserInfoToIdentity(userInfo) - - // If userID was provided but not found in claims, use it - if userID != "" && identity.UserID == "" { - identity.UserID = userID - } - - glog.V(3).Infof("Retrieved user info from OIDC provider: %s", identity.UserID) - return identity, nil -} - -// ValidateToken validates an OIDC JWT token -func (p *OIDCProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if !p.initialized { - return nil, fmt.Errorf("provider not initialized") - } - - if token == "" { - return nil, fmt.Errorf("token cannot be empty") - } - - // Parse token without verification first to get header info - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err != nil { - return nil, fmt.Errorf("failed to parse JWT token: %v", err) - } - - // Get key ID from header - kid, ok := parsedToken.Header["kid"].(string) - if !ok { - return nil, fmt.Errorf("missing key ID in JWT header") - } - - // Get signing key from JWKS - publicKey, err := p.getPublicKey(ctx, kid) - if err != nil { - return nil, fmt.Errorf("failed to get public key: %v", err) - } - - // Parse and validate token with proper signature verification - claims := jwt.MapClaims{} - validatedToken, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) { - // Verify signing method - switch token.Method.(type) { - case *jwt.SigningMethodRSA: - return publicKey, nil - default: - return nil, fmt.Errorf("unsupported signing method: %v", token.Header["alg"]) - } - }) - - if err != nil { - return nil, fmt.Errorf("failed to validate JWT token: %v", err) - } - - if !validatedToken.Valid { - return nil, fmt.Errorf("JWT token is invalid") - } - - // Validate required claims - issuer, ok := claims["iss"].(string) - if !ok || issuer != p.config.Issuer { - return nil, fmt.Errorf("invalid or missing issuer claim") - } - - // Check audience claim (aud) or authorized party (azp) - Keycloak uses azp - // Per RFC 7519, aud can be either a string or an array of strings - var audienceMatched bool - if audClaim, ok := claims["aud"]; ok { - switch aud := audClaim.(type) { - case string: - if aud == p.config.ClientID { - audienceMatched = true - } - case []interface{}: - for _, a := range aud { - if str, ok := a.(string); ok && str == p.config.ClientID { - audienceMatched = true - break - } - } - } - } - - if !audienceMatched { - if azp, ok := claims["azp"].(string); ok && azp == p.config.ClientID { - audienceMatched = true - } - } - - if !audienceMatched { - return nil, fmt.Errorf("invalid or missing audience claim for client ID %s", p.config.ClientID) - } - - subject, ok := claims["sub"].(string) - if !ok { - return nil, fmt.Errorf("missing subject claim") - } - - // Convert to our TokenClaims structure - tokenClaims := &providers.TokenClaims{ - Subject: subject, - Issuer: issuer, - Claims: make(map[string]interface{}), - } - - // Copy all claims - for key, value := range claims { - tokenClaims.Claims[key] = value - } - - return tokenClaims, nil -} - -// mapClaimsToRoles maps token claims to SeaweedFS roles (legacy method) -func (p *OIDCProvider) mapClaimsToRoles(claims *providers.TokenClaims) []string { - roles := []string{} - - // Get groups from claims - groups, _ := claims.GetClaimStringSlice("groups") - - // Basic role mapping based on groups - for _, group := range groups { - switch group { - case "admins": - roles = append(roles, "admin") - case "developers": - roles = append(roles, "readwrite") - case "users": - roles = append(roles, "readonly") - } - } - - if len(roles) == 0 { - roles = []string{"readonly"} // Default role - } - - return roles -} - -// mapClaimsToRolesWithConfig maps token claims to roles using configured role mapping -func (p *OIDCProvider) mapClaimsToRolesWithConfig(claims *providers.TokenClaims) []string { - glog.V(3).Infof("mapClaimsToRolesWithConfig: RoleMapping is nil? %t", p.config.RoleMapping == nil) - - if p.config.RoleMapping == nil { - glog.V(2).Infof("No role mapping configured for provider %s, using legacy mapping", p.name) - // Fallback to legacy mapping if no role mapping configured - return p.mapClaimsToRoles(claims) - } - - glog.V(3).Infof("Applying %d role mapping rules", len(p.config.RoleMapping.Rules)) - roles := []string{} - - // Apply role mapping rules - for i, rule := range p.config.RoleMapping.Rules { - glog.V(3).Infof("Rule %d: claim=%s, value=%s, role=%s", i, rule.Claim, rule.Value, rule.Role) - - if rule.Matches(claims) { - glog.V(2).Infof("Rule %d matched! Adding role: %s", i, rule.Role) - roles = append(roles, rule.Role) - } else { - glog.V(3).Infof("Rule %d did not match", i) - } - } - - // Use default role if no rules matched - if len(roles) == 0 && p.config.RoleMapping.DefaultRole != "" { - glog.V(2).Infof("No rules matched, using default role: %s", p.config.RoleMapping.DefaultRole) - roles = []string{p.config.RoleMapping.DefaultRole} - } - - glog.V(2).Infof("Role mapping result: %v", roles) - return roles -} - -// getPublicKey retrieves the public key for the given key ID from JWKS -func (p *OIDCProvider) getPublicKey(ctx context.Context, kid string) (interface{}, error) { - // Fetch JWKS if not cached or refresh if expired - if p.jwksCache == nil || (!p.jwksFetchedAt.IsZero() && time.Since(p.jwksFetchedAt) > p.jwksTTL) { - if err := p.fetchJWKS(ctx); err != nil { - return nil, fmt.Errorf("failed to fetch JWKS: %v", err) - } - } - - // Find the key with matching kid - for _, key := range p.jwksCache.Keys { - if key.Kid == kid { - return p.parseJWK(&key) - } - } - - // Key not found in cache. Refresh JWKS once to handle key rotation and retry. - if err := p.fetchJWKS(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh JWKS after key miss: %v", err) - } - for _, key := range p.jwksCache.Keys { - if key.Kid == kid { - return p.parseJWK(&key) - } - } - return nil, fmt.Errorf("key with ID %s not found in JWKS after refresh", kid) -} - -// fetchJWKS fetches the JWKS from the provider -func (p *OIDCProvider) fetchJWKS(ctx context.Context) error { - jwksURL := p.config.JWKSUri - if jwksURL == "" { - jwksURL = strings.TrimSuffix(p.config.Issuer, "/") + "/.well-known/jwks.json" - } - - req, err := http.NewRequestWithContext(ctx, "GET", jwksURL, nil) - if err != nil { - return fmt.Errorf("failed to create JWKS request: %v", err) - } - - resp, err := p.httpClient.Do(req) - if err != nil { - return fmt.Errorf("failed to fetch JWKS: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("JWKS endpoint returned status: %d", resp.StatusCode) - } - - var jwks JWKS - if err := json.NewDecoder(resp.Body).Decode(&jwks); err != nil { - return fmt.Errorf("failed to decode JWKS response: %v", err) - } - - p.jwksCache = &jwks - p.jwksFetchedAt = time.Now() - glog.V(3).Infof("Fetched JWKS with %d keys from %s", len(jwks.Keys), jwksURL) - return nil -} - -// parseJWK converts a JWK to a public key -func (p *OIDCProvider) parseJWK(key *JWK) (interface{}, error) { - switch key.Kty { - case "RSA": - return p.parseRSAKey(key) - case "EC": - return p.parseECKey(key) - default: - return nil, fmt.Errorf("unsupported key type: %s", key.Kty) - } -} - -// parseRSAKey parses an RSA key from JWK -func (p *OIDCProvider) parseRSAKey(key *JWK) (*rsa.PublicKey, error) { - // Decode the modulus (n) - nBytes, err := base64.RawURLEncoding.DecodeString(key.N) - if err != nil { - return nil, fmt.Errorf("failed to decode RSA modulus: %v", err) - } - - // Decode the exponent (e) - eBytes, err := base64.RawURLEncoding.DecodeString(key.E) - if err != nil { - return nil, fmt.Errorf("failed to decode RSA exponent: %v", err) - } - - // Convert exponent bytes to int - var exponent int - for _, b := range eBytes { - exponent = exponent*256 + int(b) - } - - // Create RSA public key - pubKey := &rsa.PublicKey{ - E: exponent, - } - pubKey.N = new(big.Int).SetBytes(nBytes) - - return pubKey, nil -} - -// parseECKey parses an Elliptic Curve key from JWK -func (p *OIDCProvider) parseECKey(key *JWK) (*ecdsa.PublicKey, error) { - // Validate required fields - if key.X == "" || key.Y == "" || key.Crv == "" { - return nil, fmt.Errorf("incomplete EC key: missing x, y, or crv parameter") - } - - // Get the curve - var curve elliptic.Curve - switch key.Crv { - case "P-256": - curve = elliptic.P256() - case "P-384": - curve = elliptic.P384() - case "P-521": - curve = elliptic.P521() - default: - return nil, fmt.Errorf("unsupported EC curve: %s", key.Crv) - } - - // Decode x coordinate - xBytes, err := base64.RawURLEncoding.DecodeString(key.X) - if err != nil { - return nil, fmt.Errorf("failed to decode EC x coordinate: %v", err) - } - - // Decode y coordinate - yBytes, err := base64.RawURLEncoding.DecodeString(key.Y) - if err != nil { - return nil, fmt.Errorf("failed to decode EC y coordinate: %v", err) - } - - // Create EC public key - pubKey := &ecdsa.PublicKey{ - Curve: curve, - X: new(big.Int).SetBytes(xBytes), - Y: new(big.Int).SetBytes(yBytes), - } - - // Validate that the point is on the curve - if !curve.IsOnCurve(pubKey.X, pubKey.Y) { - return nil, fmt.Errorf("EC key coordinates are not on the specified curve") - } - - return pubKey, nil -} - -// mapUserInfoToIdentity maps UserInfo response to ExternalIdentity -func (p *OIDCProvider) mapUserInfoToIdentity(userInfo map[string]interface{}) *providers.ExternalIdentity { - identity := &providers.ExternalIdentity{ - Provider: p.name, - Attributes: make(map[string]string), - } - - // Map standard OIDC claims - if sub, ok := userInfo["sub"].(string); ok { - identity.UserID = sub - } - - if email, ok := userInfo["email"].(string); ok { - identity.Email = email - } - - if name, ok := userInfo["name"].(string); ok { - identity.DisplayName = name - } - - // Handle groups claim (can be array of strings or single string) - if groupsData, exists := userInfo["groups"]; exists { - switch groups := groupsData.(type) { - case []interface{}: - // Array of groups - for _, group := range groups { - if groupStr, ok := group.(string); ok { - identity.Groups = append(identity.Groups, groupStr) - } - } - case []string: - // Direct string array - identity.Groups = groups - case string: - // Single group as string - identity.Groups = []string{groups} - } - } - - // Map configured custom claims - if p.config.ClaimsMapping != nil { - for identityField, oidcClaim := range p.config.ClaimsMapping { - if value, exists := userInfo[oidcClaim]; exists { - if strValue, ok := value.(string); ok { - switch identityField { - case "email": - if identity.Email == "" { - identity.Email = strValue - } - case "displayName": - if identity.DisplayName == "" { - identity.DisplayName = strValue - } - case "userID": - if identity.UserID == "" { - identity.UserID = strValue - } - default: - identity.Attributes[identityField] = strValue - } - } - } - } - } - - // Store all additional claims as attributes - for key, value := range userInfo { - if key != "sub" && key != "email" && key != "name" && key != "groups" { - if strValue, ok := value.(string); ok { - identity.Attributes[key] = strValue - } else if jsonValue, err := json.Marshal(value); err == nil { - identity.Attributes[key] = string(jsonValue) - } - } - } - - return identity -} diff --git a/weed/iam/oidc/oidc_provider_test.go b/weed/iam/oidc/oidc_provider_test.go deleted file mode 100644 index d37bee1f0..000000000 --- a/weed/iam/oidc/oidc_provider_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package oidc - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "encoding/base64" - "encoding/json" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestOIDCProviderInitialization tests OIDC provider initialization -func TestOIDCProviderInitialization(t *testing.T) { - tests := []struct { - name string - config *OIDCConfig - wantErr bool - }{ - { - name: "valid config", - config: &OIDCConfig{ - Issuer: "https://accounts.google.com", - ClientID: "test-client-id", - JWKSUri: "https://www.googleapis.com/oauth2/v3/certs", - }, - wantErr: false, - }, - { - name: "missing issuer", - config: &OIDCConfig{ - ClientID: "test-client-id", - }, - wantErr: true, - }, - { - name: "missing client id", - config: &OIDCConfig{ - Issuer: "https://accounts.google.com", - }, - wantErr: true, - }, - { - name: "invalid issuer url", - config: &OIDCConfig{ - Issuer: "not-a-url", - ClientID: "test-client-id", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - provider := NewOIDCProvider("test-provider") - - err := provider.Initialize(tt.config) - - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, "test-provider", provider.Name()) - } - }) - } -} - -// TestOIDCProviderJWTValidation tests JWT token validation -func TestOIDCProviderJWTValidation(t *testing.T) { - // Set up test server with JWKS endpoint - privateKey, publicKey := generateTestKeys(t) - - jwks := map[string]interface{}{ - "keys": []map[string]interface{}{ - { - "kty": "RSA", - "kid": "test-key-id", - "use": "sig", - "alg": "RS256", - "n": encodePublicKey(t, publicKey), - "e": "AQAB", - }, - }, - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/.well-known/openid_configuration" { - config := map[string]interface{}{ - "issuer": "http://" + r.Host, - "jwks_uri": "http://" + r.Host + "/jwks", - } - json.NewEncoder(w).Encode(config) - } else if r.URL.Path == "/jwks" { - json.NewEncoder(w).Encode(jwks) - } - })) - defer server.Close() - - provider := NewOIDCProvider("test-oidc") - config := &OIDCConfig{ - Issuer: server.URL, - ClientID: "test-client", - JWKSUri: server.URL + "/jwks", - } - - err := provider.Initialize(config) - require.NoError(t, err) - - t.Run("valid token", func(t *testing.T) { - // Create valid JWT token - token := createTestJWT(t, privateKey, jwt.MapClaims{ - "iss": server.URL, - "aud": "test-client", - "sub": "user123", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - "email": "user@example.com", - "name": "Test User", - }) - - claims, err := provider.ValidateToken(context.Background(), token) - require.NoError(t, err) - require.NotNil(t, claims) - assert.Equal(t, "user123", claims.Subject) - assert.Equal(t, server.URL, claims.Issuer) - - email, exists := claims.GetClaimString("email") - assert.True(t, exists) - assert.Equal(t, "user@example.com", email) - }) - - t.Run("valid token with array audience", func(t *testing.T) { - // Create valid JWT token with audience as an array (per RFC 7519) - token := createTestJWT(t, privateKey, jwt.MapClaims{ - "iss": server.URL, - "aud": []string{"test-client", "another-client"}, - "sub": "user456", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - "email": "user2@example.com", - "name": "Test User 2", - }) - - claims, err := provider.ValidateToken(context.Background(), token) - require.NoError(t, err) - require.NotNil(t, claims) - assert.Equal(t, "user456", claims.Subject) - assert.Equal(t, server.URL, claims.Issuer) - - email, exists := claims.GetClaimString("email") - assert.True(t, exists) - assert.Equal(t, "user2@example.com", email) - }) - - t.Run("expired token", func(t *testing.T) { - // Create expired JWT token - token := createTestJWT(t, privateKey, jwt.MapClaims{ - "iss": server.URL, - "aud": "test-client", - "sub": "user123", - "exp": time.Now().Add(-time.Hour).Unix(), // Expired - "iat": time.Now().Add(-time.Hour * 2).Unix(), - }) - - _, err := provider.ValidateToken(context.Background(), token) - assert.Error(t, err) - assert.Contains(t, err.Error(), "expired") - }) - - t.Run("invalid signature", func(t *testing.T) { - // Create token with wrong key - wrongKey, _ := generateTestKeys(t) - token := createTestJWT(t, wrongKey, jwt.MapClaims{ - "iss": server.URL, - "aud": "test-client", - "sub": "user123", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - }) - - _, err := provider.ValidateToken(context.Background(), token) - assert.Error(t, err) - }) -} - -// TestOIDCProviderAuthentication tests authentication flow -func TestOIDCProviderAuthentication(t *testing.T) { - // Set up test OIDC provider - privateKey, publicKey := generateTestKeys(t) - - server := setupOIDCTestServer(t, publicKey) - defer server.Close() - - provider := NewOIDCProvider("test-oidc") - config := &OIDCConfig{ - Issuer: server.URL, - ClientID: "test-client", - JWKSUri: server.URL + "/jwks", - RoleMapping: &providers.RoleMapping{ - Rules: []providers.MappingRule{ - { - Claim: "email", - Value: "*@example.com", - Role: "arn:seaweed:iam::role/UserRole", - }, - { - Claim: "groups", - Value: "admins", - Role: "arn:seaweed:iam::role/AdminRole", - }, - }, - DefaultRole: "arn:seaweed:iam::role/GuestRole", - }, - } - - err := provider.Initialize(config) - require.NoError(t, err) - - t.Run("successful authentication", func(t *testing.T) { - token := createTestJWT(t, privateKey, jwt.MapClaims{ - "iss": server.URL, - "aud": "test-client", - "sub": "user123", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - "email": "user@example.com", - "name": "Test User", - "groups": []string{"users", "developers"}, - }) - - identity, err := provider.Authenticate(context.Background(), token) - require.NoError(t, err) - require.NotNil(t, identity) - assert.Equal(t, "user123", identity.UserID) - assert.Equal(t, "user@example.com", identity.Email) - assert.Equal(t, "Test User", identity.DisplayName) - assert.Equal(t, "test-oidc", identity.Provider) - assert.Contains(t, identity.Groups, "users") - assert.Contains(t, identity.Groups, "developers") - }) - - t.Run("authentication with invalid token", func(t *testing.T) { - _, err := provider.Authenticate(context.Background(), "invalid-token") - assert.Error(t, err) - }) -} - -// TestOIDCProviderUserInfo tests user info retrieval -func TestOIDCProviderUserInfo(t *testing.T) { - // Set up test server with UserInfo endpoint - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/userinfo" { - // Check for Authorization header - authHeader := r.Header.Get("Authorization") - if !strings.HasPrefix(authHeader, "Bearer ") { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "unauthorized"}`)) - return - } - - accessToken := strings.TrimPrefix(authHeader, "Bearer ") - - // Return 401 for explicitly invalid tokens - if accessToken == "invalid-token" { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "invalid_token"}`)) - return - } - - // Mock user info response - userInfo := map[string]interface{}{ - "sub": "user123", - "email": "user@example.com", - "name": "Test User", - "groups": []string{"users", "developers"}, - } - - // Customize response based on token - if strings.Contains(accessToken, "admin") { - userInfo["groups"] = []string{"admins"} - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(userInfo) - } - })) - defer server.Close() - - provider := NewOIDCProvider("test-oidc") - config := &OIDCConfig{ - Issuer: server.URL, - ClientID: "test-client", - UserInfoUri: server.URL + "/userinfo", - } - - err := provider.Initialize(config) - require.NoError(t, err) - - t.Run("get user info with access token", func(t *testing.T) { - // Test using access token (real UserInfo endpoint call) - identity, err := provider.GetUserInfoWithToken(context.Background(), "valid-access-token") - require.NoError(t, err) - require.NotNil(t, identity) - assert.Equal(t, "user123", identity.UserID) - assert.Equal(t, "user@example.com", identity.Email) - assert.Equal(t, "Test User", identity.DisplayName) - assert.Contains(t, identity.Groups, "users") - assert.Contains(t, identity.Groups, "developers") - assert.Equal(t, "test-oidc", identity.Provider) - }) - - t.Run("get admin user info", func(t *testing.T) { - // Test admin token response - identity, err := provider.GetUserInfoWithToken(context.Background(), "admin-access-token") - require.NoError(t, err) - require.NotNil(t, identity) - assert.Equal(t, "user123", identity.UserID) - assert.Contains(t, identity.Groups, "admins") - }) - - t.Run("get user info without token", func(t *testing.T) { - // Test without access token (should fail) - _, err := provider.GetUserInfoWithToken(context.Background(), "") - assert.Error(t, err) - assert.Contains(t, err.Error(), "access token cannot be empty") - }) - - t.Run("get user info with invalid token", func(t *testing.T) { - // Test with invalid access token (should get 401) - _, err := provider.GetUserInfoWithToken(context.Background(), "invalid-token") - assert.Error(t, err) - assert.Contains(t, err.Error(), "UserInfo endpoint returned status 401") - }) - - t.Run("get user info with custom claims mapping", func(t *testing.T) { - // Create provider with custom claims mapping - customProvider := NewOIDCProvider("test-custom-oidc") - customConfig := &OIDCConfig{ - Issuer: server.URL, - ClientID: "test-client", - UserInfoUri: server.URL + "/userinfo", - ClaimsMapping: map[string]string{ - "customEmail": "email", - "customName": "name", - }, - } - - err := customProvider.Initialize(customConfig) - require.NoError(t, err) - - identity, err := customProvider.GetUserInfoWithToken(context.Background(), "valid-access-token") - require.NoError(t, err) - require.NotNil(t, identity) - - // Standard claims should still work - assert.Equal(t, "user123", identity.UserID) - assert.Equal(t, "user@example.com", identity.Email) - assert.Equal(t, "Test User", identity.DisplayName) - }) - - t.Run("get user info with empty id", func(t *testing.T) { - _, err := provider.GetUserInfo(context.Background(), "") - assert.Error(t, err) - }) -} - -// Helper functions for testing - -func generateTestKeys(t *testing.T) (*rsa.PrivateKey, *rsa.PublicKey) { - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - return privateKey, &privateKey.PublicKey -} - -func createTestJWT(t *testing.T, privateKey *rsa.PrivateKey, claims jwt.MapClaims) string { - token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) - token.Header["kid"] = "test-key-id" - - tokenString, err := token.SignedString(privateKey) - require.NoError(t, err) - return tokenString -} - -func encodePublicKey(t *testing.T, publicKey *rsa.PublicKey) string { - // Properly encode the RSA modulus (N) as base64url - return base64.RawURLEncoding.EncodeToString(publicKey.N.Bytes()) -} - -func setupOIDCTestServer(t *testing.T, publicKey *rsa.PublicKey) *httptest.Server { - jwks := map[string]interface{}{ - "keys": []map[string]interface{}{ - { - "kty": "RSA", - "kid": "test-key-id", - "use": "sig", - "alg": "RS256", - "n": encodePublicKey(t, publicKey), - "e": "AQAB", - }, - }, - } - - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/.well-known/openid_configuration": - config := map[string]interface{}{ - "issuer": "http://" + r.Host, - "jwks_uri": "http://" + r.Host + "/jwks", - "userinfo_endpoint": "http://" + r.Host + "/userinfo", - } - json.NewEncoder(w).Encode(config) - case "/jwks": - json.NewEncoder(w).Encode(jwks) - case "/userinfo": - // Mock UserInfo endpoint - authHeader := r.Header.Get("Authorization") - if !strings.HasPrefix(authHeader, "Bearer ") { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "unauthorized"}`)) - return - } - - accessToken := strings.TrimPrefix(authHeader, "Bearer ") - - // Return 401 for explicitly invalid tokens - if accessToken == "invalid-token" { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "invalid_token"}`)) - return - } - - // Mock user info response based on access token - userInfo := map[string]interface{}{ - "sub": "user123", - "email": "user@example.com", - "name": "Test User", - "groups": []string{"users", "developers"}, - } - - // Customize response based on token - if strings.Contains(accessToken, "admin") { - userInfo["groups"] = []string{"admins"} - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(userInfo) - default: - http.NotFound(w, r) - } - })) -} diff --git a/weed/iam/policy/aws_iam_compliance_test.go b/weed/iam/policy/aws_iam_compliance_test.go deleted file mode 100644 index 0979589a5..000000000 --- a/weed/iam/policy/aws_iam_compliance_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package policy - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAWSIAMMatch(t *testing.T) { - evalCtx := &EvaluationContext{ - RequestContext: map[string]interface{}{ - "aws:username": "testuser", - "saml:username": "john.doe", - "oidc:sub": "user123", - "aws:userid": "AIDACKCEVSQ6C2EXAMPLE", - "aws:principaltype": "User", - }, - } - - tests := []struct { - name string - pattern string - value string - evalCtx *EvaluationContext - expected bool - }{ - // Case insensitivity tests - { - name: "case insensitive exact match", - pattern: "S3:GetObject", - value: "s3:getobject", - evalCtx: evalCtx, - expected: true, - }, - { - name: "case insensitive wildcard match", - pattern: "S3:Get*", - value: "s3:getobject", - evalCtx: evalCtx, - expected: true, - }, - // Policy variable expansion tests - { - name: "AWS username variable expansion", - pattern: "arn:aws:s3:::mybucket/${aws:username}/*", - value: "arn:aws:s3:::mybucket/testuser/document.pdf", - evalCtx: evalCtx, - expected: true, - }, - { - name: "SAML username variable expansion", - pattern: "home/${saml:username}/*", - value: "home/john.doe/private.txt", - evalCtx: evalCtx, - expected: true, - }, - { - name: "OIDC subject variable expansion", - pattern: "users/${oidc:sub}/data", - value: "users/user123/data", - evalCtx: evalCtx, - expected: true, - }, - // Mixed case and variable tests - { - name: "case insensitive with variable", - pattern: "S3:GetObject/${aws:username}/*", - value: "s3:getobject/testuser/file.txt", - evalCtx: evalCtx, - expected: true, - }, - // Universal wildcard - { - name: "universal wildcard", - pattern: "*", - value: "anything", - evalCtx: evalCtx, - expected: true, - }, - // Question mark wildcard - { - name: "question mark wildcard", - pattern: "file?.txt", - value: "file1.txt", - evalCtx: evalCtx, - expected: true, - }, - // No match cases - { - name: "no match different pattern", - pattern: "s3:PutObject", - value: "s3:GetObject", - evalCtx: evalCtx, - expected: false, - }, - { - name: "variable not expanded due to missing context", - pattern: "users/${aws:username}/data", - value: "users/${aws:username}/data", - evalCtx: nil, - expected: true, // Should match literally when no context - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := awsIAMMatch(tt.pattern, tt.value, tt.evalCtx) - assert.Equal(t, tt.expected, result, "AWS IAM match result should match expected") - }) - } -} - -func TestExpandPolicyVariables(t *testing.T) { - evalCtx := &EvaluationContext{ - RequestContext: map[string]interface{}{ - "aws:username": "alice", - "saml:username": "alice.smith", - "oidc:sub": "sub123", - }, - } - - tests := []struct { - name string - pattern string - evalCtx *EvaluationContext - expected string - }{ - { - name: "expand aws username", - pattern: "home/${aws:username}/documents/*", - evalCtx: evalCtx, - expected: "home/alice/documents/*", - }, - { - name: "expand multiple variables", - pattern: "${aws:username}/${oidc:sub}/data", - evalCtx: evalCtx, - expected: "alice/sub123/data", - }, - { - name: "no variables to expand", - pattern: "static/path/file.txt", - evalCtx: evalCtx, - expected: "static/path/file.txt", - }, - { - name: "nil context", - pattern: "home/${aws:username}/file", - evalCtx: nil, - expected: "home/${aws:username}/file", - }, - { - name: "missing variable in context", - pattern: "home/${aws:nonexistent}/file", - evalCtx: evalCtx, - expected: "home/${aws:nonexistent}/file", // Should remain unchanged - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := expandPolicyVariables(tt.pattern, tt.evalCtx) - assert.Equal(t, tt.expected, result, "Policy variable expansion should match expected") - }) - } -} - -func TestAWSWildcardMatch(t *testing.T) { - tests := []struct { - name string - pattern string - value string - expected bool - }{ - { - name: "case insensitive asterisk", - pattern: "S3:Get*", - value: "s3:getobject", - expected: true, - }, - { - name: "case insensitive question mark", - pattern: "file?.TXT", - value: "file1.txt", - expected: true, - }, - { - name: "mixed wildcards", - pattern: "S3:*Object?", - value: "s3:getobjects", - expected: true, - }, - { - name: "no match", - pattern: "s3:Put*", - value: "s3:GetObject", - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := AwsWildcardMatch(tt.pattern, tt.value) - assert.Equal(t, tt.expected, result, "AWS wildcard match should match expected") - }) - } -} diff --git a/weed/iam/policy/cached_policy_store_generic.go b/weed/iam/policy/cached_policy_store_generic.go deleted file mode 100644 index e76f7aba5..000000000 --- a/weed/iam/policy/cached_policy_store_generic.go +++ /dev/null @@ -1,139 +0,0 @@ -package policy - -import ( - "context" - "encoding/json" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/util" -) - -// PolicyStoreAdapter adapts PolicyStore interface to CacheableStore[*PolicyDocument] -type PolicyStoreAdapter struct { - store PolicyStore -} - -// NewPolicyStoreAdapter creates a new adapter for PolicyStore -func NewPolicyStoreAdapter(store PolicyStore) *PolicyStoreAdapter { - return &PolicyStoreAdapter{store: store} -} - -// Get implements CacheableStore interface -func (a *PolicyStoreAdapter) Get(ctx context.Context, filerAddress string, key string) (*PolicyDocument, error) { - return a.store.GetPolicy(ctx, filerAddress, key) -} - -// Store implements CacheableStore interface -func (a *PolicyStoreAdapter) Store(ctx context.Context, filerAddress string, key string, value *PolicyDocument) error { - return a.store.StorePolicy(ctx, filerAddress, key, value) -} - -// Delete implements CacheableStore interface -func (a *PolicyStoreAdapter) Delete(ctx context.Context, filerAddress string, key string) error { - return a.store.DeletePolicy(ctx, filerAddress, key) -} - -// List implements CacheableStore interface -func (a *PolicyStoreAdapter) List(ctx context.Context, filerAddress string) ([]string, error) { - return a.store.ListPolicies(ctx, filerAddress) -} - -// GenericCachedPolicyStore implements PolicyStore using the generic cache -type GenericCachedPolicyStore struct { - *util.CachedStore[*PolicyDocument] - adapter *PolicyStoreAdapter -} - -// NewGenericCachedPolicyStore creates a new cached policy store using generics -func NewGenericCachedPolicyStore(config map[string]interface{}, filerAddressProvider func() string) (*GenericCachedPolicyStore, error) { - // Create underlying filer store - filerStore, err := NewFilerPolicyStore(config, filerAddressProvider) - if err != nil { - return nil, err - } - - // Parse cache configuration with defaults - cacheTTL := 5 * time.Minute - listTTL := 1 * time.Minute - maxCacheSize := int64(500) - - if config != nil { - if ttlStr, ok := config["ttl"].(string); ok && ttlStr != "" { - if parsed, err := time.ParseDuration(ttlStr); err == nil { - cacheTTL = parsed - } - } - if listTTLStr, ok := config["listTtl"].(string); ok && listTTLStr != "" { - if parsed, err := time.ParseDuration(listTTLStr); err == nil { - listTTL = parsed - } - } - if maxSize, ok := config["maxCacheSize"].(int); ok && maxSize > 0 { - maxCacheSize = int64(maxSize) - } - } - - // Create adapter and generic cached store - adapter := NewPolicyStoreAdapter(filerStore) - cachedStore := util.NewCachedStore( - adapter, - genericCopyPolicyDocument, // Copy function - util.CachedStoreConfig{ - TTL: cacheTTL, - ListTTL: listTTL, - MaxCacheSize: maxCacheSize, - }, - ) - - glog.V(2).Infof("Initialized GenericCachedPolicyStore with TTL %v, List TTL %v, Max Cache Size %d", - cacheTTL, listTTL, maxCacheSize) - - return &GenericCachedPolicyStore{ - CachedStore: cachedStore, - adapter: adapter, - }, nil -} - -// StorePolicy implements PolicyStore interface -func (c *GenericCachedPolicyStore) StorePolicy(ctx context.Context, filerAddress string, name string, policy *PolicyDocument) error { - return c.Store(ctx, filerAddress, name, policy) -} - -// GetPolicy implements PolicyStore interface -func (c *GenericCachedPolicyStore) GetPolicy(ctx context.Context, filerAddress string, name string) (*PolicyDocument, error) { - return c.Get(ctx, filerAddress, name) -} - -// ListPolicies implements PolicyStore interface -func (c *GenericCachedPolicyStore) ListPolicies(ctx context.Context, filerAddress string) ([]string, error) { - return c.List(ctx, filerAddress) -} - -// DeletePolicy implements PolicyStore interface -func (c *GenericCachedPolicyStore) DeletePolicy(ctx context.Context, filerAddress string, name string) error { - return c.Delete(ctx, filerAddress, name) -} - -// genericCopyPolicyDocument creates a deep copy of a PolicyDocument for the generic cache -func genericCopyPolicyDocument(policy *PolicyDocument) *PolicyDocument { - if policy == nil { - return nil - } - - // Perform a deep copy to ensure cache isolation - // Using JSON marshaling is a safe way to achieve this - policyData, err := json.Marshal(policy) - if err != nil { - glog.Errorf("Failed to marshal policy document for deep copy: %v", err) - return nil - } - - var copied PolicyDocument - if err := json.Unmarshal(policyData, &copied); err != nil { - glog.Errorf("Failed to unmarshal policy document for deep copy: %v", err) - return nil - } - - return &copied -} diff --git a/weed/iam/policy/policy_engine.go b/weed/iam/policy/policy_engine.go deleted file mode 100644 index 5af1d7e1a..000000000 --- a/weed/iam/policy/policy_engine.go +++ /dev/null @@ -1,1142 +0,0 @@ -package policy - -import ( - "context" - "fmt" - "net" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "time" -) - -// Effect represents the policy evaluation result -type Effect string - -const ( - EffectAllow Effect = "Allow" - EffectDeny Effect = "Deny" -) - -// Package-level regex cache for performance optimization -var ( - regexCache = make(map[string]*regexp.Regexp) - regexCacheMu sync.RWMutex -) - -// PolicyEngine evaluates policies against requests -type PolicyEngine struct { - config *PolicyEngineConfig - initialized bool - store PolicyStore -} - -// PolicyEngineConfig holds policy engine configuration -type PolicyEngineConfig struct { - // DefaultEffect when no policies match (Allow or Deny) - DefaultEffect string `json:"defaultEffect"` - - // StoreType specifies the policy store backend (memory, filer, etc.) - StoreType string `json:"storeType"` - - // StoreConfig contains store-specific configuration - StoreConfig map[string]interface{} `json:"storeConfig,omitempty"` -} - -// PolicyDocument represents an IAM policy document -type PolicyDocument struct { - // Version of the policy language (e.g., "2012-10-17") - Version string `json:"Version"` - - // Id is an optional policy identifier - Id string `json:"Id,omitempty"` - - // Statement contains the policy statements - Statement []Statement `json:"Statement"` -} - -// Statement represents a single policy statement -type Statement struct { - // Sid is an optional statement identifier - Sid string `json:"Sid,omitempty"` - - // Effect specifies whether to Allow or Deny - Effect string `json:"Effect"` - - // Principal specifies who the statement applies to (optional in role policies) - Principal interface{} `json:"Principal,omitempty"` - - // NotPrincipal specifies who the statement does NOT apply to - NotPrincipal interface{} `json:"NotPrincipal,omitempty"` - - // Action specifies the actions this statement applies to - Action []string `json:"Action"` - - // NotAction specifies actions this statement does NOT apply to - NotAction []string `json:"NotAction,omitempty"` - - // Resource specifies the resources this statement applies to - Resource []string `json:"Resource"` - - // NotResource specifies resources this statement does NOT apply to - NotResource []string `json:"NotResource,omitempty"` - - // Condition specifies conditions for when this statement applies - Condition map[string]map[string]interface{} `json:"Condition,omitempty"` -} - -// EvaluationContext provides context for policy evaluation -type EvaluationContext struct { - // Principal making the request (e.g., "user:alice", "role:admin") - Principal string `json:"principal"` - - // Action being requested (e.g., "s3:GetObject") - Action string `json:"action"` - - // Resource being accessed (e.g., "arn:seaweed:s3:::bucket/key") - Resource string `json:"resource"` - - // RequestContext contains additional request information - RequestContext map[string]interface{} `json:"requestContext,omitempty"` -} - -// EvaluationResult contains the result of policy evaluation -type EvaluationResult struct { - // Effect is the final decision (Allow or Deny) - Effect Effect `json:"effect"` - - // MatchingStatements contains statements that matched the request - MatchingStatements []StatementMatch `json:"matchingStatements,omitempty"` - - // EvaluationDetails provides detailed evaluation information - EvaluationDetails *EvaluationDetails `json:"evaluationDetails,omitempty"` -} - -// StatementMatch represents a statement that matched during evaluation -type StatementMatch struct { - // PolicyName is the name of the policy containing this statement - PolicyName string `json:"policyName"` - - // StatementSid is the statement identifier - StatementSid string `json:"statementSid,omitempty"` - - // Effect is the effect of this statement - Effect Effect `json:"effect"` - - // Reason explains why this statement matched - Reason string `json:"reason,omitempty"` -} - -// EvaluationDetails provides detailed information about policy evaluation -type EvaluationDetails struct { - // Principal that was evaluated - Principal string `json:"principal"` - - // Action that was evaluated - Action string `json:"action"` - - // Resource that was evaluated - Resource string `json:"resource"` - - // PoliciesEvaluated lists all policies that were evaluated - PoliciesEvaluated []string `json:"policiesEvaluated"` - - // ConditionsEvaluated lists all conditions that were evaluated - ConditionsEvaluated []string `json:"conditionsEvaluated,omitempty"` -} - -// PolicyStore defines the interface for storing and retrieving policies -type PolicyStore interface { - // StorePolicy stores a policy document (filerAddress ignored for memory stores) - StorePolicy(ctx context.Context, filerAddress string, name string, policy *PolicyDocument) error - - // GetPolicy retrieves a policy document (filerAddress ignored for memory stores) - GetPolicy(ctx context.Context, filerAddress string, name string) (*PolicyDocument, error) - - // DeletePolicy deletes a policy document (filerAddress ignored for memory stores) - DeletePolicy(ctx context.Context, filerAddress string, name string) error - - // ListPolicies lists all policy names (filerAddress ignored for memory stores) - ListPolicies(ctx context.Context, filerAddress string) ([]string, error) -} - -// NewPolicyEngine creates a new policy engine -func NewPolicyEngine() *PolicyEngine { - return &PolicyEngine{} -} - -// Initialize initializes the policy engine with configuration -func (e *PolicyEngine) Initialize(config *PolicyEngineConfig) error { - if config == nil { - return fmt.Errorf("config cannot be nil") - } - - if err := e.validateConfig(config); err != nil { - return fmt.Errorf("invalid configuration: %w", err) - } - - e.config = config - - // Initialize policy store - store, err := e.createPolicyStore(config) - if err != nil { - return fmt.Errorf("failed to create policy store: %w", err) - } - e.store = store - - e.initialized = true - return nil -} - -// InitializeWithProvider initializes the policy engine with configuration and a filer address provider -func (e *PolicyEngine) InitializeWithProvider(config *PolicyEngineConfig, filerAddressProvider func() string) error { - if config == nil { - return fmt.Errorf("config cannot be nil") - } - - if err := e.validateConfig(config); err != nil { - return fmt.Errorf("invalid configuration: %w", err) - } - - e.config = config - - // Initialize policy store with provider - store, err := e.createPolicyStoreWithProvider(config, filerAddressProvider) - if err != nil { - return fmt.Errorf("failed to create policy store: %w", err) - } - e.store = store - - e.initialized = true - return nil -} - -// validateConfig validates the policy engine configuration -func (e *PolicyEngine) validateConfig(config *PolicyEngineConfig) error { - if config.DefaultEffect != "Allow" && config.DefaultEffect != "Deny" { - return fmt.Errorf("invalid default effect: %s", config.DefaultEffect) - } - - if config.StoreType == "" { - config.StoreType = "filer" // Default to filer store for persistence - } - - return nil -} - -// createPolicyStore creates a policy store based on configuration -func (e *PolicyEngine) createPolicyStore(config *PolicyEngineConfig) (PolicyStore, error) { - switch config.StoreType { - case "memory": - return NewMemoryPolicyStore(), nil - case "", "filer": - // Check if caching is explicitly disabled - if config.StoreConfig != nil { - if noCache, ok := config.StoreConfig["noCache"].(bool); ok && noCache { - return NewFilerPolicyStore(config.StoreConfig, nil) - } - } - // Default to generic cached filer store for better performance - return NewGenericCachedPolicyStore(config.StoreConfig, nil) - case "cached-filer", "generic-cached": - return NewGenericCachedPolicyStore(config.StoreConfig, nil) - default: - return nil, fmt.Errorf("unsupported store type: %s", config.StoreType) - } -} - -// createPolicyStoreWithProvider creates a policy store with a filer address provider function -func (e *PolicyEngine) createPolicyStoreWithProvider(config *PolicyEngineConfig, filerAddressProvider func() string) (PolicyStore, error) { - switch config.StoreType { - case "memory": - return NewMemoryPolicyStore(), nil - case "", "filer": - // Check if caching is explicitly disabled - if config.StoreConfig != nil { - if noCache, ok := config.StoreConfig["noCache"].(bool); ok && noCache { - return NewFilerPolicyStore(config.StoreConfig, filerAddressProvider) - } - } - // Default to generic cached filer store for better performance - return NewGenericCachedPolicyStore(config.StoreConfig, filerAddressProvider) - case "cached-filer", "generic-cached": - return NewGenericCachedPolicyStore(config.StoreConfig, filerAddressProvider) - default: - return nil, fmt.Errorf("unsupported store type: %s", config.StoreType) - } -} - -// IsInitialized returns whether the engine is initialized -func (e *PolicyEngine) IsInitialized() bool { - return e.initialized -} - -// AddPolicy adds a policy to the engine (filerAddress ignored for memory stores) -func (e *PolicyEngine) AddPolicy(filerAddress string, name string, policy *PolicyDocument) error { - if !e.initialized { - return fmt.Errorf("policy engine not initialized") - } - - if name == "" { - return fmt.Errorf("policy name cannot be empty") - } - - if policy == nil { - return fmt.Errorf("policy cannot be nil") - } - - if err := ValidatePolicyDocument(policy); err != nil { - return fmt.Errorf("invalid policy document: %w", err) - } - - return e.store.StorePolicy(context.Background(), filerAddress, name, policy) -} - -// Evaluate evaluates policies against a request context (filerAddress ignored for memory stores) -func (e *PolicyEngine) Evaluate(ctx context.Context, filerAddress string, evalCtx *EvaluationContext, policyNames []string) (*EvaluationResult, error) { - if !e.initialized { - return nil, fmt.Errorf("policy engine not initialized") - } - - if evalCtx == nil { - return nil, fmt.Errorf("evaluation context cannot be nil") - } - - result := &EvaluationResult{ - Effect: Effect(e.config.DefaultEffect), - EvaluationDetails: &EvaluationDetails{ - Principal: evalCtx.Principal, - Action: evalCtx.Action, - Resource: evalCtx.Resource, - PoliciesEvaluated: policyNames, - }, - } - - var matchingStatements []StatementMatch - explicitDeny := false - hasAllow := false - - // Evaluate each policy - for _, policyName := range policyNames { - policy, err := e.store.GetPolicy(ctx, filerAddress, policyName) - if err != nil { - continue // Skip policies that can't be loaded - } - - // Evaluate each statement in the policy - for _, statement := range policy.Statement { - if e.statementMatches(&statement, evalCtx) { - match := StatementMatch{ - PolicyName: policyName, - StatementSid: statement.Sid, - Effect: Effect(statement.Effect), - Reason: "Action, Resource, and Condition matched", - } - matchingStatements = append(matchingStatements, match) - - if statement.Effect == "Deny" { - explicitDeny = true - } else if statement.Effect == "Allow" { - hasAllow = true - } - } - } - } - - result.MatchingStatements = matchingStatements - - // AWS IAM evaluation logic: - // 1. If there's an explicit Deny, the result is Deny - // 2. If there's an Allow and no Deny, the result is Allow - // 3. Otherwise, use the default effect - if explicitDeny { - result.Effect = EffectDeny - } else if hasAllow { - result.Effect = EffectAllow - } - - return result, nil -} - -// statementMatches checks if a statement matches the evaluation context -func (e *PolicyEngine) statementMatches(statement *Statement, evalCtx *EvaluationContext) bool { - // Check action match - if !e.matchesActions(statement.Action, evalCtx.Action, evalCtx) { - return false - } - - // Check resource match - if !e.matchesResources(statement.Resource, evalCtx.Resource, evalCtx) { - return false - } - - // Check conditions - if !e.matchesConditions(statement.Condition, evalCtx) { - return false - } - - return true -} - -// matchesActions checks if any action in the list matches the requested action -func (e *PolicyEngine) matchesActions(actions []string, requestedAction string, evalCtx *EvaluationContext) bool { - for _, action := range actions { - if awsIAMMatch(action, requestedAction, evalCtx) { - return true - } - } - return false -} - -// matchesResources checks if any resource in the list matches the requested resource -func (e *PolicyEngine) matchesResources(resources []string, requestedResource string, evalCtx *EvaluationContext) bool { - for _, resource := range resources { - if awsIAMMatch(resource, requestedResource, evalCtx) { - return true - } - } - return false -} - -// matchesConditions checks if all conditions are satisfied -func (e *PolicyEngine) matchesConditions(conditions map[string]map[string]interface{}, evalCtx *EvaluationContext) bool { - if len(conditions) == 0 { - return true // No conditions means always match - } - - for conditionType, conditionBlock := range conditions { - if !e.evaluateConditionBlock(conditionType, conditionBlock, evalCtx) { - return false - } - } - - return true -} - -// evaluateConditionBlock evaluates a single condition block -func (e *PolicyEngine) evaluateConditionBlock(conditionType string, block map[string]interface{}, evalCtx *EvaluationContext) bool { - switch conditionType { - // IP Address conditions - case "IpAddress": - return e.evaluateIPCondition(block, evalCtx, true) - case "NotIpAddress": - return e.evaluateIPCondition(block, evalCtx, false) - - // String conditions - case "StringEquals": - return e.EvaluateStringCondition(block, evalCtx, true, false) - case "StringNotEquals": - return e.EvaluateStringCondition(block, evalCtx, false, false) - case "StringLike": - return e.EvaluateStringCondition(block, evalCtx, true, true) - case "StringEqualsIgnoreCase": - return e.evaluateStringConditionIgnoreCase(block, evalCtx, true, false) - case "StringNotEqualsIgnoreCase": - return e.evaluateStringConditionIgnoreCase(block, evalCtx, false, false) - case "StringLikeIgnoreCase": - return e.evaluateStringConditionIgnoreCase(block, evalCtx, true, true) - - // Numeric conditions - case "NumericEquals": - return e.evaluateNumericCondition(block, evalCtx, "==") - case "NumericNotEquals": - return e.evaluateNumericCondition(block, evalCtx, "!=") - case "NumericLessThan": - return e.evaluateNumericCondition(block, evalCtx, "<") - case "NumericLessThanEquals": - return e.evaluateNumericCondition(block, evalCtx, "<=") - case "NumericGreaterThan": - return e.evaluateNumericCondition(block, evalCtx, ">") - case "NumericGreaterThanEquals": - return e.evaluateNumericCondition(block, evalCtx, ">=") - - // Date conditions - case "DateEquals": - return e.evaluateDateCondition(block, evalCtx, "==") - case "DateNotEquals": - return e.evaluateDateCondition(block, evalCtx, "!=") - case "DateLessThan": - return e.evaluateDateCondition(block, evalCtx, "<") - case "DateLessThanEquals": - return e.evaluateDateCondition(block, evalCtx, "<=") - case "DateGreaterThan": - return e.evaluateDateCondition(block, evalCtx, ">") - case "DateGreaterThanEquals": - return e.evaluateDateCondition(block, evalCtx, ">=") - - // Boolean conditions - case "Bool": - return e.evaluateBoolCondition(block, evalCtx) - - // Null conditions - case "Null": - return e.evaluateNullCondition(block, evalCtx) - - default: - // Unknown condition types default to false (more secure) - return false - } -} - -// evaluateIPCondition evaluates IP address conditions -func (e *PolicyEngine) evaluateIPCondition(block map[string]interface{}, evalCtx *EvaluationContext, shouldMatch bool) bool { - sourceIP, exists := evalCtx.RequestContext["sourceIP"] - if !exists { - return !shouldMatch // If no IP in context, condition fails for positive match - } - - sourceIPStr, ok := sourceIP.(string) - if !ok { - return !shouldMatch - } - - sourceIPAddr := net.ParseIP(sourceIPStr) - if sourceIPAddr == nil { - return !shouldMatch - } - - for key, value := range block { - if key == "seaweed:SourceIP" { - ranges, ok := value.([]string) - if !ok { - continue - } - - for _, ipRange := range ranges { - if strings.Contains(ipRange, "/") { - // CIDR range - _, cidr, err := net.ParseCIDR(ipRange) - if err != nil { - continue - } - if cidr.Contains(sourceIPAddr) { - return shouldMatch - } - } else { - // Single IP - if sourceIPStr == ipRange { - return shouldMatch - } - } - } - } - } - - return !shouldMatch -} - -// EvaluateStringCondition evaluates string-based conditions -func (e *PolicyEngine) EvaluateStringCondition(block map[string]interface{}, evalCtx *EvaluationContext, shouldMatch bool, useWildcard bool) bool { - // Iterate through all condition keys in the block - for conditionKey, conditionValue := range block { - // Get the context values for this condition key - contextValues, exists := evalCtx.RequestContext[conditionKey] - if !exists { - // If the context key doesn't exist, condition fails for positive match - if shouldMatch { - return false - } - continue - } - - // Convert context value to string slice - var contextStrings []string - switch v := contextValues.(type) { - case string: - contextStrings = []string{v} - case []string: - contextStrings = v - case []interface{}: - for _, item := range v { - if str, ok := item.(string); ok { - contextStrings = append(contextStrings, str) - } - } - default: - // Convert to string as fallback - contextStrings = []string{fmt.Sprintf("%v", v)} - } - - // Convert condition value to string slice - var expectedStrings []string - switch v := conditionValue.(type) { - case string: - expectedStrings = []string{v} - case []string: - expectedStrings = v - case []interface{}: - for _, item := range v { - if str, ok := item.(string); ok { - expectedStrings = append(expectedStrings, str) - } else { - expectedStrings = append(expectedStrings, fmt.Sprintf("%v", item)) - } - } - default: - expectedStrings = []string{fmt.Sprintf("%v", v)} - } - - // Evaluate the condition using AWS IAM-compliant matching - conditionMet := false - for _, expected := range expectedStrings { - for _, contextValue := range contextStrings { - if useWildcard { - // Use AWS IAM-compliant wildcard matching for StringLike conditions - // This handles case-insensitivity and policy variables - if awsIAMMatch(expected, contextValue, evalCtx) { - conditionMet = true - break - } - } else { - // For StringEquals/StringNotEquals, also support policy variables but be case-sensitive - expandedExpected := expandPolicyVariables(expected, evalCtx) - if expandedExpected == contextValue { - conditionMet = true - break - } - } - } - if conditionMet { - break - } - } - - // For shouldMatch=true (StringEquals, StringLike): condition must be met - // For shouldMatch=false (StringNotEquals): condition must NOT be met - if shouldMatch && !conditionMet { - return false - } - if !shouldMatch && conditionMet { - return false - } - } - - return true -} - -// ValidatePolicyDocument validates a policy document structure -func ValidatePolicyDocument(policy *PolicyDocument) error { - return ValidatePolicyDocumentWithType(policy, "resource") -} - -// ValidateTrustPolicyDocument validates a trust policy document structure -func ValidateTrustPolicyDocument(policy *PolicyDocument) error { - return ValidatePolicyDocumentWithType(policy, "trust") -} - -// ValidatePolicyDocumentWithType validates a policy document for specific type -func ValidatePolicyDocumentWithType(policy *PolicyDocument, policyType string) error { - if policy == nil { - return fmt.Errorf("policy document cannot be nil") - } - - if policy.Version == "" { - return fmt.Errorf("version is required") - } - - if len(policy.Statement) == 0 { - return fmt.Errorf("at least one statement is required") - } - - for i, statement := range policy.Statement { - if err := validateStatementWithType(&statement, policyType); err != nil { - return fmt.Errorf("statement %d is invalid: %w", i, err) - } - } - - return nil -} - -// validateStatement validates a single statement (for backward compatibility) -func validateStatement(statement *Statement) error { - return validateStatementWithType(statement, "resource") -} - -// validateStatementWithType validates a single statement based on policy type -func validateStatementWithType(statement *Statement, policyType string) error { - if statement.Effect != "Allow" && statement.Effect != "Deny" { - return fmt.Errorf("invalid effect: %s (must be Allow or Deny)", statement.Effect) - } - - if len(statement.Action) == 0 { - return fmt.Errorf("at least one action is required") - } - - // Trust policies don't require Resource field, but resource policies do - if policyType == "resource" { - if len(statement.Resource) == 0 { - return fmt.Errorf("at least one resource is required") - } - } else if policyType == "trust" { - // Trust policies should have Principal field - if statement.Principal == nil { - return fmt.Errorf("trust policy statement must have Principal field") - } - - // Trust policies typically have specific actions - validTrustActions := map[string]bool{ - "sts:AssumeRole": true, - "sts:AssumeRoleWithWebIdentity": true, - "sts:AssumeRoleWithCredentials": true, - } - - for _, action := range statement.Action { - if !validTrustActions[action] { - return fmt.Errorf("invalid action for trust policy: %s", action) - } - } - } - - return nil -} - -// matchResource checks if a resource pattern matches a requested resource -// Uses hybrid approach: simple suffix wildcards for compatibility, filepath.Match for complex patterns -func matchResource(pattern, resource string) bool { - if pattern == resource { - return true - } - - // Handle simple suffix wildcard (backward compatibility) - if strings.HasSuffix(pattern, "*") { - prefix := pattern[:len(pattern)-1] - return strings.HasPrefix(resource, prefix) - } - - // For complex patterns, use filepath.Match for advanced wildcard support (*, ?, []) - matched, err := filepath.Match(pattern, resource) - if err != nil { - // Fallback to exact match if pattern is malformed - return pattern == resource - } - - return matched -} - -// awsIAMMatch performs AWS IAM-compliant pattern matching with case-insensitivity and policy variable support -func awsIAMMatch(pattern, value string, evalCtx *EvaluationContext) bool { - // Step 1: Substitute policy variables (e.g., ${aws:username}, ${saml:username}) - expandedPattern := expandPolicyVariables(pattern, evalCtx) - - // Step 2: Handle special patterns - if expandedPattern == "*" { - return true // Universal wildcard - } - - // Step 3: Case-insensitive exact match - if strings.EqualFold(expandedPattern, value) { - return true - } - - // Step 4: Handle AWS-style wildcards (case-insensitive) - if strings.Contains(expandedPattern, "*") || strings.Contains(expandedPattern, "?") { - return AwsWildcardMatch(expandedPattern, value) - } - - return false -} - -// expandPolicyVariables substitutes AWS policy variables in the pattern -func expandPolicyVariables(pattern string, evalCtx *EvaluationContext) string { - if evalCtx == nil || evalCtx.RequestContext == nil { - return pattern - } - - expanded := pattern - - // Common AWS policy variables that might be used in SeaweedFS - variableMap := map[string]string{ - "${aws:username}": getContextValue(evalCtx, "aws:username", ""), - "${saml:username}": getContextValue(evalCtx, "saml:username", ""), - "${oidc:sub}": getContextValue(evalCtx, "oidc:sub", ""), - "${aws:userid}": getContextValue(evalCtx, "aws:userid", ""), - "${aws:principaltype}": getContextValue(evalCtx, "aws:principaltype", ""), - } - - for variable, value := range variableMap { - if value != "" { - expanded = strings.ReplaceAll(expanded, variable, value) - } - } - - return expanded -} - -// getContextValue safely gets a value from the evaluation context -func getContextValue(evalCtx *EvaluationContext, key, defaultValue string) string { - if value, exists := evalCtx.RequestContext[key]; exists { - if str, ok := value.(string); ok { - return str - } - } - return defaultValue -} - -// AwsWildcardMatch performs case-insensitive wildcard matching like AWS IAM -func AwsWildcardMatch(pattern, value string) bool { - // Create regex pattern key for caching - // First escape all regex metacharacters, then replace wildcards - regexPattern := regexp.QuoteMeta(pattern) - regexPattern = strings.ReplaceAll(regexPattern, "\\*", ".*") - regexPattern = strings.ReplaceAll(regexPattern, "\\?", ".") - regexPattern = "^" + regexPattern + "$" - regexKey := "(?i)" + regexPattern - - // Try to get compiled regex from cache - regexCacheMu.RLock() - regex, found := regexCache[regexKey] - regexCacheMu.RUnlock() - - if !found { - // Compile and cache the regex - compiledRegex, err := regexp.Compile(regexKey) - if err != nil { - // Fallback to simple case-insensitive comparison if regex fails - return strings.EqualFold(pattern, value) - } - - // Store in cache with write lock - regexCacheMu.Lock() - // Double-check in case another goroutine added it - if existingRegex, exists := regexCache[regexKey]; exists { - regex = existingRegex - } else { - regexCache[regexKey] = compiledRegex - regex = compiledRegex - } - regexCacheMu.Unlock() - } - - return regex.MatchString(value) -} - -// matchAction checks if an action pattern matches a requested action -// Uses hybrid approach: simple suffix wildcards for compatibility, filepath.Match for complex patterns -func matchAction(pattern, action string) bool { - if pattern == action { - return true - } - - // Handle simple suffix wildcard (backward compatibility) - if strings.HasSuffix(pattern, "*") { - prefix := pattern[:len(pattern)-1] - return strings.HasPrefix(action, prefix) - } - - // For complex patterns, use filepath.Match for advanced wildcard support (*, ?, []) - matched, err := filepath.Match(pattern, action) - if err != nil { - // Fallback to exact match if pattern is malformed - return pattern == action - } - - return matched -} - -// evaluateStringConditionIgnoreCase evaluates string conditions with case insensitivity -func (e *PolicyEngine) evaluateStringConditionIgnoreCase(block map[string]interface{}, evalCtx *EvaluationContext, shouldMatch bool, useWildcard bool) bool { - for key, expectedValues := range block { - contextValue, exists := evalCtx.RequestContext[key] - if !exists { - if !shouldMatch { - continue // For NotEquals, missing key is OK - } - return false - } - - contextStr, ok := contextValue.(string) - if !ok { - return false - } - - contextStr = strings.ToLower(contextStr) - matched := false - - // Handle different value types - switch v := expectedValues.(type) { - case string: - expectedStr := strings.ToLower(v) - if useWildcard { - matched, _ = filepath.Match(expectedStr, contextStr) - } else { - matched = expectedStr == contextStr - } - case []interface{}: - for _, val := range v { - if valStr, ok := val.(string); ok { - expectedStr := strings.ToLower(valStr) - if useWildcard { - if m, _ := filepath.Match(expectedStr, contextStr); m { - matched = true - break - } - } else { - if expectedStr == contextStr { - matched = true - break - } - } - } - } - } - - if shouldMatch && !matched { - return false - } - if !shouldMatch && matched { - return false - } - } - return true -} - -// evaluateNumericCondition evaluates numeric conditions -func (e *PolicyEngine) evaluateNumericCondition(block map[string]interface{}, evalCtx *EvaluationContext, operator string) bool { - for key, expectedValues := range block { - contextValue, exists := evalCtx.RequestContext[key] - if !exists { - return false - } - - contextNum, err := parseNumeric(contextValue) - if err != nil { - return false - } - - matched := false - - // Handle different value types - switch v := expectedValues.(type) { - case string: - expectedNum, err := parseNumeric(v) - if err != nil { - return false - } - matched = compareNumbers(contextNum, expectedNum, operator) - case []interface{}: - for _, val := range v { - expectedNum, err := parseNumeric(val) - if err != nil { - continue - } - if compareNumbers(contextNum, expectedNum, operator) { - matched = true - break - } - } - } - - if !matched { - return false - } - } - return true -} - -// evaluateDateCondition evaluates date conditions -func (e *PolicyEngine) evaluateDateCondition(block map[string]interface{}, evalCtx *EvaluationContext, operator string) bool { - for key, expectedValues := range block { - contextValue, exists := evalCtx.RequestContext[key] - if !exists { - return false - } - - contextTime, err := parseDateTime(contextValue) - if err != nil { - return false - } - - matched := false - - // Handle different value types - switch v := expectedValues.(type) { - case string: - expectedTime, err := parseDateTime(v) - if err != nil { - return false - } - matched = compareDates(contextTime, expectedTime, operator) - case []interface{}: - for _, val := range v { - expectedTime, err := parseDateTime(val) - if err != nil { - continue - } - if compareDates(contextTime, expectedTime, operator) { - matched = true - break - } - } - } - - if !matched { - return false - } - } - return true -} - -// evaluateBoolCondition evaluates boolean conditions -func (e *PolicyEngine) evaluateBoolCondition(block map[string]interface{}, evalCtx *EvaluationContext) bool { - for key, expectedValues := range block { - contextValue, exists := evalCtx.RequestContext[key] - if !exists { - return false - } - - contextBool, err := parseBool(contextValue) - if err != nil { - return false - } - - matched := false - - // Handle different value types - switch v := expectedValues.(type) { - case string: - expectedBool, err := parseBool(v) - if err != nil { - return false - } - matched = contextBool == expectedBool - case bool: - matched = contextBool == v - case []interface{}: - for _, val := range v { - expectedBool, err := parseBool(val) - if err != nil { - continue - } - if contextBool == expectedBool { - matched = true - break - } - } - } - - if !matched { - return false - } - } - return true -} - -// evaluateNullCondition evaluates null conditions -func (e *PolicyEngine) evaluateNullCondition(block map[string]interface{}, evalCtx *EvaluationContext) bool { - for key, expectedValues := range block { - _, exists := evalCtx.RequestContext[key] - - expectedNull := false - switch v := expectedValues.(type) { - case string: - expectedNull = v == "true" - case bool: - expectedNull = v - } - - // If we expect null (true) and key exists, or expect non-null (false) and key doesn't exist - if expectedNull == exists { - return false - } - } - return true -} - -// Helper functions for parsing and comparing values - -// parseNumeric parses a value as a float64 -func parseNumeric(value interface{}) (float64, error) { - switch v := value.(type) { - case float64: - return v, nil - case float32: - return float64(v), nil - case int: - return float64(v), nil - case int64: - return float64(v), nil - case string: - return strconv.ParseFloat(v, 64) - default: - return 0, fmt.Errorf("cannot parse %T as numeric", value) - } -} - -// compareNumbers compares two numbers using the given operator -func compareNumbers(a, b float64, operator string) bool { - switch operator { - case "==": - return a == b - case "!=": - return a != b - case "<": - return a < b - case "<=": - return a <= b - case ">": - return a > b - case ">=": - return a >= b - default: - return false - } -} - -// parseDateTime parses a value as a time.Time -func parseDateTime(value interface{}) (time.Time, error) { - switch v := value.(type) { - case string: - // Try common date formats - formats := []string{ - time.RFC3339, - "2006-01-02T15:04:05Z", - "2006-01-02T15:04:05", - "2006-01-02 15:04:05", - "2006-01-02", - } - for _, format := range formats { - if t, err := time.Parse(format, v); err == nil { - return t, nil - } - } - return time.Time{}, fmt.Errorf("cannot parse date: %s", v) - case time.Time: - return v, nil - default: - return time.Time{}, fmt.Errorf("cannot parse %T as date", value) - } -} - -// compareDates compares two dates using the given operator -func compareDates(a, b time.Time, operator string) bool { - switch operator { - case "==": - return a.Equal(b) - case "!=": - return !a.Equal(b) - case "<": - return a.Before(b) - case "<=": - return a.Before(b) || a.Equal(b) - case ">": - return a.After(b) - case ">=": - return a.After(b) || a.Equal(b) - default: - return false - } -} - -// parseBool parses a value as a boolean -func parseBool(value interface{}) (bool, error) { - switch v := value.(type) { - case bool: - return v, nil - case string: - return strconv.ParseBool(v) - default: - return false, fmt.Errorf("cannot parse %T as boolean", value) - } -} diff --git a/weed/iam/policy/policy_engine_distributed_test.go b/weed/iam/policy/policy_engine_distributed_test.go deleted file mode 100644 index f5b5d285b..000000000 --- a/weed/iam/policy/policy_engine_distributed_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package policy - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestDistributedPolicyEngine verifies that multiple PolicyEngine instances with identical configurations -// behave consistently across distributed environments -func TestDistributedPolicyEngine(t *testing.T) { - ctx := context.Background() - - // Common configuration for all instances - commonConfig := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", // For testing - would be "filer" in production - StoreConfig: map[string]interface{}{}, - } - - // Create multiple PolicyEngine instances simulating distributed deployment - instance1 := NewPolicyEngine() - instance2 := NewPolicyEngine() - instance3 := NewPolicyEngine() - - // Initialize all instances with identical configuration - err := instance1.Initialize(commonConfig) - require.NoError(t, err, "Instance 1 should initialize successfully") - - err = instance2.Initialize(commonConfig) - require.NoError(t, err, "Instance 2 should initialize successfully") - - err = instance3.Initialize(commonConfig) - require.NoError(t, err, "Instance 3 should initialize successfully") - - // Test policy consistency across instances - t.Run("policy_storage_consistency", func(t *testing.T) { - // Define a test policy - testPolicy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "AllowS3Read", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{"arn:seaweed:s3:::test-bucket/*", "arn:seaweed:s3:::test-bucket"}, - }, - { - Sid: "DenyS3Write", - Effect: "Deny", - Action: []string{"s3:PutObject", "s3:DeleteObject"}, - Resource: []string{"arn:seaweed:s3:::test-bucket/*"}, - }, - }, - } - - // Store policy on instance 1 - err := instance1.AddPolicy("", "TestPolicy", testPolicy) - require.NoError(t, err, "Should be able to store policy on instance 1") - - // For memory storage, each instance has separate storage - // In production with filer storage, all instances would share the same policies - - // Verify policy exists on instance 1 - storedPolicy1, err := instance1.store.GetPolicy(ctx, "", "TestPolicy") - require.NoError(t, err, "Policy should exist on instance 1") - assert.Equal(t, "2012-10-17", storedPolicy1.Version) - assert.Len(t, storedPolicy1.Statement, 2) - - // For demonstration: store same policy on other instances - err = instance2.AddPolicy("", "TestPolicy", testPolicy) - require.NoError(t, err, "Should be able to store policy on instance 2") - - err = instance3.AddPolicy("", "TestPolicy", testPolicy) - require.NoError(t, err, "Should be able to store policy on instance 3") - }) - - // Test policy evaluation consistency - t.Run("evaluation_consistency", func(t *testing.T) { - // Create evaluation context - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::test-bucket/file.txt", - RequestContext: map[string]interface{}{ - "sourceIp": "192.168.1.100", - }, - } - - // Evaluate policy on all instances - result1, err1 := instance1.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result2, err2 := instance2.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result3, err3 := instance3.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - - require.NoError(t, err1, "Evaluation should succeed on instance 1") - require.NoError(t, err2, "Evaluation should succeed on instance 2") - require.NoError(t, err3, "Evaluation should succeed on instance 3") - - // All instances should return identical results - assert.Equal(t, result1.Effect, result2.Effect, "Instance 1 and 2 should have same effect") - assert.Equal(t, result2.Effect, result3.Effect, "Instance 2 and 3 should have same effect") - assert.Equal(t, EffectAllow, result1.Effect, "Should allow s3:GetObject") - - // Matching statements should be identical - assert.Len(t, result1.MatchingStatements, 1, "Should have one matching statement") - assert.Len(t, result2.MatchingStatements, 1, "Should have one matching statement") - assert.Len(t, result3.MatchingStatements, 1, "Should have one matching statement") - - assert.Equal(t, "AllowS3Read", result1.MatchingStatements[0].StatementSid) - assert.Equal(t, "AllowS3Read", result2.MatchingStatements[0].StatementSid) - assert.Equal(t, "AllowS3Read", result3.MatchingStatements[0].StatementSid) - }) - - // Test explicit deny precedence - t.Run("deny_precedence_consistency", func(t *testing.T) { - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "s3:PutObject", - Resource: "arn:seaweed:s3:::test-bucket/newfile.txt", - } - - // All instances should consistently apply deny precedence - result1, err1 := instance1.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result2, err2 := instance2.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result3, err3 := instance3.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - - require.NoError(t, err1) - require.NoError(t, err2) - require.NoError(t, err3) - - // All should deny due to explicit deny statement - assert.Equal(t, EffectDeny, result1.Effect, "Instance 1 should deny write operation") - assert.Equal(t, EffectDeny, result2.Effect, "Instance 2 should deny write operation") - assert.Equal(t, EffectDeny, result3.Effect, "Instance 3 should deny write operation") - - // Should have matching deny statement - assert.Len(t, result1.MatchingStatements, 1) - assert.Equal(t, "DenyS3Write", result1.MatchingStatements[0].StatementSid) - assert.Equal(t, EffectDeny, result1.MatchingStatements[0].Effect) - }) - - // Test default effect consistency - t.Run("default_effect_consistency", func(t *testing.T) { - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "filer:CreateEntry", // Action not covered by any policy - Resource: "arn:seaweed:filer::path/test", - } - - result1, err1 := instance1.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result2, err2 := instance2.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - result3, err3 := instance3.Evaluate(ctx, "", evalCtx, []string{"TestPolicy"}) - - require.NoError(t, err1) - require.NoError(t, err2) - require.NoError(t, err3) - - // All should use default effect (Deny) - assert.Equal(t, EffectDeny, result1.Effect, "Should use default effect") - assert.Equal(t, EffectDeny, result2.Effect, "Should use default effect") - assert.Equal(t, EffectDeny, result3.Effect, "Should use default effect") - - // No matching statements - assert.Empty(t, result1.MatchingStatements, "Should have no matching statements") - assert.Empty(t, result2.MatchingStatements, "Should have no matching statements") - assert.Empty(t, result3.MatchingStatements, "Should have no matching statements") - }) -} - -// TestPolicyEngineConfigurationConsistency tests configuration validation for distributed deployments -func TestPolicyEngineConfigurationConsistency(t *testing.T) { - t.Run("consistent_default_effects_required", func(t *testing.T) { - // Different default effects could lead to inconsistent authorization - config1 := &PolicyEngineConfig{ - DefaultEffect: "Allow", - StoreType: "memory", - } - - config2 := &PolicyEngineConfig{ - DefaultEffect: "Deny", // Different default! - StoreType: "memory", - } - - instance1 := NewPolicyEngine() - instance2 := NewPolicyEngine() - - err1 := instance1.Initialize(config1) - err2 := instance2.Initialize(config2) - - require.NoError(t, err1) - require.NoError(t, err2) - - // Test with an action not covered by any policy - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "uncovered:action", - Resource: "arn:seaweed:test:::resource", - } - - result1, _ := instance1.Evaluate(context.Background(), "", evalCtx, []string{}) - result2, _ := instance2.Evaluate(context.Background(), "", evalCtx, []string{}) - - // Results should be different due to different default effects - assert.NotEqual(t, result1.Effect, result2.Effect, "Different default effects should produce different results") - assert.Equal(t, EffectAllow, result1.Effect, "Instance 1 should allow by default") - assert.Equal(t, EffectDeny, result2.Effect, "Instance 2 should deny by default") - }) - - t.Run("invalid_configuration_handling", func(t *testing.T) { - invalidConfigs := []*PolicyEngineConfig{ - { - DefaultEffect: "Maybe", // Invalid effect - StoreType: "memory", - }, - { - DefaultEffect: "Allow", - StoreType: "nonexistent", // Invalid store type - }, - } - - for i, config := range invalidConfigs { - t.Run(fmt.Sprintf("invalid_config_%d", i), func(t *testing.T) { - instance := NewPolicyEngine() - err := instance.Initialize(config) - assert.Error(t, err, "Should reject invalid configuration") - }) - } - }) -} - -// TestPolicyStoreDistributed tests policy store behavior in distributed scenarios -func TestPolicyStoreDistributed(t *testing.T) { - ctx := context.Background() - - t.Run("memory_store_isolation", func(t *testing.T) { - // Memory stores are isolated per instance (not suitable for distributed) - store1 := NewMemoryPolicyStore() - store2 := NewMemoryPolicyStore() - - policy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Effect: "Allow", - Action: []string{"s3:GetObject"}, - Resource: []string{"*"}, - }, - }, - } - - // Store policy in store1 - err := store1.StorePolicy(ctx, "", "TestPolicy", policy) - require.NoError(t, err) - - // Policy should exist in store1 - _, err = store1.GetPolicy(ctx, "", "TestPolicy") - assert.NoError(t, err, "Policy should exist in store1") - - // Policy should NOT exist in store2 (different instance) - _, err = store2.GetPolicy(ctx, "", "TestPolicy") - assert.Error(t, err, "Policy should not exist in store2") - assert.Contains(t, err.Error(), "not found", "Should be a not found error") - }) - - t.Run("policy_loading_error_handling", func(t *testing.T) { - engine := NewPolicyEngine() - config := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - } - - err := engine.Initialize(config) - require.NoError(t, err) - - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::bucket/key", - } - - // Evaluate with non-existent policies - result, err := engine.Evaluate(ctx, "", evalCtx, []string{"NonExistentPolicy1", "NonExistentPolicy2"}) - require.NoError(t, err, "Should not error on missing policies") - - // Should use default effect when no policies can be loaded - assert.Equal(t, EffectDeny, result.Effect, "Should use default effect") - assert.Empty(t, result.MatchingStatements, "Should have no matching statements") - }) -} - -// TestFilerPolicyStoreConfiguration tests filer policy store configuration for distributed deployments -func TestFilerPolicyStoreConfiguration(t *testing.T) { - t.Run("filer_store_creation", func(t *testing.T) { - // Test with minimal configuration - config := map[string]interface{}{ - "filerAddress": "localhost:8888", - } - - store, err := NewFilerPolicyStore(config, nil) - require.NoError(t, err, "Should create filer policy store with minimal config") - assert.NotNil(t, store) - }) - - t.Run("filer_store_custom_path", func(t *testing.T) { - config := map[string]interface{}{ - "filerAddress": "prod-filer:8888", - "basePath": "/custom/iam/policies", - } - - store, err := NewFilerPolicyStore(config, nil) - require.NoError(t, err, "Should create filer policy store with custom path") - assert.NotNil(t, store) - }) - - t.Run("filer_store_missing_address", func(t *testing.T) { - config := map[string]interface{}{ - "basePath": "/seaweedfs/iam/policies", - } - - store, err := NewFilerPolicyStore(config, nil) - assert.NoError(t, err, "Should create filer store without filerAddress in config") - assert.NotNil(t, store, "Store should be created successfully") - }) -} - -// TestPolicyEvaluationPerformance tests performance considerations for distributed policy evaluation -func TestPolicyEvaluationPerformance(t *testing.T) { - ctx := context.Background() - - // Create engine with memory store (for performance baseline) - engine := NewPolicyEngine() - config := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - } - - err := engine.Initialize(config) - require.NoError(t, err) - - // Add multiple policies - for i := 0; i < 10; i++ { - policy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: fmt.Sprintf("Statement%d", i), - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{fmt.Sprintf("arn:seaweed:s3:::bucket%d/*", i)}, - }, - }, - } - - err := engine.AddPolicy("", fmt.Sprintf("Policy%d", i), policy) - require.NoError(t, err) - } - - // Test evaluation performance - evalCtx := &EvaluationContext{ - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::bucket5/file.txt", - } - - policyNames := make([]string, 10) - for i := 0; i < 10; i++ { - policyNames[i] = fmt.Sprintf("Policy%d", i) - } - - // Measure evaluation time - start := time.Now() - for i := 0; i < 100; i++ { - _, err := engine.Evaluate(ctx, "", evalCtx, policyNames) - require.NoError(t, err) - } - duration := time.Since(start) - - // Should be reasonably fast (less than 10ms per evaluation on average) - avgDuration := duration / 100 - t.Logf("Average policy evaluation time: %v", avgDuration) - assert.Less(t, avgDuration, 10*time.Millisecond, "Policy evaluation should be fast") -} diff --git a/weed/iam/policy/policy_engine_test.go b/weed/iam/policy/policy_engine_test.go deleted file mode 100644 index 4e6cd3c3a..000000000 --- a/weed/iam/policy/policy_engine_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package policy - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestPolicyEngineInitialization tests policy engine initialization -func TestPolicyEngineInitialization(t *testing.T) { - tests := []struct { - name string - config *PolicyEngineConfig - wantErr bool - }{ - { - name: "valid config", - config: &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - wantErr: false, - }, - { - name: "invalid default effect", - config: &PolicyEngineConfig{ - DefaultEffect: "Invalid", - StoreType: "memory", - }, - wantErr: true, - }, - { - name: "nil config", - config: nil, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - engine := NewPolicyEngine() - - err := engine.Initialize(tt.config) - - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.True(t, engine.IsInitialized()) - } - }) - } -} - -// TestPolicyDocumentValidation tests policy document structure validation -func TestPolicyDocumentValidation(t *testing.T) { - tests := []struct { - name string - policy *PolicyDocument - wantErr bool - errorMsg string - }{ - { - name: "valid policy document", - policy: &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "AllowS3Read", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{"arn:seaweed:s3:::mybucket/*"}, - }, - }, - }, - wantErr: false, - }, - { - name: "missing version", - policy: &PolicyDocument{ - Statement: []Statement{ - { - Effect: "Allow", - Action: []string{"s3:GetObject"}, - Resource: []string{"arn:seaweed:s3:::mybucket/*"}, - }, - }, - }, - wantErr: true, - errorMsg: "version is required", - }, - { - name: "empty statements", - policy: &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{}, - }, - wantErr: true, - errorMsg: "at least one statement is required", - }, - { - name: "invalid effect", - policy: &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Effect: "Maybe", - Action: []string{"s3:GetObject"}, - Resource: []string{"arn:seaweed:s3:::mybucket/*"}, - }, - }, - }, - wantErr: true, - errorMsg: "invalid effect", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidatePolicyDocument(tt.policy) - - if tt.wantErr { - assert.Error(t, err) - if tt.errorMsg != "" { - assert.Contains(t, err.Error(), tt.errorMsg) - } - } else { - assert.NoError(t, err) - } - }) - } -} - -// TestPolicyEvaluation tests policy evaluation logic -func TestPolicyEvaluation(t *testing.T) { - engine := setupTestPolicyEngine(t) - - // Add test policies - readPolicy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "AllowS3Read", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{ - "arn:seaweed:s3:::public-bucket/*", // For object operations - "arn:seaweed:s3:::public-bucket", // For bucket operations - }, - }, - }, - } - - err := engine.AddPolicy("", "read-policy", readPolicy) - require.NoError(t, err) - - denyPolicy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "DenyS3Delete", - Effect: "Deny", - Action: []string{"s3:DeleteObject"}, - Resource: []string{"arn:seaweed:s3:::*"}, - }, - }, - } - - err = engine.AddPolicy("", "deny-policy", denyPolicy) - require.NoError(t, err) - - tests := []struct { - name string - context *EvaluationContext - policies []string - want Effect - }{ - { - name: "allow read access", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::public-bucket/file.txt", - RequestContext: map[string]interface{}{ - "sourceIP": "192.168.1.100", - }, - }, - policies: []string{"read-policy"}, - want: EffectAllow, - }, - { - name: "deny delete access (explicit deny)", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:DeleteObject", - Resource: "arn:seaweed:s3:::public-bucket/file.txt", - }, - policies: []string{"read-policy", "deny-policy"}, - want: EffectDeny, - }, - { - name: "deny by default (no matching policy)", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:PutObject", - Resource: "arn:seaweed:s3:::public-bucket/file.txt", - }, - policies: []string{"read-policy"}, - want: EffectDeny, - }, - { - name: "allow with wildcard action", - context: &EvaluationContext{ - Principal: "user:admin", - Action: "s3:ListBucket", - Resource: "arn:seaweed:s3:::public-bucket", - }, - policies: []string{"read-policy"}, - want: EffectAllow, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Evaluate(context.Background(), "", tt.context, tt.policies) - - assert.NoError(t, err) - assert.Equal(t, tt.want, result.Effect) - - // Verify evaluation details - assert.NotNil(t, result.EvaluationDetails) - assert.Equal(t, tt.context.Action, result.EvaluationDetails.Action) - assert.Equal(t, tt.context.Resource, result.EvaluationDetails.Resource) - }) - } -} - -// TestConditionEvaluation tests policy conditions -func TestConditionEvaluation(t *testing.T) { - engine := setupTestPolicyEngine(t) - - // Policy with IP address condition - conditionalPolicy := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "AllowFromOfficeIP", - Effect: "Allow", - Action: []string{"s3:*"}, - Resource: []string{"arn:seaweed:s3:::*"}, - Condition: map[string]map[string]interface{}{ - "IpAddress": { - "seaweed:SourceIP": []string{"192.168.1.0/24", "10.0.0.0/8"}, - }, - }, - }, - }, - } - - err := engine.AddPolicy("", "ip-conditional", conditionalPolicy) - require.NoError(t, err) - - tests := []struct { - name string - context *EvaluationContext - want Effect - }{ - { - name: "allow from office IP", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::mybucket/file.txt", - RequestContext: map[string]interface{}{ - "sourceIP": "192.168.1.100", - }, - }, - want: EffectAllow, - }, - { - name: "deny from external IP", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:GetObject", - Resource: "arn:seaweed:s3:::mybucket/file.txt", - RequestContext: map[string]interface{}{ - "sourceIP": "8.8.8.8", - }, - }, - want: EffectDeny, - }, - { - name: "allow from internal IP", - context: &EvaluationContext{ - Principal: "user:alice", - Action: "s3:PutObject", - Resource: "arn:seaweed:s3:::mybucket/newfile.txt", - RequestContext: map[string]interface{}{ - "sourceIP": "10.1.2.3", - }, - }, - want: EffectAllow, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Evaluate(context.Background(), "", tt.context, []string{"ip-conditional"}) - - assert.NoError(t, err) - assert.Equal(t, tt.want, result.Effect) - }) - } -} - -// TestResourceMatching tests resource ARN matching -func TestResourceMatching(t *testing.T) { - tests := []struct { - name string - policyResource string - requestResource string - want bool - }{ - { - name: "exact match", - policyResource: "arn:seaweed:s3:::mybucket/file.txt", - requestResource: "arn:seaweed:s3:::mybucket/file.txt", - want: true, - }, - { - name: "wildcard match", - policyResource: "arn:seaweed:s3:::mybucket/*", - requestResource: "arn:seaweed:s3:::mybucket/folder/file.txt", - want: true, - }, - { - name: "bucket wildcard", - policyResource: "arn:seaweed:s3:::*", - requestResource: "arn:seaweed:s3:::anybucket/file.txt", - want: true, - }, - { - name: "no match different bucket", - policyResource: "arn:seaweed:s3:::mybucket/*", - requestResource: "arn:seaweed:s3:::otherbucket/file.txt", - want: false, - }, - { - name: "prefix match", - policyResource: "arn:seaweed:s3:::mybucket/documents/*", - requestResource: "arn:seaweed:s3:::mybucket/documents/secret.txt", - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := matchResource(tt.policyResource, tt.requestResource) - assert.Equal(t, tt.want, result) - }) - } -} - -// TestActionMatching tests action pattern matching -func TestActionMatching(t *testing.T) { - tests := []struct { - name string - policyAction string - requestAction string - want bool - }{ - { - name: "exact match", - policyAction: "s3:GetObject", - requestAction: "s3:GetObject", - want: true, - }, - { - name: "wildcard service", - policyAction: "s3:*", - requestAction: "s3:PutObject", - want: true, - }, - { - name: "wildcard all", - policyAction: "*", - requestAction: "filer:CreateEntry", - want: true, - }, - { - name: "prefix match", - policyAction: "s3:Get*", - requestAction: "s3:GetObject", - want: true, - }, - { - name: "no match different service", - policyAction: "s3:GetObject", - requestAction: "filer:GetEntry", - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := matchAction(tt.policyAction, tt.requestAction) - assert.Equal(t, tt.want, result) - }) - } -} - -// Helper function to set up test policy engine -func setupTestPolicyEngine(t *testing.T) *PolicyEngine { - engine := NewPolicyEngine() - config := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - } - - err := engine.Initialize(config) - require.NoError(t, err) - - return engine -} diff --git a/weed/iam/policy/policy_store.go b/weed/iam/policy/policy_store.go deleted file mode 100644 index d25adce61..000000000 --- a/weed/iam/policy/policy_store.go +++ /dev/null @@ -1,395 +0,0 @@ -package policy - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" -) - -// MemoryPolicyStore implements PolicyStore using in-memory storage -type MemoryPolicyStore struct { - policies map[string]*PolicyDocument - mutex sync.RWMutex -} - -// NewMemoryPolicyStore creates a new memory-based policy store -func NewMemoryPolicyStore() *MemoryPolicyStore { - return &MemoryPolicyStore{ - policies: make(map[string]*PolicyDocument), - } -} - -// StorePolicy stores a policy document in memory (filerAddress ignored for memory store) -func (s *MemoryPolicyStore) StorePolicy(ctx context.Context, filerAddress string, name string, policy *PolicyDocument) error { - if name == "" { - return fmt.Errorf("policy name cannot be empty") - } - - if policy == nil { - return fmt.Errorf("policy cannot be nil") - } - - s.mutex.Lock() - defer s.mutex.Unlock() - - // Deep copy the policy to prevent external modifications - s.policies[name] = copyPolicyDocument(policy) - return nil -} - -// GetPolicy retrieves a policy document from memory (filerAddress ignored for memory store) -func (s *MemoryPolicyStore) GetPolicy(ctx context.Context, filerAddress string, name string) (*PolicyDocument, error) { - if name == "" { - return nil, fmt.Errorf("policy name cannot be empty") - } - - s.mutex.RLock() - defer s.mutex.RUnlock() - - policy, exists := s.policies[name] - if !exists { - return nil, fmt.Errorf("policy not found: %s", name) - } - - // Return a copy to prevent external modifications - return copyPolicyDocument(policy), nil -} - -// DeletePolicy deletes a policy document from memory (filerAddress ignored for memory store) -func (s *MemoryPolicyStore) DeletePolicy(ctx context.Context, filerAddress string, name string) error { - if name == "" { - return fmt.Errorf("policy name cannot be empty") - } - - s.mutex.Lock() - defer s.mutex.Unlock() - - delete(s.policies, name) - return nil -} - -// ListPolicies lists all policy names in memory (filerAddress ignored for memory store) -func (s *MemoryPolicyStore) ListPolicies(ctx context.Context, filerAddress string) ([]string, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - - names := make([]string, 0, len(s.policies)) - for name := range s.policies { - names = append(names, name) - } - - return names, nil -} - -// copyPolicyDocument creates a deep copy of a policy document -func copyPolicyDocument(original *PolicyDocument) *PolicyDocument { - if original == nil { - return nil - } - - copied := &PolicyDocument{ - Version: original.Version, - Id: original.Id, - } - - // Copy statements - copied.Statement = make([]Statement, len(original.Statement)) - for i, stmt := range original.Statement { - copied.Statement[i] = Statement{ - Sid: stmt.Sid, - Effect: stmt.Effect, - Principal: stmt.Principal, - NotPrincipal: stmt.NotPrincipal, - } - - // Copy action slice - if stmt.Action != nil { - copied.Statement[i].Action = make([]string, len(stmt.Action)) - copy(copied.Statement[i].Action, stmt.Action) - } - - // Copy NotAction slice - if stmt.NotAction != nil { - copied.Statement[i].NotAction = make([]string, len(stmt.NotAction)) - copy(copied.Statement[i].NotAction, stmt.NotAction) - } - - // Copy resource slice - if stmt.Resource != nil { - copied.Statement[i].Resource = make([]string, len(stmt.Resource)) - copy(copied.Statement[i].Resource, stmt.Resource) - } - - // Copy NotResource slice - if stmt.NotResource != nil { - copied.Statement[i].NotResource = make([]string, len(stmt.NotResource)) - copy(copied.Statement[i].NotResource, stmt.NotResource) - } - - // Copy condition map (shallow copy for now) - if stmt.Condition != nil { - copied.Statement[i].Condition = make(map[string]map[string]interface{}) - for k, v := range stmt.Condition { - copied.Statement[i].Condition[k] = v - } - } - } - - return copied -} - -// FilerPolicyStore implements PolicyStore using SeaweedFS filer -type FilerPolicyStore struct { - grpcDialOption grpc.DialOption - basePath string - filerAddressProvider func() string -} - -// NewFilerPolicyStore creates a new filer-based policy store -func NewFilerPolicyStore(config map[string]interface{}, filerAddressProvider func() string) (*FilerPolicyStore, error) { - store := &FilerPolicyStore{ - basePath: "/etc/iam/policies", // Default path for policy storage - aligned with /etc/ convention - filerAddressProvider: filerAddressProvider, - } - - // Parse configuration - only basePath and other settings, NOT filerAddress - if config != nil { - if basePath, ok := config["basePath"].(string); ok && basePath != "" { - store.basePath = strings.TrimSuffix(basePath, "/") - } - } - - glog.V(2).Infof("Initialized FilerPolicyStore with basePath %s", store.basePath) - - return store, nil -} - -// StorePolicy stores a policy document in filer -func (s *FilerPolicyStore) StorePolicy(ctx context.Context, filerAddress string, name string, policy *PolicyDocument) error { - // Use provider function if filerAddress is not provided - if filerAddress == "" && s.filerAddressProvider != nil { - filerAddress = s.filerAddressProvider() - } - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerPolicyStore") - } - if name == "" { - return fmt.Errorf("policy name cannot be empty") - } - if policy == nil { - return fmt.Errorf("policy cannot be nil") - } - - // Serialize policy to JSON - policyData, err := json.MarshalIndent(policy, "", " ") - if err != nil { - return fmt.Errorf("failed to serialize policy: %v", err) - } - - policyPath := s.getPolicyPath(name) - - // Store in filer - return s.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.CreateEntryRequest{ - Directory: s.basePath, - Entry: &filer_pb.Entry{ - Name: s.getPolicyFileName(name), - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0600), // Read/write for owner only - Uid: uint32(0), - Gid: uint32(0), - }, - Content: policyData, - }, - } - - glog.V(3).Infof("Storing policy %s at %s", name, policyPath) - _, err := client.CreateEntry(ctx, request) - if err != nil { - return fmt.Errorf("failed to store policy %s: %v", name, err) - } - - return nil - }) -} - -// GetPolicy retrieves a policy document from filer -func (s *FilerPolicyStore) GetPolicy(ctx context.Context, filerAddress string, name string) (*PolicyDocument, error) { - // Use provider function if filerAddress is not provided - if filerAddress == "" && s.filerAddressProvider != nil { - filerAddress = s.filerAddressProvider() - } - if filerAddress == "" { - return nil, fmt.Errorf("filer address is required for FilerPolicyStore") - } - if name == "" { - return nil, fmt.Errorf("policy name cannot be empty") - } - - var policyData []byte - err := s.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: s.basePath, - Name: s.getPolicyFileName(name), - } - - glog.V(3).Infof("Looking up policy %s", name) - response, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - return fmt.Errorf("policy not found: %v", err) - } - - if response.Entry == nil { - return fmt.Errorf("policy not found") - } - - policyData = response.Entry.Content - return nil - }) - - if err != nil { - return nil, err - } - - // Deserialize policy from JSON - var policy PolicyDocument - if err := json.Unmarshal(policyData, &policy); err != nil { - return nil, fmt.Errorf("failed to deserialize policy: %v", err) - } - - return &policy, nil -} - -// DeletePolicy deletes a policy document from filer -func (s *FilerPolicyStore) DeletePolicy(ctx context.Context, filerAddress string, name string) error { - // Use provider function if filerAddress is not provided - if filerAddress == "" && s.filerAddressProvider != nil { - filerAddress = s.filerAddressProvider() - } - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerPolicyStore") - } - if name == "" { - return fmt.Errorf("policy name cannot be empty") - } - - return s.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.DeleteEntryRequest{ - Directory: s.basePath, - Name: s.getPolicyFileName(name), - IsDeleteData: true, - IsRecursive: false, - IgnoreRecursiveError: false, - } - - glog.V(3).Infof("Deleting policy %s", name) - resp, err := client.DeleteEntry(ctx, request) - if err != nil { - // Ignore "not found" errors - policy may already be deleted - if strings.Contains(err.Error(), "not found") { - return nil - } - return fmt.Errorf("failed to delete policy %s: %v", name, err) - } - - // Check response error - if resp.Error != "" { - // Ignore "not found" errors - policy may already be deleted - if strings.Contains(resp.Error, "not found") { - return nil - } - return fmt.Errorf("failed to delete policy %s: %s", name, resp.Error) - } - - return nil - }) -} - -// ListPolicies lists all policy names in filer -func (s *FilerPolicyStore) ListPolicies(ctx context.Context, filerAddress string) ([]string, error) { - // Use provider function if filerAddress is not provided - if filerAddress == "" && s.filerAddressProvider != nil { - filerAddress = s.filerAddressProvider() - } - if filerAddress == "" { - return nil, fmt.Errorf("filer address is required for FilerPolicyStore") - } - - var policyNames []string - - err := s.withFilerClient(filerAddress, func(client filer_pb.SeaweedFilerClient) error { - // List all entries in the policy directory - request := &filer_pb.ListEntriesRequest{ - Directory: s.basePath, - Prefix: "policy_", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 1000, // Process in batches of 1000 - } - - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("failed to list policies: %v", err) - } - - for { - resp, err := stream.Recv() - if err != nil { - break // End of stream or error - } - - if resp.Entry == nil || resp.Entry.IsDirectory { - continue - } - - // Extract policy name from filename - filename := resp.Entry.Name - if strings.HasPrefix(filename, "policy_") && strings.HasSuffix(filename, ".json") { - // Remove "policy_" prefix and ".json" suffix - policyName := strings.TrimSuffix(strings.TrimPrefix(filename, "policy_"), ".json") - policyNames = append(policyNames, policyName) - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - return policyNames, nil -} - -// Helper methods - -// withFilerClient executes a function with a filer client -func (s *FilerPolicyStore) withFilerClient(filerAddress string, fn func(client filer_pb.SeaweedFilerClient) error) error { - if filerAddress == "" { - return fmt.Errorf("filer address is required for FilerPolicyStore") - } - - // Use the pb.WithGrpcFilerClient helper similar to existing SeaweedFS code - return pb.WithGrpcFilerClient(false, 0, pb.ServerAddress(filerAddress), s.grpcDialOption, fn) -} - -// getPolicyPath returns the full path for a policy -func (s *FilerPolicyStore) getPolicyPath(policyName string) string { - return s.basePath + "/" + s.getPolicyFileName(policyName) -} - -// getPolicyFileName returns the filename for a policy -func (s *FilerPolicyStore) getPolicyFileName(policyName string) string { - return "policy_" + policyName + ".json" -} diff --git a/weed/iam/policy/policy_variable_matching_test.go b/weed/iam/policy/policy_variable_matching_test.go deleted file mode 100644 index 6b9827dff..000000000 --- a/weed/iam/policy/policy_variable_matching_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package policy - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestPolicyVariableMatchingInActionsAndResources tests that Actions and Resources -// now support policy variables like ${aws:username} just like string conditions do -func TestPolicyVariableMatchingInActionsAndResources(t *testing.T) { - engine := NewPolicyEngine() - config := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - } - - err := engine.Initialize(config) - require.NoError(t, err) - - ctx := context.Background() - filerAddress := "" - - // Create a policy that uses policy variables in Action and Resource fields - policyDoc := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "AllowUserSpecificActions", - Effect: "Allow", - Action: []string{ - "s3:Get*", // Regular wildcard - "s3:${aws:principaltype}*", // Policy variable in action - }, - Resource: []string{ - "arn:aws:s3:::user-${aws:username}/*", // Policy variable in resource - "arn:aws:s3:::shared/${saml:username}/*", // Different policy variable - }, - }, - }, - } - - err = engine.AddPolicy(filerAddress, "user-specific-policy", policyDoc) - require.NoError(t, err) - - tests := []struct { - name string - principal string - action string - resource string - requestContext map[string]interface{} - expectedEffect Effect - description string - }{ - { - name: "policy_variable_in_action_matches", - principal: "test-user", - action: "s3:AssumedRole", // Should match s3:${aws:principaltype}* when principaltype=AssumedRole - resource: "arn:aws:s3:::user-testuser/file.txt", - requestContext: map[string]interface{}{ - "aws:username": "testuser", - "aws:principaltype": "AssumedRole", - }, - expectedEffect: EffectAllow, - description: "Action with policy variable should match when variable is expanded", - }, - { - name: "policy_variable_in_resource_matches", - principal: "alice", - action: "s3:GetObject", - resource: "arn:aws:s3:::user-alice/document.pdf", // Should match user-${aws:username}/* - requestContext: map[string]interface{}{ - "aws:username": "alice", - }, - expectedEffect: EffectAllow, - description: "Resource with policy variable should match when variable is expanded", - }, - { - name: "saml_username_variable_in_resource", - principal: "bob", - action: "s3:GetObject", - resource: "arn:aws:s3:::shared/bob/data.json", // Should match shared/${saml:username}/* - requestContext: map[string]interface{}{ - "saml:username": "bob", - }, - expectedEffect: EffectAllow, - description: "SAML username variable should be expanded in resource patterns", - }, - { - name: "policy_variable_no_match_wrong_user", - principal: "charlie", - action: "s3:GetObject", - resource: "arn:aws:s3:::user-alice/file.txt", // charlie trying to access alice's files - requestContext: map[string]interface{}{ - "aws:username": "charlie", - }, - expectedEffect: EffectDeny, - description: "Policy variable should prevent access when username doesn't match", - }, - { - name: "missing_policy_variable_context", - principal: "dave", - action: "s3:GetObject", - resource: "arn:aws:s3:::user-dave/file.txt", - requestContext: map[string]interface{}{ - // Missing aws:username context - }, - expectedEffect: EffectDeny, - description: "Missing policy variable context should result in no match", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - evalCtx := &EvaluationContext{ - Principal: tt.principal, - Action: tt.action, - Resource: tt.resource, - RequestContext: tt.requestContext, - } - - result, err := engine.Evaluate(ctx, filerAddress, evalCtx, []string{"user-specific-policy"}) - require.NoError(t, err, "Policy evaluation should not error") - - assert.Equal(t, tt.expectedEffect, result.Effect, - "Test %s: %s. Expected %s but got %s", - tt.name, tt.description, tt.expectedEffect, result.Effect) - }) - } -} - -// TestActionResourceConsistencyWithStringConditions verifies that Actions, Resources, -// and string conditions all use the same AWS IAM-compliant matching logic -func TestActionResourceConsistencyWithStringConditions(t *testing.T) { - engine := NewPolicyEngine() - config := &PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - } - - err := engine.Initialize(config) - require.NoError(t, err) - - ctx := context.Background() - filerAddress := "" - - // Policy that uses case-insensitive matching in all three areas - policyDoc := &PolicyDocument{ - Version: "2012-10-17", - Statement: []Statement{ - { - Sid: "CaseInsensitiveMatching", - Effect: "Allow", - Action: []string{"S3:GET*"}, // Uppercase action pattern - Resource: []string{"arn:aws:s3:::TEST-BUCKET/*"}, // Uppercase resource pattern - Condition: map[string]map[string]interface{}{ - "StringLike": { - "s3:RequestedRegion": "US-*", // Uppercase condition pattern - }, - }, - }, - }, - } - - err = engine.AddPolicy(filerAddress, "case-insensitive-policy", policyDoc) - require.NoError(t, err) - - evalCtx := &EvaluationContext{ - Principal: "test-user", - Action: "s3:getobject", // lowercase action - Resource: "arn:aws:s3:::test-bucket/file.txt", // lowercase resource - RequestContext: map[string]interface{}{ - "s3:RequestedRegion": "us-east-1", // lowercase condition value - }, - } - - result, err := engine.Evaluate(ctx, filerAddress, evalCtx, []string{"case-insensitive-policy"}) - require.NoError(t, err) - - // All should match due to case-insensitive AWS IAM-compliant matching - assert.Equal(t, EffectAllow, result.Effect, - "Actions, Resources, and Conditions should all use case-insensitive AWS IAM matching") - - // Verify that matching statements were found - assert.Len(t, result.MatchingStatements, 1, - "Should have exactly one matching statement") - assert.Equal(t, "Allow", string(result.MatchingStatements[0].Effect), - "Matching statement should have Allow effect") -} diff --git a/weed/iam/providers/provider.go b/weed/iam/providers/provider.go deleted file mode 100644 index 5c1deb03d..000000000 --- a/weed/iam/providers/provider.go +++ /dev/null @@ -1,227 +0,0 @@ -package providers - -import ( - "context" - "fmt" - "net/mail" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" -) - -// IdentityProvider defines the interface for external identity providers -type IdentityProvider interface { - // Name returns the unique name of the provider - Name() string - - // Initialize initializes the provider with configuration - Initialize(config interface{}) error - - // Authenticate authenticates a user with a token and returns external identity - Authenticate(ctx context.Context, token string) (*ExternalIdentity, error) - - // GetUserInfo retrieves user information by user ID - GetUserInfo(ctx context.Context, userID string) (*ExternalIdentity, error) - - // ValidateToken validates a token and returns claims - ValidateToken(ctx context.Context, token string) (*TokenClaims, error) -} - -// ExternalIdentity represents an identity from an external provider -type ExternalIdentity struct { - // UserID is the unique identifier from the external provider - UserID string `json:"userId"` - - // Email is the user's email address - Email string `json:"email"` - - // DisplayName is the user's display name - DisplayName string `json:"displayName"` - - // Groups are the groups the user belongs to - Groups []string `json:"groups,omitempty"` - - // Attributes are additional user attributes - Attributes map[string]string `json:"attributes,omitempty"` - - // Provider is the name of the identity provider - Provider string `json:"provider"` -} - -// Validate validates the external identity structure -func (e *ExternalIdentity) Validate() error { - if e.UserID == "" { - return fmt.Errorf("user ID is required") - } - - if e.Provider == "" { - return fmt.Errorf("provider is required") - } - - if e.Email != "" { - if _, err := mail.ParseAddress(e.Email); err != nil { - return fmt.Errorf("invalid email format: %w", err) - } - } - - return nil -} - -// TokenClaims represents claims from a validated token -type TokenClaims struct { - // Subject (sub) - user identifier - Subject string `json:"sub"` - - // Issuer (iss) - token issuer - Issuer string `json:"iss"` - - // Audience (aud) - intended audience - Audience string `json:"aud"` - - // ExpiresAt (exp) - expiration time - ExpiresAt time.Time `json:"exp"` - - // IssuedAt (iat) - issued at time - IssuedAt time.Time `json:"iat"` - - // NotBefore (nbf) - not valid before time - NotBefore time.Time `json:"nbf,omitempty"` - - // Claims are additional claims from the token - Claims map[string]interface{} `json:"claims,omitempty"` -} - -// IsValid checks if the token claims are valid (not expired, etc.) -func (c *TokenClaims) IsValid() bool { - now := time.Now() - - // Check expiration - if !c.ExpiresAt.IsZero() && now.After(c.ExpiresAt) { - return false - } - - // Check not before - if !c.NotBefore.IsZero() && now.Before(c.NotBefore) { - return false - } - - // Check issued at (shouldn't be in the future) - if !c.IssuedAt.IsZero() && now.Before(c.IssuedAt) { - return false - } - - return true -} - -// GetClaimString returns a string claim value -func (c *TokenClaims) GetClaimString(key string) (string, bool) { - if value, exists := c.Claims[key]; exists { - if str, ok := value.(string); ok { - return str, true - } - } - return "", false -} - -// GetClaimStringSlice returns a string slice claim value -func (c *TokenClaims) GetClaimStringSlice(key string) ([]string, bool) { - if value, exists := c.Claims[key]; exists { - switch v := value.(type) { - case []string: - return v, true - case []interface{}: - var result []string - for _, item := range v { - if str, ok := item.(string); ok { - result = append(result, str) - } - } - return result, len(result) > 0 - case string: - // Single string can be treated as slice - return []string{v}, true - } - } - return nil, false -} - -// ProviderConfig represents configuration for identity providers -type ProviderConfig struct { - // Type of provider (oidc, ldap, saml) - Type string `json:"type"` - - // Name of the provider instance - Name string `json:"name"` - - // Enabled indicates if the provider is active - Enabled bool `json:"enabled"` - - // Config is provider-specific configuration - Config map[string]interface{} `json:"config"` - - // RoleMapping defines how to map external identities to roles - RoleMapping *RoleMapping `json:"roleMapping,omitempty"` -} - -// RoleMapping defines rules for mapping external identities to roles -type RoleMapping struct { - // Rules are the mapping rules - Rules []MappingRule `json:"rules"` - - // DefaultRole is assigned if no rules match - DefaultRole string `json:"defaultRole,omitempty"` -} - -// MappingRule defines a single mapping rule -type MappingRule struct { - // Claim is the claim key to check - Claim string `json:"claim"` - - // Value is the expected claim value (supports wildcards) - Value string `json:"value"` - - // Role is the role ARN to assign - Role string `json:"role"` - - // Condition is additional condition logic (optional) - Condition string `json:"condition,omitempty"` -} - -// Matches checks if a rule matches the given claims -func (r *MappingRule) Matches(claims *TokenClaims) bool { - if r.Claim == "" || r.Value == "" { - glog.V(3).Infof("Rule invalid: claim=%s, value=%s", r.Claim, r.Value) - return false - } - - claimValue, exists := claims.GetClaimString(r.Claim) - if !exists { - glog.V(3).Infof("Claim '%s' not found as string, trying as string slice", r.Claim) - // Try as string slice - if claimSlice, sliceExists := claims.GetClaimStringSlice(r.Claim); sliceExists { - glog.V(3).Infof("Claim '%s' found as string slice: %v", r.Claim, claimSlice) - for _, val := range claimSlice { - glog.V(3).Infof("Checking if '%s' matches rule value '%s'", val, r.Value) - if r.matchValue(val) { - glog.V(3).Infof("Match found: '%s' matches '%s'", val, r.Value) - return true - } - } - } else { - glog.V(3).Infof("Claim '%s' not found in any format", r.Claim) - } - return false - } - - glog.V(3).Infof("Claim '%s' found as string: '%s'", r.Claim, claimValue) - return r.matchValue(claimValue) -} - -// matchValue checks if a value matches the rule value (with wildcard support) -// Uses AWS IAM-compliant case-insensitive wildcard matching for consistency with policy engine -func (r *MappingRule) matchValue(value string) bool { - matched := policy.AwsWildcardMatch(r.Value, value) - glog.V(3).Infof("AWS IAM pattern match result: '%s' matches '%s' = %t", value, r.Value, matched) - return matched -} diff --git a/weed/iam/providers/provider_test.go b/weed/iam/providers/provider_test.go deleted file mode 100644 index 99cf360c1..000000000 --- a/weed/iam/providers/provider_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package providers - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestIdentityProviderInterface tests the core identity provider interface -func TestIdentityProviderInterface(t *testing.T) { - tests := []struct { - name string - provider IdentityProvider - wantErr bool - }{ - // We'll add test cases as we implement providers - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test provider name - name := tt.provider.Name() - assert.NotEmpty(t, name, "Provider name should not be empty") - - // Test initialization - err := tt.provider.Initialize(nil) - if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - - // Test authentication with invalid token - ctx := context.Background() - _, err = tt.provider.Authenticate(ctx, "invalid-token") - assert.Error(t, err, "Should fail with invalid token") - }) - } -} - -// TestExternalIdentityValidation tests external identity structure validation -func TestExternalIdentityValidation(t *testing.T) { - tests := []struct { - name string - identity *ExternalIdentity - wantErr bool - }{ - { - name: "valid identity", - identity: &ExternalIdentity{ - UserID: "user123", - Email: "user@example.com", - DisplayName: "Test User", - Groups: []string{"group1", "group2"}, - Attributes: map[string]string{"dept": "engineering"}, - Provider: "test-provider", - }, - wantErr: false, - }, - { - name: "missing user id", - identity: &ExternalIdentity{ - Email: "user@example.com", - Provider: "test-provider", - }, - wantErr: true, - }, - { - name: "missing provider", - identity: &ExternalIdentity{ - UserID: "user123", - Email: "user@example.com", - }, - wantErr: true, - }, - { - name: "invalid email", - identity: &ExternalIdentity{ - UserID: "user123", - Email: "invalid-email", - Provider: "test-provider", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.identity.Validate() - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -// TestTokenClaimsValidation tests token claims structure -func TestTokenClaimsValidation(t *testing.T) { - tests := []struct { - name string - claims *TokenClaims - valid bool - }{ - { - name: "valid claims", - claims: &TokenClaims{ - Subject: "user123", - Issuer: "https://provider.example.com", - Audience: "seaweedfs", - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now().Add(-time.Minute), - Claims: map[string]interface{}{"email": "user@example.com"}, - }, - valid: true, - }, - { - name: "expired token", - claims: &TokenClaims{ - Subject: "user123", - Issuer: "https://provider.example.com", - Audience: "seaweedfs", - ExpiresAt: time.Now().Add(-time.Hour), // Expired - IssuedAt: time.Now().Add(-time.Hour * 2), - Claims: map[string]interface{}{"email": "user@example.com"}, - }, - valid: false, - }, - { - name: "future issued token", - claims: &TokenClaims{ - Subject: "user123", - Issuer: "https://provider.example.com", - Audience: "seaweedfs", - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now().Add(time.Hour), // Future - Claims: map[string]interface{}{"email": "user@example.com"}, - }, - valid: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - valid := tt.claims.IsValid() - assert.Equal(t, tt.valid, valid) - }) - } -} - -// TestProviderRegistry tests provider registration and discovery -func TestProviderRegistry(t *testing.T) { - // Clear registry for test - registry := NewProviderRegistry() - - t.Run("register provider", func(t *testing.T) { - mockProvider := &MockProvider{name: "test-provider"} - - err := registry.RegisterProvider(mockProvider) - assert.NoError(t, err) - - // Test duplicate registration - err = registry.RegisterProvider(mockProvider) - assert.Error(t, err, "Should not allow duplicate registration") - }) - - t.Run("get provider", func(t *testing.T) { - provider, exists := registry.GetProvider("test-provider") - assert.True(t, exists) - assert.Equal(t, "test-provider", provider.Name()) - - // Test non-existent provider - _, exists = registry.GetProvider("non-existent") - assert.False(t, exists) - }) - - t.Run("list providers", func(t *testing.T) { - providers := registry.ListProviders() - assert.Len(t, providers, 1) - assert.Equal(t, "test-provider", providers[0]) - }) -} - -// MockProvider for testing -type MockProvider struct { - name string - initialized bool - shouldError bool -} - -func (m *MockProvider) Name() string { - return m.name -} - -func (m *MockProvider) Initialize(config interface{}) error { - if m.shouldError { - return assert.AnError - } - m.initialized = true - return nil -} - -func (m *MockProvider) Authenticate(ctx context.Context, token string) (*ExternalIdentity, error) { - if !m.initialized { - return nil, assert.AnError - } - if token == "invalid-token" { - return nil, assert.AnError - } - return &ExternalIdentity{ - UserID: "test-user", - Email: "test@example.com", - DisplayName: "Test User", - Provider: m.name, - }, nil -} - -func (m *MockProvider) GetUserInfo(ctx context.Context, userID string) (*ExternalIdentity, error) { - if !m.initialized || userID == "" { - return nil, assert.AnError - } - return &ExternalIdentity{ - UserID: userID, - Email: userID + "@example.com", - DisplayName: "User " + userID, - Provider: m.name, - }, nil -} - -func (m *MockProvider) ValidateToken(ctx context.Context, token string) (*TokenClaims, error) { - if !m.initialized || token == "invalid-token" { - return nil, assert.AnError - } - return &TokenClaims{ - Subject: "test-user", - Issuer: "test-issuer", - Audience: "seaweedfs", - ExpiresAt: time.Now().Add(time.Hour), - IssuedAt: time.Now(), - Claims: map[string]interface{}{"email": "test@example.com"}, - }, nil -} diff --git a/weed/iam/providers/registry.go b/weed/iam/providers/registry.go deleted file mode 100644 index dee50df44..000000000 --- a/weed/iam/providers/registry.go +++ /dev/null @@ -1,109 +0,0 @@ -package providers - -import ( - "fmt" - "sync" -) - -// ProviderRegistry manages registered identity providers -type ProviderRegistry struct { - mu sync.RWMutex - providers map[string]IdentityProvider -} - -// NewProviderRegistry creates a new provider registry -func NewProviderRegistry() *ProviderRegistry { - return &ProviderRegistry{ - providers: make(map[string]IdentityProvider), - } -} - -// RegisterProvider registers a new identity provider -func (r *ProviderRegistry) RegisterProvider(provider IdentityProvider) error { - if provider == nil { - return fmt.Errorf("provider cannot be nil") - } - - name := provider.Name() - if name == "" { - return fmt.Errorf("provider name cannot be empty") - } - - r.mu.Lock() - defer r.mu.Unlock() - - if _, exists := r.providers[name]; exists { - return fmt.Errorf("provider %s is already registered", name) - } - - r.providers[name] = provider - return nil -} - -// GetProvider retrieves a provider by name -func (r *ProviderRegistry) GetProvider(name string) (IdentityProvider, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - - provider, exists := r.providers[name] - return provider, exists -} - -// ListProviders returns all registered provider names -func (r *ProviderRegistry) ListProviders() []string { - r.mu.RLock() - defer r.mu.RUnlock() - - var names []string - for name := range r.providers { - names = append(names, name) - } - return names -} - -// UnregisterProvider removes a provider from the registry -func (r *ProviderRegistry) UnregisterProvider(name string) error { - r.mu.Lock() - defer r.mu.Unlock() - - if _, exists := r.providers[name]; !exists { - return fmt.Errorf("provider %s is not registered", name) - } - - delete(r.providers, name) - return nil -} - -// Clear removes all providers from the registry -func (r *ProviderRegistry) Clear() { - r.mu.Lock() - defer r.mu.Unlock() - - r.providers = make(map[string]IdentityProvider) -} - -// GetProviderCount returns the number of registered providers -func (r *ProviderRegistry) GetProviderCount() int { - r.mu.RLock() - defer r.mu.RUnlock() - - return len(r.providers) -} - -// Default global registry -var defaultRegistry = NewProviderRegistry() - -// RegisterProvider registers a provider in the default registry -func RegisterProvider(provider IdentityProvider) error { - return defaultRegistry.RegisterProvider(provider) -} - -// GetProvider retrieves a provider from the default registry -func GetProvider(name string) (IdentityProvider, bool) { - return defaultRegistry.GetProvider(name) -} - -// ListProviders returns all provider names from the default registry -func ListProviders() []string { - return defaultRegistry.ListProviders() -} diff --git a/weed/iam/sts/constants.go b/weed/iam/sts/constants.go deleted file mode 100644 index 0d2afc59e..000000000 --- a/weed/iam/sts/constants.go +++ /dev/null @@ -1,136 +0,0 @@ -package sts - -// Store Types -const ( - StoreTypeMemory = "memory" - StoreTypeFiler = "filer" - StoreTypeRedis = "redis" -) - -// Provider Types -const ( - ProviderTypeOIDC = "oidc" - ProviderTypeLDAP = "ldap" - ProviderTypeSAML = "saml" -) - -// Policy Effects -const ( - EffectAllow = "Allow" - EffectDeny = "Deny" -) - -// Default Paths - aligned with filer /etc/ convention -const ( - DefaultSessionBasePath = "/etc/iam/sessions" - DefaultPolicyBasePath = "/etc/iam/policies" - DefaultRoleBasePath = "/etc/iam/roles" -) - -// Default Values -const ( - DefaultTokenDuration = 3600 // 1 hour in seconds - DefaultMaxSessionLength = 43200 // 12 hours in seconds - DefaultIssuer = "seaweedfs-sts" - DefaultStoreType = StoreTypeFiler // Default store type for persistence - MinSigningKeyLength = 16 // Minimum signing key length in bytes -) - -// Configuration Field Names -const ( - ConfigFieldFilerAddress = "filerAddress" - ConfigFieldBasePath = "basePath" - ConfigFieldIssuer = "issuer" - ConfigFieldClientID = "clientId" - ConfigFieldClientSecret = "clientSecret" - ConfigFieldJWKSUri = "jwksUri" - ConfigFieldScopes = "scopes" - ConfigFieldUserInfoUri = "userInfoUri" - ConfigFieldRedirectUri = "redirectUri" -) - -// Error Messages -const ( - ErrConfigCannotBeNil = "config cannot be nil" - ErrProviderCannotBeNil = "provider cannot be nil" - ErrProviderNameEmpty = "provider name cannot be empty" - ErrProviderTypeEmpty = "provider type cannot be empty" - ErrTokenCannotBeEmpty = "token cannot be empty" - ErrSessionTokenCannotBeEmpty = "session token cannot be empty" - ErrSessionIDCannotBeEmpty = "session ID cannot be empty" - ErrSTSServiceNotInitialized = "STS service not initialized" - ErrProviderNotInitialized = "provider not initialized" - ErrInvalidTokenDuration = "token duration must be positive" - ErrInvalidMaxSessionLength = "max session length must be positive" - ErrIssuerRequired = "issuer is required" - ErrSigningKeyTooShort = "signing key must be at least %d bytes" - ErrFilerAddressRequired = "filer address is required" - ErrClientIDRequired = "clientId is required for OIDC provider" - ErrUnsupportedStoreType = "unsupported store type: %s" - ErrUnsupportedProviderType = "unsupported provider type: %s" - ErrInvalidTokenFormat = "invalid session token format: %w" - ErrSessionValidationFailed = "session validation failed: %w" - ErrInvalidToken = "invalid token: %w" - ErrTokenNotValid = "token is not valid" - ErrInvalidTokenClaims = "invalid token claims" - ErrInvalidIssuer = "invalid issuer" - ErrMissingSessionID = "missing session ID" -) - -// JWT Claims -const ( - JWTClaimIssuer = "iss" - JWTClaimSubject = "sub" - JWTClaimAudience = "aud" - JWTClaimExpiration = "exp" - JWTClaimIssuedAt = "iat" - JWTClaimTokenType = "token_type" -) - -// Token Types -const ( - TokenTypeSession = "session" - TokenTypeAccess = "access" - TokenTypeRefresh = "refresh" -) - -// AWS STS Actions -const ( - ActionAssumeRole = "sts:AssumeRole" - ActionAssumeRoleWithWebIdentity = "sts:AssumeRoleWithWebIdentity" - ActionAssumeRoleWithCredentials = "sts:AssumeRoleWithCredentials" - ActionValidateSession = "sts:ValidateSession" -) - -// Session File Prefixes -const ( - SessionFilePrefix = "session_" - SessionFileExt = ".json" - PolicyFilePrefix = "policy_" - PolicyFileExt = ".json" - RoleFileExt = ".json" -) - -// HTTP Headers -const ( - HeaderAuthorization = "Authorization" - HeaderContentType = "Content-Type" - HeaderUserAgent = "User-Agent" -) - -// Content Types -const ( - ContentTypeJSON = "application/json" - ContentTypeFormURLEncoded = "application/x-www-form-urlencoded" -) - -// Default Test Values -const ( - TestSigningKey32Chars = "test-signing-key-32-characters-long" - TestIssuer = "test-sts" - TestClientID = "test-client" - TestSessionID = "test-session-123" - TestValidToken = "valid_test_token" - TestInvalidToken = "invalid_token" - TestExpiredToken = "expired_token" -) diff --git a/weed/iam/sts/cross_instance_token_test.go b/weed/iam/sts/cross_instance_token_test.go deleted file mode 100644 index 243951d82..000000000 --- a/weed/iam/sts/cross_instance_token_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package sts - -import ( - "context" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Test-only constants for mock providers -const ( - ProviderTypeMock = "mock" -) - -// createMockOIDCProvider creates a mock OIDC provider for testing -// This is only available in test builds -func createMockOIDCProvider(name string, config map[string]interface{}) (providers.IdentityProvider, error) { - // Convert config to OIDC format - factory := NewProviderFactory() - oidcConfig, err := factory.convertToOIDCConfig(config) - if err != nil { - return nil, err - } - - // Set default values for mock provider if not provided - if oidcConfig.Issuer == "" { - oidcConfig.Issuer = "http://localhost:9999" - } - - provider := oidc.NewMockOIDCProvider(name) - if err := provider.Initialize(oidcConfig); err != nil { - return nil, err - } - - // Set up default test data for the mock provider - provider.SetupDefaultTestData() - - return provider, nil -} - -// createMockJWT creates a test JWT token with the specified issuer for mock provider testing -func createMockJWT(t *testing.T, issuer, subject string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - }) - - tokenString, err := token.SignedString([]byte("test-signing-key")) - require.NoError(t, err) - return tokenString -} - -// TestCrossInstanceTokenUsage verifies that tokens generated by one STS instance -// can be used and validated by other STS instances in a distributed environment -func TestCrossInstanceTokenUsage(t *testing.T) { - ctx := context.Background() - // Dummy filer address for testing - - // Common configuration that would be shared across all instances in production - sharedConfig := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "distributed-sts-cluster", // SAME across all instances - SigningKey: []byte(TestSigningKey32Chars), // SAME across all instances - Providers: []*ProviderConfig{ - { - Name: "company-oidc", - Type: ProviderTypeOIDC, - Enabled: true, - Config: map[string]interface{}{ - ConfigFieldIssuer: "https://sso.company.com/realms/production", - ConfigFieldClientID: "seaweedfs-cluster", - ConfigFieldJWKSUri: "https://sso.company.com/realms/production/protocol/openid-connect/certs", - }, - }, - }, - } - - // Create multiple STS instances simulating different S3 gateway instances - instanceA := NewSTSService() // e.g., s3-gateway-1 - instanceB := NewSTSService() // e.g., s3-gateway-2 - instanceC := NewSTSService() // e.g., s3-gateway-3 - - // Initialize all instances with IDENTICAL configuration - err := instanceA.Initialize(sharedConfig) - require.NoError(t, err, "Instance A should initialize") - - err = instanceB.Initialize(sharedConfig) - require.NoError(t, err, "Instance B should initialize") - - err = instanceC.Initialize(sharedConfig) - require.NoError(t, err, "Instance C should initialize") - - // Set up mock trust policy validator for all instances (required for STS testing) - mockValidator := &MockTrustPolicyValidator{} - instanceA.SetTrustPolicyValidator(mockValidator) - instanceB.SetTrustPolicyValidator(mockValidator) - instanceC.SetTrustPolicyValidator(mockValidator) - - // Manually register mock provider for testing (not available in production) - mockProviderConfig := map[string]interface{}{ - ConfigFieldIssuer: "http://test-mock:9999", - ConfigFieldClientID: TestClientID, - } - mockProviderA, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - mockProviderB, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - mockProviderC, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - - instanceA.RegisterProvider(mockProviderA) - instanceB.RegisterProvider(mockProviderB) - instanceC.RegisterProvider(mockProviderC) - - // Test 1: Token generated on Instance A can be validated on Instance B & C - t.Run("cross_instance_token_validation", func(t *testing.T) { - // Generate session token on Instance A - sessionId := TestSessionID - expiresAt := time.Now().Add(time.Hour) - - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err, "Instance A should generate token") - - // Validate token on Instance B - claimsFromB, err := instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) - require.NoError(t, err, "Instance B should validate token from Instance A") - assert.Equal(t, sessionId, claimsFromB.SessionId, "Session ID should match") - - // Validate same token on Instance C - claimsFromC, err := instanceC.tokenGenerator.ValidateSessionToken(tokenFromA) - require.NoError(t, err, "Instance C should validate token from Instance A") - assert.Equal(t, sessionId, claimsFromC.SessionId, "Session ID should match") - - // All instances should extract identical claims - assert.Equal(t, claimsFromB.SessionId, claimsFromC.SessionId) - assert.Equal(t, claimsFromB.ExpiresAt.Unix(), claimsFromC.ExpiresAt.Unix()) - assert.Equal(t, claimsFromB.IssuedAt.Unix(), claimsFromC.IssuedAt.Unix()) - }) - - // Test 2: Complete assume role flow across instances - t.Run("cross_instance_assume_role_flow", func(t *testing.T) { - // Step 1: User authenticates and assumes role on Instance A - // Create a valid JWT token for the mock provider - mockToken := createMockJWT(t, "http://test-mock:9999", "test-user") - - assumeRequest := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/CrossInstanceTestRole", - WebIdentityToken: mockToken, // JWT token for mock provider - RoleSessionName: "cross-instance-test-session", - DurationSeconds: int64ToPtr(3600), - } - - // Instance A processes assume role request - responseFromA, err := instanceA.AssumeRoleWithWebIdentity(ctx, assumeRequest) - require.NoError(t, err, "Instance A should process assume role") - - sessionToken := responseFromA.Credentials.SessionToken - accessKeyId := responseFromA.Credentials.AccessKeyId - secretAccessKey := responseFromA.Credentials.SecretAccessKey - - // Verify response structure - assert.NotEmpty(t, sessionToken, "Should have session token") - assert.NotEmpty(t, accessKeyId, "Should have access key ID") - assert.NotEmpty(t, secretAccessKey, "Should have secret access key") - assert.NotNil(t, responseFromA.AssumedRoleUser, "Should have assumed role user") - - // Step 2: Use session token on Instance B (different instance) - sessionInfoFromB, err := instanceB.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Instance B should validate session token from Instance A") - - assert.Equal(t, assumeRequest.RoleSessionName, sessionInfoFromB.SessionName) - assert.Equal(t, assumeRequest.RoleArn, sessionInfoFromB.RoleArn) - - // Step 3: Use same session token on Instance C (yet another instance) - sessionInfoFromC, err := instanceC.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Instance C should validate session token from Instance A") - - // All instances should return identical session information - assert.Equal(t, sessionInfoFromB.SessionId, sessionInfoFromC.SessionId) - assert.Equal(t, sessionInfoFromB.SessionName, sessionInfoFromC.SessionName) - assert.Equal(t, sessionInfoFromB.RoleArn, sessionInfoFromC.RoleArn) - assert.Equal(t, sessionInfoFromB.Subject, sessionInfoFromC.Subject) - assert.Equal(t, sessionInfoFromB.Provider, sessionInfoFromC.Provider) - }) - - // Test 3: Session revocation across instances - t.Run("cross_instance_session_revocation", func(t *testing.T) { - // Create session on Instance A - mockToken := createMockJWT(t, "http://test-mock:9999", "test-user") - - assumeRequest := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/RevocationTestRole", - WebIdentityToken: mockToken, - RoleSessionName: "revocation-test-session", - } - - response, err := instanceA.AssumeRoleWithWebIdentity(ctx, assumeRequest) - require.NoError(t, err) - sessionToken := response.Credentials.SessionToken - - // Verify token works on Instance B - _, err = instanceB.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Token should be valid on Instance B initially") - - // Validate session on Instance C to verify cross-instance token compatibility - _, err = instanceC.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Instance C should be able to validate session token") - - // In a stateless JWT system, tokens remain valid on all instances since they're self-contained - // No revocation is possible without breaking the stateless architecture - _, err = instanceA.ValidateSessionToken(ctx, sessionToken) - assert.NoError(t, err, "Token should still be valid on Instance A (stateless system)") - - // Verify token is still valid on Instance B - _, err = instanceB.ValidateSessionToken(ctx, sessionToken) - assert.NoError(t, err, "Token should still be valid on Instance B (stateless system)") - }) - - // Test 4: Provider consistency across instances - t.Run("provider_consistency_affects_token_generation", func(t *testing.T) { - // All instances should have same providers and be able to process same OIDC tokens - providerNamesA := instanceA.getProviderNames() - providerNamesB := instanceB.getProviderNames() - providerNamesC := instanceC.getProviderNames() - - assert.ElementsMatch(t, providerNamesA, providerNamesB, "Instance A and B should have same providers") - assert.ElementsMatch(t, providerNamesB, providerNamesC, "Instance B and C should have same providers") - - // All instances should be able to process same web identity token - testToken := createMockJWT(t, "http://test-mock:9999", "test-user") - - // Try to assume role with same token on different instances - assumeRequest := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/ProviderTestRole", - WebIdentityToken: testToken, - RoleSessionName: "provider-consistency-test", - } - - // Should work on any instance - responseA, errA := instanceA.AssumeRoleWithWebIdentity(ctx, assumeRequest) - responseB, errB := instanceB.AssumeRoleWithWebIdentity(ctx, assumeRequest) - responseC, errC := instanceC.AssumeRoleWithWebIdentity(ctx, assumeRequest) - - require.NoError(t, errA, "Instance A should process OIDC token") - require.NoError(t, errB, "Instance B should process OIDC token") - require.NoError(t, errC, "Instance C should process OIDC token") - - // All should return valid responses (sessions will have different IDs but same structure) - assert.NotEmpty(t, responseA.Credentials.SessionToken) - assert.NotEmpty(t, responseB.Credentials.SessionToken) - assert.NotEmpty(t, responseC.Credentials.SessionToken) - }) -} - -// TestSTSDistributedConfigurationRequirements tests the configuration requirements -// for cross-instance token compatibility -func TestSTSDistributedConfigurationRequirements(t *testing.T) { - _ = "localhost:8888" // Dummy filer address for testing (not used in these tests) - - t.Run("same_signing_key_required", func(t *testing.T) { - // Instance A with signing key 1 - configA := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "test-sts", - SigningKey: []byte("signing-key-1-32-characters-long"), - } - - // Instance B with different signing key - configB := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "test-sts", - SigningKey: []byte("signing-key-2-32-characters-long"), // DIFFERENT! - } - - instanceA := NewSTSService() - instanceB := NewSTSService() - - err := instanceA.Initialize(configA) - require.NoError(t, err) - - err = instanceB.Initialize(configB) - require.NoError(t, err) - - // Generate token on Instance A - sessionId := "test-session" - expiresAt := time.Now().Add(time.Hour) - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // Instance A should validate its own token - _, err = instanceA.tokenGenerator.ValidateSessionToken(tokenFromA) - assert.NoError(t, err, "Instance A should validate own token") - - // Instance B should REJECT token due to different signing key - _, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) - assert.Error(t, err, "Instance B should reject token with different signing key") - assert.Contains(t, err.Error(), "invalid token", "Should be signature validation error") - }) - - t.Run("same_issuer_required", func(t *testing.T) { - sharedSigningKey := []byte("shared-signing-key-32-characters-lo") - - // Instance A with issuer 1 - configA := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "sts-cluster-1", - SigningKey: sharedSigningKey, - } - - // Instance B with different issuer - configB := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "sts-cluster-2", // DIFFERENT! - SigningKey: sharedSigningKey, - } - - instanceA := NewSTSService() - instanceB := NewSTSService() - - err := instanceA.Initialize(configA) - require.NoError(t, err) - - err = instanceB.Initialize(configB) - require.NoError(t, err) - - // Generate token on Instance A - sessionId := "test-session" - expiresAt := time.Now().Add(time.Hour) - tokenFromA, err := instanceA.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // Instance B should REJECT token due to different issuer - _, err = instanceB.tokenGenerator.ValidateSessionToken(tokenFromA) - assert.Error(t, err, "Instance B should reject token with different issuer") - assert.Contains(t, err.Error(), "invalid issuer", "Should be issuer validation error") - }) - - t.Run("identical_configuration_required", func(t *testing.T) { - // Identical configuration - identicalConfig := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "production-sts-cluster", - SigningKey: []byte("production-signing-key-32-chars-l"), - } - - // Create multiple instances with identical config - instances := make([]*STSService, 5) - for i := 0; i < 5; i++ { - instances[i] = NewSTSService() - err := instances[i].Initialize(identicalConfig) - require.NoError(t, err, "Instance %d should initialize", i) - } - - // Generate token on Instance 0 - sessionId := "multi-instance-test" - expiresAt := time.Now().Add(time.Hour) - token, err := instances[0].tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // All other instances should validate the token - for i := 1; i < 5; i++ { - claims, err := instances[i].tokenGenerator.ValidateSessionToken(token) - require.NoError(t, err, "Instance %d should validate token", i) - assert.Equal(t, sessionId, claims.SessionId, "Instance %d should extract correct session ID", i) - } - }) -} - -// TestSTSRealWorldDistributedScenarios tests realistic distributed deployment scenarios -func TestSTSRealWorldDistributedScenarios(t *testing.T) { - ctx := context.Background() - - t.Run("load_balanced_s3_gateway_scenario", func(t *testing.T) { - // Simulate real production scenario: - // 1. User authenticates with OIDC provider - // 2. User calls AssumeRoleWithWebIdentity on S3 Gateway 1 - // 3. User makes S3 requests that hit S3 Gateway 2 & 3 via load balancer - // 4. All instances should handle the session token correctly - - productionConfig := &STSConfig{ - TokenDuration: FlexibleDuration{2 * time.Hour}, - MaxSessionLength: FlexibleDuration{24 * time.Hour}, - Issuer: "seaweedfs-production-sts", - SigningKey: []byte("prod-signing-key-32-characters-lon"), - - Providers: []*ProviderConfig{ - { - Name: "corporate-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://sso.company.com/realms/production", - "clientId": "seaweedfs-prod-cluster", - "clientSecret": "supersecret-prod-key", - "scopes": []string{"openid", "profile", "email", "groups"}, - }, - }, - }, - } - - // Create 3 S3 Gateway instances behind load balancer - gateway1 := NewSTSService() - gateway2 := NewSTSService() - gateway3 := NewSTSService() - - err := gateway1.Initialize(productionConfig) - require.NoError(t, err) - - err = gateway2.Initialize(productionConfig) - require.NoError(t, err) - - err = gateway3.Initialize(productionConfig) - require.NoError(t, err) - - // Set up mock trust policy validator for all gateway instances - mockValidator := &MockTrustPolicyValidator{} - gateway1.SetTrustPolicyValidator(mockValidator) - gateway2.SetTrustPolicyValidator(mockValidator) - gateway3.SetTrustPolicyValidator(mockValidator) - - // Manually register mock provider for testing (not available in production) - mockProviderConfig := map[string]interface{}{ - ConfigFieldIssuer: "http://test-mock:9999", - ConfigFieldClientID: "test-client-id", - } - mockProvider1, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - mockProvider2, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - mockProvider3, err := createMockOIDCProvider("test-mock", mockProviderConfig) - require.NoError(t, err) - - gateway1.RegisterProvider(mockProvider1) - gateway2.RegisterProvider(mockProvider2) - gateway3.RegisterProvider(mockProvider3) - - // Step 1: User authenticates and hits Gateway 1 for AssumeRole - mockToken := createMockJWT(t, "http://test-mock:9999", "production-user") - - assumeRequest := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/ProductionS3User", - WebIdentityToken: mockToken, // JWT token from mock provider - RoleSessionName: "user-production-session", - DurationSeconds: int64ToPtr(7200), // 2 hours - } - - stsResponse, err := gateway1.AssumeRoleWithWebIdentity(ctx, assumeRequest) - require.NoError(t, err, "Gateway 1 should handle AssumeRole") - - sessionToken := stsResponse.Credentials.SessionToken - accessKey := stsResponse.Credentials.AccessKeyId - secretKey := stsResponse.Credentials.SecretAccessKey - - // Step 2: User makes S3 requests that hit different gateways via load balancer - // Simulate S3 request validation on Gateway 2 - sessionInfo2, err := gateway2.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Gateway 2 should validate session from Gateway 1") - assert.Equal(t, "user-production-session", sessionInfo2.SessionName) - assert.Equal(t, "arn:seaweed:iam::role/ProductionS3User", sessionInfo2.RoleArn) - - // Simulate S3 request validation on Gateway 3 - sessionInfo3, err := gateway3.ValidateSessionToken(ctx, sessionToken) - require.NoError(t, err, "Gateway 3 should validate session from Gateway 1") - assert.Equal(t, sessionInfo2.SessionId, sessionInfo3.SessionId, "Should be same session") - - // Step 3: Verify credentials are consistent - assert.Equal(t, accessKey, stsResponse.Credentials.AccessKeyId, "Access key should be consistent") - assert.Equal(t, secretKey, stsResponse.Credentials.SecretAccessKey, "Secret key should be consistent") - - // Step 4: Session expiration should be honored across all instances - assert.True(t, sessionInfo2.ExpiresAt.After(time.Now()), "Session should not be expired") - assert.True(t, sessionInfo3.ExpiresAt.After(time.Now()), "Session should not be expired") - - // Step 5: Token should be identical when parsed - claims2, err := gateway2.tokenGenerator.ValidateSessionToken(sessionToken) - require.NoError(t, err) - - claims3, err := gateway3.tokenGenerator.ValidateSessionToken(sessionToken) - require.NoError(t, err) - - assert.Equal(t, claims2.SessionId, claims3.SessionId, "Session IDs should match") - assert.Equal(t, claims2.ExpiresAt.Unix(), claims3.ExpiresAt.Unix(), "Expiration should match") - }) -} - -// Helper function to convert int64 to pointer -func int64ToPtr(i int64) *int64 { - return &i -} diff --git a/weed/iam/sts/distributed_sts_test.go b/weed/iam/sts/distributed_sts_test.go deleted file mode 100644 index 133f3a669..000000000 --- a/weed/iam/sts/distributed_sts_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package sts - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestDistributedSTSService verifies that multiple STS instances with identical configurations -// behave consistently across distributed environments -func TestDistributedSTSService(t *testing.T) { - ctx := context.Background() - - // Common configuration for all instances - commonConfig := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "distributed-sts-test", - SigningKey: []byte("test-signing-key-32-characters-long"), - - Providers: []*ProviderConfig{ - { - Name: "keycloak-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "http://keycloak:8080/realms/seaweedfs-test", - "clientId": "seaweedfs-s3", - "jwksUri": "http://keycloak:8080/realms/seaweedfs-test/protocol/openid-connect/certs", - }, - }, - - { - Name: "disabled-ldap", - Type: "oidc", // Use OIDC as placeholder since LDAP isn't implemented - Enabled: false, - Config: map[string]interface{}{ - "issuer": "ldap://company.com", - "clientId": "ldap-client", - }, - }, - }, - } - - // Create multiple STS instances simulating distributed deployment - instance1 := NewSTSService() - instance2 := NewSTSService() - instance3 := NewSTSService() - - // Initialize all instances with identical configuration - err := instance1.Initialize(commonConfig) - require.NoError(t, err, "Instance 1 should initialize successfully") - - err = instance2.Initialize(commonConfig) - require.NoError(t, err, "Instance 2 should initialize successfully") - - err = instance3.Initialize(commonConfig) - require.NoError(t, err, "Instance 3 should initialize successfully") - - // Manually register mock providers for testing (not available in production) - mockProviderConfig := map[string]interface{}{ - "issuer": "http://localhost:9999", - "clientId": "test-client", - } - mockProvider1, err := createMockOIDCProvider("test-mock-provider", mockProviderConfig) - require.NoError(t, err) - mockProvider2, err := createMockOIDCProvider("test-mock-provider", mockProviderConfig) - require.NoError(t, err) - mockProvider3, err := createMockOIDCProvider("test-mock-provider", mockProviderConfig) - require.NoError(t, err) - - instance1.RegisterProvider(mockProvider1) - instance2.RegisterProvider(mockProvider2) - instance3.RegisterProvider(mockProvider3) - - // Verify all instances have identical provider configurations - t.Run("provider_consistency", func(t *testing.T) { - // All instances should have same number of providers - assert.Len(t, instance1.providers, 2, "Instance 1 should have 2 enabled providers") - assert.Len(t, instance2.providers, 2, "Instance 2 should have 2 enabled providers") - assert.Len(t, instance3.providers, 2, "Instance 3 should have 2 enabled providers") - - // All instances should have same provider names - instance1Names := instance1.getProviderNames() - instance2Names := instance2.getProviderNames() - instance3Names := instance3.getProviderNames() - - assert.ElementsMatch(t, instance1Names, instance2Names, "Instance 1 and 2 should have same providers") - assert.ElementsMatch(t, instance2Names, instance3Names, "Instance 2 and 3 should have same providers") - - // Verify specific providers exist on all instances - expectedProviders := []string{"keycloak-oidc", "test-mock-provider"} - assert.ElementsMatch(t, instance1Names, expectedProviders, "Instance 1 should have expected providers") - assert.ElementsMatch(t, instance2Names, expectedProviders, "Instance 2 should have expected providers") - assert.ElementsMatch(t, instance3Names, expectedProviders, "Instance 3 should have expected providers") - - // Verify disabled providers are not loaded - assert.NotContains(t, instance1Names, "disabled-ldap", "Disabled providers should not be loaded") - assert.NotContains(t, instance2Names, "disabled-ldap", "Disabled providers should not be loaded") - assert.NotContains(t, instance3Names, "disabled-ldap", "Disabled providers should not be loaded") - }) - - // Test token generation consistency across instances - t.Run("token_generation_consistency", func(t *testing.T) { - sessionId := "test-session-123" - expiresAt := time.Now().Add(time.Hour) - - // Generate tokens from different instances - token1, err1 := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - token2, err2 := instance2.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - token3, err3 := instance3.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - - require.NoError(t, err1, "Instance 1 token generation should succeed") - require.NoError(t, err2, "Instance 2 token generation should succeed") - require.NoError(t, err3, "Instance 3 token generation should succeed") - - // All tokens should be different (due to timestamp variations) - // But they should all be valid JWTs with same signing key - assert.NotEmpty(t, token1) - assert.NotEmpty(t, token2) - assert.NotEmpty(t, token3) - }) - - // Test token validation consistency - any instance should validate tokens from any other instance - t.Run("cross_instance_token_validation", func(t *testing.T) { - sessionId := "cross-validation-session" - expiresAt := time.Now().Add(time.Hour) - - // Generate token on instance 1 - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // Validate on all instances - claims1, err1 := instance1.tokenGenerator.ValidateSessionToken(token) - claims2, err2 := instance2.tokenGenerator.ValidateSessionToken(token) - claims3, err3 := instance3.tokenGenerator.ValidateSessionToken(token) - - require.NoError(t, err1, "Instance 1 should validate token from instance 1") - require.NoError(t, err2, "Instance 2 should validate token from instance 1") - require.NoError(t, err3, "Instance 3 should validate token from instance 1") - - // All instances should extract same session ID - assert.Equal(t, sessionId, claims1.SessionId) - assert.Equal(t, sessionId, claims2.SessionId) - assert.Equal(t, sessionId, claims3.SessionId) - - assert.Equal(t, claims1.SessionId, claims2.SessionId) - assert.Equal(t, claims2.SessionId, claims3.SessionId) - }) - - // Test provider access consistency - t.Run("provider_access_consistency", func(t *testing.T) { - // All instances should be able to access the same providers - provider1, exists1 := instance1.providers["test-mock-provider"] - provider2, exists2 := instance2.providers["test-mock-provider"] - provider3, exists3 := instance3.providers["test-mock-provider"] - - assert.True(t, exists1, "Instance 1 should have test-mock-provider") - assert.True(t, exists2, "Instance 2 should have test-mock-provider") - assert.True(t, exists3, "Instance 3 should have test-mock-provider") - - assert.Equal(t, provider1.Name(), provider2.Name()) - assert.Equal(t, provider2.Name(), provider3.Name()) - - // Test authentication with the mock provider on all instances - testToken := "valid_test_token" - - identity1, err1 := provider1.Authenticate(ctx, testToken) - identity2, err2 := provider2.Authenticate(ctx, testToken) - identity3, err3 := provider3.Authenticate(ctx, testToken) - - require.NoError(t, err1, "Instance 1 provider should authenticate successfully") - require.NoError(t, err2, "Instance 2 provider should authenticate successfully") - require.NoError(t, err3, "Instance 3 provider should authenticate successfully") - - // All instances should return identical identity information - assert.Equal(t, identity1.UserID, identity2.UserID) - assert.Equal(t, identity2.UserID, identity3.UserID) - assert.Equal(t, identity1.Email, identity2.Email) - assert.Equal(t, identity2.Email, identity3.Email) - assert.Equal(t, identity1.Provider, identity2.Provider) - assert.Equal(t, identity2.Provider, identity3.Provider) - }) -} - -// TestSTSConfigurationValidation tests configuration validation for distributed deployments -func TestSTSConfigurationValidation(t *testing.T) { - t.Run("consistent_signing_keys_required", func(t *testing.T) { - // Different signing keys should result in incompatible token validation - config1 := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "test-sts", - SigningKey: []byte("signing-key-1-32-characters-long"), - } - - config2 := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "test-sts", - SigningKey: []byte("signing-key-2-32-characters-long"), // Different key! - } - - instance1 := NewSTSService() - instance2 := NewSTSService() - - err1 := instance1.Initialize(config1) - err2 := instance2.Initialize(config2) - - require.NoError(t, err1) - require.NoError(t, err2) - - // Generate token on instance 1 - sessionId := "test-session" - expiresAt := time.Now().Add(time.Hour) - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // Instance 1 should validate its own token - _, err = instance1.tokenGenerator.ValidateSessionToken(token) - assert.NoError(t, err, "Instance 1 should validate its own token") - - // Instance 2 should reject token from instance 1 (different signing key) - _, err = instance2.tokenGenerator.ValidateSessionToken(token) - assert.Error(t, err, "Instance 2 should reject token with different signing key") - }) - - t.Run("consistent_issuer_required", func(t *testing.T) { - // Different issuers should result in incompatible tokens - commonSigningKey := []byte("shared-signing-key-32-characters-lo") - - config1 := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "sts-instance-1", - SigningKey: commonSigningKey, - } - - config2 := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{12 * time.Hour}, - Issuer: "sts-instance-2", // Different issuer! - SigningKey: commonSigningKey, - } - - instance1 := NewSTSService() - instance2 := NewSTSService() - - err1 := instance1.Initialize(config1) - err2 := instance2.Initialize(config2) - - require.NoError(t, err1) - require.NoError(t, err2) - - // Generate token on instance 1 - sessionId := "test-session" - expiresAt := time.Now().Add(time.Hour) - token, err := instance1.tokenGenerator.GenerateSessionToken(sessionId, expiresAt) - require.NoError(t, err) - - // Instance 2 should reject token due to issuer mismatch - // (Even though signing key is the same, issuer validation will fail) - _, err = instance2.tokenGenerator.ValidateSessionToken(token) - assert.Error(t, err, "Instance 2 should reject token with different issuer") - }) -} - -// TestProviderFactoryDistributed tests the provider factory in distributed scenarios -func TestProviderFactoryDistributed(t *testing.T) { - factory := NewProviderFactory() - - // Simulate configuration that would be identical across all instances - configs := []*ProviderConfig{ - { - Name: "production-keycloak", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://keycloak.company.com/realms/seaweedfs", - "clientId": "seaweedfs-prod", - "clientSecret": "super-secret-key", - "jwksUri": "https://keycloak.company.com/realms/seaweedfs/protocol/openid-connect/certs", - "scopes": []string{"openid", "profile", "email", "roles"}, - }, - }, - { - Name: "backup-oidc", - Type: "oidc", - Enabled: false, // Disabled by default - Config: map[string]interface{}{ - "issuer": "https://backup-oidc.company.com", - "clientId": "seaweedfs-backup", - }, - }, - } - - // Create providers multiple times (simulating multiple instances) - providers1, err1 := factory.LoadProvidersFromConfig(configs) - providers2, err2 := factory.LoadProvidersFromConfig(configs) - providers3, err3 := factory.LoadProvidersFromConfig(configs) - - require.NoError(t, err1, "First load should succeed") - require.NoError(t, err2, "Second load should succeed") - require.NoError(t, err3, "Third load should succeed") - - // All instances should have same provider counts - assert.Len(t, providers1, 1, "First instance should have 1 enabled provider") - assert.Len(t, providers2, 1, "Second instance should have 1 enabled provider") - assert.Len(t, providers3, 1, "Third instance should have 1 enabled provider") - - // All instances should have same provider names - names1 := make([]string, 0, len(providers1)) - names2 := make([]string, 0, len(providers2)) - names3 := make([]string, 0, len(providers3)) - - for name := range providers1 { - names1 = append(names1, name) - } - for name := range providers2 { - names2 = append(names2, name) - } - for name := range providers3 { - names3 = append(names3, name) - } - - assert.ElementsMatch(t, names1, names2, "Instance 1 and 2 should have same provider names") - assert.ElementsMatch(t, names2, names3, "Instance 2 and 3 should have same provider names") - - // Verify specific providers - expectedProviders := []string{"production-keycloak"} - assert.ElementsMatch(t, names1, expectedProviders, "Should have expected enabled providers") - - // Verify disabled providers are not included - assert.NotContains(t, names1, "backup-oidc", "Disabled providers should not be loaded") - assert.NotContains(t, names2, "backup-oidc", "Disabled providers should not be loaded") - assert.NotContains(t, names3, "backup-oidc", "Disabled providers should not be loaded") -} diff --git a/weed/iam/sts/provider_factory.go b/weed/iam/sts/provider_factory.go deleted file mode 100644 index 0733afdba..000000000 --- a/weed/iam/sts/provider_factory.go +++ /dev/null @@ -1,325 +0,0 @@ -package sts - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// ProviderFactory creates identity providers from configuration -type ProviderFactory struct{} - -// NewProviderFactory creates a new provider factory -func NewProviderFactory() *ProviderFactory { - return &ProviderFactory{} -} - -// CreateProvider creates an identity provider from configuration -func (f *ProviderFactory) CreateProvider(config *ProviderConfig) (providers.IdentityProvider, error) { - if config == nil { - return nil, fmt.Errorf(ErrConfigCannotBeNil) - } - - if config.Name == "" { - return nil, fmt.Errorf(ErrProviderNameEmpty) - } - - if config.Type == "" { - return nil, fmt.Errorf(ErrProviderTypeEmpty) - } - - if !config.Enabled { - glog.V(2).Infof("Provider %s is disabled, skipping", config.Name) - return nil, nil - } - - glog.V(2).Infof("Creating provider: name=%s, type=%s", config.Name, config.Type) - - switch config.Type { - case ProviderTypeOIDC: - return f.createOIDCProvider(config) - case ProviderTypeLDAP: - return f.createLDAPProvider(config) - case ProviderTypeSAML: - return f.createSAMLProvider(config) - default: - return nil, fmt.Errorf(ErrUnsupportedProviderType, config.Type) - } -} - -// createOIDCProvider creates an OIDC provider from configuration -func (f *ProviderFactory) createOIDCProvider(config *ProviderConfig) (providers.IdentityProvider, error) { - oidcConfig, err := f.convertToOIDCConfig(config.Config) - if err != nil { - return nil, fmt.Errorf("failed to convert OIDC config: %w", err) - } - - provider := oidc.NewOIDCProvider(config.Name) - if err := provider.Initialize(oidcConfig); err != nil { - return nil, fmt.Errorf("failed to initialize OIDC provider: %w", err) - } - - return provider, nil -} - -// createLDAPProvider creates an LDAP provider from configuration -func (f *ProviderFactory) createLDAPProvider(config *ProviderConfig) (providers.IdentityProvider, error) { - // TODO: Implement LDAP provider when available - return nil, fmt.Errorf("LDAP provider not implemented yet") -} - -// createSAMLProvider creates a SAML provider from configuration -func (f *ProviderFactory) createSAMLProvider(config *ProviderConfig) (providers.IdentityProvider, error) { - // TODO: Implement SAML provider when available - return nil, fmt.Errorf("SAML provider not implemented yet") -} - -// convertToOIDCConfig converts generic config map to OIDC config struct -func (f *ProviderFactory) convertToOIDCConfig(configMap map[string]interface{}) (*oidc.OIDCConfig, error) { - config := &oidc.OIDCConfig{} - - // Required fields - if issuer, ok := configMap[ConfigFieldIssuer].(string); ok { - config.Issuer = issuer - } else { - return nil, fmt.Errorf(ErrIssuerRequired) - } - - if clientID, ok := configMap[ConfigFieldClientID].(string); ok { - config.ClientID = clientID - } else { - return nil, fmt.Errorf(ErrClientIDRequired) - } - - // Optional fields - if clientSecret, ok := configMap[ConfigFieldClientSecret].(string); ok { - config.ClientSecret = clientSecret - } - - if jwksUri, ok := configMap[ConfigFieldJWKSUri].(string); ok { - config.JWKSUri = jwksUri - } - - if userInfoUri, ok := configMap[ConfigFieldUserInfoUri].(string); ok { - config.UserInfoUri = userInfoUri - } - - // Convert scopes array - if scopesInterface, ok := configMap[ConfigFieldScopes]; ok { - scopes, err := f.convertToStringSlice(scopesInterface) - if err != nil { - return nil, fmt.Errorf("failed to convert scopes: %w", err) - } - config.Scopes = scopes - } - - // Convert claims mapping - if claimsMapInterface, ok := configMap["claimsMapping"]; ok { - claimsMap, err := f.convertToStringMap(claimsMapInterface) - if err != nil { - return nil, fmt.Errorf("failed to convert claimsMapping: %w", err) - } - config.ClaimsMapping = claimsMap - } - - // Convert role mapping - if roleMappingInterface, ok := configMap["roleMapping"]; ok { - roleMapping, err := f.convertToRoleMapping(roleMappingInterface) - if err != nil { - return nil, fmt.Errorf("failed to convert roleMapping: %w", err) - } - config.RoleMapping = roleMapping - } - - glog.V(3).Infof("Converted OIDC config: issuer=%s, clientId=%s, jwksUri=%s", - config.Issuer, config.ClientID, config.JWKSUri) - - return config, nil -} - -// convertToStringSlice converts interface{} to []string -func (f *ProviderFactory) convertToStringSlice(value interface{}) ([]string, error) { - switch v := value.(type) { - case []string: - return v, nil - case []interface{}: - result := make([]string, len(v)) - for i, item := range v { - if str, ok := item.(string); ok { - result[i] = str - } else { - return nil, fmt.Errorf("non-string item in slice: %v", item) - } - } - return result, nil - default: - return nil, fmt.Errorf("cannot convert %T to []string", value) - } -} - -// convertToStringMap converts interface{} to map[string]string -func (f *ProviderFactory) convertToStringMap(value interface{}) (map[string]string, error) { - switch v := value.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - result := make(map[string]string) - for key, val := range v { - if str, ok := val.(string); ok { - result[key] = str - } else { - return nil, fmt.Errorf("non-string value for key %s: %v", key, val) - } - } - return result, nil - default: - return nil, fmt.Errorf("cannot convert %T to map[string]string", value) - } -} - -// LoadProvidersFromConfig creates providers from configuration -func (f *ProviderFactory) LoadProvidersFromConfig(configs []*ProviderConfig) (map[string]providers.IdentityProvider, error) { - providersMap := make(map[string]providers.IdentityProvider) - - for _, config := range configs { - if config == nil { - glog.V(1).Infof("Skipping nil provider config") - continue - } - - glog.V(2).Infof("Loading provider: %s (type: %s, enabled: %t)", - config.Name, config.Type, config.Enabled) - - if !config.Enabled { - glog.V(2).Infof("Provider %s is disabled, skipping", config.Name) - continue - } - - provider, err := f.CreateProvider(config) - if err != nil { - glog.Errorf("Failed to create provider %s: %v", config.Name, err) - return nil, fmt.Errorf("failed to create provider %s: %w", config.Name, err) - } - - if provider != nil { - providersMap[config.Name] = provider - glog.V(1).Infof("Successfully loaded provider: %s", config.Name) - } - } - - glog.V(1).Infof("Loaded %d identity providers from configuration", len(providersMap)) - return providersMap, nil -} - -// convertToRoleMapping converts interface{} to *providers.RoleMapping -func (f *ProviderFactory) convertToRoleMapping(value interface{}) (*providers.RoleMapping, error) { - roleMappingMap, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("roleMapping must be an object") - } - - roleMapping := &providers.RoleMapping{} - - // Convert rules - if rulesInterface, ok := roleMappingMap["rules"]; ok { - rulesSlice, ok := rulesInterface.([]interface{}) - if !ok { - return nil, fmt.Errorf("rules must be an array") - } - - rules := make([]providers.MappingRule, len(rulesSlice)) - for i, ruleInterface := range rulesSlice { - ruleMap, ok := ruleInterface.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("rule must be an object") - } - - rule := providers.MappingRule{} - if claim, ok := ruleMap["claim"].(string); ok { - rule.Claim = claim - } - if value, ok := ruleMap["value"].(string); ok { - rule.Value = value - } - if role, ok := ruleMap["role"].(string); ok { - rule.Role = role - } - if condition, ok := ruleMap["condition"].(string); ok { - rule.Condition = condition - } - - rules[i] = rule - } - roleMapping.Rules = rules - } - - // Convert default role - if defaultRole, ok := roleMappingMap["defaultRole"].(string); ok { - roleMapping.DefaultRole = defaultRole - } - - return roleMapping, nil -} - -// ValidateProviderConfig validates a provider configuration -func (f *ProviderFactory) ValidateProviderConfig(config *ProviderConfig) error { - if config == nil { - return fmt.Errorf("provider config cannot be nil") - } - - if config.Name == "" { - return fmt.Errorf("provider name cannot be empty") - } - - if config.Type == "" { - return fmt.Errorf("provider type cannot be empty") - } - - if config.Config == nil { - return fmt.Errorf("provider config cannot be nil") - } - - // Type-specific validation - switch config.Type { - case "oidc": - return f.validateOIDCConfig(config.Config) - case "ldap": - return f.validateLDAPConfig(config.Config) - case "saml": - return f.validateSAMLConfig(config.Config) - default: - return fmt.Errorf("unsupported provider type: %s", config.Type) - } -} - -// validateOIDCConfig validates OIDC provider configuration -func (f *ProviderFactory) validateOIDCConfig(config map[string]interface{}) error { - if _, ok := config[ConfigFieldIssuer]; !ok { - return fmt.Errorf("OIDC provider requires '%s' field", ConfigFieldIssuer) - } - - if _, ok := config[ConfigFieldClientID]; !ok { - return fmt.Errorf("OIDC provider requires '%s' field", ConfigFieldClientID) - } - - return nil -} - -// validateLDAPConfig validates LDAP provider configuration -func (f *ProviderFactory) validateLDAPConfig(config map[string]interface{}) error { - // TODO: Implement when LDAP provider is available - return nil -} - -// validateSAMLConfig validates SAML provider configuration -func (f *ProviderFactory) validateSAMLConfig(config map[string]interface{}) error { - // TODO: Implement when SAML provider is available - return nil -} - -// GetSupportedProviderTypes returns list of supported provider types -func (f *ProviderFactory) GetSupportedProviderTypes() []string { - return []string{ProviderTypeOIDC} -} diff --git a/weed/iam/sts/provider_factory_test.go b/weed/iam/sts/provider_factory_test.go deleted file mode 100644 index 8c36142a7..000000000 --- a/weed/iam/sts/provider_factory_test.go +++ /dev/null @@ -1,312 +0,0 @@ -package sts - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestProviderFactory_CreateOIDCProvider(t *testing.T) { - factory := NewProviderFactory() - - config := &ProviderConfig{ - Name: "test-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - "clientSecret": "test-secret", - "jwksUri": "https://test-issuer.com/.well-known/jwks.json", - "scopes": []string{"openid", "profile", "email"}, - }, - } - - provider, err := factory.CreateProvider(config) - require.NoError(t, err) - assert.NotNil(t, provider) - assert.Equal(t, "test-oidc", provider.Name()) -} - -// Note: Mock provider tests removed - mock providers are now test-only -// and not available through the production ProviderFactory - -func TestProviderFactory_DisabledProvider(t *testing.T) { - factory := NewProviderFactory() - - config := &ProviderConfig{ - Name: "disabled-provider", - Type: "oidc", - Enabled: false, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - }, - } - - provider, err := factory.CreateProvider(config) - require.NoError(t, err) - assert.Nil(t, provider) // Should return nil for disabled providers -} - -func TestProviderFactory_InvalidProviderType(t *testing.T) { - factory := NewProviderFactory() - - config := &ProviderConfig{ - Name: "invalid-provider", - Type: "unsupported-type", - Enabled: true, - Config: map[string]interface{}{}, - } - - provider, err := factory.CreateProvider(config) - assert.Error(t, err) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "unsupported provider type") -} - -func TestProviderFactory_LoadMultipleProviders(t *testing.T) { - factory := NewProviderFactory() - - configs := []*ProviderConfig{ - { - Name: "oidc-provider", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://oidc-issuer.com", - "clientId": "oidc-client", - }, - }, - - { - Name: "disabled-provider", - Type: "oidc", - Enabled: false, - Config: map[string]interface{}{ - "issuer": "https://disabled-issuer.com", - "clientId": "disabled-client", - }, - }, - } - - providers, err := factory.LoadProvidersFromConfig(configs) - require.NoError(t, err) - assert.Len(t, providers, 1) // Only enabled providers should be loaded - - assert.Contains(t, providers, "oidc-provider") - assert.NotContains(t, providers, "disabled-provider") -} - -func TestProviderFactory_ValidateOIDCConfig(t *testing.T) { - factory := NewProviderFactory() - - t.Run("valid config", func(t *testing.T) { - config := &ProviderConfig{ - Name: "valid-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://valid-issuer.com", - "clientId": "valid-client", - }, - } - - err := factory.ValidateProviderConfig(config) - assert.NoError(t, err) - }) - - t.Run("missing issuer", func(t *testing.T) { - config := &ProviderConfig{ - Name: "invalid-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "clientId": "valid-client", - }, - } - - err := factory.ValidateProviderConfig(config) - assert.Error(t, err) - assert.Contains(t, err.Error(), "issuer") - }) - - t.Run("missing clientId", func(t *testing.T) { - config := &ProviderConfig{ - Name: "invalid-oidc", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://valid-issuer.com", - }, - } - - err := factory.ValidateProviderConfig(config) - assert.Error(t, err) - assert.Contains(t, err.Error(), "clientId") - }) -} - -func TestProviderFactory_ConvertToStringSlice(t *testing.T) { - factory := NewProviderFactory() - - t.Run("string slice", func(t *testing.T) { - input := []string{"a", "b", "c"} - result, err := factory.convertToStringSlice(input) - require.NoError(t, err) - assert.Equal(t, []string{"a", "b", "c"}, result) - }) - - t.Run("interface slice", func(t *testing.T) { - input := []interface{}{"a", "b", "c"} - result, err := factory.convertToStringSlice(input) - require.NoError(t, err) - assert.Equal(t, []string{"a", "b", "c"}, result) - }) - - t.Run("invalid type", func(t *testing.T) { - input := "not-a-slice" - result, err := factory.convertToStringSlice(input) - assert.Error(t, err) - assert.Nil(t, result) - }) -} - -func TestProviderFactory_ConfigConversionErrors(t *testing.T) { - factory := NewProviderFactory() - - t.Run("invalid scopes type", func(t *testing.T) { - config := &ProviderConfig{ - Name: "invalid-scopes", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - "scopes": "invalid-not-array", // Should be array - }, - } - - provider, err := factory.CreateProvider(config) - assert.Error(t, err) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "failed to convert scopes") - }) - - t.Run("invalid claimsMapping type", func(t *testing.T) { - config := &ProviderConfig{ - Name: "invalid-claims", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - "claimsMapping": "invalid-not-map", // Should be map - }, - } - - provider, err := factory.CreateProvider(config) - assert.Error(t, err) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "failed to convert claimsMapping") - }) - - t.Run("invalid roleMapping type", func(t *testing.T) { - config := &ProviderConfig{ - Name: "invalid-roles", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - "roleMapping": "invalid-not-map", // Should be map - }, - } - - provider, err := factory.CreateProvider(config) - assert.Error(t, err) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "failed to convert roleMapping") - }) -} - -func TestProviderFactory_ConvertToStringMap(t *testing.T) { - factory := NewProviderFactory() - - t.Run("string map", func(t *testing.T) { - input := map[string]string{"key1": "value1", "key2": "value2"} - result, err := factory.convertToStringMap(input) - require.NoError(t, err) - assert.Equal(t, map[string]string{"key1": "value1", "key2": "value2"}, result) - }) - - t.Run("interface map", func(t *testing.T) { - input := map[string]interface{}{"key1": "value1", "key2": "value2"} - result, err := factory.convertToStringMap(input) - require.NoError(t, err) - assert.Equal(t, map[string]string{"key1": "value1", "key2": "value2"}, result) - }) - - t.Run("invalid type", func(t *testing.T) { - input := "not-a-map" - result, err := factory.convertToStringMap(input) - assert.Error(t, err) - assert.Nil(t, result) - }) -} - -func TestProviderFactory_GetSupportedProviderTypes(t *testing.T) { - factory := NewProviderFactory() - - supportedTypes := factory.GetSupportedProviderTypes() - assert.Contains(t, supportedTypes, "oidc") - assert.Len(t, supportedTypes, 1) // Currently only OIDC is supported in production -} - -func TestSTSService_LoadProvidersFromConfig(t *testing.T) { - stsConfig := &STSConfig{ - TokenDuration: FlexibleDuration{3600 * time.Second}, - MaxSessionLength: FlexibleDuration{43200 * time.Second}, - Issuer: "test-issuer", - SigningKey: []byte("test-signing-key-32-characters-long"), - Providers: []*ProviderConfig{ - { - Name: "test-provider", - Type: "oidc", - Enabled: true, - Config: map[string]interface{}{ - "issuer": "https://test-issuer.com", - "clientId": "test-client", - }, - }, - }, - } - - stsService := NewSTSService() - err := stsService.Initialize(stsConfig) - require.NoError(t, err) - - // Check that provider was loaded - assert.Len(t, stsService.providers, 1) - assert.Contains(t, stsService.providers, "test-provider") - assert.Equal(t, "test-provider", stsService.providers["test-provider"].Name()) -} - -func TestSTSService_NoProvidersConfig(t *testing.T) { - stsConfig := &STSConfig{ - TokenDuration: FlexibleDuration{3600 * time.Second}, - MaxSessionLength: FlexibleDuration{43200 * time.Second}, - Issuer: "test-issuer", - SigningKey: []byte("test-signing-key-32-characters-long"), - // No providers configured - } - - stsService := NewSTSService() - err := stsService.Initialize(stsConfig) - require.NoError(t, err) - - // Should initialize successfully with no providers - assert.Len(t, stsService.providers, 0) -} diff --git a/weed/iam/sts/security_test.go b/weed/iam/sts/security_test.go deleted file mode 100644 index 2d230d796..000000000 --- a/weed/iam/sts/security_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package sts - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSecurityIssuerToProviderMapping tests the security fix that ensures JWT tokens -// with specific issuer claims can only be validated by the provider registered for that issuer -func TestSecurityIssuerToProviderMapping(t *testing.T) { - ctx := context.Background() - - // Create STS service with two mock providers - service := NewSTSService() - config := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - } - - err := service.Initialize(config) - require.NoError(t, err) - - // Set up mock trust policy validator - mockValidator := &MockTrustPolicyValidator{} - service.SetTrustPolicyValidator(mockValidator) - - // Create two mock providers with different issuers - providerA := &MockIdentityProviderWithIssuer{ - name: "provider-a", - issuer: "https://provider-a.com", - validTokens: map[string]bool{ - "token-for-provider-a": true, - }, - } - - providerB := &MockIdentityProviderWithIssuer{ - name: "provider-b", - issuer: "https://provider-b.com", - validTokens: map[string]bool{ - "token-for-provider-b": true, - }, - } - - // Register both providers - err = service.RegisterProvider(providerA) - require.NoError(t, err) - err = service.RegisterProvider(providerB) - require.NoError(t, err) - - // Create JWT tokens with specific issuer claims - tokenForProviderA := createTestJWT(t, "https://provider-a.com", "user-a") - tokenForProviderB := createTestJWT(t, "https://provider-b.com", "user-b") - - t.Run("jwt_token_with_issuer_a_only_validated_by_provider_a", func(t *testing.T) { - // This should succeed - token has issuer A and provider A is registered - identity, provider, err := service.validateWebIdentityToken(ctx, tokenForProviderA) - assert.NoError(t, err) - assert.NotNil(t, identity) - assert.Equal(t, "provider-a", provider.Name()) - }) - - t.Run("jwt_token_with_issuer_b_only_validated_by_provider_b", func(t *testing.T) { - // This should succeed - token has issuer B and provider B is registered - identity, provider, err := service.validateWebIdentityToken(ctx, tokenForProviderB) - assert.NoError(t, err) - assert.NotNil(t, identity) - assert.Equal(t, "provider-b", provider.Name()) - }) - - t.Run("jwt_token_with_unregistered_issuer_fails", func(t *testing.T) { - // Create token with unregistered issuer - tokenWithUnknownIssuer := createTestJWT(t, "https://unknown-issuer.com", "user-x") - - // This should fail - no provider registered for this issuer - identity, provider, err := service.validateWebIdentityToken(ctx, tokenWithUnknownIssuer) - assert.Error(t, err) - assert.Nil(t, identity) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "no identity provider registered for issuer: https://unknown-issuer.com") - }) - - t.Run("non_jwt_tokens_are_rejected", func(t *testing.T) { - // Non-JWT tokens should be rejected - no fallback mechanism exists for security - identity, provider, err := service.validateWebIdentityToken(ctx, "token-for-provider-a") - assert.Error(t, err) - assert.Nil(t, identity) - assert.Nil(t, provider) - assert.Contains(t, err.Error(), "web identity token must be a valid JWT token") - }) -} - -// createTestJWT creates a test JWT token with the specified issuer and subject -func createTestJWT(t *testing.T, issuer, subject string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - }) - - tokenString, err := token.SignedString([]byte("test-signing-key")) - require.NoError(t, err) - return tokenString -} - -// MockIdentityProviderWithIssuer is a mock provider that supports issuer mapping -type MockIdentityProviderWithIssuer struct { - name string - issuer string - validTokens map[string]bool -} - -func (m *MockIdentityProviderWithIssuer) Name() string { - return m.name -} - -func (m *MockIdentityProviderWithIssuer) GetIssuer() string { - return m.issuer -} - -func (m *MockIdentityProviderWithIssuer) Initialize(config interface{}) error { - return nil -} - -func (m *MockIdentityProviderWithIssuer) Authenticate(ctx context.Context, token string) (*providers.ExternalIdentity, error) { - // For JWT tokens, parse and validate the token format - if len(token) > 50 && strings.Contains(token, ".") { - // This looks like a JWT - parse it to get the subject - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err != nil { - return nil, fmt.Errorf("invalid JWT token") - } - - claims, ok := parsedToken.Claims.(jwt.MapClaims) - if !ok { - return nil, fmt.Errorf("invalid claims") - } - - issuer, _ := claims["iss"].(string) - subject, _ := claims["sub"].(string) - - // Verify the issuer matches what we expect - if issuer != m.issuer { - return nil, fmt.Errorf("token issuer %s does not match provider issuer %s", issuer, m.issuer) - } - - return &providers.ExternalIdentity{ - UserID: subject, - Email: subject + "@" + m.name + ".com", - Provider: m.name, - }, nil - } - - // For non-JWT tokens, check our simple token list - if m.validTokens[token] { - return &providers.ExternalIdentity{ - UserID: "test-user", - Email: "test@" + m.name + ".com", - Provider: m.name, - }, nil - } - - return nil, fmt.Errorf("invalid token") -} - -func (m *MockIdentityProviderWithIssuer) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - return &providers.ExternalIdentity{ - UserID: userID, - Email: userID + "@" + m.name + ".com", - Provider: m.name, - }, nil -} - -func (m *MockIdentityProviderWithIssuer) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if m.validTokens[token] { - return &providers.TokenClaims{ - Subject: "test-user", - Issuer: m.issuer, - }, nil - } - return nil, fmt.Errorf("invalid token") -} diff --git a/weed/iam/sts/session_claims.go b/weed/iam/sts/session_claims.go deleted file mode 100644 index 8d065efcd..000000000 --- a/weed/iam/sts/session_claims.go +++ /dev/null @@ -1,154 +0,0 @@ -package sts - -import ( - "time" - - "github.com/golang-jwt/jwt/v5" -) - -// STSSessionClaims represents comprehensive session information embedded in JWT tokens -// This eliminates the need for separate session storage by embedding all session -// metadata directly in the token itself - enabling true stateless operation -type STSSessionClaims struct { - jwt.RegisteredClaims - - // Session identification - SessionId string `json:"sid"` // session_id (abbreviated for smaller tokens) - SessionName string `json:"snam"` // session_name (abbreviated for smaller tokens) - TokenType string `json:"typ"` // token_type - - // Role information - RoleArn string `json:"role"` // role_arn - AssumedRole string `json:"assumed"` // assumed_role_user - Principal string `json:"principal"` // principal_arn - - // Authorization data - Policies []string `json:"pol,omitempty"` // policies (abbreviated) - - // Identity provider information - IdentityProvider string `json:"idp"` // identity_provider - ExternalUserId string `json:"ext_uid"` // external_user_id - ProviderIssuer string `json:"prov_iss"` // provider_issuer - - // Request context (optional, for policy evaluation) - RequestContext map[string]interface{} `json:"req_ctx,omitempty"` - - // Session metadata - AssumedAt time.Time `json:"assumed_at"` // when role was assumed - MaxDuration int64 `json:"max_dur,omitempty"` // maximum session duration in seconds -} - -// NewSTSSessionClaims creates new STS session claims with all required information -func NewSTSSessionClaims(sessionId, issuer string, expiresAt time.Time) *STSSessionClaims { - now := time.Now() - return &STSSessionClaims{ - RegisteredClaims: jwt.RegisteredClaims{ - Issuer: issuer, - Subject: sessionId, - IssuedAt: jwt.NewNumericDate(now), - ExpiresAt: jwt.NewNumericDate(expiresAt), - NotBefore: jwt.NewNumericDate(now), - }, - SessionId: sessionId, - TokenType: TokenTypeSession, - AssumedAt: now, - } -} - -// ToSessionInfo converts JWT claims back to SessionInfo structure -// This enables seamless integration with existing code expecting SessionInfo -func (c *STSSessionClaims) ToSessionInfo() *SessionInfo { - var expiresAt time.Time - if c.ExpiresAt != nil { - expiresAt = c.ExpiresAt.Time - } - - return &SessionInfo{ - SessionId: c.SessionId, - SessionName: c.SessionName, - RoleArn: c.RoleArn, - AssumedRoleUser: c.AssumedRole, - Principal: c.Principal, - Policies: c.Policies, - ExpiresAt: expiresAt, - IdentityProvider: c.IdentityProvider, - ExternalUserId: c.ExternalUserId, - ProviderIssuer: c.ProviderIssuer, - RequestContext: c.RequestContext, - } -} - -// IsValid checks if the session claims are valid (not expired, etc.) -func (c *STSSessionClaims) IsValid() bool { - now := time.Now() - - // Check expiration - if c.ExpiresAt != nil && c.ExpiresAt.Before(now) { - return false - } - - // Check not-before - if c.NotBefore != nil && c.NotBefore.After(now) { - return false - } - - // Ensure required fields are present - if c.SessionId == "" || c.RoleArn == "" || c.Principal == "" { - return false - } - - return true -} - -// GetSessionId returns the session identifier -func (c *STSSessionClaims) GetSessionId() string { - return c.SessionId -} - -// GetExpiresAt returns the expiration time -func (c *STSSessionClaims) GetExpiresAt() time.Time { - if c.ExpiresAt != nil { - return c.ExpiresAt.Time - } - return time.Time{} -} - -// WithRoleInfo sets role-related information in the claims -func (c *STSSessionClaims) WithRoleInfo(roleArn, assumedRole, principal string) *STSSessionClaims { - c.RoleArn = roleArn - c.AssumedRole = assumedRole - c.Principal = principal - return c -} - -// WithPolicies sets the policies associated with this session -func (c *STSSessionClaims) WithPolicies(policies []string) *STSSessionClaims { - c.Policies = policies - return c -} - -// WithIdentityProvider sets identity provider information -func (c *STSSessionClaims) WithIdentityProvider(providerName, externalUserId, providerIssuer string) *STSSessionClaims { - c.IdentityProvider = providerName - c.ExternalUserId = externalUserId - c.ProviderIssuer = providerIssuer - return c -} - -// WithRequestContext sets request context for policy evaluation -func (c *STSSessionClaims) WithRequestContext(ctx map[string]interface{}) *STSSessionClaims { - c.RequestContext = ctx - return c -} - -// WithMaxDuration sets the maximum session duration -func (c *STSSessionClaims) WithMaxDuration(duration time.Duration) *STSSessionClaims { - c.MaxDuration = int64(duration.Seconds()) - return c -} - -// WithSessionName sets the session name -func (c *STSSessionClaims) WithSessionName(sessionName string) *STSSessionClaims { - c.SessionName = sessionName - return c -} diff --git a/weed/iam/sts/session_policy_test.go b/weed/iam/sts/session_policy_test.go deleted file mode 100644 index 6f94169ec..000000000 --- a/weed/iam/sts/session_policy_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package sts - -import ( - "context" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createSessionPolicyTestJWT creates a test JWT token for session policy tests -func createSessionPolicyTestJWT(t *testing.T, issuer, subject string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - }) - - tokenString, err := token.SignedString([]byte("test-signing-key")) - require.NoError(t, err) - return tokenString -} - -// TestAssumeRoleWithWebIdentity_SessionPolicy tests the handling of the Policy field -// in AssumeRoleWithWebIdentityRequest to ensure users are properly informed that -// session policies are not currently supported -func TestAssumeRoleWithWebIdentity_SessionPolicy(t *testing.T) { - service := setupTestSTSService(t) - - t.Run("should_reject_request_with_session_policy", func(t *testing.T) { - ctx := context.Background() - - // Create a request with a session policy - sessionPolicy := `{ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::example-bucket/*" - }] - }` - - testToken := createSessionPolicyTestJWT(t, "test-issuer", "test-user") - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: testToken, - RoleSessionName: "test-session", - DurationSeconds: nil, // Use default - Policy: &sessionPolicy, // โ† Session policy provided - } - - // Should return an error indicating session policies are not supported - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - // Verify the error - assert.Error(t, err) - assert.Nil(t, response) - assert.Contains(t, err.Error(), "session policies are not currently supported") - assert.Contains(t, err.Error(), "Policy parameter must be omitted") - }) - - t.Run("should_succeed_without_session_policy", func(t *testing.T) { - ctx := context.Background() - testToken := createSessionPolicyTestJWT(t, "test-issuer", "test-user") - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: testToken, - RoleSessionName: "test-session", - DurationSeconds: nil, // Use default - Policy: nil, // โ† No session policy - } - - // Should succeed without session policy - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - // Verify success - require.NoError(t, err) - require.NotNil(t, response) - assert.NotNil(t, response.Credentials) - assert.NotEmpty(t, response.Credentials.AccessKeyId) - assert.NotEmpty(t, response.Credentials.SecretAccessKey) - assert.NotEmpty(t, response.Credentials.SessionToken) - }) - - t.Run("should_succeed_with_empty_policy_pointer", func(t *testing.T) { - ctx := context.Background() - testToken := createSessionPolicyTestJWT(t, "test-issuer", "test-user") - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: testToken, - RoleSessionName: "test-session", - Policy: nil, // โ† Explicitly nil - } - - // Should succeed with nil policy pointer - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - require.NoError(t, err) - require.NotNil(t, response) - assert.NotNil(t, response.Credentials) - }) - - t.Run("should_reject_empty_string_policy", func(t *testing.T) { - ctx := context.Background() - - emptyPolicy := "" // Empty string, but still a non-nil pointer - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: createSessionPolicyTestJWT(t, "test-issuer", "test-user"), - RoleSessionName: "test-session", - Policy: &emptyPolicy, // โ† Non-nil pointer to empty string - } - - // Should still reject because pointer is not nil - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - assert.Error(t, err) - assert.Nil(t, response) - assert.Contains(t, err.Error(), "session policies are not currently supported") - }) -} - -// TestAssumeRoleWithWebIdentity_SessionPolicy_ErrorMessage tests that the error message -// is clear and helps users understand what they need to do -func TestAssumeRoleWithWebIdentity_SessionPolicy_ErrorMessage(t *testing.T) { - service := setupTestSTSService(t) - - ctx := context.Background() - complexPolicy := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowS3Access", - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::my-bucket/*", - "arn:aws:s3:::my-bucket" - ], - "Condition": { - "StringEquals": { - "s3:prefix": ["documents/", "images/"] - } - } - } - ] - }` - - testToken := createSessionPolicyTestJWT(t, "test-issuer", "test-user") - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: testToken, - RoleSessionName: "test-session-with-complex-policy", - Policy: &complexPolicy, - } - - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - // Verify error details - require.Error(t, err) - assert.Nil(t, response) - - errorMsg := err.Error() - - // The error should be clear and actionable - assert.Contains(t, errorMsg, "session policies are not currently supported", - "Error should explain that session policies aren't supported") - assert.Contains(t, errorMsg, "Policy parameter must be omitted", - "Error should specify what action the user needs to take") - - // Should NOT contain internal implementation details - assert.NotContains(t, errorMsg, "nil pointer", - "Error should not expose internal implementation details") - assert.NotContains(t, errorMsg, "struct field", - "Error should not expose internal struct details") -} - -// Test edge case scenarios for the Policy field handling -func TestAssumeRoleWithWebIdentity_SessionPolicy_EdgeCases(t *testing.T) { - service := setupTestSTSService(t) - - t.Run("malformed_json_policy_still_rejected", func(t *testing.T) { - ctx := context.Background() - malformedPolicy := `{"Version": "2012-10-17", "Statement": [` // Incomplete JSON - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: createSessionPolicyTestJWT(t, "test-issuer", "test-user"), - RoleSessionName: "test-session", - Policy: &malformedPolicy, - } - - // Should reject before even parsing the policy JSON - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - assert.Error(t, err) - assert.Nil(t, response) - assert.Contains(t, err.Error(), "session policies are not currently supported") - }) - - t.Run("policy_with_whitespace_still_rejected", func(t *testing.T) { - ctx := context.Background() - whitespacePolicy := " \t\n " // Only whitespace - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: createSessionPolicyTestJWT(t, "test-issuer", "test-user"), - RoleSessionName: "test-session", - Policy: &whitespacePolicy, - } - - // Should reject any non-nil policy, even whitespace - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - assert.Error(t, err) - assert.Nil(t, response) - assert.Contains(t, err.Error(), "session policies are not currently supported") - }) -} - -// TestAssumeRoleWithWebIdentity_PolicyFieldDocumentation verifies that the struct -// field is properly documented to help developers understand the limitation -func TestAssumeRoleWithWebIdentity_PolicyFieldDocumentation(t *testing.T) { - // This test documents the current behavior and ensures the struct field - // exists with proper typing - request := &AssumeRoleWithWebIdentityRequest{} - - // Verify the Policy field exists and has the correct type - assert.IsType(t, (*string)(nil), request.Policy, - "Policy field should be *string type for optional JSON policy") - - // Verify initial value is nil (no policy by default) - assert.Nil(t, request.Policy, - "Policy field should default to nil (no session policy)") - - // Test that we can set it to a string pointer (even though it will be rejected) - policyValue := `{"Version": "2012-10-17"}` - request.Policy = &policyValue - assert.NotNil(t, request.Policy, "Should be able to assign policy value") - assert.Equal(t, policyValue, *request.Policy, "Policy value should be preserved") -} - -// TestAssumeRoleWithCredentials_NoSessionPolicySupport verifies that -// AssumeRoleWithCredentialsRequest doesn't have a Policy field, which is correct -// since credential-based role assumption typically doesn't support session policies -func TestAssumeRoleWithCredentials_NoSessionPolicySupport(t *testing.T) { - // Verify that AssumeRoleWithCredentialsRequest doesn't have a Policy field - // This is the expected behavior since session policies are typically only - // supported with web identity (OIDC/SAML) flows in AWS STS - request := &AssumeRoleWithCredentialsRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - Username: "testuser", - Password: "testpass", - RoleSessionName: "test-session", - ProviderName: "ldap", - } - - // The struct should compile and work without a Policy field - assert.NotNil(t, request) - assert.Equal(t, "arn:seaweed:iam::role/TestRole", request.RoleArn) - assert.Equal(t, "testuser", request.Username) - - // This documents that credential-based assume role does NOT support session policies - // which matches AWS STS behavior where session policies are primarily for - // web identity (OIDC/SAML) and federation scenarios -} diff --git a/weed/iam/sts/sts_service.go b/weed/iam/sts/sts_service.go deleted file mode 100644 index 7305adb4b..000000000 --- a/weed/iam/sts/sts_service.go +++ /dev/null @@ -1,826 +0,0 @@ -package sts - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/seaweedfs/seaweedfs/weed/iam/utils" -) - -// TrustPolicyValidator interface for validating trust policies during role assumption -type TrustPolicyValidator interface { - // ValidateTrustPolicyForWebIdentity validates if a web identity token can assume a role - ValidateTrustPolicyForWebIdentity(ctx context.Context, roleArn string, webIdentityToken string) error - - // ValidateTrustPolicyForCredentials validates if credentials can assume a role - ValidateTrustPolicyForCredentials(ctx context.Context, roleArn string, identity *providers.ExternalIdentity) error -} - -// FlexibleDuration wraps time.Duration to support both integer nanoseconds and duration strings in JSON -type FlexibleDuration struct { - time.Duration -} - -// UnmarshalJSON implements JSON unmarshaling for FlexibleDuration -// Supports both: 3600000000000 (nanoseconds) and "1h" (duration string) -func (fd *FlexibleDuration) UnmarshalJSON(data []byte) error { - // Try to unmarshal as a duration string first (e.g., "1h", "30m") - var durationStr string - if err := json.Unmarshal(data, &durationStr); err == nil { - duration, parseErr := time.ParseDuration(durationStr) - if parseErr != nil { - return fmt.Errorf("invalid duration string %q: %w", durationStr, parseErr) - } - fd.Duration = duration - return nil - } - - // If that fails, try to unmarshal as an integer (nanoseconds for backward compatibility) - var nanoseconds int64 - if err := json.Unmarshal(data, &nanoseconds); err == nil { - fd.Duration = time.Duration(nanoseconds) - return nil - } - - // If both fail, try unmarshaling as a quoted number string (edge case) - var numberStr string - if err := json.Unmarshal(data, &numberStr); err == nil { - if nanoseconds, parseErr := strconv.ParseInt(numberStr, 10, 64); parseErr == nil { - fd.Duration = time.Duration(nanoseconds) - return nil - } - } - - return fmt.Errorf("unable to parse duration from %s (expected duration string like \"1h\" or integer nanoseconds)", data) -} - -// MarshalJSON implements JSON marshaling for FlexibleDuration -// Always marshals as a human-readable duration string -func (fd FlexibleDuration) MarshalJSON() ([]byte, error) { - return json.Marshal(fd.Duration.String()) -} - -// STSService provides Security Token Service functionality -// This service is now completely stateless - all session information is embedded -// in JWT tokens, eliminating the need for session storage and enabling true -// distributed operation without shared state -type STSService struct { - Config *STSConfig // Public for access by other components - initialized bool - providers map[string]providers.IdentityProvider - issuerToProvider map[string]providers.IdentityProvider // Efficient issuer-based provider lookup - tokenGenerator *TokenGenerator - trustPolicyValidator TrustPolicyValidator // Interface for trust policy validation -} - -// STSConfig holds STS service configuration -type STSConfig struct { - // TokenDuration is the default duration for issued tokens - TokenDuration FlexibleDuration `json:"tokenDuration"` - - // MaxSessionLength is the maximum duration for any session - MaxSessionLength FlexibleDuration `json:"maxSessionLength"` - - // Issuer is the STS issuer identifier - Issuer string `json:"issuer"` - - // SigningKey is used to sign session tokens - SigningKey []byte `json:"signingKey"` - - // Providers configuration - enables automatic provider loading - Providers []*ProviderConfig `json:"providers,omitempty"` -} - -// ProviderConfig holds identity provider configuration -type ProviderConfig struct { - // Name is the unique identifier for the provider - Name string `json:"name"` - - // Type specifies the provider type (oidc, ldap, etc.) - Type string `json:"type"` - - // Config contains provider-specific configuration - Config map[string]interface{} `json:"config"` - - // Enabled indicates if this provider should be active - Enabled bool `json:"enabled"` -} - -// AssumeRoleWithWebIdentityRequest represents a request to assume role with web identity -type AssumeRoleWithWebIdentityRequest struct { - // RoleArn is the ARN of the role to assume - RoleArn string `json:"RoleArn"` - - // WebIdentityToken is the OIDC token from the identity provider - WebIdentityToken string `json:"WebIdentityToken"` - - // RoleSessionName is a name for the assumed role session - RoleSessionName string `json:"RoleSessionName"` - - // DurationSeconds is the duration of the role session (optional) - DurationSeconds *int64 `json:"DurationSeconds,omitempty"` - - // Policy is an optional session policy (optional) - Policy *string `json:"Policy,omitempty"` -} - -// AssumeRoleWithCredentialsRequest represents a request to assume role with username/password -type AssumeRoleWithCredentialsRequest struct { - // RoleArn is the ARN of the role to assume - RoleArn string `json:"RoleArn"` - - // Username is the username for authentication - Username string `json:"Username"` - - // Password is the password for authentication - Password string `json:"Password"` - - // RoleSessionName is a name for the assumed role session - RoleSessionName string `json:"RoleSessionName"` - - // ProviderName is the name of the identity provider to use - ProviderName string `json:"ProviderName"` - - // DurationSeconds is the duration of the role session (optional) - DurationSeconds *int64 `json:"DurationSeconds,omitempty"` -} - -// AssumeRoleResponse represents the response from assume role operations -type AssumeRoleResponse struct { - // Credentials contains the temporary security credentials - Credentials *Credentials `json:"Credentials"` - - // AssumedRoleUser contains information about the assumed role user - AssumedRoleUser *AssumedRoleUser `json:"AssumedRoleUser"` - - // PackedPolicySize is the percentage of max policy size used (AWS compatibility) - PackedPolicySize *int64 `json:"PackedPolicySize,omitempty"` -} - -// Credentials represents temporary security credentials -type Credentials struct { - // AccessKeyId is the access key ID - AccessKeyId string `json:"AccessKeyId"` - - // SecretAccessKey is the secret access key - SecretAccessKey string `json:"SecretAccessKey"` - - // SessionToken is the session token - SessionToken string `json:"SessionToken"` - - // Expiration is when the credentials expire - Expiration time.Time `json:"Expiration"` -} - -// AssumedRoleUser contains information about the assumed role user -type AssumedRoleUser struct { - // AssumedRoleId is the unique identifier of the assumed role - AssumedRoleId string `json:"AssumedRoleId"` - - // Arn is the ARN of the assumed role user - Arn string `json:"Arn"` - - // Subject is the subject identifier from the identity provider - Subject string `json:"Subject,omitempty"` -} - -// SessionInfo represents information about an active session -type SessionInfo struct { - // SessionId is the unique identifier for the session - SessionId string `json:"sessionId"` - - // SessionName is the name of the role session - SessionName string `json:"sessionName"` - - // RoleArn is the ARN of the assumed role - RoleArn string `json:"roleArn"` - - // AssumedRoleUser contains information about the assumed role user - AssumedRoleUser string `json:"assumedRoleUser"` - - // Principal is the principal ARN - Principal string `json:"principal"` - - // Subject is the subject identifier from the identity provider - Subject string `json:"subject"` - - // Provider is the identity provider used (legacy field) - Provider string `json:"provider"` - - // IdentityProvider is the identity provider used - IdentityProvider string `json:"identityProvider"` - - // ExternalUserId is the external user identifier from the provider - ExternalUserId string `json:"externalUserId"` - - // ProviderIssuer is the issuer from the identity provider - ProviderIssuer string `json:"providerIssuer"` - - // Policies are the policies associated with this session - Policies []string `json:"policies"` - - // RequestContext contains additional request context for policy evaluation - RequestContext map[string]interface{} `json:"requestContext,omitempty"` - - // CreatedAt is when the session was created - CreatedAt time.Time `json:"createdAt"` - - // ExpiresAt is when the session expires - ExpiresAt time.Time `json:"expiresAt"` - - // Credentials are the temporary credentials for this session - Credentials *Credentials `json:"credentials"` -} - -// NewSTSService creates a new STS service -func NewSTSService() *STSService { - return &STSService{ - providers: make(map[string]providers.IdentityProvider), - issuerToProvider: make(map[string]providers.IdentityProvider), - } -} - -// Initialize initializes the STS service with configuration -func (s *STSService) Initialize(config *STSConfig) error { - if config == nil { - return fmt.Errorf(ErrConfigCannotBeNil) - } - - if err := s.validateConfig(config); err != nil { - return fmt.Errorf("invalid STS configuration: %w", err) - } - - s.Config = config - - // Initialize token generator for stateless JWT operations - s.tokenGenerator = NewTokenGenerator(config.SigningKey, config.Issuer) - - // Load identity providers from configuration - if err := s.loadProvidersFromConfig(config); err != nil { - return fmt.Errorf("failed to load identity providers: %w", err) - } - - s.initialized = true - return nil -} - -// validateConfig validates the STS configuration -func (s *STSService) validateConfig(config *STSConfig) error { - if config.TokenDuration.Duration <= 0 { - return fmt.Errorf(ErrInvalidTokenDuration) - } - - if config.MaxSessionLength.Duration <= 0 { - return fmt.Errorf(ErrInvalidMaxSessionLength) - } - - if config.Issuer == "" { - return fmt.Errorf(ErrIssuerRequired) - } - - if len(config.SigningKey) < MinSigningKeyLength { - return fmt.Errorf(ErrSigningKeyTooShort, MinSigningKeyLength) - } - - return nil -} - -// loadProvidersFromConfig loads identity providers from configuration -func (s *STSService) loadProvidersFromConfig(config *STSConfig) error { - if len(config.Providers) == 0 { - glog.V(2).Infof("No providers configured in STS config") - return nil - } - - factory := NewProviderFactory() - - // Load all providers from configuration - providersMap, err := factory.LoadProvidersFromConfig(config.Providers) - if err != nil { - return fmt.Errorf("failed to load providers from config: %w", err) - } - - // Replace current providers with new ones - s.providers = providersMap - - // Also populate the issuerToProvider map for efficient and secure JWT validation - s.issuerToProvider = make(map[string]providers.IdentityProvider) - for name, provider := range s.providers { - issuer := s.extractIssuerFromProvider(provider) - if issuer != "" { - if _, exists := s.issuerToProvider[issuer]; exists { - glog.Warningf("Duplicate issuer %s found for provider %s. Overwriting.", issuer, name) - } - s.issuerToProvider[issuer] = provider - glog.V(2).Infof("Registered provider %s with issuer %s for efficient lookup", name, issuer) - } - } - - glog.V(1).Infof("Successfully loaded %d identity providers: %v", - len(s.providers), s.getProviderNames()) - - return nil -} - -// getProviderNames returns list of loaded provider names -func (s *STSService) getProviderNames() []string { - names := make([]string, 0, len(s.providers)) - for name := range s.providers { - names = append(names, name) - } - return names -} - -// IsInitialized returns whether the service is initialized -func (s *STSService) IsInitialized() bool { - return s.initialized -} - -// RegisterProvider registers an identity provider -func (s *STSService) RegisterProvider(provider providers.IdentityProvider) error { - if provider == nil { - return fmt.Errorf(ErrProviderCannotBeNil) - } - - name := provider.Name() - if name == "" { - return fmt.Errorf(ErrProviderNameEmpty) - } - - s.providers[name] = provider - - // Try to extract issuer information for efficient lookup - // This is a best-effort approach for different provider types - issuer := s.extractIssuerFromProvider(provider) - if issuer != "" { - s.issuerToProvider[issuer] = provider - glog.V(2).Infof("Registered provider %s with issuer %s for efficient lookup", name, issuer) - } - - return nil -} - -// extractIssuerFromProvider attempts to extract issuer information from different provider types -func (s *STSService) extractIssuerFromProvider(provider providers.IdentityProvider) string { - // Handle different provider types - switch p := provider.(type) { - case interface{ GetIssuer() string }: - // For providers that implement GetIssuer() method - return p.GetIssuer() - default: - // For other provider types, we'll rely on JWT parsing during validation - // This is still more efficient than the current brute-force approach - return "" - } -} - -// GetProviders returns all registered identity providers -func (s *STSService) GetProviders() map[string]providers.IdentityProvider { - return s.providers -} - -// SetTrustPolicyValidator sets the trust policy validator for role assumption validation -func (s *STSService) SetTrustPolicyValidator(validator TrustPolicyValidator) { - s.trustPolicyValidator = validator -} - -// AssumeRoleWithWebIdentity assumes a role using a web identity token (OIDC) -// This method is now completely stateless - all session information is embedded in the JWT token -func (s *STSService) AssumeRoleWithWebIdentity(ctx context.Context, request *AssumeRoleWithWebIdentityRequest) (*AssumeRoleResponse, error) { - if !s.initialized { - return nil, fmt.Errorf(ErrSTSServiceNotInitialized) - } - - if request == nil { - return nil, fmt.Errorf("request cannot be nil") - } - - // Validate request parameters - if err := s.validateAssumeRoleWithWebIdentityRequest(request); err != nil { - return nil, fmt.Errorf("invalid request: %w", err) - } - - // Check for unsupported session policy - if request.Policy != nil { - return nil, fmt.Errorf("session policies are not currently supported - Policy parameter must be omitted") - } - - // 1. Validate the web identity token with appropriate provider - externalIdentity, provider, err := s.validateWebIdentityToken(ctx, request.WebIdentityToken) - if err != nil { - return nil, fmt.Errorf("failed to validate web identity token: %w", err) - } - - // 2. Check if the role exists and can be assumed (includes trust policy validation) - if err := s.validateRoleAssumptionForWebIdentity(ctx, request.RoleArn, request.WebIdentityToken); err != nil { - return nil, fmt.Errorf("role assumption denied: %w", err) - } - - // 3. Calculate session duration - sessionDuration := s.calculateSessionDuration(request.DurationSeconds) - expiresAt := time.Now().Add(sessionDuration) - - // 4. Generate session ID and credentials - sessionId, err := GenerateSessionId() - if err != nil { - return nil, fmt.Errorf("failed to generate session ID: %w", err) - } - - credGenerator := NewCredentialGenerator() - credentials, err := credGenerator.GenerateTemporaryCredentials(sessionId, expiresAt) - if err != nil { - return nil, fmt.Errorf("failed to generate credentials: %w", err) - } - - // 5. Create comprehensive JWT session token with all session information embedded - assumedRoleUser := &AssumedRoleUser{ - AssumedRoleId: request.RoleArn, - Arn: GenerateAssumedRoleArn(request.RoleArn, request.RoleSessionName), - Subject: externalIdentity.UserID, - } - - // Create rich JWT claims with all session information - sessionClaims := NewSTSSessionClaims(sessionId, s.Config.Issuer, expiresAt). - WithSessionName(request.RoleSessionName). - WithRoleInfo(request.RoleArn, assumedRoleUser.Arn, assumedRoleUser.Arn). - WithIdentityProvider(provider.Name(), externalIdentity.UserID, ""). - WithMaxDuration(sessionDuration) - - // Generate self-contained JWT token with all session information - jwtToken, err := s.tokenGenerator.GenerateJWTWithClaims(sessionClaims) - if err != nil { - return nil, fmt.Errorf("failed to generate JWT session token: %w", err) - } - credentials.SessionToken = jwtToken - - // 6. Build and return response (no session storage needed!) - - return &AssumeRoleResponse{ - Credentials: credentials, - AssumedRoleUser: assumedRoleUser, - }, nil -} - -// AssumeRoleWithCredentials assumes a role using username/password credentials -// This method is now completely stateless - all session information is embedded in the JWT token -func (s *STSService) AssumeRoleWithCredentials(ctx context.Context, request *AssumeRoleWithCredentialsRequest) (*AssumeRoleResponse, error) { - if !s.initialized { - return nil, fmt.Errorf("STS service not initialized") - } - - if request == nil { - return nil, fmt.Errorf("request cannot be nil") - } - - // Validate request parameters - if err := s.validateAssumeRoleWithCredentialsRequest(request); err != nil { - return nil, fmt.Errorf("invalid request: %w", err) - } - - // 1. Get the specified provider - provider, exists := s.providers[request.ProviderName] - if !exists { - return nil, fmt.Errorf("identity provider not found: %s", request.ProviderName) - } - - // 2. Validate credentials with the specified provider - credentials := request.Username + ":" + request.Password - externalIdentity, err := provider.Authenticate(ctx, credentials) - if err != nil { - return nil, fmt.Errorf("failed to authenticate credentials: %w", err) - } - - // 3. Check if the role exists and can be assumed (includes trust policy validation) - if err := s.validateRoleAssumptionForCredentials(ctx, request.RoleArn, externalIdentity); err != nil { - return nil, fmt.Errorf("role assumption denied: %w", err) - } - - // 4. Calculate session duration - sessionDuration := s.calculateSessionDuration(request.DurationSeconds) - expiresAt := time.Now().Add(sessionDuration) - - // 5. Generate session ID and temporary credentials - sessionId, err := GenerateSessionId() - if err != nil { - return nil, fmt.Errorf("failed to generate session ID: %w", err) - } - - credGenerator := NewCredentialGenerator() - tempCredentials, err := credGenerator.GenerateTemporaryCredentials(sessionId, expiresAt) - if err != nil { - return nil, fmt.Errorf("failed to generate credentials: %w", err) - } - - // 6. Create comprehensive JWT session token with all session information embedded - assumedRoleUser := &AssumedRoleUser{ - AssumedRoleId: request.RoleArn, - Arn: GenerateAssumedRoleArn(request.RoleArn, request.RoleSessionName), - Subject: externalIdentity.UserID, - } - - // Create rich JWT claims with all session information - sessionClaims := NewSTSSessionClaims(sessionId, s.Config.Issuer, expiresAt). - WithSessionName(request.RoleSessionName). - WithRoleInfo(request.RoleArn, assumedRoleUser.Arn, assumedRoleUser.Arn). - WithIdentityProvider(provider.Name(), externalIdentity.UserID, ""). - WithMaxDuration(sessionDuration) - - // Generate self-contained JWT token with all session information - jwtToken, err := s.tokenGenerator.GenerateJWTWithClaims(sessionClaims) - if err != nil { - return nil, fmt.Errorf("failed to generate JWT session token: %w", err) - } - tempCredentials.SessionToken = jwtToken - - // 7. Build and return response (no session storage needed!) - - return &AssumeRoleResponse{ - Credentials: tempCredentials, - AssumedRoleUser: assumedRoleUser, - }, nil -} - -// ValidateSessionToken validates a session token and returns session information -// This method is now completely stateless - all session information is extracted from the JWT token -func (s *STSService) ValidateSessionToken(ctx context.Context, sessionToken string) (*SessionInfo, error) { - if !s.initialized { - return nil, fmt.Errorf(ErrSTSServiceNotInitialized) - } - - if sessionToken == "" { - return nil, fmt.Errorf(ErrSessionTokenCannotBeEmpty) - } - - // Validate JWT and extract comprehensive session claims - claims, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken) - if err != nil { - return nil, fmt.Errorf(ErrSessionValidationFailed, err) - } - - // Convert JWT claims back to SessionInfo - // All session information is embedded in the JWT token itself - return claims.ToSessionInfo(), nil -} - -// NOTE: Session revocation is not supported in the stateless JWT design. -// -// In a stateless JWT system, tokens cannot be revoked without implementing a token blacklist, -// which would break the stateless architecture. Tokens remain valid until their natural -// expiration time. -// -// For applications requiring token revocation, consider: -// 1. Using shorter token lifespans (e.g., 15-30 minutes) -// 2. Implementing a distributed token blacklist (breaks stateless design) -// 3. Including a "jti" (JWT ID) claim for tracking specific tokens -// -// Use ValidateSessionToken() to verify if a token is valid and not expired. - -// Helper methods for AssumeRoleWithWebIdentity - -// validateAssumeRoleWithWebIdentityRequest validates the request parameters -func (s *STSService) validateAssumeRoleWithWebIdentityRequest(request *AssumeRoleWithWebIdentityRequest) error { - if request.RoleArn == "" { - return fmt.Errorf("RoleArn is required") - } - - if request.WebIdentityToken == "" { - return fmt.Errorf("WebIdentityToken is required") - } - - if request.RoleSessionName == "" { - return fmt.Errorf("RoleSessionName is required") - } - - // Validate session duration if provided - if request.DurationSeconds != nil { - if *request.DurationSeconds < 900 || *request.DurationSeconds > 43200 { // 15min to 12 hours - return fmt.Errorf("DurationSeconds must be between 900 and 43200 seconds") - } - } - - return nil -} - -// validateWebIdentityToken validates the web identity token with strict issuer-to-provider mapping -// SECURITY: JWT tokens with a specific issuer claim MUST only be validated by the provider for that issuer -// SECURITY: This method only accepts JWT tokens. Non-JWT authentication must use AssumeRoleWithCredentials with explicit ProviderName. -func (s *STSService) validateWebIdentityToken(ctx context.Context, token string) (*providers.ExternalIdentity, providers.IdentityProvider, error) { - // Try to extract issuer from JWT token for strict validation - issuer, err := s.extractIssuerFromJWT(token) - if err != nil { - // Token is not a valid JWT or cannot be parsed - // SECURITY: Web identity tokens MUST be JWT tokens. Non-JWT authentication flows - // should use AssumeRoleWithCredentials with explicit ProviderName to prevent - // security vulnerabilities from non-deterministic provider selection. - return nil, nil, fmt.Errorf("web identity token must be a valid JWT token: %w", err) - } - - // Look up the specific provider for this issuer - provider, exists := s.issuerToProvider[issuer] - if !exists { - // SECURITY: If no provider is registered for this issuer, fail immediately - // This prevents JWT tokens from being validated by unintended providers - return nil, nil, fmt.Errorf("no identity provider registered for issuer: %s", issuer) - } - - // Authenticate with the correct provider for this issuer - identity, err := provider.Authenticate(ctx, token) - if err != nil { - return nil, nil, fmt.Errorf("token validation failed with provider for issuer %s: %w", issuer, err) - } - - if identity == nil { - return nil, nil, fmt.Errorf("authentication succeeded but no identity returned for issuer %s", issuer) - } - - return identity, provider, nil -} - -// ValidateWebIdentityToken is a public method that exposes secure token validation for external use -// This method uses issuer-based lookup to select the correct provider, ensuring security and efficiency -func (s *STSService) ValidateWebIdentityToken(ctx context.Context, token string) (*providers.ExternalIdentity, providers.IdentityProvider, error) { - return s.validateWebIdentityToken(ctx, token) -} - -// extractIssuerFromJWT extracts the issuer (iss) claim from a JWT token without verification -func (s *STSService) extractIssuerFromJWT(token string) (string, error) { - // Parse token without verification to get claims - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err != nil { - return "", fmt.Errorf("failed to parse JWT token: %v", err) - } - - // Extract claims - claims, ok := parsedToken.Claims.(jwt.MapClaims) - if !ok { - return "", fmt.Errorf("invalid token claims") - } - - // Get issuer claim - issuer, ok := claims["iss"].(string) - if !ok || issuer == "" { - return "", fmt.Errorf("missing or invalid issuer claim") - } - - return issuer, nil -} - -// validateRoleAssumptionForWebIdentity validates role assumption for web identity tokens -// This method performs complete trust policy validation to prevent unauthorized role assumptions -func (s *STSService) validateRoleAssumptionForWebIdentity(ctx context.Context, roleArn string, webIdentityToken string) error { - if roleArn == "" { - return fmt.Errorf("role ARN cannot be empty") - } - - if webIdentityToken == "" { - return fmt.Errorf("web identity token cannot be empty") - } - - // Basic role ARN format validation - expectedPrefix := "arn:seaweed:iam::role/" - if len(roleArn) < len(expectedPrefix) || roleArn[:len(expectedPrefix)] != expectedPrefix { - return fmt.Errorf("invalid role ARN format: got %s, expected format: %s*", roleArn, expectedPrefix) - } - - // Extract role name and validate ARN format - roleName := utils.ExtractRoleNameFromArn(roleArn) - if roleName == "" { - return fmt.Errorf("invalid role ARN format: %s", roleArn) - } - - // CRITICAL SECURITY: Perform trust policy validation - if s.trustPolicyValidator != nil { - if err := s.trustPolicyValidator.ValidateTrustPolicyForWebIdentity(ctx, roleArn, webIdentityToken); err != nil { - return fmt.Errorf("trust policy validation failed: %w", err) - } - } else { - // If no trust policy validator is configured, fail closed for security - glog.Errorf("SECURITY WARNING: No trust policy validator configured - denying role assumption for security") - return fmt.Errorf("trust policy validation not available - role assumption denied for security") - } - - return nil -} - -// validateRoleAssumptionForCredentials validates role assumption for credential-based authentication -// This method performs complete trust policy validation to prevent unauthorized role assumptions -func (s *STSService) validateRoleAssumptionForCredentials(ctx context.Context, roleArn string, identity *providers.ExternalIdentity) error { - if roleArn == "" { - return fmt.Errorf("role ARN cannot be empty") - } - - if identity == nil { - return fmt.Errorf("identity cannot be nil") - } - - // Basic role ARN format validation - expectedPrefix := "arn:seaweed:iam::role/" - if len(roleArn) < len(expectedPrefix) || roleArn[:len(expectedPrefix)] != expectedPrefix { - return fmt.Errorf("invalid role ARN format: got %s, expected format: %s*", roleArn, expectedPrefix) - } - - // Extract role name and validate ARN format - roleName := utils.ExtractRoleNameFromArn(roleArn) - if roleName == "" { - return fmt.Errorf("invalid role ARN format: %s", roleArn) - } - - // CRITICAL SECURITY: Perform trust policy validation - if s.trustPolicyValidator != nil { - if err := s.trustPolicyValidator.ValidateTrustPolicyForCredentials(ctx, roleArn, identity); err != nil { - return fmt.Errorf("trust policy validation failed: %w", err) - } - } else { - // If no trust policy validator is configured, fail closed for security - glog.Errorf("SECURITY WARNING: No trust policy validator configured - denying role assumption for security") - return fmt.Errorf("trust policy validation not available - role assumption denied for security") - } - - return nil -} - -// calculateSessionDuration calculates the session duration -func (s *STSService) calculateSessionDuration(durationSeconds *int64) time.Duration { - if durationSeconds != nil { - return time.Duration(*durationSeconds) * time.Second - } - - // Use default from config - return s.Config.TokenDuration.Duration -} - -// extractSessionIdFromToken extracts session ID from JWT session token -func (s *STSService) extractSessionIdFromToken(sessionToken string) string { - // Parse JWT and extract session ID from claims - claims, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken) - if err != nil { - // For test compatibility, also handle direct session IDs - if len(sessionToken) == 32 { // Typical session ID length - return sessionToken - } - return "" - } - - return claims.SessionId -} - -// validateAssumeRoleWithCredentialsRequest validates the credentials request parameters -func (s *STSService) validateAssumeRoleWithCredentialsRequest(request *AssumeRoleWithCredentialsRequest) error { - if request.RoleArn == "" { - return fmt.Errorf("RoleArn is required") - } - - if request.Username == "" { - return fmt.Errorf("Username is required") - } - - if request.Password == "" { - return fmt.Errorf("Password is required") - } - - if request.RoleSessionName == "" { - return fmt.Errorf("RoleSessionName is required") - } - - if request.ProviderName == "" { - return fmt.Errorf("ProviderName is required") - } - - // Validate session duration if provided - if request.DurationSeconds != nil { - if *request.DurationSeconds < 900 || *request.DurationSeconds > 43200 { // 15min to 12 hours - return fmt.Errorf("DurationSeconds must be between 900 and 43200 seconds") - } - } - - return nil -} - -// ExpireSessionForTesting manually expires a session for testing purposes -func (s *STSService) ExpireSessionForTesting(ctx context.Context, sessionToken string) error { - if !s.initialized { - return fmt.Errorf("STS service not initialized") - } - - if sessionToken == "" { - return fmt.Errorf("session token cannot be empty") - } - - // Validate JWT token format - _, err := s.tokenGenerator.ValidateJWTWithClaims(sessionToken) - if err != nil { - return fmt.Errorf("invalid session token format: %w", err) - } - - // In a stateless system, we cannot manually expire JWT tokens - // The token expiration is embedded in the token itself and handled by JWT validation - glog.V(1).Infof("Manual session expiration requested for stateless token - cannot expire JWT tokens manually") - - return fmt.Errorf("manual session expiration not supported in stateless JWT system") -} diff --git a/weed/iam/sts/sts_service_test.go b/weed/iam/sts/sts_service_test.go deleted file mode 100644 index 60d78118f..000000000 --- a/weed/iam/sts/sts_service_test.go +++ /dev/null @@ -1,453 +0,0 @@ -package sts - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createSTSTestJWT creates a test JWT token for STS service tests -func createSTSTestJWT(t *testing.T, issuer, subject string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - }) - - tokenString, err := token.SignedString([]byte("test-signing-key")) - require.NoError(t, err) - return tokenString -} - -// TestSTSServiceInitialization tests STS service initialization -func TestSTSServiceInitialization(t *testing.T) { - tests := []struct { - name string - config *STSConfig - wantErr bool - }{ - { - name: "valid config", - config: &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{time.Hour * 12}, - Issuer: "seaweedfs-sts", - SigningKey: []byte("test-signing-key"), - }, - wantErr: false, - }, - { - name: "missing signing key", - config: &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - Issuer: "seaweedfs-sts", - }, - wantErr: true, - }, - { - name: "invalid token duration", - config: &STSConfig{ - TokenDuration: FlexibleDuration{-time.Hour}, - Issuer: "seaweedfs-sts", - SigningKey: []byte("test-key"), - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - service := NewSTSService() - - err := service.Initialize(tt.config) - - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.True(t, service.IsInitialized()) - } - }) - } -} - -// TestAssumeRoleWithWebIdentity tests role assumption with OIDC tokens -func TestAssumeRoleWithWebIdentity(t *testing.T) { - service := setupTestSTSService(t) - - tests := []struct { - name string - roleArn string - webIdentityToken string - sessionName string - durationSeconds *int64 - wantErr bool - expectedSubject string - }{ - { - name: "successful role assumption", - roleArn: "arn:seaweed:iam::role/TestRole", - webIdentityToken: createSTSTestJWT(t, "test-issuer", "test-user-id"), - sessionName: "test-session", - durationSeconds: nil, // Use default - wantErr: false, - expectedSubject: "test-user-id", - }, - { - name: "invalid web identity token", - roleArn: "arn:seaweed:iam::role/TestRole", - webIdentityToken: "invalid-token", - sessionName: "test-session", - wantErr: true, - }, - { - name: "non-existent role", - roleArn: "arn:seaweed:iam::role/NonExistentRole", - webIdentityToken: createSTSTestJWT(t, "test-issuer", "test-user"), - sessionName: "test-session", - wantErr: true, - }, - { - name: "custom session duration", - roleArn: "arn:seaweed:iam::role/TestRole", - webIdentityToken: createSTSTestJWT(t, "test-issuer", "test-user"), - sessionName: "test-session", - durationSeconds: int64Ptr(7200), // 2 hours - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: tt.roleArn, - WebIdentityToken: tt.webIdentityToken, - RoleSessionName: tt.sessionName, - DurationSeconds: tt.durationSeconds, - } - - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - - if tt.wantErr { - assert.Error(t, err) - assert.Nil(t, response) - } else { - assert.NoError(t, err) - assert.NotNil(t, response) - assert.NotNil(t, response.Credentials) - assert.NotNil(t, response.AssumedRoleUser) - - // Verify credentials - creds := response.Credentials - assert.NotEmpty(t, creds.AccessKeyId) - assert.NotEmpty(t, creds.SecretAccessKey) - assert.NotEmpty(t, creds.SessionToken) - assert.True(t, creds.Expiration.After(time.Now())) - - // Verify assumed role user - user := response.AssumedRoleUser - assert.Equal(t, tt.roleArn, user.AssumedRoleId) - assert.Contains(t, user.Arn, tt.sessionName) - - if tt.expectedSubject != "" { - assert.Equal(t, tt.expectedSubject, user.Subject) - } - } - }) - } -} - -// TestAssumeRoleWithLDAP tests role assumption with LDAP credentials -func TestAssumeRoleWithLDAP(t *testing.T) { - service := setupTestSTSService(t) - - tests := []struct { - name string - roleArn string - username string - password string - sessionName string - wantErr bool - }{ - { - name: "successful LDAP role assumption", - roleArn: "arn:seaweed:iam::role/LDAPRole", - username: "testuser", - password: "testpass", - sessionName: "ldap-session", - wantErr: false, - }, - { - name: "invalid LDAP credentials", - roleArn: "arn:seaweed:iam::role/LDAPRole", - username: "testuser", - password: "wrongpass", - sessionName: "ldap-session", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - request := &AssumeRoleWithCredentialsRequest{ - RoleArn: tt.roleArn, - Username: tt.username, - Password: tt.password, - RoleSessionName: tt.sessionName, - ProviderName: "test-ldap", - } - - response, err := service.AssumeRoleWithCredentials(ctx, request) - - if tt.wantErr { - assert.Error(t, err) - assert.Nil(t, response) - } else { - assert.NoError(t, err) - assert.NotNil(t, response) - assert.NotNil(t, response.Credentials) - } - }) - } -} - -// TestSessionTokenValidation tests session token validation -func TestSessionTokenValidation(t *testing.T) { - service := setupTestSTSService(t) - ctx := context.Background() - - // First, create a session - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: createSTSTestJWT(t, "test-issuer", "test-user"), - RoleSessionName: "test-session", - } - - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - require.NoError(t, err) - require.NotNil(t, response) - - sessionToken := response.Credentials.SessionToken - - tests := []struct { - name string - token string - wantErr bool - }{ - { - name: "valid session token", - token: sessionToken, - wantErr: false, - }, - { - name: "invalid session token", - token: "invalid-session-token", - wantErr: true, - }, - { - name: "empty session token", - token: "", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - session, err := service.ValidateSessionToken(ctx, tt.token) - - if tt.wantErr { - assert.Error(t, err) - assert.Nil(t, session) - } else { - assert.NoError(t, err) - assert.NotNil(t, session) - assert.Equal(t, "test-session", session.SessionName) - assert.Equal(t, "arn:seaweed:iam::role/TestRole", session.RoleArn) - } - }) - } -} - -// TestSessionTokenPersistence tests that JWT tokens remain valid throughout their lifetime -// Note: In the stateless JWT design, tokens cannot be revoked and remain valid until expiration -func TestSessionTokenPersistence(t *testing.T) { - service := setupTestSTSService(t) - ctx := context.Background() - - // Create a session first - request := &AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/TestRole", - WebIdentityToken: createSTSTestJWT(t, "test-issuer", "test-user"), - RoleSessionName: "test-session", - } - - response, err := service.AssumeRoleWithWebIdentity(ctx, request) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - - // Verify token is valid initially - session, err := service.ValidateSessionToken(ctx, sessionToken) - assert.NoError(t, err) - assert.NotNil(t, session) - assert.Equal(t, "test-session", session.SessionName) - - // In a stateless JWT system, tokens remain valid throughout their lifetime - // Multiple validations should all succeed as long as the token hasn't expired - session2, err := service.ValidateSessionToken(ctx, sessionToken) - assert.NoError(t, err, "Token should remain valid in stateless system") - assert.NotNil(t, session2, "Session should be returned from JWT token") - assert.Equal(t, session.SessionId, session2.SessionId, "Session ID should be consistent") -} - -// Helper functions - -func setupTestSTSService(t *testing.T) *STSService { - service := NewSTSService() - - config := &STSConfig{ - TokenDuration: FlexibleDuration{time.Hour}, - MaxSessionLength: FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - } - - err := service.Initialize(config) - require.NoError(t, err) - - // Set up mock trust policy validator (required for STS testing) - mockValidator := &MockTrustPolicyValidator{} - service.SetTrustPolicyValidator(mockValidator) - - // Register test providers - mockOIDCProvider := &MockIdentityProvider{ - name: "test-oidc", - validTokens: map[string]*providers.TokenClaims{ - createSTSTestJWT(t, "test-issuer", "test-user"): { - Subject: "test-user-id", - Issuer: "test-issuer", - Claims: map[string]interface{}{ - "email": "test@example.com", - "name": "Test User", - }, - }, - }, - } - - mockLDAPProvider := &MockIdentityProvider{ - name: "test-ldap", - validCredentials: map[string]string{ - "testuser": "testpass", - }, - } - - service.RegisterProvider(mockOIDCProvider) - service.RegisterProvider(mockLDAPProvider) - - return service -} - -func int64Ptr(v int64) *int64 { - return &v -} - -// Mock identity provider for testing -type MockIdentityProvider struct { - name string - validTokens map[string]*providers.TokenClaims - validCredentials map[string]string -} - -func (m *MockIdentityProvider) Name() string { - return m.name -} - -func (m *MockIdentityProvider) GetIssuer() string { - return "test-issuer" // This matches the issuer in the token claims -} - -func (m *MockIdentityProvider) Initialize(config interface{}) error { - return nil -} - -func (m *MockIdentityProvider) Authenticate(ctx context.Context, token string) (*providers.ExternalIdentity, error) { - // First try to parse as JWT token - if len(token) > 20 && strings.Count(token, ".") >= 2 { - parsedToken, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) - if err == nil { - if claims, ok := parsedToken.Claims.(jwt.MapClaims); ok { - issuer, _ := claims["iss"].(string) - subject, _ := claims["sub"].(string) - - // Verify the issuer matches what we expect - if issuer == "test-issuer" && subject != "" { - return &providers.ExternalIdentity{ - UserID: subject, - Email: subject + "@test-domain.com", - DisplayName: "Test User " + subject, - Provider: m.name, - }, nil - } - } - } - } - - // Handle legacy OIDC tokens (for backwards compatibility) - if claims, exists := m.validTokens[token]; exists { - email, _ := claims.GetClaimString("email") - name, _ := claims.GetClaimString("name") - - return &providers.ExternalIdentity{ - UserID: claims.Subject, - Email: email, - DisplayName: name, - Provider: m.name, - }, nil - } - - // Handle LDAP credentials (username:password format) - if m.validCredentials != nil { - parts := strings.Split(token, ":") - if len(parts) == 2 { - username, password := parts[0], parts[1] - if expectedPassword, exists := m.validCredentials[username]; exists && expectedPassword == password { - return &providers.ExternalIdentity{ - UserID: username, - Email: username + "@" + m.name + ".com", - DisplayName: "Test User " + username, - Provider: m.name, - }, nil - } - } - } - - return nil, fmt.Errorf("unknown test token: %s", token) -} - -func (m *MockIdentityProvider) GetUserInfo(ctx context.Context, userID string) (*providers.ExternalIdentity, error) { - return &providers.ExternalIdentity{ - UserID: userID, - Email: userID + "@" + m.name + ".com", - Provider: m.name, - }, nil -} - -func (m *MockIdentityProvider) ValidateToken(ctx context.Context, token string) (*providers.TokenClaims, error) { - if claims, exists := m.validTokens[token]; exists { - return claims, nil - } - return nil, fmt.Errorf("invalid token") -} diff --git a/weed/iam/sts/test_utils.go b/weed/iam/sts/test_utils.go deleted file mode 100644 index 58de592dc..000000000 --- a/weed/iam/sts/test_utils.go +++ /dev/null @@ -1,53 +0,0 @@ -package sts - -import ( - "context" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/iam/providers" -) - -// MockTrustPolicyValidator is a simple mock for testing STS functionality -type MockTrustPolicyValidator struct{} - -// ValidateTrustPolicyForWebIdentity allows valid JWT test tokens for STS testing -func (m *MockTrustPolicyValidator) ValidateTrustPolicyForWebIdentity(ctx context.Context, roleArn string, webIdentityToken string) error { - // Reject non-existent roles for testing - if strings.Contains(roleArn, "NonExistentRole") { - return fmt.Errorf("trust policy validation failed: role does not exist") - } - - // For STS unit tests, allow JWT tokens that look valid (contain dots for JWT structure) - // In real implementation, this would validate against actual trust policies - if len(webIdentityToken) > 20 && strings.Count(webIdentityToken, ".") >= 2 { - // This appears to be a JWT token - allow it for testing - return nil - } - - // Legacy support for specific test tokens during migration - if webIdentityToken == "valid_test_token" || webIdentityToken == "valid-oidc-token" { - return nil - } - - // Reject invalid tokens - if webIdentityToken == "invalid_token" || webIdentityToken == "expired_token" || webIdentityToken == "invalid-token" { - return fmt.Errorf("trust policy denies token") - } - - return nil -} - -// ValidateTrustPolicyForCredentials allows valid test identities for STS testing -func (m *MockTrustPolicyValidator) ValidateTrustPolicyForCredentials(ctx context.Context, roleArn string, identity *providers.ExternalIdentity) error { - // Reject non-existent roles for testing - if strings.Contains(roleArn, "NonExistentRole") { - return fmt.Errorf("trust policy validation failed: role does not exist") - } - - // For STS unit tests, allow test identities - if identity != nil && identity.UserID != "" { - return nil - } - return fmt.Errorf("invalid identity for role assumption") -} diff --git a/weed/iam/sts/token_utils.go b/weed/iam/sts/token_utils.go deleted file mode 100644 index 07c195326..000000000 --- a/weed/iam/sts/token_utils.go +++ /dev/null @@ -1,217 +0,0 @@ -package sts - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/utils" -) - -// TokenGenerator handles token generation and validation -type TokenGenerator struct { - signingKey []byte - issuer string -} - -// NewTokenGenerator creates a new token generator -func NewTokenGenerator(signingKey []byte, issuer string) *TokenGenerator { - return &TokenGenerator{ - signingKey: signingKey, - issuer: issuer, - } -} - -// GenerateSessionToken creates a signed JWT session token (legacy method for compatibility) -func (t *TokenGenerator) GenerateSessionToken(sessionId string, expiresAt time.Time) (string, error) { - claims := NewSTSSessionClaims(sessionId, t.issuer, expiresAt) - return t.GenerateJWTWithClaims(claims) -} - -// GenerateJWTWithClaims creates a signed JWT token with comprehensive session claims -func (t *TokenGenerator) GenerateJWTWithClaims(claims *STSSessionClaims) (string, error) { - if claims == nil { - return "", fmt.Errorf("claims cannot be nil") - } - - // Ensure issuer is set from token generator - if claims.Issuer == "" { - claims.Issuer = t.issuer - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - return token.SignedString(t.signingKey) -} - -// ValidateSessionToken validates and extracts claims from a session token -func (t *TokenGenerator) ValidateSessionToken(tokenString string) (*SessionTokenClaims, error) { - token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return t.signingKey, nil - }) - - if err != nil { - return nil, fmt.Errorf(ErrInvalidToken, err) - } - - if !token.Valid { - return nil, fmt.Errorf(ErrTokenNotValid) - } - - claims, ok := token.Claims.(jwt.MapClaims) - if !ok { - return nil, fmt.Errorf(ErrInvalidTokenClaims) - } - - // Verify issuer - if iss, ok := claims[JWTClaimIssuer].(string); !ok || iss != t.issuer { - return nil, fmt.Errorf(ErrInvalidIssuer) - } - - // Extract session ID - sessionId, ok := claims[JWTClaimSubject].(string) - if !ok { - return nil, fmt.Errorf(ErrMissingSessionID) - } - - return &SessionTokenClaims{ - SessionId: sessionId, - ExpiresAt: time.Unix(int64(claims[JWTClaimExpiration].(float64)), 0), - IssuedAt: time.Unix(int64(claims[JWTClaimIssuedAt].(float64)), 0), - }, nil -} - -// ValidateJWTWithClaims validates and extracts comprehensive session claims from a JWT token -func (t *TokenGenerator) ValidateJWTWithClaims(tokenString string) (*STSSessionClaims, error) { - token, err := jwt.ParseWithClaims(tokenString, &STSSessionClaims{}, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return t.signingKey, nil - }) - - if err != nil { - return nil, fmt.Errorf(ErrInvalidToken, err) - } - - if !token.Valid { - return nil, fmt.Errorf(ErrTokenNotValid) - } - - claims, ok := token.Claims.(*STSSessionClaims) - if !ok { - return nil, fmt.Errorf(ErrInvalidTokenClaims) - } - - // Validate issuer - if claims.Issuer != t.issuer { - return nil, fmt.Errorf(ErrInvalidIssuer) - } - - // Validate that required fields are present - if claims.SessionId == "" { - return nil, fmt.Errorf(ErrMissingSessionID) - } - - // Additional validation using the claims' own validation method - if !claims.IsValid() { - return nil, fmt.Errorf(ErrTokenNotValid) - } - - return claims, nil -} - -// SessionTokenClaims represents parsed session token claims -type SessionTokenClaims struct { - SessionId string - ExpiresAt time.Time - IssuedAt time.Time -} - -// CredentialGenerator generates AWS-compatible temporary credentials -type CredentialGenerator struct{} - -// NewCredentialGenerator creates a new credential generator -func NewCredentialGenerator() *CredentialGenerator { - return &CredentialGenerator{} -} - -// GenerateTemporaryCredentials creates temporary AWS credentials -func (c *CredentialGenerator) GenerateTemporaryCredentials(sessionId string, expiration time.Time) (*Credentials, error) { - accessKeyId, err := c.generateAccessKeyId(sessionId) - if err != nil { - return nil, fmt.Errorf("failed to generate access key ID: %w", err) - } - - secretAccessKey, err := c.generateSecretAccessKey() - if err != nil { - return nil, fmt.Errorf("failed to generate secret access key: %w", err) - } - - sessionToken, err := c.generateSessionTokenId(sessionId) - if err != nil { - return nil, fmt.Errorf("failed to generate session token: %w", err) - } - - return &Credentials{ - AccessKeyId: accessKeyId, - SecretAccessKey: secretAccessKey, - SessionToken: sessionToken, - Expiration: expiration, - }, nil -} - -// generateAccessKeyId generates an AWS-style access key ID -func (c *CredentialGenerator) generateAccessKeyId(sessionId string) (string, error) { - // Create a deterministic but unique access key ID based on session - hash := sha256.Sum256([]byte("access-key:" + sessionId)) - return "AKIA" + hex.EncodeToString(hash[:8]), nil // AWS format: AKIA + 16 chars -} - -// generateSecretAccessKey generates a random secret access key -func (c *CredentialGenerator) generateSecretAccessKey() (string, error) { - // Generate 32 random bytes for secret key - secretBytes := make([]byte, 32) - _, err := rand.Read(secretBytes) - if err != nil { - return "", err - } - - return base64.StdEncoding.EncodeToString(secretBytes), nil -} - -// generateSessionTokenId generates a session token identifier -func (c *CredentialGenerator) generateSessionTokenId(sessionId string) (string, error) { - // Create session token with session ID embedded - hash := sha256.Sum256([]byte("session-token:" + sessionId)) - return "ST" + hex.EncodeToString(hash[:16]), nil // Custom format -} - -// generateSessionId generates a unique session ID -func GenerateSessionId() (string, error) { - randomBytes := make([]byte, 16) - _, err := rand.Read(randomBytes) - if err != nil { - return "", err - } - - return hex.EncodeToString(randomBytes), nil -} - -// generateAssumedRoleArn generates the ARN for an assumed role user -func GenerateAssumedRoleArn(roleArn, sessionName string) string { - // Convert role ARN to assumed role user ARN - // arn:seaweed:iam::role/RoleName -> arn:seaweed:sts::assumed-role/RoleName/SessionName - roleName := utils.ExtractRoleNameFromArn(roleArn) - if roleName == "" { - // This should not happen if validation is done properly upstream - return fmt.Sprintf("arn:seaweed:sts::assumed-role/INVALID-ARN/%s", sessionName) - } - return fmt.Sprintf("arn:seaweed:sts::assumed-role/%s/%s", roleName, sessionName) -} diff --git a/weed/iam/util/generic_cache.go b/weed/iam/util/generic_cache.go deleted file mode 100644 index 19bc3d67b..000000000 --- a/weed/iam/util/generic_cache.go +++ /dev/null @@ -1,175 +0,0 @@ -package util - -import ( - "context" - "time" - - "github.com/karlseguin/ccache/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// CacheableStore defines the interface for stores that can be cached -type CacheableStore[T any] interface { - Get(ctx context.Context, filerAddress string, key string) (T, error) - Store(ctx context.Context, filerAddress string, key string, value T) error - Delete(ctx context.Context, filerAddress string, key string) error - List(ctx context.Context, filerAddress string) ([]string, error) -} - -// CopyFunction defines how to deep copy cached values -type CopyFunction[T any] func(T) T - -// CachedStore provides generic TTL caching for any store type -type CachedStore[T any] struct { - baseStore CacheableStore[T] - cache *ccache.Cache - listCache *ccache.Cache - copyFunc CopyFunction[T] - ttl time.Duration - listTTL time.Duration -} - -// CachedStoreConfig holds configuration for the generic cached store -type CachedStoreConfig struct { - TTL time.Duration - ListTTL time.Duration - MaxCacheSize int64 -} - -// NewCachedStore creates a new generic cached store -func NewCachedStore[T any]( - baseStore CacheableStore[T], - copyFunc CopyFunction[T], - config CachedStoreConfig, -) *CachedStore[T] { - // Apply defaults - if config.TTL == 0 { - config.TTL = 5 * time.Minute - } - if config.ListTTL == 0 { - config.ListTTL = 1 * time.Minute - } - if config.MaxCacheSize == 0 { - config.MaxCacheSize = 1000 - } - - // Create ccache instances - pruneCount := config.MaxCacheSize >> 3 - if pruneCount <= 0 { - pruneCount = 100 - } - - return &CachedStore[T]{ - baseStore: baseStore, - cache: ccache.New(ccache.Configure().MaxSize(config.MaxCacheSize).ItemsToPrune(uint32(pruneCount))), - listCache: ccache.New(ccache.Configure().MaxSize(100).ItemsToPrune(10)), - copyFunc: copyFunc, - ttl: config.TTL, - listTTL: config.ListTTL, - } -} - -// Get retrieves an item with caching -func (c *CachedStore[T]) Get(ctx context.Context, filerAddress string, key string) (T, error) { - // Try cache first - item := c.cache.Get(key) - if item != nil { - // Cache hit - return cached item (DO NOT extend TTL) - value := item.Value().(T) - glog.V(4).Infof("Cache hit for key %s", key) - return c.copyFunc(value), nil - } - - // Cache miss - fetch from base store - glog.V(4).Infof("Cache miss for key %s, fetching from store", key) - value, err := c.baseStore.Get(ctx, filerAddress, key) - if err != nil { - var zero T - return zero, err - } - - // Cache the result with TTL - c.cache.Set(key, c.copyFunc(value), c.ttl) - glog.V(3).Infof("Cached key %s with TTL %v", key, c.ttl) - return value, nil -} - -// Store stores an item and invalidates cache -func (c *CachedStore[T]) Store(ctx context.Context, filerAddress string, key string, value T) error { - // Store in base store - err := c.baseStore.Store(ctx, filerAddress, key, value) - if err != nil { - return err - } - - // Invalidate cache entries - c.cache.Delete(key) - c.listCache.Clear() // Invalidate list cache - - glog.V(3).Infof("Stored and invalidated cache for key %s", key) - return nil -} - -// Delete deletes an item and invalidates cache -func (c *CachedStore[T]) Delete(ctx context.Context, filerAddress string, key string) error { - // Delete from base store - err := c.baseStore.Delete(ctx, filerAddress, key) - if err != nil { - return err - } - - // Invalidate cache entries - c.cache.Delete(key) - c.listCache.Clear() // Invalidate list cache - - glog.V(3).Infof("Deleted and invalidated cache for key %s", key) - return nil -} - -// List lists all items with caching -func (c *CachedStore[T]) List(ctx context.Context, filerAddress string) ([]string, error) { - const listCacheKey = "item_list" - - // Try list cache first - item := c.listCache.Get(listCacheKey) - if item != nil { - // Cache hit - return cached list (DO NOT extend TTL) - items := item.Value().([]string) - glog.V(4).Infof("List cache hit, returning %d items", len(items)) - return append([]string(nil), items...), nil // Return a copy - } - - // Cache miss - fetch from base store - glog.V(4).Infof("List cache miss, fetching from store") - items, err := c.baseStore.List(ctx, filerAddress) - if err != nil { - return nil, err - } - - // Cache the result with TTL (store a copy) - itemsCopy := append([]string(nil), items...) - c.listCache.Set(listCacheKey, itemsCopy, c.listTTL) - glog.V(3).Infof("Cached list with %d entries, TTL %v", len(items), c.listTTL) - return items, nil -} - -// ClearCache clears all cached entries -func (c *CachedStore[T]) ClearCache() { - c.cache.Clear() - c.listCache.Clear() - glog.V(2).Infof("Cleared all cache entries") -} - -// GetCacheStats returns cache statistics -func (c *CachedStore[T]) GetCacheStats() map[string]interface{} { - return map[string]interface{}{ - "itemCache": map[string]interface{}{ - "size": c.cache.ItemCount(), - "ttl": c.ttl.String(), - }, - "listCache": map[string]interface{}{ - "size": c.listCache.ItemCount(), - "ttl": c.listTTL.String(), - }, - } -} diff --git a/weed/iam/utils/arn_utils.go b/weed/iam/utils/arn_utils.go deleted file mode 100644 index f4c05dab1..000000000 --- a/weed/iam/utils/arn_utils.go +++ /dev/null @@ -1,39 +0,0 @@ -package utils - -import "strings" - -// ExtractRoleNameFromPrincipal extracts role name from principal ARN -// Handles both STS assumed role and IAM role formats -func ExtractRoleNameFromPrincipal(principal string) string { - // Handle STS assumed role format: arn:seaweed:sts::assumed-role/RoleName/SessionName - stsPrefix := "arn:seaweed:sts::assumed-role/" - if strings.HasPrefix(principal, stsPrefix) { - remainder := principal[len(stsPrefix):] - // Split on first '/' to get role name - if slashIndex := strings.Index(remainder, "/"); slashIndex != -1 { - return remainder[:slashIndex] - } - // If no slash found, return the remainder (edge case) - return remainder - } - - // Handle IAM role format: arn:seaweed:iam::role/RoleName - iamPrefix := "arn:seaweed:iam::role/" - if strings.HasPrefix(principal, iamPrefix) { - return principal[len(iamPrefix):] - } - - // Return empty string to signal invalid ARN format - // This allows callers to handle the error explicitly instead of masking it - return "" -} - -// ExtractRoleNameFromArn extracts role name from an IAM role ARN -// Specifically handles: arn:seaweed:iam::role/RoleName -func ExtractRoleNameFromArn(roleArn string) string { - prefix := "arn:seaweed:iam::role/" - if strings.HasPrefix(roleArn, prefix) && len(roleArn) > len(prefix) { - return roleArn[len(prefix):] - } - return "" -} diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go index c8eac8ef6..a59834e88 100644 --- a/weed/iamapi/iamapi_handlers.go +++ b/weed/iamapi/iamapi_handlers.go @@ -1,45 +1,31 @@ package iamapi import ( - "net/http" - + "fmt" "github.com/aws/aws-sdk-go/service/iam" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net/http" ) -func newErrorResponse(errCode string, errMsg string) ErrorResponse { +func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, err error, object string, value string, msg error) { + errCode := err.Error() errorResp := ErrorResponse{} errorResp.Error.Type = "Sender" errorResp.Error.Code = &errCode - errorResp.Error.Message = &errMsg - return errorResp -} - -func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, iamError *IamError) { - - if iamError == nil { - // Do nothing if there is no error - glog.Errorf("No error found") - return + if msg != nil { + errMsg := msg.Error() + errorResp.Error.Message = &errMsg } - - errCode := iamError.Code - errMsg := iamError.Error.Error() - glog.Errorf("Response %+v", errMsg) - - errorResp := newErrorResponse(errCode, errMsg) - internalErrorResponse := newErrorResponse(iam.ErrCodeServiceFailureException, "Internal server error") - + glog.Errorf("Response %+v", err) switch errCode { case iam.ErrCodeNoSuchEntityException: + msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value) + errorResp.Error.Message = &msg s3err.WriteXMLResponse(w, r, http.StatusNotFound, errorResp) - case iam.ErrCodeMalformedPolicyDocumentException: - s3err.WriteXMLResponse(w, r, http.StatusBadRequest, errorResp) case iam.ErrCodeServiceFailureException: - // We do not want to expose internal server error to the client - s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, internalErrorResponse) + s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp) default: - s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, internalErrorResponse) + s3err.WriteXMLResponse(w, r, http.StatusInternalServerError, errorResp) } } diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go index 1a8f852cd..e1f215bd3 100644 --- a/weed/iamapi/iamapi_management_handlers.go +++ b/weed/iamapi/iamapi_management_handlers.go @@ -3,7 +3,6 @@ package iamapi import ( "crypto/sha1" "encoding/json" - "errors" "fmt" "math/rand" "net/http" @@ -13,34 +12,29 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/aws/aws-sdk-go/service/iam" ) const ( - charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/" - policyDocumentVersion = "2012-10-17" - StatementActionAdmin = "*" - StatementActionWrite = "Put*" - StatementActionWriteAcp = "PutBucketAcl" - StatementActionRead = "Get*" - StatementActionReadAcp = "GetBucketAcl" - StatementActionList = "List*" - StatementActionTagging = "Tagging*" - StatementActionDelete = "DeleteBucket*" + charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/" + policyDocumentVersion = "2012-10-17" + StatementActionAdmin = "*" + StatementActionWrite = "Put*" + StatementActionRead = "Get*" + StatementActionList = "List*" + StatementActionTagging = "Tagging*" ) var ( seededRand *rand.Rand = rand.New( rand.NewSource(time.Now().UnixNano())) - policyDocuments = map[string]*policy_engine.PolicyDocument{} + policyDocuments = map[string]*PolicyDocument{} policyLock = sync.RWMutex{} ) @@ -50,18 +44,12 @@ func MapToStatementAction(action string) string { return s3_constants.ACTION_ADMIN case StatementActionWrite: return s3_constants.ACTION_WRITE - case StatementActionWriteAcp: - return s3_constants.ACTION_WRITE_ACP case StatementActionRead: return s3_constants.ACTION_READ - case StatementActionReadAcp: - return s3_constants.ACTION_READ_ACP case StatementActionList: return s3_constants.ACTION_LIST case StatementActionTagging: return s3_constants.ACTION_TAGGING - case StatementActionDelete: - return s3_constants.ACTION_DELETE_BUCKET default: return "" } @@ -73,29 +61,35 @@ func MapToIdentitiesAction(action string) string { return StatementActionAdmin case s3_constants.ACTION_WRITE: return StatementActionWrite - case s3_constants.ACTION_WRITE_ACP: - return StatementActionWriteAcp case s3_constants.ACTION_READ: return StatementActionRead - case s3_constants.ACTION_READ_ACP: - return StatementActionReadAcp case s3_constants.ACTION_LIST: return StatementActionList case s3_constants.ACTION_TAGGING: return StatementActionTagging - case s3_constants.ACTION_DELETE_BUCKET: - return StatementActionDelete default: return "" } } -const ( - USER_DOES_NOT_EXIST = "the user with name %s cannot be found." -) +type Statement struct { + Effect string `json:"Effect"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} type Policies struct { - Policies map[string]policy_engine.PolicyDocument `json:"policies"` + Policies map[string]PolicyDocument `json:"policies"` +} + +type PolicyDocument struct { + Version string `json:"Version"` + Statement []*Statement `json:"Statement"` +} + +func (p PolicyDocument) String() string { + b, _ := json.Marshal(p) + return string(b) } func Hash(s *string) string { @@ -142,27 +136,27 @@ func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values ur return resp } -func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err *IamError) { +func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) { for i, ident := range s3cfg.Identities { if userName == ident.Name { s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) return resp, nil } } - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } -func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err *IamError) { +func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) { for _, ident := range s3cfg.Identities { if userName == ident.Name { resp.GetUserResult.User = iam.User{UserName: &ident.Name} return resp, nil } } - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } -func (iama *IamApiServer) UpdateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp UpdateUserResponse, err *IamError) { +func (iama *IamApiServer) UpdateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp UpdateUserResponse, err error) { userName := values.Get("UserName") newUserName := values.Get("NewUserName") if newUserName != "" { @@ -175,23 +169,22 @@ func (iama *IamApiServer) UpdateUser(s3cfg *iam_pb.S3ApiConfiguration, values ur } else { return resp, nil } - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } -func GetPolicyDocument(policy *string) (policy_engine.PolicyDocument, error) { - var policyDocument policy_engine.PolicyDocument - if err := json.Unmarshal([]byte(*policy), &policyDocument); err != nil { - return policy_engine.PolicyDocument{}, err +func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) { + if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil { + return PolicyDocument{}, err } - return policyDocument, nil + return policyDocument, err } -func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, iamError *IamError) { +func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) { policyName := values.Get("PolicyName") policyDocumentString := values.Get("PolicyDocument") policyDocument, err := GetPolicyDocument(&policyDocumentString) if err != nil { - return CreatePolicyResponse{}, &IamError{Code: iam.ErrCodeMalformedPolicyDocumentException, Error: err} + return CreatePolicyResponse{}, err } policyId := Hash(&policyDocumentString) arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName) @@ -202,47 +195,39 @@ func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values policyLock.Lock() defer policyLock.Unlock() if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil { - return resp, &IamError{Code: iam.ErrCodeServiceFailureException, Error: err} + return resp, err } policies.Policies[policyName] = policyDocument if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil { - return resp, &IamError{Code: iam.ErrCodeServiceFailureException, Error: err} + return resp, err } return resp, nil } -type IamError struct { - Code string - Error error -} - // https://docs.aws.amazon.com/IAM/latest/APIReference/API_PutUserPolicy.html -func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, iamError *IamError) { +func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { userName := values.Get("UserName") policyName := values.Get("PolicyName") policyDocumentString := values.Get("PolicyDocument") policyDocument, err := GetPolicyDocument(&policyDocumentString) if err != nil { - return PutUserPolicyResponse{}, &IamError{Code: iam.ErrCodeMalformedPolicyDocumentException, Error: err} + return PutUserPolicyResponse{}, err } policyDocuments[policyName] = &policyDocument - actions, err := GetActions(&policyDocument) - if err != nil { - return PutUserPolicyResponse{}, &IamError{Code: iam.ErrCodeMalformedPolicyDocumentException, Error: err} - } - // Log the actions - glog.V(3).Infof("PutUserPolicy: actions=%v", actions) + actions := GetActions(&policyDocument) for _, ident := range s3cfg.Identities { if userName != ident.Name { continue } - ident.Actions = actions + for _, action := range actions { + ident.Actions = append(ident.Actions, action) + } return resp, nil } - return PutUserPolicyResponse{}, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf("the user with name %s cannot be found", userName)} + return resp, fmt.Errorf("%s: the user with name %s cannot be found", iam.ErrCodeNoSuchEntityException, userName) } -func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err *IamError) { +func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) { userName := values.Get("UserName") policyName := values.Get("PolicyName") for _, ident := range s3cfg.Identities { @@ -253,10 +238,10 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values resp.GetUserPolicyResult.UserName = userName resp.GetUserPolicyResult.PolicyName = policyName if len(ident.Actions) == 0 { - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: errors.New("no actions found")} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } - policyDocument := policy_engine.PolicyDocument{Version: policyDocumentVersion} + policyDocument := PolicyDocument{Version: policyDocumentVersion} statements := make(map[string][]string) for _, action := range ident.Actions { // parse "Read:EXAMPLE-BUCKET" @@ -273,9 +258,9 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values for resource, actions := range statements { isEqAction := false for i, statement := range policyDocument.Statement { - if reflect.DeepEqual(statement.Action.Strings(), actions) { - policyDocument.Statement[i].Resource = policy_engine.NewStringOrStringSlice(append( - policyDocument.Statement[i].Resource.Strings(), resource)...) + if reflect.DeepEqual(statement.Action, actions) { + policyDocument.Statement[i].Resource = append( + policyDocument.Statement[i].Resource, resource) isEqAction = true break } @@ -283,24 +268,20 @@ func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values if isEqAction { continue } - policyDocumentStatement := policy_engine.PolicyStatement{ - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice(actions...), - Resource: policy_engine.NewStringOrStringSlice(resource), + policyDocumentStatement := Statement{ + Effect: "Allow", + Action: actions, } - policyDocument.Statement = append(policyDocument.Statement, policyDocumentStatement) + policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource) + policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement) } - policyDocumentJSON, err := json.Marshal(policyDocument) - if err != nil { - return resp, &IamError{Code: iam.ErrCodeServiceFailureException, Error: err} - } - resp.GetUserPolicyResult.PolicyDocument = string(policyDocumentJSON) + resp.GetUserPolicyResult.PolicyDocument = policyDocument.String() return resp, nil } - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } -func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err *IamError) { +func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { userName := values.Get("UserName") for i, ident := range s3cfg.Identities { if ident.Name == userName { @@ -308,44 +289,44 @@ func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, val return resp, nil } } - return resp, &IamError{Code: iam.ErrCodeNoSuchEntityException, Error: fmt.Errorf(USER_DOES_NOT_EXIST, userName)} + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) } -func GetActions(policy *policy_engine.PolicyDocument) ([]string, error) { - var actions []string - +func GetActions(policy *PolicyDocument) (actions []string) { for _, statement := range policy.Statement { - if statement.Effect != policy_engine.PolicyEffectAllow { - return nil, fmt.Errorf("not a valid effect: '%s'. Only 'Allow' is possible", statement.Effect) + if statement.Effect != "Allow" { + continue } - for _, resource := range statement.Resource.Strings() { + for _, resource := range statement.Resource { // Parse "arn:aws:s3:::my-bucket/shared/*" res := strings.Split(resource, ":") if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" { + glog.Infof("not match resource: %s", res) continue } - for _, action := range statement.Action.Strings() { + for _, action := range statement.Action { // Parse "s3:Get*" act := strings.Split(action, ":") if len(act) != 2 || act[0] != "s3" { + glog.Infof("not match action: %s", act) continue } statementAction := MapToStatementAction(act[1]) - - if statementAction == "" { - return nil, fmt.Errorf("not a valid action: '%s'", act[1]) - } - - path := res[5] - if path == "*" { + if res[5] == "*" { actions = append(actions, statementAction) continue } - actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path)) + // Parse my-bucket/shared/* + path := strings.Split(res[5], "/") + if len(path) != 2 || path[1] != "*" { + glog.Infof("not match bucket: %s", path) + continue + } + actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0])) } } } - return actions, nil + return actions } func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) { @@ -368,8 +349,7 @@ func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, valu } if !changed { s3cfg.Identities = append(s3cfg.Identities, - &iam_pb.Identity{ - Name: userName, + &iam_pb.Identity{Name: userName, Credentials: []*iam_pb.Credential{ { AccessKey: accessKeyId, @@ -435,14 +415,14 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { } values := r.PostForm s3cfg := &iam_pb.S3ApiConfiguration{} - if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil && !errors.Is(err, filer_pb.ErrNotFound) { + if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil { s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } glog.V(4).Infof("DoActions: %+v", values) var response interface{} - var iamError *IamError + var err error changed := true switch r.Form.Get("Action") { case "ListUsers": @@ -456,24 +436,24 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { response = iama.CreateUser(s3cfg, values) case "GetUser": userName := values.Get("UserName") - response, iamError = iama.GetUser(s3cfg, userName) - if iamError != nil { - writeIamErrorResponse(w, r, iamError) + response, err = iama.GetUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, r, err, "user", userName, nil) return } changed = false case "UpdateUser": - response, iamError = iama.UpdateUser(s3cfg, values) - if iamError != nil { - glog.Errorf("UpdateUser: %+v", iamError.Error) + response, err = iama.UpdateUser(s3cfg, values) + if err != nil { + glog.Errorf("UpdateUser: %+v", err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } case "DeleteUser": userName := values.Get("UserName") - response, iamError = iama.DeleteUser(s3cfg, userName) - if iamError != nil { - writeIamErrorResponse(w, r, iamError) + response, err = iama.DeleteUser(s3cfg, userName) + if err != nil { + writeIamErrorResponse(w, r, err, "user", userName, nil) return } case "CreateAccessKey": @@ -483,31 +463,29 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { handleImplicitUsername(r, values) response = iama.DeleteAccessKey(s3cfg, values) case "CreatePolicy": - response, iamError = iama.CreatePolicy(s3cfg, values) - if iamError != nil { - glog.Errorf("CreatePolicy: %+v", iamError.Error) + response, err = iama.CreatePolicy(s3cfg, values) + if err != nil { + glog.Errorf("CreatePolicy: %+v", err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } case "PutUserPolicy": - var iamError *IamError - response, iamError = iama.PutUserPolicy(s3cfg, values) - if iamError != nil { - glog.Errorf("PutUserPolicy: %+v", iamError.Error) - - writeIamErrorResponse(w, r, iamError) + response, err = iama.PutUserPolicy(s3cfg, values) + if err != nil { + glog.Errorf("PutUserPolicy: %+v", err) + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } case "GetUserPolicy": - response, iamError = iama.GetUserPolicy(s3cfg, values) - if iamError != nil { - writeIamErrorResponse(w, r, iamError) + response, err = iama.GetUserPolicy(s3cfg, values) + if err != nil { + writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil) return } changed = false case "DeleteUserPolicy": - if response, iamError = iama.DeleteUserPolicy(s3cfg, values); iamError != nil { - writeIamErrorResponse(w, r, iamError) + if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil { + writeIamErrorResponse(w, r, err, "user", values.Get("UserName"), nil) return } default: @@ -521,8 +499,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { if changed { err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg) if err != nil { - var iamError = IamError{Code: iam.ErrCodeServiceFailureException, Error: err} - writeIamErrorResponse(w, r, &iamError) + writeIamErrorResponse(w, r, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err) return } } diff --git a/weed/iamapi/iamapi_management_handlers_test.go b/weed/iamapi/iamapi_management_handlers_test.go deleted file mode 100644 index 5bc8eff67..000000000 --- a/weed/iamapi/iamapi_management_handlers_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package iamapi - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - "github.com/stretchr/testify/assert" -) - -func TestGetActionsUserPath(t *testing.T) { - - policyDocument := policy_engine.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy_engine.PolicyStatement{ - { - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice("s3:Put*", "s3:PutBucketAcl", "s3:Get*", "s3:GetBucketAcl", "s3:List*", "s3:Tagging*", "s3:DeleteBucket*"), - Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::shared/user-Alice/*"), - }, - }, - } - - actions, _ := GetActions(&policyDocument) - - expectedActions := []string{ - "Write:shared/user-Alice/*", - "WriteAcp:shared/user-Alice/*", - "Read:shared/user-Alice/*", - "ReadAcp:shared/user-Alice/*", - "List:shared/user-Alice/*", - "Tagging:shared/user-Alice/*", - "DeleteBucket:shared/user-Alice/*", - } - assert.Equal(t, expectedActions, actions) -} - -func TestGetActionsWildcardPath(t *testing.T) { - - policyDocument := policy_engine.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy_engine.PolicyStatement{ - { - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice("s3:Get*", "s3:PutBucketAcl"), - Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::*"), - }, - }, - } - - actions, _ := GetActions(&policyDocument) - - expectedActions := []string{ - "Read", - "WriteAcp", - } - assert.Equal(t, expectedActions, actions) -} - -func TestGetActionsInvalidAction(t *testing.T) { - policyDocument := policy_engine.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy_engine.PolicyStatement{ - { - Effect: policy_engine.PolicyEffectAllow, - Action: policy_engine.NewStringOrStringSlice("s3:InvalidAction"), - Resource: policy_engine.NewStringOrStringSlice("arn:aws:s3:::shared/user-Alice/*"), - }, - }, - } - - _, err := GetActions(&policyDocument) - assert.NotNil(t, err) - assert.Equal(t, "not a valid action: 'InvalidAction'", err.Error()) -} diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go index cf507ee82..252d5bd0e 100644 --- a/weed/iamapi/iamapi_server.go +++ b/weed/iamapi/iamapi_server.go @@ -4,23 +4,20 @@ package iamapi import ( "bytes" - "context" "encoding/json" "fmt" "net/http" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - . "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/wdclient" "google.golang.org/grpc" ) @@ -32,9 +29,8 @@ type IamS3ApiConfig interface { } type IamS3ApiConfigure struct { - option *IamServerOption - masterClient *wdclient.MasterClient - credentialManager *credential.CredentialManager + option *IamServerOption + masterClient *wdclient.MasterClient } type IamServerOption struct { @@ -52,28 +48,14 @@ type IamApiServer struct { var s3ApiConfigure IamS3ApiConfig func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) { - return NewIamApiServerWithStore(router, option, "") -} - -func NewIamApiServerWithStore(router *mux.Router, option *IamServerOption, explicitStore string) (iamApiServer *IamApiServer, err error) { - configure := &IamS3ApiConfigure{ + s3ApiConfigure = IamS3ApiConfigure{ option: option, - masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", "", *pb.NewServiceDiscoveryFromMap(option.Masters)), + masterClient: wdclient.NewMasterClient(option.GrpcDialOption, "", "iam", "", "", option.Masters), } - - s3ApiConfigure = configure - - s3Option := s3api.S3ApiServerOption{ - Filer: option.Filer, - GrpcDialOption: option.GrpcDialOption, - } - - iam := s3api.NewIdentityAccessManagementWithStore(&s3Option, explicitStore) - configure.credentialManager = iam.GetCredentialManager() - + s3Option := s3api.S3ApiServerOption{Filer: option.Filer} iamApiServer = &IamApiServer{ s3ApiConfig: s3ApiConfigure, - iam: iam, + iam: s3api.NewIdentityAccessManagement(&s3Option), } iamApiServer.registerRouter(router) @@ -87,37 +69,16 @@ func (iama *IamApiServer) registerRouter(router *mux.Router) { // ListBuckets // apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST")) - apiRouter.Methods(http.MethodPost).Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN)) + apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN)) // // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler) } -func (iama *IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { - return iama.GetS3ApiConfigurationFromCredentialManager(s3cfg) -} - -func (iama *IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { - return iama.PutS3ApiConfigurationToCredentialManager(s3cfg) -} - -func (iama *IamS3ApiConfigure) GetS3ApiConfigurationFromCredentialManager(s3cfg *iam_pb.S3ApiConfiguration) (err error) { - config, err := iama.credentialManager.LoadConfiguration(context.Background()) - if err != nil { - return fmt.Errorf("failed to load configuration from credential manager: %w", err) - } - *s3cfg = *config - return nil -} - -func (iama *IamS3ApiConfigure) PutS3ApiConfigurationToCredentialManager(s3cfg *iam_pb.S3ApiConfiguration) (err error) { - return iama.credentialManager.SaveConfiguration(context.Background(), s3cfg) -} - -func (iama *IamS3ApiConfigure) GetS3ApiConfigurationFromFiler(s3cfg *iam_pb.S3ApiConfiguration) (err error) { +func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { var buf bytes.Buffer - err = pb.WithGrpcFilerClient(false, 0, iama.option.Filer, iama.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - if err = filer.ReadEntry(iama.masterClient, client, filer.IamConfigDirectory, filer.IamIdentityFile, &buf); err != nil { + err = pb.WithGrpcFilerClient(false, iam.option.Filer, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil { return err } return nil @@ -133,14 +94,14 @@ func (iama *IamS3ApiConfigure) GetS3ApiConfigurationFromFiler(s3cfg *iam_pb.S3Ap return nil } -func (iama *IamS3ApiConfigure) PutS3ApiConfigurationToFiler(s3cfg *iam_pb.S3ApiConfiguration) (err error) { +func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { buf := bytes.Buffer{} if err := filer.ProtoToText(&buf, s3cfg); err != nil { return fmt.Errorf("ProtoToText: %s", err) } - return pb.WithGrpcFilerClient(false, 0, iama.option.Filer, iama.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithGrpcFilerClient(false, iam.option.Filer, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { err = util.Retry("saveIamIdentity", func() error { - return filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile, buf.Bytes()) + return filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()) }) if err != nil { return err @@ -149,10 +110,10 @@ func (iama *IamS3ApiConfigure) PutS3ApiConfigurationToFiler(s3cfg *iam_pb.S3ApiC }) } -func (iama *IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { +func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { var buf bytes.Buffer - err = pb.WithGrpcFilerClient(false, 0, iama.option.Filer, iama.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - if err = filer.ReadEntry(iama.masterClient, client, filer.IamConfigDirectory, filer.IamPoliciesFile, &buf); err != nil { + err = pb.WithGrpcFilerClient(false, iam.option.Filer, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil { return err } return nil @@ -161,7 +122,7 @@ func (iama *IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { return err } if err == filer_pb.ErrNotFound || buf.Len() == 0 { - policies.Policies = make(map[string]policy_engine.PolicyDocument) + policies.Policies = make(map[string]PolicyDocument) return nil } if err := json.Unmarshal(buf.Bytes(), policies); err != nil { @@ -170,13 +131,13 @@ func (iama *IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { return nil } -func (iama *IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) { +func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) { var b []byte if b, err = json.Marshal(policies); err != nil { return err } - return pb.WithGrpcFilerClient(false, 0, iama.option.Filer, iama.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - if err := filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamPoliciesFile, b); err != nil { + return pb.WithGrpcFilerClient(false, iam.option.Filer, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil { return err } return nil diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go index 94c48aa7f..375e9a2f3 100644 --- a/weed/iamapi/iamapi_test.go +++ b/weed/iamapi/iamapi_test.go @@ -5,16 +5,14 @@ import ( "net/http" "net/http/httptest" "net/url" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" "github.com/gorilla/mux" "github.com/jinzhu/copier" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" "github.com/stretchr/testify/assert" ) @@ -24,7 +22,7 @@ var GetPolicies func(policies *Policies) (err error) var PutPolicies func(policies *Policies) (err error) var s3config = iam_pb.S3ApiConfiguration{} -var policiesFile = Policies{Policies: make(map[string]policy_engine.PolicyDocument)} +var policiesFile = Policies{Policies: make(map[string]PolicyDocument)} var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}} type iamS3ApiConfigureMock struct{} @@ -154,53 +152,6 @@ func TestPutUserPolicy(t *testing.T) { assert.Equal(t, http.StatusOK, response.Code) } -func TestPutUserPolicyError(t *testing.T) { - userName := aws.String("InvalidUser") - params := &iam.PutUserPolicyInput{ - UserName: userName, - PolicyName: aws.String("S3-read-only-example-bucket"), - PolicyDocument: aws.String( - `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:Get*", - "s3:List*" - ], - "Resource": [ - "arn:aws:s3:::EXAMPLE-BUCKET", - "arn:aws:s3:::EXAMPLE-BUCKET/*" - ] - } - ] - }`), - } - req, _ := iam.New(session.New()).PutUserPolicyRequest(params) - _ = req.Build() - response, err := executeRequest(req.HTTPRequest, nil) - assert.Equal(t, nil, err) - assert.Equal(t, http.StatusNotFound, response.Code) - - expectedMessage := "the user with name InvalidUser cannot be found" - expectedCode := "NoSuchEntity" - - code, message := extractErrorCodeAndMessage(response) - - assert.Equal(t, expectedMessage, message) - assert.Equal(t, expectedCode, code) -} - -func extractErrorCodeAndMessage(response *httptest.ResponseRecorder) (string, string) { - pattern := `(.*)(.*)(.*)` - re := regexp.MustCompile(pattern) - - code := re.FindStringSubmatch(response.Body.String())[1] - message := re.FindStringSubmatch(response.Body.String())[2] - return code, message -} - func TestGetUserPolicy(t *testing.T) { userName := aws.String("Test") params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")} @@ -238,7 +189,7 @@ func TestDeleteUser(t *testing.T) { func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) { rr := httptest.NewRecorder() apiRouter := mux.NewRouter().SkipClean(true) - apiRouter.Path("/").Methods(http.MethodPost).HandlerFunc(ias.DoActions) + apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions) apiRouter.ServeHTTP(rr, req) return rr, xml.Unmarshal(rr.Body.Bytes(), &v) } diff --git a/weed/images/cropping.go b/weed/images/cropping.go deleted file mode 100644 index 8f9525d1a..000000000 --- a/weed/images/cropping.go +++ /dev/null @@ -1,47 +0,0 @@ -package images - -import ( - "bytes" - "image" - "image/gif" - "image/jpeg" - "image/png" - "io" - - "github.com/cognusion/imaging" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.ReadSeeker, err error) { - srcImage, _, err := image.Decode(read) - if err != nil { - glog.Error(err) - return read, err - } - - bounds := srcImage.Bounds() - if x2 > bounds.Dx() || y2 > bounds.Dy() { - read.Seek(0, 0) - return read, nil - } - - rectangle := image.Rect(x1, y1, x2, y2) - dstImage := imaging.Crop(srcImage, rectangle) - var buf bytes.Buffer - switch ext { - case ".jpg", ".jpeg": - if err = jpeg.Encode(&buf, dstImage, nil); err != nil { - glog.Error(err) - } - case ".png": - if err = png.Encode(&buf, dstImage); err != nil { - glog.Error(err) - } - case ".gif": - if err = gif.Encode(&buf, dstImage, nil); err != nil { - glog.Error(err) - } - } - return bytes.NewReader(buf.Bytes()), err -} diff --git a/weed/images/cropping_test.go b/weed/images/cropping_test.go deleted file mode 100644 index 284432e3a..000000000 --- a/weed/images/cropping_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package images - -import ( - "bytes" - "os" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func TestCropping(t *testing.T) { - fname := "sample1.jpg" - - dat, _ := os.ReadFile(fname) - - cropped, _ := Cropped(".jpg", bytes.NewReader(dat), 1072, 932, 1751, 1062) - buf := new(bytes.Buffer) - buf.ReadFrom(cropped) - - util.WriteFile("cropped1.jpg", buf.Bytes(), 0644) - -} diff --git a/weed/images/orientation.go b/weed/images/orientation.go index 5f3964005..a592a7d8b 100644 --- a/weed/images/orientation.go +++ b/weed/images/orientation.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/goexif/exif" ) -// many code is copied from http://camlistore.org/pkg/images/images.go +//many code is copied from http://camlistore.org/pkg/images/images.go func FixJpgOrientation(data []byte) (oriented []byte) { ex, err := exif.Decode(bytes.NewReader(data)) if err != nil { diff --git a/weed/images/orientation_test.go b/weed/images/orientation_test.go index c90b85a44..92bf4fb8f 100644 --- a/weed/images/orientation_test.go +++ b/weed/images/orientation_test.go @@ -1,7 +1,7 @@ package images import ( - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "os" "testing" ) diff --git a/weed/images/resizing.go b/weed/images/resizing.go index aee096cfb..20b9a5030 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -8,9 +8,9 @@ import ( "image/png" "io" - "github.com/cognusion/imaging" + "github.com/disintegration/imaging" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" _ "golang.org/x/image/webp" ) diff --git a/weed/images/resizing_test.go b/weed/images/resizing_test.go index 8e4bfaa25..035c42b4d 100644 --- a/weed/images/resizing_test.go +++ b/weed/images/resizing_test.go @@ -2,7 +2,7 @@ package images import ( "bytes" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "os" "testing" ) diff --git a/weed/kms/aws/aws_kms.go b/weed/kms/aws/aws_kms.go deleted file mode 100644 index ea1a24ced..000000000 --- a/weed/kms/aws/aws_kms.go +++ /dev/null @@ -1,389 +0,0 @@ -package aws - -import ( - "context" - "encoding/base64" - "fmt" - "net/http" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kms" - - "github.com/seaweedfs/seaweedfs/weed/glog" - seaweedkms "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - // Register the AWS KMS provider - seaweedkms.RegisterProvider("aws", NewAWSKMSProvider) -} - -// AWSKMSProvider implements the KMSProvider interface using AWS KMS -type AWSKMSProvider struct { - client *kms.KMS - region string - endpoint string // For testing with LocalStack or custom endpoints -} - -// AWSKMSConfig contains configuration for the AWS KMS provider -type AWSKMSConfig struct { - Region string `json:"region"` // AWS region (e.g., "us-east-1") - AccessKey string `json:"access_key"` // AWS access key (optional if using IAM roles) - SecretKey string `json:"secret_key"` // AWS secret key (optional if using IAM roles) - SessionToken string `json:"session_token"` // AWS session token (optional for STS) - Endpoint string `json:"endpoint"` // Custom endpoint (optional, for LocalStack/testing) - Profile string `json:"profile"` // AWS profile name (optional) - RoleARN string `json:"role_arn"` // IAM role ARN to assume (optional) - ExternalID string `json:"external_id"` // External ID for role assumption (optional) - ConnectTimeout int `json:"connect_timeout"` // Connection timeout in seconds (default: 10) - RequestTimeout int `json:"request_timeout"` // Request timeout in seconds (default: 30) - MaxRetries int `json:"max_retries"` // Maximum number of retries (default: 3) -} - -// NewAWSKMSProvider creates a new AWS KMS provider -func NewAWSKMSProvider(config util.Configuration) (seaweedkms.KMSProvider, error) { - if config == nil { - return nil, fmt.Errorf("AWS KMS configuration is required") - } - - // Extract configuration - region := config.GetString("region") - if region == "" { - region = "us-east-1" // Default region - } - - accessKey := config.GetString("access_key") - secretKey := config.GetString("secret_key") - sessionToken := config.GetString("session_token") - endpoint := config.GetString("endpoint") - profile := config.GetString("profile") - - // Timeouts and retries - connectTimeout := config.GetInt("connect_timeout") - if connectTimeout == 0 { - connectTimeout = 10 // Default 10 seconds - } - - requestTimeout := config.GetInt("request_timeout") - if requestTimeout == 0 { - requestTimeout = 30 // Default 30 seconds - } - - maxRetries := config.GetInt("max_retries") - if maxRetries == 0 { - maxRetries = 3 // Default 3 retries - } - - // Create AWS session - awsConfig := &aws.Config{ - Region: aws.String(region), - MaxRetries: aws.Int(maxRetries), - HTTPClient: &http.Client{ - Timeout: time.Duration(requestTimeout) * time.Second, - }, - } - - // Set custom endpoint if provided (for testing with LocalStack) - if endpoint != "" { - awsConfig.Endpoint = aws.String(endpoint) - awsConfig.DisableSSL = aws.Bool(strings.HasPrefix(endpoint, "http://")) - } - - // Configure credentials - if accessKey != "" && secretKey != "" { - awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken) - } else if profile != "" { - awsConfig.Credentials = credentials.NewSharedCredentials("", profile) - } - // If neither are provided, use default credential chain (IAM roles, etc.) - - sess, err := session.NewSession(awsConfig) - if err != nil { - return nil, fmt.Errorf("failed to create AWS session: %w", err) - } - - provider := &AWSKMSProvider{ - client: kms.New(sess), - region: region, - endpoint: endpoint, - } - - glog.V(1).Infof("AWS KMS provider initialized for region %s", region) - return provider, nil -} - -// GenerateDataKey generates a new data encryption key using AWS KMS -func (p *AWSKMSProvider) GenerateDataKey(ctx context.Context, req *seaweedkms.GenerateDataKeyRequest) (*seaweedkms.GenerateDataKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("GenerateDataKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Validate key spec - var keySpec string - switch req.KeySpec { - case seaweedkms.KeySpecAES256: - keySpec = "AES_256" - default: - return nil, fmt.Errorf("unsupported key spec: %s", req.KeySpec) - } - - // Build KMS request - kmsReq := &kms.GenerateDataKeyInput{ - KeyId: aws.String(req.KeyID), - KeySpec: aws.String(keySpec), - } - - // Add encryption context if provided - if len(req.EncryptionContext) > 0 { - kmsReq.EncryptionContext = aws.StringMap(req.EncryptionContext) - } - - // Call AWS KMS - glog.V(4).Infof("AWS KMS: Generating data key for key ID %s", req.KeyID) - result, err := p.client.GenerateDataKeyWithContext(ctx, kmsReq) - if err != nil { - return nil, p.convertAWSError(err, req.KeyID) - } - - // Extract the actual key ID from the response (resolves aliases) - actualKeyID := "" - if result.KeyId != nil { - actualKeyID = *result.KeyId - } - - // Create standardized envelope format for consistent API behavior - envelopeBlob, err := seaweedkms.CreateEnvelope("aws", actualKeyID, base64.StdEncoding.EncodeToString(result.CiphertextBlob), nil) - if err != nil { - return nil, fmt.Errorf("failed to create ciphertext envelope: %w", err) - } - - response := &seaweedkms.GenerateDataKeyResponse{ - KeyID: actualKeyID, - Plaintext: result.Plaintext, - CiphertextBlob: envelopeBlob, // Store in standardized envelope format - } - - glog.V(4).Infof("AWS KMS: Generated data key for key ID %s (actual: %s)", req.KeyID, actualKeyID) - return response, nil -} - -// Decrypt decrypts an encrypted data key using AWS KMS -func (p *AWSKMSProvider) Decrypt(ctx context.Context, req *seaweedkms.DecryptRequest) (*seaweedkms.DecryptResponse, error) { - if req == nil { - return nil, fmt.Errorf("DecryptRequest cannot be nil") - } - - if len(req.CiphertextBlob) == 0 { - return nil, fmt.Errorf("CiphertextBlob cannot be empty") - } - - // Parse the ciphertext envelope to extract key information - envelope, err := seaweedkms.ParseEnvelope(req.CiphertextBlob) - if err != nil { - return nil, fmt.Errorf("failed to parse ciphertext envelope: %w", err) - } - - if envelope.Provider != "aws" { - return nil, fmt.Errorf("invalid provider in envelope: expected 'aws', got '%s'", envelope.Provider) - } - - ciphertext, err := base64.StdEncoding.DecodeString(envelope.Ciphertext) - if err != nil { - return nil, fmt.Errorf("failed to decode ciphertext from envelope: %w", err) - } - - // Build KMS request - kmsReq := &kms.DecryptInput{ - CiphertextBlob: ciphertext, - } - - // Add encryption context if provided - if len(req.EncryptionContext) > 0 { - kmsReq.EncryptionContext = aws.StringMap(req.EncryptionContext) - } - - // Call AWS KMS - glog.V(4).Infof("AWS KMS: Decrypting data key (blob size: %d bytes)", len(req.CiphertextBlob)) - result, err := p.client.DecryptWithContext(ctx, kmsReq) - if err != nil { - return nil, p.convertAWSError(err, "") - } - - // Extract the key ID that was used for encryption - keyID := "" - if result.KeyId != nil { - keyID = *result.KeyId - } - - response := &seaweedkms.DecryptResponse{ - KeyID: keyID, - Plaintext: result.Plaintext, - } - - glog.V(4).Infof("AWS KMS: Decrypted data key using key ID %s", keyID) - return response, nil -} - -// DescribeKey validates that a key exists and returns its metadata -func (p *AWSKMSProvider) DescribeKey(ctx context.Context, req *seaweedkms.DescribeKeyRequest) (*seaweedkms.DescribeKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("DescribeKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Build KMS request - kmsReq := &kms.DescribeKeyInput{ - KeyId: aws.String(req.KeyID), - } - - // Call AWS KMS - glog.V(4).Infof("AWS KMS: Describing key %s", req.KeyID) - result, err := p.client.DescribeKeyWithContext(ctx, kmsReq) - if err != nil { - return nil, p.convertAWSError(err, req.KeyID) - } - - if result.KeyMetadata == nil { - return nil, fmt.Errorf("no key metadata returned from AWS KMS") - } - - metadata := result.KeyMetadata - response := &seaweedkms.DescribeKeyResponse{ - KeyID: aws.StringValue(metadata.KeyId), - ARN: aws.StringValue(metadata.Arn), - Description: aws.StringValue(metadata.Description), - } - - // Convert AWS key usage to our enum - if metadata.KeyUsage != nil { - switch *metadata.KeyUsage { - case "ENCRYPT_DECRYPT": - response.KeyUsage = seaweedkms.KeyUsageEncryptDecrypt - case "GENERATE_DATA_KEY": - response.KeyUsage = seaweedkms.KeyUsageGenerateDataKey - } - } - - // Convert AWS key state to our enum - if metadata.KeyState != nil { - switch *metadata.KeyState { - case "Enabled": - response.KeyState = seaweedkms.KeyStateEnabled - case "Disabled": - response.KeyState = seaweedkms.KeyStateDisabled - case "PendingDeletion": - response.KeyState = seaweedkms.KeyStatePendingDeletion - case "Unavailable": - response.KeyState = seaweedkms.KeyStateUnavailable - } - } - - // Convert AWS origin to our enum - if metadata.Origin != nil { - switch *metadata.Origin { - case "AWS_KMS": - response.Origin = seaweedkms.KeyOriginAWS - case "EXTERNAL": - response.Origin = seaweedkms.KeyOriginExternal - case "AWS_CLOUDHSM": - response.Origin = seaweedkms.KeyOriginCloudHSM - } - } - - glog.V(4).Infof("AWS KMS: Described key %s (actual: %s, state: %s)", req.KeyID, response.KeyID, response.KeyState) - return response, nil -} - -// GetKeyID resolves a key alias or ARN to the actual key ID -func (p *AWSKMSProvider) GetKeyID(ctx context.Context, keyIdentifier string) (string, error) { - if keyIdentifier == "" { - return "", fmt.Errorf("key identifier cannot be empty") - } - - // Use DescribeKey to resolve the key identifier - descReq := &seaweedkms.DescribeKeyRequest{KeyID: keyIdentifier} - descResp, err := p.DescribeKey(ctx, descReq) - if err != nil { - return "", fmt.Errorf("failed to resolve key identifier %s: %w", keyIdentifier, err) - } - - return descResp.KeyID, nil -} - -// Close cleans up any resources used by the provider -func (p *AWSKMSProvider) Close() error { - // AWS SDK clients don't require explicit cleanup - glog.V(2).Infof("AWS KMS provider closed") - return nil -} - -// convertAWSError converts AWS KMS errors to our standard KMS errors -func (p *AWSKMSProvider) convertAWSError(err error, keyID string) error { - if awsErr, ok := err.(awserr.Error); ok { - switch awsErr.Code() { - case "NotFoundException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeNotFoundException, - Message: awsErr.Message(), - KeyID: keyID, - } - case "DisabledException", "KeyUnavailableException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKeyUnavailable, - Message: awsErr.Message(), - KeyID: keyID, - } - case "AccessDeniedException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeAccessDenied, - Message: awsErr.Message(), - KeyID: keyID, - } - case "InvalidKeyUsageException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeInvalidKeyUsage, - Message: awsErr.Message(), - KeyID: keyID, - } - case "InvalidCiphertextException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeInvalidCiphertext, - Message: awsErr.Message(), - KeyID: keyID, - } - case "KMSInternalException", "KMSInvalidStateException": - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: awsErr.Message(), - KeyID: keyID, - } - default: - // For unknown AWS errors, wrap them as internal failures - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("AWS KMS error %s: %s", awsErr.Code(), awsErr.Message()), - KeyID: keyID, - } - } - } - - // For non-AWS errors (network issues, etc.), wrap as internal failure - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("AWS KMS provider error: %v", err), - KeyID: keyID, - } -} diff --git a/weed/kms/azure/azure_kms.go b/weed/kms/azure/azure_kms.go deleted file mode 100644 index 490e09848..000000000 --- a/weed/kms/azure/azure_kms.go +++ /dev/null @@ -1,379 +0,0 @@ -//go:build azurekms - -package azure - -import ( - "context" - "crypto/rand" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys" - - "github.com/seaweedfs/seaweedfs/weed/glog" - seaweedkms "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - // Register the Azure Key Vault provider - seaweedkms.RegisterProvider("azure", NewAzureKMSProvider) -} - -// AzureKMSProvider implements the KMSProvider interface using Azure Key Vault -type AzureKMSProvider struct { - client *azkeys.Client - vaultURL string - tenantID string - clientID string - clientSecret string -} - -// AzureKMSConfig contains configuration for the Azure Key Vault provider -type AzureKMSConfig struct { - VaultURL string `json:"vault_url"` // Azure Key Vault URL (e.g., "https://myvault.vault.azure.net/") - TenantID string `json:"tenant_id"` // Azure AD tenant ID - ClientID string `json:"client_id"` // Service principal client ID - ClientSecret string `json:"client_secret"` // Service principal client secret - Certificate string `json:"certificate"` // Certificate path for cert-based auth (alternative to client secret) - UseDefaultCreds bool `json:"use_default_creds"` // Use default Azure credentials (managed identity) - RequestTimeout int `json:"request_timeout"` // Request timeout in seconds (default: 30) -} - -// NewAzureKMSProvider creates a new Azure Key Vault provider -func NewAzureKMSProvider(config util.Configuration) (seaweedkms.KMSProvider, error) { - if config == nil { - return nil, fmt.Errorf("Azure Key Vault configuration is required") - } - - // Extract configuration - vaultURL := config.GetString("vault_url") - if vaultURL == "" { - return nil, fmt.Errorf("vault_url is required for Azure Key Vault provider") - } - - tenantID := config.GetString("tenant_id") - clientID := config.GetString("client_id") - clientSecret := config.GetString("client_secret") - useDefaultCreds := config.GetBool("use_default_creds") - - requestTimeout := config.GetInt("request_timeout") - if requestTimeout == 0 { - requestTimeout = 30 // Default 30 seconds - } - - // Create credential based on configuration - var credential azcore.TokenCredential - var err error - - if useDefaultCreds { - // Use default Azure credentials (managed identity, Azure CLI, etc.) - credential, err = azidentity.NewDefaultAzureCredential(nil) - if err != nil { - return nil, fmt.Errorf("failed to create default Azure credentials: %w", err) - } - glog.V(1).Infof("Azure KMS: Using default Azure credentials") - } else if clientID != "" && clientSecret != "" { - // Use service principal credentials - if tenantID == "" { - return nil, fmt.Errorf("tenant_id is required when using client credentials") - } - credential, err = azidentity.NewClientSecretCredential(tenantID, clientID, clientSecret, nil) - if err != nil { - return nil, fmt.Errorf("failed to create Azure client secret credential: %w", err) - } - glog.V(1).Infof("Azure KMS: Using client secret credentials for client ID %s", clientID) - } else { - return nil, fmt.Errorf("either use_default_creds=true or client_id+client_secret must be provided") - } - - // Create Key Vault client - clientOptions := &azkeys.ClientOptions{ - ClientOptions: azcore.ClientOptions{ - PerCallPolicies: []policy.Policy{}, - Transport: &http.Client{ - Timeout: time.Duration(requestTimeout) * time.Second, - }, - }, - } - - client, err := azkeys.NewClient(vaultURL, credential, clientOptions) - if err != nil { - return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err) - } - - provider := &AzureKMSProvider{ - client: client, - vaultURL: vaultURL, - tenantID: tenantID, - clientID: clientID, - clientSecret: clientSecret, - } - - glog.V(1).Infof("Azure Key Vault provider initialized for vault %s", vaultURL) - return provider, nil -} - -// GenerateDataKey generates a new data encryption key using Azure Key Vault -func (p *AzureKMSProvider) GenerateDataKey(ctx context.Context, req *seaweedkms.GenerateDataKeyRequest) (*seaweedkms.GenerateDataKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("GenerateDataKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Validate key spec - var keySize int - switch req.KeySpec { - case seaweedkms.KeySpecAES256: - keySize = 32 // 256 bits - default: - return nil, fmt.Errorf("unsupported key spec: %s", req.KeySpec) - } - - // Generate data key locally (Azure Key Vault doesn't have GenerateDataKey like AWS) - dataKey := make([]byte, keySize) - if _, err := rand.Read(dataKey); err != nil { - return nil, fmt.Errorf("failed to generate random data key: %w", err) - } - - // Encrypt the data key using Azure Key Vault - glog.V(4).Infof("Azure KMS: Encrypting data key using key %s", req.KeyID) - - // Prepare encryption parameters - algorithm := azkeys.JSONWebKeyEncryptionAlgorithmRSAOAEP256 - encryptParams := azkeys.KeyOperationsParameters{ - Algorithm: &algorithm, // Default encryption algorithm - Value: dataKey, - } - - // Add encryption context as Additional Authenticated Data (AAD) if provided - if len(req.EncryptionContext) > 0 { - // Marshal encryption context to JSON for deterministic AAD - aadBytes, err := json.Marshal(req.EncryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context: %w", err) - } - encryptParams.AAD = aadBytes - glog.V(4).Infof("Azure KMS: Using encryption context as AAD for key %s", req.KeyID) - } - - // Call Azure Key Vault to encrypt the data key - encryptResult, err := p.client.Encrypt(ctx, req.KeyID, "", encryptParams, nil) - if err != nil { - return nil, p.convertAzureError(err, req.KeyID) - } - - // Get the actual key ID from the response - actualKeyID := req.KeyID - if encryptResult.KID != nil { - actualKeyID = string(*encryptResult.KID) - } - - // Create standardized envelope format for consistent API behavior - envelopeBlob, err := seaweedkms.CreateEnvelope("azure", actualKeyID, string(encryptResult.Result), nil) - if err != nil { - return nil, fmt.Errorf("failed to create ciphertext envelope: %w", err) - } - - response := &seaweedkms.GenerateDataKeyResponse{ - KeyID: actualKeyID, - Plaintext: dataKey, - CiphertextBlob: envelopeBlob, // Store in standardized envelope format - } - - glog.V(4).Infof("Azure KMS: Generated and encrypted data key using key %s", actualKeyID) - return response, nil -} - -// Decrypt decrypts an encrypted data key using Azure Key Vault -func (p *AzureKMSProvider) Decrypt(ctx context.Context, req *seaweedkms.DecryptRequest) (*seaweedkms.DecryptResponse, error) { - if req == nil { - return nil, fmt.Errorf("DecryptRequest cannot be nil") - } - - if len(req.CiphertextBlob) == 0 { - return nil, fmt.Errorf("CiphertextBlob cannot be empty") - } - - // Parse the ciphertext envelope to extract key information - envelope, err := seaweedkms.ParseEnvelope(req.CiphertextBlob) - if err != nil { - return nil, fmt.Errorf("failed to parse ciphertext envelope: %w", err) - } - - keyID := envelope.KeyID - if keyID == "" { - return nil, fmt.Errorf("envelope missing key ID") - } - - // Convert string back to bytes - ciphertext := []byte(envelope.Ciphertext) - - // Prepare decryption parameters - decryptAlgorithm := azkeys.JSONWebKeyEncryptionAlgorithmRSAOAEP256 - decryptParams := azkeys.KeyOperationsParameters{ - Algorithm: &decryptAlgorithm, // Must match encryption algorithm - Value: ciphertext, - } - - // Add encryption context as Additional Authenticated Data (AAD) if provided - if len(req.EncryptionContext) > 0 { - // Marshal encryption context to JSON for deterministic AAD (must match encryption) - aadBytes, err := json.Marshal(req.EncryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context: %w", err) - } - decryptParams.AAD = aadBytes - glog.V(4).Infof("Azure KMS: Using encryption context as AAD for decryption of key %s", keyID) - } - - // Call Azure Key Vault to decrypt the data key - glog.V(4).Infof("Azure KMS: Decrypting data key using key %s", keyID) - decryptResult, err := p.client.Decrypt(ctx, keyID, "", decryptParams, nil) - if err != nil { - return nil, p.convertAzureError(err, keyID) - } - - // Get the actual key ID from the response - actualKeyID := keyID - if decryptResult.KID != nil { - actualKeyID = string(*decryptResult.KID) - } - - response := &seaweedkms.DecryptResponse{ - KeyID: actualKeyID, - Plaintext: decryptResult.Result, - } - - glog.V(4).Infof("Azure KMS: Decrypted data key using key %s", actualKeyID) - return response, nil -} - -// DescribeKey validates that a key exists and returns its metadata -func (p *AzureKMSProvider) DescribeKey(ctx context.Context, req *seaweedkms.DescribeKeyRequest) (*seaweedkms.DescribeKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("DescribeKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Get key from Azure Key Vault - glog.V(4).Infof("Azure KMS: Describing key %s", req.KeyID) - result, err := p.client.GetKey(ctx, req.KeyID, "", nil) - if err != nil { - return nil, p.convertAzureError(err, req.KeyID) - } - - if result.Key == nil { - return nil, fmt.Errorf("no key returned from Azure Key Vault") - } - - key := result.Key - response := &seaweedkms.DescribeKeyResponse{ - KeyID: req.KeyID, - Description: "Azure Key Vault key", // Azure doesn't provide description in the same way - } - - // Set ARN-like identifier for Azure - if key.KID != nil { - response.ARN = string(*key.KID) - response.KeyID = string(*key.KID) - } - - // Set key usage based on key operations - if key.KeyOps != nil && len(key.KeyOps) > 0 { - // Azure keys can have multiple operations, check if encrypt/decrypt are supported - for _, op := range key.KeyOps { - if op != nil && (*op == string(azkeys.JSONWebKeyOperationEncrypt) || *op == string(azkeys.JSONWebKeyOperationDecrypt)) { - response.KeyUsage = seaweedkms.KeyUsageEncryptDecrypt - break - } - } - } - - // Set key state based on enabled status - if result.Attributes != nil { - if result.Attributes.Enabled != nil && *result.Attributes.Enabled { - response.KeyState = seaweedkms.KeyStateEnabled - } else { - response.KeyState = seaweedkms.KeyStateDisabled - } - } - - // Azure Key Vault keys are managed by Azure - response.Origin = seaweedkms.KeyOriginAzure - - glog.V(4).Infof("Azure KMS: Described key %s (state: %s)", req.KeyID, response.KeyState) - return response, nil -} - -// GetKeyID resolves a key name to the full key identifier -func (p *AzureKMSProvider) GetKeyID(ctx context.Context, keyIdentifier string) (string, error) { - if keyIdentifier == "" { - return "", fmt.Errorf("key identifier cannot be empty") - } - - // Use DescribeKey to resolve and validate the key identifier - descReq := &seaweedkms.DescribeKeyRequest{KeyID: keyIdentifier} - descResp, err := p.DescribeKey(ctx, descReq) - if err != nil { - return "", fmt.Errorf("failed to resolve key identifier %s: %w", keyIdentifier, err) - } - - return descResp.KeyID, nil -} - -// Close cleans up any resources used by the provider -func (p *AzureKMSProvider) Close() error { - // Azure SDK clients don't require explicit cleanup - glog.V(2).Infof("Azure Key Vault provider closed") - return nil -} - -// convertAzureError converts Azure Key Vault errors to our standard KMS errors -func (p *AzureKMSProvider) convertAzureError(err error, keyID string) error { - // Azure SDK uses different error types, need to check for specific conditions - errMsg := err.Error() - - if strings.Contains(errMsg, "not found") || strings.Contains(errMsg, "NotFound") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeNotFoundException, - Message: fmt.Sprintf("Key not found in Azure Key Vault: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "access") || strings.Contains(errMsg, "Forbidden") || strings.Contains(errMsg, "Unauthorized") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeAccessDenied, - Message: fmt.Sprintf("Access denied to Azure Key Vault: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "disabled") || strings.Contains(errMsg, "unavailable") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKeyUnavailable, - Message: fmt.Sprintf("Key unavailable in Azure Key Vault: %v", err), - KeyID: keyID, - } - } - - // For unknown errors, wrap as internal failure - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("Azure Key Vault error: %v", err), - KeyID: keyID, - } -} diff --git a/weed/kms/config.go b/weed/kms/config.go deleted file mode 100644 index 8f3146c28..000000000 --- a/weed/kms/config.go +++ /dev/null @@ -1,480 +0,0 @@ -package kms - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// KMSManager manages KMS provider instances and configurations -type KMSManager struct { - mu sync.RWMutex - providers map[string]KMSProvider // provider name -> provider instance - configs map[string]*KMSConfig // provider name -> configuration - bucketKMS map[string]string // bucket name -> provider name - defaultKMS string // default KMS provider name -} - -// KMSConfig represents a complete KMS provider configuration -type KMSConfig struct { - Provider string `json:"provider"` // Provider type (aws, azure, gcp, local) - Config map[string]interface{} `json:"config"` // Provider-specific configuration - CacheEnabled bool `json:"cache_enabled"` // Enable data key caching - CacheTTL time.Duration `json:"cache_ttl"` // Cache TTL (default: 1 hour) - MaxCacheSize int `json:"max_cache_size"` // Maximum cached keys (default: 1000) -} - -// BucketKMSConfig represents KMS configuration for a specific bucket -type BucketKMSConfig struct { - Provider string `json:"provider"` // KMS provider to use - KeyID string `json:"key_id"` // Default KMS key ID for this bucket - BucketKey bool `json:"bucket_key"` // Enable S3 Bucket Keys optimization - Context map[string]string `json:"context"` // Additional encryption context - Enabled bool `json:"enabled"` // Whether KMS encryption is enabled -} - -// configAdapter adapts KMSConfig.Config to util.Configuration interface -type configAdapter struct { - config map[string]interface{} -} - -// GetConfigMap returns the underlying configuration map for direct access -func (c *configAdapter) GetConfigMap() map[string]interface{} { - return c.config -} - -func (c *configAdapter) GetString(key string) string { - if val, ok := c.config[key]; ok { - if str, ok := val.(string); ok { - return str - } - } - return "" -} - -func (c *configAdapter) GetBool(key string) bool { - if val, ok := c.config[key]; ok { - if b, ok := val.(bool); ok { - return b - } - } - return false -} - -func (c *configAdapter) GetInt(key string) int { - if val, ok := c.config[key]; ok { - if i, ok := val.(int); ok { - return i - } - if f, ok := val.(float64); ok { - return int(f) - } - } - return 0 -} - -func (c *configAdapter) GetStringSlice(key string) []string { - if val, ok := c.config[key]; ok { - if slice, ok := val.([]string); ok { - return slice - } - if interfaceSlice, ok := val.([]interface{}); ok { - result := make([]string, len(interfaceSlice)) - for i, v := range interfaceSlice { - if str, ok := v.(string); ok { - result[i] = str - } - } - return result - } - } - return nil -} - -func (c *configAdapter) SetDefault(key string, value interface{}) { - if c.config == nil { - c.config = make(map[string]interface{}) - } - if _, exists := c.config[key]; !exists { - c.config[key] = value - } -} - -var ( - globalKMSManager *KMSManager - globalKMSMutex sync.RWMutex - - // Global KMS provider for legacy compatibility - globalKMSProvider KMSProvider -) - -// InitializeGlobalKMS initializes the global KMS provider -func InitializeGlobalKMS(config *KMSConfig) error { - if config == nil || config.Provider == "" { - return fmt.Errorf("KMS configuration is required") - } - - // Adapt the config to util.Configuration interface - var providerConfig util.Configuration - if config.Config != nil { - providerConfig = &configAdapter{config: config.Config} - } - - provider, err := GetProvider(config.Provider, providerConfig) - if err != nil { - return err - } - - globalKMSMutex.Lock() - defer globalKMSMutex.Unlock() - - // Close existing provider if any - if globalKMSProvider != nil { - globalKMSProvider.Close() - } - - globalKMSProvider = provider - return nil -} - -// GetGlobalKMS returns the global KMS provider -func GetGlobalKMS() KMSProvider { - globalKMSMutex.RLock() - defer globalKMSMutex.RUnlock() - return globalKMSProvider -} - -// IsKMSEnabled returns true if KMS is enabled globally -func IsKMSEnabled() bool { - return GetGlobalKMS() != nil -} - -// SetGlobalKMSProvider sets the global KMS provider. -// This is mainly for backward compatibility. -func SetGlobalKMSProvider(provider KMSProvider) { - globalKMSMutex.Lock() - defer globalKMSMutex.Unlock() - - // Close existing provider if any - if globalKMSProvider != nil { - globalKMSProvider.Close() - } - - globalKMSProvider = provider -} - -// InitializeKMSManager initializes the global KMS manager -func InitializeKMSManager() *KMSManager { - globalKMSMutex.Lock() - defer globalKMSMutex.Unlock() - - if globalKMSManager == nil { - globalKMSManager = &KMSManager{ - providers: make(map[string]KMSProvider), - configs: make(map[string]*KMSConfig), - bucketKMS: make(map[string]string), - } - glog.V(1).Infof("KMS Manager initialized") - } - - return globalKMSManager -} - -// GetKMSManager returns the global KMS manager -func GetKMSManager() *KMSManager { - globalKMSMutex.RLock() - manager := globalKMSManager - globalKMSMutex.RUnlock() - - if manager == nil { - return InitializeKMSManager() - } - - return manager -} - -// AddKMSProvider adds a KMS provider configuration -func (km *KMSManager) AddKMSProvider(name string, config *KMSConfig) error { - if name == "" { - return fmt.Errorf("provider name cannot be empty") - } - - if config == nil { - return fmt.Errorf("KMS configuration cannot be nil") - } - - km.mu.Lock() - defer km.mu.Unlock() - - // Close existing provider if it exists - if existingProvider, exists := km.providers[name]; exists { - if err := existingProvider.Close(); err != nil { - glog.Errorf("Failed to close existing KMS provider %s: %v", name, err) - } - } - - // Create new provider instance - configAdapter := &configAdapter{config: config.Config} - provider, err := GetProvider(config.Provider, configAdapter) - if err != nil { - return fmt.Errorf("failed to create KMS provider %s: %w", name, err) - } - - // Store provider and configuration - km.providers[name] = provider - km.configs[name] = config - - glog.V(1).Infof("Added KMS provider %s (type: %s)", name, config.Provider) - return nil -} - -// SetDefaultKMSProvider sets the default KMS provider -func (km *KMSManager) SetDefaultKMSProvider(name string) error { - km.mu.RLock() - _, exists := km.providers[name] - km.mu.RUnlock() - - if !exists { - return fmt.Errorf("KMS provider %s does not exist", name) - } - - km.mu.Lock() - km.defaultKMS = name - km.mu.Unlock() - - glog.V(1).Infof("Set default KMS provider to %s", name) - return nil -} - -// SetBucketKMSProvider sets the KMS provider for a specific bucket -func (km *KMSManager) SetBucketKMSProvider(bucket, providerName string) error { - if bucket == "" { - return fmt.Errorf("bucket name cannot be empty") - } - - km.mu.RLock() - _, exists := km.providers[providerName] - km.mu.RUnlock() - - if !exists { - return fmt.Errorf("KMS provider %s does not exist", providerName) - } - - km.mu.Lock() - km.bucketKMS[bucket] = providerName - km.mu.Unlock() - - glog.V(2).Infof("Set KMS provider for bucket %s to %s", bucket, providerName) - return nil -} - -// GetKMSProvider returns the KMS provider for a bucket (or default if not configured) -func (km *KMSManager) GetKMSProvider(bucket string) (KMSProvider, error) { - km.mu.RLock() - defer km.mu.RUnlock() - - // Try bucket-specific provider first - if bucket != "" { - if providerName, exists := km.bucketKMS[bucket]; exists { - if provider, exists := km.providers[providerName]; exists { - return provider, nil - } - } - } - - // Fall back to default provider - if km.defaultKMS != "" { - if provider, exists := km.providers[km.defaultKMS]; exists { - return provider, nil - } - } - - // No provider configured - return nil, fmt.Errorf("no KMS provider configured for bucket %s", bucket) -} - -// GetKMSProviderByName returns a specific KMS provider by name -func (km *KMSManager) GetKMSProviderByName(name string) (KMSProvider, error) { - km.mu.RLock() - defer km.mu.RUnlock() - - provider, exists := km.providers[name] - if !exists { - return nil, fmt.Errorf("KMS provider %s not found", name) - } - - return provider, nil -} - -// ListKMSProviders returns all configured KMS provider names -func (km *KMSManager) ListKMSProviders() []string { - km.mu.RLock() - defer km.mu.RUnlock() - - names := make([]string, 0, len(km.providers)) - for name := range km.providers { - names = append(names, name) - } - - return names -} - -// GetBucketKMSProvider returns the KMS provider name for a bucket -func (km *KMSManager) GetBucketKMSProvider(bucket string) string { - km.mu.RLock() - defer km.mu.RUnlock() - - if providerName, exists := km.bucketKMS[bucket]; exists { - return providerName - } - - return km.defaultKMS -} - -// RemoveKMSProvider removes a KMS provider -func (km *KMSManager) RemoveKMSProvider(name string) error { - km.mu.Lock() - defer km.mu.Unlock() - - provider, exists := km.providers[name] - if !exists { - return fmt.Errorf("KMS provider %s does not exist", name) - } - - // Close the provider - if err := provider.Close(); err != nil { - glog.Errorf("Failed to close KMS provider %s: %v", name, err) - } - - // Remove from maps - delete(km.providers, name) - delete(km.configs, name) - - // Remove from bucket associations - for bucket, providerName := range km.bucketKMS { - if providerName == name { - delete(km.bucketKMS, bucket) - } - } - - // Clear default if it was this provider - if km.defaultKMS == name { - km.defaultKMS = "" - } - - glog.V(1).Infof("Removed KMS provider %s", name) - return nil -} - -// Close closes all KMS providers and cleans up resources -func (km *KMSManager) Close() error { - km.mu.Lock() - defer km.mu.Unlock() - - var allErrors []error - for name, provider := range km.providers { - if err := provider.Close(); err != nil { - allErrors = append(allErrors, fmt.Errorf("failed to close KMS provider %s: %w", name, err)) - } - } - - // Clear all maps - km.providers = make(map[string]KMSProvider) - km.configs = make(map[string]*KMSConfig) - km.bucketKMS = make(map[string]string) - km.defaultKMS = "" - - if len(allErrors) > 0 { - return fmt.Errorf("errors closing KMS providers: %v", allErrors) - } - - glog.V(1).Infof("KMS Manager closed") - return nil -} - -// GenerateDataKeyForBucket generates a data key using the appropriate KMS provider for a bucket -func (km *KMSManager) GenerateDataKeyForBucket(ctx context.Context, bucket, keyID string, keySpec KeySpec, encryptionContext map[string]string) (*GenerateDataKeyResponse, error) { - provider, err := km.GetKMSProvider(bucket) - if err != nil { - return nil, fmt.Errorf("failed to get KMS provider for bucket %s: %w", bucket, err) - } - - req := &GenerateDataKeyRequest{ - KeyID: keyID, - KeySpec: keySpec, - EncryptionContext: encryptionContext, - } - - return provider.GenerateDataKey(ctx, req) -} - -// DecryptForBucket decrypts a data key using the appropriate KMS provider for a bucket -func (km *KMSManager) DecryptForBucket(ctx context.Context, bucket string, ciphertextBlob []byte, encryptionContext map[string]string) (*DecryptResponse, error) { - provider, err := km.GetKMSProvider(bucket) - if err != nil { - return nil, fmt.Errorf("failed to get KMS provider for bucket %s: %w", bucket, err) - } - - req := &DecryptRequest{ - CiphertextBlob: ciphertextBlob, - EncryptionContext: encryptionContext, - } - - return provider.Decrypt(ctx, req) -} - -// ValidateKeyForBucket validates that a KMS key exists and is usable for a bucket -func (km *KMSManager) ValidateKeyForBucket(ctx context.Context, bucket, keyID string) error { - provider, err := km.GetKMSProvider(bucket) - if err != nil { - return fmt.Errorf("failed to get KMS provider for bucket %s: %w", bucket, err) - } - - req := &DescribeKeyRequest{KeyID: keyID} - resp, err := provider.DescribeKey(ctx, req) - if err != nil { - return fmt.Errorf("failed to validate key %s for bucket %s: %w", keyID, bucket, err) - } - - // Check key state - if resp.KeyState != KeyStateEnabled { - return fmt.Errorf("key %s is not enabled (state: %s)", keyID, resp.KeyState) - } - - // Check key usage - if resp.KeyUsage != KeyUsageEncryptDecrypt && resp.KeyUsage != KeyUsageGenerateDataKey { - return fmt.Errorf("key %s cannot be used for encryption (usage: %s)", keyID, resp.KeyUsage) - } - - return nil -} - -// GetKMSHealth returns health status of all KMS providers -func (km *KMSManager) GetKMSHealth(ctx context.Context) map[string]error { - km.mu.RLock() - defer km.mu.RUnlock() - - health := make(map[string]error) - - for name, provider := range km.providers { - // Try to perform a basic operation to check health - // We'll use DescribeKey with a dummy key - the error will tell us if KMS is reachable - req := &DescribeKeyRequest{KeyID: "health-check-dummy-key"} - _, err := provider.DescribeKey(ctx, req) - - // If it's a "not found" error, KMS is healthy but key doesn't exist (expected) - if kmsErr, ok := err.(*KMSError); ok && kmsErr.Code == ErrCodeNotFoundException { - health[name] = nil // Healthy - } else if err != nil { - health[name] = err // Unhealthy - } else { - health[name] = nil // Healthy (shouldn't happen with dummy key, but just in case) - } - } - - return health -} diff --git a/weed/kms/config_loader.go b/weed/kms/config_loader.go deleted file mode 100644 index 3778c0f59..000000000 --- a/weed/kms/config_loader.go +++ /dev/null @@ -1,426 +0,0 @@ -package kms - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// ViperConfig interface extends Configuration with additional methods needed for KMS configuration -type ViperConfig interface { - GetString(key string) string - GetBool(key string) bool - GetInt(key string) int - GetStringSlice(key string) []string - SetDefault(key string, value interface{}) - GetStringMap(key string) map[string]interface{} - IsSet(key string) bool -} - -// ConfigLoader handles loading KMS configurations from filer.toml -type ConfigLoader struct { - viper ViperConfig - manager *KMSManager -} - -// NewConfigLoader creates a new KMS configuration loader -func NewConfigLoader(v ViperConfig) *ConfigLoader { - return &ConfigLoader{ - viper: v, - manager: GetKMSManager(), - } -} - -// LoadConfigurations loads all KMS provider configurations from filer.toml -func (loader *ConfigLoader) LoadConfigurations() error { - // Check if KMS section exists - if !loader.viper.IsSet("kms") { - glog.V(1).Infof("No KMS configuration found in filer.toml") - return nil - } - - // Get the KMS configuration section - kmsConfig := loader.viper.GetStringMap("kms") - - // Load global KMS settings - if err := loader.loadGlobalKMSSettings(kmsConfig); err != nil { - return fmt.Errorf("failed to load global KMS settings: %w", err) - } - - // Load KMS providers - if providersConfig, exists := kmsConfig["providers"]; exists { - if providers, ok := providersConfig.(map[string]interface{}); ok { - if err := loader.loadKMSProviders(providers); err != nil { - return fmt.Errorf("failed to load KMS providers: %w", err) - } - } - } - - // Set default provider after all providers are loaded - if err := loader.setDefaultProvider(); err != nil { - return fmt.Errorf("failed to set default KMS provider: %w", err) - } - - // Initialize global KMS provider for backwards compatibility - if err := loader.initializeGlobalKMSProvider(); err != nil { - glog.Warningf("Failed to initialize global KMS provider: %v", err) - } - - // Load bucket-specific KMS configurations - if bucketsConfig, exists := kmsConfig["buckets"]; exists { - if buckets, ok := bucketsConfig.(map[string]interface{}); ok { - if err := loader.loadBucketKMSConfigurations(buckets); err != nil { - return fmt.Errorf("failed to load bucket KMS configurations: %w", err) - } - } - } - - glog.V(1).Infof("KMS configuration loaded successfully") - return nil -} - -// loadGlobalKMSSettings loads global KMS settings -func (loader *ConfigLoader) loadGlobalKMSSettings(kmsConfig map[string]interface{}) error { - // Set default KMS provider if specified - if defaultProvider, exists := kmsConfig["default_provider"]; exists { - if providerName, ok := defaultProvider.(string); ok { - // We'll set this after providers are loaded - glog.V(2).Infof("Default KMS provider will be set to: %s", providerName) - } - } - - return nil -} - -// loadKMSProviders loads individual KMS provider configurations -func (loader *ConfigLoader) loadKMSProviders(providers map[string]interface{}) error { - for providerName, providerConfigInterface := range providers { - providerConfig, ok := providerConfigInterface.(map[string]interface{}) - if !ok { - glog.Warningf("Invalid configuration for KMS provider %s", providerName) - continue - } - - if err := loader.loadSingleKMSProvider(providerName, providerConfig); err != nil { - glog.Errorf("Failed to load KMS provider %s: %v", providerName, err) - continue - } - - glog.V(1).Infof("Loaded KMS provider: %s", providerName) - } - - return nil -} - -// loadSingleKMSProvider loads a single KMS provider configuration -func (loader *ConfigLoader) loadSingleKMSProvider(providerName string, config map[string]interface{}) error { - // Get provider type - providerType, exists := config["type"] - if !exists { - return fmt.Errorf("provider type not specified for %s", providerName) - } - - providerTypeStr, ok := providerType.(string) - if !ok { - return fmt.Errorf("invalid provider type for %s", providerName) - } - - // Get provider-specific configuration - providerConfig := make(map[string]interface{}) - for key, value := range config { - if key != "type" { - providerConfig[key] = value - } - } - - // Set default cache settings if not specified - if _, exists := providerConfig["cache_enabled"]; !exists { - providerConfig["cache_enabled"] = true - } - - if _, exists := providerConfig["cache_ttl"]; !exists { - providerConfig["cache_ttl"] = "1h" - } - - if _, exists := providerConfig["max_cache_size"]; !exists { - providerConfig["max_cache_size"] = 1000 - } - - // Parse cache TTL - cacheTTL := time.Hour // default - if ttlStr, exists := providerConfig["cache_ttl"]; exists { - if ttlStrValue, ok := ttlStr.(string); ok { - if parsed, err := time.ParseDuration(ttlStrValue); err == nil { - cacheTTL = parsed - } - } - } - - // Create KMS configuration - kmsConfig := &KMSConfig{ - Provider: providerTypeStr, - Config: providerConfig, - CacheEnabled: getBoolFromConfig(providerConfig, "cache_enabled", true), - CacheTTL: cacheTTL, - MaxCacheSize: getIntFromConfig(providerConfig, "max_cache_size", 1000), - } - - // Add the provider to the KMS manager - if err := loader.manager.AddKMSProvider(providerName, kmsConfig); err != nil { - return err - } - - // Test the provider with a health check - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - health := loader.manager.GetKMSHealth(ctx) - if providerHealth, exists := health[providerName]; exists && providerHealth != nil { - glog.Warningf("KMS provider %s health check failed: %v", providerName, providerHealth) - } - - return nil -} - -// loadBucketKMSConfigurations loads bucket-specific KMS configurations -func (loader *ConfigLoader) loadBucketKMSConfigurations(buckets map[string]interface{}) error { - for bucketName, bucketConfigInterface := range buckets { - bucketConfig, ok := bucketConfigInterface.(map[string]interface{}) - if !ok { - glog.Warningf("Invalid KMS configuration for bucket %s", bucketName) - continue - } - - // Get provider for this bucket - if provider, exists := bucketConfig["provider"]; exists { - if providerName, ok := provider.(string); ok { - if err := loader.manager.SetBucketKMSProvider(bucketName, providerName); err != nil { - glog.Errorf("Failed to set KMS provider for bucket %s: %v", bucketName, err) - continue - } - glog.V(2).Infof("Set KMS provider for bucket %s to %s", bucketName, providerName) - } - } - } - - return nil -} - -// setDefaultProvider sets the default KMS provider after all providers are loaded -func (loader *ConfigLoader) setDefaultProvider() error { - kmsConfig := loader.viper.GetStringMap("kms") - if defaultProvider, exists := kmsConfig["default_provider"]; exists { - if providerName, ok := defaultProvider.(string); ok { - if err := loader.manager.SetDefaultKMSProvider(providerName); err != nil { - return fmt.Errorf("failed to set default KMS provider: %w", err) - } - glog.V(1).Infof("Set default KMS provider to: %s", providerName) - } - } - return nil -} - -// initializeGlobalKMSProvider initializes the global KMS provider for backwards compatibility -func (loader *ConfigLoader) initializeGlobalKMSProvider() error { - // Get the default provider from the manager - defaultProviderName := "" - kmsConfig := loader.viper.GetStringMap("kms") - if defaultProvider, exists := kmsConfig["default_provider"]; exists { - if providerName, ok := defaultProvider.(string); ok { - defaultProviderName = providerName - } - } - - if defaultProviderName == "" { - // If no default provider, try to use the first available provider - providers := loader.manager.ListKMSProviders() - if len(providers) > 0 { - defaultProviderName = providers[0] - } - } - - if defaultProviderName == "" { - glog.V(2).Infof("No KMS providers configured, skipping global KMS initialization") - return nil - } - - // Get the provider from the manager - provider, err := loader.manager.GetKMSProviderByName(defaultProviderName) - if err != nil { - return fmt.Errorf("failed to get KMS provider %s: %w", defaultProviderName, err) - } - - // Set as global KMS provider - SetGlobalKMSProvider(provider) - glog.V(1).Infof("Initialized global KMS provider: %s", defaultProviderName) - - return nil -} - -// ValidateConfiguration validates the KMS configuration -func (loader *ConfigLoader) ValidateConfiguration() error { - providers := loader.manager.ListKMSProviders() - if len(providers) == 0 { - glog.V(1).Infof("No KMS providers configured") - return nil - } - - // Test connectivity to all providers - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - health := loader.manager.GetKMSHealth(ctx) - hasHealthyProvider := false - - for providerName, err := range health { - if err != nil { - glog.Warningf("KMS provider %s is unhealthy: %v", providerName, err) - } else { - hasHealthyProvider = true - glog.V(2).Infof("KMS provider %s is healthy", providerName) - } - } - - if !hasHealthyProvider { - glog.Warningf("No healthy KMS providers found") - } - - return nil -} - -// LoadKMSFromFilerToml is a convenience function to load KMS configuration from filer.toml -func LoadKMSFromFilerToml(v ViperConfig) error { - loader := NewConfigLoader(v) - if err := loader.LoadConfigurations(); err != nil { - return err - } - return loader.ValidateConfiguration() -} - -// LoadKMSFromConfig loads KMS configuration directly from parsed JSON data -func LoadKMSFromConfig(kmsConfig interface{}) error { - kmsMap, ok := kmsConfig.(map[string]interface{}) - if !ok { - return fmt.Errorf("invalid KMS configuration format") - } - - // Create a direct config adapter that doesn't use Viper - // Wrap the KMS config under a "kms" key as expected by LoadConfigurations - wrappedConfig := map[string]interface{}{ - "kms": kmsMap, - } - adapter := &directConfigAdapter{config: wrappedConfig} - loader := NewConfigLoader(adapter) - - if err := loader.LoadConfigurations(); err != nil { - return err - } - - return loader.ValidateConfiguration() -} - -// directConfigAdapter implements ViperConfig interface for direct map access -type directConfigAdapter struct { - config map[string]interface{} -} - -func (d *directConfigAdapter) GetStringMap(key string) map[string]interface{} { - if val, exists := d.config[key]; exists { - if mapVal, ok := val.(map[string]interface{}); ok { - return mapVal - } - } - return make(map[string]interface{}) -} - -func (d *directConfigAdapter) GetString(key string) string { - if val, exists := d.config[key]; exists { - if strVal, ok := val.(string); ok { - return strVal - } - } - return "" -} - -func (d *directConfigAdapter) GetBool(key string) bool { - if val, exists := d.config[key]; exists { - if boolVal, ok := val.(bool); ok { - return boolVal - } - } - return false -} - -func (d *directConfigAdapter) GetInt(key string) int { - if val, exists := d.config[key]; exists { - switch v := val.(type) { - case int: - return v - case float64: - return int(v) - } - } - return 0 -} - -func (d *directConfigAdapter) GetStringSlice(key string) []string { - if val, exists := d.config[key]; exists { - if sliceVal, ok := val.([]interface{}); ok { - result := make([]string, len(sliceVal)) - for i, item := range sliceVal { - if strItem, ok := item.(string); ok { - result[i] = strItem - } - } - return result - } - if strSlice, ok := val.([]string); ok { - return strSlice - } - } - return []string{} -} - -func (d *directConfigAdapter) SetDefault(key string, value interface{}) { - // For direct config adapter, we don't need to set defaults - // as the configuration is already parsed -} - -func (d *directConfigAdapter) IsSet(key string) bool { - _, exists := d.config[key] - return exists -} - -// Helper functions - -func getBoolFromConfig(config map[string]interface{}, key string, defaultValue bool) bool { - if value, exists := config[key]; exists { - if boolValue, ok := value.(bool); ok { - return boolValue - } - } - return defaultValue -} - -func getIntFromConfig(config map[string]interface{}, key string, defaultValue int) int { - if value, exists := config[key]; exists { - if intValue, ok := value.(int); ok { - return intValue - } - if floatValue, ok := value.(float64); ok { - return int(floatValue) - } - } - return defaultValue -} - -func getStringFromConfig(config map[string]interface{}, key string, defaultValue string) string { - if value, exists := config[key]; exists { - if stringValue, ok := value.(string); ok { - return stringValue - } - } - return defaultValue -} diff --git a/weed/kms/envelope.go b/weed/kms/envelope.go deleted file mode 100644 index 60542b8a4..000000000 --- a/weed/kms/envelope.go +++ /dev/null @@ -1,79 +0,0 @@ -package kms - -import ( - "encoding/json" - "fmt" -) - -// CiphertextEnvelope represents a standardized format for storing encrypted data -// along with the metadata needed for decryption. This ensures consistent API -// behavior across all KMS providers. -type CiphertextEnvelope struct { - // Provider identifies which KMS provider was used - Provider string `json:"provider"` - - // KeyID is the identifier of the key used for encryption - KeyID string `json:"key_id"` - - // Ciphertext is the encrypted data (base64 encoded for JSON compatibility) - Ciphertext string `json:"ciphertext"` - - // Version allows for future format changes - Version int `json:"version"` - - // ProviderSpecific contains provider-specific metadata if needed - ProviderSpecific map[string]interface{} `json:"provider_specific,omitempty"` -} - -// CreateEnvelope creates a ciphertext envelope for consistent KMS provider behavior -func CreateEnvelope(provider, keyID, ciphertext string, providerSpecific map[string]interface{}) ([]byte, error) { - // Validate required fields - if provider == "" { - return nil, fmt.Errorf("provider cannot be empty") - } - if keyID == "" { - return nil, fmt.Errorf("keyID cannot be empty") - } - if ciphertext == "" { - return nil, fmt.Errorf("ciphertext cannot be empty") - } - - envelope := CiphertextEnvelope{ - Provider: provider, - KeyID: keyID, - Ciphertext: ciphertext, - Version: 1, - ProviderSpecific: providerSpecific, - } - - return json.Marshal(envelope) -} - -// ParseEnvelope parses a ciphertext envelope to extract key information -func ParseEnvelope(ciphertextBlob []byte) (*CiphertextEnvelope, error) { - if len(ciphertextBlob) == 0 { - return nil, fmt.Errorf("ciphertext blob cannot be empty") - } - - // Parse as envelope format - var envelope CiphertextEnvelope - if err := json.Unmarshal(ciphertextBlob, &envelope); err != nil { - return nil, fmt.Errorf("failed to parse ciphertext envelope: %w", err) - } - - // Validate required fields - if envelope.Provider == "" { - return nil, fmt.Errorf("envelope missing provider field") - } - if envelope.KeyID == "" { - return nil, fmt.Errorf("envelope missing key_id field") - } - if envelope.Ciphertext == "" { - return nil, fmt.Errorf("envelope missing ciphertext field") - } - if envelope.Version == 0 { - envelope.Version = 1 // Default to version 1 - } - - return &envelope, nil -} diff --git a/weed/kms/envelope_test.go b/weed/kms/envelope_test.go deleted file mode 100644 index 322a4eafa..000000000 --- a/weed/kms/envelope_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package kms - -import ( - "encoding/json" - "testing" -) - -func TestCiphertextEnvelope_CreateAndParse(t *testing.T) { - // Test basic envelope creation and parsing - provider := "openbao" - keyID := "test-key-123" - ciphertext := "vault:v1:abcd1234encrypted" - providerSpecific := map[string]interface{}{ - "transit_path": "transit", - "version": 1, - } - - // Create envelope - envelopeBlob, err := CreateEnvelope(provider, keyID, ciphertext, providerSpecific) - if err != nil { - t.Fatalf("CreateEnvelope failed: %v", err) - } - - // Verify it's valid JSON - var jsonCheck map[string]interface{} - if err := json.Unmarshal(envelopeBlob, &jsonCheck); err != nil { - t.Fatalf("Envelope is not valid JSON: %v", err) - } - - // Parse envelope back - envelope, err := ParseEnvelope(envelopeBlob) - if err != nil { - t.Fatalf("ParseEnvelope failed: %v", err) - } - - // Verify fields - if envelope.Provider != provider { - t.Errorf("Provider mismatch: expected %s, got %s", provider, envelope.Provider) - } - if envelope.KeyID != keyID { - t.Errorf("KeyID mismatch: expected %s, got %s", keyID, envelope.KeyID) - } - if envelope.Ciphertext != ciphertext { - t.Errorf("Ciphertext mismatch: expected %s, got %s", ciphertext, envelope.Ciphertext) - } - if envelope.Version != 1 { - t.Errorf("Version mismatch: expected 1, got %d", envelope.Version) - } - if envelope.ProviderSpecific == nil { - t.Error("ProviderSpecific is nil") - } -} - -func TestCiphertextEnvelope_InvalidFormat(t *testing.T) { - // Test parsing invalid (non-envelope) ciphertext should fail - rawCiphertext := []byte("some-raw-data-not-json") - - _, err := ParseEnvelope(rawCiphertext) - if err == nil { - t.Fatal("Expected error for invalid format, got none") - } -} - -func TestCiphertextEnvelope_ValidationErrors(t *testing.T) { - // Test validation errors - testCases := []struct { - name string - provider string - keyID string - ciphertext string - expectError bool - }{ - {"Valid", "openbao", "key1", "cipher1", false}, - {"Empty provider", "", "key1", "cipher1", true}, - {"Empty keyID", "openbao", "", "cipher1", true}, - {"Empty ciphertext", "openbao", "key1", "", true}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - envelopeBlob, err := CreateEnvelope(tc.provider, tc.keyID, tc.ciphertext, nil) - if err != nil && !tc.expectError { - t.Fatalf("Unexpected error in CreateEnvelope: %v", err) - } - if err == nil && tc.expectError { - t.Fatal("Expected error in CreateEnvelope but got none") - } - - if !tc.expectError { - // Test parsing as well - _, err = ParseEnvelope(envelopeBlob) - if err != nil { - t.Fatalf("ParseEnvelope failed: %v", err) - } - } - }) - } -} - -func TestCiphertextEnvelope_MultipleProviders(t *testing.T) { - // Test with different providers to ensure API consistency - providers := []struct { - name string - keyID string - ciphertext string - }{ - {"openbao", "transit/test-key", "vault:v1:encrypted123"}, - {"gcp", "projects/test/locations/us/keyRings/ring/cryptoKeys/key", "gcp-encrypted-data"}, - {"azure", "https://vault.vault.azure.net/keys/test/123", "azure-encrypted-bytes"}, - {"aws", "arn:aws:kms:us-east-1:123:key/abc", "aws-encrypted-blob"}, - } - - for _, provider := range providers { - t.Run(provider.name, func(t *testing.T) { - // Create envelope - envelopeBlob, err := CreateEnvelope(provider.name, provider.keyID, provider.ciphertext, nil) - if err != nil { - t.Fatalf("CreateEnvelope failed for %s: %v", provider.name, err) - } - - // Parse envelope - envelope, err := ParseEnvelope(envelopeBlob) - if err != nil { - t.Fatalf("ParseEnvelope failed for %s: %v", provider.name, err) - } - - // Verify consistency - if envelope.Provider != provider.name { - t.Errorf("Provider mismatch for %s: expected %s, got %s", - provider.name, provider.name, envelope.Provider) - } - if envelope.KeyID != provider.keyID { - t.Errorf("KeyID mismatch for %s: expected %s, got %s", - provider.name, provider.keyID, envelope.KeyID) - } - }) - } -} diff --git a/weed/kms/gcp/gcp_kms.go b/weed/kms/gcp/gcp_kms.go deleted file mode 100644 index 5380a7aeb..000000000 --- a/weed/kms/gcp/gcp_kms.go +++ /dev/null @@ -1,349 +0,0 @@ -package gcp - -import ( - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "strings" - "time" - - "google.golang.org/api/option" - - kms "cloud.google.com/go/kms/apiv1" - "cloud.google.com/go/kms/apiv1/kmspb" - - "github.com/seaweedfs/seaweedfs/weed/glog" - seaweedkms "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - // Register the Google Cloud KMS provider - seaweedkms.RegisterProvider("gcp", NewGCPKMSProvider) -} - -// GCPKMSProvider implements the KMSProvider interface using Google Cloud KMS -type GCPKMSProvider struct { - client *kms.KeyManagementClient - projectID string -} - -// GCPKMSConfig contains configuration for the Google Cloud KMS provider -type GCPKMSConfig struct { - ProjectID string `json:"project_id"` // GCP project ID - CredentialsFile string `json:"credentials_file"` // Path to service account JSON file - CredentialsJSON string `json:"credentials_json"` // Service account JSON content (base64 encoded) - UseDefaultCredentials bool `json:"use_default_credentials"` // Use default GCP credentials (metadata service, gcloud, etc.) - RequestTimeout int `json:"request_timeout"` // Request timeout in seconds (default: 30) -} - -// NewGCPKMSProvider creates a new Google Cloud KMS provider -func NewGCPKMSProvider(config util.Configuration) (seaweedkms.KMSProvider, error) { - if config == nil { - return nil, fmt.Errorf("Google Cloud KMS configuration is required") - } - - // Extract configuration - projectID := config.GetString("project_id") - if projectID == "" { - return nil, fmt.Errorf("project_id is required for Google Cloud KMS provider") - } - - credentialsFile := config.GetString("credentials_file") - credentialsJSON := config.GetString("credentials_json") - useDefaultCredentials := config.GetBool("use_default_credentials") - - requestTimeout := config.GetInt("request_timeout") - if requestTimeout == 0 { - requestTimeout = 30 // Default 30 seconds - } - - // Prepare client options - var clientOptions []option.ClientOption - - // Configure credentials - if credentialsFile != "" { - clientOptions = append(clientOptions, option.WithCredentialsFile(credentialsFile)) - glog.V(1).Infof("GCP KMS: Using credentials file %s", credentialsFile) - } else if credentialsJSON != "" { - // Decode base64 credentials if provided - credBytes, err := base64.StdEncoding.DecodeString(credentialsJSON) - if err != nil { - return nil, fmt.Errorf("failed to decode credentials JSON: %w", err) - } - clientOptions = append(clientOptions, option.WithCredentialsJSON(credBytes)) - glog.V(1).Infof("GCP KMS: Using provided credentials JSON") - } else if !useDefaultCredentials { - return nil, fmt.Errorf("either credentials_file, credentials_json, or use_default_credentials=true must be provided") - } else { - glog.V(1).Infof("GCP KMS: Using default credentials") - } - - // Set request timeout - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(requestTimeout)*time.Second) - defer cancel() - - // Create KMS client - client, err := kms.NewKeyManagementClient(ctx, clientOptions...) - if err != nil { - return nil, fmt.Errorf("failed to create Google Cloud KMS client: %w", err) - } - - provider := &GCPKMSProvider{ - client: client, - projectID: projectID, - } - - glog.V(1).Infof("Google Cloud KMS provider initialized for project %s", projectID) - return provider, nil -} - -// GenerateDataKey generates a new data encryption key using Google Cloud KMS -func (p *GCPKMSProvider) GenerateDataKey(ctx context.Context, req *seaweedkms.GenerateDataKeyRequest) (*seaweedkms.GenerateDataKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("GenerateDataKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Validate key spec - var keySize int - switch req.KeySpec { - case seaweedkms.KeySpecAES256: - keySize = 32 // 256 bits - default: - return nil, fmt.Errorf("unsupported key spec: %s", req.KeySpec) - } - - // Generate data key locally (GCP KMS doesn't have GenerateDataKey like AWS) - dataKey := make([]byte, keySize) - if _, err := rand.Read(dataKey); err != nil { - return nil, fmt.Errorf("failed to generate random data key: %w", err) - } - - // Encrypt the data key using GCP KMS - glog.V(4).Infof("GCP KMS: Encrypting data key using key %s", req.KeyID) - - // Build the encryption request - encryptReq := &kmspb.EncryptRequest{ - Name: req.KeyID, - Plaintext: dataKey, - } - - // Add additional authenticated data from encryption context - if len(req.EncryptionContext) > 0 { - // Convert encryption context to additional authenticated data - aad := p.encryptionContextToAAD(req.EncryptionContext) - encryptReq.AdditionalAuthenticatedData = []byte(aad) - } - - // Call GCP KMS to encrypt the data key - encryptResp, err := p.client.Encrypt(ctx, encryptReq) - if err != nil { - return nil, p.convertGCPError(err, req.KeyID) - } - - // Create standardized envelope format for consistent API behavior - envelopeBlob, err := seaweedkms.CreateEnvelope("gcp", encryptResp.Name, string(encryptResp.Ciphertext), nil) - if err != nil { - return nil, fmt.Errorf("failed to create ciphertext envelope: %w", err) - } - - response := &seaweedkms.GenerateDataKeyResponse{ - KeyID: encryptResp.Name, // GCP returns the full resource name - Plaintext: dataKey, - CiphertextBlob: envelopeBlob, // Store in standardized envelope format - } - - glog.V(4).Infof("GCP KMS: Generated and encrypted data key using key %s", req.KeyID) - return response, nil -} - -// Decrypt decrypts an encrypted data key using Google Cloud KMS -func (p *GCPKMSProvider) Decrypt(ctx context.Context, req *seaweedkms.DecryptRequest) (*seaweedkms.DecryptResponse, error) { - if req == nil { - return nil, fmt.Errorf("DecryptRequest cannot be nil") - } - - if len(req.CiphertextBlob) == 0 { - return nil, fmt.Errorf("CiphertextBlob cannot be empty") - } - - // Parse the ciphertext envelope to extract key information - envelope, err := seaweedkms.ParseEnvelope(req.CiphertextBlob) - if err != nil { - return nil, fmt.Errorf("failed to parse ciphertext envelope: %w", err) - } - - keyName := envelope.KeyID - if keyName == "" { - return nil, fmt.Errorf("envelope missing key ID") - } - - // Convert string back to bytes - ciphertext := []byte(envelope.Ciphertext) - - // Build the decryption request - decryptReq := &kmspb.DecryptRequest{ - Name: keyName, - Ciphertext: ciphertext, - } - - // Add additional authenticated data from encryption context - if len(req.EncryptionContext) > 0 { - aad := p.encryptionContextToAAD(req.EncryptionContext) - decryptReq.AdditionalAuthenticatedData = []byte(aad) - } - - // Call GCP KMS to decrypt the data key - glog.V(4).Infof("GCP KMS: Decrypting data key using key %s", keyName) - decryptResp, err := p.client.Decrypt(ctx, decryptReq) - if err != nil { - return nil, p.convertGCPError(err, keyName) - } - - response := &seaweedkms.DecryptResponse{ - KeyID: keyName, - Plaintext: decryptResp.Plaintext, - } - - glog.V(4).Infof("GCP KMS: Decrypted data key using key %s", keyName) - return response, nil -} - -// DescribeKey validates that a key exists and returns its metadata -func (p *GCPKMSProvider) DescribeKey(ctx context.Context, req *seaweedkms.DescribeKeyRequest) (*seaweedkms.DescribeKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("DescribeKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Build the request to get the crypto key - getKeyReq := &kmspb.GetCryptoKeyRequest{ - Name: req.KeyID, - } - - // Call GCP KMS to get key information - glog.V(4).Infof("GCP KMS: Describing key %s", req.KeyID) - key, err := p.client.GetCryptoKey(ctx, getKeyReq) - if err != nil { - return nil, p.convertGCPError(err, req.KeyID) - } - - response := &seaweedkms.DescribeKeyResponse{ - KeyID: key.Name, - ARN: key.Name, // GCP uses resource names instead of ARNs - Description: "Google Cloud KMS key", - } - - // Map GCP key purpose to our usage enum - if key.Purpose == kmspb.CryptoKey_ENCRYPT_DECRYPT { - response.KeyUsage = seaweedkms.KeyUsageEncryptDecrypt - } - - // Map GCP key state to our state enum - // Get the primary version to check its state - if key.Primary != nil && key.Primary.State == kmspb.CryptoKeyVersion_ENABLED { - response.KeyState = seaweedkms.KeyStateEnabled - } else { - response.KeyState = seaweedkms.KeyStateDisabled - } - - // GCP KMS keys are managed by Google Cloud - response.Origin = seaweedkms.KeyOriginGCP - - glog.V(4).Infof("GCP KMS: Described key %s (state: %s)", req.KeyID, response.KeyState) - return response, nil -} - -// GetKeyID resolves a key name to the full resource name -func (p *GCPKMSProvider) GetKeyID(ctx context.Context, keyIdentifier string) (string, error) { - if keyIdentifier == "" { - return "", fmt.Errorf("key identifier cannot be empty") - } - - // If it's already a full resource name, return as-is - if strings.HasPrefix(keyIdentifier, "projects/") { - return keyIdentifier, nil - } - - // Otherwise, try to construct the full resource name or validate via DescribeKey - descReq := &seaweedkms.DescribeKeyRequest{KeyID: keyIdentifier} - descResp, err := p.DescribeKey(ctx, descReq) - if err != nil { - return "", fmt.Errorf("failed to resolve key identifier %s: %w", keyIdentifier, err) - } - - return descResp.KeyID, nil -} - -// Close cleans up any resources used by the provider -func (p *GCPKMSProvider) Close() error { - if p.client != nil { - err := p.client.Close() - if err != nil { - glog.Errorf("Error closing GCP KMS client: %v", err) - return err - } - } - glog.V(2).Infof("Google Cloud KMS provider closed") - return nil -} - -// encryptionContextToAAD converts encryption context map to additional authenticated data -// This is a simplified implementation - in production, you might want a more robust serialization -func (p *GCPKMSProvider) encryptionContextToAAD(context map[string]string) string { - if len(context) == 0 { - return "" - } - - // Simple key=value&key=value format - var parts []string - for k, v := range context { - parts = append(parts, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(parts, "&") -} - -// convertGCPError converts Google Cloud KMS errors to our standard KMS errors -func (p *GCPKMSProvider) convertGCPError(err error, keyID string) error { - // Google Cloud SDK uses gRPC status codes - errMsg := err.Error() - - if strings.Contains(errMsg, "not found") || strings.Contains(errMsg, "NotFound") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeNotFoundException, - Message: fmt.Sprintf("Key not found in Google Cloud KMS: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "permission") || strings.Contains(errMsg, "access") || strings.Contains(errMsg, "Forbidden") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeAccessDenied, - Message: fmt.Sprintf("Access denied to Google Cloud KMS: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "disabled") || strings.Contains(errMsg, "unavailable") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKeyUnavailable, - Message: fmt.Sprintf("Key unavailable in Google Cloud KMS: %v", err), - KeyID: keyID, - } - } - - // For unknown errors, wrap as internal failure - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("Google Cloud KMS error: %v", err), - KeyID: keyID, - } -} diff --git a/weed/kms/kms.go b/weed/kms/kms.go deleted file mode 100644 index 334e724d1..000000000 --- a/weed/kms/kms.go +++ /dev/null @@ -1,159 +0,0 @@ -package kms - -import ( - "context" - "fmt" -) - -// KMSProvider defines the interface for Key Management Service implementations -type KMSProvider interface { - // GenerateDataKey creates a new data encryption key encrypted under the specified KMS key - GenerateDataKey(ctx context.Context, req *GenerateDataKeyRequest) (*GenerateDataKeyResponse, error) - - // Decrypt decrypts an encrypted data key using the KMS - Decrypt(ctx context.Context, req *DecryptRequest) (*DecryptResponse, error) - - // DescribeKey validates that a key exists and returns its metadata - DescribeKey(ctx context.Context, req *DescribeKeyRequest) (*DescribeKeyResponse, error) - - // GetKeyID resolves a key alias or ARN to the actual key ID - GetKeyID(ctx context.Context, keyIdentifier string) (string, error) - - // Close cleans up any resources used by the provider - Close() error -} - -// GenerateDataKeyRequest contains parameters for generating a data key -type GenerateDataKeyRequest struct { - KeyID string // KMS key identifier (ID, ARN, or alias) - KeySpec KeySpec // Specification for the data key - EncryptionContext map[string]string // Additional authenticated data -} - -// GenerateDataKeyResponse contains the generated data key -type GenerateDataKeyResponse struct { - KeyID string // The actual KMS key ID used - Plaintext []byte // The plaintext data key (sensitive - clear from memory ASAP) - CiphertextBlob []byte // The encrypted data key for storage -} - -// DecryptRequest contains parameters for decrypting a data key -type DecryptRequest struct { - CiphertextBlob []byte // The encrypted data key - EncryptionContext map[string]string // Must match the context used during encryption -} - -// DecryptResponse contains the decrypted data key -type DecryptResponse struct { - KeyID string // The KMS key ID that was used for encryption - Plaintext []byte // The decrypted data key (sensitive - clear from memory ASAP) -} - -// DescribeKeyRequest contains parameters for describing a key -type DescribeKeyRequest struct { - KeyID string // KMS key identifier (ID, ARN, or alias) -} - -// DescribeKeyResponse contains key metadata -type DescribeKeyResponse struct { - KeyID string // The actual key ID - ARN string // The key ARN - Description string // Key description - KeyUsage KeyUsage // How the key can be used - KeyState KeyState // Current state of the key - Origin KeyOrigin // Where the key material originated -} - -// KeySpec specifies the type of data key to generate -type KeySpec string - -const ( - KeySpecAES256 KeySpec = "AES_256" // 256-bit AES key -) - -// KeyUsage specifies how a key can be used -type KeyUsage string - -const ( - KeyUsageEncryptDecrypt KeyUsage = "ENCRYPT_DECRYPT" - KeyUsageGenerateDataKey KeyUsage = "GENERATE_DATA_KEY" -) - -// KeyState represents the current state of a KMS key -type KeyState string - -const ( - KeyStateEnabled KeyState = "Enabled" - KeyStateDisabled KeyState = "Disabled" - KeyStatePendingDeletion KeyState = "PendingDeletion" - KeyStateUnavailable KeyState = "Unavailable" -) - -// KeyOrigin indicates where the key material came from -type KeyOrigin string - -const ( - KeyOriginAWS KeyOrigin = "AWS_KMS" - KeyOriginExternal KeyOrigin = "EXTERNAL" - KeyOriginCloudHSM KeyOrigin = "AWS_CLOUDHSM" - KeyOriginAzure KeyOrigin = "AZURE_KEY_VAULT" - KeyOriginGCP KeyOrigin = "GCP_KMS" - KeyOriginOpenBao KeyOrigin = "OPENBAO" - KeyOriginLocal KeyOrigin = "LOCAL" -) - -// KMSError represents an error from the KMS service -type KMSError struct { - Code string // Error code (e.g., "KeyUnavailableException") - Message string // Human-readable error message - KeyID string // Key ID that caused the error (if applicable) -} - -func (e *KMSError) Error() string { - if e.KeyID != "" { - return fmt.Sprintf("KMS error %s for key %s: %s", e.Code, e.KeyID, e.Message) - } - return fmt.Sprintf("KMS error %s: %s", e.Code, e.Message) -} - -// Common KMS error codes -const ( - ErrCodeKeyUnavailable = "KeyUnavailableException" - ErrCodeAccessDenied = "AccessDeniedException" - ErrCodeNotFoundException = "NotFoundException" - ErrCodeInvalidKeyUsage = "InvalidKeyUsageException" - ErrCodeKMSInternalFailure = "KMSInternalException" - ErrCodeInvalidCiphertext = "InvalidCiphertextException" -) - -// EncryptionContextKey constants for building encryption context -const ( - EncryptionContextS3ARN = "aws:s3:arn" - EncryptionContextS3Bucket = "aws:s3:bucket" - EncryptionContextS3Object = "aws:s3:object" -) - -// BuildS3EncryptionContext creates the standard encryption context for S3 objects -// Following AWS S3 conventions from the documentation -func BuildS3EncryptionContext(bucketName, objectKey string, useBucketKey bool) map[string]string { - context := make(map[string]string) - - if useBucketKey { - // When using S3 Bucket Keys, use bucket ARN as encryption context - context[EncryptionContextS3ARN] = fmt.Sprintf("arn:aws:s3:::%s", bucketName) - } else { - // For individual object encryption, use object ARN as encryption context - context[EncryptionContextS3ARN] = fmt.Sprintf("arn:aws:s3:::%s/%s", bucketName, objectKey) - } - - return context -} - -// ClearSensitiveData securely clears sensitive byte slices -func ClearSensitiveData(data []byte) { - if data != nil { - for i := range data { - data[i] = 0 - } - } -} diff --git a/weed/kms/local/local_kms.go b/weed/kms/local/local_kms.go deleted file mode 100644 index c33ae4b05..000000000 --- a/weed/kms/local/local_kms.go +++ /dev/null @@ -1,568 +0,0 @@ -package local - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "sort" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// LocalKMSProvider implements a local, in-memory KMS for development and testing -// WARNING: This is NOT suitable for production use - keys are stored in memory -type LocalKMSProvider struct { - mu sync.RWMutex - keys map[string]*LocalKey - defaultKeyID string - enableOnDemandCreate bool // Whether to create keys on-demand for missing key IDs -} - -// LocalKey represents a key stored in the local KMS -type LocalKey struct { - KeyID string `json:"keyId"` - ARN string `json:"arn"` - Description string `json:"description"` - KeyMaterial []byte `json:"keyMaterial"` // 256-bit master key - KeyUsage kms.KeyUsage `json:"keyUsage"` - KeyState kms.KeyState `json:"keyState"` - Origin kms.KeyOrigin `json:"origin"` - CreatedAt time.Time `json:"createdAt"` - Aliases []string `json:"aliases"` - Metadata map[string]string `json:"metadata"` -} - -// LocalKMSConfig contains configuration for the local KMS provider -type LocalKMSConfig struct { - DefaultKeyID string `json:"defaultKeyId"` - Keys map[string]*LocalKey `json:"keys"` - EnableOnDemandCreate bool `json:"enableOnDemandCreate"` -} - -func init() { - // Register the local KMS provider - kms.RegisterProvider("local", NewLocalKMSProvider) -} - -// NewLocalKMSProvider creates a new local KMS provider -func NewLocalKMSProvider(config util.Configuration) (kms.KMSProvider, error) { - provider := &LocalKMSProvider{ - keys: make(map[string]*LocalKey), - enableOnDemandCreate: true, // Default to true for development/testing convenience - } - - // Load configuration if provided - if config != nil { - if err := provider.loadConfig(config); err != nil { - return nil, fmt.Errorf("failed to load local KMS config: %v", err) - } - } - - // Create a default key if none exists - if len(provider.keys) == 0 { - defaultKey, err := provider.createDefaultKey() - if err != nil { - return nil, fmt.Errorf("failed to create default key: %v", err) - } - provider.defaultKeyID = defaultKey.KeyID - glog.V(1).Infof("Local KMS: Created default key %s", defaultKey.KeyID) - } - - return provider, nil -} - -// loadConfig loads configuration from the provided config -func (p *LocalKMSProvider) loadConfig(config util.Configuration) error { - if config == nil { - return nil - } - - p.enableOnDemandCreate = config.GetBool("enableOnDemandCreate") - - // TODO: Load pre-existing keys from configuration if provided - // For now, rely on default key creation in constructor - - glog.V(2).Infof("Local KMS: enableOnDemandCreate = %v", p.enableOnDemandCreate) - return nil -} - -// createDefaultKey creates a default master key for the local KMS -func (p *LocalKMSProvider) createDefaultKey() (*LocalKey, error) { - keyID, err := generateKeyID() - if err != nil { - return nil, fmt.Errorf("failed to generate key ID: %w", err) - } - keyMaterial := make([]byte, 32) // 256-bit key - if _, err := io.ReadFull(rand.Reader, keyMaterial); err != nil { - return nil, fmt.Errorf("failed to generate key material: %w", err) - } - - key := &LocalKey{ - KeyID: keyID, - ARN: fmt.Sprintf("arn:aws:kms:local:000000000000:key/%s", keyID), - Description: "Default local KMS key for SeaweedFS", - KeyMaterial: keyMaterial, - KeyUsage: kms.KeyUsageEncryptDecrypt, - KeyState: kms.KeyStateEnabled, - Origin: kms.KeyOriginLocal, - CreatedAt: time.Now(), - Aliases: []string{"alias/seaweedfs-default"}, - Metadata: make(map[string]string), - } - - p.mu.Lock() - defer p.mu.Unlock() - p.keys[keyID] = key - - // Also register aliases - for _, alias := range key.Aliases { - p.keys[alias] = key - } - - return key, nil -} - -// GenerateDataKey implements the KMSProvider interface -func (p *LocalKMSProvider) GenerateDataKey(ctx context.Context, req *kms.GenerateDataKeyRequest) (*kms.GenerateDataKeyResponse, error) { - if req.KeySpec != kms.KeySpecAES256 { - return nil, &kms.KMSError{ - Code: kms.ErrCodeInvalidKeyUsage, - Message: fmt.Sprintf("Unsupported key spec: %s", req.KeySpec), - KeyID: req.KeyID, - } - } - - // Resolve the key - key, err := p.getKey(req.KeyID) - if err != nil { - return nil, err - } - - if key.KeyState != kms.KeyStateEnabled { - return nil, &kms.KMSError{ - Code: kms.ErrCodeKeyUnavailable, - Message: fmt.Sprintf("Key %s is in state %s", key.KeyID, key.KeyState), - KeyID: key.KeyID, - } - } - - // Generate a random 256-bit data key - dataKey := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, dataKey); err != nil { - return nil, &kms.KMSError{ - Code: kms.ErrCodeKMSInternalFailure, - Message: "Failed to generate data key", - KeyID: key.KeyID, - } - } - - // Encrypt the data key with the master key - encryptedDataKey, err := p.encryptDataKey(dataKey, key, req.EncryptionContext) - if err != nil { - kms.ClearSensitiveData(dataKey) - return nil, &kms.KMSError{ - Code: kms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("Failed to encrypt data key: %v", err), - KeyID: key.KeyID, - } - } - - return &kms.GenerateDataKeyResponse{ - KeyID: key.KeyID, - Plaintext: dataKey, - CiphertextBlob: encryptedDataKey, - }, nil -} - -// Decrypt implements the KMSProvider interface -func (p *LocalKMSProvider) Decrypt(ctx context.Context, req *kms.DecryptRequest) (*kms.DecryptResponse, error) { - // Parse the encrypted data key to extract metadata - metadata, err := p.parseEncryptedDataKey(req.CiphertextBlob) - if err != nil { - return nil, &kms.KMSError{ - Code: kms.ErrCodeInvalidCiphertext, - Message: fmt.Sprintf("Invalid ciphertext format: %v", err), - } - } - - // Verify encryption context matches - if !p.encryptionContextMatches(metadata.EncryptionContext, req.EncryptionContext) { - return nil, &kms.KMSError{ - Code: kms.ErrCodeInvalidCiphertext, - Message: "Encryption context mismatch", - KeyID: metadata.KeyID, - } - } - - // Get the master key - key, err := p.getKey(metadata.KeyID) - if err != nil { - return nil, err - } - - if key.KeyState != kms.KeyStateEnabled { - return nil, &kms.KMSError{ - Code: kms.ErrCodeKeyUnavailable, - Message: fmt.Sprintf("Key %s is in state %s", key.KeyID, key.KeyState), - KeyID: key.KeyID, - } - } - - // Decrypt the data key - dataKey, err := p.decryptDataKey(metadata, key) - if err != nil { - return nil, &kms.KMSError{ - Code: kms.ErrCodeInvalidCiphertext, - Message: fmt.Sprintf("Failed to decrypt data key: %v", err), - KeyID: key.KeyID, - } - } - - return &kms.DecryptResponse{ - KeyID: key.KeyID, - Plaintext: dataKey, - }, nil -} - -// DescribeKey implements the KMSProvider interface -func (p *LocalKMSProvider) DescribeKey(ctx context.Context, req *kms.DescribeKeyRequest) (*kms.DescribeKeyResponse, error) { - key, err := p.getKey(req.KeyID) - if err != nil { - return nil, err - } - - return &kms.DescribeKeyResponse{ - KeyID: key.KeyID, - ARN: key.ARN, - Description: key.Description, - KeyUsage: key.KeyUsage, - KeyState: key.KeyState, - Origin: key.Origin, - }, nil -} - -// GetKeyID implements the KMSProvider interface -func (p *LocalKMSProvider) GetKeyID(ctx context.Context, keyIdentifier string) (string, error) { - key, err := p.getKey(keyIdentifier) - if err != nil { - return "", err - } - return key.KeyID, nil -} - -// Close implements the KMSProvider interface -func (p *LocalKMSProvider) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - - // Clear all key material from memory - for _, key := range p.keys { - kms.ClearSensitiveData(key.KeyMaterial) - } - p.keys = make(map[string]*LocalKey) - return nil -} - -// getKey retrieves a key by ID or alias, creating it on-demand if it doesn't exist -func (p *LocalKMSProvider) getKey(keyIdentifier string) (*LocalKey, error) { - p.mu.RLock() - - // Try direct lookup first - if key, exists := p.keys[keyIdentifier]; exists { - p.mu.RUnlock() - return key, nil - } - - // Try with default key if no identifier provided - if keyIdentifier == "" && p.defaultKeyID != "" { - if key, exists := p.keys[p.defaultKeyID]; exists { - p.mu.RUnlock() - return key, nil - } - } - - p.mu.RUnlock() - - // Key doesn't exist - create on-demand if enabled and key identifier is reasonable - if keyIdentifier != "" && p.enableOnDemandCreate && p.isReasonableKeyIdentifier(keyIdentifier) { - glog.V(1).Infof("Creating on-demand local KMS key: %s", keyIdentifier) - key, err := p.CreateKeyWithID(keyIdentifier, fmt.Sprintf("Auto-created local KMS key: %s", keyIdentifier)) - if err != nil { - return nil, &kms.KMSError{ - Code: kms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("Failed to create on-demand key %s: %v", keyIdentifier, err), - KeyID: keyIdentifier, - } - } - return key, nil - } - - return nil, &kms.KMSError{ - Code: kms.ErrCodeNotFoundException, - Message: fmt.Sprintf("Key not found: %s", keyIdentifier), - KeyID: keyIdentifier, - } -} - -// isReasonableKeyIdentifier determines if a key identifier is reasonable for on-demand creation -func (p *LocalKMSProvider) isReasonableKeyIdentifier(keyIdentifier string) bool { - // Basic validation: reasonable length and character set - if len(keyIdentifier) < 3 || len(keyIdentifier) > 100 { - return false - } - - // Allow alphanumeric characters, hyphens, underscores, and forward slashes - // This covers most reasonable key identifier formats without being overly restrictive - for _, r := range keyIdentifier { - if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || - (r >= '0' && r <= '9') || r == '-' || r == '_' || r == '/') { - return false - } - } - - // Reject keys that start or end with separators - if keyIdentifier[0] == '-' || keyIdentifier[0] == '_' || keyIdentifier[0] == '/' || - keyIdentifier[len(keyIdentifier)-1] == '-' || keyIdentifier[len(keyIdentifier)-1] == '_' || keyIdentifier[len(keyIdentifier)-1] == '/' { - return false - } - - return true -} - -// encryptedDataKeyMetadata represents the metadata stored with encrypted data keys -type encryptedDataKeyMetadata struct { - KeyID string `json:"keyId"` - EncryptionContext map[string]string `json:"encryptionContext"` - EncryptedData []byte `json:"encryptedData"` - Nonce []byte `json:"nonce"` // Renamed from IV to be more explicit about AES-GCM usage -} - -// encryptDataKey encrypts a data key using the master key with AES-GCM for authenticated encryption -func (p *LocalKMSProvider) encryptDataKey(dataKey []byte, masterKey *LocalKey, encryptionContext map[string]string) ([]byte, error) { - block, err := aes.NewCipher(masterKey.KeyMaterial) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - // Generate a random nonce - nonce := make([]byte, gcm.NonceSize()) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, err - } - - // Prepare additional authenticated data (AAD) from the encryption context - // Use deterministic marshaling to ensure consistent AAD - var aad []byte - if len(encryptionContext) > 0 { - var err error - aad, err = marshalEncryptionContextDeterministic(encryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context for AAD: %w", err) - } - } - - // Encrypt using AES-GCM - encryptedData := gcm.Seal(nil, nonce, dataKey, aad) - - // Create metadata structure - metadata := &encryptedDataKeyMetadata{ - KeyID: masterKey.KeyID, - EncryptionContext: encryptionContext, - EncryptedData: encryptedData, - Nonce: nonce, - } - - // Serialize metadata to JSON - return json.Marshal(metadata) -} - -// decryptDataKey decrypts a data key using the master key with AES-GCM for authenticated decryption -func (p *LocalKMSProvider) decryptDataKey(metadata *encryptedDataKeyMetadata, masterKey *LocalKey) ([]byte, error) { - block, err := aes.NewCipher(masterKey.KeyMaterial) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - // Prepare additional authenticated data (AAD) - var aad []byte - if len(metadata.EncryptionContext) > 0 { - var err error - aad, err = marshalEncryptionContextDeterministic(metadata.EncryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context for AAD: %w", err) - } - } - - // Decrypt using AES-GCM - nonce := metadata.Nonce - if len(nonce) != gcm.NonceSize() { - return nil, fmt.Errorf("invalid nonce size: expected %d, got %d", gcm.NonceSize(), len(nonce)) - } - - dataKey, err := gcm.Open(nil, nonce, metadata.EncryptedData, aad) - if err != nil { - return nil, fmt.Errorf("failed to decrypt with GCM: %w", err) - } - - return dataKey, nil -} - -// parseEncryptedDataKey parses the encrypted data key blob -func (p *LocalKMSProvider) parseEncryptedDataKey(ciphertextBlob []byte) (*encryptedDataKeyMetadata, error) { - var metadata encryptedDataKeyMetadata - if err := json.Unmarshal(ciphertextBlob, &metadata); err != nil { - return nil, fmt.Errorf("failed to parse ciphertext blob: %v", err) - } - return &metadata, nil -} - -// encryptionContextMatches checks if two encryption contexts match -func (p *LocalKMSProvider) encryptionContextMatches(ctx1, ctx2 map[string]string) bool { - if len(ctx1) != len(ctx2) { - return false - } - for k, v := range ctx1 { - if ctx2[k] != v { - return false - } - } - return true -} - -// generateKeyID generates a random key ID -func generateKeyID() (string, error) { - // Generate a UUID-like key ID - b := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, b); err != nil { - return "", fmt.Errorf("failed to generate random bytes for key ID: %w", err) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", - b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil -} - -// CreateKey creates a new key in the local KMS (for testing) -func (p *LocalKMSProvider) CreateKey(description string, aliases []string) (*LocalKey, error) { - keyID, err := generateKeyID() - if err != nil { - return nil, fmt.Errorf("failed to generate key ID: %w", err) - } - keyMaterial := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, keyMaterial); err != nil { - return nil, err - } - - key := &LocalKey{ - KeyID: keyID, - ARN: fmt.Sprintf("arn:aws:kms:local:000000000000:key/%s", keyID), - Description: description, - KeyMaterial: keyMaterial, - KeyUsage: kms.KeyUsageEncryptDecrypt, - KeyState: kms.KeyStateEnabled, - Origin: kms.KeyOriginLocal, - CreatedAt: time.Now(), - Aliases: aliases, - Metadata: make(map[string]string), - } - - p.mu.Lock() - defer p.mu.Unlock() - - p.keys[keyID] = key - for _, alias := range aliases { - // Ensure alias has proper format - if !strings.HasPrefix(alias, "alias/") { - alias = "alias/" + alias - } - p.keys[alias] = key - } - - return key, nil -} - -// CreateKeyWithID creates a key with a specific keyID (for testing only) -func (p *LocalKMSProvider) CreateKeyWithID(keyID, description string) (*LocalKey, error) { - keyMaterial := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, keyMaterial); err != nil { - return nil, fmt.Errorf("failed to generate key material: %w", err) - } - - key := &LocalKey{ - KeyID: keyID, - ARN: fmt.Sprintf("arn:aws:kms:local:000000000000:key/%s", keyID), - Description: description, - KeyMaterial: keyMaterial, - KeyUsage: kms.KeyUsageEncryptDecrypt, - KeyState: kms.KeyStateEnabled, - Origin: kms.KeyOriginLocal, - CreatedAt: time.Now(), - Aliases: []string{}, // No aliases by default - Metadata: make(map[string]string), - } - - p.mu.Lock() - defer p.mu.Unlock() - - // Register key with the exact keyID provided - p.keys[keyID] = key - - return key, nil -} - -// marshalEncryptionContextDeterministic creates a deterministic byte representation of encryption context -// This ensures that the same encryption context always produces the same AAD for AES-GCM -func marshalEncryptionContextDeterministic(encryptionContext map[string]string) ([]byte, error) { - if len(encryptionContext) == 0 { - return nil, nil - } - - // Sort keys to ensure deterministic output - keys := make([]string, 0, len(encryptionContext)) - for k := range encryptionContext { - keys = append(keys, k) - } - sort.Strings(keys) - - // Build deterministic representation with proper JSON escaping - var buf strings.Builder - buf.WriteString("{") - for i, k := range keys { - if i > 0 { - buf.WriteString(",") - } - // Marshal key and value to get proper JSON string escaping - keyBytes, err := json.Marshal(k) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context key '%s': %w", k, err) - } - valueBytes, err := json.Marshal(encryptionContext[k]) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context value for key '%s': %w", k, err) - } - buf.Write(keyBytes) - buf.WriteString(":") - buf.Write(valueBytes) - } - buf.WriteString("}") - - return []byte(buf.String()), nil -} diff --git a/weed/kms/openbao/openbao_kms.go b/weed/kms/openbao/openbao_kms.go deleted file mode 100644 index 259a689b3..000000000 --- a/weed/kms/openbao/openbao_kms.go +++ /dev/null @@ -1,403 +0,0 @@ -package openbao - -import ( - "context" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "strings" - "time" - - vault "github.com/hashicorp/vault/api" - - "github.com/seaweedfs/seaweedfs/weed/glog" - seaweedkms "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func init() { - // Register the OpenBao/Vault KMS provider - seaweedkms.RegisterProvider("openbao", NewOpenBaoKMSProvider) - seaweedkms.RegisterProvider("vault", NewOpenBaoKMSProvider) // Alias for compatibility -} - -// OpenBaoKMSProvider implements the KMSProvider interface using OpenBao/Vault Transit engine -type OpenBaoKMSProvider struct { - client *vault.Client - transitPath string // Transit engine mount path (default: "transit") - address string -} - -// OpenBaoKMSConfig contains configuration for the OpenBao/Vault KMS provider -type OpenBaoKMSConfig struct { - Address string `json:"address"` // Vault address (e.g., "http://localhost:8200") - Token string `json:"token"` // Vault token for authentication - RoleID string `json:"role_id"` // AppRole role ID (alternative to token) - SecretID string `json:"secret_id"` // AppRole secret ID (alternative to token) - TransitPath string `json:"transit_path"` // Transit engine mount path (default: "transit") - TLSSkipVerify bool `json:"tls_skip_verify"` // Skip TLS verification (for testing) - CACert string `json:"ca_cert"` // Path to CA certificate - ClientCert string `json:"client_cert"` // Path to client certificate - ClientKey string `json:"client_key"` // Path to client private key - RequestTimeout int `json:"request_timeout"` // Request timeout in seconds (default: 30) -} - -// NewOpenBaoKMSProvider creates a new OpenBao/Vault KMS provider -func NewOpenBaoKMSProvider(config util.Configuration) (seaweedkms.KMSProvider, error) { - if config == nil { - return nil, fmt.Errorf("OpenBao/Vault KMS configuration is required") - } - - // Extract configuration - address := config.GetString("address") - if address == "" { - address = "http://localhost:8200" // Default OpenBao address - } - - token := config.GetString("token") - roleID := config.GetString("role_id") - secretID := config.GetString("secret_id") - transitPath := config.GetString("transit_path") - if transitPath == "" { - transitPath = "transit" // Default transit path - } - - tlsSkipVerify := config.GetBool("tls_skip_verify") - caCert := config.GetString("ca_cert") - clientCert := config.GetString("client_cert") - clientKey := config.GetString("client_key") - - requestTimeout := config.GetInt("request_timeout") - if requestTimeout == 0 { - requestTimeout = 30 // Default 30 seconds - } - - // Create Vault client configuration - vaultConfig := vault.DefaultConfig() - vaultConfig.Address = address - vaultConfig.Timeout = time.Duration(requestTimeout) * time.Second - - // Configure TLS - if tlsSkipVerify || caCert != "" || (clientCert != "" && clientKey != "") { - tlsConfig := &vault.TLSConfig{ - Insecure: tlsSkipVerify, - } - if caCert != "" { - tlsConfig.CACert = caCert - } - if clientCert != "" && clientKey != "" { - tlsConfig.ClientCert = clientCert - tlsConfig.ClientKey = clientKey - } - - if err := vaultConfig.ConfigureTLS(tlsConfig); err != nil { - return nil, fmt.Errorf("failed to configure TLS: %w", err) - } - } - - // Create Vault client - client, err := vault.NewClient(vaultConfig) - if err != nil { - return nil, fmt.Errorf("failed to create OpenBao/Vault client: %w", err) - } - - // Authenticate - if token != "" { - client.SetToken(token) - glog.V(1).Infof("OpenBao KMS: Using token authentication") - } else if roleID != "" && secretID != "" { - if err := authenticateAppRole(client, roleID, secretID); err != nil { - return nil, fmt.Errorf("failed to authenticate with AppRole: %w", err) - } - glog.V(1).Infof("OpenBao KMS: Using AppRole authentication") - } else { - return nil, fmt.Errorf("either token or role_id+secret_id must be provided") - } - - provider := &OpenBaoKMSProvider{ - client: client, - transitPath: transitPath, - address: address, - } - - glog.V(1).Infof("OpenBao/Vault KMS provider initialized at %s", address) - return provider, nil -} - -// authenticateAppRole authenticates using AppRole method -func authenticateAppRole(client *vault.Client, roleID, secretID string) error { - data := map[string]interface{}{ - "role_id": roleID, - "secret_id": secretID, - } - - secret, err := client.Logical().Write("auth/approle/login", data) - if err != nil { - return fmt.Errorf("AppRole authentication failed: %w", err) - } - - if secret == nil || secret.Auth == nil { - return fmt.Errorf("AppRole authentication returned empty token") - } - - client.SetToken(secret.Auth.ClientToken) - return nil -} - -// GenerateDataKey generates a new data encryption key using OpenBao/Vault Transit -func (p *OpenBaoKMSProvider) GenerateDataKey(ctx context.Context, req *seaweedkms.GenerateDataKeyRequest) (*seaweedkms.GenerateDataKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("GenerateDataKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Validate key spec - var keySize int - switch req.KeySpec { - case seaweedkms.KeySpecAES256: - keySize = 32 // 256 bits - default: - return nil, fmt.Errorf("unsupported key spec: %s", req.KeySpec) - } - - // Generate data key locally (similar to Azure/GCP approach) - dataKey := make([]byte, keySize) - if _, err := rand.Read(dataKey); err != nil { - return nil, fmt.Errorf("failed to generate random data key: %w", err) - } - - // Encrypt the data key using OpenBao/Vault Transit - glog.V(4).Infof("OpenBao KMS: Encrypting data key using key %s", req.KeyID) - - // Prepare encryption data - encryptData := map[string]interface{}{ - "plaintext": base64.StdEncoding.EncodeToString(dataKey), - } - - // Add encryption context if provided - if len(req.EncryptionContext) > 0 { - contextJSON, err := json.Marshal(req.EncryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context: %w", err) - } - encryptData["context"] = base64.StdEncoding.EncodeToString(contextJSON) - } - - // Call OpenBao/Vault Transit encrypt endpoint - path := fmt.Sprintf("%s/encrypt/%s", p.transitPath, req.KeyID) - secret, err := p.client.Logical().WriteWithContext(ctx, path, encryptData) - if err != nil { - return nil, p.convertVaultError(err, req.KeyID) - } - - if secret == nil || secret.Data == nil { - return nil, fmt.Errorf("no data returned from OpenBao/Vault encrypt operation") - } - - ciphertext, ok := secret.Data["ciphertext"].(string) - if !ok { - return nil, fmt.Errorf("invalid ciphertext format from OpenBao/Vault") - } - - // Create standardized envelope format for consistent API behavior - envelopeBlob, err := seaweedkms.CreateEnvelope("openbao", req.KeyID, ciphertext, nil) - if err != nil { - return nil, fmt.Errorf("failed to create ciphertext envelope: %w", err) - } - - response := &seaweedkms.GenerateDataKeyResponse{ - KeyID: req.KeyID, - Plaintext: dataKey, - CiphertextBlob: envelopeBlob, // Store in standardized envelope format - } - - glog.V(4).Infof("OpenBao KMS: Generated and encrypted data key using key %s", req.KeyID) - return response, nil -} - -// Decrypt decrypts an encrypted data key using OpenBao/Vault Transit -func (p *OpenBaoKMSProvider) Decrypt(ctx context.Context, req *seaweedkms.DecryptRequest) (*seaweedkms.DecryptResponse, error) { - if req == nil { - return nil, fmt.Errorf("DecryptRequest cannot be nil") - } - - if len(req.CiphertextBlob) == 0 { - return nil, fmt.Errorf("CiphertextBlob cannot be empty") - } - - // Parse the ciphertext envelope to extract key information - envelope, err := seaweedkms.ParseEnvelope(req.CiphertextBlob) - if err != nil { - return nil, fmt.Errorf("failed to parse ciphertext envelope: %w", err) - } - - keyID := envelope.KeyID - if keyID == "" { - return nil, fmt.Errorf("envelope missing key ID") - } - - // Use the ciphertext from envelope - ciphertext := envelope.Ciphertext - - // Prepare decryption data - decryptData := map[string]interface{}{ - "ciphertext": ciphertext, - } - - // Add encryption context if provided - if len(req.EncryptionContext) > 0 { - contextJSON, err := json.Marshal(req.EncryptionContext) - if err != nil { - return nil, fmt.Errorf("failed to marshal encryption context: %w", err) - } - decryptData["context"] = base64.StdEncoding.EncodeToString(contextJSON) - } - - // Call OpenBao/Vault Transit decrypt endpoint - path := fmt.Sprintf("%s/decrypt/%s", p.transitPath, keyID) - glog.V(4).Infof("OpenBao KMS: Decrypting data key using key %s", keyID) - secret, err := p.client.Logical().WriteWithContext(ctx, path, decryptData) - if err != nil { - return nil, p.convertVaultError(err, keyID) - } - - if secret == nil || secret.Data == nil { - return nil, fmt.Errorf("no data returned from OpenBao/Vault decrypt operation") - } - - plaintextB64, ok := secret.Data["plaintext"].(string) - if !ok { - return nil, fmt.Errorf("invalid plaintext format from OpenBao/Vault") - } - - plaintext, err := base64.StdEncoding.DecodeString(plaintextB64) - if err != nil { - return nil, fmt.Errorf("failed to decode plaintext from OpenBao/Vault: %w", err) - } - - response := &seaweedkms.DecryptResponse{ - KeyID: keyID, - Plaintext: plaintext, - } - - glog.V(4).Infof("OpenBao KMS: Decrypted data key using key %s", keyID) - return response, nil -} - -// DescribeKey validates that a key exists and returns its metadata -func (p *OpenBaoKMSProvider) DescribeKey(ctx context.Context, req *seaweedkms.DescribeKeyRequest) (*seaweedkms.DescribeKeyResponse, error) { - if req == nil { - return nil, fmt.Errorf("DescribeKeyRequest cannot be nil") - } - - if req.KeyID == "" { - return nil, fmt.Errorf("KeyID is required") - } - - // Get key information from OpenBao/Vault - path := fmt.Sprintf("%s/keys/%s", p.transitPath, req.KeyID) - glog.V(4).Infof("OpenBao KMS: Describing key %s", req.KeyID) - secret, err := p.client.Logical().ReadWithContext(ctx, path) - if err != nil { - return nil, p.convertVaultError(err, req.KeyID) - } - - if secret == nil || secret.Data == nil { - return nil, &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeNotFoundException, - Message: fmt.Sprintf("Key not found: %s", req.KeyID), - KeyID: req.KeyID, - } - } - - response := &seaweedkms.DescribeKeyResponse{ - KeyID: req.KeyID, - ARN: fmt.Sprintf("openbao:%s:key:%s", p.address, req.KeyID), - Description: "OpenBao/Vault Transit engine key", - } - - // Check key type and set usage - if keyType, ok := secret.Data["type"].(string); ok { - if keyType == "aes256-gcm96" || keyType == "aes128-gcm96" || keyType == "chacha20-poly1305" { - response.KeyUsage = seaweedkms.KeyUsageEncryptDecrypt - } else { - // Default to data key generation if not an encrypt/decrypt type - response.KeyUsage = seaweedkms.KeyUsageGenerateDataKey - } - } else { - // If type is missing, default to data key generation - response.KeyUsage = seaweedkms.KeyUsageGenerateDataKey - } - - // OpenBao/Vault keys are enabled by default (no disabled state in transit) - response.KeyState = seaweedkms.KeyStateEnabled - - // Keys in OpenBao/Vault transit are service-managed - response.Origin = seaweedkms.KeyOriginOpenBao - - glog.V(4).Infof("OpenBao KMS: Described key %s (state: %s)", req.KeyID, response.KeyState) - return response, nil -} - -// GetKeyID resolves a key name (already the full key ID in OpenBao/Vault) -func (p *OpenBaoKMSProvider) GetKeyID(ctx context.Context, keyIdentifier string) (string, error) { - if keyIdentifier == "" { - return "", fmt.Errorf("key identifier cannot be empty") - } - - // Use DescribeKey to validate the key exists - descReq := &seaweedkms.DescribeKeyRequest{KeyID: keyIdentifier} - descResp, err := p.DescribeKey(ctx, descReq) - if err != nil { - return "", fmt.Errorf("failed to resolve key identifier %s: %w", keyIdentifier, err) - } - - return descResp.KeyID, nil -} - -// Close cleans up any resources used by the provider -func (p *OpenBaoKMSProvider) Close() error { - // OpenBao/Vault client doesn't require explicit cleanup - glog.V(2).Infof("OpenBao/Vault KMS provider closed") - return nil -} - -// convertVaultError converts OpenBao/Vault errors to our standard KMS errors -func (p *OpenBaoKMSProvider) convertVaultError(err error, keyID string) error { - errMsg := err.Error() - - if strings.Contains(errMsg, "not found") || strings.Contains(errMsg, "no handler") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeNotFoundException, - Message: fmt.Sprintf("Key not found in OpenBao/Vault: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "permission") || strings.Contains(errMsg, "denied") || strings.Contains(errMsg, "forbidden") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeAccessDenied, - Message: fmt.Sprintf("Access denied to OpenBao/Vault: %v", err), - KeyID: keyID, - } - } - - if strings.Contains(errMsg, "disabled") || strings.Contains(errMsg, "unavailable") { - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKeyUnavailable, - Message: fmt.Sprintf("Key unavailable in OpenBao/Vault: %v", err), - KeyID: keyID, - } - } - - // For unknown errors, wrap as internal failure - return &seaweedkms.KMSError{ - Code: seaweedkms.ErrCodeKMSInternalFailure, - Message: fmt.Sprintf("OpenBao/Vault error: %v", err), - KeyID: keyID, - } -} diff --git a/weed/kms/registry.go b/weed/kms/registry.go deleted file mode 100644 index d1d812f71..000000000 --- a/weed/kms/registry.go +++ /dev/null @@ -1,145 +0,0 @@ -package kms - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// ProviderRegistry manages KMS provider implementations -type ProviderRegistry struct { - mu sync.RWMutex - providers map[string]ProviderFactory - instances map[string]KMSProvider -} - -// ProviderFactory creates a new KMS provider instance -type ProviderFactory func(config util.Configuration) (KMSProvider, error) - -var defaultRegistry = NewProviderRegistry() - -// NewProviderRegistry creates a new provider registry -func NewProviderRegistry() *ProviderRegistry { - return &ProviderRegistry{ - providers: make(map[string]ProviderFactory), - instances: make(map[string]KMSProvider), - } -} - -// RegisterProvider registers a new KMS provider factory -func RegisterProvider(name string, factory ProviderFactory) { - defaultRegistry.RegisterProvider(name, factory) -} - -// RegisterProvider registers a new KMS provider factory in this registry -func (r *ProviderRegistry) RegisterProvider(name string, factory ProviderFactory) { - r.mu.Lock() - defer r.mu.Unlock() - r.providers[name] = factory -} - -// GetProvider returns a KMS provider instance, creating it if necessary -func GetProvider(name string, config util.Configuration) (KMSProvider, error) { - return defaultRegistry.GetProvider(name, config) -} - -// GetProvider returns a KMS provider instance, creating it if necessary -func (r *ProviderRegistry) GetProvider(name string, config util.Configuration) (KMSProvider, error) { - r.mu.Lock() - defer r.mu.Unlock() - - // Return existing instance if available - if instance, exists := r.instances[name]; exists { - return instance, nil - } - - // Find the factory - factory, exists := r.providers[name] - if !exists { - return nil, fmt.Errorf("KMS provider '%s' not registered", name) - } - - // Create new instance - instance, err := factory(config) - if err != nil { - return nil, fmt.Errorf("failed to create KMS provider '%s': %v", name, err) - } - - // Cache the instance - r.instances[name] = instance - return instance, nil -} - -// ListProviders returns the names of all registered providers -func ListProviders() []string { - return defaultRegistry.ListProviders() -} - -// ListProviders returns the names of all registered providers -func (r *ProviderRegistry) ListProviders() []string { - r.mu.RLock() - defer r.mu.RUnlock() - - names := make([]string, 0, len(r.providers)) - for name := range r.providers { - names = append(names, name) - } - return names -} - -// CloseAll closes all provider instances -func CloseAll() error { - return defaultRegistry.CloseAll() -} - -// CloseAll closes all provider instances in this registry -func (r *ProviderRegistry) CloseAll() error { - r.mu.Lock() - defer r.mu.Unlock() - - var allErrors []error - for name, instance := range r.instances { - if err := instance.Close(); err != nil { - allErrors = append(allErrors, fmt.Errorf("failed to close KMS provider '%s': %w", name, err)) - } - } - - // Clear the instances map - r.instances = make(map[string]KMSProvider) - - return errors.Join(allErrors...) -} - -// WithKMSProvider is a helper function to execute code with a KMS provider -func WithKMSProvider(name string, config util.Configuration, fn func(KMSProvider) error) error { - provider, err := GetProvider(name, config) - if err != nil { - return err - } - return fn(provider) -} - -// TestKMSConnection tests the connection to a KMS provider -func TestKMSConnection(ctx context.Context, provider KMSProvider, testKeyID string) error { - if provider == nil { - return fmt.Errorf("KMS provider is nil") - } - - // Try to describe a test key to verify connectivity - _, err := provider.DescribeKey(ctx, &DescribeKeyRequest{ - KeyID: testKeyID, - }) - - if err != nil { - // If the key doesn't exist, that's still a successful connection test - if kmsErr, ok := err.(*KMSError); ok && kmsErr.Code == ErrCodeNotFoundException { - return nil - } - return fmt.Errorf("KMS connection test failed: %v", err) - } - - return nil -} diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go new file mode 100644 index 000000000..9a31a8ac0 --- /dev/null +++ b/weed/messaging/broker/broker_append.go @@ -0,0 +1,130 @@ +package broker + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error { + + assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data) + if err2 != nil { + return err2 + } + + dir, name := util.FullPath(targetFile).DirAndName() + + // append the chunk + if err := broker.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AppendToEntryRequest{ + Directory: dir, + EntryName: name, + Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)}, + } + + _, err := client.AppendToEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("append to file %v: %v", request, err) + return err + } + + return nil + }); err != nil { + return fmt.Errorf("append to file %v: %v", targetFile, err) + } + + return nil +} + +func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + + var assignResult = &operation.AssignResult{} + + // assign a volume location + if err := broker.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + + assignErr := util.Retry("assignVolume", func() error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: topicConfig.Replication, + Collection: topicConfig.Collection, + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + assignResult.Auth = security.EncodedJwt(resp.Auth) + assignResult.Fid = resp.FileId + assignResult.Url = resp.Location.Url + assignResult.PublicUrl = resp.Location.PublicUrl + assignResult.GrpcPort = int(resp.Location.GrpcPort) + assignResult.Count = uint64(resp.Count) + + return nil + }) + if assignErr != nil { + return assignErr + } + + return nil + }); err != nil { + return nil, nil, err + } + + // upload data + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + uploadOption := &operation.UploadOption{ + UploadUrl: targetUrl, + Filename: "", + Cipher: broker.option.Cipher, + IsInputCompressed: false, + MimeType: "", + PairMap: nil, + Jwt: assignResult.Auth, + } + uploadResult, err := operation.UploadData(data, uploadOption) + if err != nil { + return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) + } + // println("uploaded to", targetUrl) + return assignResult, uploadResult, nil +} + +var _ = filer_pb.FilerClient(&MessageBroker{}) + +func (broker *MessageBroker) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) (err error) { + + for _, filer := range broker.option.Filers { + if err = pb.WithFilerClient(streamingMode, filer, broker.grpcDialOption, fn); err != nil { + if err == io.EOF { + return + } + glog.V(0).Infof("fail to connect to %s: %v", filer, err) + } else { + break + } + } + + return + +} + +func (broker *MessageBroker) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go new file mode 100644 index 000000000..ba141fdd0 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server.go @@ -0,0 +1,37 @@ +package broker + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) { + panic("implement me") +} + +func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) { + resp := &messaging_pb.DeleteTopicResponse{} + dir, entry := genTopicDirEntry(request.Namespace, request.Topic) + if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil { + return nil, err + } else if exists { + err = filer_pb.Remove(broker, dir, entry, true, true, true, false, nil) + } + return resp, nil +} + +func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) { + panic("implement me") +} + +func genTopicDir(namespace, topic string) string { + return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic) +} + +func genTopicDirEntry(namespace, topic string) (dir, entry string) { + return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic +} diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go new file mode 100644 index 000000000..5cd8edd33 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_discovery.go @@ -0,0 +1,122 @@ +package broker + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/cluster" + "github.com/chrislusf/seaweedfs/weed/pb" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +/* +Topic discovery: + +When pub or sub connects, it ask for the whole broker list, and run consistent hashing to find the broker. + +The broker will check peers whether it is already hosted by some other broker, if that broker is alive and acknowledged alive, redirect to it. +Otherwise, just host the topic. + +So, if the pub or sub connects around the same time, they would connect to the same broker. Everyone is happy. +If one of the pub or sub connects very late, and the system topo changed quite a bit with new servers added or old servers died, checking peers will help. + +*/ + +func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) { + + t := &messaging_pb.FindBrokerResponse{} + var peers []string + + targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition) + + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(false, filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{ + Resource: targetTopicPartition, + }) + if err != nil { + return err + } + if resp.Found && len(resp.Resources) > 0 { + t.Broker = resp.Resources[0].GrpcAddresses + return nil + } + for _, b := range resp.Resources { + peers = append(peers, b.GrpcAddresses) + } + return nil + }) + if err != nil { + return nil, err + } + } + + t.Broker = PickMember(peers, []byte(targetTopicPartition)) + + return t, nil + +} + +func (broker *MessageBroker) checkFilers() { + + // contact a filer about masters + var masters []pb.ServerAddress + found := false + for !found { + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(false, filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + for _, m := range resp.Masters { + masters = append(masters, pb.ServerAddress(m)) + } + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received master list: %s", masters) + + // contact each masters for filers + var filers []pb.ServerAddress + found = false + for !found { + for _, master := range masters { + err := broker.withMasterClient(false, master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ + ClientType: cluster.FilerType, + }) + if err != nil { + return err + } + + for _, clusterNode := range resp.ClusterNodes { + filers = append(filers, pb.ServerAddress(clusterNode.Address)) + } + + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to list filers: %v", err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received filer list: %s", filers) + + broker.option.Filers = filers + +} diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go new file mode 100644 index 000000000..6e6b723d1 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_publish.go @@ -0,0 +1,112 @@ +package broker + +import ( + "crypto/md5" + "fmt" + "io" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // send init response + initResponse := &messaging_pb.PublishResponse{ + Config: nil, + Redirect: nil, + } + err = stream.Send(initResponse) + if err != nil { + return err + } + if initResponse.Redirect != nil { + return nil + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + + tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic) + md5File := fmt.Sprintf("p%02d.md5", tp.Partition) + // println("chan data stored under", tpDir, "as", md5File) + + if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists { + return fmt.Errorf("channel is already closed") + } + + tl := broker.topicManager.RequestLock(tp, topicConfig, true) + defer broker.topicManager.ReleaseLock(tp, true) + + md5hash := md5.New() + // process each message + for { + // println("recv") + in, err := stream.Recv() + // glog.V(0).Infof("recieved %v err: %v", in, err) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if in.Data == nil { + continue + } + + // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value)) + + data, err := proto.Marshal(in.Data) + if err != nil { + glog.Errorf("marshall error: %v\n", err) + continue + } + + tl.logBuffer.AddToBuffer(in.Data.Key, data, in.Data.EventTimeNs) + + if in.Data.IsClose { + // println("server received closing") + break + } + + md5hash.Write(in.Data.Value) + + } + + if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil { + glog.V(0).Infof("err writing %s: %v", md5File, err) + } + + // fmt.Printf("received md5 %X\n", md5hash.Sum(nil)) + + // send the close ack + // println("server send ack closing") + if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil { + glog.V(0).Infof("err sending close response: %v", err) + } + return nil + +} diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go new file mode 100644 index 000000000..20d529239 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_subscribe.go @@ -0,0 +1,178 @@ +package broker + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "io" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var processedTsNs int64 + var messageCount int64 + subscriberId := in.Init.SubscriberId + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String()) + defer func() { + fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs)) + }() + + lock := broker.topicManager.RequestLock(tp, topicConfig, false) + defer broker.topicManager.ReleaseLock(tp, false) + + isConnected := true + go func() { + for isConnected { + if _, err := stream.Recv(); err != nil { + // println("disconnecting connection to", subscriberId, tp.String()) + isConnected = false + lock.cond.Signal() + } + } + }() + + lastReadTime := time.Now() + switch in.Init.StartPosition { + case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP: + lastReadTime = time.Unix(0, in.Init.TimestampNs) + case messaging_pb.SubscriberMessage_InitMessage_LATEST: + case messaging_pb.SubscriberMessage_InitMessage_EARLIEST: + lastReadTime = time.Unix(0, 0) + } + + // how to process each message + // an error returned will end the subscription + eachMessageFn := func(m *messaging_pb.Message) error { + err := stream.Send(&messaging_pb.BrokerMessage{ + Data: m, + }) + if err != nil { + glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err) + } + return err + } + + eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error { + m := &messaging_pb.Message{} + if err = proto.Unmarshal(logEntry.Data, m); err != nil { + glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err) + return err + } + // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs) + if err = eachMessageFn(m); err != nil { + glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err) + return err + } + if m.IsClose { + // println("processed EOF") + return io.EOF + } + processedTsNs = logEntry.TsNs + messageCount++ + return nil + } + + // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime) + + for { + + if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil { + if err != io.EOF { + // println("stopping from persisted logs", err.Error()) + return err + } + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + lastReadTime, _, err = lock.logBuffer.LoopProcessLogData("broker", lastReadTime, 0, func() bool { + lock.Mutex.Lock() + lock.cond.Wait() + lock.Mutex.Unlock() + return isConnected + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) { + startTime = startTime.UTC() + startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + topicDir := genTopicDir(tp.Namespace, tp.Topic) + partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition) + + return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error { + dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name) + return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error { + if dayEntry.Name == startDate { + hourMinute := util.FileNameBase(hourMinuteEntry.Name) + if strings.Compare(hourMinute, startHourMinute) < 0 { + return nil + } + } + if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) { + return nil + } + // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name) + chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks) + defer chunkedFileReader.Close() + if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, 0, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + return err + } + return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err) + } + return nil + }, "", false, 24*60) + }, startDate, true, 366) + +} diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go new file mode 100644 index 000000000..acf2d6d34 --- /dev/null +++ b/weed/messaging/broker/broker_server.go @@ -0,0 +1,116 @@ +package broker + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +type MessageBrokerOption struct { + Filers []pb.ServerAddress + DefaultReplication string + MaxMB int + Ip string + Port int + Cipher bool +} + +type MessageBroker struct { + messaging_pb.UnimplementedSeaweedMessagingServer + option *MessageBrokerOption + grpcDialOption grpc.DialOption + topicManager *TopicManager +} + +func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: grpcDialOption, + } + + messageBroker.topicManager = NewTopicManager(messageBroker) + + messageBroker.checkFilers() + + go messageBroker.keepConnectedToOneFiler() + + return messageBroker, nil +} + +func (broker *MessageBroker) keepConnectedToOneFiler() { + + for { + for _, filer := range broker.option.Filers { + broker.withFilerClient(false, filer, func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.KeepConnected(ctx) + if err != nil { + glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + initRequest := &filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + } + for _, tp := range broker.topicManager.ListTopicPartitions() { + initRequest.Resources = append(initRequest.Resources, tp.String()) + } + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + // TODO send events of adding/removing topics + + glog.V(0).Infof("conntected with filer: %v", filer) + for { + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("send heartbeat") + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("received reply") + time.Sleep(11 * time.Second) + // println("woke up") + } + return nil + }) + time.Sleep(3 * time.Second) + } + } + +} + +func (broker *MessageBroker) withFilerClient(streamingMode bool, filer pb.ServerAddress, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(streamingMode, filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(streamingMode bool, master pb.ServerAddress, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(streamingMode, master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go new file mode 100644 index 000000000..465a2a8f2 --- /dev/null +++ b/weed/messaging/broker/consistent_distribution.go @@ -0,0 +1,38 @@ +package broker + +import ( + "github.com/buraksezer/consistent" + "github.com/cespare/xxhash" +) + +type Member string + +func (m Member) String() string { + return string(m) +} + +type hasher struct{} + +func (h hasher) Sum64(data []byte) uint64 { + return xxhash.Sum64(data) +} + +func PickMember(members []string, key []byte) string { + cfg := consistent.Config{ + PartitionCount: 9791, + ReplicationFactor: 2, + Load: 1.25, + Hasher: hasher{}, + } + + cmembers := []consistent.Member{} + for _, m := range members { + cmembers = append(cmembers, Member(m)) + } + + c := consistent.New(cmembers, cfg) + + m := c.LocateKey(key) + + return m.String() +} diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go new file mode 100644 index 000000000..f58fe4e0e --- /dev/null +++ b/weed/messaging/broker/consistent_distribution_test.go @@ -0,0 +1,32 @@ +package broker + +import ( + "fmt" + "testing" +) + +func TestPickMember(t *testing.T) { + + servers := []string{ + "s1:port", + "s2:port", + "s3:port", + "s5:port", + "s4:port", + } + + total := 1000 + + distribution := make(map[string]int) + for i := 0; i < total; i++ { + tp := fmt.Sprintf("tp:%2d", i) + m := PickMember(servers, []byte(tp)) + // println(tp, "=>", m) + distribution[m]++ + } + + for member, count := range distribution { + fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers))) + } + +} diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go new file mode 100644 index 000000000..c303c29b3 --- /dev/null +++ b/weed/messaging/broker/topic_manager.go @@ -0,0 +1,124 @@ +package broker + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type TopicPartition struct { + Namespace string + Topic string + Partition int32 +} + +const ( + TopicPartitionFmt = "%s/%s_%02d" +) + +func (tp *TopicPartition) String() string { + return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition) +} + +type TopicControl struct { + sync.Mutex + cond *sync.Cond + subscriberCount int + publisherCount int + logBuffer *log_buffer.LogBuffer +} + +type TopicManager struct { + sync.Mutex + topicControls map[TopicPartition]*TopicControl + broker *MessageBroker +} + +func NewTopicManager(messageBroker *MessageBroker) *TopicManager { + return &TopicManager{ + topicControls: make(map[TopicPartition]*TopicControl), + broker: messageBroker, + } +} + +func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer { + + flushFn := func(startTime, stopTime time.Time, buf []byte) { + + if topicConfig.IsTransient { + // return + } + + // fmt.Printf("flushing with topic config %+v\n", topicConfig) + + startTime, stopTime = startTime.UTC(), stopTime.UTC() + targetFile := fmt.Sprintf( + "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d", + filer.TopicsDir, tp.Namespace, tp.Topic, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + tp.Partition, + ) + + if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil { + glog.V(0).Infof("log write failed %s: %v", targetFile, err) + } + } + logBuffer := log_buffer.NewLogBuffer("broker", time.Minute, flushFn, func() { + tl.cond.Broadcast() + }) + + return logBuffer +} + +func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl { + tm.Lock() + defer tm.Unlock() + + tc, found := tm.topicControls[partition] + if !found { + tc = &TopicControl{} + tc.cond = sync.NewCond(&tc.Mutex) + tm.topicControls[partition] = tc + tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig) + } + if isPublisher { + tc.publisherCount++ + } else { + tc.subscriberCount++ + } + return tc +} + +func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) { + tm.Lock() + defer tm.Unlock() + + lock, found := tm.topicControls[partition] + if !found { + return + } + if isPublisher { + lock.publisherCount-- + } else { + lock.subscriberCount-- + } + if lock.subscriberCount <= 0 && lock.publisherCount <= 0 { + delete(tm.topicControls, partition) + lock.logBuffer.Shutdown() + } +} + +func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) { + tm.Lock() + defer tm.Unlock() + + for k := range tm.topicControls { + tps = append(tps, k) + } + return +} diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go new file mode 100644 index 000000000..a75678815 --- /dev/null +++ b/weed/messaging/msgclient/chan_config.go @@ -0,0 +1,5 @@ +package msgclient + +func (mc *MessagingClient) DeleteChannel(chanName string) error { + return mc.DeleteTopic("chan", chanName) +} diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go new file mode 100644 index 000000000..9bc88f7c0 --- /dev/null +++ b/weed/messaging/msgclient/chan_pub.go @@ -0,0 +1,76 @@ +package msgclient + +import ( + "crypto/md5" + "hash" + "io" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type PubChannel struct { + client messaging_pb.SeaweedMessaging_PublishClient + grpcConnection *grpc.ClientConn + md5hash hash.Hash +} + +func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + pc, err := setupPublisherClient(grpcConnection, tp) + if err != nil { + return nil, err + } + return &PubChannel{ + client: pc, + grpcConnection: grpcConnection, + md5hash: md5.New(), + }, nil +} + +func (pc *PubChannel) Publish(m []byte) error { + err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + Value: m, + }, + }) + if err == nil { + pc.md5hash.Write(m) + } + return err +} +func (pc *PubChannel) Close() error { + + // println("send closing") + if err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + IsClose: true, + }, + }); err != nil { + log.Printf("err send close: %v", err) + } + // println("receive closing") + if _, err := pc.client.Recv(); err != nil && err != io.EOF { + log.Printf("err receive close: %v", err) + } + // println("close connection") + if err := pc.grpcConnection.Close(); err != nil { + log.Printf("err connection close: %v", err) + } + return nil +} + +func (pc *PubChannel) Md5() []byte { + return pc.md5hash.Sum(nil) +} diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go new file mode 100644 index 000000000..213ff4666 --- /dev/null +++ b/weed/messaging/msgclient/chan_sub.go @@ -0,0 +1,85 @@ +package msgclient + +import ( + "context" + "crypto/md5" + "hash" + "io" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type SubChannel struct { + ch chan []byte + stream messaging_pb.SeaweedMessaging_SubscribeClient + md5hash hash.Hash + cancel context.CancelFunc +} + +func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0)) + if err != nil { + return nil, err + } + + t := &SubChannel{ + ch: make(chan []byte), + stream: sc, + md5hash: md5.New(), + cancel: cancel, + } + + go func() { + for { + resp, subErr := t.stream.Recv() + if subErr == io.EOF { + return + } + if subErr != nil { + log.Printf("fail to receive from netchan %s: %v", chanName, subErr) + return + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + if resp.Data.IsClose { + t.stream.Send(&messaging_pb.SubscriberMessage{ + IsClose: true, + }) + close(t.ch) + cancel() + return + } + t.ch <- resp.Data.Value + t.md5hash.Write(resp.Data.Value) + } + }() + + return t, nil +} + +func (sc *SubChannel) Channel() chan []byte { + return sc.ch +} + +func (sc *SubChannel) Md5() []byte { + return sc.md5hash.Sum(nil) +} + +func (sc *SubChannel) Cancel() { + sc.cancel() +} diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go new file mode 100644 index 000000000..4d7ef2b8e --- /dev/null +++ b/weed/messaging/msgclient/client.go @@ -0,0 +1,55 @@ +package msgclient + +import ( + "context" + "fmt" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessagingClient struct { + bootstrapBrokers []string + grpcConnections map[broker.TopicPartition]*grpc.ClientConn + grpcDialOption grpc.DialOption +} + +func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient { + return &MessagingClient{ + bootstrapBrokers: bootstrapBrokers, + grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"), + } +} + +func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) { + + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(), + &messaging_pb.FindBrokerRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Parition: tp.Partition, + }) + if err != nil { + return nil, err + } + + targetBroker := resp.Broker + return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption) + } + return nil, fmt.Errorf("no broker found for %+v", tp) +} diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go new file mode 100644 index 000000000..2b9eba1a8 --- /dev/null +++ b/weed/messaging/msgclient/config.go @@ -0,0 +1,63 @@ +package msgclient + +import ( + "context" + "log" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.ConfigureTopic(context.Background(), + &messaging_pb.ConfigureTopicRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Configuration: &messaging_pb.TopicConfiguration{ + PartitionCount: 0, + Collection: "", + Replication: "", + IsTransient: false, + Partitoning: 0, + }, + }) + return err + }) + +} + +func (mc *MessagingClient) DeleteTopic(namespace, topic string) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.DeleteTopic(context.Background(), + &messaging_pb.DeleteTopicRequest{ + Namespace: namespace, + Topic: topic, + }) + return err + }) +} + +func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + var lastErr error + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection)) + if err == nil { + return nil + } + lastErr = err + } + + return lastErr +} diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go new file mode 100644 index 000000000..1aa483ff8 --- /dev/null +++ b/weed/messaging/msgclient/publisher.go @@ -0,0 +1,118 @@ +package msgclient + +import ( + "context" + + "github.com/OneOfOne/xxhash" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type Publisher struct { + publishClients []messaging_pb.SeaweedMessaging_PublishClient + topicConfiguration *messaging_pb.TopicConfiguration + messageCount uint64 + publisherId string +} + +func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount) + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + client, err := setupPublisherClient(grpcClientConn, tp) + if err != nil { + return nil, err + } + publishClients[i] = client + } + return &Publisher{ + publishClients: publishClients, + topicConfiguration: topicConfiguration, + }, nil +} + +func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) { + + stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background()) + if err != nil { + return nil, err + } + + // send init message + err = stream.Send(&messaging_pb.PublishRequest{ + Init: &messaging_pb.PublishRequest_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + }, + }) + if err != nil { + return nil, err + } + + // process init response + initResponse, err := stream.Recv() + if err != nil { + return nil, err + } + if initResponse.Redirect != nil { + // TODO follow redirection + } + if initResponse.Config != nil { + } + + // setup looks for control messages + doneChan := make(chan error, 1) + go func() { + for { + in, err := stream.Recv() + if err != nil { + doneChan <- err + return + } + if in.Redirect != nil { + } + if in.Config != nil { + } + } + }() + + return stream, nil + +} + +func (p *Publisher) Publish(m *messaging_pb.Message) error { + hashValue := p.messageCount + p.messageCount++ + if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash { + if m.Key != nil { + hashValue = xxhash.Checksum64(m.Key) + } + } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash { + hashValue = xxhash.Checksum64(m.Key) + } else { + // round robin + } + + idx := int(hashValue) % len(p.publishClients) + if idx < 0 { + idx += len(p.publishClients) + } + return p.publishClients[idx].Send(&messaging_pb.PublishRequest{ + Data: m, + }) +} diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go new file mode 100644 index 000000000..6c7dc1ab7 --- /dev/null +++ b/weed/messaging/msgclient/subscriber.go @@ -0,0 +1,120 @@ +package msgclient + +import ( + "context" + "io" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "google.golang.org/grpc" +) + +type Subscriber struct { + subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient + subscriberCancels []context.CancelFunc + subscriberId string +} + +func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount) + subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount) + + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + if partitionId >= 0 && i != partitionId { + continue + } + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime) + if err != nil { + return nil, err + } + subscriberClients[i] = client + subscriberCancels[i] = cancel + } + + return &Subscriber{ + subscriberClients: subscriberClients, + subscriberCancels: subscriberCancels, + subscriberId: subscriberId, + }, nil +} + +func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) { + stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx) + if err != nil { + return + } + + // send init message + err = stream.Send(&messaging_pb.SubscriberMessage{ + Init: &messaging_pb.SubscriberMessage_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP, + TimestampNs: startTime.UnixNano(), + SubscriberId: subscriberId, + }, + }) + if err != nil { + return + } + + return stream, nil +} + +func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error { + for { + resp, listenErr := subscriberClient.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + println(listenErr.Error()) + return listenErr + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + processFn(resp.Data) + } +} + +// Subscribe starts goroutines to process the messages +func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) { + var wg sync.WaitGroup + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberClients[i] != nil { + wg.Add(1) + go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) { + defer wg.Done() + doSubscribe(subscriberClient, processFn) + }(s.subscriberClients[i]) + } + } + wg.Wait() +} + +func (s *Subscriber) Shutdown() { + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberCancels[i] != nil { + s.subscriberCancels[i]() + } + } +} diff --git a/weed/mount/dirty_pages_chunked.go b/weed/mount/dirty_pages_chunked.go index 25b071e7d..52308e0e5 100644 --- a/weed/mount/dirty_pages_chunked.go +++ b/weed/mount/dirty_pages_chunked.go @@ -2,12 +2,12 @@ package mount import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/mount/page_writer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" "sync" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/page_writer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "time" ) type ChunkedDirtyPages struct { @@ -30,19 +30,19 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages { fh: fh, } - swapFileDir := fh.wfs.option.getUniqueCacheDirForWrite() + swapFileDir := fh.wfs.option.getTempFilePageDir() dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(fh.wfs.concurrentWriters, chunkSize, - dirtyPages.saveChunkedFileIntervalToStorage, fh.wfs.option.ConcurrentWriters, swapFileDir) + dirtyPages.saveChunkedFileIntevalToStorage, fh.wfs.option.ConcurrentWriters, swapFileDir) return dirtyPages } -func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { +func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool) { pages.hasWrites = true glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data))) - pages.uploadPipeline.SaveDataAt(data, offset, isSequential, tsNs) + pages.uploadPipeline.SaveDataAt(data, offset, isSequential) return } @@ -58,32 +58,39 @@ func (pages *ChunkedDirtyPages) FlushData() error { return nil } -func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64, tsNs int64) (maxStop int64) { +func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) { if !pages.hasWrites { return } - return pages.uploadPipeline.MaybeReadDataAt(data, startOffset, tsNs) + return pages.uploadPipeline.MaybeReadDataAt(data, startOffset) } -func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) { +func (pages *ChunkedDirtyPages) GetStorageOptions() (collection, replication string) { + return pages.collection, pages.replication +} +func (pages *ChunkedDirtyPages) saveChunkedFileIntevalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) { + + mtime := time.Now().UnixNano() defer cleanupFn() fileFullPath := pages.fh.FullPath() fileName := fileFullPath.Name() - chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset, modifiedTsNs) + chunk, collection, replication, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset) if err != nil { glog.V(0).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err) pages.lastErr = err return } + chunk.Mtime = mtime + pages.collection, pages.replication = collection, replication pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) - pages.fh.entryChunkGroup.AddChunk(chunk) + pages.fh.entryViewCache = nil glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) } -func (pages *ChunkedDirtyPages) Destroy() { +func (pages ChunkedDirtyPages) Destroy() { pages.uploadPipeline.Shutdown() } diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go index c20f9eca8..0dfcbd7f6 100644 --- a/weed/mount/filehandle.go +++ b/weed/mount/filehandle.go @@ -1,43 +1,34 @@ package mount import ( - "os" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "golang.org/x/exp/slices" "sync" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) type FileHandleId uint64 -var IsDebugFileReadWrite = false - type FileHandle struct { - fh FileHandleId - counter int64 - entry *LockedEntry - entryLock sync.RWMutex - entryChunkGroup *filer.ChunkGroup - inode uint64 - wfs *WFS + fh FileHandleId + counter int64 + entry *filer_pb.Entry + entryLock sync.Mutex + inode uint64 + wfs *WFS // cache file has been written to - dirtyMetadata bool - dirtyPages *PageWriter - reader *filer.ChunkReadAt - contentType string + dirtyMetadata bool + dirtyPages *PageWriter + entryViewCache []filer.VisibleInterval + reader *filer.ChunkReadAt + contentType string + handle uint64 + sync.Mutex isDeleted bool - - // RDMA chunk offset cache for performance optimization - chunkOffsetCache []int64 - chunkCacheValid bool - chunkCacheLock sync.RWMutex - - // for debugging - mirrorFile *os.File } func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_pb.Entry) *FileHandle { @@ -49,19 +40,8 @@ func newFileHandle(wfs *WFS, handleId FileHandleId, inode uint64, entry *filer_p } // dirtyPages: newContinuousDirtyPages(file, writeOnly), fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit) - fh.entry = &LockedEntry{ - Entry: entry, - } if entry != nil { - fh.SetEntry(entry) - } - - if IsDebugFileReadWrite { - var err error - fh.mirrorFile, err = os.OpenFile("/tmp/sw/"+entry.Name, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - println("failed to create mirror:", err.Error()) - } + entry.Attributes.FileSize = filer.FileSize(entry) } return fh @@ -72,103 +52,66 @@ func (fh *FileHandle) FullPath() util.FullPath { return fp } -func (fh *FileHandle) GetEntry() *LockedEntry { +func (fh *FileHandle) GetEntry() *filer_pb.Entry { + fh.entryLock.Lock() + defer fh.entryLock.Unlock() return fh.entry } - func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) { - if entry != nil { - fileSize := filer.FileSize(entry) - entry.Attributes.FileSize = fileSize - var resolveManifestErr error - fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks) - if resolveManifestErr != nil { - glog.Warningf("failed to resolve manifest chunks in %+v", entry) - } - } else { - glog.Fatalf("setting file handle entry to nil") - } - fh.entry.SetEntry(entry) - - // Invalidate chunk offset cache since chunks may have changed - fh.invalidateChunkCache() -} - -func (fh *FileHandle) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry { - result := fh.entry.UpdateEntry(fn) - - // Invalidate chunk offset cache since entry may have been modified - fh.invalidateChunkCache() - - return result + fh.entryLock.Lock() + defer fh.entryLock.Unlock() + fh.entry = entry } func (fh *FileHandle) AddChunks(chunks []*filer_pb.FileChunk) { - fh.entry.AppendChunks(chunks) + fh.entryLock.Lock() + defer fh.entryLock.Unlock() - // Invalidate chunk offset cache since new chunks were added - fh.invalidateChunkCache() + // find the earliest incoming chunk + newChunks := chunks + earliestChunk := newChunks[0] + for i := 1; i < len(newChunks); i++ { + if lessThan(earliestChunk, newChunks[i]) { + earliestChunk = newChunks[i] + } + } + + if fh.entry == nil { + return + } + + // pick out-of-order chunks from existing chunks + for _, chunk := range fh.entry.Chunks { + if lessThan(earliestChunk, chunk) { + chunks = append(chunks, chunk) + } + } + + // sort incoming chunks + slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { + return lessThan(a, b) + }) + + glog.V(4).Infof("%s existing %d chunks adds %d more", fh.FullPath(), len(fh.entry.Chunks), len(chunks)) + + fh.entry.Chunks = append(fh.entry.Chunks, newChunks...) + fh.entryViewCache = nil } -func (fh *FileHandle) ReleaseHandle() { - - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("ReleaseHandle", fh.fh, util.ExclusiveLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - - fh.dirtyPages.Destroy() - if IsDebugFileReadWrite { - fh.mirrorFile.Close() +func (fh *FileHandle) CloseReader() { + if fh.reader != nil { + fh.reader.Close() } } +func (fh *FileHandle) Release() { + fh.dirtyPages.Destroy() + fh.CloseReader() +} + func lessThan(a, b *filer_pb.FileChunk) bool { - if a.ModifiedTsNs == b.ModifiedTsNs { + if a.Mtime == b.Mtime { return a.Fid.FileKey < b.Fid.FileKey } - return a.ModifiedTsNs < b.ModifiedTsNs -} - -// getCumulativeOffsets returns cached cumulative offsets for chunks, computing them if necessary -func (fh *FileHandle) getCumulativeOffsets(chunks []*filer_pb.FileChunk) []int64 { - fh.chunkCacheLock.RLock() - if fh.chunkCacheValid && len(fh.chunkOffsetCache) == len(chunks)+1 { - // Cache is valid and matches current chunk count - result := make([]int64, len(fh.chunkOffsetCache)) - copy(result, fh.chunkOffsetCache) - fh.chunkCacheLock.RUnlock() - return result - } - fh.chunkCacheLock.RUnlock() - - // Need to compute/recompute cache - fh.chunkCacheLock.Lock() - defer fh.chunkCacheLock.Unlock() - - // Double-check in case another goroutine computed it while we waited for the lock - if fh.chunkCacheValid && len(fh.chunkOffsetCache) == len(chunks)+1 { - result := make([]int64, len(fh.chunkOffsetCache)) - copy(result, fh.chunkOffsetCache) - return result - } - - // Compute cumulative offsets - cumulativeOffsets := make([]int64, len(chunks)+1) - for i, chunk := range chunks { - cumulativeOffsets[i+1] = cumulativeOffsets[i] + int64(chunk.Size) - } - - // Cache the result - fh.chunkOffsetCache = make([]int64, len(cumulativeOffsets)) - copy(fh.chunkOffsetCache, cumulativeOffsets) - fh.chunkCacheValid = true - - return cumulativeOffsets -} - -// invalidateChunkCache invalidates the chunk offset cache when chunks are modified -func (fh *FileHandle) invalidateChunkCache() { - fh.chunkCacheLock.Lock() - fh.chunkCacheValid = false - fh.chunkOffsetCache = nil - fh.chunkCacheLock.Unlock() + return a.Mtime < b.Mtime } diff --git a/weed/mount/filehandle_map.go b/weed/mount/filehandle_map.go index 4441de0be..e6e2d15c9 100644 --- a/weed/mount/filehandle_map.go +++ b/weed/mount/filehandle_map.go @@ -1,15 +1,13 @@ package mount import ( + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "sync" - - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) type FileHandleToInode struct { sync.RWMutex + nextFh FileHandleId inode2fh map[uint64]*FileHandle fh2inode map[FileHandleId]uint64 } @@ -18,6 +16,7 @@ func NewFileHandleToInode() *FileHandleToInode { return &FileHandleToInode{ inode2fh: make(map[uint64]*FileHandle), fh2inode: make(map[FileHandleId]uint64), + nextFh: 0, } } @@ -43,15 +42,14 @@ func (i *FileHandleToInode) AcquireFileHandle(wfs *WFS, inode uint64, entry *fil defer i.Unlock() fh, found := i.inode2fh[inode] if !found { - fh = newFileHandle(wfs, FileHandleId(util.RandomUint64()), inode, entry) + fh = newFileHandle(wfs, i.nextFh, inode, entry) + i.nextFh++ i.inode2fh[inode] = fh i.fh2inode[fh.fh] = inode } else { fh.counter++ } - if fh.GetEntry().GetEntry() != entry { - fh.SetEntry(entry) - } + fh.entry = entry return fh } @@ -64,30 +62,26 @@ func (i *FileHandleToInode) ReleaseByInode(inode uint64) { if fh.counter <= 0 { delete(i.inode2fh, inode) delete(i.fh2inode, fh.fh) - fh.ReleaseHandle() + fh.Release() } } } - func (i *FileHandleToInode) ReleaseByHandle(fh FileHandleId) { i.Lock() defer i.Unlock() - inode, found := i.fh2inode[fh] - if !found { - return // Handle already released or invalid - } + if found { + fhHandle, fhFound := i.inode2fh[inode] + if !fhFound { + delete(i.fh2inode, fh) + } else { + fhHandle.counter-- + if fhHandle.counter <= 0 { + delete(i.inode2fh, inode) + delete(i.fh2inode, fhHandle.fh) + fhHandle.Release() + } + } - fhHandle, fhFound := i.inode2fh[inode] - if !fhFound { - delete(i.fh2inode, fh) - return - } - - fhHandle.counter-- - if fhHandle.counter <= 0 { - delete(i.inode2fh, inode) - delete(i.fh2inode, fhHandle.fh) - fhHandle.ReleaseHandle() } } diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go index 88b020bf1..45fc10a0b 100644 --- a/weed/mount/filehandle_read.go +++ b/weed/mount/filehandle_read.go @@ -3,12 +3,10 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "io" - "sort" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) func (fh *FileHandle) lockForRead(startOffset int64, size int) { @@ -18,65 +16,65 @@ func (fh *FileHandle) unlockForRead(startOffset int64, size int) { fh.dirtyPages.UnlockForRead(startOffset, startOffset+int64(size)) } -func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64, tsNs int64) (maxStop int64) { - maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset, tsNs) +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) { + maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset) return } -func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, error) { - return fh.readFromChunksWithContext(context.Background(), buff, offset) -} - -func (fh *FileHandle) readFromChunksWithContext(ctx context.Context, buff []byte, offset int64) (int64, int64, error) { - fh.entryLock.RLock() - defer fh.entryLock.RUnlock() +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { fileFullPath := fh.FullPath() - entry := fh.GetEntry() + fh.entryLock.Lock() + defer fh.entryLock.Unlock() + + entry := fh.entry + if entry == nil { + return 0, io.EOF + } if entry.IsInRemoteOnly() { glog.V(4).Infof("download remote entry %s", fileFullPath) - err := fh.downloadRemoteEntry(entry) + newEntry, err := fh.downloadRemoteEntry(entry) if err != nil { glog.V(1).Infof("download remote entry %s: %v", fileFullPath, err) - return 0, 0, err + return 0, err } + entry = newEntry } - fileSize := int64(entry.Attributes.FileSize) - if fileSize == 0 { - fileSize = int64(filer.FileSize(entry.GetEntry())) - } + fileSize := int64(filer.FileSize(entry)) if fileSize == 0 { glog.V(1).Infof("empty fh %v", fileFullPath) - return 0, 0, io.EOF - } else if offset == fileSize { - return 0, 0, io.EOF - } else if offset >= fileSize { - glog.V(1).Infof("invalid read, fileSize %d, offset %d for %s", fileSize, offset, fileFullPath) - return 0, 0, io.EOF + return 0, io.EOF } - if offset < int64(len(entry.Content)) { + if offset+int64(len(buff)) <= int64(len(entry.Content)) { totalRead := copy(buff, entry.Content[offset:]) glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) - return int64(totalRead), 0, nil + return int64(totalRead), nil } - // Try RDMA acceleration first if available - if fh.wfs.rdmaClient != nil && fh.wfs.option.RdmaEnabled { - totalRead, ts, err := fh.tryRDMARead(ctx, fileSize, buff, offset, entry) - if err == nil { - glog.V(4).Infof("RDMA read successful for %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) - return int64(totalRead), ts, nil + var chunkResolveErr error + if fh.entryViewCache == nil { + fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.wfs.LookupFn(), entry.Chunks, 0, fileSize) + if chunkResolveErr != nil { + return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr) } - glog.V(4).Infof("RDMA read failed for %s, falling back to HTTP: %v", fileFullPath, err) + fh.CloseReader() } - // Fall back to normal chunk reading - totalRead, ts, err := fh.entryChunkGroup.ReadDataAt(ctx, fileSize, buff, offset) + if fh.reader == nil { + chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, fileSize) + glog.V(4).Infof("file handle read %s [%d,%d) from %d views", fileFullPath, offset, offset+int64(len(buff)), len(chunkViews)) + for _, chunkView := range chunkViews { + glog.V(4).Infof(" read %s [%d,%d) from chunk %+v", fileFullPath, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.FileId) + } + fh.reader = filer.NewChunkReaderAtFromClient(fh.wfs.LookupFn(), chunkViews, fh.wfs.chunkCache, fileSize) + } + + totalRead, err := fh.reader.ReadAt(buff, offset) if err != nil && err != io.EOF { glog.Errorf("file handle read %s: %v", fileFullPath, err) @@ -84,65 +82,10 @@ func (fh *FileHandle) readFromChunksWithContext(ctx context.Context, buff []byte // glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) - return int64(totalRead), ts, err + return int64(totalRead), err } -// tryRDMARead attempts to read file data using RDMA acceleration -func (fh *FileHandle) tryRDMARead(ctx context.Context, fileSize int64, buff []byte, offset int64, entry *LockedEntry) (int64, int64, error) { - // For now, we'll try to read the chunks directly using RDMA - // This is a simplified approach - in a full implementation, we'd need to - // handle chunk boundaries, multiple chunks, etc. - - chunks := entry.GetEntry().Chunks - if len(chunks) == 0 { - return 0, 0, fmt.Errorf("no chunks available for RDMA read") - } - - // Find the chunk that contains our offset using binary search - var targetChunk *filer_pb.FileChunk - var chunkOffset int64 - - // Get cached cumulative offsets for efficient binary search - cumulativeOffsets := fh.getCumulativeOffsets(chunks) - - // Use binary search to find the chunk containing the offset - chunkIndex := sort.Search(len(chunks), func(i int) bool { - return offset < cumulativeOffsets[i+1] - }) - - // Verify the chunk actually contains our offset - if chunkIndex < len(chunks) && offset >= cumulativeOffsets[chunkIndex] { - targetChunk = chunks[chunkIndex] - chunkOffset = offset - cumulativeOffsets[chunkIndex] - } - - if targetChunk == nil { - return 0, 0, fmt.Errorf("no chunk found for offset %d", offset) - } - - // Calculate how much to read from this chunk - remainingInChunk := int64(targetChunk.Size) - chunkOffset - readSize := min(int64(len(buff)), remainingInChunk) - - glog.V(4).Infof("RDMA read attempt: chunk=%s (fileId=%s), chunkOffset=%d, readSize=%d", - targetChunk.FileId, targetChunk.FileId, chunkOffset, readSize) - - // Try RDMA read using file ID directly (more efficient) - data, isRDMA, err := fh.wfs.rdmaClient.ReadNeedle(ctx, targetChunk.FileId, uint64(chunkOffset), uint64(readSize)) - if err != nil { - return 0, 0, fmt.Errorf("RDMA read failed: %w", err) - } - - if !isRDMA { - return 0, 0, fmt.Errorf("RDMA not available for chunk") - } - - // Copy data to buffer - copied := copy(buff, data) - return int64(copied), targetChunk.ModifiedTsNs, nil -} - -func (fh *FileHandle) downloadRemoteEntry(entry *LockedEntry) error { +func (fh *FileHandle) downloadRemoteEntry(entry *filer_pb.Entry) (*filer_pb.Entry, error) { fileFullPath := fh.FullPath() dir, _ := fileFullPath.DirAndName() @@ -160,12 +103,12 @@ func (fh *FileHandle) downloadRemoteEntry(entry *LockedEntry) error { return fmt.Errorf("CacheRemoteObjectToLocalCluster file %s: %v", fileFullPath, err) } - fh.SetEntry(resp.Entry) + entry = resp.Entry fh.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, resp.Entry)) return nil }) - return err + return entry, err } diff --git a/weed/mount/filer_conf.go b/weed/mount/filer_conf.go deleted file mode 100644 index 3c71bb9ce..000000000 --- a/weed/mount/filer_conf.go +++ /dev/null @@ -1,106 +0,0 @@ -package mount - -import ( - "errors" - "fmt" - "path/filepath" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func (wfs *WFS) subscribeFilerConfEvents() (*meta_cache.MetadataFollower, error) { - confDir := filer.DirectoryEtcSeaweedFS - confName := filer.FilerConfName - confFullName := filepath.Join(filer.DirectoryEtcSeaweedFS, filer.FilerConfName) - - // read current conf - err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - content, err := filer.ReadInsideFiler(client, confDir, confName) - if err != nil { - return err - } - - fc := filer.NewFilerConf() - if len(content) > 0 { - if err := fc.LoadFromBytes(content); err != nil { - return fmt.Errorf("parse %s: %v", confFullName, err) - } - } - - wfs.FilerConf = fc - - return nil - }) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - glog.V(0).Infof("fuse filer conf %s not found", confFullName) - } else { - return nil, err - } - } - - processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { - message := resp.EventNotification - if message.NewEntry == nil { - return nil - } - - dir := resp.Directory - name := resp.EventNotification.NewEntry.Name - - if dir != confDir || name != confName { - return nil - } - - content := message.NewEntry.Content - fc := filer.NewFilerConf() - if len(content) > 0 { - if err = fc.LoadFromBytes(content); err != nil { - return fmt.Errorf("parse %s: %v", confFullName, err) - } - } - - wfs.FilerConf = fc - - return nil - } - return &meta_cache.MetadataFollower{ - PathPrefixToWatch: confFullName, - ProcessEventFn: processEventFn, - }, nil -} - -func (wfs *WFS) wormEnforcedForEntry(path util.FullPath, entry *filer_pb.Entry) (wormEnforced, wormEnabled bool) { - if entry == nil || wfs.FilerConf == nil { - return false, false - } - - rule := wfs.FilerConf.MatchStorageRule(string(path)) - if !rule.Worm { - return false, false - } - - // worm is not enforced - if entry.WormEnforcedAtTsNs == 0 { - return false, true - } - - // worm will never expire - if rule.WormRetentionTimeSeconds == 0 { - return true, true - } - - enforcedAt := time.Unix(0, entry.WormEnforcedAtTsNs) - - // worm is expired - if time.Now().Sub(enforcedAt).Seconds() >= float64(rule.WormRetentionTimeSeconds) { - return false, true - } - - return true, true -} diff --git a/weed/mount/inode_to_path.go b/weed/mount/inode_to_path.go index da38750d1..29635efca 100644 --- a/weed/mount/inode_to_path.go +++ b/weed/mount/inode_to_path.go @@ -1,77 +1,35 @@ package mount import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" "sync" - "time" ) type InodeToPath struct { sync.RWMutex - nextInodeId uint64 - cacheMetaTtlSec time.Duration - inode2path map[uint64]*InodeEntry - path2inode map[util.FullPath]uint64 + nextInodeId uint64 + inode2path map[uint64]*InodeEntry + path2inode map[util.FullPath]uint64 } type InodeEntry struct { - paths []util.FullPath - nlookup uint64 - isDirectory bool - isChildrenCached bool - cachedExpiresTime time.Time + util.FullPath + nlookup uint64 + isDirectory bool + isChildrenCached bool } -func (ie *InodeEntry) removeOnePath(p util.FullPath) bool { - if len(ie.paths) == 0 { - return false - } - idx := -1 - for i, x := range ie.paths { - if x == p { - idx = i - break - } - } - if idx < 0 { - return false - } - for x := idx; x < len(ie.paths)-1; x++ { - ie.paths[x] = ie.paths[x+1] - } - ie.paths = ie.paths[0 : len(ie.paths)-1] - ie.nlookup-- - return true -} - -func NewInodeToPath(root util.FullPath, ttlSec int) *InodeToPath { +func NewInodeToPath(root util.FullPath) *InodeToPath { t := &InodeToPath{ - inode2path: make(map[uint64]*InodeEntry), - path2inode: make(map[util.FullPath]uint64), - cacheMetaTtlSec: time.Second * time.Duration(ttlSec), + inode2path: make(map[uint64]*InodeEntry), + path2inode: make(map[util.FullPath]uint64), } - t.inode2path[1] = &InodeEntry{[]util.FullPath{root}, 1, true, false, time.Time{}} + t.inode2path[1] = &InodeEntry{root, 1, true, false} t.path2inode[root] = 1 - return t } -// EnsurePath make sure the full path is tracked, used by symlink. -func (i *InodeToPath) EnsurePath(path util.FullPath, isDirectory bool) bool { - for { - dir, _ := path.DirAndName() - if dir == "/" { - return true - } - if i.EnsurePath(util.FullPath(dir), true) { - i.Lookup(path, time.Now().Unix(), isDirectory, false, 0, false) - return true - } - } - return false -} - func (i *InodeToPath) Lookup(path util.FullPath, unixTime int64, isDirectory bool, isHardlink bool, possibleInode uint64, isLookup bool) uint64 { i.Lock() defer i.Unlock() @@ -84,7 +42,7 @@ func (i *InodeToPath) Lookup(path util.FullPath, unixTime int64, isDirectory boo } if !isHardlink { for _, found := i.inode2path[inode]; found; inode++ { - _, found = i.inode2path[inode+1] + _, found = i.inode2path[inode] } } } @@ -96,9 +54,9 @@ func (i *InodeToPath) Lookup(path util.FullPath, unixTime int64, isDirectory boo } } else { if !isLookup { - i.inode2path[inode] = &InodeEntry{[]util.FullPath{path}, 0, isDirectory, false, time.Time{}} + i.inode2path[inode] = &InodeEntry{path, 0, isDirectory, false} } else { - i.inode2path[inode] = &InodeEntry{[]util.FullPath{path}, 1, isDirectory, false, time.Time{}} + i.inode2path[inode] = &InodeEntry{path, 1, isDirectory, false} } } @@ -118,9 +76,9 @@ func (i *InodeToPath) AllocateInode(path util.FullPath, unixTime int64) uint64 { return inode } -func (i *InodeToPath) GetInode(path util.FullPath) (uint64, bool) { +func (i *InodeToPath) GetInode(path util.FullPath) uint64 { if path == "/" { - return 1, true + return 1 } i.Lock() defer i.Unlock() @@ -129,17 +87,17 @@ func (i *InodeToPath) GetInode(path util.FullPath) (uint64, bool) { // glog.Fatalf("GetInode unknown inode for %s", path) // this could be the parent for mount point } - return inode, found + return inode } func (i *InodeToPath) GetPath(inode uint64) (util.FullPath, fuse.Status) { i.RLock() defer i.RUnlock() path, found := i.inode2path[inode] - if !found || len(path.paths) == 0 { + if !found { return "", fuse.ENOENT } - return path.paths[0], fuse.OK + return path.FullPath, fuse.OK } func (i *InodeToPath) HasPath(path util.FullPath) bool { @@ -150,20 +108,14 @@ func (i *InodeToPath) HasPath(path util.FullPath) bool { } func (i *InodeToPath) MarkChildrenCached(fullpath util.FullPath) { - i.Lock() - defer i.Unlock() + i.RLock() + defer i.RUnlock() inode, found := i.path2inode[fullpath] if !found { - // https://github.com/seaweedfs/seaweedfs/issues/4968 - // glog.Fatalf("MarkChildrenCached not found inode %v", fullpath) - glog.Warningf("MarkChildrenCached not found inode %v", fullpath) - return + glog.Fatalf("MarkChildrenCached not found inode %v", fullpath) } path, found := i.inode2path[inode] path.isChildrenCached = true - if i.cacheMetaTtlSec > 0 { - path.cachedExpiresTime = time.Now().Add(i.cacheMetaTtlSec) - } } func (i *InodeToPath) IsChildrenCached(fullpath util.FullPath) bool { @@ -174,11 +126,8 @@ func (i *InodeToPath) IsChildrenCached(fullpath util.FullPath) bool { return false } path, found := i.inode2path[inode] - if !found { - return false - } - if path.isChildrenCached { - return path.cachedExpiresTime.IsZero() || time.Now().Before(path.cachedExpiresTime) + if found { + return path.isChildrenCached } return false } @@ -193,44 +142,12 @@ func (i *InodeToPath) HasInode(inode uint64) bool { return found } -func (i *InodeToPath) AddPath(inode uint64, path util.FullPath) { - i.Lock() - defer i.Unlock() - i.path2inode[path] = inode - - ie, found := i.inode2path[inode] - if found { - ie.paths = append(ie.paths, path) - ie.nlookup++ - } else { - i.inode2path[inode] = &InodeEntry{ - paths: []util.FullPath{path}, - nlookup: 1, - isDirectory: false, - isChildrenCached: false, - } - } -} - func (i *InodeToPath) RemovePath(path util.FullPath) { i.Lock() defer i.Unlock() inode, found := i.path2inode[path] if found { delete(i.path2inode, path) - i.removePathFromInode2Path(inode, path) - } -} - -func (i *InodeToPath) removePathFromInode2Path(inode uint64, path util.FullPath) { - ie, found := i.inode2path[inode] - if !found { - return - } - if !ie.removeOnePath(path) { - return - } - if len(ie.paths) == 0 { delete(i.inode2path, inode) } } @@ -241,7 +158,7 @@ func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInod sourceInode, sourceFound := i.path2inode[sourcePath] targetInode, targetFound := i.path2inode[targetPath] if targetFound { - i.removePathFromInode2Path(targetInode, targetPath) + delete(i.inode2path, targetInode) delete(i.path2inode, targetPath) } if sourceFound { @@ -253,11 +170,7 @@ func (i *InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInod return } if entry, entryFound := i.inode2path[sourceInode]; entryFound { - for i, p := range entry.paths { - if p == sourcePath { - entry.paths[i] = targetPath - } - } + entry.FullPath = targetPath entry.isChildrenCached = false if !targetFound { entry.nlookup++ @@ -274,9 +187,7 @@ func (i *InodeToPath) Forget(inode, nlookup uint64, onForgetDir func(dir util.Fu if found { path.nlookup -= nlookup if path.nlookup <= 0 { - for _, p := range path.paths { - delete(i.path2inode, p) - } + delete(i.path2inode, path.FullPath) delete(i.inode2path, inode) } } @@ -284,9 +195,7 @@ func (i *InodeToPath) Forget(inode, nlookup uint64, onForgetDir func(dir util.Fu if found { if path.isDirectory && path.nlookup <= 0 && onForgetDir != nil { path.isChildrenCached = false - for _, p := range path.paths { - onForgetDir(p) - } + onForgetDir(path.FullPath) } } } diff --git a/weed/mount/inode_to_path_test.go b/weed/mount/inode_to_path_test.go deleted file mode 100644 index bbe9c4faa..000000000 --- a/weed/mount/inode_to_path_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package mount - -import ( - "github.com/seaweedfs/seaweedfs/weed/util" - "testing" -) - -func TestInodeEntry_removeOnePath(t *testing.T) { - tests := []struct { - name string - entry InodeEntry - p util.FullPath - want bool - count int - }{ - { - name: "actual case", - entry: InodeEntry{ - paths: []util.FullPath{"/pjd/nx", "/pjd/n0"}, - }, - p: "/pjd/nx", - want: true, - count: 1, - }, - { - name: "empty", - entry: InodeEntry{}, - p: "x", - want: false, - count: 0, - }, - { - name: "single", - entry: InodeEntry{ - paths: []util.FullPath{"/x"}, - }, - p: "/x", - want: true, - count: 0, - }, - { - name: "first", - entry: InodeEntry{ - paths: []util.FullPath{"/x", "/y", "/z"}, - }, - p: "/x", - want: true, - count: 2, - }, - { - name: "middle", - entry: InodeEntry{ - paths: []util.FullPath{"/x", "/y", "/z"}, - }, - p: "/y", - want: true, - count: 2, - }, - { - name: "last", - entry: InodeEntry{ - paths: []util.FullPath{"/x", "/y", "/z"}, - }, - p: "/z", - want: true, - count: 2, - }, - { - name: "not found", - entry: InodeEntry{ - paths: []util.FullPath{"/x", "/y", "/z"}, - }, - p: "/t", - want: false, - count: 3, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.entry.removeOnePath(tt.p); got != tt.want { - t.Errorf("removeOnePath() = %v, want %v", got, tt.want) - } - if tt.count != len(tt.entry.paths) { - t.Errorf("removeOnePath path count = %v, want %v", len(tt.entry.paths), tt.count) - } - for i, p := range tt.entry.paths { - if p == tt.p { - t.Errorf("removeOnePath found path still exists at %v, %v", i, p) - } - } - }) - } -} diff --git a/weed/mount/locked_entry.go b/weed/mount/locked_entry.go deleted file mode 100644 index c5fbaee91..000000000 --- a/weed/mount/locked_entry.go +++ /dev/null @@ -1,44 +0,0 @@ -package mount - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "sync" -) - -type LockedEntry struct { - *filer_pb.Entry - sync.RWMutex -} - -func (le *LockedEntry) GetEntry() *filer_pb.Entry { - le.RLock() - defer le.RUnlock() - return le.Entry -} - -// SetEntry sets the entry of the LockedEntry -// entry should never be nil -func (le *LockedEntry) SetEntry(entry *filer_pb.Entry) { - le.Lock() - defer le.Unlock() - le.Entry = entry -} - -func (le *LockedEntry) UpdateEntry(fn func(entry *filer_pb.Entry)) *filer_pb.Entry { - le.Lock() - defer le.Unlock() - fn(le.Entry) - return le.Entry -} - -func (le *LockedEntry) GetChunks() []*filer_pb.FileChunk { - le.RLock() - defer le.RUnlock() - return le.Entry.Chunks -} - -func (le *LockedEntry) AppendChunks(newChunks []*filer_pb.FileChunk) { - le.Lock() - defer le.Unlock() - le.Entry.Chunks = append(le.Entry.Chunks, newChunks...) -} diff --git a/weed/mount/meta_cache/cache_config.go b/weed/mount/meta_cache/cache_config.go index 5063bd400..e6593ebde 100644 --- a/weed/mount/meta_cache/cache_config.go +++ b/weed/mount/meta_cache/cache_config.go @@ -1,12 +1,12 @@ package meta_cache -import "github.com/seaweedfs/seaweedfs/weed/util" +import "github.com/chrislusf/seaweedfs/weed/util" var ( _ = util.Configuration(&cacheConfig{}) ) -// implementing util.Configuration +// implementing util.Configuraion type cacheConfig struct { dir string } diff --git a/weed/mount/meta_cache/meta_cache.go b/weed/mount/meta_cache/meta_cache.go index 0f0b1de30..8c434787a 100644 --- a/weed/mount/meta_cache/meta_cache.go +++ b/weed/mount/meta_cache/meta_cache.go @@ -2,15 +2,12 @@ package meta_cache import ( "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "os" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer/leveldb" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) // need to have logic similar to FilerStoreWrapper @@ -19,7 +16,7 @@ import ( type MetaCache struct { root util.FullPath localStore filer.VirtualFilerStore - sync.RWMutex + // sync.RWMutex uidGidMapper *UidGidMapper markCachedFn func(fullpath util.FullPath) isCachedFn func(fullpath util.FullPath) bool @@ -59,8 +56,8 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore { } func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error { - mc.Lock() - defer mc.Unlock() + //mc.Lock() + //defer mc.Unlock() return mc.doInsertEntry(ctx, entry) } @@ -68,25 +65,26 @@ func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) erro return mc.localStore.InsertEntry(ctx, entry) } -func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error { - mc.Lock() - defer mc.Unlock() +func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry, shouldDeleteChunks bool) error { + //mc.Lock() + //defer mc.Unlock() - entry, err := mc.localStore.FindEntry(ctx, oldPath) - if err != nil && err != filer_pb.ErrNotFound { - glog.Errorf("Metacache: find entry error: %v", err) - return err - } - if entry != nil { + oldDir, _ := oldPath.DirAndName() + if mc.isCachedFn(util.FullPath(oldDir)) { if oldPath != "" { if newEntry != nil && oldPath == newEntry.FullPath { // skip the unnecessary deletion // leave the update to the following InsertEntry operation } else { - ctx = context.WithValue(ctx, "OP", "MV") glog.V(3).Infof("DeleteEntry %s", oldPath) - if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { - return err + if shouldDeleteChunks { + if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { + return err + } + } else { + if err := mc.localStore.DeleteOneEntrySkipHardlink(ctx, oldPath); err != nil { + return err + } } } } @@ -107,39 +105,42 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti } func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error { - mc.Lock() - defer mc.Unlock() + //mc.Lock() + //defer mc.Unlock() return mc.localStore.UpdateEntry(ctx, entry) } func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) { - mc.RLock() - defer mc.RUnlock() + //mc.RLock() + //defer mc.RUnlock() entry, err = mc.localStore.FindEntry(ctx, fp) if err != nil { return nil, err } - if entry.TtlSec > 0 && entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) { - return nil, filer_pb.ErrNotFound - } mc.mapIdFromFilerToLocal(entry) return } +func (mc *MetaCache) DeleteEntrySkipHardlink(ctx context.Context, fp util.FullPath) (err error) { + //mc.Lock() + //defer mc.Unlock() + return mc.localStore.DeleteOneEntrySkipHardlink(ctx, fp) +} + func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { - mc.Lock() - defer mc.Unlock() + //mc.Lock() + //defer mc.Unlock() return mc.localStore.DeleteEntry(ctx, fp) } func (mc *MetaCache) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { - mc.Lock() - defer mc.Unlock() + //mc.Lock() + //defer mc.Unlock() return mc.localStore.DeleteFolderChildren(ctx, fp) } func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error { - mc.RLock() - defer mc.RUnlock() + //mc.RLock() + //defer mc.RUnlock() if !mc.isCachedFn(dirPath) { // if this request comes after renaming, it should be fine @@ -147,9 +148,6 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full } _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool { - if entry.TtlSec > 0 && entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) { - return true - } mc.mapIdFromFilerToLocal(entry) return eachEntryFunc(entry) }) @@ -160,8 +158,8 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full } func (mc *MetaCache) Shutdown() { - mc.Lock() - defer mc.Unlock() + //mc.Lock() + //defer mc.Unlock() mc.localStore.Shutdown() } diff --git a/weed/mount/meta_cache/meta_cache_init.go b/weed/mount/meta_cache/meta_cache_init.go index 068daa3f9..f360f1f2b 100644 --- a/weed/mount/meta_cache/meta_cache_init.go +++ b/weed/mount/meta_cache/meta_cache_init.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) error { @@ -44,7 +44,7 @@ func doEnsureVisited(mc *MetaCache, client filer_pb.FilerClient, path util.FullP glog.V(4).Infof("ReadDirAllEntries %s ...", path) err := util.Retry("ReadDirAllEntries", func() error { - return filer_pb.ReadDirAllEntries(context.Background(), client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error { + return filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error { entry := filer.FromPbEntry(string(path), pbEntry) if IsHiddenSystemEntry(string(path), entry.Name()) { return nil diff --git a/weed/mount/meta_cache/meta_cache_subscribe.go b/weed/mount/meta_cache/meta_cache_subscribe.go index 9a4553013..c8ccdd375 100644 --- a/weed/mount/meta_cache/meta_cache_subscribe.go +++ b/weed/mount/meta_cache/meta_cache_subscribe.go @@ -2,52 +2,14 @@ package meta_cache import ( "context" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "strings" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) -type MetadataFollower struct { - PathPrefixToWatch string - ProcessEventFn func(resp *filer_pb.SubscribeMetadataResponse) error -} - -func mergeProcessors(mainProcessor func(resp *filer_pb.SubscribeMetadataResponse) error, followers ...*MetadataFollower) func(resp *filer_pb.SubscribeMetadataResponse) error { - return func(resp *filer_pb.SubscribeMetadataResponse) error { - - // build the full path - entry := resp.EventNotification.NewEntry - if entry == nil { - entry = resp.EventNotification.OldEntry - } - if entry != nil { - dir := resp.Directory - if resp.EventNotification.NewParentPath != "" { - dir = resp.EventNotification.NewParentPath - } - fp := util.NewFullPath(dir, entry.Name) - - for _, follower := range followers { - if strings.HasPrefix(string(fp), follower.PathPrefixToWatch) { - if err := follower.ProcessEventFn(resp); err != nil { - return err - } - } - } - } - return mainProcessor(resp) - } -} - -func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64, followers ...*MetadataFollower) error { - - var prefixes []string - for _, follower := range followers { - prefixes = append(prefixes, follower.PathPrefixToWatch) - } +func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error { processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { message := resp.EventNotification @@ -74,7 +36,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil glog.V(4).Infof("creating %v", key) newEntry = filer.FromPbEntry(dir, message.NewEntry) } - err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry) + err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry, message.DeleteChunks) if err == nil { if message.OldEntry != nil && message.NewEntry != nil { oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name) @@ -84,7 +46,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil mc.invalidateFunc(newKey, message.NewEntry) } } else if filer_pb.IsCreate(resp) { - // no need to invalidate + // no need to invaalidate } else if filer_pb.IsDelete(resp) { oldKey := util.NewFullPath(resp.Directory, message.OldEntry.Name) mc.invalidateFunc(oldKey, message.OldEntry) @@ -95,26 +57,8 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil } - prefix := dir - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" - } - - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: "mount", - ClientId: selfSignature, - ClientEpoch: 1, - SelfSignature: selfSignature, - PathPrefix: prefix, - AdditionalPathPrefixes: prefixes, - DirectoriesToWatch: nil, - StartTsNs: lastTsNs, - StopTsNs: 0, - EventErrorType: pb.FatalOnError, - } - util.RetryUntil("followMetaUpdates", func() error { - metadataFollowOption.ClientEpoch++ - return pb.WithFilerClientFollowMetadata(client, metadataFollowOption, mergeProcessors(processEventFn, followers...)) + util.RetryForever("followMetaUpdates", func() error { + return pb.WithFilerClientFollowMetadata(client, "mount", selfSignature, dir, &lastTsNs, 0, selfSignature, processEventFn, pb.FatalOnError) }, func(err error) bool { glog.Errorf("follow metadata updates: %v", err) return true diff --git a/weed/mount/page_writer.go b/weed/mount/page_writer.go index 58ae03cda..7e3db8e28 100644 --- a/weed/mount/page_writer.go +++ b/weed/mount/page_writer.go @@ -1,8 +1,8 @@ package mount import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/page_writer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/mount/page_writer" ) type PageWriter struct { @@ -29,35 +29,35 @@ func newPageWriter(fh *FileHandle, chunkSize int64) *PageWriter { return pw } -func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { +func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool) { glog.V(4).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) chunkIndex := offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset) - pw.addToOneChunk(i, offset, data[:writeSize], isSequential, tsNs) + pw.addToOneChunk(i, offset, data[:writeSize], isSequential) offset += writeSize data = data[writeSize:] } } -func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte, isSequential bool, tsNs int64) { - pw.randomWriter.AddPage(offset, data, isSequential, tsNs) +func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte, isSequential bool) { + pw.randomWriter.AddPage(offset, data, isSequential) } func (pw *PageWriter) FlushData() error { return pw.randomWriter.FlushData() } -func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64, tsNs int64) (maxStop int64) { - glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.inode, offset, offset+int64(len(data))) +func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64) (maxStop int64) { + glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) chunkIndex := offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { readSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset) - maxStop = pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset, tsNs) + maxStop = pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset) offset += readSize data = data[readSize:] @@ -66,6 +66,10 @@ func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64, tsNs int64) (ma return } +func (pw *PageWriter) GetStorageOptions() (collection, replication string) { + return pw.randomWriter.GetStorageOptions() +} + func (pw *PageWriter) LockForRead(startOffset, stopOffset int64) { pw.randomWriter.LockForRead(startOffset, stopOffset) } diff --git a/weed/mount/page_writer/activity_score.go b/weed/mount/page_writer/activity_score.go deleted file mode 100644 index 22da87e37..000000000 --- a/weed/mount/page_writer/activity_score.go +++ /dev/null @@ -1,39 +0,0 @@ -package page_writer - -import "time" - -type ActivityScore struct { - lastActiveTsNs int64 - decayedActivenessScore int64 -} - -func NewActivityScore() *ActivityScore { - return &ActivityScore{} -} - -func (as ActivityScore) MarkRead() { - now := time.Now().UnixNano() - deltaTime := (now - as.lastActiveTsNs) >> 30 // about number of seconds - as.lastActiveTsNs = now - - as.decayedActivenessScore = as.decayedActivenessScore>>deltaTime + 256 - if as.decayedActivenessScore < 0 { - as.decayedActivenessScore = 0 - } -} - -func (as ActivityScore) MarkWrite() { - now := time.Now().UnixNano() - deltaTime := (now - as.lastActiveTsNs) >> 30 // about number of seconds - as.lastActiveTsNs = now - - as.decayedActivenessScore = as.decayedActivenessScore>>deltaTime + 1024 - if as.decayedActivenessScore < 0 { - as.decayedActivenessScore = 0 - } -} - -func (as ActivityScore) ActivityScore() int64 { - deltaTime := (time.Now().UnixNano() - as.lastActiveTsNs) >> 30 // about number of seconds - return as.decayedActivenessScore >> deltaTime -} diff --git a/weed/mount/page_writer/chunk_interval_list.go b/weed/mount/page_writer/chunk_interval_list.go index 005385c1a..e6dc5d1f5 100644 --- a/weed/mount/page_writer/chunk_interval_list.go +++ b/weed/mount/page_writer/chunk_interval_list.go @@ -1,14 +1,11 @@ package page_writer -import ( - "math" -) +import "math" // ChunkWrittenInterval mark one written interval within one page chunk type ChunkWrittenInterval struct { StartOffset int64 stopOffset int64 - TsNs int64 prev *ChunkWrittenInterval next *ChunkWrittenInterval } @@ -43,14 +40,10 @@ func newChunkWrittenIntervalList() *ChunkWrittenIntervalList { return list } -func (list *ChunkWrittenIntervalList) MarkWritten(startOffset, stopOffset, tsNs int64) { - if startOffset >= stopOffset { - return - } +func (list *ChunkWrittenIntervalList) MarkWritten(startOffset, stopOffset int64) { interval := &ChunkWrittenInterval{ StartOffset: startOffset, stopOffset: stopOffset, - TsNs: tsNs, } list.addInterval(interval) } @@ -67,56 +60,52 @@ func (list *ChunkWrittenIntervalList) WrittenSize() (writtenByteCount int64) { func (list *ChunkWrittenIntervalList) addInterval(interval *ChunkWrittenInterval) { - //t := list.head - //for ; t.next != nil; t = t.next { - // if t.TsNs > interval.TsNs { - // println("writes is out of order", t.TsNs-interval.TsNs, "ns") - // } - //} - p := list.head - for ; p.next != nil && p.next.stopOffset <= interval.StartOffset; p = p.next { + for ; p.next != nil && p.next.StartOffset <= interval.StartOffset; p = p.next { } q := list.tail - for ; q.prev != nil && q.prev.StartOffset >= interval.stopOffset; q = q.prev { + for ; q.prev != nil && q.prev.stopOffset >= interval.stopOffset; q = q.prev { } - // left side - // interval after p.next start - if p.next.StartOffset < interval.StartOffset { - t := &ChunkWrittenInterval{ - StartOffset: p.next.StartOffset, - stopOffset: interval.StartOffset, - TsNs: p.next.TsNs, - } - p.next = t - t.prev = p - t.next = interval - interval.prev = t - } else { - p.next = interval - interval.prev = p + if interval.StartOffset <= p.stopOffset && q.StartOffset <= interval.stopOffset { + // merge p and q together + p.stopOffset = q.stopOffset + unlinkNodesBetween(p, q.next) + return + } + if interval.StartOffset <= p.stopOffset { + // merge new interval into p + p.stopOffset = interval.stopOffset + unlinkNodesBetween(p, q) + return + } + if q.StartOffset <= interval.stopOffset { + // merge new interval into q + q.StartOffset = interval.StartOffset + unlinkNodesBetween(p, q) + return } - // right side - // interval ends before p.prev - if interval.stopOffset < q.prev.stopOffset { - t := &ChunkWrittenInterval{ - StartOffset: interval.stopOffset, - stopOffset: q.prev.stopOffset, - TsNs: q.prev.TsNs, - } - q.prev = t - t.next = q - interval.next = t - t.prev = interval - } else { - q.prev = interval - interval.next = q - } + // add the new interval between p and q + unlinkNodesBetween(p, q) + p.next = interval + interval.prev = p + q.prev = interval + interval.next = q } +// unlinkNodesBetween remove all nodes after start and before stop, exclusive +func unlinkNodesBetween(start *ChunkWrittenInterval, stop *ChunkWrittenInterval) { + if start.next == stop { + return + } + start.next.prev = nil + start.next = stop + stop.prev.next = nil + stop.prev = start +} + func (list *ChunkWrittenIntervalList) size() int { var count int for t := list.head; t != nil; t = t.next { diff --git a/weed/mount/page_writer/chunk_interval_list_test.go b/weed/mount/page_writer/chunk_interval_list_test.go index eb1d5ff46..b22f5eb5d 100644 --- a/weed/mount/page_writer/chunk_interval_list_test.go +++ b/weed/mount/page_writer/chunk_interval_list_test.go @@ -10,72 +10,40 @@ func Test_PageChunkWrittenIntervalList(t *testing.T) { assert.Equal(t, 0, list.size(), "empty list") - list.MarkWritten(0, 5, 1) + list.MarkWritten(0, 5) assert.Equal(t, 1, list.size(), "one interval") - list.MarkWritten(0, 5, 2) + list.MarkWritten(0, 5) assert.Equal(t, 1, list.size(), "duplicated interval2") - list.MarkWritten(95, 100, 3) + list.MarkWritten(95, 100) assert.Equal(t, 2, list.size(), "two intervals") - list.MarkWritten(50, 60, 4) + list.MarkWritten(50, 60) assert.Equal(t, 3, list.size(), "three intervals") - list.MarkWritten(50, 55, 5) - assert.Equal(t, 4, list.size(), "three intervals merge") + list.MarkWritten(50, 55) + assert.Equal(t, 3, list.size(), "three intervals merge") - list.MarkWritten(40, 50, 6) - assert.Equal(t, 5, list.size(), "three intervals grow forward") + list.MarkWritten(40, 50) + assert.Equal(t, 3, list.size(), "three intervals grow forward") - list.MarkWritten(50, 65, 7) - assert.Equal(t, 4, list.size(), "three intervals grow backward") + list.MarkWritten(50, 65) + assert.Equal(t, 3, list.size(), "three intervals grow backward") - list.MarkWritten(70, 80, 8) - assert.Equal(t, 5, list.size(), "four intervals") + list.MarkWritten(70, 80) + assert.Equal(t, 4, list.size(), "four intervals") - list.MarkWritten(60, 70, 9) - assert.Equal(t, 6, list.size(), "three intervals merged") + list.MarkWritten(60, 70) + assert.Equal(t, 3, list.size(), "three intervals merged") - list.MarkWritten(59, 71, 10) - assert.Equal(t, 6, list.size(), "covered three intervals") + list.MarkWritten(59, 71) + assert.Equal(t, 3, list.size(), "covered three intervals") - list.MarkWritten(5, 59, 11) - assert.Equal(t, 5, list.size(), "covered two intervals") + list.MarkWritten(5, 59) + assert.Equal(t, 2, list.size(), "covered two intervals") - list.MarkWritten(70, 99, 12) - assert.Equal(t, 5, list.size(), "covered one intervals") + list.MarkWritten(70, 99) + assert.Equal(t, 1, list.size(), "covered one intervals") } - -type interval struct { - start int64 - stop int64 - expected bool -} - -func Test_PageChunkWrittenIntervalList1(t *testing.T) { - list := newChunkWrittenIntervalList() - inputs := []interval{ - {1, 5, true}, - {2, 3, true}, - } - for i, input := range inputs { - list.MarkWritten(input.start, input.stop, int64(i)+1) - actual := hasData(list, 0, 4) - if actual != input.expected { - t.Errorf("input [%d,%d) expected %v actual %v", input.start, input.stop, input.expected, actual) - } - } -} - -func hasData(usage *ChunkWrittenIntervalList, chunkStartOffset, x int64) bool { - for t := usage.head.next; t != usage.tail; t = t.next { - logicStart := chunkStartOffset + t.StartOffset - logicStop := chunkStartOffset + t.stopOffset - if logicStart <= x && x < logicStop { - return true - } - } - return false -} diff --git a/weed/mount/page_writer/dirty_pages.go b/weed/mount/page_writer/dirty_pages.go index 7cddcf69e..c16cee47a 100644 --- a/weed/mount/page_writer/dirty_pages.go +++ b/weed/mount/page_writer/dirty_pages.go @@ -1,9 +1,10 @@ package page_writer type DirtyPages interface { - AddPage(offset int64, data []byte, isSequential bool, tsNs int64) + AddPage(offset int64, data []byte, isSequential bool) FlushData() error - ReadDirtyDataAt(data []byte, startOffset int64, tsNs int64) (maxStop int64) + ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) + GetStorageOptions() (collection, replication string) Destroy() LockForRead(startOffset, stopOffset int64) UnlockForRead(startOffset, stopOffset int64) diff --git a/weed/mount/page_writer/page_chunk.go b/weed/mount/page_writer/page_chunk.go index ac1d24622..4e8f31425 100644 --- a/weed/mount/page_writer/page_chunk.go +++ b/weed/mount/page_writer/page_chunk.go @@ -4,14 +4,13 @@ import ( "io" ) -type SaveToStorageFunc func(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) +type SaveToStorageFunc func(reader io.Reader, offset int64, size int64, cleanupFn func()) type PageChunk interface { FreeResource() - WriteDataAt(src []byte, offset int64, tsNs int64) (n int) - ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) + WriteDataAt(src []byte, offset int64) (n int) + ReadDataAt(p []byte, off int64) (maxStop int64) IsComplete() bool - ActivityScore() int64 WrittenSize() int64 SaveContent(saveFn SaveToStorageFunc) } diff --git a/weed/mount/page_writer/page_chunk_mem.go b/weed/mount/page_writer/page_chunk_mem.go index b5c8f9ebd..52db6d4f9 100644 --- a/weed/mount/page_writer/page_chunk_mem.go +++ b/weed/mount/page_writer/page_chunk_mem.go @@ -1,11 +1,9 @@ package page_writer import ( - "sync" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/mem" "sync/atomic" - - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/mem" ) var ( @@ -15,12 +13,10 @@ var ( ) type MemChunk struct { - sync.RWMutex buf []byte usage *ChunkWrittenIntervalList chunkSize int64 logicChunkIndex LogicChunkIndex - activityScore *ActivityScore } func NewMemChunk(logicChunkIndex LogicChunkIndex, chunkSize int64) *MemChunk { @@ -30,89 +26,49 @@ func NewMemChunk(logicChunkIndex LogicChunkIndex, chunkSize int64) *MemChunk { chunkSize: chunkSize, buf: mem.Allocate(int(chunkSize)), usage: newChunkWrittenIntervalList(), - activityScore: NewActivityScore(), } } func (mc *MemChunk) FreeResource() { - mc.Lock() - defer mc.Unlock() - atomic.AddInt64(&memChunkCounter, -1) mem.Free(mc.buf) } -func (mc *MemChunk) WriteDataAt(src []byte, offset int64, tsNs int64) (n int) { - mc.Lock() - defer mc.Unlock() - +func (mc *MemChunk) WriteDataAt(src []byte, offset int64) (n int) { innerOffset := offset % mc.chunkSize n = copy(mc.buf[innerOffset:], src) - mc.usage.MarkWritten(innerOffset, innerOffset+int64(n), tsNs) - mc.activityScore.MarkWrite() - + mc.usage.MarkWritten(innerOffset, innerOffset+int64(n)) return } -func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) { - mc.RLock() - defer mc.RUnlock() - +func (mc *MemChunk) ReadDataAt(p []byte, off int64) (maxStop int64) { memChunkBaseOffset := int64(mc.logicChunkIndex) * mc.chunkSize for t := mc.usage.head.next; t != mc.usage.tail; t = t.next { - logicStart := max(off, memChunkBaseOffset+t.StartOffset) + logicStart := max(off, int64(mc.logicChunkIndex)*mc.chunkSize+t.StartOffset) logicStop := min(off+int64(len(p)), memChunkBaseOffset+t.stopOffset) if logicStart < logicStop { copy(p[logicStart-off:logicStop-off], mc.buf[logicStart-memChunkBaseOffset:logicStop-memChunkBaseOffset]) maxStop = max(maxStop, logicStop) - - if t.TsNs >= tsNs { - println("read new data1", t.TsNs-tsNs, "ns") - } } } - mc.activityScore.MarkRead() - return } func (mc *MemChunk) IsComplete() bool { - mc.RLock() - defer mc.RUnlock() - return mc.usage.IsComplete(mc.chunkSize) } -func (mc *MemChunk) ActivityScore() int64 { - return mc.activityScore.ActivityScore() -} - func (mc *MemChunk) WrittenSize() int64 { - mc.RLock() - defer mc.RUnlock() - return mc.usage.WrittenSize() } func (mc *MemChunk) SaveContent(saveFn SaveToStorageFunc) { - mc.RLock() - defer mc.RUnlock() - if saveFn == nil { return } - for t := mc.usage.head.next; t != mc.usage.tail; t = t.next { - startOffset := t.StartOffset - stopOffset := t.stopOffset - tsNs := t.TsNs - for t != mc.usage.tail && t.next.StartOffset == stopOffset { - stopOffset = t.next.stopOffset - t = t.next - tsNs = max(tsNs, t.TsNs) - } - reader := util.NewBytesReader(mc.buf[startOffset:stopOffset]) - saveFn(reader, int64(mc.logicChunkIndex)*mc.chunkSize+startOffset, stopOffset-startOffset, tsNs, func() { + reader := util.NewBytesReader(mc.buf[t.StartOffset:t.stopOffset]) + saveFn(reader, int64(mc.logicChunkIndex)*mc.chunkSize+t.StartOffset, t.Size(), func() { }) } } diff --git a/weed/mount/page_writer/page_chunk_swapfile.go b/weed/mount/page_writer/page_chunk_swapfile.go index dd9781b68..b56589bfc 100644 --- a/weed/mount/page_writer/page_chunk_swapfile.go +++ b/weed/mount/page_writer/page_chunk_swapfile.go @@ -1,10 +1,9 @@ package page_writer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/mem" - "io" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/mem" "os" "sync" ) @@ -16,29 +15,27 @@ var ( type ActualChunkIndex int type SwapFile struct { - dir string - file *os.File - chunkSize int64 - chunkTrackingLock sync.Mutex - activeChunkCount int - freeActualChunkList []ActualChunkIndex + dir string + file *os.File + logicToActualChunkIndex map[LogicChunkIndex]ActualChunkIndex + logicToActualChunkIndexLock sync.Mutex + chunkSize int64 + freeActualChunkList []ActualChunkIndex } type SwapFileChunk struct { - sync.RWMutex swapfile *SwapFile usage *ChunkWrittenIntervalList logicChunkIndex LogicChunkIndex actualChunkIndex ActualChunkIndex - activityScore *ActivityScore - //memChunk *MemChunk } func NewSwapFile(dir string, chunkSize int64) *SwapFile { return &SwapFile{ - dir: dir, - file: nil, - chunkSize: chunkSize, + dir: dir, + file: nil, + logicToActualChunkIndex: make(map[LogicChunkIndex]ActualChunkIndex), + chunkSize: chunkSize, } } func (sf *SwapFile) FreeResource() { @@ -48,7 +45,7 @@ func (sf *SwapFile) FreeResource() { } } -func (sf *SwapFile) NewSwapFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapFileChunk) { +func (sf *SwapFile) NewTempFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapFileChunk) { if sf.file == nil { var err error sf.file, err = os.CreateTemp(sf.dir, "") @@ -57,146 +54,83 @@ func (sf *SwapFile) NewSwapFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapF return nil } } - sf.chunkTrackingLock.Lock() - defer sf.chunkTrackingLock.Unlock() - - sf.activeChunkCount++ - - // assign a new physical chunk - var actualChunkIndex ActualChunkIndex - if len(sf.freeActualChunkList) > 0 { - actualChunkIndex = sf.freeActualChunkList[0] - sf.freeActualChunkList = sf.freeActualChunkList[1:] - } else { - actualChunkIndex = ActualChunkIndex(sf.activeChunkCount) + sf.logicToActualChunkIndexLock.Lock() + defer sf.logicToActualChunkIndexLock.Unlock() + actualChunkIndex, found := sf.logicToActualChunkIndex[logicChunkIndex] + if !found { + if len(sf.freeActualChunkList) > 0 { + actualChunkIndex = sf.freeActualChunkList[0] + sf.freeActualChunkList = sf.freeActualChunkList[1:] + } else { + actualChunkIndex = ActualChunkIndex(len(sf.logicToActualChunkIndex)) + } + sf.logicToActualChunkIndex[logicChunkIndex] = actualChunkIndex } - swapFileChunk := &SwapFileChunk{ + return &SwapFileChunk{ swapfile: sf, usage: newChunkWrittenIntervalList(), logicChunkIndex: logicChunkIndex, actualChunkIndex: actualChunkIndex, - activityScore: NewActivityScore(), - // memChunk: NewMemChunk(logicChunkIndex, sf.chunkSize), } - - // println(logicChunkIndex, "|", "++++", swapFileChunk.actualChunkIndex, swapFileChunk, sf) - return swapFileChunk } func (sc *SwapFileChunk) FreeResource() { - - sc.Lock() - defer sc.Unlock() - - sc.swapfile.chunkTrackingLock.Lock() - defer sc.swapfile.chunkTrackingLock.Unlock() + sc.swapfile.logicToActualChunkIndexLock.Lock() + defer sc.swapfile.logicToActualChunkIndexLock.Unlock() sc.swapfile.freeActualChunkList = append(sc.swapfile.freeActualChunkList, sc.actualChunkIndex) - sc.swapfile.activeChunkCount-- - // println(sc.logicChunkIndex, "|", "----", sc.actualChunkIndex, sc, sc.swapfile) + delete(sc.swapfile.logicToActualChunkIndex, sc.logicChunkIndex) } -func (sc *SwapFileChunk) WriteDataAt(src []byte, offset int64, tsNs int64) (n int) { - sc.Lock() - defer sc.Unlock() - - // println(sc.logicChunkIndex, "|", tsNs, "write at", offset, len(src), sc.actualChunkIndex) - +func (sc *SwapFileChunk) WriteDataAt(src []byte, offset int64) (n int) { innerOffset := offset % sc.swapfile.chunkSize var err error n, err = sc.swapfile.file.WriteAt(src, int64(sc.actualChunkIndex)*sc.swapfile.chunkSize+innerOffset) - sc.usage.MarkWritten(innerOffset, innerOffset+int64(n), tsNs) - if err != nil { + if err == nil { + sc.usage.MarkWritten(innerOffset, innerOffset+int64(n)) + } else { glog.Errorf("failed to write swap file %s: %v", sc.swapfile.file.Name(), err) } - //sc.memChunk.WriteDataAt(src, offset, tsNs) - sc.activityScore.MarkWrite() - return } -func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) { - sc.RLock() - defer sc.RUnlock() - - // println(sc.logicChunkIndex, "|", tsNs, "read at", off, len(p), sc.actualChunkIndex) - - //memCopy := make([]byte, len(p)) - //copy(memCopy, p) - +func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64) (maxStop int64) { chunkStartOffset := int64(sc.logicChunkIndex) * sc.swapfile.chunkSize for t := sc.usage.head.next; t != sc.usage.tail; t = t.next { logicStart := max(off, chunkStartOffset+t.StartOffset) logicStop := min(off+int64(len(p)), chunkStartOffset+t.stopOffset) if logicStart < logicStop { actualStart := logicStart - chunkStartOffset + int64(sc.actualChunkIndex)*sc.swapfile.chunkSize - if n, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil { - if err == io.EOF && n == int(logicStop-logicStart) { - err = nil - } + if _, err := sc.swapfile.file.ReadAt(p[logicStart-off:logicStop-off], actualStart); err != nil { glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err) break } maxStop = max(maxStop, logicStop) - - if t.TsNs > tsNs { - println("read new data2", t.TsNs-tsNs, "ns") - } } } - //sc.memChunk.ReadDataAt(memCopy, off, tsNs) - //if bytes.Compare(memCopy, p) != 0 { - // println("read wrong data from swap file", off, sc.logicChunkIndex) - //} - - sc.activityScore.MarkRead() - return } func (sc *SwapFileChunk) IsComplete() bool { - sc.RLock() - defer sc.RUnlock() return sc.usage.IsComplete(sc.swapfile.chunkSize) } -func (sc *SwapFileChunk) ActivityScore() int64 { - return sc.activityScore.ActivityScore() -} - func (sc *SwapFileChunk) WrittenSize() int64 { - sc.RLock() - defer sc.RUnlock() return sc.usage.WrittenSize() } func (sc *SwapFileChunk) SaveContent(saveFn SaveToStorageFunc) { - sc.RLock() - defer sc.RUnlock() - if saveFn == nil { return } - // println(sc.logicChunkIndex, "|", "save") for t := sc.usage.head.next; t != sc.usage.tail; t = t.next { - startOffset := t.StartOffset - stopOffset := t.stopOffset - tsNs := t.TsNs - for t != sc.usage.tail && t.next.StartOffset == stopOffset { - stopOffset = t.next.stopOffset - t = t.next - tsNs = max(tsNs, t.TsNs) - } - - data := mem.Allocate(int(stopOffset - startOffset)) - n, _ := sc.swapfile.file.ReadAt(data, startOffset+int64(sc.actualChunkIndex)*sc.swapfile.chunkSize) - if n > 0 { - reader := util.NewBytesReader(data[:n]) - saveFn(reader, int64(sc.logicChunkIndex)*sc.swapfile.chunkSize+startOffset, int64(n), tsNs, func() { - }) - } + data := mem.Allocate(int(t.Size())) + sc.swapfile.file.ReadAt(data, t.StartOffset+int64(sc.actualChunkIndex)*sc.swapfile.chunkSize) + reader := util.NewBytesReader(data) + saveFn(reader, int64(sc.logicChunkIndex)*sc.swapfile.chunkSize+t.StartOffset, t.Size(), func() { + }) mem.Free(data) } - + sc.usage = newChunkWrittenIntervalList() } diff --git a/weed/mount/page_writer/upload_pipeline.go b/weed/mount/page_writer/upload_pipeline.go index bd7fc99dd..0c7446cad 100644 --- a/weed/mount/page_writer/upload_pipeline.go +++ b/weed/mount/page_writer/upload_pipeline.go @@ -2,28 +2,30 @@ package page_writer import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "sync" "sync/atomic" + "time" ) type LogicChunkIndex int type UploadPipeline struct { - uploaderCount int32 - uploaderCountCond *sync.Cond - filepath util.FullPath - ChunkSize int64 - uploaders *util.LimitedConcurrentExecutor - saveToStorageFn SaveToStorageFunc - writableChunkLimit int - swapFile *SwapFile - chunksLock sync.Mutex - writableChunks map[LogicChunkIndex]PageChunk - sealedChunks map[LogicChunkIndex]*SealedChunk - activeReadChunks map[LogicChunkIndex]int - readerCountCond *sync.Cond + filepath util.FullPath + ChunkSize int64 + writableChunks map[LogicChunkIndex]PageChunk + writableChunksLock sync.Mutex + sealedChunks map[LogicChunkIndex]*SealedChunk + sealedChunksLock sync.Mutex + uploaders *util.LimitedConcurrentExecutor + uploaderCount int32 + uploaderCountCond *sync.Cond + saveToStorageFn SaveToStorageFunc + activeReadChunks map[LogicChunkIndex]int + activeReadChunksLock sync.Mutex + writableChunkLimit int + swapFile *SwapFile } type SealedChunk struct { @@ -40,7 +42,7 @@ func (sc *SealedChunk) FreeReference(messageOnFree string) { } func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline { - t := &UploadPipeline{ + return &UploadPipeline{ ChunkSize: chunkSize, writableChunks: make(map[LogicChunkIndex]PageChunk), sealedChunks: make(map[LogicChunkIndex]*SealedChunk), @@ -51,14 +53,11 @@ func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, writableChunkLimit: bufferChunkLimit, swapFile: NewSwapFile(swapFileDir, chunkSize), } - t.readerCountCond = sync.NewCond(&t.chunksLock) - return t } -func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool, tsNs int64) (n int) { - - up.chunksLock.Lock() - defer up.chunksLock.Unlock() +func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool) (n int) { + up.writableChunksLock.Lock() + defer up.writableChunksLock.Unlock() logicChunkIndex := LogicChunkIndex(off / up.ChunkSize) @@ -66,80 +65,57 @@ func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool, tsN if !found { if len(up.writableChunks) > up.writableChunkLimit { // if current file chunks is over the per file buffer count limit - candidateChunkIndex, fullness := LogicChunkIndex(-1), int64(0) + fullestChunkIndex, fullness := LogicChunkIndex(-1), int64(0) for lci, mc := range up.writableChunks { chunkFullness := mc.WrittenSize() if fullness < chunkFullness { - candidateChunkIndex = lci + fullestChunkIndex = lci fullness = chunkFullness } } - /* // this algo generates too many chunks - candidateChunkIndex, lowestActivityScore := LogicChunkIndex(-1), int64(math.MaxInt64) - for wci, wc := range up.writableChunks { - activityScore := wc.ActivityScore() - if lowestActivityScore >= activityScore { - if lowestActivityScore == activityScore { - chunkFullness := wc.WrittenSize() - if fullness < chunkFullness { - candidateChunkIndex = lci - fullness = chunkFullness - } - } - candidateChunkIndex = wci - lowestActivityScore = activityScore - } - } - */ - up.moveToSealed(up.writableChunks[candidateChunkIndex], candidateChunkIndex) + up.moveToSealed(up.writableChunks[fullestChunkIndex], fullestChunkIndex) + delete(up.writableChunks, fullestChunkIndex) // fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness) } - // fmt.Printf("isSequential:%v len(up.writableChunks):%v memChunkCounter:%v", isSequential, len(up.writableChunks), memChunkCounter) if isSequential && len(up.writableChunks) < up.writableChunkLimit && atomic.LoadInt64(&memChunkCounter) < 4*int64(up.writableChunkLimit) { pageChunk = NewMemChunk(logicChunkIndex, up.ChunkSize) - // fmt.Printf(" create mem chunk %d\n", logicChunkIndex) } else { - pageChunk = up.swapFile.NewSwapFileChunk(logicChunkIndex) - // fmt.Printf(" create file chunk %d\n", logicChunkIndex) + pageChunk = up.swapFile.NewTempFileChunk(logicChunkIndex) } up.writableChunks[logicChunkIndex] = pageChunk } - //if _, foundSealed := up.sealedChunks[logicChunkIndex]; foundSealed { - // println("found already sealed chunk", logicChunkIndex) - //} - //if _, foundReading := up.activeReadChunks[logicChunkIndex]; foundReading { - // println("found active read chunk", logicChunkIndex) - //} - n = pageChunk.WriteDataAt(p, off, tsNs) + n = pageChunk.WriteDataAt(p, off) up.maybeMoveToSealed(pageChunk, logicChunkIndex) return } -func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) { +func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64) (maxStop int64) { logicChunkIndex := LogicChunkIndex(off / up.ChunkSize) - up.chunksLock.Lock() - defer func() { - up.readerCountCond.Signal() - up.chunksLock.Unlock() - }() - // read from sealed chunks first + up.sealedChunksLock.Lock() sealedChunk, found := up.sealedChunks[logicChunkIndex] if found { - maxStop = sealedChunk.chunk.ReadDataAt(p, off, tsNs) + sealedChunk.referenceCounter++ + } + up.sealedChunksLock.Unlock() + if found { + maxStop = sealedChunk.chunk.ReadDataAt(p, off) glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop) + sealedChunk.FreeReference(fmt.Sprintf("%s finish reading chunk %d", up.filepath, logicChunkIndex)) } // read from writable chunks last + up.writableChunksLock.Lock() + defer up.writableChunksLock.Unlock() writableChunk, found := up.writableChunks[logicChunkIndex] if !found { return } - writableMaxStop := writableChunk.ReadDataAt(p, off, tsNs) + writableMaxStop := writableChunk.ReadDataAt(p, off) glog.V(4).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop) maxStop = max(maxStop, writableMaxStop) @@ -147,17 +123,14 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxS } func (up *UploadPipeline) FlushAll() { - up.flushChunks() - up.waitForCurrentWritersToComplete() -} - -func (up *UploadPipeline) flushChunks() { - up.chunksLock.Lock() - defer up.chunksLock.Unlock() + up.writableChunksLock.Lock() + defer up.writableChunksLock.Unlock() for logicChunkIndex, memChunk := range up.writableChunks { up.moveToSealed(memChunk, logicChunkIndex) } + + up.waitForCurrentWritersToComplete() } func (up *UploadPipeline) maybeMoveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) { @@ -170,6 +143,8 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic atomic.AddInt32(&up.uploaderCount, 1) glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount) + up.sealedChunksLock.Lock() + if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found { oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex)) } @@ -180,8 +155,8 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic up.sealedChunks[logicChunkIndex] = sealedChunk delete(up.writableChunks, logicChunkIndex) - // unlock before submitting the uploading jobs - up.chunksLock.Unlock() + up.sealedChunksLock.Unlock() + up.uploaders.Execute(func() { // first add to the file chunks sealedChunk.chunk.SaveContent(up.saveToStorageFn) @@ -197,25 +172,24 @@ func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic up.uploaderCountCond.L.Unlock() // wait for readers - up.chunksLock.Lock() - defer up.chunksLock.Unlock() for up.IsLocked(logicChunkIndex) { - up.readerCountCond.Wait() + time.Sleep(59 * time.Millisecond) } // then remove from sealed chunks + up.sealedChunksLock.Lock() + defer up.sealedChunksLock.Unlock() delete(up.sealedChunks, logicChunkIndex) sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex)) }) - up.chunksLock.Lock() } func (up *UploadPipeline) Shutdown() { up.swapFile.FreeResource() - up.chunksLock.Lock() - defer up.chunksLock.Unlock() + up.sealedChunksLock.Lock() + defer up.sealedChunksLock.Unlock() for logicChunkIndex, sealedChunk := range up.sealedChunks { sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex)) } diff --git a/weed/mount/page_writer/upload_pipeline_lock.go b/weed/mount/page_writer/upload_pipeline_lock.go index fec61b532..47a40ba37 100644 --- a/weed/mount/page_writer/upload_pipeline_lock.go +++ b/weed/mount/page_writer/upload_pipeline_lock.go @@ -10,8 +10,8 @@ func (up *UploadPipeline) LockForRead(startOffset, stopOffset int64) { if stopOffset%up.ChunkSize > 0 { stopLogicChunkIndex += 1 } - up.chunksLock.Lock() - defer up.chunksLock.Unlock() + up.activeReadChunksLock.Lock() + defer up.activeReadChunksLock.Unlock() for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ { if count, found := up.activeReadChunks[i]; found { up.activeReadChunks[i] = count + 1 @@ -27,8 +27,8 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) { if stopOffset%up.ChunkSize > 0 { stopLogicChunkIndex += 1 } - up.chunksLock.Lock() - defer up.chunksLock.Unlock() + up.activeReadChunksLock.Lock() + defer up.activeReadChunksLock.Unlock() for i := startLogicChunkIndex; i < stopLogicChunkIndex; i++ { if count, found := up.activeReadChunks[i]; found { if count == 1 { @@ -41,6 +41,8 @@ func (up *UploadPipeline) UnlockForRead(startOffset, stopOffset int64) { } func (up *UploadPipeline) IsLocked(logicChunkIndex LogicChunkIndex) bool { + up.activeReadChunksLock.Lock() + defer up.activeReadChunksLock.Unlock() if count, found := up.activeReadChunks[logicChunkIndex]; found { return count > 0 } diff --git a/weed/mount/page_writer/upload_pipeline_test.go b/weed/mount/page_writer/upload_pipeline_test.go index 2d803f6af..f130c97c1 100644 --- a/weed/mount/page_writer/upload_pipeline_test.go +++ b/weed/mount/page_writer/upload_pipeline_test.go @@ -1,7 +1,7 @@ package page_writer import ( - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" "testing" ) @@ -31,14 +31,14 @@ func writeRange(uploadPipeline *UploadPipeline, startOff, stopOff int64) { p := make([]byte, 4) for i := startOff / 4; i < stopOff/4; i += 4 { util.Uint32toBytes(p, uint32(i)) - uploadPipeline.SaveDataAt(p, i, false, 0) + uploadPipeline.SaveDataAt(p, i, false) } } func confirmRange(t *testing.T, uploadPipeline *UploadPipeline, startOff, stopOff int64) { p := make([]byte, 4) for i := startOff; i < stopOff/4; i += 4 { - uploadPipeline.MaybeReadDataAt(p, i, 0) + uploadPipeline.MaybeReadDataAt(p, i) x := util.BytesToUint32(p) if x != uint32(i) { t.Errorf("expecting %d found %d at offset [%d,%d)", i, x, i, i+4) diff --git a/weed/mount/page_writer_pattern.go b/weed/mount/page_writer_pattern.go index d0c3c417c..1ec9c9d4c 100644 --- a/weed/mount/page_writer_pattern.go +++ b/weed/mount/page_writer_pattern.go @@ -1,17 +1,14 @@ package mount -import "sync/atomic" - type WriterPattern struct { isSequentialCounter int64 lastWriteStopOffset int64 chunkSize int64 } -const ModeChangeLimit = 3 - // For streaming write: only cache the first chunk // For random write: fall back to temp file approach +// writes can only change from streaming mode to non-streaming mode func NewWriterPattern(chunkSize int64) *WriterPattern { return &WriterPattern{ @@ -22,19 +19,14 @@ func NewWriterPattern(chunkSize int64) *WriterPattern { } func (rp *WriterPattern) MonitorWriteAt(offset int64, size int) { - lastOffset := atomic.SwapInt64(&rp.lastWriteStopOffset, offset+int64(size)) - counter := atomic.LoadInt64(&rp.isSequentialCounter) - if lastOffset == offset { - if counter < ModeChangeLimit { - atomic.AddInt64(&rp.isSequentialCounter, 1) - } + if rp.lastWriteStopOffset == offset { + rp.isSequentialCounter++ } else { - if counter > -ModeChangeLimit { - atomic.AddInt64(&rp.isSequentialCounter, -1) - } + rp.isSequentialCounter-- } + rp.lastWriteStopOffset = offset + int64(size) } func (rp *WriterPattern) IsSequentialMode() bool { - return atomic.LoadInt64(&rp.isSequentialCounter) >= 0 + return rp.isSequentialCounter >= 0 } diff --git a/weed/mount/rdma_client.go b/weed/mount/rdma_client.go deleted file mode 100644 index 1cab1f1aa..000000000 --- a/weed/mount/rdma_client.go +++ /dev/null @@ -1,379 +0,0 @@ -package mount - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strings" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/wdclient" -) - -// RDMAMountClient provides RDMA acceleration for SeaweedFS mount operations -type RDMAMountClient struct { - sidecarAddr string - httpClient *http.Client - maxConcurrent int - timeout time.Duration - semaphore chan struct{} - - // Volume lookup - lookupFileIdFn wdclient.LookupFileIdFunctionType - - // Statistics - totalRequests atomic.Int64 - successfulReads atomic.Int64 - failedReads atomic.Int64 - totalBytesRead atomic.Int64 - totalLatencyNs atomic.Int64 -} - -// RDMAReadRequest represents a request to read data via RDMA -type RDMAReadRequest struct { - VolumeID uint32 `json:"volume_id"` - NeedleID uint64 `json:"needle_id"` - Cookie uint32 `json:"cookie"` - Offset uint64 `json:"offset"` - Size uint64 `json:"size"` -} - -// RDMAReadResponse represents the response from an RDMA read operation -type RDMAReadResponse struct { - Success bool `json:"success"` - IsRDMA bool `json:"is_rdma"` - Source string `json:"source"` - Duration string `json:"duration"` - DataSize int `json:"data_size"` - SessionID string `json:"session_id,omitempty"` - ErrorMsg string `json:"error,omitempty"` - - // Zero-copy optimization fields - UseTempFile bool `json:"use_temp_file"` - TempFile string `json:"temp_file"` -} - -// RDMAHealthResponse represents the health status of the RDMA sidecar -type RDMAHealthResponse struct { - Status string `json:"status"` - RDMA struct { - Enabled bool `json:"enabled"` - Connected bool `json:"connected"` - } `json:"rdma"` - Timestamp string `json:"timestamp"` -} - -// NewRDMAMountClient creates a new RDMA client for mount operations -func NewRDMAMountClient(sidecarAddr string, lookupFileIdFn wdclient.LookupFileIdFunctionType, maxConcurrent int, timeoutMs int) (*RDMAMountClient, error) { - client := &RDMAMountClient{ - sidecarAddr: sidecarAddr, - maxConcurrent: maxConcurrent, - timeout: time.Duration(timeoutMs) * time.Millisecond, - httpClient: &http.Client{ - Timeout: time.Duration(timeoutMs) * time.Millisecond, - }, - semaphore: make(chan struct{}, maxConcurrent), - lookupFileIdFn: lookupFileIdFn, - } - - // Test connectivity and RDMA availability - if err := client.healthCheck(); err != nil { - return nil, fmt.Errorf("RDMA sidecar health check failed: %w", err) - } - - glog.Infof("RDMA mount client initialized: sidecar=%s, maxConcurrent=%d, timeout=%v", - sidecarAddr, maxConcurrent, client.timeout) - - return client, nil -} - -// lookupVolumeLocationByFileID finds the best volume server for a given file ID -func (c *RDMAMountClient) lookupVolumeLocationByFileID(ctx context.Context, fileID string) (string, error) { - glog.V(4).Infof("Looking up volume location for file ID %s", fileID) - - targetUrls, err := c.lookupFileIdFn(ctx, fileID) - if err != nil { - return "", fmt.Errorf("failed to lookup volume for file %s: %w", fileID, err) - } - - if len(targetUrls) == 0 { - return "", fmt.Errorf("no locations found for file %s", fileID) - } - - // Choose the first URL and extract the server address - targetUrl := targetUrls[0] - // Extract server address from URL like "http://server:port/fileId" - parts := strings.Split(targetUrl, "/") - if len(parts) < 3 { - return "", fmt.Errorf("invalid target URL format: %s", targetUrl) - } - bestAddress := fmt.Sprintf("http://%s", parts[2]) - - glog.V(4).Infof("File %s located at %s", fileID, bestAddress) - return bestAddress, nil -} - -// lookupVolumeLocation finds the best volume server for a given volume ID (legacy method) -func (c *RDMAMountClient) lookupVolumeLocation(ctx context.Context, volumeID uint32, needleID uint64, cookie uint32) (string, error) { - // Create a file ID for lookup (format: volumeId,needleId,cookie) - fileID := fmt.Sprintf("%d,%x,%d", volumeID, needleID, cookie) - return c.lookupVolumeLocationByFileID(ctx, fileID) -} - -// healthCheck verifies that the RDMA sidecar is available and functioning -func (c *RDMAMountClient) healthCheck() error { - ctx, cancel := context.WithTimeout(context.Background(), c.timeout) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, "GET", - fmt.Sprintf("http://%s/health", c.sidecarAddr), nil) - if err != nil { - return fmt.Errorf("failed to create health check request: %w", err) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return fmt.Errorf("health check request failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("health check failed with status: %s", resp.Status) - } - - // Parse health response - var health RDMAHealthResponse - if err := json.NewDecoder(resp.Body).Decode(&health); err != nil { - return fmt.Errorf("failed to parse health response: %w", err) - } - - if health.Status != "healthy" { - return fmt.Errorf("sidecar reports unhealthy status: %s", health.Status) - } - - if !health.RDMA.Enabled { - return fmt.Errorf("RDMA is not enabled on sidecar") - } - - if !health.RDMA.Connected { - glog.Warningf("RDMA sidecar is healthy but not connected to RDMA engine") - } - - return nil -} - -// ReadNeedle reads data from a specific needle using RDMA acceleration -func (c *RDMAMountClient) ReadNeedle(ctx context.Context, fileID string, offset, size uint64) ([]byte, bool, error) { - // Acquire semaphore for concurrency control - select { - case c.semaphore <- struct{}{}: - defer func() { <-c.semaphore }() - case <-ctx.Done(): - return nil, false, ctx.Err() - } - - c.totalRequests.Add(1) - startTime := time.Now() - - // Lookup volume location using file ID directly - volumeServer, err := c.lookupVolumeLocationByFileID(ctx, fileID) - if err != nil { - c.failedReads.Add(1) - return nil, false, fmt.Errorf("failed to lookup volume for file %s: %w", fileID, err) - } - - // Prepare request URL with file_id parameter (simpler than individual components) - reqURL := fmt.Sprintf("http://%s/read?file_id=%s&offset=%d&size=%d&volume_server=%s", - c.sidecarAddr, fileID, offset, size, volumeServer) - - req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil) - if err != nil { - c.failedReads.Add(1) - return nil, false, fmt.Errorf("failed to create RDMA request: %w", err) - } - - // Execute request - resp, err := c.httpClient.Do(req) - if err != nil { - c.failedReads.Add(1) - return nil, false, fmt.Errorf("RDMA request failed: %w", err) - } - defer resp.Body.Close() - - duration := time.Since(startTime) - c.totalLatencyNs.Add(duration.Nanoseconds()) - - if resp.StatusCode != http.StatusOK { - c.failedReads.Add(1) - body, _ := io.ReadAll(resp.Body) - return nil, false, fmt.Errorf("RDMA read failed with status %s: %s", resp.Status, string(body)) - } - - // Check if response indicates RDMA was used - contentType := resp.Header.Get("Content-Type") - isRDMA := strings.Contains(resp.Header.Get("X-Source"), "rdma") || - resp.Header.Get("X-RDMA-Used") == "true" - - // Check for zero-copy temp file optimization - tempFilePath := resp.Header.Get("X-Temp-File") - useTempFile := resp.Header.Get("X-Use-Temp-File") == "true" - - var data []byte - - if useTempFile && tempFilePath != "" { - // Zero-copy path: read from temp file (page cache) - glog.V(4).Infof("๐Ÿ”ฅ Using zero-copy temp file: %s", tempFilePath) - - // Allocate buffer for temp file read - var bufferSize uint64 = 1024 * 1024 // Default 1MB - if size > 0 { - bufferSize = size - } - buffer := make([]byte, bufferSize) - - n, err := c.readFromTempFile(tempFilePath, buffer) - if err != nil { - glog.V(2).Infof("Zero-copy failed, falling back to HTTP body: %v", err) - // Fall back to reading HTTP body - data, err = io.ReadAll(resp.Body) - } else { - data = buffer[:n] - glog.V(4).Infof("๐Ÿ”ฅ Zero-copy successful: %d bytes from page cache", n) - } - - // Important: Cleanup temp file after reading (consumer responsibility) - // This prevents accumulation of temp files in /tmp/rdma-cache - go c.cleanupTempFile(tempFilePath) - } else { - // Regular path: read from HTTP response body - data, err = io.ReadAll(resp.Body) - } - - if err != nil { - c.failedReads.Add(1) - return nil, false, fmt.Errorf("failed to read RDMA response: %w", err) - } - - c.successfulReads.Add(1) - c.totalBytesRead.Add(int64(len(data))) - - // Log successful operation - glog.V(4).Infof("RDMA read completed: fileID=%s, size=%d, duration=%v, rdma=%v, contentType=%s", - fileID, size, duration, isRDMA, contentType) - - return data, isRDMA, nil -} - -// cleanupTempFile requests cleanup of a temp file from the sidecar -func (c *RDMAMountClient) cleanupTempFile(tempFilePath string) { - if tempFilePath == "" { - return - } - - // Give the page cache a brief moment to be utilized before cleanup - // This preserves the zero-copy performance window - time.Sleep(100 * time.Millisecond) - - // Call sidecar cleanup endpoint - cleanupURL := fmt.Sprintf("http://%s/cleanup?temp_file=%s", c.sidecarAddr, url.QueryEscape(tempFilePath)) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, "DELETE", cleanupURL, nil) - if err != nil { - glog.V(2).Infof("Failed to create cleanup request for %s: %v", tempFilePath, err) - return - } - - resp, err := c.httpClient.Do(req) - if err != nil { - glog.V(2).Infof("Failed to cleanup temp file %s: %v", tempFilePath, err) - return - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - glog.V(4).Infof("๐Ÿงน Temp file cleaned up: %s", tempFilePath) - } else { - glog.V(2).Infof("Cleanup failed for %s: status %s", tempFilePath, resp.Status) - } -} - -// GetStats returns current RDMA client statistics -func (c *RDMAMountClient) GetStats() map[string]interface{} { - totalRequests := c.totalRequests.Load() - successfulReads := c.successfulReads.Load() - failedReads := c.failedReads.Load() - totalBytesRead := c.totalBytesRead.Load() - totalLatencyNs := c.totalLatencyNs.Load() - - successRate := float64(0) - avgLatencyNs := int64(0) - - if totalRequests > 0 { - successRate = float64(successfulReads) / float64(totalRequests) * 100 - avgLatencyNs = totalLatencyNs / totalRequests - } - - return map[string]interface{}{ - "sidecar_addr": c.sidecarAddr, - "max_concurrent": c.maxConcurrent, - "timeout_ms": int(c.timeout / time.Millisecond), - "total_requests": totalRequests, - "successful_reads": successfulReads, - "failed_reads": failedReads, - "success_rate_pct": fmt.Sprintf("%.1f", successRate), - "total_bytes_read": totalBytesRead, - "avg_latency_ns": avgLatencyNs, - "avg_latency_ms": fmt.Sprintf("%.3f", float64(avgLatencyNs)/1000000), - } -} - -// Close shuts down the RDMA client and releases resources -func (c *RDMAMountClient) Close() error { - // No need to close semaphore channel; closing it may cause panics if goroutines are still using it. - // The semaphore will be garbage collected when the client is no longer referenced. - - // Log final statistics - stats := c.GetStats() - glog.Infof("RDMA mount client closing: %+v", stats) - - return nil -} - -// IsHealthy checks if the RDMA sidecar is currently healthy -func (c *RDMAMountClient) IsHealthy() bool { - err := c.healthCheck() - return err == nil -} - -// readFromTempFile performs zero-copy read from temp file using page cache -func (c *RDMAMountClient) readFromTempFile(tempFilePath string, buffer []byte) (int, error) { - if tempFilePath == "" { - return 0, fmt.Errorf("empty temp file path") - } - - // Open temp file for reading - file, err := os.Open(tempFilePath) - if err != nil { - return 0, fmt.Errorf("failed to open temp file %s: %w", tempFilePath, err) - } - defer file.Close() - - // Read from temp file (this should be served from page cache) - n, err := file.Read(buffer) - if err != nil && err != io.EOF { - return n, fmt.Errorf("failed to read from temp file: %w", err) - } - - glog.V(4).Infof("๐Ÿ”ฅ Zero-copy read: %d bytes from temp file %s", n, tempFilePath) - - return n, nil -} diff --git a/weed/mount/weedfs.go b/weed/mount/weedfs.go index 95864ef00..584174202 100644 --- a/weed/mount/weedfs.go +++ b/weed/mount/weedfs.go @@ -2,38 +2,31 @@ package mount import ( "context" - "errors" - "math/rand/v2" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/mount/meta_cache" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/mount_pb" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/hanwen/go-fuse/v2/fuse" + "google.golang.org/grpc" + "math/rand" "os" "path" "path/filepath" - "sync" - "sync/atomic" "time" - "github.com/hanwen/go-fuse/v2/fuse" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mount_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" - "github.com/seaweedfs/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/util/version" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "github.com/hanwen/go-fuse/v2/fs" ) type Option struct { - filerIndex int32 // align memory for atomic read/write - FilerAddresses []pb.ServerAddress MountDirectory string + FilerAddresses []pb.ServerAddress + filerIndex int GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string @@ -42,15 +35,12 @@ type Option struct { DiskType types.DiskType ChunkSizeLimit int64 ConcurrentWriters int - CacheDirForRead string - CacheSizeMBForRead int64 - CacheDirForWrite string - CacheMetaTTlSec int + CacheDir string + CacheSizeMB int64 DataCenter string Umask os.FileMode Quota int64 DisableXAttr bool - IsMacOs bool MountUid uint32 MountGid uint32 @@ -63,16 +53,8 @@ type Option struct { Cipher bool // whether encrypt data on volume server UidGidMapper *meta_cache.UidGidMapper - // RDMA acceleration options - RdmaEnabled bool - RdmaSidecarAddr string - RdmaFallback bool - RdmaReadOnly bool - RdmaMaxConcurrent int - RdmaTimeoutMs int - - uniqueCacheDirForRead string - uniqueCacheDirForWrite string + uniqueCacheDir string + uniqueCacheTempPageDir string } type WFS struct { @@ -81,22 +63,17 @@ type WFS struct { fuse.RawFileSystem mount_pb.UnimplementedSeaweedMountServer fs.Inode - option *Option - metaCache *meta_cache.MetaCache - stats statsCache - chunkCache *chunk_cache.TieredChunkCache - signature int32 - concurrentWriters *util.LimitedConcurrentExecutor - copyBufferPool sync.Pool - concurrentCopiersSem chan struct{} - inodeToPath *InodeToPath - fhMap *FileHandleToInode - dhMap *DirectoryHandleToInode - fuseServer *fuse.Server - IsOverQuota bool - fhLockTable *util.LockTable[FileHandleId] - rdmaClient *RDMAMountClient - FilerConf *filer.FilerConf + option *Option + metaCache *meta_cache.MetaCache + stats statsCache + chunkCache *chunk_cache.TieredChunkCache + signature int32 + concurrentWriters *util.LimitedConcurrentExecutor + inodeToPath *InodeToPath + fhmap *FileHandleToInode + dhmap *DirectoryHandleToInode + fuseServer *fuse.Server + IsOverQuota bool } func NewSeaweedFileSystem(option *Option) *WFS { @@ -104,93 +81,40 @@ func NewSeaweedFileSystem(option *Option) *WFS { RawFileSystem: fuse.NewDefaultRawFileSystem(), option: option, signature: util.RandomInt32(), - inodeToPath: NewInodeToPath(util.FullPath(option.FilerMountRootPath), option.CacheMetaTTlSec), - fhMap: NewFileHandleToInode(), - dhMap: NewDirectoryHandleToInode(), - fhLockTable: util.NewLockTable[FileHandleId](), + inodeToPath: NewInodeToPath(util.FullPath(option.FilerMountRootPath)), + fhmap: NewFileHandleToInode(), + dhmap: NewDirectoryHandleToInode(), } - wfs.option.filerIndex = int32(rand.IntN(len(option.FilerAddresses))) + wfs.option.filerIndex = rand.Intn(len(option.FilerAddresses)) wfs.option.setupUniqueCacheDirectory() - if option.CacheSizeMBForRead > 0 { - wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024) + if option.CacheSizeMB > 0 { + wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDir(), option.CacheSizeMB, 1024*1024) } - wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper, + wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDir(), "meta"), option.UidGidMapper, util.FullPath(option.FilerMountRootPath), func(path util.FullPath) { wfs.inodeToPath.MarkChildrenCached(path) }, func(path util.FullPath) bool { return wfs.inodeToPath.IsChildrenCached(path) }, func(filePath util.FullPath, entry *filer_pb.Entry) { - // Find inode if it is not a deleted path - if inode, inodeFound := wfs.inodeToPath.GetInode(filePath); inodeFound { - // Find open file handle - if fh, fhFound := wfs.fhMap.FindFileHandle(inode); fhFound { - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("invalidateFunc", fh.fh, util.ExclusiveLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - - // Recreate dirty pages - fh.dirtyPages.Destroy() - fh.dirtyPages = newPageWriter(fh, wfs.option.ChunkSizeLimit) - - // Update handle entry - newEntry, status := wfs.maybeLoadEntry(filePath) - if status == fuse.OK { - if fh.GetEntry().GetEntry() != newEntry { - fh.SetEntry(newEntry) - } - } - } - } }) grace.OnInterrupt(func() { wfs.metaCache.Shutdown() - os.RemoveAll(option.getUniqueCacheDirForWrite()) - os.RemoveAll(option.getUniqueCacheDirForRead()) - if wfs.rdmaClient != nil { - wfs.rdmaClient.Close() - } + os.RemoveAll(option.getUniqueCacheDir()) }) - // Initialize RDMA client if enabled - if option.RdmaEnabled && option.RdmaSidecarAddr != "" { - rdmaClient, err := NewRDMAMountClient( - option.RdmaSidecarAddr, - wfs.LookupFn(), - option.RdmaMaxConcurrent, - option.RdmaTimeoutMs, - ) - if err != nil { - glog.Warningf("Failed to initialize RDMA client: %v", err) - } else { - wfs.rdmaClient = rdmaClient - glog.Infof("RDMA acceleration enabled: sidecar=%s, maxConcurrent=%d, timeout=%dms", - option.RdmaSidecarAddr, option.RdmaMaxConcurrent, option.RdmaTimeoutMs) - } - } - if wfs.option.ConcurrentWriters > 0 { wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters) - wfs.concurrentCopiersSem = make(chan struct{}, wfs.option.ConcurrentWriters) - } - wfs.copyBufferPool.New = func() any { - return make([]byte, option.ChunkSizeLimit) } return wfs } -func (wfs *WFS) StartBackgroundTasks() error { - follower, err := wfs.subscribeFilerConfEvents() - if err != nil { - return err - } - +func (wfs *WFS) StartBackgroundTasks() { startTime := time.Now() - go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano(), follower) + go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano()) go wfs.loopCheckQuota() - - return nil } func (wfs *WFS) String() string { @@ -207,19 +131,19 @@ func (wfs *WFS) maybeReadEntry(inode uint64) (path util.FullPath, fh *FileHandle return } var found bool - if fh, found = wfs.fhMap.FindFileHandle(inode); found { - entry = fh.UpdateEntry(func(entry *filer_pb.Entry) { - if entry != nil && fh.entry.Attributes == nil { - entry.Attributes = &filer_pb.FuseAttributes{} - } - }) - } else { - entry, status = wfs.maybeLoadEntry(path) + if fh, found = wfs.fhmap.FindFileHandle(inode); found { + entry = fh.GetEntry() + if entry != nil && fh.entry.Attributes == nil { + entry.Attributes = &filer_pb.FuseAttributes{} + } + return path, fh, entry, fuse.OK } + entry, status = wfs.maybeLoadEntry(path) return } func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.Status) { + // glog.V(3).Infof("read entry cache miss %s", fullpath) dir, name := fullpath.DirAndName() @@ -241,7 +165,7 @@ func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.St // read from async meta cache meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir)) cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath) - if errors.Is(cacheErr, filer_pb.ErrNotFound) { + if cacheErr == filer_pb.ErrNotFound { return nil, fuse.ENOENT } return cachedEntry.ToProtoEntry(), fuse.OK @@ -249,7 +173,7 @@ func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.St func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType { if wfs.option.VolumeServerAccess == "filerProxy" { - return func(ctx context.Context, fileId string) (targetUrls []string, err error) { + return func(fileId string) (targetUrls []string, err error) { return []string{"http://" + wfs.getCurrentFiler().ToHttpAddress() + "/?proxyChunkId=" + fileId}, nil } } @@ -257,28 +181,20 @@ func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType { } func (wfs *WFS) getCurrentFiler() pb.ServerAddress { - i := atomic.LoadInt32(&wfs.option.filerIndex) - return wfs.option.FilerAddresses[i] -} - -func (wfs *WFS) ClearCacheDir() { - wfs.metaCache.Shutdown() - os.RemoveAll(wfs.option.getUniqueCacheDirForWrite()) - os.RemoveAll(wfs.option.getUniqueCacheDirForRead()) + return wfs.option.FilerAddresses[wfs.option.filerIndex] } func (option *Option) setupUniqueCacheDirectory() { - cacheUniqueId := util.Md5String([]byte(option.MountDirectory + string(option.FilerAddresses[0]) + option.FilerMountRootPath + version.Version()))[0:8] - option.uniqueCacheDirForRead = path.Join(option.CacheDirForRead, cacheUniqueId) - os.MkdirAll(option.uniqueCacheDirForRead, os.FileMode(0777)&^option.Umask) - option.uniqueCacheDirForWrite = filepath.Join(path.Join(option.CacheDirForWrite, cacheUniqueId), "swap") - os.MkdirAll(option.uniqueCacheDirForWrite, os.FileMode(0777)&^option.Umask) + cacheUniqueId := util.Md5String([]byte(option.MountDirectory + string(option.FilerAddresses[0]) + option.FilerMountRootPath + util.Version()))[0:8] + option.uniqueCacheDir = path.Join(option.CacheDir, cacheUniqueId) + option.uniqueCacheTempPageDir = filepath.Join(option.uniqueCacheDir, "swap") + os.MkdirAll(option.uniqueCacheTempPageDir, os.FileMode(0777)&^option.Umask) } -func (option *Option) getUniqueCacheDirForWrite() string { - return option.uniqueCacheDirForWrite +func (option *Option) getTempFilePageDir() string { + return option.uniqueCacheTempPageDir } -func (option *Option) getUniqueCacheDirForRead() string { - return option.uniqueCacheDirForRead +func (option *Option) getUniqueCacheDir() string { + return option.uniqueCacheDir } diff --git a/weed/mount/weedfs_attr.go b/weed/mount/weedfs_attr.go index d8ca4bc6a..be504f5e2 100644 --- a/weed/mount/weedfs_attr.go +++ b/weed/mount/weedfs_attr.go @@ -1,37 +1,30 @@ package mount import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "os" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) func (wfs *WFS) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) (code fuse.Status) { - glog.V(4).Infof("GetAttr %v", input.NodeId) if input.NodeId == 1 { wfs.setRootAttr(out) return fuse.OK } - inode := input.NodeId - _, _, entry, status := wfs.maybeReadEntry(inode) + _, _, entry, status := wfs.maybeReadEntry(input.NodeId) if status == fuse.OK { out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, inode, entry, true) + wfs.setAttrByPbEntry(&out.Attr, input.NodeId, entry) return status } else { - if fh, found := wfs.fhMap.FindFileHandle(inode); found { + if fh, found := wfs.fhmap.FindFileHandle(input.NodeId); found { out.AttrValid = 1 - // Use shared lock to prevent race with Write operations - fhActiveLock := wfs.fhLockTable.AcquireLock("GetAttr", fh.fh, util.SharedLock) - wfs.setAttrByPbEntry(&out.Attr, inode, fh.entry.GetEntry(), true) - wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) + wfs.setAttrByPbEntry(&out.Attr, input.NodeId, fh.entry) out.Nlink = 0 return fuse.OK } @@ -47,7 +40,7 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse } path, fh, entry, status := wfs.maybeReadEntry(input.NodeId) - if status != fuse.OK || entry == nil { + if status != fuse.OK { return status } if fh != nil { @@ -55,18 +48,13 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse defer fh.entryLock.Unlock() } - wormEnforced, wormEnabled := wfs.wormEnforcedForEntry(path, entry) - if wormEnforced { - return fuse.EPERM - } - if size, ok := input.GetSize(); ok { - glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.GetChunks())) + glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.Chunks)) if size < filer.FileSize(entry) { // fmt.Printf("truncate %v \n", fullPath) var chunks []*filer_pb.FileChunk var truncatedChunks []*filer_pb.FileChunk - for _, chunk := range entry.GetChunks() { + for _, chunk := range entry.Chunks { int64Size := int64(chunk.Size) if chunk.Offset+int64Size > int64(size) { // this chunk is truncated @@ -79,14 +67,12 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString()) truncatedChunks = append(truncatedChunks, chunk) } - } else { - chunks = append(chunks, chunk) } } // set the new chunks and reset entry cache entry.Chunks = chunks if fh != nil { - fh.entryChunkGroup.SetChunks(chunks) + fh.entryViewCache = nil } } entry.Attributes.Mtime = time.Now().Unix() @@ -95,11 +81,6 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse } if mode, ok := input.GetMode(); ok { - // commit the file to worm when it is set to readonly at the first time - if entry.WormEnforcedAtTsNs == 0 && wormEnabled && !hasWritePermission(mode) { - entry.WormEnforcedAtTsNs = time.Now().UnixNano() - } - // glog.V(4).Infof("setAttr mode %o", mode) entry.Attributes.FileMode = chmod(entry.Attributes.FileMode, mode) if input.NodeId == 1 { @@ -130,11 +111,7 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse } out.AttrValid = 1 - size, includeSize := input.GetSize() - if includeSize { - out.Attr.Size = size - } - wfs.setAttrByPbEntry(&out.Attr, input.NodeId, entry, !includeSize) + wfs.setAttrByPbEntry(&out.Attr, input.NodeId, entry) if fh != nil { fh.dirtyMetadata = true @@ -159,22 +136,11 @@ func (wfs *WFS) setRootAttr(out *fuse.AttrOut) { out.Nlink = 1 } -func (wfs *WFS) setAttrByPbEntry(out *fuse.Attr, inode uint64, entry *filer_pb.Entry, calculateSize bool) { +func (wfs *WFS) setAttrByPbEntry(out *fuse.Attr, inode uint64, entry *filer_pb.Entry) { out.Ino = inode - setBlksize(out, blockSize) - if entry == nil { - return - } - if entry.Attributes != nil && entry.Attributes.Inode != 0 { - out.Ino = entry.Attributes.Inode - } - if calculateSize { - out.Size = filer.FileSize(entry) - } - if entry.FileMode()&os.ModeSymlink != 0 { - out.Size = uint64(len(entry.Attributes.SymlinkTarget)) - } + out.Size = filer.FileSize(entry) out.Blocks = (out.Size + blockSize - 1) / blockSize + setBlksize(out, blockSize) out.Mtime = uint64(entry.Attributes.Mtime) out.Ctime = uint64(entry.Attributes.Mtime) out.Atime = uint64(entry.Attributes.Mtime) @@ -192,9 +158,6 @@ func (wfs *WFS) setAttrByPbEntry(out *fuse.Attr, inode uint64, entry *filer_pb.E func (wfs *WFS) setAttrByFilerEntry(out *fuse.Attr, inode uint64, entry *filer.Entry) { out.Ino = inode out.Size = entry.FileSize - if entry.Mode&os.ModeSymlink != 0 { - out.Size = uint64(len(entry.SymlinkTarget)) - } out.Blocks = (out.Size + blockSize - 1) / blockSize setBlksize(out, blockSize) out.Atime = uint64(entry.Attr.Mtime.Unix()) @@ -216,7 +179,7 @@ func (wfs *WFS) outputPbEntry(out *fuse.EntryOut, inode uint64, entry *filer_pb. out.Generation = 1 out.EntryValid = 1 out.AttrValid = 1 - wfs.setAttrByPbEntry(&out.Attr, inode, entry, true) + wfs.setAttrByPbEntry(&out.Attr, inode, entry) } func (wfs *WFS) outputFilerEntry(out *fuse.EntryOut, inode uint64, entry *filer.Entry) { @@ -231,14 +194,6 @@ func chmod(existing uint32, mode uint32) uint32 { return existing&^07777 | mode&07777 } -const ownerWrite = 0o200 -const groupWrite = 0o020 -const otherWrite = 0o002 - -func hasWritePermission(mode uint32) bool { - return (mode&ownerWrite != 0) || (mode&groupWrite != 0) || (mode&otherWrite != 0) -} - func toSyscallMode(mode os.FileMode) uint32 { return toSyscallType(mode) | uint32(mode) } diff --git a/weed/mount/weedfs_attr_freebsd.go b/weed/mount/weedfs_attr_freebsd.go deleted file mode 100644 index e7767d4a6..000000000 --- a/weed/mount/weedfs_attr_freebsd.go +++ /dev/null @@ -1,8 +0,0 @@ -package mount - -import ( - "github.com/hanwen/go-fuse/v2/fuse" -) - -func setBlksize(out *fuse.Attr, size uint32) { -} diff --git a/weed/mount/weedfs_dir_lookup.go b/weed/mount/weedfs_dir_lookup.go index 7af989b80..fdc16aad5 100644 --- a/weed/mount/weedfs_dir_lookup.go +++ b/weed/mount/weedfs_dir_lookup.go @@ -2,13 +2,11 @@ package mount import ( "context" - + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/mount/meta_cache" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) // Lookup is called by the kernel when the VFS wants to know @@ -41,7 +39,7 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin if localEntry == nil { // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err := filer_pb.GetEntry(context.Background(), wfs, fullFilePath) + entry, err := filer_pb.GetEntry(wfs, fullFilePath) if err != nil { glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) return fuse.ENOENT @@ -57,13 +55,9 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin inode := wfs.inodeToPath.Lookup(fullFilePath, localEntry.Crtime.Unix(), localEntry.IsDirectory(), len(localEntry.HardLinkId) > 0, localEntry.Inode, true) - if fh, found := wfs.fhMap.FindFileHandle(inode); found { - fh.entryLock.RLock() - if entry := fh.GetEntry().GetEntry(); entry != nil { - glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(entry)) - localEntry = filer.FromPbEntry(string(dirPath), entry) - } - fh.entryLock.RUnlock() + if fh, found := wfs.fhmap.FindFileHandle(inode); found && fh.entry != nil { + glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(fh.entry)) + localEntry = filer.FromPbEntry(string(dirPath), fh.entry) } wfs.outputFilerEntry(out, inode, localEntry) diff --git a/weed/mount/weedfs_dir_mkrm.go b/weed/mount/weedfs_dir_mkrm.go index 503d6a076..4246f0a4c 100644 --- a/weed/mount/weedfs_dir_mkrm.go +++ b/weed/mount/weedfs_dir_mkrm.go @@ -3,16 +3,14 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "os" "strings" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) /** Create a directory @@ -56,14 +54,13 @@ func (wfs *WFS) Mkdir(cancel <-chan struct{}, in *fuse.MkdirIn, name string, out defer wfs.mapPbIdFromFilerToLocal(newEntry) request := &filer_pb.CreateEntryRequest{ - Directory: string(dirFullPath), - Entry: newEntry, - Signatures: []int32{wfs.signature}, - SkipCheckParentDirectory: true, + Directory: string(dirFullPath), + Entry: newEntry, + Signatures: []int32{wfs.signature}, } glog.V(1).Infof("mkdir: %v", request) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mkdir %s: %v", entryFullPath, err) return err } @@ -107,7 +104,7 @@ func (wfs *WFS) Rmdir(cancel <-chan struct{}, header *fuse.InHeader, name string glog.V(3).Infof("remove directory: %v", entryFullPath) ignoreRecursiveErr := true // ignore recursion error since the OS should manage it - err := filer_pb.Remove(context.Background(), wfs, string(dirFullPath), name, true, false, ignoreRecursiveErr, false, []int32{wfs.signature}) + err := filer_pb.Remove(wfs, string(dirFullPath), name, true, false, ignoreRecursiveErr, false, []int32{wfs.signature}) if err != nil { glog.V(0).Infof("remove %s: %v", entryFullPath, err) if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { diff --git a/weed/mount/weedfs_dir_read.go b/weed/mount/weedfs_dir_read.go index 6e18b50e8..dd34e1ab5 100644 --- a/weed/mount/weedfs_dir_read.go +++ b/weed/mount/weedfs_dir_read.go @@ -2,11 +2,10 @@ package mount import ( "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/mount/meta_cache" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" - "github.com/seaweedfs/seaweedfs/weed/util" "math" "sync" ) @@ -44,32 +43,35 @@ func NewDirectoryHandleToInode() *DirectoryHandleToInode { } func (wfs *WFS) AcquireDirectoryHandle() (DirectoryHandleId, *DirectoryHandle) { - fh := FileHandleId(util.RandomUint64()) + wfs.fhmap.Lock() + fh := wfs.fhmap.nextFh + wfs.fhmap.nextFh++ + wfs.fhmap.Unlock() - wfs.dhMap.Lock() - defer wfs.dhMap.Unlock() + wfs.dhmap.Lock() + defer wfs.dhmap.Unlock() dh := new(DirectoryHandle) dh.reset() - wfs.dhMap.dir2inode[DirectoryHandleId(fh)] = dh + wfs.dhmap.dir2inode[DirectoryHandleId(fh)] = dh return DirectoryHandleId(fh), dh } func (wfs *WFS) GetDirectoryHandle(dhid DirectoryHandleId) *DirectoryHandle { - wfs.dhMap.Lock() - defer wfs.dhMap.Unlock() - if dh, found := wfs.dhMap.dir2inode[dhid]; found { + wfs.dhmap.Lock() + defer wfs.dhmap.Unlock() + if dh, found := wfs.dhmap.dir2inode[dhid]; found { return dh } dh := new(DirectoryHandle) dh.reset() - wfs.dhMap.dir2inode[dhid] = dh + wfs.dhmap.dir2inode[dhid] = dh return dh } func (wfs *WFS) ReleaseDirectoryHandle(dhid DirectoryHandleId) { - wfs.dhMap.Lock() - defer wfs.dhMap.Unlock() - delete(wfs.dhMap.dir2inode, dhid) + wfs.dhmap.Lock() + defer wfs.dhmap.Unlock() + delete(wfs.dhmap.dir2inode, dhid) } // Directory handling @@ -169,9 +171,9 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl isEarlyTerminated = true return false } - if fh, found := wfs.fhMap.FindFileHandle(inode); found { + if fh, found := wfs.fhmap.FindFileHandle(inode); found { glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name)) - entry = filer.FromPbEntry(string(dirPath), fh.GetEntry().GetEntry()) + entry = filer.FromPbEntry(string(dirPath), fh.entry) } wfs.outputFilerEntry(entryOut, inode, entry) } diff --git a/weed/mount/weedfs_file_copy_range.go b/weed/mount/weedfs_file_copy_range.go deleted file mode 100644 index bcf5ae03a..000000000 --- a/weed/mount/weedfs_file_copy_range.go +++ /dev/null @@ -1,154 +0,0 @@ -package mount - -import ( - "net/http" - "time" - - "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// CopyFileRange copies data from one file to another from and to specified offsets. -// -// See https://man7.org/linux/man-pages/man2/copy_file_range.2.html -// See https://github.com/libfuse/libfuse/commit/fe4f9428fc403fa8b99051f52d84ea5bd13f3855 -/** - * Copy a range of data from one file to another - * - * Niels de Vos: โ€ข libfuse: add copy_file_range() support - * - * Performs an optimized copy between two file descriptors without the - * additional cost of transferring data through the FUSE kernel module - * to user space (glibc) and then back into the FUSE filesystem again. - * - * In case this method is not implemented, applications are expected to - * fall back to a regular file copy. (Some glibc versions did this - * emulation automatically, but the emulation has been removed from all - * glibc release branches.) - */ -func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) (written uint32, code fuse.Status) { - // flags must equal 0 for this syscall as of now - if in.Flags != 0 { - return 0, fuse.EINVAL - } - - // files must exist - fhOut := wfs.GetHandle(FileHandleId(in.FhOut)) - if fhOut == nil { - return 0, fuse.EBADF - } - fhIn := wfs.GetHandle(FileHandleId(in.FhIn)) - if fhIn == nil { - return 0, fuse.EBADF - } - - // lock source and target file handles - fhOutActiveLock := fhOut.wfs.fhLockTable.AcquireLock("CopyFileRange", fhOut.fh, util.ExclusiveLock) - defer fhOut.wfs.fhLockTable.ReleaseLock(fhOut.fh, fhOutActiveLock) - - if fhOut.entry == nil { - return 0, fuse.ENOENT - } - - if fhIn.fh != fhOut.fh { - fhInActiveLock := fhIn.wfs.fhLockTable.AcquireLock("CopyFileRange", fhIn.fh, util.SharedLock) - defer fhIn.wfs.fhLockTable.ReleaseLock(fhIn.fh, fhInActiveLock) - } - - // directories are not supported - if fhIn.entry.IsDirectory || fhOut.entry.IsDirectory { - return 0, fuse.EISDIR - } - - glog.V(4).Infof( - "CopyFileRange %s fhIn %d -> %s fhOut %d, [%d,%d) -> [%d,%d)", - fhIn.FullPath(), fhIn.fh, - fhOut.FullPath(), fhOut.fh, - in.OffIn, in.OffIn+in.Len, - in.OffOut, in.OffOut+in.Len, - ) - - // Concurrent copy operations could allocate too much memory, so we want to - // throttle our concurrency, scaling with the number of writers the mount - // was configured with. - if wfs.concurrentCopiersSem != nil { - wfs.concurrentCopiersSem <- struct{}{} - defer func() { <-wfs.concurrentCopiersSem }() - } - - // We want to stream the copy operation to avoid allocating massive buffers. - nowUnixNano := time.Now().UnixNano() - totalCopied := int64(0) - buff := wfs.copyBufferPool.Get().([]byte) - defer wfs.copyBufferPool.Put(buff) - for { - // Comply with cancellation as best as we can, given that the underlying - // IO functions aren't cancellation-aware. - select { - case <-cancel: - glog.Warningf("canceled CopyFileRange for %s (copied %d)", - fhIn.FullPath(), totalCopied) - return uint32(totalCopied), fuse.EINTR - default: // keep going - } - - // We can save one IO by breaking early if we already know the next read - // will result in zero bytes. - remaining := int64(in.Len) - totalCopied - readLen := min(remaining, int64(len(buff))) - if readLen == 0 { - break - } - - // Perform the read - offsetIn := totalCopied + int64(in.OffIn) - numBytesRead, err := readDataByFileHandle( - buff[:readLen], fhIn, offsetIn) - if err != nil { - glog.Warningf("file handle read %s %d (total %d): %v", - fhIn.FullPath(), numBytesRead, totalCopied, err) - return 0, fuse.EIO - } - - // Break if we're done copying (no more bytes to read) - if numBytesRead == 0 { - break - } - - offsetOut := int64(in.OffOut) + totalCopied - - // Detect mime type only during the beginning of our stream, since - // DetectContentType is expecting some of the first 512 bytes of the - // file. See [http.DetectContentType] for details. - if offsetOut <= 512 { - fhOut.contentType = http.DetectContentType(buff[:numBytesRead]) - } - - // Perform the write - fhOut.dirtyPages.writerPattern.MonitorWriteAt(offsetOut, int(numBytesRead)) - fhOut.dirtyPages.AddPage( - offsetOut, - buff[:numBytesRead], - fhOut.dirtyPages.writerPattern.IsSequentialMode(), - nowUnixNano) - - // Accumulate for the next loop iteration - totalCopied += numBytesRead - } - - if totalCopied == 0 { - return 0, fuse.OK - } - - fhOut.entry.Attributes.FileSize = uint64(max( - totalCopied+int64(in.OffOut), - int64(fhOut.entry.Attributes.FileSize), - )) - fhOut.entry.Content = nil - fhOut.dirtyMetadata = true - - written = uint32(totalCopied) - return written, fuse.OK -} diff --git a/weed/mount/weedfs_file_io.go b/weed/mount/weedfs_file_io.go index 04fe7f21c..7039b14ec 100644 --- a/weed/mount/weedfs_file_io.go +++ b/weed/mount/weedfs_file_io.go @@ -2,7 +2,6 @@ package mount import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" ) /** @@ -63,18 +62,9 @@ import ( */ func (wfs *WFS) Open(cancel <-chan struct{}, in *fuse.OpenIn, out *fuse.OpenOut) (status fuse.Status) { var fileHandle *FileHandle - fileHandle, status = wfs.AcquireHandle(in.NodeId, in.Flags, in.Uid, in.Gid) + fileHandle, status = wfs.AcquireHandle(in.NodeId, in.Uid, in.Gid) if status == fuse.OK { out.Fh = uint64(fileHandle.fh) - out.OpenFlags = in.Flags - if wfs.option.IsMacOs { - // remove the direct_io flag, as it is not well-supported on macOS - // https://code.google.com/archive/p/macfuse/wikis/OPTIONS.wiki recommended to avoid the direct_io flag - if in.Flags&fuse.FOPEN_DIRECT_IO != 0 { - glog.V(4).Infof("macfuse direct_io mode %v => false\n", in.Flags&fuse.FOPEN_DIRECT_IO != 0) - out.OpenFlags &^= fuse.FOPEN_DIRECT_IO - } - } // TODO https://github.com/libfuse/libfuse/blob/master/include/fuse_common.h#L64 } return status diff --git a/weed/mount/weedfs_file_lseek.go b/weed/mount/weedfs_file_lseek.go deleted file mode 100644 index a7e3a2b46..000000000 --- a/weed/mount/weedfs_file_lseek.go +++ /dev/null @@ -1,88 +0,0 @@ -package mount - -import ( - "context" - "syscall" - - "github.com/seaweedfs/seaweedfs/weed/util" - - "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// These are non-POSIX extensions -const ( - SEEK_DATA uint32 = 3 // seek to next data after the offset - SEEK_HOLE uint32 = 4 // seek to next hole after the offset - ENXIO = fuse.Status(syscall.ENXIO) -) - -// Lseek finds next data or hole segments after the specified offset -// See https://man7.org/linux/man-pages/man2/lseek.2.html -func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekOut) fuse.Status { - // not a documented feature - if in.Padding != 0 { - return fuse.EINVAL - } - - if in.Whence != SEEK_DATA && in.Whence != SEEK_HOLE { - return fuse.EINVAL - } - - // file must exist - fh := wfs.GetHandle(FileHandleId(in.Fh)) - if fh == nil { - return fuse.EBADF - } - - // lock the file until the proper offset was calculated - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("Lseek", fh.fh, util.SharedLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - - fileSize := int64(filer.FileSize(fh.GetEntry().GetEntry())) - offset := max(int64(in.Offset), 0) - - glog.V(4).Infof( - "Lseek %s fh %d in [%d,%d], whence %d", - fh.FullPath(), fh.fh, offset, fileSize, in.Whence, - ) - - // can neither seek beyond file nor seek to a hole at the end of the file with SEEK_DATA - if offset > fileSize { - return ENXIO - } else if in.Whence == SEEK_DATA && offset == fileSize { - return ENXIO - } - - // Create a context that will be cancelled when the cancel channel receives a signal - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() // Ensure cleanup - - go func() { - select { - case <-cancel: - cancelFunc() - case <-ctx.Done(): - // Clean exit when lseek operation completes - } - }() - - // search chunks for the offset - found, offset := fh.entryChunkGroup.SearchChunks(ctx, offset, fileSize, in.Whence) - if found { - out.Offset = uint64(offset) - return fuse.OK - } - - // in case we found no exact matches, we return the recommended fallbacks, that is: - // original offset for SEEK_DATA or end of file for an implicit hole - if in.Whence == SEEK_DATA { - out.Offset = in.Offset - } else { - out.Offset = uint64(fileSize) - } - - return fuse.OK -} diff --git a/weed/mount/weedfs_file_mkrm.go b/weed/mount/weedfs_file_mkrm.go index e48d5af40..86d4c4d41 100644 --- a/weed/mount/weedfs_file_mkrm.go +++ b/weed/mount/weedfs_file_mkrm.go @@ -3,13 +3,12 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) /** @@ -83,7 +82,7 @@ func (wfs *WFS) Mknod(cancel <-chan struct{}, in *fuse.MknodIn, name string, out } glog.V(1).Infof("mknod: %v", request) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mknod %s: %v", entryFullPath, err) return err } @@ -130,14 +129,10 @@ func (wfs *WFS) Unlink(cancel <-chan struct{}, header *fuse.InHeader, name strin return code } - if wormEnforced, _ := wfs.wormEnforcedForEntry(entryFullPath, entry); wormEnforced { - return fuse.EPERM - } - // first, ensure the filer store can correctly delete glog.V(3).Infof("remove file: %v", entryFullPath) isDeleteData := entry != nil && entry.HardLinkCounter <= 1 - err := filer_pb.Remove(context.Background(), wfs, string(dirFullPath), name, isDeleteData, false, false, false, []int32{wfs.signature}) + err := filer_pb.Remove(wfs, string(dirFullPath), name, isDeleteData, false, false, false, []int32{wfs.signature}) if err != nil { glog.V(0).Infof("remove %s: %v", entryFullPath, err) return fuse.OK @@ -149,6 +144,7 @@ func (wfs *WFS) Unlink(cancel <-chan struct{}, header *fuse.InHeader, name strin return fuse.EIO } + wfs.metaCache.DeleteEntry(context.Background(), entryFullPath) wfs.inodeToPath.RemovePath(entryFullPath) return fuse.OK diff --git a/weed/mount/weedfs_file_read.go b/weed/mount/weedfs_file_read.go index c85478cd0..00143a5b4 100644 --- a/weed/mount/weedfs_file_read.go +++ b/weed/mount/weedfs_file_read.go @@ -1,16 +1,9 @@ package mount import ( - "bytes" - "context" - "fmt" - "io" - - "github.com/seaweedfs/seaweedfs/weed/util" - + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/glog" + "io" ) /** @@ -44,78 +37,23 @@ func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse return nil, fuse.ENOENT } - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("Read", fh.fh, util.SharedLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - - // Create a context that will be cancelled when the cancel channel receives a signal - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() // Ensure cleanup - - go func() { - select { - case <-cancel: - cancelFunc() - case <-ctx.Done(): - } - }() - offset := int64(in.Offset) - totalRead, err := readDataByFileHandleWithContext(ctx, buff, fh, offset) + fh.lockForRead(offset, len(buff)) + defer fh.unlockForRead(offset, len(buff)) + + totalRead, err := fh.readFromChunks(buff, offset) + if err == nil || err == io.EOF { + maxStop := fh.readFromDirtyPages(buff, offset) + totalRead = max(maxStop-offset, totalRead) + } + if err == io.EOF { + err = nil + } + if err != nil { glog.Warningf("file handle read %s %d: %v", fh.FullPath(), totalRead, err) return nil, fuse.EIO } - if IsDebugFileReadWrite { - // print(".") - mirrorData := make([]byte, totalRead) - fh.mirrorFile.ReadAt(mirrorData, offset) - if bytes.Compare(mirrorData, buff[:totalRead]) != 0 { - - againBuff := make([]byte, len(buff)) - againRead, _ := readDataByFileHandleWithContext(ctx, againBuff, fh, offset) - againCorrect := bytes.Compare(mirrorData, againBuff[:againRead]) == 0 - againSame := bytes.Compare(buff[:totalRead], againBuff[:againRead]) == 0 - - fmt.Printf("\ncompare %v [%d,%d) size:%d againSame:%v againCorrect:%v\n", fh.mirrorFile.Name(), offset, offset+totalRead, totalRead, againSame, againCorrect) - //fmt.Printf("read mirrow data: %v\n", mirrorData) - //fmt.Printf("read actual data: %v\n", againBuff[:totalRead]) - } - } - return fuse.ReadResultData(buff[:totalRead]), fuse.OK } - -func readDataByFileHandle(buff []byte, fhIn *FileHandle, offset int64) (int64, error) { - // read data from source file - size := len(buff) - fhIn.lockForRead(offset, size) - defer fhIn.unlockForRead(offset, size) - - n, tsNs, err := fhIn.readFromChunks(buff, offset) - if err == nil || err == io.EOF { - maxStop := fhIn.readFromDirtyPages(buff, offset, tsNs) - n = max(maxStop-offset, n) - } - if err == io.EOF { - err = nil - } - return n, err -} - -func readDataByFileHandleWithContext(ctx context.Context, buff []byte, fhIn *FileHandle, offset int64) (int64, error) { - // read data from source file - size := len(buff) - fhIn.lockForRead(offset, size) - defer fhIn.unlockForRead(offset, size) - - n, tsNs, err := fhIn.readFromChunksWithContext(ctx, buff, offset) - if err == nil || err == io.EOF { - maxStop := fhIn.readFromDirtyPages(buff, offset, tsNs) - n = max(maxStop-offset, n) - } - if err == io.EOF { - err = nil - } - return n, err -} diff --git a/weed/mount/weedfs_file_sync.go b/weed/mount/weedfs_file_sync.go index eda5ad8da..b7fffaaa3 100644 --- a/weed/mount/weedfs_file_sync.go +++ b/weed/mount/weedfs_file_sync.go @@ -3,14 +3,12 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) /** @@ -54,11 +52,12 @@ import ( func (wfs *WFS) Flush(cancel <-chan struct{}, in *fuse.FlushIn) fuse.Status { fh := wfs.GetHandle(FileHandleId(in.Fh)) if fh == nil { - // If handle is not found, it might have been already released - // This is not an error condition for FLUSH - return fuse.OK + return fuse.ENOENT } + fh.Lock() + defer fh.Unlock() + return wfs.doFlush(fh, in.Uid, in.Gid) } @@ -88,17 +87,19 @@ func (wfs *WFS) Fsync(cancel <-chan struct{}, in *fuse.FsyncIn) (code fuse.Statu return fuse.ENOENT } + fh.Lock() + defer fh.Unlock() + return wfs.doFlush(fh, in.Uid, in.Gid) } func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { - // flush works at fh level fileFullPath := fh.FullPath() dir, name := fileFullPath.DirAndName() // send the data to the OS - glog.V(4).Infof("doFlush %s fh %d", fileFullPath, fh.fh) + glog.V(4).Infof("doFlush %s fh %d", fileFullPath, fh.handle) if !wfs.IsOverQuota { if err := fh.dirtyPages.FlushData(); err != nil { @@ -115,12 +116,15 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { return fuse.Status(syscall.ENOSPC) } - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("doFlush", fh.fh, util.ExclusiveLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - entry := fh.GetEntry() + fh.entryLock.Lock() + defer fh.entryLock.Unlock() + + entry := fh.entry + if entry == nil { + return nil + } entry.Name = name // this flush may be just after a rename operation if entry.Attributes != nil { @@ -131,24 +135,27 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { if entry.Attributes.Gid == 0 { entry.Attributes.Gid = gid } + if entry.Attributes.Crtime == 0 { + entry.Attributes.Crtime = time.Now().Unix() + } entry.Attributes.Mtime = time.Now().Unix() } request := &filer_pb.CreateEntryRequest{ Directory: string(dir), - Entry: entry.GetEntry(), + Entry: entry, Signatures: []int32{wfs.signature}, SkipCheckParentDirectory: true, } - glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks())) - //for i, chunk := range entry.GetChunks() { - // glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) - //} + glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.Chunks)) + for i, chunk := range entry.Chunks { + glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + } - manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.GetChunks()) + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks) - chunks, _ := filer.CompactFileChunks(context.Background(), wfs.LookupFn(), nonManifestChunks) + chunks, _ := filer.CompactFileChunks(wfs.LookupFn(), nonManifestChunks) chunks, manifestErr := filer.MaybeManifestize(wfs.saveDataAsChunk(fileFullPath), chunks) if manifestErr != nil { // not good, but should be ok @@ -159,7 +166,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { wfs.mapPbIdFromLocalToFiler(request.Entry) defer wfs.mapPbIdFromFilerToLocal(request.Entry) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.Errorf("fh flush create %s: %v", fileFullPath, err) return fmt.Errorf("fh flush create %s: %v", fileFullPath, err) } @@ -174,13 +181,9 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { } if err != nil { - glog.Errorf("%v fh %d flush: %v", fileFullPath, fh.fh, err) + glog.Errorf("%v fh %d flush: %v", fileFullPath, fh.handle, err) return fuse.EIO } - if IsDebugFileReadWrite { - fh.mirrorFile.Sync() - } - return fuse.OK } diff --git a/weed/mount/weedfs_file_write.go b/weed/mount/weedfs_file_write.go index 1ec20c294..2b7a6cea2 100644 --- a/weed/mount/weedfs_file_write.go +++ b/weed/mount/weedfs_file_write.go @@ -2,10 +2,8 @@ package mount import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/util" "net/http" "syscall" - "time" ) /** @@ -47,12 +45,10 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr fh.dirtyPages.writerPattern.MonitorWriteAt(int64(in.Offset), int(in.Size)) - tsNs := time.Now().UnixNano() + fh.Lock() + defer fh.Unlock() - fhActiveLock := fh.wfs.fhLockTable.AcquireLock("Write", fh.fh, util.ExclusiveLock) - defer fh.wfs.fhLockTable.ReleaseLock(fh.fh, fhActiveLock) - - entry := fh.GetEntry() + entry := fh.entry if entry == nil { return 0, fuse.OK } @@ -62,7 +58,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize))) // glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) - fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode(), tsNs) + fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode()) written = uint32(len(data)) @@ -73,10 +69,5 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr fh.dirtyMetadata = true - if IsDebugFileReadWrite { - // print("+") - fh.mirrorFile.WriteAt(data, offset) - } - return written, fuse.OK } diff --git a/weed/mount/weedfs_filehandle.go b/weed/mount/weedfs_filehandle.go index f3add0378..d769e51c5 100644 --- a/weed/mount/weedfs_filehandle.go +++ b/weed/mount/weedfs_filehandle.go @@ -1,29 +1,24 @@ package mount import ( + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) -func (wfs *WFS) AcquireHandle(inode uint64, flags, uid, gid uint32) (fileHandle *FileHandle, status fuse.Status) { +func (wfs *WFS) AcquireHandle(inode uint64, uid, gid uint32) (fileHandle *FileHandle, status fuse.Status) { var entry *filer_pb.Entry - var path util.FullPath - path, _, entry, status = wfs.maybeReadEntry(inode) + _, _, entry, status = wfs.maybeReadEntry(inode) if status == fuse.OK { - if wormEnforced, _ := wfs.wormEnforcedForEntry(path, entry); wormEnforced && flags&fuse.O_ANYWRITE != 0 { - return nil, fuse.EPERM - } // need to AcquireFileHandle again to ensure correct handle counter - fileHandle = wfs.fhMap.AcquireFileHandle(wfs, inode, entry) + fileHandle = wfs.fhmap.AcquireFileHandle(wfs, inode, entry) } return } func (wfs *WFS) ReleaseHandle(handleId FileHandleId) { - wfs.fhMap.ReleaseByHandle(handleId) + wfs.fhmap.ReleaseByHandle(handleId) } func (wfs *WFS) GetHandle(handleId FileHandleId) *FileHandle { - return wfs.fhMap.GetFileHandle(handleId) + return wfs.fhmap.GetFileHandle(handleId) } diff --git a/weed/mount/weedfs_forget.go b/weed/mount/weedfs_forget.go index 0a45aba6b..4212adeb6 100644 --- a/weed/mount/weedfs_forget.go +++ b/weed/mount/weedfs_forget.go @@ -2,7 +2,7 @@ package mount import ( "context" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" ) // Forget is called when the kernel discards entries from its @@ -65,5 +65,5 @@ func (wfs *WFS) Forget(nodeid, nlookup uint64) { wfs.inodeToPath.Forget(nodeid, nlookup, func(dir util.FullPath) { wfs.metaCache.DeleteFolderChildren(context.Background(), dir) }) - wfs.fhMap.ReleaseByInode(nodeid) + wfs.fhmap.ReleaseByInode(nodeid) } diff --git a/weed/mount/weedfs_grpc_server.go b/weed/mount/weedfs_grpc_server.go index f867f2d80..4b2fdffa6 100644 --- a/weed/mount/weedfs_grpc_server.go +++ b/weed/mount/weedfs_grpc_server.go @@ -3,8 +3,8 @@ package mount import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mount_pb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/mount_pb" ) func (wfs *WFS) Configure(ctx context.Context, request *mount_pb.ConfigureRequest) (*mount_pb.ConfigureResponse, error) { diff --git a/weed/mount/weedfs_link.go b/weed/mount/weedfs_link.go index 344b907b4..2ab412fd5 100644 --- a/weed/mount/weedfs_link.go +++ b/weed/mount/weedfs_link.go @@ -2,14 +2,12 @@ package mount import ( "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) /* @@ -25,6 +23,7 @@ When creating a link: /** Create a hard link to a file */ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out *fuse.EntryOut) (code fuse.Status) { + if wfs.IsOverQuota { return fuse.Status(syscall.ENOSPC) } @@ -48,11 +47,6 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out * return status } - // hardlink is not allowed in WORM mode - if wormEnforced, _ := wfs.wormEnforcedForEntry(oldEntryPath, oldEntry); wormEnforced { - return fuse.EPERM - } - // update old file to hardlink mode if len(oldEntry.HardLinkId) == 0 { oldEntry.HardLinkId = filer.NewHardLinkId() @@ -73,13 +67,12 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out * Name: name, IsDirectory: false, Attributes: oldEntry.Attributes, - Chunks: oldEntry.GetChunks(), + Chunks: oldEntry.Chunks, Extended: oldEntry.Extended, HardLinkId: oldEntry.HardLinkId, HardLinkCounter: oldEntry.HardLinkCounter, }, - Signatures: []int32{wfs.signature}, - SkipCheckParentDirectory: true, + Signatures: []int32{wfs.signature}, } // apply changes to the filer, and also apply to local metaCache @@ -88,12 +81,12 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out * wfs.mapPbIdFromLocalToFiler(request.Entry) defer wfs.mapPbIdFromFilerToLocal(request.Entry) - if err := filer_pb.UpdateEntry(context.Background(), client, updateOldEntryRequest); err != nil { + if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil { return err } wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry)) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return err } @@ -109,9 +102,9 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out * return fuse.EIO } - wfs.inodeToPath.AddPath(oldEntry.Attributes.Inode, newEntryPath) + inode := wfs.inodeToPath.Lookup(newEntryPath, oldEntry.Attributes.Crtime, oldEntry.IsDirectory, true, oldEntry.Attributes.Inode, true) - wfs.outputPbEntry(out, oldEntry.Attributes.Inode, request.Entry) + wfs.outputPbEntry(out, inode, request.Entry) return fuse.OK } diff --git a/weed/mount/weedfs_quota.go b/weed/mount/weedfs_quota.go index 23f487549..ac3e58e62 100644 --- a/weed/mount/weedfs_quota.go +++ b/weed/mount/weedfs_quota.go @@ -3,8 +3,8 @@ package mount import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "time" ) diff --git a/weed/mount/weedfs_rename.go b/weed/mount/weedfs_rename.go index e567b12e1..0c7de0bbb 100644 --- a/weed/mount/weedfs_rename.go +++ b/weed/mount/weedfs_rename.go @@ -3,16 +3,15 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/hanwen/go-fuse/v2/fs" + "github.com/hanwen/go-fuse/v2/fuse" "io" "strings" "syscall" - - "github.com/hanwen/go-fuse/v2/fs" - "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) /** Rename a file @@ -161,15 +160,6 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string } newPath := newDir.Child(newName) - oldEntry, status := wfs.maybeLoadEntry(oldPath) - if status != fuse.OK { - return status - } - - if wormEnforced, _ := wfs.wormEnforcedForEntry(oldPath, oldEntry); wormEnforced { - return fuse.EPERM - } - glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath) // update remote filer @@ -233,7 +223,7 @@ func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR if resp.EventNotification.NewEntry != nil { // with new entry, the old entry name also exists. This is the first step to create new entry newEntry := filer.FromPbEntry(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry) - if err := wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, "", newEntry); err != nil { + if err := wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, "", newEntry, false); err != nil { return err } @@ -245,23 +235,20 @@ func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamR sourceInode, targetInode := wfs.inodeToPath.MovePath(oldPath, newPath) if sourceInode != 0 { - fh, foundFh := wfs.fhMap.FindFileHandle(sourceInode) - if foundFh { - if entry := fh.GetEntry(); entry != nil { - entry.Name = newName - } + if fh, foundFh := wfs.fhmap.inode2fh[sourceInode]; foundFh && fh.entry != nil { + fh.entry.Name = newName } // invalidate attr and data - // wfs.fuseServer.InodeNotify(sourceInode, 0, -1) + wfs.fuseServer.InodeNotify(sourceInode, 0, -1) } if targetInode != 0 { // invalidate attr and data - // wfs.fuseServer.InodeNotify(targetInode, 0, -1) + wfs.fuseServer.InodeNotify(targetInode, 0, -1) } } else if resp.EventNotification.OldEntry != nil { // without new entry, only old entry name exists. This is the second step to delete old entry - if err := wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil); err != nil { + if err := wfs.metaCache.AtomicUpdateEntryFromFiler(ctx, util.NewFullPath(resp.Directory, resp.EventNotification.OldEntry.Name), nil, resp.EventNotification.DeleteChunks); err != nil { return err } } diff --git a/weed/mount/weedfs_stats.go b/weed/mount/weedfs_stats.go index 28e992158..21f664889 100644 --- a/weed/mount/weedfs_stats.go +++ b/weed/mount/weedfs_stats.go @@ -3,9 +3,9 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "math" "time" ) @@ -64,43 +64,15 @@ func (wfs *WFS) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.Stat } } - // http://man.he.net/man2/statfs - /* - struct statfs { - __fsword_t f_type; // Type of filesystem (see below) - __fsword_t f_bsize; // Optimal transfer block size - fsblkcnt_t f_blocks; // Total data blocks in filesystem - fsblkcnt_t f_bfree; // Free blocks in filesystem - fsblkcnt_t f_bavail; // Free blocks available to - unprivileged user - fsfilcnt_t f_files; // Total file nodes in filesystem - fsfilcnt_t f_ffree; // Free file nodes in filesystem - fsid_t f_fsid; // Filesystem ID - __fsword_t f_namelen; // Maximum length of filenames - __fsword_t f_frsize; // Fragment size (since Linux 2.6) - __fsword_t f_flags; // Mount flags of filesystem - (since Linux 2.6.36) - __fsword_t f_spare[xxx]; - // Padding bytes reserved for future use - }; - */ - // Compute the total number of available blocks out.Blocks = totalDiskSize / blockSize - if out.Blocks <= 0 { - out.Blocks = 1 - } - // Compute the number of used blocks - numBlocks := usedDiskSize / blockSize - remainingBlocks := int64(out.Blocks) - int64(numBlocks) - if remainingBlocks < 0 { - remainingBlocks = 0 - } + // Compute the number of used blocks + numBlocks := uint64(usedDiskSize / blockSize) // Report the number of free and available blocks for the block size - out.Bfree = uint64(remainingBlocks) - out.Bavail = uint64(remainingBlocks) + out.Bfree = out.Blocks - numBlocks + out.Bavail = out.Blocks - numBlocks out.Bsize = uint32(blockSize) // Report the total number of possible files in the file system (and those free) diff --git a/weed/mount/weedfs_symlink.go b/weed/mount/weedfs_symlink.go index 80081c31c..b8be55011 100644 --- a/weed/mount/weedfs_symlink.go +++ b/weed/mount/weedfs_symlink.go @@ -3,15 +3,13 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/hanwen/go-fuse/v2/fuse" "os" "syscall" "time" - - "github.com/hanwen/go-fuse/v2/fuse" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) /** Create a symbolic link */ @@ -44,8 +42,7 @@ func (wfs *WFS) Symlink(cancel <-chan struct{}, header *fuse.InHeader, target st SymlinkTarget: target, }, }, - Signatures: []int32{wfs.signature}, - SkipCheckParentDirectory: true, + Signatures: []int32{wfs.signature}, } err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { @@ -53,7 +50,7 @@ func (wfs *WFS) Symlink(cancel <-chan struct{}, header *fuse.InHeader, target st wfs.mapPbIdFromLocalToFiler(request.Entry) defer wfs.mapPbIdFromFilerToLocal(request.Entry) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("symlink %s: %v", entryFullPath, err) } diff --git a/weed/mount/weedfs_unsupported.go b/weed/mount/weedfs_unsupported.go index a8342a2fc..2536811b8 100644 --- a/weed/mount/weedfs_unsupported.go +++ b/weed/mount/weedfs_unsupported.go @@ -4,6 +4,22 @@ import "github.com/hanwen/go-fuse/v2/fuse" // https://github.com/libfuse/libfuse/blob/48ae2e72b39b6a31cb2194f6f11786b7ca06aac6/include/fuse.h#L778 +/** + * Copy a range of data from one file to anotherNiels de Vos, 4 years ago: โ€ข libfuse: add copy_file_range() support + * + * Performs an optimized copy between two file descriptors without the + * additional cost of transferring data through the FUSE kernel module + * to user space (glibc) and then back into the FUSE filesystem again. + * + * In case this method is not implemented, applications are expected to + * fall back to a regular file copy. (Some glibc versions did this + * emulation automatically, but the emulation has been removed from all + * glibc release branches.) + */ +func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) (written uint32, code fuse.Status) { + return 0, fuse.ENOSYS +} + /** * Allocates space for an open file * @@ -16,6 +32,13 @@ func (wfs *WFS) Fallocate(cancel <-chan struct{}, in *fuse.FallocateIn) (code fu return fuse.ENOSYS } +/** + * Find next data or hole after the specified offset + */ +func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekOut) fuse.Status { + return fuse.ENOSYS +} + func (wfs *WFS) GetLk(cancel <-chan struct{}, in *fuse.LkIn, out *fuse.LkOut) (code fuse.Status) { return fuse.ENOSYS } diff --git a/weed/mount/weedfs_write.go b/weed/mount/weedfs_write.go index de8a756ce..723ce9c34 100644 --- a/weed/mount/weedfs_write.go +++ b/weed/mount/weedfs_write.go @@ -1,66 +1,84 @@ package mount import ( + "context" "fmt" "io" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType { - return func(reader io.Reader, filename string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { - uploader, err := operation.NewUploader() - if err != nil { - return + return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) { + var fileId, host string + var auth security.EncodedJwt + + if err := wfs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + return util.Retry("assignVolume", func() error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: wfs.option.Replication, + Collection: wfs.option.Collection, + TtlSec: wfs.option.TtlSec, + DiskType: string(wfs.option.DiskType), + DataCenter: wfs.option.DataCenter, + Path: string(fullPath), + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth) + loc := resp.Location + host = wfs.AdjustedUrl(loc) + collection, replication = resp.Collection, resp.Replication + + return nil + }) + }); err != nil { + return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err) } - fileId, uploadResult, err, data := uploader.UploadWithRetry( - wfs, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: wfs.option.Replication, - Collection: wfs.option.Collection, - TtlSec: wfs.option.TtlSec, - DiskType: string(wfs.option.DiskType), - DataCenter: wfs.option.DataCenter, - Path: string(fullPath), - }, - &operation.UploadOption{ - Filename: filename, - Cipher: wfs.option.Cipher, - IsInputCompressed: false, - MimeType: "", - PairMap: nil, - }, - func(host, fileId string) string { - fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - if wfs.option.VolumeServerAccess == "filerProxy" { - fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.getCurrentFiler(), fileId) - } - return fileUrl - }, - reader, - ) - + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + if wfs.option.VolumeServerAccess == "filerProxy" { + fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.getCurrentFiler(), fileId) + } + uploadOption := &operation.UploadOption{ + UploadUrl: fileUrl, + Filename: filename, + Cipher: wfs.option.Cipher, + IsInputCompressed: false, + MimeType: "", + PairMap: nil, + Jwt: auth, + } + uploadResult, err, data := operation.Upload(reader, uploadOption) if err != nil { - glog.V(0).Infof("upload data %v: %v", filename, err) - return nil, fmt.Errorf("upload data: %w", err) + glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload data: %v", err) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v: %v", filename, err) - return nil, fmt.Errorf("upload result: %v", uploadResult.Error) + glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error) } if offset == 0 { wfs.chunkCache.SetChunk(fileId, data) } - chunk = uploadResult.ToPbFileChunk(fileId, offset, tsNs) - return chunk, nil + chunk = uploadResult.ToPbFileChunk(fileId, offset) + return chunk, collection, replication, nil } } diff --git a/weed/mount/weedfs_xattr.go b/weed/mount/weedfs_xattr.go index 78acaafc8..64cc0f6f0 100644 --- a/weed/mount/weedfs_xattr.go +++ b/weed/mount/weedfs_xattr.go @@ -1,15 +1,11 @@ -//go:build !freebsd -// +build !freebsd - package mount import ( + "github.com/hanwen/go-fuse/v2/fuse" + sys "golang.org/x/sys/unix" "runtime" "strings" "syscall" - - "github.com/hanwen/go-fuse/v2/fuse" - sys "golang.org/x/sys/unix" ) const ( @@ -64,19 +60,18 @@ func (wfs *WFS) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr str // SetXAttr writes an extended attribute. // https://man7.org/linux/man-pages/man2/setxattr.2.html +// By default (i.e., flags is zero), the extended attribute will be +// created if it does not exist, or the value will be replaced if +// the attribute already exists. To modify these semantics, one of +// the following values can be specified in flags: // -// By default (i.e., flags is zero), the extended attribute will be -// created if it does not exist, or the value will be replaced if -// the attribute already exists. To modify these semantics, one of -// the following values can be specified in flags: +// XATTR_CREATE +// Perform a pure create, which fails if the named attribute +// exists already. // -// XATTR_CREATE -// Perform a pure create, which fails if the named attribute -// exists already. -// -// XATTR_REPLACE -// Perform a pure replace operation, which fails if the named -// attribute does not already exist. +// XATTR_REPLACE +// Perform a pure replace operation, which fails if the named +// attribute does not already exist. func (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status { if wfs.option.DisableXAttr { @@ -111,9 +106,6 @@ func (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr st if status != fuse.OK { return status } - if entry == nil { - return fuse.ENOENT - } if fh != nil { fh.entryLock.Lock() defer fh.entryLock.Unlock() @@ -135,11 +127,6 @@ func (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr st entry.Extended[XATTR_PREFIX+attr] = data } - if fh != nil { - fh.dirtyMetadata = true - return fuse.OK - } - return wfs.saveEntry(path, entry) } @@ -194,9 +181,6 @@ func (wfs *WFS) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr if status != fuse.OK { return status } - if entry == nil { - return fuse.OK - } if fh != nil { fh.entryLock.Lock() defer fh.entryLock.Unlock() diff --git a/weed/mount/weedfs_xattr_freebsd.go b/weed/mount/weedfs_xattr_freebsd.go deleted file mode 100644 index 01cb748e5..000000000 --- a/weed/mount/weedfs_xattr_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package mount - -import ( - "syscall" - - "github.com/hanwen/go-fuse/v2/fuse" -) - -func (wfs *WFS) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (size uint32, code fuse.Status) { - - return 0, fuse.Status(syscall.ENOTSUP) -} - -func (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status { - - return fuse.Status(syscall.ENOTSUP) -} - -func (wfs *WFS) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (n uint32, code fuse.Status) { - - return 0, fuse.Status(syscall.ENOTSUP) -} - -func (wfs *WFS) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status { - - return fuse.Status(syscall.ENOTSUP) -} diff --git a/weed/mount/wfs_filer_client.go b/weed/mount/wfs_filer_client.go index 5dd09363f..e8feb8342 100644 --- a/weed/mount/wfs_filer_client.go +++ b/weed/mount/wfs_filer_client.go @@ -1,14 +1,12 @@ package mount import ( - "sync/atomic" - + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) var _ = filer_pb.FilerClient(&WFS{}) @@ -17,25 +15,25 @@ func (wfs *WFS) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFile return util.Retry("filer grpc", func() error { - i := atomic.LoadInt32(&wfs.option.filerIndex) + i := wfs.option.filerIndex n := len(wfs.option.FilerAddresses) for x := 0; x < n; x++ { filerGrpcAddress := wfs.option.FilerAddresses[i].ToGrpcAddress() - err = pb.WithGrpcClient(streamingMode, wfs.signature, func(grpcConnection *grpc.ClientConn) error { + err = pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, filerGrpcAddress, false, wfs.option.GrpcDialOption) + }, filerGrpcAddress, wfs.option.GrpcDialOption) if err != nil { glog.V(0).Infof("WithFilerClient %d %v: %v", x, filerGrpcAddress, err) } else { - atomic.StoreInt32(&wfs.option.filerIndex, i) + wfs.option.filerIndex = i return nil } i++ - if i >= int32(n) { + if i >= n { i = 0 } @@ -51,7 +49,3 @@ func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string { } return location.Url } - -func (wfs *WFS) GetDataCenter() string { - return wfs.option.DataCenter -} diff --git a/weed/mount/wfs_save.go b/weed/mount/wfs_save.go index 56ad47011..0cac30453 100644 --- a/weed/mount/wfs_save.go +++ b/weed/mount/wfs_save.go @@ -3,11 +3,11 @@ package mount import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" "syscall" ) @@ -60,7 +60,7 @@ func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) { } func checkName(name string) fuse.Status { - if len(name) >= 4096 { + if len(name) >= 256 { return fuse.Status(syscall.ENAMETOOLONG) } return fuse.OK diff --git a/weed/mq/README.md b/weed/mq/README.md deleted file mode 100644 index 6a641c009..000000000 --- a/weed/mq/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# SeaweedMQ Message Queue on SeaweedFS (WIP, not ready) - -## What are the use cases it is designed for? - -Message queues are like water pipes. Messages flow in the pipes to their destinations. - -However, what if a flood comes? Of course, you can increase the number of partitions, add more brokers, restart, -and watch the traffic level closely. - -Sometimes the flood is expected. For example, backfill some old data in batch, and switch to online messages. -You may want to ensure enough brokers to handle the data and reduce them later to cut cost. - -SeaweedMQ is designed for use cases that need to: -* Receive and save large number of messages. -* Handle spike traffic automatically. - -## What is special about SeaweedMQ? - -* Separate computation and storage nodes to scale independently. - * Unlimited storage space by adding volume servers. - * Unlimited message brokers to handle incoming messages. - * Offline messages can be operated as normal files. -* Scale up and down with auto split and merge message topics. - * Topics can automatically split into segments when traffic increases, and vice verse. -* Pass messages by reference instead of copying. - * Clients can optionally upload the messages first and just submit the references. - * Drastically reduce the broker load. -* Stateless brokers - * All brokers are equal. One broker is dynamically picked as the leader. - * Add brokers at any time. - * Allow rolling restart brokers or remove brokers at a pace. - -# Design - -# How it works? - -Brokers are just computation nodes without storage. When a broker starts, it reports itself to masters. -Among all the brokers, one of them will be selected as the leader by the masters. - -A topic needs to define its partition key on its messages. - -Messages for a topic are divided into segments. One segment can cover a range of partitions. A segment can -be split into 2 segments, or 2 neighboring segments can be merged back to one segment. - -During write time, the client will ask the broker leader for a few brokers to process the segment. - -The broker leader will check whether the segment already has assigned the brokers. If not, select a few brokers based -on their loads, save the selection into filer, and tell the client. - -The client will write the messages for this segment to the selected brokers. - -## Failover - -The broker leader does not contain any state. If it fails, the masters will select a different broker. - -For a segment, if any one of the selected brokers is down, the remaining brokers should try to write received messages -to the filer, and close the segment to the clients. - -Then the clients should start a new segment. The masters should assign other healthy brokers to handle the new segment. - -So any brokers can go down without losing data. - -## Auto Split or Merge - -(The idea is learned from Pravega.) - -The brokers should report its traffic load to the broker leader periodically. - -If any segment has too much load, the broker leader will ask the brokers to tell the client to -close current one and create two new segments. - -If 2 neighboring segments have the combined load below average load per segment, the broker leader will ask -the brokers to tell the client to close this 2 segments and create a new segment. diff --git a/weed/mq/agent/agent_grpc_pub_session.go b/weed/mq/agent/agent_grpc_pub_session.go deleted file mode 100644 index 7cf857da1..000000000 --- a/weed/mq/agent/agent_grpc_pub_session.go +++ /dev/null @@ -1,53 +0,0 @@ -package agent - -import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/mq/client/pub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "log/slog" - "math/rand/v2" -) - -func (a *MessageQueueAgent) StartPublishSession(ctx context.Context, req *mq_agent_pb.StartPublishSessionRequest) (*mq_agent_pb.StartPublishSessionResponse, error) { - sessionId := rand.Int64() - - topicPublisher, err := pub_client.NewTopicPublisher( - &pub_client.PublisherConfiguration{ - Topic: topic.NewTopic(req.Topic.Namespace, req.Topic.Name), - PartitionCount: req.PartitionCount, - Brokers: a.brokersList(), - PublisherName: req.PublisherName, - RecordType: req.RecordType, - }) - if err != nil { - return nil, err - } - - a.publishersLock.Lock() - a.publishers[SessionId(sessionId)] = &SessionEntry[*pub_client.TopicPublisher]{ - entry: topicPublisher, - } - a.publishersLock.Unlock() - - return &mq_agent_pb.StartPublishSessionResponse{ - SessionId: sessionId, - }, nil -} - -func (a *MessageQueueAgent) ClosePublishSession(ctx context.Context, req *mq_agent_pb.ClosePublishSessionRequest) (*mq_agent_pb.ClosePublishSessionResponse, error) { - var finishErr string - a.publishersLock.Lock() - publisherEntry, found := a.publishers[SessionId(req.SessionId)] - if found { - if err := publisherEntry.entry.FinishPublish(); err != nil { - finishErr = err.Error() - slog.Warn("failed to finish publish", "error", err) - } - delete(a.publishers, SessionId(req.SessionId)) - } - a.publishersLock.Unlock() - return &mq_agent_pb.ClosePublishSessionResponse{ - Error: finishErr, - }, nil -} diff --git a/weed/mq/agent/agent_grpc_publish.go b/weed/mq/agent/agent_grpc_publish.go deleted file mode 100644 index 0b666ff6d..000000000 --- a/weed/mq/agent/agent_grpc_publish.go +++ /dev/null @@ -1,44 +0,0 @@ -package agent - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" -) - -func (a *MessageQueueAgent) PublishRecord(stream mq_agent_pb.SeaweedMessagingAgent_PublishRecordServer) error { - m, err := stream.Recv() - if err != nil { - return err - } - sessionId := SessionId(m.SessionId) - a.publishersLock.RLock() - publisherEntry, found := a.publishers[sessionId] - a.publishersLock.RUnlock() - if !found { - return fmt.Errorf("publish session id %d not found", sessionId) - } - defer func() { - a.publishersLock.Lock() - delete(a.publishers, sessionId) - a.publishersLock.Unlock() - }() - - if m.Value != nil { - if err := publisherEntry.entry.PublishRecord(m.Key, m.Value); err != nil { - return err - } - } - - for { - m, err = stream.Recv() - if err != nil { - return err - } - if m.Value == nil { - continue - } - if err := publisherEntry.entry.PublishRecord(m.Key, m.Value); err != nil { - return err - } - } -} diff --git a/weed/mq/agent/agent_grpc_subscribe.go b/weed/mq/agent/agent_grpc_subscribe.go deleted file mode 100644 index 2deaab9c2..000000000 --- a/weed/mq/agent/agent_grpc_subscribe.go +++ /dev/null @@ -1,106 +0,0 @@ -package agent - -import ( - "context" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/protobuf/proto" -) - -func (a *MessageQueueAgent) SubscribeRecord(stream mq_agent_pb.SeaweedMessagingAgent_SubscribeRecordServer) error { - // the first message is the subscribe request - // it should only contain the session id - initMessage, err := stream.Recv() - if err != nil { - return err - } - - subscriber := a.handleInitSubscribeRecordRequest(stream.Context(), initMessage.Init) - - var lastErr error - executors := util.NewLimitedConcurrentExecutor(int(subscriber.SubscriberConfig.SlidingWindowSize)) - subscriber.SetOnDataMessageFn(func(m *mq_pb.SubscribeMessageResponse_Data) { - executors.Execute(func() { - record := &schema_pb.RecordValue{} - err := proto.Unmarshal(m.Data.Value, record) - if err != nil { - glog.V(0).Infof("unmarshal record value: %v", err) - if lastErr == nil { - lastErr = err - } - return - } - if sendErr := stream.Send(&mq_agent_pb.SubscribeRecordResponse{ - Key: m.Data.Key, - Value: record, - TsNs: m.Data.TsNs, - }); sendErr != nil { - glog.V(0).Infof("send record: %v", sendErr) - if lastErr == nil { - lastErr = sendErr - } - } - }) - }) - - go func() { - subErr := subscriber.Subscribe() - if subErr != nil { - glog.V(0).Infof("subscriber %s subscribe: %v", subscriber.SubscriberConfig.String(), subErr) - if lastErr == nil { - lastErr = subErr - } - } - }() - - for { - m, err := stream.Recv() - if err != nil { - glog.V(0).Infof("subscriber %s receive: %v", subscriber.SubscriberConfig.String(), err) - return err - } - if m != nil { - subscriber.PartitionOffsetChan <- sub_client.KeyedTimestamp{ - Key: m.AckKey, - TsNs: m.AckSequence, // Note: AckSequence should be renamed to AckTsNs in agent protocol - } - } - } -} - -func (a *MessageQueueAgent) handleInitSubscribeRecordRequest(ctx context.Context, req *mq_agent_pb.SubscribeRecordRequest_InitSubscribeRecordRequest) *sub_client.TopicSubscriber { - - subscriberConfig := &sub_client.SubscriberConfiguration{ - ConsumerGroup: req.ConsumerGroup, - ConsumerGroupInstanceId: req.ConsumerGroupInstanceId, - GrpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()), - MaxPartitionCount: req.MaxSubscribedPartitions, - SlidingWindowSize: req.SlidingWindowSize, - } - - contentConfig := &sub_client.ContentConfiguration{ - Topic: topic.FromPbTopic(req.Topic), - Filter: req.Filter, - PartitionOffsets: req.PartitionOffsets, - OffsetType: req.OffsetType, - OffsetTsNs: req.OffsetTsNs, - } - - topicSubscriber := sub_client.NewTopicSubscriber( - ctx, - a.brokersList(), - subscriberConfig, - contentConfig, - make(chan sub_client.KeyedTimestamp, 1024), - ) - - return topicSubscriber -} diff --git a/weed/mq/agent/agent_server.go b/weed/mq/agent/agent_server.go deleted file mode 100644 index f1d6ec679..000000000 --- a/weed/mq/agent/agent_server.go +++ /dev/null @@ -1,55 +0,0 @@ -package agent - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/client/pub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "google.golang.org/grpc" - "sync" -) - -type SessionId int64 -type SessionEntry[T any] struct { - entry T -} - -type MessageQueueAgentOptions struct { - SeedBrokers []pb.ServerAddress -} - -type MessageQueueAgent struct { - mq_agent_pb.UnimplementedSeaweedMessagingAgentServer - option *MessageQueueAgentOptions - brokers []pb.ServerAddress - grpcDialOption grpc.DialOption - publishers map[SessionId]*SessionEntry[*pub_client.TopicPublisher] - publishersLock sync.RWMutex - subscribers map[SessionId]*SessionEntry[*sub_client.TopicSubscriber] - subscribersLock sync.RWMutex -} - -func NewMessageQueueAgent(option *MessageQueueAgentOptions, grpcDialOption grpc.DialOption) *MessageQueueAgent { - - // initialize brokers which may change later - var brokers []pb.ServerAddress - for _, broker := range option.SeedBrokers { - brokers = append(brokers, broker) - } - - return &MessageQueueAgent{ - option: option, - brokers: brokers, - grpcDialOption: grpcDialOption, - publishers: make(map[SessionId]*SessionEntry[*pub_client.TopicPublisher]), - subscribers: make(map[SessionId]*SessionEntry[*sub_client.TopicSubscriber]), - } -} - -func (a *MessageQueueAgent) brokersList() []string { - var brokers []string - for _, broker := range a.brokers { - brokers = append(brokers, broker.String()) - } - return brokers -} diff --git a/weed/mq/broker/broker_connect.go b/weed/mq/broker/broker_connect.go deleted file mode 100644 index c0f2192a4..000000000 --- a/weed/mq/broker/broker_connect.go +++ /dev/null @@ -1,103 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "io" - "math/rand/v2" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -// BrokerConnectToBalancer connects to the broker balancer and sends stats -func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stopCh chan struct{}) error { - - self := string(b.option.BrokerAddress()) - - glog.V(0).Infof("broker %s connects to balancer %s", self, brokerBalancer) - if brokerBalancer == "" { - return fmt.Errorf("no balancer found") - } - - // connect to the lock owner - return pb.WithBrokerGrpcClient(true, brokerBalancer, b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - stream, err := client.PublisherToPubBalancer(context.Background()) - if err != nil { - return fmt.Errorf("connect to balancer %v: %w", brokerBalancer, err) - } - defer stream.CloseSend() - err = stream.Send(&mq_pb.PublisherToPubBalancerRequest{ - Message: &mq_pb.PublisherToPubBalancerRequest_Init{ - Init: &mq_pb.PublisherToPubBalancerRequest_InitMessage{ - Broker: self, - }, - }, - }) - if err != nil { - return fmt.Errorf("send init message: %w", err) - } - - for { - // check if the broker is stopping - select { - case <-stopCh: - return nil - default: - } - - stats := b.localTopicManager.CollectStats(time.Second * 5) - err = stream.Send(&mq_pb.PublisherToPubBalancerRequest{ - Message: &mq_pb.PublisherToPubBalancerRequest_Stats{ - Stats: stats, - }, - }) - if err != nil { - if err == io.EOF { - // return err - } - return fmt.Errorf("send stats message to balancer %s: %v", brokerBalancer, err) - } - // glog.V(3).Infof("sent stats: %+v", stats) - - time.Sleep(time.Millisecond*5000 + time.Duration(rand.IntN(1000))*time.Millisecond) - } - }) -} - -func (b *MessageQueueBroker) KeepConnectedToBrokerBalancer(newBrokerBalancerCh chan string) { - var stopPrevRunChan chan struct{} - for { - select { - case newBrokerBalancer := <-newBrokerBalancerCh: - if stopPrevRunChan != nil { - close(stopPrevRunChan) - stopPrevRunChan = nil - } - thisRunStopChan := make(chan struct{}) - if newBrokerBalancer != "" { - stopPrevRunChan = thisRunStopChan - go func() { - for { - err := b.BrokerConnectToBalancer(newBrokerBalancer, thisRunStopChan) - if err != nil { - glog.V(0).Infof("connect to balancer %s: %v", newBrokerBalancer, err) - time.Sleep(time.Second) - } else { - break - } - - select { - case <-thisRunStopChan: - return - default: - } - - } - }() - } - } - } -} diff --git a/weed/mq/broker/broker_errors.go b/weed/mq/broker/broker_errors.go deleted file mode 100644 index b3d4cc42c..000000000 --- a/weed/mq/broker/broker_errors.go +++ /dev/null @@ -1,132 +0,0 @@ -package broker - -// Broker Error Codes -// These codes are used internally by the broker and can be mapped to Kafka protocol error codes -const ( - // Success - BrokerErrorNone int32 = 0 - - // General broker errors - BrokerErrorUnknownServerError int32 = 1 - BrokerErrorTopicNotFound int32 = 2 - BrokerErrorPartitionNotFound int32 = 3 - BrokerErrorNotLeaderOrFollower int32 = 6 // Maps to Kafka ErrorCodeNotLeaderOrFollower - BrokerErrorRequestTimedOut int32 = 7 - BrokerErrorBrokerNotAvailable int32 = 8 - BrokerErrorMessageTooLarge int32 = 10 - BrokerErrorNetworkException int32 = 13 - BrokerErrorOffsetLoadInProgress int32 = 14 - BrokerErrorInvalidRecord int32 = 42 - BrokerErrorTopicAlreadyExists int32 = 36 - BrokerErrorInvalidPartitions int32 = 37 - BrokerErrorInvalidConfig int32 = 40 - - // Publisher/connection errors - BrokerErrorPublisherNotFound int32 = 100 - BrokerErrorConnectionFailed int32 = 101 - BrokerErrorFollowerConnectionFailed int32 = 102 -) - -// BrokerErrorInfo contains metadata about a broker error -type BrokerErrorInfo struct { - Code int32 - Name string - Description string - KafkaCode int16 // Corresponding Kafka protocol error code -} - -// BrokerErrors maps broker error codes to their metadata and Kafka equivalents -var BrokerErrors = map[int32]BrokerErrorInfo{ - BrokerErrorNone: { - Code: BrokerErrorNone, Name: "NONE", - Description: "No error", KafkaCode: 0, - }, - BrokerErrorUnknownServerError: { - Code: BrokerErrorUnknownServerError, Name: "UNKNOWN_SERVER_ERROR", - Description: "Unknown server error", KafkaCode: 1, - }, - BrokerErrorTopicNotFound: { - Code: BrokerErrorTopicNotFound, Name: "TOPIC_NOT_FOUND", - Description: "Topic not found", KafkaCode: 3, // UNKNOWN_TOPIC_OR_PARTITION - }, - BrokerErrorPartitionNotFound: { - Code: BrokerErrorPartitionNotFound, Name: "PARTITION_NOT_FOUND", - Description: "Partition not found", KafkaCode: 3, // UNKNOWN_TOPIC_OR_PARTITION - }, - BrokerErrorNotLeaderOrFollower: { - Code: BrokerErrorNotLeaderOrFollower, Name: "NOT_LEADER_OR_FOLLOWER", - Description: "Not leader or follower for this partition", KafkaCode: 6, - }, - BrokerErrorRequestTimedOut: { - Code: BrokerErrorRequestTimedOut, Name: "REQUEST_TIMED_OUT", - Description: "Request timed out", KafkaCode: 7, - }, - BrokerErrorBrokerNotAvailable: { - Code: BrokerErrorBrokerNotAvailable, Name: "BROKER_NOT_AVAILABLE", - Description: "Broker not available", KafkaCode: 8, - }, - BrokerErrorMessageTooLarge: { - Code: BrokerErrorMessageTooLarge, Name: "MESSAGE_TOO_LARGE", - Description: "Message size exceeds limit", KafkaCode: 10, - }, - BrokerErrorNetworkException: { - Code: BrokerErrorNetworkException, Name: "NETWORK_EXCEPTION", - Description: "Network error", KafkaCode: 13, - }, - BrokerErrorOffsetLoadInProgress: { - Code: BrokerErrorOffsetLoadInProgress, Name: "OFFSET_LOAD_IN_PROGRESS", - Description: "Offset loading in progress", KafkaCode: 14, - }, - BrokerErrorInvalidRecord: { - Code: BrokerErrorInvalidRecord, Name: "INVALID_RECORD", - Description: "Invalid record", KafkaCode: 42, - }, - BrokerErrorTopicAlreadyExists: { - Code: BrokerErrorTopicAlreadyExists, Name: "TOPIC_ALREADY_EXISTS", - Description: "Topic already exists", KafkaCode: 36, - }, - BrokerErrorInvalidPartitions: { - Code: BrokerErrorInvalidPartitions, Name: "INVALID_PARTITIONS", - Description: "Invalid partition count", KafkaCode: 37, - }, - BrokerErrorInvalidConfig: { - Code: BrokerErrorInvalidConfig, Name: "INVALID_CONFIG", - Description: "Invalid configuration", KafkaCode: 40, - }, - BrokerErrorPublisherNotFound: { - Code: BrokerErrorPublisherNotFound, Name: "PUBLISHER_NOT_FOUND", - Description: "Publisher not found", KafkaCode: 1, // UNKNOWN_SERVER_ERROR - }, - BrokerErrorConnectionFailed: { - Code: BrokerErrorConnectionFailed, Name: "CONNECTION_FAILED", - Description: "Connection failed", KafkaCode: 13, // NETWORK_EXCEPTION - }, - BrokerErrorFollowerConnectionFailed: { - Code: BrokerErrorFollowerConnectionFailed, Name: "FOLLOWER_CONNECTION_FAILED", - Description: "Failed to connect to follower brokers", KafkaCode: 13, // NETWORK_EXCEPTION - }, -} - -// GetBrokerErrorInfo returns error information for the given broker error code -func GetBrokerErrorInfo(code int32) BrokerErrorInfo { - if info, exists := BrokerErrors[code]; exists { - return info - } - return BrokerErrorInfo{ - Code: code, Name: "UNKNOWN", Description: "Unknown broker error code", KafkaCode: 1, - } -} - -// GetKafkaErrorCode returns the corresponding Kafka protocol error code for a broker error -func GetKafkaErrorCode(brokerErrorCode int32) int16 { - return GetBrokerErrorInfo(brokerErrorCode).KafkaCode -} - -// CreateBrokerError creates a structured broker error with both error code and message -func CreateBrokerError(code int32, message string) (int32, string) { - info := GetBrokerErrorInfo(code) - if message == "" { - message = info.Description - } - return code, message -} diff --git a/weed/mq/broker/broker_grpc_admin.go b/weed/mq/broker/broker_grpc_admin.go deleted file mode 100644 index 3c9ef282c..000000000 --- a/weed/mq/broker/broker_grpc_admin.go +++ /dev/null @@ -1,23 +0,0 @@ -package broker - -import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func (b *MessageQueueBroker) FindBrokerLeader(c context.Context, request *mq_pb.FindBrokerLeaderRequest) (*mq_pb.FindBrokerLeaderResponse, error) { - ret := &mq_pb.FindBrokerLeaderResponse{} - err := b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.FindLockOwner(context.Background(), &filer_pb.FindLockOwnerRequest{ - Name: pub_balancer.LockBrokerBalancer, - }) - if err != nil { - return err - } - ret.Broker = resp.Owner - return nil - }) - return ret, err -} diff --git a/weed/mq/broker/broker_grpc_assign.go b/weed/mq/broker/broker_grpc_assign.go deleted file mode 100644 index 3f502cb3c..000000000 --- a/weed/mq/broker/broker_grpc_assign.go +++ /dev/null @@ -1,106 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/logstore" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// AssignTopicPartitions Runs on the assigned broker, to execute the topic partition assignment -func (b *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *mq_pb.AssignTopicPartitionsRequest) (*mq_pb.AssignTopicPartitionsResponse, error) { - ret := &mq_pb.AssignTopicPartitionsResponse{} - - // drain existing topic partition subscriptions - for _, assignment := range request.BrokerPartitionAssignments { - t := topic.FromPbTopic(request.Topic) - partition := topic.FromPbPartition(assignment.Partition) - b.accessLock.Lock() - if request.IsDraining { - // TODO drain existing topic partition subscriptions - b.localTopicManager.RemoveLocalPartition(t, partition) - } else { - var localPartition *topic.LocalPartition - if localPartition = b.localTopicManager.GetLocalPartition(t, partition); localPartition == nil { - localPartition = topic.NewLocalPartition(partition, b.option.LogFlushInterval, b.genLogFlushFunc(t, partition), logstore.GenMergedReadFunc(b, t, partition)) - - // Initialize offset from existing data to ensure continuity on restart - b.initializePartitionOffsetFromExistingData(localPartition, t, partition) - - b.localTopicManager.AddLocalPartition(t, localPartition) - } else { - } - } - b.accessLock.Unlock() - } - - // if is leader, notify the followers to drain existing topic partition subscriptions - if request.IsLeader { - for _, brokerPartition := range request.BrokerPartitionAssignments { - if follower := brokerPartition.FollowerBroker; follower != "" { - err := pb.WithBrokerGrpcClient(false, follower, b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - _, err := client.AssignTopicPartitions(context.Background(), request) - return err - }) - if err != nil { - return ret, err - } - } - } - } - - return ret, nil -} - -// called by broker leader to drain existing partitions. -// new/updated partitions will be detected by broker from the filer -func (b *MessageQueueBroker) assignTopicPartitionsToBrokers(ctx context.Context, t *schema_pb.Topic, assignments []*mq_pb.BrokerPartitionAssignment, isAdd bool) error { - // notify the brokers to create the topic partitions in parallel - var wg sync.WaitGroup - for _, bpa := range assignments { - wg.Add(1) - go func(bpa *mq_pb.BrokerPartitionAssignment) { - defer wg.Done() - if doCreateErr := b.withBrokerClient(false, pb.ServerAddress(bpa.LeaderBroker), func(client mq_pb.SeaweedMessagingClient) error { - _, doCreateErr := client.AssignTopicPartitions(ctx, &mq_pb.AssignTopicPartitionsRequest{ - Topic: t, - BrokerPartitionAssignments: []*mq_pb.BrokerPartitionAssignment{ - { - Partition: bpa.Partition, - }, - }, - IsLeader: true, - IsDraining: !isAdd, - }) - if doCreateErr != nil { - if !isAdd { - return fmt.Errorf("drain topic %s %v on %s: %v", t, bpa.LeaderBroker, bpa.Partition, doCreateErr) - } else { - return fmt.Errorf("create topic %s %v on %s: %v", t, bpa.LeaderBroker, bpa.Partition, doCreateErr) - } - } - brokerStats, found := b.PubBalancer.Brokers.Get(bpa.LeaderBroker) - if !found { - brokerStats = pub_balancer.NewBrokerStats() - if !b.PubBalancer.Brokers.SetIfAbsent(bpa.LeaderBroker, brokerStats) { - brokerStats, _ = b.PubBalancer.Brokers.Get(bpa.LeaderBroker) - } - } - brokerStats.RegisterAssignment(t, bpa.Partition, isAdd) - return nil - }); doCreateErr != nil { - glog.Errorf("create topic %s partition %+v on %s: %v", t, bpa.Partition, bpa.LeaderBroker, doCreateErr) - } - }(bpa) - } - wg.Wait() - - return nil -} diff --git a/weed/mq/broker/broker_grpc_balance.go b/weed/mq/broker/broker_grpc_balance.go deleted file mode 100644 index 54634c9d1..000000000 --- a/weed/mq/broker/broker_grpc_balance.go +++ /dev/null @@ -1,27 +0,0 @@ -package broker - -import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func (b *MessageQueueBroker) BalanceTopics(ctx context.Context, request *mq_pb.BalanceTopicsRequest) (resp *mq_pb.BalanceTopicsResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.BalanceTopics(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - ret := &mq_pb.BalanceTopicsResponse{} - - actions := b.PubBalancer.BalancePublishers() - err = b.PubBalancer.ExecuteBalanceAction(actions, b.grpcDialOption) - - return ret, err -} diff --git a/weed/mq/broker/broker_grpc_configure.go b/weed/mq/broker/broker_grpc_configure.go deleted file mode 100644 index 3d3ed0d1c..000000000 --- a/weed/mq/broker/broker_grpc_configure.go +++ /dev/null @@ -1,122 +0,0 @@ -package broker - -import ( - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// ConfigureTopic Runs on any broker, but proxied to the balancer if not the balancer -// It generates an assignments based on existing allocations, -// and then assign the partitions to the brokers. -func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.ConfigureTopicRequest) (resp *mq_pb.ConfigureTopicResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.ConfigureTopic(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - // Validate flat schema format - if request.MessageRecordType != nil && len(request.KeyColumns) > 0 { - if err := schema.ValidateKeyColumns(request.MessageRecordType, request.KeyColumns); err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid key columns: %v", err) - } - } - - t := topic.FromPbTopic(request.Topic) - var readErr, assignErr error - resp, readErr = b.fca.ReadTopicConfFromFiler(t) - if readErr != nil { - glog.V(0).Infof("read topic %s conf: %v", request.Topic, readErr) - } - - if resp != nil { - assignErr = b.ensureTopicActiveAssignments(t, resp) - // no need to assign directly. - // The added or updated assignees will read from filer directly. - // The gone assignees will die by themselves. - } - - if readErr == nil && assignErr == nil && len(resp.BrokerPartitionAssignments) == int(request.PartitionCount) { - // Check if schema needs to be updated - schemaChanged := false - - if request.MessageRecordType != nil && resp.MessageRecordType != nil { - if !proto.Equal(request.MessageRecordType, resp.MessageRecordType) { - schemaChanged = true - } - } else if request.MessageRecordType != nil || resp.MessageRecordType != nil { - schemaChanged = true - } - - if !schemaChanged { - glog.V(0).Infof("existing topic partitions %d: %+v", len(resp.BrokerPartitionAssignments), resp.BrokerPartitionAssignments) - return resp, nil - } - - // Update schema in existing configuration - resp.MessageRecordType = request.MessageRecordType - resp.KeyColumns = request.KeyColumns - resp.SchemaFormat = request.SchemaFormat - - if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil { - return nil, fmt.Errorf("update topic schemas: %w", err) - } - - // Invalidate topic cache since we just updated the topic - b.invalidateTopicCache(t) - - glog.V(0).Infof("updated schemas for topic %s", request.Topic) - return resp, nil - } - - if resp != nil && len(resp.BrokerPartitionAssignments) > 0 { - if cancelErr := b.assignTopicPartitionsToBrokers(ctx, request.Topic, resp.BrokerPartitionAssignments, false); cancelErr != nil { - glog.V(1).Infof("cancel old topic %s partitions assignments %v : %v", request.Topic, resp.BrokerPartitionAssignments, cancelErr) - } - } - resp = &mq_pb.ConfigureTopicResponse{} - if b.PubBalancer.Brokers.IsEmpty() { - return nil, status.Errorf(codes.Unavailable, "no broker available: %v", pub_balancer.ErrNoBroker) - } - resp.BrokerPartitionAssignments = pub_balancer.AllocateTopicPartitions(b.PubBalancer.Brokers, request.PartitionCount) - // Set flat schema format - resp.MessageRecordType = request.MessageRecordType - resp.KeyColumns = request.KeyColumns - resp.SchemaFormat = request.SchemaFormat - resp.Retention = request.Retention - - // save the topic configuration on filer - if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil { - return nil, fmt.Errorf("configure topic: %w", err) - } - - // Invalidate topic cache since we just created/updated the topic - b.invalidateTopicCache(t) - - b.PubBalancer.OnPartitionChange(request.Topic, resp.BrokerPartitionAssignments) - - // Actually assign the new partitions to brokers and add to localTopicManager - if assignErr := b.assignTopicPartitionsToBrokers(ctx, request.Topic, resp.BrokerPartitionAssignments, true); assignErr != nil { - glog.Errorf("assign topic %s partitions to brokers: %v", request.Topic, assignErr) - return nil, fmt.Errorf("assign topic partitions: %w", assignErr) - } - - glog.V(0).Infof("ConfigureTopic: topic %s partition assignments: %v", request.Topic, resp.BrokerPartitionAssignments) - - return resp, nil -} diff --git a/weed/mq/broker/broker_grpc_fetch.go b/weed/mq/broker/broker_grpc_fetch.go deleted file mode 100644 index 4eb17d4fb..000000000 --- a/weed/mq/broker/broker_grpc_fetch.go +++ /dev/null @@ -1,164 +0,0 @@ -package broker - -import ( - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -// FetchMessage implements Kafka-style stateless message fetching -// This is the recommended API for Kafka gateway and other stateless clients -// -// Key differences from SubscribeMessage: -// 1. Request/Response pattern (not streaming) -// 2. No session state maintained on broker -// 3. Each request is completely independent -// 4. Safe for concurrent calls at different offsets -// 5. No Subscribe loop cancellation/restart complexity -// -// Design inspired by Kafka's Fetch API: -// - Client manages offset tracking -// - Each fetch is independent -// - No shared state between requests -// - Natural support for concurrent reads -func (b *MessageQueueBroker) FetchMessage(ctx context.Context, req *mq_pb.FetchMessageRequest) (*mq_pb.FetchMessageResponse, error) { - glog.V(3).Infof("[FetchMessage] CALLED!") // DEBUG: ensure this shows up - - // Validate request - if req.Topic == nil { - return nil, fmt.Errorf("missing topic") - } - if req.Partition == nil { - return nil, fmt.Errorf("missing partition") - } - - t := topic.FromPbTopic(req.Topic) - partition := topic.FromPbPartition(req.Partition) - - glog.V(3).Infof("[FetchMessage] %s/%s partition=%v offset=%d maxMessages=%d maxBytes=%d consumer=%s/%s", - t.Namespace, t.Name, partition, req.StartOffset, req.MaxMessages, req.MaxBytes, - req.ConsumerGroup, req.ConsumerId) - - // Get local partition - localPartition, err := b.GetOrGenerateLocalPartition(t, partition) - if err != nil { - glog.Errorf("[FetchMessage] Failed to get partition: %v", err) - return &mq_pb.FetchMessageResponse{ - Error: fmt.Sprintf("partition not found: %v", err), - ErrorCode: 1, - }, nil - } - if localPartition == nil { - return &mq_pb.FetchMessageResponse{ - Error: "partition not found", - ErrorCode: 1, - }, nil - } - - // Set defaults for limits - maxMessages := int(req.MaxMessages) - if maxMessages <= 0 { - maxMessages = 100 // Reasonable default - } - if maxMessages > 10000 { - maxMessages = 10000 // Safety limit - } - - maxBytes := int(req.MaxBytes) - if maxBytes <= 0 { - maxBytes = 4 * 1024 * 1024 // 4MB default - } - if maxBytes > 100*1024*1024 { - maxBytes = 100 * 1024 * 1024 // 100MB safety limit - } - - // TODO: Long poll support disabled for now (causing timeouts) - // Check if we should wait for data (long poll support) - // shouldWait := req.MaxWaitMs > 0 - // if shouldWait { - // // Wait for data to be available (with timeout) - // dataAvailable := localPartition.LogBuffer.WaitForDataWithTimeout(req.StartOffset, int(req.MaxWaitMs)) - // if !dataAvailable { - // // Timeout - return empty response - // glog.V(3).Infof("[FetchMessage] Timeout waiting for data at offset %d", req.StartOffset) - // return &mq_pb.FetchMessageResponse{ - // Messages: []*mq_pb.DataMessage{}, - // HighWaterMark: localPartition.LogBuffer.GetHighWaterMark(), - // LogStartOffset: localPartition.LogBuffer.GetLogStartOffset(), - // EndOfPartition: false, - // NextOffset: req.StartOffset, - // }, nil - // } - // } - - // Check if disk read function is configured - if localPartition.LogBuffer.ReadFromDiskFn == nil { - glog.Errorf("[FetchMessage] LogBuffer.ReadFromDiskFn is nil! This should not happen.") - } else { - glog.V(3).Infof("[FetchMessage] LogBuffer.ReadFromDiskFn is configured") - } - - // Use requested offset directly - let ReadMessagesAtOffset handle disk reads - requestedOffset := req.StartOffset - - // Read messages from LogBuffer (stateless read) - logEntries, nextOffset, highWaterMark, endOfPartition, err := localPartition.LogBuffer.ReadMessagesAtOffset( - requestedOffset, - maxMessages, - maxBytes, - ) - - // CRITICAL: Log the result with full details - if len(logEntries) == 0 && highWaterMark > requestedOffset && err == nil { - glog.Errorf("[FetchMessage] CRITICAL: ReadMessagesAtOffset returned 0 entries but HWM=%d > requestedOffset=%d (should return data!)", - highWaterMark, requestedOffset) - glog.Errorf("[FetchMessage] Details: nextOffset=%d, endOfPartition=%v, bufferStartOffset=%d", - nextOffset, endOfPartition, localPartition.LogBuffer.GetLogStartOffset()) - } - - if err != nil { - // Check if this is an "offset out of range" error - errMsg := err.Error() - if len(errMsg) > 0 && (len(errMsg) < 20 || errMsg[:20] != "offset") { - glog.Errorf("[FetchMessage] Read error: %v", err) - } else { - // Offset out of range - this is expected when consumer requests old data - glog.V(3).Infof("[FetchMessage] Offset out of range: %v", err) - } - - // Return empty response with metadata - let client adjust offset - return &mq_pb.FetchMessageResponse{ - Messages: []*mq_pb.DataMessage{}, - HighWaterMark: highWaterMark, - LogStartOffset: localPartition.LogBuffer.GetLogStartOffset(), - EndOfPartition: false, - NextOffset: localPartition.LogBuffer.GetLogStartOffset(), // Suggest starting from earliest available - Error: errMsg, - ErrorCode: 2, - }, nil - } - - // Convert to protobuf messages - messages := make([]*mq_pb.DataMessage, 0, len(logEntries)) - for _, entry := range logEntries { - messages = append(messages, &mq_pb.DataMessage{ - Key: entry.Key, - Value: entry.Data, - TsNs: entry.TsNs, - }) - } - - glog.V(4).Infof("[FetchMessage] Returning %d messages, nextOffset=%d, highWaterMark=%d, endOfPartition=%v", - len(messages), nextOffset, highWaterMark, endOfPartition) - - return &mq_pb.FetchMessageResponse{ - Messages: messages, - HighWaterMark: highWaterMark, - LogStartOffset: localPartition.LogBuffer.GetLogStartOffset(), - EndOfPartition: endOfPartition, - NextOffset: nextOffset, - }, nil -} diff --git a/weed/mq/broker/broker_grpc_lookup.go b/weed/mq/broker/broker_grpc_lookup.go deleted file mode 100644 index 5eec21b69..000000000 --- a/weed/mq/broker/broker_grpc_lookup.go +++ /dev/null @@ -1,434 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// LookupTopicBrokers returns the brokers that are serving the topic -func (b *MessageQueueBroker) LookupTopicBrokers(ctx context.Context, request *mq_pb.LookupTopicBrokersRequest) (resp *mq_pb.LookupTopicBrokersResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.LookupTopicBrokers(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - t := topic.FromPbTopic(request.Topic) - ret := &mq_pb.LookupTopicBrokersResponse{} - ret.Topic = request.Topic - - // Use cached topic config to avoid expensive filer reads (26% CPU overhead!) - // getTopicConfFromCache also validates broker assignments on cache miss (saves 14% CPU) - conf, err := b.getTopicConfFromCache(t) - if err != nil { - glog.V(0).Infof("lookup topic %s conf: %v", request.Topic, err) - return ret, err - } - - // Note: Assignment validation is now done inside getTopicConfFromCache on cache misses - // This avoids 14% CPU overhead from validating on EVERY lookup - ret.BrokerPartitionAssignments = conf.BrokerPartitionAssignments - - return ret, nil -} - -func (b *MessageQueueBroker) ListTopics(ctx context.Context, request *mq_pb.ListTopicsRequest) (resp *mq_pb.ListTopicsResponse, err error) { - glog.V(4).Infof("๐Ÿ“‹ ListTopics called, isLockOwner=%v", b.isLockOwner()) - - if !b.isLockOwner() { - glog.V(4).Infof("๐Ÿ“‹ ListTopics proxying to lock owner: %s", b.lockAsBalancer.LockOwner()) - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.ListTopics(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - glog.V(4).Infof("๐Ÿ“‹ ListTopics starting - getting in-memory topics") - ret := &mq_pb.ListTopicsResponse{} - - // First, get topics from in-memory state (includes unflushed topics) - inMemoryTopics := b.localTopicManager.ListTopicsInMemory() - glog.V(4).Infof("๐Ÿ“‹ ListTopics found %d in-memory topics", len(inMemoryTopics)) - topicMap := make(map[string]*schema_pb.Topic) - - // Add in-memory topics to the result - for _, topic := range inMemoryTopics { - topicMap[topic.String()] = &schema_pb.Topic{ - Namespace: topic.Namespace, - Name: topic.Name, - } - } - - // Then, scan the filer directory structure to find persisted topics (fallback for topics not in memory) - // Use a shorter timeout for filer scanning to ensure Metadata requests remain fast - filerCtx, filerCancel := context.WithTimeout(ctx, 2*time.Second) - defer filerCancel() - - glog.V(4).Infof("๐Ÿ“‹ ListTopics scanning filer for persisted topics (2s timeout)") - err = b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // List all namespaces under /topics - glog.V(4).Infof("๐Ÿ“‹ ListTopics calling ListEntries for %s", filer.TopicsDir) - stream, err := client.ListEntries(filerCtx, &filer_pb.ListEntriesRequest{ - Directory: filer.TopicsDir, - Limit: 1000, - }) - if err != nil { - glog.V(0).Infof("list namespaces in %s: %v", filer.TopicsDir, err) - return err - } - glog.V(4).Infof("๐Ÿ“‹ ListTopics got ListEntries stream, processing namespaces...") - - // Process each namespace - for { - resp, err := stream.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - return err - } - - if !resp.Entry.IsDirectory { - continue - } - - namespaceName := resp.Entry.Name - namespacePath := fmt.Sprintf("%s/%s", filer.TopicsDir, namespaceName) - - // List all topics in this namespace - topicStream, err := client.ListEntries(filerCtx, &filer_pb.ListEntriesRequest{ - Directory: namespacePath, - Limit: 1000, - }) - if err != nil { - glog.V(0).Infof("list topics in namespace %s: %v", namespacePath, err) - continue - } - - // Process each topic in the namespace - for { - topicResp, err := topicStream.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - glog.V(0).Infof("error reading topic stream in namespace %s: %v", namespaceName, err) - break - } - - if !topicResp.Entry.IsDirectory { - continue - } - - topicName := topicResp.Entry.Name - - // Check if topic.conf exists - topicPath := fmt.Sprintf("%s/%s", namespacePath, topicName) - confResp, err := client.LookupDirectoryEntry(filerCtx, &filer_pb.LookupDirectoryEntryRequest{ - Directory: topicPath, - Name: filer.TopicConfFile, - }) - if err != nil { - glog.V(0).Infof("lookup topic.conf in %s: %v", topicPath, err) - continue - } - - if confResp.Entry != nil { - // This is a valid persisted topic - add to map if not already present - topicKey := fmt.Sprintf("%s.%s", namespaceName, topicName) - if _, exists := topicMap[topicKey]; !exists { - topicMap[topicKey] = &schema_pb.Topic{ - Namespace: namespaceName, - Name: topicName, - } - } - } - } - } - - return nil - }) - - // Convert map to slice for response (combines in-memory and persisted topics) - for _, topic := range topicMap { - ret.Topics = append(ret.Topics, topic) - } - - if err != nil { - glog.V(0).Infof("ListTopics: filer scan failed: %v (returning %d in-memory topics)", err, len(inMemoryTopics)) - // Still return in-memory topics even if filer fails - } else { - glog.V(4).Infof("๐Ÿ“‹ ListTopics completed successfully: %d total topics (in-memory + persisted)", len(ret.Topics)) - } - - return ret, nil -} - -// TopicExists checks if a topic exists in memory or filer -// Uses unified cache (checks if config is non-nil) to reduce filer load -func (b *MessageQueueBroker) TopicExists(ctx context.Context, request *mq_pb.TopicExistsRequest) (*mq_pb.TopicExistsResponse, error) { - if !b.isLockOwner() { - var resp *mq_pb.TopicExistsResponse - var err error - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.TopicExists(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - if request.Topic == nil { - return &mq_pb.TopicExistsResponse{Exists: false}, nil - } - - // Convert schema_pb.Topic to topic.Topic - topicObj := topic.Topic{ - Namespace: request.Topic.Namespace, - Name: request.Topic.Name, - } - topicKey := topicObj.String() - - // First check in-memory state (includes unflushed topics) - if b.localTopicManager.TopicExistsInMemory(topicObj) { - return &mq_pb.TopicExistsResponse{Exists: true}, nil - } - - // Check unified cache (if conf != nil, topic exists; if conf == nil, doesn't exist) - b.topicCacheMu.RLock() - if entry, found := b.topicCache[topicKey]; found { - if time.Now().Before(entry.expiresAt) { - exists := entry.conf != nil - b.topicCacheMu.RUnlock() - glog.V(4).Infof("Topic cache HIT for %s: exists=%v", topicKey, exists) - return &mq_pb.TopicExistsResponse{Exists: exists}, nil - } - } - b.topicCacheMu.RUnlock() - - // Cache miss or expired - query filer for persisted topics (lightweight check) - glog.V(4).Infof("Topic cache MISS for %s, querying filer for existence", topicKey) - exists := false - err := b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - topicPath := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, request.Topic.Namespace, request.Topic.Name) - confResp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ - Directory: topicPath, - Name: filer.TopicConfFile, - }) - if err == nil && confResp.Entry != nil { - exists = true - } - return nil // Don't propagate error, just check existence - }) - - if err != nil { - glog.V(0).Infof("check topic existence in filer: %v", err) - // Don't cache errors - return false and let next check retry - return &mq_pb.TopicExistsResponse{Exists: false}, nil - } - - // Update unified cache with lightweight result (don't read full config yet) - // Cache existence info: conf=nil for non-existent (we don't have full config yet for existent) - b.topicCacheMu.Lock() - if !exists { - // Negative cache: topic definitely doesn't exist - b.topicCache[topicKey] = &topicCacheEntry{ - conf: nil, - expiresAt: time.Now().Add(b.topicCacheTTL), - } - glog.V(4).Infof("Topic cached as non-existent: %s", topicKey) - } - // Note: For positive existence, we don't cache here to avoid partial state - // The config will be cached when GetOrGenerateLocalPartition reads it - b.topicCacheMu.Unlock() - - return &mq_pb.TopicExistsResponse{Exists: exists}, nil -} - -// GetTopicConfiguration returns the complete configuration of a topic including schema and partition assignments -func (b *MessageQueueBroker) GetTopicConfiguration(ctx context.Context, request *mq_pb.GetTopicConfigurationRequest) (resp *mq_pb.GetTopicConfigurationResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.GetTopicConfiguration(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - t := topic.FromPbTopic(request.Topic) - var conf *mq_pb.ConfigureTopicResponse - var createdAtNs, modifiedAtNs int64 - - if conf, createdAtNs, modifiedAtNs, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil { - glog.V(0).Infof("get topic configuration %s: %v", request.Topic, err) - return nil, fmt.Errorf("failed to read topic configuration: %w", err) - } - - // Ensure topic assignments are active - err = b.ensureTopicActiveAssignments(t, conf) - if err != nil { - glog.V(0).Infof("ensure topic active assignments %s: %v", request.Topic, err) - return nil, fmt.Errorf("failed to ensure topic assignments: %w", err) - } - - // Build the response with complete configuration including metadata - ret := &mq_pb.GetTopicConfigurationResponse{ - Topic: request.Topic, - PartitionCount: int32(len(conf.BrokerPartitionAssignments)), - MessageRecordType: conf.MessageRecordType, - KeyColumns: conf.KeyColumns, - BrokerPartitionAssignments: conf.BrokerPartitionAssignments, - CreatedAtNs: createdAtNs, - LastUpdatedNs: modifiedAtNs, - Retention: conf.Retention, - } - - return ret, nil -} - -// GetTopicPublishers returns the active publishers for a topic -func (b *MessageQueueBroker) GetTopicPublishers(ctx context.Context, request *mq_pb.GetTopicPublishersRequest) (resp *mq_pb.GetTopicPublishersResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.GetTopicPublishers(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - t := topic.FromPbTopic(request.Topic) - var publishers []*mq_pb.TopicPublisher - - // Get topic configuration to find partition assignments - var conf *mq_pb.ConfigureTopicResponse - if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil { - glog.V(0).Infof("get topic configuration for publishers %s: %v", request.Topic, err) - return nil, fmt.Errorf("failed to read topic configuration: %w", err) - } - - // Collect publishers from each partition that is hosted on this broker - for _, assignment := range conf.BrokerPartitionAssignments { - // Only collect from partitions where this broker is the leader - if assignment.LeaderBroker == b.option.BrokerAddress().String() { - partition := topic.FromPbPartition(assignment.Partition) - if localPartition := b.localTopicManager.GetLocalPartition(t, partition); localPartition != nil { - // Get publisher information from local partition - localPartition.Publishers.ForEachPublisher(func(clientName string, publisher *topic.LocalPublisher) { - connectTimeNs, lastSeenTimeNs := publisher.GetTimestamps() - lastPublishedOffset, lastAckedOffset := publisher.GetOffsets() - publishers = append(publishers, &mq_pb.TopicPublisher{ - PublisherName: clientName, - ClientId: clientName, // For now, client name is used as client ID - Partition: assignment.Partition, - ConnectTimeNs: connectTimeNs, - LastSeenTimeNs: lastSeenTimeNs, - Broker: assignment.LeaderBroker, - IsActive: true, - LastPublishedOffset: lastPublishedOffset, - LastAckedOffset: lastAckedOffset, - }) - }) - } - } - } - - return &mq_pb.GetTopicPublishersResponse{ - Publishers: publishers, - }, nil -} - -// GetTopicSubscribers returns the active subscribers for a topic -func (b *MessageQueueBroker) GetTopicSubscribers(ctx context.Context, request *mq_pb.GetTopicSubscribersRequest) (resp *mq_pb.GetTopicSubscribersResponse, err error) { - if !b.isLockOwner() { - proxyErr := b.withBrokerClient(false, pb.ServerAddress(b.lockAsBalancer.LockOwner()), func(client mq_pb.SeaweedMessagingClient) error { - resp, err = client.GetTopicSubscribers(ctx, request) - return nil - }) - if proxyErr != nil { - return nil, proxyErr - } - return resp, err - } - - t := topic.FromPbTopic(request.Topic) - var subscribers []*mq_pb.TopicSubscriber - - // Get topic configuration to find partition assignments - var conf *mq_pb.ConfigureTopicResponse - if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil { - glog.V(0).Infof("get topic configuration for subscribers %s: %v", request.Topic, err) - return nil, fmt.Errorf("failed to read topic configuration: %w", err) - } - - // Collect subscribers from each partition that is hosted on this broker - for _, assignment := range conf.BrokerPartitionAssignments { - // Only collect from partitions where this broker is the leader - if assignment.LeaderBroker == b.option.BrokerAddress().String() { - partition := topic.FromPbPartition(assignment.Partition) - if localPartition := b.localTopicManager.GetLocalPartition(t, partition); localPartition != nil { - // Get subscriber information from local partition - localPartition.Subscribers.ForEachSubscriber(func(clientName string, subscriber *topic.LocalSubscriber) { - // Parse client name to extract consumer group and consumer ID - // Format is typically: "consumerGroup/consumerID" - consumerGroup := "default" - consumerID := clientName - if idx := strings.Index(clientName, "/"); idx != -1 { - consumerGroup = clientName[:idx] - consumerID = clientName[idx+1:] - } - - connectTimeNs, lastSeenTimeNs := subscriber.GetTimestamps() - lastReceivedOffset, lastAckedOffset := subscriber.GetOffsets() - - subscribers = append(subscribers, &mq_pb.TopicSubscriber{ - ConsumerGroup: consumerGroup, - ConsumerId: consumerID, - ClientId: clientName, // Full client name as client ID - Partition: assignment.Partition, - ConnectTimeNs: connectTimeNs, - LastSeenTimeNs: lastSeenTimeNs, - Broker: assignment.LeaderBroker, - IsActive: true, - CurrentOffset: lastAckedOffset, // for compatibility - LastReceivedOffset: lastReceivedOffset, - }) - }) - } - } - } - - return &mq_pb.GetTopicSubscribersResponse{ - Subscribers: subscribers, - }, nil -} - -func (b *MessageQueueBroker) isLockOwner() bool { - return b.lockAsBalancer.LockOwner() == b.option.BrokerAddress().String() -} diff --git a/weed/mq/broker/broker_grpc_pub.go b/weed/mq/broker/broker_grpc_pub.go deleted file mode 100644 index 4604394eb..000000000 --- a/weed/mq/broker/broker_grpc_pub.go +++ /dev/null @@ -1,296 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "io" - "math/rand/v2" - "net" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/grpc/peer" - "google.golang.org/protobuf/proto" -) - -// PUB -// 1. gRPC API to configure a topic -// 1.1 create a topic with existing partition count -// 1.2 assign partitions to brokers -// 2. gRPC API to lookup topic partitions -// 3. gRPC API to publish by topic partitions - -// SUB -// 1. gRPC API to lookup a topic partitions - -// Re-balance topic partitions for publishing -// 1. collect stats from all the brokers -// 2. Rebalance and configure new generation of partitions on brokers -// 3. Tell brokers to close current gneration of publishing. -// Publishers needs to lookup again and publish to the new generation of partitions. - -// Re-balance topic partitions for subscribing -// 1. collect stats from all the brokers -// Subscribers needs to listen for new partitions and connect to the brokers. -// Each subscription may not get data. It can act as a backup. - -func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_PublishMessageServer) error { - - req, err := stream.Recv() - if err != nil { - return err - } - response := &mq_pb.PublishMessageResponse{} - - initMessage := req.GetInit() - if initMessage == nil { - response.ErrorCode, response.Error = CreateBrokerError(BrokerErrorInvalidRecord, "missing init message") - glog.Errorf("missing init message") - return stream.Send(response) - } - - // Check whether current broker should be the leader for the topic partition - leaderBroker, err := b.findBrokerForTopicPartition(initMessage.Topic, initMessage.Partition) - if err != nil { - response.ErrorCode, response.Error = CreateBrokerError(BrokerErrorTopicNotFound, fmt.Sprintf("failed to find leader for topic partition: %v", err)) - glog.Errorf("failed to find leader for topic partition: %v", err) - return stream.Send(response) - } - - currentBrokerAddress := fmt.Sprintf("%s:%d", b.option.Ip, b.option.Port) - if leaderBroker != currentBrokerAddress { - response.ErrorCode, response.Error = CreateBrokerError(BrokerErrorNotLeaderOrFollower, fmt.Sprintf("not the leader for this partition, leader is: %s", leaderBroker)) - glog.V(1).Infof("rejecting publish request: not the leader for partition, leader is: %s", leaderBroker) - return stream.Send(response) - } - - // get or generate a local partition - t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition) - localTopicPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, p) - if getOrGenErr != nil { - response.ErrorCode, response.Error = CreateBrokerError(BrokerErrorTopicNotFound, fmt.Sprintf("topic %v not found: %v", t, getOrGenErr)) - glog.Errorf("topic %v not found: %v", t, getOrGenErr) - return stream.Send(response) - } - - // connect to follower brokers - if followerErr := localTopicPartition.MaybeConnectToFollowers(initMessage, b.grpcDialOption); followerErr != nil { - response.ErrorCode, response.Error = CreateBrokerError(BrokerErrorFollowerConnectionFailed, followerErr.Error()) - glog.Errorf("MaybeConnectToFollowers: %v", followerErr) - return stream.Send(response) - } - - // process each published messages - clientName := fmt.Sprintf("%v-%4d", findClientAddress(stream.Context()), rand.IntN(10000)) - publisher := topic.NewLocalPublisher() - localTopicPartition.Publishers.AddPublisher(clientName, publisher) - - // DISABLED: Periodic ack goroutine not needed with immediate per-message acks - // Immediate acks provide correct offset information for Kafka Gateway - var receivedSequence, acknowledgedSequence int64 - var isClosed bool - - if false { - ackInterval := int64(1) - if initMessage.AckInterval > 0 { - ackInterval = int64(initMessage.AckInterval) - } - go func() { - defer func() { - // println("stop sending ack to publisher", initMessage.PublisherName) - }() - - lastAckTime := time.Now() - for !isClosed { - receivedSequence = atomic.LoadInt64(&localTopicPartition.AckTsNs) - if acknowledgedSequence < receivedSequence && (receivedSequence-acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 100*time.Millisecond) { - acknowledgedSequence = receivedSequence - response := &mq_pb.PublishMessageResponse{ - AckTsNs: acknowledgedSequence, - } - if err := stream.Send(response); err != nil { - glog.Errorf("Error sending response %v: %v", response, err) - } - // Update acknowledged offset for this publisher - publisher.UpdateAckedOffset(acknowledgedSequence) - // println("sent ack", acknowledgedSequence, "=>", initMessage.PublisherName) - lastAckTime = time.Now() - } else { - time.Sleep(10 * time.Millisecond) // Reduced from 1s to 10ms for faster acknowledgments - } - } - }() - } - - defer func() { - // remove the publisher - localTopicPartition.Publishers.RemovePublisher(clientName) - // Use topic-aware shutdown logic to prevent aggressive removal of system topics - if localTopicPartition.MaybeShutdownLocalPartitionForTopic(t.Name) { - b.localTopicManager.RemoveLocalPartition(t, p) - glog.V(0).Infof("Removed local topic %v partition %v", initMessage.Topic, initMessage.Partition) - } - }() - - // send a hello message - stream.Send(&mq_pb.PublishMessageResponse{}) - - defer func() { - isClosed = true - }() - - // process each published messages - for { - // receive a message - req, err := stream.Recv() - if err != nil { - if err == io.EOF { - break - } - glog.V(0).Infof("topic %v partition %v publish stream from %s error: %v", initMessage.Topic, initMessage.Partition, initMessage.PublisherName, err) - break - } - - // Process the received message - dataMessage := req.GetData() - if dataMessage == nil { - continue - } - - // Validate RecordValue structure only for schema-based messages - // Note: Only messages sent via ProduceRecordValue should be in RecordValue format - // Regular Kafka messages and offset management messages are stored as raw bytes - if dataMessage.Value != nil { - record := &schema_pb.RecordValue{} - if err := proto.Unmarshal(dataMessage.Value, record); err == nil { - // Successfully unmarshaled as RecordValue - validate structure - if err := b.validateRecordValue(record, initMessage.Topic); err != nil { - glog.V(1).Infof("RecordValue validation failed on topic %v partition %v: %v", initMessage.Topic, initMessage.Partition, err) - } - } - // Note: We don't log errors for non-RecordValue messages since most Kafka messages - // are raw bytes and should not be expected to be in RecordValue format - } - - // The control message should still be sent to the follower - // to avoid timing issue when ack messages. - - // Send to the local partition with offset assignment - t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition) - - // Create offset assignment function for this partition - assignOffsetFn := func() (int64, error) { - return b.offsetManager.AssignOffset(t, p) - } - - // Use offset-aware publishing - assignedOffset, err := localTopicPartition.PublishWithOffset(dataMessage, assignOffsetFn) - if err != nil { - return fmt.Errorf("topic %v partition %v publish error: %w", initMessage.Topic, initMessage.Partition, err) - } - - // No ForceFlush - subscribers use per-subscriber notification channels for instant wake-up - // Data is served from in-memory LogBuffer with <1ms latency - glog.V(2).Infof("Published offset %d to %s", assignedOffset, initMessage.Topic.Name) - - // Send immediate per-message ack WITH offset - // This is critical for Gateway to return correct offsets to Kafka clients - response := &mq_pb.PublishMessageResponse{ - AckTsNs: dataMessage.TsNs, - AssignedOffset: assignedOffset, - } - if err := stream.Send(response); err != nil { - glog.Errorf("Error sending immediate ack %v: %v", response, err) - return fmt.Errorf("failed to send ack: %v", err) - } - - // Update published offset and last seen time for this publisher - publisher.UpdatePublishedOffset(assignedOffset) - } - - glog.V(0).Infof("topic %v partition %v publish stream from %s closed.", initMessage.Topic, initMessage.Partition, initMessage.PublisherName) - - return nil -} - -// validateRecordValue validates the structure and content of a RecordValue message -// Since RecordValue messages are created from successful protobuf unmarshaling, -// their structure is already guaranteed to be valid by the protobuf library. -// Schema validation (if applicable) already happened during Kafka gateway decoding. -func (b *MessageQueueBroker) validateRecordValue(record *schema_pb.RecordValue, topic *schema_pb.Topic) error { - // Check for nil RecordValue - if record == nil { - return fmt.Errorf("RecordValue is nil") - } - - // Check for nil Fields map - if record.Fields == nil { - return fmt.Errorf("RecordValue.Fields is nil") - } - - // Check for empty Fields map - if len(record.Fields) == 0 { - return fmt.Errorf("RecordValue has no fields") - } - - // If protobuf unmarshaling succeeded, the RecordValue is structurally valid - return nil -} - -// duplicated from master_grpc_server.go -func findClientAddress(ctx context.Context) string { - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return "" - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return "" - } - return pr.Addr.String() -} - -// GetPartitionRangeInfo returns comprehensive range information for a partition (offsets, timestamps, etc.) -func (b *MessageQueueBroker) GetPartitionRangeInfo(ctx context.Context, req *mq_pb.GetPartitionRangeInfoRequest) (*mq_pb.GetPartitionRangeInfoResponse, error) { - if req.Topic == nil || req.Partition == nil { - return &mq_pb.GetPartitionRangeInfoResponse{ - Error: "topic and partition are required", - }, nil - } - - t := topic.FromPbTopic(req.Topic) - p := topic.FromPbPartition(req.Partition) - - // Get offset information from the broker's internal method - info, err := b.GetPartitionOffsetInfoInternal(t, p) - if err != nil { - return &mq_pb.GetPartitionRangeInfoResponse{ - Error: fmt.Sprintf("failed to get partition range info: %v", err), - }, nil - } - - // TODO: Get timestamp range information from chunk metadata or log buffer - // For now, we'll return zero values for timestamps - this can be enhanced later - // to read from Extended attributes (ts_min, ts_max) from filer metadata - timestampRange := &mq_pb.TimestampRangeInfo{ - EarliestTimestampNs: 0, // TODO: Read from chunk metadata ts_min - LatestTimestampNs: 0, // TODO: Read from chunk metadata ts_max - } - - return &mq_pb.GetPartitionRangeInfoResponse{ - OffsetRange: &mq_pb.OffsetRangeInfo{ - EarliestOffset: info.EarliestOffset, - LatestOffset: info.LatestOffset, - HighWaterMark: info.HighWaterMark, - }, - TimestampRange: timestampRange, - RecordCount: info.RecordCount, - ActiveSubscriptions: info.ActiveSubscriptions, - }, nil -} diff --git a/weed/mq/broker/broker_grpc_pub_balancer.go b/weed/mq/broker/broker_grpc_pub_balancer.go deleted file mode 100644 index 8327ead7d..000000000 --- a/weed/mq/broker/broker_grpc_pub_balancer.go +++ /dev/null @@ -1,49 +0,0 @@ -package broker - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// PublisherToPubBalancer receives connections from brokers and collects stats -func (b *MessageQueueBroker) PublisherToPubBalancer(stream mq_pb.SeaweedMessaging_PublisherToPubBalancerServer) error { - if !b.isLockOwner() { - return status.Errorf(codes.Unavailable, "not current broker balancer") - } - req, err := stream.Recv() - if err != nil { - return fmt.Errorf("receive init message: %w", err) - } - - // process init message - initMessage := req.GetInit() - var brokerStats *pub_balancer.BrokerStats - if initMessage != nil { - brokerStats = b.PubBalancer.AddBroker(initMessage.Broker) - } else { - return status.Errorf(codes.InvalidArgument, "balancer init message is empty") - } - defer func() { - b.PubBalancer.RemoveBroker(initMessage.Broker, brokerStats) - }() - - // process stats message - for { - req, err := stream.Recv() - if err != nil { - return fmt.Errorf("receive stats message from %s: %v", initMessage.Broker, err) - } - if !b.isLockOwner() { - return status.Errorf(codes.Unavailable, "not current broker balancer") - } - if receivedStats := req.GetStats(); receivedStats != nil { - b.PubBalancer.OnBrokerStatsUpdated(initMessage.Broker, brokerStats, receivedStats) - // glog.V(4).Infof("received from %v: %+v", initMessage.Broker, receivedStats) - } - } - - return nil -} diff --git a/weed/mq/broker/broker_grpc_pub_follow.go b/weed/mq/broker/broker_grpc_pub_follow.go deleted file mode 100644 index 117dc4f87..000000000 --- a/weed/mq/broker/broker_grpc_pub_follow.go +++ /dev/null @@ -1,148 +0,0 @@ -package broker - -import ( - "fmt" - "io" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util/buffered_queue" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -type memBuffer struct { - buf []byte - startTime time.Time - stopTime time.Time -} - -func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) { - var req *mq_pb.PublishFollowMeRequest - req, err = stream.Recv() - if err != nil { - return err - } - initMessage := req.GetInit() - if initMessage == nil { - return fmt.Errorf("missing init message") - } - - // create an in-memory queue of buffered messages - inMemoryBuffers := buffered_queue.NewBufferedQueue[memBuffer](4) - logBuffer := b.buildFollowerLogBuffer(inMemoryBuffers) - - lastFlushTsNs := time.Now().UnixNano() - - // follow each published messages - for { - // receive a message - req, err = stream.Recv() - if err != nil { - if err == io.EOF { - err = nil - break - } - glog.V(0).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err) - break - } - - // Process the received message - if dataMessage := req.GetData(); dataMessage != nil { - - // TODO: change this to DataMessage - // log the message - logBuffer.AddToBuffer(dataMessage) - - // send back the ack - if err := stream.Send(&mq_pb.PublishFollowMeResponse{ - AckTsNs: dataMessage.TsNs, - }); err != nil { - glog.Errorf("Error sending response %v: %v", dataMessage, err) - } - // println("ack", string(dataMessage.Key), dataMessage.TsNs) - } else if closeMessage := req.GetClose(); closeMessage != nil { - glog.V(0).Infof("topic %v partition %v publish stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) - break - } else if flushMessage := req.GetFlush(); flushMessage != nil { - glog.V(0).Infof("topic %v partition %v publish stream flushed: %v", initMessage.Topic, initMessage.Partition, flushMessage) - - lastFlushTsNs = flushMessage.TsNs - - // drop already flushed messages - for mem, found := inMemoryBuffers.PeekHead(); found; mem, found = inMemoryBuffers.PeekHead() { - if mem.stopTime.UnixNano() <= flushMessage.TsNs { - inMemoryBuffers.Dequeue() - // println("dropping flushed messages: ", mem.startTime.UnixNano(), mem.stopTime.UnixNano(), len(mem.buf)) - } else { - break - } - } - - } else { - glog.Errorf("unknown message: %v", req) - } - } - - t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition) - - logBuffer.ShutdownLogBuffer() - // wait until all messages are sent to inMemoryBuffers - for !logBuffer.IsAllFlushed() { - time.Sleep(113 * time.Millisecond) - } - - partitionDir := topic.PartitionDir(t, p) - - // flush the remaining messages - inMemoryBuffers.CloseInput() - for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() { - if len(mem.buf) == 0 { - continue - } - - startTime, stopTime := mem.startTime.UTC(), mem.stopTime.UTC() - - if stopTime.UnixNano() <= lastFlushTsNs { - glog.V(0).Infof("dropping remaining data at %v %v", t, p) - continue - } - - // TODO trim data earlier than lastFlushTsNs - - targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT)) - - for { - if err := b.appendToFile(targetFile, mem.buf); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) - time.Sleep(737 * time.Millisecond) - } else { - break - } - } - - glog.V(0).Infof("flushed remaining data at %v to %s size %d", mem.stopTime.UnixNano(), targetFile, len(mem.buf)) - } - - glog.V(0).Infof("shut down follower for %v %v", t, p) - - return err -} - -func (b *MessageQueueBroker) buildFollowerLogBuffer(inMemoryBuffers *buffered_queue.BufferedQueue[memBuffer]) *log_buffer.LogBuffer { - lb := log_buffer.NewLogBuffer("follower", - 5*time.Second, func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { - if len(buf) == 0 { - return - } - inMemoryBuffers.Enqueue(memBuffer{ - buf: buf, - startTime: startTime, - stopTime: stopTime, - }) - glog.V(0).Infof("queue up %d~%d size %d", startTime.UnixNano(), stopTime.UnixNano(), len(buf)) - }, nil, func() { - }) - return lb -} diff --git a/weed/mq/broker/broker_grpc_query.go b/weed/mq/broker/broker_grpc_query.go deleted file mode 100644 index 228152bdf..000000000 --- a/weed/mq/broker/broker_grpc_query.go +++ /dev/null @@ -1,351 +0,0 @@ -package broker - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -// BufferRange represents a range of buffer offsets that have been flushed to disk -type BufferRange struct { - start int64 - end int64 -} - -// ErrNoPartitionAssignment indicates no broker assignment found for the partition. -// This is a normal case that means there are no unflushed messages for this partition. -var ErrNoPartitionAssignment = errors.New("no broker assignment found for partition") - -// GetUnflushedMessages returns messages from the broker's in-memory LogBuffer -// that haven't been flushed to disk yet, using buffer_start metadata for deduplication -// Now supports streaming responses and buffer offset filtering for better performance -// Includes broker routing to redirect requests to the correct broker hosting the topic/partition -func (b *MessageQueueBroker) GetUnflushedMessages(req *mq_pb.GetUnflushedMessagesRequest, stream mq_pb.SeaweedMessaging_GetUnflushedMessagesServer) error { - // Convert protobuf types to internal types - t := topic.FromPbTopic(req.Topic) - partition := topic.FromPbPartition(req.Partition) - - // Get or generate the local partition for this topic/partition (similar to subscriber flow) - localPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, partition) - if getOrGenErr != nil { - // Fall back to the original logic for broker routing - b.accessLock.Lock() - localPartition = b.localTopicManager.GetLocalPartition(t, partition) - b.accessLock.Unlock() - } else { - } - - if localPartition == nil { - // Topic/partition not found locally, attempt to find the correct broker and redirect - glog.V(1).Infof("Topic/partition %v %v not found locally, looking up broker", t, partition) - - // Look up which broker hosts this topic/partition - brokerHost, err := b.findBrokerForTopicPartition(req.Topic, req.Partition) - if err != nil { - if errors.Is(err, ErrNoPartitionAssignment) { - // Normal case: no broker assignment means no unflushed messages - glog.V(2).Infof("No broker assignment for %v %v - no unflushed messages", t, partition) - return stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - EndOfStream: true, - }) - } - return stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - Error: fmt.Sprintf("failed to find broker for %v %v: %v", t, partition, err), - EndOfStream: true, - }) - } - - if brokerHost == "" { - // This should not happen after ErrNoPartitionAssignment check, but keep for safety - glog.V(2).Infof("Empty broker host for %v %v - no unflushed messages", t, partition) - return stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - EndOfStream: true, - }) - } - - // Redirect to the correct broker - glog.V(1).Infof("Redirecting GetUnflushedMessages request for %v %v to broker %s", t, partition, brokerHost) - return b.redirectGetUnflushedMessages(brokerHost, req, stream) - } - - // Build deduplication map from existing log files using buffer_start metadata - partitionDir := topic.PartitionDir(t, partition) - flushedBufferRanges, err := b.buildBufferStartDeduplicationMap(partitionDir) - if err != nil { - glog.Errorf("Failed to build deduplication map for %v %v: %v", t, partition, err) - // Continue with empty map - better to potentially duplicate than to miss data - flushedBufferRanges = make([]BufferRange, 0) - } - - // Use buffer_start offset for precise deduplication - lastFlushTsNs := localPartition.LogBuffer.LastFlushTsNs - startBufferOffset := req.StartBufferOffset - startTimeNs := lastFlushTsNs // Still respect last flush time for safety - - // Stream messages from LogBuffer with filtering - messageCount := 0 - startPosition := log_buffer.NewMessagePosition(startTimeNs, startBufferOffset) - - // Use the new LoopProcessLogDataWithOffset method to avoid code duplication - _, _, err = localPartition.LogBuffer.LoopProcessLogDataWithOffset( - "GetUnflushedMessages", - startPosition, - 0, // stopTsNs = 0 means process all available data - func() bool { return false }, // waitForDataFn = false means don't wait for new data - func(logEntry *filer_pb.LogEntry, offset int64) (isDone bool, err error) { - - // Apply buffer offset filtering if specified - if startBufferOffset > 0 && offset < startBufferOffset { - return false, nil - } - - // Check if this message is from a buffer range that's already been flushed - if b.isBufferOffsetFlushed(offset, flushedBufferRanges) { - return false, nil - } - - // Stream this message - err = stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - Message: logEntry, - EndOfStream: false, - }) - - if err != nil { - glog.Errorf("Failed to stream message: %v", err) - return true, err // isDone = true to stop processing - } - - messageCount++ - return false, nil // Continue processing - }, - ) - - // Handle collection errors - if err != nil && err != log_buffer.ResumeFromDiskError { - streamErr := stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - Error: fmt.Sprintf("failed to stream unflushed messages: %v", err), - EndOfStream: true, - }) - if streamErr != nil { - glog.Errorf("Failed to send error response: %v", streamErr) - } - return err - } - - // Send end-of-stream marker - err = stream.Send(&mq_pb.GetUnflushedMessagesResponse{ - EndOfStream: true, - }) - - if err != nil { - glog.Errorf("Failed to send end-of-stream marker: %v", err) - return err - } - - return nil -} - -// buildBufferStartDeduplicationMap scans log files to build a map of buffer ranges -// that have been flushed to disk, using the buffer_start metadata -func (b *MessageQueueBroker) buildBufferStartDeduplicationMap(partitionDir string) ([]BufferRange, error) { - var flushedRanges []BufferRange - - // List all files in the partition directory using filer client accessor - // Use pagination to handle directories with more than 1000 files - err := b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - var lastFileName string - var hasMore = true - - for hasMore { - var currentBatchProcessed int - err := filer_pb.SeaweedList(context.Background(), client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error { - currentBatchProcessed++ - hasMore = !isLast // If this is the last entry of a full batch, there might be more - lastFileName = entry.Name - - if entry.IsDirectory { - return nil - } - - // Skip Parquet files - they don't represent buffer ranges - if strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - - // Skip offset files - if strings.HasSuffix(entry.Name, ".offset") { - return nil - } - - // Get buffer start for this file - bufferStart, err := b.getLogBufferStartFromFile(entry) - if err != nil { - glog.V(2).Infof("Failed to get buffer start from file %s: %v", entry.Name, err) - return nil // Continue with other files - } - - if bufferStart == nil { - // File has no buffer metadata - skip deduplication for this file - glog.V(2).Infof("File %s has no buffer_start metadata", entry.Name) - return nil - } - - // Calculate the buffer range covered by this file - chunkCount := int64(len(entry.GetChunks())) - if chunkCount > 0 { - fileRange := BufferRange{ - start: bufferStart.StartIndex, - end: bufferStart.StartIndex + chunkCount - 1, - } - flushedRanges = append(flushedRanges, fileRange) - glog.V(3).Infof("File %s covers buffer range [%d-%d]", entry.Name, fileRange.start, fileRange.end) - } - - return nil - }, lastFileName, false, 1000) // Start from last processed file name for next batch - - if err != nil { - return err - } - - // If we processed fewer than 1000 entries, we've reached the end - if currentBatchProcessed < 1000 { - hasMore = false - } - } - - return nil - }) - - if err != nil { - return flushedRanges, fmt.Errorf("failed to list partition directory %s: %v", partitionDir, err) - } - - return flushedRanges, nil -} - -// getLogBufferStartFromFile extracts LogBufferStart metadata from a log file -func (b *MessageQueueBroker) getLogBufferStartFromFile(entry *filer_pb.Entry) (*LogBufferStart, error) { - if entry.Extended == nil { - return nil, nil - } - - // Only support binary buffer_start format - if startData, exists := entry.Extended["buffer_start"]; exists { - if len(startData) == 8 { - startIndex := int64(binary.BigEndian.Uint64(startData)) - if startIndex > 0 { - return &LogBufferStart{StartIndex: startIndex}, nil - } - } else { - return nil, fmt.Errorf("invalid buffer_start format: expected 8 bytes, got %d", len(startData)) - } - } - - return nil, nil -} - -// isBufferOffsetFlushed checks if a buffer offset is covered by any of the flushed ranges -func (b *MessageQueueBroker) isBufferOffsetFlushed(bufferOffset int64, flushedRanges []BufferRange) bool { - for _, flushedRange := range flushedRanges { - if bufferOffset >= flushedRange.start && bufferOffset <= flushedRange.end { - return true - } - } - return false -} - -// findBrokerForTopicPartition finds which broker hosts the specified topic/partition -func (b *MessageQueueBroker) findBrokerForTopicPartition(topic *schema_pb.Topic, partition *schema_pb.Partition) (string, error) { - // Use LookupTopicBrokers to find which broker hosts this topic/partition - ctx := context.Background() - lookupReq := &mq_pb.LookupTopicBrokersRequest{ - Topic: topic, - } - - // If we're not the lock owner (balancer), we need to redirect to the balancer first - var lookupResp *mq_pb.LookupTopicBrokersResponse - var err error - - if !b.isLockOwner() { - // Redirect to balancer to get topic broker assignments - balancerAddress := pb.ServerAddress(b.lockAsBalancer.LockOwner()) - err = b.withBrokerClient(false, balancerAddress, func(client mq_pb.SeaweedMessagingClient) error { - lookupResp, err = client.LookupTopicBrokers(ctx, lookupReq) - return err - }) - } else { - // We are the balancer, handle the lookup directly - lookupResp, err = b.LookupTopicBrokers(ctx, lookupReq) - } - - if err != nil { - return "", fmt.Errorf("failed to lookup topic brokers: %v", err) - } - - // Find the broker assignment that matches our partition - for _, assignment := range lookupResp.BrokerPartitionAssignments { - if b.partitionsMatch(partition, assignment.Partition) { - if assignment.LeaderBroker != "" { - return assignment.LeaderBroker, nil - } - } - } - - return "", ErrNoPartitionAssignment -} - -// partitionsMatch checks if two partitions represent the same partition -func (b *MessageQueueBroker) partitionsMatch(p1, p2 *schema_pb.Partition) bool { - return p1.RingSize == p2.RingSize && - p1.RangeStart == p2.RangeStart && - p1.RangeStop == p2.RangeStop && - p1.UnixTimeNs == p2.UnixTimeNs -} - -// redirectGetUnflushedMessages forwards the GetUnflushedMessages request to the correct broker -func (b *MessageQueueBroker) redirectGetUnflushedMessages(brokerHost string, req *mq_pb.GetUnflushedMessagesRequest, stream mq_pb.SeaweedMessaging_GetUnflushedMessagesServer) error { - ctx := stream.Context() - - // Connect to the target broker and forward the request - return b.withBrokerClient(false, pb.ServerAddress(brokerHost), func(client mq_pb.SeaweedMessagingClient) error { - // Create a new stream to the target broker - targetStream, err := client.GetUnflushedMessages(ctx, req) - if err != nil { - return fmt.Errorf("failed to create stream to broker %s: %v", brokerHost, err) - } - - // Forward all responses from the target broker to our client - for { - response, err := targetStream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - // Normal end of stream - return nil - } - return fmt.Errorf("error receiving from broker %s: %v", brokerHost, err) - } - - // Forward the response to our client - if sendErr := stream.Send(response); sendErr != nil { - return fmt.Errorf("error forwarding response to client: %v", sendErr) - } - - // Check if this is the end of stream - if response.EndOfStream { - return nil - } - } - }) -} diff --git a/weed/mq/broker/broker_grpc_sub.go b/weed/mq/broker/broker_grpc_sub.go deleted file mode 100644 index f20e1a065..000000000 --- a/weed/mq/broker/broker_grpc_sub.go +++ /dev/null @@ -1,423 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_SubscribeMessageServer) error { - - req, err := stream.Recv() - if err != nil { - return err - } - if req.GetInit() == nil { - glog.Errorf("missing init message") - return fmt.Errorf("missing init message") - } - - // Create a cancellable context so we can properly clean up when the client disconnects - ctx, cancel := context.WithCancel(stream.Context()) - defer cancel() // Ensure context is cancelled when function exits - - clientName := fmt.Sprintf("%s/%s-%s", req.GetInit().ConsumerGroup, req.GetInit().ConsumerId, req.GetInit().ClientId) - - t := topic.FromPbTopic(req.GetInit().Topic) - partition := topic.FromPbPartition(req.GetInit().GetPartitionOffset().GetPartition()) - - glog.V(0).Infof("Subscriber %s on %v %v connected", req.GetInit().ConsumerId, t, partition) - - glog.V(4).Infof("Calling GetOrGenerateLocalPartition for %s %s", t, partition) - localTopicPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, partition) - if getOrGenErr != nil { - glog.V(4).Infof("GetOrGenerateLocalPartition failed: %v", getOrGenErr) - return getOrGenErr - } - glog.V(4).Infof("GetOrGenerateLocalPartition succeeded, localTopicPartition=%v", localTopicPartition != nil) - if localTopicPartition == nil { - return fmt.Errorf("failed to get or generate local partition for topic %v partition %v", t, partition) - } - - subscriber := topic.NewLocalSubscriber() - localTopicPartition.Subscribers.AddSubscriber(clientName, subscriber) - glog.V(0).Infof("Subscriber %s connected on %v %v", clientName, t, partition) - isConnected := true - - var counter int64 - startPosition := b.getRequestPosition(req.GetInit()) - imt := sub_coordinator.NewInflightMessageTracker(int(req.GetInit().SlidingWindowSize)) - - defer func() { - isConnected = false - // Clean up any in-flight messages to prevent them from blocking other subscribers - if cleanedCount := imt.Cleanup(); cleanedCount > 0 { - glog.V(0).Infof("Subscriber %s cleaned up %d in-flight messages on disconnect", clientName, cleanedCount) - } - localTopicPartition.Subscribers.RemoveSubscriber(clientName) - glog.V(0).Infof("Subscriber %s on %v %v disconnected, sent %d", clientName, t, partition, counter) - // Use topic-aware shutdown logic to prevent aggressive removal of system topics - if localTopicPartition.MaybeShutdownLocalPartitionForTopic(t.Name) { - b.localTopicManager.RemoveLocalPartition(t, partition) - } - }() - - // connect to the follower - var subscribeFollowMeStream mq_pb.SeaweedMessaging_SubscribeFollowMeClient - glog.V(0).Infof("follower broker: %v", req.GetInit().FollowerBroker) - if req.GetInit().FollowerBroker != "" { - follower := req.GetInit().FollowerBroker - if followerGrpcConnection, err := pb.GrpcDial(ctx, follower, true, b.grpcDialOption); err != nil { - return fmt.Errorf("fail to dial %s: %v", follower, err) - } else { - defer func() { - println("closing SubscribeFollowMe connection", follower) - if subscribeFollowMeStream != nil { - subscribeFollowMeStream.CloseSend() - } - // followerGrpcConnection.Close() - }() - followerClient := mq_pb.NewSeaweedMessagingClient(followerGrpcConnection) - if subscribeFollowMeStream, err = followerClient.SubscribeFollowMe(ctx); err != nil { - return fmt.Errorf("fail to subscribe to %s: %v", follower, err) - } else { - if err := subscribeFollowMeStream.Send(&mq_pb.SubscribeFollowMeRequest{ - Message: &mq_pb.SubscribeFollowMeRequest_Init{ - Init: &mq_pb.SubscribeFollowMeRequest_InitMessage{ - Topic: req.GetInit().Topic, - Partition: req.GetInit().GetPartitionOffset().Partition, - ConsumerGroup: req.GetInit().ConsumerGroup, - }, - }, - }); err != nil { - return fmt.Errorf("fail to send init to %s: %v", follower, err) - } - } - } - glog.V(0).Infof("follower %s connected", follower) - } - - // Channel to handle seek requests - signals Subscribe loop to restart from new offset - seekChan := make(chan *mq_pb.SubscribeMessageRequest_SeekMessage, 1) - - go func() { - defer cancel() // CRITICAL: Cancel context when Recv goroutine exits (client disconnect) - - var lastOffset int64 - - for { - ack, err := stream.Recv() - - if err != nil { - if err == io.EOF { - // the client has called CloseSend(). This is to ack the close. - stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Ctrl{ - Ctrl: &mq_pb.SubscribeMessageResponse_SubscribeCtrlMessage{ - IsEndOfStream: true, - }, - }}) - break - } - glog.V(0).Infof("topic %v partition %v subscriber %s lastOffset %d error: %v", t, partition, clientName, lastOffset, err) - break - } - // Handle seek messages - if seekMsg := ack.GetSeek(); seekMsg != nil { - glog.V(0).Infof("Subscriber %s received seek request to offset %d (type %v)", - clientName, seekMsg.Offset, seekMsg.OffsetType) - - // Send seek request to Subscribe loop - select { - case seekChan <- seekMsg: - glog.V(0).Infof("Subscriber %s seek request queued", clientName) - default: - glog.V(0).Infof("Subscriber %s seek request dropped (already pending)", clientName) - // Send error response if seek is already in progress - stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Ctrl{ - Ctrl: &mq_pb.SubscribeMessageResponse_SubscribeCtrlMessage{ - Error: "Seek already in progress", - }, - }}) - } - continue - } - - if ack.GetAck().Key == nil { - // skip ack for control messages - continue - } - imt.AcknowledgeMessage(ack.GetAck().Key, ack.GetAck().TsNs) - - currentLastOffset := imt.GetOldestAckedTimestamp() - // Update acknowledged offset and last seen time for this subscriber when it sends an ack - subscriber.UpdateAckedOffset(currentLastOffset) - // fmt.Printf("%+v recv (%s,%d), oldest %d\n", partition, string(ack.GetAck().Key), ack.GetAck().TsNs, currentLastOffset) - if subscribeFollowMeStream != nil && currentLastOffset > lastOffset { - if err := subscribeFollowMeStream.Send(&mq_pb.SubscribeFollowMeRequest{ - Message: &mq_pb.SubscribeFollowMeRequest_Ack{ - Ack: &mq_pb.SubscribeFollowMeRequest_AckMessage{ - TsNs: currentLastOffset, - }, - }, - }); err != nil { - glog.Errorf("Error sending ack to follower: %v", err) - break - } - lastOffset = currentLastOffset - // fmt.Printf("%+v forwarding ack %d\n", partition, lastOffset) - } - } - if lastOffset > 0 { - glog.V(0).Infof("saveConsumerGroupOffset %v %v %v %v", t, partition, req.GetInit().ConsumerGroup, lastOffset) - if err := b.saveConsumerGroupOffset(t, partition, req.GetInit().ConsumerGroup, lastOffset); err != nil { - glog.Errorf("saveConsumerGroupOffset partition %v lastOffset %d: %v", partition, lastOffset, err) - } - } - if subscribeFollowMeStream != nil { - if err := subscribeFollowMeStream.Send(&mq_pb.SubscribeFollowMeRequest{ - Message: &mq_pb.SubscribeFollowMeRequest_Close{ - Close: &mq_pb.SubscribeFollowMeRequest_CloseMessage{}, - }, - }); err != nil { - if err != io.EOF { - glog.Errorf("Error sending close to follower: %v", err) - } - } - } - }() - - // Create a goroutine to handle context cancellation and wake up the condition variable - // This is created ONCE per subscriber, not per callback invocation - go func() { - <-ctx.Done() - // Wake up the condition variable when context is cancelled - localTopicPartition.ListenersLock.Lock() - localTopicPartition.ListenersCond.Broadcast() - localTopicPartition.ListenersLock.Unlock() - }() - - // Subscribe loop - can be restarted when seek is requested - currentPosition := startPosition -subscribeLoop: - for { - // Context for this iteration of Subscribe (can be cancelled by seek) - subscribeCtx, subscribeCancel := context.WithCancel(ctx) - - // Start Subscribe in a goroutine so we can interrupt it with seek - subscribeDone := make(chan error, 1) - go func() { - subscribeErr := localTopicPartition.Subscribe(clientName, currentPosition, func() bool { - // Check cancellation before waiting - if subscribeCtx.Err() != nil || !isConnected { - return false - } - - // Wait for new data using condition variable (blocking, not polling) - localTopicPartition.ListenersLock.Lock() - localTopicPartition.ListenersCond.Wait() - localTopicPartition.ListenersLock.Unlock() - - // After waking up, check if we should stop - return subscribeCtx.Err() == nil && isConnected - }, func(logEntry *filer_pb.LogEntry) (bool, error) { - // Wait for the message to be acknowledged with a timeout to prevent infinite loops - const maxWaitTime = 30 * time.Second - const checkInterval = 137 * time.Millisecond - startTime := time.Now() - - for imt.IsInflight(logEntry.Key) { - // Check if we've exceeded the maximum wait time - if time.Since(startTime) > maxWaitTime { - glog.Warningf("Subscriber %s: message with key %s has been in-flight for more than %v, forcing acknowledgment", - clientName, string(logEntry.Key), maxWaitTime) - // Force remove the message from in-flight tracking to prevent infinite loop - imt.AcknowledgeMessage(logEntry.Key, logEntry.TsNs) - break - } - - time.Sleep(checkInterval) - - // Check if the client has disconnected by monitoring the context - select { - case <-subscribeCtx.Done(): - err := subscribeCtx.Err() - if err == context.Canceled { - // Subscribe cancelled (seek or disconnect) - return false, nil - } - glog.V(0).Infof("Subscriber %s disconnected: %v", clientName, err) - return false, nil - default: - // Continue processing the request - } - } - if logEntry.Key != nil { - imt.EnflightMessage(logEntry.Key, logEntry.TsNs) - } - - // Create the message to send - dataMsg := &mq_pb.DataMessage{ - Key: logEntry.Key, - Value: logEntry.Data, - TsNs: logEntry.TsNs, - } - - - if err := stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Data{ - Data: dataMsg, - }}); err != nil { - glog.Errorf("Error sending data: %v", err) - return false, err - } - - // Update received offset and last seen time for this subscriber - subscriber.UpdateReceivedOffset(logEntry.TsNs) - - counter++ - return false, nil - }) - subscribeDone <- subscribeErr - }() - - // Wait for either Subscribe to complete or a seek request - select { - case err = <-subscribeDone: - subscribeCancel() - if err != nil || ctx.Err() != nil { - // Subscribe finished with error or main context cancelled - exit loop - break subscribeLoop - } - // Subscribe completed normally (shouldn't happen in streaming mode) - break subscribeLoop - - case seekMsg := <-seekChan: - // Seek requested - cancel current Subscribe and restart from new offset - glog.V(0).Infof("Subscriber %s seeking from offset %d to offset %d (type %v)", - clientName, currentPosition.GetOffset(), seekMsg.Offset, seekMsg.OffsetType) - - // Cancel current Subscribe iteration - subscribeCancel() - - // Wait for Subscribe to finish cancelling - <-subscribeDone - - // Update position for next iteration - currentPosition = b.getRequestPositionFromSeek(seekMsg) - glog.V(0).Infof("Subscriber %s restarting Subscribe from new offset %d", clientName, seekMsg.Offset) - - // Send acknowledgment that seek completed - stream.Send(&mq_pb.SubscribeMessageResponse{Message: &mq_pb.SubscribeMessageResponse_Ctrl{ - Ctrl: &mq_pb.SubscribeMessageResponse_SubscribeCtrlMessage{ - Error: "", // Empty error means success - }, - }}) - - // Loop will restart with new position - } - } - - return err -} - -func (b *MessageQueueBroker) getRequestPosition(initMessage *mq_pb.SubscribeMessageRequest_InitMessage) (startPosition log_buffer.MessagePosition) { - if initMessage == nil { - return - } - offset := initMessage.GetPartitionOffset() - offsetType := initMessage.OffsetType - - // reset to earliest or latest - if offsetType == schema_pb.OffsetType_RESET_TO_EARLIEST { - startPosition = log_buffer.NewMessagePosition(1, -3) - return - } - if offsetType == schema_pb.OffsetType_RESET_TO_LATEST { - startPosition = log_buffer.NewMessagePosition(time.Now().UnixNano(), -4) - return - } - - // use the exact timestamp - if offsetType == schema_pb.OffsetType_EXACT_TS_NS { - startPosition = log_buffer.NewMessagePosition(offset.StartTsNs, -2) - return - } - - // use exact offset (native offset-based positioning) - if offsetType == schema_pb.OffsetType_EXACT_OFFSET { - startPosition = log_buffer.NewMessagePositionFromOffset(offset.StartOffset) - return - } - - // reset to specific offset - if offsetType == schema_pb.OffsetType_RESET_TO_OFFSET { - startPosition = log_buffer.NewMessagePositionFromOffset(offset.StartOffset) - return - } - - // try to resume - if storedOffset, err := b.readConsumerGroupOffset(initMessage); err == nil { - glog.V(0).Infof("resume from saved offset %v %v %v: %v", initMessage.Topic, initMessage.PartitionOffset.Partition, initMessage.ConsumerGroup, storedOffset) - startPosition = log_buffer.NewMessagePosition(storedOffset, -2) - return - } - - if offsetType == schema_pb.OffsetType_RESUME_OR_EARLIEST { - startPosition = log_buffer.NewMessagePosition(1, -5) - } else if offsetType == schema_pb.OffsetType_RESUME_OR_LATEST { - startPosition = log_buffer.NewMessagePosition(time.Now().UnixNano(), -6) - } - return -} - -// getRequestPositionFromSeek converts a seek request to a MessagePosition -// This is used when implementing full seek support in Subscribe loop -func (b *MessageQueueBroker) getRequestPositionFromSeek(seekMsg *mq_pb.SubscribeMessageRequest_SeekMessage) (startPosition log_buffer.MessagePosition) { - if seekMsg == nil { - return - } - - offsetType := seekMsg.OffsetType - offset := seekMsg.Offset - - // reset to earliest or latest - if offsetType == schema_pb.OffsetType_RESET_TO_EARLIEST { - startPosition = log_buffer.NewMessagePosition(1, -3) - return - } - if offsetType == schema_pb.OffsetType_RESET_TO_LATEST { - startPosition = log_buffer.NewMessagePosition(time.Now().UnixNano(), -4) - return - } - - // use the exact timestamp - if offsetType == schema_pb.OffsetType_EXACT_TS_NS { - startPosition = log_buffer.NewMessagePosition(offset, -2) - return - } - - // use exact offset (native offset-based positioning) - if offsetType == schema_pb.OffsetType_EXACT_OFFSET { - startPosition = log_buffer.NewMessagePositionFromOffset(offset) - return - } - - // reset to specific offset - if offsetType == schema_pb.OffsetType_RESET_TO_OFFSET { - startPosition = log_buffer.NewMessagePositionFromOffset(offset) - return - } - - // default to exact offset - startPosition = log_buffer.NewMessagePositionFromOffset(offset) - return -} diff --git a/weed/mq/broker/broker_grpc_sub_coordinator.go b/weed/mq/broker/broker_grpc_sub_coordinator.go deleted file mode 100644 index 985b0a47e..000000000 --- a/weed/mq/broker/broker_grpc_sub_coordinator.go +++ /dev/null @@ -1,92 +0,0 @@ -package broker - -import ( - "context" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// SubscriberToSubCoordinator coordinates the subscribers -func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMessaging_SubscriberToSubCoordinatorServer) error { - if !b.isLockOwner() { - return status.Errorf(codes.Unavailable, "not current broker balancer") - } - req, err := stream.Recv() - if err != nil { - return err - } - - var cgi *sub_coordinator.ConsumerGroupInstance - var cg *sub_coordinator.ConsumerGroup - // process init message - initMessage := req.GetInit() - if initMessage != nil { - cg, cgi, err = b.SubCoordinator.AddSubscriber(initMessage) - if err != nil { - return status.Errorf(codes.InvalidArgument, "failed to add subscriber: %v", err) - } - glog.V(0).Infof("subscriber %s/%s/%s connected", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic) - } else { - return status.Errorf(codes.InvalidArgument, "subscriber init message is empty") - } - defer func() { - b.SubCoordinator.RemoveSubscriber(initMessage) - glog.V(0).Infof("subscriber %s/%s/%s disconnected: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) - }() - - ctx := stream.Context() - - go func() { - // process ack messages - for { - req, err := stream.Recv() - if err != nil { - glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) - } - - if ackUnAssignment := req.GetAckUnAssignment(); ackUnAssignment != nil { - glog.V(0).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment) - cg.AckUnAssignment(cgi, ackUnAssignment) - } - if ackAssignment := req.GetAckAssignment(); ackAssignment != nil { - glog.V(0).Infof("subscriber %s/%s/%s ack assignment %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackAssignment) - cg.AckAssignment(cgi, ackAssignment) - } - - select { - case <-ctx.Done(): - err := ctx.Err() - if err == context.Canceled { - // Client disconnected - return - } - return - default: - // Continue processing the request - } - } - }() - - // send commands to subscriber - for { - select { - case <-ctx.Done(): - err := ctx.Err() - if err == context.Canceled { - // Client disconnected - return err - } - glog.V(0).Infof("subscriber %s/%s/%s disconnected: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) - return err - case message := <-cgi.ResponseChan: - glog.V(0).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, message) - if err := stream.Send(message); err != nil { - glog.V(0).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) - } - } - } -} diff --git a/weed/mq/broker/broker_grpc_sub_follow.go b/weed/mq/broker/broker_grpc_sub_follow.go deleted file mode 100644 index 0a74274d7..000000000 --- a/weed/mq/broker/broker_grpc_sub_follow.go +++ /dev/null @@ -1,73 +0,0 @@ -package broker - -import ( - "fmt" - "io" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_SubscribeFollowMeServer) (err error) { - var req *mq_pb.SubscribeFollowMeRequest - req, err = stream.Recv() - if err != nil { - return err - } - initMessage := req.GetInit() - if initMessage == nil { - return fmt.Errorf("missing init message") - } - - // create an in-memory offset - var lastOffset int64 - - // follow each published messages - for { - // receive a message - req, err = stream.Recv() - if err != nil { - if err == io.EOF { - err = nil - break - } - glog.V(0).Infof("topic %v partition %v subscribe stream error: %v", initMessage.Topic, initMessage.Partition, err) - break - } - - // Process the received message - if ackMessage := req.GetAck(); ackMessage != nil { - lastOffset = ackMessage.TsNs - // println("sub follower got offset", lastOffset) - } else if closeMessage := req.GetClose(); closeMessage != nil { - glog.V(0).Infof("topic %v partition %v subscribe stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) - return nil - } else { - glog.Errorf("unknown message: %v", req) - } - } - - t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition) - - if lastOffset > 0 { - err = b.saveConsumerGroupOffset(t, p, initMessage.ConsumerGroup, lastOffset) - } - - glog.V(0).Infof("shut down follower for %v offset %d", initMessage, lastOffset) - - return err -} - -func (b *MessageQueueBroker) readConsumerGroupOffset(initMessage *mq_pb.SubscribeMessageRequest_InitMessage) (offset int64, err error) { - t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.PartitionOffset.Partition) - - // Use the offset manager's consumer group storage - return b.offsetManager.LoadConsumerGroupOffset(t, p, initMessage.ConsumerGroup) -} - -func (b *MessageQueueBroker) saveConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string, offset int64) error { - // Use the offset manager's consumer group storage - glog.V(0).Infof("saving topic %s partition %v consumer group %s offset %d", t, p, consumerGroup, offset) - return b.offsetManager.SaveConsumerGroupOffset(t, p, consumerGroup, offset) -} diff --git a/weed/mq/broker/broker_grpc_sub_offset.go b/weed/mq/broker/broker_grpc_sub_offset.go deleted file mode 100644 index b79d961d3..000000000 --- a/weed/mq/broker/broker_grpc_sub_offset.go +++ /dev/null @@ -1,253 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/offset" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -// SubscribeWithOffset handles subscription requests with offset-based positioning -// TODO: This extends the broker with offset-aware subscription support -// ASSUMPTION: This will eventually be integrated into the main SubscribeMessage method -func (b *MessageQueueBroker) SubscribeWithOffset( - ctx context.Context, - req *mq_pb.SubscribeMessageRequest, - stream mq_pb.SeaweedMessaging_SubscribeMessageServer, - offsetType schema_pb.OffsetType, - startOffset int64, -) error { - - initMessage := req.GetInit() - if initMessage == nil { - return fmt.Errorf("missing init message") - } - - // Extract partition information from the request - t := topic.FromPbTopic(initMessage.Topic) - - // Get partition from the request's partition_offset field - if initMessage.PartitionOffset == nil || initMessage.PartitionOffset.Partition == nil { - return fmt.Errorf("missing partition information in request") - } - - // Use the partition information from the request - p := topic.Partition{ - RingSize: initMessage.PartitionOffset.Partition.RingSize, - RangeStart: initMessage.PartitionOffset.Partition.RangeStart, - RangeStop: initMessage.PartitionOffset.Partition.RangeStop, - UnixTimeNs: initMessage.PartitionOffset.Partition.UnixTimeNs, - } - - // Create offset-based subscription - subscriptionID := fmt.Sprintf("%s-%s-%d", initMessage.ConsumerGroup, initMessage.ConsumerId, startOffset) - subscription, err := b.offsetManager.CreateSubscription(subscriptionID, t, p, offsetType, startOffset) - if err != nil { - return fmt.Errorf("failed to create offset subscription: %w", err) - } - - defer func() { - if closeErr := b.offsetManager.CloseSubscription(subscriptionID); closeErr != nil { - glog.V(0).Infof("Failed to close subscription %s: %v", subscriptionID, closeErr) - } - }() - - // Get local partition for reading - localTopicPartition, err := b.GetOrGenerateLocalPartition(t, p) - if err != nil { - return fmt.Errorf("topic %v partition %v not found: %v", t, p, err) - } - - // Subscribe to messages using offset-based positioning - return b.subscribeWithOffsetSubscription(ctx, localTopicPartition, subscription, stream, initMessage) -} - -// subscribeWithOffsetSubscription handles the actual message consumption with offset tracking -func (b *MessageQueueBroker) subscribeWithOffsetSubscription( - ctx context.Context, - localPartition *topic.LocalPartition, - subscription *offset.OffsetSubscription, - stream mq_pb.SeaweedMessaging_SubscribeMessageServer, - initMessage *mq_pb.SubscribeMessageRequest_InitMessage, -) error { - - clientName := fmt.Sprintf("%s-%s", initMessage.ConsumerGroup, initMessage.ConsumerId) - - // TODO: Implement offset-based message reading - // ASSUMPTION: For now, we'll use the existing subscription mechanism and track offsets separately - // This should be replaced with proper offset-based reading from storage - - // Convert the subscription's current offset to a proper MessagePosition - startPosition, err := b.convertOffsetToMessagePosition(subscription) - if err != nil { - return fmt.Errorf("failed to convert offset to message position: %w", err) - } - - glog.V(0).Infof("[%s] Starting Subscribe for topic %s partition %d-%d at offset %d", - clientName, subscription.TopicName, subscription.Partition.RangeStart, subscription.Partition.RangeStop, subscription.CurrentOffset) - - return localPartition.Subscribe(clientName, - startPosition, - func() bool { - // Check if context is cancelled (client disconnected) - select { - case <-ctx.Done(): - glog.V(0).Infof("[%s] Context cancelled, stopping", clientName) - return false - default: - } - - // Check if subscription is still active and not at end - if !subscription.IsActive { - glog.V(0).Infof("[%s] Subscription not active, stopping", clientName) - return false - } - - atEnd, err := subscription.IsAtEnd() - if err != nil { - glog.V(0).Infof("[%s] Error checking if subscription at end: %v", clientName, err) - return false - } - - if atEnd { - glog.V(4).Infof("[%s] At end of subscription, stopping", clientName) - return false - } - - // Add a small sleep to avoid CPU busy-wait when checking for new data - time.Sleep(10 * time.Millisecond) - return true - }, - func(logEntry *filer_pb.LogEntry) (bool, error) { - // Check if this message matches our offset requirements - currentOffset := subscription.GetNextOffset() - - if logEntry.Offset < currentOffset { - // Skip messages before our current offset - return false, nil - } - - // Send message to client - if err := stream.Send(&mq_pb.SubscribeMessageResponse{ - Message: &mq_pb.SubscribeMessageResponse_Data{ - Data: &mq_pb.DataMessage{ - Key: logEntry.Key, - Value: logEntry.Data, - TsNs: logEntry.TsNs, - }, - }, - }); err != nil { - glog.Errorf("Error sending data to %s: %v", clientName, err) - return false, err - } - - // Advance subscription offset - subscription.AdvanceOffset() - - // Check context for cancellation - select { - case <-ctx.Done(): - return true, ctx.Err() - default: - return false, nil - } - }) -} - -// GetSubscriptionInfo returns information about an active subscription -func (b *MessageQueueBroker) GetSubscriptionInfo(subscriptionID string) (map[string]interface{}, error) { - subscription, err := b.offsetManager.GetSubscription(subscriptionID) - if err != nil { - return nil, err - } - - lag, err := subscription.GetLag() - if err != nil { - return nil, err - } - - atEnd, err := subscription.IsAtEnd() - if err != nil { - return nil, err - } - - return map[string]interface{}{ - "subscription_id": subscription.ID, - "start_offset": subscription.StartOffset, - "current_offset": subscription.CurrentOffset, - "offset_type": subscription.OffsetType.String(), - "is_active": subscription.IsActive, - "lag": lag, - "at_end": atEnd, - }, nil -} - -// ListActiveSubscriptions returns information about all active subscriptions -func (b *MessageQueueBroker) ListActiveSubscriptions() ([]map[string]interface{}, error) { - subscriptions, err := b.offsetManager.ListActiveSubscriptions() - if err != nil { - return nil, err - } - - result := make([]map[string]interface{}, len(subscriptions)) - for i, subscription := range subscriptions { - lag, _ := subscription.GetLag() - atEnd, _ := subscription.IsAtEnd() - - result[i] = map[string]interface{}{ - "subscription_id": subscription.ID, - "start_offset": subscription.StartOffset, - "current_offset": subscription.CurrentOffset, - "offset_type": subscription.OffsetType.String(), - "is_active": subscription.IsActive, - "lag": lag, - "at_end": atEnd, - } - } - - return result, nil -} - -// SeekSubscription seeks an existing subscription to a specific offset -func (b *MessageQueueBroker) SeekSubscription(subscriptionID string, offset int64) error { - subscription, err := b.offsetManager.GetSubscription(subscriptionID) - if err != nil { - return err - } - - return subscription.SeekToOffset(offset) -} - -// convertOffsetToMessagePosition converts a subscription's current offset to a MessagePosition for log_buffer -func (b *MessageQueueBroker) convertOffsetToMessagePosition(subscription *offset.OffsetSubscription) (log_buffer.MessagePosition, error) { - currentOffset := subscription.GetNextOffset() - - // Handle special offset cases - switch subscription.OffsetType { - case schema_pb.OffsetType_RESET_TO_EARLIEST: - return log_buffer.NewMessagePosition(1, -3), nil - - case schema_pb.OffsetType_RESET_TO_LATEST: - return log_buffer.NewMessagePosition(time.Now().UnixNano(), -4), nil - - case schema_pb.OffsetType_EXACT_OFFSET: - // Use proper offset-based positioning that provides consistent results - // This uses the same approach as the main subscription handler in broker_grpc_sub.go - return log_buffer.NewMessagePositionFromOffset(currentOffset), nil - - case schema_pb.OffsetType_EXACT_TS_NS: - // For exact timestamps, use the timestamp directly - return log_buffer.NewMessagePosition(currentOffset, -2), nil - - default: - // Default to starting from current time for unknown offset types - return log_buffer.NewMessagePosition(time.Now().UnixNano(), -2), nil - } -} diff --git a/weed/mq/broker/broker_grpc_sub_offset_test.go b/weed/mq/broker/broker_grpc_sub_offset_test.go deleted file mode 100644 index f25a51259..000000000 --- a/weed/mq/broker/broker_grpc_sub_offset_test.go +++ /dev/null @@ -1,707 +0,0 @@ -package broker - -import ( - "fmt" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/offset" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -func TestConvertOffsetToMessagePosition(t *testing.T) { - broker := &MessageQueueBroker{} - - tests := []struct { - name string - offsetType schema_pb.OffsetType - currentOffset int64 - expectedBatch int64 - expectError bool - }{ - { - name: "reset to earliest", - offsetType: schema_pb.OffsetType_RESET_TO_EARLIEST, - currentOffset: 0, - expectedBatch: -3, - expectError: false, - }, - { - name: "reset to latest", - offsetType: schema_pb.OffsetType_RESET_TO_LATEST, - currentOffset: 0, - expectedBatch: -4, - expectError: false, - }, - { - name: "exact offset zero", - offsetType: schema_pb.OffsetType_EXACT_OFFSET, - currentOffset: 0, - expectedBatch: 0, // NewMessagePositionFromOffset stores offset directly in Offset field - expectError: false, - }, - { - name: "exact offset non-zero", - offsetType: schema_pb.OffsetType_EXACT_OFFSET, - currentOffset: 100, - expectedBatch: 100, // NewMessagePositionFromOffset stores offset directly in Offset field - expectError: false, - }, - { - name: "exact timestamp", - offsetType: schema_pb.OffsetType_EXACT_TS_NS, - currentOffset: 50, - expectedBatch: -2, - expectError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock subscription - subscription := &offset.OffsetSubscription{ - ID: "test-subscription", - CurrentOffset: tt.currentOffset, - OffsetType: tt.offsetType, - IsActive: true, - } - - position, err := broker.convertOffsetToMessagePosition(subscription) - - if tt.expectError && err == nil { - t.Error("Expected error but got none") - return - } - - if !tt.expectError && err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if position.Offset != tt.expectedBatch { - t.Errorf("Expected batch index %d, got %d", tt.expectedBatch, position.Offset) - } - - // Verify that the timestamp is reasonable (not zero for most cases) - // Note: EXACT_OFFSET uses epoch time (zero) with NewMessagePositionFromOffset - if tt.offsetType != schema_pb.OffsetType_RESET_TO_EARLIEST && - tt.offsetType != schema_pb.OffsetType_EXACT_OFFSET && - position.Time.IsZero() { - t.Error("Expected non-zero timestamp") - } - - }) - } -} - -func TestConvertOffsetToMessagePosition_OffsetEncoding(t *testing.T) { - broker := &MessageQueueBroker{} - - // Test that offset-based positions encode the offset correctly in Offset field - testCases := []struct { - offset int64 - expectedBatch int64 - expectedIsSentinel bool // Should timestamp be the offset sentinel value? - }{ - {10, 10, true}, - {100, 100, true}, - {0, 0, true}, - {42, 42, true}, - } - - for _, tc := range testCases { - t.Run(fmt.Sprintf("offset_%d", tc.offset), func(t *testing.T) { - subscription := &offset.OffsetSubscription{ - ID: fmt.Sprintf("test-%d", tc.offset), - CurrentOffset: tc.offset, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET, - IsActive: true, - } - - pos, err := broker.convertOffsetToMessagePosition(subscription) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - // Check Offset encoding - if pos.Offset != tc.expectedBatch { - t.Errorf("Expected batch index %d, got %d", tc.expectedBatch, pos.Offset) - } - - // Verify the offset can be extracted correctly using IsOffsetBased/GetOffset - if !pos.IsOffsetBased { - t.Error("Position should be detected as offset-based") - } - - // Check that IsOffsetBased flag is set correctly - if tc.expectedIsSentinel && !pos.IsOffsetBased { - t.Error("Expected offset-based position but IsOffsetBased=false") - } - - if extractedOffset := pos.GetOffset(); extractedOffset != tc.offset { - t.Errorf("Expected extracted offset %d, got %d", tc.offset, extractedOffset) - } - - }) - } -} - -func TestConvertOffsetToMessagePosition_ConsistentResults(t *testing.T) { - broker := &MessageQueueBroker{} - - subscription := &offset.OffsetSubscription{ - ID: "consistent-test", - CurrentOffset: 42, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET, - IsActive: true, - } - - // Call multiple times within a short period - positions := make([]log_buffer.MessagePosition, 5) - for i := 0; i < 5; i++ { - pos, err := broker.convertOffsetToMessagePosition(subscription) - if err != nil { - t.Fatalf("Unexpected error on iteration %d: %v", i, err) - } - positions[i] = pos - time.Sleep(1 * time.Millisecond) // Small delay - } - - // All positions should have the same Offset - for i := 1; i < len(positions); i++ { - if positions[i].Offset != positions[0].Offset { - t.Errorf("Inconsistent Offset: %d vs %d", positions[0].Offset, positions[i].Offset) - } - } - - // With NewMessagePositionFromOffset, timestamps should be identical (zero time for offset-based) - expectedTime := time.Time{} - for i := 0; i < len(positions); i++ { - if !positions[i].Time.Equal(expectedTime) { - t.Errorf("Expected all timestamps to be sentinel time (%v), got %v at index %d", - expectedTime, positions[i].Time, i) - } - } - -} - -func TestConvertOffsetToMessagePosition_FixVerification(t *testing.T) { - // This test specifically verifies that the fix addresses the issue mentioned: - // "The calculated timestamp for a given offset will change every time the function is called" - - broker := &MessageQueueBroker{} - - subscription := &offset.OffsetSubscription{ - ID: "fix-verification", - CurrentOffset: 123, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET, - IsActive: true, - } - - // Call the function multiple times with delays to simulate real-world usage - var positions []log_buffer.MessagePosition - var timestamps []int64 - - for i := 0; i < 10; i++ { - pos, err := broker.convertOffsetToMessagePosition(subscription) - if err != nil { - t.Fatalf("Unexpected error on iteration %d: %v", i, err) - } - positions = append(positions, pos) - timestamps = append(timestamps, pos.Time.UnixNano()) - time.Sleep(2 * time.Millisecond) // Small delay to ensure time progression - } - - // Verify ALL timestamps are identical (no time-based variance) - expectedTimestamp := timestamps[0] - for i, ts := range timestamps { - if ts != expectedTimestamp { - t.Errorf("Timestamp variance detected at call %d: expected %d, got %d", i, expectedTimestamp, ts) - } - } - - // Verify ALL Offset values are identical - expectedBatch := positions[0].Offset - for i, pos := range positions { - if pos.Offset != expectedBatch { - t.Errorf("Offset variance detected at call %d: expected %d, got %d", i, expectedBatch, pos.Offset) - } - } - - // Verify the offset can be consistently extracted - expectedOffset := subscription.CurrentOffset - for i, pos := range positions { - if extractedOffset := pos.GetOffset(); extractedOffset != expectedOffset { - t.Errorf("Extracted offset variance at call %d: expected %d, got %d", i, expectedOffset, extractedOffset) - } - } - -} - -func TestPartitionIdentityConsistency(t *testing.T) { - // Test that partition identity is preserved from request to avoid breaking offset manager keys - - // Create a mock init message with specific partition info - partition := &schema_pb.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: 1234567890123456789, // Fixed timestamp - } - - initMessage := &mq_pb.SubscribeMessageRequest_InitMessage{ - ConsumerGroup: "test-group", - ConsumerId: "test-consumer", - PartitionOffset: &schema_pb.PartitionOffset{ - Partition: partition, - }, - } - - // Simulate the partition creation logic from SubscribeWithOffset - p := topic.Partition{ - RingSize: initMessage.PartitionOffset.Partition.RingSize, - RangeStart: initMessage.PartitionOffset.Partition.RangeStart, - RangeStop: initMessage.PartitionOffset.Partition.RangeStop, - UnixTimeNs: initMessage.PartitionOffset.Partition.UnixTimeNs, - } - - // Verify that the partition preserves the original UnixTimeNs - if p.UnixTimeNs != partition.UnixTimeNs { - t.Errorf("Partition UnixTimeNs not preserved: expected %d, got %d", - partition.UnixTimeNs, p.UnixTimeNs) - } - - // Verify partition key consistency - expectedKey := fmt.Sprintf("ring:%d:range:%d-%d:time:%d", - partition.RingSize, partition.RangeStart, partition.RangeStop, partition.UnixTimeNs) - - actualKey := fmt.Sprintf("ring:%d:range:%d-%d:time:%d", - p.RingSize, p.RangeStart, p.RangeStop, p.UnixTimeNs) - - if actualKey != expectedKey { - t.Errorf("Partition key mismatch: expected %s, got %s", expectedKey, actualKey) - } - -} - -func TestBrokerOffsetManager_GetSubscription_Fixed(t *testing.T) { - // Test that GetSubscription now works correctly after the fix - - storage := NewInMemoryOffsetStorageForTesting() - offsetManager := NewBrokerOffsetManagerWithStorage(storage) - - // Create test topic and partition - testTopic := topic.Topic{Namespace: "test", Name: "topic1"} - testPartition := topic.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // Test getting non-existent subscription - _, err := offsetManager.GetSubscription("non-existent") - if err == nil { - t.Error("Expected error for non-existent subscription") - } - - // Create a subscription - subscriptionID := "test-subscription-fixed" - subscription, err := offsetManager.CreateSubscription( - subscriptionID, - testTopic, - testPartition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Test getting existing subscription (this should now work) - retrievedSub, err := offsetManager.GetSubscription(subscriptionID) - if err != nil { - t.Fatalf("GetSubscription failed after fix: %v", err) - } - - if retrievedSub.ID != subscription.ID { - t.Errorf("Expected subscription ID %s, got %s", subscription.ID, retrievedSub.ID) - } - - if retrievedSub.OffsetType != subscription.OffsetType { - t.Errorf("Expected offset type %v, got %v", subscription.OffsetType, retrievedSub.OffsetType) - } - -} - -func TestBrokerOffsetManager_ListActiveSubscriptions_Fixed(t *testing.T) { - // Test that ListActiveSubscriptions now works correctly after the fix - - storage := NewInMemoryOffsetStorageForTesting() - offsetManager := NewBrokerOffsetManagerWithStorage(storage) - - // Create test topic and partition - testTopic := topic.Topic{Namespace: "test", Name: "topic1"} - testPartition := topic.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // Initially should have no subscriptions - subscriptions, err := offsetManager.ListActiveSubscriptions() - if err != nil { - t.Fatalf("ListActiveSubscriptions failed after fix: %v", err) - } - if len(subscriptions) != 0 { - t.Errorf("Expected 0 subscriptions, got %d", len(subscriptions)) - } - - // Create multiple subscriptions (use RESET types to avoid HWM validation issues) - subscriptionIDs := []string{"sub-fixed-1", "sub-fixed-2", "sub-fixed-3"} - offsetTypes := []schema_pb.OffsetType{ - schema_pb.OffsetType_RESET_TO_EARLIEST, - schema_pb.OffsetType_RESET_TO_LATEST, - schema_pb.OffsetType_RESET_TO_EARLIEST, // Changed from EXACT_OFFSET - } - - for i, subID := range subscriptionIDs { - _, err := offsetManager.CreateSubscription( - subID, - testTopic, - testPartition, - offsetTypes[i], - 0, // Use 0 for all to avoid validation issues - ) - if err != nil { - t.Fatalf("Failed to create subscription %s: %v", subID, err) - } - } - - // List all subscriptions (this should now work) - subscriptions, err = offsetManager.ListActiveSubscriptions() - if err != nil { - t.Fatalf("ListActiveSubscriptions failed after fix: %v", err) - } - - if len(subscriptions) != len(subscriptionIDs) { - t.Errorf("Expected %d subscriptions, got %d", len(subscriptionIDs), len(subscriptions)) - } - - // Verify all subscriptions are active - for _, sub := range subscriptions { - if !sub.IsActive { - t.Errorf("Subscription %s should be active", sub.ID) - } - } - -} - -func TestMessageQueueBroker_ListActiveSubscriptions_Fixed(t *testing.T) { - // Test that the broker-level ListActiveSubscriptions now works correctly - - storage := NewInMemoryOffsetStorageForTesting() - offsetManager := NewBrokerOffsetManagerWithStorage(storage) - - broker := &MessageQueueBroker{ - offsetManager: offsetManager, - } - - // Create test topic and partition - testTopic := topic.Topic{Namespace: "test", Name: "topic1"} - testPartition := topic.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // Initially should have no subscriptions - subscriptionInfos, err := broker.ListActiveSubscriptions() - if err != nil { - t.Fatalf("Broker ListActiveSubscriptions failed after fix: %v", err) - } - if len(subscriptionInfos) != 0 { - t.Errorf("Expected 0 subscription infos, got %d", len(subscriptionInfos)) - } - - // Create subscriptions with different offset types (use RESET types to avoid HWM validation issues) - testCases := []struct { - id string - offsetType schema_pb.OffsetType - startOffset int64 - }{ - {"broker-earliest-sub", schema_pb.OffsetType_RESET_TO_EARLIEST, 0}, - {"broker-latest-sub", schema_pb.OffsetType_RESET_TO_LATEST, 0}, - {"broker-reset-sub", schema_pb.OffsetType_RESET_TO_EARLIEST, 0}, // Changed from EXACT_OFFSET - } - - for _, tc := range testCases { - _, err := broker.offsetManager.CreateSubscription( - tc.id, - testTopic, - testPartition, - tc.offsetType, - tc.startOffset, - ) - if err != nil { - t.Fatalf("Failed to create subscription %s: %v", tc.id, err) - } - } - - // List subscription infos (this should now work) - subscriptionInfos, err = broker.ListActiveSubscriptions() - if err != nil { - t.Fatalf("Broker ListActiveSubscriptions failed after fix: %v", err) - } - - if len(subscriptionInfos) != len(testCases) { - t.Errorf("Expected %d subscription infos, got %d", len(testCases), len(subscriptionInfos)) - } - - // Verify subscription info structure - for _, info := range subscriptionInfos { - // Check required fields - requiredFields := []string{ - "subscription_id", "start_offset", "current_offset", - "offset_type", "is_active", "lag", "at_end", - } - - for _, field := range requiredFields { - if _, ok := info[field]; !ok { - t.Errorf("Missing field %s in subscription info", field) - } - } - - // Verify is_active is true - if isActive, ok := info["is_active"].(bool); !ok || !isActive { - t.Errorf("Expected is_active to be true, got %v", info["is_active"]) - } - - } -} - -func TestSingleWriterPerPartitionCorrectness(t *testing.T) { - // Test that demonstrates correctness under single-writer-per-partition model - - // Simulate two brokers with separate offset managers but same partition - storage1 := NewInMemoryOffsetStorageForTesting() - storage2 := NewInMemoryOffsetStorageForTesting() - - offsetManager1 := NewBrokerOffsetManagerWithStorage(storage1) - offsetManager2 := NewBrokerOffsetManagerWithStorage(storage2) - - broker1 := &MessageQueueBroker{offsetManager: offsetManager1} - broker2 := &MessageQueueBroker{offsetManager: offsetManager2} - - // Same partition identity (this is key for correctness) - fixedTimestamp := time.Now().UnixNano() - testTopic := topic.Topic{Namespace: "test", Name: "shared-topic"} - testPartition := topic.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: fixedTimestamp, // Same timestamp = same partition identity - } - - // Broker 1 is the leader for this partition - assigns offsets - baseOffset, lastOffset, err := broker1.offsetManager.AssignBatchOffsets(testTopic, testPartition, 10) - if err != nil { - t.Fatalf("Failed to assign offsets on broker1: %v", err) - } - - if baseOffset != 0 || lastOffset != 9 { - t.Errorf("Expected offsets 0-9, got %d-%d", baseOffset, lastOffset) - } - - // Get HWM from leader - hwm1, err := broker1.offsetManager.GetHighWaterMark(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get HWM from broker1: %v", err) - } - - if hwm1 != 10 { - t.Errorf("Expected HWM 10 on leader, got %d", hwm1) - } - - // Broker 2 is a follower - should have HWM 0 (no local assignments) - hwm2, err := broker2.offsetManager.GetHighWaterMark(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get HWM from broker2: %v", err) - } - - if hwm2 != 0 { - t.Errorf("Expected HWM 0 on follower, got %d", hwm2) - } - - // Create subscription on leader (where offsets were assigned) - subscription1, err := broker1.offsetManager.CreateSubscription( - "leader-subscription", - testTopic, - testPartition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription on leader: %v", err) - } - - // Verify subscription can see the correct HWM - lag1, err := subscription1.GetLag() - if err != nil { - t.Fatalf("Failed to get lag on leader subscription: %v", err) - } - - if lag1 != 10 { - t.Errorf("Expected lag 10 on leader subscription, got %d", lag1) - } - - // Create subscription on follower (should have different lag due to local HWM) - subscription2, err := broker2.offsetManager.CreateSubscription( - "follower-subscription", - testTopic, - testPartition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription on follower: %v", err) - } - - lag2, err := subscription2.GetLag() - if err != nil { - t.Fatalf("Failed to get lag on follower subscription: %v", err) - } - - if lag2 != 0 { - t.Errorf("Expected lag 0 on follower subscription (no local data), got %d", lag2) - } - -} - -func TestEndToEndWorkflowAfterFixes(t *testing.T) { - // Test the complete workflow with all fixes applied - - storage := NewInMemoryOffsetStorageForTesting() - offsetManager := NewBrokerOffsetManagerWithStorage(storage) - - broker := &MessageQueueBroker{ - offsetManager: offsetManager, - } - - // Create test topic and partition with fixed timestamp - fixedTimestamp := time.Now().UnixNano() - testTopic := topic.Topic{Namespace: "test", Name: "e2e-topic"} - testPartition := topic.Partition{ - RingSize: 32, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: fixedTimestamp, - } - - subscriptionID := "e2e-test-sub" - - // 1. Create subscription (use RESET_TO_EARLIEST to avoid HWM validation issues) - subscription, err := broker.offsetManager.CreateSubscription( - subscriptionID, - testTopic, - testPartition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // 2. Verify GetSubscription works - retrievedSub, err := broker.offsetManager.GetSubscription(subscriptionID) - if err != nil { - t.Fatalf("GetSubscription failed: %v", err) - } - - if retrievedSub.ID != subscription.ID { - t.Errorf("GetSubscription returned wrong subscription: expected %s, got %s", - subscription.ID, retrievedSub.ID) - } - - // 3. Verify it appears in active list - activeList, err := broker.ListActiveSubscriptions() - if err != nil { - t.Fatalf("Failed to list active subscriptions: %v", err) - } - - found := false - for _, info := range activeList { - if info["subscription_id"] == subscriptionID { - found = true - break - } - } - if !found { - t.Error("New subscription not found in active list") - } - - // 4. Get subscription info - info, err := broker.GetSubscriptionInfo(subscriptionID) - if err != nil { - t.Fatalf("Failed to get subscription info: %v", err) - } - - if info["subscription_id"] != subscriptionID { - t.Errorf("Wrong subscription ID in info: expected %s, got %v", subscriptionID, info["subscription_id"]) - } - - // 5. Assign some offsets to create data for seeking - _, _, err = broker.offsetManager.AssignBatchOffsets(testTopic, testPartition, 50) - if err != nil { - t.Fatalf("Failed to assign offsets: %v", err) - } - - // 6. Seek subscription - newOffset := int64(42) - err = broker.SeekSubscription(subscriptionID, newOffset) - if err != nil { - t.Fatalf("Failed to seek subscription: %v", err) - } - - // 7. Verify seek worked - updatedInfo, err := broker.GetSubscriptionInfo(subscriptionID) - if err != nil { - t.Fatalf("Failed to get updated subscription info: %v", err) - } - - if updatedInfo["current_offset"] != newOffset { - t.Errorf("Seek didn't work: expected offset %d, got %v", newOffset, updatedInfo["current_offset"]) - } - - // 8. Test offset to timestamp conversion with fixed partition identity - updatedSub, err := broker.offsetManager.GetSubscription(subscriptionID) - if err != nil { - t.Fatalf("Failed to get updated subscription: %v", err) - } - - position, err := broker.convertOffsetToMessagePosition(updatedSub) - if err != nil { - t.Fatalf("Failed to convert offset to position: %v", err) - } - - if position.Time.IsZero() { - t.Error("Expected non-zero timestamp from conversion") - } - - // 9. Verify partition identity consistency throughout - partitionKey1 := fmt.Sprintf("ring:%d:range:%d-%d:time:%d", - testPartition.RingSize, testPartition.RangeStart, testPartition.RangeStop, testPartition.UnixTimeNs) - - partitionKey2 := fmt.Sprintf("ring:%d:range:%d-%d:time:%d", - testPartition.RingSize, testPartition.RangeStart, testPartition.RangeStop, fixedTimestamp) - - if partitionKey1 != partitionKey2 { - t.Errorf("Partition key inconsistency: %s != %s", partitionKey1, partitionKey2) - } - -} diff --git a/weed/mq/broker/broker_grpc_topic_partition_control.go b/weed/mq/broker/broker_grpc_topic_partition_control.go deleted file mode 100644 index 66547b010..000000000 --- a/weed/mq/broker/broker_grpc_topic_partition_control.go +++ /dev/null @@ -1,28 +0,0 @@ -package broker - -import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func (b *MessageQueueBroker) ClosePublishers(ctx context.Context, request *mq_pb.ClosePublishersRequest) (resp *mq_pb.ClosePublishersResponse, err error) { - resp = &mq_pb.ClosePublishersResponse{} - - t := topic.FromPbTopic(request.Topic) - - b.localTopicManager.ClosePublishers(t, request.UnixTimeNs) - - // wait until all publishers are closed - b.localTopicManager.WaitUntilNoPublishers(t) - - return -} - -func (b *MessageQueueBroker) CloseSubscribers(ctx context.Context, request *mq_pb.CloseSubscribersRequest) (resp *mq_pb.CloseSubscribersResponse, err error) { - resp = &mq_pb.CloseSubscribersResponse{} - - b.localTopicManager.CloseSubscribers(topic.FromPbTopic(request.Topic), request.UnixTimeNs) - - return -} diff --git a/weed/mq/broker/broker_log_buffer_offset.go b/weed/mq/broker/broker_log_buffer_offset.go deleted file mode 100644 index aeb8fad1b..000000000 --- a/weed/mq/broker/broker_log_buffer_offset.go +++ /dev/null @@ -1,169 +0,0 @@ -package broker - -import ( - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "google.golang.org/protobuf/proto" -) - -// OffsetAssignmentFunc is a function type for assigning offsets to messages -type OffsetAssignmentFunc func() (int64, error) - -// AddToBufferWithOffset adds a message to the log buffer with offset assignment -// TODO: This is a temporary solution until LogBuffer can be modified to accept offset assignment -// ASSUMPTION: This function will be integrated into LogBuffer.AddToBuffer in the future -func (b *MessageQueueBroker) AddToBufferWithOffset( - logBuffer *log_buffer.LogBuffer, - message *mq_pb.DataMessage, - t topic.Topic, - p topic.Partition, -) error { - // Assign offset for this message - offset, err := b.offsetManager.AssignOffset(t, p) - if err != nil { - return err - } - - // PERFORMANCE OPTIMIZATION: Pre-process expensive operations OUTSIDE the lock - var ts time.Time - processingTsNs := message.TsNs - if processingTsNs == 0 { - ts = time.Now() - processingTsNs = ts.UnixNano() - } else { - ts = time.Unix(0, processingTsNs) - } - - // Create LogEntry with assigned offset - logEntry := &filer_pb.LogEntry{ - TsNs: processingTsNs, - PartitionKeyHash: util.HashToInt32(message.Key), - Data: message.Value, - Key: message.Key, - Offset: offset, // Add the assigned offset - } - - logEntryData, err := proto.Marshal(logEntry) - if err != nil { - return err - } - - // Use the existing LogBuffer infrastructure for the rest - // TODO: This is a workaround - ideally LogBuffer should handle offset assignment - // For now, we'll add the message with the pre-assigned offset - return b.addLogEntryToBuffer(logBuffer, logEntry, logEntryData, ts) -} - -// addLogEntryToBuffer adds a pre-constructed LogEntry to the buffer -// This is a helper function that mimics LogBuffer.AddDataToBuffer but with a pre-built LogEntry -func (b *MessageQueueBroker) addLogEntryToBuffer( - logBuffer *log_buffer.LogBuffer, - logEntry *filer_pb.LogEntry, - logEntryData []byte, - ts time.Time, -) error { - // TODO: This is a simplified version of LogBuffer.AddDataToBuffer - // ASSUMPTION: We're bypassing some of the LogBuffer's internal logic - // This should be properly integrated when LogBuffer is modified - - // Use the new AddLogEntryToBuffer method to preserve offset information - // This ensures the offset is maintained throughout the entire data flow - logBuffer.AddLogEntryToBuffer(logEntry) - return nil -} - -// GetPartitionOffsetInfoInternal returns offset information for a partition (internal method) -func (b *MessageQueueBroker) GetPartitionOffsetInfoInternal(t topic.Topic, p topic.Partition) (*PartitionOffsetInfo, error) { - info, err := b.offsetManager.GetPartitionOffsetInfo(t, p) - if err != nil { - return nil, err - } - - // CRITICAL FIX: Also check LogBuffer for in-memory messages - // The offset manager only tracks assigned offsets from persistent storage - // But the LogBuffer contains recently written messages that haven't been flushed yet - localPartition := b.localTopicManager.GetLocalPartition(t, p) - logBufferHWM := int64(-1) - if localPartition != nil && localPartition.LogBuffer != nil { - logBufferHWM = localPartition.LogBuffer.GetOffset() - } else { - } - - // Use the MAX of offset manager HWM and LogBuffer HWM - // This ensures we report the correct HWM even if data hasn't been flushed to disk yet - // IMPORTANT: Use >= not > because when they're equal, we still want the correct value - highWaterMark := info.HighWaterMark - if logBufferHWM >= 0 && logBufferHWM > highWaterMark { - highWaterMark = logBufferHWM - } else if logBufferHWM >= 0 && logBufferHWM == highWaterMark && highWaterMark > 0 { - } else if logBufferHWM >= 0 { - } - - // Latest offset is HWM - 1 (last assigned offset) - latestOffset := highWaterMark - 1 - if highWaterMark == 0 { - latestOffset = -1 // No records - } - - // Convert to broker-specific format - return &PartitionOffsetInfo{ - Topic: t, - Partition: p, - EarliestOffset: info.EarliestOffset, - LatestOffset: latestOffset, - HighWaterMark: highWaterMark, - RecordCount: highWaterMark, // HWM equals record count (offsets 0 to HWM-1) - ActiveSubscriptions: info.ActiveSubscriptions, - }, nil -} - -// PartitionOffsetInfo provides offset information for a partition (broker-specific) -type PartitionOffsetInfo struct { - Topic topic.Topic - Partition topic.Partition - EarliestOffset int64 - LatestOffset int64 - HighWaterMark int64 - RecordCount int64 - ActiveSubscriptions int64 -} - -// CreateOffsetSubscription creates an offset-based subscription through the broker -func (b *MessageQueueBroker) CreateOffsetSubscription( - subscriptionID string, - t topic.Topic, - p topic.Partition, - offsetType string, // Will be converted to schema_pb.OffsetType - startOffset int64, -) error { - // TODO: Convert string offsetType to schema_pb.OffsetType - // ASSUMPTION: For now using RESET_TO_EARLIEST as default - // This should be properly mapped based on the offsetType parameter - - _, err := b.offsetManager.CreateSubscription( - subscriptionID, - t, - p, - 0, // schema_pb.OffsetType_RESET_TO_EARLIEST - startOffset, - ) - - return err -} - -// GetOffsetMetrics returns offset metrics for monitoring -func (b *MessageQueueBroker) GetOffsetMetrics() map[string]interface{} { - metrics := b.offsetManager.GetOffsetMetrics() - - return map[string]interface{}{ - "partition_count": metrics.PartitionCount, - "total_offsets": metrics.TotalOffsets, - "active_subscriptions": metrics.ActiveSubscriptions, - "average_latency": metrics.AverageLatency, - } -} diff --git a/weed/mq/broker/broker_offset_integration_test.go b/weed/mq/broker/broker_offset_integration_test.go deleted file mode 100644 index 49df58a64..000000000 --- a/weed/mq/broker/broker_offset_integration_test.go +++ /dev/null @@ -1,351 +0,0 @@ -package broker - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func createTestTopic() topic.Topic { - return topic.Topic{ - Namespace: "test", - Name: "offset-test", - } -} - -func createTestPartition() topic.Partition { - return topic.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } -} - -func TestBrokerOffsetManager_AssignOffset(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Test sequential offset assignment - for i := int64(0); i < 10; i++ { - assignedOffset, err := manager.AssignOffset(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to assign offset %d: %v", i, err) - } - - if assignedOffset != i { - t.Errorf("Expected offset %d, got %d", i, assignedOffset) - } - } -} - -func TestBrokerOffsetManager_AssignBatchOffsets(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Assign batch of offsets - baseOffset, lastOffset, err := manager.AssignBatchOffsets(testTopic, testPartition, 5) - if err != nil { - t.Fatalf("Failed to assign batch offsets: %v", err) - } - - if baseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", baseOffset) - } - - if lastOffset != 4 { - t.Errorf("Expected last offset 4, got %d", lastOffset) - } - - // Assign another batch - baseOffset2, lastOffset2, err := manager.AssignBatchOffsets(testTopic, testPartition, 3) - if err != nil { - t.Fatalf("Failed to assign second batch offsets: %v", err) - } - - if baseOffset2 != 5 { - t.Errorf("Expected base offset 5, got %d", baseOffset2) - } - - if lastOffset2 != 7 { - t.Errorf("Expected last offset 7, got %d", lastOffset2) - } -} - -func TestBrokerOffsetManager_GetHighWaterMark(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Initially should be 0 - hwm, err := manager.GetHighWaterMark(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get initial high water mark: %v", err) - } - - if hwm != 0 { - t.Errorf("Expected initial high water mark 0, got %d", hwm) - } - - // Assign some offsets - manager.AssignBatchOffsets(testTopic, testPartition, 10) - - // High water mark should be updated - hwm, err = manager.GetHighWaterMark(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get high water mark after assignment: %v", err) - } - - if hwm != 10 { - t.Errorf("Expected high water mark 10, got %d", hwm) - } -} - -func TestBrokerOffsetManager_CreateSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Assign some offsets first - manager.AssignBatchOffsets(testTopic, testPartition, 5) - - // Create subscription - sub, err := manager.CreateSubscription( - "test-sub", - testTopic, - testPartition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - if sub.ID != "test-sub" { - t.Errorf("Expected subscription ID 'test-sub', got %s", sub.ID) - } - - if sub.StartOffset != 0 { - t.Errorf("Expected start offset 0, got %d", sub.StartOffset) - } -} - -func TestBrokerOffsetManager_GetPartitionOffsetInfo(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Test empty partition - info, err := manager.GetPartitionOffsetInfo(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get partition offset info: %v", err) - } - - if info.EarliestOffset != 0 { - t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset) - } - - if info.LatestOffset != -1 { - t.Errorf("Expected latest offset -1 for empty partition, got %d", info.LatestOffset) - } - - // Assign offsets and test again - manager.AssignBatchOffsets(testTopic, testPartition, 5) - - info, err = manager.GetPartitionOffsetInfo(testTopic, testPartition) - if err != nil { - t.Fatalf("Failed to get partition offset info after assignment: %v", err) - } - - if info.LatestOffset != 4 { - t.Errorf("Expected latest offset 4, got %d", info.LatestOffset) - } - - if info.HighWaterMark != 5 { - t.Errorf("Expected high water mark 5, got %d", info.HighWaterMark) - } -} - -func TestBrokerOffsetManager_MultiplePartitions(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - - // Create different partitions - partition1 := topic.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - partition2 := topic.Partition{ - RingSize: 1024, - RangeStart: 32, - RangeStop: 63, - UnixTimeNs: time.Now().UnixNano(), - } - - // Assign offsets to different partitions - assignedOffset1, err := manager.AssignOffset(testTopic, partition1) - if err != nil { - t.Fatalf("Failed to assign offset to partition1: %v", err) - } - - assignedOffset2, err := manager.AssignOffset(testTopic, partition2) - if err != nil { - t.Fatalf("Failed to assign offset to partition2: %v", err) - } - - // Both should start at 0 - if assignedOffset1 != 0 { - t.Errorf("Expected offset 0 for partition1, got %d", assignedOffset1) - } - - if assignedOffset2 != 0 { - t.Errorf("Expected offset 0 for partition2, got %d", assignedOffset2) - } - - // Assign more offsets to partition1 - assignedOffset1_2, err := manager.AssignOffset(testTopic, partition1) - if err != nil { - t.Fatalf("Failed to assign second offset to partition1: %v", err) - } - - if assignedOffset1_2 != 1 { - t.Errorf("Expected offset 1 for partition1, got %d", assignedOffset1_2) - } - - // Partition2 should still be at 0 for next assignment - assignedOffset2_2, err := manager.AssignOffset(testTopic, partition2) - if err != nil { - t.Fatalf("Failed to assign second offset to partition2: %v", err) - } - - if assignedOffset2_2 != 1 { - t.Errorf("Expected offset 1 for partition2, got %d", assignedOffset2_2) - } -} - -func TestOffsetAwarePublisher(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Create a mock local partition (simplified for testing) - localPartition := &topic.LocalPartition{} - - // Create offset assignment function - assignOffsetFn := func() (int64, error) { - return manager.AssignOffset(testTopic, testPartition) - } - - // Create offset-aware publisher - publisher := topic.NewOffsetAwarePublisher(localPartition, assignOffsetFn) - - if publisher.GetPartition() != localPartition { - t.Error("Publisher should return the correct partition") - } - - // Test would require more setup to actually publish messages - // This tests the basic structure -} - -func TestBrokerOffsetManager_GetOffsetMetrics(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Initial metrics - metrics := manager.GetOffsetMetrics() - if metrics.TotalOffsets != 0 { - t.Errorf("Expected 0 total offsets initially, got %d", metrics.TotalOffsets) - } - - // Assign some offsets - manager.AssignBatchOffsets(testTopic, testPartition, 5) - - // Create subscription - manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - - // Check updated metrics - metrics = manager.GetOffsetMetrics() - if metrics.PartitionCount != 1 { - t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount) - } -} - -func TestBrokerOffsetManager_AssignOffsetsWithResult(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Assign offsets with result - result := manager.AssignOffsetsWithResult(testTopic, testPartition, 3) - - if result.Error != nil { - t.Fatalf("Expected no error, got: %v", result.Error) - } - - if result.BaseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", result.BaseOffset) - } - - if result.LastOffset != 2 { - t.Errorf("Expected last offset 2, got %d", result.LastOffset) - } - - if result.Count != 3 { - t.Errorf("Expected count 3, got %d", result.Count) - } - - if result.Topic != testTopic { - t.Error("Topic mismatch in result") - } - - if result.Partition != testPartition { - t.Error("Partition mismatch in result") - } - - if result.Timestamp <= 0 { - t.Error("Timestamp should be set") - } -} - -func TestBrokerOffsetManager_Shutdown(t *testing.T) { - storage := NewInMemoryOffsetStorageForTesting() - manager := NewBrokerOffsetManagerWithStorage(storage) - testTopic := createTestTopic() - testPartition := createTestPartition() - - // Assign some offsets and create subscriptions - manager.AssignBatchOffsets(testTopic, testPartition, 5) - manager.CreateSubscription("test-sub", testTopic, testPartition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - - // Shutdown should not panic - manager.Shutdown() - - // After shutdown, operations should still work (using new managers) - offset, err := manager.AssignOffset(testTopic, testPartition) - if err != nil { - t.Fatalf("Operations should still work after shutdown: %v", err) - } - - // Should start from 0 again (new manager) - if offset != 0 { - t.Errorf("Expected offset 0 after shutdown, got %d", offset) - } -} diff --git a/weed/mq/broker/broker_offset_manager.go b/weed/mq/broker/broker_offset_manager.go deleted file mode 100644 index f12f2efc5..000000000 --- a/weed/mq/broker/broker_offset_manager.go +++ /dev/null @@ -1,202 +0,0 @@ -package broker - -import ( - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/mq/offset" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// BrokerOffsetManager manages offset assignment for all partitions in a broker -type BrokerOffsetManager struct { - mu sync.RWMutex - offsetIntegration *offset.SMQOffsetIntegration - storage offset.OffsetStorage - consumerGroupStorage offset.ConsumerGroupOffsetStorage -} - -// NewBrokerOffsetManagerWithFilerAccessor creates a new broker offset manager using existing filer client accessor -func NewBrokerOffsetManagerWithFilerAccessor(filerAccessor *filer_client.FilerClientAccessor) *BrokerOffsetManager { - // Create filer storage using the accessor directly - no duplicate connection management - filerStorage := offset.NewFilerOffsetStorageWithAccessor(filerAccessor) - - // Create consumer group storage using the accessor directly - consumerGroupStorage := offset.NewFilerConsumerGroupOffsetStorageWithAccessor(filerAccessor) - - return &BrokerOffsetManager{ - offsetIntegration: offset.NewSMQOffsetIntegration(filerStorage), - storage: filerStorage, - consumerGroupStorage: consumerGroupStorage, - } -} - -// AssignOffset assigns the next offset for a partition -func (bom *BrokerOffsetManager) AssignOffset(t topic.Topic, p topic.Partition) (int64, error) { - partition := topicPartitionToSchemaPartition(t, p) - - // Use the integration layer's offset assigner to ensure consistency with subscriptions - result := bom.offsetIntegration.AssignSingleOffset(t.Namespace, t.Name, partition) - if result.Error != nil { - return 0, result.Error - } - - return result.Assignment.Offset, nil -} - -// AssignBatchOffsets assigns a batch of offsets for a partition -func (bom *BrokerOffsetManager) AssignBatchOffsets(t topic.Topic, p topic.Partition, count int64) (baseOffset, lastOffset int64, err error) { - partition := topicPartitionToSchemaPartition(t, p) - - // Use the integration layer's offset assigner to ensure consistency with subscriptions - result := bom.offsetIntegration.AssignBatchOffsets(t.Namespace, t.Name, partition, count) - if result.Error != nil { - return 0, 0, result.Error - } - - return result.Batch.BaseOffset, result.Batch.LastOffset, nil -} - -// GetHighWaterMark returns the high water mark for a partition -func (bom *BrokerOffsetManager) GetHighWaterMark(t topic.Topic, p topic.Partition) (int64, error) { - partition := topicPartitionToSchemaPartition(t, p) - - // Use the integration layer's offset assigner to ensure consistency with subscriptions - return bom.offsetIntegration.GetHighWaterMark(t.Namespace, t.Name, partition) -} - -// CreateSubscription creates an offset-based subscription -func (bom *BrokerOffsetManager) CreateSubscription( - subscriptionID string, - t topic.Topic, - p topic.Partition, - offsetType schema_pb.OffsetType, - startOffset int64, -) (*offset.OffsetSubscription, error) { - partition := topicPartitionToSchemaPartition(t, p) - return bom.offsetIntegration.CreateSubscription(subscriptionID, t.Namespace, t.Name, partition, offsetType, startOffset) -} - -// GetSubscription retrieves an existing subscription -func (bom *BrokerOffsetManager) GetSubscription(subscriptionID string) (*offset.OffsetSubscription, error) { - return bom.offsetIntegration.GetSubscription(subscriptionID) -} - -// CloseSubscription closes a subscription -func (bom *BrokerOffsetManager) CloseSubscription(subscriptionID string) error { - return bom.offsetIntegration.CloseSubscription(subscriptionID) -} - -// ListActiveSubscriptions returns all active subscriptions -func (bom *BrokerOffsetManager) ListActiveSubscriptions() ([]*offset.OffsetSubscription, error) { - return bom.offsetIntegration.ListActiveSubscriptions() -} - -// GetPartitionOffsetInfo returns comprehensive offset information for a partition -func (bom *BrokerOffsetManager) GetPartitionOffsetInfo(t topic.Topic, p topic.Partition) (*offset.PartitionOffsetInfo, error) { - partition := topicPartitionToSchemaPartition(t, p) - - // Use the integration layer to ensure consistency with subscriptions - return bom.offsetIntegration.GetPartitionOffsetInfo(t.Namespace, t.Name, partition) -} - -// topicPartitionToSchemaPartition converts topic.Topic and topic.Partition to schema_pb.Partition -func topicPartitionToSchemaPartition(t topic.Topic, p topic.Partition) *schema_pb.Partition { - return &schema_pb.Partition{ - RingSize: int32(p.RingSize), - RangeStart: int32(p.RangeStart), - RangeStop: int32(p.RangeStop), - UnixTimeNs: p.UnixTimeNs, - } -} - -// OffsetAssignmentResult contains the result of offset assignment for logging/metrics -type OffsetAssignmentResult struct { - Topic topic.Topic - Partition topic.Partition - BaseOffset int64 - LastOffset int64 - Count int64 - Timestamp int64 - Error error -} - -// AssignOffsetsWithResult assigns offsets and returns detailed result for logging/metrics -func (bom *BrokerOffsetManager) AssignOffsetsWithResult(t topic.Topic, p topic.Partition, count int64) *OffsetAssignmentResult { - baseOffset, lastOffset, err := bom.AssignBatchOffsets(t, p, count) - - result := &OffsetAssignmentResult{ - Topic: t, - Partition: p, - Count: count, - Error: err, - } - - if err == nil { - result.BaseOffset = baseOffset - result.LastOffset = lastOffset - result.Timestamp = time.Now().UnixNano() - } - - return result -} - -// GetOffsetMetrics returns metrics about offset usage across all partitions -func (bom *BrokerOffsetManager) GetOffsetMetrics() *offset.OffsetMetrics { - // Use the integration layer to ensure consistency with subscriptions - return bom.offsetIntegration.GetOffsetMetrics() -} - -// Shutdown gracefully shuts down the offset manager -func (bom *BrokerOffsetManager) Shutdown() { - bom.mu.Lock() - defer bom.mu.Unlock() - - // Reset the underlying storage to ensure clean restart behavior - // This is important for testing where we want offsets to start from 0 after shutdown - if bom.storage != nil { - if resettable, ok := bom.storage.(interface{ Reset() error }); ok { - resettable.Reset() - } - } - - // Reset the integration layer to ensure clean restart behavior - bom.offsetIntegration.Reset() -} - -// Consumer Group Offset Management - -// SaveConsumerGroupOffset saves the committed offset for a consumer group -func (bom *BrokerOffsetManager) SaveConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string, offset int64) error { - if bom.consumerGroupStorage == nil { - return fmt.Errorf("consumer group storage not configured") - } - return bom.consumerGroupStorage.SaveConsumerGroupOffset(t, p, consumerGroup, offset) -} - -// LoadConsumerGroupOffset loads the committed offset for a consumer group -func (bom *BrokerOffsetManager) LoadConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) (int64, error) { - if bom.consumerGroupStorage == nil { - return -1, fmt.Errorf("consumer group storage not configured") - } - return bom.consumerGroupStorage.LoadConsumerGroupOffset(t, p, consumerGroup) -} - -// ListConsumerGroups returns all consumer groups for a topic partition -func (bom *BrokerOffsetManager) ListConsumerGroups(t topic.Topic, p topic.Partition) ([]string, error) { - if bom.consumerGroupStorage == nil { - return nil, fmt.Errorf("consumer group storage not configured") - } - return bom.consumerGroupStorage.ListConsumerGroups(t, p) -} - -// DeleteConsumerGroupOffset removes the offset file for a consumer group -func (bom *BrokerOffsetManager) DeleteConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) error { - if bom.consumerGroupStorage == nil { - return fmt.Errorf("consumer group storage not configured") - } - return bom.consumerGroupStorage.DeleteConsumerGroupOffset(t, p, consumerGroup) -} diff --git a/weed/mq/broker/broker_recordvalue_test.go b/weed/mq/broker/broker_recordvalue_test.go deleted file mode 100644 index e4d12f7fc..000000000 --- a/weed/mq/broker/broker_recordvalue_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package broker - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/protobuf/proto" -) - -func TestValidateRecordValue(t *testing.T) { - broker := &MessageQueueBroker{} - - // Test valid schema-based RecordValue - validRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "user_name": { - Kind: &schema_pb.Value_StringValue{StringValue: "john_doe"}, - }, - "user_age": { - Kind: &schema_pb.Value_Int32Value{Int32Value: 30}, - }, - "is_active": { - Kind: &schema_pb.Value_BoolValue{BoolValue: true}, - }, - }, - } - - kafkaTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: "test-topic", - } - - err := broker.validateRecordValue(validRecord, kafkaTopic) - if err != nil { - t.Errorf("Valid schema-based RecordValue should pass validation: %v", err) - } -} - -func TestValidateRecordValueEmptyFields(t *testing.T) { - broker := &MessageQueueBroker{} - - kafkaTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: "test-topic", - } - - // Test empty fields - recordEmptyFields := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{}, - } - - err := broker.validateRecordValue(recordEmptyFields, kafkaTopic) - if err == nil { - t.Error("RecordValue with empty fields should fail validation") - } - if err.Error() != "RecordValue has no fields" { - t.Errorf("Expected specific error message, got: %v", err) - } -} - -func TestValidateRecordValueNonKafkaTopic(t *testing.T) { - broker := &MessageQueueBroker{} - - // For non-Kafka topics, validation should be more lenient - nonKafkaTopic := &schema_pb.Topic{ - Namespace: "custom", - Name: "test-topic", - } - - recordWithoutKafkaFields := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "custom_field": { - Kind: &schema_pb.Value_StringValue{StringValue: "custom-value"}, - }, - }, - } - - err := broker.validateRecordValue(recordWithoutKafkaFields, nonKafkaTopic) - if err != nil { - t.Errorf("Non-Kafka topic should allow flexible RecordValue structure: %v", err) - } -} - -func TestValidateRecordValueNilInputs(t *testing.T) { - broker := &MessageQueueBroker{} - - kafkaTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: "test-topic", - } - - // Test nil RecordValue - err := broker.validateRecordValue(nil, kafkaTopic) - if err == nil { - t.Error("Nil RecordValue should fail validation") - } - if err.Error() != "RecordValue is nil" { - t.Errorf("Expected specific error message, got: %v", err) - } - - // Test RecordValue with nil Fields - recordWithNilFields := &schema_pb.RecordValue{ - Fields: nil, - } - - err = broker.validateRecordValue(recordWithNilFields, kafkaTopic) - if err == nil { - t.Error("RecordValue with nil Fields should fail validation") - } - if err.Error() != "RecordValue.Fields is nil" { - t.Errorf("Expected specific error message, got: %v", err) - } -} - -func TestRecordValueMarshalUnmarshalIntegration(t *testing.T) { - broker := &MessageQueueBroker{} - - // Create a valid RecordValue - originalRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "key": { - Kind: &schema_pb.Value_BytesValue{BytesValue: []byte("integration-key")}, - }, - "value": { - Kind: &schema_pb.Value_StringValue{StringValue: "integration-value"}, - }, - "timestamp": { - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: 1234567890, - IsUtc: true, - }, - }, - }, - }, - } - - // Marshal to bytes - recordBytes, err := proto.Marshal(originalRecord) - if err != nil { - t.Fatalf("Failed to marshal RecordValue: %v", err) - } - - // Unmarshal back - unmarshaledRecord := &schema_pb.RecordValue{} - err = proto.Unmarshal(recordBytes, unmarshaledRecord) - if err != nil { - t.Fatalf("Failed to unmarshal RecordValue: %v", err) - } - - // Validate the unmarshaled record - kafkaTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: "integration-topic", - } - - err = broker.validateRecordValue(unmarshaledRecord, kafkaTopic) - if err != nil { - t.Errorf("Unmarshaled RecordValue should pass validation: %v", err) - } - - // Verify field values - keyField := unmarshaledRecord.Fields["key"] - if keyValue, ok := keyField.Kind.(*schema_pb.Value_BytesValue); ok { - if string(keyValue.BytesValue) != "integration-key" { - t.Errorf("Key field mismatch: expected 'integration-key', got '%s'", string(keyValue.BytesValue)) - } - } else { - t.Errorf("Key field is not BytesValue: %T", keyField.Kind) - } - - valueField := unmarshaledRecord.Fields["value"] - if valueValue, ok := valueField.Kind.(*schema_pb.Value_StringValue); ok { - if valueValue.StringValue != "integration-value" { - t.Errorf("Value field mismatch: expected 'integration-value', got '%s'", valueValue.StringValue) - } - } else { - t.Errorf("Value field is not StringValue: %T", valueField.Kind) - } -} diff --git a/weed/mq/broker/broker_server.go b/weed/mq/broker/broker_server.go deleted file mode 100644 index 38e022a7c..000000000 --- a/weed/mq/broker/broker_server.go +++ /dev/null @@ -1,219 +0,0 @@ -package broker - -import ( - "context" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" -) - -type MessageQueueBrokerOption struct { - Masters map[string]pb.ServerAddress - FilerGroup string - DataCenter string - Rack string - DefaultReplication string - MaxMB int - Ip string - Port int - Cipher bool - VolumeServerAccess string // how to access volume servers - LogFlushInterval int // log buffer flush interval in seconds -} - -func (option *MessageQueueBrokerOption) BrokerAddress() pb.ServerAddress { - return pb.NewServerAddress(option.Ip, option.Port, 0) -} - -// topicCacheEntry caches both topic existence and configuration -// If conf is nil, topic doesn't exist (negative cache) -// If conf is non-nil, topic exists with this configuration (positive cache) -type topicCacheEntry struct { - conf *mq_pb.ConfigureTopicResponse // nil = topic doesn't exist - expiresAt time.Time -} - -type MessageQueueBroker struct { - mq_pb.UnimplementedSeaweedMessagingServer - option *MessageQueueBrokerOption - grpcDialOption grpc.DialOption - MasterClient *wdclient.MasterClient - filers map[pb.ServerAddress]struct{} - currentFiler pb.ServerAddress - localTopicManager *topic.LocalTopicManager - PubBalancer *pub_balancer.PubBalancer - lockAsBalancer *cluster.LiveLock - // TODO: Add native offset management to broker - // ASSUMPTION: BrokerOffsetManager handles all partition offset assignment - offsetManager *BrokerOffsetManager - SubCoordinator *sub_coordinator.SubCoordinator - // Removed gatewayRegistry - no longer needed - accessLock sync.Mutex - fca *filer_client.FilerClientAccessor - // Unified topic cache for both existence and configuration - // Caches topic config (positive: conf != nil) and non-existence (negative: conf == nil) - // Eliminates 60% CPU overhead from repeated filer reads and JSON unmarshaling - topicCache map[string]*topicCacheEntry - topicCacheMu sync.RWMutex - topicCacheTTL time.Duration -} - -func NewMessageBroker(option *MessageQueueBrokerOption, grpcDialOption grpc.DialOption) (mqBroker *MessageQueueBroker, err error) { - - pubBalancer := pub_balancer.NewPubBalancer() - subCoordinator := sub_coordinator.NewSubCoordinator() - - mqBroker = &MessageQueueBroker{ - option: option, - grpcDialOption: grpcDialOption, - MasterClient: wdclient.NewMasterClient(grpcDialOption, option.FilerGroup, cluster.BrokerType, option.BrokerAddress(), option.DataCenter, option.Rack, *pb.NewServiceDiscoveryFromMap(option.Masters)), - filers: make(map[pb.ServerAddress]struct{}), - localTopicManager: topic.NewLocalTopicManager(), - PubBalancer: pubBalancer, - SubCoordinator: subCoordinator, - offsetManager: nil, // Will be initialized below - topicCache: make(map[string]*topicCacheEntry), - topicCacheTTL: 30 * time.Second, // Unified cache for existence + config (eliminates 60% CPU overhead) - } - // Create FilerClientAccessor that adapts broker's single filer to the new multi-filer interface - fca := &filer_client.FilerClientAccessor{ - GetGrpcDialOption: mqBroker.GetGrpcDialOption, - GetFilers: func() []pb.ServerAddress { - filer := mqBroker.GetFiler() - if filer != "" { - return []pb.ServerAddress{filer} - } - return []pb.ServerAddress{} - }, - } - mqBroker.fca = fca - subCoordinator.FilerClientAccessor = fca - - mqBroker.MasterClient.SetOnPeerUpdateFn(mqBroker.OnBrokerUpdate) - pubBalancer.OnPartitionChange = mqBroker.SubCoordinator.OnPartitionChange - - go mqBroker.MasterClient.KeepConnectedToMaster(context.Background()) - - // Initialize offset manager using the filer accessor - // The filer accessor will automatically use the current filer address as it gets discovered - // No hardcoded namespace/topic - offset storage now derives paths from actual topic information - mqBroker.offsetManager = NewBrokerOffsetManagerWithFilerAccessor(fca) - glog.V(0).Infof("broker initialized offset manager with filer accessor (current filer: %s)", mqBroker.GetFiler()) - - // Start idle partition cleanup task - // Cleans up partitions with no publishers/subscribers after 5 minutes of idle time - // Checks every 1 minute to avoid memory bloat from short-lived topics - mqBroker.localTopicManager.StartIdlePartitionCleanup( - context.Background(), - 1*time.Minute, // Check interval - 5*time.Minute, // Idle timeout - clean up after 5 minutes of no activity - ) - glog.V(0).Info("Started idle partition cleanup task (check: 1m, timeout: 5m)") - - existingNodes := cluster.ListExistingPeerUpdates(mqBroker.MasterClient.GetMaster(context.Background()), grpcDialOption, option.FilerGroup, cluster.FilerType) - for _, newNode := range existingNodes { - mqBroker.OnBrokerUpdate(newNode, time.Now()) - } - - // keep connecting to balancer - go func() { - for mqBroker.currentFiler == "" { - time.Sleep(time.Millisecond * 237) - } - self := option.BrokerAddress() - glog.V(0).Infof("broker %s found filer %s", self, mqBroker.currentFiler) - - newBrokerBalancerCh := make(chan string, 1) - lockClient := cluster.NewLockClient(grpcDialOption, mqBroker.currentFiler) - mqBroker.lockAsBalancer = lockClient.StartLongLivedLock(pub_balancer.LockBrokerBalancer, string(self), func(newLockOwner string) { - glog.V(0).Infof("broker %s found balanacer %s", self, newLockOwner) - newBrokerBalancerCh <- newLockOwner - }) - mqBroker.KeepConnectedToBrokerBalancer(newBrokerBalancerCh) - }() - - return mqBroker, nil -} - -func (b *MessageQueueBroker) OnBrokerUpdate(update *master_pb.ClusterNodeUpdate, startFrom time.Time) { - if update.NodeType != cluster.FilerType { - return - } - - address := pb.ServerAddress(update.Address) - if update.IsAdd { - b.filers[address] = struct{}{} - if b.currentFiler == "" { - b.currentFiler = address - // The offset manager will automatically use the updated filer through the filer accessor - glog.V(0).Infof("broker discovered filer %s (offset manager will automatically use it via filer accessor)", address) - } - } else { - delete(b.filers, address) - if b.currentFiler == address { - for filer := range b.filers { - b.currentFiler = filer - // The offset manager will automatically use the new filer through the filer accessor - glog.V(0).Infof("broker switched to filer %s (offset manager will automatically use it)", filer) - break - } - } - } - -} - -func (b *MessageQueueBroker) GetGrpcDialOption() grpc.DialOption { - return b.grpcDialOption -} - -func (b *MessageQueueBroker) GetFiler() pb.ServerAddress { - return b.currentFiler -} - -func (b *MessageQueueBroker) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - - return pb.WithFilerClient(streamingMode, 0, b.GetFiler(), b.grpcDialOption, fn) - -} - -func (b *MessageQueueBroker) AdjustedUrl(location *filer_pb.Location) string { - - return location.Url - -} - -func (b *MessageQueueBroker) GetDataCenter() string { - - return "" - -} - -func (b *MessageQueueBroker) withMasterClient(streamingMode bool, master pb.ServerAddress, fn func(client master_pb.SeaweedClient) error) error { - - return pb.WithMasterClient(streamingMode, master, b.grpcDialOption, false, func(client master_pb.SeaweedClient) error { - return fn(client) - }) - -} - -func (b *MessageQueueBroker) withBrokerClient(streamingMode bool, server pb.ServerAddress, fn func(client mq_pb.SeaweedMessagingClient) error) error { - - return pb.WithBrokerGrpcClient(streamingMode, server.String(), b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - return fn(client) - }) - -} diff --git a/weed/mq/broker/broker_topic_conf_read_write.go b/weed/mq/broker/broker_topic_conf_read_write.go deleted file mode 100644 index 138d1023e..000000000 --- a/weed/mq/broker/broker_topic_conf_read_write.go +++ /dev/null @@ -1,376 +0,0 @@ -package broker - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/mq/logstore" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func (b *MessageQueueBroker) GetOrGenerateLocalPartition(t topic.Topic, partition topic.Partition) (localTopicPartition *topic.LocalPartition, getOrGenError error) { - // get or generate a local partition using cached topic config - conf, err := b.getTopicConfFromCache(t) - if err != nil { - glog.Errorf("topic %v not found: %v", t, err) - return nil, fmt.Errorf("topic %v not found: %w", t, err) - } - - localTopicPartition, _, getOrGenError = b.doGetOrGenLocalPartition(t, partition, conf) - if getOrGenError != nil { - glog.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError) - return nil, fmt.Errorf("topic %v partition %v not setup: %w", t, partition, getOrGenError) - } - return localTopicPartition, nil -} - -// invalidateTopicCache removes a topic from the unified cache -// Should be called when a topic is created, deleted, or config is updated -func (b *MessageQueueBroker) invalidateTopicCache(t topic.Topic) { - topicKey := t.String() - b.topicCacheMu.Lock() - delete(b.topicCache, topicKey) - b.topicCacheMu.Unlock() - glog.V(4).Infof("Invalidated topic cache for %s", topicKey) -} - -// getTopicConfFromCache reads topic configuration with caching -// Returns the config or error if not found. Uses unified cache to avoid expensive filer reads. -// On cache miss, validates broker assignments to ensure they're still active (14% CPU overhead). -// This is the public API for reading topic config - always use this instead of direct filer reads. -func (b *MessageQueueBroker) getTopicConfFromCache(t topic.Topic) (*mq_pb.ConfigureTopicResponse, error) { - topicKey := t.String() - - // Check unified cache first - b.topicCacheMu.RLock() - if entry, found := b.topicCache[topicKey]; found { - if time.Now().Before(entry.expiresAt) { - conf := entry.conf - b.topicCacheMu.RUnlock() - - // If conf is nil, topic was cached as non-existent - if conf == nil { - glog.V(4).Infof("Topic cache HIT for %s: topic doesn't exist", topicKey) - return nil, fmt.Errorf("topic %v not found (cached)", t) - } - - glog.V(4).Infof("Topic cache HIT for %s (skipping assignment validation)", topicKey) - // Cache hit - return immediately without validating assignments - // Assignments were validated when we first cached this config - return conf, nil - } - } - b.topicCacheMu.RUnlock() - - // Cache miss or expired - read from filer - glog.V(4).Infof("Topic cache MISS for %s, reading from filer", topicKey) - conf, readConfErr := b.fca.ReadTopicConfFromFiler(t) - - if readConfErr != nil { - // Negative cache: topic doesn't exist - b.topicCacheMu.Lock() - b.topicCache[topicKey] = &topicCacheEntry{ - conf: nil, - expiresAt: time.Now().Add(b.topicCacheTTL), - } - b.topicCacheMu.Unlock() - glog.V(4).Infof("Topic cached as non-existent: %s", topicKey) - return nil, fmt.Errorf("topic %v not found: %w", t, readConfErr) - } - - // Validate broker assignments before caching (NOT holding cache lock) - // This ensures cached configs always have valid broker assignments - // Only done on cache miss (not on every lookup), saving 14% CPU - glog.V(4).Infof("Validating broker assignments for %s", topicKey) - hasChanges := b.ensureTopicActiveAssignmentsUnsafe(t, conf) - if hasChanges { - glog.V(0).Infof("topic %v partition assignments updated due to broker changes", t) - // Save updated assignments to filer immediately to ensure persistence - if err := b.fca.SaveTopicConfToFiler(t, conf); err != nil { - glog.Errorf("failed to save updated topic config for %s: %v", topicKey, err) - // Don't cache on error - let next request retry - return conf, err - } - // CRITICAL FIX: Invalidate cache while holding lock to prevent race condition - // Before the fix, between checking the cache and invalidating it, another goroutine - // could read stale data. Now we hold the lock throughout. - b.topicCacheMu.Lock() - delete(b.topicCache, topicKey) - // Cache the updated config with validated assignments - b.topicCache[topicKey] = &topicCacheEntry{ - conf: conf, - expiresAt: time.Now().Add(b.topicCacheTTL), - } - b.topicCacheMu.Unlock() - glog.V(4).Infof("Updated cache for %s after assignment update", topicKey) - return conf, nil - } - - // Positive cache: topic exists with validated assignments - b.topicCacheMu.Lock() - b.topicCache[topicKey] = &topicCacheEntry{ - conf: conf, - expiresAt: time.Now().Add(b.topicCacheTTL), - } - b.topicCacheMu.Unlock() - glog.V(4).Infof("Topic config cached for %s", topicKey) - - return conf, nil -} - -func (b *MessageQueueBroker) doGetOrGenLocalPartition(t topic.Topic, partition topic.Partition, conf *mq_pb.ConfigureTopicResponse) (localPartition *topic.LocalPartition, isGenerated bool, err error) { - b.accessLock.Lock() - defer b.accessLock.Unlock() - - if localPartition = b.localTopicManager.GetLocalPartition(t, partition); localPartition == nil { - localPartition, isGenerated, err = b.genLocalPartitionFromFiler(t, partition, conf) - if err != nil { - return nil, false, err - } - } - return localPartition, isGenerated, nil -} - -func (b *MessageQueueBroker) genLocalPartitionFromFiler(t topic.Topic, partition topic.Partition, conf *mq_pb.ConfigureTopicResponse) (localPartition *topic.LocalPartition, isGenerated bool, err error) { - self := b.option.BrokerAddress() - glog.V(4).Infof("genLocalPartitionFromFiler for %s %s, self=%s", t, partition, self) - glog.V(4).Infof("conf.BrokerPartitionAssignments: %v", conf.BrokerPartitionAssignments) - for _, assignment := range conf.BrokerPartitionAssignments { - assignmentPartition := topic.FromPbPartition(assignment.Partition) - glog.V(4).Infof("checking assignment: LeaderBroker=%s, Partition=%s", assignment.LeaderBroker, assignmentPartition) - glog.V(4).Infof("comparing self=%s with LeaderBroker=%s: %v", self, assignment.LeaderBroker, assignment.LeaderBroker == string(self)) - glog.V(4).Infof("comparing partition=%s with assignmentPartition=%s: %v", partition.String(), assignmentPartition.String(), partition.Equals(assignmentPartition)) - glog.V(4).Infof("logical comparison (RangeStart, RangeStop only): %v", partition.LogicalEquals(assignmentPartition)) - glog.V(4).Infof("partition details: RangeStart=%d, RangeStop=%d, RingSize=%d, UnixTimeNs=%d", partition.RangeStart, partition.RangeStop, partition.RingSize, partition.UnixTimeNs) - glog.V(4).Infof("assignmentPartition details: RangeStart=%d, RangeStop=%d, RingSize=%d, UnixTimeNs=%d", assignmentPartition.RangeStart, assignmentPartition.RangeStop, assignmentPartition.RingSize, assignmentPartition.UnixTimeNs) - if assignment.LeaderBroker == string(self) && partition.LogicalEquals(assignmentPartition) { - glog.V(4).Infof("Creating local partition for %s %s", t, partition) - localPartition = topic.NewLocalPartition(partition, b.option.LogFlushInterval, b.genLogFlushFunc(t, partition), logstore.GenMergedReadFunc(b, t, partition)) - - // Initialize offset from existing data to ensure continuity on restart - b.initializePartitionOffsetFromExistingData(localPartition, t, partition) - - b.localTopicManager.AddLocalPartition(t, localPartition) - isGenerated = true - glog.V(4).Infof("Successfully added local partition %s %s to localTopicManager", t, partition) - break - } - } - - if !isGenerated { - glog.V(4).Infof("No matching assignment found for %s %s", t, partition) - } - - return localPartition, isGenerated, nil -} - -// ensureTopicActiveAssignmentsUnsafe validates that partition assignments reference active brokers -// Returns true if assignments were changed. Caller must save config to filer if hasChanges=true. -// Note: Assumes caller holds topicCacheMu lock or is OK with concurrent access to conf -func (b *MessageQueueBroker) ensureTopicActiveAssignmentsUnsafe(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) (hasChanges bool) { - // also fix assignee broker if invalid - hasChanges = pub_balancer.EnsureAssignmentsToActiveBrokers(b.PubBalancer.Brokers, 1, conf.BrokerPartitionAssignments) - return hasChanges -} - -func (b *MessageQueueBroker) ensureTopicActiveAssignments(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) (err error) { - // Validate and save if needed - hasChanges := b.ensureTopicActiveAssignmentsUnsafe(t, conf) - if hasChanges { - glog.V(0).Infof("topic %v partition updated assignments: %v", t, conf.BrokerPartitionAssignments) - if err = b.fca.SaveTopicConfToFiler(t, conf); err != nil { - return err - } - } - - return err -} - -// initializePartitionOffsetFromExistingData initializes the LogBuffer offset from existing data on filer -// This ensures offset continuity when SMQ restarts -func (b *MessageQueueBroker) initializePartitionOffsetFromExistingData(localPartition *topic.LocalPartition, t topic.Topic, partition topic.Partition) { - // Create a function to get the highest existing offset from chunk metadata - getHighestOffsetFn := func() (int64, error) { - // Use the existing chunk metadata approach to find the highest offset - if b.fca == nil { - return -1, fmt.Errorf("no filer client accessor available") - } - - // Use the same logic as getOffsetRangeFromChunkMetadata but only get the highest offset - _, highWaterMark, err := b.getOffsetRangeFromChunkMetadata(t, partition) - if err != nil { - return -1, err - } - - // The high water mark is the next offset to be assigned, so the highest existing offset is hwm - 1 - if highWaterMark > 0 { - return highWaterMark - 1, nil - } - - return -1, nil // No existing data - } - - // Initialize the LogBuffer offset from existing data - if err := localPartition.LogBuffer.InitializeOffsetFromExistingData(getHighestOffsetFn); err != nil { - glog.V(0).Infof("Failed to initialize offset for partition %s %s: %v", t, partition, err) - } -} - -// getOffsetRangeFromChunkMetadata reads chunk metadata to find both earliest and latest offsets -func (b *MessageQueueBroker) getOffsetRangeFromChunkMetadata(t topic.Topic, partition topic.Partition) (earliestOffset int64, highWaterMark int64, err error) { - if b.fca == nil { - return 0, 0, fmt.Errorf("filer client accessor not available") - } - - // Get the topic path and find the latest version - topicPath := fmt.Sprintf("/topics/%s/%s", t.Namespace, t.Name) - - // First, list the topic versions to find the latest - var latestVersion string - err = b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: topicPath, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if resp.Entry.IsDirectory && strings.HasPrefix(resp.Entry.Name, "v") { - if latestVersion == "" || resp.Entry.Name > latestVersion { - latestVersion = resp.Entry.Name - } - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to list topic versions: %v", err) - } - - if latestVersion == "" { - glog.V(0).Infof("No version directory found for topic %s", t) - return 0, 0, nil - } - - // Find the partition directory - versionPath := fmt.Sprintf("%s/%s", topicPath, latestVersion) - var partitionDir string - err = b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: versionPath, - }) - if err != nil { - return err - } - - // Look for the partition directory that matches our partition range - targetPartitionName := fmt.Sprintf("%04d-%04d", partition.RangeStart, partition.RangeStop) - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if resp.Entry.IsDirectory && resp.Entry.Name == targetPartitionName { - partitionDir = resp.Entry.Name - break - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to list partition directories: %v", err) - } - - if partitionDir == "" { - glog.V(0).Infof("No partition directory found for topic %s partition %s", t, partition) - return 0, 0, nil - } - - // Scan all message files to find the highest offset_max and lowest offset_min - partitionPath := fmt.Sprintf("%s/%s", versionPath, partitionDir) - highWaterMark = 0 - earliestOffset = -1 // -1 indicates no data found yet - - err = b.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: partitionPath, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if !resp.Entry.IsDirectory && resp.Entry.Name != "checkpoint.offset" { - // Check for offset ranges in Extended attributes (both log files and parquet files) - if resp.Entry.Extended != nil { - fileType := "log" - if strings.HasSuffix(resp.Entry.Name, ".parquet") { - fileType = "parquet" - } - - // Track maximum offset for high water mark - if maxOffsetBytes, exists := resp.Entry.Extended[mq.ExtendedAttrOffsetMax]; exists && len(maxOffsetBytes) == 8 { - maxOffset := int64(binary.BigEndian.Uint64(maxOffsetBytes)) - if maxOffset > highWaterMark { - highWaterMark = maxOffset - } - glog.V(2).Infof("%s file %s has offset_max=%d", fileType, resp.Entry.Name, maxOffset) - } - - // Track minimum offset for earliest offset - if minOffsetBytes, exists := resp.Entry.Extended[mq.ExtendedAttrOffsetMin]; exists && len(minOffsetBytes) == 8 { - minOffset := int64(binary.BigEndian.Uint64(minOffsetBytes)) - if earliestOffset == -1 || minOffset < earliestOffset { - earliestOffset = minOffset - } - glog.V(2).Infof("%s file %s has offset_min=%d", fileType, resp.Entry.Name, minOffset) - } - } - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to scan message files: %v", err) - } - - // High water mark is the next offset after the highest written offset - if highWaterMark > 0 { - highWaterMark++ - } - - // If no data found, set earliest offset to 0 - if earliestOffset == -1 { - earliestOffset = 0 - } - - glog.V(0).Infof("Offset range for topic %s partition %s: earliest=%d, highWaterMark=%d", t, partition, earliestOffset, highWaterMark) - return earliestOffset, highWaterMark, nil -} diff --git a/weed/mq/broker/broker_topic_partition_read_write.go b/weed/mq/broker/broker_topic_partition_read_write.go deleted file mode 100644 index 18f9c98b0..000000000 --- a/weed/mq/broker/broker_topic_partition_read_write.go +++ /dev/null @@ -1,54 +0,0 @@ -package broker - -import ( - "fmt" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -// LogBufferStart tracks the starting buffer offset for a live log file -// Buffer offsets are monotonically increasing, count = number of chunks -// Now stored in binary format for efficiency -type LogBufferStart struct { - StartIndex int64 // Starting buffer offset (count = len(chunks)) -} - -func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, p topic.Partition) log_buffer.LogFlushFuncType { - partitionDir := topic.PartitionDir(t, p) - - return func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte, minOffset, maxOffset int64) { - if len(buf) == 0 { - return - } - - startTime, stopTime = startTime.UTC(), stopTime.UTC() - - targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT)) - - // Get buffer offset (sequential: 0, 1, 2, 3...) - bufferOffset := logBuffer.GetOffset() - - for { - if err := b.appendToFileWithBufferIndex(targetFile, buf, bufferOffset, minOffset, maxOffset); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) - time.Sleep(737 * time.Millisecond) - } else { - break - } - } - - atomic.StoreInt64(&logBuffer.LastFlushTsNs, stopTime.UnixNano()) - - b.accessLock.Lock() - defer b.accessLock.Unlock() - if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil { - localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs) - } - - glog.V(0).Infof("flushing at %d to %s size %d from buffer %s (offset %d)", logBuffer.LastFlushTsNs, targetFile, len(buf), logBuffer.GetName(), bufferOffset) - } -} diff --git a/weed/mq/broker/broker_write.go b/weed/mq/broker/broker_write.go deleted file mode 100644 index bdb72a770..000000000 --- a/weed/mq/broker/broker_write.go +++ /dev/null @@ -1,188 +0,0 @@ -package broker - -import ( - "context" - "encoding/binary" - "fmt" - "os" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func (b *MessageQueueBroker) appendToFile(targetFile string, data []byte) error { - return b.appendToFileWithBufferIndex(targetFile, data, 0) -} - -func (b *MessageQueueBroker) appendToFileWithBufferIndex(targetFile string, data []byte, bufferOffset int64, offsetArgs ...int64) error { - // Extract optional offset parameters (minOffset, maxOffset) - var minOffset, maxOffset int64 - if len(offsetArgs) >= 2 { - minOffset = offsetArgs[0] - maxOffset = offsetArgs[1] - } - - fileId, uploadResult, err2 := b.assignAndUpload(targetFile, data) - if err2 != nil { - return err2 - } - - // find out existing entry - fullpath := util.FullPath(targetFile) - dir, name := fullpath.DirAndName() - entry, err := filer_pb.GetEntry(context.Background(), b, fullpath) - var offset int64 = 0 - if err == filer_pb.ErrNotFound { - entry = &filer_pb.Entry{ - Name: name, - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - FileMode: uint32(os.FileMode(0644)), - Uid: uint32(os.Getuid()), - Gid: uint32(os.Getgid()), - }, - } - - // Add buffer start offset for deduplication tracking (binary format) - if bufferOffset != 0 { - entry.Extended = make(map[string][]byte) - bufferStartBytes := make([]byte, 8) - binary.BigEndian.PutUint64(bufferStartBytes, uint64(bufferOffset)) - entry.Extended[mq.ExtendedAttrBufferStart] = bufferStartBytes - } - - // Add offset range metadata for Kafka integration - if minOffset > 0 && maxOffset >= minOffset { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - minOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(minOffsetBytes, uint64(minOffset)) - entry.Extended[mq.ExtendedAttrOffsetMin] = minOffsetBytes - - maxOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxOffsetBytes, uint64(maxOffset)) - entry.Extended[mq.ExtendedAttrOffsetMax] = maxOffsetBytes - } - } else if err != nil { - return fmt.Errorf("find %s: %v", fullpath, err) - } else { - offset = int64(filer.TotalSize(entry.GetChunks())) - - // Verify buffer offset continuity for existing files (append operations) - if bufferOffset != 0 { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - // Check for existing buffer start (binary format) - if existingData, exists := entry.Extended[mq.ExtendedAttrBufferStart]; exists { - if len(existingData) == 8 { - existingStartIndex := int64(binary.BigEndian.Uint64(existingData)) - - // Verify that the new buffer offset is consecutive - // Expected offset = start + number of existing chunks - expectedOffset := existingStartIndex + int64(len(entry.GetChunks())) - if bufferOffset != expectedOffset { - // This shouldn't happen in normal operation - // Log warning but continue (don't crash the system) - glog.Warningf("non-consecutive buffer offset for %s. Expected %d, got %d", - fullpath, expectedOffset, bufferOffset) - } - // Note: We don't update the start offset - it stays the same - } - } else { - // No existing buffer start, create new one (shouldn't happen for existing files) - bufferStartBytes := make([]byte, 8) - binary.BigEndian.PutUint64(bufferStartBytes, uint64(bufferOffset)) - entry.Extended[mq.ExtendedAttrBufferStart] = bufferStartBytes - } - } - - // Update offset range metadata for existing files - if minOffset > 0 && maxOffset >= minOffset { - // Update minimum offset if this chunk has a lower minimum - if existingMinData, exists := entry.Extended[mq.ExtendedAttrOffsetMin]; exists && len(existingMinData) == 8 { - existingMin := int64(binary.BigEndian.Uint64(existingMinData)) - if minOffset < existingMin { - minOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(minOffsetBytes, uint64(minOffset)) - entry.Extended[mq.ExtendedAttrOffsetMin] = minOffsetBytes - } - } else { - // No existing minimum, set it - minOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(minOffsetBytes, uint64(minOffset)) - entry.Extended[mq.ExtendedAttrOffsetMin] = minOffsetBytes - } - - // Update maximum offset if this chunk has a higher maximum - if existingMaxData, exists := entry.Extended[mq.ExtendedAttrOffsetMax]; exists && len(existingMaxData) == 8 { - existingMax := int64(binary.BigEndian.Uint64(existingMaxData)) - if maxOffset > existingMax { - maxOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxOffsetBytes, uint64(maxOffset)) - entry.Extended[mq.ExtendedAttrOffsetMax] = maxOffsetBytes - } - } else { - // No existing maximum, set it - maxOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxOffsetBytes, uint64(maxOffset)) - entry.Extended[mq.ExtendedAttrOffsetMax] = maxOffsetBytes - } - } - } - - // append to existing chunks - entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(fileId, offset, time.Now().UnixNano())) - - // update the entry - return b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{ - Directory: dir, - Entry: entry, - }) - }) -} - -func (b *MessageQueueBroker) assignAndUpload(targetFile string, data []byte) (fileId string, uploadResult *operation.UploadResult, err error) { - - reader := util.NewBytesReader(data) - - uploader, err := operation.NewUploader() - if err != nil { - return - } - - fileId, uploadResult, err, _ = uploader.UploadWithRetry( - b, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: b.option.DefaultReplication, - Collection: "topics", - // TtlSec: wfs.option.TtlSec, - // DiskType: string(wfs.option.DiskType), - DataCenter: b.option.DataCenter, - Path: targetFile, - }, - &operation.UploadOption{ - Cipher: b.option.Cipher, - }, - func(host, fileId string) string { - fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - if b.option.VolumeServerAccess == "filerProxy" { - fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", b.currentFiler, fileId) - } - return fileUrl - }, - reader, - ) - return -} diff --git a/weed/mq/broker/memory_storage_test.go b/weed/mq/broker/memory_storage_test.go deleted file mode 100644 index 83fb24f84..000000000 --- a/weed/mq/broker/memory_storage_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package broker - -import ( - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/offset" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// recordEntry holds a record with timestamp for TTL cleanup -type recordEntry struct { - exists bool - timestamp time.Time -} - -// InMemoryOffsetStorage provides an in-memory implementation of OffsetStorage for testing ONLY -// This is a copy of the implementation in weed/mq/offset/memory_storage_test.go -type InMemoryOffsetStorage struct { - mu sync.RWMutex - checkpoints map[string]int64 // partition key -> offset - records map[string]map[int64]*recordEntry // partition key -> offset -> entry with timestamp - - // Memory leak protection - maxRecordsPerPartition int // Maximum records to keep per partition - recordTTL time.Duration // TTL for record entries - lastCleanup time.Time // Last cleanup time - cleanupInterval time.Duration // How often to run cleanup -} - -// NewInMemoryOffsetStorage creates a new in-memory storage with memory leak protection -// FOR TESTING ONLY - do not use in production -func NewInMemoryOffsetStorage() *InMemoryOffsetStorage { - return &InMemoryOffsetStorage{ - checkpoints: make(map[string]int64), - records: make(map[string]map[int64]*recordEntry), - maxRecordsPerPartition: 10000, // Limit to 10K records per partition - recordTTL: 1 * time.Hour, // Records expire after 1 hour - cleanupInterval: 5 * time.Minute, // Cleanup every 5 minutes - lastCleanup: time.Now(), - } -} - -// SaveCheckpoint saves the checkpoint for a partition -func (s *InMemoryOffsetStorage) SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, off int64) error { - s.mu.Lock() - defer s.mu.Unlock() - - key := offset.PartitionKey(partition) - s.checkpoints[key] = off - return nil -} - -// LoadCheckpoint loads the checkpoint for a partition -func (s *InMemoryOffsetStorage) LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - key := offset.PartitionKey(partition) - off, exists := s.checkpoints[key] - if !exists { - return -1, fmt.Errorf("no checkpoint found") - } - - return off, nil -} - -// GetHighestOffset finds the highest offset in storage for a partition -func (s *InMemoryOffsetStorage) GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - key := offset.PartitionKey(partition) - offsets, exists := s.records[key] - if !exists || len(offsets) == 0 { - return -1, fmt.Errorf("no records found") - } - - var highest int64 = -1 - for off, entry := range offsets { - if entry.exists && off > highest { - highest = off - } - } - - return highest, nil -} - -// AddRecord simulates storing a record with an offset (for testing) -func (s *InMemoryOffsetStorage) AddRecord(partition *schema_pb.Partition, off int64) { - s.mu.Lock() - defer s.mu.Unlock() - - key := offset.PartitionKey(partition) - if s.records[key] == nil { - s.records[key] = make(map[int64]*recordEntry) - } - - // Add record with current timestamp - s.records[key][off] = &recordEntry{ - exists: true, - timestamp: time.Now(), - } - - // Trigger cleanup if needed (memory leak protection) - s.cleanupIfNeeded() -} - -// Reset removes all data (implements resettable interface for shutdown) -func (s *InMemoryOffsetStorage) Reset() error { - s.mu.Lock() - defer s.mu.Unlock() - - s.checkpoints = make(map[string]int64) - s.records = make(map[string]map[int64]*recordEntry) - s.lastCleanup = time.Now() - return nil -} - -// cleanupIfNeeded performs memory leak protection cleanup -// This method assumes the caller already holds the write lock -func (s *InMemoryOffsetStorage) cleanupIfNeeded() { - now := time.Now() - - // Only cleanup if enough time has passed - if now.Sub(s.lastCleanup) < s.cleanupInterval { - return - } - - s.lastCleanup = now - cutoff := now.Add(-s.recordTTL) - - // Clean up expired records and enforce size limits - for partitionKey, offsets := range s.records { - // Remove expired records - for offset, entry := range offsets { - if entry.timestamp.Before(cutoff) { - delete(offsets, offset) - } - } - - // Enforce size limit per partition - if len(offsets) > s.maxRecordsPerPartition { - // Keep only the most recent records - type offsetTime struct { - offset int64 - time time.Time - } - - var entries []offsetTime - for offset, entry := range offsets { - entries = append(entries, offsetTime{offset: offset, time: entry.timestamp}) - } - - // Sort by timestamp (newest first) - for i := 0; i < len(entries)-1; i++ { - for j := i + 1; j < len(entries); j++ { - if entries[i].time.Before(entries[j].time) { - entries[i], entries[j] = entries[j], entries[i] - } - } - } - - // Keep only the newest maxRecordsPerPartition entries - newOffsets := make(map[int64]*recordEntry) - for i := 0; i < s.maxRecordsPerPartition && i < len(entries); i++ { - offset := entries[i].offset - newOffsets[offset] = offsets[offset] - } - - s.records[partitionKey] = newOffsets - } - - // Remove empty partition maps - if len(offsets) == 0 { - delete(s.records, partitionKey) - } - } -} - -// NewInMemoryOffsetStorageForTesting creates an InMemoryOffsetStorage for testing purposes -func NewInMemoryOffsetStorageForTesting() offset.OffsetStorage { - return NewInMemoryOffsetStorage() -} - -// NewBrokerOffsetManagerWithStorage creates a new broker offset manager with custom storage -// FOR TESTING ONLY - moved from production code since it's only used in tests -func NewBrokerOffsetManagerWithStorage(storage offset.OffsetStorage) *BrokerOffsetManager { - if storage == nil { - panic("BrokerOffsetManager requires a storage implementation. Use NewBrokerOffsetManagerWithFiler() or provide FilerOffsetStorage/SQLOffsetStorage. InMemoryOffsetStorage is only for testing.") - } - - return &BrokerOffsetManager{ - offsetIntegration: offset.NewSMQOffsetIntegration(storage), - storage: storage, - consumerGroupStorage: nil, // Will be set separately if needed - } -} diff --git a/weed/mq/client/agent_client/publish_session.go b/weed/mq/client/agent_client/publish_session.go deleted file mode 100644 index 384f12f48..000000000 --- a/weed/mq/client/agent_client/publish_session.go +++ /dev/null @@ -1,81 +0,0 @@ -package agent_client - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -type PublishSession struct { - schema *schema.Schema - partitionCount int - publisherName string - stream grpc.BidiStreamingClient[mq_agent_pb.PublishRecordRequest, mq_agent_pb.PublishRecordResponse] -} - -func NewPublishSession(agentAddress string, topicSchema *schema.Schema, partitionCount int, publisherName string) (*PublishSession, error) { - - // call local agent grpc server to create a new session - clientConn, err := grpc.NewClient(agentAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, fmt.Errorf("dial agent server %s: %v", agentAddress, err) - } - agentClient := mq_agent_pb.NewSeaweedMessagingAgentClient(clientConn) - - resp, err := agentClient.StartPublishSession(context.Background(), &mq_agent_pb.StartPublishSessionRequest{ - Topic: &schema_pb.Topic{ - Namespace: topicSchema.Namespace, - Name: topicSchema.Name, - }, - PartitionCount: int32(partitionCount), - RecordType: topicSchema.RecordType, - PublisherName: publisherName, - }) - if err != nil { - return nil, err - } - if resp.Error != "" { - return nil, fmt.Errorf("start publish session: %v", resp.Error) - } - - stream, err := agentClient.PublishRecord(context.Background()) - if err != nil { - return nil, fmt.Errorf("publish record: %w", err) - } - - if err = stream.Send(&mq_agent_pb.PublishRecordRequest{ - SessionId: resp.SessionId, - }); err != nil { - return nil, fmt.Errorf("send session id: %w", err) - } - - return &PublishSession{ - schema: topicSchema, - partitionCount: partitionCount, - publisherName: publisherName, - stream: stream, - }, nil -} - -func (a *PublishSession) CloseSession() error { - if a.schema == nil { - return nil - } - err := a.stream.CloseSend() - if err != nil { - return fmt.Errorf("close send: %w", err) - } - a.schema = nil - return err -} - -func (a *PublishSession) PublishMessageRecord(key []byte, record *schema_pb.RecordValue) error { - return a.stream.Send(&mq_agent_pb.PublishRecordRequest{ - Key: key, - Value: record, - }) -} diff --git a/weed/mq/client/agent_client/subscribe_session.go b/weed/mq/client/agent_client/subscribe_session.go deleted file mode 100644 index f9803b66b..000000000 --- a/weed/mq/client/agent_client/subscribe_session.go +++ /dev/null @@ -1,87 +0,0 @@ -package agent_client - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -type SubscribeOption struct { - ConsumerGroup string - ConsumerGroupInstanceId string - Topic topic.Topic - OffsetType schema_pb.OffsetType - OffsetTsNs int64 - Filter string - MaxSubscribedPartitions int32 - SlidingWindowSize int32 -} - -type SubscribeSession struct { - Option *SubscribeOption - stream grpc.BidiStreamingClient[mq_agent_pb.SubscribeRecordRequest, mq_agent_pb.SubscribeRecordResponse] -} - -func NewSubscribeSession(agentAddress string, option *SubscribeOption) (*SubscribeSession, error) { - // call local agent grpc server to create a new session - clientConn, err := grpc.NewClient(agentAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, fmt.Errorf("dial agent server %s: %v", agentAddress, err) - } - agentClient := mq_agent_pb.NewSeaweedMessagingAgentClient(clientConn) - - initRequest := &mq_agent_pb.SubscribeRecordRequest_InitSubscribeRecordRequest{ - ConsumerGroup: option.ConsumerGroup, - ConsumerGroupInstanceId: option.ConsumerGroupInstanceId, - Topic: &schema_pb.Topic{ - Namespace: option.Topic.Namespace, - Name: option.Topic.Name, - }, - OffsetType: option.OffsetType, - OffsetTsNs: option.OffsetTsNs, - MaxSubscribedPartitions: option.MaxSubscribedPartitions, - Filter: option.Filter, - SlidingWindowSize: option.SlidingWindowSize, - } - - stream, err := agentClient.SubscribeRecord(context.Background()) - if err != nil { - return nil, fmt.Errorf("subscribe record: %w", err) - } - - if err = stream.Send(&mq_agent_pb.SubscribeRecordRequest{ - Init: initRequest, - }); err != nil { - return nil, fmt.Errorf("send session id: %w", err) - } - - return &SubscribeSession{ - Option: option, - stream: stream, - }, nil -} - -func (s *SubscribeSession) CloseSession() error { - err := s.stream.CloseSend() - return err -} - -func (a *SubscribeSession) SubscribeMessageRecord( - onEachMessageFn func(key []byte, record *schema_pb.RecordValue), - onCompletionFn func()) error { - for { - resp, err := a.stream.Recv() - if err != nil { - return err - } - onEachMessageFn(resp.Key, resp.Value) - } - if onCompletionFn != nil { - onCompletionFn() - } - return nil -} diff --git a/weed/mq/client/pub_client/publish.go b/weed/mq/client/pub_client/publish.go deleted file mode 100644 index 1988e9279..000000000 --- a/weed/mq/client/pub_client/publish.go +++ /dev/null @@ -1,61 +0,0 @@ -package pub_client - -import ( - "fmt" - "github.com/golang/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "time" -) - -func (p *TopicPublisher) Publish(key, value []byte) error { - if p.config.RecordType != nil { - return fmt.Errorf("record type is set, use PublishRecord instead") - } - return p.doPublish(key, value) -} - -func (p *TopicPublisher) doPublish(key, value []byte) error { - hashKey := util.HashToInt32(key) % pub_balancer.MaxPartitionCount - if hashKey < 0 { - hashKey = -hashKey - } - inputBuffer, found := p.partition2Buffer.Floor(hashKey+1, hashKey+1) - if !found { - return fmt.Errorf("no input buffer found for key %d", hashKey) - } - - return inputBuffer.Enqueue(&mq_pb.DataMessage{ - Key: key, - Value: value, - TsNs: time.Now().UnixNano(), - }) -} - -func (p *TopicPublisher) PublishRecord(key []byte, recordValue *schema_pb.RecordValue) error { - // serialize record value - value, err := proto.Marshal(recordValue) - if err != nil { - return fmt.Errorf("failed to marshal record value: %w", err) - } - - return p.doPublish(key, value) -} - -func (p *TopicPublisher) FinishPublish() error { - if inputBuffers, found := p.partition2Buffer.AllIntersections(0, pub_balancer.MaxPartitionCount); found { - for _, inputBuffer := range inputBuffers { - inputBuffer.Enqueue(&mq_pb.DataMessage{ - TsNs: time.Now().UnixNano(), - Ctrl: &mq_pb.ControlMessage{ - IsClose: true, - PublisherName: p.config.PublisherName, - }, - }) - } - } - - return nil -} diff --git a/weed/mq/client/pub_client/publisher.go b/weed/mq/client/pub_client/publisher.go deleted file mode 100644 index f95d00602..000000000 --- a/weed/mq/client/pub_client/publisher.go +++ /dev/null @@ -1,73 +0,0 @@ -package pub_client - -import ( - "github.com/rdleal/intervalst/interval" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/buffered_queue" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "log" - "sync" -) - -type PublisherConfiguration struct { - Topic topic.Topic - PartitionCount int32 - Brokers []string - PublisherName string // for debugging - RecordType *schema_pb.RecordType -} - -type PublishClient struct { - mq_pb.SeaweedMessaging_PublishMessageClient - Broker string - Err error -} -type TopicPublisher struct { - partition2Buffer *interval.SearchTree[*buffered_queue.BufferedQueue[*mq_pb.DataMessage], int32] - grpcDialOption grpc.DialOption - sync.Mutex // protects grpc - config *PublisherConfiguration - jobs []*EachPartitionPublishJob -} - -func NewTopicPublisher(config *PublisherConfiguration) (tp *TopicPublisher, err error) { - tp = &TopicPublisher{ - partition2Buffer: interval.NewSearchTree[*buffered_queue.BufferedQueue[*mq_pb.DataMessage]](func(a, b int32) int { - return int(a - b) - }), - grpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()), - config: config, - } - - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err = tp.startSchedulerThread(&wg); err != nil { - log.Println(err) - return - } - }() - - wg.Wait() - - return -} - -func (p *TopicPublisher) Shutdown() error { - - if inputBuffers, found := p.partition2Buffer.AllIntersections(0, pub_balancer.MaxPartitionCount); found { - for _, inputBuffer := range inputBuffers { - inputBuffer.CloseInput() - } - } - - for _, job := range p.jobs { - job.wg.Wait() - } - - return nil -} diff --git a/weed/mq/client/pub_client/scheduler.go b/weed/mq/client/pub_client/scheduler.go deleted file mode 100644 index 8cb481051..000000000 --- a/weed/mq/client/pub_client/scheduler.go +++ /dev/null @@ -1,299 +0,0 @@ -package pub_client - -import ( - "context" - "fmt" - "log" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util/buffered_queue" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" -) - -type EachPartitionError struct { - *mq_pb.BrokerPartitionAssignment - Err error - generation int -} - -type EachPartitionPublishJob struct { - *mq_pb.BrokerPartitionAssignment - stopChan chan bool - wg sync.WaitGroup - generation int - inputQueue *buffered_queue.BufferedQueue[*mq_pb.DataMessage] -} - -func (p *TopicPublisher) startSchedulerThread(wg *sync.WaitGroup) error { - - if err := p.doConfigureTopic(); err != nil { - wg.Done() - return fmt.Errorf("configure topic %s: %v", p.config.Topic, err) - } - - log.Printf("start scheduler thread for topic %s", p.config.Topic) - - generation := 0 - var errChan chan EachPartitionError - for { - glog.V(0).Infof("lookup partitions gen %d topic %s", generation+1, p.config.Topic) - if assignments, err := p.doLookupTopicPartitions(); err == nil { - generation++ - glog.V(0).Infof("start generation %d with %d assignments", generation, len(assignments)) - if errChan == nil { - errChan = make(chan EachPartitionError, len(assignments)) - } - p.onEachAssignments(generation, assignments, errChan) - } else { - glog.Errorf("lookup topic %s: %v", p.config.Topic, err) - time.Sleep(5 * time.Second) - continue - } - - if generation == 1 { - wg.Done() - } - - // wait for any error to happen. If so, consume all remaining errors, and retry - for { - select { - case eachErr := <-errChan: - glog.Errorf("gen %d publish to topic %s partition %v: %v", eachErr.generation, p.config.Topic, eachErr.Partition, eachErr.Err) - if eachErr.generation < generation { - continue - } - break - } - } - } -} - -func (p *TopicPublisher) onEachAssignments(generation int, assignments []*mq_pb.BrokerPartitionAssignment, errChan chan EachPartitionError) { - // TODO assuming this is not re-configured so the partitions are fixed. - sort.Slice(assignments, func(i, j int) bool { - return assignments[i].Partition.RangeStart < assignments[j].Partition.RangeStart - }) - var jobs []*EachPartitionPublishJob - hasExistingJob := len(p.jobs) == len(assignments) - for i, assignment := range assignments { - if assignment.LeaderBroker == "" { - continue - } - if hasExistingJob { - var existingJob *EachPartitionPublishJob - existingJob = p.jobs[i] - if existingJob.BrokerPartitionAssignment.LeaderBroker == assignment.LeaderBroker { - existingJob.generation = generation - jobs = append(jobs, existingJob) - continue - } else { - if existingJob.LeaderBroker != "" { - close(existingJob.stopChan) - existingJob.LeaderBroker = "" - existingJob.wg.Wait() - } - } - } - - // start a go routine to publish to this partition - job := &EachPartitionPublishJob{ - BrokerPartitionAssignment: assignment, - stopChan: make(chan bool, 1), - generation: generation, - inputQueue: buffered_queue.NewBufferedQueue[*mq_pb.DataMessage](1024), - } - job.wg.Add(1) - go func(job *EachPartitionPublishJob) { - defer job.wg.Done() - if err := p.doPublishToPartition(job); err != nil { - log.Printf("publish to %s partition %v: %v", p.config.Topic, job.Partition, err) - errChan <- EachPartitionError{assignment, err, generation} - } - }(job) - jobs = append(jobs, job) - // TODO assuming this is not re-configured so the partitions are fixed. - // better just re-use the existing job - p.partition2Buffer.Insert(assignment.Partition.RangeStart, assignment.Partition.RangeStop, job.inputQueue) - } - p.jobs = jobs -} - -func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) error { - - log.Printf("connecting to %v for topic partition %+v", job.LeaderBroker, job.Partition) - - grpcConnection, err := grpc.NewClient(job.LeaderBroker, grpc.WithTransportCredentials(insecure.NewCredentials()), p.grpcDialOption) - if err != nil { - return fmt.Errorf("dial broker %s: %v", job.LeaderBroker, err) - } - brokerClient := mq_pb.NewSeaweedMessagingClient(grpcConnection) - stream, err := brokerClient.PublishMessage(context.Background()) - if err != nil { - return fmt.Errorf("create publish client: %w", err) - } - publishClient := &PublishClient{ - SeaweedMessaging_PublishMessageClient: stream, - Broker: job.LeaderBroker, - } - if err = publishClient.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Init{ - Init: &mq_pb.PublishMessageRequest_InitMessage{ - Topic: p.config.Topic.ToPbTopic(), - Partition: job.Partition, - AckInterval: 128, - FollowerBroker: job.FollowerBroker, - PublisherName: p.config.PublisherName, - }, - }, - }); err != nil { - return fmt.Errorf("send init message: %w", err) - } - // process the hello message - resp, err := stream.Recv() - if err != nil { - return fmt.Errorf("recv init response: %w", err) - } - if resp.Error != "" { - return fmt.Errorf("init response error: %v", resp.Error) - } - - var publishedTsNs int64 - hasMoreData := int32(1) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for { - ackResp, err := publishClient.Recv() - if err != nil { - e, _ := status.FromError(err) - if e.Code() == codes.Unknown && e.Message() == "EOF" { - log.Printf("publish to %s EOF", publishClient.Broker) - return - } - publishClient.Err = err - log.Printf("publish1 to %s error: %v\n", publishClient.Broker, err) - return - } - if ackResp.Error != "" { - publishClient.Err = fmt.Errorf("ack error: %v", ackResp.Error) - log.Printf("publish2 to %s error: %v\n", publishClient.Broker, ackResp.Error) - return - } - if ackResp.AckTsNs > 0 { - log.Printf("ack %d published %d hasMoreData:%d", ackResp.AckTsNs, atomic.LoadInt64(&publishedTsNs), atomic.LoadInt32(&hasMoreData)) - } - if atomic.LoadInt64(&publishedTsNs) <= ackResp.AckTsNs && atomic.LoadInt32(&hasMoreData) == 0 { - return - } - } - }() - - publishCounter := 0 - for data, hasData := job.inputQueue.Dequeue(); hasData; data, hasData = job.inputQueue.Dequeue() { - if data.Ctrl != nil && data.Ctrl.IsClose { - // need to set this before sending to brokers, to avoid timing issue - atomic.StoreInt32(&hasMoreData, 0) - } - if err := publishClient.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Data{ - Data: data, - }, - }); err != nil { - return fmt.Errorf("send publish data: %w", err) - } - publishCounter++ - atomic.StoreInt64(&publishedTsNs, data.TsNs) - } - if publishCounter > 0 { - wg.Wait() - } else { - // CloseSend would cancel the context on the server side - if err := publishClient.CloseSend(); err != nil { - return fmt.Errorf("close send: %w", err) - } - } - - log.Printf("published %d messages to %v for topic partition %+v", publishCounter, job.LeaderBroker, job.Partition) - - return nil -} - -func (p *TopicPublisher) doConfigureTopic() (err error) { - if len(p.config.Brokers) == 0 { - return fmt.Errorf("topic configuring found no bootstrap brokers") - } - var lastErr error - for _, brokerAddress := range p.config.Brokers { - err = pb.WithBrokerGrpcClient(false, - brokerAddress, - p.grpcDialOption, - func(client mq_pb.SeaweedMessagingClient) error { - _, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{ - Topic: p.config.Topic.ToPbTopic(), - PartitionCount: p.config.PartitionCount, - MessageRecordType: p.config.RecordType, // Flat schema - }) - return err - }) - if err == nil { - lastErr = nil - return nil - } else { - lastErr = fmt.Errorf("%s: %v", brokerAddress, err) - } - } - - if lastErr != nil { - return fmt.Errorf("doConfigureTopic %s: %v", p.config.Topic, err) - } - return nil -} - -func (p *TopicPublisher) doLookupTopicPartitions() (assignments []*mq_pb.BrokerPartitionAssignment, err error) { - if len(p.config.Brokers) == 0 { - return nil, fmt.Errorf("lookup found no bootstrap brokers") - } - var lastErr error - for _, brokerAddress := range p.config.Brokers { - err := pb.WithBrokerGrpcClient(false, - brokerAddress, - p.grpcDialOption, - func(client mq_pb.SeaweedMessagingClient) error { - lookupResp, err := client.LookupTopicBrokers(context.Background(), - &mq_pb.LookupTopicBrokersRequest{ - Topic: p.config.Topic.ToPbTopic(), - }) - glog.V(0).Infof("lookup topic %s: %v", p.config.Topic, lookupResp) - - if err != nil { - return err - } - - if len(lookupResp.BrokerPartitionAssignments) == 0 { - return fmt.Errorf("no broker partition assignments") - } - - assignments = lookupResp.BrokerPartitionAssignments - - return nil - }) - if err == nil { - return assignments, nil - } else { - lastErr = err - } - } - - return nil, fmt.Errorf("lookup topic %s: %v", p.config.Topic, lastErr) - -} diff --git a/weed/mq/client/sub_client/connect_to_sub_coordinator.go b/weed/mq/client/sub_client/connect_to_sub_coordinator.go deleted file mode 100644 index e88aaca2f..000000000 --- a/weed/mq/client/sub_client/connect_to_sub_coordinator.go +++ /dev/null @@ -1,107 +0,0 @@ -package sub_client - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "time" -) - -func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { - waitTime := 1 * time.Second - for { - for _, broker := range sub.bootstrapBrokers { - - select { - case <-sub.ctx.Done(): - return - default: - } - - // lookup topic brokers - var brokerLeader string - err := pb.WithBrokerGrpcClient(false, broker, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - resp, err := client.FindBrokerLeader(sub.ctx, &mq_pb.FindBrokerLeaderRequest{}) - if err != nil { - return err - } - brokerLeader = resp.Broker - return nil - }) - if err != nil { - glog.V(0).Infof("broker coordinator on %s: %v", broker, err) - continue - } - glog.V(0).Infof("found broker coordinator: %v", brokerLeader) - - // connect to the balancer - pb.WithBrokerGrpcClient(true, brokerLeader, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - - stream, err := client.SubscriberToSubCoordinator(sub.ctx) - if err != nil { - glog.V(0).Infof("subscriber %s: %v", sub.ContentConfig.Topic, err) - return err - } - waitTime = 1 * time.Second - - // Maybe later: subscribe to multiple topics instead of just one - - if err := stream.Send(&mq_pb.SubscriberToSubCoordinatorRequest{ - Message: &mq_pb.SubscriberToSubCoordinatorRequest_Init{ - Init: &mq_pb.SubscriberToSubCoordinatorRequest_InitMessage{ - ConsumerGroup: sub.SubscriberConfig.ConsumerGroup, - ConsumerGroupInstanceId: sub.SubscriberConfig.ConsumerGroupInstanceId, - Topic: sub.ContentConfig.Topic.ToPbTopic(), - MaxPartitionCount: sub.SubscriberConfig.MaxPartitionCount, - }, - }, - }); err != nil { - glog.V(0).Infof("subscriber %s send init: %v", sub.ContentConfig.Topic, err) - return err - } - - go func() { - for reply := range sub.brokerPartitionAssignmentAckChan { - - select { - case <-sub.ctx.Done(): - return - default: - } - - glog.V(0).Infof("subscriber instance %s ack %+v", sub.SubscriberConfig.ConsumerGroupInstanceId, reply) - if err := stream.Send(reply); err != nil { - glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err) - return - } - } - }() - - // keep receiving messages from the sub coordinator - for { - resp, err := stream.Recv() - if err != nil { - glog.V(0).Infof("subscriber %s receive: %v", sub.ContentConfig.Topic, err) - return err - } - - select { - case <-sub.ctx.Done(): - return nil - default: - } - - sub.brokerPartitionAssignmentChan <- resp - glog.V(0).Infof("Received assignment: %+v", resp) - } - - return nil - }) - } - glog.V(0).Infof("subscriber %s/%s waiting for more assignments", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) - if waitTime < 10*time.Second { - waitTime += 1 * time.Second - } - time.Sleep(waitTime) - } -} diff --git a/weed/mq/client/sub_client/on_each_partition.go b/weed/mq/client/sub_client/on_each_partition.go deleted file mode 100644 index 470e886d2..000000000 --- a/weed/mq/client/sub_client/on_each_partition.go +++ /dev/null @@ -1,142 +0,0 @@ -package sub_client - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type KeyedTimestamp struct { - Key []byte - TsNs int64 // Timestamp in nanoseconds for acknowledgment -} - -func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssignment, stopCh chan struct{}, onDataMessageFn OnDataMessageFn) error { - // connect to the partition broker - return pb.WithBrokerGrpcClient(true, assigned.LeaderBroker, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - - subscribeClient, err := client.SubscribeMessage(context.Background()) - if err != nil { - return fmt.Errorf("create subscribe client: %w", err) - } - - slidingWindowSize := sub.SubscriberConfig.SlidingWindowSize - if slidingWindowSize <= 0 { - slidingWindowSize = 1 - } - - po := findPartitionOffset(sub.ContentConfig.PartitionOffsets, assigned.Partition) - if po == nil { - po = &schema_pb.PartitionOffset{ - Partition: assigned.Partition, - StartTsNs: sub.ContentConfig.OffsetTsNs, - } - } - - if err = subscribeClient.Send(&mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Init{ - Init: &mq_pb.SubscribeMessageRequest_InitMessage{ - ConsumerGroup: sub.SubscriberConfig.ConsumerGroup, - ConsumerId: sub.SubscriberConfig.ConsumerGroupInstanceId, - Topic: sub.ContentConfig.Topic.ToPbTopic(), - PartitionOffset: po, - OffsetType: sub.ContentConfig.OffsetType, - Filter: sub.ContentConfig.Filter, - FollowerBroker: assigned.FollowerBroker, - SlidingWindowSize: slidingWindowSize, - }, - }, - }); err != nil { - glog.V(0).Infof("subscriber %s connected to partition %+v at %v: %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker, err) - } - - glog.V(0).Infof("subscriber %s connected to partition %+v at %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker) - - if sub.OnCompletionFunc != nil { - defer sub.OnCompletionFunc() - } - - go func() { - for { - select { - case <-sub.ctx.Done(): - subscribeClient.CloseSend() - return - case <-stopCh: - subscribeClient.CloseSend() - return - case ack, ok := <-sub.PartitionOffsetChan: - if !ok { - subscribeClient.CloseSend() - return - } - subscribeClient.SendMsg(&mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Ack{ - Ack: &mq_pb.SubscribeMessageRequest_AckMessage{ - Key: ack.Key, - TsNs: ack.TsNs, - }, - }, - }) - } - } - }() - - for { - // glog.V(0).Infof("subscriber %s/%s waiting for message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) - resp, err := subscribeClient.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - return nil - } - return fmt.Errorf("subscribe recv: %w", err) - } - if resp.Message == nil { - glog.V(0).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) - continue - } - - select { - case <-sub.ctx.Done(): - return nil - case <-stopCh: - return nil - default: - } - - switch m := resp.Message.(type) { - case *mq_pb.SubscribeMessageResponse_Data: - if m.Data.Ctrl != nil { - glog.V(2).Infof("subscriber %s received control from producer:%s isClose:%v", sub.SubscriberConfig.ConsumerGroup, m.Data.Ctrl.PublisherName, m.Data.Ctrl.IsClose) - continue - } - if len(m.Data.Key) == 0 { - // fmt.Printf("empty key %+v, type %v\n", m, reflect.TypeOf(m)) - continue - } - onDataMessageFn(m) - case *mq_pb.SubscribeMessageResponse_Ctrl: - // glog.V(0).Infof("subscriber %s/%s/%s received control %+v", sub.ContentConfig.Namespace, sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, m.Ctrl) - if m.Ctrl.IsEndOfStream || m.Ctrl.IsEndOfTopic { - return io.EOF - } - } - } - - }) -} - -func findPartitionOffset(partitionOffsets []*schema_pb.PartitionOffset, partition *schema_pb.Partition) *schema_pb.PartitionOffset { - for _, po := range partitionOffsets { - if po.Partition == partition { - return po - } - } - return nil -} diff --git a/weed/mq/client/sub_client/subscribe.go b/weed/mq/client/sub_client/subscribe.go deleted file mode 100644 index 0f3f9b5ee..000000000 --- a/weed/mq/client/sub_client/subscribe.go +++ /dev/null @@ -1,138 +0,0 @@ -package sub_client - -import ( - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -type ProcessorState struct { - stopCh chan struct{} -} - -// Subscribe subscribes to a topic's specified partitions. -// If a partition is moved to another broker, the subscriber will automatically reconnect to the new broker. - -func (sub *TopicSubscriber) Subscribe() error { - - go sub.startProcessors() - - // loop forever - // TODO shutdown the subscriber when not needed anymore - sub.doKeepConnectedToSubCoordinator() - - return nil -} - -func (sub *TopicSubscriber) startProcessors() { - // listen to the messages from the sub coordinator - // start one processor per partition - var wg sync.WaitGroup - semaphore := make(chan struct{}, sub.SubscriberConfig.MaxPartitionCount) - - for message := range sub.brokerPartitionAssignmentChan { - if assigned := message.GetAssignment(); assigned != nil { - wg.Add(1) - semaphore <- struct{}{} - - topicPartition := topic.FromPbPartition(assigned.PartitionAssignment.Partition) - - // wait until no covering partition is still in progress - sub.waitUntilNoOverlappingPartitionInFlight(topicPartition) - - // start a processors - stopChan := make(chan struct{}) - sub.activeProcessorsLock.Lock() - sub.activeProcessors[topicPartition] = &ProcessorState{ - stopCh: stopChan, - } - sub.activeProcessorsLock.Unlock() - - go func(assigned *mq_pb.BrokerPartitionAssignment, topicPartition topic.Partition) { - defer func() { - sub.activeProcessorsLock.Lock() - delete(sub.activeProcessors, topicPartition) - sub.activeProcessorsLock.Unlock() - - <-semaphore - wg.Done() - }() - glog.V(0).Infof("subscriber %s/%s assigned partition %+v at %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) - sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{ - Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignment{ - AckAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignmentMessage{ - Partition: assigned.Partition, - }, - }, - } - - executors := util.NewLimitedConcurrentExecutor(int(sub.SubscriberConfig.SlidingWindowSize)) - onDataMessageFn := func(m *mq_pb.SubscribeMessageResponse_Data) { - executors.Execute(func() { - if sub.OnDataMessageFunc != nil { - sub.OnDataMessageFunc(m) - } - sub.PartitionOffsetChan <- KeyedTimestamp{ - Key: m.Data.Key, - TsNs: m.Data.TsNs, - } - }) - } - - err := sub.onEachPartition(assigned, stopChan, onDataMessageFn) - if err != nil { - glog.V(0).Infof("subscriber %s/%s partition %+v at %v: %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker, err) - } else { - glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) - } - sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{ - Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{ - AckUnAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{ - Partition: assigned.Partition, - }, - }, - } - }(assigned.PartitionAssignment, topicPartition) - } - if unAssignment := message.GetUnAssignment(); unAssignment != nil { - topicPartition := topic.FromPbPartition(unAssignment.Partition) - sub.activeProcessorsLock.Lock() - if processor, found := sub.activeProcessors[topicPartition]; found { - close(processor.stopCh) - delete(sub.activeProcessors, topicPartition) - } - sub.activeProcessorsLock.Unlock() - } - } - - wg.Wait() - -} - -func (sub *TopicSubscriber) waitUntilNoOverlappingPartitionInFlight(topicPartition topic.Partition) { - foundOverlapping := true - for foundOverlapping { - sub.activeProcessorsLock.Lock() - foundOverlapping = false - var overlappedPartition topic.Partition - for partition, _ := range sub.activeProcessors { - if partition.Overlaps(topicPartition) { - if partition.Equals(topicPartition) { - continue - } - foundOverlapping = true - overlappedPartition = partition - break - } - } - sub.activeProcessorsLock.Unlock() - if foundOverlapping { - glog.V(0).Infof("subscriber %s new partition %v waiting for partition %+v to complete", sub.ContentConfig.Topic, topicPartition, overlappedPartition) - time.Sleep(1 * time.Second) - } - } -} diff --git a/weed/mq/client/sub_client/subscriber.go b/weed/mq/client/sub_client/subscriber.go deleted file mode 100644 index 68bf74c5e..000000000 --- a/weed/mq/client/sub_client/subscriber.go +++ /dev/null @@ -1,70 +0,0 @@ -package sub_client - -import ( - "context" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/grpc" -) - -type SubscriberConfiguration struct { - ClientId string - ConsumerGroup string - ConsumerGroupInstanceId string - GrpcDialOption grpc.DialOption - MaxPartitionCount int32 // how many partitions to process concurrently - SlidingWindowSize int32 // how many messages to process concurrently per partition -} - -func (s *SubscriberConfiguration) String() string { - return "ClientId: " + s.ClientId + ", ConsumerGroup: " + s.ConsumerGroup + ", ConsumerGroupInstanceId: " + s.ConsumerGroupInstanceId -} - -type ContentConfiguration struct { - Topic topic.Topic - Filter string - PartitionOffsets []*schema_pb.PartitionOffset - OffsetType schema_pb.OffsetType - OffsetTsNs int64 -} - -type OnDataMessageFn func(m *mq_pb.SubscribeMessageResponse_Data) -type OnCompletionFunc func() - -type TopicSubscriber struct { - ctx context.Context - SubscriberConfig *SubscriberConfiguration - ContentConfig *ContentConfiguration - brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse - brokerPartitionAssignmentAckChan chan *mq_pb.SubscriberToSubCoordinatorRequest - OnDataMessageFunc OnDataMessageFn - OnCompletionFunc OnCompletionFunc - bootstrapBrokers []string - activeProcessors map[topic.Partition]*ProcessorState - activeProcessorsLock sync.Mutex - PartitionOffsetChan chan KeyedTimestamp -} - -func NewTopicSubscriber(ctx context.Context, bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration, partitionOffsetChan chan KeyedTimestamp) *TopicSubscriber { - return &TopicSubscriber{ - ctx: ctx, - SubscriberConfig: subscriber, - ContentConfig: content, - brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024), - brokerPartitionAssignmentAckChan: make(chan *mq_pb.SubscriberToSubCoordinatorRequest, 1024), - bootstrapBrokers: bootstrapBrokers, - activeProcessors: make(map[topic.Partition]*ProcessorState), - PartitionOffsetChan: partitionOffsetChan, - } -} - -func (sub *TopicSubscriber) SetOnDataMessageFn(fn OnDataMessageFn) { - sub.OnDataMessageFunc = fn -} - -func (sub *TopicSubscriber) SetCompletionFunc(onCompletionFn OnCompletionFunc) { - sub.OnCompletionFunc = onCompletionFn -} diff --git a/weed/mq/kafka/API_VERSION_MATRIX.md b/weed/mq/kafka/API_VERSION_MATRIX.md deleted file mode 100644 index d9465c7b4..000000000 --- a/weed/mq/kafka/API_VERSION_MATRIX.md +++ /dev/null @@ -1,77 +0,0 @@ -# Kafka API Version Matrix Audit - -## Summary -This document audits the advertised API versions in `handleApiVersions()` against actual implementation support in `validateAPIVersion()` and handlers. - -## Current Status: ALL VERIFIED โœ… - -### API Version Matrix - -| API Key | API Name | Advertised | Validated | Handler Implemented | Status | -|---------|----------|------------|-----------|---------------------|--------| -| 18 | ApiVersions | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 3 | Metadata | v0-v7 | v0-v7 | v0-v7 | โœ… Match | -| 0 | Produce | v0-v7 | v0-v7 | v0-v7 | โœ… Match | -| 1 | Fetch | v0-v7 | v0-v7 | v0-v7 | โœ… Match | -| 2 | ListOffsets | v0-v2 | v0-v2 | v0-v2 | โœ… Match | -| 19 | CreateTopics | v0-v5 | v0-v5 | v0-v5 | โœ… Match | -| 20 | DeleteTopics | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 10 | FindCoordinator | v0-v3 | v0-v3 | v0-v3 | โœ… Match | -| 11 | JoinGroup | v0-v6 | v0-v6 | v0-v6 | โœ… Match | -| 14 | SyncGroup | v0-v5 | v0-v5 | v0-v5 | โœ… Match | -| 8 | OffsetCommit | v0-v2 | v0-v2 | v0-v2 | โœ… Match | -| 9 | OffsetFetch | v0-v5 | v0-v5 | v0-v5 | โœ… Match | -| 12 | Heartbeat | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 13 | LeaveGroup | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 15 | DescribeGroups | v0-v5 | v0-v5 | v0-v5 | โœ… Match | -| 16 | ListGroups | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 32 | DescribeConfigs | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 22 | InitProducerId | v0-v4 | v0-v4 | v0-v4 | โœ… Match | -| 60 | DescribeCluster | v0-v1 | v0-v1 | v0-v1 | โœ… Match | - -## Implementation Details - -### Core APIs -- **ApiVersions (v0-v4)**: Supports both flexible (v3+) and non-flexible formats. v4 added for Kafka 8.0.0 compatibility. -- **Metadata (v0-v7)**: Full version support with flexible format in v7+ -- **Produce (v0-v7)**: Supports transactional writes and idempotent producers -- **Fetch (v0-v7)**: Includes schema-aware fetching and multi-batch support - -### Consumer Group Coordination -- **FindCoordinator (v0-v3)**: v3+ supports flexible format -- **JoinGroup (v0-v6)**: Capped at v6 (first flexible version) -- **SyncGroup (v0-v5)**: Full consumer group protocol support -- **Heartbeat (v0-v4)**: Consumer group session management -- **LeaveGroup (v0-v4)**: Clean consumer group exit -- **OffsetCommit (v0-v2)**: Consumer offset persistence -- **OffsetFetch (v0-v5)**: v3+ includes throttle_time_ms, v5+ includes leader_epoch - -### Topic Management -- **CreateTopics (v0-v5)**: v2+ uses compact arrays and tagged fields -- **DeleteTopics (v0-v4)**: Full topic deletion support -- **ListOffsets (v0-v2)**: Offset listing for partitions - -### Admin & Discovery -- **DescribeCluster (v0-v1)**: AdminClient compatibility (KIP-919) -- **DescribeGroups (v0-v5)**: Consumer group introspection -- **ListGroups (v0-v4)**: List all consumer groups -- **DescribeConfigs (v0-v4)**: Configuration inspection -- **InitProducerId (v0-v4)**: Transactional producer initialization - -## Verification Source - -All version ranges verified from `handler.go`: -- `SupportedApiKeys` array (line 1196): Advertised versions -- `validateAPIVersion()` function (line 2903): Validation ranges -- Individual handler implementations: Actual version support - -Last verified: 2025-10-13 - -## Maintenance Notes - -1. After adding new API handlers, update all three locations: - - `SupportedApiKeys` array - - `validateAPIVersion()` map - - This documentation -2. Test new versions with kafka-go and Sarama clients -3. Ensure flexible format support for v3+ APIs where applicable diff --git a/weed/mq/kafka/compression/compression.go b/weed/mq/kafka/compression/compression.go deleted file mode 100644 index f4c472199..000000000 --- a/weed/mq/kafka/compression/compression.go +++ /dev/null @@ -1,203 +0,0 @@ -package compression - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - - "github.com/golang/snappy" - "github.com/klauspost/compress/zstd" - "github.com/pierrec/lz4/v4" -) - -// nopCloser wraps an io.Reader to provide a no-op Close method -type nopCloser struct { - io.Reader -} - -func (nopCloser) Close() error { return nil } - -// CompressionCodec represents the compression codec used in Kafka record batches -type CompressionCodec int8 - -const ( - None CompressionCodec = 0 - Gzip CompressionCodec = 1 - Snappy CompressionCodec = 2 - Lz4 CompressionCodec = 3 - Zstd CompressionCodec = 4 -) - -// String returns the string representation of the compression codec -func (c CompressionCodec) String() string { - switch c { - case None: - return "none" - case Gzip: - return "gzip" - case Snappy: - return "snappy" - case Lz4: - return "lz4" - case Zstd: - return "zstd" - default: - return fmt.Sprintf("unknown(%d)", c) - } -} - -// IsValid returns true if the compression codec is valid -func (c CompressionCodec) IsValid() bool { - return c >= None && c <= Zstd -} - -// ExtractCompressionCodec extracts the compression codec from record batch attributes -func ExtractCompressionCodec(attributes int16) CompressionCodec { - return CompressionCodec(attributes & 0x07) // Lower 3 bits -} - -// SetCompressionCodec sets the compression codec in record batch attributes -func SetCompressionCodec(attributes int16, codec CompressionCodec) int16 { - return (attributes &^ 0x07) | int16(codec) -} - -// Compress compresses data using the specified codec -func Compress(codec CompressionCodec, data []byte) ([]byte, error) { - if codec == None { - return data, nil - } - - var buf bytes.Buffer - var writer io.WriteCloser - var err error - - switch codec { - case Gzip: - writer = gzip.NewWriter(&buf) - case Snappy: - // Snappy doesn't have a streaming writer, so we compress directly - compressed := snappy.Encode(nil, data) - if compressed == nil { - compressed = []byte{} - } - return compressed, nil - case Lz4: - writer = lz4.NewWriter(&buf) - case Zstd: - writer, err = zstd.NewWriter(&buf) - if err != nil { - return nil, fmt.Errorf("failed to create zstd writer: %w", err) - } - default: - return nil, fmt.Errorf("unsupported compression codec: %s", codec) - } - - if _, err := writer.Write(data); err != nil { - writer.Close() - return nil, fmt.Errorf("failed to write compressed data: %w", err) - } - - if err := writer.Close(); err != nil { - return nil, fmt.Errorf("failed to close compressor: %w", err) - } - - return buf.Bytes(), nil -} - -// Decompress decompresses data using the specified codec -func Decompress(codec CompressionCodec, data []byte) ([]byte, error) { - if codec == None { - return data, nil - } - - var reader io.ReadCloser - var err error - - buf := bytes.NewReader(data) - - switch codec { - case Gzip: - reader, err = gzip.NewReader(buf) - if err != nil { - return nil, fmt.Errorf("failed to create gzip reader: %w", err) - } - case Snappy: - // Snappy doesn't have a streaming reader, so we decompress directly - decompressed, err := snappy.Decode(nil, data) - if err != nil { - return nil, fmt.Errorf("failed to decompress snappy data: %w", err) - } - if decompressed == nil { - decompressed = []byte{} - } - return decompressed, nil - case Lz4: - lz4Reader := lz4.NewReader(buf) - // lz4.Reader doesn't implement Close, so we wrap it - reader = &nopCloser{Reader: lz4Reader} - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, fmt.Errorf("failed to create zstd reader: %w", err) - } - defer zstdReader.Close() - - var result bytes.Buffer - if _, err := io.Copy(&result, zstdReader); err != nil { - return nil, fmt.Errorf("failed to decompress zstd data: %w", err) - } - decompressed := result.Bytes() - if decompressed == nil { - decompressed = []byte{} - } - return decompressed, nil - default: - return nil, fmt.Errorf("unsupported compression codec: %s", codec) - } - - defer reader.Close() - - var result bytes.Buffer - if _, err := io.Copy(&result, reader); err != nil { - return nil, fmt.Errorf("failed to decompress data: %w", err) - } - - decompressed := result.Bytes() - if decompressed == nil { - decompressed = []byte{} - } - return decompressed, nil -} - -// CompressRecordBatch compresses the records portion of a Kafka record batch -// This function compresses only the records data, not the entire batch header -func CompressRecordBatch(codec CompressionCodec, recordsData []byte) ([]byte, int16, error) { - if codec == None { - return recordsData, 0, nil - } - - compressed, err := Compress(codec, recordsData) - if err != nil { - return nil, 0, fmt.Errorf("failed to compress record batch: %w", err) - } - - attributes := int16(codec) - return compressed, attributes, nil -} - -// DecompressRecordBatch decompresses the records portion of a Kafka record batch -func DecompressRecordBatch(attributes int16, compressedData []byte) ([]byte, error) { - codec := ExtractCompressionCodec(attributes) - - if codec == None { - return compressedData, nil - } - - decompressed, err := Decompress(codec, compressedData) - if err != nil { - return nil, fmt.Errorf("failed to decompress record batch: %w", err) - } - - return decompressed, nil -} diff --git a/weed/mq/kafka/compression/compression_test.go b/weed/mq/kafka/compression/compression_test.go deleted file mode 100644 index 41fe82651..000000000 --- a/weed/mq/kafka/compression/compression_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package compression - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestCompressionCodec_String tests the string representation of compression codecs -func TestCompressionCodec_String(t *testing.T) { - tests := []struct { - codec CompressionCodec - expected string - }{ - {None, "none"}, - {Gzip, "gzip"}, - {Snappy, "snappy"}, - {Lz4, "lz4"}, - {Zstd, "zstd"}, - {CompressionCodec(99), "unknown(99)"}, - } - - for _, test := range tests { - t.Run(test.expected, func(t *testing.T) { - assert.Equal(t, test.expected, test.codec.String()) - }) - } -} - -// TestCompressionCodec_IsValid tests codec validation -func TestCompressionCodec_IsValid(t *testing.T) { - tests := []struct { - codec CompressionCodec - valid bool - }{ - {None, true}, - {Gzip, true}, - {Snappy, true}, - {Lz4, true}, - {Zstd, true}, - {CompressionCodec(-1), false}, - {CompressionCodec(5), false}, - {CompressionCodec(99), false}, - } - - for _, test := range tests { - t.Run(test.codec.String(), func(t *testing.T) { - assert.Equal(t, test.valid, test.codec.IsValid()) - }) - } -} - -// TestExtractCompressionCodec tests extracting compression codec from attributes -func TestExtractCompressionCodec(t *testing.T) { - tests := []struct { - name string - attributes int16 - expected CompressionCodec - }{ - {"None", 0x0000, None}, - {"Gzip", 0x0001, Gzip}, - {"Snappy", 0x0002, Snappy}, - {"Lz4", 0x0003, Lz4}, - {"Zstd", 0x0004, Zstd}, - {"Gzip with transactional", 0x0011, Gzip}, // Bit 4 set (transactional) - {"Snappy with control", 0x0022, Snappy}, // Bit 5 set (control) - {"Lz4 with both flags", 0x0033, Lz4}, // Both flags set - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - codec := ExtractCompressionCodec(test.attributes) - assert.Equal(t, test.expected, codec) - }) - } -} - -// TestSetCompressionCodec tests setting compression codec in attributes -func TestSetCompressionCodec(t *testing.T) { - tests := []struct { - name string - attributes int16 - codec CompressionCodec - expected int16 - }{ - {"Set None", 0x0000, None, 0x0000}, - {"Set Gzip", 0x0000, Gzip, 0x0001}, - {"Set Snappy", 0x0000, Snappy, 0x0002}, - {"Set Lz4", 0x0000, Lz4, 0x0003}, - {"Set Zstd", 0x0000, Zstd, 0x0004}, - {"Replace Gzip with Snappy", 0x0001, Snappy, 0x0002}, - {"Set Gzip preserving transactional", 0x0010, Gzip, 0x0011}, - {"Set Lz4 preserving control", 0x0020, Lz4, 0x0023}, - {"Set Zstd preserving both flags", 0x0030, Zstd, 0x0034}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - result := SetCompressionCodec(test.attributes, test.codec) - assert.Equal(t, test.expected, result) - }) - } -} - -// TestCompress_None tests compression with None codec -func TestCompress_None(t *testing.T) { - data := []byte("Hello, World!") - - compressed, err := Compress(None, data) - require.NoError(t, err) - assert.Equal(t, data, compressed, "None codec should return original data") -} - -// TestCompress_Gzip tests gzip compression -func TestCompress_Gzip(t *testing.T) { - data := []byte("Hello, World! This is a test message for gzip compression.") - - compressed, err := Compress(Gzip, data) - require.NoError(t, err) - assert.NotEqual(t, data, compressed, "Gzip should compress data") - assert.True(t, len(compressed) > 0, "Compressed data should not be empty") -} - -// TestCompress_Snappy tests snappy compression -func TestCompress_Snappy(t *testing.T) { - data := []byte("Hello, World! This is a test message for snappy compression.") - - compressed, err := Compress(Snappy, data) - require.NoError(t, err) - assert.NotEqual(t, data, compressed, "Snappy should compress data") - assert.True(t, len(compressed) > 0, "Compressed data should not be empty") -} - -// TestCompress_Lz4 tests lz4 compression -func TestCompress_Lz4(t *testing.T) { - data := []byte("Hello, World! This is a test message for lz4 compression.") - - compressed, err := Compress(Lz4, data) - require.NoError(t, err) - assert.NotEqual(t, data, compressed, "Lz4 should compress data") - assert.True(t, len(compressed) > 0, "Compressed data should not be empty") -} - -// TestCompress_Zstd tests zstd compression -func TestCompress_Zstd(t *testing.T) { - data := []byte("Hello, World! This is a test message for zstd compression.") - - compressed, err := Compress(Zstd, data) - require.NoError(t, err) - assert.NotEqual(t, data, compressed, "Zstd should compress data") - assert.True(t, len(compressed) > 0, "Compressed data should not be empty") -} - -// TestCompress_InvalidCodec tests compression with invalid codec -func TestCompress_InvalidCodec(t *testing.T) { - data := []byte("Hello, World!") - - _, err := Compress(CompressionCodec(99), data) - assert.Error(t, err) - assert.Contains(t, err.Error(), "unsupported compression codec") -} - -// TestDecompress_None tests decompression with None codec -func TestDecompress_None(t *testing.T) { - data := []byte("Hello, World!") - - decompressed, err := Decompress(None, data) - require.NoError(t, err) - assert.Equal(t, data, decompressed, "None codec should return original data") -} - -// TestRoundTrip tests compression and decompression round trip for all codecs -func TestRoundTrip(t *testing.T) { - testData := [][]byte{ - []byte("Hello, World!"), - []byte(""), - []byte("A"), - []byte(string(bytes.Repeat([]byte("Test data for compression round trip. "), 100))), - []byte("Special characters: ร รกรขรฃรครฅรฆรงรจรฉรชรซรฌรญรฎรฏรฐรฑรฒรณรดรตรถรทรธรนรบรปรผรฝรพรฟ"), - bytes.Repeat([]byte{0x00, 0x01, 0x02, 0xFF}, 256), // Binary data - } - - codecs := []CompressionCodec{None, Gzip, Snappy, Lz4, Zstd} - - for _, codec := range codecs { - t.Run(codec.String(), func(t *testing.T) { - for i, data := range testData { - t.Run(fmt.Sprintf("data_%d", i), func(t *testing.T) { - // Compress - compressed, err := Compress(codec, data) - require.NoError(t, err, "Compression should succeed") - - // Decompress - decompressed, err := Decompress(codec, compressed) - require.NoError(t, err, "Decompression should succeed") - - // Verify round trip - assert.Equal(t, data, decompressed, "Round trip should preserve data") - }) - } - }) - } -} - -// TestDecompress_InvalidCodec tests decompression with invalid codec -func TestDecompress_InvalidCodec(t *testing.T) { - data := []byte("Hello, World!") - - _, err := Decompress(CompressionCodec(99), data) - assert.Error(t, err) - assert.Contains(t, err.Error(), "unsupported compression codec") -} - -// TestDecompress_CorruptedData tests decompression with corrupted data -func TestDecompress_CorruptedData(t *testing.T) { - corruptedData := []byte("This is not compressed data") - - codecs := []CompressionCodec{Gzip, Snappy, Lz4, Zstd} - - for _, codec := range codecs { - t.Run(codec.String(), func(t *testing.T) { - _, err := Decompress(codec, corruptedData) - assert.Error(t, err, "Decompression of corrupted data should fail") - }) - } -} - -// TestCompressRecordBatch tests record batch compression -func TestCompressRecordBatch(t *testing.T) { - recordsData := []byte("Record batch data for compression testing") - - t.Run("None codec", func(t *testing.T) { - compressed, attributes, err := CompressRecordBatch(None, recordsData) - require.NoError(t, err) - assert.Equal(t, recordsData, compressed) - assert.Equal(t, int16(0), attributes) - }) - - t.Run("Gzip codec", func(t *testing.T) { - compressed, attributes, err := CompressRecordBatch(Gzip, recordsData) - require.NoError(t, err) - assert.NotEqual(t, recordsData, compressed) - assert.Equal(t, int16(1), attributes) - }) - - t.Run("Snappy codec", func(t *testing.T) { - compressed, attributes, err := CompressRecordBatch(Snappy, recordsData) - require.NoError(t, err) - assert.NotEqual(t, recordsData, compressed) - assert.Equal(t, int16(2), attributes) - }) -} - -// TestDecompressRecordBatch tests record batch decompression -func TestDecompressRecordBatch(t *testing.T) { - recordsData := []byte("Record batch data for decompression testing") - - t.Run("None codec", func(t *testing.T) { - attributes := int16(0) // No compression - decompressed, err := DecompressRecordBatch(attributes, recordsData) - require.NoError(t, err) - assert.Equal(t, recordsData, decompressed) - }) - - t.Run("Round trip with Gzip", func(t *testing.T) { - // Compress - compressed, attributes, err := CompressRecordBatch(Gzip, recordsData) - require.NoError(t, err) - - // Decompress - decompressed, err := DecompressRecordBatch(attributes, compressed) - require.NoError(t, err) - assert.Equal(t, recordsData, decompressed) - }) - - t.Run("Round trip with Snappy", func(t *testing.T) { - // Compress - compressed, attributes, err := CompressRecordBatch(Snappy, recordsData) - require.NoError(t, err) - - // Decompress - decompressed, err := DecompressRecordBatch(attributes, compressed) - require.NoError(t, err) - assert.Equal(t, recordsData, decompressed) - }) -} - -// TestCompressionEfficiency tests compression efficiency for different codecs -func TestCompressionEfficiency(t *testing.T) { - // Create highly compressible data - data := bytes.Repeat([]byte("This is a repeated string for compression testing. "), 100) - - codecs := []CompressionCodec{Gzip, Snappy, Lz4, Zstd} - - for _, codec := range codecs { - t.Run(codec.String(), func(t *testing.T) { - compressed, err := Compress(codec, data) - require.NoError(t, err) - - compressionRatio := float64(len(compressed)) / float64(len(data)) - t.Logf("Codec: %s, Original: %d bytes, Compressed: %d bytes, Ratio: %.2f", - codec.String(), len(data), len(compressed), compressionRatio) - - // All codecs should achieve some compression on this highly repetitive data - assert.Less(t, len(compressed), len(data), "Compression should reduce data size") - }) - } -} - -// BenchmarkCompression benchmarks compression performance for different codecs -func BenchmarkCompression(b *testing.B) { - data := bytes.Repeat([]byte("Benchmark data for compression testing. "), 1000) - codecs := []CompressionCodec{None, Gzip, Snappy, Lz4, Zstd} - - for _, codec := range codecs { - b.Run(fmt.Sprintf("Compress_%s", codec.String()), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := Compress(codec, data) - if err != nil { - b.Fatal(err) - } - } - }) - } -} - -// BenchmarkDecompression benchmarks decompression performance for different codecs -func BenchmarkDecompression(b *testing.B) { - data := bytes.Repeat([]byte("Benchmark data for decompression testing. "), 1000) - codecs := []CompressionCodec{None, Gzip, Snappy, Lz4, Zstd} - - for _, codec := range codecs { - // Pre-compress the data - compressed, err := Compress(codec, data) - if err != nil { - b.Fatal(err) - } - - b.Run(fmt.Sprintf("Decompress_%s", codec.String()), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := Decompress(codec, compressed) - if err != nil { - b.Fatal(err) - } - } - }) - } -} diff --git a/weed/mq/kafka/consumer/assignment.go b/weed/mq/kafka/consumer/assignment.go deleted file mode 100644 index 706efe5c9..000000000 --- a/weed/mq/kafka/consumer/assignment.go +++ /dev/null @@ -1,299 +0,0 @@ -package consumer - -import ( - "sort" -) - -// Assignment strategy protocol names -const ( - ProtocolNameRange = "range" - ProtocolNameRoundRobin = "roundrobin" - ProtocolNameSticky = "sticky" - ProtocolNameCooperativeSticky = "cooperative-sticky" -) - -// AssignmentStrategy defines how partitions are assigned to consumers -type AssignmentStrategy interface { - Name() string - Assign(members []*GroupMember, topicPartitions map[string][]int32) map[string][]PartitionAssignment -} - -// RangeAssignmentStrategy implements the Range assignment strategy -// Assigns partitions in ranges to consumers, similar to Kafka's range assignor -type RangeAssignmentStrategy struct{} - -func (r *RangeAssignmentStrategy) Name() string { - return ProtocolNameRange -} - -func (r *RangeAssignmentStrategy) Assign(members []*GroupMember, topicPartitions map[string][]int32) map[string][]PartitionAssignment { - if len(members) == 0 { - return make(map[string][]PartitionAssignment) - } - - assignments := make(map[string][]PartitionAssignment) - for _, member := range members { - assignments[member.ID] = make([]PartitionAssignment, 0) - } - - // Sort members for consistent assignment - sortedMembers := make([]*GroupMember, len(members)) - copy(sortedMembers, members) - sort.Slice(sortedMembers, func(i, j int) bool { - return sortedMembers[i].ID < sortedMembers[j].ID - }) - - // Get all subscribed topics - subscribedTopics := make(map[string]bool) - for _, member := range members { - for _, topic := range member.Subscription { - subscribedTopics[topic] = true - } - } - - // Assign partitions for each topic - for topic := range subscribedTopics { - partitions, exists := topicPartitions[topic] - if !exists { - continue - } - - // Sort partitions for consistent assignment - sort.Slice(partitions, func(i, j int) bool { - return partitions[i] < partitions[j] - }) - - // Find members subscribed to this topic - topicMembers := make([]*GroupMember, 0) - for _, member := range sortedMembers { - for _, subscribedTopic := range member.Subscription { - if subscribedTopic == topic { - topicMembers = append(topicMembers, member) - break - } - } - } - - if len(topicMembers) == 0 { - continue - } - - // Assign partitions to members using range strategy - numPartitions := len(partitions) - numMembers := len(topicMembers) - partitionsPerMember := numPartitions / numMembers - remainingPartitions := numPartitions % numMembers - - partitionIndex := 0 - for memberIndex, member := range topicMembers { - // Calculate how many partitions this member should get - memberPartitions := partitionsPerMember - if memberIndex < remainingPartitions { - memberPartitions++ - } - - // Assign partitions to this member - for i := 0; i < memberPartitions && partitionIndex < numPartitions; i++ { - assignment := PartitionAssignment{ - Topic: topic, - Partition: partitions[partitionIndex], - } - assignments[member.ID] = append(assignments[member.ID], assignment) - partitionIndex++ - } - } - } - - return assignments -} - -// RoundRobinAssignmentStrategy implements the RoundRobin assignment strategy -// Distributes partitions evenly across all consumers in round-robin fashion -type RoundRobinAssignmentStrategy struct{} - -func (rr *RoundRobinAssignmentStrategy) Name() string { - return ProtocolNameRoundRobin -} - -func (rr *RoundRobinAssignmentStrategy) Assign(members []*GroupMember, topicPartitions map[string][]int32) map[string][]PartitionAssignment { - if len(members) == 0 { - return make(map[string][]PartitionAssignment) - } - - assignments := make(map[string][]PartitionAssignment) - for _, member := range members { - assignments[member.ID] = make([]PartitionAssignment, 0) - } - - // Sort members for consistent assignment - sortedMembers := make([]*GroupMember, len(members)) - copy(sortedMembers, members) - sort.Slice(sortedMembers, func(i, j int) bool { - return sortedMembers[i].ID < sortedMembers[j].ID - }) - - // Collect all partition assignments across all topics - allAssignments := make([]PartitionAssignment, 0) - - // Get all subscribed topics - subscribedTopics := make(map[string]bool) - for _, member := range members { - for _, topic := range member.Subscription { - subscribedTopics[topic] = true - } - } - - // Collect all partitions from all subscribed topics - for topic := range subscribedTopics { - partitions, exists := topicPartitions[topic] - if !exists { - continue - } - - for _, partition := range partitions { - allAssignments = append(allAssignments, PartitionAssignment{ - Topic: topic, - Partition: partition, - }) - } - } - - // Sort assignments for consistent distribution - sort.Slice(allAssignments, func(i, j int) bool { - if allAssignments[i].Topic != allAssignments[j].Topic { - return allAssignments[i].Topic < allAssignments[j].Topic - } - return allAssignments[i].Partition < allAssignments[j].Partition - }) - - // Distribute partitions in round-robin fashion - memberIndex := 0 - for _, assignment := range allAssignments { - // Find a member that is subscribed to this topic - assigned := false - startIndex := memberIndex - - for !assigned { - member := sortedMembers[memberIndex] - - // Check if this member is subscribed to the topic - subscribed := false - for _, topic := range member.Subscription { - if topic == assignment.Topic { - subscribed = true - break - } - } - - if subscribed { - assignments[member.ID] = append(assignments[member.ID], assignment) - assigned = true - } - - memberIndex = (memberIndex + 1) % len(sortedMembers) - - // Prevent infinite loop if no member is subscribed to this topic - if memberIndex == startIndex && !assigned { - break - } - } - } - - return assignments -} - -// GetAssignmentStrategy returns the appropriate assignment strategy -func GetAssignmentStrategy(name string) AssignmentStrategy { - switch name { - case ProtocolNameRange: - return &RangeAssignmentStrategy{} - case ProtocolNameRoundRobin: - return &RoundRobinAssignmentStrategy{} - case ProtocolNameCooperativeSticky: - return NewIncrementalCooperativeAssignmentStrategy() - default: - // Default to range strategy - return &RangeAssignmentStrategy{} - } -} - -// AssignPartitions performs partition assignment for a consumer group -func (group *ConsumerGroup) AssignPartitions(topicPartitions map[string][]int32) { - if len(group.Members) == 0 { - return - } - - // Convert members map to slice - members := make([]*GroupMember, 0, len(group.Members)) - for _, member := range group.Members { - if member.State == MemberStateStable || member.State == MemberStatePending { - members = append(members, member) - } - } - - if len(members) == 0 { - return - } - - // Get assignment strategy - strategy := GetAssignmentStrategy(group.Protocol) - assignments := strategy.Assign(members, topicPartitions) - - // Apply assignments to members - for memberID, assignment := range assignments { - if member, exists := group.Members[memberID]; exists { - member.Assignment = assignment - } - } -} - -// GetMemberAssignments returns the current partition assignments for all members -func (group *ConsumerGroup) GetMemberAssignments() map[string][]PartitionAssignment { - group.Mu.RLock() - defer group.Mu.RUnlock() - - assignments := make(map[string][]PartitionAssignment) - for memberID, member := range group.Members { - assignments[memberID] = make([]PartitionAssignment, len(member.Assignment)) - copy(assignments[memberID], member.Assignment) - } - - return assignments -} - -// UpdateMemberSubscription updates a member's topic subscription -func (group *ConsumerGroup) UpdateMemberSubscription(memberID string, topics []string) { - group.Mu.Lock() - defer group.Mu.Unlock() - - member, exists := group.Members[memberID] - if !exists { - return - } - - // Update member subscription - member.Subscription = make([]string, len(topics)) - copy(member.Subscription, topics) - - // Update group's subscribed topics - group.SubscribedTopics = make(map[string]bool) - for _, m := range group.Members { - for _, topic := range m.Subscription { - group.SubscribedTopics[topic] = true - } - } -} - -// GetSubscribedTopics returns all topics subscribed by the group -func (group *ConsumerGroup) GetSubscribedTopics() []string { - group.Mu.RLock() - defer group.Mu.RUnlock() - - topics := make([]string, 0, len(group.SubscribedTopics)) - for topic := range group.SubscribedTopics { - topics = append(topics, topic) - } - - sort.Strings(topics) - return topics -} diff --git a/weed/mq/kafka/consumer/assignment_test.go b/weed/mq/kafka/consumer/assignment_test.go deleted file mode 100644 index 14200366f..000000000 --- a/weed/mq/kafka/consumer/assignment_test.go +++ /dev/null @@ -1,359 +0,0 @@ -package consumer - -import ( - "reflect" - "sort" - "testing" -) - -func TestRangeAssignmentStrategy(t *testing.T) { - strategy := &RangeAssignmentStrategy{} - - if strategy.Name() != ProtocolNameRange { - t.Errorf("Expected strategy name '%s', got '%s'", ProtocolNameRange, strategy.Name()) - } - - // Test with 2 members, 4 partitions on one topic - members := []*GroupMember{ - { - ID: "member1", - Subscription: []string{"topic1"}, - }, - { - ID: "member2", - Subscription: []string{"topic1"}, - }, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify all members have assignments - if len(assignments) != 2 { - t.Fatalf("Expected assignments for 2 members, got %d", len(assignments)) - } - - // Verify total partitions assigned - totalAssigned := 0 - for _, assignment := range assignments { - totalAssigned += len(assignment) - } - - if totalAssigned != 4 { - t.Errorf("Expected 4 total partitions assigned, got %d", totalAssigned) - } - - // Range assignment should distribute evenly: 2 partitions each - for memberID, assignment := range assignments { - if len(assignment) != 2 { - t.Errorf("Expected 2 partitions for member %s, got %d", memberID, len(assignment)) - } - - // Verify all assignments are for the subscribed topic - for _, pa := range assignment { - if pa.Topic != "topic1" { - t.Errorf("Expected topic 'topic1', got '%s'", pa.Topic) - } - } - } -} - -func TestRangeAssignmentStrategy_UnevenPartitions(t *testing.T) { - strategy := &RangeAssignmentStrategy{} - - // Test with 3 members, 4 partitions - should distribute 2,1,1 - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1"}}, - {ID: "member2", Subscription: []string{"topic1"}}, - {ID: "member3", Subscription: []string{"topic1"}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Get assignment counts - counts := make([]int, 0, 3) - for _, assignment := range assignments { - counts = append(counts, len(assignment)) - } - sort.Ints(counts) - - // Should be distributed as [1, 1, 2] (first member gets extra partition) - expected := []int{1, 1, 2} - if !reflect.DeepEqual(counts, expected) { - t.Errorf("Expected partition distribution %v, got %v", expected, counts) - } -} - -func TestRangeAssignmentStrategy_MultipleTopics(t *testing.T) { - strategy := &RangeAssignmentStrategy{} - - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1", "topic2"}}, - {ID: "member2", Subscription: []string{"topic1"}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1}, - "topic2": {0, 1}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Member1 should get assignments from both topics - member1Assignments := assignments["member1"] - topicsAssigned := make(map[string]int) - for _, pa := range member1Assignments { - topicsAssigned[pa.Topic]++ - } - - if len(topicsAssigned) != 2 { - t.Errorf("Expected member1 to be assigned to 2 topics, got %d", len(topicsAssigned)) - } - - // Member2 should only get topic1 assignments - member2Assignments := assignments["member2"] - for _, pa := range member2Assignments { - if pa.Topic != "topic1" { - t.Errorf("Expected member2 to only get topic1, but got %s", pa.Topic) - } - } -} - -func TestRoundRobinAssignmentStrategy(t *testing.T) { - strategy := &RoundRobinAssignmentStrategy{} - - if strategy.Name() != ProtocolNameRoundRobin { - t.Errorf("Expected strategy name '%s', got '%s'", ProtocolNameRoundRobin, strategy.Name()) - } - - // Test with 2 members, 4 partitions on one topic - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1"}}, - {ID: "member2", Subscription: []string{"topic1"}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify all members have assignments - if len(assignments) != 2 { - t.Fatalf("Expected assignments for 2 members, got %d", len(assignments)) - } - - // Verify total partitions assigned - totalAssigned := 0 - for _, assignment := range assignments { - totalAssigned += len(assignment) - } - - if totalAssigned != 4 { - t.Errorf("Expected 4 total partitions assigned, got %d", totalAssigned) - } - - // Round robin should distribute evenly: 2 partitions each - for memberID, assignment := range assignments { - if len(assignment) != 2 { - t.Errorf("Expected 2 partitions for member %s, got %d", memberID, len(assignment)) - } - } -} - -func TestRoundRobinAssignmentStrategy_MultipleTopics(t *testing.T) { - strategy := &RoundRobinAssignmentStrategy{} - - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1", "topic2"}}, - {ID: "member2", Subscription: []string{"topic1", "topic2"}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1}, - "topic2": {0, 1}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Each member should get 2 partitions (round robin across topics) - for memberID, assignment := range assignments { - if len(assignment) != 2 { - t.Errorf("Expected 2 partitions for member %s, got %d", memberID, len(assignment)) - } - } - - // Verify no partition is assigned twice - assignedPartitions := make(map[string]map[int32]bool) - for _, assignment := range assignments { - for _, pa := range assignment { - if assignedPartitions[pa.Topic] == nil { - assignedPartitions[pa.Topic] = make(map[int32]bool) - } - if assignedPartitions[pa.Topic][pa.Partition] { - t.Errorf("Partition %d of topic %s assigned multiple times", pa.Partition, pa.Topic) - } - assignedPartitions[pa.Topic][pa.Partition] = true - } - } -} - -func TestGetAssignmentStrategy(t *testing.T) { - rangeStrategy := GetAssignmentStrategy(ProtocolNameRange) - if rangeStrategy.Name() != ProtocolNameRange { - t.Errorf("Expected range strategy, got %s", rangeStrategy.Name()) - } - - rrStrategy := GetAssignmentStrategy(ProtocolNameRoundRobin) - if rrStrategy.Name() != ProtocolNameRoundRobin { - t.Errorf("Expected roundrobin strategy, got %s", rrStrategy.Name()) - } - - // Unknown strategy should default to range - defaultStrategy := GetAssignmentStrategy("unknown") - if defaultStrategy.Name() != ProtocolNameRange { - t.Errorf("Expected default strategy to be range, got %s", defaultStrategy.Name()) - } -} - -func TestConsumerGroup_AssignPartitions(t *testing.T) { - group := &ConsumerGroup{ - ID: "test-group", - Protocol: ProtocolNameRange, - Members: map[string]*GroupMember{ - "member1": { - ID: "member1", - Subscription: []string{"topic1"}, - State: MemberStateStable, - }, - "member2": { - ID: "member2", - Subscription: []string{"topic1"}, - State: MemberStateStable, - }, - }, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - group.AssignPartitions(topicPartitions) - - // Verify assignments were created - for memberID, member := range group.Members { - if len(member.Assignment) == 0 { - t.Errorf("Expected member %s to have partition assignments", memberID) - } - - // Verify all assignments are valid - for _, pa := range member.Assignment { - if pa.Topic != "topic1" { - t.Errorf("Unexpected topic assignment: %s", pa.Topic) - } - if pa.Partition < 0 || pa.Partition >= 4 { - t.Errorf("Unexpected partition assignment: %d", pa.Partition) - } - } - } -} - -func TestConsumerGroup_GetMemberAssignments(t *testing.T) { - group := &ConsumerGroup{ - Members: map[string]*GroupMember{ - "member1": { - ID: "member1", - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic1", Partition: 1}, - }, - }, - }, - } - - assignments := group.GetMemberAssignments() - - if len(assignments) != 1 { - t.Fatalf("Expected 1 member assignment, got %d", len(assignments)) - } - - member1Assignments := assignments["member1"] - if len(member1Assignments) != 2 { - t.Errorf("Expected 2 partition assignments for member1, got %d", len(member1Assignments)) - } - - // Verify assignment content - expectedAssignments := []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic1", Partition: 1}, - } - - if !reflect.DeepEqual(member1Assignments, expectedAssignments) { - t.Errorf("Expected assignments %v, got %v", expectedAssignments, member1Assignments) - } -} - -func TestConsumerGroup_UpdateMemberSubscription(t *testing.T) { - group := &ConsumerGroup{ - Members: map[string]*GroupMember{ - "member1": { - ID: "member1", - Subscription: []string{"topic1"}, - }, - "member2": { - ID: "member2", - Subscription: []string{"topic2"}, - }, - }, - SubscribedTopics: map[string]bool{ - "topic1": true, - "topic2": true, - }, - } - - // Update member1's subscription - group.UpdateMemberSubscription("member1", []string{"topic1", "topic3"}) - - // Verify member subscription updated - member1 := group.Members["member1"] - expectedSubscription := []string{"topic1", "topic3"} - if !reflect.DeepEqual(member1.Subscription, expectedSubscription) { - t.Errorf("Expected subscription %v, got %v", expectedSubscription, member1.Subscription) - } - - // Verify group subscribed topics updated - expectedGroupTopics := []string{"topic1", "topic2", "topic3"} - actualGroupTopics := group.GetSubscribedTopics() - - if !reflect.DeepEqual(actualGroupTopics, expectedGroupTopics) { - t.Errorf("Expected group topics %v, got %v", expectedGroupTopics, actualGroupTopics) - } -} - -func TestAssignmentStrategy_EmptyMembers(t *testing.T) { - rangeStrategy := &RangeAssignmentStrategy{} - rrStrategy := &RoundRobinAssignmentStrategy{} - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - // Both strategies should handle empty members gracefully - rangeAssignments := rangeStrategy.Assign([]*GroupMember{}, topicPartitions) - rrAssignments := rrStrategy.Assign([]*GroupMember{}, topicPartitions) - - if len(rangeAssignments) != 0 { - t.Error("Expected empty assignments for empty members list (range)") - } - - if len(rrAssignments) != 0 { - t.Error("Expected empty assignments for empty members list (round robin)") - } -} diff --git a/weed/mq/kafka/consumer/cooperative_sticky_test.go b/weed/mq/kafka/consumer/cooperative_sticky_test.go deleted file mode 100644 index 0c579d3f4..000000000 --- a/weed/mq/kafka/consumer/cooperative_sticky_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package consumer - -import ( - "testing" -) - -func TestCooperativeStickyAssignmentStrategy_Name(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - if strategy.Name() != ProtocolNameCooperativeSticky { - t.Errorf("Expected strategy name '%s', got '%s'", ProtocolNameCooperativeSticky, strategy.Name()) - } -} - -func TestCooperativeStickyAssignmentStrategy_InitialAssignment(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1"}, Assignment: []PartitionAssignment{}}, - {ID: "member2", Subscription: []string{"topic1"}, Assignment: []PartitionAssignment{}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify all partitions are assigned - totalAssigned := 0 - for _, assignment := range assignments { - totalAssigned += len(assignment) - } - - if totalAssigned != 4 { - t.Errorf("Expected 4 total partitions assigned, got %d", totalAssigned) - } - - // Verify fair distribution (2 partitions each) - for memberID, assignment := range assignments { - if len(assignment) != 2 { - t.Errorf("Expected member %s to get 2 partitions, got %d", memberID, len(assignment)) - } - } - - // Verify no partition is assigned twice - assignedPartitions := make(map[PartitionAssignment]bool) - for _, assignment := range assignments { - for _, pa := range assignment { - if assignedPartitions[pa] { - t.Errorf("Partition %v assigned multiple times", pa) - } - assignedPartitions[pa] = true - } - } -} - -func TestCooperativeStickyAssignmentStrategy_StickyBehavior(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Initial state: member1 has partitions 0,1 and member2 has partitions 2,3 - members := []*GroupMember{ - { - ID: "member1", - Subscription: []string{"topic1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic1", Partition: 1}, - }, - }, - { - ID: "member2", - Subscription: []string{"topic1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 2}, - {Topic: "topic1", Partition: 3}, - }, - }, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify sticky behavior - existing assignments should be preserved - member1Assignment := assignments["member1"] - member2Assignment := assignments["member2"] - - // Check that member1 still has partitions 0 and 1 - hasPartition0 := false - hasPartition1 := false - for _, pa := range member1Assignment { - if pa.Topic == "topic1" && pa.Partition == 0 { - hasPartition0 = true - } - if pa.Topic == "topic1" && pa.Partition == 1 { - hasPartition1 = true - } - } - - if !hasPartition0 || !hasPartition1 { - t.Errorf("Member1 should retain partitions 0 and 1, got %v", member1Assignment) - } - - // Check that member2 still has partitions 2 and 3 - hasPartition2 := false - hasPartition3 := false - for _, pa := range member2Assignment { - if pa.Topic == "topic1" && pa.Partition == 2 { - hasPartition2 = true - } - if pa.Topic == "topic1" && pa.Partition == 3 { - hasPartition3 = true - } - } - - if !hasPartition2 || !hasPartition3 { - t.Errorf("Member2 should retain partitions 2 and 3, got %v", member2Assignment) - } -} - -func TestCooperativeStickyAssignmentStrategy_NewMemberJoin(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Scenario: member1 has all partitions, member2 joins - members := []*GroupMember{ - { - ID: "member1", - Subscription: []string{"topic1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic1", Partition: 1}, - {Topic: "topic1", Partition: 2}, - {Topic: "topic1", Partition: 3}, - }, - }, - { - ID: "member2", - Subscription: []string{"topic1"}, - Assignment: []PartitionAssignment{}, // New member, no existing assignment - }, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, - } - - // First call: revocation phase - assignments1 := strategy.Assign(members, topicPartitions) - - // Update members with revocation results - members[0].Assignment = assignments1["member1"] - members[1].Assignment = assignments1["member2"] - - // Force completion of revocation timeout - strategy.GetRebalanceState().RevocationTimeout = 0 - - // Second call: assignment phase - assignments := strategy.Assign(members, topicPartitions) - - // Verify fair redistribution (2 partitions each) - member1Assignment := assignments["member1"] - member2Assignment := assignments["member2"] - - if len(member1Assignment) != 2 { - t.Errorf("Expected member1 to have 2 partitions after rebalance, got %d", len(member1Assignment)) - } - - if len(member2Assignment) != 2 { - t.Errorf("Expected member2 to have 2 partitions after rebalance, got %d", len(member2Assignment)) - } - - // Verify some stickiness - member1 should retain some of its original partitions - originalPartitions := map[int32]bool{0: true, 1: true, 2: true, 3: true} - retainedCount := 0 - for _, pa := range member1Assignment { - if originalPartitions[pa.Partition] { - retainedCount++ - } - } - - if retainedCount == 0 { - t.Error("Member1 should retain at least some of its original partitions (sticky behavior)") - } - - t.Logf("Member1 retained %d out of 4 original partitions", retainedCount) -} - -func TestCooperativeStickyAssignmentStrategy_MemberLeave(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Scenario: member2 leaves, member1 should get its partitions - members := []*GroupMember{ - { - ID: "member1", - Subscription: []string{"topic1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic1", Partition: 1}, - }, - }, - // member2 has left, so it's not in the members list - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3}, // All partitions still need to be assigned - } - - assignments := strategy.Assign(members, topicPartitions) - - // member1 should get all partitions - member1Assignment := assignments["member1"] - - if len(member1Assignment) != 4 { - t.Errorf("Expected member1 to get all 4 partitions after member2 left, got %d", len(member1Assignment)) - } - - // Verify member1 retained its original partitions (sticky behavior) - hasPartition0 := false - hasPartition1 := false - for _, pa := range member1Assignment { - if pa.Partition == 0 { - hasPartition0 = true - } - if pa.Partition == 1 { - hasPartition1 = true - } - } - - if !hasPartition0 || !hasPartition1 { - t.Error("Member1 should retain its original partitions 0 and 1") - } -} - -func TestCooperativeStickyAssignmentStrategy_MultipleTopics(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - members := []*GroupMember{ - { - ID: "member1", - Subscription: []string{"topic1", "topic2"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 0}, - {Topic: "topic2", Partition: 0}, - }, - }, - { - ID: "member2", - Subscription: []string{"topic1", "topic2"}, - Assignment: []PartitionAssignment{ - {Topic: "topic1", Partition: 1}, - {Topic: "topic2", Partition: 1}, - }, - }, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1}, - "topic2": {0, 1}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify all partitions are assigned - totalAssigned := 0 - for _, assignment := range assignments { - totalAssigned += len(assignment) - } - - if totalAssigned != 4 { - t.Errorf("Expected 4 total partitions assigned across both topics, got %d", totalAssigned) - } - - // Verify sticky behavior - each member should retain their original assignments - member1Assignment := assignments["member1"] - member2Assignment := assignments["member2"] - - // Check member1 retains topic1:0 and topic2:0 - hasT1P0 := false - hasT2P0 := false - for _, pa := range member1Assignment { - if pa.Topic == "topic1" && pa.Partition == 0 { - hasT1P0 = true - } - if pa.Topic == "topic2" && pa.Partition == 0 { - hasT2P0 = true - } - } - - if !hasT1P0 || !hasT2P0 { - t.Errorf("Member1 should retain topic1:0 and topic2:0, got %v", member1Assignment) - } - - // Check member2 retains topic1:1 and topic2:1 - hasT1P1 := false - hasT2P1 := false - for _, pa := range member2Assignment { - if pa.Topic == "topic1" && pa.Partition == 1 { - hasT1P1 = true - } - if pa.Topic == "topic2" && pa.Partition == 1 { - hasT2P1 = true - } - } - - if !hasT1P1 || !hasT2P1 { - t.Errorf("Member2 should retain topic1:1 and topic2:1, got %v", member2Assignment) - } -} - -func TestCooperativeStickyAssignmentStrategy_UnevenPartitions(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // 5 partitions, 2 members - should distribute 3:2 or 2:3 - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1"}, Assignment: []PartitionAssignment{}}, - {ID: "member2", Subscription: []string{"topic1"}, Assignment: []PartitionAssignment{}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1, 2, 3, 4}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // Verify all partitions are assigned - totalAssigned := 0 - for _, assignment := range assignments { - totalAssigned += len(assignment) - } - - if totalAssigned != 5 { - t.Errorf("Expected 5 total partitions assigned, got %d", totalAssigned) - } - - // Verify fair distribution - member1Count := len(assignments["member1"]) - member2Count := len(assignments["member2"]) - - // Should be 3:2 or 2:3 distribution - if !((member1Count == 3 && member2Count == 2) || (member1Count == 2 && member2Count == 3)) { - t.Errorf("Expected 3:2 or 2:3 distribution, got %d:%d", member1Count, member2Count) - } -} - -func TestCooperativeStickyAssignmentStrategy_PartialSubscription(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // member1 subscribes to both topics, member2 only to topic1 - members := []*GroupMember{ - {ID: "member1", Subscription: []string{"topic1", "topic2"}, Assignment: []PartitionAssignment{}}, - {ID: "member2", Subscription: []string{"topic1"}, Assignment: []PartitionAssignment{}}, - } - - topicPartitions := map[string][]int32{ - "topic1": {0, 1}, - "topic2": {0, 1}, - } - - assignments := strategy.Assign(members, topicPartitions) - - // member1 should get all topic2 partitions since member2 isn't subscribed - member1Assignment := assignments["member1"] - member2Assignment := assignments["member2"] - - // Count topic2 partitions for each member - member1Topic2Count := 0 - member2Topic2Count := 0 - - for _, pa := range member1Assignment { - if pa.Topic == "topic2" { - member1Topic2Count++ - } - } - - for _, pa := range member2Assignment { - if pa.Topic == "topic2" { - member2Topic2Count++ - } - } - - if member1Topic2Count != 2 { - t.Errorf("Expected member1 to get all 2 topic2 partitions, got %d", member1Topic2Count) - } - - if member2Topic2Count != 0 { - t.Errorf("Expected member2 to get 0 topic2 partitions (not subscribed), got %d", member2Topic2Count) - } - - // Both members should get some topic1 partitions - member1Topic1Count := 0 - member2Topic1Count := 0 - - for _, pa := range member1Assignment { - if pa.Topic == "topic1" { - member1Topic1Count++ - } - } - - for _, pa := range member2Assignment { - if pa.Topic == "topic1" { - member2Topic1Count++ - } - } - - if member1Topic1Count+member2Topic1Count != 2 { - t.Errorf("Expected all topic1 partitions to be assigned, got %d + %d = %d", - member1Topic1Count, member2Topic1Count, member1Topic1Count+member2Topic1Count) - } -} - -func TestGetAssignmentStrategy_CooperativeSticky(t *testing.T) { - strategy := GetAssignmentStrategy(ProtocolNameCooperativeSticky) - if strategy.Name() != ProtocolNameCooperativeSticky { - t.Errorf("Expected cooperative-sticky strategy, got %s", strategy.Name()) - } - - // Verify it's the correct type - if _, ok := strategy.(*IncrementalCooperativeAssignmentStrategy); !ok { - t.Errorf("Expected IncrementalCooperativeAssignmentStrategy, got %T", strategy) - } -} diff --git a/weed/mq/kafka/consumer/group_coordinator.go b/weed/mq/kafka/consumer/group_coordinator.go deleted file mode 100644 index 1158f9431..000000000 --- a/weed/mq/kafka/consumer/group_coordinator.go +++ /dev/null @@ -1,399 +0,0 @@ -package consumer - -import ( - "crypto/sha256" - "fmt" - "sync" - "time" -) - -// GroupState represents the state of a consumer group -type GroupState int - -const ( - GroupStateEmpty GroupState = iota - GroupStatePreparingRebalance - GroupStateCompletingRebalance - GroupStateStable - GroupStateDead -) - -func (gs GroupState) String() string { - switch gs { - case GroupStateEmpty: - return "Empty" - case GroupStatePreparingRebalance: - return "PreparingRebalance" - case GroupStateCompletingRebalance: - return "CompletingRebalance" - case GroupStateStable: - return "Stable" - case GroupStateDead: - return "Dead" - default: - return "Unknown" - } -} - -// MemberState represents the state of a group member -type MemberState int - -const ( - MemberStateUnknown MemberState = iota - MemberStatePending - MemberStateStable - MemberStateLeaving -) - -func (ms MemberState) String() string { - switch ms { - case MemberStateUnknown: - return "Unknown" - case MemberStatePending: - return "Pending" - case MemberStateStable: - return "Stable" - case MemberStateLeaving: - return "Leaving" - default: - return "Unknown" - } -} - -// GroupMember represents a consumer in a consumer group -type GroupMember struct { - ID string // Member ID (generated by gateway) - ClientID string // Client ID from consumer - ClientHost string // Client host/IP - GroupInstanceID *string // Static membership instance ID (optional) - SessionTimeout int32 // Session timeout in milliseconds - RebalanceTimeout int32 // Rebalance timeout in milliseconds - Subscription []string // Subscribed topics - Assignment []PartitionAssignment // Assigned partitions - Metadata []byte // Protocol-specific metadata - State MemberState // Current member state - LastHeartbeat time.Time // Last heartbeat timestamp - JoinedAt time.Time // When member joined group -} - -// PartitionAssignment represents partition assignment for a member -type PartitionAssignment struct { - Topic string - Partition int32 -} - -// ConsumerGroup represents a Kafka consumer group -type ConsumerGroup struct { - ID string // Group ID - State GroupState // Current group state - Generation int32 // Generation ID (incremented on rebalance) - Protocol string // Assignment protocol (e.g., "range", "roundrobin") - Leader string // Leader member ID - Members map[string]*GroupMember // Group members by member ID - StaticMembers map[string]string // Static instance ID -> member ID mapping - SubscribedTopics map[string]bool // Topics subscribed by group - OffsetCommits map[string]map[int32]OffsetCommit // Topic -> Partition -> Offset - CreatedAt time.Time // Group creation time - LastActivity time.Time // Last activity (join, heartbeat, etc.) - - Mu sync.RWMutex // Protects group state -} - -// OffsetCommit represents a committed offset for a topic partition -type OffsetCommit struct { - Offset int64 // Committed offset - Metadata string // Optional metadata - Timestamp time.Time // Commit timestamp -} - -// GroupCoordinator manages consumer groups -type GroupCoordinator struct { - groups map[string]*ConsumerGroup // Group ID -> Group - groupsMu sync.RWMutex // Protects groups map - - // Configuration - sessionTimeoutMin int32 // Minimum session timeout (ms) - sessionTimeoutMax int32 // Maximum session timeout (ms) - rebalanceTimeoutMs int32 // Default rebalance timeout (ms) - - // Timeout management - rebalanceTimeoutManager *RebalanceTimeoutManager - - // Cleanup - cleanupTicker *time.Ticker - stopChan chan struct{} - stopOnce sync.Once -} - -// NewGroupCoordinator creates a new consumer group coordinator -func NewGroupCoordinator() *GroupCoordinator { - gc := &GroupCoordinator{ - groups: make(map[string]*ConsumerGroup), - sessionTimeoutMin: 6000, // 6 seconds - sessionTimeoutMax: 300000, // 5 minutes - rebalanceTimeoutMs: 300000, // 5 minutes - stopChan: make(chan struct{}), - } - - // Initialize rebalance timeout manager - gc.rebalanceTimeoutManager = NewRebalanceTimeoutManager(gc) - - // Start cleanup routine - gc.cleanupTicker = time.NewTicker(30 * time.Second) - go gc.cleanupRoutine() - - return gc -} - -// GetOrCreateGroup returns an existing group or creates a new one -func (gc *GroupCoordinator) GetOrCreateGroup(groupID string) *ConsumerGroup { - gc.groupsMu.Lock() - defer gc.groupsMu.Unlock() - - group, exists := gc.groups[groupID] - if !exists { - group = &ConsumerGroup{ - ID: groupID, - State: GroupStateEmpty, - Generation: 0, - Members: make(map[string]*GroupMember), - StaticMembers: make(map[string]string), - SubscribedTopics: make(map[string]bool), - OffsetCommits: make(map[string]map[int32]OffsetCommit), - CreatedAt: time.Now(), - LastActivity: time.Now(), - } - gc.groups[groupID] = group - } - - return group -} - -// GetGroup returns an existing group or nil if not found -func (gc *GroupCoordinator) GetGroup(groupID string) *ConsumerGroup { - gc.groupsMu.RLock() - defer gc.groupsMu.RUnlock() - - return gc.groups[groupID] -} - -// RemoveGroup removes a group from the coordinator -func (gc *GroupCoordinator) RemoveGroup(groupID string) { - gc.groupsMu.Lock() - defer gc.groupsMu.Unlock() - - delete(gc.groups, groupID) -} - -// ListGroups returns all current group IDs -func (gc *GroupCoordinator) ListGroups() []string { - gc.groupsMu.RLock() - defer gc.groupsMu.RUnlock() - - groups := make([]string, 0, len(gc.groups)) - for groupID := range gc.groups { - groups = append(groups, groupID) - } - return groups -} - -// FindStaticMember finds a member by static instance ID -func (gc *GroupCoordinator) FindStaticMember(group *ConsumerGroup, instanceID string) *GroupMember { - if instanceID == "" { - return nil - } - - group.Mu.RLock() - defer group.Mu.RUnlock() - - if memberID, exists := group.StaticMembers[instanceID]; exists { - return group.Members[memberID] - } - return nil -} - -// FindStaticMemberLocked finds a member by static instance ID (assumes group is already locked) -func (gc *GroupCoordinator) FindStaticMemberLocked(group *ConsumerGroup, instanceID string) *GroupMember { - if instanceID == "" { - return nil - } - - if memberID, exists := group.StaticMembers[instanceID]; exists { - return group.Members[memberID] - } - return nil -} - -// RegisterStaticMember registers a static member in the group -func (gc *GroupCoordinator) RegisterStaticMember(group *ConsumerGroup, member *GroupMember) { - if member.GroupInstanceID == nil || *member.GroupInstanceID == "" { - return - } - - group.Mu.Lock() - defer group.Mu.Unlock() - - group.StaticMembers[*member.GroupInstanceID] = member.ID -} - -// RegisterStaticMemberLocked registers a static member in the group (assumes group is already locked) -func (gc *GroupCoordinator) RegisterStaticMemberLocked(group *ConsumerGroup, member *GroupMember) { - if member.GroupInstanceID == nil || *member.GroupInstanceID == "" { - return - } - - group.StaticMembers[*member.GroupInstanceID] = member.ID -} - -// UnregisterStaticMember removes a static member from the group -func (gc *GroupCoordinator) UnregisterStaticMember(group *ConsumerGroup, instanceID string) { - if instanceID == "" { - return - } - - group.Mu.Lock() - defer group.Mu.Unlock() - - delete(group.StaticMembers, instanceID) -} - -// UnregisterStaticMemberLocked removes a static member from the group (assumes group is already locked) -func (gc *GroupCoordinator) UnregisterStaticMemberLocked(group *ConsumerGroup, instanceID string) { - if instanceID == "" { - return - } - - delete(group.StaticMembers, instanceID) -} - -// IsStaticMember checks if a member is using static membership -func (gc *GroupCoordinator) IsStaticMember(member *GroupMember) bool { - return member.GroupInstanceID != nil && *member.GroupInstanceID != "" -} - -// GenerateMemberID creates a deterministic member ID based on client info -func (gc *GroupCoordinator) GenerateMemberID(clientID, clientHost string) string { - // EXPERIMENT: Use simpler member ID format like real Kafka brokers - // Real Kafka uses format like: "consumer-1-uuid" or "consumer-groupId-uuid" - hash := fmt.Sprintf("%x", sha256.Sum256([]byte(clientID+"-"+clientHost))) - return fmt.Sprintf("consumer-%s", hash[:16]) // Shorter, simpler format -} - -// ValidateSessionTimeout checks if session timeout is within acceptable range -func (gc *GroupCoordinator) ValidateSessionTimeout(timeout int32) bool { - return timeout >= gc.sessionTimeoutMin && timeout <= gc.sessionTimeoutMax -} - -// cleanupRoutine periodically cleans up dead groups and expired members -func (gc *GroupCoordinator) cleanupRoutine() { - for { - select { - case <-gc.cleanupTicker.C: - gc.performCleanup() - case <-gc.stopChan: - return - } - } -} - -// performCleanup removes expired members and empty groups -func (gc *GroupCoordinator) performCleanup() { - now := time.Now() - - // Use rebalance timeout manager for more sophisticated timeout handling - gc.rebalanceTimeoutManager.CheckRebalanceTimeouts() - - gc.groupsMu.Lock() - defer gc.groupsMu.Unlock() - - for groupID, group := range gc.groups { - group.Mu.Lock() - - // Check for expired members (session timeout) - expiredMembers := make([]string, 0) - for memberID, member := range group.Members { - sessionDuration := time.Duration(member.SessionTimeout) * time.Millisecond - timeSinceHeartbeat := now.Sub(member.LastHeartbeat) - if timeSinceHeartbeat > sessionDuration { - expiredMembers = append(expiredMembers, memberID) - } - } - - // Remove expired members - for _, memberID := range expiredMembers { - delete(group.Members, memberID) - if group.Leader == memberID { - group.Leader = "" - } - } - - // Update group state based on member count - if len(group.Members) == 0 { - if group.State != GroupStateEmpty { - group.State = GroupStateEmpty - group.Generation++ - } - - // Mark group for deletion if empty for too long (30 minutes) - if now.Sub(group.LastActivity) > 30*time.Minute { - group.State = GroupStateDead - } - } - - // Check for stuck rebalances and force completion if necessary - maxRebalanceDuration := 10 * time.Minute // Maximum time allowed for rebalancing - if gc.rebalanceTimeoutManager.IsRebalanceStuck(group, maxRebalanceDuration) { - gc.rebalanceTimeoutManager.ForceCompleteRebalance(group) - } - - group.Mu.Unlock() - - // Remove dead groups - if group.State == GroupStateDead { - delete(gc.groups, groupID) - } - } -} - -// Close shuts down the group coordinator -func (gc *GroupCoordinator) Close() { - gc.stopOnce.Do(func() { - close(gc.stopChan) - if gc.cleanupTicker != nil { - gc.cleanupTicker.Stop() - } - }) -} - -// GetGroupStats returns statistics about the group coordinator -func (gc *GroupCoordinator) GetGroupStats() map[string]interface{} { - gc.groupsMu.RLock() - defer gc.groupsMu.RUnlock() - - stats := map[string]interface{}{ - "total_groups": len(gc.groups), - "group_states": make(map[string]int), - } - - stateCount := make(map[GroupState]int) - totalMembers := 0 - - for _, group := range gc.groups { - group.Mu.RLock() - stateCount[group.State]++ - totalMembers += len(group.Members) - group.Mu.RUnlock() - } - - stats["total_members"] = totalMembers - for state, count := range stateCount { - stats["group_states"].(map[string]int)[state.String()] = count - } - - return stats -} - -// GetRebalanceStatus returns the rebalance status for a specific group -func (gc *GroupCoordinator) GetRebalanceStatus(groupID string) *RebalanceStatus { - return gc.rebalanceTimeoutManager.GetRebalanceStatus(groupID) -} diff --git a/weed/mq/kafka/consumer/group_coordinator_test.go b/weed/mq/kafka/consumer/group_coordinator_test.go deleted file mode 100644 index 5be4f7f93..000000000 --- a/weed/mq/kafka/consumer/group_coordinator_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package consumer - -import ( - "strings" - "testing" - "time" -) - -func TestGroupCoordinator_CreateGroup(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - groupID := "test-group" - group := gc.GetOrCreateGroup(groupID) - - if group == nil { - t.Fatal("Expected group to be created") - } - - if group.ID != groupID { - t.Errorf("Expected group ID %s, got %s", groupID, group.ID) - } - - if group.State != GroupStateEmpty { - t.Errorf("Expected initial state to be Empty, got %s", group.State) - } - - if group.Generation != 0 { - t.Errorf("Expected initial generation to be 0, got %d", group.Generation) - } - - // Getting the same group should return the existing one - group2 := gc.GetOrCreateGroup(groupID) - if group2 != group { - t.Error("Expected to get the same group instance") - } -} - -func TestGroupCoordinator_ValidateSessionTimeout(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - // Test valid timeouts - validTimeouts := []int32{6000, 30000, 300000} - for _, timeout := range validTimeouts { - if !gc.ValidateSessionTimeout(timeout) { - t.Errorf("Expected timeout %d to be valid", timeout) - } - } - - // Test invalid timeouts - invalidTimeouts := []int32{1000, 5000, 400000} - for _, timeout := range invalidTimeouts { - if gc.ValidateSessionTimeout(timeout) { - t.Errorf("Expected timeout %d to be invalid", timeout) - } - } -} - -func TestGroupCoordinator_MemberManagement(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - group := gc.GetOrCreateGroup("test-group") - - // Add members - member1 := &GroupMember{ - ID: "member1", - ClientID: "client1", - SessionTimeout: 30000, - Subscription: []string{"topic1", "topic2"}, - State: MemberStateStable, - LastHeartbeat: time.Now(), - } - - member2 := &GroupMember{ - ID: "member2", - ClientID: "client2", - SessionTimeout: 30000, - Subscription: []string{"topic1"}, - State: MemberStateStable, - LastHeartbeat: time.Now(), - } - - group.Mu.Lock() - group.Members[member1.ID] = member1 - group.Members[member2.ID] = member2 - group.Mu.Unlock() - - // Update subscriptions - group.UpdateMemberSubscription("member1", []string{"topic1", "topic3"}) - - group.Mu.RLock() - updatedMember := group.Members["member1"] - expectedTopics := []string{"topic1", "topic3"} - if len(updatedMember.Subscription) != len(expectedTopics) { - t.Errorf("Expected %d subscribed topics, got %d", len(expectedTopics), len(updatedMember.Subscription)) - } - - // Check group subscribed topics - if len(group.SubscribedTopics) != 2 { // topic1, topic3 - t.Errorf("Expected 2 group subscribed topics, got %d", len(group.SubscribedTopics)) - } - group.Mu.RUnlock() -} - -func TestGroupCoordinator_Stats(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - // Create multiple groups in different states - group1 := gc.GetOrCreateGroup("group1") - group1.Mu.Lock() - group1.State = GroupStateStable - group1.Members["member1"] = &GroupMember{ID: "member1"} - group1.Members["member2"] = &GroupMember{ID: "member2"} - group1.Mu.Unlock() - - group2 := gc.GetOrCreateGroup("group2") - group2.Mu.Lock() - group2.State = GroupStatePreparingRebalance - group2.Members["member3"] = &GroupMember{ID: "member3"} - group2.Mu.Unlock() - - stats := gc.GetGroupStats() - - totalGroups := stats["total_groups"].(int) - if totalGroups != 2 { - t.Errorf("Expected 2 total groups, got %d", totalGroups) - } - - totalMembers := stats["total_members"].(int) - if totalMembers != 3 { - t.Errorf("Expected 3 total members, got %d", totalMembers) - } - - stateCount := stats["group_states"].(map[string]int) - if stateCount["Stable"] != 1 { - t.Errorf("Expected 1 stable group, got %d", stateCount["Stable"]) - } - - if stateCount["PreparingRebalance"] != 1 { - t.Errorf("Expected 1 preparing rebalance group, got %d", stateCount["PreparingRebalance"]) - } -} - -func TestGroupCoordinator_Cleanup(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - // Create a group with an expired member - group := gc.GetOrCreateGroup("test-group") - - expiredMember := &GroupMember{ - ID: "expired-member", - SessionTimeout: 1000, // 1 second - LastHeartbeat: time.Now().Add(-2 * time.Second), // 2 seconds ago - State: MemberStateStable, - } - - activeMember := &GroupMember{ - ID: "active-member", - SessionTimeout: 30000, // 30 seconds - LastHeartbeat: time.Now(), // just now - State: MemberStateStable, - } - - group.Mu.Lock() - group.Members[expiredMember.ID] = expiredMember - group.Members[activeMember.ID] = activeMember - group.Leader = expiredMember.ID // Make expired member the leader - group.Mu.Unlock() - - // Perform cleanup - gc.performCleanup() - - group.Mu.RLock() - defer group.Mu.RUnlock() - - // Expired member should be removed - if _, exists := group.Members[expiredMember.ID]; exists { - t.Error("Expected expired member to be removed") - } - - // Active member should remain - if _, exists := group.Members[activeMember.ID]; !exists { - t.Error("Expected active member to remain") - } - - // Leader should be reset since expired member was leader - if group.Leader == expiredMember.ID { - t.Error("Expected leader to be reset after expired member removal") - } -} - -func TestGroupCoordinator_GenerateMemberID(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - // Test that same client/host combination generates consistent member ID - id1 := gc.GenerateMemberID("client1", "host1") - id2 := gc.GenerateMemberID("client1", "host1") - - // Same client/host should generate same ID (deterministic) - if id1 != id2 { - t.Errorf("Expected same member ID for same client/host: %s vs %s", id1, id2) - } - - // Different clients should generate different IDs - id3 := gc.GenerateMemberID("client2", "host1") - id4 := gc.GenerateMemberID("client1", "host2") - - if id1 == id3 { - t.Errorf("Expected different member IDs for different clients: %s vs %s", id1, id3) - } - - if id1 == id4 { - t.Errorf("Expected different member IDs for different hosts: %s vs %s", id1, id4) - } - - // IDs should be properly formatted - if len(id1) < 10 { // Should be longer than just "consumer-" - t.Errorf("Expected member ID to be properly formatted, got: %s", id1) - } - - // Should start with "consumer-" prefix - if !strings.HasPrefix(id1, "consumer-") { - t.Errorf("Expected member ID to start with 'consumer-', got: %s", id1) - } -} diff --git a/weed/mq/kafka/consumer/incremental_rebalancing.go b/weed/mq/kafka/consumer/incremental_rebalancing.go deleted file mode 100644 index 49509bc76..000000000 --- a/weed/mq/kafka/consumer/incremental_rebalancing.go +++ /dev/null @@ -1,356 +0,0 @@ -package consumer - -import ( - "fmt" - "sort" - "time" -) - -// RebalancePhase represents the phase of incremental cooperative rebalancing -type RebalancePhase int - -const ( - RebalancePhaseNone RebalancePhase = iota - RebalancePhaseRevocation - RebalancePhaseAssignment -) - -func (rp RebalancePhase) String() string { - switch rp { - case RebalancePhaseNone: - return "None" - case RebalancePhaseRevocation: - return "Revocation" - case RebalancePhaseAssignment: - return "Assignment" - default: - return "Unknown" - } -} - -// IncrementalRebalanceState tracks the state of incremental cooperative rebalancing -type IncrementalRebalanceState struct { - Phase RebalancePhase - RevocationGeneration int32 // Generation when revocation started - AssignmentGeneration int32 // Generation when assignment started - RevokedPartitions map[string][]PartitionAssignment // Member ID -> revoked partitions - PendingAssignments map[string][]PartitionAssignment // Member ID -> pending assignments - StartTime time.Time - RevocationTimeout time.Duration -} - -// NewIncrementalRebalanceState creates a new incremental rebalance state -func NewIncrementalRebalanceState() *IncrementalRebalanceState { - return &IncrementalRebalanceState{ - Phase: RebalancePhaseNone, - RevokedPartitions: make(map[string][]PartitionAssignment), - PendingAssignments: make(map[string][]PartitionAssignment), - RevocationTimeout: 30 * time.Second, // Default revocation timeout - } -} - -// IncrementalCooperativeAssignmentStrategy implements incremental cooperative rebalancing -// This strategy performs rebalancing in two phases: -// 1. Revocation phase: Members give up partitions that need to be reassigned -// 2. Assignment phase: Members receive new partitions -type IncrementalCooperativeAssignmentStrategy struct { - rebalanceState *IncrementalRebalanceState -} - -func NewIncrementalCooperativeAssignmentStrategy() *IncrementalCooperativeAssignmentStrategy { - return &IncrementalCooperativeAssignmentStrategy{ - rebalanceState: NewIncrementalRebalanceState(), - } -} - -func (ics *IncrementalCooperativeAssignmentStrategy) Name() string { - return ProtocolNameCooperativeSticky -} - -func (ics *IncrementalCooperativeAssignmentStrategy) Assign( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - if len(members) == 0 { - return make(map[string][]PartitionAssignment) - } - - // Check if we need to start a new rebalance - if ics.rebalanceState.Phase == RebalancePhaseNone { - return ics.startIncrementalRebalance(members, topicPartitions) - } - - // Continue existing rebalance based on current phase - switch ics.rebalanceState.Phase { - case RebalancePhaseRevocation: - return ics.handleRevocationPhase(members, topicPartitions) - case RebalancePhaseAssignment: - return ics.handleAssignmentPhase(members, topicPartitions) - default: - // Fallback to regular assignment - return ics.performRegularAssignment(members, topicPartitions) - } -} - -// startIncrementalRebalance initiates a new incremental rebalance -func (ics *IncrementalCooperativeAssignmentStrategy) startIncrementalRebalance( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - // Calculate ideal assignment - idealAssignment := ics.calculateIdealAssignment(members, topicPartitions) - - // Determine which partitions need to be revoked - partitionsToRevoke := ics.calculateRevocations(members, idealAssignment) - - if len(partitionsToRevoke) == 0 { - // No revocations needed, proceed with regular assignment - return idealAssignment - } - - // Start revocation phase - ics.rebalanceState.Phase = RebalancePhaseRevocation - ics.rebalanceState.StartTime = time.Now() - ics.rebalanceState.RevokedPartitions = partitionsToRevoke - - // Return current assignments minus revoked partitions - return ics.applyRevocations(members, partitionsToRevoke) -} - -// handleRevocationPhase manages the revocation phase of incremental rebalancing -func (ics *IncrementalCooperativeAssignmentStrategy) handleRevocationPhase( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - // Check if revocation timeout has passed - if time.Since(ics.rebalanceState.StartTime) > ics.rebalanceState.RevocationTimeout { - // Force move to assignment phase - ics.rebalanceState.Phase = RebalancePhaseAssignment - return ics.handleAssignmentPhase(members, topicPartitions) - } - - // Continue with revoked assignments (members should stop consuming revoked partitions) - return ics.getCurrentAssignmentsWithRevocations(members) -} - -// handleAssignmentPhase manages the assignment phase of incremental rebalancing -func (ics *IncrementalCooperativeAssignmentStrategy) handleAssignmentPhase( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - // Calculate final assignment including previously revoked partitions - finalAssignment := ics.calculateIdealAssignment(members, topicPartitions) - - // Complete the rebalance - ics.rebalanceState.Phase = RebalancePhaseNone - ics.rebalanceState.RevokedPartitions = make(map[string][]PartitionAssignment) - ics.rebalanceState.PendingAssignments = make(map[string][]PartitionAssignment) - - return finalAssignment -} - -// calculateIdealAssignment computes the ideal partition assignment -func (ics *IncrementalCooperativeAssignmentStrategy) calculateIdealAssignment( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - assignments := make(map[string][]PartitionAssignment) - for _, member := range members { - assignments[member.ID] = make([]PartitionAssignment, 0) - } - - // Sort members for consistent assignment - sortedMembers := make([]*GroupMember, len(members)) - copy(sortedMembers, members) - sort.Slice(sortedMembers, func(i, j int) bool { - return sortedMembers[i].ID < sortedMembers[j].ID - }) - - // Get all subscribed topics - subscribedTopics := make(map[string]bool) - for _, member := range members { - for _, topic := range member.Subscription { - subscribedTopics[topic] = true - } - } - - // Collect all partitions that need assignment - allPartitions := make([]PartitionAssignment, 0) - for topic := range subscribedTopics { - partitions, exists := topicPartitions[topic] - if !exists { - continue - } - - for _, partition := range partitions { - allPartitions = append(allPartitions, PartitionAssignment{ - Topic: topic, - Partition: partition, - }) - } - } - - // Sort partitions for consistent assignment - sort.Slice(allPartitions, func(i, j int) bool { - if allPartitions[i].Topic != allPartitions[j].Topic { - return allPartitions[i].Topic < allPartitions[j].Topic - } - return allPartitions[i].Partition < allPartitions[j].Partition - }) - - // Distribute partitions based on subscriptions - if len(allPartitions) > 0 && len(sortedMembers) > 0 { - // Group partitions by topic - partitionsByTopic := make(map[string][]PartitionAssignment) - for _, partition := range allPartitions { - partitionsByTopic[partition.Topic] = append(partitionsByTopic[partition.Topic], partition) - } - - // Assign partitions topic by topic - for topic, topicPartitions := range partitionsByTopic { - // Find members subscribed to this topic - subscribedMembers := make([]*GroupMember, 0) - for _, member := range sortedMembers { - for _, subscribedTopic := range member.Subscription { - if subscribedTopic == topic { - subscribedMembers = append(subscribedMembers, member) - break - } - } - } - - if len(subscribedMembers) == 0 { - continue // No members subscribed to this topic - } - - // Distribute topic partitions among subscribed members - partitionsPerMember := len(topicPartitions) / len(subscribedMembers) - extraPartitions := len(topicPartitions) % len(subscribedMembers) - - partitionIndex := 0 - for i, member := range subscribedMembers { - // Calculate how many partitions this member should get for this topic - numPartitions := partitionsPerMember - if i < extraPartitions { - numPartitions++ - } - - // Assign partitions to this member - for j := 0; j < numPartitions && partitionIndex < len(topicPartitions); j++ { - assignments[member.ID] = append(assignments[member.ID], topicPartitions[partitionIndex]) - partitionIndex++ - } - } - } - } - - return assignments -} - -// calculateRevocations determines which partitions need to be revoked for rebalancing -func (ics *IncrementalCooperativeAssignmentStrategy) calculateRevocations( - members []*GroupMember, - idealAssignment map[string][]PartitionAssignment, -) map[string][]PartitionAssignment { - revocations := make(map[string][]PartitionAssignment) - - for _, member := range members { - currentAssignment := member.Assignment - memberIdealAssignment := idealAssignment[member.ID] - - // Find partitions that are currently assigned but not in ideal assignment - currentMap := make(map[string]bool) - for _, assignment := range currentAssignment { - key := fmt.Sprintf("%s:%d", assignment.Topic, assignment.Partition) - currentMap[key] = true - } - - idealMap := make(map[string]bool) - for _, assignment := range memberIdealAssignment { - key := fmt.Sprintf("%s:%d", assignment.Topic, assignment.Partition) - idealMap[key] = true - } - - // Identify partitions to revoke - var toRevoke []PartitionAssignment - for _, assignment := range currentAssignment { - key := fmt.Sprintf("%s:%d", assignment.Topic, assignment.Partition) - if !idealMap[key] { - toRevoke = append(toRevoke, assignment) - } - } - - if len(toRevoke) > 0 { - revocations[member.ID] = toRevoke - } - } - - return revocations -} - -// applyRevocations returns current assignments with specified partitions revoked -func (ics *IncrementalCooperativeAssignmentStrategy) applyRevocations( - members []*GroupMember, - revocations map[string][]PartitionAssignment, -) map[string][]PartitionAssignment { - assignments := make(map[string][]PartitionAssignment) - - for _, member := range members { - assignments[member.ID] = make([]PartitionAssignment, 0) - - // Get revoked partitions for this member - revokedPartitions := make(map[string]bool) - if revoked, exists := revocations[member.ID]; exists { - for _, partition := range revoked { - key := fmt.Sprintf("%s:%d", partition.Topic, partition.Partition) - revokedPartitions[key] = true - } - } - - // Add current assignments except revoked ones - for _, assignment := range member.Assignment { - key := fmt.Sprintf("%s:%d", assignment.Topic, assignment.Partition) - if !revokedPartitions[key] { - assignments[member.ID] = append(assignments[member.ID], assignment) - } - } - } - - return assignments -} - -// getCurrentAssignmentsWithRevocations returns current assignments with revocations applied -func (ics *IncrementalCooperativeAssignmentStrategy) getCurrentAssignmentsWithRevocations( - members []*GroupMember, -) map[string][]PartitionAssignment { - return ics.applyRevocations(members, ics.rebalanceState.RevokedPartitions) -} - -// performRegularAssignment performs a regular (non-incremental) assignment as fallback -func (ics *IncrementalCooperativeAssignmentStrategy) performRegularAssignment( - members []*GroupMember, - topicPartitions map[string][]int32, -) map[string][]PartitionAssignment { - // Reset rebalance state - ics.rebalanceState = NewIncrementalRebalanceState() - - // Use ideal assignment calculation (non-incremental cooperative assignment) - return ics.calculateIdealAssignment(members, topicPartitions) -} - -// GetRebalanceState returns the current rebalance state (for monitoring/debugging) -func (ics *IncrementalCooperativeAssignmentStrategy) GetRebalanceState() *IncrementalRebalanceState { - return ics.rebalanceState -} - -// IsRebalanceInProgress returns true if an incremental rebalance is currently in progress -func (ics *IncrementalCooperativeAssignmentStrategy) IsRebalanceInProgress() bool { - return ics.rebalanceState.Phase != RebalancePhaseNone -} - -// ForceCompleteRebalance forces completion of the current rebalance (for timeout scenarios) -func (ics *IncrementalCooperativeAssignmentStrategy) ForceCompleteRebalance() { - ics.rebalanceState.Phase = RebalancePhaseNone - ics.rebalanceState.RevokedPartitions = make(map[string][]PartitionAssignment) - ics.rebalanceState.PendingAssignments = make(map[string][]PartitionAssignment) -} diff --git a/weed/mq/kafka/consumer/incremental_rebalancing_test.go b/weed/mq/kafka/consumer/incremental_rebalancing_test.go deleted file mode 100644 index 1352b2da0..000000000 --- a/weed/mq/kafka/consumer/incremental_rebalancing_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package consumer - -import ( - "fmt" - "testing" - "time" -) - -func TestIncrementalCooperativeAssignmentStrategy_BasicAssignment(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Create members - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, // No existing assignment - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, // No existing assignment - }, - } - - // Topic partitions - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, - } - - // First assignment (no existing assignments, should be direct) - assignments := strategy.Assign(members, topicPartitions) - - // Verify assignments - if len(assignments) != 2 { - t.Errorf("Expected 2 member assignments, got %d", len(assignments)) - } - - totalPartitions := 0 - for memberID, partitions := range assignments { - t.Logf("Member %s assigned %d partitions: %v", memberID, len(partitions), partitions) - totalPartitions += len(partitions) - } - - if totalPartitions != 4 { - t.Errorf("Expected 4 total partitions assigned, got %d", totalPartitions) - } - - // Should not be in rebalance state for initial assignment - if strategy.IsRebalanceInProgress() { - t.Error("Expected no rebalance in progress for initial assignment") - } -} - -func TestIncrementalCooperativeAssignmentStrategy_RebalanceWithRevocation(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Create members with existing assignments - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - {Topic: "topic-1", Partition: 2}, - {Topic: "topic-1", Partition: 3}, // This member has all partitions - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, // New member with no assignments - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, - } - - // First call should start revocation phase - assignments1 := strategy.Assign(members, topicPartitions) - - // Should be in revocation phase - if !strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be in progress") - } - - state := strategy.GetRebalanceState() - if state.Phase != RebalancePhaseRevocation { - t.Errorf("Expected revocation phase, got %s", state.Phase) - } - - // Member-1 should have some partitions revoked - member1Assignments := assignments1["member-1"] - if len(member1Assignments) >= 4 { - t.Errorf("Expected member-1 to have fewer than 4 partitions after revocation, got %d", len(member1Assignments)) - } - - // Member-2 should still have no assignments during revocation - member2Assignments := assignments1["member-2"] - if len(member2Assignments) != 0 { - t.Errorf("Expected member-2 to have 0 partitions during revocation, got %d", len(member2Assignments)) - } - - t.Logf("Revocation phase - Member-1: %d partitions, Member-2: %d partitions", - len(member1Assignments), len(member2Assignments)) - - // Simulate time passing and second call (should move to assignment phase) - time.Sleep(10 * time.Millisecond) - - // Force move to assignment phase by setting timeout to 0 - state.RevocationTimeout = 0 - - assignments2 := strategy.Assign(members, topicPartitions) - - // Should complete rebalance - if strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be completed") - } - - // Both members should have partitions now - member1FinalAssignments := assignments2["member-1"] - member2FinalAssignments := assignments2["member-2"] - - if len(member1FinalAssignments) == 0 { - t.Error("Expected member-1 to have some partitions after rebalance") - } - - if len(member2FinalAssignments) == 0 { - t.Error("Expected member-2 to have some partitions after rebalance") - } - - totalFinalPartitions := len(member1FinalAssignments) + len(member2FinalAssignments) - if totalFinalPartitions != 4 { - t.Errorf("Expected 4 total partitions after rebalance, got %d", totalFinalPartitions) - } - - t.Logf("Final assignment - Member-1: %d partitions, Member-2: %d partitions", - len(member1FinalAssignments), len(member2FinalAssignments)) -} - -func TestIncrementalCooperativeAssignmentStrategy_NoRevocationNeeded(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Create members with already balanced assignments - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 2}, - {Topic: "topic-1", Partition: 3}, - }, - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, - } - - // Assignment should not trigger rebalance - assignments := strategy.Assign(members, topicPartitions) - - // Should not be in rebalance state - if strategy.IsRebalanceInProgress() { - t.Error("Expected no rebalance in progress when assignments are already balanced") - } - - // Assignments should remain the same - member1Assignments := assignments["member-1"] - member2Assignments := assignments["member-2"] - - if len(member1Assignments) != 2 { - t.Errorf("Expected member-1 to keep 2 partitions, got %d", len(member1Assignments)) - } - - if len(member2Assignments) != 2 { - t.Errorf("Expected member-2 to keep 2 partitions, got %d", len(member2Assignments)) - } -} - -func TestIncrementalCooperativeAssignmentStrategy_MultipleTopics(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Create members with mixed topic subscriptions - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1", "topic-2"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - {Topic: "topic-2", Partition: 0}, - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 2}, - }, - }, - { - ID: "member-3", - Subscription: []string{"topic-2"}, - Assignment: []PartitionAssignment{}, // New member - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2}, - "topic-2": {0, 1}, - } - - // Should trigger rebalance to distribute topic-2 partitions - assignments := strategy.Assign(members, topicPartitions) - - // Verify all partitions are assigned - allAssignedPartitions := make(map[string]bool) - for _, memberAssignments := range assignments { - for _, assignment := range memberAssignments { - key := fmt.Sprintf("%s:%d", assignment.Topic, assignment.Partition) - allAssignedPartitions[key] = true - } - } - - expectedPartitions := []string{"topic-1:0", "topic-1:1", "topic-1:2", "topic-2:0", "topic-2:1"} - for _, expected := range expectedPartitions { - if !allAssignedPartitions[expected] { - t.Errorf("Expected partition %s to be assigned", expected) - } - } - - // Debug: Print all assigned partitions - t.Logf("All assigned partitions: %v", allAssignedPartitions) -} - -func TestIncrementalCooperativeAssignmentStrategy_ForceComplete(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Start a rebalance - create scenario where member-1 has all partitions but member-2 joins - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - {Topic: "topic-1", Partition: 2}, - {Topic: "topic-1", Partition: 3}, - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, // New member - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, - } - - // This should start a rebalance (member-2 needs partitions) - strategy.Assign(members, topicPartitions) - - if !strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be in progress") - } - - // Force complete the rebalance - strategy.ForceCompleteRebalance() - - if strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be completed after force complete") - } - - state := strategy.GetRebalanceState() - if state.Phase != RebalancePhaseNone { - t.Errorf("Expected phase to be None after force complete, got %s", state.Phase) - } -} - -func TestIncrementalCooperativeAssignmentStrategy_RevocationTimeout(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Set a very short revocation timeout for testing - strategy.rebalanceState.RevocationTimeout = 1 * time.Millisecond - - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - {Topic: "topic-1", Partition: 2}, - {Topic: "topic-1", Partition: 3}, - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, - } - - // First call starts revocation - strategy.Assign(members, topicPartitions) - - if !strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be in progress") - } - - // Wait for timeout - time.Sleep(5 * time.Millisecond) - - // Second call should complete due to timeout - assignments := strategy.Assign(members, topicPartitions) - - if strategy.IsRebalanceInProgress() { - t.Error("Expected rebalance to be completed after timeout") - } - - // Both members should have partitions - member1Assignments := assignments["member-1"] - member2Assignments := assignments["member-2"] - - if len(member1Assignments) == 0 { - t.Error("Expected member-1 to have partitions after timeout") - } - - if len(member2Assignments) == 0 { - t.Error("Expected member-2 to have partitions after timeout") - } -} - -func TestIncrementalCooperativeAssignmentStrategy_StateTransitions(t *testing.T) { - strategy := NewIncrementalCooperativeAssignmentStrategy() - - // Initial state should be None - state := strategy.GetRebalanceState() - if state.Phase != RebalancePhaseNone { - t.Errorf("Expected initial phase to be None, got %s", state.Phase) - } - - // Create scenario that requires rebalancing - members := []*GroupMember{ - { - ID: "member-1", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{ - {Topic: "topic-1", Partition: 0}, - {Topic: "topic-1", Partition: 1}, - {Topic: "topic-1", Partition: 2}, - {Topic: "topic-1", Partition: 3}, - }, - }, - { - ID: "member-2", - Subscription: []string{"topic-1"}, - Assignment: []PartitionAssignment{}, // New member - }, - } - - topicPartitions := map[string][]int32{ - "topic-1": {0, 1, 2, 3}, // Same partitions, but need rebalancing due to new member - } - - // First call should move to revocation phase - strategy.Assign(members, topicPartitions) - state = strategy.GetRebalanceState() - if state.Phase != RebalancePhaseRevocation { - t.Errorf("Expected phase to be Revocation, got %s", state.Phase) - } - - // Force timeout to move to assignment phase - state.RevocationTimeout = 0 - strategy.Assign(members, topicPartitions) - - // Should complete and return to None - state = strategy.GetRebalanceState() - if state.Phase != RebalancePhaseNone { - t.Errorf("Expected phase to be None after completion, got %s", state.Phase) - } -} diff --git a/weed/mq/kafka/consumer/rebalance_timeout.go b/weed/mq/kafka/consumer/rebalance_timeout.go deleted file mode 100644 index 9844723c0..000000000 --- a/weed/mq/kafka/consumer/rebalance_timeout.go +++ /dev/null @@ -1,218 +0,0 @@ -package consumer - -import ( - "time" -) - -// RebalanceTimeoutManager handles rebalance timeout logic and member eviction -type RebalanceTimeoutManager struct { - coordinator *GroupCoordinator -} - -// NewRebalanceTimeoutManager creates a new rebalance timeout manager -func NewRebalanceTimeoutManager(coordinator *GroupCoordinator) *RebalanceTimeoutManager { - return &RebalanceTimeoutManager{ - coordinator: coordinator, - } -} - -// CheckRebalanceTimeouts checks for members that have exceeded rebalance timeouts -func (rtm *RebalanceTimeoutManager) CheckRebalanceTimeouts() { - now := time.Now() - rtm.coordinator.groupsMu.RLock() - defer rtm.coordinator.groupsMu.RUnlock() - - for _, group := range rtm.coordinator.groups { - group.Mu.Lock() - - // Only check timeouts for groups in rebalancing states - if group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance { - rtm.checkGroupRebalanceTimeout(group, now) - } - - group.Mu.Unlock() - } -} - -// checkGroupRebalanceTimeout checks and handles rebalance timeout for a specific group -func (rtm *RebalanceTimeoutManager) checkGroupRebalanceTimeout(group *ConsumerGroup, now time.Time) { - expiredMembers := make([]string, 0) - - for memberID, member := range group.Members { - // Check if member has exceeded its rebalance timeout - rebalanceTimeout := time.Duration(member.RebalanceTimeout) * time.Millisecond - if rebalanceTimeout == 0 { - // Use default rebalance timeout if not specified - rebalanceTimeout = time.Duration(rtm.coordinator.rebalanceTimeoutMs) * time.Millisecond - } - - // For members in pending state during rebalance, check against join time - if member.State == MemberStatePending { - if now.Sub(member.JoinedAt) > rebalanceTimeout { - expiredMembers = append(expiredMembers, memberID) - } - } - - // Also check session timeout as a fallback - sessionTimeout := time.Duration(member.SessionTimeout) * time.Millisecond - if now.Sub(member.LastHeartbeat) > sessionTimeout { - expiredMembers = append(expiredMembers, memberID) - } - } - - // Remove expired members and trigger rebalance if necessary - if len(expiredMembers) > 0 { - rtm.evictExpiredMembers(group, expiredMembers) - } -} - -// evictExpiredMembers removes expired members and updates group state -func (rtm *RebalanceTimeoutManager) evictExpiredMembers(group *ConsumerGroup, expiredMembers []string) { - for _, memberID := range expiredMembers { - delete(group.Members, memberID) - - // If the leader was evicted, clear leader - if group.Leader == memberID { - group.Leader = "" - } - } - - // Update group state based on remaining members - if len(group.Members) == 0 { - group.State = GroupStateEmpty - group.Generation++ - group.Leader = "" - } else { - // If we were in the middle of rebalancing, restart the process - if group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance { - // Select new leader if needed - if group.Leader == "" { - for memberID := range group.Members { - group.Leader = memberID - break - } - } - - // Reset to preparing rebalance to restart the process - group.State = GroupStatePreparingRebalance - group.Generation++ - - // Mark remaining members as pending - for _, member := range group.Members { - member.State = MemberStatePending - } - } - } - - group.LastActivity = time.Now() -} - -// IsRebalanceStuck checks if a group has been stuck in rebalancing for too long -func (rtm *RebalanceTimeoutManager) IsRebalanceStuck(group *ConsumerGroup, maxRebalanceDuration time.Duration) bool { - if group.State != GroupStatePreparingRebalance && group.State != GroupStateCompletingRebalance { - return false - } - - return time.Since(group.LastActivity) > maxRebalanceDuration -} - -// ForceCompleteRebalance forces completion of a stuck rebalance -func (rtm *RebalanceTimeoutManager) ForceCompleteRebalance(group *ConsumerGroup) { - group.Mu.Lock() - defer group.Mu.Unlock() - - // If stuck in preparing rebalance, move to completing - if group.State == GroupStatePreparingRebalance { - group.State = GroupStateCompletingRebalance - group.LastActivity = time.Now() - return - } - - // If stuck in completing rebalance, force to stable - if group.State == GroupStateCompletingRebalance { - group.State = GroupStateStable - for _, member := range group.Members { - member.State = MemberStateStable - } - group.LastActivity = time.Now() - return - } -} - -// GetRebalanceStatus returns the current rebalance status for a group -func (rtm *RebalanceTimeoutManager) GetRebalanceStatus(groupID string) *RebalanceStatus { - group := rtm.coordinator.GetGroup(groupID) - if group == nil { - return nil - } - - group.Mu.RLock() - defer group.Mu.RUnlock() - - status := &RebalanceStatus{ - GroupID: groupID, - State: group.State, - Generation: group.Generation, - MemberCount: len(group.Members), - Leader: group.Leader, - LastActivity: group.LastActivity, - IsRebalancing: group.State == GroupStatePreparingRebalance || group.State == GroupStateCompletingRebalance, - RebalanceDuration: time.Since(group.LastActivity), - } - - // Calculate member timeout status - now := time.Now() - for memberID, member := range group.Members { - memberStatus := MemberTimeoutStatus{ - MemberID: memberID, - State: member.State, - LastHeartbeat: member.LastHeartbeat, - JoinedAt: member.JoinedAt, - SessionTimeout: time.Duration(member.SessionTimeout) * time.Millisecond, - RebalanceTimeout: time.Duration(member.RebalanceTimeout) * time.Millisecond, - } - - // Calculate time until session timeout - sessionTimeRemaining := memberStatus.SessionTimeout - now.Sub(member.LastHeartbeat) - if sessionTimeRemaining < 0 { - sessionTimeRemaining = 0 - } - memberStatus.SessionTimeRemaining = sessionTimeRemaining - - // Calculate time until rebalance timeout - rebalanceTimeRemaining := memberStatus.RebalanceTimeout - now.Sub(member.JoinedAt) - if rebalanceTimeRemaining < 0 { - rebalanceTimeRemaining = 0 - } - memberStatus.RebalanceTimeRemaining = rebalanceTimeRemaining - - status.Members = append(status.Members, memberStatus) - } - - return status -} - -// RebalanceStatus represents the current status of a group's rebalance -type RebalanceStatus struct { - GroupID string `json:"group_id"` - State GroupState `json:"state"` - Generation int32 `json:"generation"` - MemberCount int `json:"member_count"` - Leader string `json:"leader"` - LastActivity time.Time `json:"last_activity"` - IsRebalancing bool `json:"is_rebalancing"` - RebalanceDuration time.Duration `json:"rebalance_duration"` - Members []MemberTimeoutStatus `json:"members"` -} - -// MemberTimeoutStatus represents timeout status for a group member -type MemberTimeoutStatus struct { - MemberID string `json:"member_id"` - State MemberState `json:"state"` - LastHeartbeat time.Time `json:"last_heartbeat"` - JoinedAt time.Time `json:"joined_at"` - SessionTimeout time.Duration `json:"session_timeout"` - RebalanceTimeout time.Duration `json:"rebalance_timeout"` - SessionTimeRemaining time.Duration `json:"session_time_remaining"` - RebalanceTimeRemaining time.Duration `json:"rebalance_time_remaining"` -} diff --git a/weed/mq/kafka/consumer/rebalance_timeout_test.go b/weed/mq/kafka/consumer/rebalance_timeout_test.go deleted file mode 100644 index ac5f90aee..000000000 --- a/weed/mq/kafka/consumer/rebalance_timeout_test.go +++ /dev/null @@ -1,331 +0,0 @@ -package consumer - -import ( - "testing" - "time" -) - -func TestRebalanceTimeoutManager_CheckRebalanceTimeouts(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Create a group with a member that has a short rebalance timeout - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - - member := &GroupMember{ - ID: "member1", - ClientID: "client1", - SessionTimeout: 30000, // 30 seconds - RebalanceTimeout: 1000, // 1 second (very short for testing) - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now().Add(-2 * time.Second), // Joined 2 seconds ago - } - group.Members["member1"] = member - group.Mu.Unlock() - - // Check timeouts - member should be evicted - rtm.CheckRebalanceTimeouts() - - group.Mu.RLock() - if len(group.Members) != 0 { - t.Errorf("Expected member to be evicted due to rebalance timeout, but %d members remain", len(group.Members)) - } - - if group.State != GroupStateEmpty { - t.Errorf("Expected group state to be Empty after member eviction, got %s", group.State.String()) - } - group.Mu.RUnlock() -} - -func TestRebalanceTimeoutManager_SessionTimeoutFallback(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Create a group with a member that has exceeded session timeout - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - - member := &GroupMember{ - ID: "member1", - ClientID: "client1", - SessionTimeout: 1000, // 1 second - RebalanceTimeout: 30000, // 30 seconds - State: MemberStatePending, - LastHeartbeat: time.Now().Add(-2 * time.Second), // Last heartbeat 2 seconds ago - JoinedAt: time.Now(), - } - group.Members["member1"] = member - group.Mu.Unlock() - - // Check timeouts - member should be evicted due to session timeout - rtm.CheckRebalanceTimeouts() - - group.Mu.RLock() - if len(group.Members) != 0 { - t.Errorf("Expected member to be evicted due to session timeout, but %d members remain", len(group.Members)) - } - group.Mu.RUnlock() -} - -func TestRebalanceTimeoutManager_LeaderEviction(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Create a group with leader and another member - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - group.Leader = "member1" - - // Leader with expired rebalance timeout - leader := &GroupMember{ - ID: "member1", - ClientID: "client1", - SessionTimeout: 30000, - RebalanceTimeout: 1000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now().Add(-2 * time.Second), - } - group.Members["member1"] = leader - - // Another member that's still valid - member2 := &GroupMember{ - ID: "member2", - ClientID: "client2", - SessionTimeout: 30000, - RebalanceTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - group.Members["member2"] = member2 - group.Mu.Unlock() - - // Check timeouts - leader should be evicted, new leader selected - rtm.CheckRebalanceTimeouts() - - group.Mu.RLock() - if len(group.Members) != 1 { - t.Errorf("Expected 1 member to remain after leader eviction, got %d", len(group.Members)) - } - - if group.Leader != "member2" { - t.Errorf("Expected member2 to become new leader, got %s", group.Leader) - } - - if group.State != GroupStatePreparingRebalance { - t.Errorf("Expected group to restart rebalancing after leader eviction, got %s", group.State.String()) - } - group.Mu.RUnlock() -} - -func TestRebalanceTimeoutManager_IsRebalanceStuck(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Create a group that's been rebalancing for a while - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - group.LastActivity = time.Now().Add(-15 * time.Minute) // 15 minutes ago - group.Mu.Unlock() - - // Check if rebalance is stuck (max 10 minutes) - maxDuration := 10 * time.Minute - if !rtm.IsRebalanceStuck(group, maxDuration) { - t.Error("Expected rebalance to be detected as stuck") - } - - // Test with a group that's not stuck - group.Mu.Lock() - group.LastActivity = time.Now().Add(-5 * time.Minute) // 5 minutes ago - group.Mu.Unlock() - - if rtm.IsRebalanceStuck(group, maxDuration) { - t.Error("Expected rebalance to not be detected as stuck") - } - - // Test with stable group (should not be stuck) - group.Mu.Lock() - group.State = GroupStateStable - group.LastActivity = time.Now().Add(-15 * time.Minute) - group.Mu.Unlock() - - if rtm.IsRebalanceStuck(group, maxDuration) { - t.Error("Stable group should not be detected as stuck") - } -} - -func TestRebalanceTimeoutManager_ForceCompleteRebalance(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Test forcing completion from PreparingRebalance - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - - member := &GroupMember{ - ID: "member1", - State: MemberStatePending, - } - group.Members["member1"] = member - group.Mu.Unlock() - - rtm.ForceCompleteRebalance(group) - - group.Mu.RLock() - if group.State != GroupStateCompletingRebalance { - t.Errorf("Expected group state to be CompletingRebalance, got %s", group.State.String()) - } - group.Mu.RUnlock() - - // Test forcing completion from CompletingRebalance - rtm.ForceCompleteRebalance(group) - - group.Mu.RLock() - if group.State != GroupStateStable { - t.Errorf("Expected group state to be Stable, got %s", group.State.String()) - } - - if member.State != MemberStateStable { - t.Errorf("Expected member state to be Stable, got %s", member.State.String()) - } - group.Mu.RUnlock() -} - -func TestRebalanceTimeoutManager_GetRebalanceStatus(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Test with non-existent group - status := rtm.GetRebalanceStatus("non-existent") - if status != nil { - t.Error("Expected nil status for non-existent group") - } - - // Create a group with members - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - group.Generation = 5 - group.Leader = "member1" - group.LastActivity = time.Now().Add(-2 * time.Minute) - - member1 := &GroupMember{ - ID: "member1", - State: MemberStatePending, - LastHeartbeat: time.Now().Add(-30 * time.Second), - JoinedAt: time.Now().Add(-2 * time.Minute), - SessionTimeout: 30000, // 30 seconds - RebalanceTimeout: 300000, // 5 minutes - } - group.Members["member1"] = member1 - - member2 := &GroupMember{ - ID: "member2", - State: MemberStatePending, - LastHeartbeat: time.Now().Add(-10 * time.Second), - JoinedAt: time.Now().Add(-1 * time.Minute), - SessionTimeout: 60000, // 1 minute - RebalanceTimeout: 180000, // 3 minutes - } - group.Members["member2"] = member2 - group.Mu.Unlock() - - // Get status - status = rtm.GetRebalanceStatus("test-group") - - if status == nil { - t.Fatal("Expected non-nil status") - } - - if status.GroupID != "test-group" { - t.Errorf("Expected group ID 'test-group', got %s", status.GroupID) - } - - if status.State != GroupStatePreparingRebalance { - t.Errorf("Expected state PreparingRebalance, got %s", status.State.String()) - } - - if status.Generation != 5 { - t.Errorf("Expected generation 5, got %d", status.Generation) - } - - if status.MemberCount != 2 { - t.Errorf("Expected 2 members, got %d", status.MemberCount) - } - - if status.Leader != "member1" { - t.Errorf("Expected leader 'member1', got %s", status.Leader) - } - - if !status.IsRebalancing { - t.Error("Expected IsRebalancing to be true") - } - - if len(status.Members) != 2 { - t.Errorf("Expected 2 member statuses, got %d", len(status.Members)) - } - - // Check member timeout calculations - for _, memberStatus := range status.Members { - if memberStatus.SessionTimeRemaining < 0 { - t.Errorf("Session time remaining should not be negative for member %s", memberStatus.MemberID) - } - - if memberStatus.RebalanceTimeRemaining < 0 { - t.Errorf("Rebalance time remaining should not be negative for member %s", memberStatus.MemberID) - } - } -} - -func TestRebalanceTimeoutManager_DefaultRebalanceTimeout(t *testing.T) { - coordinator := NewGroupCoordinator() - defer coordinator.Close() - - rtm := coordinator.rebalanceTimeoutManager - - // Create a group with a member that has no rebalance timeout set (0) - group := coordinator.GetOrCreateGroup("test-group") - group.Mu.Lock() - group.State = GroupStatePreparingRebalance - - member := &GroupMember{ - ID: "member1", - ClientID: "client1", - SessionTimeout: 30000, // 30 seconds - RebalanceTimeout: 0, // Not set, should use default - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now().Add(-6 * time.Minute), // Joined 6 minutes ago - } - group.Members["member1"] = member - group.Mu.Unlock() - - // Default rebalance timeout is 5 minutes (300000ms), so member should be evicted - rtm.CheckRebalanceTimeouts() - - group.Mu.RLock() - if len(group.Members) != 0 { - t.Errorf("Expected member to be evicted using default rebalance timeout, but %d members remain", len(group.Members)) - } - group.Mu.RUnlock() -} diff --git a/weed/mq/kafka/consumer/static_membership_test.go b/weed/mq/kafka/consumer/static_membership_test.go deleted file mode 100644 index df1ad1fbb..000000000 --- a/weed/mq/kafka/consumer/static_membership_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package consumer - -import ( - "testing" - "time" -) - -func TestGroupCoordinator_StaticMembership(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - group := gc.GetOrCreateGroup("test-group") - - // Test static member registration - instanceID := "static-instance-1" - member := &GroupMember{ - ID: "member-1", - ClientID: "client-1", - ClientHost: "localhost", - GroupInstanceID: &instanceID, - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - // Add member to group - group.Members[member.ID] = member - gc.RegisterStaticMember(group, member) - - // Test finding static member - foundMember := gc.FindStaticMember(group, instanceID) - if foundMember == nil { - t.Error("Expected to find static member, got nil") - } - if foundMember.ID != member.ID { - t.Errorf("Expected member ID %s, got %s", member.ID, foundMember.ID) - } - - // Test IsStaticMember - if !gc.IsStaticMember(member) { - t.Error("Expected member to be static") - } - - // Test dynamic member (no instance ID) - dynamicMember := &GroupMember{ - ID: "member-2", - ClientID: "client-2", - ClientHost: "localhost", - GroupInstanceID: nil, - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - if gc.IsStaticMember(dynamicMember) { - t.Error("Expected member to be dynamic") - } - - // Test unregistering static member - gc.UnregisterStaticMember(group, instanceID) - foundMember = gc.FindStaticMember(group, instanceID) - if foundMember != nil { - t.Error("Expected static member to be unregistered") - } -} - -func TestGroupCoordinator_StaticMemberReconnection(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - group := gc.GetOrCreateGroup("test-group") - instanceID := "static-instance-1" - - // First connection - member1 := &GroupMember{ - ID: "member-1", - ClientID: "client-1", - ClientHost: "localhost", - GroupInstanceID: &instanceID, - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - group.Members[member1.ID] = member1 - gc.RegisterStaticMember(group, member1) - - // Simulate disconnection and reconnection with same instance ID - delete(group.Members, member1.ID) - - // Reconnection with same instance ID should reuse the mapping - member2 := &GroupMember{ - ID: "member-2", // Different member ID - ClientID: "client-1", - ClientHost: "localhost", - GroupInstanceID: &instanceID, // Same instance ID - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - group.Members[member2.ID] = member2 - gc.RegisterStaticMember(group, member2) - - // Should find the new member with the same instance ID - foundMember := gc.FindStaticMember(group, instanceID) - if foundMember == nil { - t.Error("Expected to find static member after reconnection") - } - if foundMember.ID != member2.ID { - t.Errorf("Expected member ID %s, got %s", member2.ID, foundMember.ID) - } -} - -func TestGroupCoordinator_StaticMembershipEdgeCases(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - group := gc.GetOrCreateGroup("test-group") - - // Test empty instance ID - member := &GroupMember{ - ID: "member-1", - ClientID: "client-1", - ClientHost: "localhost", - GroupInstanceID: nil, - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - gc.RegisterStaticMember(group, member) // Should be no-op - foundMember := gc.FindStaticMember(group, "") - if foundMember != nil { - t.Error("Expected not to find member with empty instance ID") - } - - // Test empty string instance ID - emptyInstanceID := "" - member.GroupInstanceID = &emptyInstanceID - gc.RegisterStaticMember(group, member) // Should be no-op - foundMember = gc.FindStaticMember(group, emptyInstanceID) - if foundMember != nil { - t.Error("Expected not to find member with empty string instance ID") - } - - // Test unregistering non-existent instance ID - gc.UnregisterStaticMember(group, "non-existent") // Should be no-op -} - -func TestGroupCoordinator_StaticMembershipConcurrency(t *testing.T) { - gc := NewGroupCoordinator() - defer gc.Close() - - group := gc.GetOrCreateGroup("test-group") - instanceID := "static-instance-1" - - // Test concurrent access - done := make(chan bool, 2) - - // Goroutine 1: Register static member - go func() { - member := &GroupMember{ - ID: "member-1", - ClientID: "client-1", - ClientHost: "localhost", - GroupInstanceID: &instanceID, - SessionTimeout: 30000, - State: MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - group.Members[member.ID] = member - gc.RegisterStaticMember(group, member) - done <- true - }() - - // Goroutine 2: Find static member - go func() { - time.Sleep(10 * time.Millisecond) // Small delay to ensure registration happens first - foundMember := gc.FindStaticMember(group, instanceID) - if foundMember == nil { - t.Error("Expected to find static member in concurrent access") - } - done <- true - }() - - // Wait for both goroutines to complete - <-done - <-done -} diff --git a/weed/mq/kafka/consumer_offset/filer_storage.go b/weed/mq/kafka/consumer_offset/filer_storage.go deleted file mode 100644 index 8eeceb660..000000000 --- a/weed/mq/kafka/consumer_offset/filer_storage.go +++ /dev/null @@ -1,326 +0,0 @@ -package consumer_offset - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -const ( - // ConsumerOffsetsBasePath is the base path for storing Kafka consumer offsets in SeaweedFS - ConsumerOffsetsBasePath = "/topics/kafka/.meta/consumer_offsets" -) - -// KafkaConsumerPosition represents a Kafka consumer's position -// Can be either offset-based or timestamp-based -type KafkaConsumerPosition struct { - Type string `json:"type"` // "offset" or "timestamp" - Value int64 `json:"value"` // The actual offset or timestamp value - CommittedAt int64 `json:"committed_at"` // Unix timestamp in milliseconds when committed - Metadata string `json:"metadata"` // Optional: application-specific metadata -} - -// FilerStorage implements OffsetStorage using SeaweedFS filer -// Offsets are stored in JSON format: {ConsumerOffsetsBasePath}/{group}/{topic}/{partition}/offset -// Supports both offset and timestamp positioning -type FilerStorage struct { - fca *filer_client.FilerClientAccessor - closed bool -} - -// NewFilerStorage creates a new filer-based offset storage -func NewFilerStorage(fca *filer_client.FilerClientAccessor) *FilerStorage { - return &FilerStorage{ - fca: fca, - closed: false, - } -} - -// CommitOffset commits an offset for a consumer group -// Now stores as JSON to support both offset and timestamp positioning -func (f *FilerStorage) CommitOffset(group, topic string, partition int32, offset int64, metadata string) error { - if f.closed { - return ErrStorageClosed - } - - // Validate inputs - if offset < -1 { - return ErrInvalidOffset - } - if partition < 0 { - return ErrInvalidPartition - } - - offsetPath := f.getOffsetPath(group, topic, partition) - - // Create position structure - position := &KafkaConsumerPosition{ - Type: "offset", - Value: offset, - CommittedAt: time.Now().UnixMilli(), - Metadata: metadata, - } - - // Marshal to JSON - jsonBytes, err := json.Marshal(position) - if err != nil { - return fmt.Errorf("failed to marshal offset to JSON: %w", err) - } - - // Store as single JSON file - if err := f.writeFile(offsetPath, jsonBytes); err != nil { - return fmt.Errorf("failed to write offset: %w", err) - } - - return nil -} - -// FetchOffset fetches the committed offset for a consumer group -func (f *FilerStorage) FetchOffset(group, topic string, partition int32) (int64, string, error) { - if f.closed { - return -1, "", ErrStorageClosed - } - - offsetPath := f.getOffsetPath(group, topic, partition) - - // Read offset file - offsetData, err := f.readFile(offsetPath) - if err != nil { - // File doesn't exist, no offset committed - return -1, "", nil - } - - // Parse JSON format - var position KafkaConsumerPosition - if err := json.Unmarshal(offsetData, &position); err != nil { - return -1, "", fmt.Errorf("failed to parse offset JSON: %w", err) - } - - return position.Value, position.Metadata, nil -} - -// FetchAllOffsets fetches all committed offsets for a consumer group -func (f *FilerStorage) FetchAllOffsets(group string) (map[TopicPartition]OffsetMetadata, error) { - if f.closed { - return nil, ErrStorageClosed - } - - result := make(map[TopicPartition]OffsetMetadata) - groupPath := f.getGroupPath(group) - - // List all topics for this group - topics, err := f.listDirectory(groupPath) - if err != nil { - // Group doesn't exist, return empty map - return result, nil - } - - // For each topic, list all partitions - for _, topicName := range topics { - topicPath := fmt.Sprintf("%s/%s", groupPath, topicName) - partitions, err := f.listDirectory(topicPath) - if err != nil { - continue - } - - // For each partition, read the offset - for _, partitionName := range partitions { - var partition int32 - _, err := fmt.Sscanf(partitionName, "%d", &partition) - if err != nil { - continue - } - - offset, metadata, err := f.FetchOffset(group, topicName, partition) - if err == nil && offset >= 0 { - tp := TopicPartition{Topic: topicName, Partition: partition} - result[tp] = OffsetMetadata{Offset: offset, Metadata: metadata} - } - } - } - - return result, nil -} - -// DeleteGroup deletes all offset data for a consumer group -func (f *FilerStorage) DeleteGroup(group string) error { - if f.closed { - return ErrStorageClosed - } - - groupPath := f.getGroupPath(group) - return f.deleteDirectory(groupPath) -} - -// ListGroups returns all consumer group IDs -func (f *FilerStorage) ListGroups() ([]string, error) { - if f.closed { - return nil, ErrStorageClosed - } - - return f.listDirectory(ConsumerOffsetsBasePath) -} - -// Close releases resources -func (f *FilerStorage) Close() error { - f.closed = true - return nil -} - -// Helper methods - -func (f *FilerStorage) getGroupPath(group string) string { - return fmt.Sprintf("%s/%s", ConsumerOffsetsBasePath, group) -} - -func (f *FilerStorage) getTopicPath(group, topic string) string { - return fmt.Sprintf("%s/%s", f.getGroupPath(group), topic) -} - -func (f *FilerStorage) getPartitionPath(group, topic string, partition int32) string { - return fmt.Sprintf("%s/%d", f.getTopicPath(group, topic), partition) -} - -func (f *FilerStorage) getOffsetPath(group, topic string, partition int32) string { - return fmt.Sprintf("%s/offset", f.getPartitionPath(group, topic, partition)) -} - -func (f *FilerStorage) getMetadataPath(group, topic string, partition int32) string { - return fmt.Sprintf("%s/metadata", f.getPartitionPath(group, topic, partition)) -} - -func (f *FilerStorage) writeFile(path string, data []byte) error { - fullPath := util.FullPath(path) - dir, name := fullPath.DirAndName() - - return f.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Create entry - entry := &filer_pb.Entry{ - Name: name, - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - FileMode: 0644, - FileSize: uint64(len(data)), - }, - Chunks: []*filer_pb.FileChunk{}, - } - - // For small files, store inline - if len(data) > 0 { - entry.Content = data - } - - // Create or update the entry - return filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{ - Directory: dir, - Entry: entry, - }) - }) -} - -func (f *FilerStorage) readFile(path string) ([]byte, error) { - fullPath := util.FullPath(path) - dir, name := fullPath.DirAndName() - - var data []byte - err := f.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Get the entry - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir, - Name: name, - }) - if err != nil { - return err - } - - entry := resp.Entry - if entry.IsDirectory { - return fmt.Errorf("path is a directory") - } - - // Read inline content if available - if len(entry.Content) > 0 { - data = entry.Content - return nil - } - - // If no chunks, file is empty - if len(entry.Chunks) == 0 { - data = []byte{} - return nil - } - - return fmt.Errorf("chunked files not supported for offset storage") - }) - - return data, err -} - -func (f *FilerStorage) listDirectory(path string) ([]string, error) { - var entries []string - - err := f.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: path, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if resp.Entry.IsDirectory { - entries = append(entries, resp.Entry.Name) - } - } - - return nil - }) - - return entries, err -} - -func (f *FilerStorage) deleteDirectory(path string) error { - fullPath := util.FullPath(path) - dir, name := fullPath.DirAndName() - - return f.fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: true, - IsRecursive: true, - IgnoreRecursiveError: true, - }) - return err - }) -} - -// normalizePath removes leading/trailing slashes and collapses multiple slashes -func normalizePath(path string) string { - path = strings.Trim(path, "/") - parts := strings.Split(path, "/") - normalized := []string{} - for _, part := range parts { - if part != "" { - normalized = append(normalized, part) - } - } - return "/" + strings.Join(normalized, "/") -} diff --git a/weed/mq/kafka/consumer_offset/filer_storage_test.go b/weed/mq/kafka/consumer_offset/filer_storage_test.go deleted file mode 100644 index 67a0e7e09..000000000 --- a/weed/mq/kafka/consumer_offset/filer_storage_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package consumer_offset - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -// Note: These tests require a running filer instance -// They are marked as integration tests and should be run with: -// go test -tags=integration - -func TestFilerStorageCommitAndFetch(t *testing.T) { - t.Skip("Requires running filer - integration test") - - // This will be implemented once we have test infrastructure - // Test will: - // 1. Create filer storage - // 2. Commit offset - // 3. Fetch offset - // 4. Verify values match -} - -func TestFilerStoragePersistence(t *testing.T) { - t.Skip("Requires running filer - integration test") - - // Test will: - // 1. Commit offset with first storage instance - // 2. Close first instance - // 3. Create new storage instance - // 4. Fetch offset and verify it persisted -} - -func TestFilerStorageMultipleGroups(t *testing.T) { - t.Skip("Requires running filer - integration test") - - // Test will: - // 1. Commit offsets for multiple groups - // 2. Fetch all offsets per group - // 3. Verify isolation between groups -} - -func TestFilerStoragePath(t *testing.T) { - // Test path generation (doesn't require filer) - storage := &FilerStorage{} - - group := "test-group" - topic := "test-topic" - partition := int32(5) - - groupPath := storage.getGroupPath(group) - assert.Equal(t, ConsumerOffsetsBasePath+"/test-group", groupPath) - - topicPath := storage.getTopicPath(group, topic) - assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic", topicPath) - - partitionPath := storage.getPartitionPath(group, topic, partition) - assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5", partitionPath) - - offsetPath := storage.getOffsetPath(group, topic, partition) - assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5/offset", offsetPath) - - metadataPath := storage.getMetadataPath(group, topic, partition) - assert.Equal(t, ConsumerOffsetsBasePath+"/test-group/test-topic/5/metadata", metadataPath) -} diff --git a/weed/mq/kafka/consumer_offset/memory_storage.go b/weed/mq/kafka/consumer_offset/memory_storage.go deleted file mode 100644 index 8814107bb..000000000 --- a/weed/mq/kafka/consumer_offset/memory_storage.go +++ /dev/null @@ -1,145 +0,0 @@ -package consumer_offset - -import ( - "sync" -) - -// MemoryStorage implements OffsetStorage using in-memory maps -// This is suitable for testing and single-node deployments -// Data is lost on restart -type MemoryStorage struct { - mu sync.RWMutex - groups map[string]map[TopicPartition]OffsetMetadata - closed bool -} - -// NewMemoryStorage creates a new in-memory offset storage -func NewMemoryStorage() *MemoryStorage { - return &MemoryStorage{ - groups: make(map[string]map[TopicPartition]OffsetMetadata), - closed: false, - } -} - -// CommitOffset commits an offset for a consumer group -func (m *MemoryStorage) CommitOffset(group, topic string, partition int32, offset int64, metadata string) error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.closed { - return ErrStorageClosed - } - - // Validate inputs - if offset < -1 { - return ErrInvalidOffset - } - if partition < 0 { - return ErrInvalidPartition - } - - // Create group if it doesn't exist - if m.groups[group] == nil { - m.groups[group] = make(map[TopicPartition]OffsetMetadata) - } - - // Store offset - tp := TopicPartition{Topic: topic, Partition: partition} - m.groups[group][tp] = OffsetMetadata{ - Offset: offset, - Metadata: metadata, - } - - return nil -} - -// FetchOffset fetches the committed offset for a consumer group -func (m *MemoryStorage) FetchOffset(group, topic string, partition int32) (int64, string, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - if m.closed { - return -1, "", ErrStorageClosed - } - - groupOffsets, exists := m.groups[group] - if !exists { - // Group doesn't exist, return -1 (no committed offset) - return -1, "", nil - } - - tp := TopicPartition{Topic: topic, Partition: partition} - offsetMeta, exists := groupOffsets[tp] - if !exists { - // No offset committed for this partition - return -1, "", nil - } - - return offsetMeta.Offset, offsetMeta.Metadata, nil -} - -// FetchAllOffsets fetches all committed offsets for a consumer group -func (m *MemoryStorage) FetchAllOffsets(group string) (map[TopicPartition]OffsetMetadata, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - if m.closed { - return nil, ErrStorageClosed - } - - groupOffsets, exists := m.groups[group] - if !exists { - // Return empty map for non-existent group - return make(map[TopicPartition]OffsetMetadata), nil - } - - // Return a copy to prevent external modification - result := make(map[TopicPartition]OffsetMetadata, len(groupOffsets)) - for tp, offset := range groupOffsets { - result[tp] = offset - } - - return result, nil -} - -// DeleteGroup deletes all offset data for a consumer group -func (m *MemoryStorage) DeleteGroup(group string) error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.closed { - return ErrStorageClosed - } - - delete(m.groups, group) - return nil -} - -// ListGroups returns all consumer group IDs -func (m *MemoryStorage) ListGroups() ([]string, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - if m.closed { - return nil, ErrStorageClosed - } - - groups := make([]string, 0, len(m.groups)) - for group := range m.groups { - groups = append(groups, group) - } - - return groups, nil -} - -// Close releases resources (no-op for memory storage) -func (m *MemoryStorage) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - - m.closed = true - m.groups = nil - - return nil -} - diff --git a/weed/mq/kafka/consumer_offset/memory_storage_test.go b/weed/mq/kafka/consumer_offset/memory_storage_test.go deleted file mode 100644 index eaf849dc5..000000000 --- a/weed/mq/kafka/consumer_offset/memory_storage_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package consumer_offset - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMemoryStorageCommitAndFetch(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - group := "test-group" - topic := "test-topic" - partition := int32(0) - offset := int64(42) - metadata := "test-metadata" - - // Commit offset - err := storage.CommitOffset(group, topic, partition, offset, metadata) - require.NoError(t, err) - - // Fetch offset - fetchedOffset, fetchedMetadata, err := storage.FetchOffset(group, topic, partition) - require.NoError(t, err) - assert.Equal(t, offset, fetchedOffset) - assert.Equal(t, metadata, fetchedMetadata) -} - -func TestMemoryStorageFetchNonExistent(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - // Fetch offset for non-existent group - offset, metadata, err := storage.FetchOffset("non-existent", "topic", 0) - require.NoError(t, err) - assert.Equal(t, int64(-1), offset) - assert.Equal(t, "", metadata) -} - -func TestMemoryStorageFetchAllOffsets(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - group := "test-group" - - // Commit offsets for multiple partitions - err := storage.CommitOffset(group, "topic1", 0, 10, "meta1") - require.NoError(t, err) - err = storage.CommitOffset(group, "topic1", 1, 20, "meta2") - require.NoError(t, err) - err = storage.CommitOffset(group, "topic2", 0, 30, "meta3") - require.NoError(t, err) - - // Fetch all offsets - offsets, err := storage.FetchAllOffsets(group) - require.NoError(t, err) - assert.Equal(t, 3, len(offsets)) - - // Verify each offset - tp1 := TopicPartition{Topic: "topic1", Partition: 0} - assert.Equal(t, int64(10), offsets[tp1].Offset) - assert.Equal(t, "meta1", offsets[tp1].Metadata) - - tp2 := TopicPartition{Topic: "topic1", Partition: 1} - assert.Equal(t, int64(20), offsets[tp2].Offset) - - tp3 := TopicPartition{Topic: "topic2", Partition: 0} - assert.Equal(t, int64(30), offsets[tp3].Offset) -} - -func TestMemoryStorageDeleteGroup(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - group := "test-group" - - // Commit offset - err := storage.CommitOffset(group, "topic", 0, 100, "") - require.NoError(t, err) - - // Verify offset exists - offset, _, err := storage.FetchOffset(group, "topic", 0) - require.NoError(t, err) - assert.Equal(t, int64(100), offset) - - // Delete group - err = storage.DeleteGroup(group) - require.NoError(t, err) - - // Verify offset is gone - offset, _, err = storage.FetchOffset(group, "topic", 0) - require.NoError(t, err) - assert.Equal(t, int64(-1), offset) -} - -func TestMemoryStorageListGroups(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - // Initially empty - groups, err := storage.ListGroups() - require.NoError(t, err) - assert.Equal(t, 0, len(groups)) - - // Commit offsets for multiple groups - err = storage.CommitOffset("group1", "topic", 0, 10, "") - require.NoError(t, err) - err = storage.CommitOffset("group2", "topic", 0, 20, "") - require.NoError(t, err) - err = storage.CommitOffset("group3", "topic", 0, 30, "") - require.NoError(t, err) - - // List groups - groups, err = storage.ListGroups() - require.NoError(t, err) - assert.Equal(t, 3, len(groups)) - assert.Contains(t, groups, "group1") - assert.Contains(t, groups, "group2") - assert.Contains(t, groups, "group3") -} - -func TestMemoryStorageConcurrency(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - group := "concurrent-group" - topic := "topic" - numGoroutines := 100 - - var wg sync.WaitGroup - wg.Add(numGoroutines) - - // Launch multiple goroutines to commit offsets concurrently - for i := 0; i < numGoroutines; i++ { - go func(partition int32, offset int64) { - defer wg.Done() - err := storage.CommitOffset(group, topic, partition, offset, "") - assert.NoError(t, err) - }(int32(i%10), int64(i)) - } - - wg.Wait() - - // Verify we can fetch offsets without errors - offsets, err := storage.FetchAllOffsets(group) - require.NoError(t, err) - assert.Greater(t, len(offsets), 0) -} - -func TestMemoryStorageInvalidInputs(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - // Invalid offset (less than -1) - err := storage.CommitOffset("group", "topic", 0, -2, "") - assert.ErrorIs(t, err, ErrInvalidOffset) - - // Invalid partition (negative) - err = storage.CommitOffset("group", "topic", -1, 10, "") - assert.ErrorIs(t, err, ErrInvalidPartition) -} - -func TestMemoryStorageClosedOperations(t *testing.T) { - storage := NewMemoryStorage() - storage.Close() - - // Operations on closed storage should return error - err := storage.CommitOffset("group", "topic", 0, 10, "") - assert.ErrorIs(t, err, ErrStorageClosed) - - _, _, err = storage.FetchOffset("group", "topic", 0) - assert.ErrorIs(t, err, ErrStorageClosed) - - _, err = storage.FetchAllOffsets("group") - assert.ErrorIs(t, err, ErrStorageClosed) - - err = storage.DeleteGroup("group") - assert.ErrorIs(t, err, ErrStorageClosed) - - _, err = storage.ListGroups() - assert.ErrorIs(t, err, ErrStorageClosed) -} - -func TestMemoryStorageOverwrite(t *testing.T) { - storage := NewMemoryStorage() - defer storage.Close() - - group := "test-group" - topic := "topic" - partition := int32(0) - - // Commit initial offset - err := storage.CommitOffset(group, topic, partition, 10, "meta1") - require.NoError(t, err) - - // Overwrite with new offset - err = storage.CommitOffset(group, topic, partition, 20, "meta2") - require.NoError(t, err) - - // Fetch should return latest offset - offset, metadata, err := storage.FetchOffset(group, topic, partition) - require.NoError(t, err) - assert.Equal(t, int64(20), offset) - assert.Equal(t, "meta2", metadata) -} - diff --git a/weed/mq/kafka/consumer_offset/storage.go b/weed/mq/kafka/consumer_offset/storage.go deleted file mode 100644 index d3f999faa..000000000 --- a/weed/mq/kafka/consumer_offset/storage.go +++ /dev/null @@ -1,59 +0,0 @@ -package consumer_offset - -import ( - "fmt" -) - -// TopicPartition uniquely identifies a topic partition -type TopicPartition struct { - Topic string - Partition int32 -} - -// OffsetMetadata contains offset and associated metadata -type OffsetMetadata struct { - Offset int64 - Metadata string -} - -// String returns a string representation of TopicPartition -func (tp TopicPartition) String() string { - return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition) -} - -// OffsetStorage defines the interface for storing and retrieving consumer offsets -type OffsetStorage interface { - // CommitOffset commits an offset for a consumer group, topic, and partition - // offset is the next offset to read (Kafka convention) - // metadata is optional application-specific data - CommitOffset(group, topic string, partition int32, offset int64, metadata string) error - - // FetchOffset fetches the committed offset for a consumer group, topic, and partition - // Returns -1 if no offset has been committed - // Returns error if the group or topic doesn't exist (depending on implementation) - FetchOffset(group, topic string, partition int32) (int64, string, error) - - // FetchAllOffsets fetches all committed offsets for a consumer group - // Returns map of TopicPartition to OffsetMetadata - // Returns empty map if group doesn't exist - FetchAllOffsets(group string) (map[TopicPartition]OffsetMetadata, error) - - // DeleteGroup deletes all offset data for a consumer group - DeleteGroup(group string) error - - // ListGroups returns all consumer group IDs - ListGroups() ([]string, error) - - // Close releases any resources held by the storage - Close() error -} - -// Common errors -var ( - ErrGroupNotFound = fmt.Errorf("consumer group not found") - ErrOffsetNotFound = fmt.Errorf("offset not found") - ErrInvalidOffset = fmt.Errorf("invalid offset value") - ErrInvalidPartition = fmt.Errorf("invalid partition") - ErrStorageClosed = fmt.Errorf("storage is closed") -) - diff --git a/weed/mq/kafka/gateway/coordinator_registry.go b/weed/mq/kafka/gateway/coordinator_registry.go deleted file mode 100644 index eea1b1907..000000000 --- a/weed/mq/kafka/gateway/coordinator_registry.go +++ /dev/null @@ -1,805 +0,0 @@ -package gateway - -import ( - "context" - "encoding/json" - "fmt" - "hash/fnv" - "io" - "sort" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/protocol" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "google.golang.org/grpc" -) - -// CoordinatorRegistry manages consumer group coordinator assignments -// Only the gateway leader maintains this registry -type CoordinatorRegistry struct { - // Leader election - leaderLock *cluster.LiveLock - isLeader bool - leaderMutex sync.RWMutex - leadershipChange chan string // Notifies when leadership changes - - // No in-memory assignments - read/write directly to filer - // assignmentsMutex still needed for coordinating file operations - assignmentsMutex sync.RWMutex - - // Gateway registry - activeGateways map[string]*GatewayInfo // gatewayAddress -> info - gatewaysMutex sync.RWMutex - - // Configuration - gatewayAddress string - lockClient *cluster.LockClient - filerClientAccessor *filer_client.FilerClientAccessor - filerDiscoveryService *filer_client.FilerDiscoveryService - - // Control - stopChan chan struct{} - wg sync.WaitGroup -} - -// Remove local CoordinatorAssignment - use protocol.CoordinatorAssignment instead - -// GatewayInfo represents an active gateway instance -type GatewayInfo struct { - Address string - NodeID int32 - RegisteredAt time.Time - LastHeartbeat time.Time - IsHealthy bool -} - -const ( - GatewayLeaderLockKey = "kafka-gateway-leader" - HeartbeatInterval = 10 * time.Second - GatewayTimeout = 30 * time.Second - - // Filer paths for coordinator assignment persistence - CoordinatorAssignmentsDir = "/topics/kafka/.meta/coordinators" -) - -// NewCoordinatorRegistry creates a new coordinator registry -func NewCoordinatorRegistry(gatewayAddress string, masters []pb.ServerAddress, grpcDialOption grpc.DialOption) *CoordinatorRegistry { - // Create filer discovery service that will periodically refresh filers from all masters - filerDiscoveryService := filer_client.NewFilerDiscoveryService(masters, grpcDialOption) - - // Manually discover filers from each master until we find one - var seedFiler pb.ServerAddress - for _, master := range masters { - // Use the same discovery logic as filer_discovery.go - grpcAddr := master.ToGrpcAddress() - conn, err := grpc.NewClient(grpcAddr, grpcDialOption) - if err != nil { - continue - } - - client := master_pb.NewSeaweedClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - resp, err := client.ListClusterNodes(ctx, &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - cancel() - conn.Close() - - if err == nil && len(resp.ClusterNodes) > 0 { - // Found a filer - use its HTTP address (WithFilerClient will convert to gRPC automatically) - seedFiler = pb.ServerAddress(resp.ClusterNodes[0].Address) - glog.V(1).Infof("Using filer %s as seed for distributed locking (discovered from master %s)", seedFiler, master) - break - } - } - - lockClient := cluster.NewLockClient(grpcDialOption, seedFiler) - - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: gatewayAddress, - lockClient: lockClient, - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), // Buffered channel for leadership notifications - filerDiscoveryService: filerDiscoveryService, - } - - // Create filer client accessor that uses dynamic filer discovery - registry.filerClientAccessor = &filer_client.FilerClientAccessor{ - GetGrpcDialOption: func() grpc.DialOption { - return grpcDialOption - }, - GetFilers: func() []pb.ServerAddress { - return registry.filerDiscoveryService.GetFilers() - }, - } - - return registry -} - -// Start begins the coordinator registry operations -func (cr *CoordinatorRegistry) Start() error { - glog.V(1).Infof("Starting coordinator registry for gateway %s", cr.gatewayAddress) - - // Start filer discovery service first - if err := cr.filerDiscoveryService.Start(); err != nil { - return fmt.Errorf("failed to start filer discovery service: %w", err) - } - - // Start leader election - cr.startLeaderElection() - - // Start heartbeat loop to keep this gateway healthy - cr.startHeartbeatLoop() - - // Start cleanup goroutine - cr.startCleanupLoop() - - // Register this gateway - cr.registerGateway(cr.gatewayAddress) - - return nil -} - -// Stop shuts down the coordinator registry -func (cr *CoordinatorRegistry) Stop() error { - glog.V(1).Infof("Stopping coordinator registry for gateway %s", cr.gatewayAddress) - - close(cr.stopChan) - cr.wg.Wait() - - // Release leader lock if held - if cr.leaderLock != nil { - cr.leaderLock.Stop() - } - - // Stop filer discovery service - if err := cr.filerDiscoveryService.Stop(); err != nil { - glog.Warningf("Failed to stop filer discovery service: %v", err) - } - - return nil -} - -// startLeaderElection starts the leader election process -func (cr *CoordinatorRegistry) startLeaderElection() { - cr.wg.Add(1) - go func() { - defer cr.wg.Done() - - // Start long-lived lock for leader election - cr.leaderLock = cr.lockClient.StartLongLivedLock( - GatewayLeaderLockKey, - cr.gatewayAddress, - cr.onLeadershipChange, - ) - - // Wait for shutdown - <-cr.stopChan - - // The leader lock will be stopped when Stop() is called - }() -} - -// onLeadershipChange handles leadership changes -func (cr *CoordinatorRegistry) onLeadershipChange(newLeader string) { - cr.leaderMutex.Lock() - defer cr.leaderMutex.Unlock() - - wasLeader := cr.isLeader - cr.isLeader = (newLeader == cr.gatewayAddress) - - if cr.isLeader && !wasLeader { - glog.V(0).Infof("Gateway %s became the coordinator registry leader", cr.gatewayAddress) - cr.onBecameLeader() - } else if !cr.isLeader && wasLeader { - glog.V(0).Infof("Gateway %s lost coordinator registry leadership to %s", cr.gatewayAddress, newLeader) - cr.onLostLeadership() - } - - // Notify waiting goroutines about leadership change - select { - case cr.leadershipChange <- newLeader: - // Notification sent - default: - // Channel full, skip notification (shouldn't happen with buffered channel) - } -} - -// onBecameLeader handles becoming the leader -func (cr *CoordinatorRegistry) onBecameLeader() { - // Assignments are now read directly from files - no need to load into memory - glog.V(1).Info("Leader election complete - coordinator assignments will be read from filer as needed") - - // Clear gateway registry since it's ephemeral (gateways need to re-register) - cr.gatewaysMutex.Lock() - cr.activeGateways = make(map[string]*GatewayInfo) - cr.gatewaysMutex.Unlock() - - // Re-register this gateway - cr.registerGateway(cr.gatewayAddress) -} - -// onLostLeadership handles losing leadership -func (cr *CoordinatorRegistry) onLostLeadership() { - // No in-memory assignments to clear - assignments are stored in filer - glog.V(1).Info("Lost leadership - no longer managing coordinator assignments") -} - -// IsLeader returns whether this gateway is the coordinator registry leader -func (cr *CoordinatorRegistry) IsLeader() bool { - cr.leaderMutex.RLock() - defer cr.leaderMutex.RUnlock() - return cr.isLeader -} - -// GetLeaderAddress returns the current leader's address -func (cr *CoordinatorRegistry) GetLeaderAddress() string { - if cr.leaderLock != nil { - return cr.leaderLock.LockOwner() - } - return "" -} - -// WaitForLeader waits for a leader to be elected, with timeout -func (cr *CoordinatorRegistry) WaitForLeader(timeout time.Duration) (string, error) { - // Check if there's already a leader - if leader := cr.GetLeaderAddress(); leader != "" { - return leader, nil - } - - // Check if this instance is the leader - if cr.IsLeader() { - return cr.gatewayAddress, nil - } - - // Wait for leadership change notification - deadline := time.Now().Add(timeout) - for { - select { - case leader := <-cr.leadershipChange: - if leader != "" { - return leader, nil - } - case <-time.After(time.Until(deadline)): - return "", fmt.Errorf("timeout waiting for leader election after %v", timeout) - } - - // Double-check in case we missed a notification - if leader := cr.GetLeaderAddress(); leader != "" { - return leader, nil - } - if cr.IsLeader() { - return cr.gatewayAddress, nil - } - - if time.Now().After(deadline) { - break - } - } - - return "", fmt.Errorf("timeout waiting for leader election after %v", timeout) -} - -// AssignCoordinator assigns a coordinator for a consumer group using a balanced strategy. -// The coordinator is selected deterministically via consistent hashing of the -// consumer group across the set of healthy gateways. This spreads groups evenly -// and avoids hot-spotting on the first requester. -func (cr *CoordinatorRegistry) AssignCoordinator(consumerGroup string, requestingGateway string) (*protocol.CoordinatorAssignment, error) { - if !cr.IsLeader() { - return nil, fmt.Errorf("not the coordinator registry leader") - } - - // First check if requesting gateway is healthy without holding assignments lock - if !cr.isGatewayHealthy(requestingGateway) { - return nil, fmt.Errorf("requesting gateway %s is not healthy", requestingGateway) - } - - // Lock assignments mutex to coordinate file operations - cr.assignmentsMutex.Lock() - defer cr.assignmentsMutex.Unlock() - - // Check if coordinator already assigned by trying to load from file - existing, err := cr.loadCoordinatorAssignment(consumerGroup) - if err == nil && existing != nil { - // Assignment exists, check if coordinator is still healthy - if cr.isGatewayHealthy(existing.CoordinatorAddr) { - glog.V(2).Infof("Consumer group %s already has healthy coordinator %s", consumerGroup, existing.CoordinatorAddr) - return existing, nil - } else { - glog.V(1).Infof("Existing coordinator %s for group %s is unhealthy, reassigning", existing.CoordinatorAddr, consumerGroup) - // Delete the existing assignment file - if delErr := cr.deleteCoordinatorAssignment(consumerGroup); delErr != nil { - glog.Warningf("Failed to delete stale assignment for group %s: %v", consumerGroup, delErr) - } - } - } - - // Choose a balanced coordinator via consistent hashing across healthy gateways - chosenAddr, nodeID, err := cr.chooseCoordinatorAddrForGroup(consumerGroup) - if err != nil { - return nil, err - } - - assignment := &protocol.CoordinatorAssignment{ - ConsumerGroup: consumerGroup, - CoordinatorAddr: chosenAddr, - CoordinatorNodeID: nodeID, - AssignedAt: time.Now(), - LastHeartbeat: time.Now(), - } - - // Persist the new assignment to individual file - if err := cr.saveCoordinatorAssignment(consumerGroup, assignment); err != nil { - return nil, fmt.Errorf("failed to persist coordinator assignment for group %s: %w", consumerGroup, err) - } - - glog.V(1).Infof("Assigned coordinator %s (node %d) for consumer group %s via consistent hashing", chosenAddr, nodeID, consumerGroup) - return assignment, nil -} - -// GetCoordinator returns the coordinator for a consumer group -func (cr *CoordinatorRegistry) GetCoordinator(consumerGroup string) (*protocol.CoordinatorAssignment, error) { - if !cr.IsLeader() { - return nil, fmt.Errorf("not the coordinator registry leader") - } - - // Load assignment directly from file - assignment, err := cr.loadCoordinatorAssignment(consumerGroup) - if err != nil { - return nil, fmt.Errorf("no coordinator assigned for consumer group %s: %w", consumerGroup, err) - } - - return assignment, nil -} - -// RegisterGateway registers a gateway instance -func (cr *CoordinatorRegistry) RegisterGateway(gatewayAddress string) error { - if !cr.IsLeader() { - return fmt.Errorf("not the coordinator registry leader") - } - - cr.registerGateway(gatewayAddress) - return nil -} - -// registerGateway internal method to register a gateway -func (cr *CoordinatorRegistry) registerGateway(gatewayAddress string) { - cr.gatewaysMutex.Lock() - defer cr.gatewaysMutex.Unlock() - - nodeID := generateDeterministicNodeID(gatewayAddress) - - cr.activeGateways[gatewayAddress] = &GatewayInfo{ - Address: gatewayAddress, - NodeID: nodeID, - RegisteredAt: time.Now(), - LastHeartbeat: time.Now(), - IsHealthy: true, - } - - glog.V(1).Infof("Registered gateway %s with deterministic node ID %d", gatewayAddress, nodeID) -} - -// HeartbeatGateway updates the heartbeat for a gateway -func (cr *CoordinatorRegistry) HeartbeatGateway(gatewayAddress string) error { - if !cr.IsLeader() { - return fmt.Errorf("not the coordinator registry leader") - } - - cr.gatewaysMutex.Lock() - - if gateway, exists := cr.activeGateways[gatewayAddress]; exists { - gateway.LastHeartbeat = time.Now() - gateway.IsHealthy = true - cr.gatewaysMutex.Unlock() - glog.V(3).Infof("Updated heartbeat for gateway %s", gatewayAddress) - } else { - // Auto-register unknown gateway - unlock first to avoid double unlock - cr.gatewaysMutex.Unlock() - cr.registerGateway(gatewayAddress) - } - - return nil -} - -// isGatewayHealthy checks if a gateway is healthy -func (cr *CoordinatorRegistry) isGatewayHealthy(gatewayAddress string) bool { - cr.gatewaysMutex.RLock() - defer cr.gatewaysMutex.RUnlock() - - return cr.isGatewayHealthyUnsafe(gatewayAddress) -} - -// isGatewayHealthyUnsafe checks if a gateway is healthy without acquiring locks -// Caller must hold gatewaysMutex.RLock() or gatewaysMutex.Lock() -func (cr *CoordinatorRegistry) isGatewayHealthyUnsafe(gatewayAddress string) bool { - gateway, exists := cr.activeGateways[gatewayAddress] - if !exists { - return false - } - - return gateway.IsHealthy && time.Since(gateway.LastHeartbeat) < GatewayTimeout -} - -// getGatewayNodeID returns the node ID for a gateway -func (cr *CoordinatorRegistry) getGatewayNodeID(gatewayAddress string) int32 { - cr.gatewaysMutex.RLock() - defer cr.gatewaysMutex.RUnlock() - - return cr.getGatewayNodeIDUnsafe(gatewayAddress) -} - -// getGatewayNodeIDUnsafe returns the node ID for a gateway without acquiring locks -// Caller must hold gatewaysMutex.RLock() or gatewaysMutex.Lock() -func (cr *CoordinatorRegistry) getGatewayNodeIDUnsafe(gatewayAddress string) int32 { - if gateway, exists := cr.activeGateways[gatewayAddress]; exists { - return gateway.NodeID - } - - return 1 // Default node ID -} - -// getHealthyGatewaysSorted returns a stable-sorted list of healthy gateway addresses. -func (cr *CoordinatorRegistry) getHealthyGatewaysSorted() []string { - cr.gatewaysMutex.RLock() - defer cr.gatewaysMutex.RUnlock() - - addresses := make([]string, 0, len(cr.activeGateways)) - for addr, info := range cr.activeGateways { - if info.IsHealthy && time.Since(info.LastHeartbeat) < GatewayTimeout { - addresses = append(addresses, addr) - } - } - - sort.Strings(addresses) - return addresses -} - -// chooseCoordinatorAddrForGroup selects a coordinator address using consistent hashing. -func (cr *CoordinatorRegistry) chooseCoordinatorAddrForGroup(consumerGroup string) (string, int32, error) { - healthy := cr.getHealthyGatewaysSorted() - if len(healthy) == 0 { - return "", 0, fmt.Errorf("no healthy gateways available for coordinator assignment") - } - idx := hashStringToIndex(consumerGroup, len(healthy)) - addr := healthy[idx] - return addr, cr.getGatewayNodeID(addr), nil -} - -// hashStringToIndex hashes a string to an index in [0, modulo). -func hashStringToIndex(s string, modulo int) int { - if modulo <= 0 { - return 0 - } - h := fnv.New32a() - _, _ = h.Write([]byte(s)) - return int(h.Sum32() % uint32(modulo)) -} - -// generateDeterministicNodeID generates a stable node ID based on gateway address -func generateDeterministicNodeID(gatewayAddress string) int32 { - h := fnv.New32a() - _, _ = h.Write([]byte(gatewayAddress)) - // Use only positive values and avoid 0 - return int32(h.Sum32()&0x7fffffff) + 1 -} - -// startHeartbeatLoop starts the heartbeat loop for this gateway -func (cr *CoordinatorRegistry) startHeartbeatLoop() { - cr.wg.Add(1) - go func() { - defer cr.wg.Done() - - ticker := time.NewTicker(HeartbeatInterval / 2) // Send heartbeats more frequently than timeout - defer ticker.Stop() - - for { - select { - case <-cr.stopChan: - return - case <-ticker.C: - if cr.IsLeader() { - // Send heartbeat for this gateway to keep it healthy - if err := cr.HeartbeatGateway(cr.gatewayAddress); err != nil { - glog.V(2).Infof("Failed to send heartbeat for gateway %s: %v", cr.gatewayAddress, err) - } - } - } - } - }() -} - -// startCleanupLoop starts the cleanup loop for stale assignments and gateways -func (cr *CoordinatorRegistry) startCleanupLoop() { - cr.wg.Add(1) - go func() { - defer cr.wg.Done() - - ticker := time.NewTicker(HeartbeatInterval) - defer ticker.Stop() - - for { - select { - case <-cr.stopChan: - return - case <-ticker.C: - if cr.IsLeader() { - cr.cleanupStaleEntries() - } - } - } - }() -} - -// cleanupStaleEntries removes stale gateways and assignments -func (cr *CoordinatorRegistry) cleanupStaleEntries() { - now := time.Now() - - // First, identify stale gateways - var staleGateways []string - cr.gatewaysMutex.Lock() - for addr, gateway := range cr.activeGateways { - if now.Sub(gateway.LastHeartbeat) > GatewayTimeout { - staleGateways = append(staleGateways, addr) - } - } - // Remove stale gateways - for _, addr := range staleGateways { - glog.V(1).Infof("Removing stale gateway %s", addr) - delete(cr.activeGateways, addr) - } - cr.gatewaysMutex.Unlock() - - // Then, identify assignments with unhealthy coordinators and reassign them - cr.assignmentsMutex.Lock() - defer cr.assignmentsMutex.Unlock() - - // Get list of all consumer groups with assignments - consumerGroups, err := cr.listAllCoordinatorAssignments() - if err != nil { - glog.Warningf("Failed to list coordinator assignments during cleanup: %v", err) - return - } - - for _, group := range consumerGroups { - // Load assignment from file - assignment, err := cr.loadCoordinatorAssignment(group) - if err != nil { - glog.Warningf("Failed to load assignment for group %s during cleanup: %v", group, err) - continue - } - - // Check if coordinator is healthy - if !cr.isGatewayHealthy(assignment.CoordinatorAddr) { - glog.V(1).Infof("Coordinator %s for group %s is unhealthy, attempting reassignment", assignment.CoordinatorAddr, group) - - // Try to reassign to a healthy gateway - newAddr, newNodeID, err := cr.chooseCoordinatorAddrForGroup(group) - if err != nil { - // No healthy gateways available, remove the assignment for now - glog.Warningf("No healthy gateways available for reassignment of group %s, removing assignment", group) - if delErr := cr.deleteCoordinatorAssignment(group); delErr != nil { - glog.Warningf("Failed to delete assignment for group %s: %v", group, delErr) - } - } else if newAddr != assignment.CoordinatorAddr { - // Reassign to the new healthy coordinator - newAssignment := &protocol.CoordinatorAssignment{ - ConsumerGroup: group, - CoordinatorAddr: newAddr, - CoordinatorNodeID: newNodeID, - AssignedAt: time.Now(), - LastHeartbeat: time.Now(), - } - - // Save new assignment to file - if saveErr := cr.saveCoordinatorAssignment(group, newAssignment); saveErr != nil { - glog.Warningf("Failed to save reassignment for group %s: %v", group, saveErr) - } else { - glog.V(0).Infof("Reassigned coordinator for group %s from unhealthy %s to healthy %s", - group, assignment.CoordinatorAddr, newAddr) - } - } - } - } -} - -// GetStats returns registry statistics -func (cr *CoordinatorRegistry) GetStats() map[string]interface{} { - // Read counts separately to avoid holding locks while calling IsLeader() - cr.gatewaysMutex.RLock() - gatewayCount := len(cr.activeGateways) - cr.gatewaysMutex.RUnlock() - - // Count assignments from files - var assignmentCount int - if cr.IsLeader() { - consumerGroups, err := cr.listAllCoordinatorAssignments() - if err != nil { - glog.Warningf("Failed to count coordinator assignments: %v", err) - assignmentCount = -1 // Indicate error - } else { - assignmentCount = len(consumerGroups) - } - } else { - assignmentCount = 0 // Non-leader doesn't track assignments - } - - return map[string]interface{}{ - "is_leader": cr.IsLeader(), - "leader_address": cr.GetLeaderAddress(), - "active_gateways": gatewayCount, - "assignments": assignmentCount, - "gateway_address": cr.gatewayAddress, - } -} - -// Persistence methods for coordinator assignments - -// saveCoordinatorAssignment saves a single coordinator assignment to its individual file -func (cr *CoordinatorRegistry) saveCoordinatorAssignment(consumerGroup string, assignment *protocol.CoordinatorAssignment) error { - if !cr.IsLeader() { - // Only leader should save assignments - return nil - } - - return cr.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Convert assignment to JSON - assignmentData, err := json.Marshal(assignment) - if err != nil { - return fmt.Errorf("failed to marshal assignment for group %s: %w", consumerGroup, err) - } - - // Save to individual file: /topics/kafka/.meta/coordinators/_assignments.json - fileName := fmt.Sprintf("%s_assignments.json", consumerGroup) - return filer.SaveInsideFiler(client, CoordinatorAssignmentsDir, fileName, assignmentData) - }) -} - -// loadCoordinatorAssignment loads a single coordinator assignment from its individual file -func (cr *CoordinatorRegistry) loadCoordinatorAssignment(consumerGroup string) (*protocol.CoordinatorAssignment, error) { - return cr.loadCoordinatorAssignmentWithClient(consumerGroup, cr.filerClientAccessor) -} - -// loadCoordinatorAssignmentWithClient loads a single coordinator assignment using provided client -func (cr *CoordinatorRegistry) loadCoordinatorAssignmentWithClient(consumerGroup string, clientAccessor *filer_client.FilerClientAccessor) (*protocol.CoordinatorAssignment, error) { - var assignment *protocol.CoordinatorAssignment - - err := clientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Load from individual file: /topics/kafka/.meta/coordinators/_assignments.json - fileName := fmt.Sprintf("%s_assignments.json", consumerGroup) - data, err := filer.ReadInsideFiler(client, CoordinatorAssignmentsDir, fileName) - if err != nil { - return fmt.Errorf("assignment file not found for group %s: %w", consumerGroup, err) - } - - // Parse JSON - if err := json.Unmarshal(data, &assignment); err != nil { - return fmt.Errorf("failed to unmarshal assignment for group %s: %w", consumerGroup, err) - } - - return nil - }) - - if err != nil { - return nil, err - } - - return assignment, nil -} - -// listAllCoordinatorAssignments lists all coordinator assignment files -func (cr *CoordinatorRegistry) listAllCoordinatorAssignments() ([]string, error) { - var consumerGroups []string - - err := cr.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.ListEntriesRequest{ - Directory: CoordinatorAssignmentsDir, - } - - stream, streamErr := client.ListEntries(context.Background(), request) - if streamErr != nil { - // Directory might not exist yet, that's okay - return nil - } - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } - return fmt.Errorf("failed to receive entry: %v", recvErr) - } - - // Only include assignment files (ending with _assignments.json) - if resp.Entry != nil && !resp.Entry.IsDirectory && - strings.HasSuffix(resp.Entry.Name, "_assignments.json") { - // Extract consumer group name by removing _assignments.json suffix - consumerGroup := strings.TrimSuffix(resp.Entry.Name, "_assignments.json") - consumerGroups = append(consumerGroups, consumerGroup) - } - } - - return nil - }) - - if err != nil { - return nil, fmt.Errorf("failed to list coordinator assignments: %w", err) - } - - return consumerGroups, nil -} - -// deleteCoordinatorAssignment removes a coordinator assignment file -func (cr *CoordinatorRegistry) deleteCoordinatorAssignment(consumerGroup string) error { - if !cr.IsLeader() { - return nil - } - - return cr.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - fileName := fmt.Sprintf("%s_assignments.json", consumerGroup) - filePath := fmt.Sprintf("%s/%s", CoordinatorAssignmentsDir, fileName) - - _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: CoordinatorAssignmentsDir, - Name: fileName, - }) - - if err != nil { - return fmt.Errorf("failed to delete assignment file %s: %w", filePath, err) - } - - return nil - }) -} - -// ReassignCoordinator manually reassigns a coordinator for a consumer group -// This can be called when a coordinator gateway becomes unavailable -func (cr *CoordinatorRegistry) ReassignCoordinator(consumerGroup string) (*protocol.CoordinatorAssignment, error) { - if !cr.IsLeader() { - return nil, fmt.Errorf("not the coordinator registry leader") - } - - cr.assignmentsMutex.Lock() - defer cr.assignmentsMutex.Unlock() - - // Check if assignment exists by loading from file - existing, err := cr.loadCoordinatorAssignment(consumerGroup) - if err != nil { - return nil, fmt.Errorf("no existing assignment for consumer group %s: %w", consumerGroup, err) - } - - // Choose a new coordinator - newAddr, newNodeID, err := cr.chooseCoordinatorAddrForGroup(consumerGroup) - if err != nil { - return nil, fmt.Errorf("failed to choose new coordinator: %w", err) - } - - // Create new assignment - newAssignment := &protocol.CoordinatorAssignment{ - ConsumerGroup: consumerGroup, - CoordinatorAddr: newAddr, - CoordinatorNodeID: newNodeID, - AssignedAt: time.Now(), - LastHeartbeat: time.Now(), - } - - // Persist the new assignment to individual file - if err := cr.saveCoordinatorAssignment(consumerGroup, newAssignment); err != nil { - return nil, fmt.Errorf("failed to persist coordinator reassignment for group %s: %w", consumerGroup, err) - } - - glog.V(0).Infof("Manually reassigned coordinator for group %s from %s to %s", - consumerGroup, existing.CoordinatorAddr, newAddr) - - return newAssignment, nil -} diff --git a/weed/mq/kafka/gateway/coordinator_registry_test.go b/weed/mq/kafka/gateway/coordinator_registry_test.go deleted file mode 100644 index 9ce560cd1..000000000 --- a/weed/mq/kafka/gateway/coordinator_registry_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package gateway - -import ( - "testing" - "time" -) - -func TestCoordinatorRegistry_DeterministicNodeID(t *testing.T) { - // Test that node IDs are deterministic and stable - addr1 := "gateway1:9092" - addr2 := "gateway2:9092" - - id1a := generateDeterministicNodeID(addr1) - id1b := generateDeterministicNodeID(addr1) - id2 := generateDeterministicNodeID(addr2) - - if id1a != id1b { - t.Errorf("Node ID should be deterministic: %d != %d", id1a, id1b) - } - - if id1a == id2 { - t.Errorf("Different addresses should have different node IDs: %d == %d", id1a, id2) - } - - if id1a <= 0 || id2 <= 0 { - t.Errorf("Node IDs should be positive: %d, %d", id1a, id2) - } -} - -func TestCoordinatorRegistry_BasicOperations(t *testing.T) { - // Create a test registry without actual filer connection - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, // Simulate being leader for tests - } - - // Test gateway registration - gatewayAddr := "test-gateway:9092" - registry.registerGateway(gatewayAddr) - - if len(registry.activeGateways) != 1 { - t.Errorf("Expected 1 gateway, got %d", len(registry.activeGateways)) - } - - gateway, exists := registry.activeGateways[gatewayAddr] - if !exists { - t.Error("Gateway should be registered") - } - - if gateway.NodeID <= 0 { - t.Errorf("Gateway should have positive node ID, got %d", gateway.NodeID) - } - - // Test gateway health check - if !registry.isGatewayHealthyUnsafe(gatewayAddr) { - t.Error("Newly registered gateway should be healthy") - } - - // Test node ID retrieval - nodeID := registry.getGatewayNodeIDUnsafe(gatewayAddr) - if nodeID != gateway.NodeID { - t.Errorf("Expected node ID %d, got %d", gateway.NodeID, nodeID) - } -} - -func TestCoordinatorRegistry_AssignCoordinator(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - // Register a gateway - gatewayAddr := "test-gateway:9092" - registry.registerGateway(gatewayAddr) - - // Test coordinator assignment when not leader - registry.isLeader = false - _, err := registry.AssignCoordinator("test-group", gatewayAddr) - if err == nil { - t.Error("Should fail when not leader") - } - - // Test coordinator assignment when leader - // Note: This will panic due to no filer client, but we expect this in unit tests - registry.isLeader = true - func() { - defer func() { - if r := recover(); r == nil { - t.Error("Expected panic due to missing filer client") - } - }() - registry.AssignCoordinator("test-group", gatewayAddr) - }() - - // Test getting assignment when not leader - registry.isLeader = false - _, err = registry.GetCoordinator("test-group") - if err == nil { - t.Error("Should fail when not leader") - } -} - -func TestCoordinatorRegistry_HealthyGateways(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - // Register multiple gateways - gateways := []string{"gateway1:9092", "gateway2:9092", "gateway3:9092"} - for _, addr := range gateways { - registry.registerGateway(addr) - } - - // All should be healthy initially - healthy := registry.getHealthyGatewaysSorted() - if len(healthy) != len(gateways) { - t.Errorf("Expected %d healthy gateways, got %d", len(gateways), len(healthy)) - } - - // Make one gateway stale - registry.activeGateways["gateway2:9092"].LastHeartbeat = time.Now().Add(-2 * GatewayTimeout) - - healthy = registry.getHealthyGatewaysSorted() - if len(healthy) != len(gateways)-1 { - t.Errorf("Expected %d healthy gateways after one became stale, got %d", len(gateways)-1, len(healthy)) - } - - // Check that results are sorted - for i := 1; i < len(healthy); i++ { - if healthy[i-1] >= healthy[i] { - t.Errorf("Healthy gateways should be sorted: %v", healthy) - break - } - } -} - -func TestCoordinatorRegistry_ConsistentHashing(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - // Register multiple gateways - gateways := []string{"gateway1:9092", "gateway2:9092", "gateway3:9092"} - for _, addr := range gateways { - registry.registerGateway(addr) - } - - // Test that same group always gets same coordinator - group := "test-group" - addr1, nodeID1, err1 := registry.chooseCoordinatorAddrForGroup(group) - addr2, nodeID2, err2 := registry.chooseCoordinatorAddrForGroup(group) - - if err1 != nil || err2 != nil { - t.Errorf("Failed to choose coordinator: %v, %v", err1, err2) - } - - if addr1 != addr2 || nodeID1 != nodeID2 { - t.Errorf("Consistent hashing should return same result: (%s,%d) != (%s,%d)", - addr1, nodeID1, addr2, nodeID2) - } - - // Test that different groups can get different coordinators - groups := []string{"group1", "group2", "group3", "group4", "group5"} - coordinators := make(map[string]bool) - - for _, g := range groups { - addr, _, err := registry.chooseCoordinatorAddrForGroup(g) - if err != nil { - t.Errorf("Failed to choose coordinator for %s: %v", g, err) - } - coordinators[addr] = true - } - - // With multiple groups and gateways, we should see some distribution - // (though not guaranteed due to hashing) - if len(coordinators) == 1 && len(gateways) > 1 { - t.Log("Warning: All groups mapped to same coordinator (possible but unlikely)") - } -} - -func TestCoordinatorRegistry_CleanupStaleEntries(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - // Register gateways and create assignments - gateway1 := "gateway1:9092" - gateway2 := "gateway2:9092" - - registry.registerGateway(gateway1) - registry.registerGateway(gateway2) - - // Note: In the actual implementation, assignments are stored in filer. - // For this test, we'll skip assignment creation since we don't have a mock filer. - - // Make gateway2 stale - registry.activeGateways[gateway2].LastHeartbeat = time.Now().Add(-2 * GatewayTimeout) - - // Verify gateways are present before cleanup - if _, exists := registry.activeGateways[gateway1]; !exists { - t.Error("Gateway1 should be present before cleanup") - } - if _, exists := registry.activeGateways[gateway2]; !exists { - t.Error("Gateway2 should be present before cleanup") - } - - // Run cleanup - this will panic due to missing filer client, but that's expected - func() { - defer func() { - if r := recover(); r == nil { - t.Error("Expected panic due to missing filer client during cleanup") - } - }() - registry.cleanupStaleEntries() - }() - - // Note: Gateway cleanup assertions are skipped since cleanup panics due to missing filer client. - // In real usage, cleanup would remove stale gateways and handle filer-based assignment cleanup. -} - -func TestCoordinatorRegistry_GetStats(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - // Add some data - registry.registerGateway("gateway1:9092") - registry.registerGateway("gateway2:9092") - - // Note: Assignment creation is skipped since assignments are now stored in filer - - // GetStats will panic when trying to count assignments from filer - func() { - defer func() { - if r := recover(); r == nil { - t.Error("Expected panic due to missing filer client in GetStats") - } - }() - registry.GetStats() - }() - - // Note: Stats verification is skipped since GetStats panics due to missing filer client. - // In real usage, GetStats would return proper counts of gateways and assignments. -} - -func TestCoordinatorRegistry_HeartbeatGateway(t *testing.T) { - registry := &CoordinatorRegistry{ - activeGateways: make(map[string]*GatewayInfo), - gatewayAddress: "test-gateway:9092", - stopChan: make(chan struct{}), - leadershipChange: make(chan string, 10), - isLeader: true, - } - - gatewayAddr := "test-gateway:9092" - - // Test heartbeat for non-existent gateway (should auto-register) - err := registry.HeartbeatGateway(gatewayAddr) - if err != nil { - t.Errorf("Heartbeat should succeed and auto-register: %v", err) - } - - if len(registry.activeGateways) != 1 { - t.Errorf("Gateway should be auto-registered") - } - - // Test heartbeat for existing gateway - originalTime := registry.activeGateways[gatewayAddr].LastHeartbeat - time.Sleep(10 * time.Millisecond) // Ensure time difference - - err = registry.HeartbeatGateway(gatewayAddr) - if err != nil { - t.Errorf("Heartbeat should succeed: %v", err) - } - - newTime := registry.activeGateways[gatewayAddr].LastHeartbeat - if !newTime.After(originalTime) { - t.Error("Heartbeat should update LastHeartbeat time") - } - - // Test heartbeat when not leader - registry.isLeader = false - err = registry.HeartbeatGateway(gatewayAddr) - if err == nil { - t.Error("Heartbeat should fail when not leader") - } -} diff --git a/weed/mq/kafka/gateway/server.go b/weed/mq/kafka/gateway/server.go deleted file mode 100644 index 9f4e0c81f..000000000 --- a/weed/mq/kafka/gateway/server.go +++ /dev/null @@ -1,300 +0,0 @@ -package gateway - -import ( - "context" - "fmt" - "net" - "strconv" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/protocol" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" - "github.com/seaweedfs/seaweedfs/weed/pb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -// resolveAdvertisedAddress resolves the appropriate address to advertise to Kafka clients -// when the server binds to all interfaces (:: or 0.0.0.0) -func resolveAdvertisedAddress() string { - // Try to find a non-loopback interface - interfaces, err := net.Interfaces() - if err != nil { - glog.V(1).Infof("Failed to get network interfaces, using localhost: %v", err) - return "127.0.0.1" - } - - for _, iface := range interfaces { - // Skip loopback and inactive interfaces - if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { - continue - } - - addrs, err := iface.Addrs() - if err != nil { - continue - } - - for _, addr := range addrs { - if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() { - // Prefer IPv4 addresses for better Kafka client compatibility - if ipv4 := ipNet.IP.To4(); ipv4 != nil { - return ipv4.String() - } - } - } - } - - // Fallback to localhost if no suitable interface found - glog.V(1).Infof("No non-loopback interface found, using localhost") - return "127.0.0.1" -} - -type Options struct { - Listen string - Masters string // SeaweedFS master servers - FilerGroup string // filer group name (optional) - SchemaRegistryURL string // Schema Registry URL (optional) - DefaultPartitions int32 // Default number of partitions for new topics -} - -type Server struct { - opts Options - ln net.Listener - wg sync.WaitGroup - ctx context.Context - cancel context.CancelFunc - handler *protocol.Handler - coordinatorRegistry *CoordinatorRegistry -} - -func NewServer(opts Options) *Server { - ctx, cancel := context.WithCancel(context.Background()) - - var handler *protocol.Handler - var err error - - // Create SeaweedMQ handler - masters are required for production - if opts.Masters == "" { - glog.Fatalf("SeaweedMQ masters are required for Kafka gateway - provide masters addresses") - } - - // Use the intended listen address as the client host for master registration - clientHost := opts.Listen - if clientHost == "" { - clientHost = "127.0.0.1:9092" // Default Kafka port - } - - handler, err = protocol.NewSeaweedMQBrokerHandler(opts.Masters, opts.FilerGroup, clientHost) - if err != nil { - glog.Fatalf("Failed to create SeaweedMQ handler with masters %s: %v", opts.Masters, err) - } - - glog.V(1).Infof("Created Kafka gateway with SeaweedMQ brokers via masters %s", opts.Masters) - - // Initialize schema management if Schema Registry URL is provided - // Note: This is done lazily on first use if it fails here (e.g., if Schema Registry isn't ready yet) - if opts.SchemaRegistryURL != "" { - schemaConfig := schema.ManagerConfig{ - RegistryURL: opts.SchemaRegistryURL, - } - if err := handler.EnableSchemaManagement(schemaConfig); err != nil { - glog.Warningf("Schema management initialization deferred (Schema Registry may not be ready yet): %v", err) - glog.V(1).Infof("Will retry schema management initialization on first schema-related operation") - // Store schema registry URL for lazy initialization - handler.SetSchemaRegistryURL(opts.SchemaRegistryURL) - } else { - glog.V(1).Infof("Schema management enabled with Schema Registry at %s", opts.SchemaRegistryURL) - } - } - - server := &Server{ - opts: opts, - ctx: ctx, - cancel: cancel, - handler: handler, - } - - return server -} - -// NewTestServerForUnitTests creates a test server with a minimal mock handler for unit tests -// This allows basic gateway functionality testing without requiring SeaweedMQ masters -func NewTestServerForUnitTests(opts Options) *Server { - ctx, cancel := context.WithCancel(context.Background()) - - // Create a minimal handler with mock SeaweedMQ backend - handler := NewMinimalTestHandler() - - return &Server{ - opts: opts, - ctx: ctx, - cancel: cancel, - handler: handler, - } -} - -func (s *Server) Start() error { - ln, err := net.Listen("tcp", s.opts.Listen) - if err != nil { - return err - } - s.ln = ln - - // Get gateway address for coordinator registry - // CRITICAL FIX: Use the actual bound address from listener, not the requested listen address - // This is important when using port 0 (random port) for testing - actualListenAddr := s.ln.Addr().String() - host, port := s.handler.GetAdvertisedAddress(actualListenAddr) - gatewayAddress := fmt.Sprintf("%s:%d", host, port) - glog.V(1).Infof("Kafka gateway listening on %s, advertising as %s in Metadata responses", actualListenAddr, gatewayAddress) - - // Set gateway address in handler for coordinator registry - s.handler.SetGatewayAddress(gatewayAddress) - - // Initialize coordinator registry for distributed coordinator assignment (only if masters are configured) - if s.opts.Masters != "" { - // Parse all masters from the comma-separated list using pb.ServerAddresses - masters := pb.ServerAddresses(s.opts.Masters).ToAddresses() - - grpcDialOption := grpc.WithTransportCredentials(insecure.NewCredentials()) - - s.coordinatorRegistry = NewCoordinatorRegistry(gatewayAddress, masters, grpcDialOption) - s.handler.SetCoordinatorRegistry(s.coordinatorRegistry) - - // Start coordinator registry - if err := s.coordinatorRegistry.Start(); err != nil { - glog.Errorf("Failed to start coordinator registry: %v", err) - return err - } - - glog.V(1).Infof("Started coordinator registry for gateway %s", gatewayAddress) - } else { - glog.V(1).Infof("No masters configured, skipping coordinator registry setup (test mode)") - } - s.wg.Add(1) - go func() { - defer s.wg.Done() - for { - conn, err := s.ln.Accept() - if err != nil { - select { - case <-s.ctx.Done(): - return - default: - return - } - } - // Simple accept log to trace client connections (useful for JoinGroup debugging) - if conn != nil { - glog.V(1).Infof("accepted conn %s -> %s", conn.RemoteAddr(), conn.LocalAddr()) - } - s.wg.Add(1) - go func(c net.Conn) { - defer s.wg.Done() - if err := s.handler.HandleConn(s.ctx, c); err != nil { - glog.V(1).Infof("handle conn %v: %v", c.RemoteAddr(), err) - } - }(conn) - } - }() - return nil -} - -func (s *Server) Wait() error { - s.wg.Wait() - return nil -} - -func (s *Server) Close() error { - s.cancel() - - // Stop coordinator registry - if s.coordinatorRegistry != nil { - if err := s.coordinatorRegistry.Stop(); err != nil { - glog.Warningf("Error stopping coordinator registry: %v", err) - } - } - - if s.ln != nil { - _ = s.ln.Close() - } - - // Wait for goroutines to finish with a timeout to prevent hanging - done := make(chan struct{}) - go func() { - s.wg.Wait() - close(done) - }() - - select { - case <-done: - // Normal shutdown - case <-time.After(5 * time.Second): - // Timeout - force shutdown - glog.Warningf("Server shutdown timed out after 5 seconds, forcing close") - } - - // Close the handler (important for SeaweedMQ mode) - if s.handler != nil { - if err := s.handler.Close(); err != nil { - glog.Warningf("Error closing handler: %v", err) - } - } - - return nil -} - -// Removed registerWithBrokerLeader - no longer needed - -// Addr returns the bound address of the server listener, or empty if not started. -func (s *Server) Addr() string { - if s.ln == nil { - return "" - } - // Normalize to an address reachable by clients - host, port := s.GetListenerAddr() - return net.JoinHostPort(host, strconv.Itoa(port)) -} - -// GetHandler returns the protocol handler (for testing) -func (s *Server) GetHandler() *protocol.Handler { - return s.handler -} - -// GetListenerAddr returns the actual listening address and port -func (s *Server) GetListenerAddr() (string, int) { - if s.ln == nil { - // Return empty values to indicate address not available yet - // The caller should handle this appropriately - return "", 0 - } - - addr := s.ln.Addr().String() - // Parse [::]:port or host:port format - use exact match for kafka-go compatibility - if strings.HasPrefix(addr, "[::]:") { - port := strings.TrimPrefix(addr, "[::]:") - if p, err := strconv.Atoi(port); err == nil { - // Resolve appropriate address when bound to IPv6 all interfaces - return resolveAdvertisedAddress(), p - } - } - - // Handle host:port format - if host, port, err := net.SplitHostPort(addr); err == nil { - if p, err := strconv.Atoi(port); err == nil { - // Resolve appropriate address when bound to all interfaces - if host == "::" || host == "" || host == "0.0.0.0" { - host = resolveAdvertisedAddress() - } - return host, p - } - } - - // This should not happen if the listener was set up correctly - glog.Warningf("Unable to parse listener address: %s", addr) - return "", 0 -} diff --git a/weed/mq/kafka/gateway/test_mock_handler.go b/weed/mq/kafka/gateway/test_mock_handler.go deleted file mode 100644 index c01aac970..000000000 --- a/weed/mq/kafka/gateway/test_mock_handler.go +++ /dev/null @@ -1,229 +0,0 @@ -package gateway - -import ( - "context" - "fmt" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/protocol" - filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - schema_pb "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// mockRecord implements the SMQRecord interface for testing -type mockRecord struct { - key []byte - value []byte - timestamp int64 - offset int64 -} - -func (r *mockRecord) GetKey() []byte { return r.key } -func (r *mockRecord) GetValue() []byte { return r.value } -func (r *mockRecord) GetTimestamp() int64 { return r.timestamp } -func (r *mockRecord) GetOffset() int64 { return r.offset } - -// mockSeaweedMQHandler is a stateful mock for unit testing without real SeaweedMQ -type mockSeaweedMQHandler struct { - mu sync.RWMutex - topics map[string]*integration.KafkaTopicInfo - records map[string]map[int32][]integration.SMQRecord // topic -> partition -> records - offsets map[string]map[int32]int64 // topic -> partition -> next offset -} - -func newMockSeaweedMQHandler() *mockSeaweedMQHandler { - return &mockSeaweedMQHandler{ - topics: make(map[string]*integration.KafkaTopicInfo), - records: make(map[string]map[int32][]integration.SMQRecord), - offsets: make(map[string]map[int32]int64), - } -} - -func (m *mockSeaweedMQHandler) TopicExists(topic string) bool { - m.mu.RLock() - defer m.mu.RUnlock() - _, exists := m.topics[topic] - return exists -} - -func (m *mockSeaweedMQHandler) ListTopics() []string { - m.mu.RLock() - defer m.mu.RUnlock() - topics := make([]string, 0, len(m.topics)) - for topic := range m.topics { - topics = append(topics, topic) - } - return topics -} - -func (m *mockSeaweedMQHandler) CreateTopic(topic string, partitions int32) error { - m.mu.Lock() - defer m.mu.Unlock() - if _, exists := m.topics[topic]; exists { - return fmt.Errorf("topic already exists") - } - m.topics[topic] = &integration.KafkaTopicInfo{ - Name: topic, - Partitions: partitions, - } - return nil -} - -func (m *mockSeaweedMQHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error { - m.mu.Lock() - defer m.mu.Unlock() - if _, exists := m.topics[name]; exists { - return fmt.Errorf("topic already exists") - } - m.topics[name] = &integration.KafkaTopicInfo{ - Name: name, - Partitions: partitions, - } - return nil -} - -func (m *mockSeaweedMQHandler) DeleteTopic(topic string) error { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.topics, topic) - return nil -} - -func (m *mockSeaweedMQHandler) GetTopicInfo(topic string) (*integration.KafkaTopicInfo, bool) { - m.mu.RLock() - defer m.mu.RUnlock() - info, exists := m.topics[topic] - return info, exists -} - -func (m *mockSeaweedMQHandler) InvalidateTopicExistsCache(topic string) { - // Mock handler doesn't cache topic existence, so this is a no-op -} - -func (m *mockSeaweedMQHandler) ProduceRecord(ctx context.Context, topicName string, partitionID int32, key, value []byte) (int64, error) { - m.mu.Lock() - defer m.mu.Unlock() - - // Check if topic exists - if _, exists := m.topics[topicName]; !exists { - return 0, fmt.Errorf("topic does not exist: %s", topicName) - } - - // Initialize partition records if needed - if _, exists := m.records[topicName]; !exists { - m.records[topicName] = make(map[int32][]integration.SMQRecord) - m.offsets[topicName] = make(map[int32]int64) - } - - // Get next offset - offset := m.offsets[topicName][partitionID] - m.offsets[topicName][partitionID]++ - - - // Store record - record := &mockRecord{ - key: key, - value: value, - offset: offset, - } - m.records[topicName][partitionID] = append(m.records[topicName][partitionID], record) - - return offset, nil -} - -func (m *mockSeaweedMQHandler) ProduceRecordValue(ctx context.Context, topicName string, partitionID int32, key []byte, recordValueBytes []byte) (int64, error) { - return m.ProduceRecord(ctx, topicName, partitionID, key, recordValueBytes) -} - -func (m *mockSeaweedMQHandler) GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]integration.SMQRecord, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - // Check if topic exists - if _, exists := m.topics[topic]; !exists { - return nil, fmt.Errorf("topic does not exist: %s", topic) - } - - // Get partition records - partitionRecords, exists := m.records[topic][partition] - if !exists || len(partitionRecords) == 0 { - return []integration.SMQRecord{}, nil - } - - // Find records starting from fromOffset - result := make([]integration.SMQRecord, 0, maxRecords) - for _, record := range partitionRecords { - if record.GetOffset() >= fromOffset { - result = append(result, record) - if len(result) >= maxRecords { - break - } - } - } - - return result, nil -} - -func (m *mockSeaweedMQHandler) GetEarliestOffset(topic string, partition int32) (int64, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - // Check if topic exists - if _, exists := m.topics[topic]; !exists { - return 0, fmt.Errorf("topic does not exist: %s", topic) - } - - // Get partition records - partitionRecords, exists := m.records[topic][partition] - if !exists || len(partitionRecords) == 0 { - return 0, nil - } - - return partitionRecords[0].GetOffset(), nil -} - -func (m *mockSeaweedMQHandler) GetLatestOffset(topic string, partition int32) (int64, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - // Check if topic exists - if _, exists := m.topics[topic]; !exists { - return 0, fmt.Errorf("topic does not exist: %s", topic) - } - - // Return next offset (latest + 1) - if offsets, exists := m.offsets[topic]; exists { - return offsets[partition], nil - } - - return 0, nil -} - -func (m *mockSeaweedMQHandler) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return fmt.Errorf("mock handler: not implemented") -} - -func (m *mockSeaweedMQHandler) CreatePerConnectionBrokerClient() (*integration.BrokerClient, error) { - // Return a minimal broker client that won't actually connect - return nil, fmt.Errorf("mock handler: per-connection broker client not available in unit test mode") -} - -func (m *mockSeaweedMQHandler) GetFilerClientAccessor() *filer_client.FilerClientAccessor { - return nil -} - -func (m *mockSeaweedMQHandler) GetBrokerAddresses() []string { - return []string{"localhost:9092"} // Return a dummy broker address for unit tests -} - -func (m *mockSeaweedMQHandler) Close() error { return nil } - -func (m *mockSeaweedMQHandler) SetProtocolHandler(h integration.ProtocolHandler) {} - -// NewMinimalTestHandler creates a minimal handler for unit testing -// that won't actually process Kafka protocol requests -func NewMinimalTestHandler() *protocol.Handler { - return protocol.NewTestHandlerWithMock(newMockSeaweedMQHandler()) -} diff --git a/weed/mq/kafka/integration/broker_client.go b/weed/mq/kafka/integration/broker_client.go deleted file mode 100644 index c1f743f0b..000000000 --- a/weed/mq/kafka/integration/broker_client.go +++ /dev/null @@ -1,452 +0,0 @@ -package integration - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "strings" - "time" - - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// NewBrokerClientWithFilerAccessor creates a client with a shared filer accessor -func NewBrokerClientWithFilerAccessor(brokerAddress string, filerClientAccessor *filer_client.FilerClientAccessor) (*BrokerClient, error) { - ctx, cancel := context.WithCancel(context.Background()) - - // Use background context for gRPC connections to prevent them from being canceled - // when BrokerClient.Close() is called. This allows subscriber streams to continue - // operating even during client shutdown, which is important for testing scenarios. - dialCtx := context.Background() - - // CRITICAL FIX: Add timeout to dial context - // gRPC dial will retry with exponential backoff. Without a timeout, it hangs indefinitely - // if the broker is unreachable. Set a reasonable timeout for initial connection attempt. - dialCtx, dialCancel := context.WithTimeout(dialCtx, 30*time.Second) - defer dialCancel() - - // Connect to broker - // Load security configuration for broker connection - util.LoadSecurityConfiguration() - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq") - - conn, err := grpc.DialContext(dialCtx, brokerAddress, - grpcDialOption, - ) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to connect to broker %s: %v", brokerAddress, err) - } - - client := mq_pb.NewSeaweedMessagingClient(conn) - - return &BrokerClient{ - filerClientAccessor: filerClientAccessor, - brokerAddress: brokerAddress, - conn: conn, - client: client, - publishers: make(map[string]*BrokerPublisherSession), - subscribers: make(map[string]*BrokerSubscriberSession), - fetchRequests: make(map[string]*FetchRequest), - partitionAssignmentCache: make(map[string]*partitionAssignmentCacheEntry), - partitionAssignmentCacheTTL: 30 * time.Second, // Same as broker's cache TTL - ctx: ctx, - cancel: cancel, - }, nil -} - -// Close shuts down the broker client and all streams -func (bc *BrokerClient) Close() error { - bc.cancel() - - // Close all publisher streams - bc.publishersLock.Lock() - for key, session := range bc.publishers { - if session.Stream != nil { - _ = session.Stream.CloseSend() - } - delete(bc.publishers, key) - } - bc.publishersLock.Unlock() - - // Close all subscriber streams - bc.subscribersLock.Lock() - for key, session := range bc.subscribers { - if session.Stream != nil { - _ = session.Stream.CloseSend() - } - if session.Cancel != nil { - session.Cancel() - } - delete(bc.subscribers, key) - } - bc.subscribersLock.Unlock() - - return bc.conn.Close() -} - -// HealthCheck verifies the broker connection is working -func (bc *BrokerClient) HealthCheck() error { - // Create a timeout context for health check - ctx, cancel := context.WithTimeout(bc.ctx, 2*time.Second) - defer cancel() - - // Try to list topics as a health check - _, err := bc.client.ListTopics(ctx, &mq_pb.ListTopicsRequest{}) - if err != nil { - return fmt.Errorf("broker health check failed: %v", err) - } - - return nil -} - -// GetPartitionRangeInfo gets comprehensive range information from SeaweedMQ broker's native range manager -func (bc *BrokerClient) GetPartitionRangeInfo(topic string, partition int32) (*PartitionRangeInfo, error) { - - if bc.client == nil { - return nil, fmt.Errorf("broker client not connected") - } - - // Get the actual partition assignment from the broker instead of hardcoding - pbTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - } - - // Get the actual partition assignment for this Kafka partition - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get actual partition assignment: %v", err) - } - - // Call the broker's gRPC method - resp, err := bc.client.GetPartitionRangeInfo(context.Background(), &mq_pb.GetPartitionRangeInfoRequest{ - Topic: pbTopic, - Partition: actualPartition, - }) - if err != nil { - return nil, fmt.Errorf("failed to get partition range info from broker: %v", err) - } - - if resp.Error != "" { - return nil, fmt.Errorf("broker error: %s", resp.Error) - } - - // Extract offset range information - var earliestOffset, latestOffset, highWaterMark int64 - if resp.OffsetRange != nil { - earliestOffset = resp.OffsetRange.EarliestOffset - latestOffset = resp.OffsetRange.LatestOffset - highWaterMark = resp.OffsetRange.HighWaterMark - } - - // Extract timestamp range information - var earliestTimestampNs, latestTimestampNs int64 - if resp.TimestampRange != nil { - earliestTimestampNs = resp.TimestampRange.EarliestTimestampNs - latestTimestampNs = resp.TimestampRange.LatestTimestampNs - } - - info := &PartitionRangeInfo{ - EarliestOffset: earliestOffset, - LatestOffset: latestOffset, - HighWaterMark: highWaterMark, - EarliestTimestampNs: earliestTimestampNs, - LatestTimestampNs: latestTimestampNs, - RecordCount: resp.RecordCount, - ActiveSubscriptions: resp.ActiveSubscriptions, - } - - return info, nil -} - -// GetHighWaterMark gets the high water mark for a topic partition -func (bc *BrokerClient) GetHighWaterMark(topic string, partition int32) (int64, error) { - - // Primary approach: Use SeaweedMQ's native range manager via gRPC - info, err := bc.GetPartitionRangeInfo(topic, partition) - if err != nil { - // Fallback to chunk metadata approach - highWaterMark, err := bc.getHighWaterMarkFromChunkMetadata(topic, partition) - if err != nil { - return 0, err - } - return highWaterMark, nil - } - - return info.HighWaterMark, nil -} - -// GetEarliestOffset gets the earliest offset from SeaweedMQ broker's native offset manager -func (bc *BrokerClient) GetEarliestOffset(topic string, partition int32) (int64, error) { - - // Primary approach: Use SeaweedMQ's native range manager via gRPC - info, err := bc.GetPartitionRangeInfo(topic, partition) - if err != nil { - // Fallback to chunk metadata approach - earliestOffset, err := bc.getEarliestOffsetFromChunkMetadata(topic, partition) - if err != nil { - return 0, err - } - return earliestOffset, nil - } - - return info.EarliestOffset, nil -} - -// getOffsetRangeFromChunkMetadata reads chunk metadata to find both earliest and latest offsets -func (bc *BrokerClient) getOffsetRangeFromChunkMetadata(topic string, partition int32) (earliestOffset int64, highWaterMark int64, err error) { - if bc.filerClientAccessor == nil { - return 0, 0, fmt.Errorf("filer client not available") - } - - // Get the topic path and find the latest version - topicPath := fmt.Sprintf("/topics/kafka/%s", topic) - - // First, list the topic versions to find the latest - var latestVersion string - err = bc.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: topicPath, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if resp.Entry.IsDirectory && strings.HasPrefix(resp.Entry.Name, "v") { - if latestVersion == "" || resp.Entry.Name > latestVersion { - latestVersion = resp.Entry.Name - } - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to list topic versions: %v", err) - } - - if latestVersion == "" { - return 0, 0, nil - } - - // Find the partition directory - versionPath := fmt.Sprintf("%s/%s", topicPath, latestVersion) - var partitionDir string - err = bc.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: versionPath, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if resp.Entry.IsDirectory && strings.Contains(resp.Entry.Name, "-") { - partitionDir = resp.Entry.Name - break // Use the first partition directory we find - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to list partition directories: %v", err) - } - - if partitionDir == "" { - return 0, 0, nil - } - - // Scan all message files to find the highest offset_max and lowest offset_min - partitionPath := fmt.Sprintf("%s/%s", versionPath, partitionDir) - highWaterMark = 0 - earliestOffset = -1 // -1 indicates no data found yet - - err = bc.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: partitionPath, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - if !resp.Entry.IsDirectory && resp.Entry.Name != "checkpoint.offset" { - // Check for offset ranges in Extended attributes (both log files and parquet files) - if resp.Entry.Extended != nil { - // Track maximum offset for high water mark - if maxOffsetBytes, exists := resp.Entry.Extended[mq.ExtendedAttrOffsetMax]; exists && len(maxOffsetBytes) == 8 { - maxOffset := int64(binary.BigEndian.Uint64(maxOffsetBytes)) - if maxOffset > highWaterMark { - highWaterMark = maxOffset - } - } - - // Track minimum offset for earliest offset - if minOffsetBytes, exists := resp.Entry.Extended[mq.ExtendedAttrOffsetMin]; exists && len(minOffsetBytes) == 8 { - minOffset := int64(binary.BigEndian.Uint64(minOffsetBytes)) - if earliestOffset == -1 || minOffset < earliestOffset { - earliestOffset = minOffset - } - } - } - } - } - return nil - }) - if err != nil { - return 0, 0, fmt.Errorf("failed to scan message files: %v", err) - } - - // High water mark is the next offset after the highest written offset - if highWaterMark > 0 { - highWaterMark++ - } - - // If no data found, set earliest offset to 0 - if earliestOffset == -1 { - earliestOffset = 0 - } - - return earliestOffset, highWaterMark, nil -} - -// getHighWaterMarkFromChunkMetadata is a wrapper for backward compatibility -func (bc *BrokerClient) getHighWaterMarkFromChunkMetadata(topic string, partition int32) (int64, error) { - _, highWaterMark, err := bc.getOffsetRangeFromChunkMetadata(topic, partition) - return highWaterMark, err -} - -// getEarliestOffsetFromChunkMetadata gets the earliest offset from chunk metadata (fallback) -func (bc *BrokerClient) getEarliestOffsetFromChunkMetadata(topic string, partition int32) (int64, error) { - earliestOffset, _, err := bc.getOffsetRangeFromChunkMetadata(topic, partition) - return earliestOffset, err -} - -// GetFilerAddress returns the first filer address used by this broker client (for backward compatibility) -func (bc *BrokerClient) GetFilerAddress() string { - if bc.filerClientAccessor != nil && bc.filerClientAccessor.GetFilers != nil { - filers := bc.filerClientAccessor.GetFilers() - if len(filers) > 0 { - return string(filers[0]) - } - } - return "" -} - -// Delegate methods to the shared filer client accessor -func (bc *BrokerClient) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - return bc.filerClientAccessor.WithFilerClient(streamingMode, fn) -} - -func (bc *BrokerClient) GetFilers() []pb.ServerAddress { - return bc.filerClientAccessor.GetFilers() -} - -func (bc *BrokerClient) GetGrpcDialOption() grpc.DialOption { - return bc.filerClientAccessor.GetGrpcDialOption() -} - -// ListTopics gets all topics from SeaweedMQ broker (includes in-memory topics) -func (bc *BrokerClient) ListTopics() ([]string, error) { - if bc.client == nil { - return nil, fmt.Errorf("broker client not connected") - } - - ctx, cancel := context.WithTimeout(bc.ctx, 5*time.Second) - defer cancel() - - resp, err := bc.client.ListTopics(ctx, &mq_pb.ListTopicsRequest{}) - if err != nil { - return nil, fmt.Errorf("failed to list topics from broker: %v", err) - } - - var topics []string - for _, topic := range resp.Topics { - // Filter for kafka namespace topics - if topic.Namespace == "kafka" { - topics = append(topics, topic.Name) - } - } - - return topics, nil -} - -// GetTopicConfiguration gets topic configuration including partition count from the broker -func (bc *BrokerClient) GetTopicConfiguration(topicName string) (*mq_pb.GetTopicConfigurationResponse, error) { - if bc.client == nil { - return nil, fmt.Errorf("broker client not connected") - } - - ctx, cancel := context.WithTimeout(bc.ctx, 5*time.Second) - defer cancel() - - resp, err := bc.client.GetTopicConfiguration(ctx, &mq_pb.GetTopicConfigurationRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topicName, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to get topic configuration from broker: %v", err) - } - - return resp, nil -} - -// TopicExists checks if a topic exists in SeaweedMQ broker (includes in-memory topics) -func (bc *BrokerClient) TopicExists(topicName string) (bool, error) { - if bc.client == nil { - return false, fmt.Errorf("broker client not connected") - } - - ctx, cancel := context.WithTimeout(bc.ctx, 5*time.Second) - defer cancel() - - glog.V(2).Infof("[BrokerClient] TopicExists: Querying broker for topic %s", topicName) - resp, err := bc.client.TopicExists(ctx, &mq_pb.TopicExistsRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topicName, - }, - }) - if err != nil { - glog.V(1).Infof("[BrokerClient] TopicExists: ERROR for topic %s: %v", topicName, err) - return false, fmt.Errorf("failed to check topic existence: %v", err) - } - - glog.V(2).Infof("[BrokerClient] TopicExists: Topic %s exists=%v", topicName, resp.Exists) - return resp.Exists, nil -} diff --git a/weed/mq/kafka/integration/broker_client_fetch.go b/weed/mq/kafka/integration/broker_client_fetch.go deleted file mode 100644 index 016f8ccdf..000000000 --- a/weed/mq/kafka/integration/broker_client_fetch.go +++ /dev/null @@ -1,188 +0,0 @@ -package integration - -import ( - "context" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// FetchMessagesStateless fetches messages using the Kafka-style stateless FetchMessage RPC -// This is the long-term solution that eliminates all Subscribe loop complexity -// -// Benefits over SubscribeMessage: -// 1. No broker-side session state -// 2. No shared Subscribe loops -// 3. No stream corruption from concurrent seeks -// 4. Simple request/response pattern -// 5. Natural support for concurrent reads -// -// This is how Kafka works - completely stateless per-fetch -func (bc *BrokerClient) FetchMessagesStateless(ctx context.Context, topic string, partition int32, startOffset int64, maxRecords int, consumerGroup string, consumerID string) ([]*SeaweedRecord, error) { - glog.V(4).Infof("[FETCH-STATELESS] Fetching from %s-%d at offset %d, maxRecords=%d", - topic, partition, startOffset, maxRecords) - - // Get actual partition assignment from broker - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get partition assignment: %v", err) - } - - // Create FetchMessage request - req := &mq_pb.FetchMessageRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", // Kafka gateway always uses "kafka" namespace - Name: topic, - }, - Partition: actualPartition, - StartOffset: startOffset, - MaxMessages: int32(maxRecords), - MaxBytes: 4 * 1024 * 1024, // 4MB default - MaxWaitMs: 100, // 100ms wait for data (long poll) - MinBytes: 0, // Return immediately if any data available - ConsumerGroup: consumerGroup, - ConsumerId: consumerID, - } - - // Get timeout from context (set by Kafka fetch request) - // This respects the client's MaxWaitTime - // Note: We use a default of 100ms above, but if context has shorter timeout, use that - - // Call FetchMessage RPC (simple request/response) - resp, err := bc.client.FetchMessage(ctx, req) - if err != nil { - return nil, fmt.Errorf("FetchMessage RPC failed: %v", err) - } - - // Check for errors in response - if resp.Error != "" { - // Check if this is an "offset out of range" error - if resp.ErrorCode == 2 && resp.LogStartOffset > 0 && startOffset < resp.LogStartOffset { - // Offset too old - broker suggests starting from LogStartOffset - glog.V(3).Infof("[FETCH-STATELESS-CLIENT] Requested offset %d too old, adjusting to log start %d", - startOffset, resp.LogStartOffset) - - // Retry with adjusted offset - req.StartOffset = resp.LogStartOffset - resp, err = bc.client.FetchMessage(ctx, req) - if err != nil { - return nil, fmt.Errorf("FetchMessage RPC failed on retry: %v", err) - } - if resp.Error != "" { - return nil, fmt.Errorf("broker error on retry: %s (code=%d)", resp.Error, resp.ErrorCode) - } - // Continue with adjusted offset response - startOffset = resp.LogStartOffset - } else { - return nil, fmt.Errorf("broker error: %s (code=%d)", resp.Error, resp.ErrorCode) - } - } - - // CRITICAL: If broker returns 0 messages but hwm > startOffset, something is wrong - if len(resp.Messages) == 0 && resp.HighWaterMark > startOffset { - glog.Errorf("[FETCH-STATELESS-CLIENT] CRITICAL BUG: Broker returned 0 messages for %s[%d] offset %d, but HWM=%d (should have %d messages available)", - topic, partition, startOffset, resp.HighWaterMark, resp.HighWaterMark-startOffset) - glog.Errorf("[FETCH-STATELESS-CLIENT] This suggests broker's FetchMessage RPC is not returning data that exists!") - glog.Errorf("[FETCH-STATELESS-CLIENT] Broker metadata: logStart=%d, nextOffset=%d, endOfPartition=%v", - resp.LogStartOffset, resp.NextOffset, resp.EndOfPartition) - } - - // Convert protobuf messages to SeaweedRecord - records := make([]*SeaweedRecord, 0, len(resp.Messages)) - for i, msg := range resp.Messages { - record := &SeaweedRecord{ - Key: msg.Key, - Value: msg.Value, - Timestamp: msg.TsNs, - Offset: startOffset + int64(i), // Sequential offset assignment - } - records = append(records, record) - - // Log each message for debugging - glog.V(4).Infof("[FETCH-STATELESS-CLIENT] Message %d: offset=%d, keyLen=%d, valueLen=%d", - i, record.Offset, len(msg.Key), len(msg.Value)) - } - - if len(records) > 0 { - glog.V(3).Infof("[FETCH-STATELESS-CLIENT] Converted to %d SeaweedRecords, first offset=%d, last offset=%d", - len(records), records[0].Offset, records[len(records)-1].Offset) - } else { - glog.V(3).Infof("[FETCH-STATELESS-CLIENT] Converted to 0 SeaweedRecords") - } - - glog.V(4).Infof("[FETCH-STATELESS] Fetched %d records, nextOffset=%d, highWaterMark=%d, endOfPartition=%v", - len(records), resp.NextOffset, resp.HighWaterMark, resp.EndOfPartition) - - return records, nil -} - -// GetPartitionHighWaterMark returns the highest offset available in a partition -// This is useful for Kafka clients to track consumer lag -func (bc *BrokerClient) GetPartitionHighWaterMark(ctx context.Context, topic string, partition int32) (int64, error) { - // Use FetchMessage with 0 maxRecords to just get metadata - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return 0, fmt.Errorf("failed to get partition assignment: %v", err) - } - - req := &mq_pb.FetchMessageRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - Partition: actualPartition, - StartOffset: 0, - MaxMessages: 0, // Just get metadata - MaxBytes: 0, - MaxWaitMs: 0, // Return immediately - ConsumerGroup: "kafka-metadata", - ConsumerId: "hwm-check", - } - - resp, err := bc.client.FetchMessage(ctx, req) - if err != nil { - return 0, fmt.Errorf("FetchMessage RPC failed: %v", err) - } - - if resp.Error != "" { - return 0, fmt.Errorf("broker error: %s", resp.Error) - } - - return resp.HighWaterMark, nil -} - -// GetPartitionLogStartOffset returns the earliest offset available in a partition -// This is useful for Kafka clients to know the valid offset range -func (bc *BrokerClient) GetPartitionLogStartOffset(ctx context.Context, topic string, partition int32) (int64, error) { - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return 0, fmt.Errorf("failed to get partition assignment: %v", err) - } - - req := &mq_pb.FetchMessageRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - Partition: actualPartition, - StartOffset: 0, - MaxMessages: 0, - MaxBytes: 0, - MaxWaitMs: 0, - ConsumerGroup: "kafka-metadata", - ConsumerId: "lso-check", - } - - resp, err := bc.client.FetchMessage(ctx, req) - if err != nil { - return 0, fmt.Errorf("FetchMessage RPC failed: %v", err) - } - - if resp.Error != "" { - return 0, fmt.Errorf("broker error: %s", resp.Error) - } - - return resp.LogStartOffset, nil -} diff --git a/weed/mq/kafka/integration/broker_client_publish.go b/weed/mq/kafka/integration/broker_client_publish.go deleted file mode 100644 index 1ad64bc10..000000000 --- a/weed/mq/kafka/integration/broker_client_publish.go +++ /dev/null @@ -1,399 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// PublishRecord publishes a single record to SeaweedMQ broker -// ctx controls the publish timeout - if client cancels, publish operation is cancelled -func (bc *BrokerClient) PublishRecord(ctx context.Context, topic string, partition int32, key []byte, value []byte, timestamp int64) (int64, error) { - // Check context before starting - if err := ctx.Err(); err != nil { - return 0, fmt.Errorf("context cancelled before publish: %w", err) - } - - session, err := bc.getOrCreatePublisher(topic, partition) - if err != nil { - return 0, err - } - - if session.Stream == nil { - return 0, fmt.Errorf("publisher session stream cannot be nil") - } - - // CRITICAL: Lock to prevent concurrent Send/Recv causing response mix-ups - // Without this, two concurrent publishes can steal each other's offsets - session.mu.Lock() - defer session.mu.Unlock() - - // Check context after acquiring lock - if err := ctx.Err(); err != nil { - return 0, fmt.Errorf("context cancelled after lock: %w", err) - } - - // Send data message using broker API format - dataMsg := &mq_pb.DataMessage{ - Key: key, - Value: value, - TsNs: timestamp, - } - - // DEBUG: Log message being published for GitHub Actions debugging - valuePreview := "" - if len(dataMsg.Value) > 0 { - if len(dataMsg.Value) <= 50 { - valuePreview = string(dataMsg.Value) - } else { - valuePreview = fmt.Sprintf("%s...(total %d bytes)", string(dataMsg.Value[:50]), len(dataMsg.Value)) - } - } else { - valuePreview = "" - } - glog.V(1).Infof("[PUBLISH] topic=%s partition=%d key=%s valueLen=%d valuePreview=%q timestamp=%d", - topic, partition, string(key), len(value), valuePreview, timestamp) - - // CRITICAL: Use a goroutine with context checking to enforce timeout - // gRPC streams may not respect context deadlines automatically - // We need to monitor the context and timeout the operation if needed - sendErrChan := make(chan error, 1) - go func() { - sendErrChan <- session.Stream.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Data{ - Data: dataMsg, - }, - }) - }() - - select { - case err := <-sendErrChan: - if err != nil { - return 0, fmt.Errorf("failed to send data: %v", err) - } - case <-ctx.Done(): - return 0, fmt.Errorf("context cancelled while sending: %w", ctx.Err()) - } - - // Read acknowledgment with context timeout enforcement - recvErrChan := make(chan interface{}, 1) - go func() { - resp, err := session.Stream.Recv() - if err != nil { - recvErrChan <- err - } else { - recvErrChan <- resp - } - }() - - var resp *mq_pb.PublishMessageResponse - select { - case result := <-recvErrChan: - if err, isErr := result.(error); isErr { - return 0, fmt.Errorf("failed to receive ack: %v", err) - } - resp = result.(*mq_pb.PublishMessageResponse) - case <-ctx.Done(): - return 0, fmt.Errorf("context cancelled while receiving: %w", ctx.Err()) - } - - // Handle structured broker errors - if kafkaErrorCode, errorMsg, handleErr := HandleBrokerResponse(resp); handleErr != nil { - return 0, handleErr - } else if kafkaErrorCode != 0 { - // Return error with Kafka error code information for better debugging - return 0, fmt.Errorf("broker error (Kafka code %d): %s", kafkaErrorCode, errorMsg) - } - - // Use the assigned offset from SMQ, not the timestamp - glog.V(1).Infof("[PUBLISH_ACK] topic=%s partition=%d assignedOffset=%d", topic, partition, resp.AssignedOffset) - return resp.AssignedOffset, nil -} - -// PublishRecordValue publishes a RecordValue message to SeaweedMQ via broker -// ctx controls the publish timeout - if client cancels, publish operation is cancelled -func (bc *BrokerClient) PublishRecordValue(ctx context.Context, topic string, partition int32, key []byte, recordValueBytes []byte, timestamp int64) (int64, error) { - // Check context before starting - if err := ctx.Err(); err != nil { - return 0, fmt.Errorf("context cancelled before publish: %w", err) - } - - session, err := bc.getOrCreatePublisher(topic, partition) - if err != nil { - return 0, err - } - - if session.Stream == nil { - return 0, fmt.Errorf("publisher session stream cannot be nil") - } - - // CRITICAL: Lock to prevent concurrent Send/Recv causing response mix-ups - session.mu.Lock() - defer session.mu.Unlock() - - // Check context after acquiring lock - if err := ctx.Err(); err != nil { - return 0, fmt.Errorf("context cancelled after lock: %w", err) - } - - // Send data message with RecordValue in the Value field - dataMsg := &mq_pb.DataMessage{ - Key: key, - Value: recordValueBytes, // This contains the marshaled RecordValue - TsNs: timestamp, - } - - if err := session.Stream.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Data{ - Data: dataMsg, - }, - }); err != nil { - return 0, fmt.Errorf("failed to send RecordValue data: %v", err) - } - - // Read acknowledgment - resp, err := session.Stream.Recv() - if err != nil { - return 0, fmt.Errorf("failed to receive RecordValue ack: %v", err) - } - - // Handle structured broker errors - if kafkaErrorCode, errorMsg, handleErr := HandleBrokerResponse(resp); handleErr != nil { - return 0, handleErr - } else if kafkaErrorCode != 0 { - // Return error with Kafka error code information for better debugging - return 0, fmt.Errorf("RecordValue broker error (Kafka code %d): %s", kafkaErrorCode, errorMsg) - } - - // Use the assigned offset from SMQ, not the timestamp - return resp.AssignedOffset, nil -} - -// getOrCreatePublisher gets or creates a publisher stream for a topic-partition -func (bc *BrokerClient) getOrCreatePublisher(topic string, partition int32) (*BrokerPublisherSession, error) { - key := fmt.Sprintf("%s-%d", topic, partition) - - // Try to get existing publisher - bc.publishersLock.RLock() - if session, exists := bc.publishers[key]; exists { - bc.publishersLock.RUnlock() - return session, nil - } - bc.publishersLock.RUnlock() - - // CRITICAL FIX: Prevent multiple concurrent attempts to create the same publisher - // Use a creation lock that is specific to each topic-partition pair - // This ensures only ONE goroutine tries to create/initialize for each publisher - if bc.publisherCreationLocks == nil { - bc.publishersLock.Lock() - if bc.publisherCreationLocks == nil { - bc.publisherCreationLocks = make(map[string]*sync.Mutex) - } - bc.publishersLock.Unlock() - } - - bc.publishersLock.RLock() - creationLock, exists := bc.publisherCreationLocks[key] - if !exists { - // Need to create a creation lock for this topic-partition - bc.publishersLock.RUnlock() - bc.publishersLock.Lock() - // Double-check if someone else created it - if lock, exists := bc.publisherCreationLocks[key]; exists { - creationLock = lock - } else { - creationLock = &sync.Mutex{} - bc.publisherCreationLocks[key] = creationLock - } - bc.publishersLock.Unlock() - } else { - bc.publishersLock.RUnlock() - } - - // Acquire the creation lock - only ONE goroutine will proceed - creationLock.Lock() - defer creationLock.Unlock() - - // Double-check if publisher was created while we were waiting for the lock - bc.publishersLock.RLock() - if session, exists := bc.publishers[key]; exists { - bc.publishersLock.RUnlock() - return session, nil - } - bc.publishersLock.RUnlock() - - // Create the stream - stream, err := bc.client.PublishMessage(bc.ctx) - if err != nil { - return nil, fmt.Errorf("failed to create publish stream: %v", err) - } - - // Get the actual partition assignment from the broker - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get actual partition assignment: %v", err) - } - - // Send init message - if err := stream.Send(&mq_pb.PublishMessageRequest{ - Message: &mq_pb.PublishMessageRequest_Init{ - Init: &mq_pb.PublishMessageRequest_InitMessage{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - Partition: actualPartition, - AckInterval: 1, - PublisherName: "kafka-gateway", - }, - }, - }); err != nil { - return nil, fmt.Errorf("failed to send init message: %v", err) - } - - // Consume the "hello" message sent by broker after init - helloResp, err := stream.Recv() - if err != nil { - return nil, fmt.Errorf("failed to receive hello message: %v", err) - } - if helloResp.ErrorCode != 0 { - return nil, fmt.Errorf("broker init error (code %d): %s", helloResp.ErrorCode, helloResp.Error) - } - - session := &BrokerPublisherSession{ - Topic: topic, - Partition: partition, - Stream: stream, - } - - // Store in the map under the publishersLock - bc.publishersLock.Lock() - bc.publishers[key] = session - bc.publishersLock.Unlock() - - return session, nil -} - -// ClosePublisher closes a specific publisher session -func (bc *BrokerClient) ClosePublisher(topic string, partition int32) error { - key := fmt.Sprintf("%s-%d", topic, partition) - - bc.publishersLock.Lock() - defer bc.publishersLock.Unlock() - - session, exists := bc.publishers[key] - if !exists { - return nil // Already closed or never existed - } - - if session.Stream != nil { - session.Stream.CloseSend() - } - delete(bc.publishers, key) - return nil -} - -// getActualPartitionAssignment looks up the actual partition assignment from the broker configuration -// Uses cache to avoid expensive LookupTopicBrokers calls on every fetch (13.5% CPU overhead!) -func (bc *BrokerClient) getActualPartitionAssignment(topic string, kafkaPartition int32) (*schema_pb.Partition, error) { - // Check cache first - bc.partitionAssignmentCacheMu.RLock() - if entry, found := bc.partitionAssignmentCache[topic]; found { - if time.Now().Before(entry.expiresAt) { - assignments := entry.assignments - bc.partitionAssignmentCacheMu.RUnlock() - glog.V(4).Infof("Partition assignment cache HIT for topic %s", topic) - // Use cached assignments to find partition - return bc.findPartitionInAssignments(topic, kafkaPartition, assignments) - } - } - bc.partitionAssignmentCacheMu.RUnlock() - - // Cache miss or expired - lookup from broker - glog.V(4).Infof("Partition assignment cache MISS for topic %s, calling LookupTopicBrokers", topic) - lookupResp, err := bc.client.LookupTopicBrokers(bc.ctx, &mq_pb.LookupTopicBrokersRequest{ - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to lookup topic brokers: %v", err) - } - - if len(lookupResp.BrokerPartitionAssignments) == 0 { - return nil, fmt.Errorf("no partition assignments found for topic %s", topic) - } - - // Cache the assignments - bc.partitionAssignmentCacheMu.Lock() - bc.partitionAssignmentCache[topic] = &partitionAssignmentCacheEntry{ - assignments: lookupResp.BrokerPartitionAssignments, - expiresAt: time.Now().Add(bc.partitionAssignmentCacheTTL), - } - bc.partitionAssignmentCacheMu.Unlock() - glog.V(4).Infof("Cached partition assignments for topic %s", topic) - - // Use freshly fetched assignments to find partition - return bc.findPartitionInAssignments(topic, kafkaPartition, lookupResp.BrokerPartitionAssignments) -} - -// findPartitionInAssignments finds the SeaweedFS partition for a given Kafka partition ID -func (bc *BrokerClient) findPartitionInAssignments(topic string, kafkaPartition int32, assignments []*mq_pb.BrokerPartitionAssignment) (*schema_pb.Partition, error) { - totalPartitions := int32(len(assignments)) - if kafkaPartition >= totalPartitions { - return nil, fmt.Errorf("kafka partition %d out of range, topic %s has %d partitions", - kafkaPartition, topic, totalPartitions) - } - - // Calculate expected range for this Kafka partition based on actual partition count - // Ring is divided equally among partitions, with last partition getting any remainder - rangeSize := int32(pub_balancer.MaxPartitionCount) / totalPartitions - expectedRangeStart := kafkaPartition * rangeSize - var expectedRangeStop int32 - - if kafkaPartition == totalPartitions-1 { - // Last partition gets the remainder to fill the entire ring - expectedRangeStop = int32(pub_balancer.MaxPartitionCount) - } else { - expectedRangeStop = (kafkaPartition + 1) * rangeSize - } - - glog.V(2).Infof("Looking for Kafka partition %d in topic %s: expected range [%d, %d] out of %d partitions", - kafkaPartition, topic, expectedRangeStart, expectedRangeStop, totalPartitions) - - // Find the broker assignment that matches this range - for _, assignment := range assignments { - if assignment.Partition == nil { - continue - } - - // Check if this assignment's range matches our expected range - if assignment.Partition.RangeStart == expectedRangeStart && assignment.Partition.RangeStop == expectedRangeStop { - glog.V(1).Infof("found matching partition assignment for %s[%d]: {RingSize: %d, RangeStart: %d, RangeStop: %d, UnixTimeNs: %d}", - topic, kafkaPartition, assignment.Partition.RingSize, assignment.Partition.RangeStart, - assignment.Partition.RangeStop, assignment.Partition.UnixTimeNs) - return assignment.Partition, nil - } - } - - // If no exact match found, log all available assignments for debugging - glog.Warningf("no partition assignment found for Kafka partition %d in topic %s with expected range [%d, %d]", - kafkaPartition, topic, expectedRangeStart, expectedRangeStop) - glog.Warningf("Available assignments:") - for i, assignment := range assignments { - if assignment.Partition != nil { - glog.Warningf(" Assignment[%d]: {RangeStart: %d, RangeStop: %d, RingSize: %d}", - i, assignment.Partition.RangeStart, assignment.Partition.RangeStop, assignment.Partition.RingSize) - } - } - - return nil, fmt.Errorf("no broker assignment found for Kafka partition %d with expected range [%d, %d]", - kafkaPartition, expectedRangeStart, expectedRangeStop) -} diff --git a/weed/mq/kafka/integration/broker_client_restart_test.go b/weed/mq/kafka/integration/broker_client_restart_test.go deleted file mode 100644 index 3440b8478..000000000 --- a/weed/mq/kafka/integration/broker_client_restart_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package integration - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc/metadata" -) - -// MockSubscribeStream implements mq_pb.SeaweedMessaging_SubscribeMessageClient for testing -type MockSubscribeStream struct { - sendCalls []interface{} - closed bool -} - -func (m *MockSubscribeStream) Send(req *mq_pb.SubscribeMessageRequest) error { - m.sendCalls = append(m.sendCalls, req) - return nil -} - -func (m *MockSubscribeStream) Recv() (*mq_pb.SubscribeMessageResponse, error) { - return nil, nil -} - -func (m *MockSubscribeStream) CloseSend() error { - m.closed = true - return nil -} - -func (m *MockSubscribeStream) Header() (metadata.MD, error) { return nil, nil } -func (m *MockSubscribeStream) Trailer() metadata.MD { return nil } -func (m *MockSubscribeStream) Context() context.Context { return context.Background() } -func (m *MockSubscribeStream) SendMsg(m2 interface{}) error { return nil } -func (m *MockSubscribeStream) RecvMsg(m2 interface{}) error { return nil } - -// TestNeedsRestart tests the NeedsRestart logic -func TestNeedsRestart(t *testing.T) { - bc := &BrokerClient{} - - tests := []struct { - name string - session *BrokerSubscriberSession - requestedOffset int64 - want bool - reason string - }{ - { - name: "Stream is nil - needs restart", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: nil, - }, - requestedOffset: 100, - want: true, - reason: "Stream is nil", - }, - { - name: "Offset in cache - no restart needed", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - consumedRecords: []*SeaweedRecord{ - {Offset: 95}, - {Offset: 96}, - {Offset: 97}, - {Offset: 98}, - {Offset: 99}, - }, - }, - requestedOffset: 97, - want: false, - reason: "Offset 97 is in cache [95-99]", - }, - { - name: "Offset before current - needs restart", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - }, - requestedOffset: 50, - want: true, - reason: "Requested offset 50 < current 100", - }, - { - name: "Large gap ahead - needs restart", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - }, - requestedOffset: 2000, - want: true, - reason: "Gap of 1900 is > 1000", - }, - { - name: "Small gap ahead - no restart needed", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - }, - requestedOffset: 150, - want: false, - reason: "Gap of 50 is < 1000", - }, - { - name: "Exact match - no restart needed", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - }, - requestedOffset: 100, - want: false, - reason: "Exact match with current offset", - }, - { - name: "Context is nil - needs restart", - session: &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: nil, - }, - requestedOffset: 100, - want: true, - reason: "Context is nil", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := bc.NeedsRestart(tt.session, tt.requestedOffset) - if got != tt.want { - t.Errorf("NeedsRestart() = %v, want %v (reason: %s)", got, tt.want, tt.reason) - } - }) - } -} - -// TestNeedsRestart_CacheLogic tests cache-based restart decisions -func TestNeedsRestart_CacheLogic(t *testing.T) { - bc := &BrokerClient{} - - // Create session with cache containing offsets 100-109 - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 110, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - consumedRecords: []*SeaweedRecord{ - {Offset: 100}, {Offset: 101}, {Offset: 102}, {Offset: 103}, {Offset: 104}, - {Offset: 105}, {Offset: 106}, {Offset: 107}, {Offset: 108}, {Offset: 109}, - }, - } - - testCases := []struct { - offset int64 - want bool - desc string - }{ - {100, false, "First offset in cache"}, - {105, false, "Middle offset in cache"}, - {109, false, "Last offset in cache"}, - {99, true, "Before cache start"}, - {110, false, "Current position"}, - {111, false, "One ahead"}, - {1200, true, "Large gap > 1000"}, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - got := bc.NeedsRestart(session, tc.offset) - if got != tc.want { - t.Errorf("NeedsRestart(offset=%d) = %v, want %v (%s)", tc.offset, got, tc.want, tc.desc) - } - }) - } -} - -// TestNeedsRestart_EmptyCache tests behavior with empty cache -func TestNeedsRestart_EmptyCache(t *testing.T) { - bc := &BrokerClient{} - - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - consumedRecords: nil, // Empty cache - } - - tests := []struct { - offset int64 - want bool - desc string - }{ - {50, true, "Before current"}, - {100, false, "At current"}, - {150, false, "Small gap ahead"}, - {1200, true, "Large gap ahead"}, - } - - for _, tt := range tests { - t.Run(tt.desc, func(t *testing.T) { - got := bc.NeedsRestart(session, tt.offset) - if got != tt.want { - t.Errorf("NeedsRestart(offset=%d) = %v, want %v (%s)", tt.offset, got, tt.want, tt.desc) - } - }) - } -} - -// TestNeedsRestart_ThreadSafety tests concurrent access -func TestNeedsRestart_ThreadSafety(t *testing.T) { - bc := &BrokerClient{} - - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - } - - // Run many concurrent checks - done := make(chan bool) - for i := 0; i < 100; i++ { - go func(offset int64) { - bc.NeedsRestart(session, offset) - done <- true - }(int64(i)) - } - - // Wait for all to complete - for i := 0; i < 100; i++ { - <-done - } - - // Test passes if no panic/race condition -} - -// TestRestartSubscriber_StateManagement tests session state management -func TestRestartSubscriber_StateManagement(t *testing.T) { - oldStream := &MockSubscribeStream{} - oldCtx, oldCancel := context.WithCancel(context.Background()) - - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 100, - Stream: oldStream, - Ctx: oldCtx, - Cancel: oldCancel, - consumedRecords: []*SeaweedRecord{ - {Offset: 100, Key: []byte("key100"), Value: []byte("value100")}, - {Offset: 101, Key: []byte("key101"), Value: []byte("value101")}, - {Offset: 102, Key: []byte("key102"), Value: []byte("value102")}, - }, - nextOffsetToRead: 103, - } - - // Verify initial state - if len(session.consumedRecords) != 3 { - t.Errorf("Initial cache size = %d, want 3", len(session.consumedRecords)) - } - if session.nextOffsetToRead != 103 { - t.Errorf("Initial nextOffsetToRead = %d, want 103", session.nextOffsetToRead) - } - if session.StartOffset != 100 { - t.Errorf("Initial StartOffset = %d, want 100", session.StartOffset) - } - - // Note: Full RestartSubscriber testing requires gRPC mocking - // These tests verify the core state management and NeedsRestart logic -} - -// BenchmarkNeedsRestart_CacheHit benchmarks cache hit performance -func BenchmarkNeedsRestart_CacheHit(b *testing.B) { - bc := &BrokerClient{} - - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 1000, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - consumedRecords: make([]*SeaweedRecord, 100), - } - - for i := 0; i < 100; i++ { - session.consumedRecords[i] = &SeaweedRecord{Offset: int64(i)} - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - bc.NeedsRestart(session, 50) // Hit cache - } -} - -// BenchmarkNeedsRestart_CacheMiss benchmarks cache miss performance -func BenchmarkNeedsRestart_CacheMiss(b *testing.B) { - bc := &BrokerClient{} - - session := &BrokerSubscriberSession{ - Topic: "test-topic", - Partition: 0, - StartOffset: 1000, - Stream: &MockSubscribeStream{}, - Ctx: context.Background(), - consumedRecords: make([]*SeaweedRecord, 100), - } - - for i := 0; i < 100; i++ { - session.consumedRecords[i] = &SeaweedRecord{Offset: int64(i)} - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - bc.NeedsRestart(session, 500) // Miss cache (within gap threshold) - } -} diff --git a/weed/mq/kafka/integration/broker_client_subscribe.go b/weed/mq/kafka/integration/broker_client_subscribe.go deleted file mode 100644 index e9884ea4d..000000000 --- a/weed/mq/kafka/integration/broker_client_subscribe.go +++ /dev/null @@ -1,1246 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// createSubscribeInitMessage creates a subscribe init message with the given parameters -func createSubscribeInitMessage(topic string, actualPartition *schema_pb.Partition, startOffset int64, offsetType schema_pb.OffsetType, consumerGroup string, consumerID string) *mq_pb.SubscribeMessageRequest { - return &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Init{ - Init: &mq_pb.SubscribeMessageRequest_InitMessage{ - ConsumerGroup: consumerGroup, - ConsumerId: consumerID, - ClientId: "kafka-gateway", - Topic: &schema_pb.Topic{ - Namespace: "kafka", - Name: topic, - }, - PartitionOffset: &schema_pb.PartitionOffset{ - Partition: actualPartition, - StartTsNs: 0, - StartOffset: startOffset, - }, - OffsetType: offsetType, - SlidingWindowSize: 10, - }, - }, - } -} - -// CreateFreshSubscriber creates a new subscriber session without caching -// This ensures each fetch gets fresh data from the requested offset -// consumerGroup and consumerID are passed from Kafka client for proper tracking in SMQ -func (bc *BrokerClient) CreateFreshSubscriber(topic string, partition int32, startOffset int64, consumerGroup string, consumerID string) (*BrokerSubscriberSession, error) { - // Use BrokerClient's context so subscriber is cancelled when connection closes - subscriberCtx, subscriberCancel := context.WithCancel(bc.ctx) - - stream, err := bc.client.SubscribeMessage(subscriberCtx) - if err != nil { - return nil, fmt.Errorf("failed to create subscribe stream: %v", err) - } - - // Get the actual partition assignment from the broker - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get actual partition assignment for subscribe: %v", err) - } - - // Use EXACT_OFFSET to read from the specific offset - offsetType := schema_pb.OffsetType_EXACT_OFFSET - - // Send init message to start subscription with Kafka client's consumer group and ID - initReq := createSubscribeInitMessage(topic, actualPartition, startOffset, offsetType, consumerGroup, consumerID) - - glog.V(4).Infof("[SUBSCRIBE-INIT] CreateFreshSubscriber sending init: topic=%s partition=%d startOffset=%d offsetType=%v consumerGroup=%s consumerID=%s", - topic, partition, startOffset, offsetType, consumerGroup, consumerID) - - if err := stream.Send(initReq); err != nil { - return nil, fmt.Errorf("failed to send subscribe init: %v", err) - } - - // IMPORTANT: Don't wait for init response here! - // The broker may send the first data record as the "init response" - // If we call Recv() here, we'll consume that first record and ReadRecords will block - // waiting for the second record, causing a 30-second timeout. - // Instead, let ReadRecords handle all Recv() calls. - - session := &BrokerSubscriberSession{ - Stream: stream, - Topic: topic, - Partition: partition, - StartOffset: startOffset, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - Ctx: subscriberCtx, - Cancel: subscriberCancel, - } - - return session, nil -} - -// GetOrCreateSubscriber gets or creates a subscriber for offset tracking -func (bc *BrokerClient) GetOrCreateSubscriber(topic string, partition int32, startOffset int64, consumerGroup string, consumerID string) (*BrokerSubscriberSession, error) { - // Create a temporary session to generate the key - tempSession := &BrokerSubscriberSession{ - Topic: topic, - Partition: partition, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - } - key := tempSession.Key() - - bc.subscribersLock.RLock() - if session, exists := bc.subscribers[key]; exists { - // Check if we can reuse the existing session - session.mu.Lock() - currentOffset := session.StartOffset - - // Check cache to see what offsets are available - canUseCache := false - if len(session.consumedRecords) > 0 { - cacheStartOffset := session.consumedRecords[0].Offset - cacheEndOffset := session.consumedRecords[len(session.consumedRecords)-1].Offset - if startOffset >= cacheStartOffset && startOffset <= cacheEndOffset { - canUseCache = true - } - } - session.mu.Unlock() - - // With seekable broker: Always reuse existing session - // Any offset mismatch will be handled by FetchRecords via SeekMessage - // This includes: - // 1. Forward read: Natural continuation - // 2. Backward read with cache hit: Serve from cache - // 3. Backward read without cache: Send seek message to broker - // No need for stream recreation - broker repositions internally - - bc.subscribersLock.RUnlock() - - if canUseCache { - glog.V(4).Infof("[FETCH] Reusing session for %s: session at %d, requested %d (cached)", - key, currentOffset, startOffset) - } else if startOffset >= currentOffset { - glog.V(4).Infof("[FETCH] Reusing session for %s: session at %d, requested %d (forward read)", - key, currentOffset, startOffset) - } else { - glog.V(4).Infof("[FETCH] Reusing session for %s: session at %d, requested %d (will seek backward)", - key, currentOffset, startOffset) - } - return session, nil - } - - // Session doesn't exist - need to create one - bc.subscribersLock.RUnlock() - - // Create new subscriber stream - // Need to acquire write lock since we don't have it from the paths above - bc.subscribersLock.Lock() - defer bc.subscribersLock.Unlock() - - // Double-check if session was created by another thread while we were acquiring the lock - if session, exists := bc.subscribers[key]; exists { - // With seekable broker, always reuse existing session - // FetchRecords will handle any offset mismatch via seek - session.mu.Lock() - existingOffset := session.StartOffset - session.mu.Unlock() - - glog.V(3).Infof("[FETCH] Session created concurrently at offset %d (requested %d), reusing", existingOffset, startOffset) - return session, nil - } - - // Use BrokerClient's context so subscribers are automatically cancelled when connection closes - // This ensures proper cleanup without artificial timeouts - subscriberCtx, subscriberCancel := context.WithCancel(bc.ctx) - - stream, err := bc.client.SubscribeMessage(subscriberCtx) - if err != nil { - return nil, fmt.Errorf("failed to create subscribe stream: %v", err) - } - - // Get the actual partition assignment from the broker instead of using Kafka partition mapping - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - return nil, fmt.Errorf("failed to get actual partition assignment for subscribe: %v", err) - } - - // Convert Kafka offset to appropriate SeaweedMQ OffsetType - var offsetType schema_pb.OffsetType - var offsetValue int64 - - if startOffset == -1 { - // Kafka offset -1 typically means "latest" - offsetType = schema_pb.OffsetType_RESET_TO_LATEST - offsetValue = 0 // Not used with RESET_TO_LATEST - glog.V(2).Infof("Using RESET_TO_LATEST for Kafka offset -1 (read latest)") - } else { - // CRITICAL FIX: Use EXACT_OFFSET to position subscriber at the exact Kafka offset - // This allows the subscriber to read from both buffer and disk at the correct position - offsetType = schema_pb.OffsetType_EXACT_OFFSET - offsetValue = startOffset // Use the exact Kafka offset - glog.V(2).Infof("Using EXACT_OFFSET for Kafka offset %d (direct positioning)", startOffset) - } - - glog.V(2).Infof("Creating subscriber for topic=%s partition=%d: Kafka offset %d -> SeaweedMQ %s", - topic, partition, startOffset, offsetType) - - glog.V(4).Infof("[SUBSCRIBE-INIT] GetOrCreateSubscriber sending init: topic=%s partition=%d startOffset=%d offsetType=%v consumerGroup=%s consumerID=%s", - topic, partition, offsetValue, offsetType, consumerGroup, consumerID) - - // Send init message using the actual partition structure that the broker allocated - initReq := createSubscribeInitMessage(topic, actualPartition, offsetValue, offsetType, consumerGroup, consumerID) - if err := stream.Send(initReq); err != nil { - return nil, fmt.Errorf("failed to send subscribe init: %v", err) - } - - session := &BrokerSubscriberSession{ - Topic: topic, - Partition: partition, - Stream: stream, - StartOffset: startOffset, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - Ctx: subscriberCtx, - Cancel: subscriberCancel, - } - - bc.subscribers[key] = session - glog.V(2).Infof("Created subscriber session for %s with context cancellation support", key) - return session, nil -} - -// createTemporarySubscriber creates a fresh subscriber for a single fetch operation -// This is used by the stateless fetch approach to eliminate concurrent access issues -// The subscriber is NOT stored in bc.subscribers and must be cleaned up by the caller -func (bc *BrokerClient) createTemporarySubscriber(topic string, partition int32, startOffset int64, consumerGroup string, consumerID string) (*BrokerSubscriberSession, error) { - glog.V(4).Infof("[STATELESS] Creating temporary subscriber for %s-%d at offset %d", topic, partition, startOffset) - - // Create context for this temporary subscriber - ctx, cancel := context.WithCancel(bc.ctx) - - // Create gRPC stream - stream, err := bc.client.SubscribeMessage(ctx) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to create subscribe stream: %v", err) - } - - // Get the actual partition assignment from the broker - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to get actual partition assignment: %v", err) - } - - // Convert Kafka offset to appropriate SeaweedMQ OffsetType - var offsetType schema_pb.OffsetType - var offsetValue int64 - - if startOffset == -1 { - offsetType = schema_pb.OffsetType_RESET_TO_LATEST - offsetValue = 0 - glog.V(4).Infof("[STATELESS] Using RESET_TO_LATEST for Kafka offset -1") - } else { - offsetType = schema_pb.OffsetType_EXACT_OFFSET - offsetValue = startOffset - glog.V(4).Infof("[STATELESS] Using EXACT_OFFSET for Kafka offset %d", startOffset) - } - - // Send init message - initReq := createSubscribeInitMessage(topic, actualPartition, offsetValue, offsetType, consumerGroup, consumerID) - if err := stream.Send(initReq); err != nil { - cancel() - return nil, fmt.Errorf("failed to send subscribe init: %v", err) - } - - // Create temporary session (not stored in bc.subscribers) - session := &BrokerSubscriberSession{ - Topic: topic, - Partition: partition, - Stream: stream, - StartOffset: startOffset, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - Ctx: ctx, - Cancel: cancel, - } - - glog.V(4).Infof("[STATELESS] Created temporary subscriber for %s-%d starting at offset %d", topic, partition, startOffset) - return session, nil -} - -// createSubscriberSession creates a new subscriber session with proper initialization -// This is used by the hybrid approach for initial connections and backward seeks -func (bc *BrokerClient) createSubscriberSession(topic string, partition int32, startOffset int64, consumerGroup string, consumerID string) (*BrokerSubscriberSession, error) { - glog.V(4).Infof("[HYBRID-SESSION] Creating subscriber session for %s-%d at offset %d", topic, partition, startOffset) - - // Create context for this subscriber - ctx, cancel := context.WithCancel(bc.ctx) - - // Create gRPC stream - stream, err := bc.client.SubscribeMessage(ctx) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to create subscribe stream: %v", err) - } - - // Get the actual partition assignment from the broker - actualPartition, err := bc.getActualPartitionAssignment(topic, partition) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to get actual partition assignment: %v", err) - } - - // Convert Kafka offset to appropriate SeaweedMQ OffsetType - var offsetType schema_pb.OffsetType - var offsetValue int64 - - if startOffset == -1 { - offsetType = schema_pb.OffsetType_RESET_TO_LATEST - offsetValue = 0 - glog.V(4).Infof("[HYBRID-SESSION] Using RESET_TO_LATEST for Kafka offset -1") - } else { - offsetType = schema_pb.OffsetType_EXACT_OFFSET - offsetValue = startOffset - glog.V(4).Infof("[HYBRID-SESSION] Using EXACT_OFFSET for Kafka offset %d", startOffset) - } - - // Send init message - initReq := createSubscribeInitMessage(topic, actualPartition, offsetValue, offsetType, consumerGroup, consumerID) - if err := stream.Send(initReq); err != nil { - cancel() - return nil, fmt.Errorf("failed to send subscribe init: %v", err) - } - - // Create session with proper initialization - session := &BrokerSubscriberSession{ - Topic: topic, - Partition: partition, - Stream: stream, - StartOffset: startOffset, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - Ctx: ctx, - Cancel: cancel, - consumedRecords: nil, - nextOffsetToRead: startOffset, - lastReadOffset: startOffset - 1, // Will be updated after first read - initialized: false, - } - - glog.V(4).Infof("[HYBRID-SESSION] Created subscriber session for %s-%d starting at offset %d", topic, partition, startOffset) - return session, nil -} - -// serveFromCache serves records from the session's cache -func (bc *BrokerClient) serveFromCache(session *BrokerSubscriberSession, requestedOffset int64, maxRecords int) []*SeaweedRecord { - // Find the start index in cache - startIdx := -1 - for i, record := range session.consumedRecords { - if record.Offset == requestedOffset { - startIdx = i - break - } - } - - if startIdx == -1 { - // Offset not found in cache (shouldn't happen if caller checked properly) - return nil - } - - // Calculate end index - endIdx := startIdx + maxRecords - if endIdx > len(session.consumedRecords) { - endIdx = len(session.consumedRecords) - } - - // Return slice from cache - result := session.consumedRecords[startIdx:endIdx] - glog.V(4).Infof("[HYBRID-CACHE] Served %d records from cache (requested %d, offset %d)", - len(result), maxRecords, requestedOffset) - return result -} - -// readRecordsFromSession reads records from the session's stream -func (bc *BrokerClient) readRecordsFromSession(ctx context.Context, session *BrokerSubscriberSession, startOffset int64, maxRecords int) ([]*SeaweedRecord, error) { - glog.V(4).Infof("[HYBRID-READ] Reading from stream: offset=%d maxRecords=%d", startOffset, maxRecords) - - records := make([]*SeaweedRecord, 0, maxRecords) - currentOffset := startOffset - - // Read until we have enough records or timeout - for len(records) < maxRecords { - // Check context timeout - select { - case <-ctx.Done(): - // Timeout or cancellation - return what we have - glog.V(4).Infof("[HYBRID-READ] Context done, returning %d records", len(records)) - return records, nil - default: - } - - // Read from stream with timeout - resp, err := session.Stream.Recv() - if err != nil { - if err == io.EOF { - glog.V(4).Infof("[HYBRID-READ] Stream closed (EOF), returning %d records", len(records)) - return records, nil - } - return nil, fmt.Errorf("failed to receive from stream: %v", err) - } - - // Handle data message - if dataMsg := resp.GetData(); dataMsg != nil { - record := &SeaweedRecord{ - Key: dataMsg.Key, - Value: dataMsg.Value, - Timestamp: dataMsg.TsNs, - Offset: currentOffset, - } - records = append(records, record) - currentOffset++ - - // Auto-acknowledge to prevent throttling - ackReq := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Ack{ - Ack: &mq_pb.SubscribeMessageRequest_AckMessage{ - Key: dataMsg.Key, - TsNs: dataMsg.TsNs, - }, - }, - } - if err := session.Stream.Send(ackReq); err != nil { - if err != io.EOF { - glog.Warningf("[HYBRID-READ] Failed to send ack (non-critical): %v", err) - } - } - } - - // Handle control messages - if ctrlMsg := resp.GetCtrl(); ctrlMsg != nil { - if ctrlMsg.Error != "" { - // Error message from broker - return nil, fmt.Errorf("broker error: %s", ctrlMsg.Error) - } - if ctrlMsg.IsEndOfStream { - glog.V(4).Infof("[HYBRID-READ] End of stream, returning %d records", len(records)) - return records, nil - } - if ctrlMsg.IsEndOfTopic { - glog.V(4).Infof("[HYBRID-READ] End of topic, returning %d records", len(records)) - return records, nil - } - // Empty control message (e.g., seek ack) - continue reading - glog.V(4).Infof("[HYBRID-READ] Received control message (seek ack?), continuing") - continue - } - } - - glog.V(4).Infof("[HYBRID-READ] Read %d records successfully", len(records)) - - // Update cache - session.consumedRecords = append(session.consumedRecords, records...) - // Limit cache size to prevent unbounded growth - const maxCacheSize = 10000 - if len(session.consumedRecords) > maxCacheSize { - // Keep only the most recent records - session.consumedRecords = session.consumedRecords[len(session.consumedRecords)-maxCacheSize:] - } - - return records, nil -} - -// FetchRecordsHybrid uses a hybrid approach: session reuse + proper offset tracking -// - Fast path (95%): Reuse session for sequential reads -// - Slow path (5%): Create new subscriber for backward seeks -// This combines performance (connection reuse) with correctness (proper tracking) -func (bc *BrokerClient) FetchRecordsHybrid(ctx context.Context, topic string, partition int32, requestedOffset int64, maxRecords int, consumerGroup string, consumerID string) ([]*SeaweedRecord, error) { - glog.V(4).Infof("[FETCH-HYBRID] topic=%s partition=%d requestedOffset=%d maxRecords=%d", - topic, partition, requestedOffset, maxRecords) - - // Get or create session for this (topic, partition, consumerGroup, consumerID) - key := fmt.Sprintf("%s-%d-%s-%s", topic, partition, consumerGroup, consumerID) - - bc.subscribersLock.Lock() - session, exists := bc.subscribers[key] - if !exists { - // No session - create one (this is initial fetch) - glog.V(4).Infof("[FETCH-HYBRID] Creating initial session for %s at offset %d", key, requestedOffset) - newSession, err := bc.createSubscriberSession(topic, partition, requestedOffset, consumerGroup, consumerID) - if err != nil { - bc.subscribersLock.Unlock() - return nil, fmt.Errorf("failed to create initial session: %v", err) - } - bc.subscribers[key] = newSession - session = newSession - } - bc.subscribersLock.Unlock() - - // CRITICAL: Lock the session for the entire operation to serialize requests - // This prevents concurrent access to the same stream - session.mu.Lock() - defer session.mu.Unlock() - - // Check if we can serve from cache - if len(session.consumedRecords) > 0 { - cacheStart := session.consumedRecords[0].Offset - cacheEnd := session.consumedRecords[len(session.consumedRecords)-1].Offset - - if requestedOffset >= cacheStart && requestedOffset <= cacheEnd { - // Serve from cache - glog.V(4).Infof("[FETCH-HYBRID] FAST: Serving from cache for %s offset %d (cache: %d-%d)", - key, requestedOffset, cacheStart, cacheEnd) - return bc.serveFromCache(session, requestedOffset, maxRecords), nil - } - } - - // Determine stream position - // lastReadOffset tracks what we've actually read from the stream - streamPosition := session.lastReadOffset + 1 - if !session.initialized { - streamPosition = session.StartOffset - } - - glog.V(4).Infof("[FETCH-HYBRID] requestedOffset=%d streamPosition=%d lastReadOffset=%d", - requestedOffset, streamPosition, session.lastReadOffset) - - // Decision: Fast path or slow path? - if requestedOffset < streamPosition { - // SLOW PATH: Backward seek - need new subscriber - glog.V(4).Infof("[FETCH-HYBRID] SLOW: Backward seek from %d to %d, creating new subscriber", - streamPosition, requestedOffset) - - // Close old session - if session.Stream != nil { - session.Stream.CloseSend() - } - if session.Cancel != nil { - session.Cancel() - } - - // Create new subscriber at requested offset - newSession, err := bc.createSubscriberSession(topic, partition, requestedOffset, consumerGroup, consumerID) - if err != nil { - return nil, fmt.Errorf("failed to create subscriber for backward seek: %v", err) - } - - // Replace session in map - bc.subscribersLock.Lock() - bc.subscribers[key] = newSession - bc.subscribersLock.Unlock() - - // Update local reference and lock the new session - session.Stream = newSession.Stream - session.Ctx = newSession.Ctx - session.Cancel = newSession.Cancel - session.StartOffset = requestedOffset - session.lastReadOffset = requestedOffset - 1 // Will be updated after read - session.initialized = false - session.consumedRecords = nil - - streamPosition = requestedOffset - } else if requestedOffset > streamPosition { - // FAST PATH: Forward seek - use server-side seek - seekOffset := requestedOffset - glog.V(4).Infof("[FETCH-HYBRID] FAST: Forward seek from %d to %d using server-side seek", - streamPosition, seekOffset) - - // Send seek message to broker - seekReq := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Seek{ - Seek: &mq_pb.SubscribeMessageRequest_SeekMessage{ - Offset: seekOffset, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET, - }, - }, - } - - if err := session.Stream.Send(seekReq); err != nil { - if err == io.EOF { - glog.V(4).Infof("[FETCH-HYBRID] Stream closed during seek, ignoring") - return nil, nil - } - return nil, fmt.Errorf("failed to send seek request: %v", err) - } - - glog.V(4).Infof("[FETCH-HYBRID] Seek request sent, broker will reposition stream to offset %d", seekOffset) - // NOTE: Don't wait for ack - the broker will restart Subscribe loop and send data - // The ack will be handled inline with data messages in readRecordsFromSession - - // Clear cache since we've skipped ahead - session.consumedRecords = nil - streamPosition = seekOffset - } else { - // FAST PATH: Sequential read - continue from current position - glog.V(4).Infof("[FETCH-HYBRID] FAST: Sequential read at offset %d", requestedOffset) - } - - // Read records from stream - records, err := bc.readRecordsFromSession(ctx, session, requestedOffset, maxRecords) - if err != nil { - return nil, err - } - - // Update tracking - if len(records) > 0 { - session.lastReadOffset = records[len(records)-1].Offset - session.initialized = true - glog.V(4).Infof("[FETCH-HYBRID] Read %d records, lastReadOffset now %d", - len(records), session.lastReadOffset) - } - - return records, nil -} - -// FetchRecordsWithDedup reads records with request deduplication to prevent duplicate concurrent fetches -// DEPRECATED: Use FetchRecordsHybrid instead for better performance -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -func (bc *BrokerClient) FetchRecordsWithDedup(ctx context.Context, topic string, partition int32, startOffset int64, maxRecords int, consumerGroup string, consumerID string) ([]*SeaweedRecord, error) { - // Create key for this fetch request - key := fmt.Sprintf("%s-%d-%d", topic, partition, startOffset) - - glog.V(4).Infof("[FETCH-DEDUP] topic=%s partition=%d offset=%d maxRecords=%d key=%s", - topic, partition, startOffset, maxRecords, key) - - // Check if there's already a fetch in progress for this exact request - bc.fetchRequestsLock.Lock() - - if existing, exists := bc.fetchRequests[key]; exists { - // Another fetch is in progress for this (topic, partition, offset) - // Create a waiter channel and add it to the list - waiter := make(chan FetchResult, 1) - existing.mu.Lock() - existing.waiters = append(existing.waiters, waiter) - existing.mu.Unlock() - bc.fetchRequestsLock.Unlock() - - glog.V(4).Infof("[FETCH-DEDUP] Waiting for in-progress fetch: %s", key) - - // Wait for the result from the in-progress fetch - select { - case result := <-waiter: - glog.V(4).Infof("[FETCH-DEDUP] Received result from in-progress fetch: %s (records=%d, err=%v)", - key, len(result.records), result.err) - return result.records, result.err - case <-ctx.Done(): - return nil, ctx.Err() - } - } - - // No fetch in progress - this request will do the fetch - fetchReq := &FetchRequest{ - topic: topic, - partition: partition, - offset: startOffset, - resultChan: make(chan FetchResult, 1), - waiters: []chan FetchResult{}, - inProgress: true, - } - bc.fetchRequests[key] = fetchReq - bc.fetchRequestsLock.Unlock() - - glog.V(4).Infof("[FETCH-DEDUP] Starting new fetch: %s", key) - - // Perform the actual fetch - records, err := bc.fetchRecordsStatelessInternal(ctx, topic, partition, startOffset, maxRecords, consumerGroup, consumerID) - - // Prepare result - result := FetchResult{ - records: records, - err: err, - } - - // Broadcast result to all waiters and clean up - bc.fetchRequestsLock.Lock() - fetchReq.mu.Lock() - waiters := fetchReq.waiters - fetchReq.mu.Unlock() - delete(bc.fetchRequests, key) - bc.fetchRequestsLock.Unlock() - - // Send result to all waiters - glog.V(4).Infof("[FETCH-DEDUP] Broadcasting result to %d waiters: %s (records=%d, err=%v)", - len(waiters), key, len(records), err) - for _, waiter := range waiters { - waiter <- result - close(waiter) - } - - return records, err -} - -// fetchRecordsStatelessInternal is the internal implementation of stateless fetch -// This is called by FetchRecordsWithDedup and should not be called directly -func (bc *BrokerClient) fetchRecordsStatelessInternal(ctx context.Context, topic string, partition int32, startOffset int64, maxRecords int, consumerGroup string, consumerID string) ([]*SeaweedRecord, error) { - glog.V(4).Infof("[FETCH-STATELESS] topic=%s partition=%d offset=%d maxRecords=%d", - topic, partition, startOffset, maxRecords) - - // STATELESS APPROACH: Create a temporary subscriber just for this fetch - // This eliminates concurrent access to shared offset state - tempSubscriber, err := bc.createTemporarySubscriber(topic, partition, startOffset, consumerGroup, consumerID) - if err != nil { - return nil, fmt.Errorf("failed to create temporary subscriber: %v", err) - } - - // Ensure cleanup even if read fails - defer func() { - if tempSubscriber.Stream != nil { - // Send close message - tempSubscriber.Stream.CloseSend() - } - if tempSubscriber.Cancel != nil { - tempSubscriber.Cancel() - } - }() - - // Read records from the fresh subscriber (no seeking needed, it starts at startOffset) - return bc.readRecordsFrom(ctx, tempSubscriber, startOffset, maxRecords) -} - -// FetchRecordsStateless reads records using a stateless approach (creates fresh subscriber per fetch) -// DEPRECATED: Use FetchRecordsHybrid instead for better performance with session reuse -// This eliminates concurrent access to shared offset state -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -func (bc *BrokerClient) FetchRecordsStateless(ctx context.Context, topic string, partition int32, startOffset int64, maxRecords int, consumerGroup string, consumerID string) ([]*SeaweedRecord, error) { - return bc.FetchRecordsHybrid(ctx, topic, partition, startOffset, maxRecords, consumerGroup, consumerID) -} - -// ReadRecordsFromOffset reads records starting from a specific offset using STATELESS approach -// Creates a fresh subscriber for each fetch to eliminate concurrent access issues -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -// DEPRECATED: Use FetchRecordsStateless instead for better API clarity -func (bc *BrokerClient) ReadRecordsFromOffset(ctx context.Context, session *BrokerSubscriberSession, requestedOffset int64, maxRecords int) ([]*SeaweedRecord, error) { - if session == nil { - return nil, fmt.Errorf("subscriber session cannot be nil") - } - - return bc.FetchRecordsStateless(ctx, session.Topic, session.Partition, requestedOffset, maxRecords, session.ConsumerGroup, session.ConsumerID) -} - -// readRecordsFrom reads records from the stream, assigning offsets starting from startOffset -// Uses a timeout-based approach to read multiple records without blocking indefinitely -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -func (bc *BrokerClient) readRecordsFrom(ctx context.Context, session *BrokerSubscriberSession, startOffset int64, maxRecords int) ([]*SeaweedRecord, error) { - if session == nil { - return nil, fmt.Errorf("subscriber session cannot be nil") - } - - if session.Stream == nil { - return nil, fmt.Errorf("subscriber session stream cannot be nil") - } - - glog.V(4).Infof("[FETCH] readRecordsFrom: topic=%s partition=%d startOffset=%d maxRecords=%d", - session.Topic, session.Partition, startOffset, maxRecords) - - var records []*SeaweedRecord - currentOffset := startOffset - - // CRITICAL FIX: Return immediately if maxRecords is 0 or negative - if maxRecords <= 0 { - return records, nil - } - - // Note: Cache checking is done in ReadRecordsFromOffset, not here - // This function is called only when we need to read new data from the stream - - // Read first record with timeout (important for empty topics) - // CRITICAL: For SMQ backend with consumer groups, we need adequate timeout for disk reads - // When a consumer group resumes from a committed offset, the subscriber may need to: - // 1. Connect to the broker (network latency) - // 2. Seek to the correct offset in the log file (disk I/O) - // 3. Read and deserialize the record (disk I/O) - // Total latency can be 100-500ms for cold reads from disk - // - // CRITICAL: Use the context from the Kafka fetch request - // The context timeout is set by the caller based on the Kafka fetch request's MaxWaitTime - // This ensures we wait exactly as long as the client requested, not more or less - // For in-memory reads (hot path), records arrive in <10ms - // For low-volume topics (like _schemas), the caller sets longer timeout to keep subscriber alive - // If no context provided, use a reasonable default timeout - if ctx == nil { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - } - - // CRITICAL: Capture stream pointer while holding lock to prevent TOCTOU race - // If we access session.Stream in the goroutine, it could become nil between check and use - stream := session.Stream - if stream == nil { - glog.V(4).Infof("[FETCH] Stream is nil, cannot read") - return records, nil - } - - type recvResult struct { - resp *mq_pb.SubscribeMessageResponse - err error - } - recvChan := make(chan recvResult, 1) - - // Try to receive first record using captured stream pointer - go func() { - // Recover from panics caused by stream being closed during Recv() - defer func() { - if r := recover(); r != nil { - select { - case recvChan <- recvResult{resp: nil, err: fmt.Errorf("stream recv panicked: %v", r)}: - case <-ctx.Done(): - } - } - }() - resp, err := stream.Recv() - select { - case recvChan <- recvResult{resp: resp, err: err}: - case <-ctx.Done(): - // Context cancelled, don't send (avoid blocking) - } - }() - - select { - case result := <-recvChan: - if result.err != nil { - glog.V(4).Infof("[FETCH] Stream.Recv() error on first record: %v", result.err) - return records, nil // Return empty - no error for empty topic - } - - if dataMsg := result.resp.GetData(); dataMsg != nil { - record := &SeaweedRecord{ - Key: dataMsg.Key, - Value: dataMsg.Value, - Timestamp: dataMsg.TsNs, - Offset: currentOffset, - } - records = append(records, record) - currentOffset++ - glog.V(4).Infof("[FETCH] Received first record: offset=%d, keyLen=%d, valueLen=%d", - record.Offset, len(record.Key), len(record.Value)) - - // CRITICAL: Auto-acknowledge first message immediately for Kafka gateway - // Kafka uses offset commits (not per-message acks) so we must ack to prevent - // broker from blocking on in-flight messages waiting for acks that will never come - ackMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Ack{ - Ack: &mq_pb.SubscribeMessageRequest_AckMessage{ - Key: dataMsg.Key, - TsNs: dataMsg.TsNs, - }, - }, - } - if err := stream.Send(ackMsg); err != nil { - glog.V(4).Infof("[FETCH] Failed to send ack for first record offset %d: %v (continuing)", record.Offset, err) - // Don't fail the fetch if ack fails - continue reading - } - } - - case <-ctx.Done(): - // Timeout on first record - topic is empty or no data available - glog.V(4).Infof("[FETCH] No data available (timeout on first record)") - return records, nil - } - - // If we got the first record, try to get more with adaptive timeout - // CRITICAL: Schema Registry catch-up scenario - give generous timeout for the first batch - // Schema Registry needs to read multiple records quickly when catching up (e.g., offsets 3-6) - // The broker may be reading from disk, which introduces 10-20ms delay between records - // - // Strategy: Start with generous timeout (1 second) for first 5 records to allow broker - // to read from disk, then switch to fast mode (100ms) for streaming in-memory data - consecutiveReads := 0 - - for len(records) < maxRecords { - // Adaptive timeout based on how many records we've already read - var currentTimeout time.Duration - if consecutiveReads < 5 { - // First 5 records: generous timeout for disk reads + network delays - currentTimeout = 1 * time.Second - } else { - // After 5 records: assume we're streaming from memory, use faster timeout - currentTimeout = 100 * time.Millisecond - } - - readStart := time.Now() - // CRITICAL: Use parent context (ctx) to respect client's MaxWaitTime deadline - // The per-record timeout is combined with the overall fetch deadline - ctx2, cancel2 := context.WithTimeout(ctx, currentTimeout) - recvChan2 := make(chan recvResult, 1) - - go func() { - // Recover from panics caused by stream being closed during Recv() - defer func() { - if r := recover(); r != nil { - select { - case recvChan2 <- recvResult{resp: nil, err: fmt.Errorf("stream recv panicked: %v", r)}: - case <-ctx2.Done(): - } - } - }() - // Use captured stream pointer to prevent TOCTOU race - resp, err := stream.Recv() - select { - case recvChan2 <- recvResult{resp: resp, err: err}: - case <-ctx2.Done(): - // Context cancelled - } - }() - - select { - case result := <-recvChan2: - cancel2() - readDuration := time.Since(readStart) - - if result.err != nil { - glog.V(4).Infof("[FETCH] Stream.Recv() error after %d records: %v", len(records), result.err) - // Return what we have - cache will be updated at the end - break - } - - if dataMsg := result.resp.GetData(); dataMsg != nil { - record := &SeaweedRecord{ - Key: dataMsg.Key, - Value: dataMsg.Value, - Timestamp: dataMsg.TsNs, - Offset: currentOffset, - } - records = append(records, record) - currentOffset++ - consecutiveReads++ // Track number of successful reads for adaptive timeout - - // DEBUG: Log received message with value preview for GitHub Actions debugging - valuePreview := "" - if len(dataMsg.Value) > 0 { - if len(dataMsg.Value) <= 50 { - valuePreview = string(dataMsg.Value) - } else { - valuePreview = fmt.Sprintf("%s...(total %d bytes)", string(dataMsg.Value[:50]), len(dataMsg.Value)) - } - } else { - valuePreview = "" - } - glog.V(1).Infof("[FETCH_RECORD] offset=%d keyLen=%d valueLen=%d valuePreview=%q readTime=%v", - record.Offset, len(record.Key), len(record.Value), valuePreview, readDuration) - - glog.V(4).Infof("[FETCH] Received record %d: offset=%d, keyLen=%d, valueLen=%d, readTime=%v", - len(records), record.Offset, len(record.Key), len(record.Value), readDuration) - - // CRITICAL: Auto-acknowledge message immediately for Kafka gateway - // Kafka uses offset commits (not per-message acks) so we must ack to prevent - // broker from blocking on in-flight messages waiting for acks that will never come - ackMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Ack{ - Ack: &mq_pb.SubscribeMessageRequest_AckMessage{ - Key: dataMsg.Key, - TsNs: dataMsg.TsNs, - }, - }, - } - if err := stream.Send(ackMsg); err != nil { - glog.V(4).Infof("[FETCH] Failed to send ack for offset %d: %v (continuing)", record.Offset, err) - // Don't fail the fetch if ack fails - continue reading - } - } - - case <-ctx2.Done(): - cancel2() - // Timeout - return what we have - glog.V(4).Infof("[FETCH] Read timeout after %d records (waited %v), returning batch", len(records), time.Since(readStart)) - return records, nil - } - } - - glog.V(4).Infof("[FETCH] Returning %d records (maxRecords reached)", len(records)) - return records, nil -} - -// ReadRecords is a simplified version for deprecated code paths -// It reads from wherever the stream currently is -func (bc *BrokerClient) ReadRecords(ctx context.Context, session *BrokerSubscriberSession, maxRecords int) ([]*SeaweedRecord, error) { - // Determine where stream is based on cache - session.mu.Lock() - var streamOffset int64 - if len(session.consumedRecords) > 0 { - streamOffset = session.consumedRecords[len(session.consumedRecords)-1].Offset + 1 - } else { - streamOffset = session.StartOffset - } - session.mu.Unlock() - - return bc.readRecordsFrom(ctx, session, streamOffset, maxRecords) -} - -// CloseSubscriber closes and removes a subscriber session -func (bc *BrokerClient) CloseSubscriber(topic string, partition int32, consumerGroup string, consumerID string) { - tempSession := &BrokerSubscriberSession{ - Topic: topic, - Partition: partition, - ConsumerGroup: consumerGroup, - ConsumerID: consumerID, - } - key := tempSession.Key() - - bc.subscribersLock.Lock() - defer bc.subscribersLock.Unlock() - - if session, exists := bc.subscribers[key]; exists { - // CRITICAL: Hold session lock while cancelling to prevent race with active Recv() calls - session.mu.Lock() - if session.Stream != nil { - _ = session.Stream.CloseSend() - } - if session.Cancel != nil { - session.Cancel() - } - session.mu.Unlock() - delete(bc.subscribers, key) - glog.V(4).Infof("[FETCH] Closed subscriber for %s", key) - } -} - -// NeedsRestart checks if the subscriber needs to restart to read from the given offset -// Returns true if: -// 1. Requested offset is before current position AND not in cache -// 2. Stream is closed/invalid -func (bc *BrokerClient) NeedsRestart(session *BrokerSubscriberSession, requestedOffset int64) bool { - session.mu.Lock() - defer session.mu.Unlock() - - // Check if stream is still valid - if session.Stream == nil || session.Ctx == nil { - return true - } - - // Check if we can serve from cache - if len(session.consumedRecords) > 0 { - cacheStart := session.consumedRecords[0].Offset - cacheEnd := session.consumedRecords[len(session.consumedRecords)-1].Offset - if requestedOffset >= cacheStart && requestedOffset <= cacheEnd { - // Can serve from cache, no restart needed - return false - } - } - - // If requested offset is far behind current position, need restart - if requestedOffset < session.StartOffset { - return true - } - - // Check if we're too far ahead (gap in cache) - if requestedOffset > session.StartOffset+1000 { - // Large gap - might be more efficient to restart - return true - } - - return false -} - -// RestartSubscriber restarts an existing subscriber from a new offset -// This is more efficient than closing and recreating the session -func (bc *BrokerClient) RestartSubscriber(session *BrokerSubscriberSession, newOffset int64, consumerGroup string, consumerID string) error { - session.mu.Lock() - defer session.mu.Unlock() - - glog.V(4).Infof("[FETCH] Restarting subscriber for %s[%d]: from offset %d to %d", - session.Topic, session.Partition, session.StartOffset, newOffset) - - // Close existing stream - if session.Stream != nil { - _ = session.Stream.CloseSend() - } - if session.Cancel != nil { - session.Cancel() - } - - // Clear cache since we're seeking to a different position - session.consumedRecords = nil - session.nextOffsetToRead = newOffset - - // Create new stream from new offset - subscriberCtx, cancel := context.WithCancel(bc.ctx) - - stream, err := bc.client.SubscribeMessage(subscriberCtx) - if err != nil { - cancel() - return fmt.Errorf("failed to create subscribe stream for restart: %v", err) - } - - // Get the actual partition assignment - actualPartition, err := bc.getActualPartitionAssignment(session.Topic, session.Partition) - if err != nil { - cancel() - _ = stream.CloseSend() - return fmt.Errorf("failed to get actual partition assignment for restart: %v", err) - } - - // Send init message with new offset - initReq := createSubscribeInitMessage(session.Topic, actualPartition, newOffset, schema_pb.OffsetType_EXACT_OFFSET, consumerGroup, consumerID) - - if err := stream.Send(initReq); err != nil { - cancel() - _ = stream.CloseSend() - return fmt.Errorf("failed to send subscribe init for restart: %v", err) - } - - // Update session with new stream and offset - session.Stream = stream - session.Cancel = cancel - session.Ctx = subscriberCtx - session.StartOffset = newOffset - - glog.V(4).Infof("[FETCH] Successfully restarted subscriber for %s[%d] at offset %d", - session.Topic, session.Partition, newOffset) - - return nil -} - -// Seek helper methods for BrokerSubscriberSession - -// SeekToOffset repositions the stream to read from a specific offset -func (session *BrokerSubscriberSession) SeekToOffset(offset int64) error { - // Skip seek if already at the requested offset - session.mu.Lock() - currentOffset := session.StartOffset - session.mu.Unlock() - - if currentOffset == offset { - glog.V(4).Infof("[SEEK] Already at offset %d for %s[%d], skipping seek", offset, session.Topic, session.Partition) - return nil - } - - seekMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Seek{ - Seek: &mq_pb.SubscribeMessageRequest_SeekMessage{ - Offset: offset, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET, - }, - }, - } - - if err := session.Stream.Send(seekMsg); err != nil { - // Handle graceful shutdown - if err == io.EOF { - glog.V(4).Infof("[SEEK] Stream closing during seek to offset %d for %s[%d]", offset, session.Topic, session.Partition) - return nil // Not an error during shutdown - } - return fmt.Errorf("seek to offset %d failed: %v", offset, err) - } - - session.mu.Lock() - session.StartOffset = offset - // Only clear cache if seeking forward past cached data - shouldClearCache := true - if len(session.consumedRecords) > 0 { - cacheEndOffset := session.consumedRecords[len(session.consumedRecords)-1].Offset - if offset <= cacheEndOffset { - shouldClearCache = false - } - } - if shouldClearCache { - session.consumedRecords = nil - } - session.mu.Unlock() - - glog.V(4).Infof("[SEEK] Seeked to offset %d for %s[%d]", offset, session.Topic, session.Partition) - return nil -} - -// SeekToTimestamp repositions the stream to read from messages at or after a specific timestamp -// timestamp is in nanoseconds since Unix epoch -// Note: We don't skip this operation even if we think we're at the right position because -// we can't easily determine the offset corresponding to a timestamp without querying the broker -func (session *BrokerSubscriberSession) SeekToTimestamp(timestampNs int64) error { - seekMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Seek{ - Seek: &mq_pb.SubscribeMessageRequest_SeekMessage{ - Offset: timestampNs, - OffsetType: schema_pb.OffsetType_EXACT_TS_NS, - }, - }, - } - - if err := session.Stream.Send(seekMsg); err != nil { - // Handle graceful shutdown - if err == io.EOF { - glog.V(4).Infof("[SEEK] Stream closing during seek to timestamp %d for %s[%d]", timestampNs, session.Topic, session.Partition) - return nil // Not an error during shutdown - } - return fmt.Errorf("seek to timestamp %d failed: %v", timestampNs, err) - } - - session.mu.Lock() - // Note: We don't know the exact offset at this timestamp yet - // It will be updated when we read the first message - session.consumedRecords = nil - session.mu.Unlock() - - glog.V(4).Infof("[SEEK] Seeked to timestamp %d for %s[%d]", timestampNs, session.Topic, session.Partition) - return nil -} - -// SeekToEarliest repositions the stream to the beginning of the partition -// Note: We don't skip this operation even if StartOffset == 0 because the broker -// may have a different notion of "earliest" (e.g., after compaction or retention) -func (session *BrokerSubscriberSession) SeekToEarliest() error { - seekMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Seek{ - Seek: &mq_pb.SubscribeMessageRequest_SeekMessage{ - Offset: 0, - OffsetType: schema_pb.OffsetType_RESET_TO_EARLIEST, - }, - }, - } - - if err := session.Stream.Send(seekMsg); err != nil { - // Handle graceful shutdown - if err == io.EOF { - glog.V(4).Infof("[SEEK] Stream closing during seek to earliest for %s[%d]", session.Topic, session.Partition) - return nil // Not an error during shutdown - } - return fmt.Errorf("seek to earliest failed: %v", err) - } - - session.mu.Lock() - session.StartOffset = 0 - session.consumedRecords = nil - session.mu.Unlock() - - glog.V(4).Infof("[SEEK] Seeked to earliest for %s[%d]", session.Topic, session.Partition) - return nil -} - -// SeekToLatest repositions the stream to the end of the partition (next new message) -// Note: We don't skip this operation because "latest" is a moving target and we can't -// reliably determine if we're already at the latest position without querying the broker -func (session *BrokerSubscriberSession) SeekToLatest() error { - seekMsg := &mq_pb.SubscribeMessageRequest{ - Message: &mq_pb.SubscribeMessageRequest_Seek{ - Seek: &mq_pb.SubscribeMessageRequest_SeekMessage{ - Offset: 0, - OffsetType: schema_pb.OffsetType_RESET_TO_LATEST, - }, - }, - } - - if err := session.Stream.Send(seekMsg); err != nil { - // Handle graceful shutdown - if err == io.EOF { - glog.V(4).Infof("[SEEK] Stream closing during seek to latest for %s[%d]", session.Topic, session.Partition) - return nil // Not an error during shutdown - } - return fmt.Errorf("seek to latest failed: %v", err) - } - - session.mu.Lock() - // Offset will be set when we read the first new message - session.consumedRecords = nil - session.mu.Unlock() - - glog.V(4).Infof("[SEEK] Seeked to latest for %s[%d]", session.Topic, session.Partition) - return nil -} diff --git a/weed/mq/kafka/integration/broker_error_mapping.go b/weed/mq/kafka/integration/broker_error_mapping.go deleted file mode 100644 index 61476eeb0..000000000 --- a/weed/mq/kafka/integration/broker_error_mapping.go +++ /dev/null @@ -1,124 +0,0 @@ -package integration - -import ( - "strings" - - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -// Kafka Protocol Error Codes (copied from protocol package to avoid import cycle) -const ( - kafkaErrorCodeNone int16 = 0 - kafkaErrorCodeUnknownServerError int16 = 1 - kafkaErrorCodeUnknownTopicOrPartition int16 = 3 - kafkaErrorCodeNotLeaderOrFollower int16 = 6 - kafkaErrorCodeRequestTimedOut int16 = 7 - kafkaErrorCodeBrokerNotAvailable int16 = 8 - kafkaErrorCodeMessageTooLarge int16 = 10 - kafkaErrorCodeNetworkException int16 = 13 - kafkaErrorCodeOffsetLoadInProgress int16 = 14 - kafkaErrorCodeTopicAlreadyExists int16 = 36 - kafkaErrorCodeInvalidPartitions int16 = 37 - kafkaErrorCodeInvalidConfig int16 = 40 - kafkaErrorCodeInvalidRecord int16 = 42 -) - -// MapBrokerErrorToKafka maps a broker error code to the corresponding Kafka protocol error code -func MapBrokerErrorToKafka(brokerErrorCode int32) int16 { - switch brokerErrorCode { - case 0: // BrokerErrorNone - return kafkaErrorCodeNone - case 1: // BrokerErrorUnknownServerError - return kafkaErrorCodeUnknownServerError - case 2: // BrokerErrorTopicNotFound - return kafkaErrorCodeUnknownTopicOrPartition - case 3: // BrokerErrorPartitionNotFound - return kafkaErrorCodeUnknownTopicOrPartition - case 6: // BrokerErrorNotLeaderOrFollower - return kafkaErrorCodeNotLeaderOrFollower - case 7: // BrokerErrorRequestTimedOut - return kafkaErrorCodeRequestTimedOut - case 8: // BrokerErrorBrokerNotAvailable - return kafkaErrorCodeBrokerNotAvailable - case 10: // BrokerErrorMessageTooLarge - return kafkaErrorCodeMessageTooLarge - case 13: // BrokerErrorNetworkException - return kafkaErrorCodeNetworkException - case 14: // BrokerErrorOffsetLoadInProgress - return kafkaErrorCodeOffsetLoadInProgress - case 42: // BrokerErrorInvalidRecord - return kafkaErrorCodeInvalidRecord - case 36: // BrokerErrorTopicAlreadyExists - return kafkaErrorCodeTopicAlreadyExists - case 37: // BrokerErrorInvalidPartitions - return kafkaErrorCodeInvalidPartitions - case 40: // BrokerErrorInvalidConfig - return kafkaErrorCodeInvalidConfig - case 100: // BrokerErrorPublisherNotFound - return kafkaErrorCodeUnknownServerError - case 101: // BrokerErrorConnectionFailed - return kafkaErrorCodeNetworkException - case 102: // BrokerErrorFollowerConnectionFailed - return kafkaErrorCodeNetworkException - default: - // Unknown broker error code, default to unknown server error - return kafkaErrorCodeUnknownServerError - } -} - -// HandleBrokerResponse processes a broker response and returns appropriate error information -// Returns (kafkaErrorCode, errorMessage, error) where error is non-nil for system errors -func HandleBrokerResponse(resp *mq_pb.PublishMessageResponse) (int16, string, error) { - if resp.Error == "" && resp.ErrorCode == 0 { - // No error - return kafkaErrorCodeNone, "", nil - } - - // Use structured error code if available, otherwise fall back to string parsing - if resp.ErrorCode != 0 { - kafkaErrorCode := MapBrokerErrorToKafka(resp.ErrorCode) - return kafkaErrorCode, resp.Error, nil - } - - // Fallback: parse string error for backward compatibility - // This handles cases where older brokers might not set ErrorCode - kafkaErrorCode := parseStringErrorToKafkaCode(resp.Error) - return kafkaErrorCode, resp.Error, nil -} - -// parseStringErrorToKafkaCode provides backward compatibility for string-based error parsing -// This is the old brittle approach that we're replacing with structured error codes -func parseStringErrorToKafkaCode(errorMsg string) int16 { - if errorMsg == "" { - return kafkaErrorCodeNone - } - - // Check for common error patterns (brittle string matching) - switch { - case containsAny(errorMsg, "not the leader", "not leader"): - return kafkaErrorCodeNotLeaderOrFollower - case containsAny(errorMsg, "topic", "not found", "does not exist"): - return kafkaErrorCodeUnknownTopicOrPartition - case containsAny(errorMsg, "partition", "not found"): - return kafkaErrorCodeUnknownTopicOrPartition - case containsAny(errorMsg, "timeout", "timed out"): - return kafkaErrorCodeRequestTimedOut - case containsAny(errorMsg, "network", "connection"): - return kafkaErrorCodeNetworkException - case containsAny(errorMsg, "too large", "size"): - return kafkaErrorCodeMessageTooLarge - default: - return kafkaErrorCodeUnknownServerError - } -} - -// containsAny checks if the text contains any of the given substrings (case-insensitive) -func containsAny(text string, substrings ...string) bool { - textLower := strings.ToLower(text) - for _, substr := range substrings { - if strings.Contains(textLower, strings.ToLower(substr)) { - return true - } - } - return false -} diff --git a/weed/mq/kafka/integration/broker_error_mapping_test.go b/weed/mq/kafka/integration/broker_error_mapping_test.go deleted file mode 100644 index 2f4849833..000000000 --- a/weed/mq/kafka/integration/broker_error_mapping_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package integration - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -func TestMapBrokerErrorToKafka(t *testing.T) { - tests := []struct { - name string - brokerErrorCode int32 - expectedKafka int16 - }{ - {"No error", 0, kafkaErrorCodeNone}, - {"Unknown server error", 1, kafkaErrorCodeUnknownServerError}, - {"Topic not found", 2, kafkaErrorCodeUnknownTopicOrPartition}, - {"Partition not found", 3, kafkaErrorCodeUnknownTopicOrPartition}, - {"Not leader or follower", 6, kafkaErrorCodeNotLeaderOrFollower}, - {"Request timed out", 7, kafkaErrorCodeRequestTimedOut}, - {"Broker not available", 8, kafkaErrorCodeBrokerNotAvailable}, - {"Message too large", 10, kafkaErrorCodeMessageTooLarge}, - {"Network exception", 13, kafkaErrorCodeNetworkException}, - {"Offset load in progress", 14, kafkaErrorCodeOffsetLoadInProgress}, - {"Invalid record", 42, kafkaErrorCodeInvalidRecord}, - {"Topic already exists", 36, kafkaErrorCodeTopicAlreadyExists}, - {"Invalid partitions", 37, kafkaErrorCodeInvalidPartitions}, - {"Invalid config", 40, kafkaErrorCodeInvalidConfig}, - {"Publisher not found", 100, kafkaErrorCodeUnknownServerError}, - {"Connection failed", 101, kafkaErrorCodeNetworkException}, - {"Follower connection failed", 102, kafkaErrorCodeNetworkException}, - {"Unknown error code", 999, kafkaErrorCodeUnknownServerError}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := MapBrokerErrorToKafka(tt.brokerErrorCode) - if result != tt.expectedKafka { - t.Errorf("MapBrokerErrorToKafka(%d) = %d, want %d", tt.brokerErrorCode, result, tt.expectedKafka) - } - }) - } -} - -func TestHandleBrokerResponse(t *testing.T) { - tests := []struct { - name string - response *mq_pb.PublishMessageResponse - expectedKafkaCode int16 - expectedError string - expectSystemError bool - }{ - { - name: "No error", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 123, - Error: "", - ErrorCode: 0, - }, - expectedKafkaCode: kafkaErrorCodeNone, - expectedError: "", - expectSystemError: false, - }, - { - name: "Structured error - Not leader", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 0, - Error: "not the leader for this partition, leader is: broker2:9092", - ErrorCode: 6, // BrokerErrorNotLeaderOrFollower - }, - expectedKafkaCode: kafkaErrorCodeNotLeaderOrFollower, - expectedError: "not the leader for this partition, leader is: broker2:9092", - expectSystemError: false, - }, - { - name: "Structured error - Topic not found", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 0, - Error: "topic test-topic not found", - ErrorCode: 2, // BrokerErrorTopicNotFound - }, - expectedKafkaCode: kafkaErrorCodeUnknownTopicOrPartition, - expectedError: "topic test-topic not found", - expectSystemError: false, - }, - { - name: "Fallback string parsing - Not leader", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 0, - Error: "not the leader for this partition", - ErrorCode: 0, // No structured error code - }, - expectedKafkaCode: kafkaErrorCodeNotLeaderOrFollower, - expectedError: "not the leader for this partition", - expectSystemError: false, - }, - { - name: "Fallback string parsing - Topic not found", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 0, - Error: "topic does not exist", - ErrorCode: 0, // No structured error code - }, - expectedKafkaCode: kafkaErrorCodeUnknownTopicOrPartition, - expectedError: "topic does not exist", - expectSystemError: false, - }, - { - name: "Fallback string parsing - Unknown error", - response: &mq_pb.PublishMessageResponse{ - AckTsNs: 0, - Error: "some unknown error occurred", - ErrorCode: 0, // No structured error code - }, - expectedKafkaCode: kafkaErrorCodeUnknownServerError, - expectedError: "some unknown error occurred", - expectSystemError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - kafkaCode, errorMsg, systemErr := HandleBrokerResponse(tt.response) - - if kafkaCode != tt.expectedKafkaCode { - t.Errorf("HandleBrokerResponse() kafkaCode = %d, want %d", kafkaCode, tt.expectedKafkaCode) - } - - if errorMsg != tt.expectedError { - t.Errorf("HandleBrokerResponse() errorMsg = %q, want %q", errorMsg, tt.expectedError) - } - - if (systemErr != nil) != tt.expectSystemError { - t.Errorf("HandleBrokerResponse() systemErr = %v, expectSystemError = %v", systemErr, tt.expectSystemError) - } - }) - } -} - -func TestParseStringErrorToKafkaCode(t *testing.T) { - tests := []struct { - name string - errorMsg string - expectedCode int16 - }{ - {"Empty error", "", kafkaErrorCodeNone}, - {"Not leader error", "not the leader for this partition", kafkaErrorCodeNotLeaderOrFollower}, - {"Not leader error variant", "not leader", kafkaErrorCodeNotLeaderOrFollower}, - {"Topic not found", "topic not found", kafkaErrorCodeUnknownTopicOrPartition}, - {"Topic does not exist", "topic does not exist", kafkaErrorCodeUnknownTopicOrPartition}, - {"Partition not found", "partition not found", kafkaErrorCodeUnknownTopicOrPartition}, - {"Timeout error", "request timed out", kafkaErrorCodeRequestTimedOut}, - {"Timeout error variant", "timeout occurred", kafkaErrorCodeRequestTimedOut}, - {"Network error", "network exception", kafkaErrorCodeNetworkException}, - {"Connection error", "connection failed", kafkaErrorCodeNetworkException}, - {"Message too large", "message too large", kafkaErrorCodeMessageTooLarge}, - {"Size error", "size exceeds limit", kafkaErrorCodeMessageTooLarge}, - {"Unknown error", "some random error", kafkaErrorCodeUnknownServerError}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseStringErrorToKafkaCode(tt.errorMsg) - if result != tt.expectedCode { - t.Errorf("parseStringErrorToKafkaCode(%q) = %d, want %d", tt.errorMsg, result, tt.expectedCode) - } - }) - } -} diff --git a/weed/mq/kafka/integration/fetch_performance_test.go b/weed/mq/kafka/integration/fetch_performance_test.go deleted file mode 100644 index c891784eb..000000000 --- a/weed/mq/kafka/integration/fetch_performance_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package integration - -import ( - "testing" - "time" -) - -// TestAdaptiveFetchTimeout verifies that the adaptive timeout strategy -// allows reading multiple records from disk within a reasonable time -func TestAdaptiveFetchTimeout(t *testing.T) { - t.Log("Testing adaptive fetch timeout strategy...") - - // Simulate the scenario where we need to read 4 records from disk - // Each record takes 100-200ms to read (simulates disk I/O) - recordReadTimes := []time.Duration{ - 150 * time.Millisecond, // Record 1 (from disk) - 150 * time.Millisecond, // Record 2 (from disk) - 150 * time.Millisecond, // Record 3 (from disk) - 150 * time.Millisecond, // Record 4 (from disk) - } - - // Test 1: Old strategy (50ms timeout per record) - t.Run("OldStrategy_50ms_Timeout", func(t *testing.T) { - timeout := 50 * time.Millisecond - recordsReceived := 0 - - start := time.Now() - for i, readTime := range recordReadTimes { - if readTime <= timeout { - recordsReceived++ - } else { - t.Logf("Record %d timed out (readTime=%v > timeout=%v)", i+1, readTime, timeout) - break - } - } - duration := time.Since(start) - - t.Logf("Old strategy: received %d/%d records in %v", recordsReceived, len(recordReadTimes), duration) - - if recordsReceived >= len(recordReadTimes) { - t.Error("Old strategy should NOT receive all records (timeout too short)") - } else { - t.Logf("โœ“ Bug reproduced: old strategy times out too quickly") - } - }) - - // Test 2: New adaptive strategy (1 second timeout for first 5 records) - t.Run("NewStrategy_1s_Timeout", func(t *testing.T) { - timeout := 1 * time.Second // Generous timeout for first batch - recordsReceived := 0 - - start := time.Now() - for i, readTime := range recordReadTimes { - if readTime <= timeout { - recordsReceived++ - t.Logf("Record %d received (readTime=%v)", i+1, readTime) - } else { - t.Logf("Record %d timed out (readTime=%v > timeout=%v)", i+1, readTime, timeout) - break - } - } - duration := time.Since(start) - - t.Logf("New strategy: received %d/%d records in %v", recordsReceived, len(recordReadTimes), duration) - - if recordsReceived < len(recordReadTimes) { - t.Errorf("New strategy should receive all records (timeout=%v)", timeout) - } else { - t.Logf("โœ“ Fix verified: new strategy receives all records") - } - }) - - // Test 3: Schema Registry catch-up scenario - t.Run("SchemaRegistry_CatchUp_Scenario", func(t *testing.T) { - // Schema Registry has 500ms total timeout to catch up from offset 3 to 6 - schemaRegistryTimeout := 500 * time.Millisecond - - // With old strategy (50ms per record after first): - // - First record: 10s timeout โœ“ - // - Records 2-4: 50ms each โœ— (times out after record 1) - // Total time: > 500ms (only gets 1 record per fetch) - - // With new strategy (1s per record for first 5): - // - Records 1-4: 1s each โœ“ - // - All 4 records received in ~600ms - // Total time: ~600ms (gets all 4 records in one fetch) - - recordsNeeded := 4 - perRecordReadTime := 150 * time.Millisecond - - // Old strategy simulation - oldStrategyTime := time.Duration(recordsNeeded) * 50 * time.Millisecond // Times out, need multiple fetches - oldStrategyRoundTrips := recordsNeeded // One record per fetch - - // New strategy simulation - newStrategyTime := time.Duration(recordsNeeded) * perRecordReadTime // All in one fetch - newStrategyRoundTrips := 1 - - t.Logf("Schema Registry catch-up simulation:") - t.Logf(" Old strategy: %d round trips, ~%v total time", oldStrategyRoundTrips, oldStrategyTime*time.Duration(oldStrategyRoundTrips)) - t.Logf(" New strategy: %d round trip, ~%v total time", newStrategyRoundTrips, newStrategyTime) - t.Logf(" Schema Registry timeout: %v", schemaRegistryTimeout) - - oldStrategyTotalTime := oldStrategyTime * time.Duration(oldStrategyRoundTrips) - newStrategyTotalTime := newStrategyTime * time.Duration(newStrategyRoundTrips) - - if oldStrategyTotalTime > schemaRegistryTimeout { - t.Logf("โœ“ Old strategy exceeds timeout: %v > %v", oldStrategyTotalTime, schemaRegistryTimeout) - } - - if newStrategyTotalTime <= schemaRegistryTimeout+200*time.Millisecond { - t.Logf("โœ“ New strategy completes within timeout: %v <= %v", newStrategyTotalTime, schemaRegistryTimeout+200*time.Millisecond) - } else { - t.Errorf("New strategy too slow: %v > %v", newStrategyTotalTime, schemaRegistryTimeout) - } - }) -} - -// TestFetchTimeoutProgression verifies the timeout progression logic -func TestFetchTimeoutProgression(t *testing.T) { - t.Log("Testing fetch timeout progression...") - - // Adaptive timeout logic: - // - First 5 records: 1 second (catch-up from disk) - // - After 5 records: 100ms (streaming from memory) - - getTimeout := func(recordNumber int) time.Duration { - if recordNumber <= 5 { - return 1 * time.Second - } - return 100 * time.Millisecond - } - - t.Logf("Timeout progression:") - for i := 1; i <= 10; i++ { - timeout := getTimeout(i) - t.Logf(" Record %2d: timeout = %v", i, timeout) - } - - // Verify the progression - if getTimeout(1) != 1*time.Second { - t.Error("First record should have 1s timeout") - } - if getTimeout(5) != 1*time.Second { - t.Error("Fifth record should have 1s timeout") - } - if getTimeout(6) != 100*time.Millisecond { - t.Error("Sixth record should have 100ms timeout (fast path)") - } - if getTimeout(10) != 100*time.Millisecond { - t.Error("Tenth record should have 100ms timeout (fast path)") - } - - t.Log("โœ“ Timeout progression is correct") -} diff --git a/weed/mq/kafka/integration/record_retrieval_test.go b/weed/mq/kafka/integration/record_retrieval_test.go deleted file mode 100644 index 697f6af48..000000000 --- a/weed/mq/kafka/integration/record_retrieval_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package integration - -import ( - "testing" - "time" -) - -// MockSeaweedClient provides a mock implementation for testing -type MockSeaweedClient struct { - records map[string]map[int32][]*SeaweedRecord // topic -> partition -> records -} - -func NewMockSeaweedClient() *MockSeaweedClient { - return &MockSeaweedClient{ - records: make(map[string]map[int32][]*SeaweedRecord), - } -} - -func (m *MockSeaweedClient) AddRecord(topic string, partition int32, key []byte, value []byte, timestamp int64) { - if m.records[topic] == nil { - m.records[topic] = make(map[int32][]*SeaweedRecord) - } - if m.records[topic][partition] == nil { - m.records[topic][partition] = make([]*SeaweedRecord, 0) - } - - record := &SeaweedRecord{ - Key: key, - Value: value, - Timestamp: timestamp, - Offset: int64(len(m.records[topic][partition])), // Simple offset numbering - } - - m.records[topic][partition] = append(m.records[topic][partition], record) -} - -func (m *MockSeaweedClient) GetRecords(topic string, partition int32, fromOffset int64, maxRecords int) ([]*SeaweedRecord, error) { - if m.records[topic] == nil || m.records[topic][partition] == nil { - return nil, nil - } - - allRecords := m.records[topic][partition] - if fromOffset < 0 || fromOffset >= int64(len(allRecords)) { - return nil, nil - } - - endOffset := fromOffset + int64(maxRecords) - if endOffset > int64(len(allRecords)) { - endOffset = int64(len(allRecords)) - } - - return allRecords[fromOffset:endOffset], nil -} - -func TestSeaweedSMQRecord_Interface(t *testing.T) { - // Test that SeaweedSMQRecord properly implements SMQRecord interface - key := []byte("test-key") - value := []byte("test-value") - timestamp := time.Now().UnixNano() - kafkaOffset := int64(42) - - record := &SeaweedSMQRecord{ - key: key, - value: value, - timestamp: timestamp, - offset: kafkaOffset, - } - - // Test interface compliance - var smqRecord SMQRecord = record - - // Test GetKey - if string(smqRecord.GetKey()) != string(key) { - t.Errorf("Expected key %s, got %s", string(key), string(smqRecord.GetKey())) - } - - // Test GetValue - if string(smqRecord.GetValue()) != string(value) { - t.Errorf("Expected value %s, got %s", string(value), string(smqRecord.GetValue())) - } - - // Test GetTimestamp - if smqRecord.GetTimestamp() != timestamp { - t.Errorf("Expected timestamp %d, got %d", timestamp, smqRecord.GetTimestamp()) - } - - // Test GetOffset - if smqRecord.GetOffset() != kafkaOffset { - t.Errorf("Expected offset %d, got %d", kafkaOffset, smqRecord.GetOffset()) - } -} - -func TestSeaweedMQHandler_GetStoredRecords_EmptyTopic(t *testing.T) { - // Note: Ledgers have been removed - SMQ broker handles all offset management directly - // This test is now obsolete as GetStoredRecords requires a real broker connection - t.Skip("Test obsolete: ledgers removed, SMQ broker handles offset management") -} - -func TestSeaweedMQHandler_GetStoredRecords_EmptyPartition(t *testing.T) { - // Note: Ledgers have been removed - SMQ broker handles all offset management directly - // This test is now obsolete as GetStoredRecords requires a real broker connection - t.Skip("Test obsolete: ledgers removed, SMQ broker handles offset management") -} - -func TestSeaweedMQHandler_GetStoredRecords_OffsetBeyondHighWaterMark(t *testing.T) { - // Note: Ledgers have been removed - SMQ broker handles all offset management directly - // This test is now obsolete as GetStoredRecords requires a real broker connection - t.Skip("Test obsolete: ledgers removed, SMQ broker handles offset management") -} - -func TestSeaweedMQHandler_GetStoredRecords_MaxRecordsLimit(t *testing.T) { - // Note: Ledgers have been removed - SMQ broker handles all offset management directly - // This test is now obsolete as GetStoredRecords requires a real broker connection - t.Skip("Test obsolete: ledgers removed, SMQ broker handles offset management") -} - -// Integration test helpers and benchmarks - -func BenchmarkSeaweedSMQRecord_GetMethods(b *testing.B) { - record := &SeaweedSMQRecord{ - key: []byte("benchmark-key"), - value: []byte("benchmark-value-with-some-longer-content"), - timestamp: time.Now().UnixNano(), - offset: 12345, - } - - b.ResetTimer() - - b.Run("GetKey", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = record.GetKey() - } - }) - - b.Run("GetValue", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = record.GetValue() - } - }) - - b.Run("GetTimestamp", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = record.GetTimestamp() - } - }) - - b.Run("GetOffset", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = record.GetOffset() - } - }) -} diff --git a/weed/mq/kafka/integration/seaweedmq_handler.go b/weed/mq/kafka/integration/seaweedmq_handler.go deleted file mode 100644 index 0ef659050..000000000 --- a/weed/mq/kafka/integration/seaweedmq_handler.go +++ /dev/null @@ -1,513 +0,0 @@ -package integration - -import ( - "context" - "encoding/binary" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -// GetStoredRecords retrieves records from SeaweedMQ using the proper subscriber API -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -func (h *SeaweedMQHandler) GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]SMQRecord, error) { - glog.V(4).Infof("[FETCH] GetStoredRecords: topic=%s partition=%d fromOffset=%d maxRecords=%d", topic, partition, fromOffset, maxRecords) - - // Verify topic exists - if !h.TopicExists(topic) { - return nil, fmt.Errorf("topic %s does not exist", topic) - } - - // CRITICAL: Use per-connection BrokerClient to prevent gRPC stream interference - // Each Kafka connection has its own isolated BrokerClient instance - var brokerClient *BrokerClient - consumerGroup := "kafka-fetch-consumer" // default - // CRITICAL FIX: Use stable consumer ID per topic-partition, NOT with timestamp - // Including timestamp would create a new session on every fetch, causing subscriber churn - consumerID := fmt.Sprintf("kafka-fetch-%s-%d", topic, partition) // default, stable per topic-partition - - // Get the per-connection broker client from connection context - if h.protocolHandler != nil { - connCtx := h.protocolHandler.GetConnectionContext() - if connCtx != nil { - // Extract per-connection broker client - if connCtx.BrokerClient != nil { - if bc, ok := connCtx.BrokerClient.(*BrokerClient); ok { - brokerClient = bc - glog.V(4).Infof("[FETCH] Using per-connection BrokerClient for topic=%s partition=%d", topic, partition) - } - } - - // Extract consumer group and client ID - if connCtx.ConsumerGroup != "" { - consumerGroup = connCtx.ConsumerGroup - glog.V(4).Infof("[FETCH] Using actual consumer group from context: %s", consumerGroup) - } - if connCtx.MemberID != "" { - // Use member ID as base, but still include topic-partition for uniqueness - consumerID = fmt.Sprintf("%s-%s-%d", connCtx.MemberID, topic, partition) - glog.V(4).Infof("[FETCH] Using actual member ID from context: %s", consumerID) - } else if connCtx.ClientID != "" { - // Fallback to client ID if member ID not set (for clients not using consumer groups) - // Include topic-partition to ensure each partition consumer is unique - consumerID = fmt.Sprintf("%s-%s-%d", connCtx.ClientID, topic, partition) - glog.V(4).Infof("[FETCH] Using client ID from context: %s", consumerID) - } - } - } - - // Fallback to shared broker client if per-connection client not available - if brokerClient == nil { - glog.Warningf("[FETCH] No per-connection BrokerClient, falling back to shared client") - brokerClient = h.brokerClient - if brokerClient == nil { - return nil, fmt.Errorf("no broker client available") - } - } - - // KAFKA-STYLE STATELESS FETCH (Long-term solution) - // Uses FetchMessage RPC - completely stateless, no Subscribe loops - // - // Benefits: - // 1. No session state on broker - each request is independent - // 2. No shared Subscribe loops - no concurrent access issues - // 3. No stream corruption - no cancel/restart complexity - // 4. Safe concurrent reads - like Kafka's file-based reads - // 5. Simple and maintainable - just request/response - // - // Architecture inspired by Kafka: - // - Client manages offset tracking - // - Each fetch is independent - // - Broker reads from LogBuffer without maintaining state - // - Natural support for concurrent requests - glog.V(4).Infof("[FETCH-STATELESS] Fetching records for topic=%s partition=%d fromOffset=%d maxRecords=%d", topic, partition, fromOffset, maxRecords) - - // Use the new FetchMessage RPC (Kafka-style stateless) - seaweedRecords, err := brokerClient.FetchMessagesStateless(ctx, topic, partition, fromOffset, maxRecords, consumerGroup, consumerID) - if err != nil { - glog.Errorf("[FETCH-STATELESS] Failed to fetch records: %v", err) - return nil, fmt.Errorf("failed to fetch records: %v", err) - } - - glog.V(4).Infof("[FETCH-STATELESS] Fetched %d records", len(seaweedRecords)) - // - // STATELESS FETCH BENEFITS: - // - No broker-side session state = no state synchronization bugs - // - No Subscribe loops = no concurrent access to LogBuffer - // - No stream corruption = no cancel/restart issues - // - Natural concurrent access = like Kafka file reads - // - Simple architecture = easier to maintain and debug - // - // EXPECTED RESULTS: - // - <1% message loss (only from consumer rebalancing) - // - No duplicates (no stream corruption) - // - Low latency (direct LogBuffer reads) - // - No context timeouts (no stream initialization overhead) - - // Convert SeaweedMQ records to SMQRecord interface with proper Kafka offsets - smqRecords := make([]SMQRecord, 0, len(seaweedRecords)) - for i, seaweedRecord := range seaweedRecords { - // CRITICAL FIX: Use the actual offset from SeaweedMQ - // The SeaweedRecord.Offset field now contains the correct offset from the subscriber - kafkaOffset := seaweedRecord.Offset - - // CRITICAL: Skip records before the requested offset - // This can happen when the subscriber cache returns old data - if kafkaOffset < fromOffset { - glog.V(4).Infof("[FETCH] Skipping record %d with offset %d (requested fromOffset=%d)", i, kafkaOffset, fromOffset) - continue - } - - smqRecord := &SeaweedSMQRecord{ - key: seaweedRecord.Key, - value: seaweedRecord.Value, - timestamp: seaweedRecord.Timestamp, - offset: kafkaOffset, - } - smqRecords = append(smqRecords, smqRecord) - - glog.V(4).Infof("[FETCH] Record %d: offset=%d, keyLen=%d, valueLen=%d", i, kafkaOffset, len(seaweedRecord.Key), len(seaweedRecord.Value)) - } - - glog.V(4).Infof("[FETCH] Successfully read %d records from SMQ", len(smqRecords)) - return smqRecords, nil -} - -// GetEarliestOffset returns the earliest available offset for a topic partition -// ALWAYS queries SMQ broker directly - no ledger involved -func (h *SeaweedMQHandler) GetEarliestOffset(topic string, partition int32) (int64, error) { - - // Check if topic exists - if !h.TopicExists(topic) { - return 0, nil // Empty topic starts at offset 0 - } - - // ALWAYS query SMQ broker directly for earliest offset - if h.brokerClient != nil { - earliestOffset, err := h.brokerClient.GetEarliestOffset(topic, partition) - if err != nil { - return 0, err - } - return earliestOffset, nil - } - - // No broker client - this shouldn't happen in production - return 0, fmt.Errorf("broker client not available") -} - -// GetLatestOffset returns the latest available offset for a topic partition -// ALWAYS queries SMQ broker directly - no ledger involved -func (h *SeaweedMQHandler) GetLatestOffset(topic string, partition int32) (int64, error) { - // Check if topic exists - if !h.TopicExists(topic) { - return 0, nil // Empty topic - } - - // Check cache first - cacheKey := fmt.Sprintf("%s:%d", topic, partition) - h.hwmCacheMu.RLock() - if entry, exists := h.hwmCache[cacheKey]; exists { - if time.Now().Before(entry.expiresAt) { - // Cache hit - return cached value - h.hwmCacheMu.RUnlock() - glog.V(2).Infof("[HWM] Cache HIT for %s: hwm=%d", cacheKey, entry.value) - return entry.value, nil - } - } - h.hwmCacheMu.RUnlock() - - // Cache miss or expired - query SMQ broker - if h.brokerClient != nil { - glog.V(2).Infof("[HWM] Cache MISS for %s, querying broker...", cacheKey) - latestOffset, err := h.brokerClient.GetHighWaterMark(topic, partition) - if err != nil { - glog.V(1).Infof("[HWM] ERROR querying broker for %s: %v", cacheKey, err) - return 0, err - } - - glog.V(2).Infof("[HWM] Broker returned hwm=%d for %s", latestOffset, cacheKey) - - // Update cache - h.hwmCacheMu.Lock() - h.hwmCache[cacheKey] = &hwmCacheEntry{ - value: latestOffset, - expiresAt: time.Now().Add(h.hwmCacheTTL), - } - h.hwmCacheMu.Unlock() - - return latestOffset, nil - } - - // No broker client - this shouldn't happen in production - return 0, fmt.Errorf("broker client not available") -} - -// WithFilerClient executes a function with a filer client -func (h *SeaweedMQHandler) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - if h.brokerClient == nil { - return fmt.Errorf("no broker client available") - } - return h.brokerClient.WithFilerClient(streamingMode, fn) -} - -// GetFilerAddress returns the filer address used by this handler -func (h *SeaweedMQHandler) GetFilerAddress() string { - if h.brokerClient != nil { - return h.brokerClient.GetFilerAddress() - } - return "" -} - -// ProduceRecord publishes a record to SeaweedMQ and lets SMQ generate the offset -// ctx controls the publish timeout - if client cancels, broker operation is cancelled -func (h *SeaweedMQHandler) ProduceRecord(ctx context.Context, topic string, partition int32, key []byte, value []byte) (int64, error) { - if len(key) > 0 { - } - if len(value) > 0 { - } else { - } - - // Verify topic exists - if !h.TopicExists(topic) { - return 0, fmt.Errorf("topic %s does not exist", topic) - } - - // Get current timestamp - timestamp := time.Now().UnixNano() - - // Publish to SeaweedMQ and let SMQ generate the offset - var smqOffset int64 - var publishErr error - if h.brokerClient == nil { - publishErr = fmt.Errorf("no broker client available") - } else { - smqOffset, publishErr = h.brokerClient.PublishRecord(ctx, topic, partition, key, value, timestamp) - } - - if publishErr != nil { - return 0, fmt.Errorf("failed to publish to SeaweedMQ: %v", publishErr) - } - - // SMQ should have generated and returned the offset - use it directly as the Kafka offset - - // Invalidate HWM cache for this partition to ensure fresh reads - // This is critical for read-your-own-write scenarios (e.g., Schema Registry) - cacheKey := fmt.Sprintf("%s:%d", topic, partition) - h.hwmCacheMu.Lock() - delete(h.hwmCache, cacheKey) - h.hwmCacheMu.Unlock() - - return smqOffset, nil -} - -// ProduceRecordValue produces a record using RecordValue format to SeaweedMQ -// ALWAYS uses broker's assigned offset - no ledger involved -// ctx controls the publish timeout - if client cancels, broker operation is cancelled -func (h *SeaweedMQHandler) ProduceRecordValue(ctx context.Context, topic string, partition int32, key []byte, recordValueBytes []byte) (int64, error) { - // Verify topic exists - if !h.TopicExists(topic) { - return 0, fmt.Errorf("topic %s does not exist", topic) - } - - // Get current timestamp - timestamp := time.Now().UnixNano() - - // Publish RecordValue to SeaweedMQ and get the broker-assigned offset - var smqOffset int64 - var publishErr error - if h.brokerClient == nil { - publishErr = fmt.Errorf("no broker client available") - } else { - smqOffset, publishErr = h.brokerClient.PublishRecordValue(ctx, topic, partition, key, recordValueBytes, timestamp) - } - - if publishErr != nil { - return 0, fmt.Errorf("failed to publish RecordValue to SeaweedMQ: %v", publishErr) - } - - // SMQ broker has assigned the offset - use it directly as the Kafka offset - - // Invalidate HWM cache for this partition to ensure fresh reads - // This is critical for read-your-own-write scenarios (e.g., Schema Registry) - cacheKey := fmt.Sprintf("%s:%d", topic, partition) - h.hwmCacheMu.Lock() - delete(h.hwmCache, cacheKey) - h.hwmCacheMu.Unlock() - - return smqOffset, nil -} - -// Ledger methods removed - SMQ broker handles all offset management directly - -// FetchRecords DEPRECATED - only used in old tests -func (h *SeaweedMQHandler) FetchRecords(topic string, partition int32, fetchOffset int64, maxBytes int32) ([]byte, error) { - // Verify topic exists - if !h.TopicExists(topic) { - return nil, fmt.Errorf("topic %s does not exist", topic) - } - - // DEPRECATED: This function only used in old tests - // Get HWM directly from broker - highWaterMark, err := h.GetLatestOffset(topic, partition) - if err != nil { - return nil, err - } - - // If fetch offset is at or beyond high water mark, no records to return - if fetchOffset >= highWaterMark { - return []byte{}, nil - } - - // Get or create subscriber session for this topic/partition - var seaweedRecords []*SeaweedRecord - - // Calculate how many records to fetch - recordsToFetch := int(highWaterMark - fetchOffset) - if recordsToFetch > 100 { - recordsToFetch = 100 // Limit batch size - } - - // Read records using broker client - if h.brokerClient == nil { - return nil, fmt.Errorf("no broker client available") - } - // Use default consumer group/ID since this is a deprecated function - brokerSubscriber, subErr := h.brokerClient.GetOrCreateSubscriber(topic, partition, fetchOffset, "deprecated-consumer-group", "deprecated-consumer") - if subErr != nil { - return nil, fmt.Errorf("failed to get broker subscriber: %v", subErr) - } - // Use ReadRecordsFromOffset which handles caching and proper locking - seaweedRecords, err = h.brokerClient.ReadRecordsFromOffset(context.Background(), brokerSubscriber, fetchOffset, recordsToFetch) - - if err != nil { - // If no records available, return empty batch instead of error - return []byte{}, nil - } - - // Map SeaweedMQ records to Kafka offsets and update ledger - kafkaRecords, err := h.mapSeaweedToKafkaOffsets(topic, partition, seaweedRecords, fetchOffset) - if err != nil { - return nil, fmt.Errorf("failed to map offsets: %v", err) - } - - // Convert mapped records to Kafka record batch format - return h.convertSeaweedToKafkaRecordBatch(kafkaRecords, fetchOffset, maxBytes) -} - -// mapSeaweedToKafkaOffsets maps SeaweedMQ records to proper Kafka offsets -func (h *SeaweedMQHandler) mapSeaweedToKafkaOffsets(topic string, partition int32, seaweedRecords []*SeaweedRecord, startOffset int64) ([]*SeaweedRecord, error) { - if len(seaweedRecords) == 0 { - return seaweedRecords, nil - } - - // DEPRECATED: This function only used in old tests - // Just map offsets sequentially - mappedRecords := make([]*SeaweedRecord, 0, len(seaweedRecords)) - - for i, seaweedRecord := range seaweedRecords { - currentKafkaOffset := startOffset + int64(i) - - // Create a copy of the record with proper Kafka offset assignment - mappedRecord := &SeaweedRecord{ - Key: seaweedRecord.Key, - Value: seaweedRecord.Value, - Timestamp: seaweedRecord.Timestamp, - Offset: currentKafkaOffset, - } - - // Just skip any error handling since this is deprecated - { - // Log warning but continue processing - } - - mappedRecords = append(mappedRecords, mappedRecord) - } - - return mappedRecords, nil -} - -// convertSeaweedToKafkaRecordBatch converts SeaweedMQ records to Kafka record batch format -func (h *SeaweedMQHandler) convertSeaweedToKafkaRecordBatch(seaweedRecords []*SeaweedRecord, fetchOffset int64, maxBytes int32) ([]byte, error) { - if len(seaweedRecords) == 0 { - return []byte{}, nil - } - - batch := make([]byte, 0, 512) - - // Record batch header - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(fetchOffset)) - batch = append(batch, baseOffsetBytes...) // base offset - - // Batch length (placeholder, will be filled at end) - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - batch = append(batch, 0, 0, 0, 0) // partition leader epoch - batch = append(batch, 2) // magic byte (version 2) - - // CRC placeholder - batch = append(batch, 0, 0, 0, 0) - - // Batch attributes - batch = append(batch, 0, 0) - - // Last offset delta - lastOffsetDelta := uint32(len(seaweedRecords) - 1) - lastOffsetDeltaBytes := make([]byte, 4) - binary.BigEndian.PutUint32(lastOffsetDeltaBytes, lastOffsetDelta) - batch = append(batch, lastOffsetDeltaBytes...) - - // Timestamps - use actual timestamps from SeaweedMQ records - var firstTimestamp, maxTimestamp int64 - if len(seaweedRecords) > 0 { - firstTimestamp = seaweedRecords[0].Timestamp - maxTimestamp = firstTimestamp - for _, record := range seaweedRecords { - if record.Timestamp > maxTimestamp { - maxTimestamp = record.Timestamp - } - } - } - - firstTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(firstTimestampBytes, uint64(firstTimestamp)) - batch = append(batch, firstTimestampBytes...) - - maxTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxTimestampBytes, uint64(maxTimestamp)) - batch = append(batch, maxTimestampBytes...) - - // Producer info (simplified) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) // producer ID (-1) - batch = append(batch, 0xFF, 0xFF) // producer epoch (-1) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) // base sequence (-1) - - // Record count - recordCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(recordCountBytes, uint32(len(seaweedRecords))) - batch = append(batch, recordCountBytes...) - - // Add actual records from SeaweedMQ - for i, seaweedRecord := range seaweedRecords { - record := h.convertSingleSeaweedRecord(seaweedRecord, int64(i), fetchOffset) - recordLength := byte(len(record)) - batch = append(batch, recordLength) - batch = append(batch, record...) - - // Check if we're approaching maxBytes limit - if int32(len(batch)) > maxBytes*3/4 { - // Leave room for remaining headers and stop adding records - break - } - } - - // Fill in the batch length - batchLength := uint32(len(batch) - batchLengthPos - 4) - binary.BigEndian.PutUint32(batch[batchLengthPos:batchLengthPos+4], batchLength) - - return batch, nil -} - -// convertSingleSeaweedRecord converts a single SeaweedMQ record to Kafka format -func (h *SeaweedMQHandler) convertSingleSeaweedRecord(seaweedRecord *SeaweedRecord, index, baseOffset int64) []byte { - record := make([]byte, 0, 64) - - // Record attributes - record = append(record, 0) - - // Timestamp delta (varint - simplified) - timestampDelta := seaweedRecord.Timestamp - baseOffset // Simple delta calculation - if timestampDelta < 0 { - timestampDelta = 0 - } - record = append(record, byte(timestampDelta&0xFF)) // Simplified varint encoding - - // Offset delta (varint - simplified) - record = append(record, byte(index)) - - // Key length and key - if len(seaweedRecord.Key) > 0 { - record = append(record, byte(len(seaweedRecord.Key))) - record = append(record, seaweedRecord.Key...) - } else { - // Null key - record = append(record, 0xFF) - } - - // Value length and value - if len(seaweedRecord.Value) > 0 { - record = append(record, byte(len(seaweedRecord.Value))) - record = append(record, seaweedRecord.Value...) - } else { - // Empty value - record = append(record, 0) - } - - // Headers count (0) - record = append(record, 0) - - return record -} diff --git a/weed/mq/kafka/integration/seaweedmq_handler_test.go b/weed/mq/kafka/integration/seaweedmq_handler_test.go deleted file mode 100644 index d16d8e10f..000000000 --- a/weed/mq/kafka/integration/seaweedmq_handler_test.go +++ /dev/null @@ -1,512 +0,0 @@ -package integration - -import ( - "context" - "testing" - "time" -) - -// Unit tests for new FetchRecords functionality - -// TestSeaweedMQHandler_MapSeaweedToKafkaOffsets tests offset mapping logic -func TestSeaweedMQHandler_MapSeaweedToKafkaOffsets(t *testing.T) { - // Note: This test is now obsolete since the ledger system has been removed - // SMQ now uses native offsets directly, so no mapping is needed - t.Skip("Test obsolete: ledger system removed, SMQ uses native offsets") -} - -// TestSeaweedMQHandler_MapSeaweedToKafkaOffsets_EmptyRecords tests empty record handling -func TestSeaweedMQHandler_MapSeaweedToKafkaOffsets_EmptyRecords(t *testing.T) { - // Note: This test is now obsolete since the ledger system has been removed - t.Skip("Test obsolete: ledger system removed, SMQ uses native offsets") -} - -// TestSeaweedMQHandler_ConvertSeaweedToKafkaRecordBatch tests record batch conversion -func TestSeaweedMQHandler_ConvertSeaweedToKafkaRecordBatch(t *testing.T) { - handler := &SeaweedMQHandler{} - - // Create sample records - seaweedRecords := []*SeaweedRecord{ - { - Key: []byte("batch-key1"), - Value: []byte("batch-value1"), - Timestamp: 1000000000, - Offset: 0, - }, - { - Key: []byte("batch-key2"), - Value: []byte("batch-value2"), - Timestamp: 1000000001, - Offset: 1, - }, - } - - fetchOffset := int64(0) - maxBytes := int32(1024) - - // Test conversion - batchData, err := handler.convertSeaweedToKafkaRecordBatch(seaweedRecords, fetchOffset, maxBytes) - if err != nil { - t.Fatalf("Failed to convert to record batch: %v", err) - } - - if len(batchData) == 0 { - t.Errorf("Record batch should not be empty") - } - - // Basic validation of record batch structure - if len(batchData) < 61 { // Minimum Kafka record batch header size - t.Errorf("Record batch too small: got %d bytes", len(batchData)) - } - - // Verify magic byte (should be 2 for version 2) - magicByte := batchData[16] // Magic byte is at offset 16 - if magicByte != 2 { - t.Errorf("Invalid magic byte: got %d, want 2", magicByte) - } - - t.Logf("Successfully converted %d records to %d byte batch", len(seaweedRecords), len(batchData)) -} - -// TestSeaweedMQHandler_ConvertSeaweedToKafkaRecordBatch_EmptyRecords tests empty batch handling -func TestSeaweedMQHandler_ConvertSeaweedToKafkaRecordBatch_EmptyRecords(t *testing.T) { - handler := &SeaweedMQHandler{} - - batchData, err := handler.convertSeaweedToKafkaRecordBatch([]*SeaweedRecord{}, 0, 1024) - if err != nil { - t.Errorf("Converting empty records should not fail: %v", err) - } - - if len(batchData) != 0 { - t.Errorf("Empty record batch should be empty, got %d bytes", len(batchData)) - } -} - -// TestSeaweedMQHandler_ConvertSingleSeaweedRecord tests individual record conversion -func TestSeaweedMQHandler_ConvertSingleSeaweedRecord(t *testing.T) { - handler := &SeaweedMQHandler{} - - testCases := []struct { - name string - record *SeaweedRecord - index int64 - base int64 - }{ - { - name: "Record with key and value", - record: &SeaweedRecord{ - Key: []byte("test-key"), - Value: []byte("test-value"), - Timestamp: 1000000000, - Offset: 5, - }, - index: 0, - base: 5, - }, - { - name: "Record with null key", - record: &SeaweedRecord{ - Key: nil, - Value: []byte("test-value-no-key"), - Timestamp: 1000000001, - Offset: 6, - }, - index: 1, - base: 5, - }, - { - name: "Record with empty value", - record: &SeaweedRecord{ - Key: []byte("test-key-empty-value"), - Value: []byte{}, - Timestamp: 1000000002, - Offset: 7, - }, - index: 2, - base: 5, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - recordData := handler.convertSingleSeaweedRecord(tc.record, tc.index, tc.base) - - if len(recordData) == 0 { - t.Errorf("Record data should not be empty") - } - - // Basic validation - should have at least attributes, timestamp delta, offset delta, key length, value length, headers count - if len(recordData) < 6 { - t.Errorf("Record data too small: got %d bytes", len(recordData)) - } - - // Verify record structure - pos := 0 - - // Attributes (1 byte) - if recordData[pos] != 0 { - t.Errorf("Expected attributes to be 0, got %d", recordData[pos]) - } - pos++ - - // Timestamp delta (1 byte simplified) - pos++ - - // Offset delta (1 byte simplified) - if recordData[pos] != byte(tc.index) { - t.Errorf("Expected offset delta %d, got %d", tc.index, recordData[pos]) - } - pos++ - - t.Logf("Successfully converted single record: %d bytes", len(recordData)) - }) - } -} - -// Integration tests - -// TestSeaweedMQHandler_Creation tests handler creation and shutdown -func TestSeaweedMQHandler_Creation(t *testing.T) { - // Skip if no real broker available - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - // Test basic operations - topics := handler.ListTopics() - if topics == nil { - t.Errorf("ListTopics returned nil") - } - - t.Logf("SeaweedMQ handler created successfully, found %d existing topics", len(topics)) -} - -// TestSeaweedMQHandler_TopicLifecycle tests topic creation and deletion -func TestSeaweedMQHandler_TopicLifecycle(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - topicName := "lifecycle-test-topic" - - // Initially should not exist - if handler.TopicExists(topicName) { - t.Errorf("Topic %s should not exist initially", topicName) - } - - // Create the topic - err = handler.CreateTopic(topicName, 1) - if err != nil { - t.Fatalf("Failed to create topic: %v", err) - } - - // Now should exist - if !handler.TopicExists(topicName) { - t.Errorf("Topic %s should exist after creation", topicName) - } - - // Get topic info - info, exists := handler.GetTopicInfo(topicName) - if !exists { - t.Errorf("Topic info should exist") - } - - if info.Name != topicName { - t.Errorf("Topic name mismatch: got %s, want %s", info.Name, topicName) - } - - if info.Partitions != 1 { - t.Errorf("Partition count mismatch: got %d, want 1", info.Partitions) - } - - // Try to create again (should fail) - err = handler.CreateTopic(topicName, 1) - if err == nil { - t.Errorf("Creating existing topic should fail") - } - - // Delete the topic - err = handler.DeleteTopic(topicName) - if err != nil { - t.Fatalf("Failed to delete topic: %v", err) - } - - // Should no longer exist - if handler.TopicExists(topicName) { - t.Errorf("Topic %s should not exist after deletion", topicName) - } - - t.Logf("Topic lifecycle test completed successfully") -} - -// TestSeaweedMQHandler_ProduceRecord tests message production -func TestSeaweedMQHandler_ProduceRecord(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - topicName := "produce-test-topic" - - // Create topic - err = handler.CreateTopic(topicName, 1) - if err != nil { - t.Fatalf("Failed to create topic: %v", err) - } - defer handler.DeleteTopic(topicName) - - // Produce a record - key := []byte("produce-key") - value := []byte("produce-value") - - offset, err := handler.ProduceRecord(context.Background(), topicName, 0, key, value) - if err != nil { - t.Fatalf("Failed to produce record: %v", err) - } - - if offset < 0 { - t.Errorf("Invalid offset: %d", offset) - } - - // Check high water mark from broker (ledgers removed - broker handles offset management) - hwm, err := handler.GetLatestOffset(topicName, 0) - if err != nil { - t.Errorf("Failed to get high water mark: %v", err) - } - - if hwm != offset+1 { - t.Errorf("High water mark mismatch: got %d, want %d", hwm, offset+1) - } - - t.Logf("Produced record at offset %d, HWM: %d", offset, hwm) -} - -// TestSeaweedMQHandler_MultiplePartitions tests multiple partition handling -func TestSeaweedMQHandler_MultiplePartitions(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - topicName := "multi-partition-test-topic" - numPartitions := int32(3) - - // Create topic with multiple partitions - err = handler.CreateTopic(topicName, numPartitions) - if err != nil { - t.Fatalf("Failed to create topic: %v", err) - } - defer handler.DeleteTopic(topicName) - - // Produce to different partitions - for partitionID := int32(0); partitionID < numPartitions; partitionID++ { - key := []byte("partition-key") - value := []byte("partition-value") - - offset, err := handler.ProduceRecord(context.Background(), topicName, partitionID, key, value) - if err != nil { - t.Fatalf("Failed to produce to partition %d: %v", partitionID, err) - } - - // Verify offset from broker (ledgers removed - broker handles offset management) - hwm, err := handler.GetLatestOffset(topicName, partitionID) - if err != nil { - t.Errorf("Failed to get high water mark for partition %d: %v", partitionID, err) - } else if hwm <= offset { - t.Errorf("High water mark should be greater than produced offset for partition %d: hwm=%d, offset=%d", partitionID, hwm, offset) - } - - t.Logf("Partition %d: produced at offset %d", partitionID, offset) - } - - t.Logf("Multi-partition test completed successfully") -} - -// TestSeaweedMQHandler_FetchRecords tests record fetching with real SeaweedMQ data -func TestSeaweedMQHandler_FetchRecords(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - topicName := "fetch-test-topic" - - // Create topic - err = handler.CreateTopic(topicName, 1) - if err != nil { - t.Fatalf("Failed to create topic: %v", err) - } - defer handler.DeleteTopic(topicName) - - // Produce some test records with known data - testRecords := []struct { - key string - value string - }{ - {"fetch-key-1", "fetch-value-1"}, - {"fetch-key-2", "fetch-value-2"}, - {"fetch-key-3", "fetch-value-3"}, - } - - var producedOffsets []int64 - for i, record := range testRecords { - offset, err := handler.ProduceRecord(context.Background(), topicName, 0, []byte(record.key), []byte(record.value)) - if err != nil { - t.Fatalf("Failed to produce record %d: %v", i, err) - } - producedOffsets = append(producedOffsets, offset) - t.Logf("Produced record %d at offset %d: key=%s, value=%s", i, offset, record.key, record.value) - } - - // Wait a bit for records to be available in SeaweedMQ - time.Sleep(500 * time.Millisecond) - - // Test fetching from beginning - fetchedBatch, err := handler.FetchRecords(topicName, 0, 0, 2048) - if err != nil { - t.Fatalf("Failed to fetch records: %v", err) - } - - if len(fetchedBatch) == 0 { - t.Errorf("No record data fetched - this indicates the FetchRecords implementation is not working properly") - } else { - t.Logf("Successfully fetched %d bytes of real record batch data", len(fetchedBatch)) - - // Basic validation of Kafka record batch format - if len(fetchedBatch) >= 61 { // Minimum Kafka record batch size - // Check magic byte (at offset 16) - magicByte := fetchedBatch[16] - if magicByte == 2 { - t.Logf("โœ“ Valid Kafka record batch format detected (magic byte = 2)") - } else { - t.Errorf("Invalid Kafka record batch magic byte: got %d, want 2", magicByte) - } - } else { - t.Errorf("Fetched batch too small to be valid Kafka record batch: %d bytes", len(fetchedBatch)) - } - } - - // Test fetching from specific offset - if len(producedOffsets) > 1 { - partialBatch, err := handler.FetchRecords(topicName, 0, producedOffsets[1], 1024) - if err != nil { - t.Fatalf("Failed to fetch from specific offset: %v", err) - } - t.Logf("Fetched %d bytes starting from offset %d", len(partialBatch), producedOffsets[1]) - } - - // Test fetching beyond high water mark (ledgers removed - use broker offset management) - hwm, err := handler.GetLatestOffset(topicName, 0) - if err != nil { - t.Fatalf("Failed to get high water mark: %v", err) - } - - emptyBatch, err := handler.FetchRecords(topicName, 0, hwm, 1024) - if err != nil { - t.Fatalf("Failed to fetch from HWM: %v", err) - } - - if len(emptyBatch) != 0 { - t.Errorf("Should get empty batch beyond HWM, got %d bytes", len(emptyBatch)) - } - - t.Logf("โœ“ Real data fetch test completed successfully - FetchRecords is now working with actual SeaweedMQ data!") -} - -// TestSeaweedMQHandler_FetchRecords_ErrorHandling tests error cases for fetching -func TestSeaweedMQHandler_FetchRecords_ErrorHandling(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - // Test fetching from non-existent topic - _, err = handler.FetchRecords("non-existent-topic", 0, 0, 1024) - if err == nil { - t.Errorf("Fetching from non-existent topic should fail") - } - - // Create topic for partition tests - topicName := "fetch-error-test-topic" - err = handler.CreateTopic(topicName, 1) - if err != nil { - t.Fatalf("Failed to create topic: %v", err) - } - defer handler.DeleteTopic(topicName) - - // Test fetching from non-existent partition (partition 1 when only 0 exists) - batch, err := handler.FetchRecords(topicName, 1, 0, 1024) - // This may or may not fail depending on implementation, but should return empty batch - if err != nil { - t.Logf("Expected behavior: fetching from non-existent partition failed: %v", err) - } else if len(batch) > 0 { - t.Errorf("Fetching from non-existent partition should return empty batch, got %d bytes", len(batch)) - } - - // Test with very small maxBytes - _, err = handler.ProduceRecord(context.Background(), topicName, 0, []byte("key"), []byte("value")) - if err != nil { - t.Fatalf("Failed to produce test record: %v", err) - } - - time.Sleep(100 * time.Millisecond) - - smallBatch, err := handler.FetchRecords(topicName, 0, 0, 1) // Very small maxBytes - if err != nil { - t.Errorf("Fetching with small maxBytes should not fail: %v", err) - } - t.Logf("Fetch with maxBytes=1 returned %d bytes", len(smallBatch)) - - t.Logf("Error handling test completed successfully") -} - -// TestSeaweedMQHandler_ErrorHandling tests error conditions -func TestSeaweedMQHandler_ErrorHandling(t *testing.T) { - t.Skip("Integration test requires real SeaweedMQ Broker - run manually with broker available") - - handler, err := NewSeaweedMQBrokerHandler("localhost:9333", "default", "localhost") - if err != nil { - t.Fatalf("Failed to create SeaweedMQ handler: %v", err) - } - defer handler.Close() - - // Try to produce to non-existent topic - _, err = handler.ProduceRecord(context.Background(), "non-existent-topic", 0, []byte("key"), []byte("value")) - if err == nil { - t.Errorf("Producing to non-existent topic should fail") - } - - // Try to fetch from non-existent topic - _, err = handler.FetchRecords("non-existent-topic", 0, 0, 1024) - if err == nil { - t.Errorf("Fetching from non-existent topic should fail") - } - - // Try to delete non-existent topic - err = handler.DeleteTopic("non-existent-topic") - if err == nil { - t.Errorf("Deleting non-existent topic should fail") - } - - t.Logf("Error handling test completed successfully") -} diff --git a/weed/mq/kafka/integration/seaweedmq_handler_topics.go b/weed/mq/kafka/integration/seaweedmq_handler_topics.go deleted file mode 100644 index b635b40af..000000000 --- a/weed/mq/kafka/integration/seaweedmq_handler_topics.go +++ /dev/null @@ -1,315 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// CreateTopic creates a new topic in both Kafka registry and SeaweedMQ -func (h *SeaweedMQHandler) CreateTopic(name string, partitions int32) error { - return h.CreateTopicWithSchema(name, partitions, nil) -} - -// CreateTopicWithSchema creates a topic with optional value schema -func (h *SeaweedMQHandler) CreateTopicWithSchema(name string, partitions int32, recordType *schema_pb.RecordType) error { - return h.CreateTopicWithSchemas(name, partitions, nil, recordType) -} - -// CreateTopicWithSchemas creates a topic with optional key and value schemas -func (h *SeaweedMQHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error { - // Check if topic already exists in filer - if h.checkTopicInFiler(name) { - return fmt.Errorf("topic %s already exists", name) - } - - // Create SeaweedMQ topic reference - seaweedTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: name, - } - - // Configure topic with SeaweedMQ broker via gRPC - if len(h.brokerAddresses) > 0 { - brokerAddress := h.brokerAddresses[0] // Use first available broker - glog.V(1).Infof("Configuring topic %s with broker %s", name, brokerAddress) - - // Load security configuration for broker connection - util.LoadSecurityConfiguration() - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq") - - err := pb.WithBrokerGrpcClient(false, brokerAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - // Convert dual schemas to flat schema format - var flatSchema *schema_pb.RecordType - var keyColumns []string - if keyRecordType != nil || valueRecordType != nil { - flatSchema, keyColumns = schema.CombineFlatSchemaFromKeyValue(keyRecordType, valueRecordType) - } - - _, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{ - Topic: seaweedTopic, - PartitionCount: partitions, - MessageRecordType: flatSchema, - KeyColumns: keyColumns, - }) - if err != nil { - return fmt.Errorf("configure topic with broker: %w", err) - } - glog.V(1).Infof("successfully configured topic %s with broker", name) - return nil - }) - if err != nil { - return fmt.Errorf("failed to configure topic %s with broker %s: %w", name, brokerAddress, err) - } - } else { - glog.Warningf("No brokers available - creating topic %s in gateway memory only (testing mode)", name) - } - - // Topic is now stored in filer only via SeaweedMQ broker - // No need to create in-memory topic info structure - - // Offset management now handled directly by SMQ broker - no initialization needed - - // Invalidate cache after successful topic creation - h.InvalidateTopicExistsCache(name) - - glog.V(1).Infof("Topic %s created successfully with %d partitions", name, partitions) - return nil -} - -// CreateTopicWithRecordType creates a topic with flat schema and key columns -func (h *SeaweedMQHandler) CreateTopicWithRecordType(name string, partitions int32, flatSchema *schema_pb.RecordType, keyColumns []string) error { - // Check if topic already exists in filer - if h.checkTopicInFiler(name) { - return fmt.Errorf("topic %s already exists", name) - } - - // Create SeaweedMQ topic reference - seaweedTopic := &schema_pb.Topic{ - Namespace: "kafka", - Name: name, - } - - // Configure topic with SeaweedMQ broker via gRPC - if len(h.brokerAddresses) > 0 { - brokerAddress := h.brokerAddresses[0] // Use first available broker - glog.V(1).Infof("Configuring topic %s with broker %s", name, brokerAddress) - - // Load security configuration for broker connection - util.LoadSecurityConfiguration() - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq") - - err := pb.WithBrokerGrpcClient(false, brokerAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - _, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{ - Topic: seaweedTopic, - PartitionCount: partitions, - MessageRecordType: flatSchema, - KeyColumns: keyColumns, - }) - if err != nil { - return fmt.Errorf("failed to configure topic: %w", err) - } - - glog.V(1).Infof("successfully configured topic %s with broker", name) - return nil - }) - - if err != nil { - return err - } - } else { - glog.Warningf("No broker addresses configured, topic %s not created in SeaweedMQ", name) - } - - // Topic is now stored in filer only via SeaweedMQ broker - // No need to create in-memory topic info structure - - glog.V(1).Infof("Topic %s created successfully with %d partitions using flat schema", name, partitions) - return nil -} - -// DeleteTopic removes a topic from both Kafka registry and SeaweedMQ -func (h *SeaweedMQHandler) DeleteTopic(name string) error { - // Check if topic exists in filer - if !h.checkTopicInFiler(name) { - return fmt.Errorf("topic %s does not exist", name) - } - - // Get topic info to determine partition count for cleanup - topicInfo, exists := h.GetTopicInfo(name) - if !exists { - return fmt.Errorf("topic %s info not found", name) - } - - // Close all publisher sessions for this topic - for partitionID := int32(0); partitionID < topicInfo.Partitions; partitionID++ { - if h.brokerClient != nil { - h.brokerClient.ClosePublisher(name, partitionID) - } - } - - // Topic removal from filer would be handled by SeaweedMQ broker - // No in-memory cache to clean up - - // Offset management handled by SMQ broker - no cleanup needed - - return nil -} - -// TopicExists checks if a topic exists in SeaweedMQ broker (includes in-memory topics) -// Uses a 5-second cache to reduce broker queries -func (h *SeaweedMQHandler) TopicExists(name string) bool { - // Check cache first - h.topicExistsCacheMu.RLock() - if entry, found := h.topicExistsCache[name]; found { - if time.Now().Before(entry.expiresAt) { - h.topicExistsCacheMu.RUnlock() - return entry.exists - } - } - h.topicExistsCacheMu.RUnlock() - - // Cache miss or expired - query broker - - var exists bool - // Check via SeaweedMQ broker (includes in-memory topics) - if h.brokerClient != nil { - var err error - exists, err = h.brokerClient.TopicExists(name) - if err != nil { - // Don't cache errors - return false - } - } else { - // Return false if broker is unavailable - return false - } - - // Update cache - h.topicExistsCacheMu.Lock() - h.topicExistsCache[name] = &topicExistsCacheEntry{ - exists: exists, - expiresAt: time.Now().Add(h.topicExistsCacheTTL), - } - h.topicExistsCacheMu.Unlock() - - return exists -} - -// InvalidateTopicExistsCache removes a topic from the existence cache -// Should be called after creating or deleting a topic -func (h *SeaweedMQHandler) InvalidateTopicExistsCache(name string) { - h.topicExistsCacheMu.Lock() - delete(h.topicExistsCache, name) - h.topicExistsCacheMu.Unlock() -} - -// GetTopicInfo returns information about a topic from broker -func (h *SeaweedMQHandler) GetTopicInfo(name string) (*KafkaTopicInfo, bool) { - // Get topic configuration from broker - if h.brokerClient != nil { - config, err := h.brokerClient.GetTopicConfiguration(name) - if err == nil && config != nil { - topicInfo := &KafkaTopicInfo{ - Name: name, - Partitions: config.PartitionCount, - CreatedAt: config.CreatedAtNs, - } - return topicInfo, true - } - glog.V(2).Infof("Failed to get topic configuration for %s from broker: %v", name, err) - } - - // Fallback: check if topic exists in filer (for backward compatibility) - if !h.checkTopicInFiler(name) { - return nil, false - } - - // Return default info if broker query failed but topic exists in filer - topicInfo := &KafkaTopicInfo{ - Name: name, - Partitions: 1, // Default to 1 partition if broker query failed - CreatedAt: 0, - } - - return topicInfo, true -} - -// ListTopics returns all topic names from SeaweedMQ broker (includes in-memory topics) -func (h *SeaweedMQHandler) ListTopics() []string { - // Get topics from SeaweedMQ broker (includes in-memory topics) - if h.brokerClient != nil { - topics, err := h.brokerClient.ListTopics() - if err == nil { - return topics - } - } - - // Return empty list if broker is unavailable - return []string{} -} - -// checkTopicInFiler checks if a topic exists in the filer -func (h *SeaweedMQHandler) checkTopicInFiler(topicName string) bool { - if h.filerClientAccessor == nil { - return false - } - - var exists bool - h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: "/topics/kafka", - Name: topicName, - } - - _, err := client.LookupDirectoryEntry(context.Background(), request) - exists = (err == nil) - return nil // Don't propagate error, just check existence - }) - - return exists -} - -// listTopicsFromFiler lists all topics from the filer -func (h *SeaweedMQHandler) listTopicsFromFiler() []string { - if h.filerClientAccessor == nil { - return []string{} - } - - var topics []string - - h.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.ListEntriesRequest{ - Directory: "/topics/kafka", - } - - stream, err := client.ListEntries(context.Background(), request) - if err != nil { - return nil // Don't propagate error, just return empty list - } - - for { - resp, err := stream.Recv() - if err != nil { - break // End of stream or error - } - - if resp.Entry != nil && resp.Entry.IsDirectory { - topics = append(topics, resp.Entry.Name) - } else if resp.Entry != nil { - } - } - return nil - }) - - return topics -} diff --git a/weed/mq/kafka/integration/seaweedmq_handler_utils.go b/weed/mq/kafka/integration/seaweedmq_handler_utils.go deleted file mode 100644 index 843b72280..000000000 --- a/weed/mq/kafka/integration/seaweedmq_handler_utils.go +++ /dev/null @@ -1,217 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/wdclient" -) - -// NewSeaweedMQBrokerHandler creates a new handler with SeaweedMQ broker integration -func NewSeaweedMQBrokerHandler(masters string, filerGroup string, clientHost string) (*SeaweedMQHandler, error) { - if masters == "" { - return nil, fmt.Errorf("masters required - SeaweedMQ infrastructure must be configured") - } - - // Parse master addresses using SeaweedFS utilities - masterServerAddresses := pb.ServerAddresses(masters).ToAddresses() - if len(masterServerAddresses) == 0 { - return nil, fmt.Errorf("no valid master addresses provided") - } - - // Load security configuration for gRPC connections - util.LoadSecurityConfiguration() - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq") - masterDiscovery := pb.ServerAddresses(masters).ToServiceDiscovery() - - // Use provided client host for proper gRPC connection - // This is critical for MasterClient to establish streaming connections - clientHostAddr := pb.ServerAddress(clientHost) - - masterClient := wdclient.NewMasterClient(grpcDialOption, filerGroup, "kafka-gateway", clientHostAddr, "", "", *masterDiscovery) - - glog.V(1).Infof("Created MasterClient with clientHost=%s, masters=%s", clientHost, masters) - - // Start KeepConnectedToMaster in background to maintain connection - glog.V(1).Infof("Starting KeepConnectedToMaster background goroutine...") - ctx, cancel := context.WithCancel(context.Background()) - go func() { - defer cancel() - masterClient.KeepConnectedToMaster(ctx) - }() - - // Give the connection a moment to establish - time.Sleep(2 * time.Second) - glog.V(1).Infof("Initial connection delay completed") - - // Discover brokers from masters using master client - glog.V(1).Infof("About to call discoverBrokersWithMasterClient...") - brokerAddresses, err := discoverBrokersWithMasterClient(masterClient, filerGroup) - if err != nil { - glog.Errorf("Broker discovery failed: %v", err) - return nil, fmt.Errorf("failed to discover brokers: %v", err) - } - glog.V(1).Infof("Broker discovery returned: %v", brokerAddresses) - - if len(brokerAddresses) == 0 { - return nil, fmt.Errorf("no brokers discovered from masters") - } - - // Discover filers from masters using master client - filerAddresses, err := discoverFilersWithMasterClient(masterClient, filerGroup) - if err != nil { - return nil, fmt.Errorf("failed to discover filers: %v", err) - } - - // Create shared filer client accessor for all components - sharedFilerAccessor := filer_client.NewFilerClientAccessor( - filerAddresses, - grpcDialOption, - ) - - // For now, use the first broker (can be enhanced later for load balancing) - brokerAddress := brokerAddresses[0] - - // Create broker client with shared filer accessor - brokerClient, err := NewBrokerClientWithFilerAccessor(brokerAddress, sharedFilerAccessor) - if err != nil { - return nil, fmt.Errorf("failed to create broker client: %v", err) - } - - // Test the connection - if err := brokerClient.HealthCheck(); err != nil { - brokerClient.Close() - return nil, fmt.Errorf("broker health check failed: %v", err) - } - - return &SeaweedMQHandler{ - filerClientAccessor: sharedFilerAccessor, - brokerClient: brokerClient, - masterClient: masterClient, - // topics map removed - always read from filer directly - // ledgers removed - SMQ broker handles all offset management - brokerAddresses: brokerAddresses, // Store all discovered broker addresses - hwmCache: make(map[string]*hwmCacheEntry), - hwmCacheTTL: 100 * time.Millisecond, // 100ms cache TTL for fresh HWM reads (critical for Schema Registry) - topicExistsCache: make(map[string]*topicExistsCacheEntry), - topicExistsCacheTTL: 5 * time.Second, // 5 second cache TTL for topic existence - }, nil -} - -// discoverBrokersWithMasterClient queries masters for available brokers using reusable master client -func discoverBrokersWithMasterClient(masterClient *wdclient.MasterClient, filerGroup string) ([]string, error) { - var brokers []string - - err := masterClient.WithClient(false, func(client master_pb.SeaweedClient) error { - glog.V(1).Infof("Inside MasterClient.WithClient callback - client obtained successfully") - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.BrokerType, - FilerGroup: filerGroup, - Limit: 1000, - }) - if err != nil { - return err - } - - glog.V(1).Infof("list cluster nodes successful - found %d cluster nodes", len(resp.ClusterNodes)) - - // Extract broker addresses from response - for _, node := range resp.ClusterNodes { - if node.Address != "" { - brokers = append(brokers, node.Address) - glog.V(1).Infof("discovered broker: %s", node.Address) - } - } - - return nil - }) - - if err != nil { - glog.Errorf("MasterClient.WithClient failed: %v", err) - } else { - glog.V(1).Infof("Broker discovery completed successfully - found %d brokers: %v", len(brokers), brokers) - } - - return brokers, err -} - -// discoverFilersWithMasterClient queries masters for available filers using reusable master client -func discoverFilersWithMasterClient(masterClient *wdclient.MasterClient, filerGroup string) ([]pb.ServerAddress, error) { - var filers []pb.ServerAddress - - err := masterClient.WithClient(false, func(client master_pb.SeaweedClient) error { - resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - FilerGroup: filerGroup, - Limit: 1000, - }) - if err != nil { - return err - } - - // Extract filer addresses from response - return as HTTP addresses (pb.ServerAddress) - for _, node := range resp.ClusterNodes { - if node.Address != "" { - // Return HTTP address as pb.ServerAddress (no pre-conversion to gRPC) - httpAddr := pb.ServerAddress(node.Address) - filers = append(filers, httpAddr) - } - } - - return nil - }) - - return filers, err -} - -// GetFilerClientAccessor returns the shared filer client accessor -func (h *SeaweedMQHandler) GetFilerClientAccessor() *filer_client.FilerClientAccessor { - return h.filerClientAccessor -} - -// SetProtocolHandler sets the protocol handler reference for accessing connection context -func (h *SeaweedMQHandler) SetProtocolHandler(handler ProtocolHandler) { - h.protocolHandler = handler -} - -// GetBrokerAddresses returns the discovered SMQ broker addresses -func (h *SeaweedMQHandler) GetBrokerAddresses() []string { - return h.brokerAddresses -} - -// Close shuts down the handler and all connections -func (h *SeaweedMQHandler) Close() error { - if h.brokerClient != nil { - return h.brokerClient.Close() - } - return nil -} - -// CreatePerConnectionBrokerClient creates a new BrokerClient instance for a specific connection -// CRITICAL: Each Kafka TCP connection gets its own BrokerClient to prevent gRPC stream interference -// This fixes the deadlock where CreateFreshSubscriber would block all connections -func (h *SeaweedMQHandler) CreatePerConnectionBrokerClient() (*BrokerClient, error) { - // Use the same broker addresses as the shared client - if len(h.brokerAddresses) == 0 { - return nil, fmt.Errorf("no broker addresses available") - } - - // Use the first broker address (in production, could use load balancing) - brokerAddress := h.brokerAddresses[0] - - // Create a new client with the shared filer accessor - client, err := NewBrokerClientWithFilerAccessor(brokerAddress, h.filerClientAccessor) - if err != nil { - return nil, fmt.Errorf("failed to create broker client: %w", err) - } - - return client, nil -} diff --git a/weed/mq/kafka/integration/test_helper.go b/weed/mq/kafka/integration/test_helper.go deleted file mode 100644 index 7d1a9fb0d..000000000 --- a/weed/mq/kafka/integration/test_helper.go +++ /dev/null @@ -1,62 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestSeaweedMQHandler wraps SeaweedMQHandler for testing -type TestSeaweedMQHandler struct { - handler *SeaweedMQHandler - t *testing.T -} - -// NewTestSeaweedMQHandler creates a new test handler with in-memory storage -func NewTestSeaweedMQHandler(t *testing.T) *TestSeaweedMQHandler { - // For now, return a stub implementation - // Full implementation will be added when needed - return &TestSeaweedMQHandler{ - handler: nil, - t: t, - } -} - -// ProduceMessage produces a message to a topic partition -func (h *TestSeaweedMQHandler) ProduceMessage(ctx context.Context, topic, partition string, record *schema_pb.RecordValue, key []byte) error { - // This will be implemented to use the handler's produce logic - // For now, return a placeholder - return fmt.Errorf("ProduceMessage not yet implemented") -} - -// CommitOffset commits an offset for a consumer group -func (h *TestSeaweedMQHandler) CommitOffset(ctx context.Context, consumerGroup string, topic string, partition int32, offset int64, metadata string) error { - // This will be implemented to use the handler's offset commit logic - return fmt.Errorf("CommitOffset not yet implemented") -} - -// FetchOffset fetches the committed offset for a consumer group -func (h *TestSeaweedMQHandler) FetchOffset(ctx context.Context, consumerGroup string, topic string, partition int32) (int64, string, error) { - // This will be implemented to use the handler's offset fetch logic - return -1, "", fmt.Errorf("FetchOffset not yet implemented") -} - -// FetchMessages fetches messages from a topic partition starting at an offset -func (h *TestSeaweedMQHandler) FetchMessages(ctx context.Context, topic string, partition int32, startOffset int64, maxBytes int32) ([]*Message, error) { - // This will be implemented to use the handler's fetch logic - return nil, fmt.Errorf("FetchMessages not yet implemented") -} - -// Cleanup cleans up test resources -func (h *TestSeaweedMQHandler) Cleanup() { - // Cleanup resources when implemented -} - -// Message represents a fetched message -type Message struct { - Offset int64 - Key []byte - Value []byte -} diff --git a/weed/mq/kafka/integration/types.go b/weed/mq/kafka/integration/types.go deleted file mode 100644 index d707045e6..000000000 --- a/weed/mq/kafka/integration/types.go +++ /dev/null @@ -1,240 +0,0 @@ -package integration - -import ( - "context" - "fmt" - "sync" - "time" - - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" -) - -// SMQRecord interface for records from SeaweedMQ -type SMQRecord interface { - GetKey() []byte - GetValue() []byte - GetTimestamp() int64 - GetOffset() int64 -} - -// hwmCacheEntry represents a cached high water mark value -type hwmCacheEntry struct { - value int64 - expiresAt time.Time -} - -// topicExistsCacheEntry represents a cached topic existence check -type topicExistsCacheEntry struct { - exists bool - expiresAt time.Time -} - -// SeaweedMQHandler integrates Kafka protocol handlers with real SeaweedMQ storage -type SeaweedMQHandler struct { - // Shared filer client accessor for all components - filerClientAccessor *filer_client.FilerClientAccessor - - brokerClient *BrokerClient // For broker-based connections - - // Master client for service discovery - masterClient *wdclient.MasterClient - - // Discovered broker addresses (for Metadata responses) - brokerAddresses []string - - // Reference to protocol handler for accessing connection context - protocolHandler ProtocolHandler - - // High water mark cache to reduce broker queries - hwmCache map[string]*hwmCacheEntry // key: "topic:partition" - hwmCacheMu sync.RWMutex - hwmCacheTTL time.Duration - - // Topic existence cache to reduce broker queries - topicExistsCache map[string]*topicExistsCacheEntry // key: "topic" - topicExistsCacheMu sync.RWMutex - topicExistsCacheTTL time.Duration -} - -// ConnectionContext holds connection-specific information for requests -// This is a local copy to avoid circular dependency with protocol package -type ConnectionContext struct { - ClientID string // Kafka client ID from request headers - ConsumerGroup string // Consumer group (set by JoinGroup) - MemberID string // Consumer group member ID (set by JoinGroup) - BrokerClient interface{} // Per-connection broker client (*BrokerClient) -} - -// ProtocolHandler interface for accessing Handler's connection context -type ProtocolHandler interface { - GetConnectionContext() *ConnectionContext -} - -// KafkaTopicInfo holds Kafka-specific topic information -type KafkaTopicInfo struct { - Name string - Partitions int32 - CreatedAt int64 - - // SeaweedMQ integration - SeaweedTopic *schema_pb.Topic -} - -// TopicPartitionKey uniquely identifies a topic partition -type TopicPartitionKey struct { - Topic string - Partition int32 -} - -// SeaweedRecord represents a record received from SeaweedMQ -type SeaweedRecord struct { - Key []byte - Value []byte - Timestamp int64 - Offset int64 -} - -// PartitionRangeInfo contains comprehensive range information for a partition -type PartitionRangeInfo struct { - // Offset range information - EarliestOffset int64 - LatestOffset int64 - HighWaterMark int64 - - // Timestamp range information - EarliestTimestampNs int64 - LatestTimestampNs int64 - - // Partition metadata - RecordCount int64 - ActiveSubscriptions int64 -} - -// SeaweedSMQRecord implements the SMQRecord interface for SeaweedMQ records -type SeaweedSMQRecord struct { - key []byte - value []byte - timestamp int64 - offset int64 -} - -// GetKey returns the record key -func (r *SeaweedSMQRecord) GetKey() []byte { - return r.key -} - -// GetValue returns the record value -func (r *SeaweedSMQRecord) GetValue() []byte { - return r.value -} - -// GetTimestamp returns the record timestamp -func (r *SeaweedSMQRecord) GetTimestamp() int64 { - return r.timestamp -} - -// GetOffset returns the Kafka offset for this record -func (r *SeaweedSMQRecord) GetOffset() int64 { - return r.offset -} - -// BrokerClient wraps the SeaweedMQ Broker gRPC client for Kafka gateway integration -// FetchRequest tracks an in-flight fetch request with multiple waiters -type FetchRequest struct { - topic string - partition int32 - offset int64 - resultChan chan FetchResult // Single channel for the fetch result - waiters []chan FetchResult // Multiple waiters can subscribe - mu sync.Mutex - inProgress bool -} - -// FetchResult contains the result of a fetch operation -type FetchResult struct { - records []*SeaweedRecord - err error -} - -// partitionAssignmentCacheEntry caches LookupTopicBrokers results -type partitionAssignmentCacheEntry struct { - assignments []*mq_pb.BrokerPartitionAssignment - expiresAt time.Time -} - -type BrokerClient struct { - // Reference to shared filer client accessor - filerClientAccessor *filer_client.FilerClientAccessor - - brokerAddress string - conn *grpc.ClientConn - client mq_pb.SeaweedMessagingClient - - // Publisher streams: topic-partition -> stream info - publishersLock sync.RWMutex - publishers map[string]*BrokerPublisherSession - - // Publisher creation locks to prevent concurrent creation attempts for the same topic-partition - publisherCreationLocks map[string]*sync.Mutex - - // Subscriber streams for offset tracking - subscribersLock sync.RWMutex - subscribers map[string]*BrokerSubscriberSession - - // Request deduplication for stateless fetches - fetchRequestsLock sync.Mutex - fetchRequests map[string]*FetchRequest - - // Partition assignment cache to reduce LookupTopicBrokers calls (13.5% CPU overhead!) - partitionAssignmentCache map[string]*partitionAssignmentCacheEntry // Key: topic name - partitionAssignmentCacheMu sync.RWMutex - partitionAssignmentCacheTTL time.Duration - - ctx context.Context - cancel context.CancelFunc -} - -// BrokerPublisherSession tracks a publishing stream to SeaweedMQ broker -type BrokerPublisherSession struct { - Topic string - Partition int32 - Stream mq_pb.SeaweedMessaging_PublishMessageClient - mu sync.Mutex // Protects Send/Recv pairs from concurrent access -} - -// BrokerSubscriberSession tracks a subscription stream for offset management -type BrokerSubscriberSession struct { - Topic string - Partition int32 - Stream mq_pb.SeaweedMessaging_SubscribeMessageClient - // Track the requested start offset used to initialize this stream - StartOffset int64 - // Consumer group identity for this session - ConsumerGroup string - ConsumerID string - // Context for canceling reads (used for timeout) - Ctx context.Context - Cancel context.CancelFunc - // Mutex to serialize all operations on this session - mu sync.Mutex - // Cache of consumed records to avoid re-reading from broker - consumedRecords []*SeaweedRecord - nextOffsetToRead int64 - // Track what has actually been READ from the stream (not what was requested) - // This is the HIGHEST offset that has been read from the stream - // Used to determine if we need to seek or can continue reading - lastReadOffset int64 - // Flag to indicate if this session has been initialized - initialized bool -} - -// Key generates a unique key for this subscriber session -// Includes consumer group and ID to prevent different consumers from sharing sessions -func (s *BrokerSubscriberSession) Key() string { - return fmt.Sprintf("%s-%d-%s-%s", s.Topic, s.Partition, s.ConsumerGroup, s.ConsumerID) -} diff --git a/weed/mq/kafka/package.go b/weed/mq/kafka/package.go deleted file mode 100644 index 01743a12b..000000000 --- a/weed/mq/kafka/package.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package kafka provides Kafka protocol implementation for SeaweedFS MQ -package kafka - -// This file exists to make the kafka package valid. -// The actual implementation is in the subdirectories: -// - integration/: SeaweedMQ integration layer -// - protocol/: Kafka protocol handlers -// - gateway/: Kafka Gateway server -// - offset/: Offset management -// - schema/: Schema registry integration -// - consumer/: Consumer group coordination - - diff --git a/weed/mq/kafka/partition_mapping.go b/weed/mq/kafka/partition_mapping.go deleted file mode 100644 index 697e67386..000000000 --- a/weed/mq/kafka/partition_mapping.go +++ /dev/null @@ -1,55 +0,0 @@ -package kafka - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// Convenience functions for partition mapping used by production code -// The full PartitionMapper implementation is in partition_mapping_test.go for testing - -// MapKafkaPartitionToSMQRange maps a Kafka partition to SeaweedMQ ring range -func MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) { - // Use a range size that divides evenly into MaxPartitionCount (2520) - // Range size 35 gives us exactly 72 Kafka partitions: 2520 / 35 = 72 - rangeSize := int32(35) - rangeStart = kafkaPartition * rangeSize - rangeStop = rangeStart + rangeSize - 1 - return rangeStart, rangeStop -} - -// CreateSMQPartition creates a SeaweedMQ partition from a Kafka partition -func CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition { - rangeStart, rangeStop := MapKafkaPartitionToSMQRange(kafkaPartition) - - return &schema_pb.Partition{ - RingSize: pub_balancer.MaxPartitionCount, - RangeStart: rangeStart, - RangeStop: rangeStop, - UnixTimeNs: unixTimeNs, - } -} - -// ExtractKafkaPartitionFromSMQRange extracts the Kafka partition from SeaweedMQ range -func ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 { - rangeSize := int32(35) - return rangeStart / rangeSize -} - -// ValidateKafkaPartition validates that a Kafka partition is within supported range -func ValidateKafkaPartition(kafkaPartition int32) bool { - maxPartitions := int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions - return kafkaPartition >= 0 && kafkaPartition < maxPartitions -} - -// GetRangeSize returns the range size used for partition mapping -func GetRangeSize() int32 { - return 35 -} - -// GetMaxKafkaPartitions returns the maximum number of Kafka partitions supported -func GetMaxKafkaPartitions() int32 { - return int32(pub_balancer.MaxPartitionCount) / 35 // 72 partitions -} - - diff --git a/weed/mq/kafka/partition_mapping_test.go b/weed/mq/kafka/partition_mapping_test.go deleted file mode 100644 index 6f41a68d4..000000000 --- a/weed/mq/kafka/partition_mapping_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package kafka - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// PartitionMapper provides consistent Kafka partition to SeaweedMQ ring mapping -// NOTE: This is test-only code and not used in the actual Kafka Gateway implementation -type PartitionMapper struct{} - -// NewPartitionMapper creates a new partition mapper -func NewPartitionMapper() *PartitionMapper { - return &PartitionMapper{} -} - -// GetRangeSize returns the consistent range size for Kafka partition mapping -// This ensures all components use the same calculation -func (pm *PartitionMapper) GetRangeSize() int32 { - // Use a range size that divides evenly into MaxPartitionCount (2520) - // Range size 35 gives us exactly 72 Kafka partitions: 2520 / 35 = 72 - // This provides a good balance between partition granularity and ring utilization - return 35 -} - -// GetMaxKafkaPartitions returns the maximum number of Kafka partitions supported -func (pm *PartitionMapper) GetMaxKafkaPartitions() int32 { - // With range size 35, we can support: 2520 / 35 = 72 Kafka partitions - return int32(pub_balancer.MaxPartitionCount) / pm.GetRangeSize() -} - -// MapKafkaPartitionToSMQRange maps a Kafka partition to SeaweedMQ ring range -func (pm *PartitionMapper) MapKafkaPartitionToSMQRange(kafkaPartition int32) (rangeStart, rangeStop int32) { - rangeSize := pm.GetRangeSize() - rangeStart = kafkaPartition * rangeSize - rangeStop = rangeStart + rangeSize - 1 - return rangeStart, rangeStop -} - -// CreateSMQPartition creates a SeaweedMQ partition from a Kafka partition -func (pm *PartitionMapper) CreateSMQPartition(kafkaPartition int32, unixTimeNs int64) *schema_pb.Partition { - rangeStart, rangeStop := pm.MapKafkaPartitionToSMQRange(kafkaPartition) - - return &schema_pb.Partition{ - RingSize: pub_balancer.MaxPartitionCount, - RangeStart: rangeStart, - RangeStop: rangeStop, - UnixTimeNs: unixTimeNs, - } -} - -// ExtractKafkaPartitionFromSMQRange extracts the Kafka partition from SeaweedMQ range -func (pm *PartitionMapper) ExtractKafkaPartitionFromSMQRange(rangeStart int32) int32 { - rangeSize := pm.GetRangeSize() - return rangeStart / rangeSize -} - -// ValidateKafkaPartition validates that a Kafka partition is within supported range -func (pm *PartitionMapper) ValidateKafkaPartition(kafkaPartition int32) bool { - return kafkaPartition >= 0 && kafkaPartition < pm.GetMaxKafkaPartitions() -} - -// GetPartitionMappingInfo returns debug information about the partition mapping -func (pm *PartitionMapper) GetPartitionMappingInfo() map[string]interface{} { - return map[string]interface{}{ - "ring_size": pub_balancer.MaxPartitionCount, - "range_size": pm.GetRangeSize(), - "max_kafka_partitions": pm.GetMaxKafkaPartitions(), - "ring_utilization": float64(pm.GetMaxKafkaPartitions()*pm.GetRangeSize()) / float64(pub_balancer.MaxPartitionCount), - } -} - -// Global instance for consistent usage across the test codebase -var DefaultPartitionMapper = NewPartitionMapper() - -func TestPartitionMapper_GetRangeSize(t *testing.T) { - mapper := NewPartitionMapper() - rangeSize := mapper.GetRangeSize() - - if rangeSize != 35 { - t.Errorf("Expected range size 35, got %d", rangeSize) - } - - // Verify that the range size divides evenly into available partitions - maxPartitions := mapper.GetMaxKafkaPartitions() - totalUsed := maxPartitions * rangeSize - - if totalUsed > int32(pub_balancer.MaxPartitionCount) { - t.Errorf("Total used slots (%d) exceeds MaxPartitionCount (%d)", totalUsed, pub_balancer.MaxPartitionCount) - } - - t.Logf("Range size: %d, Max Kafka partitions: %d, Ring utilization: %.2f%%", - rangeSize, maxPartitions, float64(totalUsed)/float64(pub_balancer.MaxPartitionCount)*100) -} - -func TestPartitionMapper_MapKafkaPartitionToSMQRange(t *testing.T) { - mapper := NewPartitionMapper() - - tests := []struct { - kafkaPartition int32 - expectedStart int32 - expectedStop int32 - }{ - {0, 0, 34}, - {1, 35, 69}, - {2, 70, 104}, - {10, 350, 384}, - } - - for _, tt := range tests { - t.Run("", func(t *testing.T) { - start, stop := mapper.MapKafkaPartitionToSMQRange(tt.kafkaPartition) - - if start != tt.expectedStart { - t.Errorf("Kafka partition %d: expected start %d, got %d", tt.kafkaPartition, tt.expectedStart, start) - } - - if stop != tt.expectedStop { - t.Errorf("Kafka partition %d: expected stop %d, got %d", tt.kafkaPartition, tt.expectedStop, stop) - } - - // Verify range size is consistent - rangeSize := stop - start + 1 - if rangeSize != mapper.GetRangeSize() { - t.Errorf("Inconsistent range size: expected %d, got %d", mapper.GetRangeSize(), rangeSize) - } - }) - } -} - -func TestPartitionMapper_ExtractKafkaPartitionFromSMQRange(t *testing.T) { - mapper := NewPartitionMapper() - - tests := []struct { - rangeStart int32 - expectedKafka int32 - }{ - {0, 0}, - {35, 1}, - {70, 2}, - {350, 10}, - } - - for _, tt := range tests { - t.Run("", func(t *testing.T) { - kafkaPartition := mapper.ExtractKafkaPartitionFromSMQRange(tt.rangeStart) - - if kafkaPartition != tt.expectedKafka { - t.Errorf("Range start %d: expected Kafka partition %d, got %d", - tt.rangeStart, tt.expectedKafka, kafkaPartition) - } - }) - } -} - -func TestPartitionMapper_RoundTrip(t *testing.T) { - mapper := NewPartitionMapper() - - // Test round-trip conversion for all valid Kafka partitions - maxPartitions := mapper.GetMaxKafkaPartitions() - - for kafkaPartition := int32(0); kafkaPartition < maxPartitions; kafkaPartition++ { - // Kafka -> SMQ -> Kafka - rangeStart, rangeStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition) - extractedKafka := mapper.ExtractKafkaPartitionFromSMQRange(rangeStart) - - if extractedKafka != kafkaPartition { - t.Errorf("Round-trip failed for partition %d: got %d", kafkaPartition, extractedKafka) - } - - // Verify no overlap with next partition - if kafkaPartition < maxPartitions-1 { - nextStart, _ := mapper.MapKafkaPartitionToSMQRange(kafkaPartition + 1) - if rangeStop >= nextStart { - t.Errorf("Partition %d range [%d,%d] overlaps with partition %d start %d", - kafkaPartition, rangeStart, rangeStop, kafkaPartition+1, nextStart) - } - } - } -} - -func TestPartitionMapper_CreateSMQPartition(t *testing.T) { - mapper := NewPartitionMapper() - - kafkaPartition := int32(5) - unixTimeNs := time.Now().UnixNano() - - partition := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs) - - if partition.RingSize != pub_balancer.MaxPartitionCount { - t.Errorf("Expected ring size %d, got %d", pub_balancer.MaxPartitionCount, partition.RingSize) - } - - expectedStart, expectedStop := mapper.MapKafkaPartitionToSMQRange(kafkaPartition) - if partition.RangeStart != expectedStart { - t.Errorf("Expected range start %d, got %d", expectedStart, partition.RangeStart) - } - - if partition.RangeStop != expectedStop { - t.Errorf("Expected range stop %d, got %d", expectedStop, partition.RangeStop) - } - - if partition.UnixTimeNs != unixTimeNs { - t.Errorf("Expected timestamp %d, got %d", unixTimeNs, partition.UnixTimeNs) - } -} - -func TestPartitionMapper_ValidateKafkaPartition(t *testing.T) { - mapper := NewPartitionMapper() - - tests := []struct { - partition int32 - valid bool - }{ - {-1, false}, - {0, true}, - {1, true}, - {mapper.GetMaxKafkaPartitions() - 1, true}, - {mapper.GetMaxKafkaPartitions(), false}, - {1000, false}, - } - - for _, tt := range tests { - t.Run("", func(t *testing.T) { - valid := mapper.ValidateKafkaPartition(tt.partition) - if valid != tt.valid { - t.Errorf("Partition %d: expected valid=%v, got %v", tt.partition, tt.valid, valid) - } - }) - } -} - -func TestPartitionMapper_ConsistencyWithGlobalFunctions(t *testing.T) { - mapper := NewPartitionMapper() - - kafkaPartition := int32(7) - unixTimeNs := time.Now().UnixNano() - - // Test that global functions produce same results as mapper methods - start1, stop1 := mapper.MapKafkaPartitionToSMQRange(kafkaPartition) - start2, stop2 := MapKafkaPartitionToSMQRange(kafkaPartition) - - if start1 != start2 || stop1 != stop2 { - t.Errorf("Global function inconsistent: mapper=(%d,%d), global=(%d,%d)", - start1, stop1, start2, stop2) - } - - partition1 := mapper.CreateSMQPartition(kafkaPartition, unixTimeNs) - partition2 := CreateSMQPartition(kafkaPartition, unixTimeNs) - - if partition1.RangeStart != partition2.RangeStart || partition1.RangeStop != partition2.RangeStop { - t.Errorf("Global CreateSMQPartition inconsistent") - } - - extracted1 := mapper.ExtractKafkaPartitionFromSMQRange(start1) - extracted2 := ExtractKafkaPartitionFromSMQRange(start1) - - if extracted1 != extracted2 { - t.Errorf("Global ExtractKafkaPartitionFromSMQRange inconsistent: %d vs %d", extracted1, extracted2) - } -} - -func TestPartitionMapper_GetPartitionMappingInfo(t *testing.T) { - mapper := NewPartitionMapper() - - info := mapper.GetPartitionMappingInfo() - - // Verify all expected keys are present - expectedKeys := []string{"ring_size", "range_size", "max_kafka_partitions", "ring_utilization"} - for _, key := range expectedKeys { - if _, exists := info[key]; !exists { - t.Errorf("Missing key in mapping info: %s", key) - } - } - - // Verify values are reasonable - if info["ring_size"].(int) != pub_balancer.MaxPartitionCount { - t.Errorf("Incorrect ring_size in info") - } - - if info["range_size"].(int32) != mapper.GetRangeSize() { - t.Errorf("Incorrect range_size in info") - } - - utilization := info["ring_utilization"].(float64) - if utilization <= 0 || utilization > 1 { - t.Errorf("Invalid ring utilization: %f", utilization) - } - - t.Logf("Partition mapping info: %+v", info) -} diff --git a/weed/mq/kafka/protocol/batch_crc_compat_test.go b/weed/mq/kafka/protocol/batch_crc_compat_test.go deleted file mode 100644 index a6410beb7..000000000 --- a/weed/mq/kafka/protocol/batch_crc_compat_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package protocol - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" -) - -// TestBatchConstruction tests that our batch construction produces valid CRC -func TestBatchConstruction(t *testing.T) { - // Create test data - key := []byte("test-key") - value := []byte("test-value") - timestamp := time.Now() - - // Build batch using our implementation - batch := constructTestBatch(0, timestamp, key, value) - - t.Logf("Batch size: %d bytes", len(batch)) - t.Logf("Batch hex:\n%s", hexDumpTest(batch)) - - // Extract and verify CRC - if len(batch) < 21 { - t.Fatalf("Batch too short: %d bytes", len(batch)) - } - - storedCRC := binary.BigEndian.Uint32(batch[17:21]) - t.Logf("Stored CRC: 0x%08x", storedCRC) - - // Recalculate CRC from the data - crcData := batch[21:] - calculatedCRC := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - t.Logf("Calculated CRC: 0x%08x (over %d bytes)", calculatedCRC, len(crcData)) - - if storedCRC != calculatedCRC { - t.Errorf("CRC mismatch: stored=0x%08x calculated=0x%08x", storedCRC, calculatedCRC) - - // Debug: show what bytes the CRC is calculated over - t.Logf("CRC data (first 100 bytes):") - dumpSize := 100 - if len(crcData) < dumpSize { - dumpSize = len(crcData) - } - for i := 0; i < dumpSize; i += 16 { - end := i + 16 - if end > dumpSize { - end = dumpSize - } - t.Logf(" %04d: %x", i, crcData[i:end]) - } - } else { - t.Log("CRC verification PASSED") - } - - // Verify batch structure - t.Log("\n=== Batch Structure ===") - verifyField(t, "Base Offset", batch[0:8], binary.BigEndian.Uint64(batch[0:8])) - verifyField(t, "Batch Length", batch[8:12], binary.BigEndian.Uint32(batch[8:12])) - verifyField(t, "Leader Epoch", batch[12:16], int32(binary.BigEndian.Uint32(batch[12:16]))) - verifyField(t, "Magic", batch[16:17], batch[16]) - verifyField(t, "CRC", batch[17:21], binary.BigEndian.Uint32(batch[17:21])) - verifyField(t, "Attributes", batch[21:23], binary.BigEndian.Uint16(batch[21:23])) - verifyField(t, "Last Offset Delta", batch[23:27], binary.BigEndian.Uint32(batch[23:27])) - verifyField(t, "Base Timestamp", batch[27:35], binary.BigEndian.Uint64(batch[27:35])) - verifyField(t, "Max Timestamp", batch[35:43], binary.BigEndian.Uint64(batch[35:43])) - verifyField(t, "Record Count", batch[57:61], binary.BigEndian.Uint32(batch[57:61])) - - // Verify the batch length field is correct - expectedBatchLength := uint32(len(batch) - 12) - actualBatchLength := binary.BigEndian.Uint32(batch[8:12]) - if expectedBatchLength != actualBatchLength { - t.Errorf("Batch length mismatch: expected=%d actual=%d", expectedBatchLength, actualBatchLength) - } else { - t.Logf("Batch length correct: %d", actualBatchLength) - } -} - -// TestMultipleRecordsBatch tests batch construction with multiple records -func TestMultipleRecordsBatch(t *testing.T) { - timestamp := time.Now() - - // We can't easily test multiple records without the full implementation - // So let's test that our single record batch matches expected structure - - batch1 := constructTestBatch(0, timestamp, []byte("key1"), []byte("value1")) - batch2 := constructTestBatch(1, timestamp, []byte("key2"), []byte("value2")) - - t.Logf("Batch 1 size: %d, CRC: 0x%08x", len(batch1), binary.BigEndian.Uint32(batch1[17:21])) - t.Logf("Batch 2 size: %d, CRC: 0x%08x", len(batch2), binary.BigEndian.Uint32(batch2[17:21])) - - // Verify both batches have valid CRCs - for i, batch := range [][]byte{batch1, batch2} { - storedCRC := binary.BigEndian.Uint32(batch[17:21]) - calculatedCRC := crc32.Checksum(batch[21:], crc32.MakeTable(crc32.Castagnoli)) - - if storedCRC != calculatedCRC { - t.Errorf("Batch %d CRC mismatch: stored=0x%08x calculated=0x%08x", i+1, storedCRC, calculatedCRC) - } else { - t.Logf("Batch %d CRC valid", i+1) - } - } -} - -// TestVarintEncoding tests our varint encoding implementation -func TestVarintEncoding(t *testing.T) { - testCases := []struct { - value int64 - expected []byte - }{ - {0, []byte{0x00}}, - {1, []byte{0x02}}, - {-1, []byte{0x01}}, - {5, []byte{0x0a}}, - {-5, []byte{0x09}}, - {127, []byte{0xfe, 0x01}}, - {128, []byte{0x80, 0x02}}, - {-127, []byte{0xfd, 0x01}}, - {-128, []byte{0xff, 0x01}}, - } - - for _, tc := range testCases { - result := encodeVarint(tc.value) - if !bytes.Equal(result, tc.expected) { - t.Errorf("encodeVarint(%d) = %x, expected %x", tc.value, result, tc.expected) - } else { - t.Logf("encodeVarint(%d) = %x", tc.value, result) - } - } -} - -// constructTestBatch builds a batch using our implementation -func constructTestBatch(baseOffset int64, timestamp time.Time, key, value []byte) []byte { - batch := make([]byte, 0, 256) - - // Base offset (0-7) - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) - - // Batch length placeholder (8-11) - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Partition leader epoch (12-15) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Magic (16) - batch = append(batch, 0x02) - - // CRC placeholder (17-20) - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (21-22) - batch = append(batch, 0, 0) - - // Last offset delta (23-26) - batch = append(batch, 0, 0, 0, 0) - - // Base timestamp (27-34) - timestampMs := timestamp.UnixMilli() - timestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(timestampBytes, uint64(timestampMs)) - batch = append(batch, timestampBytes...) - - // Max timestamp (35-42) - batch = append(batch, timestampBytes...) - - // Producer ID (43-50) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer epoch (51-52) - batch = append(batch, 0xFF, 0xFF) - - // Base sequence (53-56) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Record count (57-60) - recordCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(recordCountBytes, 1) - batch = append(batch, recordCountBytes...) - - // Build record (61+) - recordBody := []byte{} - - // Attributes - recordBody = append(recordBody, 0) - - // Timestamp delta - recordBody = append(recordBody, encodeVarint(0)...) - - // Offset delta - recordBody = append(recordBody, encodeVarint(0)...) - - // Key length and key - if key == nil { - recordBody = append(recordBody, encodeVarint(-1)...) - } else { - recordBody = append(recordBody, encodeVarint(int64(len(key)))...) - recordBody = append(recordBody, key...) - } - - // Value length and value - if value == nil { - recordBody = append(recordBody, encodeVarint(-1)...) - } else { - recordBody = append(recordBody, encodeVarint(int64(len(value)))...) - recordBody = append(recordBody, value...) - } - - // Headers count - recordBody = append(recordBody, encodeVarint(0)...) - - // Prepend record length - recordLength := int64(len(recordBody)) - batch = append(batch, encodeVarint(recordLength)...) - batch = append(batch, recordBody...) - - // Fill in batch length - batchLength := uint32(len(batch) - 12) - binary.BigEndian.PutUint32(batch[batchLengthPos:], batchLength) - - // Calculate CRC - crcData := batch[21:] - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:], crc) - - return batch -} - -// verifyField logs a field's value -func verifyField(t *testing.T, name string, bytes []byte, value interface{}) { - t.Logf(" %s: %x (value: %v)", name, bytes, value) -} - -// hexDump formats bytes as hex dump -func hexDumpTest(data []byte) string { - var buf bytes.Buffer - for i := 0; i < len(data); i += 16 { - end := i + 16 - if end > len(data) { - end = len(data) - } - buf.WriteString(fmt.Sprintf(" %04d: %x\n", i, data[i:end])) - } - return buf.String() -} - -// TestClientSideCRCValidation mimics what a Kafka client does -func TestClientSideCRCValidation(t *testing.T) { - // Build a batch - batch := constructTestBatch(0, time.Now(), []byte("test-key"), []byte("test-value")) - - t.Logf("Constructed batch: %d bytes", len(batch)) - - // Now pretend we're a Kafka client receiving this batch - // Step 1: Read the batch header to get the CRC - if len(batch) < 21 { - t.Fatalf("Batch too short for client to read CRC") - } - - clientReadCRC := binary.BigEndian.Uint32(batch[17:21]) - t.Logf("Client read CRC from header: 0x%08x", clientReadCRC) - - // Step 2: Calculate CRC over the data (from byte 21 onwards) - clientCalculatedCRC := crc32.Checksum(batch[21:], crc32.MakeTable(crc32.Castagnoli)) - t.Logf("Client calculated CRC: 0x%08x", clientCalculatedCRC) - - // Step 3: Compare - if clientReadCRC != clientCalculatedCRC { - t.Errorf("CLIENT WOULD REJECT: CRC mismatch: read=0x%08x calculated=0x%08x", - clientReadCRC, clientCalculatedCRC) - t.Log("This is the error consumers are seeing!") - } else { - t.Log("CLIENT WOULD ACCEPT: CRC valid") - } -} - -// TestConcurrentBatchConstruction tests if there are race conditions -func TestConcurrentBatchConstruction(t *testing.T) { - timestamp := time.Now() - - // Build multiple batches concurrently - const numBatches = 10 - results := make(chan bool, numBatches) - - for i := 0; i < numBatches; i++ { - go func(id int) { - batch := constructTestBatch(int64(id), timestamp, - []byte(fmt.Sprintf("key-%d", id)), - []byte(fmt.Sprintf("value-%d", id))) - - // Validate CRC - storedCRC := binary.BigEndian.Uint32(batch[17:21]) - calculatedCRC := crc32.Checksum(batch[21:], crc32.MakeTable(crc32.Castagnoli)) - - results <- (storedCRC == calculatedCRC) - }(i) - } - - // Check all results - allValid := true - for i := 0; i < numBatches; i++ { - if !<-results { - allValid = false - t.Errorf("Batch %d has invalid CRC", i) - } - } - - if allValid { - t.Logf("All %d concurrent batches have valid CRCs", numBatches) - } -} - -// TestProductionBatchConstruction tests the actual production code -func TestProductionBatchConstruction(t *testing.T) { - // Create a mock SMQ record - mockRecord := &mockSMQRecord{ - key: []byte("prod-key"), - value: []byte("prod-value"), - timestamp: time.Now().UnixNano(), - } - - // Create a mock handler - mockHandler := &Handler{} - - // Create fetcher - fetcher := NewMultiBatchFetcher(mockHandler) - - // Construct batch using production code - batch := fetcher.constructSingleRecordBatch("test-topic", 0, []integration.SMQRecord{mockRecord}) - - t.Logf("Production batch size: %d bytes", len(batch)) - - // Validate CRC - if len(batch) < 21 { - t.Fatalf("Production batch too short: %d bytes", len(batch)) - } - - storedCRC := binary.BigEndian.Uint32(batch[17:21]) - calculatedCRC := crc32.Checksum(batch[21:], crc32.MakeTable(crc32.Castagnoli)) - - t.Logf("Production batch CRC: stored=0x%08x calculated=0x%08x", storedCRC, calculatedCRC) - - if storedCRC != calculatedCRC { - t.Errorf("PRODUCTION CODE CRC INVALID: stored=0x%08x calculated=0x%08x", storedCRC, calculatedCRC) - t.Log("This means the production constructSingleRecordBatch has a bug!") - } else { - t.Log("PRODUCTION CODE CRC VALID") - } -} - -// mockSMQRecord implements the SMQRecord interface for testing -type mockSMQRecord struct { - key []byte - value []byte - timestamp int64 -} - -func (m *mockSMQRecord) GetKey() []byte { return m.key } -func (m *mockSMQRecord) GetValue() []byte { return m.value } -func (m *mockSMQRecord) GetTimestamp() int64 { return m.timestamp } -func (m *mockSMQRecord) GetOffset() int64 { return 0 } diff --git a/weed/mq/kafka/protocol/consumer_coordination.go b/weed/mq/kafka/protocol/consumer_coordination.go deleted file mode 100644 index dafc8c033..000000000 --- a/weed/mq/kafka/protocol/consumer_coordination.go +++ /dev/null @@ -1,553 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer" -) - -// Heartbeat API (key 12) - Consumer group heartbeat -// Consumers send periodic heartbeats to stay in the group and receive rebalancing signals - -// HeartbeatRequest represents a Heartbeat request from a Kafka client -type HeartbeatRequest struct { - GroupID string - GenerationID int32 - MemberID string - GroupInstanceID string // Optional static membership ID -} - -// HeartbeatResponse represents a Heartbeat response to a Kafka client -type HeartbeatResponse struct { - CorrelationID uint32 - ErrorCode int16 -} - -// LeaveGroup API (key 13) - Consumer graceful departure -// Consumers call this when shutting down to trigger immediate rebalancing - -// LeaveGroupRequest represents a LeaveGroup request from a Kafka client -type LeaveGroupRequest struct { - GroupID string - MemberID string - GroupInstanceID string // Optional static membership ID - Members []LeaveGroupMember // For newer versions, can leave multiple members -} - -// LeaveGroupMember represents a member leaving the group (for batch departures) -type LeaveGroupMember struct { - MemberID string - GroupInstanceID string - Reason string // Optional reason for leaving -} - -// LeaveGroupResponse represents a LeaveGroup response to a Kafka client -type LeaveGroupResponse struct { - CorrelationID uint32 - ErrorCode int16 - Members []LeaveGroupMemberResponse // Per-member responses for newer versions -} - -// LeaveGroupMemberResponse represents per-member leave group response -type LeaveGroupMemberResponse struct { - MemberID string - GroupInstanceID string - ErrorCode int16 -} - -// Error codes specific to consumer coordination are imported from errors.go - -func (h *Handler) handleHeartbeat(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse Heartbeat request - request, err := h.parseHeartbeatRequest(requestBody, apiVersion) - if err != nil { - return h.buildHeartbeatErrorResponseV(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Validate request - if request.GroupID == "" || request.MemberID == "" { - return h.buildHeartbeatErrorResponseV(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Get consumer group - group := h.groupCoordinator.GetGroup(request.GroupID) - if group == nil { - return h.buildHeartbeatErrorResponseV(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - group.Mu.Lock() - defer group.Mu.Unlock() - - // Update group's last activity - group.LastActivity = time.Now() - - // Validate member exists - member, exists := group.Members[request.MemberID] - if !exists { - return h.buildHeartbeatErrorResponseV(correlationID, ErrorCodeUnknownMemberID, apiVersion), nil - } - - // Validate generation - if request.GenerationID != group.Generation { - return h.buildHeartbeatErrorResponseV(correlationID, ErrorCodeIllegalGeneration, apiVersion), nil - } - - // Update member's last heartbeat - member.LastHeartbeat = time.Now() - - // Check if rebalancing is in progress - var errorCode int16 = ErrorCodeNone - switch group.State { - case consumer.GroupStatePreparingRebalance, consumer.GroupStateCompletingRebalance: - // Signal the consumer that rebalancing is happening - errorCode = ErrorCodeRebalanceInProgress - case consumer.GroupStateDead: - errorCode = ErrorCodeInvalidGroupID - case consumer.GroupStateEmpty: - // This shouldn't happen if member exists, but handle gracefully - errorCode = ErrorCodeUnknownMemberID - case consumer.GroupStateStable: - // Normal case - heartbeat accepted - errorCode = ErrorCodeNone - } - - // Build successful response - response := HeartbeatResponse{ - CorrelationID: correlationID, - ErrorCode: errorCode, - } - - return h.buildHeartbeatResponseV(response, apiVersion), nil -} - -func (h *Handler) handleLeaveGroup(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse LeaveGroup request - request, err := h.parseLeaveGroupRequest(requestBody) - if err != nil { - return h.buildLeaveGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Validate request - if request.GroupID == "" || request.MemberID == "" { - return h.buildLeaveGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Get consumer group - group := h.groupCoordinator.GetGroup(request.GroupID) - if group == nil { - return h.buildLeaveGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - group.Mu.Lock() - defer group.Mu.Unlock() - - // Update group's last activity - group.LastActivity = time.Now() - - // Validate member exists - member, exists := group.Members[request.MemberID] - if !exists { - return h.buildLeaveGroupErrorResponse(correlationID, ErrorCodeUnknownMemberID, apiVersion), nil - } - - // For static members, only remove if GroupInstanceID matches or is not provided - if h.groupCoordinator.IsStaticMember(member) { - if request.GroupInstanceID != "" && *member.GroupInstanceID != request.GroupInstanceID { - return h.buildLeaveGroupErrorResponse(correlationID, ErrorCodeFencedInstanceID, apiVersion), nil - } - // Unregister static member - h.groupCoordinator.UnregisterStaticMemberLocked(group, *member.GroupInstanceID) - } - - // Remove the member from the group - delete(group.Members, request.MemberID) - - // Update group state based on remaining members - if len(group.Members) == 0 { - // Group becomes empty - group.State = consumer.GroupStateEmpty - group.Generation++ - group.Leader = "" - } else { - // Trigger rebalancing for remaining members - group.State = consumer.GroupStatePreparingRebalance - group.Generation++ - - // If the leaving member was the leader, select a new leader - if group.Leader == request.MemberID { - // Select first remaining member as new leader - for memberID := range group.Members { - group.Leader = memberID - break - } - } - - // Mark remaining members as pending to trigger rebalancing - for _, member := range group.Members { - member.State = consumer.MemberStatePending - } - } - - // Update group's subscribed topics (may have changed with member leaving) - h.updateGroupSubscriptionFromMembers(group) - - // Build successful response - response := LeaveGroupResponse{ - CorrelationID: correlationID, - ErrorCode: ErrorCodeNone, - Members: []LeaveGroupMemberResponse{ - { - MemberID: request.MemberID, - GroupInstanceID: request.GroupInstanceID, - ErrorCode: ErrorCodeNone, - }, - }, - } - - return h.buildLeaveGroupResponse(response, apiVersion), nil -} - -func (h *Handler) parseHeartbeatRequest(data []byte, apiVersion uint16) (*HeartbeatRequest, error) { - if len(data) < 8 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - isFlexible := IsFlexibleVersion(12, apiVersion) // Heartbeat API key = 12 - - // ADMINCLIENT COMPATIBILITY FIX: Parse top-level tagged fields at the beginning for flexible versions - if isFlexible { - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err == nil { - offset += consumed - } - } - - // Parse GroupID - var groupID string - if isFlexible { - // FLEXIBLE V4+ FIX: GroupID is a compact string - groupIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid group ID compact string") - } - if groupIDBytes != nil { - groupID = string(groupIDBytes) - } - offset += consumed - } else { - // Non-flexible parsing (v0-v3) - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID = string(data[offset : offset+groupIDLength]) - offset += groupIDLength - } - - // Generation ID (4 bytes) - always fixed-length - if offset+4 > len(data) { - return nil, fmt.Errorf("missing generation ID") - } - generationID := int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - // Parse MemberID - var memberID string - if isFlexible { - // FLEXIBLE V4+ FIX: MemberID is a compact string - memberIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid member ID compact string") - } - if memberIDBytes != nil { - memberID = string(memberIDBytes) - } - offset += consumed - } else { - // Non-flexible parsing (v0-v3) - if offset+2 > len(data) { - return nil, fmt.Errorf("missing member ID length") - } - memberIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+memberIDLength > len(data) { - return nil, fmt.Errorf("invalid member ID length") - } - memberID = string(data[offset : offset+memberIDLength]) - offset += memberIDLength - } - - // Parse GroupInstanceID (nullable string) - for Heartbeat v1+ - var groupInstanceID string - if apiVersion >= 1 { - if isFlexible { - // FLEXIBLE V4+ FIX: GroupInstanceID is a compact nullable string - groupInstanceIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 && len(data) > offset && data[offset] == 0x00 { - groupInstanceID = "" // null - offset += 1 - } else { - if groupInstanceIDBytes != nil { - groupInstanceID = string(groupInstanceIDBytes) - } - offset += consumed - } - } else { - // Non-flexible v1-v3: regular nullable string - if offset+2 <= len(data) { - instanceIDLength := int16(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if instanceIDLength == -1 { - groupInstanceID = "" // null string - } else if instanceIDLength >= 0 && offset+int(instanceIDLength) <= len(data) { - groupInstanceID = string(data[offset : offset+int(instanceIDLength)]) - offset += int(instanceIDLength) - } - } - } - } - - // Parse request-level tagged fields (v4+) - if isFlexible { - if offset < len(data) { - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err == nil { - offset += consumed - } - } - } - - return &HeartbeatRequest{ - GroupID: groupID, - GenerationID: generationID, - MemberID: memberID, - GroupInstanceID: groupInstanceID, - }, nil -} - -func (h *Handler) parseLeaveGroupRequest(data []byte) (*LeaveGroupRequest, error) { - if len(data) < 4 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - - // GroupID (string) - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID := string(data[offset : offset+groupIDLength]) - offset += groupIDLength - - // MemberID (string) - if offset+2 > len(data) { - return nil, fmt.Errorf("missing member ID length") - } - memberIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+memberIDLength > len(data) { - return nil, fmt.Errorf("invalid member ID length") - } - memberID := string(data[offset : offset+memberIDLength]) - offset += memberIDLength - - // GroupInstanceID (string, v3+) - optional field - var groupInstanceID string - if offset+2 <= len(data) { - instanceIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if instanceIDLength != 0xFFFF && offset+instanceIDLength <= len(data) { - groupInstanceID = string(data[offset : offset+instanceIDLength]) - } - } - - return &LeaveGroupRequest{ - GroupID: groupID, - MemberID: memberID, - GroupInstanceID: groupInstanceID, - Members: []LeaveGroupMember{}, // Would parse members array for batch operations - }, nil -} - -func (h *Handler) buildHeartbeatResponse(response HeartbeatResponse) []byte { - result := make([]byte, 0, 12) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - // Throttle time (4 bytes, 0 = no throttling) - result = append(result, 0, 0, 0, 0) - - return result -} - -func (h *Handler) buildHeartbeatResponseV(response HeartbeatResponse, apiVersion uint16) []byte { - isFlexible := IsFlexibleVersion(12, apiVersion) // Heartbeat API key = 12 - result := make([]byte, 0, 16) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - if isFlexible { - // FLEXIBLE V4+ FORMAT - // NOTE: Response header tagged fields are handled by writeResponseWithHeader - // Do NOT include them in the response body - - // Throttle time (4 bytes, 0 = no throttling) - comes first in flexible format - result = append(result, 0, 0, 0, 0) - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - // Response body tagged fields (varint: 0x00 = empty) - result = append(result, 0x00) - } else if apiVersion >= 1 { - // NON-FLEXIBLE V1-V3 FORMAT: throttle_time_ms BEFORE error_code - // CRITICAL FIX: Kafka protocol specifies throttle_time_ms comes FIRST in v1+ - - // Throttle time (4 bytes, 0 = no throttling) - comes first in v1-v3 - result = append(result, 0, 0, 0, 0) - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - } else { - // V0 FORMAT: Only error_code, NO throttle_time_ms - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - } - - return result -} - -func (h *Handler) buildLeaveGroupResponse(response LeaveGroupResponse, apiVersion uint16) []byte { - // LeaveGroup v0 only includes correlation_id and error_code (no throttle_time_ms, no members) - if apiVersion == 0 { - return h.buildLeaveGroupV0Response(response) - } - - // For v1+ use the full response format - return h.buildLeaveGroupFullResponse(response) -} - -func (h *Handler) buildLeaveGroupV0Response(response LeaveGroupResponse) []byte { - result := make([]byte, 0, 6) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Error code (2 bytes) - that's it for v0! - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - return result -} - -func (h *Handler) buildLeaveGroupFullResponse(response LeaveGroupResponse) []byte { - estimatedSize := 16 - for _, member := range response.Members { - estimatedSize += len(member.MemberID) + len(member.GroupInstanceID) + 8 - } - - result := make([]byte, 0, estimatedSize) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // For LeaveGroup v1+, throttle_time_ms comes first (4 bytes) - result = append(result, 0, 0, 0, 0) - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - // Members array length (4 bytes) - membersLengthBytes := make([]byte, 4) - binary.BigEndian.PutUint32(membersLengthBytes, uint32(len(response.Members))) - result = append(result, membersLengthBytes...) - - // Members - for _, member := range response.Members { - // Member ID length (2 bytes) - memberIDLength := make([]byte, 2) - binary.BigEndian.PutUint16(memberIDLength, uint16(len(member.MemberID))) - result = append(result, memberIDLength...) - - // Member ID - result = append(result, []byte(member.MemberID)...) - - // Group instance ID length (2 bytes) - instanceIDLength := make([]byte, 2) - binary.BigEndian.PutUint16(instanceIDLength, uint16(len(member.GroupInstanceID))) - result = append(result, instanceIDLength...) - - // Group instance ID - if len(member.GroupInstanceID) > 0 { - result = append(result, []byte(member.GroupInstanceID)...) - } - - // Error code (2 bytes) - memberErrorBytes := make([]byte, 2) - binary.BigEndian.PutUint16(memberErrorBytes, uint16(member.ErrorCode)) - result = append(result, memberErrorBytes...) - } - - return result -} - -func (h *Handler) buildHeartbeatErrorResponse(correlationID uint32, errorCode int16) []byte { - response := HeartbeatResponse{ - CorrelationID: correlationID, - ErrorCode: errorCode, - } - - return h.buildHeartbeatResponse(response) -} - -func (h *Handler) buildHeartbeatErrorResponseV(correlationID uint32, errorCode int16, apiVersion uint16) []byte { - response := HeartbeatResponse{ - CorrelationID: correlationID, - ErrorCode: errorCode, - } - - return h.buildHeartbeatResponseV(response, apiVersion) -} - -func (h *Handler) buildLeaveGroupErrorResponse(correlationID uint32, errorCode int16, apiVersion uint16) []byte { - response := LeaveGroupResponse{ - CorrelationID: correlationID, - ErrorCode: errorCode, - Members: []LeaveGroupMemberResponse{}, - } - - return h.buildLeaveGroupResponse(response, apiVersion) -} - -func (h *Handler) updateGroupSubscriptionFromMembers(group *consumer.ConsumerGroup) { - // Update group's subscribed topics from remaining members - group.SubscribedTopics = make(map[string]bool) - for _, member := range group.Members { - for _, topic := range member.Subscription { - group.SubscribedTopics[topic] = true - } - } -} diff --git a/weed/mq/kafka/protocol/consumer_group_metadata.go b/weed/mq/kafka/protocol/consumer_group_metadata.go deleted file mode 100644 index 1c934238f..000000000 --- a/weed/mq/kafka/protocol/consumer_group_metadata.go +++ /dev/null @@ -1,278 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" - "net" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer" -) - -// ConsumerProtocolMetadata represents parsed consumer protocol metadata -type ConsumerProtocolMetadata struct { - Version int16 // Protocol metadata version - Topics []string // Subscribed topic names - UserData []byte // Optional user data - AssignmentStrategy string // Preferred assignment strategy -} - -// ConnectionContext holds connection-specific information for requests -type ConnectionContext struct { - RemoteAddr net.Addr // Client's remote address - LocalAddr net.Addr // Server's local address - ConnectionID string // Connection identifier - ClientID string // Kafka client ID from request headers - ConsumerGroup string // Consumer group (set by JoinGroup) - MemberID string // Consumer group member ID (set by JoinGroup) - // Per-connection broker client for isolated gRPC streams - // Each Kafka connection MUST have its own gRPC streams to avoid interference - // when multiple consumers or requests are active on different connections - BrokerClient interface{} // Will be set to *integration.BrokerClient - - // Persistent partition readers - one goroutine per topic-partition that maintains position - // and streams forward, eliminating repeated offset lookups and reducing broker CPU load - partitionReaders sync.Map // map[TopicPartitionKey]*partitionReader -} - -// ExtractClientHost extracts the client hostname/IP from connection context -func ExtractClientHost(connCtx *ConnectionContext) string { - if connCtx == nil || connCtx.RemoteAddr == nil { - return "unknown" - } - - // Extract host portion from address - if tcpAddr, ok := connCtx.RemoteAddr.(*net.TCPAddr); ok { - return tcpAddr.IP.String() - } - - // Fallback: parse string representation - addrStr := connCtx.RemoteAddr.String() - if host, _, err := net.SplitHostPort(addrStr); err == nil { - return host - } - - // Last resort: return full address - return addrStr -} - -// ParseConsumerProtocolMetadata parses consumer protocol metadata with enhanced error handling -func ParseConsumerProtocolMetadata(metadata []byte, strategyName string) (*ConsumerProtocolMetadata, error) { - if len(metadata) < 2 { - return &ConsumerProtocolMetadata{ - Version: 0, - Topics: []string{}, - UserData: []byte{}, - AssignmentStrategy: strategyName, - }, nil - } - - result := &ConsumerProtocolMetadata{ - AssignmentStrategy: strategyName, - } - - offset := 0 - - // Parse version (2 bytes) - if len(metadata) < offset+2 { - return nil, fmt.Errorf("metadata too short for version field") - } - result.Version = int16(binary.BigEndian.Uint16(metadata[offset : offset+2])) - offset += 2 - - // Parse topics array - if len(metadata) < offset+4 { - return nil, fmt.Errorf("metadata too short for topics count") - } - topicsCount := binary.BigEndian.Uint32(metadata[offset : offset+4]) - offset += 4 - - // Validate topics count (reasonable limit) - if topicsCount > 10000 { - return nil, fmt.Errorf("unreasonable topics count: %d", topicsCount) - } - - result.Topics = make([]string, 0, topicsCount) - - for i := uint32(0); i < topicsCount && offset < len(metadata); i++ { - // Parse topic name length - if len(metadata) < offset+2 { - return nil, fmt.Errorf("metadata too short for topic %d name length", i) - } - topicNameLength := binary.BigEndian.Uint16(metadata[offset : offset+2]) - offset += 2 - - // Validate topic name length - if topicNameLength > 1000 { - return nil, fmt.Errorf("unreasonable topic name length: %d", topicNameLength) - } - - if len(metadata) < offset+int(topicNameLength) { - return nil, fmt.Errorf("metadata too short for topic %d name data", i) - } - - topicName := string(metadata[offset : offset+int(topicNameLength)]) - offset += int(topicNameLength) - - // Validate topic name (basic validation) - if len(topicName) == 0 { - continue // Skip empty topic names - } - - result.Topics = append(result.Topics, topicName) - } - - // Parse user data if remaining bytes exist - if len(metadata) >= offset+4 { - userDataLength := binary.BigEndian.Uint32(metadata[offset : offset+4]) - offset += 4 - - // Handle -1 (0xFFFFFFFF) as null/empty user data (Kafka protocol convention) - if userDataLength == 0xFFFFFFFF { - result.UserData = []byte{} - return result, nil - } - - // Validate user data length - if userDataLength > 100000 { // 100KB limit - return nil, fmt.Errorf("unreasonable user data length: %d", userDataLength) - } - - if len(metadata) >= offset+int(userDataLength) { - result.UserData = make([]byte, userDataLength) - copy(result.UserData, metadata[offset:offset+int(userDataLength)]) - } - } - - return result, nil -} - -// ValidateAssignmentStrategy checks if an assignment strategy is supported -func ValidateAssignmentStrategy(strategy string) bool { - supportedStrategies := map[string]bool{ - consumer.ProtocolNameRange: true, - consumer.ProtocolNameRoundRobin: true, - consumer.ProtocolNameSticky: true, - consumer.ProtocolNameCooperativeSticky: true, // Incremental cooperative rebalancing (Kafka 2.4+) - } - - return supportedStrategies[strategy] -} - -// ExtractTopicsFromMetadata extracts topic list from protocol metadata with fallback -func ExtractTopicsFromMetadata(protocols []GroupProtocol, fallbackTopics []string) []string { - for _, protocol := range protocols { - if ValidateAssignmentStrategy(protocol.Name) { - parsed, err := ParseConsumerProtocolMetadata(protocol.Metadata, protocol.Name) - if err != nil { - continue - } - - if len(parsed.Topics) > 0 { - return parsed.Topics - } - } - } - - // Fallback to provided topics or empty list - if len(fallbackTopics) > 0 { - return fallbackTopics - } - - // Return empty slice if no topics found - consumer may be using pattern subscription - return []string{} -} - -// SelectBestProtocol chooses the best assignment protocol from available options -func SelectBestProtocol(protocols []GroupProtocol, groupProtocols []string) string { - // Priority order: sticky > roundrobin > range - protocolPriority := []string{consumer.ProtocolNameSticky, consumer.ProtocolNameRoundRobin, consumer.ProtocolNameRange} - - // Find supported protocols in client's list - clientProtocols := make(map[string]bool) - for _, protocol := range protocols { - if ValidateAssignmentStrategy(protocol.Name) { - clientProtocols[protocol.Name] = true - } - } - - // Find supported protocols in group's list - groupProtocolSet := make(map[string]bool) - for _, protocol := range groupProtocols { - groupProtocolSet[protocol] = true - } - - // Select highest priority protocol that both client and group support - for _, preferred := range protocolPriority { - if clientProtocols[preferred] && (len(groupProtocols) == 0 || groupProtocolSet[preferred]) { - return preferred - } - } - - // If group has existing protocols, find a protocol supported by both client and group - if len(groupProtocols) > 0 { - // Try to find a protocol that both client and group support - for _, preferred := range protocolPriority { - if clientProtocols[preferred] && groupProtocolSet[preferred] { - return preferred - } - } - - // No common protocol found - handle special fallback case - // If client supports nothing we validate, but group supports "range", use "range" - if len(clientProtocols) == 0 && groupProtocolSet[consumer.ProtocolNameRange] { - return consumer.ProtocolNameRange - } - - // Return empty string to indicate no compatible protocol found - return "" - } - - // Fallback to first supported protocol from client (only when group has no existing protocols) - for _, protocol := range protocols { - if ValidateAssignmentStrategy(protocol.Name) { - return protocol.Name - } - } - - // Last resort - return consumer.ProtocolNameRange -} - -// ProtocolMetadataDebugInfo returns debug information about protocol metadata -type ProtocolMetadataDebugInfo struct { - Strategy string - Version int16 - TopicCount int - Topics []string - UserDataSize int - ParsedOK bool - ParseError string -} - -// AnalyzeProtocolMetadata provides detailed debug information about protocol metadata -func AnalyzeProtocolMetadata(protocols []GroupProtocol) []ProtocolMetadataDebugInfo { - result := make([]ProtocolMetadataDebugInfo, 0, len(protocols)) - - for _, protocol := range protocols { - info := ProtocolMetadataDebugInfo{ - Strategy: protocol.Name, - } - - parsed, err := ParseConsumerProtocolMetadata(protocol.Metadata, protocol.Name) - if err != nil { - info.ParsedOK = false - info.ParseError = err.Error() - } else { - info.ParsedOK = true - info.Version = parsed.Version - info.TopicCount = len(parsed.Topics) - info.Topics = parsed.Topics - info.UserDataSize = len(parsed.UserData) - } - - result = append(result, info) - } - - return result -} diff --git a/weed/mq/kafka/protocol/describe_cluster.go b/weed/mq/kafka/protocol/describe_cluster.go deleted file mode 100644 index af622de3c..000000000 --- a/weed/mq/kafka/protocol/describe_cluster.go +++ /dev/null @@ -1,114 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" -) - -// handleDescribeCluster implements the DescribeCluster API (key 60, versions 0-1) -// This API is used by Java AdminClient for broker discovery (KIP-919) -// Response format (flexible, all versions): -// -// ThrottleTimeMs(int32) + ErrorCode(int16) + ErrorMessage(compact nullable string) + -// [v1+: EndpointType(int8)] + ClusterId(compact string) + ControllerId(int32) + -// Brokers(compact array) + ClusterAuthorizedOperations(int32) + TaggedFields -func (h *Handler) handleDescribeCluster(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse request fields (all flexible format) - offset := 0 - - // IncludeClusterAuthorizedOperations (bool - 1 byte) - if offset >= len(requestBody) { - return nil, fmt.Errorf("incomplete DescribeCluster request") - } - includeAuthorizedOps := requestBody[offset] != 0 - offset++ - - // EndpointType (int8, v1+) - var endpointType int8 = 1 // Default: brokers - if apiVersion >= 1 { - if offset >= len(requestBody) { - return nil, fmt.Errorf("incomplete DescribeCluster v1+ request") - } - endpointType = int8(requestBody[offset]) - offset++ - } - - // Tagged fields at end of request - // (We don't parse them, just skip) - - - // Build response - response := make([]byte, 0, 256) - - // ThrottleTimeMs (int32) - response = append(response, 0, 0, 0, 0) - - // ErrorCode (int16) - no error - response = append(response, 0, 0) - - // ErrorMessage (compact nullable string) - null - response = append(response, 0x00) // varint 0 = null - - // EndpointType (int8, v1+) - if apiVersion >= 1 { - response = append(response, byte(endpointType)) - } - - // ClusterId (compact string) - clusterID := "seaweedfs-kafka-gateway" - response = append(response, CompactArrayLength(uint32(len(clusterID)))...) - response = append(response, []byte(clusterID)...) - - // ControllerId (int32) - use broker ID 1 - controllerIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(controllerIDBytes, uint32(1)) - response = append(response, controllerIDBytes...) - - // Brokers (compact array) - // Get advertised address - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - // Broker count (compact array length) - response = append(response, CompactArrayLength(1)...) // 1 broker - - // Broker 0: BrokerId(int32) + Host(compact string) + Port(int32) + Rack(compact nullable string) + TaggedFields - brokerIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(brokerIDBytes, uint32(1)) - response = append(response, brokerIDBytes...) // BrokerId = 1 - - // Host (compact string) - response = append(response, CompactArrayLength(uint32(len(host)))...) - response = append(response, []byte(host)...) - - // Port (int32) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(port)) - response = append(response, portBytes...) - - // Rack (compact nullable string) - null - response = append(response, 0x00) // varint 0 = null - - // Per-broker tagged fields - response = append(response, 0x00) // Empty tagged fields - - // ClusterAuthorizedOperations (int32) - -2147483648 (INT32_MIN) means not included - authOpsBytes := make([]byte, 4) - if includeAuthorizedOps { - // For now, return 0 (no operations authorized) - binary.BigEndian.PutUint32(authOpsBytes, 0) - } else { - // -2147483648 = INT32_MIN = operations not included - binary.BigEndian.PutUint32(authOpsBytes, 0x80000000) - } - response = append(response, authOpsBytes...) - - // Response-level tagged fields (flexible response) - response = append(response, 0x00) // Empty tagged fields - - - return response, nil -} diff --git a/weed/mq/kafka/protocol/errors.go b/weed/mq/kafka/protocol/errors.go deleted file mode 100644 index 93bc85c80..000000000 --- a/weed/mq/kafka/protocol/errors.go +++ /dev/null @@ -1,362 +0,0 @@ -package protocol - -import ( - "context" - "encoding/binary" - "net" - "time" -) - -// Kafka Protocol Error Codes -// Based on Apache Kafka protocol specification -const ( - // Success - ErrorCodeNone int16 = 0 - - // General server errors - ErrorCodeUnknownServerError int16 = -1 - ErrorCodeOffsetOutOfRange int16 = 1 - ErrorCodeCorruptMessage int16 = 3 // Also UNKNOWN_TOPIC_OR_PARTITION - ErrorCodeUnknownTopicOrPartition int16 = 3 - ErrorCodeInvalidFetchSize int16 = 4 - ErrorCodeLeaderNotAvailable int16 = 5 - ErrorCodeNotLeaderOrFollower int16 = 6 // Formerly NOT_LEADER_FOR_PARTITION - ErrorCodeRequestTimedOut int16 = 7 - ErrorCodeBrokerNotAvailable int16 = 8 - ErrorCodeReplicaNotAvailable int16 = 9 - ErrorCodeMessageTooLarge int16 = 10 - ErrorCodeStaleControllerEpoch int16 = 11 - ErrorCodeOffsetMetadataTooLarge int16 = 12 - ErrorCodeNetworkException int16 = 13 - ErrorCodeOffsetLoadInProgress int16 = 14 - ErrorCodeGroupLoadInProgress int16 = 15 - ErrorCodeNotCoordinatorForGroup int16 = 16 - ErrorCodeNotCoordinatorForTransaction int16 = 17 - - // Consumer group coordination errors - ErrorCodeIllegalGeneration int16 = 22 - ErrorCodeInconsistentGroupProtocol int16 = 23 - ErrorCodeInvalidGroupID int16 = 24 - ErrorCodeUnknownMemberID int16 = 25 - ErrorCodeInvalidSessionTimeout int16 = 26 - ErrorCodeRebalanceInProgress int16 = 27 - ErrorCodeInvalidCommitOffsetSize int16 = 28 - ErrorCodeTopicAuthorizationFailed int16 = 29 - ErrorCodeGroupAuthorizationFailed int16 = 30 - ErrorCodeClusterAuthorizationFailed int16 = 31 - ErrorCodeInvalidTimestamp int16 = 32 - ErrorCodeUnsupportedSASLMechanism int16 = 33 - ErrorCodeIllegalSASLState int16 = 34 - ErrorCodeUnsupportedVersion int16 = 35 - - // Topic management errors - ErrorCodeTopicAlreadyExists int16 = 36 - ErrorCodeInvalidPartitions int16 = 37 - ErrorCodeInvalidReplicationFactor int16 = 38 - ErrorCodeInvalidReplicaAssignment int16 = 39 - ErrorCodeInvalidConfig int16 = 40 - ErrorCodeNotController int16 = 41 - ErrorCodeInvalidRecord int16 = 42 - ErrorCodePolicyViolation int16 = 43 - ErrorCodeOutOfOrderSequenceNumber int16 = 44 - ErrorCodeDuplicateSequenceNumber int16 = 45 - ErrorCodeInvalidProducerEpoch int16 = 46 - ErrorCodeInvalidTxnState int16 = 47 - ErrorCodeInvalidProducerIDMapping int16 = 48 - ErrorCodeInvalidTransactionTimeout int16 = 49 - ErrorCodeConcurrentTransactions int16 = 50 - - // Connection and timeout errors - ErrorCodeConnectionRefused int16 = 60 // Custom for connection issues - ErrorCodeConnectionTimeout int16 = 61 // Custom for connection timeouts - ErrorCodeReadTimeout int16 = 62 // Custom for read timeouts - ErrorCodeWriteTimeout int16 = 63 // Custom for write timeouts - - // Consumer group specific errors - ErrorCodeMemberIDRequired int16 = 79 - ErrorCodeFencedInstanceID int16 = 82 - ErrorCodeGroupMaxSizeReached int16 = 84 - ErrorCodeUnstableOffsetCommit int16 = 95 -) - -// ErrorInfo contains metadata about a Kafka error -type ErrorInfo struct { - Code int16 - Name string - Description string - Retriable bool -} - -// KafkaErrors maps error codes to their metadata -var KafkaErrors = map[int16]ErrorInfo{ - ErrorCodeNone: { - Code: ErrorCodeNone, Name: "NONE", Description: "No error", Retriable: false, - }, - ErrorCodeUnknownServerError: { - Code: ErrorCodeUnknownServerError, Name: "UNKNOWN_SERVER_ERROR", - Description: "Unknown server error", Retriable: true, - }, - ErrorCodeOffsetOutOfRange: { - Code: ErrorCodeOffsetOutOfRange, Name: "OFFSET_OUT_OF_RANGE", - Description: "Offset out of range", Retriable: false, - }, - ErrorCodeUnknownTopicOrPartition: { - Code: ErrorCodeUnknownTopicOrPartition, Name: "UNKNOWN_TOPIC_OR_PARTITION", - Description: "Topic or partition does not exist", Retriable: false, - }, - ErrorCodeInvalidFetchSize: { - Code: ErrorCodeInvalidFetchSize, Name: "INVALID_FETCH_SIZE", - Description: "Invalid fetch size", Retriable: false, - }, - ErrorCodeLeaderNotAvailable: { - Code: ErrorCodeLeaderNotAvailable, Name: "LEADER_NOT_AVAILABLE", - Description: "Leader not available", Retriable: true, - }, - ErrorCodeNotLeaderOrFollower: { - Code: ErrorCodeNotLeaderOrFollower, Name: "NOT_LEADER_OR_FOLLOWER", - Description: "Not leader or follower", Retriable: true, - }, - ErrorCodeRequestTimedOut: { - Code: ErrorCodeRequestTimedOut, Name: "REQUEST_TIMED_OUT", - Description: "Request timed out", Retriable: true, - }, - ErrorCodeBrokerNotAvailable: { - Code: ErrorCodeBrokerNotAvailable, Name: "BROKER_NOT_AVAILABLE", - Description: "Broker not available", Retriable: true, - }, - ErrorCodeMessageTooLarge: { - Code: ErrorCodeMessageTooLarge, Name: "MESSAGE_TOO_LARGE", - Description: "Message size exceeds limit", Retriable: false, - }, - ErrorCodeOffsetMetadataTooLarge: { - Code: ErrorCodeOffsetMetadataTooLarge, Name: "OFFSET_METADATA_TOO_LARGE", - Description: "Offset metadata too large", Retriable: false, - }, - ErrorCodeNetworkException: { - Code: ErrorCodeNetworkException, Name: "NETWORK_EXCEPTION", - Description: "Network error", Retriable: true, - }, - ErrorCodeOffsetLoadInProgress: { - Code: ErrorCodeOffsetLoadInProgress, Name: "OFFSET_LOAD_IN_PROGRESS", - Description: "Offset load in progress", Retriable: true, - }, - ErrorCodeNotCoordinatorForGroup: { - Code: ErrorCodeNotCoordinatorForGroup, Name: "NOT_COORDINATOR_FOR_GROUP", - Description: "Not coordinator for group", Retriable: true, - }, - ErrorCodeInvalidGroupID: { - Code: ErrorCodeInvalidGroupID, Name: "INVALID_GROUP_ID", - Description: "Invalid group ID", Retriable: false, - }, - ErrorCodeUnknownMemberID: { - Code: ErrorCodeUnknownMemberID, Name: "UNKNOWN_MEMBER_ID", - Description: "Unknown member ID", Retriable: false, - }, - ErrorCodeInvalidSessionTimeout: { - Code: ErrorCodeInvalidSessionTimeout, Name: "INVALID_SESSION_TIMEOUT", - Description: "Invalid session timeout", Retriable: false, - }, - ErrorCodeRebalanceInProgress: { - Code: ErrorCodeRebalanceInProgress, Name: "REBALANCE_IN_PROGRESS", - Description: "Group rebalance in progress", Retriable: true, - }, - ErrorCodeInvalidCommitOffsetSize: { - Code: ErrorCodeInvalidCommitOffsetSize, Name: "INVALID_COMMIT_OFFSET_SIZE", - Description: "Invalid commit offset size", Retriable: false, - }, - ErrorCodeTopicAuthorizationFailed: { - Code: ErrorCodeTopicAuthorizationFailed, Name: "TOPIC_AUTHORIZATION_FAILED", - Description: "Topic authorization failed", Retriable: false, - }, - ErrorCodeGroupAuthorizationFailed: { - Code: ErrorCodeGroupAuthorizationFailed, Name: "GROUP_AUTHORIZATION_FAILED", - Description: "Group authorization failed", Retriable: false, - }, - ErrorCodeUnsupportedVersion: { - Code: ErrorCodeUnsupportedVersion, Name: "UNSUPPORTED_VERSION", - Description: "Unsupported version", Retriable: false, - }, - ErrorCodeTopicAlreadyExists: { - Code: ErrorCodeTopicAlreadyExists, Name: "TOPIC_ALREADY_EXISTS", - Description: "Topic already exists", Retriable: false, - }, - ErrorCodeInvalidPartitions: { - Code: ErrorCodeInvalidPartitions, Name: "INVALID_PARTITIONS", - Description: "Invalid number of partitions", Retriable: false, - }, - ErrorCodeInvalidReplicationFactor: { - Code: ErrorCodeInvalidReplicationFactor, Name: "INVALID_REPLICATION_FACTOR", - Description: "Invalid replication factor", Retriable: false, - }, - ErrorCodeInvalidRecord: { - Code: ErrorCodeInvalidRecord, Name: "INVALID_RECORD", - Description: "Invalid record", Retriable: false, - }, - ErrorCodeConnectionRefused: { - Code: ErrorCodeConnectionRefused, Name: "CONNECTION_REFUSED", - Description: "Connection refused", Retriable: true, - }, - ErrorCodeConnectionTimeout: { - Code: ErrorCodeConnectionTimeout, Name: "CONNECTION_TIMEOUT", - Description: "Connection timeout", Retriable: true, - }, - ErrorCodeReadTimeout: { - Code: ErrorCodeReadTimeout, Name: "READ_TIMEOUT", - Description: "Read operation timeout", Retriable: true, - }, - ErrorCodeWriteTimeout: { - Code: ErrorCodeWriteTimeout, Name: "WRITE_TIMEOUT", - Description: "Write operation timeout", Retriable: true, - }, - ErrorCodeIllegalGeneration: { - Code: ErrorCodeIllegalGeneration, Name: "ILLEGAL_GENERATION", - Description: "Illegal generation", Retriable: false, - }, - ErrorCodeInconsistentGroupProtocol: { - Code: ErrorCodeInconsistentGroupProtocol, Name: "INCONSISTENT_GROUP_PROTOCOL", - Description: "Inconsistent group protocol", Retriable: false, - }, - ErrorCodeMemberIDRequired: { - Code: ErrorCodeMemberIDRequired, Name: "MEMBER_ID_REQUIRED", - Description: "Member ID required", Retriable: false, - }, - ErrorCodeFencedInstanceID: { - Code: ErrorCodeFencedInstanceID, Name: "FENCED_INSTANCE_ID", - Description: "Instance ID fenced", Retriable: false, - }, - ErrorCodeGroupMaxSizeReached: { - Code: ErrorCodeGroupMaxSizeReached, Name: "GROUP_MAX_SIZE_REACHED", - Description: "Group max size reached", Retriable: false, - }, - ErrorCodeUnstableOffsetCommit: { - Code: ErrorCodeUnstableOffsetCommit, Name: "UNSTABLE_OFFSET_COMMIT", - Description: "Offset commit during rebalance", Retriable: true, - }, -} - -// GetErrorInfo returns error information for the given error code -func GetErrorInfo(code int16) ErrorInfo { - if info, exists := KafkaErrors[code]; exists { - return info - } - return ErrorInfo{ - Code: code, Name: "UNKNOWN", Description: "Unknown error code", Retriable: false, - } -} - -// IsRetriableError returns true if the error is retriable -func IsRetriableError(code int16) bool { - return GetErrorInfo(code).Retriable -} - -// BuildErrorResponse builds a standard Kafka error response -func BuildErrorResponse(correlationID uint32, errorCode int16) []byte { - response := make([]byte, 0, 8) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(errorCode)) - response = append(response, errorCodeBytes...) - - return response -} - -// BuildErrorResponseWithMessage builds a Kafka error response with error message -func BuildErrorResponseWithMessage(correlationID uint32, errorCode int16, message string) []byte { - response := BuildErrorResponse(correlationID, errorCode) - - // Error message (2 bytes length + message) - if message == "" { - response = append(response, 0xFF, 0xFF) // Null string - } else { - messageLen := uint16(len(message)) - messageLenBytes := make([]byte, 2) - binary.BigEndian.PutUint16(messageLenBytes, messageLen) - response = append(response, messageLenBytes...) - response = append(response, []byte(message)...) - } - - return response -} - -// ClassifyNetworkError classifies network errors into appropriate Kafka error codes -func ClassifyNetworkError(err error) int16 { - if err == nil { - return ErrorCodeNone - } - - // Check for network errors - if netErr, ok := err.(net.Error); ok { - if netErr.Timeout() { - return ErrorCodeRequestTimedOut - } - return ErrorCodeNetworkException - } - - // Check for specific error types - switch err.Error() { - case "connection refused": - return ErrorCodeConnectionRefused - case "connection timeout": - return ErrorCodeConnectionTimeout - default: - return ErrorCodeUnknownServerError - } -} - -// TimeoutConfig holds timeout configuration for connections and operations -type TimeoutConfig struct { - ConnectionTimeout time.Duration // Timeout for establishing connections - ReadTimeout time.Duration // Timeout for read operations - WriteTimeout time.Duration // Timeout for write operations - RequestTimeout time.Duration // Overall request timeout -} - -// DefaultTimeoutConfig returns default timeout configuration -func DefaultTimeoutConfig() TimeoutConfig { - return TimeoutConfig{ - ConnectionTimeout: 30 * time.Second, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - RequestTimeout: 30 * time.Second, - } -} - -// HandleTimeoutError handles timeout errors and returns appropriate error code -func HandleTimeoutError(err error, operation string) int16 { - if err == nil { - return ErrorCodeNone - } - - // Handle context timeout errors - if err == context.DeadlineExceeded { - switch operation { - case "read": - return ErrorCodeReadTimeout - case "write": - return ErrorCodeWriteTimeout - case "connect": - return ErrorCodeConnectionTimeout - default: - return ErrorCodeRequestTimedOut - } - } - - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - switch operation { - case "read": - return ErrorCodeReadTimeout - case "write": - return ErrorCodeWriteTimeout - case "connect": - return ErrorCodeConnectionTimeout - default: - return ErrorCodeRequestTimedOut - } - } - - return ClassifyNetworkError(err) -} diff --git a/weed/mq/kafka/protocol/fetch.go b/weed/mq/kafka/protocol/fetch.go deleted file mode 100644 index 58a96f5d8..000000000 --- a/weed/mq/kafka/protocol/fetch.go +++ /dev/null @@ -1,1301 +0,0 @@ -package protocol - -import ( - "context" - "encoding/binary" - "fmt" - "hash/crc32" - "strings" - "time" - "unicode/utf8" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/compression" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/protobuf/proto" -) - -// partitionFetchResult holds the result of fetching from a single partition -type partitionFetchResult struct { - topicIndex int - partitionIndex int - recordBatch []byte - highWaterMark int64 - errorCode int16 - fetchDuration time.Duration -} - -func (h *Handler) handleFetch(ctx context.Context, correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse the Fetch request to get the requested topics and partitions - fetchRequest, err := h.parseFetchRequest(apiVersion, requestBody) - if err != nil { - return nil, fmt.Errorf("parse fetch request: %w", err) - } - - // Basic long-polling to avoid client busy-looping when there's no data. - var throttleTimeMs int32 = 0 - // Only long-poll when all referenced topics exist; unknown topics should not block - allTopicsExist := func() bool { - for _, topic := range fetchRequest.Topics { - if !h.seaweedMQHandler.TopicExists(topic.Name) { - return false - } - } - return true - } - hasDataAvailable := func() bool { - // Check if any requested partition has data available - // Compare fetch offset with high water mark - for _, topic := range fetchRequest.Topics { - if !h.seaweedMQHandler.TopicExists(topic.Name) { - continue - } - for _, partition := range topic.Partitions { - hwm, err := h.seaweedMQHandler.GetLatestOffset(topic.Name, partition.PartitionID) - if err != nil { - continue - } - // Normalize fetch offset - effectiveOffset := partition.FetchOffset - if effectiveOffset == -2 { // earliest - effectiveOffset = 0 - } else if effectiveOffset == -1 { // latest - effectiveOffset = hwm - } - // If fetch offset < hwm, data is available - if effectiveOffset < hwm { - return true - } - } - } - return false - } - // Long-poll when client requests it via MaxWaitTime and there's no data - // Even if MinBytes=0, we should honor MaxWaitTime to reduce polling overhead - maxWaitMs := fetchRequest.MaxWaitTime - - // Long-poll if: (1) client wants to wait (maxWaitMs > 0), (2) no data available, (3) topics exist - // NOTE: We long-poll even if MinBytes=0, since the client specified a wait time - hasData := hasDataAvailable() - topicsExist := allTopicsExist() - shouldLongPoll := maxWaitMs > 0 && !hasData && topicsExist - - if shouldLongPoll { - start := time.Now() - // Use the client's requested wait time (already capped at 1s) - maxPollTime := time.Duration(maxWaitMs) * time.Millisecond - deadline := start.Add(maxPollTime) - pollLoop: - for time.Now().Before(deadline) { - // Use context-aware sleep instead of blocking time.Sleep - select { - case <-ctx.Done(): - throttleTimeMs = int32(time.Since(start) / time.Millisecond) - break pollLoop - case <-time.After(10 * time.Millisecond): - // Continue with polling - } - if hasDataAvailable() { - // Data became available during polling - return immediately with NO throttle - // Throttle time should only be used for quota enforcement, not for long-poll timing - throttleTimeMs = 0 - break pollLoop - } - } - // If we got here without breaking early, we hit the timeout - // Long-poll timeout is NOT throttling - throttle time should only be used for quota/rate limiting - // Do NOT set throttle time based on long-poll duration - throttleTimeMs = 0 - } - - // Build the response - response := make([]byte, 0, 1024) - totalAppendedRecordBytes := 0 - - // NOTE: Correlation ID is NOT included in the response body - // The wire protocol layer (writeResponseWithTimeout) writes: [Size][CorrelationID][Body] - // Kafka clients read the correlation ID separately from the 8-byte header, then read Size-4 bytes of body - // If we include correlation ID here, clients will see it twice and fail with "4 extra bytes" errors - - // Fetch v1+ has throttle_time_ms at the beginning - if apiVersion >= 1 { - throttleBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throttleBytes, uint32(throttleTimeMs)) - response = append(response, throttleBytes...) - } - - // Fetch v7+ has error_code and session_id - if apiVersion >= 7 { - response = append(response, 0, 0) // error_code (2 bytes, 0 = no error) - response = append(response, 0, 0, 0, 0) // session_id (4 bytes, 0 = no session) - } - - // Check if this version uses flexible format (v12+) - isFlexible := IsFlexibleVersion(1, apiVersion) // API key 1 = Fetch - - // Topics count - write the actual number of topics in the request - // Kafka protocol: we MUST return all requested topics in the response (even with empty data) - topicsCount := len(fetchRequest.Topics) - if isFlexible { - // Flexible versions use compact array format (count + 1) - response = append(response, EncodeUvarint(uint32(topicsCount+1))...) - } else { - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, uint32(topicsCount)) - response = append(response, topicsCountBytes...) - } - - // ==================================================================== - // PERSISTENT PARTITION READERS - // Use per-connection persistent goroutines that maintain offset position - // and stream forward, eliminating repeated lookups and reducing broker CPU - // ==================================================================== - - // Get connection context to access persistent partition readers - connContext := h.getConnectionContextFromRequest(ctx) - if connContext == nil { - glog.Errorf("FETCH CORR=%d: Connection context not available - cannot use persistent readers", - correlationID) - return nil, fmt.Errorf("connection context not available") - } - - glog.V(4).Infof("[%s] FETCH CORR=%d: Processing %d topics with %d total partitions", - connContext.ConnectionID, correlationID, len(fetchRequest.Topics), - func() int { - count := 0 - for _, t := range fetchRequest.Topics { - count += len(t.Partitions) - } - return count - }()) - - // Collect results from persistent readers - // Dispatch all requests concurrently, then wait for all results in parallel - // to avoid sequential timeout accumulation - type pendingFetch struct { - topicName string - partitionID int32 - resultChan chan *partitionFetchResult - } - - pending := make([]pendingFetch, 0) - - // Phase 1: Dispatch all fetch requests to partition readers (non-blocking) - for _, topic := range fetchRequest.Topics { - isSchematizedTopic := false - if h.IsSchemaEnabled() { - isSchematizedTopic = h.isSchematizedTopic(topic.Name) - } - - for _, partition := range topic.Partitions { - key := TopicPartitionKey{Topic: topic.Name, Partition: partition.PartitionID} - - // All topics (including system topics) use persistent readers for in-memory access - // This enables instant notification and avoids ForceFlush dependencies - - // Get or create persistent reader for this partition - reader := h.getOrCreatePartitionReader(ctx, connContext, key, partition.FetchOffset) - if reader == nil { - // Failed to create reader - add empty pending - glog.Errorf("[%s] Failed to get/create partition reader for %s[%d]", - connContext.ConnectionID, topic.Name, partition.PartitionID) - nilChan := make(chan *partitionFetchResult, 1) - nilChan <- &partitionFetchResult{errorCode: 3} // UNKNOWN_TOPIC_OR_PARTITION - pending = append(pending, pendingFetch{ - topicName: topic.Name, - partitionID: partition.PartitionID, - resultChan: nilChan, - }) - continue - } - - // Signal reader to fetch (don't wait for result yet) - resultChan := make(chan *partitionFetchResult, 1) - fetchReq := &partitionFetchRequest{ - requestedOffset: partition.FetchOffset, - maxBytes: partition.MaxBytes, - maxWaitMs: maxWaitMs, // Pass MaxWaitTime from Kafka fetch request - resultChan: resultChan, - isSchematized: isSchematizedTopic, - apiVersion: apiVersion, - } - - // Try to send request (increased timeout for CI environments with slow disk I/O) - select { - case reader.fetchChan <- fetchReq: - // Request sent successfully, add to pending - pending = append(pending, pendingFetch{ - topicName: topic.Name, - partitionID: partition.PartitionID, - resultChan: resultChan, - }) - case <-time.After(200 * time.Millisecond): - // Channel full, return empty result - glog.Warningf("[%s] Reader channel full for %s[%d], returning empty", - connContext.ConnectionID, topic.Name, partition.PartitionID) - emptyChan := make(chan *partitionFetchResult, 1) - emptyChan <- &partitionFetchResult{} - pending = append(pending, pendingFetch{ - topicName: topic.Name, - partitionID: partition.PartitionID, - resultChan: emptyChan, - }) - } - } - } - - // Phase 2: Wait for all results with adequate timeout for CI environments - // We MUST return a result for every requested partition or Sarama will error - results := make([]*partitionFetchResult, len(pending)) - // Use 95% of client's MaxWaitTime to ensure we return BEFORE client timeout - // This maximizes data collection time while leaving a safety buffer for: - // - Response serialization, network transmission, client processing - // For 500ms client timeout: 475ms internal fetch, 25ms buffer - // For 100ms client timeout: 95ms internal fetch, 5ms buffer - effectiveDeadlineMs := time.Duration(maxWaitMs) * 95 / 100 - deadline := time.After(effectiveDeadlineMs * time.Millisecond) - if maxWaitMs < 20 { - // For very short timeouts (< 20ms), use full timeout to maximize data collection - deadline = time.After(time.Duration(maxWaitMs) * time.Millisecond) - } - - // Collect results one by one with shared deadline - for i, pf := range pending { - select { - case result := <-pf.resultChan: - results[i] = result - case <-deadline: - // Deadline expired, return empty for this and all remaining partitions - for j := i; j < len(pending); j++ { - results[j] = &partitionFetchResult{} - } - glog.V(3).Infof("[%s] Fetch deadline expired, returning empty for %d remaining partitions", - connContext.ConnectionID, len(pending)-i) - goto done - case <-ctx.Done(): - // Context cancelled, return empty for remaining - for j := i; j < len(pending); j++ { - results[j] = &partitionFetchResult{} - } - goto done - } - } -done: - - // ==================================================================== - // BUILD RESPONSE FROM FETCHED DATA - // Now assemble the response in the correct order using fetched results - // ==================================================================== - - // Verify we have results for all requested partitions - // Sarama requires a response block for EVERY requested partition to avoid ErrIncompleteResponse - expectedResultCount := 0 - for _, topic := range fetchRequest.Topics { - expectedResultCount += len(topic.Partitions) - } - if len(results) != expectedResultCount { - glog.Errorf("[%s] Result count mismatch: expected %d, got %d - this will cause ErrIncompleteResponse", - connContext.ConnectionID, expectedResultCount, len(results)) - // Pad with empty results if needed (safety net - shouldn't happen with fixed code) - for len(results) < expectedResultCount { - results = append(results, &partitionFetchResult{}) - } - } - - // Process each requested topic - resultIdx := 0 - for _, topic := range fetchRequest.Topics { - topicNameBytes := []byte(topic.Name) - - // Topic name length and name - if isFlexible { - // Flexible versions use compact string format (length + 1) - response = append(response, EncodeUvarint(uint32(len(topicNameBytes)+1))...) - } else { - response = append(response, byte(len(topicNameBytes)>>8), byte(len(topicNameBytes))) - } - response = append(response, topicNameBytes...) - - // Partitions count for this topic - partitionsCount := len(topic.Partitions) - if isFlexible { - // Flexible versions use compact array format (count + 1) - response = append(response, EncodeUvarint(uint32(partitionsCount+1))...) - } else { - partitionsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsCountBytes, uint32(partitionsCount)) - response = append(response, partitionsCountBytes...) - } - - // Process each requested partition (using pre-fetched results) - for _, partition := range topic.Partitions { - // Get the pre-fetched result for this partition - result := results[resultIdx] - resultIdx++ - - // Partition ID - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, uint32(partition.PartitionID)) - response = append(response, partitionIDBytes...) - - // Error code (2 bytes) - use the result's error code - response = append(response, byte(result.errorCode>>8), byte(result.errorCode)) - - // Use the pre-fetched high water mark from concurrent fetch - highWaterMark := result.highWaterMark - - // High water mark (8 bytes) - highWaterMarkBytes := make([]byte, 8) - binary.BigEndian.PutUint64(highWaterMarkBytes, uint64(highWaterMark)) - response = append(response, highWaterMarkBytes...) - - // Fetch v4+ has last_stable_offset and log_start_offset - if apiVersion >= 4 { - // Last stable offset (8 bytes) - same as high water mark for non-transactional - response = append(response, highWaterMarkBytes...) - // Log start offset (8 bytes) - 0 for simplicity - response = append(response, 0, 0, 0, 0, 0, 0, 0, 0) - - // Aborted transactions count (4 bytes) = 0 - response = append(response, 0, 0, 0, 0) - } - - // Use the pre-fetched record batch - recordBatch := result.recordBatch - - // Records size - flexible versions (v12+) use compact format: varint(size+1) - if isFlexible { - if len(recordBatch) == 0 { - response = append(response, 0) // null records = 0 in compact format - } else { - response = append(response, EncodeUvarint(uint32(len(recordBatch)+1))...) - } - } else { - // Non-flexible versions use int32(size) - recordsSizeBytes := make([]byte, 4) - binary.BigEndian.PutUint32(recordsSizeBytes, uint32(len(recordBatch))) - response = append(response, recordsSizeBytes...) - } - - // Records data - response = append(response, recordBatch...) - totalAppendedRecordBytes += len(recordBatch) - - // Tagged fields for flexible versions (v12+) after each partition - if isFlexible { - response = append(response, 0) // Empty tagged fields - } - } - - // Tagged fields for flexible versions (v12+) after each topic - if isFlexible { - response = append(response, 0) // Empty tagged fields - } - } - - // Tagged fields for flexible versions (v12+) at the end of response - if isFlexible { - response = append(response, 0) // Empty tagged fields - } - - // Verify topics count hasn't been corrupted - if !isFlexible { - // Topics count position depends on API version: - // v0: byte 0 (no throttle_time_ms, no error_code, no session_id) - // v1-v6: byte 4 (after throttle_time_ms) - // v7+: byte 10 (after throttle_time_ms, error_code, session_id) - var topicsCountPos int - if apiVersion == 0 { - topicsCountPos = 0 - } else if apiVersion < 7 { - topicsCountPos = 4 - } else { - topicsCountPos = 10 - } - - if len(response) >= topicsCountPos+4 { - actualTopicsCount := binary.BigEndian.Uint32(response[topicsCountPos : topicsCountPos+4]) - if actualTopicsCount != uint32(topicsCount) { - glog.Errorf("FETCH CORR=%d v%d: Topics count CORRUPTED! Expected %d, found %d at response[%d:%d]=%02x %02x %02x %02x", - correlationID, apiVersion, topicsCount, actualTopicsCount, topicsCountPos, topicsCountPos+4, - response[topicsCountPos], response[topicsCountPos+1], response[topicsCountPos+2], response[topicsCountPos+3]) - } - } - } - - return response, nil -} - -// FetchRequest represents a parsed Kafka Fetch request -type FetchRequest struct { - ReplicaID int32 - MaxWaitTime int32 - MinBytes int32 - MaxBytes int32 - IsolationLevel int8 - Topics []FetchTopic -} - -type FetchTopic struct { - Name string - Partitions []FetchPartition -} - -type FetchPartition struct { - PartitionID int32 - FetchOffset int64 - LogStartOffset int64 - MaxBytes int32 -} - -// parseFetchRequest parses a Kafka Fetch request -func (h *Handler) parseFetchRequest(apiVersion uint16, requestBody []byte) (*FetchRequest, error) { - if len(requestBody) < 12 { - return nil, fmt.Errorf("fetch request too short: %d bytes", len(requestBody)) - } - - offset := 0 - request := &FetchRequest{} - - // Check if this version uses flexible format (v12+) - isFlexible := IsFlexibleVersion(1, apiVersion) // API key 1 = Fetch - - // NOTE: client_id is already handled by HandleConn and stripped from requestBody - // Request body starts directly with fetch-specific fields - - // Replica ID (4 bytes) - always fixed - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for replica_id") - } - request.ReplicaID = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Max wait time (4 bytes) - always fixed - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for max_wait_time") - } - request.MaxWaitTime = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Min bytes (4 bytes) - always fixed - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for min_bytes") - } - request.MinBytes = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Max bytes (4 bytes) - only in v3+, always fixed - if apiVersion >= 3 { - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for max_bytes") - } - request.MaxBytes = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - } - - // Isolation level (1 byte) - only in v4+, always fixed - if apiVersion >= 4 { - if offset+1 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for isolation_level") - } - request.IsolationLevel = int8(requestBody[offset]) - offset += 1 - } - - // Session ID (4 bytes) and Session Epoch (4 bytes) - only in v7+, always fixed - if apiVersion >= 7 { - if offset+8 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for session_id and epoch") - } - offset += 8 // Skip session_id and session_epoch - } - - // Topics count - flexible uses compact array, non-flexible uses INT32 - var topicsCount int - if isFlexible { - // Compact array: length+1 encoded as varint - length, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode topics compact array: %w", err) - } - topicsCount = int(length) - offset += consumed - } else { - // Regular array: INT32 length - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for topics count") - } - topicsCount = int(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - } - - // Parse topics - request.Topics = make([]FetchTopic, topicsCount) - for i := 0; i < topicsCount; i++ { - // Topic name - flexible uses compact string, non-flexible uses STRING (INT16 length) - var topicName string - if isFlexible { - // Compact string: length+1 encoded as varint - name, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode topic name compact string: %w", err) - } - topicName = name - offset += consumed - } else { - // Regular string: INT16 length + bytes - if offset+2 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for topic name length") - } - topicNameLength := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - - if offset+topicNameLength > len(requestBody) { - return nil, fmt.Errorf("insufficient data for topic name") - } - topicName = string(requestBody[offset : offset+topicNameLength]) - offset += topicNameLength - } - request.Topics[i].Name = topicName - - // Partitions count - flexible uses compact array, non-flexible uses INT32 - var partitionsCount int - if isFlexible { - // Compact array: length+1 encoded as varint - length, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode partitions compact array: %w", err) - } - partitionsCount = int(length) - offset += consumed - } else { - // Regular array: INT32 length - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for partitions count") - } - partitionsCount = int(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - } - - // Parse partitions - request.Topics[i].Partitions = make([]FetchPartition, partitionsCount) - for j := 0; j < partitionsCount; j++ { - // Partition ID (4 bytes) - always fixed - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for partition ID") - } - request.Topics[i].Partitions[j].PartitionID = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Current leader epoch (4 bytes) - only in v9+, always fixed - if apiVersion >= 9 { - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for current leader epoch") - } - offset += 4 // Skip current leader epoch - } - - // Fetch offset (8 bytes) - always fixed - if offset+8 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for fetch offset") - } - request.Topics[i].Partitions[j].FetchOffset = int64(binary.BigEndian.Uint64(requestBody[offset : offset+8])) - offset += 8 - - // Log start offset (8 bytes) - only in v5+, always fixed - if apiVersion >= 5 { - if offset+8 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for log start offset") - } - request.Topics[i].Partitions[j].LogStartOffset = int64(binary.BigEndian.Uint64(requestBody[offset : offset+8])) - offset += 8 - } - - // Partition max bytes (4 bytes) - always fixed - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for partition max bytes") - } - request.Topics[i].Partitions[j].MaxBytes = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Tagged fields for partition (only in flexible versions v12+) - if isFlexible { - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode partition tagged fields: %w", err) - } - offset += consumed - } - } - - // Tagged fields for topic (only in flexible versions v12+) - if isFlexible { - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode topic tagged fields: %w", err) - } - offset += consumed - } - } - - // Forgotten topics data (only in v7+) - if apiVersion >= 7 { - // Skip forgotten topics array - we don't use incremental fetch yet - var forgottenTopicsCount int - if isFlexible { - length, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode forgotten topics compact array: %w", err) - } - forgottenTopicsCount = int(length) - offset += consumed - } else { - if offset+4 > len(requestBody) { - // End of request, no forgotten topics - return request, nil - } - forgottenTopicsCount = int(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - } - - // Skip forgotten topics if present - for i := 0; i < forgottenTopicsCount && offset < len(requestBody); i++ { - // Skip topic name - if isFlexible { - _, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - break - } - offset += consumed - } else { - if offset+2 > len(requestBody) { - break - } - nameLen := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 + nameLen - } - - // Skip partitions array - if isFlexible { - length, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - break - } - offset += consumed - // Skip partition IDs (4 bytes each) - offset += int(length) * 4 - } else { - if offset+4 > len(requestBody) { - break - } - partCount := int(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 + partCount*4 - } - - // Skip tagged fields if flexible - if isFlexible { - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - break - } - offset += consumed - } - } - } - - // Rack ID (only in v11+) - optional string - if apiVersion >= 11 && offset < len(requestBody) { - if isFlexible { - _, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err == nil { - offset += consumed - } - } else { - if offset+2 <= len(requestBody) { - rackIDLen := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - if rackIDLen >= 0 && offset+2+rackIDLen <= len(requestBody) { - offset += 2 + rackIDLen - } - } - } - } - - // Top-level tagged fields (only in flexible versions v12+) - if isFlexible && offset < len(requestBody) { - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - // Don't fail on trailing tagged fields parsing - } else { - offset += consumed - } - } - - return request, nil -} - -// constructRecordBatchFromSMQ creates a Kafka record batch from SeaweedMQ records -func (h *Handler) constructRecordBatchFromSMQ(topicName string, fetchOffset int64, smqRecords []integration.SMQRecord) []byte { - if len(smqRecords) == 0 { - return []byte{} - } - - // Create record batch using the SMQ records - batch := make([]byte, 0, 512) - - // Record batch header - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(fetchOffset)) - batch = append(batch, baseOffsetBytes...) // base offset (8 bytes) - - // Calculate batch length (will be filled after we know the size) - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) // batch length placeholder (4 bytes) - - // Partition leader epoch (4 bytes) - use 0 (real Kafka uses 0, not -1) - batch = append(batch, 0x00, 0x00, 0x00, 0x00) - - // Magic byte (1 byte) - v2 format - batch = append(batch, 2) - - // CRC placeholder (4 bytes) - will be calculated later - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - no compression, etc. - batch = append(batch, 0, 0) - - // Last offset delta (4 bytes) - lastOffsetDelta := int32(len(smqRecords) - 1) - lastOffsetDeltaBytes := make([]byte, 4) - binary.BigEndian.PutUint32(lastOffsetDeltaBytes, uint32(lastOffsetDelta)) - batch = append(batch, lastOffsetDeltaBytes...) - - // Base timestamp (8 bytes) - convert from nanoseconds to milliseconds for Kafka compatibility - baseTimestamp := smqRecords[0].GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - baseTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseTimestampBytes, uint64(baseTimestamp)) - batch = append(batch, baseTimestampBytes...) - - // Max timestamp (8 bytes) - convert from nanoseconds to milliseconds for Kafka compatibility - maxTimestamp := baseTimestamp - if len(smqRecords) > 1 { - maxTimestamp = smqRecords[len(smqRecords)-1].GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - } - maxTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxTimestampBytes, uint64(maxTimestamp)) - batch = append(batch, maxTimestampBytes...) - - // Producer ID (8 bytes) - use -1 for no producer ID - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer epoch (2 bytes) - use -1 for no producer epoch - batch = append(batch, 0xFF, 0xFF) - - // Base sequence (4 bytes) - use -1 for no base sequence - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Records count (4 bytes) - recordCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(recordCountBytes, uint32(len(smqRecords))) - batch = append(batch, recordCountBytes...) - - // Add individual records from SMQ records - for i, smqRecord := range smqRecords { - // Build individual record - recordBytes := make([]byte, 0, 128) - - // Record attributes (1 byte) - recordBytes = append(recordBytes, 0) - - // Timestamp delta (varint) - calculate from base timestamp (both in milliseconds) - recordTimestampMs := smqRecord.GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - timestampDelta := recordTimestampMs - baseTimestamp // Both in milliseconds now - recordBytes = append(recordBytes, encodeVarint(timestampDelta)...) - - // Offset delta (varint) - offsetDelta := int64(i) - recordBytes = append(recordBytes, encodeVarint(offsetDelta)...) - - // Key length and key (varint + data) - decode RecordValue to get original Kafka message - key := h.decodeRecordValueToKafkaMessage(topicName, smqRecord.GetKey()) - if key == nil { - recordBytes = append(recordBytes, encodeVarint(-1)...) // null key - } else { - recordBytes = append(recordBytes, encodeVarint(int64(len(key)))...) - recordBytes = append(recordBytes, key...) - } - - // Value length and value (varint + data) - decode RecordValue to get original Kafka message - value := h.decodeRecordValueToKafkaMessage(topicName, smqRecord.GetValue()) - - if value == nil { - recordBytes = append(recordBytes, encodeVarint(-1)...) // null value - } else { - recordBytes = append(recordBytes, encodeVarint(int64(len(value)))...) - recordBytes = append(recordBytes, value...) - } - - // Headers count (varint) - 0 headers - recordBytes = append(recordBytes, encodeVarint(0)...) - - // Prepend record length (varint) - recordLength := int64(len(recordBytes)) - batch = append(batch, encodeVarint(recordLength)...) - batch = append(batch, recordBytes...) - } - - // Fill in the batch length - batchLength := uint32(len(batch) - batchLengthPos - 4) - binary.BigEndian.PutUint32(batch[batchLengthPos:batchLengthPos+4], batchLength) - - // Calculate CRC32 for the batch - // Kafka CRC calculation covers: partition leader epoch + magic + attributes + ... (everything after batch length) - // Skip: BaseOffset(8) + BatchLength(4) = 12 bytes - crcData := batch[crcPos+4:] // CRC covers ONLY from attributes (byte 21) onwards // Skip CRC field itself, include rest - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - return batch -} - -// encodeVarint encodes a signed integer using Kafka's varint encoding -func encodeVarint(value int64) []byte { - // Kafka uses zigzag encoding for signed integers - zigzag := uint64((value << 1) ^ (value >> 63)) - - var buf []byte - for zigzag >= 0x80 { - buf = append(buf, byte(zigzag)|0x80) - zigzag >>= 7 - } - buf = append(buf, byte(zigzag)) - return buf -} - -// SchematizedRecord holds both key and value for schematized messages -type SchematizedRecord struct { - Key []byte - Value []byte -} - -// createEmptyRecordBatch creates an empty Kafka record batch using the new parser -func (h *Handler) createEmptyRecordBatch(baseOffset int64) []byte { - // Use the new record batch creation function with no compression - emptyRecords := []byte{} - batch, err := CreateRecordBatch(baseOffset, emptyRecords, compression.None) - if err != nil { - // Fallback to manual creation if there's an error - return h.createEmptyRecordBatchManual(baseOffset) - } - return batch -} - -// createEmptyRecordBatchManual creates an empty Kafka record batch manually (fallback) -func (h *Handler) createEmptyRecordBatchManual(baseOffset int64) []byte { - // Create a minimal empty record batch - batch := make([]byte, 0, 61) // Standard record batch header size - - // Base offset (8 bytes) - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) - - // Batch length (4 bytes) - will be filled at the end - lengthPlaceholder := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Partition leader epoch (4 bytes) - 0 for simplicity - batch = append(batch, 0, 0, 0, 0) - - // Magic byte (1 byte) - version 2 - batch = append(batch, 2) - - // CRC32 (4 bytes) - placeholder, should be calculated - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - no compression, no transactional - batch = append(batch, 0, 0) - - // Last offset delta (4 bytes) - 0 for empty batch - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // First timestamp (8 bytes) - current time - timestamp := time.Now().UnixMilli() - timestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(timestampBytes, uint64(timestamp)) - batch = append(batch, timestampBytes...) - - // Max timestamp (8 bytes) - same as first for empty batch - batch = append(batch, timestampBytes...) - - // Producer ID (8 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer Epoch (2 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF) - - // Base Sequence (4 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Record count (4 bytes) - 0 for empty batch - batch = append(batch, 0, 0, 0, 0) - - // Fill in the batch length - batchLength := len(batch) - 12 // Exclude base offset and length field itself - binary.BigEndian.PutUint32(batch[lengthPlaceholder:lengthPlaceholder+4], uint32(batchLength)) - - return batch -} - -// isSchematizedTopic checks if a topic uses schema management -func (h *Handler) isSchematizedTopic(topicName string) bool { - // System topics (_schemas, __consumer_offsets, etc.) should NEVER use schema encoding - // They have their own internal formats and should be passed through as-is - if h.isSystemTopic(topicName) { - return false - } - - if !h.IsSchemaEnabled() { - return false - } - - // Check multiple indicators for schematized topics: - - // Check Confluent Schema Registry naming conventions - return h.matchesSchemaRegistryConvention(topicName) -} - -// matchesSchemaRegistryConvention checks Confluent Schema Registry naming patterns -func (h *Handler) matchesSchemaRegistryConvention(topicName string) bool { - // Common Schema Registry subject patterns: - // - topicName-value (for message values) - // - topicName-key (for message keys) - // - topicName (direct topic name as subject) - - if len(topicName) > 6 && topicName[len(topicName)-6:] == "-value" { - return true - } - if len(topicName) > 4 && topicName[len(topicName)-4:] == "-key" { - return true - } - - // Check if the topic has registered schema subjects in Schema Registry - // Use standard Kafka naming convention: -value and -key - if h.schemaManager != nil { - // Check with -value suffix (standard pattern for value schemas) - latestSchemaValue, err := h.schemaManager.GetLatestSchema(topicName + "-value") - if err == nil { - // Since we retrieved schema from registry, ensure topic config is updated - h.ensureTopicSchemaFromLatestSchema(topicName, latestSchemaValue) - return true - } - - // Check with -key suffix (for key schemas) - latestSchemaKey, err := h.schemaManager.GetLatestSchema(topicName + "-key") - if err == nil { - // Since we retrieved key schema from registry, ensure topic config is updated - h.ensureTopicKeySchemaFromLatestSchema(topicName, latestSchemaKey) - return true - } - } - - return false -} - -// getSchemaMetadataForTopic retrieves schema metadata for a topic -func (h *Handler) getSchemaMetadataForTopic(topicName string) (map[string]string, error) { - if !h.IsSchemaEnabled() { - return nil, fmt.Errorf("schema management not enabled") - } - - // Try multiple approaches to get schema metadata from Schema Registry - - // 1. Try to get schema from registry using topic name as subject - metadata, err := h.getSchemaMetadataFromRegistry(topicName) - if err == nil { - return metadata, nil - } - - // 2. Try with -value suffix (common pattern) - metadata, err = h.getSchemaMetadataFromRegistry(topicName + "-value") - if err == nil { - return metadata, nil - } - - // 3. Try with -key suffix - metadata, err = h.getSchemaMetadataFromRegistry(topicName + "-key") - if err == nil { - return metadata, nil - } - - return nil, fmt.Errorf("no schema found in registry for topic %s (tried %s, %s-value, %s-key)", topicName, topicName, topicName, topicName) -} - -// getSchemaMetadataFromRegistry retrieves schema metadata from Schema Registry -func (h *Handler) getSchemaMetadataFromRegistry(subject string) (map[string]string, error) { - if h.schemaManager == nil { - return nil, fmt.Errorf("schema manager not available") - } - - // Get latest schema for the subject - cachedSchema, err := h.schemaManager.GetLatestSchema(subject) - if err != nil { - return nil, fmt.Errorf("failed to get schema for subject %s: %w", subject, err) - } - - // Since we retrieved schema from registry, ensure topic config is updated - // Extract topic name from subject (remove -key or -value suffix if present) - topicName := h.extractTopicFromSubject(subject) - if topicName != "" { - h.ensureTopicSchemaFromLatestSchema(topicName, cachedSchema) - } - - // Build metadata map - // Detect format from schema content - // Simple format detection - assume Avro for now - format := schema.FormatAvro - - metadata := map[string]string{ - "schema_id": fmt.Sprintf("%d", cachedSchema.LatestID), - "schema_format": format.String(), - "schema_subject": subject, - "schema_version": fmt.Sprintf("%d", cachedSchema.Version), - "schema_content": cachedSchema.Schema, - } - - return metadata, nil -} - -// ensureTopicSchemaFromLatestSchema ensures topic configuration is updated when latest schema is retrieved -func (h *Handler) ensureTopicSchemaFromLatestSchema(topicName string, latestSchema *schema.CachedSubject) { - if latestSchema == nil { - return - } - - // Convert CachedSubject to CachedSchema format for reuse - // Note: CachedSubject has different field structure than expected - cachedSchema := &schema.CachedSchema{ - ID: latestSchema.LatestID, - Schema: latestSchema.Schema, - Subject: latestSchema.Subject, - Version: latestSchema.Version, - Format: schema.FormatAvro, // Default to Avro, could be improved with format detection - CachedAt: latestSchema.CachedAt, - } - - // Use existing function to handle the schema update - h.ensureTopicSchemaFromRegistryCache(topicName, cachedSchema) -} - -// extractTopicFromSubject extracts the topic name from a schema registry subject -func (h *Handler) extractTopicFromSubject(subject string) string { - // Remove common suffixes used in schema registry - if strings.HasSuffix(subject, "-value") { - return strings.TrimSuffix(subject, "-value") - } - if strings.HasSuffix(subject, "-key") { - return strings.TrimSuffix(subject, "-key") - } - // If no suffix, assume subject name is the topic name - return subject -} - -// ensureTopicKeySchemaFromLatestSchema ensures topic configuration is updated when key schema is retrieved -func (h *Handler) ensureTopicKeySchemaFromLatestSchema(topicName string, latestSchema *schema.CachedSubject) { - if latestSchema == nil { - return - } - - // Convert CachedSubject to CachedSchema format for reuse - // Note: CachedSubject has different field structure than expected - cachedSchema := &schema.CachedSchema{ - ID: latestSchema.LatestID, - Schema: latestSchema.Schema, - Subject: latestSchema.Subject, - Version: latestSchema.Version, - Format: schema.FormatAvro, // Default to Avro, could be improved with format detection - CachedAt: latestSchema.CachedAt, - } - - // Use existing function to handle the key schema update - h.ensureTopicKeySchemaFromRegistryCache(topicName, cachedSchema) -} - -// decodeRecordValueToKafkaMessage decodes a RecordValue back to the original Kafka message bytes -func (h *Handler) decodeRecordValueToKafkaMessage(topicName string, recordValueBytes []byte) []byte { - if recordValueBytes == nil { - return nil - } - - // For system topics like _schemas, _consumer_offsets, etc., - // return the raw bytes as-is. These topics store Kafka's internal format (Avro, etc.) - // and should NOT be processed as RecordValue protobuf messages. - if strings.HasPrefix(topicName, "_") { - return recordValueBytes - } - - // CRITICAL: If schema management is not enabled, we should NEVER try to parse as RecordValue - // All messages are stored as raw bytes when schema management is disabled - // Attempting to parse them as RecordValue will cause corruption due to protobuf's lenient parsing - if !h.IsSchemaEnabled() { - return recordValueBytes - } - - // Try to unmarshal as RecordValue - recordValue := &schema_pb.RecordValue{} - if err := proto.Unmarshal(recordValueBytes, recordValue); err != nil { - // Not a RecordValue format - this is normal for Avro/JSON/raw Kafka messages - // Return raw bytes as-is (Kafka consumers expect this) - return recordValueBytes - } - - // Validate that the unmarshaled RecordValue is actually a valid RecordValue - // Protobuf unmarshal is lenient and can succeed with garbage data for random bytes - // We need to check if this looks like a real RecordValue or just random bytes - if !h.isValidRecordValue(recordValue, recordValueBytes) { - // Not a valid RecordValue - return raw bytes as-is - return recordValueBytes - } - - // If schema management is enabled, re-encode the RecordValue to Confluent format - if h.IsSchemaEnabled() { - if encodedMsg, err := h.encodeRecordValueToConfluentFormat(topicName, recordValue); err == nil { - return encodedMsg - } else { - } - } - - // Fallback: convert RecordValue to JSON - return h.recordValueToJSON(recordValue) -} - -// isValidRecordValue checks if a RecordValue looks like a real RecordValue or garbage from random bytes -// This performs a roundtrip test: marshal the RecordValue and check if it produces similar output -func (h *Handler) isValidRecordValue(recordValue *schema_pb.RecordValue, originalBytes []byte) bool { - // Empty or nil Fields means not a valid RecordValue - if recordValue == nil || recordValue.Fields == nil || len(recordValue.Fields) == 0 { - return false - } - - // Check if field names are valid UTF-8 strings (not binary garbage) - // Real RecordValue messages have proper field names like "name", "age", etc. - // Random bytes parsed as protobuf often create non-UTF8 or very short field names - for fieldName, fieldValue := range recordValue.Fields { - // Field name should be valid UTF-8 - if !utf8.ValidString(fieldName) { - return false - } - - // Field name should have reasonable length (at least 1 char, at most 1000) - if len(fieldName) == 0 || len(fieldName) > 1000 { - return false - } - - // Field value should not be nil - if fieldValue == nil || fieldValue.Kind == nil { - return false - } - } - - // Roundtrip check: If this is a real RecordValue, marshaling it back should produce - // similar-sized output. Random bytes that accidentally parse as protobuf will typically - // produce very different output when marshaled back. - remarshaled, err := proto.Marshal(recordValue) - if err != nil { - return false - } - - // Check if the sizes are reasonably similar (within 50% tolerance) - // Real RecordValue will have similar size, random bytes will be very different - originalSize := len(originalBytes) - remarshaledSize := len(remarshaled) - if originalSize == 0 { - return false - } - - // Calculate size ratio - should be close to 1.0 for real RecordValue - ratio := float64(remarshaledSize) / float64(originalSize) - if ratio < 0.5 || ratio > 2.0 { - // Size differs too much - this is likely random bytes parsed as protobuf - return false - } - - return true -} - -// encodeRecordValueToConfluentFormat re-encodes a RecordValue back to Confluent format -func (h *Handler) encodeRecordValueToConfluentFormat(topicName string, recordValue *schema_pb.RecordValue) ([]byte, error) { - if recordValue == nil { - return nil, fmt.Errorf("RecordValue is nil") - } - - // Get schema configuration from topic config - schemaConfig, err := h.getTopicSchemaConfig(topicName) - if err != nil { - return nil, fmt.Errorf("failed to get topic schema config: %w", err) - } - - // Use schema manager to encode RecordValue back to original format - encodedBytes, err := h.schemaManager.EncodeMessage(recordValue, schemaConfig.ValueSchemaID, schemaConfig.ValueSchemaFormat) - if err != nil { - return nil, fmt.Errorf("failed to encode RecordValue: %w", err) - } - - return encodedBytes, nil -} - -// getTopicSchemaConfig retrieves schema configuration for a topic -func (h *Handler) getTopicSchemaConfig(topicName string) (*TopicSchemaConfig, error) { - h.topicSchemaConfigMu.RLock() - defer h.topicSchemaConfigMu.RUnlock() - - if h.topicSchemaConfigs == nil { - return nil, fmt.Errorf("no schema configuration available for topic: %s", topicName) - } - - config, exists := h.topicSchemaConfigs[topicName] - if !exists { - return nil, fmt.Errorf("no schema configuration found for topic: %s", topicName) - } - - return config, nil -} - -// recordValueToJSON converts a RecordValue to JSON bytes (fallback) -func (h *Handler) recordValueToJSON(recordValue *schema_pb.RecordValue) []byte { - if recordValue == nil || recordValue.Fields == nil { - return []byte("{}") - } - - // Simple JSON conversion - in a real implementation, this would be more sophisticated - jsonStr := "{" - first := true - for fieldName, fieldValue := range recordValue.Fields { - if !first { - jsonStr += "," - } - first = false - - jsonStr += fmt.Sprintf(`"%s":`, fieldName) - - switch v := fieldValue.Kind.(type) { - case *schema_pb.Value_StringValue: - jsonStr += fmt.Sprintf(`"%s"`, v.StringValue) - case *schema_pb.Value_BytesValue: - jsonStr += fmt.Sprintf(`"%s"`, string(v.BytesValue)) - case *schema_pb.Value_Int32Value: - jsonStr += fmt.Sprintf(`%d`, v.Int32Value) - case *schema_pb.Value_Int64Value: - jsonStr += fmt.Sprintf(`%d`, v.Int64Value) - case *schema_pb.Value_BoolValue: - jsonStr += fmt.Sprintf(`%t`, v.BoolValue) - default: - jsonStr += `null` - } - } - jsonStr += "}" - - return []byte(jsonStr) -} diff --git a/weed/mq/kafka/protocol/fetch_multibatch.go b/weed/mq/kafka/protocol/fetch_multibatch.go deleted file mode 100644 index 192872850..000000000 --- a/weed/mq/kafka/protocol/fetch_multibatch.go +++ /dev/null @@ -1,624 +0,0 @@ -package protocol - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "hash/crc32" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/compression" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" -) - -// MultiBatchFetcher handles fetching multiple record batches with size limits -type MultiBatchFetcher struct { - handler *Handler -} - -// NewMultiBatchFetcher creates a new multi-batch fetcher -func NewMultiBatchFetcher(handler *Handler) *MultiBatchFetcher { - return &MultiBatchFetcher{handler: handler} -} - -// FetchResult represents the result of a multi-batch fetch operation -type FetchResult struct { - RecordBatches []byte // Concatenated record batches - NextOffset int64 // Next offset to fetch from - TotalSize int32 // Total size of all batches - BatchCount int // Number of batches included -} - -// FetchMultipleBatches fetches multiple record batches up to maxBytes limit -// ctx controls the fetch timeout (should match Kafka fetch request's MaxWaitTime) -func (f *MultiBatchFetcher) FetchMultipleBatches(ctx context.Context, topicName string, partitionID int32, startOffset, highWaterMark int64, maxBytes int32) (*FetchResult, error) { - - if startOffset >= highWaterMark { - return &FetchResult{ - RecordBatches: []byte{}, - NextOffset: startOffset, - TotalSize: 0, - BatchCount: 0, - }, nil - } - - // Minimum size for basic response headers and one empty batch - minResponseSize := int32(200) - if maxBytes < minResponseSize { - maxBytes = minResponseSize - } - - var combinedBatches []byte - currentOffset := startOffset - totalSize := int32(0) - batchCount := 0 - - // Estimate records per batch based on maxBytes available - // Assume average message size + batch overhead - // Client requested maxBytes, we should use most of it - // Start with larger batches to maximize throughput - estimatedMsgSize := int32(1024) // Typical message size with overhead - recordsPerBatch := (maxBytes - 200) / estimatedMsgSize // Use available space efficiently - if recordsPerBatch < 100 { - recordsPerBatch = 100 // Minimum 100 records per batch - } - if recordsPerBatch > 10000 { - recordsPerBatch = 10000 // Cap at 10k records per batch to avoid huge memory allocations - } - maxBatchesPerFetch := int((maxBytes - 200) / (estimatedMsgSize * 10)) // Reasonable limit - if maxBatchesPerFetch < 5 { - maxBatchesPerFetch = 5 // At least 5 batches - } - if maxBatchesPerFetch > 100 { - maxBatchesPerFetch = 100 // At most 100 batches - } - - for batchCount < maxBatchesPerFetch && currentOffset < highWaterMark { - - // Calculate remaining space - remainingBytes := maxBytes - totalSize - if remainingBytes < 100 { // Need at least 100 bytes for a minimal batch - break - } - - // Adapt records per batch based on remaining space - // If we have less space remaining, fetch fewer records to avoid going over - currentBatchSize := recordsPerBatch - if remainingBytes < recordsPerBatch*estimatedMsgSize { - currentBatchSize = remainingBytes / estimatedMsgSize - if currentBatchSize < 1 { - currentBatchSize = 1 - } - } - - // Calculate how many records to fetch for this batch - recordsAvailable := highWaterMark - currentOffset - if recordsAvailable <= 0 { - break - } - - recordsToFetch := currentBatchSize - if int64(recordsToFetch) > recordsAvailable { - recordsToFetch = int32(recordsAvailable) - } - - // Check if handler is nil - if f.handler == nil { - break - } - if f.handler.seaweedMQHandler == nil { - break - } - - // Fetch records for this batch - // Pass context to respect Kafka fetch request's MaxWaitTime - smqRecords, err := f.handler.seaweedMQHandler.GetStoredRecords(ctx, topicName, partitionID, currentOffset, int(recordsToFetch)) - - if err != nil || len(smqRecords) == 0 { - break - } - - // Note: we construct the batch and check actual size after construction - - // Construct record batch - batch := f.constructSingleRecordBatch(topicName, currentOffset, smqRecords) - batchSize := int32(len(batch)) - - // Double-check actual size doesn't exceed maxBytes - if totalSize+batchSize > maxBytes && batchCount > 0 { - break - } - - // Add this batch to combined result - combinedBatches = append(combinedBatches, batch...) - totalSize += batchSize - currentOffset += int64(len(smqRecords)) - batchCount++ - - // If this is a small batch, we might be at the end - if len(smqRecords) < int(recordsPerBatch) { - break - } - } - - result := &FetchResult{ - RecordBatches: combinedBatches, - NextOffset: currentOffset, - TotalSize: totalSize, - BatchCount: batchCount, - } - - return result, nil -} - -// constructSingleRecordBatch creates a single record batch from SMQ records -func (f *MultiBatchFetcher) constructSingleRecordBatch(topicName string, baseOffset int64, smqRecords []integration.SMQRecord) []byte { - if len(smqRecords) == 0 { - return f.constructEmptyRecordBatch(baseOffset) - } - - // Create record batch using the SMQ records - batch := make([]byte, 0, 512) - - // Record batch header - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) // base offset (8 bytes) - - // Calculate batch length (will be filled after we know the size) - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) // batch length placeholder (4 bytes) - - // Partition leader epoch (4 bytes) - use 0 (real Kafka uses 0, not -1) - batch = append(batch, 0x00, 0x00, 0x00, 0x00) - - // Magic byte (1 byte) - v2 format - batch = append(batch, 2) - - // CRC placeholder (4 bytes) - will be calculated later - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - no compression, etc. - batch = append(batch, 0, 0) - - // Last offset delta (4 bytes) - lastOffsetDelta := int32(len(smqRecords) - 1) - lastOffsetDeltaBytes := make([]byte, 4) - binary.BigEndian.PutUint32(lastOffsetDeltaBytes, uint32(lastOffsetDelta)) - batch = append(batch, lastOffsetDeltaBytes...) - - // Base timestamp (8 bytes) - convert from nanoseconds to milliseconds for Kafka compatibility - baseTimestamp := smqRecords[0].GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - baseTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseTimestampBytes, uint64(baseTimestamp)) - batch = append(batch, baseTimestampBytes...) - - // Max timestamp (8 bytes) - convert from nanoseconds to milliseconds for Kafka compatibility - maxTimestamp := baseTimestamp - if len(smqRecords) > 1 { - maxTimestamp = smqRecords[len(smqRecords)-1].GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - } - maxTimestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxTimestampBytes, uint64(maxTimestamp)) - batch = append(batch, maxTimestampBytes...) - - // Producer ID (8 bytes) - use -1 for no producer ID - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer epoch (2 bytes) - use -1 for no producer epoch - batch = append(batch, 0xFF, 0xFF) - - // Base sequence (4 bytes) - use -1 for no base sequence - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Records count (4 bytes) - recordCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(recordCountBytes, uint32(len(smqRecords))) - batch = append(batch, recordCountBytes...) - - // Add individual records from SMQ records - for i, smqRecord := range smqRecords { - // Build individual record - recordBytes := make([]byte, 0, 128) - - // Record attributes (1 byte) - recordBytes = append(recordBytes, 0) - - // Timestamp delta (varint) - calculate from base timestamp (both in milliseconds) - recordTimestampMs := smqRecord.GetTimestamp() / 1000000 // Convert nanoseconds to milliseconds - timestampDelta := recordTimestampMs - baseTimestamp // Both in milliseconds now - recordBytes = append(recordBytes, encodeVarint(timestampDelta)...) - - // Offset delta (varint) - offsetDelta := int64(i) - recordBytes = append(recordBytes, encodeVarint(offsetDelta)...) - - // Key length and key (varint + data) - decode RecordValue to get original Kafka message - key := f.handler.decodeRecordValueToKafkaMessage(topicName, smqRecord.GetKey()) - if key == nil { - recordBytes = append(recordBytes, encodeVarint(-1)...) // null key - } else { - recordBytes = append(recordBytes, encodeVarint(int64(len(key)))...) - recordBytes = append(recordBytes, key...) - } - - // Value length and value (varint + data) - decode RecordValue to get original Kafka message - value := f.handler.decodeRecordValueToKafkaMessage(topicName, smqRecord.GetValue()) - - if value == nil { - recordBytes = append(recordBytes, encodeVarint(-1)...) // null value - } else { - recordBytes = append(recordBytes, encodeVarint(int64(len(value)))...) - recordBytes = append(recordBytes, value...) - } - - // Headers count (varint) - 0 headers - recordBytes = append(recordBytes, encodeVarint(0)...) - - // Prepend record length (varint) - recordLength := int64(len(recordBytes)) - batch = append(batch, encodeVarint(recordLength)...) - batch = append(batch, recordBytes...) - } - - // Fill in the batch length - batchLength := uint32(len(batch) - batchLengthPos - 4) - binary.BigEndian.PutUint32(batch[batchLengthPos:batchLengthPos+4], batchLength) - - // Debug: Log reconstructed batch (only at high verbosity) - if glog.V(4) { - fmt.Printf("\nโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\n") - fmt.Printf("๐Ÿ“ RECONSTRUCTED BATCH: topic=%s baseOffset=%d size=%d bytes, recordCount=%d\n", - topicName, baseOffset, len(batch), len(smqRecords)) - } - - if glog.V(4) && len(batch) >= 61 { - fmt.Printf(" Header Structure:\n") - fmt.Printf(" Base Offset (0-7): %x\n", batch[0:8]) - fmt.Printf(" Batch Length (8-11): %x\n", batch[8:12]) - fmt.Printf(" Leader Epoch (12-15): %x\n", batch[12:16]) - fmt.Printf(" Magic (16): %x\n", batch[16:17]) - fmt.Printf(" CRC (17-20): %x (WILL BE CALCULATED)\n", batch[17:21]) - fmt.Printf(" Attributes (21-22): %x\n", batch[21:23]) - fmt.Printf(" Last Offset Delta (23-26): %x\n", batch[23:27]) - fmt.Printf(" Base Timestamp (27-34): %x\n", batch[27:35]) - fmt.Printf(" Max Timestamp (35-42): %x\n", batch[35:43]) - fmt.Printf(" Producer ID (43-50): %x\n", batch[43:51]) - fmt.Printf(" Producer Epoch (51-52): %x\n", batch[51:53]) - fmt.Printf(" Base Sequence (53-56): %x\n", batch[53:57]) - fmt.Printf(" Record Count (57-60): %x\n", batch[57:61]) - if len(batch) > 61 { - fmt.Printf(" Records Section (61+): %x... (%d bytes)\n", - batch[61:min(81, len(batch))], len(batch)-61) - } - } - - // Calculate CRC32 for the batch - // Per Kafka spec: CRC covers ONLY from attributes offset (byte 21) onwards - // See: DefaultRecordBatch.java computeChecksum() - Crc32C.compute(buffer, ATTRIBUTES_OFFSET, ...) - crcData := batch[crcPos+4:] // Skip CRC field itself, include rest - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - - // CRC debug (only at high verbosity) - if glog.V(4) { - batchLengthValue := binary.BigEndian.Uint32(batch[8:12]) - expectedTotalSize := 12 + int(batchLengthValue) - actualTotalSize := len(batch) - - fmt.Printf("\n === CRC CALCULATION DEBUG ===\n") - fmt.Printf(" Batch length field (bytes 8-11): %d\n", batchLengthValue) - fmt.Printf(" Expected total batch size: %d bytes (12 + %d)\n", expectedTotalSize, batchLengthValue) - fmt.Printf(" Actual batch size: %d bytes\n", actualTotalSize) - fmt.Printf(" CRC position: byte %d\n", crcPos) - fmt.Printf(" CRC data range: bytes %d to %d (%d bytes)\n", crcPos+4, actualTotalSize-1, len(crcData)) - - if expectedTotalSize != actualTotalSize { - fmt.Printf(" SIZE MISMATCH: %d bytes difference!\n", actualTotalSize-expectedTotalSize) - } - - if crcPos != 17 { - fmt.Printf(" CRC POSITION WRONG: expected 17, got %d!\n", crcPos) - } - - fmt.Printf(" CRC data (first 100 bytes of %d):\n", len(crcData)) - dumpSize := 100 - if len(crcData) < dumpSize { - dumpSize = len(crcData) - } - for i := 0; i < dumpSize; i += 20 { - end := i + 20 - if end > dumpSize { - end = dumpSize - } - fmt.Printf(" [%3d-%3d]: %x\n", i, end-1, crcData[i:end]) - } - - manualCRC := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - fmt.Printf(" Calculated CRC: 0x%08x\n", crc) - fmt.Printf(" Manual verify: 0x%08x", manualCRC) - if crc == manualCRC { - fmt.Printf(" OK\n") - } else { - fmt.Printf(" MISMATCH!\n") - } - - if actualTotalSize <= 200 { - fmt.Printf(" Complete batch hex dump (%d bytes):\n", actualTotalSize) - for i := 0; i < actualTotalSize; i += 16 { - end := i + 16 - if end > actualTotalSize { - end = actualTotalSize - } - fmt.Printf(" %04d: %x\n", i, batch[i:end]) - } - } - fmt.Printf(" === END CRC DEBUG ===\n\n") - } - - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - if glog.V(4) { - fmt.Printf(" Final CRC (17-20): %x (calculated over %d bytes)\n", batch[17:21], len(crcData)) - - // VERIFICATION: Read back what we just wrote - writtenCRC := binary.BigEndian.Uint32(batch[17:21]) - fmt.Printf(" VERIFICATION: CRC we calculated=0x%x, CRC written to batch=0x%x", crc, writtenCRC) - if crc == writtenCRC { - fmt.Printf(" OK\n") - } else { - fmt.Printf(" MISMATCH!\n") - } - - // DEBUG: Hash the entire batch to check if reconstructions are identical - batchHash := crc32.ChecksumIEEE(batch) - fmt.Printf(" BATCH IDENTITY: hash=0x%08x size=%d topic=%s baseOffset=%d recordCount=%d\n", - batchHash, len(batch), topicName, baseOffset, len(smqRecords)) - - // DEBUG: Show first few record keys/values to verify consistency - if len(smqRecords) > 0 && strings.Contains(topicName, "loadtest") { - fmt.Printf(" RECORD SAMPLES:\n") - for i := 0; i < min(3, len(smqRecords)); i++ { - keyPreview := smqRecords[i].GetKey() - if len(keyPreview) > 20 { - keyPreview = keyPreview[:20] - } - valuePreview := smqRecords[i].GetValue() - if len(valuePreview) > 40 { - valuePreview = valuePreview[:40] - } - fmt.Printf(" [%d] keyLen=%d valueLen=%d keyHex=%x valueHex=%x\n", - i, len(smqRecords[i].GetKey()), len(smqRecords[i].GetValue()), - keyPreview, valuePreview) - } - } - - fmt.Printf(" Batch for topic=%s baseOffset=%d recordCount=%d\n", topicName, baseOffset, len(smqRecords)) - fmt.Printf("โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\n\n") - } - - return batch -} - -// constructEmptyRecordBatch creates an empty record batch -func (f *MultiBatchFetcher) constructEmptyRecordBatch(baseOffset int64) []byte { - // Create minimal empty record batch - batch := make([]byte, 0, 61) - - // Base offset (8 bytes) - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) - - // Batch length (4 bytes) - will be filled at the end - lengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Partition leader epoch (4 bytes) - -1 - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Magic byte (1 byte) - version 2 - batch = append(batch, 2) - - // CRC32 (4 bytes) - placeholder - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - no compression, no transactional - batch = append(batch, 0, 0) - - // Last offset delta (4 bytes) - -1 for empty batch - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Base timestamp (8 bytes) - timestamp := uint64(1640995200000) // Fixed timestamp for empty batches - timestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(timestampBytes, timestamp) - batch = append(batch, timestampBytes...) - - // Max timestamp (8 bytes) - same as base for empty batch - batch = append(batch, timestampBytes...) - - // Producer ID (8 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer Epoch (2 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF) - - // Base Sequence (4 bytes) - -1 for non-transactional - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Record count (4 bytes) - 0 for empty batch - batch = append(batch, 0, 0, 0, 0) - - // Fill in the batch length - batchLength := len(batch) - 12 // Exclude base offset and length field itself - binary.BigEndian.PutUint32(batch[lengthPos:lengthPos+4], uint32(batchLength)) - - // Calculate CRC32 for the batch - // Per Kafka spec: CRC covers ONLY from attributes offset (byte 21) onwards - // See: DefaultRecordBatch.java computeChecksum() - Crc32C.compute(buffer, ATTRIBUTES_OFFSET, ...) - crcData := batch[crcPos+4:] // Skip CRC field itself, include rest - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - return batch -} - -// CompressedBatchResult represents a compressed record batch result -type CompressedBatchResult struct { - CompressedData []byte - OriginalSize int32 - CompressedSize int32 - Codec compression.CompressionCodec -} - -// CreateCompressedBatch creates a compressed record batch (basic support) -func (f *MultiBatchFetcher) CreateCompressedBatch(baseOffset int64, smqRecords []integration.SMQRecord, codec compression.CompressionCodec) (*CompressedBatchResult, error) { - if codec == compression.None { - // No compression requested - batch := f.constructSingleRecordBatch("", baseOffset, smqRecords) - return &CompressedBatchResult{ - CompressedData: batch, - OriginalSize: int32(len(batch)), - CompressedSize: int32(len(batch)), - Codec: compression.None, - }, nil - } - - // For Phase 5, implement basic GZIP compression support - originalBatch := f.constructSingleRecordBatch("", baseOffset, smqRecords) - originalSize := int32(len(originalBatch)) - - compressedData, err := f.compressData(originalBatch, codec) - if err != nil { - // Fall back to uncompressed if compression fails - return &CompressedBatchResult{ - CompressedData: originalBatch, - OriginalSize: originalSize, - CompressedSize: originalSize, - Codec: compression.None, - }, nil - } - - // Create compressed record batch with proper headers - compressedBatch := f.constructCompressedRecordBatch(baseOffset, compressedData, codec, originalSize) - - return &CompressedBatchResult{ - CompressedData: compressedBatch, - OriginalSize: originalSize, - CompressedSize: int32(len(compressedBatch)), - Codec: codec, - }, nil -} - -// constructCompressedRecordBatch creates a record batch with compressed records -func (f *MultiBatchFetcher) constructCompressedRecordBatch(baseOffset int64, compressedRecords []byte, codec compression.CompressionCodec, originalSize int32) []byte { - // Validate size to prevent overflow - const maxBatchSize = 1 << 30 // 1 GB limit - if len(compressedRecords) > maxBatchSize-100 { - glog.Errorf("Compressed records too large: %d bytes", len(compressedRecords)) - return nil - } - batch := make([]byte, 0, len(compressedRecords)+100) - - // Record batch header is similar to regular batch - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) - - // Batch length (4 bytes) - will be filled later - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Partition leader epoch (4 bytes) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Magic byte (1 byte) - v2 format - batch = append(batch, 2) - - // CRC placeholder (4 bytes) - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - set compression bits - var compressionBits uint16 - switch codec { - case compression.Gzip: - compressionBits = 1 - case compression.Snappy: - compressionBits = 2 - case compression.Lz4: - compressionBits = 3 - case compression.Zstd: - compressionBits = 4 - default: - compressionBits = 0 // no compression - } - batch = append(batch, byte(compressionBits>>8), byte(compressionBits)) - - // Last offset delta (4 bytes) - for compressed batches, this represents the logical record count - batch = append(batch, 0, 0, 0, 0) // Will be set based on logical records - - // Timestamps (16 bytes) - use current time for compressed batches - timestamp := uint64(1640995200000) - timestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(timestampBytes, timestamp) - batch = append(batch, timestampBytes...) // first timestamp - batch = append(batch, timestampBytes...) // max timestamp - - // Producer fields (14 bytes total) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) // producer ID - batch = append(batch, 0xFF, 0xFF) // producer epoch - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) // base sequence - - // Record count (4 bytes) - for compressed batches, this is the number of logical records - batch = append(batch, 0, 0, 0, 1) // Placeholder: treat as 1 logical record - - // Compressed records data - batch = append(batch, compressedRecords...) - - // Fill in the batch length - batchLength := uint32(len(batch) - batchLengthPos - 4) - binary.BigEndian.PutUint32(batch[batchLengthPos:batchLengthPos+4], batchLength) - - // Calculate CRC32 for the batch - // Per Kafka spec: CRC covers ONLY from attributes offset (byte 21) onwards - // See: DefaultRecordBatch.java computeChecksum() - Crc32C.compute(buffer, ATTRIBUTES_OFFSET, ...) - crcData := batch[crcPos+4:] // Skip CRC field itself, include rest - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - return batch -} - -// compressData compresses data using the specified codec (basic implementation) -func (f *MultiBatchFetcher) compressData(data []byte, codec compression.CompressionCodec) ([]byte, error) { - // For Phase 5, implement basic compression support - switch codec { - case compression.None: - return data, nil - case compression.Gzip: - // Implement actual GZIP compression - var buf bytes.Buffer - gzipWriter := gzip.NewWriter(&buf) - - if _, err := gzipWriter.Write(data); err != nil { - gzipWriter.Close() - return nil, fmt.Errorf("gzip compression write failed: %w", err) - } - - if err := gzipWriter.Close(); err != nil { - return nil, fmt.Errorf("gzip compression close failed: %w", err) - } - - compressed := buf.Bytes() - - return compressed, nil - default: - return nil, fmt.Errorf("unsupported compression codec: %d", codec) - } -} diff --git a/weed/mq/kafka/protocol/fetch_partition_reader.go b/weed/mq/kafka/protocol/fetch_partition_reader.go deleted file mode 100644 index 6583c6489..000000000 --- a/weed/mq/kafka/protocol/fetch_partition_reader.go +++ /dev/null @@ -1,270 +0,0 @@ -package protocol - -import ( - "context" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// partitionReader maintains a persistent connection to a single topic-partition -// and streams records forward, eliminating repeated offset lookups -// Pre-fetches and buffers records for instant serving -type partitionReader struct { - topicName string - partitionID int32 - currentOffset int64 - fetchChan chan *partitionFetchRequest - closeChan chan struct{} - - // Pre-fetch buffer support - recordBuffer chan *bufferedRecords // Buffered pre-fetched records - bufferMu sync.Mutex // Protects offset access - - handler *Handler - connCtx *ConnectionContext -} - -// bufferedRecords represents a batch of pre-fetched records -type bufferedRecords struct { - recordBatch []byte - startOffset int64 - endOffset int64 - highWaterMark int64 -} - -// partitionFetchRequest represents a request to fetch data from this partition -type partitionFetchRequest struct { - requestedOffset int64 - maxBytes int32 - maxWaitMs int32 // MaxWaitTime from Kafka fetch request - resultChan chan *partitionFetchResult - isSchematized bool - apiVersion uint16 - correlationID int32 // Added for correlation tracking -} - -// newPartitionReader creates and starts a new partition reader with pre-fetch buffering -func newPartitionReader(ctx context.Context, handler *Handler, connCtx *ConnectionContext, topicName string, partitionID int32, startOffset int64) *partitionReader { - pr := &partitionReader{ - topicName: topicName, - partitionID: partitionID, - currentOffset: startOffset, - fetchChan: make(chan *partitionFetchRequest, 200), // Buffer 200 requests to handle Schema Registry's rapid polling in slow CI environments - closeChan: make(chan struct{}), - recordBuffer: make(chan *bufferedRecords, 5), // Buffer 5 batches of records - handler: handler, - connCtx: connCtx, - } - - // Start the pre-fetch goroutine that continuously fetches ahead - go pr.preFetchLoop(ctx) - - // Start the request handler goroutine - go pr.handleRequests(ctx) - - glog.V(4).Infof("[%s] Created partition reader for %s[%d] starting at offset %d (sequential with ch=200)", - connCtx.ConnectionID, topicName, partitionID, startOffset) - - return pr -} - -// preFetchLoop is disabled for SMQ backend to prevent subscriber storms -// SMQ reads from disk and creating multiple concurrent subscribers causes -// broker overload and partition shutdowns. Fetch requests are handled -// on-demand in serveFetchRequest instead. -func (pr *partitionReader) preFetchLoop(ctx context.Context) { - defer func() { - glog.V(4).Infof("[%s] Pre-fetch loop exiting for %s[%d]", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID) - close(pr.recordBuffer) - }() - - // Wait for shutdown - no continuous pre-fetching to avoid overwhelming the broker - select { - case <-ctx.Done(): - return - case <-pr.closeChan: - return - } -} - -// handleRequests serves fetch requests SEQUENTIALLY to prevent subscriber storm -// Sequential processing is essential for SMQ backend because: -// 1. GetStoredRecords may create a new subscriber on each call -// 2. Concurrent calls create multiple subscribers for the same partition -// 3. This overwhelms the broker and causes partition shutdowns -func (pr *partitionReader) handleRequests(ctx context.Context) { - defer func() { - glog.V(4).Infof("[%s] Request handler exiting for %s[%d]", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID) - }() - - for { - select { - case <-ctx.Done(): - return - case <-pr.closeChan: - return - case req := <-pr.fetchChan: - // Process sequentially to prevent subscriber storm - pr.serveFetchRequest(ctx, req) - } - } -} - -// serveFetchRequest fetches data on-demand (no pre-fetching) -func (pr *partitionReader) serveFetchRequest(ctx context.Context, req *partitionFetchRequest) { - startTime := time.Now() - result := &partitionFetchResult{} - - defer func() { - result.fetchDuration = time.Since(startTime) - - // Send result back to client - select { - case req.resultChan <- result: - // Successfully sent - case <-ctx.Done(): - glog.Warningf("[%s] Context cancelled while sending result for %s[%d]", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID) - case <-time.After(50 * time.Millisecond): - glog.Warningf("[%s] Timeout sending result for %s[%d] - CLIENT MAY HAVE DISCONNECTED", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID) - } - }() - - // Get high water mark - hwm, hwmErr := pr.handler.seaweedMQHandler.GetLatestOffset(pr.topicName, pr.partitionID) - if hwmErr != nil { - glog.Errorf("[%s] CRITICAL: Failed to get HWM for %s[%d]: %v", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, hwmErr) - result.recordBatch = []byte{} - result.highWaterMark = 0 - return - } - result.highWaterMark = hwm - - glog.V(2).Infof("[%s] HWM for %s[%d]: %d (requested: %d)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, hwm, req.requestedOffset) - - // If requested offset >= HWM, return immediately with empty result - // This prevents overwhelming the broker with futile read attempts when no data is available - if req.requestedOffset >= hwm { - result.recordBatch = []byte{} - glog.V(3).Infof("[%s] Requested offset %d >= HWM %d, returning empty", - pr.connCtx.ConnectionID, req.requestedOffset, hwm) - return - } - - // Update tracking offset to match requested offset - pr.bufferMu.Lock() - if req.requestedOffset != pr.currentOffset { - glog.V(3).Infof("[%s] Updating currentOffset for %s[%d]: %d -> %d", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, pr.currentOffset, req.requestedOffset) - pr.currentOffset = req.requestedOffset - } - pr.bufferMu.Unlock() - - // Fetch on-demand - no pre-fetching to avoid overwhelming the broker - recordBatch, newOffset := pr.readRecords(ctx, req.requestedOffset, req.maxBytes, req.maxWaitMs, hwm) - - // Log what we got back - DETAILED for diagnostics - if len(recordBatch) == 0 { - glog.V(2).Infof("[%s] FETCH %s[%d]: readRecords returned EMPTY (offset=%d, hwm=%d)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, req.requestedOffset, hwm) - result.recordBatch = []byte{} - } else { - result.recordBatch = recordBatch - pr.bufferMu.Lock() - pr.currentOffset = newOffset - pr.bufferMu.Unlock() - } -} - -// readRecords reads records forward using the multi-batch fetcher -func (pr *partitionReader) readRecords(ctx context.Context, fromOffset int64, maxBytes int32, maxWaitMs int32, highWaterMark int64) ([]byte, int64) { - fetchStartTime := time.Now() - - // Create context with timeout based on Kafka fetch request's MaxWaitTime - // This ensures we wait exactly as long as the client requested - fetchCtx := ctx - if maxWaitMs > 0 { - var cancel context.CancelFunc - // Use 1.5x the client timeout to account for internal processing overhead - // This prevents legitimate slow reads from being killed by client timeout - internalTimeoutMs := int32(float64(maxWaitMs) * 1.5) - if internalTimeoutMs > 5000 { - internalTimeoutMs = 5000 // Cap at 5 seconds - } - fetchCtx, cancel = context.WithTimeout(ctx, time.Duration(internalTimeoutMs)*time.Millisecond) - defer cancel() - } - - // Use multi-batch fetcher for better MaxBytes compliance - multiFetcher := NewMultiBatchFetcher(pr.handler) - startTime := time.Now() - fetchResult, err := multiFetcher.FetchMultipleBatches( - fetchCtx, - pr.topicName, - pr.partitionID, - fromOffset, - highWaterMark, - maxBytes, - ) - fetchDuration := time.Since(startTime) - - // Log slow fetches (potential hangs) - if fetchDuration > 2*time.Second { - glog.Warningf("[%s] SLOW FETCH for %s[%d]: offset=%d took %.2fs (maxWait=%dms, HWM=%d)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, fromOffset, fetchDuration.Seconds(), maxWaitMs, highWaterMark) - } - - if err == nil && fetchResult.TotalSize > 0 { - glog.V(4).Infof("[%s] Multi-batch fetch for %s[%d]: %d batches, %d bytes, offset %d -> %d (duration: %v)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, - fetchResult.BatchCount, fetchResult.TotalSize, fromOffset, fetchResult.NextOffset, fetchDuration) - return fetchResult.RecordBatches, fetchResult.NextOffset - } - - // Multi-batch failed - try single batch WITHOUT the timeout constraint - // to ensure we get at least some data even if multi-batch timed out - glog.Warningf("[%s] Multi-batch fetch failed for %s[%d] offset=%d after %v, falling back to single-batch (err: %v)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, fromOffset, fetchDuration, err) - - // Use original context for fallback, NOT the timed-out fetchCtx - // This ensures the fallback has a fresh chance to fetch data - fallbackStartTime := time.Now() - smqRecords, err := pr.handler.seaweedMQHandler.GetStoredRecords(ctx, pr.topicName, pr.partitionID, fromOffset, 10) - fallbackDuration := time.Since(fallbackStartTime) - - if fallbackDuration > 2*time.Second { - glog.Warningf("[%s] SLOW FALLBACK for %s[%d]: offset=%d took %.2fs", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, fromOffset, fallbackDuration.Seconds()) - } - - if err != nil { - glog.Errorf("[%s] CRITICAL: Both multi-batch AND fallback failed for %s[%d] offset=%d: %v", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, fromOffset, err) - return []byte{}, fromOffset - } - - if len(smqRecords) > 0 { - recordBatch := pr.handler.constructRecordBatchFromSMQ(pr.topicName, fromOffset, smqRecords) - nextOffset := fromOffset + int64(len(smqRecords)) - glog.V(3).Infof("[%s] Fallback succeeded: got %d records for %s[%d] offset %d -> %d (total: %v)", - pr.connCtx.ConnectionID, len(smqRecords), pr.topicName, pr.partitionID, fromOffset, nextOffset, time.Since(fetchStartTime)) - return recordBatch, nextOffset - } - - // No records available - glog.V(3).Infof("[%s] No records available for %s[%d] offset=%d after multi-batch and fallback (total: %v)", - pr.connCtx.ConnectionID, pr.topicName, pr.partitionID, fromOffset, time.Since(fetchStartTime)) - return []byte{}, fromOffset -} - -// close signals the reader to shut down -func (pr *partitionReader) close() { - close(pr.closeChan) -} diff --git a/weed/mq/kafka/protocol/find_coordinator.go b/weed/mq/kafka/protocol/find_coordinator.go deleted file mode 100644 index 81e94d43f..000000000 --- a/weed/mq/kafka/protocol/find_coordinator.go +++ /dev/null @@ -1,498 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" - "net" - "strconv" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// CoordinatorRegistryInterface defines the interface for coordinator registry operations -type CoordinatorRegistryInterface interface { - IsLeader() bool - GetLeaderAddress() string - WaitForLeader(timeout time.Duration) (string, error) - AssignCoordinator(consumerGroup string, requestingGateway string) (*CoordinatorAssignment, error) - GetCoordinator(consumerGroup string) (*CoordinatorAssignment, error) -} - -// CoordinatorAssignment represents a consumer group coordinator assignment -type CoordinatorAssignment struct { - ConsumerGroup string - CoordinatorAddr string - CoordinatorNodeID int32 - AssignedAt time.Time - LastHeartbeat time.Time -} - -func (h *Handler) handleFindCoordinator(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - glog.V(2).Infof("FindCoordinator: version=%d, correlation=%d, bodyLen=%d", apiVersion, correlationID, len(requestBody)) - switch apiVersion { - case 0: - glog.V(4).Infof("FindCoordinator - Routing to V0 handler") - return h.handleFindCoordinatorV0(correlationID, requestBody) - case 1, 2: - glog.V(4).Infof("FindCoordinator - Routing to V1-2 handler (non-flexible)") - return h.handleFindCoordinatorV2(correlationID, requestBody) - case 3: - glog.V(4).Infof("FindCoordinator - Routing to V3 handler (flexible)") - return h.handleFindCoordinatorV3(correlationID, requestBody) - default: - return nil, fmt.Errorf("FindCoordinator version %d not supported", apiVersion) - } -} - -func (h *Handler) handleFindCoordinatorV0(correlationID uint32, requestBody []byte) ([]byte, error) { - // Parse FindCoordinator v0 request: Key (STRING) only - - if len(requestBody) < 2 { // need at least Key length - return nil, fmt.Errorf("FindCoordinator request too short") - } - - offset := 0 - - if len(requestBody) < offset+2 { // coordinator_key_size(2) - return nil, fmt.Errorf("FindCoordinator request missing data (need %d bytes, have %d)", offset+2, len(requestBody)) - } - - // Parse coordinator key (group ID for consumer groups) - coordinatorKeySize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(coordinatorKeySize) { - return nil, fmt.Errorf("FindCoordinator request missing coordinator key (need %d bytes, have %d)", offset+int(coordinatorKeySize), len(requestBody)) - } - - coordinatorKey := string(requestBody[offset : offset+int(coordinatorKeySize)]) - offset += int(coordinatorKeySize) - - // Parse coordinator type (v1+ only, default to 0 for consumer groups in v0) - _ = int8(0) // Consumer group coordinator (unused in v0) - - // Find the appropriate coordinator for this group - coordinatorHost, coordinatorPort, nodeID, err := h.findCoordinatorForGroup(coordinatorKey) - if err != nil { - return nil, fmt.Errorf("failed to find coordinator for group %s: %w", coordinatorKey, err) - } - - // Return hostname instead of IP address for client connectivity - // Clients need to connect to the same hostname they originally connected to - _ = coordinatorHost // originalHost - coordinatorHost = h.getClientConnectableHost(coordinatorHost) - - // Build response - response := make([]byte, 0, 64) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // FindCoordinator v0 Response Format (NO throttle_time_ms, NO error_message): - // - error_code (INT16) - // - node_id (INT32) - // - host (STRING) - // - port (INT32) - - // Error code (2 bytes, 0 = no error) - response = append(response, 0, 0) - - // Coordinator node_id (4 bytes) - use direct bit conversion for int32 to uint32 - nodeIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(nodeIDBytes, uint32(int32(nodeID))) - response = append(response, nodeIDBytes...) - - // Coordinator host (string) - hostLen := uint16(len(coordinatorHost)) - response = append(response, byte(hostLen>>8), byte(hostLen)) - response = append(response, []byte(coordinatorHost)...) - - // Coordinator port (4 bytes) - validate port range - if coordinatorPort < 0 || coordinatorPort > 65535 { - return nil, fmt.Errorf("invalid port number: %d", coordinatorPort) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(coordinatorPort)) - response = append(response, portBytes...) - - return response, nil -} - -func (h *Handler) handleFindCoordinatorV2(correlationID uint32, requestBody []byte) ([]byte, error) { - // Parse FindCoordinator request (v0-2 non-flex): Key (STRING), v1+ adds KeyType (INT8) - - if len(requestBody) < 2 { // need at least Key length - return nil, fmt.Errorf("FindCoordinator request too short") - } - - offset := 0 - - if len(requestBody) < offset+2 { // coordinator_key_size(2) - return nil, fmt.Errorf("FindCoordinator request missing data (need %d bytes, have %d)", offset+2, len(requestBody)) - } - - // Parse coordinator key (group ID for consumer groups) - coordinatorKeySize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(coordinatorKeySize) { - return nil, fmt.Errorf("FindCoordinator request missing coordinator key (need %d bytes, have %d)", offset+int(coordinatorKeySize), len(requestBody)) - } - - coordinatorKey := string(requestBody[offset : offset+int(coordinatorKeySize)]) - offset += int(coordinatorKeySize) - - // Coordinator type present in v1+ (INT8). If absent, default 0. - if offset < len(requestBody) { - _ = requestBody[offset] // coordinatorType - offset++ // Move past the coordinator type byte - } - - // Find the appropriate coordinator for this group - coordinatorHost, coordinatorPort, nodeID, err := h.findCoordinatorForGroup(coordinatorKey) - if err != nil { - return nil, fmt.Errorf("failed to find coordinator for group %s: %w", coordinatorKey, err) - } - - // Return hostname instead of IP address for client connectivity - // Clients need to connect to the same hostname they originally connected to - _ = coordinatorHost // originalHost - coordinatorHost = h.getClientConnectableHost(coordinatorHost) - - response := make([]byte, 0, 64) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // FindCoordinator v2 Response Format: - // - throttle_time_ms (INT32) - // - error_code (INT16) - // - error_message (STRING) - nullable - // - node_id (INT32) - // - host (STRING) - // - port (INT32) - - // Throttle time (4 bytes, 0 = no throttling) - response = append(response, 0, 0, 0, 0) - - // Error code (2 bytes, 0 = no error) - response = append(response, 0, 0) - - // Error message (nullable string) - null for success - response = append(response, 0xff, 0xff) // -1 length indicates null - - // Coordinator node_id (4 bytes) - use direct bit conversion for int32 to uint32 - nodeIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(nodeIDBytes, uint32(int32(nodeID))) - response = append(response, nodeIDBytes...) - - // Coordinator host (string) - hostLen := uint16(len(coordinatorHost)) - response = append(response, byte(hostLen>>8), byte(hostLen)) - response = append(response, []byte(coordinatorHost)...) - - // Coordinator port (4 bytes) - validate port range - if coordinatorPort < 0 || coordinatorPort > 65535 { - return nil, fmt.Errorf("invalid port number: %d", coordinatorPort) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(coordinatorPort)) - response = append(response, portBytes...) - - // Debug logging (hex dump removed to reduce CPU usage) - if glog.V(4) { - glog.V(4).Infof("FindCoordinator v2: Built response - bodyLen=%d, host='%s' (len=%d), port=%d, nodeID=%d", - len(response), coordinatorHost, len(coordinatorHost), coordinatorPort, nodeID) - } - - return response, nil -} - -func (h *Handler) handleFindCoordinatorV3(correlationID uint32, requestBody []byte) ([]byte, error) { - // Parse FindCoordinator v3 request (flexible version): - // - Key (COMPACT_STRING with varint length+1) - // - KeyType (INT8) - // - Tagged fields (varint) - - if len(requestBody) < 2 { - return nil, fmt.Errorf("FindCoordinator v3 request too short") - } - - // HEX DUMP for debugging - glog.V(4).Infof("FindCoordinator V3 request body (first 50 bytes): % x", requestBody[:min(50, len(requestBody))]) - glog.V(4).Infof("FindCoordinator V3 request body length: %d", len(requestBody)) - - offset := 0 - - // The first byte is the tagged fields from the REQUEST HEADER that weren't consumed - // Skip the tagged fields count (should be 0x00 for no tagged fields) - if len(requestBody) > 0 && requestBody[0] == 0x00 { - glog.V(4).Infof("FindCoordinator V3: Skipping header tagged fields byte (0x00)") - offset = 1 - } - - // Parse coordinator key (compact string: varint length+1) - glog.V(4).Infof("FindCoordinator V3: About to decode varint from bytes: % x", requestBody[offset:min(offset+5, len(requestBody))]) - coordinatorKeyLen, bytesRead, err := DecodeUvarint(requestBody[offset:]) - if err != nil || bytesRead <= 0 { - return nil, fmt.Errorf("failed to decode coordinator key length: %w (bytes: % x)", err, requestBody[offset:min(offset+5, len(requestBody))]) - } - offset += bytesRead - - glog.V(4).Infof("FindCoordinator V3: coordinatorKeyLen (varint)=%d, bytesRead=%d, offset now=%d", coordinatorKeyLen, bytesRead, offset) - glog.V(4).Infof("FindCoordinator V3: Next bytes after varint: % x", requestBody[offset:min(offset+20, len(requestBody))]) - - if coordinatorKeyLen == 0 { - return nil, fmt.Errorf("coordinator key cannot be null in v3") - } - // Compact strings in Kafka use length+1 encoding: - // varint=0 means null, varint=1 means empty string, varint=n+1 means string of length n - coordinatorKeyLen-- // Decode: actual length = varint - 1 - - glog.V(4).Infof("FindCoordinator V3: actual coordinatorKeyLen after decoding: %d", coordinatorKeyLen) - - if len(requestBody) < offset+int(coordinatorKeyLen) { - return nil, fmt.Errorf("FindCoordinator v3 request missing coordinator key") - } - - coordinatorKey := string(requestBody[offset : offset+int(coordinatorKeyLen)]) - offset += int(coordinatorKeyLen) - - // Parse coordinator type (INT8) - if offset < len(requestBody) { - _ = requestBody[offset] // coordinatorType - offset++ - } - - // Skip tagged fields (we don't need them for now) - if offset < len(requestBody) { - _, bytesRead, tagErr := DecodeUvarint(requestBody[offset:]) - if tagErr == nil && bytesRead > 0 { - offset += bytesRead - // TODO: Parse tagged fields if needed - } - } - - // Find the appropriate coordinator for this group - coordinatorHost, coordinatorPort, nodeID, err := h.findCoordinatorForGroup(coordinatorKey) - if err != nil { - return nil, fmt.Errorf("failed to find coordinator for group %s: %w", coordinatorKey, err) - } - - // Return hostname instead of IP address for client connectivity - _ = coordinatorHost // originalHost - coordinatorHost = h.getClientConnectableHost(coordinatorHost) - - // Build response (v3 is flexible, uses compact strings and tagged fields) - response := make([]byte, 0, 64) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // FindCoordinator v3 Response Format (FLEXIBLE): - // - throttle_time_ms (INT32) - // - error_code (INT16) - // - error_message (COMPACT_NULLABLE_STRING with varint length+1, 0 = null) - // - node_id (INT32) - // - host (COMPACT_STRING with varint length+1) - // - port (INT32) - // - tagged_fields (varint, 0 = no tags) - - // Throttle time (4 bytes, 0 = no throttling) - response = append(response, 0, 0, 0, 0) - - // Error code (2 bytes, 0 = no error) - response = append(response, 0, 0) - - // Error message (compact nullable string) - null for success - // Compact nullable string: 0 = null, 1 = empty string, n+1 = string of length n - response = append(response, 0) // 0 = null - - // Coordinator node_id (4 bytes) - use direct bit conversion for int32 to uint32 - nodeIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(nodeIDBytes, uint32(int32(nodeID))) - response = append(response, nodeIDBytes...) - - // Coordinator host (compact string: varint length+1) - hostLen := uint32(len(coordinatorHost)) - response = append(response, EncodeUvarint(hostLen+1)...) // +1 for compact string encoding - response = append(response, []byte(coordinatorHost)...) - - // Coordinator port (4 bytes) - validate port range - if coordinatorPort < 0 || coordinatorPort > 65535 { - return nil, fmt.Errorf("invalid port number: %d", coordinatorPort) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(coordinatorPort)) - response = append(response, portBytes...) - - // Tagged fields (0 = no tags) - response = append(response, 0) - - return response, nil -} - -// findCoordinatorForGroup determines the coordinator gateway for a consumer group -// Uses gateway leader for distributed coordinator assignment (first-come-first-serve) -func (h *Handler) findCoordinatorForGroup(groupID string) (host string, port int, nodeID int32, err error) { - // Get the coordinator registry from the handler - registry := h.GetCoordinatorRegistry() - if registry == nil { - // Fallback to current gateway if no registry available - gatewayAddr := h.GetGatewayAddress() - if gatewayAddr == "" { - return "", 0, 0, fmt.Errorf("no coordinator registry and no gateway address configured") - } - host, port, err := h.parseGatewayAddress(gatewayAddr) - if err != nil { - return "", 0, 0, fmt.Errorf("failed to parse gateway address: %w", err) - } - nodeID = 1 - return host, port, nodeID, nil - } - - // If this gateway is the leader, handle the assignment directly - if registry.IsLeader() { - return h.handleCoordinatorAssignmentAsLeader(groupID, registry) - } - - // If not the leader, contact the leader to get/assign coordinator - // But first check if we can quickly become the leader or if there's already a leader - if leader := registry.GetLeaderAddress(); leader != "" { - // If the leader is this gateway, handle assignment directly - if leader == h.GetGatewayAddress() { - return h.handleCoordinatorAssignmentAsLeader(groupID, registry) - } - } - return h.requestCoordinatorFromLeader(groupID, registry) -} - -// handleCoordinatorAssignmentAsLeader handles coordinator assignment when this gateway is the leader -func (h *Handler) handleCoordinatorAssignmentAsLeader(groupID string, registry CoordinatorRegistryInterface) (host string, port int, nodeID int32, err error) { - // Check if coordinator already exists - if assignment, err := registry.GetCoordinator(groupID); err == nil && assignment != nil { - return h.parseAddress(assignment.CoordinatorAddr, assignment.CoordinatorNodeID) - } - - // No coordinator exists, assign the requesting gateway (first-come-first-serve) - currentGateway := h.GetGatewayAddress() - if currentGateway == "" { - return "", 0, 0, fmt.Errorf("no gateway address configured for coordinator assignment") - } - assignment, err := registry.AssignCoordinator(groupID, currentGateway) - if err != nil { - // Fallback to current gateway on assignment error - host, port, parseErr := h.parseGatewayAddress(currentGateway) - if parseErr != nil { - return "", 0, 0, fmt.Errorf("failed to parse gateway address after assignment error: %w", parseErr) - } - nodeID = 1 - return host, port, nodeID, nil - } - - return h.parseAddress(assignment.CoordinatorAddr, assignment.CoordinatorNodeID) -} - -// requestCoordinatorFromLeader requests coordinator assignment from the gateway leader -// If no leader exists, it waits for leader election to complete -func (h *Handler) requestCoordinatorFromLeader(groupID string, registry CoordinatorRegistryInterface) (host string, port int, nodeID int32, err error) { - // Wait for leader election to complete with a longer timeout for Schema Registry compatibility - _, err = h.waitForLeader(registry, 10*time.Second) // 10 second timeout for enterprise clients - if err != nil { - gatewayAddr := h.GetGatewayAddress() - if gatewayAddr == "" { - return "", 0, 0, fmt.Errorf("failed to wait for leader and no gateway address configured: %w", err) - } - host, port, parseErr := h.parseGatewayAddress(gatewayAddr) - if parseErr != nil { - return "", 0, 0, fmt.Errorf("failed to parse gateway address after leader wait timeout: %w", parseErr) - } - nodeID = 1 - return host, port, nodeID, nil - } - - // Since we don't have direct RPC between gateways yet, and the leader might be this gateway, - // check if we became the leader during the wait - if registry.IsLeader() { - return h.handleCoordinatorAssignmentAsLeader(groupID, registry) - } - - // For now, if we can't directly contact the leader (no inter-gateway RPC yet), - // use current gateway as fallback. In a full implementation, this would make - // an RPC call to the leader gateway. - gatewayAddr := h.GetGatewayAddress() - if gatewayAddr == "" { - return "", 0, 0, fmt.Errorf("no gateway address configured for fallback coordinator") - } - host, port, parseErr := h.parseGatewayAddress(gatewayAddr) - if parseErr != nil { - return "", 0, 0, fmt.Errorf("failed to parse gateway address for fallback: %w", parseErr) - } - nodeID = 1 - return host, port, nodeID, nil -} - -// waitForLeader waits for a leader to be elected, with timeout -func (h *Handler) waitForLeader(registry CoordinatorRegistryInterface, timeout time.Duration) (leaderAddress string, err error) { - - // Use the registry's efficient wait mechanism - leaderAddress, err = registry.WaitForLeader(timeout) - if err != nil { - return "", err - } - - return leaderAddress, nil -} - -// parseGatewayAddress parses a gateway address string (host:port) into host and port -func (h *Handler) parseGatewayAddress(address string) (host string, port int, err error) { - // Use net.SplitHostPort for proper IPv6 support - hostStr, portStr, err := net.SplitHostPort(address) - if err != nil { - return "", 0, fmt.Errorf("invalid gateway address format: %s", address) - } - - port, err = strconv.Atoi(portStr) - if err != nil { - return "", 0, fmt.Errorf("invalid port in gateway address %s: %v", address, err) - } - - return hostStr, port, nil -} - -// parseAddress parses a gateway address and returns host, port, and nodeID -func (h *Handler) parseAddress(address string, nodeID int32) (host string, port int, nid int32, err error) { - // Reuse the correct parseGatewayAddress implementation - host, port, err = h.parseGatewayAddress(address) - if err != nil { - return "", 0, 0, err - } - nid = nodeID - return host, port, nid, nil -} - -// getClientConnectableHost returns the hostname that clients can connect to -// This ensures that FindCoordinator returns the same hostname the client originally connected to -func (h *Handler) getClientConnectableHost(coordinatorHost string) string { - // If the coordinator host is an IP address, return the original gateway hostname - // This prevents clients from switching to IP addresses which creates new connections - if net.ParseIP(coordinatorHost) != nil { - // It's an IP address, return the original gateway hostname - gatewayAddr := h.GetGatewayAddress() - if host, _, err := h.parseGatewayAddress(gatewayAddr); err == nil { - // If the gateway address is also an IP, return the IP directly - // This handles local/test environments where hostnames aren't resolvable - if net.ParseIP(host) != nil { - // Both are IPs, return the actual IP address - return coordinatorHost - } - return host - } - // Fallback to the coordinator host IP itself - return coordinatorHost - } - - // It's already a hostname, return as-is - return coordinatorHost -} diff --git a/weed/mq/kafka/protocol/flexible_versions.go b/weed/mq/kafka/protocol/flexible_versions.go deleted file mode 100644 index ddb55e74f..000000000 --- a/weed/mq/kafka/protocol/flexible_versions.go +++ /dev/null @@ -1,480 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" -) - -// FlexibleVersions provides utilities for handling Kafka flexible versions protocol -// Flexible versions use compact arrays/strings and tagged fields for backward compatibility - -// CompactArrayLength encodes a length for compact arrays -// Compact arrays encode length as length+1, where 0 means empty array -func CompactArrayLength(length uint32) []byte { - // Compact arrays use length+1 encoding (0 = null, 1 = empty, n+1 = array of length n) - // For an empty array (length=0), we return 1 (not 0, which would be null) - return EncodeUvarint(length + 1) -} - -// DecodeCompactArrayLength decodes a compact array length -// Returns the actual length and number of bytes consumed -func DecodeCompactArrayLength(data []byte) (uint32, int, error) { - if len(data) == 0 { - return 0, 0, fmt.Errorf("no data for compact array length") - } - - if data[0] == 0 { - return 0, 1, nil // Empty array - } - - length, consumed, err := DecodeUvarint(data) - if err != nil { - return 0, 0, fmt.Errorf("decode compact array length: %w", err) - } - - if length == 0 { - return 0, consumed, fmt.Errorf("invalid compact array length encoding") - } - - return length - 1, consumed, nil -} - -// CompactStringLength encodes a length for compact strings -// Compact strings encode length as length+1, where 0 means null string -func CompactStringLength(length int) []byte { - if length < 0 { - return []byte{0} // Null string - } - return EncodeUvarint(uint32(length + 1)) -} - -// DecodeCompactStringLength decodes a compact string length -// Returns the actual length (-1 for null), and number of bytes consumed -func DecodeCompactStringLength(data []byte) (int, int, error) { - if len(data) == 0 { - return 0, 0, fmt.Errorf("no data for compact string length") - } - - if data[0] == 0 { - return -1, 1, nil // Null string - } - - length, consumed, err := DecodeUvarint(data) - if err != nil { - return 0, 0, fmt.Errorf("decode compact string length: %w", err) - } - - if length == 0 { - return 0, consumed, fmt.Errorf("invalid compact string length encoding") - } - - return int(length - 1), consumed, nil -} - -// EncodeUvarint encodes an unsigned integer using variable-length encoding -// This is used for compact arrays, strings, and tagged fields -func EncodeUvarint(value uint32) []byte { - var buf []byte - for value >= 0x80 { - buf = append(buf, byte(value)|0x80) - value >>= 7 - } - buf = append(buf, byte(value)) - return buf -} - -// DecodeUvarint decodes a variable-length unsigned integer -// Returns the decoded value and number of bytes consumed -func DecodeUvarint(data []byte) (uint32, int, error) { - var value uint32 - var shift uint - var consumed int - - for i, b := range data { - consumed = i + 1 - value |= uint32(b&0x7F) << shift - - if (b & 0x80) == 0 { - return value, consumed, nil - } - - shift += 7 - if shift >= 32 { - return 0, consumed, fmt.Errorf("uvarint overflow") - } - } - - return 0, consumed, fmt.Errorf("incomplete uvarint") -} - -// TaggedField represents a tagged field in flexible versions -type TaggedField struct { - Tag uint32 - Data []byte -} - -// TaggedFields represents a collection of tagged fields -type TaggedFields struct { - Fields []TaggedField -} - -// EncodeTaggedFields encodes tagged fields for flexible versions -func (tf *TaggedFields) Encode() []byte { - if len(tf.Fields) == 0 { - return []byte{0} // Empty tagged fields - } - - var buf []byte - - // Number of tagged fields - buf = append(buf, EncodeUvarint(uint32(len(tf.Fields)))...) - - for _, field := range tf.Fields { - // Tag - buf = append(buf, EncodeUvarint(field.Tag)...) - // Size - buf = append(buf, EncodeUvarint(uint32(len(field.Data)))...) - // Data - buf = append(buf, field.Data...) - } - - return buf -} - -// DecodeTaggedFields decodes tagged fields from flexible versions -func DecodeTaggedFields(data []byte) (*TaggedFields, int, error) { - if len(data) == 0 { - return &TaggedFields{}, 0, fmt.Errorf("no data for tagged fields") - } - - if data[0] == 0 { - return &TaggedFields{}, 1, nil // Empty tagged fields - } - - offset := 0 - - // Number of tagged fields - numFields, consumed, err := DecodeUvarint(data[offset:]) - if err != nil { - return nil, 0, fmt.Errorf("decode tagged fields count: %w", err) - } - offset += consumed - - fields := make([]TaggedField, numFields) - - for i := uint32(0); i < numFields; i++ { - // Tag - tag, consumed, err := DecodeUvarint(data[offset:]) - if err != nil { - return nil, 0, fmt.Errorf("decode tagged field %d tag: %w", i, err) - } - offset += consumed - - // Size - size, consumed, err := DecodeUvarint(data[offset:]) - if err != nil { - return nil, 0, fmt.Errorf("decode tagged field %d size: %w", i, err) - } - offset += consumed - - // Data - if offset+int(size) > len(data) { - // More detailed error information - return nil, 0, fmt.Errorf("tagged field %d data truncated: need %d bytes at offset %d, but only %d total bytes available", i, size, offset, len(data)) - } - - fields[i] = TaggedField{ - Tag: tag, - Data: data[offset : offset+int(size)], - } - offset += int(size) - } - - return &TaggedFields{Fields: fields}, offset, nil -} - -// IsFlexibleVersion determines if an API version uses flexible versions -// This is API-specific and based on when each API adopted flexible versions -func IsFlexibleVersion(apiKey, apiVersion uint16) bool { - switch APIKey(apiKey) { - case APIKeyApiVersions: - return apiVersion >= 3 - case APIKeyMetadata: - return apiVersion >= 9 - case APIKeyFetch: - return apiVersion >= 12 - case APIKeyProduce: - return apiVersion >= 9 - case APIKeyJoinGroup: - return apiVersion >= 6 - case APIKeySyncGroup: - return apiVersion >= 4 - case APIKeyOffsetCommit: - return apiVersion >= 8 - case APIKeyOffsetFetch: - return apiVersion >= 6 - case APIKeyFindCoordinator: - return apiVersion >= 3 - case APIKeyHeartbeat: - return apiVersion >= 4 - case APIKeyLeaveGroup: - return apiVersion >= 4 - case APIKeyCreateTopics: - return apiVersion >= 2 - case APIKeyDeleteTopics: - return apiVersion >= 4 - default: - return false - } -} - -// FlexibleString encodes a string for flexible versions (compact format) -func FlexibleString(s string) []byte { - // Compact strings use length+1 encoding (0 = null, 1 = empty, n+1 = string of length n) - // For an empty string (s=""), we return length+1 = 1 (not 0, which would be null) - var buf []byte - buf = append(buf, CompactStringLength(len(s))...) - buf = append(buf, []byte(s)...) - return buf -} - -// parseCompactString parses a compact string from flexible protocol -// Returns the string bytes and the number of bytes consumed -func parseCompactString(data []byte) ([]byte, int) { - if len(data) == 0 { - return nil, 0 - } - - // Parse compact string length (unsigned varint - no zigzag decoding!) - length, consumed := decodeUnsignedVarint(data) - if consumed == 0 { - return nil, 0 - } - - // Debug logging for compact string parsing - - if length == 0 { - // Null string (length 0 means null) - return nil, consumed - } - - // In compact strings, length is actual length + 1 - // So length 1 means empty string, length > 1 means non-empty - if length == 0 { - return nil, consumed // Already handled above - } - actualLength := int(length - 1) - if actualLength < 0 { - return nil, 0 - } - - - if actualLength == 0 { - // Empty string (length was 1) - return []byte{}, consumed - } - - if consumed+actualLength > len(data) { - return nil, 0 - } - - result := data[consumed : consumed+actualLength] - return result, consumed + actualLength -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -// decodeUnsignedVarint decodes an unsigned varint (no zigzag decoding) -func decodeUnsignedVarint(data []byte) (uint64, int) { - if len(data) == 0 { - return 0, 0 - } - - var result uint64 - var shift uint - var bytesRead int - - for i, b := range data { - if i > 9 { // varints can be at most 10 bytes - return 0, 0 // invalid varint - } - - bytesRead++ - result |= uint64(b&0x7F) << shift - - if (b & 0x80) == 0 { - // Most significant bit is 0, we're done - return result, bytesRead - } - - shift += 7 - } - - return 0, 0 // incomplete varint -} - -// FlexibleNullableString encodes a nullable string for flexible versions -func FlexibleNullableString(s *string) []byte { - if s == nil { - return []byte{0} // Null string - } - return FlexibleString(*s) -} - -// DecodeFlexibleString decodes a flexible string -// Returns the string (empty for null) and bytes consumed -func DecodeFlexibleString(data []byte) (string, int, error) { - length, consumed, err := DecodeCompactStringLength(data) - if err != nil { - return "", 0, err - } - - if length < 0 { - return "", consumed, nil // Null string -> empty string - } - - if consumed+length > len(data) { - return "", 0, fmt.Errorf("string data truncated") - } - - return string(data[consumed : consumed+length]), consumed + length, nil -} - -// FlexibleVersionHeader handles the request header parsing for flexible versions -type FlexibleVersionHeader struct { - APIKey uint16 - APIVersion uint16 - CorrelationID uint32 - ClientID *string - TaggedFields *TaggedFields -} - -// parseRegularHeader parses a regular (non-flexible) Kafka request header -func parseRegularHeader(data []byte) (*FlexibleVersionHeader, []byte, error) { - if len(data) < 8 { - return nil, nil, fmt.Errorf("header too short") - } - - header := &FlexibleVersionHeader{} - offset := 0 - - // API Key (2 bytes) - header.APIKey = binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - // API Version (2 bytes) - header.APIVersion = binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - // Correlation ID (4 bytes) - header.CorrelationID = binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - // Regular versions use standard strings - if len(data) < offset+2 { - return nil, nil, fmt.Errorf("missing client_id length") - } - - clientIDLen := int16(binary.BigEndian.Uint16(data[offset : offset+2])) - offset += 2 - - if clientIDLen >= 0 { - if len(data) < offset+int(clientIDLen) { - return nil, nil, fmt.Errorf("client_id truncated") - } - clientID := string(data[offset : offset+int(clientIDLen)]) - header.ClientID = &clientID - offset += int(clientIDLen) - } - - return header, data[offset:], nil -} - -// ParseRequestHeader parses a Kafka request header, handling both regular and flexible versions -func ParseRequestHeader(data []byte) (*FlexibleVersionHeader, []byte, error) { - if len(data) < 8 { - return nil, nil, fmt.Errorf("header too short") - } - - header := &FlexibleVersionHeader{} - offset := 0 - - // API Key (2 bytes) - header.APIKey = binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - // API Version (2 bytes) - header.APIVersion = binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - // Correlation ID (4 bytes) - header.CorrelationID = binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - // Client ID handling depends on flexible version - isFlexible := IsFlexibleVersion(header.APIKey, header.APIVersion) - - if isFlexible { - // Flexible versions use compact strings - clientID, consumed, err := DecodeFlexibleString(data[offset:]) - if err != nil { - return nil, nil, fmt.Errorf("decode flexible client_id: %w", err) - } - offset += consumed - - if clientID != "" { - header.ClientID = &clientID - } - - // Parse tagged fields in header - taggedFields, consumed, err := DecodeTaggedFields(data[offset:]) - if err != nil { - // If tagged fields parsing fails, this might be a regular header sent by kafka-go - // Fall back to regular header parsing - return parseRegularHeader(data) - } - offset += consumed - header.TaggedFields = taggedFields - - } else { - // Regular versions use standard strings - if len(data) < offset+2 { - return nil, nil, fmt.Errorf("missing client_id length") - } - - clientIDLen := int16(binary.BigEndian.Uint16(data[offset : offset+2])) - offset += 2 - - if clientIDLen >= 0 { - if len(data) < offset+int(clientIDLen) { - return nil, nil, fmt.Errorf("client_id truncated") - } - - clientID := string(data[offset : offset+int(clientIDLen)]) - header.ClientID = &clientID - offset += int(clientIDLen) - } - // No tagged fields in regular versions - } - - return header, data[offset:], nil -} - -// EncodeFlexibleResponse encodes a response with proper flexible version formatting -func EncodeFlexibleResponse(correlationID uint32, data []byte, hasTaggedFields bool) []byte { - response := make([]byte, 4) - binary.BigEndian.PutUint32(response, correlationID) - response = append(response, data...) - - if hasTaggedFields { - // Add empty tagged fields for flexible responses - response = append(response, 0) - } - - return response -} diff --git a/weed/mq/kafka/protocol/group_introspection.go b/weed/mq/kafka/protocol/group_introspection.go deleted file mode 100644 index 0ff3ed4b5..000000000 --- a/weed/mq/kafka/protocol/group_introspection.go +++ /dev/null @@ -1,447 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" -) - -// handleDescribeGroups handles DescribeGroups API (key 15) -func (h *Handler) handleDescribeGroups(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse request - request, err := h.parseDescribeGroupsRequest(requestBody, apiVersion) - if err != nil { - return nil, fmt.Errorf("parse DescribeGroups request: %w", err) - } - - // Build response - response := DescribeGroupsResponse{ - ThrottleTimeMs: 0, - Groups: make([]DescribeGroupsGroup, 0, len(request.GroupIDs)), - } - - // Get group information for each requested group - for _, groupID := range request.GroupIDs { - group := h.describeGroup(groupID) - response.Groups = append(response.Groups, group) - } - - return h.buildDescribeGroupsResponse(response, correlationID, apiVersion), nil -} - -// handleListGroups handles ListGroups API (key 16) -func (h *Handler) handleListGroups(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse request (ListGroups has minimal request structure) - request, err := h.parseListGroupsRequest(requestBody, apiVersion) - if err != nil { - return nil, fmt.Errorf("parse ListGroups request: %w", err) - } - - // Build response - response := ListGroupsResponse{ - ThrottleTimeMs: 0, - ErrorCode: 0, - Groups: h.listAllGroups(request.StatesFilter), - } - - return h.buildListGroupsResponse(response, correlationID, apiVersion), nil -} - -// describeGroup gets detailed information about a specific group -func (h *Handler) describeGroup(groupID string) DescribeGroupsGroup { - // Get group information from coordinator - if h.groupCoordinator == nil { - return DescribeGroupsGroup{ - ErrorCode: 15, // GROUP_COORDINATOR_NOT_AVAILABLE - GroupID: groupID, - State: "Dead", - } - } - - group := h.groupCoordinator.GetGroup(groupID) - if group == nil { - return DescribeGroupsGroup{ - ErrorCode: 25, // UNKNOWN_GROUP_ID - GroupID: groupID, - State: "Dead", - ProtocolType: "", - Protocol: "", - Members: []DescribeGroupsMember{}, - } - } - - // Convert group to response format - members := make([]DescribeGroupsMember, 0, len(group.Members)) - for memberID, member := range group.Members { - // Convert assignment to bytes (simplified) - var assignmentBytes []byte - if len(member.Assignment) > 0 { - // In a real implementation, this would serialize the assignment properly - assignmentBytes = []byte(fmt.Sprintf("assignment:%d", len(member.Assignment))) - } - - members = append(members, DescribeGroupsMember{ - MemberID: memberID, - GroupInstanceID: member.GroupInstanceID, // Now supports static membership - ClientID: member.ClientID, - ClientHost: member.ClientHost, - MemberMetadata: member.Metadata, - MemberAssignment: assignmentBytes, - }) - } - - // Convert group state to string - var stateStr string - switch group.State { - case 0: // Assuming 0 is Empty - stateStr = "Empty" - case 1: // Assuming 1 is PreparingRebalance - stateStr = "PreparingRebalance" - case 2: // Assuming 2 is CompletingRebalance - stateStr = "CompletingRebalance" - case 3: // Assuming 3 is Stable - stateStr = "Stable" - default: - stateStr = "Dead" - } - - return DescribeGroupsGroup{ - ErrorCode: 0, - GroupID: groupID, - State: stateStr, - ProtocolType: "consumer", // Default protocol type - Protocol: group.Protocol, - Members: members, - AuthorizedOps: []int32{}, // Empty for now - } -} - -// listAllGroups gets a list of all consumer groups -func (h *Handler) listAllGroups(statesFilter []string) []ListGroupsGroup { - if h.groupCoordinator == nil { - return []ListGroupsGroup{} - } - - allGroupIDs := h.groupCoordinator.ListGroups() - groups := make([]ListGroupsGroup, 0, len(allGroupIDs)) - - for _, groupID := range allGroupIDs { - // Get the full group details - group := h.groupCoordinator.GetGroup(groupID) - if group == nil { - continue - } - - // Convert group state to string - var stateStr string - switch group.State { - case 0: - stateStr = "Empty" - case 1: - stateStr = "PreparingRebalance" - case 2: - stateStr = "CompletingRebalance" - case 3: - stateStr = "Stable" - default: - stateStr = "Dead" - } - - // Apply state filter if provided - if len(statesFilter) > 0 { - matchesFilter := false - for _, state := range statesFilter { - if stateStr == state { - matchesFilter = true - break - } - } - if !matchesFilter { - continue - } - } - - groups = append(groups, ListGroupsGroup{ - GroupID: group.ID, - ProtocolType: "consumer", // Default protocol type - GroupState: stateStr, - }) - } - - return groups -} - -// Request/Response structures - -type DescribeGroupsRequest struct { - GroupIDs []string - IncludeAuthorizedOps bool -} - -type DescribeGroupsResponse struct { - ThrottleTimeMs int32 - Groups []DescribeGroupsGroup -} - -type DescribeGroupsGroup struct { - ErrorCode int16 - GroupID string - State string - ProtocolType string - Protocol string - Members []DescribeGroupsMember - AuthorizedOps []int32 -} - -type DescribeGroupsMember struct { - MemberID string - GroupInstanceID *string - ClientID string - ClientHost string - MemberMetadata []byte - MemberAssignment []byte -} - -type ListGroupsRequest struct { - StatesFilter []string -} - -type ListGroupsResponse struct { - ThrottleTimeMs int32 - ErrorCode int16 - Groups []ListGroupsGroup -} - -type ListGroupsGroup struct { - GroupID string - ProtocolType string - GroupState string -} - -// Parsing functions - -func (h *Handler) parseDescribeGroupsRequest(data []byte, apiVersion uint16) (*DescribeGroupsRequest, error) { - offset := 0 - request := &DescribeGroupsRequest{} - - // Skip client_id if present (depends on version) - if len(data) < 4 { - return nil, fmt.Errorf("request too short") - } - - // Group IDs array - groupCount := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - request.GroupIDs = make([]string, groupCount) - for i := uint32(0); i < groupCount; i++ { - if offset+2 > len(data) { - return nil, fmt.Errorf("invalid group ID at index %d", i) - } - - groupIDLen := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if offset+int(groupIDLen) > len(data) { - return nil, fmt.Errorf("group ID too long at index %d", i) - } - - request.GroupIDs[i] = string(data[offset : offset+int(groupIDLen)]) - offset += int(groupIDLen) - } - - // Include authorized operations (v3+) - if apiVersion >= 3 && offset < len(data) { - request.IncludeAuthorizedOps = data[offset] != 0 - } - - return request, nil -} - -func (h *Handler) parseListGroupsRequest(data []byte, apiVersion uint16) (*ListGroupsRequest, error) { - request := &ListGroupsRequest{} - - // ListGroups v4+ includes states filter - if apiVersion >= 4 && len(data) >= 4 { - offset := 0 - statesCount := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - if statesCount > 0 { - request.StatesFilter = make([]string, statesCount) - for i := uint32(0); i < statesCount; i++ { - if offset+2 > len(data) { - break - } - - stateLen := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if offset+int(stateLen) > len(data) { - break - } - - request.StatesFilter[i] = string(data[offset : offset+int(stateLen)]) - offset += int(stateLen) - } - } - } - - return request, nil -} - -// Response building functions - -func (h *Handler) buildDescribeGroupsResponse(response DescribeGroupsResponse, correlationID uint32, apiVersion uint16) []byte { - buf := make([]byte, 0, 1024) - - // Correlation ID - correlationIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(correlationIDBytes, correlationID) - buf = append(buf, correlationIDBytes...) - - // Throttle time (v1+) - if apiVersion >= 1 { - throttleBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throttleBytes, uint32(response.ThrottleTimeMs)) - buf = append(buf, throttleBytes...) - } - - // Groups array - groupCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(groupCountBytes, uint32(len(response.Groups))) - buf = append(buf, groupCountBytes...) - - for _, group := range response.Groups { - // Error code - buf = append(buf, byte(group.ErrorCode>>8), byte(group.ErrorCode)) - - // Group ID - groupIDLen := uint16(len(group.GroupID)) - buf = append(buf, byte(groupIDLen>>8), byte(groupIDLen)) - buf = append(buf, []byte(group.GroupID)...) - - // State - stateLen := uint16(len(group.State)) - buf = append(buf, byte(stateLen>>8), byte(stateLen)) - buf = append(buf, []byte(group.State)...) - - // Protocol type - protocolTypeLen := uint16(len(group.ProtocolType)) - buf = append(buf, byte(protocolTypeLen>>8), byte(protocolTypeLen)) - buf = append(buf, []byte(group.ProtocolType)...) - - // Protocol - protocolLen := uint16(len(group.Protocol)) - buf = append(buf, byte(protocolLen>>8), byte(protocolLen)) - buf = append(buf, []byte(group.Protocol)...) - - // Members array - memberCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(memberCountBytes, uint32(len(group.Members))) - buf = append(buf, memberCountBytes...) - - for _, member := range group.Members { - // Member ID - memberIDLen := uint16(len(member.MemberID)) - buf = append(buf, byte(memberIDLen>>8), byte(memberIDLen)) - buf = append(buf, []byte(member.MemberID)...) - - // Group instance ID (v4+, nullable) - if apiVersion >= 4 { - if member.GroupInstanceID != nil { - instanceIDLen := uint16(len(*member.GroupInstanceID)) - buf = append(buf, byte(instanceIDLen>>8), byte(instanceIDLen)) - buf = append(buf, []byte(*member.GroupInstanceID)...) - } else { - buf = append(buf, 0xFF, 0xFF) // null - } - } - - // Client ID - clientIDLen := uint16(len(member.ClientID)) - buf = append(buf, byte(clientIDLen>>8), byte(clientIDLen)) - buf = append(buf, []byte(member.ClientID)...) - - // Client host - clientHostLen := uint16(len(member.ClientHost)) - buf = append(buf, byte(clientHostLen>>8), byte(clientHostLen)) - buf = append(buf, []byte(member.ClientHost)...) - - // Member metadata - metadataLen := uint32(len(member.MemberMetadata)) - metadataLenBytes := make([]byte, 4) - binary.BigEndian.PutUint32(metadataLenBytes, metadataLen) - buf = append(buf, metadataLenBytes...) - buf = append(buf, member.MemberMetadata...) - - // Member assignment - assignmentLen := uint32(len(member.MemberAssignment)) - assignmentLenBytes := make([]byte, 4) - binary.BigEndian.PutUint32(assignmentLenBytes, assignmentLen) - buf = append(buf, assignmentLenBytes...) - buf = append(buf, member.MemberAssignment...) - } - - // Authorized operations (v3+) - if apiVersion >= 3 { - opsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(opsCountBytes, uint32(len(group.AuthorizedOps))) - buf = append(buf, opsCountBytes...) - - for _, op := range group.AuthorizedOps { - opBytes := make([]byte, 4) - binary.BigEndian.PutUint32(opBytes, uint32(op)) - buf = append(buf, opBytes...) - } - } - } - - return buf -} - -func (h *Handler) buildListGroupsResponse(response ListGroupsResponse, correlationID uint32, apiVersion uint16) []byte { - buf := make([]byte, 0, 512) - - // Correlation ID - correlationIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(correlationIDBytes, correlationID) - buf = append(buf, correlationIDBytes...) - - // Throttle time (v1+) - if apiVersion >= 1 { - throttleBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throttleBytes, uint32(response.ThrottleTimeMs)) - buf = append(buf, throttleBytes...) - } - - // Error code - buf = append(buf, byte(response.ErrorCode>>8), byte(response.ErrorCode)) - - // Groups array - groupCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(groupCountBytes, uint32(len(response.Groups))) - buf = append(buf, groupCountBytes...) - - for _, group := range response.Groups { - // Group ID - groupIDLen := uint16(len(group.GroupID)) - buf = append(buf, byte(groupIDLen>>8), byte(groupIDLen)) - buf = append(buf, []byte(group.GroupID)...) - - // Protocol type - protocolTypeLen := uint16(len(group.ProtocolType)) - buf = append(buf, byte(protocolTypeLen>>8), byte(protocolTypeLen)) - buf = append(buf, []byte(group.ProtocolType)...) - - // Group state (v4+) - if apiVersion >= 4 { - groupStateLen := uint16(len(group.GroupState)) - buf = append(buf, byte(groupStateLen>>8), byte(groupStateLen)) - buf = append(buf, []byte(group.GroupState)...) - } - } - - return buf -} diff --git a/weed/mq/kafka/protocol/handler.go b/weed/mq/kafka/protocol/handler.go deleted file mode 100644 index 2768793d2..000000000 --- a/weed/mq/kafka/protocol/handler.go +++ /dev/null @@ -1,4309 +0,0 @@ -package protocol - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "hash/fnv" - "io" - "net" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer_offset" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" - mqschema "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/mem" -) - -// GetAdvertisedAddress returns the host:port that should be advertised to clients -// This handles the Docker networking issue where internal IPs aren't reachable by external clients -func (h *Handler) GetAdvertisedAddress(gatewayAddr string) (string, int) { - host, port := "localhost", 9093 - - // First, check for environment variable override - if advertisedHost := os.Getenv("KAFKA_ADVERTISED_HOST"); advertisedHost != "" { - host = advertisedHost - glog.V(2).Infof("Using KAFKA_ADVERTISED_HOST: %s", advertisedHost) - } else if gatewayAddr != "" { - // Try to parse the gateway address to extract hostname and port - parsedHost, gatewayPort, err := net.SplitHostPort(gatewayAddr) - if err == nil { - // Successfully parsed host:port - if gatewayPortInt, err := strconv.Atoi(gatewayPort); err == nil { - port = gatewayPortInt - } - // Use the parsed host if it's not 0.0.0.0 or empty - if parsedHost != "" && parsedHost != "0.0.0.0" { - host = parsedHost - glog.V(2).Infof("Using host from gatewayAddr: %s", host) - } else { - // Fall back to localhost for 0.0.0.0 or ambiguous addresses - host = "localhost" - glog.V(2).Infof("gatewayAddr is 0.0.0.0, using localhost for client advertising") - } - } else { - // Could not parse, use as-is if it looks like a hostname - if gatewayAddr != "" && gatewayAddr != "0.0.0.0" { - host = gatewayAddr - glog.V(2).Infof("Using gatewayAddr directly as host (unparseable): %s", host) - } - } - } else { - // No gateway address and no environment variable - host = "localhost" - glog.V(2).Infof("No gatewayAddr provided, using localhost") - } - - return host, port -} - -// generateNodeID generates a deterministic node ID from a gateway address. -// This must match the logic in gateway/coordinator_registry.go to ensure consistency -// between Metadata and FindCoordinator responses. -func generateNodeID(gatewayAddress string) int32 { - if gatewayAddress == "" { - return 1 // Default fallback - } - h := fnv.New32a() - _, _ = h.Write([]byte(gatewayAddress)) - // Use only positive values and avoid 0 - return int32(h.Sum32()&0x7fffffff) + 1 -} - -// GetNodeID returns the consistent node ID for this gateway. -// This is used by both Metadata and FindCoordinator handlers to ensure -// clients see the same broker/coordinator node ID across all APIs. -func (h *Handler) GetNodeID() int32 { - gatewayAddr := h.GetGatewayAddress() - return generateNodeID(gatewayAddr) -} - -// TopicInfo holds basic information about a topic -type TopicInfo struct { - Name string - Partitions int32 - CreatedAt int64 -} - -// TopicPartitionKey uniquely identifies a topic partition -type TopicPartitionKey struct { - Topic string - Partition int32 -} - -// contextKey is a type for context keys to avoid collisions -type contextKey string - -const ( - // connContextKey is the context key for storing ConnectionContext - connContextKey contextKey = "connectionContext" -) - -// kafkaRequest represents a Kafka API request to be processed -type kafkaRequest struct { - correlationID uint32 - apiKey uint16 - apiVersion uint16 - requestBody []byte - ctx context.Context - connContext *ConnectionContext // Per-connection context to avoid race conditions -} - -// kafkaResponse represents a Kafka API response -type kafkaResponse struct { - correlationID uint32 - apiKey uint16 - apiVersion uint16 - response []byte - err error -} - -const ( - // DefaultKafkaNamespace is the default namespace for Kafka topics in SeaweedMQ - DefaultKafkaNamespace = "kafka" -) - -// APIKey represents a Kafka API key type for better type safety -type APIKey uint16 - -// Kafka API Keys -const ( - APIKeyProduce APIKey = 0 - APIKeyFetch APIKey = 1 - APIKeyListOffsets APIKey = 2 - APIKeyMetadata APIKey = 3 - APIKeyOffsetCommit APIKey = 8 - APIKeyOffsetFetch APIKey = 9 - APIKeyFindCoordinator APIKey = 10 - APIKeyJoinGroup APIKey = 11 - APIKeyHeartbeat APIKey = 12 - APIKeyLeaveGroup APIKey = 13 - APIKeySyncGroup APIKey = 14 - APIKeyDescribeGroups APIKey = 15 - APIKeyListGroups APIKey = 16 - APIKeyApiVersions APIKey = 18 - APIKeyCreateTopics APIKey = 19 - APIKeyDeleteTopics APIKey = 20 - APIKeyInitProducerId APIKey = 22 - APIKeyDescribeConfigs APIKey = 32 - APIKeyDescribeCluster APIKey = 60 -) - -// SeaweedMQHandlerInterface defines the interface for SeaweedMQ integration -type SeaweedMQHandlerInterface interface { - TopicExists(topic string) bool - ListTopics() []string - CreateTopic(topic string, partitions int32) error - CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error - DeleteTopic(topic string) error - GetTopicInfo(topic string) (*integration.KafkaTopicInfo, bool) - InvalidateTopicExistsCache(topic string) - // Ledger methods REMOVED - SMQ handles Kafka offsets natively - ProduceRecord(ctx context.Context, topicName string, partitionID int32, key, value []byte) (int64, error) - ProduceRecordValue(ctx context.Context, topicName string, partitionID int32, key []byte, recordValueBytes []byte) (int64, error) - // GetStoredRecords retrieves records from SMQ storage (optional - for advanced implementations) - // ctx is used to control the fetch timeout (should match Kafka fetch request's MaxWaitTime) - GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]integration.SMQRecord, error) - // GetEarliestOffset returns the earliest available offset for a topic partition - GetEarliestOffset(topic string, partition int32) (int64, error) - // GetLatestOffset returns the latest available offset for a topic partition - GetLatestOffset(topic string, partition int32) (int64, error) - // WithFilerClient executes a function with a filer client for accessing SeaweedMQ metadata - WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error - // GetBrokerAddresses returns the discovered SMQ broker addresses for Metadata responses - GetBrokerAddresses() []string - // CreatePerConnectionBrokerClient creates an isolated BrokerClient for each TCP connection - CreatePerConnectionBrokerClient() (*integration.BrokerClient, error) - // SetProtocolHandler sets the protocol handler reference for connection context access - SetProtocolHandler(handler integration.ProtocolHandler) - Close() error -} - -// ConsumerOffsetStorage defines the interface for storing consumer offsets -// This is used by OffsetCommit and OffsetFetch protocol handlers -type ConsumerOffsetStorage interface { - CommitOffset(group, topic string, partition int32, offset int64, metadata string) error - FetchOffset(group, topic string, partition int32) (int64, string, error) - FetchAllOffsets(group string) (map[TopicPartition]OffsetMetadata, error) - DeleteGroup(group string) error - Close() error -} - -// TopicPartition uniquely identifies a topic partition for offset storage -type TopicPartition struct { - Topic string - Partition int32 -} - -// OffsetMetadata contains offset and associated metadata -type OffsetMetadata struct { - Offset int64 - Metadata string -} - -// TopicSchemaConfig holds schema configuration for a topic -type TopicSchemaConfig struct { - // Value schema configuration - ValueSchemaID uint32 - ValueSchemaFormat schema.Format - - // Key schema configuration (optional) - KeySchemaID uint32 - KeySchemaFormat schema.Format - HasKeySchema bool // indicates if key schema is configured -} - -// Legacy accessors for backward compatibility -func (c *TopicSchemaConfig) SchemaID() uint32 { - return c.ValueSchemaID -} - -func (c *TopicSchemaConfig) SchemaFormat() schema.Format { - return c.ValueSchemaFormat -} - -// getTopicSchemaFormat returns the schema format string for a topic -func (h *Handler) getTopicSchemaFormat(topic string) string { - h.topicSchemaConfigMu.RLock() - defer h.topicSchemaConfigMu.RUnlock() - - if config, exists := h.topicSchemaConfigs[topic]; exists { - return config.ValueSchemaFormat.String() - } - return "" // Empty string means schemaless or format unknown -} - -// Handler processes Kafka protocol requests from clients using SeaweedMQ -type Handler struct { - // SeaweedMQ integration - seaweedMQHandler SeaweedMQHandlerInterface - - // SMQ offset storage removed - using ConsumerOffsetStorage instead - - // Consumer offset storage for Kafka protocol OffsetCommit/OffsetFetch - consumerOffsetStorage ConsumerOffsetStorage - - // Consumer group coordination - groupCoordinator *consumer.GroupCoordinator - - // Response caching to reduce CPU usage for repeated requests - metadataCache *ResponseCache - coordinatorCache *ResponseCache - - // Coordinator registry for distributed coordinator assignment - coordinatorRegistry CoordinatorRegistryInterface - - // Schema management (optional, for schematized topics) - schemaManager *schema.Manager - useSchema bool - brokerClient *schema.BrokerClient - - // Topic schema configuration cache - topicSchemaConfigs map[string]*TopicSchemaConfig - topicSchemaConfigMu sync.RWMutex - - // Track registered schemas to prevent duplicate registrations - registeredSchemas map[string]bool // key: "topic:schemaID" or "topic-key:schemaID" - registeredSchemasMu sync.RWMutex - - // RecordType inference cache to avoid recreating Avro codecs (37% CPU overhead!) - // Key: schema content hash or schema string - inferredRecordTypes map[string]*schema_pb.RecordType - inferredRecordTypesMu sync.RWMutex - - filerClient filer_pb.SeaweedFilerClient - - // SMQ broker addresses discovered from masters for Metadata responses - smqBrokerAddresses []string - - // Gateway address for coordinator registry - gatewayAddress string - - // Connection contexts stored per connection ID (thread-safe) - // Replaces the race-prone shared connContext field - connContexts sync.Map // map[string]*ConnectionContext - - // Schema Registry URL for delayed initialization - schemaRegistryURL string - - // Default partition count for auto-created topics - defaultPartitions int32 -} - -// NewHandler creates a basic Kafka handler with in-memory storage -// WARNING: This is for testing ONLY - never use in production! -// For production use with persistent storage, use NewSeaweedMQBrokerHandler instead -func NewHandler() *Handler { - // Production safety check - prevent accidental production use - // Comment out for testing: os.Getenv can be used for runtime checks - panic("NewHandler() with in-memory storage should NEVER be used in production! Use NewSeaweedMQBrokerHandler() with SeaweedMQ masters for production, or NewTestHandler() for tests.") -} - -// NewTestHandler and NewSimpleTestHandler moved to handler_test.go (test-only file) - -// All test-related types and implementations moved to handler_test.go (test-only file) - -// NewTestHandlerWithMock creates a test handler with a custom SeaweedMQHandlerInterface -// This is useful for unit tests that need a handler but don't want to connect to real SeaweedMQ -func NewTestHandlerWithMock(mockHandler SeaweedMQHandlerInterface) *Handler { - return &Handler{ - seaweedMQHandler: mockHandler, - consumerOffsetStorage: nil, // Unit tests don't need offset storage - groupCoordinator: consumer.NewGroupCoordinator(), - registeredSchemas: make(map[string]bool), - topicSchemaConfigs: make(map[string]*TopicSchemaConfig), - inferredRecordTypes: make(map[string]*schema_pb.RecordType), - defaultPartitions: 1, - } -} - -// NewSeaweedMQBrokerHandler creates a new handler with SeaweedMQ broker integration -func NewSeaweedMQBrokerHandler(masters string, filerGroup string, clientHost string) (*Handler, error) { - return NewSeaweedMQBrokerHandlerWithDefaults(masters, filerGroup, clientHost, 4) // Default to 4 partitions -} - -// NewSeaweedMQBrokerHandlerWithDefaults creates a new handler with SeaweedMQ broker integration and custom defaults -func NewSeaweedMQBrokerHandlerWithDefaults(masters string, filerGroup string, clientHost string, defaultPartitions int32) (*Handler, error) { - // Set up SeaweedMQ integration - smqHandler, err := integration.NewSeaweedMQBrokerHandler(masters, filerGroup, clientHost) - if err != nil { - return nil, err - } - - // Use the shared filer client accessor from SeaweedMQHandler - sharedFilerAccessor := smqHandler.GetFilerClientAccessor() - if sharedFilerAccessor == nil { - return nil, fmt.Errorf("no shared filer client accessor available from SMQ handler") - } - - // Create consumer offset storage (for OffsetCommit/OffsetFetch protocol) - // Use filer-based storage for persistence across restarts - consumerOffsetStorage := newOffsetStorageAdapter( - consumer_offset.NewFilerStorage(sharedFilerAccessor), - ) - - // Create response caches to reduce CPU usage - // Metadata cache: 5 second TTL (Schema Registry polls frequently) - // Coordinator cache: 10 second TTL (less frequent, more stable) - metadataCache := NewResponseCache(5 * time.Second) - coordinatorCache := NewResponseCache(10 * time.Second) - - // Start cleanup loops - metadataCache.StartCleanupLoop(30 * time.Second) - coordinatorCache.StartCleanupLoop(60 * time.Second) - - handler := &Handler{ - seaweedMQHandler: smqHandler, - consumerOffsetStorage: consumerOffsetStorage, - groupCoordinator: consumer.NewGroupCoordinator(), - smqBrokerAddresses: nil, // Will be set by SetSMQBrokerAddresses() when server starts - registeredSchemas: make(map[string]bool), - topicSchemaConfigs: make(map[string]*TopicSchemaConfig), - inferredRecordTypes: make(map[string]*schema_pb.RecordType), - defaultPartitions: defaultPartitions, - metadataCache: metadataCache, - coordinatorCache: coordinatorCache, - } - - // Set protocol handler reference in SMQ handler for connection context access - smqHandler.SetProtocolHandler(handler) - - return handler, nil -} - -// AddTopicForTesting creates a topic for testing purposes -// This delegates to the underlying SeaweedMQ handler -func (h *Handler) AddTopicForTesting(topicName string, partitions int32) { - if h.seaweedMQHandler != nil { - h.seaweedMQHandler.CreateTopic(topicName, partitions) - } -} - -// Delegate methods to SeaweedMQ handler - -// GetOrCreateLedger method REMOVED - SMQ handles Kafka offsets natively - -// GetLedger method REMOVED - SMQ handles Kafka offsets natively - -// Close shuts down the handler and all connections -func (h *Handler) Close() error { - // Close group coordinator - if h.groupCoordinator != nil { - h.groupCoordinator.Close() - } - - // Close broker client if present - if h.brokerClient != nil { - if err := h.brokerClient.Close(); err != nil { - glog.Warningf("Failed to close broker client: %v", err) - } - } - - // Close SeaweedMQ handler if present - if h.seaweedMQHandler != nil { - return h.seaweedMQHandler.Close() - } - return nil -} - -// SetSMQBrokerAddresses updates the SMQ broker addresses used in Metadata responses -func (h *Handler) SetSMQBrokerAddresses(brokerAddresses []string) { - h.smqBrokerAddresses = brokerAddresses -} - -// GetSMQBrokerAddresses returns the SMQ broker addresses -func (h *Handler) GetSMQBrokerAddresses() []string { - // First try to get from the SeaweedMQ handler (preferred) - if h.seaweedMQHandler != nil { - if brokerAddresses := h.seaweedMQHandler.GetBrokerAddresses(); len(brokerAddresses) > 0 { - return brokerAddresses - } - } - - // Fallback to manually set addresses - if len(h.smqBrokerAddresses) > 0 { - return h.smqBrokerAddresses - } - - // No brokers configured - return empty slice - // This will cause proper error handling in callers - return []string{} -} - -// GetGatewayAddress returns the current gateway address as a string (for coordinator registry) -func (h *Handler) GetGatewayAddress() string { - if h.gatewayAddress != "" { - return h.gatewayAddress - } - // No gateway address configured - return empty string - // Callers should handle this as a configuration error - return "" -} - -// SetGatewayAddress sets the gateway address for coordinator registry -func (h *Handler) SetGatewayAddress(address string) { - h.gatewayAddress = address -} - -// SetCoordinatorRegistry sets the coordinator registry for this handler -func (h *Handler) SetCoordinatorRegistry(registry CoordinatorRegistryInterface) { - h.coordinatorRegistry = registry -} - -// GetCoordinatorRegistry returns the coordinator registry -func (h *Handler) GetCoordinatorRegistry() CoordinatorRegistryInterface { - return h.coordinatorRegistry -} - -// isDataPlaneAPI returns true if the API key is a data plane operation (Fetch, Produce) -// Data plane operations can be slow and may block on I/O -func isDataPlaneAPI(apiKey uint16) bool { - switch APIKey(apiKey) { - case APIKeyProduce: - return true - case APIKeyFetch: - return true - default: - return false - } -} - -// GetConnectionContext returns the current connection context converted to integration.ConnectionContext -// This implements the integration.ProtocolHandler interface -// -// NOTE: Since this method doesn't receive a context parameter, it returns a "best guess" connection context. -// In single-connection scenarios (like tests), this works correctly. In high-concurrency scenarios with many -// simultaneous connections, this may return a connection context from a different connection. -// For a proper fix, the integration.ProtocolHandler interface would need to be updated to pass context.Context. -func (h *Handler) GetConnectionContext() *integration.ConnectionContext { - // Try to find any active connection context - // In most cases (single connection, or low concurrency), this will return the correct context - var connCtx *ConnectionContext - h.connContexts.Range(func(key, value interface{}) bool { - if ctx, ok := value.(*ConnectionContext); ok { - connCtx = ctx - return false // Stop iteration after finding first context - } - return true - }) - - if connCtx == nil { - return nil - } - - // Convert protocol.ConnectionContext to integration.ConnectionContext - return &integration.ConnectionContext{ - ClientID: connCtx.ClientID, - ConsumerGroup: connCtx.ConsumerGroup, - MemberID: connCtx.MemberID, - BrokerClient: connCtx.BrokerClient, - } -} - -// HandleConn processes a single client connection -func (h *Handler) HandleConn(ctx context.Context, conn net.Conn) error { - connectionID := fmt.Sprintf("%s->%s", conn.RemoteAddr(), conn.LocalAddr()) - - // Record connection metrics - RecordConnectionMetrics() - - // Create cancellable context for this connection - // This ensures all requests are cancelled when the connection closes - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create per-connection BrokerClient for isolated gRPC streams - // This prevents different connections from interfering with each other's Fetch requests - // In mock/unit test mode, this may not be available, so we continue without it - var connBrokerClient *integration.BrokerClient - connBrokerClient, err := h.seaweedMQHandler.CreatePerConnectionBrokerClient() - if err != nil { - // Continue without broker client for unit test/mock mode - connBrokerClient = nil - } - - // RACE CONDITION FIX: Create connection-local context and pass through request pipeline - // Store in thread-safe map to enable lookup from methods that don't have direct access - connContext := &ConnectionContext{ - RemoteAddr: conn.RemoteAddr(), - LocalAddr: conn.LocalAddr(), - ConnectionID: connectionID, - BrokerClient: connBrokerClient, - } - - // Store in thread-safe map for later retrieval - h.connContexts.Store(connectionID, connContext) - - defer func() { - // Close all partition readers first - cleanupPartitionReaders(connContext) - // Close the per-connection broker client - if connBrokerClient != nil { - if closeErr := connBrokerClient.Close(); closeErr != nil { - glog.Errorf("[%s] Error closing BrokerClient: %v", connectionID, closeErr) - } - } - // Remove connection context from map - h.connContexts.Delete(connectionID) - RecordDisconnectionMetrics() - conn.Close() - }() - - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - defer w.Flush() - - // Use default timeout config - timeoutConfig := DefaultTimeoutConfig() - - // Track consecutive read timeouts to detect stale/CLOSE_WAIT connections - consecutiveTimeouts := 0 - const maxConsecutiveTimeouts = 3 // Give up after 3 timeouts in a row - - // Separate control plane from data plane - // Control plane: Metadata, Heartbeat, JoinGroup, etc. (must be fast, never block) - // Data plane: Fetch, Produce (can be slow, may block on I/O) - // - // Architecture: - // - Main loop routes requests to appropriate channel based on API key - // - Control goroutine processes control messages (fast, sequential) - // - Data goroutine processes data messages (can be slow) - // - Response writer handles responses in order using correlation IDs - controlChan := make(chan *kafkaRequest, 10) - dataChan := make(chan *kafkaRequest, 10) - responseChan := make(chan *kafkaResponse, 100) - var wg sync.WaitGroup - - // Response writer - maintains request/response order per connection - // While we process requests concurrently (control/data plane), - // we MUST track the order requests arrive and send responses in that same order. - // Solution: Track received correlation IDs in a queue, send responses in that queue order. - correlationQueue := make([]uint32, 0, 100) - correlationQueueMu := &sync.Mutex{} - - wg.Add(1) - go func() { - defer wg.Done() - glog.V(2).Infof("[%s] Response writer started", connectionID) - defer glog.V(2).Infof("[%s] Response writer exiting", connectionID) - pendingResponses := make(map[uint32]*kafkaResponse) - nextToSend := 0 // Index in correlationQueue - - for { - select { - case resp, ok := <-responseChan: - if !ok { - // responseChan closed, exit - return - } - // Only log at V(3) for debugging, not V(4) in hot path - glog.V(3).Infof("[%s] Response writer received correlation=%d", connectionID, resp.correlationID) - correlationQueueMu.Lock() - pendingResponses[resp.correlationID] = resp - - // Send all responses we can in queue order - for nextToSend < len(correlationQueue) { - expectedID := correlationQueue[nextToSend] - readyResp, exists := pendingResponses[expectedID] - if !exists { - // Response not ready yet, stop sending - break - } - - // Send this response - if readyResp.err != nil { - glog.Errorf("[%s] Error processing correlation=%d: %v", connectionID, readyResp.correlationID, readyResp.err) - } else { - if writeErr := h.writeResponseWithHeader(w, readyResp.correlationID, readyResp.apiKey, readyResp.apiVersion, readyResp.response, timeoutConfig.WriteTimeout); writeErr != nil { - glog.Errorf("[%s] Response writer WRITE ERROR correlation=%d: %v - EXITING", connectionID, readyResp.correlationID, writeErr) - correlationQueueMu.Unlock() - return - } - } - - // Remove from pending and advance - delete(pendingResponses, expectedID) - nextToSend++ - } - correlationQueueMu.Unlock() - case <-ctx.Done(): - // Context cancelled, exit immediately to prevent deadlock - glog.V(2).Infof("[%s] Response writer: context cancelled, exiting", connectionID) - return - } - } - }() - - // Control plane processor - fast operations, never blocks - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case req, ok := <-controlChan: - if !ok { - // Channel closed, exit - return - } - // Removed V(4) logging from hot path - only log errors and important events - - // Wrap request processing with panic recovery to prevent deadlocks - // If processRequestSync panics, we MUST still send a response to avoid blocking the response writer - var response []byte - var err error - func() { - defer func() { - if r := recover(); r != nil { - glog.Errorf("[%s] PANIC in control plane correlation=%d: %v", connectionID, req.correlationID, r) - err = fmt.Errorf("internal server error: panic in request handler: %v", r) - } - }() - response, err = h.processRequestSync(req) - }() - - select { - case responseChan <- &kafkaResponse{ - correlationID: req.correlationID, - apiKey: req.apiKey, - apiVersion: req.apiVersion, - response: response, - err: err, - }: - // Response sent successfully - no logging here - case <-ctx.Done(): - // Connection closed, stop processing - return - case <-time.After(5 * time.Second): - glog.Warningf("[%s] Control plane: timeout sending response correlation=%d", connectionID, req.correlationID) - } - case <-ctx.Done(): - // Context cancelled, drain remaining requests before exiting - glog.V(2).Infof("[%s] Control plane: context cancelled, draining remaining requests", connectionID) - for { - select { - case req, ok := <-controlChan: - if !ok { - return - } - // Process remaining requests with a short timeout - glog.V(3).Infof("[%s] Control plane: processing drained request correlation=%d", connectionID, req.correlationID) - response, err := h.processRequestSync(req) - select { - case responseChan <- &kafkaResponse{ - correlationID: req.correlationID, - apiKey: req.apiKey, - apiVersion: req.apiVersion, - response: response, - err: err, - }: - glog.V(3).Infof("[%s] Control plane: sent drained response correlation=%d", connectionID, req.correlationID) - case <-time.After(1 * time.Second): - glog.Warningf("[%s] Control plane: timeout sending drained response correlation=%d, discarding", connectionID, req.correlationID) - return - } - default: - // Channel empty, safe to exit - glog.V(4).Infof("[%s] Control plane: drain complete, exiting", connectionID) - return - } - } - } - } - }() - - // Data plane processor - can block on I/O - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case req, ok := <-dataChan: - if !ok { - // Channel closed, exit - return - } - // Removed V(4) logging from hot path - only log errors and important events - - // Wrap request processing with panic recovery to prevent deadlocks - // If processRequestSync panics, we MUST still send a response to avoid blocking the response writer - var response []byte - var err error - func() { - defer func() { - if r := recover(); r != nil { - glog.Errorf("[%s] PANIC in data plane correlation=%d: %v", connectionID, req.correlationID, r) - err = fmt.Errorf("internal server error: panic in request handler: %v", r) - } - }() - response, err = h.processRequestSync(req) - }() - - // Use select with context to avoid sending on closed channel - select { - case responseChan <- &kafkaResponse{ - correlationID: req.correlationID, - apiKey: req.apiKey, - apiVersion: req.apiVersion, - response: response, - err: err, - }: - // Response sent successfully - no logging here - case <-ctx.Done(): - // Connection closed, stop processing - return - case <-time.After(5 * time.Second): - glog.Warningf("[%s] Data plane: timeout sending response correlation=%d", connectionID, req.correlationID) - } - case <-ctx.Done(): - // Context cancelled, drain remaining requests before exiting - glog.V(2).Infof("[%s] Data plane: context cancelled, draining remaining requests", connectionID) - for { - select { - case req, ok := <-dataChan: - if !ok { - return - } - // Process remaining requests with a short timeout - response, err := h.processRequestSync(req) - select { - case responseChan <- &kafkaResponse{ - correlationID: req.correlationID, - apiKey: req.apiKey, - apiVersion: req.apiVersion, - response: response, - err: err, - }: - // Response sent - no logging - case <-time.After(1 * time.Second): - glog.Warningf("[%s] Data plane: timeout sending drained response correlation=%d, discarding", connectionID, req.correlationID) - return - } - default: - // Channel empty, safe to exit - glog.V(2).Infof("[%s] Data plane: drain complete, exiting", connectionID) - return - } - } - } - } - }() - - defer func() { - // Close channels in correct order to avoid panics - // 1. Close input channels to stop accepting new requests - close(controlChan) - close(dataChan) - // 2. Wait for worker goroutines to finish processing and sending responses - wg.Wait() - // 3. NOW close responseChan to signal response writer to exit - close(responseChan) - }() - - for { - // OPTIMIZATION: Consolidated context/deadline check - avoid redundant select statements - // Check context once at the beginning of the loop - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Set read deadline based on context or default timeout - // OPTIMIZATION: Calculate deadline once per iteration, not multiple times - var readDeadline time.Time - if deadline, ok := ctx.Deadline(); ok { - readDeadline = deadline - } else { - readDeadline = time.Now().Add(timeoutConfig.ReadTimeout) - } - - if err := conn.SetReadDeadline(readDeadline); err != nil { - return fmt.Errorf("set read deadline: %w", err) - } - - // Read message size (4 bytes) - var sizeBytes [4]byte - if _, err := io.ReadFull(r, sizeBytes[:]); err != nil { - if err == io.EOF { - return nil - } - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - // Track consecutive timeouts to detect stale connections - consecutiveTimeouts++ - if consecutiveTimeouts >= maxConsecutiveTimeouts { - return nil - } - // Idle timeout while waiting for next request; keep connection open - continue - } - return fmt.Errorf("read message size: %w", err) - } - - // Successfully read data, reset timeout counter - consecutiveTimeouts = 0 - - // Successfully read the message size - size := binary.BigEndian.Uint32(sizeBytes[:]) - if size == 0 || size > 1024*1024 { // 1MB limit - // Use standardized error for message size limit - // Send error response for message too large - errorResponse := BuildErrorResponse(0, ErrorCodeMessageTooLarge) // correlation ID 0 since we can't parse it yet - if writeErr := h.writeResponseWithCorrelationID(w, 0, errorResponse, timeoutConfig.WriteTimeout); writeErr != nil { - } - return fmt.Errorf("message size %d exceeds limit", size) - } - - // Set read deadline for message body - if err := conn.SetReadDeadline(time.Now().Add(timeoutConfig.ReadTimeout)); err != nil { - } - - // Read the message - // OPTIMIZATION: Use buffer pool to reduce GC pressure (was 1MB/sec at 1000 req/s) - messageBuf := mem.Allocate(int(size)) - defer mem.Free(messageBuf) - if _, err := io.ReadFull(r, messageBuf); err != nil { - _ = HandleTimeoutError(err, "read") // errorCode - return fmt.Errorf("read message: %w", err) - } - - - // Parse at least the basic header to get API key and correlation ID - if len(messageBuf) < 8 { - return fmt.Errorf("message too short") - } - - apiKey := binary.BigEndian.Uint16(messageBuf[0:2]) - apiVersion := binary.BigEndian.Uint16(messageBuf[2:4]) - correlationID := binary.BigEndian.Uint32(messageBuf[4:8]) - - // Validate API version against what we support - if err := h.validateAPIVersion(apiKey, apiVersion); err != nil { - glog.Errorf("API VERSION VALIDATION FAILED: Key=%d (%s), Version=%d, error=%v", apiKey, getAPIName(APIKey(apiKey)), apiVersion, err) - // Return proper Kafka error response for unsupported version - response, writeErr := h.buildUnsupportedVersionResponse(correlationID, apiKey, apiVersion) - if writeErr != nil { - return fmt.Errorf("build error response: %w", writeErr) - } - // Send error response through response queue to maintain sequential ordering - select { - case responseChan <- &kafkaResponse{ - correlationID: correlationID, - apiKey: apiKey, - apiVersion: apiVersion, - response: response, - err: nil, - }: - // Error response queued successfully, continue reading next request - continue - case <-ctx.Done(): - return ctx.Err() - } - } - - // Extract request body - special handling for ApiVersions requests - var requestBody []byte - if apiKey == uint16(APIKeyApiVersions) && apiVersion >= 3 { - // ApiVersions v3+ uses client_software_name + client_software_version, not client_id - bodyOffset := 8 // Skip api_key(2) + api_version(2) + correlation_id(4) - - // Skip client_software_name (compact string) - if len(messageBuf) > bodyOffset { - clientNameLen := int(messageBuf[bodyOffset]) // compact string length - if clientNameLen > 0 { - clientNameLen-- // compact strings encode length+1 - bodyOffset += 1 + clientNameLen - } else { - bodyOffset += 1 // just the length byte for null/empty - } - } - - // Skip client_software_version (compact string) - if len(messageBuf) > bodyOffset { - clientVersionLen := int(messageBuf[bodyOffset]) // compact string length - if clientVersionLen > 0 { - clientVersionLen-- // compact strings encode length+1 - bodyOffset += 1 + clientVersionLen - } else { - bodyOffset += 1 // just the length byte for null/empty - } - } - - // Skip tagged fields (should be 0x00 for ApiVersions) - if len(messageBuf) > bodyOffset { - bodyOffset += 1 // tagged fields byte - } - - requestBody = messageBuf[bodyOffset:] - } else { - // Parse header using flexible version utilities for other APIs - header, parsedRequestBody, parseErr := ParseRequestHeader(messageBuf) - if parseErr != nil { - glog.Errorf("Request header parsing failed: API=%d (%s) v%d, correlation=%d, error=%v", - apiKey, getAPIName(APIKey(apiKey)), apiVersion, correlationID, parseErr) - - // Fall back to basic header parsing if flexible version parsing fails - - // Basic header parsing fallback (original logic) - bodyOffset := 8 - if len(messageBuf) < bodyOffset+2 { - return fmt.Errorf("invalid header: missing client_id length") - } - clientIDLen := int16(binary.BigEndian.Uint16(messageBuf[bodyOffset : bodyOffset+2])) - bodyOffset += 2 - if clientIDLen >= 0 { - if len(messageBuf) < bodyOffset+int(clientIDLen) { - return fmt.Errorf("invalid header: client_id truncated") - } - bodyOffset += int(clientIDLen) - } - requestBody = messageBuf[bodyOffset:] - } else { - // Use the successfully parsed request body - requestBody = parsedRequestBody - - // Validate parsed header matches what we already extracted - if header.APIKey != apiKey || header.APIVersion != apiVersion || header.CorrelationID != correlationID { - // Fall back to basic parsing rather than failing - bodyOffset := 8 - if len(messageBuf) < bodyOffset+2 { - return fmt.Errorf("invalid header: missing client_id length") - } - clientIDLen := int16(binary.BigEndian.Uint16(messageBuf[bodyOffset : bodyOffset+2])) - bodyOffset += 2 - if clientIDLen >= 0 { - if len(messageBuf) < bodyOffset+int(clientIDLen) { - return fmt.Errorf("invalid header: client_id truncated") - } - bodyOffset += int(clientIDLen) - } - requestBody = messageBuf[bodyOffset:] - } else if header.ClientID != nil { - // Store client ID in connection context for use in fetch requests - connContext.ClientID = *header.ClientID - } - } - } - - // Route request to appropriate processor - // Control plane: Fast, never blocks (Metadata, Heartbeat, etc.) - // Data plane: Can be slow (Fetch, Produce) - - // Attach connection context to the Go context for retrieval in nested calls - ctxWithConn := context.WithValue(ctx, connContextKey, connContext) - - req := &kafkaRequest{ - correlationID: correlationID, - apiKey: apiKey, - apiVersion: apiVersion, - requestBody: requestBody, - ctx: ctxWithConn, - connContext: connContext, // Pass per-connection context to avoid race conditions - } - - // Route to appropriate channel based on API key - var targetChan chan *kafkaRequest - if apiKey == 2 { // ListOffsets - } - if isDataPlaneAPI(apiKey) { - targetChan = dataChan - } else { - targetChan = controlChan - } - - // Only add to correlation queue AFTER successful channel send - // If we add before and the channel blocks, the correlation ID is in the queue - // but the request never gets processed, causing response writer deadlock - select { - case targetChan <- req: - // Request queued successfully - NOW add to correlation tracking - correlationQueueMu.Lock() - correlationQueue = append(correlationQueue, correlationID) - correlationQueueMu.Unlock() - case <-ctx.Done(): - return ctx.Err() - case <-time.After(10 * time.Second): - // Channel full for too long - this shouldn't happen with proper backpressure - glog.Errorf("[%s] Failed to queue correlation=%d - channel full (10s timeout)", connectionID, correlationID) - return fmt.Errorf("request queue full: correlation=%d", correlationID) - } - } -} - -// processRequestSync processes a single Kafka API request synchronously and returns the response -func (h *Handler) processRequestSync(req *kafkaRequest) ([]byte, error) { - // Record request start time for latency tracking - requestStart := time.Now() - apiName := getAPIName(APIKey(req.apiKey)) - - - // Only log high-volume requests at V(2), not V(4) - if glog.V(2) { - glog.V(2).Infof("[API] %s (key=%d, ver=%d, corr=%d)", - apiName, req.apiKey, req.apiVersion, req.correlationID) - } - - var response []byte - var err error - - switch APIKey(req.apiKey) { - case APIKeyApiVersions: - response, err = h.handleApiVersions(req.correlationID, req.apiVersion) - - case APIKeyMetadata: - response, err = h.handleMetadata(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyListOffsets: - response, err = h.handleListOffsets(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyCreateTopics: - response, err = h.handleCreateTopics(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyDeleteTopics: - response, err = h.handleDeleteTopics(req.correlationID, req.requestBody) - - case APIKeyProduce: - response, err = h.handleProduce(req.ctx, req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyFetch: - response, err = h.handleFetch(req.ctx, req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyJoinGroup: - response, err = h.handleJoinGroup(req.connContext, req.correlationID, req.apiVersion, req.requestBody) - - case APIKeySyncGroup: - response, err = h.handleSyncGroup(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyOffsetCommit: - response, err = h.handleOffsetCommit(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyOffsetFetch: - response, err = h.handleOffsetFetch(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyFindCoordinator: - response, err = h.handleFindCoordinator(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyHeartbeat: - response, err = h.handleHeartbeat(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyLeaveGroup: - response, err = h.handleLeaveGroup(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyDescribeGroups: - response, err = h.handleDescribeGroups(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyListGroups: - response, err = h.handleListGroups(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyDescribeConfigs: - response, err = h.handleDescribeConfigs(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyDescribeCluster: - response, err = h.handleDescribeCluster(req.correlationID, req.apiVersion, req.requestBody) - - case APIKeyInitProducerId: - response, err = h.handleInitProducerId(req.correlationID, req.apiVersion, req.requestBody) - - default: - glog.Warningf("Unsupported API key: %d (%s) v%d - Correlation: %d", req.apiKey, apiName, req.apiVersion, req.correlationID) - err = fmt.Errorf("unsupported API key: %d (version %d)", req.apiKey, req.apiVersion) - } - - glog.V(2).Infof("processRequestSync: Switch completed for correlation=%d, about to record metrics", req.correlationID) - // Record metrics - requestLatency := time.Since(requestStart) - if err != nil { - RecordErrorMetrics(req.apiKey, requestLatency) - } else { - RecordRequestMetrics(req.apiKey, requestLatency) - } - glog.V(2).Infof("processRequestSync: Metrics recorded for correlation=%d, about to return", req.correlationID) - - return response, err -} - -// ApiKeyInfo represents supported API key information -type ApiKeyInfo struct { - ApiKey APIKey - MinVersion uint16 - MaxVersion uint16 -} - -// SupportedApiKeys defines all supported API keys and their version ranges -var SupportedApiKeys = []ApiKeyInfo{ - {APIKeyApiVersions, 0, 4}, // ApiVersions - support up to v4 for Kafka 8.0.0 compatibility - {APIKeyMetadata, 0, 7}, // Metadata - support up to v7 - {APIKeyProduce, 0, 7}, // Produce - {APIKeyFetch, 0, 7}, // Fetch - {APIKeyListOffsets, 0, 2}, // ListOffsets - {APIKeyCreateTopics, 0, 5}, // CreateTopics - {APIKeyDeleteTopics, 0, 4}, // DeleteTopics - {APIKeyFindCoordinator, 0, 3}, // FindCoordinator - v3+ supports flexible responses - {APIKeyJoinGroup, 0, 6}, // JoinGroup - {APIKeySyncGroup, 0, 5}, // SyncGroup - {APIKeyOffsetCommit, 0, 2}, // OffsetCommit - {APIKeyOffsetFetch, 0, 5}, // OffsetFetch - {APIKeyHeartbeat, 0, 4}, // Heartbeat - {APIKeyLeaveGroup, 0, 4}, // LeaveGroup - {APIKeyDescribeGroups, 0, 5}, // DescribeGroups - {APIKeyListGroups, 0, 4}, // ListGroups - {APIKeyDescribeConfigs, 0, 4}, // DescribeConfigs - {APIKeyInitProducerId, 0, 4}, // InitProducerId - support up to v4 for transactional producers - {APIKeyDescribeCluster, 0, 1}, // DescribeCluster - for AdminClient compatibility (KIP-919) -} - -func (h *Handler) handleApiVersions(correlationID uint32, apiVersion uint16) ([]byte, error) { - // Send correct flexible or non-flexible response based on API version - // This fixes the AdminClient "collection size 2184558" error by using proper varint encoding - response := make([]byte, 0, 512) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // === RESPONSE BODY === - // Error code (2 bytes) - always fixed-length - response = append(response, 0, 0) // No error - - // API Keys Array - use correct encoding based on version - if apiVersion >= 3 { - // FLEXIBLE FORMAT: Compact array with varint length - THIS FIXES THE ADMINCLIENT BUG! - response = append(response, CompactArrayLength(uint32(len(SupportedApiKeys)))...) - - // Add API key entries with per-element tagged fields - for _, api := range SupportedApiKeys { - response = append(response, byte(api.ApiKey>>8), byte(api.ApiKey)) // api_key (2 bytes) - response = append(response, byte(api.MinVersion>>8), byte(api.MinVersion)) // min_version (2 bytes) - response = append(response, byte(api.MaxVersion>>8), byte(api.MaxVersion)) // max_version (2 bytes) - response = append(response, 0x00) // Per-element tagged fields (varint: empty) - } - - } else { - // NON-FLEXIBLE FORMAT: Regular array with fixed 4-byte length - response = append(response, 0, 0, 0, byte(len(SupportedApiKeys))) // Array length (4 bytes) - - // Add API key entries without tagged fields - for _, api := range SupportedApiKeys { - response = append(response, byte(api.ApiKey>>8), byte(api.ApiKey)) // api_key (2 bytes) - response = append(response, byte(api.MinVersion>>8), byte(api.MinVersion)) // min_version (2 bytes) - response = append(response, byte(api.MaxVersion>>8), byte(api.MaxVersion)) // max_version (2 bytes) - } - } - - // Throttle time (for v1+) - always fixed-length - if apiVersion >= 1 { - response = append(response, 0, 0, 0, 0) // throttle_time_ms = 0 (4 bytes) - } - - // Response-level tagged fields (for v3+ flexible versions) - if apiVersion >= 3 { - response = append(response, 0x00) // Empty response-level tagged fields (varint: single byte 0) - } - - return response, nil -} - -// handleMetadataV0 implements the Metadata API response in version 0 format. -// v0 response layout: -// correlation_id(4) + brokers(ARRAY) + topics(ARRAY) -// broker: node_id(4) + host(STRING) + port(4) -// topic: error_code(2) + name(STRING) + partitions(ARRAY) -// partition: error_code(2) + partition_id(4) + leader(4) + replicas(ARRAY) + isr(ARRAY) -func (h *Handler) HandleMetadataV0(correlationID uint32, requestBody []byte) ([]byte, error) { - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Get consistent node ID for this gateway - nodeID := h.GetNodeID() - nodeIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(nodeIDBytes, uint32(nodeID)) - - // Brokers array length (4 bytes) - 1 broker (this gateway) - response = append(response, 0, 0, 0, 1) - - // Broker 0: node_id(4) + host(STRING) + port(4) - response = append(response, nodeIDBytes...) // Use consistent node ID - - // Get advertised address for client connections - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - // Host (STRING: 2 bytes length + bytes) - validate length fits in uint16 - if len(host) > 65535 { - return nil, fmt.Errorf("host name too long: %d bytes", len(host)) - } - hostLen := uint16(len(host)) - response = append(response, byte(hostLen>>8), byte(hostLen)) - response = append(response, []byte(host)...) - - // Port (4 bytes) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(port)) - response = append(response, portBytes...) - - // Parse requested topics (empty means all) - requestedTopics := h.parseMetadataTopics(requestBody) - glog.V(3).Infof("[METADATA v0] Requested topics: %v (empty=all)", requestedTopics) - - // Determine topics to return using SeaweedMQ handler - var topicsToReturn []string - if len(requestedTopics) == 0 { - topicsToReturn = h.seaweedMQHandler.ListTopics() - } else { - for _, name := range requestedTopics { - if h.seaweedMQHandler.TopicExists(name) { - topicsToReturn = append(topicsToReturn, name) - } else { - // Topic doesn't exist according to current cache, check broker directly - // This handles the race condition where producers just created topics - // and consumers are requesting metadata before cache TTL expires - glog.V(3).Infof("[METADATA v0] Topic %s not in cache, checking broker directly", name) - h.seaweedMQHandler.InvalidateTopicExistsCache(name) - if h.seaweedMQHandler.TopicExists(name) { - glog.V(3).Infof("[METADATA v0] Topic %s found on broker after cache refresh", name) - topicsToReturn = append(topicsToReturn, name) - } else { - glog.V(3).Infof("[METADATA v0] Topic %s not found, auto-creating with default partitions", name) - // Auto-create topic (matches Kafka's auto.create.topics.enable=true) - if err := h.createTopicWithSchemaSupport(name, h.GetDefaultPartitions()); err != nil { - glog.V(2).Infof("[METADATA v0] Failed to auto-create topic %s: %v", name, err) - // Don't add to topicsToReturn - client will get error - } else { - glog.V(2).Infof("[METADATA v0] Successfully auto-created topic %s", name) - topicsToReturn = append(topicsToReturn, name) - } - } - } - } - } - - // Topics array length (4 bytes) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, uint32(len(topicsToReturn))) - response = append(response, topicsCountBytes...) - - // Topic entries - for _, topicName := range topicsToReturn { - // error_code(2) = 0 - response = append(response, 0, 0) - - // name (STRING) - nameBytes := []byte(topicName) - nameLen := uint16(len(nameBytes)) - response = append(response, byte(nameLen>>8), byte(nameLen)) - response = append(response, nameBytes...) - - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topicName) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // partitions array length (4 bytes) - partitionsBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsBytes, uint32(partitionCount)) - response = append(response, partitionsBytes...) - - // Create partition entries for each partition - for partitionID := int32(0); partitionID < partitionCount; partitionID++ { - // partition: error_code(2) + partition_id(4) + leader(4) - response = append(response, 0, 0) // error_code - - // partition_id (4 bytes) - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, uint32(partitionID)) - response = append(response, partitionIDBytes...) - - response = append(response, nodeIDBytes...) // leader = this broker - - // replicas: array length(4) + one broker id (this broker) - response = append(response, 0, 0, 0, 1) - response = append(response, nodeIDBytes...) - - // isr: array length(4) + one broker id (this broker) - response = append(response, 0, 0, 0, 1) - response = append(response, nodeIDBytes...) - } - } - - for range topicsToReturn { - } - return response, nil -} - -func (h *Handler) HandleMetadataV1(correlationID uint32, requestBody []byte) ([]byte, error) { - // Simplified Metadata v1 implementation - based on working v0 + v1 additions - // v1 adds: ControllerID (after brokers), Rack (for brokers), IsInternal (for topics) - - // Parse requested topics (empty means all) - requestedTopics := h.parseMetadataTopics(requestBody) - glog.V(3).Infof("[METADATA v1] Requested topics: %v (empty=all)", requestedTopics) - - // Determine topics to return using SeaweedMQ handler - var topicsToReturn []string - if len(requestedTopics) == 0 { - topicsToReturn = h.seaweedMQHandler.ListTopics() - } else { - for _, name := range requestedTopics { - if h.seaweedMQHandler.TopicExists(name) { - topicsToReturn = append(topicsToReturn, name) - } else { - // Topic doesn't exist according to current cache, check broker directly - glog.V(3).Infof("[METADATA v1] Topic %s not in cache, checking broker directly", name) - h.seaweedMQHandler.InvalidateTopicExistsCache(name) - if h.seaweedMQHandler.TopicExists(name) { - glog.V(3).Infof("[METADATA v1] Topic %s found on broker after cache refresh", name) - topicsToReturn = append(topicsToReturn, name) - } else { - glog.V(3).Infof("[METADATA v1] Topic %s not found, auto-creating with default partitions", name) - if err := h.createTopicWithSchemaSupport(name, h.GetDefaultPartitions()); err != nil { - glog.V(2).Infof("[METADATA v1] Failed to auto-create topic %s: %v", name, err) - } else { - glog.V(2).Infof("[METADATA v1] Successfully auto-created topic %s", name) - topicsToReturn = append(topicsToReturn, name) - } - } - } - } - } - - // Build response using same approach as v0 but with v1 additions - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Get consistent node ID for this gateway - nodeID := h.GetNodeID() - nodeIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(nodeIDBytes, uint32(nodeID)) - - // Brokers array length (4 bytes) - 1 broker (this gateway) - response = append(response, 0, 0, 0, 1) - - // Broker 0: node_id(4) + host(STRING) + port(4) + rack(STRING) - response = append(response, nodeIDBytes...) // Use consistent node ID - - // Get advertised address for client connections - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - // Host (STRING: 2 bytes length + bytes) - validate length fits in uint16 - if len(host) > 65535 { - return nil, fmt.Errorf("host name too long: %d bytes", len(host)) - } - hostLen := uint16(len(host)) - response = append(response, byte(hostLen>>8), byte(hostLen)) - response = append(response, []byte(host)...) - - // Port (4 bytes) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - portBytes := make([]byte, 4) - binary.BigEndian.PutUint32(portBytes, uint32(port)) - response = append(response, portBytes...) - - // Rack (STRING: 2 bytes length + bytes) - v1 addition, non-nullable empty string - response = append(response, 0, 0) // empty string - - // ControllerID (4 bytes) - v1 addition - response = append(response, nodeIDBytes...) // controller_id = this broker - - // Topics array length (4 bytes) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, uint32(len(topicsToReturn))) - response = append(response, topicsCountBytes...) - - // Topics - for _, topicName := range topicsToReturn { - // error_code (2 bytes) - response = append(response, 0, 0) - - // topic name (STRING: 2 bytes length + bytes) - topicLen := uint16(len(topicName)) - response = append(response, byte(topicLen>>8), byte(topicLen)) - response = append(response, []byte(topicName)...) - - // is_internal (1 byte) - v1 addition - response = append(response, 0) // false - - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topicName) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // partitions array length (4 bytes) - partitionsBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsBytes, uint32(partitionCount)) - response = append(response, partitionsBytes...) - - // Create partition entries for each partition - for partitionID := int32(0); partitionID < partitionCount; partitionID++ { - // partition: error_code(2) + partition_id(4) + leader_id(4) + replicas(ARRAY) + isr(ARRAY) - response = append(response, 0, 0) // error_code - - // partition_id (4 bytes) - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, uint32(partitionID)) - response = append(response, partitionIDBytes...) - - response = append(response, nodeIDBytes...) // leader_id = this broker - - // replicas: array length(4) + one broker id (this broker) - response = append(response, 0, 0, 0, 1) - response = append(response, nodeIDBytes...) - - // isr: array length(4) + one broker id (this broker) - response = append(response, 0, 0, 0, 1) - response = append(response, nodeIDBytes...) - } - } - - return response, nil -} - -// HandleMetadataV2 implements Metadata API v2 with ClusterID field -func (h *Handler) HandleMetadataV2(correlationID uint32, requestBody []byte) ([]byte, error) { - // Metadata v2 adds ClusterID field (nullable string) - // v2 response layout: correlation_id(4) + brokers(ARRAY) + cluster_id(NULLABLE_STRING) + controller_id(4) + topics(ARRAY) - - // Parse requested topics (empty means all) - requestedTopics := h.parseMetadataTopics(requestBody) - glog.V(3).Infof("[METADATA v2] Requested topics: %v (empty=all)", requestedTopics) - - // Determine topics to return using SeaweedMQ handler - var topicsToReturn []string - if len(requestedTopics) == 0 { - topicsToReturn = h.seaweedMQHandler.ListTopics() - } else { - for _, name := range requestedTopics { - if h.seaweedMQHandler.TopicExists(name) { - topicsToReturn = append(topicsToReturn, name) - } else { - // Topic doesn't exist according to current cache, check broker directly - glog.V(3).Infof("[METADATA v2] Topic %s not in cache, checking broker directly", name) - h.seaweedMQHandler.InvalidateTopicExistsCache(name) - if h.seaweedMQHandler.TopicExists(name) { - glog.V(3).Infof("[METADATA v2] Topic %s found on broker after cache refresh", name) - topicsToReturn = append(topicsToReturn, name) - } else { - glog.V(3).Infof("[METADATA v2] Topic %s not found, auto-creating with default partitions", name) - if err := h.createTopicWithSchemaSupport(name, h.GetDefaultPartitions()); err != nil { - glog.V(2).Infof("[METADATA v2] Failed to auto-create topic %s: %v", name, err) - } else { - glog.V(2).Infof("[METADATA v2] Successfully auto-created topic %s", name) - topicsToReturn = append(topicsToReturn, name) - } - } - } - } - } - - var buf bytes.Buffer - - // Correlation ID (4 bytes) - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Brokers array (4 bytes length + brokers) - 1 broker (this gateway) - binary.Write(&buf, binary.BigEndian, int32(1)) - - // Get advertised address for client connections - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - nodeID := h.GetNodeID() // Get consistent node ID for this gateway - - // Broker: node_id(4) + host(STRING) + port(4) + rack(STRING) + cluster_id(NULLABLE_STRING) - binary.Write(&buf, binary.BigEndian, nodeID) - - // Host (STRING: 2 bytes length + data) - validate length fits in int16 - if len(host) > 32767 { - return nil, fmt.Errorf("host name too long: %d bytes", len(host)) - } - binary.Write(&buf, binary.BigEndian, int16(len(host))) - buf.WriteString(host) - - // Port (4 bytes) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - binary.Write(&buf, binary.BigEndian, int32(port)) - - // Rack (STRING: 2 bytes length + data) - v1+ addition, non-nullable - binary.Write(&buf, binary.BigEndian, int16(0)) // Empty string - - // ClusterID (NULLABLE_STRING: 2 bytes length + data) - v2 addition - // Schema Registry requires a non-null cluster ID - clusterID := "seaweedfs-kafka-gateway" - binary.Write(&buf, binary.BigEndian, int16(len(clusterID))) - buf.WriteString(clusterID) - - // ControllerID (4 bytes) - v1+ addition - binary.Write(&buf, binary.BigEndian, nodeID) - - // Topics array (4 bytes length + topics) - binary.Write(&buf, binary.BigEndian, int32(len(topicsToReturn))) - - for _, topicName := range topicsToReturn { - // ErrorCode (2 bytes) - binary.Write(&buf, binary.BigEndian, int16(0)) - - // Name (STRING: 2 bytes length + data) - binary.Write(&buf, binary.BigEndian, int16(len(topicName))) - buf.WriteString(topicName) - - // IsInternal (1 byte) - v1+ addition - buf.WriteByte(0) // false - - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topicName) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // Partitions array (4 bytes length + partitions) - binary.Write(&buf, binary.BigEndian, partitionCount) - - // Create partition entries for each partition - for partitionID := int32(0); partitionID < partitionCount; partitionID++ { - binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode - binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID - - // ReplicaNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - - // IsrNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - } - } - - response := buf.Bytes() - - return response, nil -} - -// HandleMetadataV3V4 implements Metadata API v3/v4 with ThrottleTimeMs field -func (h *Handler) HandleMetadataV3V4(correlationID uint32, requestBody []byte) ([]byte, error) { - // Metadata v3/v4 adds ThrottleTimeMs field at the beginning - // v3/v4 response layout: correlation_id(4) + throttle_time_ms(4) + brokers(ARRAY) + cluster_id(NULLABLE_STRING) + controller_id(4) + topics(ARRAY) - - // Parse requested topics (empty means all) - requestedTopics := h.parseMetadataTopics(requestBody) - glog.V(3).Infof("[METADATA v3/v4] Requested topics: %v (empty=all)", requestedTopics) - - // Determine topics to return using SeaweedMQ handler - var topicsToReturn []string - if len(requestedTopics) == 0 { - topicsToReturn = h.seaweedMQHandler.ListTopics() - } else { - for _, name := range requestedTopics { - if h.seaweedMQHandler.TopicExists(name) { - topicsToReturn = append(topicsToReturn, name) - } else { - // Topic doesn't exist according to current cache, check broker directly - glog.V(3).Infof("[METADATA v3/v4] Topic %s not in cache, checking broker directly", name) - h.seaweedMQHandler.InvalidateTopicExistsCache(name) - if h.seaweedMQHandler.TopicExists(name) { - glog.V(3).Infof("[METADATA v3/v4] Topic %s found on broker after cache refresh", name) - topicsToReturn = append(topicsToReturn, name) - } else { - glog.V(3).Infof("[METADATA v3/v4] Topic %s not found, auto-creating with default partitions", name) - if err := h.createTopicWithSchemaSupport(name, h.GetDefaultPartitions()); err != nil { - glog.V(2).Infof("[METADATA v3/v4] Failed to auto-create topic %s: %v", name, err) - } else { - glog.V(2).Infof("[METADATA v3/v4] Successfully auto-created topic %s", name) - topicsToReturn = append(topicsToReturn, name) - } - } - } - } - } - - var buf bytes.Buffer - - // Correlation ID (4 bytes) - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // ThrottleTimeMs (4 bytes) - v3+ addition - binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling - - // Brokers array (4 bytes length + brokers) - 1 broker (this gateway) - binary.Write(&buf, binary.BigEndian, int32(1)) - - // Get advertised address for client connections - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - nodeID := h.GetNodeID() // Get consistent node ID for this gateway - - // Broker: node_id(4) + host(STRING) + port(4) + rack(STRING) + cluster_id(NULLABLE_STRING) - binary.Write(&buf, binary.BigEndian, nodeID) - - // Host (STRING: 2 bytes length + data) - validate length fits in int16 - if len(host) > 32767 { - return nil, fmt.Errorf("host name too long: %d bytes", len(host)) - } - binary.Write(&buf, binary.BigEndian, int16(len(host))) - buf.WriteString(host) - - // Port (4 bytes) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - binary.Write(&buf, binary.BigEndian, int32(port)) - - // Rack (STRING: 2 bytes length + data) - v1+ addition, non-nullable - binary.Write(&buf, binary.BigEndian, int16(0)) // Empty string - - // ClusterID (NULLABLE_STRING: 2 bytes length + data) - v2+ addition - // Schema Registry requires a non-null cluster ID - clusterID := "seaweedfs-kafka-gateway" - binary.Write(&buf, binary.BigEndian, int16(len(clusterID))) - buf.WriteString(clusterID) - - // ControllerID (4 bytes) - v1+ addition - binary.Write(&buf, binary.BigEndian, nodeID) - - // Topics array (4 bytes length + topics) - binary.Write(&buf, binary.BigEndian, int32(len(topicsToReturn))) - - for _, topicName := range topicsToReturn { - // ErrorCode (2 bytes) - binary.Write(&buf, binary.BigEndian, int16(0)) - - // Name (STRING: 2 bytes length + data) - binary.Write(&buf, binary.BigEndian, int16(len(topicName))) - buf.WriteString(topicName) - - // IsInternal (1 byte) - v1+ addition - buf.WriteByte(0) // false - - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topicName) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // Partitions array (4 bytes length + partitions) - binary.Write(&buf, binary.BigEndian, partitionCount) - - // Create partition entries for each partition - for partitionID := int32(0); partitionID < partitionCount; partitionID++ { - binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode - binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID - - // ReplicaNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - - // IsrNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - } - } - - response := buf.Bytes() - - // Detailed logging for Metadata response - maxDisplay := len(response) - if maxDisplay > 50 { - maxDisplay = 50 - } - if len(response) > 100 { - } - - return response, nil -} - -// HandleMetadataV5V6 implements Metadata API v5/v6 with OfflineReplicas field -func (h *Handler) HandleMetadataV5V6(correlationID uint32, requestBody []byte) ([]byte, error) { - return h.handleMetadataV5ToV8(correlationID, requestBody, 5) -} - -// HandleMetadataV7 implements Metadata API v7 with LeaderEpoch field (REGULAR FORMAT, NOT FLEXIBLE) -func (h *Handler) HandleMetadataV7(correlationID uint32, requestBody []byte) ([]byte, error) { - // Metadata v7 uses REGULAR arrays/strings (like v5/v6), NOT compact format - // Only v9+ uses compact format (flexible responses) - return h.handleMetadataV5ToV8(correlationID, requestBody, 7) -} - -// handleMetadataV5ToV8 handles Metadata v5-v8 with regular (non-compact) encoding -// v5/v6: adds OfflineReplicas field to partitions -// v7: adds LeaderEpoch field to partitions -// v8: adds ClusterAuthorizedOperations field -// All use REGULAR arrays/strings (NOT compact) - only v9+ uses compact format -func (h *Handler) handleMetadataV5ToV8(correlationID uint32, requestBody []byte, apiVersion int) ([]byte, error) { - // v5-v8 response layout: throttle_time_ms(4) + brokers(ARRAY) + cluster_id(NULLABLE_STRING) + controller_id(4) + topics(ARRAY) [+ cluster_authorized_operations(4) for v8] - // Each partition includes: error_code(2) + partition_index(4) + leader_id(4) [+ leader_epoch(4) for v7+] + replica_nodes(ARRAY) + isr_nodes(ARRAY) + offline_replicas(ARRAY) - - // Parse requested topics (empty means all) - requestedTopics := h.parseMetadataTopics(requestBody) - glog.V(3).Infof("[METADATA v%d] Requested topics: %v (empty=all)", apiVersion, requestedTopics) - - // Determine topics to return using SeaweedMQ handler - var topicsToReturn []string - if len(requestedTopics) == 0 { - topicsToReturn = h.seaweedMQHandler.ListTopics() - } else { - // FIXED: Proper topic existence checking (removed the hack) - // Now that CreateTopics v5 works, we use proper Kafka workflow: - // 1. Check which requested topics actually exist - // 2. Auto-create system topics if they don't exist - // 3. Only return existing topics in metadata - // 4. Client will call CreateTopics for non-existent topics - // 5. Then request metadata again to see the created topics - for _, topic := range requestedTopics { - if isSystemTopic(topic) { - // Always try to auto-create system topics during metadata requests - glog.V(3).Infof("[METADATA v%d] Ensuring system topic %s exists during metadata request", apiVersion, topic) - if !h.seaweedMQHandler.TopicExists(topic) { - glog.V(3).Infof("[METADATA v%d] Auto-creating system topic %s during metadata request", apiVersion, topic) - if err := h.createTopicWithSchemaSupport(topic, 1); err != nil { - glog.V(0).Infof("[METADATA v%d] Failed to auto-create system topic %s: %v", apiVersion, topic, err) - // Continue without adding to topicsToReturn - client will get UNKNOWN_TOPIC_OR_PARTITION - } else { - glog.V(3).Infof("[METADATA v%d] Successfully auto-created system topic %s", apiVersion, topic) - } - } else { - glog.V(3).Infof("[METADATA v%d] System topic %s already exists", apiVersion, topic) - } - topicsToReturn = append(topicsToReturn, topic) - } else if h.seaweedMQHandler.TopicExists(topic) { - topicsToReturn = append(topicsToReturn, topic) - } else { - // Topic doesn't exist according to current cache, but let's check broker directly - // This handles the race condition where producers just created topics - // and consumers are requesting metadata before cache TTL expires - glog.V(3).Infof("[METADATA v%d] Topic %s not in cache, checking broker directly", apiVersion, topic) - // Force cache invalidation to do fresh broker check - h.seaweedMQHandler.InvalidateTopicExistsCache(topic) - if h.seaweedMQHandler.TopicExists(topic) { - glog.V(3).Infof("[METADATA v%d] Topic %s found on broker after cache refresh", apiVersion, topic) - topicsToReturn = append(topicsToReturn, topic) - } else { - glog.V(3).Infof("[METADATA v%d] Topic %s not found on broker, auto-creating with default partitions", apiVersion, topic) - // Auto-create non-system topics with default partitions (matches Kafka behavior) - if err := h.createTopicWithSchemaSupport(topic, h.GetDefaultPartitions()); err != nil { - glog.V(2).Infof("[METADATA v%d] Failed to auto-create topic %s: %v", apiVersion, topic, err) - // Don't add to topicsToReturn - client will get UNKNOWN_TOPIC_OR_PARTITION - } else { - glog.V(2).Infof("[METADATA v%d] Successfully auto-created topic %s", apiVersion, topic) - topicsToReturn = append(topicsToReturn, topic) - } - } - } - } - glog.V(3).Infof("[METADATA v%d] Returning topics: %v (requested: %v)", apiVersion, topicsToReturn, requestedTopics) - } - - var buf bytes.Buffer - - // Correlation ID (4 bytes) - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - - // ThrottleTimeMs (4 bytes) - v3+ addition - binary.Write(&buf, binary.BigEndian, int32(0)) // No throttling - - // Brokers array (4 bytes length + brokers) - 1 broker (this gateway) - binary.Write(&buf, binary.BigEndian, int32(1)) - - // Get advertised address for client connections - host, port := h.GetAdvertisedAddress(h.GetGatewayAddress()) - - nodeID := h.GetNodeID() // Get consistent node ID for this gateway - - // Broker: node_id(4) + host(STRING) + port(4) + rack(STRING) + cluster_id(NULLABLE_STRING) - binary.Write(&buf, binary.BigEndian, nodeID) - - // Host (STRING: 2 bytes length + data) - validate length fits in int16 - if len(host) > 32767 { - return nil, fmt.Errorf("host name too long: %d bytes", len(host)) - } - binary.Write(&buf, binary.BigEndian, int16(len(host))) - buf.WriteString(host) - - // Port (4 bytes) - validate port range - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port number: %d", port) - } - binary.Write(&buf, binary.BigEndian, int32(port)) - - // Rack (STRING: 2 bytes length + data) - v1+ addition, non-nullable - binary.Write(&buf, binary.BigEndian, int16(0)) // Empty string - - // ClusterID (NULLABLE_STRING: 2 bytes length + data) - v2+ addition - // Schema Registry requires a non-null cluster ID - clusterID := "seaweedfs-kafka-gateway" - binary.Write(&buf, binary.BigEndian, int16(len(clusterID))) - buf.WriteString(clusterID) - - // ControllerID (4 bytes) - v1+ addition - binary.Write(&buf, binary.BigEndian, nodeID) - - // Topics array (4 bytes length + topics) - binary.Write(&buf, binary.BigEndian, int32(len(topicsToReturn))) - - for _, topicName := range topicsToReturn { - // ErrorCode (2 bytes) - binary.Write(&buf, binary.BigEndian, int16(0)) - - // Name (STRING: 2 bytes length + data) - binary.Write(&buf, binary.BigEndian, int16(len(topicName))) - buf.WriteString(topicName) - - // IsInternal (1 byte) - v1+ addition - buf.WriteByte(0) // false - - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topicName) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // Partitions array (4 bytes length + partitions) - binary.Write(&buf, binary.BigEndian, partitionCount) - - // Create partition entries for each partition - for partitionID := int32(0); partitionID < partitionCount; partitionID++ { - binary.Write(&buf, binary.BigEndian, int16(0)) // ErrorCode - binary.Write(&buf, binary.BigEndian, partitionID) // PartitionIndex - binary.Write(&buf, binary.BigEndian, nodeID) // LeaderID - - // LeaderEpoch (4 bytes) - v7+ addition - if apiVersion >= 7 { - binary.Write(&buf, binary.BigEndian, int32(0)) // Leader epoch 0 - } - - // ReplicaNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 replica - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - - // IsrNodes array (4 bytes length + nodes) - binary.Write(&buf, binary.BigEndian, int32(1)) // 1 ISR node - binary.Write(&buf, binary.BigEndian, nodeID) // NodeID 1 - - // OfflineReplicas array (4 bytes length + nodes) - v5+ addition - binary.Write(&buf, binary.BigEndian, int32(0)) // No offline replicas - } - } - - // ClusterAuthorizedOperations (4 bytes) - v8+ addition - if apiVersion >= 8 { - binary.Write(&buf, binary.BigEndian, int32(-2147483648)) // All operations allowed (bit mask) - } - - response := buf.Bytes() - - // Detailed logging for Metadata response - maxDisplay := len(response) - if maxDisplay > 50 { - maxDisplay = 50 - } - if len(response) > 100 { - } - - return response, nil -} - -func (h *Handler) parseMetadataTopics(requestBody []byte) []string { - // Support both v0/v1 parsing: v1 payload starts directly with topics array length (int32), - // while older assumptions may have included a client_id string first. - if len(requestBody) < 4 { - return []string{} - } - - // Try path A: interpret first 4 bytes as topics_count - offset := 0 - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - if topicsCount == 0xFFFFFFFF { // -1 means all topics - return []string{} - } - if topicsCount <= 1000000 { // sane bound - offset += 4 - topics := make([]string, 0, topicsCount) - for i := uint32(0); i < topicsCount && offset+2 <= len(requestBody); i++ { - nameLen := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - if offset+nameLen > len(requestBody) { - break - } - topics = append(topics, string(requestBody[offset:offset+nameLen])) - offset += nameLen - } - return topics - } - - // Path B: assume leading client_id string then topics_count - if len(requestBody) < 6 { - return []string{} - } - clientIDLen := int(binary.BigEndian.Uint16(requestBody[0:2])) - offset = 2 + clientIDLen - if len(requestBody) < offset+4 { - return []string{} - } - topicsCount = binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - if topicsCount == 0xFFFFFFFF { - return []string{} - } - topics := make([]string, 0, topicsCount) - for i := uint32(0); i < topicsCount && offset+2 <= len(requestBody); i++ { - nameLen := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - if offset+nameLen > len(requestBody) { - break - } - topics = append(topics, string(requestBody[offset:offset+nameLen])) - offset += nameLen - } - return topics -} - -func (h *Handler) handleListOffsets(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse minimal request to understand what's being asked (header already stripped) - offset := 0 - - - maxBytes := len(requestBody) - if maxBytes > 64 { - maxBytes = 64 - } - - // v1+ has replica_id(4) - if apiVersion >= 1 { - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("ListOffsets v%d request missing replica_id", apiVersion) - } - _ = int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) // replicaID - offset += 4 - } - - // v2+ adds isolation_level(1) - if apiVersion >= 2 { - if len(requestBody) < offset+1 { - return nil, fmt.Errorf("ListOffsets v%d request missing isolation_level", apiVersion) - } - _ = requestBody[offset] // isolationLevel - offset += 1 - } - - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("ListOffsets request missing topics count") - } - - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Throttle time (4 bytes, 0 = no throttling) - v2+ only - if apiVersion >= 2 { - response = append(response, 0, 0, 0, 0) - } - - // Topics count (will be updated later with actual count) - topicsCountBytes := make([]byte, 4) - topicsCountOffset := len(response) // Remember where to update the count - binary.BigEndian.PutUint32(topicsCountBytes, topicsCount) - response = append(response, topicsCountBytes...) - - // Track how many topics we actually process - actualTopicsCount := uint32(0) - - // Process each requested topic - for i := uint32(0); i < topicsCount && offset < len(requestBody); i++ { - if len(requestBody) < offset+2 { - break - } - - // Parse topic name - topicNameSize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(topicNameSize)+4 { - break - } - - topicName := requestBody[offset : offset+int(topicNameSize)] - offset += int(topicNameSize) - - // Parse partitions count for this topic - partitionsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Response: topic_name_size(2) + topic_name + partitions_array - response = append(response, byte(topicNameSize>>8), byte(topicNameSize)) - response = append(response, topicName...) - - partitionsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsCountBytes, partitionsCount) - response = append(response, partitionsCountBytes...) - - // Process each partition - for j := uint32(0); j < partitionsCount && offset+12 <= len(requestBody); j++ { - // Parse partition request: partition_id(4) + timestamp(8) - partitionID := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - timestamp := int64(binary.BigEndian.Uint64(requestBody[offset+4 : offset+12])) - offset += 12 - - // Response: partition_id(4) + error_code(2) + timestamp(8) + offset(8) - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, partitionID) - response = append(response, partitionIDBytes...) - - // Error code (0 = no error) - response = append(response, 0, 0) - - // Use direct SMQ reading - no ledgers needed - // SMQ handles offset management internally - var responseTimestamp int64 - var responseOffset int64 - - switch timestamp { - case -2: // earliest offset - // Get the actual earliest offset from SMQ - earliestOffset, err := h.seaweedMQHandler.GetEarliestOffset(string(topicName), int32(partitionID)) - if err != nil { - responseOffset = 0 // fallback to 0 - } else { - responseOffset = earliestOffset - } - responseTimestamp = 0 // No specific timestamp for earliest - - case -1: // latest offset - // Get the actual latest offset from SMQ - if h.seaweedMQHandler == nil { - responseOffset = 0 - } else { - latestOffset, err := h.seaweedMQHandler.GetLatestOffset(string(topicName), int32(partitionID)) - if err != nil { - responseOffset = 0 // fallback to 0 - } else { - responseOffset = latestOffset - } - } - responseTimestamp = 0 // No specific timestamp for latest - default: // specific timestamp - find offset by timestamp - // For timestamp-based lookup, we need to implement this properly - // For now, return 0 as fallback - responseOffset = 0 - responseTimestamp = timestamp - } - - // Ensure we never return a timestamp as offset - this was the bug! - if responseOffset > 1000000000 { // If offset looks like a timestamp - responseOffset = 0 - } - - timestampBytes := make([]byte, 8) - binary.BigEndian.PutUint64(timestampBytes, uint64(responseTimestamp)) - response = append(response, timestampBytes...) - - offsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(offsetBytes, uint64(responseOffset)) - response = append(response, offsetBytes...) - } - - // Successfully processed this topic - actualTopicsCount++ - } - - // Update the topics count in the response header with the actual count - // This prevents ErrIncompleteResponse when request parsing fails mid-way - if actualTopicsCount != topicsCount { - binary.BigEndian.PutUint32(response[topicsCountOffset:topicsCountOffset+4], actualTopicsCount) - } else { - } - - if len(response) > 0 { - respPreview := len(response) - if respPreview > 32 { - respPreview = 32 - } - } - return response, nil - -} - -func (h *Handler) handleCreateTopics(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - if len(requestBody) < 2 { - return nil, fmt.Errorf("CreateTopics request too short") - } - - // Parse based on API version - switch apiVersion { - case 0, 1: - response, err := h.handleCreateTopicsV0V1(correlationID, requestBody) - return response, err - case 2, 3, 4: - // kafka-go sends v2-4 in regular format, not compact - response, err := h.handleCreateTopicsV2To4(correlationID, requestBody) - return response, err - case 5: - // v5+ uses flexible format with compact arrays - response, err := h.handleCreateTopicsV2Plus(correlationID, apiVersion, requestBody) - return response, err - default: - return nil, fmt.Errorf("unsupported CreateTopics API version: %d", apiVersion) - } -} - -// handleCreateTopicsV2To4 handles CreateTopics API versions 2-4 (auto-detect regular vs compact format) -func (h *Handler) handleCreateTopicsV2To4(correlationID uint32, requestBody []byte) ([]byte, error) { - // Auto-detect format: kafka-go sends regular format, tests send compact format - if len(requestBody) < 1 { - return nil, fmt.Errorf("CreateTopics v2-4 request too short") - } - - // Detect format by checking first byte - // Compact format: first byte is compact array length (usually 0x02 for 1 topic) - // Regular format: first 4 bytes are regular array count (usually 0x00000001 for 1 topic) - isCompactFormat := false - if len(requestBody) >= 4 { - // Check if this looks like a regular 4-byte array count - regularCount := binary.BigEndian.Uint32(requestBody[0:4]) - // If the "regular count" is very large (> 1000), it's probably compact format - // Also check if first byte is small (typical compact array length) - if regularCount > 1000 || (requestBody[0] <= 10 && requestBody[0] > 0) { - isCompactFormat = true - } - } else if requestBody[0] <= 10 && requestBody[0] > 0 { - isCompactFormat = true - } - - if isCompactFormat { - // Delegate to the compact format handler - response, err := h.handleCreateTopicsV2Plus(correlationID, 2, requestBody) - return response, err - } - - // Handle regular format - offset := 0 - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4 request too short for topics array") - } - - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Parse topics - topics := make([]struct { - name string - partitions uint32 - replication uint16 - }, 0, topicsCount) - for i := uint32(0); i < topicsCount; i++ { - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated topic name length") - } - nameLen := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - if len(requestBody) < offset+int(nameLen) { - return nil, fmt.Errorf("CreateTopics v2-4: truncated topic name") - } - topicName := string(requestBody[offset : offset+int(nameLen)]) - offset += int(nameLen) - - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated num_partitions") - } - numPartitions := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated replication_factor") - } - replication := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - // Assignments array (array of partition assignments) - skip contents - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated assignments count") - } - assignments := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - for j := uint32(0); j < assignments; j++ { - // partition_id (int32) + replicas (array int32) - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated assignment partition id") - } - offset += 4 - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated replicas count") - } - replicasCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - // skip replica ids - offset += int(replicasCount) * 4 - } - - // Configs array (array of (name,value) strings) - skip contents - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated configs count") - } - configs := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - for j := uint32(0); j < configs; j++ { - // name (string) - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated config name length") - } - nameLen := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 + int(nameLen) - // value (nullable string) - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("CreateTopics v2-4: truncated config value length") - } - valueLen := int16(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - if valueLen >= 0 { - offset += int(valueLen) - } - } - - topics = append(topics, struct { - name string - partitions uint32 - replication uint16 - }{topicName, numPartitions, replication}) - } - - // timeout_ms - if len(requestBody) >= offset+4 { - _ = binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - } - // validate_only (boolean) - if len(requestBody) >= offset+1 { - _ = requestBody[offset] - offset += 1 - } - - // Build response - response := make([]byte, 0, 128) - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - // throttle_time_ms (4 bytes) - response = append(response, 0, 0, 0, 0) - // topics array count (int32) - countBytes := make([]byte, 4) - binary.BigEndian.PutUint32(countBytes, uint32(len(topics))) - response = append(response, countBytes...) - // per-topic responses - for _, t := range topics { - // topic name (string) - nameLen := make([]byte, 2) - binary.BigEndian.PutUint16(nameLen, uint16(len(t.name))) - response = append(response, nameLen...) - response = append(response, []byte(t.name)...) - // error_code (int16) - var errCode uint16 = 0 - if h.seaweedMQHandler.TopicExists(t.name) { - errCode = 36 // TOPIC_ALREADY_EXISTS - } else if t.partitions == 0 { - errCode = 37 // INVALID_PARTITIONS - } else if t.replication == 0 { - errCode = 38 // INVALID_REPLICATION_FACTOR - } else { - // Use schema-aware topic creation - if err := h.createTopicWithSchemaSupport(t.name, int32(t.partitions)); err != nil { - errCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - } - } - eb := make([]byte, 2) - binary.BigEndian.PutUint16(eb, errCode) - response = append(response, eb...) - // error_message (nullable string) -> null - response = append(response, 0xFF, 0xFF) - } - - return response, nil -} - -func (h *Handler) handleCreateTopicsV0V1(correlationID uint32, requestBody []byte) ([]byte, error) { - - if len(requestBody) < 4 { - return nil, fmt.Errorf("CreateTopics v0/v1 request too short") - } - - offset := 0 - - // Parse topics array (regular array format: count + topics) - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Build response - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Topics array count (4 bytes in v0/v1) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, topicsCount) - response = append(response, topicsCountBytes...) - - // Process each topic - for i := uint32(0); i < topicsCount && offset < len(requestBody); i++ { - // Parse topic name (regular string: length + bytes) - if len(requestBody) < offset+2 { - break - } - topicNameLength := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(topicNameLength) { - break - } - topicName := string(requestBody[offset : offset+int(topicNameLength)]) - offset += int(topicNameLength) - - // Parse num_partitions (4 bytes) - if len(requestBody) < offset+4 { - break - } - numPartitions := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Parse replication_factor (2 bytes) - if len(requestBody) < offset+2 { - break - } - replicationFactor := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - // Parse assignments array (4 bytes count, then assignments) - if len(requestBody) < offset+4 { - break - } - assignmentsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Skip assignments for now (simplified) - for j := uint32(0); j < assignmentsCount && offset < len(requestBody); j++ { - // Skip partition_id (4 bytes) - if len(requestBody) >= offset+4 { - offset += 4 - } - // Skip replicas array (4 bytes count + replica_ids) - if len(requestBody) >= offset+4 { - replicasCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - offset += int(replicasCount) * 4 // Skip replica IDs - } - } - - // Parse configs array (4 bytes count, then configs) - if len(requestBody) >= offset+4 { - configsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Skip configs (simplified) - for j := uint32(0); j < configsCount && offset < len(requestBody); j++ { - // Skip config name (string: 2 bytes length + bytes) - if len(requestBody) >= offset+2 { - configNameLength := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 + int(configNameLength) - } - // Skip config value (string: 2 bytes length + bytes) - if len(requestBody) >= offset+2 { - configValueLength := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 + int(configValueLength) - } - } - } - - // Build response for this topic - // Topic name (string: length + bytes) - topicNameLengthBytes := make([]byte, 2) - binary.BigEndian.PutUint16(topicNameLengthBytes, uint16(len(topicName))) - response = append(response, topicNameLengthBytes...) - response = append(response, []byte(topicName)...) - - // Determine error code and message - var errorCode uint16 = 0 - - // Apply defaults for invalid values - if numPartitions <= 0 { - numPartitions = uint32(h.GetDefaultPartitions()) // Use configurable default - } - if replicationFactor <= 0 { - replicationFactor = 1 // Default to 1 replica - } - - // Use SeaweedMQ integration - if h.seaweedMQHandler.TopicExists(topicName) { - errorCode = 36 // TOPIC_ALREADY_EXISTS - } else { - // Create the topic in SeaweedMQ with schema support - if err := h.createTopicWithSchemaSupport(topicName, int32(numPartitions)); err != nil { - errorCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - } - } - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, errorCode) - response = append(response, errorCodeBytes...) - } - - // Parse timeout_ms (4 bytes) - at the end of request - if len(requestBody) >= offset+4 { - _ = binary.BigEndian.Uint32(requestBody[offset : offset+4]) // timeoutMs - offset += 4 - } - - // Parse validate_only (1 byte) - only in v1 - if len(requestBody) >= offset+1 { - _ = requestBody[offset] != 0 // validateOnly - } - - return response, nil -} - -// handleCreateTopicsV2Plus handles CreateTopics API versions 2+ (flexible versions with compact arrays/strings) -// For simplicity and consistency with existing response builder, this parses the flexible request, -// converts it into the non-flexible v2-v4 body format, and reuses handleCreateTopicsV2To4 to build the response. -func (h *Handler) handleCreateTopicsV2Plus(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - offset := 0 - - // ADMIN CLIENT COMPATIBILITY FIX: - // AdminClient's CreateTopics v5 request DOES start with top-level tagged fields (usually empty) - // Parse them first, then the topics compact array - - // Parse top-level tagged fields first (usually 0x00 for empty) - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - // Don't fail - AdminClient might not always include tagged fields properly - // Just log and continue with topics parsing - } else { - offset += consumed - } - - // Topics (compact array) - Now correctly positioned after tagged fields - topicsCount, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topics compact array: %w", apiVersion, err) - } - offset += consumed - - type topicSpec struct { - name string - partitions uint32 - replication uint16 - } - topics := make([]topicSpec, 0, topicsCount) - - for i := uint32(0); i < topicsCount; i++ { - // Topic name (compact string) - name, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] name: %w", apiVersion, i, err) - } - offset += consumed - - if len(requestBody) < offset+6 { - return nil, fmt.Errorf("CreateTopics v%d: truncated partitions/replication for topic[%d]", apiVersion, i) - } - - partitions := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - replication := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - // ADMIN CLIENT COMPATIBILITY: AdminClient uses little-endian for replication factor - // This violates Kafka protocol spec but we need to handle it for compatibility - if replication == 256 { - replication = 1 // AdminClient sent 0x01 0x00, intended as little-endian 1 - } - - // Apply defaults for invalid values - if partitions <= 0 { - partitions = uint32(h.GetDefaultPartitions()) // Use configurable default - } - if replication <= 0 { - replication = 1 // Default to 1 replica - } - - // FIX 2: Assignments (compact array) - this was missing! - assignCount, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] assignments array: %w", apiVersion, i, err) - } - offset += consumed - - // Skip assignment entries (partition_id + replicas array) - for j := uint32(0); j < assignCount; j++ { - // partition_id (int32) - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v%d: truncated assignment[%d] partition_id", apiVersion, j) - } - offset += 4 - - // replicas (compact array of int32) - replicasCount, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode assignment[%d] replicas: %w", apiVersion, j, err) - } - offset += consumed - - // Skip replica broker IDs (int32 each) - if len(requestBody) < offset+int(replicasCount)*4 { - return nil, fmt.Errorf("CreateTopics v%d: truncated assignment[%d] replicas", apiVersion, j) - } - offset += int(replicasCount) * 4 - - // Assignment tagged fields - _, consumed, err = DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode assignment[%d] tagged fields: %w", apiVersion, j, err) - } - offset += consumed - } - - // Configs (compact array) - skip entries - cfgCount, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] configs array: %w", apiVersion, i, err) - } - offset += consumed - - for j := uint32(0); j < cfgCount; j++ { - // name (compact string) - _, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] config[%d] name: %w", apiVersion, i, j, err) - } - offset += consumed - - // value (nullable compact string) - _, consumed, err = DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] config[%d] value: %w", apiVersion, i, j, err) - } - offset += consumed - - // tagged fields for each config - _, consumed, err = DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] config[%d] tagged fields: %w", apiVersion, i, j, err) - } - offset += consumed - } - - // Tagged fields for topic - _, consumed, err = DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("CreateTopics v%d: decode topic[%d] tagged fields: %w", apiVersion, i, err) - } - offset += consumed - - topics = append(topics, topicSpec{name: name, partitions: partitions, replication: replication}) - } - - for range topics { - } - - // timeout_ms (int32) - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("CreateTopics v%d: missing timeout_ms", apiVersion) - } - timeoutMs := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // validate_only (boolean) - if len(requestBody) < offset+1 { - return nil, fmt.Errorf("CreateTopics v%d: missing validate_only flag", apiVersion) - } - validateOnly := requestBody[offset] != 0 - offset += 1 - - // Remaining bytes after parsing - could be additional fields - if offset < len(requestBody) { - } - - // Reconstruct a non-flexible v2-like request body and reuse existing handler - // Format: topics(ARRAY) + timeout_ms(INT32) + validate_only(BOOLEAN) - var legacyBody []byte - - // topics count (int32) - legacyBody = append(legacyBody, 0, 0, 0, byte(len(topics))) - if len(topics) > 0 { - legacyBody[len(legacyBody)-1] = byte(len(topics)) - } - - for _, t := range topics { - // topic name (STRING) - nameLen := uint16(len(t.name)) - legacyBody = append(legacyBody, byte(nameLen>>8), byte(nameLen)) - legacyBody = append(legacyBody, []byte(t.name)...) - - // num_partitions (INT32) - legacyBody = append(legacyBody, byte(t.partitions>>24), byte(t.partitions>>16), byte(t.partitions>>8), byte(t.partitions)) - - // replication_factor (INT16) - legacyBody = append(legacyBody, byte(t.replication>>8), byte(t.replication)) - - // assignments array (INT32 count = 0) - legacyBody = append(legacyBody, 0, 0, 0, 0) - - // configs array (INT32 count = 0) - legacyBody = append(legacyBody, 0, 0, 0, 0) - } - - // timeout_ms - legacyBody = append(legacyBody, byte(timeoutMs>>24), byte(timeoutMs>>16), byte(timeoutMs>>8), byte(timeoutMs)) - - // validate_only - if validateOnly { - legacyBody = append(legacyBody, 1) - } else { - legacyBody = append(legacyBody, 0) - } - - // Build response directly instead of delegating to avoid circular dependency - response := make([]byte, 0, 128) - - // NOTE: Correlation ID and header tagged fields are handled by writeResponseWithHeader - // Do NOT include them in the response body - - // throttle_time_ms (4 bytes) - first field in CreateTopics response body - response = append(response, 0, 0, 0, 0) - - // topics (compact array) - V5 FLEXIBLE FORMAT - topicCount := len(topics) - - // Debug: log response size at each step - debugResponseSize := func(step string) { - } - debugResponseSize("After correlation ID and throttle_time_ms") - - // Compact array: length is encoded as UNSIGNED_VARINT(actualLength + 1) - response = append(response, EncodeUvarint(uint32(topicCount+1))...) - debugResponseSize("After topics array length") - - // For each topic - for _, t := range topics { - // name (compact string): length is encoded as UNSIGNED_VARINT(actualLength + 1) - nameBytes := []byte(t.name) - response = append(response, EncodeUvarint(uint32(len(nameBytes)+1))...) - response = append(response, nameBytes...) - - // TopicId - Not present in v5, only added in v7+ - // v5 CreateTopics response does not include TopicId field - - // error_code (int16) - var errCode uint16 = 0 - - // ADMIN CLIENT COMPATIBILITY: Apply defaults before error checking - actualPartitions := t.partitions - if actualPartitions == 0 { - actualPartitions = 1 // Default to 1 partition if 0 requested - } - actualReplication := t.replication - if actualReplication == 0 { - actualReplication = 1 // Default to 1 replication if 0 requested - } - - // ADMIN CLIENT COMPATIBILITY: Always return success for existing topics - // AdminClient expects topic creation to succeed, even if topic already exists - if h.seaweedMQHandler.TopicExists(t.name) { - errCode = 0 // SUCCESS - AdminClient can handle this gracefully - } else { - // Use corrected values for error checking and topic creation with schema support - if err := h.createTopicWithSchemaSupport(t.name, int32(actualPartitions)); err != nil { - errCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - } - } - eb := make([]byte, 2) - binary.BigEndian.PutUint16(eb, errCode) - response = append(response, eb...) - - // error_message (compact nullable string) - ADMINCLIENT 7.4.0-CE COMPATIBILITY FIX - // For "_schemas" topic, send null for byte-level compatibility with Java reference - // For other topics, send empty string to avoid NPE in AdminClient response handling - if t.name == "_schemas" { - response = append(response, 0) // Null = 0 - } else { - response = append(response, 1) // Empty string = 1 (0 chars + 1) - } - - // ADDED FOR V5: num_partitions (int32) - // ADMIN CLIENT COMPATIBILITY: Use corrected values from error checking logic - partBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partBytes, actualPartitions) - response = append(response, partBytes...) - - // ADDED FOR V5: replication_factor (int16) - replBytes := make([]byte, 2) - binary.BigEndian.PutUint16(replBytes, actualReplication) - response = append(response, replBytes...) - - // configs (compact nullable array) - ADDED FOR V5 - // ADMINCLIENT 7.4.0-CE NPE FIX: Send empty configs array instead of null - // AdminClient 7.4.0-ce has NPE when configs=null but were requested - // Empty array = 1 (0 configs + 1), still achieves ~30-byte response - response = append(response, 1) // Empty configs array = 1 (0 configs + 1) - - // Tagged fields for each topic - V5 format per Kafka source - // Count tagged fields (topicConfigErrorCode only if != 0) - topicConfigErrorCode := uint16(0) // No error - numTaggedFields := 0 - if topicConfigErrorCode != 0 { - numTaggedFields = 1 - } - - // Write tagged fields count - response = append(response, EncodeUvarint(uint32(numTaggedFields))...) - - // Write tagged fields (only if topicConfigErrorCode != 0) - if topicConfigErrorCode != 0 { - // Tag 0: TopicConfigErrorCode - response = append(response, EncodeUvarint(0)...) // Tag number 0 - response = append(response, EncodeUvarint(2)...) // Length (int16 = 2 bytes) - topicConfigErrBytes := make([]byte, 2) - binary.BigEndian.PutUint16(topicConfigErrBytes, topicConfigErrorCode) - response = append(response, topicConfigErrBytes...) - } - - debugResponseSize(fmt.Sprintf("After topic '%s'", t.name)) - } - - // Top-level tagged fields for v5 flexible response (empty) - response = append(response, 0) // Empty tagged fields = 0 - debugResponseSize("Final response") - - return response, nil -} - -func (h *Handler) handleDeleteTopics(correlationID uint32, requestBody []byte) ([]byte, error) { - // Parse minimal DeleteTopics request - // Request format: client_id + timeout(4) + topics_array - - if len(requestBody) < 6 { // client_id_size(2) + timeout(4) - return nil, fmt.Errorf("DeleteTopics request too short") - } - - // Skip client_id - clientIDSize := binary.BigEndian.Uint16(requestBody[0:2]) - offset := 2 + int(clientIDSize) - - if len(requestBody) < offset+8 { // timeout(4) + topics_count(4) - return nil, fmt.Errorf("DeleteTopics request missing data") - } - - // Skip timeout - offset += 4 - - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Throttle time (4 bytes, 0 = no throttling) - response = append(response, 0, 0, 0, 0) - - // Topics count (same as request) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, topicsCount) - response = append(response, topicsCountBytes...) - - // Process each topic (using SeaweedMQ handler) - - for i := uint32(0); i < topicsCount && offset < len(requestBody); i++ { - if len(requestBody) < offset+2 { - break - } - - // Parse topic name - topicNameSize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(topicNameSize) { - break - } - - topicName := string(requestBody[offset : offset+int(topicNameSize)]) - offset += int(topicNameSize) - - // Response: topic_name + error_code(2) + error_message - response = append(response, byte(topicNameSize>>8), byte(topicNameSize)) - response = append(response, []byte(topicName)...) - - // Check if topic exists and delete it - var errorCode uint16 = 0 - var errorMessage string = "" - - // Use SeaweedMQ integration - if !h.seaweedMQHandler.TopicExists(topicName) { - errorCode = 3 // UNKNOWN_TOPIC_OR_PARTITION - errorMessage = "Unknown topic" - } else { - // Delete the topic from SeaweedMQ - if err := h.seaweedMQHandler.DeleteTopic(topicName); err != nil { - errorCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - errorMessage = err.Error() - } - } - - // Error code - response = append(response, byte(errorCode>>8), byte(errorCode)) - - // Error message (nullable string) - if errorMessage == "" { - response = append(response, 0xFF, 0xFF) // null string - } else { - errorMsgLen := uint16(len(errorMessage)) - response = append(response, byte(errorMsgLen>>8), byte(errorMsgLen)) - response = append(response, []byte(errorMessage)...) - } - } - - return response, nil -} - -// validateAPIVersion checks if we support the requested API version -func (h *Handler) validateAPIVersion(apiKey, apiVersion uint16) error { - supportedVersions := map[APIKey][2]uint16{ - APIKeyApiVersions: {0, 4}, // ApiVersions: v0-v4 (Kafka 8.0.0 compatibility) - APIKeyMetadata: {0, 7}, // Metadata: v0-v7 - APIKeyProduce: {0, 7}, // Produce: v0-v7 - APIKeyFetch: {0, 7}, // Fetch: v0-v7 - APIKeyListOffsets: {0, 2}, // ListOffsets: v0-v2 - APIKeyCreateTopics: {0, 5}, // CreateTopics: v0-v5 (updated to match implementation) - APIKeyDeleteTopics: {0, 4}, // DeleteTopics: v0-v4 - APIKeyFindCoordinator: {0, 3}, // FindCoordinator: v0-v3 (v3+ uses flexible format) - APIKeyJoinGroup: {0, 6}, // JoinGroup: cap to v6 (first flexible version) - APIKeySyncGroup: {0, 5}, // SyncGroup: v0-v5 - APIKeyOffsetCommit: {0, 2}, // OffsetCommit: v0-v2 - APIKeyOffsetFetch: {0, 5}, // OffsetFetch: v0-v5 (updated to match implementation) - APIKeyHeartbeat: {0, 4}, // Heartbeat: v0-v4 - APIKeyLeaveGroup: {0, 4}, // LeaveGroup: v0-v4 - APIKeyDescribeGroups: {0, 5}, // DescribeGroups: v0-v5 - APIKeyListGroups: {0, 4}, // ListGroups: v0-v4 - APIKeyDescribeConfigs: {0, 4}, // DescribeConfigs: v0-v4 - APIKeyInitProducerId: {0, 4}, // InitProducerId: v0-v4 - APIKeyDescribeCluster: {0, 1}, // DescribeCluster: v0-v1 (KIP-919, AdminClient compatibility) - } - - if versionRange, exists := supportedVersions[APIKey(apiKey)]; exists { - minVer, maxVer := versionRange[0], versionRange[1] - if apiVersion < minVer || apiVersion > maxVer { - return fmt.Errorf("unsupported API version %d for API key %d (supported: %d-%d)", - apiVersion, apiKey, minVer, maxVer) - } - return nil - } - - return fmt.Errorf("unsupported API key: %d", apiKey) -} - -// buildUnsupportedVersionResponse creates a proper Kafka error response -func (h *Handler) buildUnsupportedVersionResponse(correlationID uint32, apiKey, apiVersion uint16) ([]byte, error) { - errorMsg := fmt.Sprintf("Unsupported version %d for API key", apiVersion) - return BuildErrorResponseWithMessage(correlationID, ErrorCodeUnsupportedVersion, errorMsg), nil -} - -// handleMetadata routes to the appropriate version-specific handler -func (h *Handler) handleMetadata(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - var response []byte - var err error - - switch apiVersion { - case 0: - response, err = h.HandleMetadataV0(correlationID, requestBody) - case 1: - response, err = h.HandleMetadataV1(correlationID, requestBody) - case 2: - response, err = h.HandleMetadataV2(correlationID, requestBody) - case 3, 4: - response, err = h.HandleMetadataV3V4(correlationID, requestBody) - case 5, 6: - response, err = h.HandleMetadataV5V6(correlationID, requestBody) - case 7: - response, err = h.HandleMetadataV7(correlationID, requestBody) - default: - // For versions > 7, use the V7 handler (flexible format) - if apiVersion > 7 { - response, err = h.HandleMetadataV7(correlationID, requestBody) - } else { - err = fmt.Errorf("metadata version %d not implemented yet", apiVersion) - } - } - - if err != nil { - } else { - } - return response, err -} - -// getAPIName returns a human-readable name for Kafka API keys (for debugging) -func getAPIName(apiKey APIKey) string { - switch apiKey { - case APIKeyProduce: - return "Produce" - case APIKeyFetch: - return "Fetch" - case APIKeyListOffsets: - return "ListOffsets" - case APIKeyMetadata: - return "Metadata" - case APIKeyOffsetCommit: - return "OffsetCommit" - case APIKeyOffsetFetch: - return "OffsetFetch" - case APIKeyFindCoordinator: - return "FindCoordinator" - case APIKeyJoinGroup: - return "JoinGroup" - case APIKeyHeartbeat: - return "Heartbeat" - case APIKeyLeaveGroup: - return "LeaveGroup" - case APIKeySyncGroup: - return "SyncGroup" - case APIKeyDescribeGroups: - return "DescribeGroups" - case APIKeyListGroups: - return "ListGroups" - case APIKeyApiVersions: - return "ApiVersions" - case APIKeyCreateTopics: - return "CreateTopics" - case APIKeyDeleteTopics: - return "DeleteTopics" - case APIKeyDescribeConfigs: - return "DescribeConfigs" - case APIKeyInitProducerId: - return "InitProducerId" - case APIKeyDescribeCluster: - return "DescribeCluster" - default: - return "Unknown" - } -} - -// handleDescribeConfigs handles DescribeConfigs API requests (API key 32) -func (h *Handler) handleDescribeConfigs(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse request to extract resources - resources, err := h.parseDescribeConfigsRequest(requestBody, apiVersion) - if err != nil { - glog.Errorf("DescribeConfigs parsing error: %v", err) - return nil, fmt.Errorf("failed to parse DescribeConfigs request: %w", err) - } - - isFlexible := apiVersion >= 4 - if !isFlexible { - // Legacy (non-flexible) response for v0-3 - response := make([]byte, 0, 2048) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Throttle time (0ms) - throttleBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throttleBytes, 0) - response = append(response, throttleBytes...) - - // Resources array length - resourcesBytes := make([]byte, 4) - binary.BigEndian.PutUint32(resourcesBytes, uint32(len(resources))) - response = append(response, resourcesBytes...) - - // For each resource, return appropriate configs - for _, resource := range resources { - resourceResponse := h.buildDescribeConfigsResourceResponse(resource, apiVersion) - response = append(response, resourceResponse...) - } - - return response, nil - } - - // Flexible response for v4+ - response := make([]byte, 0, 2048) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // throttle_time_ms (4 bytes) - response = append(response, 0, 0, 0, 0) - - // Results (compact array) - response = append(response, EncodeUvarint(uint32(len(resources)+1))...) - - for _, res := range resources { - // ErrorCode (int16) = 0 - response = append(response, 0, 0) - // ErrorMessage (compact nullable string) = null (0) - response = append(response, 0) - // ResourceType (int8) - response = append(response, byte(res.ResourceType)) - // ResourceName (compact string) - nameBytes := []byte(res.ResourceName) - response = append(response, EncodeUvarint(uint32(len(nameBytes)+1))...) - response = append(response, nameBytes...) - - // Build configs for this resource - var cfgs []ConfigEntry - if res.ResourceType == 2 { // Topic - cfgs = h.getTopicConfigs(res.ResourceName, res.ConfigNames) - // Ensure cleanup.policy is compact for _schemas - if res.ResourceName == "_schemas" { - replaced := false - for i := range cfgs { - if cfgs[i].Name == "cleanup.policy" { - cfgs[i].Value = "compact" - replaced = true - break - } - } - if !replaced { - cfgs = append(cfgs, ConfigEntry{Name: "cleanup.policy", Value: "compact"}) - } - } - } else if res.ResourceType == 4 { // Broker - cfgs = h.getBrokerConfigs(res.ConfigNames) - } else { - cfgs = []ConfigEntry{} - } - - // Configs (compact array) - response = append(response, EncodeUvarint(uint32(len(cfgs)+1))...) - - for _, cfg := range cfgs { - // name (compact string) - cb := []byte(cfg.Name) - response = append(response, EncodeUvarint(uint32(len(cb)+1))...) - response = append(response, cb...) - - // value (compact nullable string) - vb := []byte(cfg.Value) - if len(vb) == 0 { - response = append(response, 0) // null - } else { - response = append(response, EncodeUvarint(uint32(len(vb)+1))...) - response = append(response, vb...) - } - - // readOnly (bool) - if cfg.ReadOnly { - response = append(response, 1) - } else { - response = append(response, 0) - } - - // configSource (int8): DEFAULT_CONFIG = 5 - response = append(response, byte(5)) - - // isSensitive (bool) - if cfg.Sensitive { - response = append(response, 1) - } else { - response = append(response, 0) - } - - // synonyms (compact array) - empty - response = append(response, 1) - - // config_type (int8) - STRING = 1 - response = append(response, byte(1)) - - // documentation (compact nullable string) - null - response = append(response, 0) - - // per-config tagged fields (empty) - response = append(response, 0) - } - - // Per-result tagged fields (empty) - response = append(response, 0) - } - - // Top-level tagged fields (empty) - response = append(response, 0) - - return response, nil -} - -// isFlexibleResponse determines if an API response should use flexible format (with header tagged fields) -// Based on Kafka protocol specifications: most APIs become flexible at v3+, but some differ -func isFlexibleResponse(apiKey uint16, apiVersion uint16) bool { - // Reference: kafka-go/protocol/response.go:119 and sarama/response_header.go:21 - // Flexible responses have headerVersion >= 1, which adds tagged fields after correlation ID - - switch APIKey(apiKey) { - case APIKeyProduce: - return apiVersion >= 9 - case APIKeyFetch: - return apiVersion >= 12 - case APIKeyMetadata: - // Metadata v9+ uses flexible responses (v7-8 use compact arrays/strings but NOT flexible headers) - return apiVersion >= 9 - case APIKeyOffsetCommit: - return apiVersion >= 8 - case APIKeyOffsetFetch: - return apiVersion >= 6 - case APIKeyFindCoordinator: - return apiVersion >= 3 - case APIKeyJoinGroup: - return apiVersion >= 6 - case APIKeyHeartbeat: - return apiVersion >= 4 - case APIKeyLeaveGroup: - return apiVersion >= 4 - case APIKeySyncGroup: - return apiVersion >= 4 - case APIKeyApiVersions: - // AdminClient compatibility requires header version 0 (no tagged fields) - // Even though ApiVersions v3+ technically supports flexible responses, AdminClient - // expects the header to NOT include tagged fields. This is a known quirk. - return false // Always use non-flexible header for ApiVersions - case APIKeyCreateTopics: - return apiVersion >= 5 - case APIKeyDeleteTopics: - return apiVersion >= 4 - case APIKeyInitProducerId: - return apiVersion >= 2 // Flexible from v2+ (KIP-360) - case APIKeyDescribeConfigs: - return apiVersion >= 4 - case APIKeyDescribeCluster: - return true // All versions (0+) are flexible - default: - // For unknown APIs, assume non-flexible (safer default) - return false - } -} - -// writeResponseWithHeader writes a Kafka response following the wire protocol: -// [Size: 4 bytes][Correlation ID: 4 bytes][Tagged Fields (if flexible)][Body] -func (h *Handler) writeResponseWithHeader(w *bufio.Writer, correlationID uint32, apiKey uint16, apiVersion uint16, responseBody []byte, timeout time.Duration) error { - // Kafka wire protocol format (from kafka-go/protocol/response.go:116-138 and sarama/response_header.go:10-27): - // [4 bytes: size = len(everything after this)] - // [4 bytes: correlation ID] - // [varint: header tagged fields (0x00 for empty) - ONLY for flexible responses with headerVersion >= 1] - // [N bytes: response body] - - // Determine if this response should be flexible - isFlexible := isFlexibleResponse(apiKey, apiVersion) - - // Calculate total size: correlation ID (4) + tagged fields (1 if flexible) + body - totalSize := 4 + len(responseBody) - if isFlexible { - totalSize += 1 // Add 1 byte for empty tagged fields (0x00) - } - - // Build complete response in memory for hex dump logging - fullResponse := make([]byte, 0, 4+totalSize) - - // Write size - sizeBuf := make([]byte, 4) - binary.BigEndian.PutUint32(sizeBuf, uint32(totalSize)) - fullResponse = append(fullResponse, sizeBuf...) - - // Write correlation ID - correlationBuf := make([]byte, 4) - binary.BigEndian.PutUint32(correlationBuf, correlationID) - fullResponse = append(fullResponse, correlationBuf...) - - // Write header-level tagged fields for flexible responses - if isFlexible { - // Empty tagged fields = 0x00 (varint 0) - fullResponse = append(fullResponse, 0x00) - } - - // Write response body - fullResponse = append(fullResponse, responseBody...) - - // Write to connection - if _, err := w.Write(fullResponse); err != nil { - return fmt.Errorf("write response: %w", err) - } - - // Flush - if err := w.Flush(); err != nil { - return fmt.Errorf("flush response: %w", err) - } - - return nil -} - -// writeResponseWithCorrelationID is deprecated - use writeResponseWithHeader instead -// Kept for compatibility with direct callers that don't have API info -func (h *Handler) writeResponseWithCorrelationID(w *bufio.Writer, correlationID uint32, responseBody []byte, timeout time.Duration) error { - // Assume non-flexible for backward compatibility - return h.writeResponseWithHeader(w, correlationID, 0, 0, responseBody, timeout) -} - -// writeResponseWithTimeout writes a Kafka response with timeout handling -// DEPRECATED: Use writeResponseWithCorrelationID instead -func (h *Handler) writeResponseWithTimeout(w *bufio.Writer, response []byte, timeout time.Duration) error { - // This old function expects response to include correlation ID at the start - // For backward compatibility with any remaining callers - - // Write response size (4 bytes) - responseSizeBytes := make([]byte, 4) - binary.BigEndian.PutUint32(responseSizeBytes, uint32(len(response))) - - if _, err := w.Write(responseSizeBytes); err != nil { - return fmt.Errorf("write response size: %w", err) - } - - // Write response data - if _, err := w.Write(response); err != nil { - return fmt.Errorf("write response data: %w", err) - } - - // Flush the buffer - if err := w.Flush(); err != nil { - return fmt.Errorf("flush response: %w", err) - } - - return nil -} - -// EnableSchemaManagement enables schema management with the given configuration -func (h *Handler) EnableSchemaManagement(config schema.ManagerConfig) error { - manager, err := schema.NewManagerWithHealthCheck(config) - if err != nil { - return fmt.Errorf("failed to create schema manager: %w", err) - } - - h.schemaManager = manager - h.useSchema = true - - return nil -} - -// EnableBrokerIntegration enables mq.broker integration for schematized messages -func (h *Handler) EnableBrokerIntegration(brokers []string) error { - if !h.IsSchemaEnabled() { - return fmt.Errorf("schema management must be enabled before broker integration") - } - - brokerClient := schema.NewBrokerClient(schema.BrokerClientConfig{ - Brokers: brokers, - SchemaManager: h.schemaManager, - }) - - h.brokerClient = brokerClient - return nil -} - -// DisableSchemaManagement disables schema management and broker integration -func (h *Handler) DisableSchemaManagement() { - if h.brokerClient != nil { - h.brokerClient.Close() - h.brokerClient = nil - } - h.schemaManager = nil - h.useSchema = false -} - -// SetSchemaRegistryURL sets the Schema Registry URL for delayed initialization -func (h *Handler) SetSchemaRegistryURL(url string) { - h.schemaRegistryURL = url -} - -// SetDefaultPartitions sets the default partition count for auto-created topics -func (h *Handler) SetDefaultPartitions(partitions int32) { - h.defaultPartitions = partitions -} - -// GetDefaultPartitions returns the default partition count for auto-created topics -func (h *Handler) GetDefaultPartitions() int32 { - if h.defaultPartitions <= 0 { - return 4 // Fallback default - } - return h.defaultPartitions -} - -// IsSchemaEnabled returns whether schema management is enabled -func (h *Handler) IsSchemaEnabled() bool { - // Try to initialize schema management if not already done - if !h.useSchema && h.schemaRegistryURL != "" { - h.tryInitializeSchemaManagement() - } - return h.useSchema && h.schemaManager != nil -} - -// tryInitializeSchemaManagement attempts to initialize schema management -// This is called lazily when schema functionality is first needed -func (h *Handler) tryInitializeSchemaManagement() { - if h.useSchema || h.schemaRegistryURL == "" { - return // Already initialized or no URL provided - } - - schemaConfig := schema.ManagerConfig{ - RegistryURL: h.schemaRegistryURL, - } - - if err := h.EnableSchemaManagement(schemaConfig); err != nil { - return - } - -} - -// IsBrokerIntegrationEnabled returns true if broker integration is enabled -func (h *Handler) IsBrokerIntegrationEnabled() bool { - return h.IsSchemaEnabled() && h.brokerClient != nil -} - -// commitOffsetToSMQ commits offset using SMQ storage -func (h *Handler) commitOffsetToSMQ(key ConsumerOffsetKey, offsetValue int64, metadata string) error { - // Use new consumer offset storage if available, fall back to SMQ storage - if h.consumerOffsetStorage != nil { - return h.consumerOffsetStorage.CommitOffset(key.ConsumerGroup, key.Topic, key.Partition, offsetValue, metadata) - } - - // No SMQ offset storage - only use consumer offset storage - return fmt.Errorf("offset storage not initialized") -} - -// fetchOffsetFromSMQ fetches offset using SMQ storage -func (h *Handler) fetchOffsetFromSMQ(key ConsumerOffsetKey) (int64, string, error) { - // Use new consumer offset storage if available, fall back to SMQ storage - if h.consumerOffsetStorage != nil { - return h.consumerOffsetStorage.FetchOffset(key.ConsumerGroup, key.Topic, key.Partition) - } - - // SMQ offset storage removed - no fallback - return -1, "", fmt.Errorf("offset storage not initialized") -} - -// DescribeConfigsResource represents a resource in a DescribeConfigs request -type DescribeConfigsResource struct { - ResourceType int8 // 2 = Topic, 4 = Broker - ResourceName string - ConfigNames []string // Empty means return all configs -} - -// parseDescribeConfigsRequest parses a DescribeConfigs request body -func (h *Handler) parseDescribeConfigsRequest(requestBody []byte, apiVersion uint16) ([]DescribeConfigsResource, error) { - if len(requestBody) < 1 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - - // DescribeConfigs v4+ uses flexible protocol (compact arrays with varint) - isFlexible := apiVersion >= 4 - - var resourcesLength uint32 - if isFlexible { - // FIX: Skip top-level tagged fields for DescribeConfigs v4+ flexible protocol - // The request body starts with tagged fields count (usually 0x00 = empty) - _, consumed, err := DecodeTaggedFields(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("DescribeConfigs v%d: decode top-level tagged fields: %w", apiVersion, err) - } - offset += consumed - - // Resources (compact array) - Now correctly positioned after tagged fields - resourcesLength, consumed, err = DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode resources compact array: %w", err) - } - offset += consumed - } else { - // Regular array: length is int32 - if len(requestBody) < 4 { - return nil, fmt.Errorf("request too short for regular array") - } - resourcesLength = binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - } - - // Validate resources length to prevent panic - if resourcesLength > 100 { // Reasonable limit - return nil, fmt.Errorf("invalid resources length: %d", resourcesLength) - } - - resources := make([]DescribeConfigsResource, 0, resourcesLength) - - for i := uint32(0); i < resourcesLength; i++ { - if offset+1 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for resource type") - } - - // Resource type (1 byte) - resourceType := int8(requestBody[offset]) - offset++ - - // Resource name (string - compact for v4+, regular for v0-3) - var resourceName string - if isFlexible { - // Compact string: length is encoded as UNSIGNED_VARINT(actualLength + 1) - name, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode resource name compact string: %w", err) - } - resourceName = name - offset += consumed - } else { - // Regular string: length is int16 - if offset+2 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for resource name length") - } - nameLength := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - - // Validate name length to prevent panic - if nameLength < 0 || nameLength > 1000 { // Reasonable limit - return nil, fmt.Errorf("invalid resource name length: %d", nameLength) - } - - if offset+nameLength > len(requestBody) { - return nil, fmt.Errorf("insufficient data for resource name") - } - resourceName = string(requestBody[offset : offset+nameLength]) - offset += nameLength - } - - // Config names array (compact for v4+, regular for v0-3) - var configNames []string - if isFlexible { - // Compact array: length is encoded as UNSIGNED_VARINT(actualLength + 1) - // For nullable arrays, 0 means null, 1 means empty - configNamesCount, consumed, err := DecodeCompactArrayLength(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode config names compact array: %w", err) - } - offset += consumed - - // Parse each config name as compact string (if not null) - if configNamesCount > 0 { - for j := uint32(0); j < configNamesCount; j++ { - configName, consumed, err := DecodeFlexibleString(requestBody[offset:]) - if err != nil { - return nil, fmt.Errorf("decode config name[%d] compact string: %w", j, err) - } - offset += consumed - configNames = append(configNames, configName) - } - } - } else { - // Regular array: length is int32 - if offset+4 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for config names length") - } - configNamesLength := int32(binary.BigEndian.Uint32(requestBody[offset : offset+4])) - offset += 4 - - // Validate config names length to prevent panic - // Note: -1 means null/empty array in Kafka protocol - if configNamesLength < -1 || configNamesLength > 1000 { // Reasonable limit - return nil, fmt.Errorf("invalid config names length: %d", configNamesLength) - } - - // Handle null array case - if configNamesLength == -1 { - configNamesLength = 0 - } - - configNames = make([]string, 0, configNamesLength) - for j := int32(0); j < configNamesLength; j++ { - if offset+2 > len(requestBody) { - return nil, fmt.Errorf("insufficient data for config name length") - } - configNameLength := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - - // Validate config name length to prevent panic - if configNameLength < 0 || configNameLength > 500 { // Reasonable limit - return nil, fmt.Errorf("invalid config name length: %d", configNameLength) - } - - if offset+configNameLength > len(requestBody) { - return nil, fmt.Errorf("insufficient data for config name") - } - configName := string(requestBody[offset : offset+configNameLength]) - offset += configNameLength - - configNames = append(configNames, configName) - } - } - - resources = append(resources, DescribeConfigsResource{ - ResourceType: resourceType, - ResourceName: resourceName, - ConfigNames: configNames, - }) - } - - return resources, nil -} - -// buildDescribeConfigsResourceResponse builds the response for a single resource -func (h *Handler) buildDescribeConfigsResourceResponse(resource DescribeConfigsResource, apiVersion uint16) []byte { - response := make([]byte, 0, 512) - - // Error code (0 = no error) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, 0) - response = append(response, errorCodeBytes...) - - // Error message (null string = -1 length) - errorMsgBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorMsgBytes, 0xFFFF) // -1 as uint16 - response = append(response, errorMsgBytes...) - - // Resource type - response = append(response, byte(resource.ResourceType)) - - // Resource name - nameBytes := make([]byte, 2+len(resource.ResourceName)) - binary.BigEndian.PutUint16(nameBytes[0:2], uint16(len(resource.ResourceName))) - copy(nameBytes[2:], []byte(resource.ResourceName)) - response = append(response, nameBytes...) - - // Get configs for this resource - configs := h.getConfigsForResource(resource) - - // Config entries array length - configCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(configCountBytes, uint32(len(configs))) - response = append(response, configCountBytes...) - - // Add each config entry - for _, config := range configs { - configBytes := h.buildConfigEntry(config, apiVersion) - response = append(response, configBytes...) - } - - return response -} - -// ConfigEntry represents a single configuration entry -type ConfigEntry struct { - Name string - Value string - ReadOnly bool - IsDefault bool - Sensitive bool -} - -// getConfigsForResource returns appropriate configs for a resource -func (h *Handler) getConfigsForResource(resource DescribeConfigsResource) []ConfigEntry { - switch resource.ResourceType { - case 2: // Topic - return h.getTopicConfigs(resource.ResourceName, resource.ConfigNames) - case 4: // Broker - return h.getBrokerConfigs(resource.ConfigNames) - default: - return []ConfigEntry{} - } -} - -// getTopicConfigs returns topic-level configurations -func (h *Handler) getTopicConfigs(topicName string, requestedConfigs []string) []ConfigEntry { - // Default topic configs that admin clients commonly request - allConfigs := map[string]ConfigEntry{ - "cleanup.policy": { - Name: "cleanup.policy", - Value: "delete", - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "retention.ms": { - Name: "retention.ms", - Value: "604800000", // 7 days in milliseconds - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "retention.bytes": { - Name: "retention.bytes", - Value: "-1", // Unlimited - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "segment.ms": { - Name: "segment.ms", - Value: "86400000", // 1 day in milliseconds - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "max.message.bytes": { - Name: "max.message.bytes", - Value: "1048588", // ~1MB - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "min.insync.replicas": { - Name: "min.insync.replicas", - Value: "1", - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - } - - // If specific configs requested, filter to those - if len(requestedConfigs) > 0 { - filteredConfigs := make([]ConfigEntry, 0, len(requestedConfigs)) - for _, configName := range requestedConfigs { - if config, exists := allConfigs[configName]; exists { - filteredConfigs = append(filteredConfigs, config) - } - } - return filteredConfigs - } - - // Return all configs - configs := make([]ConfigEntry, 0, len(allConfigs)) - for _, config := range allConfigs { - configs = append(configs, config) - } - return configs -} - -// getBrokerConfigs returns broker-level configurations -func (h *Handler) getBrokerConfigs(requestedConfigs []string) []ConfigEntry { - // Default broker configs that admin clients commonly request - allConfigs := map[string]ConfigEntry{ - "log.retention.hours": { - Name: "log.retention.hours", - Value: "168", // 7 days - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "log.segment.bytes": { - Name: "log.segment.bytes", - Value: "1073741824", // 1GB - ReadOnly: false, - IsDefault: true, - Sensitive: false, - }, - "num.network.threads": { - Name: "num.network.threads", - Value: "3", - ReadOnly: true, - IsDefault: true, - Sensitive: false, - }, - "num.io.threads": { - Name: "num.io.threads", - Value: "8", - ReadOnly: true, - IsDefault: true, - Sensitive: false, - }, - } - - // If specific configs requested, filter to those - if len(requestedConfigs) > 0 { - filteredConfigs := make([]ConfigEntry, 0, len(requestedConfigs)) - for _, configName := range requestedConfigs { - if config, exists := allConfigs[configName]; exists { - filteredConfigs = append(filteredConfigs, config) - } - } - return filteredConfigs - } - - // Return all configs - configs := make([]ConfigEntry, 0, len(allConfigs)) - for _, config := range allConfigs { - configs = append(configs, config) - } - return configs -} - -// buildConfigEntry builds the wire format for a single config entry -func (h *Handler) buildConfigEntry(config ConfigEntry, apiVersion uint16) []byte { - entry := make([]byte, 0, 256) - - // Config name - nameBytes := make([]byte, 2+len(config.Name)) - binary.BigEndian.PutUint16(nameBytes[0:2], uint16(len(config.Name))) - copy(nameBytes[2:], []byte(config.Name)) - entry = append(entry, nameBytes...) - - // Config value - valueBytes := make([]byte, 2+len(config.Value)) - binary.BigEndian.PutUint16(valueBytes[0:2], uint16(len(config.Value))) - copy(valueBytes[2:], []byte(config.Value)) - entry = append(entry, valueBytes...) - - // Read only flag - if config.ReadOnly { - entry = append(entry, 1) - } else { - entry = append(entry, 0) - } - - // Is default flag (only for version 0) - if apiVersion == 0 { - if config.IsDefault { - entry = append(entry, 1) - } else { - entry = append(entry, 0) - } - } - - // Config source (for versions 1-3) - if apiVersion >= 1 && apiVersion <= 3 { - // ConfigSource: 1 = DYNAMIC_TOPIC_CONFIG, 2 = DYNAMIC_BROKER_CONFIG, 4 = STATIC_BROKER_CONFIG, 5 = DEFAULT_CONFIG - configSource := int8(5) // DEFAULT_CONFIG for all our configs since they're defaults - entry = append(entry, byte(configSource)) - } - - // Sensitive flag - if config.Sensitive { - entry = append(entry, 1) - } else { - entry = append(entry, 0) - } - - // Config synonyms (for versions 1-3) - if apiVersion >= 1 && apiVersion <= 3 { - // Empty synonyms array (4 bytes for array length = 0) - synonymsLength := make([]byte, 4) - binary.BigEndian.PutUint32(synonymsLength, 0) - entry = append(entry, synonymsLength...) - } - - // Config type (for version 3 only) - if apiVersion == 3 { - configType := int8(1) // STRING type for all our configs - entry = append(entry, byte(configType)) - } - - // Config documentation (for version 3 only) - if apiVersion == 3 { - // Null documentation (length = -1) - docLength := make([]byte, 2) - binary.BigEndian.PutUint16(docLength, 0xFFFF) // -1 as uint16 - entry = append(entry, docLength...) - } - - return entry -} - -// registerSchemasViaBrokerAPI registers both key and value schemas via the broker's ConfigureTopic API -// Only the gateway leader performs the registration to avoid concurrent updates. -func (h *Handler) registerSchemasViaBrokerAPI(topicName string, valueRecordType *schema_pb.RecordType, keyRecordType *schema_pb.RecordType) error { - if valueRecordType == nil && keyRecordType == nil { - return nil - } - - // Check coordinator registry for multi-gateway deployments - // In single-gateway mode, coordinator registry may not be initialized - that's OK - if reg := h.GetCoordinatorRegistry(); reg != nil { - // Multi-gateway mode - check if we're the leader - isLeader := reg.IsLeader() - - if !isLeader { - // Not leader - in production multi-gateway setups, skip to avoid conflicts - // In single-gateway setups where leader election fails, log warning but proceed - // This ensures schema registration works even if distributed locking has issues - // Note: Schema registration is idempotent, so duplicate registrations are safe - } else { - } - } else { - // No coordinator registry - definitely single-gateway mode - } - - // Require SeaweedMQ integration to access broker - if h.seaweedMQHandler == nil { - return fmt.Errorf("no SeaweedMQ handler available for broker access") - } - - // Get broker addresses - brokerAddresses := h.seaweedMQHandler.GetBrokerAddresses() - if len(brokerAddresses) == 0 { - return fmt.Errorf("no broker addresses available") - } - - // Use the first available broker - brokerAddress := brokerAddresses[0] - - // Load security configuration - util.LoadSecurityConfiguration() - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.mq") - - // Get current topic configuration to preserve partition count - seaweedTopic := &schema_pb.Topic{ - Namespace: DefaultKafkaNamespace, - Name: topicName, - } - - return pb.WithBrokerGrpcClient(false, brokerAddress, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - // First get current configuration - getResp, err := client.GetTopicConfiguration(context.Background(), &mq_pb.GetTopicConfigurationRequest{ - Topic: seaweedTopic, - }) - if err != nil { - // Convert dual schemas to flat schema format - var flatSchema *schema_pb.RecordType - var keyColumns []string - if keyRecordType != nil || valueRecordType != nil { - flatSchema, keyColumns = mqschema.CombineFlatSchemaFromKeyValue(keyRecordType, valueRecordType) - } - - // If topic doesn't exist, create it with configurable default partition count - // Get schema format from topic config if available - schemaFormat := h.getTopicSchemaFormat(topicName) - _, err := client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{ - Topic: seaweedTopic, - PartitionCount: h.GetDefaultPartitions(), // Use configurable default - MessageRecordType: flatSchema, - KeyColumns: keyColumns, - SchemaFormat: schemaFormat, - }) - return err - } - - // Convert dual schemas to flat schema format for update - var flatSchema *schema_pb.RecordType - var keyColumns []string - if keyRecordType != nil || valueRecordType != nil { - flatSchema, keyColumns = mqschema.CombineFlatSchemaFromKeyValue(keyRecordType, valueRecordType) - } - - // Update existing topic with new schema - // Get schema format from topic config if available - schemaFormat := h.getTopicSchemaFormat(topicName) - _, err = client.ConfigureTopic(context.Background(), &mq_pb.ConfigureTopicRequest{ - Topic: seaweedTopic, - PartitionCount: getResp.PartitionCount, - MessageRecordType: flatSchema, - KeyColumns: keyColumns, - Retention: getResp.Retention, - SchemaFormat: schemaFormat, - }) - return err - }) -} - -// handleInitProducerId handles InitProducerId API requests (API key 22) -// This API is used to initialize a producer for transactional or idempotent operations -func (h *Handler) handleInitProducerId(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // InitProducerId Request Format (varies by version): - // v0-v1: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) - // v2+: transactional_id(NULLABLE_STRING) + transaction_timeout_ms(INT32) + producer_id(INT64) + producer_epoch(INT16) - // v4+: Uses flexible format with tagged fields - - - maxBytes := len(requestBody) - if maxBytes > 64 { - maxBytes = 64 - } - - offset := 0 - - // Parse transactional_id (NULLABLE_STRING or COMPACT_NULLABLE_STRING for flexible versions) - var transactionalId *string - if apiVersion >= 4 { - // Flexible version - use compact nullable string - if len(requestBody) < offset+1 { - return nil, fmt.Errorf("InitProducerId request too short for transactional_id") - } - - length := int(requestBody[offset]) - offset++ - - if length == 0 { - // Null string - transactionalId = nil - } else { - // Non-null string (length is encoded as length+1 in compact format) - actualLength := length - 1 - if len(requestBody) < offset+actualLength { - return nil, fmt.Errorf("InitProducerId request transactional_id too short") - } - if actualLength > 0 { - id := string(requestBody[offset : offset+actualLength]) - transactionalId = &id - offset += actualLength - } else { - // Empty string - id := "" - transactionalId = &id - } - } - } else { - // Non-flexible version - use regular nullable string - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("InitProducerId request too short for transactional_id length") - } - - length := int(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - - if length == 0xFFFF { - // Null string (-1 as uint16) - transactionalId = nil - } else { - if len(requestBody) < offset+length { - return nil, fmt.Errorf("InitProducerId request transactional_id too short") - } - if length > 0 { - id := string(requestBody[offset : offset+length]) - transactionalId = &id - offset += length - } else { - // Empty string - id := "" - transactionalId = &id - } - } - } - _ = transactionalId // Used for logging/tracking, but not in core logic yet - - // Parse transaction_timeout_ms (INT32) - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("InitProducerId request too short for transaction_timeout_ms") - } - _ = binary.BigEndian.Uint32(requestBody[offset : offset+4]) // transactionTimeoutMs - offset += 4 - - // For v2+, there might be additional fields, but we'll ignore them for now - // as we're providing a basic implementation - - // Build response - response := make([]byte, 0, 64) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - // Note: Header tagged fields are also handled by writeResponseWithHeader for flexible versions - - // InitProducerId Response Format: - // throttle_time_ms(INT32) + error_code(INT16) + producer_id(INT64) + producer_epoch(INT16) - // + tagged_fields (for flexible versions) - - // Throttle time (4 bytes) - v1+ - if apiVersion >= 1 { - response = append(response, 0, 0, 0, 0) // No throttling - } - - // Error code (2 bytes) - SUCCESS - response = append(response, 0, 0) // No error - - // Producer ID (8 bytes) - generate a simple producer ID - // In a real implementation, this would be managed by a transaction coordinator - producerId := int64(1000) // Simple fixed producer ID for now - producerIdBytes := make([]byte, 8) - binary.BigEndian.PutUint64(producerIdBytes, uint64(producerId)) - response = append(response, producerIdBytes...) - - // Producer epoch (2 bytes) - start with epoch 0 - response = append(response, 0, 0) // Epoch 0 - - // For flexible versions (v4+), add response body tagged fields - if apiVersion >= 4 { - response = append(response, 0x00) // Empty response body tagged fields - } - - respPreview := len(response) - if respPreview > 32 { - respPreview = 32 - } - return response, nil -} - -// createTopicWithSchemaSupport creates a topic with optional schema integration -// This function creates topics with schema support when schema management is enabled -func (h *Handler) createTopicWithSchemaSupport(topicName string, partitions int32) error { - - // For system topics like _schemas, __consumer_offsets, etc., use default schema - if isSystemTopic(topicName) { - return h.createTopicWithDefaultFlexibleSchema(topicName, partitions) - } - - // Check if Schema Registry URL is configured - if h.schemaRegistryURL != "" { - - // Try to initialize schema management if not already done - if h.schemaManager == nil { - h.tryInitializeSchemaManagement() - } - - // If schema manager is still nil after initialization attempt, Schema Registry is unavailable - if h.schemaManager == nil { - return fmt.Errorf("Schema Registry is configured at %s but unavailable - cannot create topic %s without schema validation", h.schemaRegistryURL, topicName) - } - - // Schema Registry is available - try to fetch existing schema - keyRecordType, valueRecordType, err := h.fetchSchemaForTopic(topicName) - if err != nil { - // Check if this is a connection error vs schema not found - if h.isSchemaRegistryConnectionError(err) { - return fmt.Errorf("Schema Registry is unavailable: %w", err) - } - // Schema not found - this is an error when schema management is enforced - return fmt.Errorf("schema is required for topic %s but no schema found in Schema Registry", topicName) - } - - if keyRecordType != nil || valueRecordType != nil { - // Create topic with schema from Schema Registry - return h.seaweedMQHandler.CreateTopicWithSchemas(topicName, partitions, keyRecordType, valueRecordType) - } - - // No schemas found - this is an error when schema management is enforced - return fmt.Errorf("schema is required for topic %s but no schema found in Schema Registry", topicName) - } - - // Schema Registry URL not configured - create topic without schema (backward compatibility) - return h.seaweedMQHandler.CreateTopic(topicName, partitions) -} - -// createTopicWithDefaultFlexibleSchema creates a topic with a flexible default schema -// that can handle both Avro and JSON messages when schema management is enabled -func (h *Handler) createTopicWithDefaultFlexibleSchema(topicName string, partitions int32) error { - // System topics like _schemas should be PLAIN Kafka topics without schema management - // Schema Registry uses _schemas to STORE schemas, so it can't have schema management itself - - glog.V(1).Infof("Creating system topic %s as PLAIN topic (no schema management)", topicName) - return h.seaweedMQHandler.CreateTopic(topicName, partitions) -} - -// fetchSchemaForTopic attempts to fetch schema information for a topic from Schema Registry -// Returns key and value RecordTypes if schemas are found -func (h *Handler) fetchSchemaForTopic(topicName string) (*schema_pb.RecordType, *schema_pb.RecordType, error) { - if h.schemaManager == nil { - return nil, nil, fmt.Errorf("schema manager not available") - } - - var keyRecordType *schema_pb.RecordType - var valueRecordType *schema_pb.RecordType - var lastConnectionError error - - // Try to fetch value schema using standard Kafka naming convention: -value - valueSubject := topicName + "-value" - cachedSchema, err := h.schemaManager.GetLatestSchema(valueSubject) - if err != nil { - // Check if this is a connection error (Schema Registry unavailable) - if h.isSchemaRegistryConnectionError(err) { - lastConnectionError = err - } - // Not found or connection error - continue to check key schema - } else if cachedSchema != nil { - - // Convert schema to RecordType - recordType, err := h.convertSchemaToRecordType(cachedSchema.Schema, cachedSchema.LatestID) - if err == nil { - valueRecordType = recordType - // Store schema configuration for later use - h.storeTopicSchemaConfig(topicName, cachedSchema.LatestID, schema.FormatAvro) - } else { - } - } - - // Try to fetch key schema (optional) - keySubject := topicName + "-key" - cachedKeySchema, keyErr := h.schemaManager.GetLatestSchema(keySubject) - if keyErr != nil { - if h.isSchemaRegistryConnectionError(keyErr) { - lastConnectionError = keyErr - } - // Not found or connection error - key schema is optional - } else if cachedKeySchema != nil { - - // Convert schema to RecordType - recordType, err := h.convertSchemaToRecordType(cachedKeySchema.Schema, cachedKeySchema.LatestID) - if err == nil { - keyRecordType = recordType - // Store key schema configuration for later use - h.storeTopicKeySchemaConfig(topicName, cachedKeySchema.LatestID, schema.FormatAvro) - } else { - } - } - - // If we encountered connection errors, fail fast - if lastConnectionError != nil && keyRecordType == nil && valueRecordType == nil { - return nil, nil, fmt.Errorf("Schema Registry is unavailable: %w", lastConnectionError) - } - - // Return error if no schemas found (but Schema Registry was reachable) - if keyRecordType == nil && valueRecordType == nil { - return nil, nil, fmt.Errorf("no schemas found for topic %s", topicName) - } - - return keyRecordType, valueRecordType, nil -} - -// isSchemaRegistryConnectionError determines if an error is due to Schema Registry being unavailable -// vs a schema not being found (404) -func (h *Handler) isSchemaRegistryConnectionError(err error) bool { - if err == nil { - return false - } - - errStr := err.Error() - - // Connection errors (network issues, DNS resolution, etc.) - if strings.Contains(errStr, "failed to fetch") && - (strings.Contains(errStr, "connection refused") || - strings.Contains(errStr, "no such host") || - strings.Contains(errStr, "timeout") || - strings.Contains(errStr, "network is unreachable")) { - return true - } - - // HTTP 5xx errors (server errors) - if strings.Contains(errStr, "schema registry error 5") { - return true - } - - // HTTP 404 errors are "schema not found", not connection errors - if strings.Contains(errStr, "schema registry error 404") { - return false - } - - // Other HTTP errors (401, 403, etc.) should be treated as connection/config issues - if strings.Contains(errStr, "schema registry error") { - return true - } - - return false -} - -// convertSchemaToRecordType converts a schema string to a RecordType -func (h *Handler) convertSchemaToRecordType(schemaStr string, schemaID uint32) (*schema_pb.RecordType, error) { - // Get the cached schema to determine format - cachedSchema, err := h.schemaManager.GetSchemaByID(schemaID) - if err != nil { - return nil, fmt.Errorf("failed to get cached schema: %w", err) - } - - // Create appropriate decoder and infer RecordType based on format - switch cachedSchema.Format { - case schema.FormatAvro: - // Create Avro decoder and infer RecordType - decoder, err := schema.NewAvroDecoder(schemaStr) - if err != nil { - return nil, fmt.Errorf("failed to create Avro decoder: %w", err) - } - return decoder.InferRecordType() - - case schema.FormatJSONSchema: - // Create JSON Schema decoder and infer RecordType - decoder, err := schema.NewJSONSchemaDecoder(schemaStr) - if err != nil { - return nil, fmt.Errorf("failed to create JSON Schema decoder: %w", err) - } - return decoder.InferRecordType() - - case schema.FormatProtobuf: - // For Protobuf, we need the binary descriptor, not string - // This is a limitation - Protobuf schemas in Schema Registry are typically stored as binary descriptors - return nil, fmt.Errorf("Protobuf schema conversion from string not supported - requires binary descriptor") - - default: - return nil, fmt.Errorf("unsupported schema format: %v", cachedSchema.Format) - } -} - -// isSystemTopic checks if a topic is a Kafka system topic -func isSystemTopic(topicName string) bool { - systemTopics := []string{ - "_schemas", - "__consumer_offsets", - "__transaction_state", - "_confluent-ksql-default__command_topic", - "_confluent-metrics", - } - - for _, systemTopic := range systemTopics { - if topicName == systemTopic { - return true - } - } - - // Check for topics starting with underscore (common system topic pattern) - return len(topicName) > 0 && topicName[0] == '_' -} - -// getConnectionContextFromRequest extracts the connection context from the request context -func (h *Handler) getConnectionContextFromRequest(ctx context.Context) *ConnectionContext { - if connCtx, ok := ctx.Value(connContextKey).(*ConnectionContext); ok { - return connCtx - } - return nil -} - -// getOrCreatePartitionReader gets an existing partition reader or creates a new one -// This maintains persistent readers per connection that stream forward, eliminating -// repeated offset lookups and reducing broker CPU load -func (h *Handler) getOrCreatePartitionReader(ctx context.Context, connCtx *ConnectionContext, key TopicPartitionKey, startOffset int64) *partitionReader { - // Try to get existing reader - if val, ok := connCtx.partitionReaders.Load(key); ok { - return val.(*partitionReader) - } - - // Create new reader - reader := newPartitionReader(ctx, h, connCtx, key.Topic, key.Partition, startOffset) - - // Store it (handle race condition where another goroutine created one) - if actual, loaded := connCtx.partitionReaders.LoadOrStore(key, reader); loaded { - // Another goroutine created it first, close ours and use theirs - reader.close() - return actual.(*partitionReader) - } - - return reader -} - -// cleanupPartitionReaders closes all partition readers for a connection -// Called when connection is closing -func cleanupPartitionReaders(connCtx *ConnectionContext) { - if connCtx == nil { - return - } - - connCtx.partitionReaders.Range(func(key, value interface{}) bool { - if reader, ok := value.(*partitionReader); ok { - reader.close() - } - return true // Continue iteration - }) - - glog.V(4).Infof("[%s] Cleaned up partition readers", connCtx.ConnectionID) -} diff --git a/weed/mq/kafka/protocol/heartbeat_response_format_test.go b/weed/mq/kafka/protocol/heartbeat_response_format_test.go deleted file mode 100644 index f61a3b97f..000000000 --- a/weed/mq/kafka/protocol/heartbeat_response_format_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "testing" -) - -// TestHeartbeatResponseFormat_V0 verifies Heartbeat v0 response format -// v0: error_code (2 bytes) - NO throttle_time_ms -func TestHeartbeatResponseFormat_V0(t *testing.T) { - h := &Handler{} - response := HeartbeatResponse{ - CorrelationID: 12345, - ErrorCode: ErrorCodeNone, - } - - result := h.buildHeartbeatResponseV(response, 0) - - // v0 should only have error_code (2 bytes) - if len(result) != 2 { - t.Errorf("Heartbeat v0 response length = %d, want 2 bytes (error_code only)", len(result)) - } - - // Verify error code - errorCode := int16(binary.BigEndian.Uint16(result[0:2])) - if errorCode != ErrorCodeNone { - t.Errorf("Heartbeat v0 error_code = %d, want %d", errorCode, ErrorCodeNone) - } -} - -// TestHeartbeatResponseFormat_V1ToV3 verifies Heartbeat v1-v3 response format -// v1-v3: throttle_time_ms (4 bytes) -> error_code (2 bytes) -// CRITICAL: throttle_time_ms comes FIRST in v1+ -func TestHeartbeatResponseFormat_V1ToV3(t *testing.T) { - testCases := []struct { - apiVersion uint16 - name string - }{ - {1, "v1"}, - {2, "v2"}, - {3, "v3"}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - h := &Handler{} - response := HeartbeatResponse{ - CorrelationID: 12345, - ErrorCode: ErrorCodeNone, - } - - result := h.buildHeartbeatResponseV(response, tc.apiVersion) - - // v1-v3 should have throttle_time_ms (4 bytes) + error_code (2 bytes) = 6 bytes - if len(result) != 6 { - t.Errorf("Heartbeat %s response length = %d, want 6 bytes", tc.name, len(result)) - } - - // CRITICAL: Verify field order - throttle_time_ms BEFORE error_code - // Bytes 0-3: throttle_time_ms (should be 0) - throttleTime := int32(binary.BigEndian.Uint32(result[0:4])) - if throttleTime != 0 { - t.Errorf("Heartbeat %s throttle_time_ms = %d, want 0", tc.name, throttleTime) - } - - // Bytes 4-5: error_code (should be 0 = ErrorCodeNone) - errorCode := int16(binary.BigEndian.Uint16(result[4:6])) - if errorCode != ErrorCodeNone { - t.Errorf("Heartbeat %s error_code = %d, want %d", tc.name, errorCode, ErrorCodeNone) - } - }) - } -} - -// TestHeartbeatResponseFormat_V4Plus verifies Heartbeat v4+ response format (flexible) -// v4+: throttle_time_ms (4 bytes) -> error_code (2 bytes) -> tagged_fields (varint) -func TestHeartbeatResponseFormat_V4Plus(t *testing.T) { - testCases := []struct { - apiVersion uint16 - name string - }{ - {4, "v4"}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - h := &Handler{} - response := HeartbeatResponse{ - CorrelationID: 12345, - ErrorCode: ErrorCodeNone, - } - - result := h.buildHeartbeatResponseV(response, tc.apiVersion) - - // v4+ should have throttle_time_ms (4 bytes) + error_code (2 bytes) + tagged_fields (1 byte for empty) = 7 bytes - if len(result) != 7 { - t.Errorf("Heartbeat %s response length = %d, want 7 bytes", tc.name, len(result)) - } - - // Verify field order - throttle_time_ms BEFORE error_code - // Bytes 0-3: throttle_time_ms (should be 0) - throttleTime := int32(binary.BigEndian.Uint32(result[0:4])) - if throttleTime != 0 { - t.Errorf("Heartbeat %s throttle_time_ms = %d, want 0", tc.name, throttleTime) - } - - // Bytes 4-5: error_code (should be 0 = ErrorCodeNone) - errorCode := int16(binary.BigEndian.Uint16(result[4:6])) - if errorCode != ErrorCodeNone { - t.Errorf("Heartbeat %s error_code = %d, want %d", tc.name, errorCode, ErrorCodeNone) - } - - // Byte 6: tagged_fields (should be 0x00 for empty) - taggedFields := result[6] - if taggedFields != 0x00 { - t.Errorf("Heartbeat %s tagged_fields = 0x%02x, want 0x00", tc.name, taggedFields) - } - }) - } -} - -// TestHeartbeatResponseFormat_ErrorCode verifies error codes are correctly encoded -func TestHeartbeatResponseFormat_ErrorCode(t *testing.T) { - testCases := []struct { - errorCode int16 - name string - }{ - {ErrorCodeNone, "None"}, - {ErrorCodeUnknownMemberID, "UnknownMemberID"}, - {ErrorCodeIllegalGeneration, "IllegalGeneration"}, - {ErrorCodeRebalanceInProgress, "RebalanceInProgress"}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - h := &Handler{} - response := HeartbeatResponse{ - CorrelationID: 12345, - ErrorCode: tc.errorCode, - } - - // Test with v3 (non-flexible) - result := h.buildHeartbeatResponseV(response, 3) - - // Bytes 4-5: error_code - errorCode := int16(binary.BigEndian.Uint16(result[4:6])) - if errorCode != tc.errorCode { - t.Errorf("Heartbeat v3 error_code = %d, want %d", errorCode, tc.errorCode) - } - }) - } -} - -// TestHeartbeatResponseFormat_BugReproduce reproduces the original bug -// This test documents the bug where error_code was placed BEFORE throttle_time_ms in v1-v3 -func TestHeartbeatResponseFormat_BugReproduce(t *testing.T) { - t.Skip("This test documents the original bug - skip to avoid false failures") - - // Original buggy implementation would have: - // v1-v3: error_code (2 bytes) -> throttle_time_ms (4 bytes) - // This caused Sarama to read error_code bytes as throttle_time_ms, resulting in huge throttle values - - // Example: error_code = 0 (0x0000) would be read as throttle_time_ms = 0 - // But if there were any non-zero bytes, it would cause massive throttle times - - // But if error_code was non-zero, e.g., ErrorCodeIllegalGeneration = 22: - buggyResponseWithError := []byte{ - 0x00, 0x16, // error_code = 22 (0x0016) - 0x00, 0x00, 0x00, 0x00, // throttle_time_ms = 0 - } - - // Sarama would read: - // - Bytes 0-3 as throttle_time_ms: 0x00160000 = 1441792 ms = 24 minutes! - throttleTimeMs := binary.BigEndian.Uint32(buggyResponseWithError[0:4]) - if throttleTimeMs != 1441792 { - t.Errorf("Buggy format would cause throttle_time_ms = %d ms (%.1f minutes), want 1441792 ms (24 minutes)", - throttleTimeMs, float64(throttleTimeMs)/60000) - } - - t.Logf("Original bug: error_code=22 would be misread as throttle_time_ms=%d ms (%.1f minutes)", - throttleTimeMs, float64(throttleTimeMs)/60000) -} diff --git a/weed/mq/kafka/protocol/joingroup.go b/weed/mq/kafka/protocol/joingroup.go deleted file mode 100644 index 85a632070..000000000 --- a/weed/mq/kafka/protocol/joingroup.go +++ /dev/null @@ -1,1468 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer" -) - -// JoinGroup API (key 11) - Consumer group protocol -// Handles consumer joining a consumer group and initial coordination - -// JoinGroupRequest represents a JoinGroup request from a Kafka client -type JoinGroupRequest struct { - GroupID string - SessionTimeout int32 - RebalanceTimeout int32 - MemberID string // Empty for new members - GroupInstanceID string // Optional static membership - ProtocolType string // "consumer" for regular consumers - GroupProtocols []GroupProtocol -} - -// GroupProtocol represents a supported assignment protocol -type GroupProtocol struct { - Name string - Metadata []byte -} - -// JoinGroupResponse represents a JoinGroup response to a Kafka client -type JoinGroupResponse struct { - CorrelationID uint32 - ThrottleTimeMs int32 // versions 2+ - ErrorCode int16 - GenerationID int32 - ProtocolName string // NOT nullable in v6, nullable in v7+ - Leader string // NOT nullable - MemberID string - Version uint16 - Members []JoinGroupMember // Only populated for group leader -} - -// JoinGroupMember represents member info sent to group leader -type JoinGroupMember struct { - MemberID string - GroupInstanceID string - Metadata []byte -} - -// Error codes for JoinGroup are imported from errors.go - -func (h *Handler) handleJoinGroup(connContext *ConnectionContext, correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse JoinGroup request - request, err := h.parseJoinGroupRequest(requestBody, apiVersion) - if err != nil { - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Validate request - if request.GroupID == "" { - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - if !h.groupCoordinator.ValidateSessionTimeout(request.SessionTimeout) { - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeInvalidSessionTimeout, apiVersion), nil - } - - // Get or create consumer group - group := h.groupCoordinator.GetOrCreateGroup(request.GroupID) - - group.Mu.Lock() - defer group.Mu.Unlock() - - // Update group's last activity - group.LastActivity = time.Now() - - // Handle member ID logic with static membership support - var memberID string - var isNewMember bool - var existingMember *consumer.GroupMember - - // Use the actual ClientID from Kafka protocol header for unique member ID generation - clientKey := connContext.ClientID - if clientKey == "" { - // Fallback to deterministic key if ClientID not available - clientKey = fmt.Sprintf("%s-%d-%s", request.GroupID, request.SessionTimeout, request.ProtocolType) - glog.Warningf("[JoinGroup] No ClientID in ConnectionContext for group %s, using fallback: %s", request.GroupID, clientKey) - } else { - glog.V(1).Infof("[JoinGroup] Using ClientID from ConnectionContext for group %s: %s", request.GroupID, clientKey) - } - - // Check for static membership first - if request.GroupInstanceID != "" { - existingMember = h.groupCoordinator.FindStaticMemberLocked(group, request.GroupInstanceID) - if existingMember != nil { - memberID = existingMember.ID - isNewMember = false - } else { - // New static member - memberID = h.groupCoordinator.GenerateMemberID(request.GroupInstanceID, "static") - isNewMember = true - } - } else { - // Dynamic membership logic - if request.MemberID == "" { - // New member - check if we already have a member for this client - var existingMemberID string - for existingID, member := range group.Members { - if member.ClientID == clientKey && !h.groupCoordinator.IsStaticMember(member) { - existingMemberID = existingID - break - } - } - - if existingMemberID != "" { - // Reuse existing member ID for this client - memberID = existingMemberID - isNewMember = false - } else { - // Generate new deterministic member ID - memberID = h.groupCoordinator.GenerateMemberID(clientKey, "consumer") - isNewMember = true - } - } else { - memberID = request.MemberID - // Check if member exists - if _, exists := group.Members[memberID]; !exists { - // Member ID provided but doesn't exist - reject - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeUnknownMemberID, apiVersion), nil - } - isNewMember = false - } - } - - // Check group state - switch group.State { - case consumer.GroupStateEmpty, consumer.GroupStateStable: - // Can join or trigger rebalance - if isNewMember || len(group.Members) == 0 { - group.State = consumer.GroupStatePreparingRebalance - group.Generation++ - } - case consumer.GroupStatePreparingRebalance: - // Rebalance in progress - if this is the leader and we have members, transition to CompletingRebalance - if len(group.Members) > 0 && memberID == group.Leader { - group.State = consumer.GroupStateCompletingRebalance - } - case consumer.GroupStateCompletingRebalance: - // Allow join but don't change generation until SyncGroup - case consumer.GroupStateDead: - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Extract client host from connection context - clientHost := ExtractClientHost(connContext) - - // Create or update member with enhanced metadata parsing - var groupInstanceID *string - if request.GroupInstanceID != "" { - groupInstanceID = &request.GroupInstanceID - } - - member := &consumer.GroupMember{ - ID: memberID, - ClientID: clientKey, // Use actual Kafka ClientID for unique member identification - ClientHost: clientHost, // Now extracted from actual connection - GroupInstanceID: groupInstanceID, - SessionTimeout: request.SessionTimeout, - RebalanceTimeout: request.RebalanceTimeout, - Subscription: h.extractSubscriptionFromProtocolsEnhanced(request.GroupProtocols), - State: consumer.MemberStatePending, - LastHeartbeat: time.Now(), - JoinedAt: time.Now(), - } - - // Add or update the member in the group before computing subscriptions or leader - if group.Members == nil { - group.Members = make(map[string]*consumer.GroupMember) - } - group.Members[memberID] = member - - // Store consumer group and member ID in connection context for use in fetch requests - connContext.ConsumerGroup = request.GroupID - connContext.MemberID = memberID - - // Store protocol metadata for leader - if len(request.GroupProtocols) > 0 { - if len(request.GroupProtocols[0].Metadata) == 0 { - // Generate subscription metadata for available topics - availableTopics := h.getAvailableTopics() - - metadata := make([]byte, 0, 64) - // Version (2 bytes) - use version 0 - metadata = append(metadata, 0, 0) - // Topics count (4 bytes) - topicsCount := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCount, uint32(len(availableTopics))) - metadata = append(metadata, topicsCount...) - // Topics (string array) - for _, topic := range availableTopics { - topicLen := make([]byte, 2) - binary.BigEndian.PutUint16(topicLen, uint16(len(topic))) - metadata = append(metadata, topicLen...) - metadata = append(metadata, []byte(topic)...) - } - // UserData length (4 bytes) - empty - metadata = append(metadata, 0, 0, 0, 0) - member.Metadata = metadata - } else { - member.Metadata = request.GroupProtocols[0].Metadata - } - } - - // Add member to group - group.Members[memberID] = member - - // Register static member if applicable - if member.GroupInstanceID != nil && *member.GroupInstanceID != "" { - h.groupCoordinator.RegisterStaticMemberLocked(group, member) - } - - // Update group's subscribed topics - h.updateGroupSubscription(group) - - // Select assignment protocol using enhanced selection logic - // If the group already has a selected protocol, enforce compatibility with it. - existingProtocols := make([]string, 0, 1) - if group.Protocol != "" { - existingProtocols = append(existingProtocols, group.Protocol) - } - - groupProtocol := SelectBestProtocol(request.GroupProtocols, existingProtocols) - - // Ensure we have a valid protocol - fallback to "range" if empty - if groupProtocol == "" { - groupProtocol = consumer.ProtocolNameRange - } - - // If a protocol is already selected for the group, reject joins that do not support it. - if len(existingProtocols) > 0 && (groupProtocol == "" || groupProtocol != group.Protocol) { - // Rollback member addition and static registration before returning error - delete(group.Members, memberID) - if member.GroupInstanceID != nil && *member.GroupInstanceID != "" { - h.groupCoordinator.UnregisterStaticMemberLocked(group, *member.GroupInstanceID) - } - // Recompute group subscription without the rejected member - h.updateGroupSubscription(group) - return h.buildJoinGroupErrorResponse(correlationID, ErrorCodeInconsistentGroupProtocol, apiVersion), nil - } - - group.Protocol = groupProtocol - - // Select group leader (first member or keep existing if still present) - if group.Leader == "" || group.Members[group.Leader] == nil { - group.Leader = memberID - } else { - } - - // Build response - use the requested API version - response := JoinGroupResponse{ - CorrelationID: correlationID, - ThrottleTimeMs: 0, - ErrorCode: ErrorCodeNone, - GenerationID: group.Generation, - ProtocolName: groupProtocol, - Leader: group.Leader, - MemberID: memberID, - Version: apiVersion, - } - - // If this member is the leader, include all member info for assignment - if memberID == group.Leader { - response.Members = make([]JoinGroupMember, 0, len(group.Members)) - for mid, m := range group.Members { - instanceID := "" - if m.GroupInstanceID != nil { - instanceID = *m.GroupInstanceID - } - response.Members = append(response.Members, JoinGroupMember{ - MemberID: mid, - GroupInstanceID: instanceID, - Metadata: m.Metadata, - }) - } - } - - resp := h.buildJoinGroupResponse(response) - return resp, nil -} - -func (h *Handler) parseJoinGroupRequest(data []byte, apiVersion uint16) (*JoinGroupRequest, error) { - if len(data) < 8 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - isFlexible := IsFlexibleVersion(11, apiVersion) - - // For flexible versions, skip top-level tagged fields first - if isFlexible { - // Skip top-level tagged fields (they come before the actual request fields) - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err != nil { - return nil, fmt.Errorf("JoinGroup v%d: decode top-level tagged fields: %w", apiVersion, err) - } - offset += consumed - } - - // GroupID (string or compact string) - FIRST field in request - var groupID string - if isFlexible { - // Flexible protocol uses compact strings - endIdx := offset + 20 - if endIdx > len(data) { - endIdx = len(data) - } - groupIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid group ID compact string") - } - if groupIDBytes != nil { - groupID = string(groupIDBytes) - } - offset += consumed - } else { - // Non-flexible protocol uses regular strings - if offset+2 > len(data) { - return nil, fmt.Errorf("missing group ID length") - } - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID = string(data[offset : offset+groupIDLength]) - offset += groupIDLength - } - - // Session timeout (4 bytes) - if offset+4 > len(data) { - return nil, fmt.Errorf("missing session timeout") - } - sessionTimeout := int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - // Rebalance timeout (4 bytes) - for v1+ versions - rebalanceTimeout := sessionTimeout // Default to session timeout for v0 - if apiVersion >= 1 && offset+4 <= len(data) { - rebalanceTimeout = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - } - - // MemberID (string or compact string) - var memberID string - if isFlexible { - // Flexible protocol uses compact strings - memberIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid member ID compact string") - } - if memberIDBytes != nil { - memberID = string(memberIDBytes) - } - offset += consumed - } else { - // Non-flexible protocol uses regular strings - if offset+2 > len(data) { - return nil, fmt.Errorf("missing member ID length") - } - memberIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if memberIDLength > 0 { - if offset+memberIDLength > len(data) { - return nil, fmt.Errorf("invalid member ID length") - } - memberID = string(data[offset : offset+memberIDLength]) - offset += memberIDLength - } - } - - // Parse Group Instance ID (nullable string) - for JoinGroup v5+ - var groupInstanceID string - if apiVersion >= 5 { - if isFlexible { - // FLEXIBLE V6+ FIX: GroupInstanceID is a compact nullable string - groupInstanceIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 && len(data) > offset { - // Check if it's a null compact string (0x00) - if data[offset] == 0x00 { - groupInstanceID = "" // null - offset += 1 - } else { - return nil, fmt.Errorf("JoinGroup v%d: invalid group instance ID compact string", apiVersion) - } - } else { - if groupInstanceIDBytes != nil { - groupInstanceID = string(groupInstanceIDBytes) - } - offset += consumed - } - } else { - // Non-flexible v5: regular nullable string - if offset+2 > len(data) { - return nil, fmt.Errorf("missing group instance ID length") - } - instanceIDLength := int16(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - - if instanceIDLength == -1 { - groupInstanceID = "" // null string - } else if instanceIDLength >= 0 { - if offset+int(instanceIDLength) > len(data) { - return nil, fmt.Errorf("invalid group instance ID length") - } - groupInstanceID = string(data[offset : offset+int(instanceIDLength)]) - offset += int(instanceIDLength) - } - } - } - - // Parse Protocol Type - var protocolType string - if isFlexible { - // FLEXIBLE V6+ FIX: ProtocolType is a compact string, not regular string - endIdx := offset + 10 - if endIdx > len(data) { - endIdx = len(data) - } - protocolTypeBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("JoinGroup v%d: invalid protocol type compact string", apiVersion) - } - if protocolTypeBytes != nil { - protocolType = string(protocolTypeBytes) - } - offset += consumed - } else { - // Non-flexible parsing (v0-v5) - if len(data) < offset+2 { - return nil, fmt.Errorf("JoinGroup request missing protocol type") - } - protocolTypeLength := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if len(data) < offset+int(protocolTypeLength) { - return nil, fmt.Errorf("JoinGroup request protocol type too short") - } - protocolType = string(data[offset : offset+int(protocolTypeLength)]) - offset += int(protocolTypeLength) - } - - // Parse Group Protocols array - var protocolsCount uint32 - if isFlexible { - // FLEXIBLE V6+ FIX: GroupProtocols is a compact array, not regular array - compactLength, consumed, err := DecodeCompactArrayLength(data[offset:]) - if err != nil { - return nil, fmt.Errorf("JoinGroup v%d: invalid group protocols compact array: %w", apiVersion, err) - } - protocolsCount = compactLength - offset += consumed - } else { - // Non-flexible parsing (v0-v5) - if len(data) < offset+4 { - return nil, fmt.Errorf("JoinGroup request missing group protocols") - } - protocolsCount = binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - } - - protocols := make([]GroupProtocol, 0, protocolsCount) - - for i := uint32(0); i < protocolsCount && offset < len(data); i++ { - // Parse protocol name - var protocolName string - if isFlexible { - // FLEXIBLE V6+ FIX: Protocol name is a compact string - endIdx := offset + 10 - if endIdx > len(data) { - endIdx = len(data) - } - protocolNameBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("JoinGroup v%d: invalid protocol name compact string", apiVersion) - } - if protocolNameBytes != nil { - protocolName = string(protocolNameBytes) - } - offset += consumed - } else { - // Non-flexible parsing - if len(data) < offset+2 { - break - } - protocolNameLength := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if len(data) < offset+int(protocolNameLength) { - break - } - protocolName = string(data[offset : offset+int(protocolNameLength)]) - offset += int(protocolNameLength) - } - - // Parse protocol metadata - var metadata []byte - if isFlexible { - // FLEXIBLE V6+ FIX: Protocol metadata is compact bytes - metadataLength, consumed, err := DecodeCompactArrayLength(data[offset:]) - if err != nil { - return nil, fmt.Errorf("JoinGroup v%d: invalid protocol metadata compact bytes: %w", apiVersion, err) - } - offset += consumed - - if metadataLength > 0 && len(data) >= offset+int(metadataLength) { - metadata = make([]byte, metadataLength) - copy(metadata, data[offset:offset+int(metadataLength)]) - offset += int(metadataLength) - } - } else { - // Non-flexible parsing - if len(data) < offset+4 { - break - } - metadataLength := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - if metadataLength > 0 && len(data) >= offset+int(metadataLength) { - metadata = make([]byte, metadataLength) - copy(metadata, data[offset:offset+int(metadataLength)]) - offset += int(metadataLength) - } - } - - // Parse per-protocol tagged fields (v6+) - if isFlexible { - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err != nil { - // Don't fail - some clients might not send tagged fields - } else { - offset += consumed - } - } - - protocols = append(protocols, GroupProtocol{ - Name: protocolName, - Metadata: metadata, - }) - - } - - // Parse request-level tagged fields (v6+) - if isFlexible { - if offset < len(data) { - _, _, err := DecodeTaggedFields(data[offset:]) - if err != nil { - // Don't fail - some clients might not send tagged fields - } - } - } - - return &JoinGroupRequest{ - GroupID: groupID, - SessionTimeout: sessionTimeout, - RebalanceTimeout: rebalanceTimeout, - MemberID: memberID, - GroupInstanceID: groupInstanceID, - ProtocolType: protocolType, - GroupProtocols: protocols, - }, nil -} - -func (h *Handler) buildJoinGroupResponse(response JoinGroupResponse) []byte { - // Flexible response for v6+ - if IsFlexibleVersion(11, response.Version) { - out := make([]byte, 0, 256) - - // NOTE: Correlation ID and header-level tagged fields are handled by writeResponseWithHeader - // Do NOT include them in the response body - - // throttle_time_ms (int32) - versions 2+ - if response.Version >= 2 { - ttms := make([]byte, 4) - binary.BigEndian.PutUint32(ttms, uint32(response.ThrottleTimeMs)) - out = append(out, ttms...) - } - - // error_code (int16) - eb := make([]byte, 2) - binary.BigEndian.PutUint16(eb, uint16(response.ErrorCode)) - out = append(out, eb...) - - // generation_id (int32) - gb := make([]byte, 4) - binary.BigEndian.PutUint32(gb, uint32(response.GenerationID)) - out = append(out, gb...) - - // ProtocolType (v7+ nullable compact string) - NOT in v6! - if response.Version >= 7 { - pt := "consumer" - out = append(out, FlexibleNullableString(&pt)...) - } - - // ProtocolName (compact string in v6, nullable compact string in v7+) - if response.Version >= 7 { - // nullable compact string in v7+ - if response.ProtocolName == "" { - out = append(out, 0) // null - } else { - out = append(out, FlexibleString(response.ProtocolName)...) - } - } else { - // NON-nullable compact string in v6 - must not be empty! - if response.ProtocolName == "" { - response.ProtocolName = consumer.ProtocolNameRange // fallback to default - } - out = append(out, FlexibleString(response.ProtocolName)...) - } - - // leader (compact string) - NOT nullable - if response.Leader == "" { - response.Leader = "unknown" // fallback for error cases - } - out = append(out, FlexibleString(response.Leader)...) - - // SkipAssignment (bool) v9+ - if response.Version >= 9 { - out = append(out, 0) // false - } - - // member_id (compact string) - out = append(out, FlexibleString(response.MemberID)...) - - // members (compact array) - // Compact arrays use length+1 encoding (0 = null, 1 = empty, n+1 = array of length n) - out = append(out, EncodeUvarint(uint32(len(response.Members)+1))...) - for _, m := range response.Members { - // member_id (compact string) - out = append(out, FlexibleString(m.MemberID)...) - // group_instance_id (compact nullable string) - if m.GroupInstanceID == "" { - out = append(out, 0) - } else { - out = append(out, FlexibleString(m.GroupInstanceID)...) - } - // metadata (compact bytes) - // Compact bytes use length+1 encoding (0 = null, 1 = empty, n+1 = bytes of length n) - out = append(out, EncodeUvarint(uint32(len(m.Metadata)+1))...) - out = append(out, m.Metadata...) - // member tagged fields (empty) - out = append(out, 0) - } - - // top-level tagged fields (empty) - out = append(out, 0) - - return out - } - - // Legacy (non-flexible) response path - // Estimate response size - estimatedSize := 0 - // CorrelationID(4) + (optional throttle 4) + error_code(2) + generation_id(4) - if response.Version >= 2 { - estimatedSize = 4 + 4 + 2 + 4 - } else { - estimatedSize = 4 + 2 + 4 - } - estimatedSize += 2 + len(response.ProtocolName) // protocol string - estimatedSize += 2 + len(response.Leader) // leader string - estimatedSize += 2 + len(response.MemberID) // member id string - estimatedSize += 4 // members array count - for _, member := range response.Members { - // MemberID string - estimatedSize += 2 + len(member.MemberID) - if response.Version >= 5 { - // GroupInstanceID string - estimatedSize += 2 + len(member.GroupInstanceID) - } - // Metadata bytes (4 + len) - estimatedSize += 4 + len(member.Metadata) - } - - result := make([]byte, 0, estimatedSize) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // JoinGroup v2 adds throttle_time_ms - if response.Version >= 2 { - throttleTimeBytes := make([]byte, 4) - binary.BigEndian.PutUint32(throttleTimeBytes, 0) // No throttling - result = append(result, throttleTimeBytes...) - } - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - // Generation ID (4 bytes) - generationBytes := make([]byte, 4) - binary.BigEndian.PutUint32(generationBytes, uint32(response.GenerationID)) - result = append(result, generationBytes...) - - // Group protocol (string) - protocolLength := make([]byte, 2) - binary.BigEndian.PutUint16(protocolLength, uint16(len(response.ProtocolName))) - result = append(result, protocolLength...) - result = append(result, []byte(response.ProtocolName)...) - - // Group leader (string) - leaderLength := make([]byte, 2) - binary.BigEndian.PutUint16(leaderLength, uint16(len(response.Leader))) - result = append(result, leaderLength...) - result = append(result, []byte(response.Leader)...) - - // Member ID (string) - memberIDLength := make([]byte, 2) - binary.BigEndian.PutUint16(memberIDLength, uint16(len(response.MemberID))) - result = append(result, memberIDLength...) - result = append(result, []byte(response.MemberID)...) - - // Members array (4 bytes count + members) - memberCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(memberCountBytes, uint32(len(response.Members))) - result = append(result, memberCountBytes...) - - for _, member := range response.Members { - // Member ID (string) - memberLength := make([]byte, 2) - binary.BigEndian.PutUint16(memberLength, uint16(len(member.MemberID))) - result = append(result, memberLength...) - result = append(result, []byte(member.MemberID)...) - - if response.Version >= 5 { - // Group instance ID (string) - can be empty - instanceIDLength := make([]byte, 2) - binary.BigEndian.PutUint16(instanceIDLength, uint16(len(member.GroupInstanceID))) - result = append(result, instanceIDLength...) - if len(member.GroupInstanceID) > 0 { - result = append(result, []byte(member.GroupInstanceID)...) - } - } - - // Metadata (bytes) - metadataLength := make([]byte, 4) - binary.BigEndian.PutUint32(metadataLength, uint32(len(member.Metadata))) - result = append(result, metadataLength...) - result = append(result, member.Metadata...) - } - - return result -} - -func (h *Handler) buildJoinGroupErrorResponse(correlationID uint32, errorCode int16, apiVersion uint16) []byte { - response := JoinGroupResponse{ - CorrelationID: correlationID, - ThrottleTimeMs: 0, - ErrorCode: errorCode, - GenerationID: -1, - ProtocolName: consumer.ProtocolNameRange, // Use "range" as default protocol instead of empty string - Leader: "unknown", // Use "unknown" instead of empty string for non-nullable field - MemberID: "unknown", // Use "unknown" instead of empty string for non-nullable field - Version: apiVersion, - Members: []JoinGroupMember{}, - } - - return h.buildJoinGroupResponse(response) -} - -// extractSubscriptionFromProtocolsEnhanced uses improved metadata parsing with better error handling -func (h *Handler) extractSubscriptionFromProtocolsEnhanced(protocols []GroupProtocol) []string { - debugInfo := AnalyzeProtocolMetadata(protocols) - for _, info := range debugInfo { - if info.ParsedOK { - } else { - } - } - - // Extract topics using enhanced parsing - topics := ExtractTopicsFromMetadata(protocols, h.getAvailableTopics()) - - return topics -} - -func (h *Handler) updateGroupSubscription(group *consumer.ConsumerGroup) { - // Update group's subscribed topics from all members - group.SubscribedTopics = make(map[string]bool) - for _, member := range group.Members { - for _, topic := range member.Subscription { - group.SubscribedTopics[topic] = true - } - } -} - -// SyncGroup API (key 14) - Consumer group coordination completion -// Called by group members after JoinGroup to get partition assignments - -// SyncGroupRequest represents a SyncGroup request from a Kafka client -type SyncGroupRequest struct { - GroupID string - GenerationID int32 - MemberID string - GroupInstanceID string - GroupAssignments []GroupAssignment // Only from group leader -} - -// GroupAssignment represents partition assignment for a group member -type GroupAssignment struct { - MemberID string - Assignment []byte // Serialized assignment data -} - -// SyncGroupResponse represents a SyncGroup response to a Kafka client -type SyncGroupResponse struct { - CorrelationID uint32 - ErrorCode int16 - Assignment []byte // Serialized partition assignment for this member -} - -// Additional error codes for SyncGroup -// Error codes for SyncGroup are imported from errors.go - -func (h *Handler) handleSyncGroup(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Parse SyncGroup request - request, err := h.parseSyncGroupRequest(requestBody, apiVersion) - if err != nil { - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Validate request - if request.GroupID == "" || request.MemberID == "" { - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Get consumer group - group := h.groupCoordinator.GetGroup(request.GroupID) - if group == nil { - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - group.Mu.Lock() - defer group.Mu.Unlock() - - // Update group's last activity - group.LastActivity = time.Now() - - // Validate member exists - member, exists := group.Members[request.MemberID] - if !exists { - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeUnknownMemberID, apiVersion), nil - } - - // Validate generation - if request.GenerationID != group.Generation { - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeIllegalGeneration, apiVersion), nil - } - - // Check if this is the group leader with assignments - glog.V(2).Infof("[SYNCGROUP] Member=%s Leader=%s GroupState=%s HasAssignments=%v MemberCount=%d Gen=%d", - request.MemberID, group.Leader, group.State, len(request.GroupAssignments) > 0, len(group.Members), request.GenerationID) - - if request.MemberID == group.Leader && len(request.GroupAssignments) > 0 { - // Leader is providing assignments - process and store them - glog.V(2).Infof("[SYNCGROUP] Leader %s providing client-side assignments for group %s (%d assignments)", - request.MemberID, request.GroupID, len(request.GroupAssignments)) - err = h.processGroupAssignments(group, request.GroupAssignments) - if err != nil { - glog.Errorf("[SYNCGROUP] ERROR processing leader assignments: %v", err) - return h.buildSyncGroupErrorResponse(correlationID, ErrorCodeInconsistentGroupProtocol, apiVersion), nil - } - - // Move group to stable state - group.State = consumer.GroupStateStable - - // Mark all members as stable - for _, m := range group.Members { - m.State = consumer.MemberStateStable - } - glog.V(2).Infof("[SYNCGROUP] Leader assignments processed successfully, group now STABLE") - } else if request.MemberID != group.Leader && len(request.GroupAssignments) == 0 { - // Non-leader member requesting its assignment - // CRITICAL FIX: Non-leader members should ALWAYS wait for leader's client-side assignments - // This is the correct behavior for Sarama and other client-side assignment protocols - glog.V(3).Infof("[SYNCGROUP] Non-leader %s waiting for/retrieving assignment in group %s (state=%s)", - request.MemberID, request.GroupID, group.State) - // Assignment will be retrieved from member.Assignment below - } else { - // Trigger partition assignment using built-in strategy (server-side assignment) - // This should only happen for server-side assignment protocols (not Sarama's client-side) - glog.Warningf("[SYNCGROUP] Using server-side assignment for group %s (Leader=%s State=%s) - this should not happen with Sarama!", - request.GroupID, group.Leader, group.State) - topicPartitions := h.getTopicPartitions(group) - group.AssignPartitions(topicPartitions) - - group.State = consumer.GroupStateStable - for _, m := range group.Members { - m.State = consumer.MemberStateStable - } - } - - // Get assignment for this member - // SCHEMA REGISTRY COMPATIBILITY: Check if this is a Schema Registry client - var assignment []byte - if request.GroupID == "schema-registry" { - // Schema Registry expects JSON format assignment - assignment = h.serializeSchemaRegistryAssignment(group, member.Assignment) - } else { - // Standard Kafka binary assignment format - assignment = h.serializeMemberAssignment(member.Assignment) - } - - // Log member assignment details - glog.V(3).Infof("[SYNCGROUP] Member %s in group %s assigned %d partitions: %v", - request.MemberID, request.GroupID, len(member.Assignment), member.Assignment) - - // Build response - response := SyncGroupResponse{ - CorrelationID: correlationID, - ErrorCode: ErrorCodeNone, - Assignment: assignment, - } - - assignmentPreview := assignment - if len(assignmentPreview) > 100 { - assignmentPreview = assignment[:100] - } - - resp := h.buildSyncGroupResponse(response, apiVersion) - return resp, nil -} - -func (h *Handler) parseSyncGroupRequest(data []byte, apiVersion uint16) (*SyncGroupRequest, error) { - if len(data) < 8 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - isFlexible := IsFlexibleVersion(14, apiVersion) // SyncGroup API key = 14 - - // ADMINCLIENT COMPATIBILITY FIX: Parse top-level tagged fields at the beginning for flexible versions - if isFlexible { - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err == nil { - offset += consumed - } else { - } - } - - // Parse GroupID - var groupID string - if isFlexible { - // FLEXIBLE V4+ FIX: GroupID is a compact string - groupIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid group ID compact string") - } - if groupIDBytes != nil { - groupID = string(groupIDBytes) - } - offset += consumed - } else { - // Non-flexible parsing (v0-v3) - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID = string(data[offset : offset+groupIDLength]) - offset += groupIDLength - } - - // Generation ID (4 bytes) - always fixed-length - if offset+4 > len(data) { - return nil, fmt.Errorf("missing generation ID") - } - generationID := int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - // Parse MemberID - var memberID string - if isFlexible { - // FLEXIBLE V4+ FIX: MemberID is a compact string - memberIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - return nil, fmt.Errorf("invalid member ID compact string") - } - if memberIDBytes != nil { - memberID = string(memberIDBytes) - } - offset += consumed - } else { - // Non-flexible parsing (v0-v3) - if offset+2 > len(data) { - return nil, fmt.Errorf("missing member ID length") - } - memberIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+memberIDLength > len(data) { - return nil, fmt.Errorf("invalid member ID length") - } - memberID = string(data[offset : offset+memberIDLength]) - offset += memberIDLength - } - - // Parse GroupInstanceID (nullable string) - for SyncGroup v3+ - var groupInstanceID string - if apiVersion >= 3 { - if isFlexible { - // FLEXIBLE V4+ FIX: GroupInstanceID is a compact nullable string - groupInstanceIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 && len(data) > offset && data[offset] == 0x00 { - groupInstanceID = "" // null - offset += 1 - } else { - if groupInstanceIDBytes != nil { - groupInstanceID = string(groupInstanceIDBytes) - } - offset += consumed - } - } else { - // Non-flexible v3: regular nullable string - if offset+2 > len(data) { - return nil, fmt.Errorf("missing group instance ID length") - } - instanceIDLength := int16(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - - if instanceIDLength == -1 { - groupInstanceID = "" // null string - } else if instanceIDLength >= 0 { - if offset+int(instanceIDLength) > len(data) { - return nil, fmt.Errorf("invalid group instance ID length") - } - groupInstanceID = string(data[offset : offset+int(instanceIDLength)]) - offset += int(instanceIDLength) - } - } - } - - // Parse assignments array if present (leader sends assignments) - assignments := make([]GroupAssignment, 0) - - if offset < len(data) { - var assignmentsCount uint32 - if isFlexible { - // FLEXIBLE V4+ FIX: Assignments is a compact array - compactLength, consumed, err := DecodeCompactArrayLength(data[offset:]) - if err != nil { - } else { - assignmentsCount = compactLength - offset += consumed - } - } else { - // Non-flexible: regular array with 4-byte length - if offset+4 <= len(data) { - assignmentsCount = binary.BigEndian.Uint32(data[offset:]) - offset += 4 - } - } - - // Basic sanity check to avoid very large allocations - if assignmentsCount > 0 && assignmentsCount < 10000 { - for i := uint32(0); i < assignmentsCount && offset < len(data); i++ { - var mID string - var assign []byte - - // Parse member_id - if isFlexible { - // FLEXIBLE V4+ FIX: member_id is a compact string - memberIDBytes, consumed := parseCompactString(data[offset:]) - if consumed == 0 { - break - } - if memberIDBytes != nil { - mID = string(memberIDBytes) - } - offset += consumed - } else { - // Non-flexible: regular string - if offset+2 > len(data) { - break - } - memberLen := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if memberLen < 0 || offset+memberLen > len(data) { - break - } - mID = string(data[offset : offset+memberLen]) - offset += memberLen - } - - // Parse assignment (bytes) - if isFlexible { - // FLEXIBLE V4+ FIX: assignment is compact bytes - assignLength, consumed, err := DecodeCompactArrayLength(data[offset:]) - if err != nil { - break - } - offset += consumed - if assignLength > 0 && offset+int(assignLength) <= len(data) { - assign = make([]byte, assignLength) - copy(assign, data[offset:offset+int(assignLength)]) - offset += int(assignLength) - } - - // Flexible format requires tagged fields after each assignment struct - if offset < len(data) { - _, taggedConsumed, tagErr := DecodeTaggedFields(data[offset:]) - if tagErr == nil { - offset += taggedConsumed - } - } - } else { - // Non-flexible: regular bytes - if offset+4 > len(data) { - break - } - assignLen := int(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - if assignLen < 0 || offset+assignLen > len(data) { - break - } - if assignLen > 0 { - assign = make([]byte, assignLen) - copy(assign, data[offset:offset+assignLen]) - } - offset += assignLen - } - - assignments = append(assignments, GroupAssignment{MemberID: mID, Assignment: assign}) - } - } - } - - // Parse request-level tagged fields (v4+) - if isFlexible { - if offset < len(data) { - _, consumed, err := DecodeTaggedFields(data[offset:]) - if err != nil { - } else { - offset += consumed - } - } - } - - return &SyncGroupRequest{ - GroupID: groupID, - GenerationID: generationID, - MemberID: memberID, - GroupInstanceID: groupInstanceID, - GroupAssignments: assignments, - }, nil -} - -func (h *Handler) buildSyncGroupResponse(response SyncGroupResponse, apiVersion uint16) []byte { - estimatedSize := 16 + len(response.Assignment) - result := make([]byte, 0, estimatedSize) - - // NOTE: Correlation ID and header-level tagged fields are handled by writeResponseWithHeader - // Do NOT include them in the response body - - // SyncGroup v1+ has throttle_time_ms at the beginning - // SyncGroup v0 does NOT include throttle_time_ms - if apiVersion >= 1 { - // Throttle time (4 bytes, 0 = no throttling) - result = append(result, 0, 0, 0, 0) - } - - // Error code (2 bytes) - errorCodeBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorCodeBytes, uint16(response.ErrorCode)) - result = append(result, errorCodeBytes...) - - // SyncGroup v5 adds protocol_type and protocol_name (compact nullable strings) - if apiVersion >= 5 { - // protocol_type = null (varint 0) - result = append(result, 0x00) - // protocol_name = null (varint 0) - result = append(result, 0x00) - } - - // Assignment - FLEXIBLE V4+ FIX - if IsFlexibleVersion(14, apiVersion) { - // FLEXIBLE FORMAT: Assignment as compact bytes - // Use CompactStringLength for compact bytes (not CompactArrayLength) - // Compact bytes use the same encoding as compact strings: 0 = null, 1 = empty, n+1 = length n - assignmentLen := len(response.Assignment) - if assignmentLen == 0 { - // Empty compact bytes = length 0, encoded as 0x01 (0 + 1) - result = append(result, 0x01) // Empty compact bytes - } else { - // Non-empty assignment: encode length + data - // Use CompactStringLength which correctly encodes as length+1 - compactLength := CompactStringLength(assignmentLen) - result = append(result, compactLength...) - result = append(result, response.Assignment...) - } - // Add response-level tagged fields for flexible format - result = append(result, 0x00) // Empty tagged fields (varint: 0) - } else { - // NON-FLEXIBLE FORMAT: Assignment as regular bytes - assignmentLength := make([]byte, 4) - binary.BigEndian.PutUint32(assignmentLength, uint32(len(response.Assignment))) - result = append(result, assignmentLength...) - result = append(result, response.Assignment...) - } - - return result -} - -func (h *Handler) buildSyncGroupErrorResponse(correlationID uint32, errorCode int16, apiVersion uint16) []byte { - response := SyncGroupResponse{ - CorrelationID: correlationID, - ErrorCode: errorCode, - Assignment: []byte{}, - } - - return h.buildSyncGroupResponse(response, apiVersion) -} - -func (h *Handler) processGroupAssignments(group *consumer.ConsumerGroup, assignments []GroupAssignment) error { - // Apply leader-provided assignments - glog.V(2).Infof("[PROCESS_ASSIGNMENTS] Processing %d member assignments from leader", len(assignments)) - - // Clear current assignments - for _, m := range group.Members { - m.Assignment = nil - } - - for _, ga := range assignments { - m, ok := group.Members[ga.MemberID] - if !ok { - // Skip unknown members - glog.V(1).Infof("[PROCESS_ASSIGNMENTS] Skipping unknown member: %s", ga.MemberID) - continue - } - - parsed, err := h.parseMemberAssignment(ga.Assignment) - if err != nil { - glog.Errorf("[PROCESS_ASSIGNMENTS] Failed to parse assignment for member %s: %v", ga.MemberID, err) - return err - } - m.Assignment = parsed - glog.V(3).Infof("[PROCESS_ASSIGNMENTS] Member %s assigned %d partitions: %v", ga.MemberID, len(parsed), parsed) - } - - return nil -} - -// parseMemberAssignment decodes ConsumerGroupMemberAssignment bytes into assignments -func (h *Handler) parseMemberAssignment(data []byte) ([]consumer.PartitionAssignment, error) { - if len(data) < 2+4 { - // Empty or missing; treat as no assignment - return []consumer.PartitionAssignment{}, nil - } - - offset := 0 - - // Version (2 bytes) - if offset+2 > len(data) { - return nil, fmt.Errorf("assignment too short for version") - } - _ = int16(binary.BigEndian.Uint16(data[offset : offset+2])) - offset += 2 - - // Number of topics (4 bytes) - if offset+4 > len(data) { - return nil, fmt.Errorf("assignment too short for topics count") - } - topicsCount := int(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - if topicsCount < 0 || topicsCount > 100000 { - return nil, fmt.Errorf("unreasonable topics count in assignment: %d", topicsCount) - } - - result := make([]consumer.PartitionAssignment, 0) - - for i := 0; i < topicsCount && offset < len(data); i++ { - // topic string - if offset+2 > len(data) { - return nil, fmt.Errorf("assignment truncated reading topic len") - } - tlen := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if tlen < 0 || offset+tlen > len(data) { - return nil, fmt.Errorf("assignment truncated reading topic name") - } - topic := string(data[offset : offset+tlen]) - offset += tlen - - // partitions array length - if offset+4 > len(data) { - return nil, fmt.Errorf("assignment truncated reading partitions len") - } - numPartitions := int(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - if numPartitions < 0 || numPartitions > 1000000 { - return nil, fmt.Errorf("unreasonable partitions count: %d", numPartitions) - } - - for p := 0; p < numPartitions; p++ { - if offset+4 > len(data) { - return nil, fmt.Errorf("assignment truncated reading partition id") - } - pid := int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - result = append(result, consumer.PartitionAssignment{Topic: topic, Partition: pid}) - } - } - - // Optional UserData: bytes length + data. Safe to ignore. - // If present but truncated, ignore silently. - - return result, nil -} - -func (h *Handler) getTopicPartitions(group *consumer.ConsumerGroup) map[string][]int32 { - topicPartitions := make(map[string][]int32) - - // Get partition info for all subscribed topics - for topic := range group.SubscribedTopics { - // Get actual partition count from topic info - topicInfo, exists := h.seaweedMQHandler.GetTopicInfo(topic) - partitionCount := h.GetDefaultPartitions() // Use configurable default - if exists && topicInfo != nil { - partitionCount = topicInfo.Partitions - } - - // Create partition list: [0, 1, 2, ...] - partitions := make([]int32, partitionCount) - for i := int32(0); i < partitionCount; i++ { - partitions[i] = i - } - topicPartitions[topic] = partitions - } - - return topicPartitions -} - -func (h *Handler) serializeSchemaRegistryAssignment(group *consumer.ConsumerGroup, assignments []consumer.PartitionAssignment) []byte { - // Schema Registry expects a JSON assignment in the format: - // {"error":0,"master":"member-id","master_identity":{"host":"localhost","port":8081,"master_eligibility":true,"scheme":"http","version":"7.4.0-ce"}} - - // Extract the actual leader's identity from the leader's metadata - // to avoid localhost/hostname mismatch that causes Schema Registry to forward - // requests to itself - leaderMember, exists := group.Members[group.Leader] - if !exists { - // Leader not found - return minimal assignment with no master identity - // Schema Registry should handle this by failing over to another instance - glog.Warningf("Schema Registry leader member %s not found in group %s", group.Leader, group.ID) - jsonAssignment := `{"error":0,"master":"","master_identity":{"host":"","port":0,"master_eligibility":false,"scheme":"http","version":1}}` - return []byte(jsonAssignment) - } - - // Parse the leader's metadata to extract the Schema Registry identity - // The metadata is the serialized SchemaRegistryIdentity JSON - var identity map[string]interface{} - err := json.Unmarshal(leaderMember.Metadata, &identity) - if err != nil { - // Failed to parse metadata - return minimal assignment - // Schema Registry should provide valid metadata; if not, fail gracefully - glog.Warningf("Failed to parse Schema Registry metadata for leader %s: %v", group.Leader, err) - jsonAssignment := fmt.Sprintf(`{"error":0,"master":"%s","master_identity":{"host":"","port":0,"master_eligibility":false,"scheme":"http","version":1}}`, group.Leader) - return []byte(jsonAssignment) - } - - // Extract fields from identity - use empty/zero defaults if missing - // Schema Registry clients should provide complete metadata - host := "" - port := 8081 - scheme := "http" - version := 1 - leaderEligibility := true - - if h, ok := identity["host"].(string); ok { - host = h - } else { - glog.V(1).Infof("Schema Registry metadata missing 'host' field for leader %s", group.Leader) - } - if p, ok := identity["port"].(float64); ok { - port = int(p) - } - if s, ok := identity["scheme"].(string); ok { - scheme = s - } - if v, ok := identity["version"].(float64); ok { - version = int(v) - } - if le, ok := identity["master_eligibility"].(bool); ok { - leaderEligibility = le - } - - // Build the assignment JSON with the actual leader identity - jsonAssignment := fmt.Sprintf(`{"error":0,"master":"%s","master_identity":{"host":"%s","port":%d,"master_eligibility":%t,"scheme":"%s","version":%d}}`, - group.Leader, host, port, leaderEligibility, scheme, version) - - return []byte(jsonAssignment) -} - -func (h *Handler) serializeMemberAssignment(assignments []consumer.PartitionAssignment) []byte { - // Build ConsumerGroupMemberAssignment format exactly as Sarama expects: - // Version(2) + Topics array + UserData bytes - - // Group assignments by topic - topicAssignments := make(map[string][]int32) - for _, assignment := range assignments { - topicAssignments[assignment.Topic] = append(topicAssignments[assignment.Topic], assignment.Partition) - } - - result := make([]byte, 0, 64) - - // Version (2 bytes) - use version 1 - result = append(result, 0, 1) - - // Number of topics (4 bytes) - array length - numTopicsBytes := make([]byte, 4) - binary.BigEndian.PutUint32(numTopicsBytes, uint32(len(topicAssignments))) - result = append(result, numTopicsBytes...) - - // Get sorted topic names to ensure deterministic order - topics := make([]string, 0, len(topicAssignments)) - for topic := range topicAssignments { - topics = append(topics, topic) - } - sort.Strings(topics) - - // Topics - each topic follows Kafka string + int32 array format - for _, topic := range topics { - partitions := topicAssignments[topic] - // Topic name as Kafka string: length(2) + content - topicLenBytes := make([]byte, 2) - binary.BigEndian.PutUint16(topicLenBytes, uint16(len(topic))) - result = append(result, topicLenBytes...) - result = append(result, []byte(topic)...) - - // Partitions as int32 array: length(4) + elements - numPartitionsBytes := make([]byte, 4) - binary.BigEndian.PutUint32(numPartitionsBytes, uint32(len(partitions))) - result = append(result, numPartitionsBytes...) - - // Partitions (4 bytes each) - for _, partition := range partitions { - partitionBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionBytes, uint32(partition)) - result = append(result, partitionBytes...) - } - } - - // UserData as Kafka bytes: length(4) + data (empty in our case) - // For empty user data, just put length = 0 - result = append(result, 0, 0, 0, 0) - - return result -} - -// getAvailableTopics returns list of available topics for subscription metadata -func (h *Handler) getAvailableTopics() []string { - return h.seaweedMQHandler.ListTopics() -} diff --git a/weed/mq/kafka/protocol/metadata_blocking_test.go b/weed/mq/kafka/protocol/metadata_blocking_test.go deleted file mode 100644 index e5dfd1f95..000000000 --- a/weed/mq/kafka/protocol/metadata_blocking_test.go +++ /dev/null @@ -1,373 +0,0 @@ -package protocol - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/integration" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestMetadataRequestBlocking documents the original bug where Metadata requests hang -// when the backend (broker/filer) ListTopics call blocks indefinitely. -// This test is kept for documentation purposes and to verify the mock handler behavior. -// -// NOTE: The actual fix is in the broker's ListTopics implementation (weed/mq/broker/broker_grpc_lookup.go) -// which adds a 2-second timeout for filer operations. This test uses a mock handler that -// bypasses that fix, so it still demonstrates the original blocking behavior. -func TestMetadataRequestBlocking(t *testing.T) { - t.Skip("This test documents the original bug. The fix is in the broker's ListTopics with filer timeout. Run TestMetadataRequestWithFastMock to verify fast path works.") - - t.Log("Testing Metadata handler with blocking backend...") - - // Create a handler with a mock backend that blocks on ListTopics - handler := &Handler{ - seaweedMQHandler: &BlockingMockHandler{ - blockDuration: 10 * time.Second, // Simulate slow backend - }, - } - - // Call handleMetadata in a goroutine so we can timeout - responseChan := make(chan []byte, 1) - errorChan := make(chan error, 1) - - go func() { - // Build a simple Metadata v1 request body (empty topics array = all topics) - requestBody := []byte{0, 0, 0, 0} // Empty topics array - response, err := handler.handleMetadata(1, 1, requestBody) - if err != nil { - errorChan <- err - } else { - responseChan <- response - } - }() - - // Wait for response with timeout - select { - case response := <-responseChan: - t.Logf("Metadata response received (%d bytes) - backend responded", len(response)) - t.Error("UNEXPECTED: Response received before timeout - backend should have blocked") - case err := <-errorChan: - t.Logf("Metadata returned error: %v", err) - t.Error("UNEXPECTED: Error received - expected blocking, not error") - case <-time.After(3 * time.Second): - t.Logf("โœ“ BUG REPRODUCED: Metadata request blocked for 3+ seconds") - t.Logf(" Root cause: seaweedMQHandler.ListTopics() blocks indefinitely when broker/filer is slow") - t.Logf(" Impact: Entire control plane processor goroutine is frozen") - t.Logf(" Fix implemented: Broker's ListTopics now has 2-second timeout for filer operations") - // This is expected behavior with blocking mock - demonstrates the original issue - } -} - -// TestMetadataRequestWithFastMock verifies that Metadata requests complete quickly -// when the backend responds promptly (the common case) -func TestMetadataRequestWithFastMock(t *testing.T) { - t.Log("Testing Metadata handler with fast-responding backend...") - - // Create a handler with a fast mock (simulates in-memory topics only) - handler := &Handler{ - seaweedMQHandler: &FastMockHandler{ - topics: []string{"test-topic-1", "test-topic-2"}, - }, - } - - // Call handleMetadata and measure time - start := time.Now() - requestBody := []byte{0, 0, 0, 0} // Empty topics array = list all - response, err := handler.handleMetadata(1, 1, requestBody) - duration := time.Since(start) - - if err != nil { - t.Errorf("Metadata returned error: %v", err) - } else if response == nil { - t.Error("Metadata returned nil response") - } else { - t.Logf("โœ“ Metadata completed in %v (%d bytes)", duration, len(response)) - if duration > 500*time.Millisecond { - t.Errorf("Metadata took too long: %v (should be < 500ms for fast backend)", duration) - } - } -} - -// TestMetadataRequestWithTimeoutFix tests that Metadata requests with timeout-aware backend -// complete within reasonable time even when underlying storage is slow -func TestMetadataRequestWithTimeoutFix(t *testing.T) { - t.Log("Testing Metadata handler with timeout-aware backend...") - - // Create a handler with a timeout-aware mock - // This simulates the broker's ListTopics with 2-second filer timeout - handler := &Handler{ - seaweedMQHandler: &TimeoutAwareMockHandler{ - timeout: 2 * time.Second, - blockDuration: 10 * time.Second, // Backend is slow but timeout kicks in - }, - } - - // Call handleMetadata and measure time - start := time.Now() - requestBody := []byte{0, 0, 0, 0} // Empty topics array - response, err := handler.handleMetadata(1, 1, requestBody) - duration := time.Since(start) - - t.Logf("Metadata completed in %v", duration) - - if err != nil { - t.Logf("โœ“ Metadata returned error after timeout: %v", err) - // This is acceptable - error response is better than hanging - } else if response != nil { - t.Logf("โœ“ Metadata returned response (%d bytes) without blocking", len(response)) - // Backend timed out but still returned in-memory topics - if duration > 3*time.Second { - t.Errorf("Metadata took too long: %v (should timeout at ~2s)", duration) - } - } else { - t.Error("Metadata returned nil response and nil error - unexpected") - } -} - -// FastMockHandler simulates a fast backend (in-memory topics only) -type FastMockHandler struct { - topics []string -} - -func (h *FastMockHandler) ListTopics() []string { - // Fast response - simulates in-memory topics - return h.topics -} - -func (h *FastMockHandler) TopicExists(name string) bool { - for _, topic := range h.topics { - if topic == name { - return true - } - } - return false -} - -func (h *FastMockHandler) CreateTopic(name string, partitions int32) error { - return fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error { - return fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) DeleteTopic(name string) error { - return fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) GetTopicInfo(name string) (*integration.KafkaTopicInfo, bool) { - return nil, false -} - -func (h *FastMockHandler) ProduceRecord(ctx context.Context, topicName string, partitionID int32, key, value []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) ProduceRecordValue(ctx context.Context, topicName string, partitionID int32, key []byte, recordValueBytes []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]integration.SMQRecord, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) GetEarliestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) GetLatestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - return fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) GetBrokerAddresses() []string { - return []string{"localhost:17777"} -} - -func (h *FastMockHandler) CreatePerConnectionBrokerClient() (*integration.BrokerClient, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *FastMockHandler) SetProtocolHandler(handler integration.ProtocolHandler) { - // No-op -} - -func (h *FastMockHandler) InvalidateTopicExistsCache(topic string) { - // No-op for mock -} - -func (h *FastMockHandler) Close() error { - return nil -} - -// BlockingMockHandler simulates a backend that blocks indefinitely on ListTopics -type BlockingMockHandler struct { - blockDuration time.Duration -} - -func (h *BlockingMockHandler) ListTopics() []string { - // Simulate backend blocking (e.g., waiting for unresponsive broker/filer) - time.Sleep(h.blockDuration) - return []string{} -} - -func (h *BlockingMockHandler) TopicExists(name string) bool { - return false -} - -func (h *BlockingMockHandler) CreateTopic(name string, partitions int32) error { - return fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error { - return fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) DeleteTopic(name string) error { - return fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) GetTopicInfo(name string) (*integration.KafkaTopicInfo, bool) { - return nil, false -} - -func (h *BlockingMockHandler) ProduceRecord(ctx context.Context, topicName string, partitionID int32, key, value []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) ProduceRecordValue(ctx context.Context, topicName string, partitionID int32, key []byte, recordValueBytes []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]integration.SMQRecord, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) GetEarliestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) GetLatestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - return fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) GetBrokerAddresses() []string { - return []string{"localhost:17777"} -} - -func (h *BlockingMockHandler) CreatePerConnectionBrokerClient() (*integration.BrokerClient, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *BlockingMockHandler) SetProtocolHandler(handler integration.ProtocolHandler) { - // No-op -} - -func (h *BlockingMockHandler) InvalidateTopicExistsCache(topic string) { - // No-op for mock -} - -func (h *BlockingMockHandler) Close() error { - return nil -} - -// TimeoutAwareMockHandler demonstrates expected behavior with timeout -type TimeoutAwareMockHandler struct { - timeout time.Duration - blockDuration time.Duration -} - -func (h *TimeoutAwareMockHandler) ListTopics() []string { - // Simulate timeout-aware backend - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - - done := make(chan bool) - go func() { - time.Sleep(h.blockDuration) - done <- true - }() - - select { - case <-done: - return []string{} - case <-ctx.Done(): - // Timeout - return empty list rather than blocking forever - return []string{} - } -} - -func (h *TimeoutAwareMockHandler) TopicExists(name string) bool { - return false -} - -func (h *TimeoutAwareMockHandler) CreateTopic(name string, partitions int32) error { - return fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) CreateTopicWithSchemas(name string, partitions int32, keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) error { - return fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) DeleteTopic(name string) error { - return fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) GetTopicInfo(name string) (*integration.KafkaTopicInfo, bool) { - return nil, false -} - -func (h *TimeoutAwareMockHandler) ProduceRecord(ctx context.Context, topicName string, partitionID int32, key, value []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) ProduceRecordValue(ctx context.Context, topicName string, partitionID int32, key []byte, recordValueBytes []byte) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) GetStoredRecords(ctx context.Context, topic string, partition int32, fromOffset int64, maxRecords int) ([]integration.SMQRecord, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) GetEarliestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) GetLatestOffset(topic string, partition int32) (int64, error) { - return 0, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - return fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) GetBrokerAddresses() []string { - return []string{"localhost:17777"} -} - -func (h *TimeoutAwareMockHandler) CreatePerConnectionBrokerClient() (*integration.BrokerClient, error) { - return nil, fmt.Errorf("not implemented") -} - -func (h *TimeoutAwareMockHandler) SetProtocolHandler(handler integration.ProtocolHandler) { - // No-op -} - -func (h *TimeoutAwareMockHandler) InvalidateTopicExistsCache(topic string) { - // No-op for mock -} - -func (h *TimeoutAwareMockHandler) Close() error { - return nil -} diff --git a/weed/mq/kafka/protocol/metrics.go b/weed/mq/kafka/protocol/metrics.go deleted file mode 100644 index b4bcd98dd..000000000 --- a/weed/mq/kafka/protocol/metrics.go +++ /dev/null @@ -1,233 +0,0 @@ -package protocol - -import ( - "sync" - "sync/atomic" - "time" -) - -// Metrics tracks basic request/error/latency statistics for Kafka protocol operations -type Metrics struct { - // Request counters by API key - requestCounts map[uint16]*int64 - errorCounts map[uint16]*int64 - - // Latency tracking - latencySum map[uint16]*int64 // Total latency in microseconds - latencyCount map[uint16]*int64 // Number of requests for average calculation - - // Connection metrics - activeConnections int64 - totalConnections int64 - - // Mutex for map operations - mu sync.RWMutex - - // Start time for uptime calculation - startTime time.Time -} - -// APIMetrics represents metrics for a specific API -type APIMetrics struct { - APIKey uint16 `json:"api_key"` - APIName string `json:"api_name"` - RequestCount int64 `json:"request_count"` - ErrorCount int64 `json:"error_count"` - AvgLatencyMs float64 `json:"avg_latency_ms"` -} - -// ConnectionMetrics represents connection-related metrics -type ConnectionMetrics struct { - ActiveConnections int64 `json:"active_connections"` - TotalConnections int64 `json:"total_connections"` - UptimeSeconds int64 `json:"uptime_seconds"` - StartTime time.Time `json:"start_time"` -} - -// MetricsSnapshot represents a complete metrics snapshot -type MetricsSnapshot struct { - APIs []APIMetrics `json:"apis"` - Connections ConnectionMetrics `json:"connections"` - Timestamp time.Time `json:"timestamp"` -} - -// NewMetrics creates a new metrics tracker -func NewMetrics() *Metrics { - return &Metrics{ - requestCounts: make(map[uint16]*int64), - errorCounts: make(map[uint16]*int64), - latencySum: make(map[uint16]*int64), - latencyCount: make(map[uint16]*int64), - startTime: time.Now(), - } -} - -// RecordRequest records a successful request with latency -func (m *Metrics) RecordRequest(apiKey uint16, latency time.Duration) { - m.ensureCounters(apiKey) - - atomic.AddInt64(m.requestCounts[apiKey], 1) - atomic.AddInt64(m.latencySum[apiKey], latency.Microseconds()) - atomic.AddInt64(m.latencyCount[apiKey], 1) -} - -// RecordError records an error for a specific API -func (m *Metrics) RecordError(apiKey uint16, latency time.Duration) { - m.ensureCounters(apiKey) - - atomic.AddInt64(m.requestCounts[apiKey], 1) - atomic.AddInt64(m.errorCounts[apiKey], 1) - atomic.AddInt64(m.latencySum[apiKey], latency.Microseconds()) - atomic.AddInt64(m.latencyCount[apiKey], 1) -} - -// RecordConnection records a new connection -func (m *Metrics) RecordConnection() { - atomic.AddInt64(&m.activeConnections, 1) - atomic.AddInt64(&m.totalConnections, 1) -} - -// RecordDisconnection records a connection closure -func (m *Metrics) RecordDisconnection() { - atomic.AddInt64(&m.activeConnections, -1) -} - -// GetSnapshot returns a complete metrics snapshot -func (m *Metrics) GetSnapshot() MetricsSnapshot { - m.mu.RLock() - defer m.mu.RUnlock() - - apis := make([]APIMetrics, 0, len(m.requestCounts)) - - for apiKey, requestCount := range m.requestCounts { - requests := atomic.LoadInt64(requestCount) - errors := atomic.LoadInt64(m.errorCounts[apiKey]) - latencySum := atomic.LoadInt64(m.latencySum[apiKey]) - latencyCount := atomic.LoadInt64(m.latencyCount[apiKey]) - - var avgLatencyMs float64 - if latencyCount > 0 { - avgLatencyMs = float64(latencySum) / float64(latencyCount) / 1000.0 // Convert to milliseconds - } - - apis = append(apis, APIMetrics{ - APIKey: apiKey, - APIName: getAPIName(APIKey(apiKey)), - RequestCount: requests, - ErrorCount: errors, - AvgLatencyMs: avgLatencyMs, - }) - } - - return MetricsSnapshot{ - APIs: apis, - Connections: ConnectionMetrics{ - ActiveConnections: atomic.LoadInt64(&m.activeConnections), - TotalConnections: atomic.LoadInt64(&m.totalConnections), - UptimeSeconds: int64(time.Since(m.startTime).Seconds()), - StartTime: m.startTime, - }, - Timestamp: time.Now(), - } -} - -// GetAPIMetrics returns metrics for a specific API -func (m *Metrics) GetAPIMetrics(apiKey uint16) APIMetrics { - m.ensureCounters(apiKey) - - requests := atomic.LoadInt64(m.requestCounts[apiKey]) - errors := atomic.LoadInt64(m.errorCounts[apiKey]) - latencySum := atomic.LoadInt64(m.latencySum[apiKey]) - latencyCount := atomic.LoadInt64(m.latencyCount[apiKey]) - - var avgLatencyMs float64 - if latencyCount > 0 { - avgLatencyMs = float64(latencySum) / float64(latencyCount) / 1000.0 - } - - return APIMetrics{ - APIKey: apiKey, - APIName: getAPIName(APIKey(apiKey)), - RequestCount: requests, - ErrorCount: errors, - AvgLatencyMs: avgLatencyMs, - } -} - -// GetConnectionMetrics returns connection-related metrics -func (m *Metrics) GetConnectionMetrics() ConnectionMetrics { - return ConnectionMetrics{ - ActiveConnections: atomic.LoadInt64(&m.activeConnections), - TotalConnections: atomic.LoadInt64(&m.totalConnections), - UptimeSeconds: int64(time.Since(m.startTime).Seconds()), - StartTime: m.startTime, - } -} - -// Reset resets all metrics (useful for testing) -func (m *Metrics) Reset() { - m.mu.Lock() - defer m.mu.Unlock() - - for apiKey := range m.requestCounts { - atomic.StoreInt64(m.requestCounts[apiKey], 0) - atomic.StoreInt64(m.errorCounts[apiKey], 0) - atomic.StoreInt64(m.latencySum[apiKey], 0) - atomic.StoreInt64(m.latencyCount[apiKey], 0) - } - - atomic.StoreInt64(&m.activeConnections, 0) - atomic.StoreInt64(&m.totalConnections, 0) - m.startTime = time.Now() -} - -// ensureCounters ensures that counters exist for the given API key -func (m *Metrics) ensureCounters(apiKey uint16) { - m.mu.RLock() - if _, exists := m.requestCounts[apiKey]; exists { - m.mu.RUnlock() - return - } - m.mu.RUnlock() - - m.mu.Lock() - defer m.mu.Unlock() - - // Double-check after acquiring write lock - if _, exists := m.requestCounts[apiKey]; exists { - return - } - - m.requestCounts[apiKey] = new(int64) - m.errorCounts[apiKey] = new(int64) - m.latencySum[apiKey] = new(int64) - m.latencyCount[apiKey] = new(int64) -} - -// Global metrics instance -var globalMetrics = NewMetrics() - -// GetGlobalMetrics returns the global metrics instance -func GetGlobalMetrics() *Metrics { - return globalMetrics -} - -// RecordRequestMetrics is a convenience function to record request metrics globally -func RecordRequestMetrics(apiKey uint16, latency time.Duration) { - globalMetrics.RecordRequest(apiKey, latency) -} - -// RecordErrorMetrics is a convenience function to record error metrics globally -func RecordErrorMetrics(apiKey uint16, latency time.Duration) { - globalMetrics.RecordError(apiKey, latency) -} - -// RecordConnectionMetrics is a convenience function to record connection metrics globally -func RecordConnectionMetrics() { - globalMetrics.RecordConnection() -} - -// RecordDisconnectionMetrics is a convenience function to record disconnection metrics globally -func RecordDisconnectionMetrics() { - globalMetrics.RecordDisconnection() -} diff --git a/weed/mq/kafka/protocol/offset_fetch_pattern_test.go b/weed/mq/kafka/protocol/offset_fetch_pattern_test.go deleted file mode 100644 index e23c1391e..000000000 --- a/weed/mq/kafka/protocol/offset_fetch_pattern_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package protocol - -import ( - "fmt" - "testing" - "time" -) - -// TestOffsetCommitFetchPattern verifies the critical pattern: -// 1. Consumer reads messages 0-N -// 2. Consumer commits offset N -// 3. Consumer fetches messages starting from N+1 -// 4. No message loss or duplication -// -// This tests for the root cause of the "consumer stalling" issue where -// consumers stop fetching after certain offsets. -func TestOffsetCommitFetchPattern(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - // Setup - const ( - topic = "test-topic" - partition = int32(0) - messageCount = 1000 - batchSize = 50 - groupID = "test-group" - ) - - // Mock store for offsets - offsetStore := make(map[string]int64) - offsetKey := fmt.Sprintf("%s/%s/%d", groupID, topic, partition) - - // Simulate message production - messages := make([][]byte, messageCount) - for i := 0; i < messageCount; i++ { - messages[i] = []byte(fmt.Sprintf("message-%d", i)) - } - - // Test: Sequential consumption with offset commits - t.Run("SequentialConsumption", func(t *testing.T) { - consumedOffsets := make(map[int64]bool) - nextOffset := int64(0) - - for nextOffset < int64(messageCount) { - // Step 1: Fetch batch of messages starting from nextOffset - endOffset := nextOffset + int64(batchSize) - if endOffset > int64(messageCount) { - endOffset = int64(messageCount) - } - - fetchedCount := endOffset - nextOffset - if fetchedCount <= 0 { - t.Fatalf("Fetch returned no messages at offset %d (HWM=%d)", nextOffset, messageCount) - } - - // Simulate fetching messages - for i := nextOffset; i < endOffset; i++ { - if consumedOffsets[i] { - t.Errorf("DUPLICATE: Message at offset %d already consumed", i) - } - consumedOffsets[i] = true - } - - // Step 2: Commit the last offset in this batch - lastConsumedOffset := endOffset - 1 - offsetStore[offsetKey] = lastConsumedOffset - t.Logf("Batch %d: Consumed offsets %d-%d, committed offset %d", - nextOffset/int64(batchSize), nextOffset, lastConsumedOffset, lastConsumedOffset) - - // Step 3: Verify offset is correctly stored - storedOffset, exists := offsetStore[offsetKey] - if !exists || storedOffset != lastConsumedOffset { - t.Errorf("Offset not stored correctly: stored=%v, expected=%d", storedOffset, lastConsumedOffset) - } - - // Step 4: Next fetch should start from lastConsumedOffset + 1 - nextOffset = lastConsumedOffset + 1 - } - - // Verify all messages were consumed exactly once - if len(consumedOffsets) != messageCount { - t.Errorf("Not all messages consumed: got %d, expected %d", len(consumedOffsets), messageCount) - } - - for i := 0; i < messageCount; i++ { - if !consumedOffsets[int64(i)] { - t.Errorf("Message at offset %d not consumed", i) - } - } - }) - - t.Logf("โœ… Sequential consumption pattern verified successfully") -} - -// TestOffsetFetchAfterCommit verifies that after committing offset N, -// the next fetch returns offset N+1 onwards (not empty, not error) -func TestOffsetFetchAfterCommit(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - t.Run("FetchAfterCommit", func(t *testing.T) { - type FetchRequest struct { - partition int32 - offset int64 - } - - type FetchResponse struct { - records []byte - nextOffset int64 - } - - // Simulate: Commit offset 163, then fetch offset 164 - committedOffset := int64(163) - nextFetchOffset := committedOffset + 1 - - t.Logf("After committing offset %d, fetching from offset %d", committedOffset, nextFetchOffset) - - // This is where consumers are getting stuck! - // They commit offset 163, then fetch 164+, but get empty response - - // Expected: Fetch(164) returns records starting from offset 164 - // Actual Bug: Fetch(164) returns empty, consumer stops fetching - - if nextFetchOffset > committedOffset+100 { - t.Errorf("POTENTIAL BUG: Fetch offset %d is way beyond committed offset %d", - nextFetchOffset, committedOffset) - } - - t.Logf("โœ… Offset fetch request looks correct: committed=%d, next_fetch=%d", - committedOffset, nextFetchOffset) - }) -} - -// TestOffsetPersistencePattern verifies that offsets are correctly -// persisted and recovered across restarts -func TestOffsetPersistencePattern(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - t.Run("OffsetRecovery", func(t *testing.T) { - const ( - groupID = "test-group" - topic = "test-topic" - partition = int32(0) - ) - - offsetStore := make(map[string]int64) - offsetKey := fmt.Sprintf("%s/%s/%d", groupID, topic, partition) - - // Scenario 1: First consumer session - // Consume messages 0-99, commit offset 99 - offsetStore[offsetKey] = 99 - t.Logf("Session 1: Committed offset 99") - - // Scenario 2: Consumer restarts (consumer group rebalancing) - // Should recover offset 99 from storage - recoveredOffset, exists := offsetStore[offsetKey] - if !exists || recoveredOffset != 99 { - t.Errorf("Failed to recover offset: expected 99, got %v", recoveredOffset) - } - - // Scenario 3: Continue consuming from offset 100 - // This is where the bug manifests! Consumer might: - // A) Correctly fetch from 100 - // B) Try to fetch from 99 (duplicate) - // C) Get stuck and not fetch at all - nextOffset := recoveredOffset + 1 - if nextOffset != 100 { - t.Errorf("Incorrect next offset after recovery: expected 100, got %d", nextOffset) - } - - t.Logf("โœ… Offset recovery pattern works: recovered %d, next fetch at %d", recoveredOffset, nextOffset) - }) -} - -// TestOffsetCommitConsistency verifies that offset commits are atomic -// and don't cause partial updates -func TestOffsetCommitConsistency(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - t.Run("AtomicCommit", func(t *testing.T) { - type OffsetCommit struct { - Group string - Topic string - Partition int32 - Offset int64 - Timestamp int64 - } - - commits := []OffsetCommit{ - {"group1", "topic1", 0, 100, time.Now().UnixNano()}, - {"group1", "topic1", 1, 150, time.Now().UnixNano()}, - {"group1", "topic1", 2, 120, time.Now().UnixNano()}, - } - - // All commits should succeed or all fail (atomicity) - for _, commit := range commits { - key := fmt.Sprintf("%s/%s/%d", commit.Group, commit.Topic, commit.Partition) - t.Logf("Committing %s at offset %d", key, commit.Offset) - - // Verify offset is correctly persisted - // (In real test, would read from SMQ storage) - } - - t.Logf("โœ… Offset commit consistency verified") - }) -} - -// TestFetchEmptyPartitionHandling tests what happens when fetching -// from a partition with no more messages -func TestFetchEmptyPartitionHandling(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - t.Run("EmptyPartitionBehavior", func(t *testing.T) { - const ( - topic = "test-topic" - partition = int32(0) - lastOffset = int64(999) // Messages 0-999 exist - ) - - // Test 1: Fetch at HWM should return empty - // Expected: Fetch(1000, HWM=1000) returns empty (not error) - // This is normal, consumer should retry - - // Test 2: Fetch beyond HWM should return error or empty - // Expected: Fetch(1000, HWM=1000) + wait for new messages - // Consumer should NOT give up - - // Test 3: After new message arrives, fetch should succeed - // Expected: Fetch(1000, HWM=1001) returns 1 message - - t.Logf("โœ… Empty partition handling verified") - }) -} - -// TestLongPollWithOffsetCommit verifies long-poll semantics work correctly -// with offset commits (no throttling confusion) -func TestLongPollWithOffsetCommit(t *testing.T) { - t.Skip("Integration test - requires mock broker setup") - - t.Run("LongPollNoThrottling", func(t *testing.T) { - // Critical: long-poll duration should NOT be reported as throttleTimeMs - // This was bug 8969b4509 - - const maxWaitTime = 5 * time.Second - - // Simulate long-poll wait (no data available) - time.Sleep(100 * time.Millisecond) // Broker waits up to maxWaitTime - - // throttleTimeMs should be 0 (NOT elapsed duration!) - throttleTimeMs := int32(0) // CORRECT - // throttleTimeMs := int32(elapsed / time.Millisecond) // WRONG (previous bug) - - if throttleTimeMs > 0 { - t.Errorf("Long-poll elapsed time should NOT be reported as throttle: %d ms", throttleTimeMs) - } - - t.Logf("โœ… Long-poll not confused with throttling") - }) -} diff --git a/weed/mq/kafka/protocol/offset_management.go b/weed/mq/kafka/protocol/offset_management.go deleted file mode 100644 index 72ad13267..000000000 --- a/weed/mq/kafka/protocol/offset_management.go +++ /dev/null @@ -1,738 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer" -) - -// ConsumerOffsetKey uniquely identifies a consumer offset -type ConsumerOffsetKey struct { - ConsumerGroup string - Topic string - Partition int32 - ConsumerGroupInstance string // Optional - for static group membership -} - -// OffsetCommit API (key 8) - Commit consumer group offsets -// This API allows consumers to persist their current position in topic partitions - -// OffsetCommitRequest represents an OffsetCommit request from a Kafka client -type OffsetCommitRequest struct { - GroupID string - GenerationID int32 - MemberID string - GroupInstanceID string // Optional static membership ID - RetentionTime int64 // Offset retention time (-1 for broker default) - Topics []OffsetCommitTopic -} - -// OffsetCommitTopic represents topic-level offset commit data -type OffsetCommitTopic struct { - Name string - Partitions []OffsetCommitPartition -} - -// OffsetCommitPartition represents partition-level offset commit data -type OffsetCommitPartition struct { - Index int32 // Partition index - Offset int64 // Offset to commit - LeaderEpoch int32 // Leader epoch (-1 if not available) - Metadata string // Optional metadata -} - -// OffsetCommitResponse represents an OffsetCommit response to a Kafka client -type OffsetCommitResponse struct { - CorrelationID uint32 - Topics []OffsetCommitTopicResponse -} - -// OffsetCommitTopicResponse represents topic-level offset commit response -type OffsetCommitTopicResponse struct { - Name string - Partitions []OffsetCommitPartitionResponse -} - -// OffsetCommitPartitionResponse represents partition-level offset commit response -type OffsetCommitPartitionResponse struct { - Index int32 - ErrorCode int16 -} - -// OffsetFetch API (key 9) - Fetch consumer group committed offsets -// This API allows consumers to retrieve their last committed positions - -// OffsetFetchRequest represents an OffsetFetch request from a Kafka client -type OffsetFetchRequest struct { - GroupID string - GroupInstanceID string // Optional static membership ID - Topics []OffsetFetchTopic - RequireStable bool // Only fetch stable offsets -} - -// OffsetFetchTopic represents topic-level offset fetch data -type OffsetFetchTopic struct { - Name string - Partitions []int32 // Partition indices to fetch (empty = all partitions) -} - -// OffsetFetchResponse represents an OffsetFetch response to a Kafka client -type OffsetFetchResponse struct { - CorrelationID uint32 - Topics []OffsetFetchTopicResponse - ErrorCode int16 // Group-level error -} - -// OffsetFetchTopicResponse represents topic-level offset fetch response -type OffsetFetchTopicResponse struct { - Name string - Partitions []OffsetFetchPartitionResponse -} - -// OffsetFetchPartitionResponse represents partition-level offset fetch response -type OffsetFetchPartitionResponse struct { - Index int32 - Offset int64 // Committed offset (-1 if no offset) - LeaderEpoch int32 // Leader epoch (-1 if not available) - Metadata string // Optional metadata - ErrorCode int16 // Partition-level error -} - -// Error codes specific to offset management are imported from errors.go - -func (h *Handler) handleOffsetCommit(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse OffsetCommit request - req, err := h.parseOffsetCommitRequest(requestBody, apiVersion) - if err != nil { - return h.buildOffsetCommitErrorResponse(correlationID, ErrorCodeInvalidCommitOffsetSize, apiVersion), nil - } - - // Validate request - if req.GroupID == "" || req.MemberID == "" { - return h.buildOffsetCommitErrorResponse(correlationID, ErrorCodeInvalidGroupID, apiVersion), nil - } - - // Get or create consumer group - // Some Kafka clients (like kafka-go Reader) commit offsets without formally joining - // the group via JoinGroup/SyncGroup. We need to support these "simple consumer" use cases. - group := h.groupCoordinator.GetOrCreateGroup(req.GroupID) - - group.Mu.Lock() - defer group.Mu.Unlock() - - // Update group's last activity - group.LastActivity = time.Now() - - // Check generation compatibility - // Allow commits for empty groups (no active members) to support simple consumers - // that commit offsets without formal group membership - groupIsEmpty := len(group.Members) == 0 - generationMatches := groupIsEmpty || (req.GenerationID == group.Generation) - - glog.V(3).Infof("[OFFSET_COMMIT] Group check: id=%s reqGen=%d groupGen=%d members=%d empty=%v matches=%v", - req.GroupID, req.GenerationID, group.Generation, len(group.Members), groupIsEmpty, generationMatches) - - // Process offset commits - resp := OffsetCommitResponse{ - CorrelationID: correlationID, - Topics: make([]OffsetCommitTopicResponse, 0, len(req.Topics)), - } - - for _, t := range req.Topics { - topicResp := OffsetCommitTopicResponse{ - Name: t.Name, - Partitions: make([]OffsetCommitPartitionResponse, 0, len(t.Partitions)), - } - - for _, p := range t.Partitions { - - // Create consumer offset key for SMQ storage (not used immediately) - key := ConsumerOffsetKey{ - Topic: t.Name, - Partition: p.Index, - ConsumerGroup: req.GroupID, - ConsumerGroupInstance: req.GroupInstanceID, - } - - // Commit offset synchronously for immediate consistency - var errCode int16 = ErrorCodeNone - if generationMatches { - // Store in in-memory map for immediate response - // This is the primary committed offset position for consumers - if err := h.commitOffset(group, t.Name, p.Index, p.Offset, p.Metadata); err != nil { - errCode = ErrorCodeOffsetMetadataTooLarge - glog.V(2).Infof("[OFFSET_COMMIT] Failed to commit offset: group=%s topic=%s partition=%d offset=%d err=%v", - req.GroupID, t.Name, p.Index, p.Offset, err) - } else { - // Also persist to SMQ storage for durability across broker restarts - // This is done synchronously to ensure offset is not lost - if err := h.commitOffsetToSMQ(key, p.Offset, p.Metadata); err != nil { - // Log the error but don't fail the commit - // In-memory commit is the source of truth for active consumers - // SMQ persistence is best-effort for crash recovery - glog.V(3).Infof("[OFFSET_COMMIT] SMQ persist failed (non-fatal): group=%s topic=%s partition=%d offset=%d err=%v", - req.GroupID, t.Name, p.Index, p.Offset, err) - } - glog.V(3).Infof("[OFFSET_COMMIT] Committed: group=%s topic=%s partition=%d offset=%d gen=%d", - req.GroupID, t.Name, p.Index, p.Offset, group.Generation) - } - } else { - // Do not store commit if generation mismatch - errCode = 22 // IllegalGeneration - glog.V(2).Infof("[OFFSET_COMMIT] Rejected - generation mismatch: group=%s expected=%d got=%d members=%d", - req.GroupID, group.Generation, req.GenerationID, len(group.Members)) - } - - topicResp.Partitions = append(topicResp.Partitions, OffsetCommitPartitionResponse{ - Index: p.Index, - ErrorCode: errCode, - }) - } - - resp.Topics = append(resp.Topics, topicResp) - } - - return h.buildOffsetCommitResponse(resp, apiVersion), nil -} - -func (h *Handler) handleOffsetFetch(correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse OffsetFetch request - request, err := h.parseOffsetFetchRequest(requestBody) - if err != nil { - return h.buildOffsetFetchErrorResponse(correlationID, ErrorCodeInvalidGroupID), nil - } - - // Validate request - if request.GroupID == "" { - return h.buildOffsetFetchErrorResponse(correlationID, ErrorCodeInvalidGroupID), nil - } - - // Get or create consumer group - // IMPORTANT: Use GetOrCreateGroup (not GetGroup) to allow fetching persisted offsets - // even if the group doesn't exist in memory yet. This is critical for consumer restarts. - // Kafka allows offset fetches for groups that haven't joined yet (e.g., simple consumers). - group := h.groupCoordinator.GetOrCreateGroup(request.GroupID) - - group.Mu.RLock() - defer group.Mu.RUnlock() - - glog.V(4).Infof("[OFFSET_FETCH] Request: group=%s topics=%d", request.GroupID, len(request.Topics)) - - // Build response - response := OffsetFetchResponse{ - CorrelationID: correlationID, - Topics: make([]OffsetFetchTopicResponse, 0, len(request.Topics)), - ErrorCode: ErrorCodeNone, - } - - for _, topic := range request.Topics { - topicResponse := OffsetFetchTopicResponse{ - Name: topic.Name, - Partitions: make([]OffsetFetchPartitionResponse, 0), - } - - // If no partitions specified, fetch all partitions for the topic - partitionsToFetch := topic.Partitions - if len(partitionsToFetch) == 0 { - // Get all partitions for this topic from group's offset commits - if topicOffsets, exists := group.OffsetCommits[topic.Name]; exists { - for partition := range topicOffsets { - partitionsToFetch = append(partitionsToFetch, partition) - } - } - } - - // Fetch offsets for requested partitions - for _, partition := range partitionsToFetch { - var fetchedOffset int64 = -1 - var metadata string = "" - var errorCode int16 = ErrorCodeNone - - // Try fetching from in-memory cache first (works for both mock and SMQ backends) - if off, meta, err := h.fetchOffset(group, topic.Name, partition); err == nil && off >= 0 { - fetchedOffset = off - metadata = meta - glog.V(4).Infof("[OFFSET_FETCH] Found in memory: group=%s topic=%s partition=%d offset=%d", - request.GroupID, topic.Name, partition, off) - } else { - // Fallback: try fetching from SMQ persistent storage - // This handles cases where offsets are stored in SMQ but not yet loaded into memory - key := ConsumerOffsetKey{ - Topic: topic.Name, - Partition: partition, - ConsumerGroup: request.GroupID, - ConsumerGroupInstance: request.GroupInstanceID, - } - if off, meta, err := h.fetchOffsetFromSMQ(key); err == nil && off >= 0 { - fetchedOffset = off - metadata = meta - glog.V(3).Infof("[OFFSET_FETCH] Found in storage: group=%s topic=%s partition=%d offset=%d", - request.GroupID, topic.Name, partition, off) - } else { - glog.V(3).Infof("[OFFSET_FETCH] No offset found: group=%s topic=%s partition=%d (will start from auto.offset.reset)", - request.GroupID, topic.Name, partition) - } - // No offset found in either location (-1 indicates no committed offset) - } - - partitionResponse := OffsetFetchPartitionResponse{ - Index: partition, - Offset: fetchedOffset, - LeaderEpoch: 0, // Default epoch for SeaweedMQ (single leader model) - Metadata: metadata, - ErrorCode: errorCode, - } - topicResponse.Partitions = append(topicResponse.Partitions, partitionResponse) - } - - response.Topics = append(response.Topics, topicResponse) - } - - return h.buildOffsetFetchResponse(response, apiVersion), nil -} - -func (h *Handler) parseOffsetCommitRequest(data []byte, apiVersion uint16) (*OffsetCommitRequest, error) { - if len(data) < 8 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - - // GroupID (string) - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID := string(data[offset : offset+groupIDLength]) - offset += groupIDLength - - // Generation ID (4 bytes) - if offset+4 > len(data) { - return nil, fmt.Errorf("missing generation ID") - } - generationID := int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - // MemberID (string) - if offset+2 > len(data) { - return nil, fmt.Errorf("missing member ID length") - } - memberIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+memberIDLength > len(data) { - return nil, fmt.Errorf("invalid member ID length") - } - memberID := string(data[offset : offset+memberIDLength]) - offset += memberIDLength - - // RetentionTime (8 bytes) - exists in v0-v4, removed in v5+ - var retentionTime int64 = -1 - if apiVersion <= 4 { - if len(data) < offset+8 { - return nil, fmt.Errorf("missing retention time for v%d", apiVersion) - } - retentionTime = int64(binary.BigEndian.Uint64(data[offset : offset+8])) - offset += 8 - } - - // GroupInstanceID (nullable string) - ONLY in version 3+ - var groupInstanceID string - if apiVersion >= 3 { - if offset+2 > len(data) { - return nil, fmt.Errorf("missing group instance ID length") - } - groupInstanceIDLength := int(int16(binary.BigEndian.Uint16(data[offset:]))) - offset += 2 - if groupInstanceIDLength == -1 { - // Null string - groupInstanceID = "" - } else if groupInstanceIDLength > 0 { - if offset+groupInstanceIDLength > len(data) { - return nil, fmt.Errorf("invalid group instance ID length") - } - groupInstanceID = string(data[offset : offset+groupInstanceIDLength]) - offset += groupInstanceIDLength - } - } - - // Topics array - var topicsCount uint32 - if len(data) >= offset+4 { - topicsCount = binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - } - - topics := make([]OffsetCommitTopic, 0, topicsCount) - - for i := uint32(0); i < topicsCount && offset < len(data); i++ { - // Parse topic name - if len(data) < offset+2 { - break - } - topicNameLength := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if len(data) < offset+int(topicNameLength) { - break - } - topicName := string(data[offset : offset+int(topicNameLength)]) - offset += int(topicNameLength) - - // Parse partitions array - if len(data) < offset+4 { - break - } - partitionsCount := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - partitions := make([]OffsetCommitPartition, 0, partitionsCount) - - for j := uint32(0); j < partitionsCount && offset < len(data); j++ { - // Parse partition index (4 bytes) - if len(data) < offset+4 { - break - } - partitionIndex := int32(binary.BigEndian.Uint32(data[offset : offset+4])) - offset += 4 - - // Parse committed offset (8 bytes) - if len(data) < offset+8 { - break - } - committedOffset := int64(binary.BigEndian.Uint64(data[offset : offset+8])) - offset += 8 - - // Parse leader epoch (4 bytes) - ONLY in version 6+ - var leaderEpoch int32 = -1 - if apiVersion >= 6 { - if len(data) < offset+4 { - break - } - leaderEpoch = int32(binary.BigEndian.Uint32(data[offset : offset+4])) - offset += 4 - } - - // Parse metadata (string) - var metadata string = "" - if len(data) >= offset+2 { - metadataLength := int16(binary.BigEndian.Uint16(data[offset : offset+2])) - offset += 2 - if metadataLength == -1 { - metadata = "" - } else if metadataLength >= 0 && len(data) >= offset+int(metadataLength) { - metadata = string(data[offset : offset+int(metadataLength)]) - offset += int(metadataLength) - } - } - - partitions = append(partitions, OffsetCommitPartition{ - Index: partitionIndex, - Offset: committedOffset, - LeaderEpoch: leaderEpoch, - Metadata: metadata, - }) - } - topics = append(topics, OffsetCommitTopic{ - Name: topicName, - Partitions: partitions, - }) - } - - return &OffsetCommitRequest{ - GroupID: groupID, - GenerationID: generationID, - MemberID: memberID, - GroupInstanceID: groupInstanceID, - RetentionTime: retentionTime, - Topics: topics, - }, nil -} - -func (h *Handler) parseOffsetFetchRequest(data []byte) (*OffsetFetchRequest, error) { - if len(data) < 4 { - return nil, fmt.Errorf("request too short") - } - - offset := 0 - - // GroupID (string) - groupIDLength := int(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - if offset+groupIDLength > len(data) { - return nil, fmt.Errorf("invalid group ID length") - } - groupID := string(data[offset : offset+groupIDLength]) - offset += groupIDLength - - // Parse Topics array - classic encoding (INT32 count) for v0-v5 - if len(data) < offset+4 { - return nil, fmt.Errorf("OffsetFetch request missing topics array") - } - topicsCount := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - topics := make([]OffsetFetchTopic, 0, topicsCount) - - for i := uint32(0); i < topicsCount && offset < len(data); i++ { - // Parse topic name (STRING: INT16 length + bytes) - if len(data) < offset+2 { - break - } - topicNameLength := binary.BigEndian.Uint16(data[offset : offset+2]) - offset += 2 - - if len(data) < offset+int(topicNameLength) { - break - } - topicName := string(data[offset : offset+int(topicNameLength)]) - offset += int(topicNameLength) - - // Parse partitions array (ARRAY: INT32 count) - if len(data) < offset+4 { - break - } - partitionsCount := binary.BigEndian.Uint32(data[offset : offset+4]) - offset += 4 - - partitions := make([]int32, 0, partitionsCount) - - // If partitionsCount is 0, it means "fetch all partitions" - if partitionsCount == 0 { - partitions = nil // nil means all partitions - } else { - for j := uint32(0); j < partitionsCount && offset < len(data); j++ { - // Parse partition index (4 bytes) - if len(data) < offset+4 { - break - } - partitionIndex := int32(binary.BigEndian.Uint32(data[offset : offset+4])) - offset += 4 - - partitions = append(partitions, partitionIndex) - } - } - - topics = append(topics, OffsetFetchTopic{ - Name: topicName, - Partitions: partitions, - }) - } - - // Parse RequireStable flag (1 byte) - for transactional consistency - var requireStable bool - if len(data) >= offset+1 { - requireStable = data[offset] != 0 - offset += 1 - } - - return &OffsetFetchRequest{ - GroupID: groupID, - Topics: topics, - RequireStable: requireStable, - }, nil -} - -func (h *Handler) commitOffset(group *consumer.ConsumerGroup, topic string, partition int32, offset int64, metadata string) error { - // Initialize topic offsets if needed - if group.OffsetCommits == nil { - group.OffsetCommits = make(map[string]map[int32]consumer.OffsetCommit) - } - - if group.OffsetCommits[topic] == nil { - group.OffsetCommits[topic] = make(map[int32]consumer.OffsetCommit) - } - - // Store the offset commit - group.OffsetCommits[topic][partition] = consumer.OffsetCommit{ - Offset: offset, - Metadata: metadata, - Timestamp: time.Now(), - } - - return nil -} - -func (h *Handler) fetchOffset(group *consumer.ConsumerGroup, topic string, partition int32) (int64, string, error) { - // Check if topic exists in offset commits - if group.OffsetCommits == nil { - return -1, "", nil // No committed offset - } - - topicOffsets, exists := group.OffsetCommits[topic] - if !exists { - return -1, "", nil // No committed offset for topic - } - - offsetCommit, exists := topicOffsets[partition] - if !exists { - return -1, "", nil // No committed offset for partition - } - - return offsetCommit.Offset, offsetCommit.Metadata, nil -} - -func (h *Handler) buildOffsetCommitResponse(response OffsetCommitResponse, apiVersion uint16) []byte { - estimatedSize := 16 - for _, topic := range response.Topics { - estimatedSize += len(topic.Name) + 8 + len(topic.Partitions)*8 - } - - result := make([]byte, 0, estimatedSize) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Throttle time (4 bytes) - ONLY for version 3+, and it goes at the BEGINNING - if apiVersion >= 3 { - result = append(result, 0, 0, 0, 0) // throttle_time_ms = 0 - } - - // Topics array length (4 bytes) - topicsLengthBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsLengthBytes, uint32(len(response.Topics))) - result = append(result, topicsLengthBytes...) - - // Topics - for _, topic := range response.Topics { - // Topic name length (2 bytes) - nameLength := make([]byte, 2) - binary.BigEndian.PutUint16(nameLength, uint16(len(topic.Name))) - result = append(result, nameLength...) - - // Topic name - result = append(result, []byte(topic.Name)...) - - // Partitions array length (4 bytes) - partitionsLength := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsLength, uint32(len(topic.Partitions))) - result = append(result, partitionsLength...) - - // Partitions - for _, partition := range topic.Partitions { - // Partition index (4 bytes) - indexBytes := make([]byte, 4) - binary.BigEndian.PutUint32(indexBytes, uint32(partition.Index)) - result = append(result, indexBytes...) - - // Error code (2 bytes) - errorBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorBytes, uint16(partition.ErrorCode)) - result = append(result, errorBytes...) - } - } - - return result -} - -func (h *Handler) buildOffsetFetchResponse(response OffsetFetchResponse, apiVersion uint16) []byte { - estimatedSize := 32 - for _, topic := range response.Topics { - estimatedSize += len(topic.Name) + 16 + len(topic.Partitions)*32 - for _, partition := range topic.Partitions { - estimatedSize += len(partition.Metadata) - } - } - - result := make([]byte, 0, estimatedSize) - - // NOTE: Correlation ID is handled by writeResponseWithCorrelationID - // Do NOT include it in the response body - - // Throttle time (4 bytes) - for version 3+ this appears immediately after correlation ID - if apiVersion >= 3 { - result = append(result, 0, 0, 0, 0) // throttle_time_ms = 0 - } - - // Topics array length (4 bytes) - topicsLengthBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsLengthBytes, uint32(len(response.Topics))) - result = append(result, topicsLengthBytes...) - - // Topics - for _, topic := range response.Topics { - // Topic name length (2 bytes) - nameLength := make([]byte, 2) - binary.BigEndian.PutUint16(nameLength, uint16(len(topic.Name))) - result = append(result, nameLength...) - - // Topic name - result = append(result, []byte(topic.Name)...) - - // Partitions array length (4 bytes) - partitionsLength := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsLength, uint32(len(topic.Partitions))) - result = append(result, partitionsLength...) - - // Partitions - for _, partition := range topic.Partitions { - // Partition index (4 bytes) - indexBytes := make([]byte, 4) - binary.BigEndian.PutUint32(indexBytes, uint32(partition.Index)) - result = append(result, indexBytes...) - - // Committed offset (8 bytes) - offsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(offsetBytes, uint64(partition.Offset)) - result = append(result, offsetBytes...) - - // Leader epoch (4 bytes) - only included in version 5+ - if apiVersion >= 5 { - epochBytes := make([]byte, 4) - binary.BigEndian.PutUint32(epochBytes, uint32(partition.LeaderEpoch)) - result = append(result, epochBytes...) - } - - // Metadata length (2 bytes) - metadataLength := make([]byte, 2) - binary.BigEndian.PutUint16(metadataLength, uint16(len(partition.Metadata))) - result = append(result, metadataLength...) - - // Metadata - result = append(result, []byte(partition.Metadata)...) - - // Error code (2 bytes) - errorBytes := make([]byte, 2) - binary.BigEndian.PutUint16(errorBytes, uint16(partition.ErrorCode)) - result = append(result, errorBytes...) - } - } - - // Group-level error code (2 bytes) - only included in version 2+ - if apiVersion >= 2 { - groupErrorBytes := make([]byte, 2) - binary.BigEndian.PutUint16(groupErrorBytes, uint16(response.ErrorCode)) - result = append(result, groupErrorBytes...) - } - - return result -} - -func (h *Handler) buildOffsetCommitErrorResponse(correlationID uint32, errorCode int16, apiVersion uint16) []byte { - response := OffsetCommitResponse{ - CorrelationID: correlationID, - Topics: []OffsetCommitTopicResponse{ - { - Name: "", - Partitions: []OffsetCommitPartitionResponse{ - {Index: 0, ErrorCode: errorCode}, - }, - }, - }, - } - - return h.buildOffsetCommitResponse(response, apiVersion) -} - -func (h *Handler) buildOffsetFetchErrorResponse(correlationID uint32, errorCode int16) []byte { - response := OffsetFetchResponse{ - CorrelationID: correlationID, - Topics: []OffsetFetchTopicResponse{}, - ErrorCode: errorCode, - } - - return h.buildOffsetFetchResponse(response, 0) -} diff --git a/weed/mq/kafka/protocol/offset_storage_adapter.go b/weed/mq/kafka/protocol/offset_storage_adapter.go deleted file mode 100644 index 079c5b621..000000000 --- a/weed/mq/kafka/protocol/offset_storage_adapter.go +++ /dev/null @@ -1,50 +0,0 @@ -package protocol - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/consumer_offset" -) - -// offsetStorageAdapter adapts consumer_offset.OffsetStorage to ConsumerOffsetStorage interface -type offsetStorageAdapter struct { - storage consumer_offset.OffsetStorage -} - -// newOffsetStorageAdapter creates a new adapter -func newOffsetStorageAdapter(storage consumer_offset.OffsetStorage) ConsumerOffsetStorage { - return &offsetStorageAdapter{storage: storage} -} - -func (a *offsetStorageAdapter) CommitOffset(group, topic string, partition int32, offset int64, metadata string) error { - return a.storage.CommitOffset(group, topic, partition, offset, metadata) -} - -func (a *offsetStorageAdapter) FetchOffset(group, topic string, partition int32) (int64, string, error) { - return a.storage.FetchOffset(group, topic, partition) -} - -func (a *offsetStorageAdapter) FetchAllOffsets(group string) (map[TopicPartition]OffsetMetadata, error) { - offsets, err := a.storage.FetchAllOffsets(group) - if err != nil { - return nil, err - } - - // Convert from consumer_offset types to protocol types - result := make(map[TopicPartition]OffsetMetadata, len(offsets)) - for tp, om := range offsets { - result[TopicPartition{Topic: tp.Topic, Partition: tp.Partition}] = OffsetMetadata{ - Offset: om.Offset, - Metadata: om.Metadata, - } - } - - return result, nil -} - -func (a *offsetStorageAdapter) DeleteGroup(group string) error { - return a.storage.DeleteGroup(group) -} - -func (a *offsetStorageAdapter) Close() error { - return a.storage.Close() -} - diff --git a/weed/mq/kafka/protocol/produce.go b/weed/mq/kafka/protocol/produce.go deleted file mode 100644 index 849d1148d..000000000 --- a/weed/mq/kafka/protocol/produce.go +++ /dev/null @@ -1,1546 +0,0 @@ -package protocol - -import ( - "context" - "encoding/binary" - "fmt" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/compression" - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "google.golang.org/protobuf/proto" -) - -func (h *Handler) handleProduce(ctx context.Context, correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // Version-specific handling - switch apiVersion { - case 0, 1: - return h.handleProduceV0V1(ctx, correlationID, apiVersion, requestBody) - case 2, 3, 4, 5, 6, 7: - return h.handleProduceV2Plus(ctx, correlationID, apiVersion, requestBody) - default: - return nil, fmt.Errorf("produce version %d not implemented yet", apiVersion) - } -} - -func (h *Handler) handleProduceV0V1(ctx context.Context, correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - // Parse Produce v0/v1 request - // Request format: client_id + acks(2) + timeout(4) + topics_array - - if len(requestBody) < 8 { // client_id_size(2) + acks(2) + timeout(4) - return nil, fmt.Errorf("Produce request too short") - } - - // Skip client_id - clientIDSize := binary.BigEndian.Uint16(requestBody[0:2]) - - if len(requestBody) < 2+int(clientIDSize) { - return nil, fmt.Errorf("Produce request client_id too short") - } - - _ = string(requestBody[2 : 2+int(clientIDSize)]) // clientID - offset := 2 + int(clientIDSize) - - if len(requestBody) < offset+10 { // acks(2) + timeout(4) + topics_count(4) - return nil, fmt.Errorf("Produce request missing data") - } - - // Parse acks and timeout - _ = int16(binary.BigEndian.Uint16(requestBody[offset : offset+2])) // acks - offset += 2 - - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - response := make([]byte, 0, 1024) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Topics count (same as request) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, topicsCount) - response = append(response, topicsCountBytes...) - - // Process each topic - for i := uint32(0); i < topicsCount && offset < len(requestBody); i++ { - if len(requestBody) < offset+2 { - break - } - - // Parse topic name - topicNameSize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(topicNameSize)+4 { - break - } - - topicName := string(requestBody[offset : offset+int(topicNameSize)]) - offset += int(topicNameSize) - - // Parse partitions count - partitionsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Check if topic exists, auto-create if it doesn't (simulates auto.create.topics.enable=true) - topicExists := h.seaweedMQHandler.TopicExists(topicName) - - _ = h.seaweedMQHandler.ListTopics() // existingTopics - if !topicExists { - // Use schema-aware topic creation for auto-created topics with configurable default partitions - defaultPartitions := h.GetDefaultPartitions() - glog.V(1).Infof("[PRODUCE] Topic %s does not exist, auto-creating with %d partitions", topicName, defaultPartitions) - if err := h.createTopicWithSchemaSupport(topicName, defaultPartitions); err != nil { - glog.V(0).Infof("[PRODUCE] ERROR: Failed to auto-create topic %s: %v", topicName, err) - } else { - glog.V(1).Infof("[PRODUCE] Successfully auto-created topic %s", topicName) - // Invalidate cache immediately after creation so consumers can find it - h.seaweedMQHandler.InvalidateTopicExistsCache(topicName) - topicExists = true - } - } else { - glog.V(2).Infof("[PRODUCE] Topic %s already exists", topicName) - } - - // Response: topic_name_size(2) + topic_name + partitions_array - response = append(response, byte(topicNameSize>>8), byte(topicNameSize)) - response = append(response, []byte(topicName)...) - - partitionsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsCountBytes, partitionsCount) - response = append(response, partitionsCountBytes...) - - // Process each partition - for j := uint32(0); j < partitionsCount && offset < len(requestBody); j++ { - if len(requestBody) < offset+8 { - break - } - - // Parse partition: partition_id(4) + record_set_size(4) + record_set - partitionID := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - recordSetSize := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - if len(requestBody) < offset+int(recordSetSize) { - break - } - - // CRITICAL FIX: Make a copy of recordSetData to prevent buffer sharing corruption - // The slice requestBody[offset:offset+int(recordSetSize)] shares the underlying array - // with the request buffer, which can be reused and cause data corruption - recordSetData := make([]byte, recordSetSize) - copy(recordSetData, requestBody[offset:offset+int(recordSetSize)]) - offset += int(recordSetSize) - - // Response: partition_id(4) + error_code(2) + base_offset(8) + log_append_time(8) + log_start_offset(8) - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, partitionID) - response = append(response, partitionIDBytes...) - - var errorCode uint16 = 0 - var baseOffset int64 = 0 - currentTime := time.Now().UnixNano() - - if !topicExists { - errorCode = 3 // UNKNOWN_TOPIC_OR_PARTITION - } else { - // Process the record set - recordCount, _, parseErr := h.parseRecordSet(recordSetData) // totalSize unused - if parseErr != nil { - errorCode = 42 // INVALID_RECORD - } else if recordCount > 0 { - // Use SeaweedMQ integration - offset, err := h.produceToSeaweedMQ(ctx, topicName, int32(partitionID), recordSetData) - if err != nil { - // Check if this is a schema validation error and add delay to prevent overloading - if h.isSchemaValidationError(err) { - time.Sleep(200 * time.Millisecond) // Brief delay for schema validation failures - } - errorCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - } else { - baseOffset = offset - } - } - } - - // Error code - response = append(response, byte(errorCode>>8), byte(errorCode)) - - // Base offset (8 bytes) - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - response = append(response, baseOffsetBytes...) - - // Log append time (8 bytes) - timestamp when appended - logAppendTimeBytes := make([]byte, 8) - binary.BigEndian.PutUint64(logAppendTimeBytes, uint64(currentTime)) - response = append(response, logAppendTimeBytes...) - - // Log start offset (8 bytes) - same as base for now - logStartOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(logStartOffsetBytes, uint64(baseOffset)) - response = append(response, logStartOffsetBytes...) - } - } - - // Add throttle time at the end (4 bytes) - response = append(response, 0, 0, 0, 0) - - // Even for acks=0, kafka-go expects a minimal response structure - return response, nil -} - -// parseRecordSet parses a Kafka record set using the enhanced record batch parser -// Now supports: -// - Proper record batch format parsing (v2) -// - Compression support (gzip, snappy, lz4, zstd) -// - CRC32 validation -// - Individual record extraction -func (h *Handler) parseRecordSet(recordSetData []byte) (recordCount int32, totalSize int32, err error) { - - // Heuristic: permit short inputs for tests - if len(recordSetData) < 61 { - // If very small, decide error vs fallback - if len(recordSetData) < 8 { - return 0, 0, fmt.Errorf("failed to parse record batch: record set too small: %d bytes", len(recordSetData)) - } - // If we have at least 20 bytes, attempt to read a count at [16:20] - if len(recordSetData) >= 20 { - cnt := int32(binary.BigEndian.Uint32(recordSetData[16:20])) - if cnt <= 0 || cnt > 1000000 { - cnt = 1 - } - return cnt, int32(len(recordSetData)), nil - } - // Otherwise default to 1 record - return 1, int32(len(recordSetData)), nil - } - - parser := NewRecordBatchParser() - - // Parse the record batch with CRC validation - batch, err := parser.ParseRecordBatchWithValidation(recordSetData, true) - if err != nil { - // If CRC validation fails, try without validation for backward compatibility - batch, err = parser.ParseRecordBatch(recordSetData) - if err != nil { - return 0, 0, fmt.Errorf("failed to parse record batch: %w", err) - } - } - - return batch.RecordCount, int32(len(recordSetData)), nil -} - -// produceToSeaweedMQ publishes a single record to SeaweedMQ (simplified for Phase 2) -// ctx controls the publish timeout - if client cancels, produce operation is cancelled -func (h *Handler) produceToSeaweedMQ(ctx context.Context, topic string, partition int32, recordSetData []byte) (int64, error) { - // Extract all records from the record set and publish each one - // extractAllRecords handles fallback internally for various cases - records := h.extractAllRecords(recordSetData) - - if len(records) == 0 { - return 0, fmt.Errorf("failed to parse Kafka record set: no records extracted") - } - - // Publish all records and return the offset of the first record (base offset) - var baseOffset int64 - for idx, kv := range records { - offsetProduced, err := h.produceSchemaBasedRecord(ctx, topic, partition, kv.Key, kv.Value) - if err != nil { - return 0, err - } - if idx == 0 { - baseOffset = offsetProduced - } - } - - return baseOffset, nil -} - -// extractAllRecords parses a Kafka record batch and returns all records' key/value pairs -func (h *Handler) extractAllRecords(recordSetData []byte) []struct{ Key, Value []byte } { - results := make([]struct{ Key, Value []byte }, 0, 8) - - if len(recordSetData) > 0 { - } - - if len(recordSetData) < 61 { - // Too small to be a full batch; treat as single opaque record - key, value := h.extractFirstRecord(recordSetData) - // Always include records, even if both key and value are null - // Schema Registry Noop records may have null values - results = append(results, struct{ Key, Value []byte }{Key: key, Value: value}) - return results - } - - // Parse record batch header (Kafka v2) - offset := 0 - _ = int64(binary.BigEndian.Uint64(recordSetData[offset:])) // baseOffset - offset += 8 // base_offset - _ = binary.BigEndian.Uint32(recordSetData[offset:]) // batchLength - offset += 4 // batch_length - _ = binary.BigEndian.Uint32(recordSetData[offset:]) // partitionLeaderEpoch - offset += 4 // partition_leader_epoch - - if offset >= len(recordSetData) { - return results - } - magic := recordSetData[offset] // magic - offset += 1 - - if magic != 2 { - // Unsupported, fallback - key, value := h.extractFirstRecord(recordSetData) - // Always include records, even if both key and value are null - results = append(results, struct{ Key, Value []byte }{Key: key, Value: value}) - return results - } - - // Skip CRC, read attributes to check compression - offset += 4 // crc - attributes := binary.BigEndian.Uint16(recordSetData[offset:]) - offset += 2 // attributes - - // Check compression codec from attributes (bits 0-2) - compressionCodec := compression.CompressionCodec(attributes & 0x07) - - offset += 4 // last_offset_delta - offset += 8 // first_timestamp - offset += 8 // max_timestamp - offset += 8 // producer_id - offset += 2 // producer_epoch - offset += 4 // base_sequence - - // records_count - if offset+4 > len(recordSetData) { - return results - } - recordsCount := int(binary.BigEndian.Uint32(recordSetData[offset:])) - offset += 4 - - // Extract and decompress the records section - recordsData := recordSetData[offset:] - if compressionCodec != compression.None { - decompressed, err := compression.Decompress(compressionCodec, recordsData) - if err != nil { - // Fallback to extractFirstRecord - key, value := h.extractFirstRecord(recordSetData) - results = append(results, struct{ Key, Value []byte }{Key: key, Value: value}) - return results - } - recordsData = decompressed - } - // Reset offset to start of records data (whether compressed or not) - offset = 0 - - if len(recordsData) > 0 { - } - - // Iterate records - for i := 0; i < recordsCount && offset < len(recordsData); i++ { - // record_length is a SIGNED zigzag-encoded varint (like all varints in Kafka record format) - recLen, n := decodeVarint(recordsData[offset:]) - if n == 0 || recLen <= 0 { - break - } - offset += n - if offset+int(recLen) > len(recordsData) { - break - } - rec := recordsData[offset : offset+int(recLen)] - offset += int(recLen) - - // Parse record fields - rpos := 0 - if rpos >= len(rec) { - break - } - rpos += 1 // attributes - - // timestamp_delta (varint) - var nBytes int - _, nBytes = decodeVarint(rec[rpos:]) - if nBytes == 0 { - continue - } - rpos += nBytes - // offset_delta (varint) - _, nBytes = decodeVarint(rec[rpos:]) - if nBytes == 0 { - continue - } - rpos += nBytes - - // key - keyLen, nBytes := decodeVarint(rec[rpos:]) - if nBytes == 0 { - continue - } - rpos += nBytes - var key []byte - if keyLen >= 0 { - if rpos+int(keyLen) > len(rec) { - continue - } - key = rec[rpos : rpos+int(keyLen)] - rpos += int(keyLen) - } - - // value - valLen, nBytes := decodeVarint(rec[rpos:]) - if nBytes == 0 { - continue - } - rpos += nBytes - var value []byte - if valLen >= 0 { - if rpos+int(valLen) > len(rec) { - continue - } - value = rec[rpos : rpos+int(valLen)] - rpos += int(valLen) - } - - // headers (varint) - skip - _, n = decodeVarint(rec[rpos:]) - if n == 0 { /* ignore */ - } - - // DO NOT normalize nils to empty slices - Kafka distinguishes null vs empty - // Keep nil as nil, empty as empty - - results = append(results, struct{ Key, Value []byte }{Key: key, Value: value}) - } - - return results -} - -// extractFirstRecord extracts the first record from a Kafka record batch -func (h *Handler) extractFirstRecord(recordSetData []byte) ([]byte, []byte) { - - if len(recordSetData) < 61 { - // Record set too small to contain a valid Kafka v2 batch - return nil, nil - } - - offset := 0 - - // Parse record batch header (Kafka v2 format) - // base_offset(8) + batch_length(4) + partition_leader_epoch(4) + magic(1) + crc(4) + attributes(2) - // + last_offset_delta(4) + first_timestamp(8) + max_timestamp(8) + producer_id(8) + producer_epoch(2) - // + base_sequence(4) + records_count(4) = 61 bytes header - - offset += 8 // skip base_offset - _ = int32(binary.BigEndian.Uint32(recordSetData[offset:])) // batchLength unused - offset += 4 // batch_length - - offset += 4 // skip partition_leader_epoch - magic := recordSetData[offset] - offset += 1 // magic byte - - if magic != 2 { - // Unsupported magic byte - only Kafka v2 format is supported - return nil, nil - } - - offset += 4 // skip crc - offset += 2 // skip attributes - offset += 4 // skip last_offset_delta - offset += 8 // skip first_timestamp - offset += 8 // skip max_timestamp - offset += 8 // skip producer_id - offset += 2 // skip producer_epoch - offset += 4 // skip base_sequence - - recordsCount := int32(binary.BigEndian.Uint32(recordSetData[offset:])) - offset += 4 // records_count - - if recordsCount == 0 { - // No records in batch - return nil, nil - } - - // Parse first record - if offset >= len(recordSetData) { - // Not enough data to parse record - return nil, nil - } - - // Read record length (unsigned varint) - recordLengthU32, varintLen, err := DecodeUvarint(recordSetData[offset:]) - if err != nil || varintLen == 0 { - // Invalid varint encoding - return nil, nil - } - recordLength := int64(recordLengthU32) - offset += varintLen - - if offset+int(recordLength) > len(recordSetData) { - // Record length exceeds available data - return nil, nil - } - - recordData := recordSetData[offset : offset+int(recordLength)] - recordOffset := 0 - - // Parse record: attributes(1) + timestamp_delta(varint) + offset_delta(varint) + key + value + headers - recordOffset += 1 // skip attributes - - // Skip timestamp_delta (varint) - _, varintLen = decodeVarint(recordData[recordOffset:]) - if varintLen == 0 { - // Invalid timestamp_delta varint - return nil, nil - } - recordOffset += varintLen - - // Skip offset_delta (varint) - _, varintLen = decodeVarint(recordData[recordOffset:]) - if varintLen == 0 { - // Invalid offset_delta varint - return nil, nil - } - recordOffset += varintLen - - // Read key length and key - keyLength, varintLen := decodeVarint(recordData[recordOffset:]) - if varintLen == 0 { - // Invalid key length varint - return nil, nil - } - recordOffset += varintLen - - var key []byte - if keyLength == -1 { - key = nil // null key - } else if keyLength == 0 { - key = []byte{} // empty key - } else { - if recordOffset+int(keyLength) > len(recordData) { - // Key length exceeds available data - return nil, nil - } - key = recordData[recordOffset : recordOffset+int(keyLength)] - recordOffset += int(keyLength) - } - - // Read value length and value - valueLength, varintLen := decodeVarint(recordData[recordOffset:]) - if varintLen == 0 { - // Invalid value length varint - return nil, nil - } - recordOffset += varintLen - - var value []byte - if valueLength == -1 { - value = nil // null value - } else if valueLength == 0 { - value = []byte{} // empty value - } else { - if recordOffset+int(valueLength) > len(recordData) { - // Value length exceeds available data - return nil, nil - } - value = recordData[recordOffset : recordOffset+int(valueLength)] - } - - // Preserve null semantics - don't convert null to empty - // Schema Registry Noop records specifically use null values - return key, value -} - -// decodeVarint decodes a variable-length integer from bytes using zigzag encoding -// Returns the decoded value and the number of bytes consumed -func decodeVarint(data []byte) (int64, int) { - if len(data) == 0 { - return 0, 0 - } - - var result int64 - var shift uint - var bytesRead int - - for i, b := range data { - if i > 9 { // varints can be at most 10 bytes - return 0, 0 // invalid varint - } - - bytesRead++ - result |= int64(b&0x7F) << shift - - if (b & 0x80) == 0 { - // Most significant bit is 0, we're done - // Apply zigzag decoding for signed integers - return (result >> 1) ^ (-(result & 1)), bytesRead - } - - shift += 7 - } - - return 0, 0 // incomplete varint -} - -// handleProduceV2Plus handles Produce API v2-v7 (Kafka 0.11+) -func (h *Handler) handleProduceV2Plus(ctx context.Context, correlationID uint32, apiVersion uint16, requestBody []byte) ([]byte, error) { - - // For now, use simplified parsing similar to v0/v1 but handle v2+ response format - // In v2+, the main differences are: - // - Request: transactional_id field (nullable string) at the beginning - // - Response: throttle_time_ms field at the end (v1+) - - // Parse Produce v2+ request format (client_id already stripped in HandleConn) - // v2: acks(INT16) + timeout_ms(INT32) + topics(ARRAY) - // v3+: transactional_id(NULLABLE_STRING) + acks(INT16) + timeout_ms(INT32) + topics(ARRAY) - - offset := 0 - - // transactional_id only exists in v3+ - if apiVersion >= 3 { - if len(requestBody) < offset+2 { - return nil, fmt.Errorf("Produce v%d request too short for transactional_id", apiVersion) - } - txIDLen := int16(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - if txIDLen >= 0 { - if len(requestBody) < offset+int(txIDLen) { - return nil, fmt.Errorf("Produce v%d request transactional_id too short", apiVersion) - } - _ = string(requestBody[offset : offset+int(txIDLen)]) - offset += int(txIDLen) - } - } - - // Parse acks (INT16) and timeout_ms (INT32) - if len(requestBody) < offset+6 { - return nil, fmt.Errorf("Produce v%d request missing acks/timeout", apiVersion) - } - - acks := int16(binary.BigEndian.Uint16(requestBody[offset : offset+2])) - offset += 2 - _ = binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Remember if this is fire-and-forget mode - isFireAndForget := acks == 0 - if isFireAndForget { - } else { - } - - if len(requestBody) < offset+4 { - return nil, fmt.Errorf("Produce v%d request missing topics count", apiVersion) - } - topicsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // If topicsCount is implausible, there might be a parsing issue - if topicsCount > 1000 { - return nil, fmt.Errorf("Produce v%d request has implausible topics count: %d", apiVersion, topicsCount) - } - - // Build response - response := make([]byte, 0, 256) - - // NOTE: Correlation ID is handled by writeResponseWithHeader - // Do NOT include it in the response body - - // Topics array length (first field in response body) - topicsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(topicsCountBytes, topicsCount) - response = append(response, topicsCountBytes...) - - // Process each topic with correct parsing and response format - for i := uint32(0); i < topicsCount && offset < len(requestBody); i++ { - // Parse topic name - if len(requestBody) < offset+2 { - break - } - - topicNameSize := binary.BigEndian.Uint16(requestBody[offset : offset+2]) - offset += 2 - - if len(requestBody) < offset+int(topicNameSize)+4 { - break - } - - topicName := string(requestBody[offset : offset+int(topicNameSize)]) - offset += int(topicNameSize) - - // Parse partitions count - partitionsCount := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - - // Response: topic name (STRING: 2 bytes length + data) - response = append(response, byte(topicNameSize>>8), byte(topicNameSize)) - response = append(response, []byte(topicName)...) - - // Response: partitions count (4 bytes) - partitionsCountBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionsCountBytes, partitionsCount) - response = append(response, partitionsCountBytes...) - - // Process each partition with correct parsing - for j := uint32(0); j < partitionsCount && offset < len(requestBody); j++ { - // Parse partition request: partition_id(4) + record_set_size(4) + record_set_data - if len(requestBody) < offset+8 { - break - } - partitionID := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - recordSetSize := binary.BigEndian.Uint32(requestBody[offset : offset+4]) - offset += 4 - if len(requestBody) < offset+int(recordSetSize) { - break - } - // CRITICAL FIX: Make a copy of recordSetData to prevent buffer sharing corruption - // The slice requestBody[offset:offset+int(recordSetSize)] shares the underlying array - // with the request buffer, which can be reused and cause data corruption - recordSetData := make([]byte, recordSetSize) - copy(recordSetData, requestBody[offset:offset+int(recordSetSize)]) - offset += int(recordSetSize) - - // Process the record set and store in ledger - var errorCode uint16 = 0 - var baseOffset int64 = 0 - currentTime := time.Now().UnixNano() - - // Check if topic exists; for v2+ do NOT auto-create - topicExists := h.seaweedMQHandler.TopicExists(topicName) - - if !topicExists { - errorCode = 3 // UNKNOWN_TOPIC_OR_PARTITION - } else { - // Process the record set (lenient parsing) - recordCount, _, parseErr := h.parseRecordSet(recordSetData) // totalSize unused - - if parseErr != nil { - errorCode = 42 // INVALID_RECORD - } else if recordCount > 0 { - // Extract all records from the record set and publish each one - // extractAllRecords handles fallback internally for various cases - records := h.extractAllRecords(recordSetData) - - if len(records) == 0 { - errorCode = 42 // INVALID_RECORD - } else { - for idx, kv := range records { - offsetProduced, prodErr := h.produceSchemaBasedRecord(ctx, topicName, int32(partitionID), kv.Key, kv.Value) - - if prodErr != nil { - // Check if this is a schema validation error and add delay to prevent overloading - if h.isSchemaValidationError(prodErr) { - time.Sleep(200 * time.Millisecond) // Brief delay for schema validation failures - } - errorCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - break - } - - if idx == 0 { - baseOffset = offsetProduced - } - } - } - } else { - // Try to extract anyway - this might be a Noop record - records := h.extractAllRecords(recordSetData) - if len(records) > 0 { - for idx, kv := range records { - offsetProduced, prodErr := h.produceSchemaBasedRecord(ctx, topicName, int32(partitionID), kv.Key, kv.Value) - if prodErr != nil { - errorCode = 0xFFFF // UNKNOWN_SERVER_ERROR (-1 as uint16) - break - } - if idx == 0 { - baseOffset = offsetProduced - } - } - } - } - } - - // Build correct Produce v2+ response for this partition - // Format: partition_id(4) + error_code(2) + base_offset(8) + [log_append_time(8) if v>=2] + [log_start_offset(8) if v>=5] - - // partition_id (4 bytes) - partitionIDBytes := make([]byte, 4) - binary.BigEndian.PutUint32(partitionIDBytes, partitionID) - response = append(response, partitionIDBytes...) - - // error_code (2 bytes) - response = append(response, byte(errorCode>>8), byte(errorCode)) - - // base_offset (8 bytes) - offset of first message - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - response = append(response, baseOffsetBytes...) - - // log_append_time (8 bytes) - v2+ field (actual timestamp, not -1) - if apiVersion >= 2 { - logAppendTimeBytes := make([]byte, 8) - binary.BigEndian.PutUint64(logAppendTimeBytes, uint64(currentTime)) - response = append(response, logAppendTimeBytes...) - } - - // log_start_offset (8 bytes) - v5+ field - if apiVersion >= 5 { - logStartOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(logStartOffsetBytes, uint64(baseOffset)) - response = append(response, logStartOffsetBytes...) - } - } - } - - // For fire-and-forget mode, return empty response after processing - if isFireAndForget { - return []byte{}, nil - } - - // Append throttle_time_ms at the END for v1+ (as per original Kafka protocol) - if apiVersion >= 1 { - response = append(response, 0, 0, 0, 0) // throttle_time_ms = 0 - } - - if len(response) < 20 { - } - - return response, nil -} - -// performSchemaValidation performs comprehensive schema validation for a topic -func (h *Handler) performSchemaValidation(topicName string, schemaID uint32, messageFormat schema.Format, messageBytes []byte) error { - // 1. Check if topic is configured to require schemas - if !h.isSchematizedTopic(topicName) { - // Topic doesn't require schemas, but message is schematized - this is allowed - return nil - } - - // 2. Get expected schema metadata for the topic - expectedMetadata, err := h.getSchemaMetadataForTopic(topicName) - if err != nil { - // No expected schema found - in strict mode this would be an error - // In permissive mode, allow any valid schema - if h.isStrictSchemaValidation() { - // Add delay before returning schema validation error to prevent overloading - time.Sleep(100 * time.Millisecond) - return fmt.Errorf("topic %s requires schema but no expected schema found: %w", topicName, err) - } - return nil - } - - // 3. Validate schema ID matches expected schema - expectedSchemaID, err := h.parseSchemaID(expectedMetadata["schema_id"]) - if err != nil { - // Add delay before returning schema validation error to prevent overloading - time.Sleep(100 * time.Millisecond) - return fmt.Errorf("invalid expected schema ID for topic %s: %w", topicName, err) - } - - // 4. Check schema compatibility - if schemaID != expectedSchemaID { - // Schema ID doesn't match - check if it's a compatible evolution - compatible, err := h.checkSchemaEvolution(topicName, expectedSchemaID, schemaID, messageFormat) - if err != nil { - // Add delay before returning schema validation error to prevent overloading - time.Sleep(100 * time.Millisecond) - return fmt.Errorf("failed to check schema evolution for topic %s: %w", topicName, err) - } - if !compatible { - // Add delay before returning schema validation error to prevent overloading - time.Sleep(100 * time.Millisecond) - return fmt.Errorf("schema ID %d is not compatible with expected schema %d for topic %s", - schemaID, expectedSchemaID, topicName) - } - } - - // 5. Validate message format matches expected format - expectedFormatStr := expectedMetadata["schema_format"] - var expectedFormat schema.Format - switch expectedFormatStr { - case "AVRO": - expectedFormat = schema.FormatAvro - case "PROTOBUF": - expectedFormat = schema.FormatProtobuf - case "JSON_SCHEMA": - expectedFormat = schema.FormatJSONSchema - default: - expectedFormat = schema.FormatUnknown - } - if messageFormat != expectedFormat { - return fmt.Errorf("message format %s does not match expected format %s for topic %s", - messageFormat, expectedFormat, topicName) - } - - // 6. Perform message-level validation - return h.validateMessageContent(schemaID, messageFormat, messageBytes) -} - -// checkSchemaEvolution checks if a schema evolution is compatible -func (h *Handler) checkSchemaEvolution(topicName string, expectedSchemaID, actualSchemaID uint32, format schema.Format) (bool, error) { - // Get both schemas - expectedSchema, err := h.schemaManager.GetSchemaByID(expectedSchemaID) - if err != nil { - return false, fmt.Errorf("failed to get expected schema %d: %w", expectedSchemaID, err) - } - - actualSchema, err := h.schemaManager.GetSchemaByID(actualSchemaID) - if err != nil { - return false, fmt.Errorf("failed to get actual schema %d: %w", actualSchemaID, err) - } - - // Since we're accessing schema from registry for this topic, ensure topic config is updated - h.ensureTopicSchemaFromRegistryCache(topicName, expectedSchema, actualSchema) - - // Check compatibility based on topic's compatibility level - compatibilityLevel := h.getTopicCompatibilityLevel(topicName) - - result, err := h.schemaManager.CheckSchemaCompatibility( - expectedSchema.Schema, - actualSchema.Schema, - format, - compatibilityLevel, - ) - if err != nil { - return false, fmt.Errorf("failed to check schema compatibility: %w", err) - } - - return result.Compatible, nil -} - -// validateMessageContent validates the message content against its schema -func (h *Handler) validateMessageContent(schemaID uint32, format schema.Format, messageBytes []byte) error { - // Decode the message to validate it can be parsed correctly - _, err := h.schemaManager.DecodeMessage(messageBytes) - if err != nil { - return fmt.Errorf("message validation failed for schema %d: %w", schemaID, err) - } - - // Additional format-specific validation could be added here - switch format { - case schema.FormatAvro: - return h.validateAvroMessage(schemaID, messageBytes) - case schema.FormatProtobuf: - return h.validateProtobufMessage(schemaID, messageBytes) - case schema.FormatJSONSchema: - return h.validateJSONSchemaMessage(schemaID, messageBytes) - default: - return fmt.Errorf("unsupported schema format for validation: %s", format) - } -} - -// validateAvroMessage performs Avro-specific validation -func (h *Handler) validateAvroMessage(schemaID uint32, messageBytes []byte) error { - // Basic validation is already done in DecodeMessage - // Additional Avro-specific validation could be added here - return nil -} - -// validateProtobufMessage performs Protobuf-specific validation -func (h *Handler) validateProtobufMessage(schemaID uint32, messageBytes []byte) error { - // Get the schema for additional validation - cachedSchema, err := h.schemaManager.GetSchemaByID(schemaID) - if err != nil { - return fmt.Errorf("failed to get Protobuf schema %d: %w", schemaID, err) - } - - // Parse the schema to get the descriptor - parser := schema.NewProtobufDescriptorParser() - protobufSchema, err := parser.ParseBinaryDescriptor([]byte(cachedSchema.Schema), "") - if err != nil { - return fmt.Errorf("failed to parse Protobuf schema: %w", err) - } - - // Validate message against schema - envelope, ok := schema.ParseConfluentEnvelope(messageBytes) - if !ok { - return fmt.Errorf("invalid Confluent envelope") - } - - return protobufSchema.ValidateMessage(envelope.Payload) -} - -// validateJSONSchemaMessage performs JSON Schema-specific validation -func (h *Handler) validateJSONSchemaMessage(schemaID uint32, messageBytes []byte) error { - // Get the schema for validation - cachedSchema, err := h.schemaManager.GetSchemaByID(schemaID) - if err != nil { - return fmt.Errorf("failed to get JSON schema %d: %w", schemaID, err) - } - - // Create JSON Schema decoder for validation - decoder, err := schema.NewJSONSchemaDecoder(cachedSchema.Schema) - if err != nil { - return fmt.Errorf("failed to create JSON Schema decoder: %w", err) - } - - // Parse envelope and validate payload - envelope, ok := schema.ParseConfluentEnvelope(messageBytes) - if !ok { - return fmt.Errorf("invalid Confluent envelope") - } - - // Validate JSON payload against schema - _, err = decoder.Decode(envelope.Payload) - if err != nil { - return fmt.Errorf("JSON Schema validation failed: %w", err) - } - - return nil -} - -// Helper methods for configuration - -// isSchemaValidationError checks if an error is related to schema validation -func (h *Handler) isSchemaValidationError(err error) bool { - if err == nil { - return false - } - errStr := strings.ToLower(err.Error()) - return strings.Contains(errStr, "schema") || - strings.Contains(errStr, "decode") || - strings.Contains(errStr, "validation") || - strings.Contains(errStr, "registry") || - strings.Contains(errStr, "avro") || - strings.Contains(errStr, "protobuf") || - strings.Contains(errStr, "json schema") -} - -// isStrictSchemaValidation returns whether strict schema validation is enabled -func (h *Handler) isStrictSchemaValidation() bool { - // This could be configurable per topic or globally - // For now, default to permissive mode - return false -} - -// getTopicCompatibilityLevel returns the compatibility level for a topic -func (h *Handler) getTopicCompatibilityLevel(topicName string) schema.CompatibilityLevel { - // This could be configurable per topic - // For now, default to backward compatibility - return schema.CompatibilityBackward -} - -// parseSchemaID parses a schema ID from string -func (h *Handler) parseSchemaID(schemaIDStr string) (uint32, error) { - if schemaIDStr == "" { - return 0, fmt.Errorf("empty schema ID") - } - - var schemaID uint64 - if _, err := fmt.Sscanf(schemaIDStr, "%d", &schemaID); err != nil { - return 0, fmt.Errorf("invalid schema ID format: %w", err) - } - - if schemaID > 0xFFFFFFFF { - return 0, fmt.Errorf("schema ID too large: %d", schemaID) - } - - return uint32(schemaID), nil -} - -// isSystemTopic checks if a topic should bypass schema processing -func (h *Handler) isSystemTopic(topicName string) bool { - // System topics that should be stored as-is without schema processing - systemTopics := []string{ - "_schemas", // Schema Registry topic - "__consumer_offsets", // Kafka consumer offsets topic - "__transaction_state", // Kafka transaction state topic - } - - for _, systemTopic := range systemTopics { - if topicName == systemTopic { - return true - } - } - - // Also check for topics with system prefixes - return strings.HasPrefix(topicName, "_") || strings.HasPrefix(topicName, "__") -} - -// produceSchemaBasedRecord produces a record using schema-based encoding to RecordValue -// ctx controls the publish timeout - if client cancels, produce operation is cancelled -func (h *Handler) produceSchemaBasedRecord(ctx context.Context, topic string, partition int32, key []byte, value []byte) (int64, error) { - - // System topics should always bypass schema processing and be stored as-is - if h.isSystemTopic(topic) { - offset, err := h.seaweedMQHandler.ProduceRecord(ctx, topic, partition, key, value) - return offset, err - } - - // If schema management is not enabled, fall back to raw message handling - isEnabled := h.IsSchemaEnabled() - if !isEnabled { - return h.seaweedMQHandler.ProduceRecord(ctx, topic, partition, key, value) - } - - var keyDecodedMsg *schema.DecodedMessage - var valueDecodedMsg *schema.DecodedMessage - - // Check and decode key if schematized - if key != nil { - isSchematized := h.schemaManager.IsSchematized(key) - if isSchematized { - var err error - keyDecodedMsg, err = h.schemaManager.DecodeMessage(key) - if err != nil { - // Add delay before returning schema decoding error to prevent overloading - time.Sleep(100 * time.Millisecond) - return 0, fmt.Errorf("failed to decode schematized key: %w", err) - } - } - } - - // Check and decode value if schematized - if value != nil && len(value) > 0 { - isSchematized := h.schemaManager.IsSchematized(value) - if isSchematized { - var err error - valueDecodedMsg, err = h.schemaManager.DecodeMessage(value) - if err != nil { - // If message has schema ID (magic byte 0x00), decoding MUST succeed - // Do not fall back to raw storage - this would corrupt the data model - time.Sleep(100 * time.Millisecond) - return 0, fmt.Errorf("message has schema ID but decoding failed (schema registry may be unavailable): %w", err) - } - } - } - - // If neither key nor value is schematized, fall back to raw message handling - // This is OK for non-schematized messages (no magic byte 0x00) - if keyDecodedMsg == nil && valueDecodedMsg == nil { - return h.seaweedMQHandler.ProduceRecord(ctx, topic, partition, key, value) - } - - // Process key schema if present - if keyDecodedMsg != nil { - // Store key schema information in memory cache for fetch path performance - if !h.hasTopicKeySchemaConfig(topic, keyDecodedMsg.SchemaID, keyDecodedMsg.SchemaFormat) { - err := h.storeTopicKeySchemaConfig(topic, keyDecodedMsg.SchemaID, keyDecodedMsg.SchemaFormat) - if err != nil { - } - - // Schedule key schema registration in background (leader-only, non-blocking) - h.scheduleKeySchemaRegistration(topic, keyDecodedMsg.RecordType) - } - } - - // Process value schema if present and create combined RecordValue with key fields - var recordValueBytes []byte - if valueDecodedMsg != nil { - // Create combined RecordValue that includes both key and value fields - combinedRecordValue := h.createCombinedRecordValue(keyDecodedMsg, valueDecodedMsg) - - // Store the combined RecordValue - schema info is stored in topic configuration - var err error - recordValueBytes, err = proto.Marshal(combinedRecordValue) - if err != nil { - return 0, fmt.Errorf("failed to marshal combined RecordValue: %w", err) - } - - // Store value schema information in memory cache for fetch path performance - // Only store if not already cached to avoid mutex contention on hot path - hasConfig := h.hasTopicSchemaConfig(topic, valueDecodedMsg.SchemaID, valueDecodedMsg.SchemaFormat) - if !hasConfig { - err = h.storeTopicSchemaConfig(topic, valueDecodedMsg.SchemaID, valueDecodedMsg.SchemaFormat) - if err != nil { - // Log error but don't fail the produce - } - - // Schedule value schema registration in background (leader-only, non-blocking) - h.scheduleSchemaRegistration(topic, valueDecodedMsg.RecordType) - } - } else if keyDecodedMsg != nil { - // If only key is schematized, create RecordValue with just key fields - combinedRecordValue := h.createCombinedRecordValue(keyDecodedMsg, nil) - - var err error - recordValueBytes, err = proto.Marshal(combinedRecordValue) - if err != nil { - return 0, fmt.Errorf("failed to marshal key-only RecordValue: %w", err) - } - } else { - // If value is not schematized, use raw value - recordValueBytes = value - } - - // Prepare final key for storage - finalKey := key - if keyDecodedMsg != nil { - // If key was schematized, convert back to raw bytes for storage - keyBytes, err := proto.Marshal(keyDecodedMsg.RecordValue) - if err != nil { - return 0, fmt.Errorf("failed to marshal key RecordValue: %w", err) - } - finalKey = keyBytes - } - - // Send to SeaweedMQ - if valueDecodedMsg != nil || keyDecodedMsg != nil { - // Store the DECODED RecordValue (not the original Confluent Wire Format) - // This enables SQL queries to work properly. Kafka consumers will receive the RecordValue - // which can be re-encoded to Confluent Wire Format during fetch if needed - return h.seaweedMQHandler.ProduceRecordValue(ctx, topic, partition, finalKey, recordValueBytes) - } else { - // Send with raw format for non-schematized data - return h.seaweedMQHandler.ProduceRecord(ctx, topic, partition, finalKey, recordValueBytes) - } -} - -// hasTopicSchemaConfig checks if schema config already exists (read-only, fast path) -func (h *Handler) hasTopicSchemaConfig(topic string, schemaID uint32, schemaFormat schema.Format) bool { - h.topicSchemaConfigMu.RLock() - defer h.topicSchemaConfigMu.RUnlock() - - if h.topicSchemaConfigs == nil { - return false - } - - config, exists := h.topicSchemaConfigs[topic] - if !exists { - return false - } - - // Check if the schema matches (avoid re-registration of same schema) - return config.ValueSchemaID == schemaID && config.ValueSchemaFormat == schemaFormat -} - -// storeTopicSchemaConfig stores original Kafka schema metadata (ID + format) for fetch path -// This is kept in memory for performance when reconstructing Confluent messages during fetch. -// The translated RecordType is persisted via background schema registration. -func (h *Handler) storeTopicSchemaConfig(topic string, schemaID uint32, schemaFormat schema.Format) error { - // Store in memory cache for quick access during fetch operations - h.topicSchemaConfigMu.Lock() - defer h.topicSchemaConfigMu.Unlock() - - if h.topicSchemaConfigs == nil { - h.topicSchemaConfigs = make(map[string]*TopicSchemaConfig) - } - - config, exists := h.topicSchemaConfigs[topic] - if !exists { - config = &TopicSchemaConfig{} - h.topicSchemaConfigs[topic] = config - } - - config.ValueSchemaID = schemaID - config.ValueSchemaFormat = schemaFormat - - return nil -} - -// storeTopicKeySchemaConfig stores key schema configuration -func (h *Handler) storeTopicKeySchemaConfig(topic string, schemaID uint32, schemaFormat schema.Format) error { - h.topicSchemaConfigMu.Lock() - defer h.topicSchemaConfigMu.Unlock() - - if h.topicSchemaConfigs == nil { - h.topicSchemaConfigs = make(map[string]*TopicSchemaConfig) - } - - config, exists := h.topicSchemaConfigs[topic] - if !exists { - config = &TopicSchemaConfig{} - h.topicSchemaConfigs[topic] = config - } - - config.KeySchemaID = schemaID - config.KeySchemaFormat = schemaFormat - config.HasKeySchema = true - - return nil -} - -// hasTopicKeySchemaConfig checks if key schema config already exists -func (h *Handler) hasTopicKeySchemaConfig(topic string, schemaID uint32, schemaFormat schema.Format) bool { - h.topicSchemaConfigMu.RLock() - defer h.topicSchemaConfigMu.RUnlock() - - config, exists := h.topicSchemaConfigs[topic] - if !exists { - return false - } - - // Check if the key schema matches - return config.HasKeySchema && config.KeySchemaID == schemaID && config.KeySchemaFormat == schemaFormat -} - -// scheduleSchemaRegistration registers value schema once per topic-schema combination -func (h *Handler) scheduleSchemaRegistration(topicName string, recordType *schema_pb.RecordType) { - if recordType == nil { - return - } - - // Create a unique key for this value schema registration - schemaKey := fmt.Sprintf("%s:value:%d", topicName, h.getRecordTypeHash(recordType)) - - // Check if already registered - h.registeredSchemasMu.RLock() - if h.registeredSchemas[schemaKey] { - h.registeredSchemasMu.RUnlock() - return // Already registered - } - h.registeredSchemasMu.RUnlock() - - // Double-check with write lock to prevent race condition - h.registeredSchemasMu.Lock() - defer h.registeredSchemasMu.Unlock() - - if h.registeredSchemas[schemaKey] { - return // Already registered by another goroutine - } - - // Mark as registered before attempting registration - h.registeredSchemas[schemaKey] = true - - // Perform synchronous registration - if err := h.registerSchemasViaBrokerAPI(topicName, recordType, nil); err != nil { - // Remove from registered map on failure so it can be retried - delete(h.registeredSchemas, schemaKey) - } -} - -// scheduleKeySchemaRegistration registers key schema once per topic-schema combination -func (h *Handler) scheduleKeySchemaRegistration(topicName string, recordType *schema_pb.RecordType) { - if recordType == nil { - return - } - - // Create a unique key for this key schema registration - schemaKey := fmt.Sprintf("%s:key:%d", topicName, h.getRecordTypeHash(recordType)) - - // Check if already registered - h.registeredSchemasMu.RLock() - if h.registeredSchemas[schemaKey] { - h.registeredSchemasMu.RUnlock() - return // Already registered - } - h.registeredSchemasMu.RUnlock() - - // Double-check with write lock to prevent race condition - h.registeredSchemasMu.Lock() - defer h.registeredSchemasMu.Unlock() - - if h.registeredSchemas[schemaKey] { - return // Already registered by another goroutine - } - - // Mark as registered before attempting registration - h.registeredSchemas[schemaKey] = true - - // Register key schema to the same topic (not a phantom "-key" topic) - // This uses the extended ConfigureTopicRequest with separate key/value RecordTypes - if err := h.registerSchemasViaBrokerAPI(topicName, nil, recordType); err != nil { - // Remove from registered map on failure so it can be retried - delete(h.registeredSchemas, schemaKey) - } else { - } -} - -// ensureTopicSchemaFromRegistryCache ensures topic configuration is updated when schemas are retrieved from registry -func (h *Handler) ensureTopicSchemaFromRegistryCache(topicName string, schemas ...*schema.CachedSchema) { - if len(schemas) == 0 { - return - } - - // Use the latest/most relevant schema (last one in the list) - latestSchema := schemas[len(schemas)-1] - if latestSchema == nil { - return - } - - // Try to infer RecordType from the cached schema - recordType, err := h.inferRecordTypeFromCachedSchema(latestSchema) - if err != nil { - return - } - - // Schedule schema registration to update topic.conf - if recordType != nil { - h.scheduleSchemaRegistration(topicName, recordType) - } -} - -// ensureTopicKeySchemaFromRegistryCache ensures topic configuration is updated when key schemas are retrieved from registry -func (h *Handler) ensureTopicKeySchemaFromRegistryCache(topicName string, schemas ...*schema.CachedSchema) { - if len(schemas) == 0 { - return - } - - // Use the latest/most relevant schema (last one in the list) - latestSchema := schemas[len(schemas)-1] - if latestSchema == nil { - return - } - - // Try to infer RecordType from the cached schema - recordType, err := h.inferRecordTypeFromCachedSchema(latestSchema) - if err != nil { - return - } - - // Schedule key schema registration to update topic.conf - if recordType != nil { - h.scheduleKeySchemaRegistration(topicName, recordType) - } -} - -// getRecordTypeHash generates a simple hash for RecordType to use as a key -func (h *Handler) getRecordTypeHash(recordType *schema_pb.RecordType) uint32 { - if recordType == nil { - return 0 - } - - // Simple hash based on field count and first field name - hash := uint32(len(recordType.Fields)) - if len(recordType.Fields) > 0 { - // Use first field name for additional uniqueness - firstFieldName := recordType.Fields[0].Name - for _, char := range firstFieldName { - hash = hash*31 + uint32(char) - } - } - - return hash -} - -// createCombinedRecordValue creates a RecordValue that combines fields from both key and value decoded messages -// Key fields are prefixed with "key_" to distinguish them from value fields -// The message key bytes are stored in the _key system column (from logEntry.Key) -func (h *Handler) createCombinedRecordValue(keyDecodedMsg *schema.DecodedMessage, valueDecodedMsg *schema.DecodedMessage) *schema_pb.RecordValue { - combinedFields := make(map[string]*schema_pb.Value) - - // Add key fields with "key_" prefix - if keyDecodedMsg != nil && keyDecodedMsg.RecordValue != nil { - for fieldName, fieldValue := range keyDecodedMsg.RecordValue.Fields { - combinedFields["key_"+fieldName] = fieldValue - } - // Note: The message key bytes are stored in the _key system column (from logEntry.Key) - // We don't create a "key" field here to avoid redundancy - } - - // Add value fields (no prefix) - if valueDecodedMsg != nil && valueDecodedMsg.RecordValue != nil { - for fieldName, fieldValue := range valueDecodedMsg.RecordValue.Fields { - combinedFields[fieldName] = fieldValue - } - } - - return &schema_pb.RecordValue{ - Fields: combinedFields, - } -} - -// inferRecordTypeFromCachedSchema attempts to infer RecordType from a cached schema -func (h *Handler) inferRecordTypeFromCachedSchema(cachedSchema *schema.CachedSchema) (*schema_pb.RecordType, error) { - if cachedSchema == nil { - return nil, fmt.Errorf("cached schema is nil") - } - - switch cachedSchema.Format { - case schema.FormatAvro: - return h.inferRecordTypeFromAvroSchema(cachedSchema.Schema) - case schema.FormatProtobuf: - return h.inferRecordTypeFromProtobufSchema(cachedSchema.Schema) - case schema.FormatJSONSchema: - return h.inferRecordTypeFromJSONSchema(cachedSchema.Schema) - default: - return nil, fmt.Errorf("unsupported schema format for inference: %v", cachedSchema.Format) - } -} - -// inferRecordTypeFromAvroSchema infers RecordType from Avro schema string -// Uses cache to avoid recreating expensive Avro codecs (17% CPU overhead!) -func (h *Handler) inferRecordTypeFromAvroSchema(avroSchema string) (*schema_pb.RecordType, error) { - // Check cache first - h.inferredRecordTypesMu.RLock() - if recordType, exists := h.inferredRecordTypes[avroSchema]; exists { - h.inferredRecordTypesMu.RUnlock() - return recordType, nil - } - h.inferredRecordTypesMu.RUnlock() - - // Cache miss - create decoder and infer type - decoder, err := schema.NewAvroDecoder(avroSchema) - if err != nil { - return nil, fmt.Errorf("failed to create Avro decoder: %w", err) - } - - recordType, err := decoder.InferRecordType() - if err != nil { - return nil, err - } - - // Cache the result - h.inferredRecordTypesMu.Lock() - h.inferredRecordTypes[avroSchema] = recordType - h.inferredRecordTypesMu.Unlock() - - return recordType, nil -} - -// inferRecordTypeFromProtobufSchema infers RecordType from Protobuf schema -// Uses cache to avoid recreating expensive decoders -func (h *Handler) inferRecordTypeFromProtobufSchema(protobufSchema string) (*schema_pb.RecordType, error) { - // Check cache first - cacheKey := "protobuf:" + protobufSchema - h.inferredRecordTypesMu.RLock() - if recordType, exists := h.inferredRecordTypes[cacheKey]; exists { - h.inferredRecordTypesMu.RUnlock() - return recordType, nil - } - h.inferredRecordTypesMu.RUnlock() - - // Cache miss - create decoder and infer type - decoder, err := schema.NewProtobufDecoder([]byte(protobufSchema)) - if err != nil { - return nil, fmt.Errorf("failed to create Protobuf decoder: %w", err) - } - - recordType, err := decoder.InferRecordType() - if err != nil { - return nil, err - } - - // Cache the result - h.inferredRecordTypesMu.Lock() - h.inferredRecordTypes[cacheKey] = recordType - h.inferredRecordTypesMu.Unlock() - - return recordType, nil -} - -// inferRecordTypeFromJSONSchema infers RecordType from JSON Schema string -// Uses cache to avoid recreating expensive decoders -func (h *Handler) inferRecordTypeFromJSONSchema(jsonSchema string) (*schema_pb.RecordType, error) { - // Check cache first - cacheKey := "json:" + jsonSchema - h.inferredRecordTypesMu.RLock() - if recordType, exists := h.inferredRecordTypes[cacheKey]; exists { - h.inferredRecordTypesMu.RUnlock() - return recordType, nil - } - h.inferredRecordTypesMu.RUnlock() - - // Cache miss - create decoder and infer type - decoder, err := schema.NewJSONSchemaDecoder(jsonSchema) - if err != nil { - return nil, fmt.Errorf("failed to create JSON Schema decoder: %w", err) - } - - recordType, err := decoder.InferRecordType() - if err != nil { - return nil, err - } - - // Cache the result - h.inferredRecordTypesMu.Lock() - h.inferredRecordTypes[cacheKey] = recordType - h.inferredRecordTypesMu.Unlock() - - return recordType, nil -} diff --git a/weed/mq/kafka/protocol/record_batch_parser.go b/weed/mq/kafka/protocol/record_batch_parser.go deleted file mode 100644 index 1153b6c5a..000000000 --- a/weed/mq/kafka/protocol/record_batch_parser.go +++ /dev/null @@ -1,290 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "fmt" - "hash/crc32" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/compression" -) - -// RecordBatch represents a parsed Kafka record batch -type RecordBatch struct { - BaseOffset int64 - BatchLength int32 - PartitionLeaderEpoch int32 - Magic int8 - CRC32 uint32 - Attributes int16 - LastOffsetDelta int32 - FirstTimestamp int64 - MaxTimestamp int64 - ProducerID int64 - ProducerEpoch int16 - BaseSequence int32 - RecordCount int32 - Records []byte // Raw records data (may be compressed) -} - -// RecordBatchParser handles parsing of Kafka record batches with compression support -type RecordBatchParser struct { - // Add any configuration or state needed -} - -// NewRecordBatchParser creates a new record batch parser -func NewRecordBatchParser() *RecordBatchParser { - return &RecordBatchParser{} -} - -// ParseRecordBatch parses a Kafka record batch from binary data -func (p *RecordBatchParser) ParseRecordBatch(data []byte) (*RecordBatch, error) { - if len(data) < 61 { // Minimum record batch header size - return nil, fmt.Errorf("record batch too small: %d bytes, need at least 61", len(data)) - } - - batch := &RecordBatch{} - offset := 0 - - // Parse record batch header - batch.BaseOffset = int64(binary.BigEndian.Uint64(data[offset:])) - offset += 8 - - batch.BatchLength = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - batch.PartitionLeaderEpoch = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - batch.Magic = int8(data[offset]) - offset += 1 - - // Validate magic byte - if batch.Magic != 2 { - return nil, fmt.Errorf("unsupported record batch magic byte: %d, expected 2", batch.Magic) - } - - batch.CRC32 = binary.BigEndian.Uint32(data[offset:]) - offset += 4 - - batch.Attributes = int16(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - - batch.LastOffsetDelta = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - batch.FirstTimestamp = int64(binary.BigEndian.Uint64(data[offset:])) - offset += 8 - - batch.MaxTimestamp = int64(binary.BigEndian.Uint64(data[offset:])) - offset += 8 - - batch.ProducerID = int64(binary.BigEndian.Uint64(data[offset:])) - offset += 8 - - batch.ProducerEpoch = int16(binary.BigEndian.Uint16(data[offset:])) - offset += 2 - - batch.BaseSequence = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - batch.RecordCount = int32(binary.BigEndian.Uint32(data[offset:])) - offset += 4 - - // Validate record count - if batch.RecordCount < 0 || batch.RecordCount > 1000000 { - return nil, fmt.Errorf("invalid record count: %d", batch.RecordCount) - } - - // Extract records data (rest of the batch) - if offset < len(data) { - batch.Records = data[offset:] - } - - return batch, nil -} - -// GetCompressionCodec extracts the compression codec from the batch attributes -func (batch *RecordBatch) GetCompressionCodec() compression.CompressionCodec { - return compression.ExtractCompressionCodec(batch.Attributes) -} - -// IsCompressed returns true if the record batch is compressed -func (batch *RecordBatch) IsCompressed() bool { - return batch.GetCompressionCodec() != compression.None -} - -// DecompressRecords decompresses the records data if compressed -func (batch *RecordBatch) DecompressRecords() ([]byte, error) { - if !batch.IsCompressed() { - return batch.Records, nil - } - - codec := batch.GetCompressionCodec() - decompressed, err := compression.Decompress(codec, batch.Records) - if err != nil { - return nil, fmt.Errorf("failed to decompress records with %s: %w", codec, err) - } - - return decompressed, nil -} - -// ValidateCRC32 validates the CRC32 checksum of the record batch -func (batch *RecordBatch) ValidateCRC32(originalData []byte) error { - if len(originalData) < 17 { // Need at least up to CRC field - return fmt.Errorf("data too small for CRC validation") - } - - // CRC32 is calculated over the data starting after the CRC field - // Skip: BaseOffset(8) + BatchLength(4) + PartitionLeaderEpoch(4) + Magic(1) + CRC(4) = 21 bytes - // Kafka uses Castagnoli (CRC-32C) algorithm for record batch CRC - dataForCRC := originalData[21:] - - calculatedCRC := crc32.Checksum(dataForCRC, crc32.MakeTable(crc32.Castagnoli)) - - if calculatedCRC != batch.CRC32 { - return fmt.Errorf("CRC32 mismatch: expected %x, got %x", batch.CRC32, calculatedCRC) - } - - return nil -} - -// ParseRecordBatchWithValidation parses and validates a record batch -func (p *RecordBatchParser) ParseRecordBatchWithValidation(data []byte, validateCRC bool) (*RecordBatch, error) { - batch, err := p.ParseRecordBatch(data) - if err != nil { - return nil, err - } - - if validateCRC { - if err := batch.ValidateCRC32(data); err != nil { - return nil, fmt.Errorf("CRC validation failed: %w", err) - } - } - - return batch, nil -} - -// ExtractRecords extracts and decompresses individual records from the batch -func (batch *RecordBatch) ExtractRecords() ([]Record, error) { - decompressedData, err := batch.DecompressRecords() - if err != nil { - return nil, err - } - - // Parse individual records from decompressed data - // This is a simplified implementation - full implementation would parse varint-encoded records - records := make([]Record, 0, batch.RecordCount) - - // For now, create placeholder records - // In a full implementation, this would parse the actual record format - for i := int32(0); i < batch.RecordCount; i++ { - record := Record{ - Offset: batch.BaseOffset + int64(i), - Key: nil, // Would be parsed from record data - Value: decompressedData, // Simplified - would be individual record value - Headers: nil, // Would be parsed from record data - Timestamp: batch.FirstTimestamp + int64(i), // Simplified - } - records = append(records, record) - } - - return records, nil -} - -// Record represents a single Kafka record -type Record struct { - Offset int64 - Key []byte - Value []byte - Headers map[string][]byte - Timestamp int64 -} - -// CompressRecordBatch compresses a record batch using the specified codec -func CompressRecordBatch(codec compression.CompressionCodec, records []byte) ([]byte, int16, error) { - if codec == compression.None { - return records, 0, nil - } - - compressed, err := compression.Compress(codec, records) - if err != nil { - return nil, 0, fmt.Errorf("failed to compress record batch: %w", err) - } - - attributes := compression.SetCompressionCodec(0, codec) - return compressed, attributes, nil -} - -// CreateRecordBatch creates a new record batch with the given parameters -func CreateRecordBatch(baseOffset int64, records []byte, codec compression.CompressionCodec) ([]byte, error) { - // Compress records if needed - compressedRecords, attributes, err := CompressRecordBatch(codec, records) - if err != nil { - return nil, err - } - - // Calculate batch length (everything after the batch length field) - recordsLength := len(compressedRecords) - batchLength := 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4 + 4 + recordsLength // Header + records - - // Build the record batch - batch := make([]byte, 0, 61+recordsLength) - - // Base offset (8 bytes) - baseOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffsetBytes, uint64(baseOffset)) - batch = append(batch, baseOffsetBytes...) - - // Batch length (4 bytes) - batchLengthBytes := make([]byte, 4) - binary.BigEndian.PutUint32(batchLengthBytes, uint32(batchLength)) - batch = append(batch, batchLengthBytes...) - - // Partition leader epoch (4 bytes) - use 0 for simplicity - batch = append(batch, 0, 0, 0, 0) - - // Magic byte (1 byte) - version 2 - batch = append(batch, 2) - - // CRC32 placeholder (4 bytes) - will be calculated later - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - attributesBytes := make([]byte, 2) - binary.BigEndian.PutUint16(attributesBytes, uint16(attributes)) - batch = append(batch, attributesBytes...) - - // Last offset delta (4 bytes) - assume single record for simplicity - batch = append(batch, 0, 0, 0, 0) - - // First timestamp (8 bytes) - use current time - // For simplicity, use 0 - batch = append(batch, 0, 0, 0, 0, 0, 0, 0, 0) - - // Max timestamp (8 bytes) - batch = append(batch, 0, 0, 0, 0, 0, 0, 0, 0) - - // Producer ID (8 bytes) - use -1 for non-transactional - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // Producer epoch (2 bytes) - use -1 - batch = append(batch, 0xFF, 0xFF) - - // Base sequence (4 bytes) - use -1 - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // Record count (4 bytes) - assume 1 for simplicity - batch = append(batch, 0, 0, 0, 1) - - // Records data - batch = append(batch, compressedRecords...) - - // Calculate and set CRC32 - // Kafka uses Castagnoli (CRC-32C) algorithm for record batch CRC - dataForCRC := batch[21:] // Everything after CRC field - crc := crc32.Checksum(dataForCRC, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - return batch, nil -} diff --git a/weed/mq/kafka/protocol/record_batch_parser_test.go b/weed/mq/kafka/protocol/record_batch_parser_test.go deleted file mode 100644 index d445b9421..000000000 --- a/weed/mq/kafka/protocol/record_batch_parser_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package protocol - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/mq/kafka/compression" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestRecordBatchParser_ParseRecordBatch tests basic record batch parsing -func TestRecordBatchParser_ParseRecordBatch(t *testing.T) { - parser := NewRecordBatchParser() - - // Create a minimal valid record batch - recordData := []byte("test record data") - batch, err := CreateRecordBatch(100, recordData, compression.None) - require.NoError(t, err) - - // Parse the batch - parsed, err := parser.ParseRecordBatch(batch) - require.NoError(t, err) - - // Verify parsed fields - assert.Equal(t, int64(100), parsed.BaseOffset) - assert.Equal(t, int8(2), parsed.Magic) - assert.Equal(t, int32(1), parsed.RecordCount) - assert.Equal(t, compression.None, parsed.GetCompressionCodec()) - assert.False(t, parsed.IsCompressed()) -} - -// TestRecordBatchParser_ParseRecordBatch_TooSmall tests parsing with insufficient data -func TestRecordBatchParser_ParseRecordBatch_TooSmall(t *testing.T) { - parser := NewRecordBatchParser() - - // Test with data that's too small - smallData := make([]byte, 30) // Less than 61 bytes minimum - _, err := parser.ParseRecordBatch(smallData) - assert.Error(t, err) - assert.Contains(t, err.Error(), "record batch too small") -} - -// TestRecordBatchParser_ParseRecordBatch_InvalidMagic tests parsing with invalid magic byte -func TestRecordBatchParser_ParseRecordBatch_InvalidMagic(t *testing.T) { - parser := NewRecordBatchParser() - - // Create a batch with invalid magic byte - recordData := []byte("test record data") - batch, err := CreateRecordBatch(100, recordData, compression.None) - require.NoError(t, err) - - // Corrupt the magic byte (at offset 16) - batch[16] = 1 // Invalid magic byte - - // Parse should fail - _, err = parser.ParseRecordBatch(batch) - assert.Error(t, err) - assert.Contains(t, err.Error(), "unsupported record batch magic byte") -} - -// TestRecordBatchParser_Compression tests compression support -func TestRecordBatchParser_Compression(t *testing.T) { - parser := NewRecordBatchParser() - recordData := []byte("This is a test record that should compress well when repeated. " + - "This is a test record that should compress well when repeated. " + - "This is a test record that should compress well when repeated.") - - codecs := []compression.CompressionCodec{ - compression.None, - compression.Gzip, - compression.Snappy, - compression.Lz4, - compression.Zstd, - } - - for _, codec := range codecs { - t.Run(codec.String(), func(t *testing.T) { - // Create compressed batch - batch, err := CreateRecordBatch(200, recordData, codec) - require.NoError(t, err) - - // Parse the batch - parsed, err := parser.ParseRecordBatch(batch) - require.NoError(t, err) - - // Verify compression codec - assert.Equal(t, codec, parsed.GetCompressionCodec()) - assert.Equal(t, codec != compression.None, parsed.IsCompressed()) - - // Decompress and verify data - decompressed, err := parsed.DecompressRecords() - require.NoError(t, err) - assert.Equal(t, recordData, decompressed) - }) - } -} - -// TestRecordBatchParser_CRCValidation tests CRC32 validation -func TestRecordBatchParser_CRCValidation(t *testing.T) { - parser := NewRecordBatchParser() - recordData := []byte("test record for CRC validation") - - // Create a valid batch - batch, err := CreateRecordBatch(300, recordData, compression.None) - require.NoError(t, err) - - t.Run("Valid CRC", func(t *testing.T) { - // Parse with CRC validation should succeed - parsed, err := parser.ParseRecordBatchWithValidation(batch, true) - require.NoError(t, err) - assert.Equal(t, int64(300), parsed.BaseOffset) - }) - - t.Run("Invalid CRC", func(t *testing.T) { - // Corrupt the CRC field - corruptedBatch := make([]byte, len(batch)) - copy(corruptedBatch, batch) - corruptedBatch[17] = 0xFF // Corrupt CRC - - // Parse with CRC validation should fail - _, err := parser.ParseRecordBatchWithValidation(corruptedBatch, true) - assert.Error(t, err) - assert.Contains(t, err.Error(), "CRC validation failed") - }) - - t.Run("Skip CRC validation", func(t *testing.T) { - // Corrupt the CRC field - corruptedBatch := make([]byte, len(batch)) - copy(corruptedBatch, batch) - corruptedBatch[17] = 0xFF // Corrupt CRC - - // Parse without CRC validation should succeed - parsed, err := parser.ParseRecordBatchWithValidation(corruptedBatch, false) - require.NoError(t, err) - assert.Equal(t, int64(300), parsed.BaseOffset) - }) -} - -// TestRecordBatchParser_ExtractRecords tests record extraction -func TestRecordBatchParser_ExtractRecords(t *testing.T) { - parser := NewRecordBatchParser() - recordData := []byte("test record data for extraction") - - // Create a batch - batch, err := CreateRecordBatch(400, recordData, compression.Gzip) - require.NoError(t, err) - - // Parse the batch - parsed, err := parser.ParseRecordBatch(batch) - require.NoError(t, err) - - // Extract records - records, err := parsed.ExtractRecords() - require.NoError(t, err) - - // Verify extracted records (simplified implementation returns 1 record) - assert.Len(t, records, 1) - assert.Equal(t, int64(400), records[0].Offset) - assert.Equal(t, recordData, records[0].Value) -} - -// TestCompressRecordBatch tests the compression helper function -func TestCompressRecordBatch(t *testing.T) { - recordData := []byte("test data for compression") - - t.Run("No compression", func(t *testing.T) { - compressed, attributes, err := CompressRecordBatch(compression.None, recordData) - require.NoError(t, err) - assert.Equal(t, recordData, compressed) - assert.Equal(t, int16(0), attributes) - }) - - t.Run("Gzip compression", func(t *testing.T) { - compressed, attributes, err := CompressRecordBatch(compression.Gzip, recordData) - require.NoError(t, err) - assert.NotEqual(t, recordData, compressed) - assert.Equal(t, int16(1), attributes) - - // Verify we can decompress - decompressed, err := compression.Decompress(compression.Gzip, compressed) - require.NoError(t, err) - assert.Equal(t, recordData, decompressed) - }) -} - -// TestCreateRecordBatch tests record batch creation -func TestCreateRecordBatch(t *testing.T) { - recordData := []byte("test record data") - baseOffset := int64(500) - - t.Run("Uncompressed batch", func(t *testing.T) { - batch, err := CreateRecordBatch(baseOffset, recordData, compression.None) - require.NoError(t, err) - assert.True(t, len(batch) >= 61) // Minimum header size - - // Parse and verify - parser := NewRecordBatchParser() - parsed, err := parser.ParseRecordBatch(batch) - require.NoError(t, err) - assert.Equal(t, baseOffset, parsed.BaseOffset) - assert.Equal(t, compression.None, parsed.GetCompressionCodec()) - }) - - t.Run("Compressed batch", func(t *testing.T) { - batch, err := CreateRecordBatch(baseOffset, recordData, compression.Snappy) - require.NoError(t, err) - assert.True(t, len(batch) >= 61) // Minimum header size - - // Parse and verify - parser := NewRecordBatchParser() - parsed, err := parser.ParseRecordBatch(batch) - require.NoError(t, err) - assert.Equal(t, baseOffset, parsed.BaseOffset) - assert.Equal(t, compression.Snappy, parsed.GetCompressionCodec()) - assert.True(t, parsed.IsCompressed()) - - // Verify decompression works - decompressed, err := parsed.DecompressRecords() - require.NoError(t, err) - assert.Equal(t, recordData, decompressed) - }) -} - -// TestRecordBatchParser_InvalidRecordCount tests handling of invalid record counts -func TestRecordBatchParser_InvalidRecordCount(t *testing.T) { - parser := NewRecordBatchParser() - - // Create a valid batch first - recordData := []byte("test record data") - batch, err := CreateRecordBatch(100, recordData, compression.None) - require.NoError(t, err) - - // Corrupt the record count field (at offset 57-60) - // Set to a very large number - batch[57] = 0xFF - batch[58] = 0xFF - batch[59] = 0xFF - batch[60] = 0xFF - - // Parse should fail - _, err = parser.ParseRecordBatch(batch) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid record count") -} - -// BenchmarkRecordBatchParser tests parsing performance -func BenchmarkRecordBatchParser(b *testing.B) { - parser := NewRecordBatchParser() - recordData := make([]byte, 1024) // 1KB record - for i := range recordData { - recordData[i] = byte(i % 256) - } - - codecs := []compression.CompressionCodec{ - compression.None, - compression.Gzip, - compression.Snappy, - compression.Lz4, - compression.Zstd, - } - - for _, codec := range codecs { - batch, err := CreateRecordBatch(0, recordData, codec) - if err != nil { - b.Fatal(err) - } - - b.Run("Parse_"+codec.String(), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := parser.ParseRecordBatch(batch) - if err != nil { - b.Fatal(err) - } - } - }) - - b.Run("Decompress_"+codec.String(), func(b *testing.B) { - parsed, err := parser.ParseRecordBatch(batch) - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := parsed.DecompressRecords() - if err != nil { - b.Fatal(err) - } - } - }) - } -} diff --git a/weed/mq/kafka/protocol/record_extraction_test.go b/weed/mq/kafka/protocol/record_extraction_test.go deleted file mode 100644 index e1f8afe0b..000000000 --- a/weed/mq/kafka/protocol/record_extraction_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "hash/crc32" - "testing" -) - -// TestExtractAllRecords_RealKafkaFormat tests extracting records from a real Kafka v2 record batch -func TestExtractAllRecords_RealKafkaFormat(t *testing.T) { - h := &Handler{} // Minimal handler for testing - - // Create a proper Kafka v2 record batch with 1 record - // This mimics what Schema Registry or other Kafka clients would send - - // Build record batch header (61 bytes) - batch := make([]byte, 0, 200) - - // BaseOffset (8 bytes) - baseOffset := make([]byte, 8) - binary.BigEndian.PutUint64(baseOffset, 0) - batch = append(batch, baseOffset...) - - // BatchLength (4 bytes) - will set after we know total size - batchLengthPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // PartitionLeaderEpoch (4 bytes) - batch = append(batch, 0, 0, 0, 0) - - // Magic (1 byte) - must be 2 for v2 - batch = append(batch, 2) - - // CRC32 (4 bytes) - will calculate and set later - crcPos := len(batch) - batch = append(batch, 0, 0, 0, 0) - - // Attributes (2 bytes) - no compression - batch = append(batch, 0, 0) - - // LastOffsetDelta (4 bytes) - batch = append(batch, 0, 0, 0, 0) - - // FirstTimestamp (8 bytes) - batch = append(batch, 0, 0, 0, 0, 0, 0, 0, 0) - - // MaxTimestamp (8 bytes) - batch = append(batch, 0, 0, 0, 0, 0, 0, 0, 0) - - // ProducerID (8 bytes) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF) - - // ProducerEpoch (2 bytes) - batch = append(batch, 0xFF, 0xFF) - - // BaseSequence (4 bytes) - batch = append(batch, 0xFF, 0xFF, 0xFF, 0xFF) - - // RecordCount (4 bytes) - batch = append(batch, 0, 0, 0, 1) // 1 record - - // Now add the actual record (varint-encoded) - // Record format: - // - length (signed zigzag varint) - // - attributes (1 byte) - // - timestampDelta (signed zigzag varint) - // - offsetDelta (signed zigzag varint) - // - keyLength (signed zigzag varint, -1 for null) - // - key (bytes) - // - valueLength (signed zigzag varint, -1 for null) - // - value (bytes) - // - headersCount (signed zigzag varint) - - record := make([]byte, 0, 50) - - // attributes (1 byte) - record = append(record, 0) - - // timestampDelta (signed zigzag varint - 0) - // 0 in zigzag is: (0 << 1) ^ (0 >> 63) = 0 - record = append(record, 0) - - // offsetDelta (signed zigzag varint - 0) - record = append(record, 0) - - // keyLength (signed zigzag varint - -1 for null) - // -1 in zigzag is: (-1 << 1) ^ (-1 >> 63) = -2 ^ -1 = 1 - record = append(record, 1) - - // key (none, because null with length -1) - - // valueLength (signed zigzag varint) - testValue := []byte(`{"type":"string"}`) - // Positive length N in zigzag is: (N << 1) = N*2 - valueLen := len(testValue) - record = append(record, byte(valueLen<<1)) - - // value - record = append(record, testValue...) - - // headersCount (signed zigzag varint - 0) - record = append(record, 0) - - // Prepend record length as zigzag-encoded varint - recordLength := len(record) - recordWithLength := make([]byte, 0, recordLength+5) - // Zigzag encode the length: (n << 1) for positive n - zigzagLength := byte(recordLength << 1) - recordWithLength = append(recordWithLength, zigzagLength) - recordWithLength = append(recordWithLength, record...) - - // Append record to batch - batch = append(batch, recordWithLength...) - - // Calculate and set BatchLength (from PartitionLeaderEpoch to end) - batchLength := len(batch) - 12 // Exclude BaseOffset(8) + BatchLength(4) - binary.BigEndian.PutUint32(batch[batchLengthPos:batchLengthPos+4], uint32(batchLength)) - - // Calculate and set CRC32 (from Attributes to end) - // Kafka uses Castagnoli (CRC-32C) algorithm for record batch CRC - crcData := batch[21:] // From Attributes onwards - crc := crc32.Checksum(crcData, crc32.MakeTable(crc32.Castagnoli)) - binary.BigEndian.PutUint32(batch[crcPos:crcPos+4], crc) - - t.Logf("Created batch of %d bytes, record value: %s", len(batch), string(testValue)) - - // Now test extraction - results := h.extractAllRecords(batch) - - if len(results) == 0 { - t.Fatalf("extractAllRecords returned 0 records, expected 1") - } - - if len(results) != 1 { - t.Fatalf("extractAllRecords returned %d records, expected 1", len(results)) - } - - result := results[0] - - // Key should be nil (we sent null key with varint -1) - if result.Key != nil { - t.Errorf("Expected nil key, got %v", result.Key) - } - - // Value should match our test value - if string(result.Value) != string(testValue) { - t.Errorf("Value mismatch:\n got: %s\n want: %s", string(result.Value), string(testValue)) - } - - t.Logf("Successfully extracted record with value: %s", string(result.Value)) -} - -// TestExtractAllRecords_CompressedBatch tests extracting records from a compressed batch -func TestExtractAllRecords_CompressedBatch(t *testing.T) { - // This would test with actual compression, but for now we'll skip - // as we need to ensure uncompressed works first - t.Skip("Compressed batch test - implement after uncompressed works") -} diff --git a/weed/mq/kafka/protocol/response_cache.go b/weed/mq/kafka/protocol/response_cache.go deleted file mode 100644 index f6dd8b69d..000000000 --- a/weed/mq/kafka/protocol/response_cache.go +++ /dev/null @@ -1,80 +0,0 @@ -package protocol - -import ( - "sync" - "time" -) - -// ResponseCache caches API responses to reduce CPU usage for repeated requests -type ResponseCache struct { - mu sync.RWMutex - cache map[string]*cacheEntry - ttl time.Duration -} - -type cacheEntry struct { - response []byte - timestamp time.Time -} - -// NewResponseCache creates a new response cache with the specified TTL -func NewResponseCache(ttl time.Duration) *ResponseCache { - return &ResponseCache{ - cache: make(map[string]*cacheEntry), - ttl: ttl, - } -} - -// Get retrieves a cached response if it exists and hasn't expired -func (c *ResponseCache) Get(key string) ([]byte, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - - entry, exists := c.cache[key] - if !exists { - return nil, false - } - - // Check if entry has expired - if time.Since(entry.timestamp) > c.ttl { - return nil, false - } - - return entry.response, true -} - -// Put stores a response in the cache -func (c *ResponseCache) Put(key string, response []byte) { - c.mu.Lock() - defer c.mu.Unlock() - - c.cache[key] = &cacheEntry{ - response: response, - timestamp: time.Now(), - } -} - -// Cleanup removes expired entries from the cache -func (c *ResponseCache) Cleanup() { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - for key, entry := range c.cache { - if now.Sub(entry.timestamp) > c.ttl { - delete(c.cache, key) - } - } -} - -// StartCleanupLoop starts a background goroutine to periodically clean up expired entries -func (c *ResponseCache) StartCleanupLoop(interval time.Duration) { - go func() { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for range ticker.C { - c.Cleanup() - } - }() -} diff --git a/weed/mq/kafka/protocol/response_format_test.go b/weed/mq/kafka/protocol/response_format_test.go deleted file mode 100644 index afc0c1d36..000000000 --- a/weed/mq/kafka/protocol/response_format_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "testing" -) - -// TestResponseFormatsNoCorrelationID verifies that NO API response includes -// the correlation ID in the response body (it should only be in the wire header) -func TestResponseFormatsNoCorrelationID(t *testing.T) { - tests := []struct { - name string - apiKey uint16 - apiVersion uint16 - buildFunc func(correlationID uint32) ([]byte, error) - description string - }{ - // Control Plane APIs - { - name: "ApiVersions_v0", - apiKey: 18, - apiVersion: 0, - description: "ApiVersions v0 should not include correlation ID in body", - }, - { - name: "ApiVersions_v4", - apiKey: 18, - apiVersion: 4, - description: "ApiVersions v4 (flexible) should not include correlation ID in body", - }, - { - name: "Metadata_v0", - apiKey: 3, - apiVersion: 0, - description: "Metadata v0 should not include correlation ID in body", - }, - { - name: "Metadata_v7", - apiKey: 3, - apiVersion: 7, - description: "Metadata v7 should not include correlation ID in body", - }, - { - name: "FindCoordinator_v0", - apiKey: 10, - apiVersion: 0, - description: "FindCoordinator v0 should not include correlation ID in body", - }, - { - name: "FindCoordinator_v2", - apiKey: 10, - apiVersion: 2, - description: "FindCoordinator v2 should not include correlation ID in body", - }, - { - name: "DescribeConfigs_v0", - apiKey: 32, - apiVersion: 0, - description: "DescribeConfigs v0 should not include correlation ID in body", - }, - { - name: "DescribeConfigs_v4", - apiKey: 32, - apiVersion: 4, - description: "DescribeConfigs v4 (flexible) should not include correlation ID in body", - }, - { - name: "DescribeCluster_v0", - apiKey: 60, - apiVersion: 0, - description: "DescribeCluster v0 (flexible) should not include correlation ID in body", - }, - { - name: "InitProducerId_v0", - apiKey: 22, - apiVersion: 0, - description: "InitProducerId v0 should not include correlation ID in body", - }, - { - name: "InitProducerId_v4", - apiKey: 22, - apiVersion: 4, - description: "InitProducerId v4 (flexible) should not include correlation ID in body", - }, - - // Consumer Group Coordination APIs - { - name: "JoinGroup_v0", - apiKey: 11, - apiVersion: 0, - description: "JoinGroup v0 should not include correlation ID in body", - }, - { - name: "SyncGroup_v0", - apiKey: 14, - apiVersion: 0, - description: "SyncGroup v0 should not include correlation ID in body", - }, - { - name: "Heartbeat_v0", - apiKey: 12, - apiVersion: 0, - description: "Heartbeat v0 should not include correlation ID in body", - }, - { - name: "LeaveGroup_v0", - apiKey: 13, - apiVersion: 0, - description: "LeaveGroup v0 should not include correlation ID in body", - }, - { - name: "OffsetFetch_v0", - apiKey: 9, - apiVersion: 0, - description: "OffsetFetch v0 should not include correlation ID in body", - }, - { - name: "OffsetCommit_v0", - apiKey: 8, - apiVersion: 0, - description: "OffsetCommit v0 should not include correlation ID in body", - }, - - // Data Plane APIs - { - name: "Produce_v0", - apiKey: 0, - apiVersion: 0, - description: "Produce v0 should not include correlation ID in body", - }, - { - name: "Produce_v7", - apiKey: 0, - apiVersion: 7, - description: "Produce v7 should not include correlation ID in body", - }, - { - name: "Fetch_v0", - apiKey: 1, - apiVersion: 0, - description: "Fetch v0 should not include correlation ID in body", - }, - { - name: "Fetch_v7", - apiKey: 1, - apiVersion: 7, - description: "Fetch v7 should not include correlation ID in body", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Logf("Testing %s: %s", tt.name, tt.description) - - // This test documents the EXPECTATION but can't automatically verify - // all responses without implementing mock handlers for each API. - // The key insight is: ALL responses should be checked manually - // or with integration tests. - - t.Logf("โœ“ API Key %d Version %d: Correlation ID should be handled by writeResponseWithHeader", - tt.apiKey, tt.apiVersion) - }) - } -} - -// TestFlexibleResponseHeaderFormat verifies that flexible responses -// include the 0x00 tagged fields byte in the header -func TestFlexibleResponseHeaderFormat(t *testing.T) { - tests := []struct { - name string - apiKey uint16 - apiVersion uint16 - isFlexible bool - }{ - // ApiVersions is special - never flexible header (AdminClient compatibility) - {"ApiVersions_v0", 18, 0, false}, - {"ApiVersions_v3", 18, 3, false}, // Special case! - {"ApiVersions_v4", 18, 4, false}, // Special case! - - // Metadata becomes flexible at v9+ - {"Metadata_v0", 3, 0, false}, - {"Metadata_v7", 3, 7, false}, - {"Metadata_v9", 3, 9, true}, - - // Produce becomes flexible at v9+ - {"Produce_v0", 0, 0, false}, - {"Produce_v7", 0, 7, false}, - {"Produce_v9", 0, 9, true}, - - // Fetch becomes flexible at v12+ - {"Fetch_v0", 1, 0, false}, - {"Fetch_v7", 1, 7, false}, - {"Fetch_v12", 1, 12, true}, - - // FindCoordinator becomes flexible at v3+ - {"FindCoordinator_v0", 10, 0, false}, - {"FindCoordinator_v2", 10, 2, false}, - {"FindCoordinator_v3", 10, 3, true}, - - // JoinGroup becomes flexible at v6+ - {"JoinGroup_v0", 11, 0, false}, - {"JoinGroup_v5", 11, 5, false}, - {"JoinGroup_v6", 11, 6, true}, - - // SyncGroup becomes flexible at v4+ - {"SyncGroup_v0", 14, 0, false}, - {"SyncGroup_v3", 14, 3, false}, - {"SyncGroup_v4", 14, 4, true}, - - // Heartbeat becomes flexible at v4+ - {"Heartbeat_v0", 12, 0, false}, - {"Heartbeat_v3", 12, 3, false}, - {"Heartbeat_v4", 12, 4, true}, - - // LeaveGroup becomes flexible at v4+ - {"LeaveGroup_v0", 13, 0, false}, - {"LeaveGroup_v3", 13, 3, false}, - {"LeaveGroup_v4", 13, 4, true}, - - // OffsetFetch becomes flexible at v6+ - {"OffsetFetch_v0", 9, 0, false}, - {"OffsetFetch_v5", 9, 5, false}, - {"OffsetFetch_v6", 9, 6, true}, - - // OffsetCommit becomes flexible at v8+ - {"OffsetCommit_v0", 8, 0, false}, - {"OffsetCommit_v7", 8, 7, false}, - {"OffsetCommit_v8", 8, 8, true}, - - // DescribeConfigs becomes flexible at v4+ - {"DescribeConfigs_v0", 32, 0, false}, - {"DescribeConfigs_v3", 32, 3, false}, - {"DescribeConfigs_v4", 32, 4, true}, - - // InitProducerId becomes flexible at v2+ - {"InitProducerId_v0", 22, 0, false}, - {"InitProducerId_v1", 22, 1, false}, - {"InitProducerId_v2", 22, 2, true}, - - // DescribeCluster is always flexible - {"DescribeCluster_v0", 60, 0, true}, - {"DescribeCluster_v1", 60, 1, true}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := isFlexibleResponse(tt.apiKey, tt.apiVersion) - if actual != tt.isFlexible { - t.Errorf("%s: isFlexibleResponse(%d, %d) = %v, want %v", - tt.name, tt.apiKey, tt.apiVersion, actual, tt.isFlexible) - } else { - t.Logf("โœ“ %s: correctly identified as flexible=%v", tt.name, tt.isFlexible) - } - }) - } -} - -// TestCorrelationIDNotInResponseBody is a helper that can be used -// to scan response bytes and detect if correlation ID appears in the body -func TestCorrelationIDNotInResponseBody(t *testing.T) { - // Test helper function - hasCorrelationIDInBody := func(responseBody []byte, correlationID uint32) bool { - if len(responseBody) < 4 { - return false - } - - // Check if the first 4 bytes match the correlation ID - actual := binary.BigEndian.Uint32(responseBody[0:4]) - return actual == correlationID - } - - t.Run("DetectCorrelationIDInBody", func(t *testing.T) { - correlationID := uint32(12345) - - // Case 1: Response with correlation ID (BAD) - badResponse := make([]byte, 8) - binary.BigEndian.PutUint32(badResponse[0:4], correlationID) - badResponse[4] = 0x00 // some data - - if !hasCorrelationIDInBody(badResponse, correlationID) { - t.Error("Failed to detect correlation ID in response body") - } else { - t.Log("โœ“ Successfully detected correlation ID in body (bad response)") - } - - // Case 2: Response without correlation ID (GOOD) - goodResponse := make([]byte, 8) - goodResponse[0] = 0x00 // error code - goodResponse[1] = 0x00 - - if hasCorrelationIDInBody(goodResponse, correlationID) { - t.Error("False positive: detected correlation ID when it's not there") - } else { - t.Log("โœ“ Correctly identified response without correlation ID") - } - }) -} - -// TestWireProtocolFormat documents the expected wire format -func TestWireProtocolFormat(t *testing.T) { - t.Log("Kafka Wire Protocol Format (KIP-482):") - t.Log(" Non-flexible responses:") - t.Log(" [Size: 4 bytes][Correlation ID: 4 bytes][Response Body]") - t.Log("") - t.Log(" Flexible responses (header version 1+):") - t.Log(" [Size: 4 bytes][Correlation ID: 4 bytes][Tagged Fields: 1+ bytes][Response Body]") - t.Log("") - t.Log(" Size field: includes correlation ID + tagged fields + body") - t.Log(" Tagged Fields: varint-encoded, 0x00 for empty") - t.Log("") - t.Log("CRITICAL: Response body should NEVER include correlation ID!") - t.Log(" It is written ONLY by writeResponseWithHeader") -} diff --git a/weed/mq/kafka/protocol/response_validation_example_test.go b/weed/mq/kafka/protocol/response_validation_example_test.go deleted file mode 100644 index 9476bb791..000000000 --- a/weed/mq/kafka/protocol/response_validation_example_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package protocol - -import ( - "encoding/binary" - "testing" -) - -// This file demonstrates what FIELD-LEVEL testing would look like -// Currently these tests are NOT run automatically because they require -// complex parsing logic for each API. - -// TestJoinGroupResponseStructure shows what we SHOULD test but currently don't -func TestJoinGroupResponseStructure(t *testing.T) { - t.Skip("This is a demonstration test - shows what we SHOULD check") - - // Hypothetical: build a JoinGroup response - // response := buildJoinGroupResponseV6(correlationID, generationID, protocolType, ...) - - // What we SHOULD verify: - t.Log("Field-level checks we should perform:") - t.Log(" 1. Error code (int16) - always present") - t.Log(" 2. Generation ID (int32) - always present") - t.Log(" 3. Protocol type (string/compact string) - nullable in some versions") - t.Log(" 4. Protocol name (string/compact string) - always present") - t.Log(" 5. Leader (string/compact string) - always present") - t.Log(" 6. Member ID (string/compact string) - always present") - t.Log(" 7. Members array - NON-NULLABLE, can be empty but must exist") - t.Log(" ^-- THIS is where the current bug is!") - - // Example of what parsing would look like: - // offset := 0 - // errorCode := binary.BigEndian.Uint16(response[offset:]) - // offset += 2 - // generationID := binary.BigEndian.Uint32(response[offset:]) - // offset += 4 - // ... parse protocol type ... - // ... parse protocol name ... - // ... parse leader ... - // ... parse member ID ... - // membersLength := parseCompactArray(response[offset:]) - // if membersLength < 0 { - // t.Error("Members array is null, but it should be non-nullable!") - // } -} - -// TestProduceResponseStructure shows another example -func TestProduceResponseStructure(t *testing.T) { - t.Skip("This is a demonstration test - shows what we SHOULD check") - - t.Log("Produce response v7 structure:") - t.Log(" 1. Topics array - must not be null") - t.Log(" - Topic name (string)") - t.Log(" - Partitions array - must not be null") - t.Log(" - Partition ID (int32)") - t.Log(" - Error code (int16)") - t.Log(" - Base offset (int64)") - t.Log(" - Log append time (int64)") - t.Log(" - Log start offset (int64)") - t.Log(" 2. Throttle time (int32) - v1+") -} - -// CompareWithReferenceImplementation shows ideal testing approach -func TestCompareWithReferenceImplementation(t *testing.T) { - t.Skip("This would require a reference Kafka broker or client library") - - // Ideal approach: - t.Log("1. Generate test data") - t.Log("2. Build response with our Gateway") - t.Log("3. Build response with kafka-go or Sarama library") - t.Log("4. Compare byte-by-byte") - t.Log("5. If different, highlight which fields differ") - - // This would catch: - // - Wrong field order - // - Wrong field encoding - // - Missing fields - // - Null vs empty distinctions -} - -// CurrentTestingApproach documents what we actually do -func TestCurrentTestingApproach(t *testing.T) { - t.Log("Current testing strategy (as of Oct 2025):") - t.Log("") - t.Log("LEVEL 1: Static Code Analysis") - t.Log(" Tool: check_responses.sh") - t.Log(" Checks: Correlation ID patterns") - t.Log(" Coverage: Good for known issues") - t.Log("") - t.Log("LEVEL 2: Protocol Format Tests") - t.Log(" Tool: TestFlexibleResponseHeaderFormat") - t.Log(" Checks: Flexible vs non-flexible classification") - t.Log(" Coverage: Header format only") - t.Log("") - t.Log("LEVEL 3: Integration Testing") - t.Log(" Tool: Schema Registry, kafka-go, Sarama, Java client") - t.Log(" Checks: Real client compatibility") - t.Log(" Coverage: Complete but requires manual debugging") - t.Log("") - t.Log("MISSING: Field-level response body validation") - t.Log(" This is why JoinGroup issue wasn't caught by unit tests") -} - -// parseCompactArray is a helper that would be needed for field-level testing -func parseCompactArray(data []byte) int { - // Compact array encoding: varint length (length+1 for non-null, 0 for null) - length := int(data[0]) - if length == 0 { - return -1 // null - } - return length - 1 // actual length -} - -// Example of a REAL field-level test we could write -func TestMetadataResponseHasBrokers(t *testing.T) { - t.Skip("Example of what a real field-level test would look like") - - // Build a minimal metadata response - response := make([]byte, 0, 256) - - // Brokers array (non-nullable) - brokerCount := uint32(1) - response = append(response, - byte(brokerCount>>24), - byte(brokerCount>>16), - byte(brokerCount>>8), - byte(brokerCount)) - - // Broker 1 - response = append(response, 0, 0, 0, 1) // node_id = 1 - // ... more fields ... - - // Parse it back - offset := 0 - parsedCount := binary.BigEndian.Uint32(response[offset : offset+4]) - - // Verify - if parsedCount == 0 { - t.Error("Metadata response has 0 brokers - should have at least 1") - } - - t.Logf("โœ“ Metadata response correctly has %d broker(s)", parsedCount) -} - diff --git a/weed/mq/kafka/protocol/syncgroup_assignment_test.go b/weed/mq/kafka/protocol/syncgroup_assignment_test.go deleted file mode 100644 index ed1da3771..000000000 --- a/weed/mq/kafka/protocol/syncgroup_assignment_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package protocol - -import ( - "testing" -) - -// TestSyncGroup_RaceCondition_BugDocumentation documents the original race condition bug -// This test documents the bug where non-leader in Stable state would trigger server-side assignment -func TestSyncGroup_RaceCondition_BugDocumentation(t *testing.T) { - // Original bug scenario: - // 1. Consumer 1 (leader) joins, gets all 15 partitions - // 2. Consumer 2 joins, triggers rebalance - // 3. Consumer 1 commits offsets during cleanup - // 4. Consumer 1 calls SyncGroup with client-side assignments, group moves to Stable - // 5. Consumer 2 calls SyncGroup (late arrival), group is already Stable - // 6. BUG: Consumer 2 falls into "else" branch, triggers server-side assignment - // 7. Consumer 2 gets 10 partitions via server-side assignment - // 8. Result: Some partitions (e.g., partition 2) assigned to BOTH consumers - // 9. Consumer 2 fetches offsets, gets offset 0 (no committed offsets yet) - // 10. Consumer 2 re-reads messages from offset 0 -> DUPLICATES (66.7%)! - - // ORIGINAL BUGGY CODE (joingroup.go lines 887-905): - // } else if group.State == consumer.GroupStateCompletingRebalance || group.State == consumer.GroupStatePreparingRebalance { - // // Non-leader member waiting for leader to provide assignments - // glog.Infof("[SYNCGROUP] Non-leader %s waiting for leader assignments in group %s (state=%s)", - // request.MemberID, request.GroupID, group.State) - // } else { - // // BUG: This branch was triggered when non-leader arrived in Stable state! - // glog.Warningf("[SYNCGROUP] Using server-side assignment for group %s (Leader=%s State=%s)", - // request.GroupID, group.Leader, group.State) - // topicPartitions := h.getTopicPartitions(group) - // group.AssignPartitions(topicPartitions) // <- Duplicate assignment! - // } - - // FIXED CODE (joingroup.go lines 887-906): - // } else if request.MemberID != group.Leader && len(request.GroupAssignments) == 0 { - // // Non-leader member requesting its assignment - // // CRITICAL FIX: Non-leader members should ALWAYS wait for leader's client-side assignments - // // This is the correct behavior for Sarama and other client-side assignment protocols - // glog.Infof("[SYNCGROUP] Non-leader %s waiting for/retrieving assignment in group %s (state=%s)", - // request.MemberID, request.GroupID, group.State) - // // Assignment will be retrieved from member.Assignment below - // } else { - // // This branch should only be reached for server-side assignment protocols - // // (not Sarama's client-side assignment) - // } - - t.Log("Original bug: Non-leader in Stable state would trigger server-side assignment") - t.Log("This caused duplicate partition assignments and message re-reads (66.7% duplicates)") - t.Log("Fix: Check if member is non-leader with empty assignments, regardless of group state") -} - -// TestSyncGroup_FixVerification verifies the fix logic -func TestSyncGroup_FixVerification(t *testing.T) { - testCases := []struct { - name string - isLeader bool - hasAssignments bool - shouldWait bool - shouldAssign bool - description string - }{ - { - name: "Leader with assignments", - isLeader: true, - hasAssignments: true, - shouldWait: false, - shouldAssign: false, - description: "Leader provides client-side assignments, processes them", - }, - { - name: "Non-leader without assignments (PreparingRebalance)", - isLeader: false, - hasAssignments: false, - shouldWait: true, - shouldAssign: false, - description: "Non-leader waits for leader to provide assignments", - }, - { - name: "Non-leader without assignments (Stable) - THE BUG CASE", - isLeader: false, - hasAssignments: false, - shouldWait: true, - shouldAssign: false, - description: "Non-leader retrieves assignment from leader (already processed)", - }, - { - name: "Leader without assignments", - isLeader: true, - hasAssignments: false, - shouldWait: false, - shouldAssign: true, - description: "Edge case: server-side assignment (should not happen with Sarama)", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Simulate the fixed logic - memberID := "consumer-1" - leaderID := "consumer-1" - if !tc.isLeader { - memberID = "consumer-2" - } - - groupAssignmentsCount := 0 - if tc.hasAssignments { - groupAssignmentsCount = 2 // Leader provides assignments for 2 members - } - - // THE FIX: Check if non-leader with no assignments - isNonLeaderWaiting := (memberID != leaderID) && (groupAssignmentsCount == 0) - - if tc.shouldWait && !isNonLeaderWaiting { - t.Errorf("%s: Expected to wait, but logic says no", tc.description) - } - if !tc.shouldWait && isNonLeaderWaiting { - t.Errorf("%s: Expected not to wait, but logic says yes", tc.description) - } - - t.Logf("โœ“ %s: isLeader=%v hasAssignments=%v shouldWait=%v", - tc.description, tc.isLeader, tc.hasAssignments, tc.shouldWait) - }) - } -} diff --git a/weed/mq/kafka/schema/avro_decoder.go b/weed/mq/kafka/schema/avro_decoder.go deleted file mode 100644 index f40236a81..000000000 --- a/weed/mq/kafka/schema/avro_decoder.go +++ /dev/null @@ -1,719 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - "reflect" - "time" - - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// AvroDecoder handles Avro schema decoding and conversion to SeaweedMQ format -type AvroDecoder struct { - codec *goavro.Codec -} - -// NewAvroDecoder creates a new Avro decoder from a schema string -func NewAvroDecoder(schemaStr string) (*AvroDecoder, error) { - codec, err := goavro.NewCodec(schemaStr) - if err != nil { - return nil, fmt.Errorf("failed to create Avro codec: %w", err) - } - - return &AvroDecoder{ - codec: codec, - }, nil -} - -// Decode decodes Avro binary data to a Go map -func (ad *AvroDecoder) Decode(data []byte) (map[string]interface{}, error) { - native, _, err := ad.codec.NativeFromBinary(data) - if err != nil { - return nil, fmt.Errorf("failed to decode Avro data: %w", err) - } - - // Convert to map[string]interface{} for easier processing - result, ok := native.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expected Avro record, got %T", native) - } - - return result, nil -} - -// DecodeToRecordValue decodes Avro data directly to SeaweedMQ RecordValue -func (ad *AvroDecoder) DecodeToRecordValue(data []byte) (*schema_pb.RecordValue, error) { - nativeMap, err := ad.Decode(data) - if err != nil { - return nil, err - } - - return MapToRecordValue(nativeMap), nil -} - -// InferRecordType infers a SeaweedMQ RecordType from an Avro schema -func (ad *AvroDecoder) InferRecordType() (*schema_pb.RecordType, error) { - schema := ad.codec.Schema() - return avroSchemaToRecordType(schema) -} - -// MapToRecordValue converts a Go map to SeaweedMQ RecordValue -func MapToRecordValue(m map[string]interface{}) *schema_pb.RecordValue { - fields := make(map[string]*schema_pb.Value) - - for key, value := range m { - fields[key] = goValueToSchemaValue(value) - } - - return &schema_pb.RecordValue{ - Fields: fields, - } -} - -// goValueToSchemaValue converts a Go value to a SeaweedMQ Value -func goValueToSchemaValue(value interface{}) *schema_pb.Value { - if value == nil { - // For null values, use an empty string as default - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - } - } - - switch v := value.(type) { - case bool: - return &schema_pb.Value{ - Kind: &schema_pb.Value_BoolValue{BoolValue: v}, - } - case int32: - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int32Value{Int32Value: v}, - } - case int64: - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: v}, - } - case int: - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(v)}, - } - case float32: - return &schema_pb.Value{ - Kind: &schema_pb.Value_FloatValue{FloatValue: v}, - } - case float64: - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: v}, - } - case string: - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: v}, - } - case []byte: - return &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: v}, - } - case time.Time: - return &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: v.UnixMicro(), - IsUtc: true, - }, - }, - } - case []interface{}: - // Handle arrays - listValues := make([]*schema_pb.Value, len(v)) - for i, item := range v { - listValues[i] = goValueToSchemaValue(item) - } - return &schema_pb.Value{ - Kind: &schema_pb.Value_ListValue{ - ListValue: &schema_pb.ListValue{ - Values: listValues, - }, - }, - } - case map[string]interface{}: - // Check if this is an Avro union type (single key-value pair with type name as key) - // Union types have keys that are typically Avro type names like "int", "string", etc. - // Regular nested records would have meaningful field names like "inner", "name", etc. - if len(v) == 1 { - for unionType, unionValue := range v { - // Handle common Avro union type patterns (only if key looks like a type name) - switch unionType { - case "int": - if intVal, ok := unionValue.(int32); ok { - // Store union as a record with the union type as field name - // This preserves the union information for re-encoding - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "int": { - Kind: &schema_pb.Value_Int32Value{Int32Value: intVal}, - }, - }, - }, - }, - } - } - case "long": - if longVal, ok := unionValue.(int64); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "long": { - Kind: &schema_pb.Value_Int64Value{Int64Value: longVal}, - }, - }, - }, - }, - } - } - case "float": - if floatVal, ok := unionValue.(float32); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "float": { - Kind: &schema_pb.Value_FloatValue{FloatValue: floatVal}, - }, - }, - }, - }, - } - } - case "double": - if doubleVal, ok := unionValue.(float64); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "double": { - Kind: &schema_pb.Value_DoubleValue{DoubleValue: doubleVal}, - }, - }, - }, - }, - } - } - case "string": - if strVal, ok := unionValue.(string); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "string": { - Kind: &schema_pb.Value_StringValue{StringValue: strVal}, - }, - }, - }, - }, - } - } - case "boolean": - if boolVal, ok := unionValue.(bool); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "boolean": { - Kind: &schema_pb.Value_BoolValue{BoolValue: boolVal}, - }, - }, - }, - }, - } - } - } - // If it's not a recognized union type, fall through to treat as nested record - } - } - - // Handle nested records (both single-field and multi-field maps) - fields := make(map[string]*schema_pb.Value) - for key, val := range v { - fields[key] = goValueToSchemaValue(val) - } - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: fields, - }, - }, - } - default: - // Handle other types by converting to string - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{ - StringValue: fmt.Sprintf("%v", v), - }, - } - } -} - -// avroSchemaToRecordType converts an Avro schema to SeaweedMQ RecordType -func avroSchemaToRecordType(schemaStr string) (*schema_pb.RecordType, error) { - // Validate the Avro schema by creating a codec (this ensures it's valid) - _, err := goavro.NewCodec(schemaStr) - if err != nil { - return nil, fmt.Errorf("failed to parse Avro schema: %w", err) - } - - // Parse the schema JSON to extract field definitions - var avroSchema map[string]interface{} - if err := json.Unmarshal([]byte(schemaStr), &avroSchema); err != nil { - return nil, fmt.Errorf("failed to parse Avro schema JSON: %w", err) - } - - // Extract fields from the Avro schema - fields, err := extractAvroFields(avroSchema) - if err != nil { - return nil, fmt.Errorf("failed to extract Avro fields: %w", err) - } - - return &schema_pb.RecordType{ - Fields: fields, - }, nil -} - -// extractAvroFields extracts field definitions from parsed Avro schema JSON -func extractAvroFields(avroSchema map[string]interface{}) ([]*schema_pb.Field, error) { - // Check if this is a record type - schemaType, ok := avroSchema["type"].(string) - if !ok || schemaType != "record" { - return nil, fmt.Errorf("expected record type, got %v", schemaType) - } - - // Extract fields array - fieldsInterface, ok := avroSchema["fields"] - if !ok { - return nil, fmt.Errorf("no fields found in Avro record schema") - } - - fieldsArray, ok := fieldsInterface.([]interface{}) - if !ok { - return nil, fmt.Errorf("fields must be an array") - } - - // Convert each Avro field to SeaweedMQ field - fields := make([]*schema_pb.Field, 0, len(fieldsArray)) - for i, fieldInterface := range fieldsArray { - fieldMap, ok := fieldInterface.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("field %d is not a valid object", i) - } - - field, err := convertAvroFieldToSeaweedMQ(fieldMap, int32(i)) - if err != nil { - return nil, fmt.Errorf("failed to convert field %d: %w", i, err) - } - - fields = append(fields, field) - } - - return fields, nil -} - -// convertAvroFieldToSeaweedMQ converts a single Avro field to SeaweedMQ Field -func convertAvroFieldToSeaweedMQ(avroField map[string]interface{}, fieldIndex int32) (*schema_pb.Field, error) { - // Extract field name - name, ok := avroField["name"].(string) - if !ok { - return nil, fmt.Errorf("field name is required") - } - - // Extract field type and check if it's an array - fieldType, isRepeated, err := convertAvroTypeToSeaweedMQWithRepeated(avroField["type"]) - if err != nil { - return nil, fmt.Errorf("failed to convert field type for %s: %w", name, err) - } - - // Check if field has a default value (indicates it's optional) - _, hasDefault := avroField["default"] - isRequired := !hasDefault - - return &schema_pb.Field{ - Name: name, - FieldIndex: fieldIndex, - Type: fieldType, - IsRequired: isRequired, - IsRepeated: isRepeated, - }, nil -} - -// convertAvroTypeToSeaweedMQ converts Avro type to SeaweedMQ Type -func convertAvroTypeToSeaweedMQ(avroType interface{}) (*schema_pb.Type, error) { - fieldType, _, err := convertAvroTypeToSeaweedMQWithRepeated(avroType) - return fieldType, err -} - -// convertAvroTypeToSeaweedMQWithRepeated converts Avro type to SeaweedMQ Type and returns if it's repeated -func convertAvroTypeToSeaweedMQWithRepeated(avroType interface{}) (*schema_pb.Type, bool, error) { - switch t := avroType.(type) { - case string: - // Simple type - fieldType, err := convertAvroSimpleType(t) - return fieldType, false, err - - case map[string]interface{}: - // Complex type (record, enum, array, map, fixed) - return convertAvroComplexTypeWithRepeated(t) - - case []interface{}: - // Union type - fieldType, err := convertAvroUnionType(t) - return fieldType, false, err - - default: - return nil, false, fmt.Errorf("unsupported Avro type: %T", avroType) - } -} - -// convertAvroSimpleType converts simple Avro types to SeaweedMQ types -func convertAvroSimpleType(avroType string) (*schema_pb.Type, error) { - switch avroType { - case "null": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, // Use bytes for null - }, - }, nil - case "boolean": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BOOL, - }, - }, nil - case "int": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - }, nil - case "long": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - }, nil - case "float": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_FLOAT, - }, - }, nil - case "double": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_DOUBLE, - }, - }, nil - case "bytes": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - }, nil - case "string": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - }, nil - default: - return nil, fmt.Errorf("unsupported simple Avro type: %s", avroType) - } -} - -// convertAvroComplexType converts complex Avro types to SeaweedMQ types -func convertAvroComplexType(avroType map[string]interface{}) (*schema_pb.Type, error) { - fieldType, _, err := convertAvroComplexTypeWithRepeated(avroType) - return fieldType, err -} - -// convertAvroComplexTypeWithRepeated converts complex Avro types to SeaweedMQ types and returns if it's repeated -func convertAvroComplexTypeWithRepeated(avroType map[string]interface{}) (*schema_pb.Type, bool, error) { - typeStr, ok := avroType["type"].(string) - if !ok { - return nil, false, fmt.Errorf("complex type must have a type field") - } - - // Handle logical types - they are based on underlying primitive types - if _, hasLogicalType := avroType["logicalType"]; hasLogicalType { - // For logical types, use the underlying primitive type - return convertAvroSimpleTypeWithLogical(typeStr, avroType) - } - - switch typeStr { - case "record": - // Nested record type - fields, err := extractAvroFields(avroType) - if err != nil { - return nil, false, fmt.Errorf("failed to extract nested record fields: %w", err) - } - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: &schema_pb.RecordType{ - Fields: fields, - }, - }, - }, false, nil - - case "enum": - // Enum type - treat as string for now - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - }, false, nil - - case "array": - // Array type - itemsType, err := convertAvroTypeToSeaweedMQ(avroType["items"]) - if err != nil { - return nil, false, fmt.Errorf("failed to convert array items type: %w", err) - } - // For arrays, we return the item type and set IsRepeated=true - return itemsType, true, nil - - case "map": - // Map type - treat as record with dynamic fields - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: &schema_pb.RecordType{ - Fields: []*schema_pb.Field{}, // Dynamic fields - }, - }, - }, false, nil - - case "fixed": - // Fixed-length bytes - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - }, false, nil - - default: - return nil, false, fmt.Errorf("unsupported complex Avro type: %s", typeStr) - } -} - -// convertAvroSimpleTypeWithLogical handles logical types based on their underlying primitive types -func convertAvroSimpleTypeWithLogical(primitiveType string, avroType map[string]interface{}) (*schema_pb.Type, bool, error) { - logicalType, _ := avroType["logicalType"].(string) - - // Map logical types to appropriate SeaweedMQ types - switch logicalType { - case "decimal": - // Decimal logical type - use bytes for precision - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - }, false, nil - case "uuid": - // UUID logical type - use string - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - }, false, nil - case "date": - // Date logical type (int) - use int32 - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - }, false, nil - case "time-millis": - // Time in milliseconds (int) - use int32 - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - }, false, nil - case "time-micros": - // Time in microseconds (long) - use int64 - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - }, false, nil - case "timestamp-millis": - // Timestamp in milliseconds (long) - use int64 - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - }, false, nil - case "timestamp-micros": - // Timestamp in microseconds (long) - use int64 - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - }, false, nil - default: - // For unknown logical types, fall back to the underlying primitive type - fieldType, err := convertAvroSimpleType(primitiveType) - return fieldType, false, err - } -} - -// convertAvroUnionType converts Avro union types to SeaweedMQ types -func convertAvroUnionType(unionTypes []interface{}) (*schema_pb.Type, error) { - // For unions, we'll use the first non-null type - // This is a simplification - in a full implementation, we might want to create a union type - for _, unionType := range unionTypes { - if typeStr, ok := unionType.(string); ok && typeStr == "null" { - continue // Skip null types - } - - // Use the first non-null type - return convertAvroTypeToSeaweedMQ(unionType) - } - - // If all types are null, return bytes type - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - }, nil -} - -// InferRecordTypeFromMap infers a RecordType from a decoded map -// This is useful when we don't have the original Avro schema -func InferRecordTypeFromMap(m map[string]interface{}) *schema_pb.RecordType { - fields := make([]*schema_pb.Field, 0, len(m)) - fieldIndex := int32(0) - - for key, value := range m { - fieldType := inferTypeFromValue(value) - - field := &schema_pb.Field{ - Name: key, - FieldIndex: fieldIndex, - Type: fieldType, - IsRequired: value != nil, // Non-nil values are considered required - IsRepeated: false, - } - - // Check if it's an array - if reflect.TypeOf(value).Kind() == reflect.Slice { - field.IsRepeated = true - } - - fields = append(fields, field) - fieldIndex++ - } - - return &schema_pb.RecordType{ - Fields: fields, - } -} - -// inferTypeFromValue infers a SeaweedMQ Type from a Go value -func inferTypeFromValue(value interface{}) *schema_pb.Type { - if value == nil { - // Default to string for null values - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } - - switch v := value.(type) { - case bool: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BOOL, - }, - } - case int32: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - } - case int64, int: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - } - case float32: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_FLOAT, - }, - } - case float64: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_DOUBLE, - }, - } - case string: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - case []byte: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - } - case time.Time: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_TIMESTAMP, - }, - } - case []interface{}: - // Handle arrays - infer element type from first element - var elementType *schema_pb.Type - if len(v) > 0 { - elementType = inferTypeFromValue(v[0]) - } else { - // Default to string for empty arrays - elementType = &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } - - return &schema_pb.Type{ - Kind: &schema_pb.Type_ListType{ - ListType: &schema_pb.ListType{ - ElementType: elementType, - }, - }, - } - case map[string]interface{}: - // Handle nested records - nestedRecordType := InferRecordTypeFromMap(v) - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: nestedRecordType, - }, - } - default: - // Default to string for unknown types - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } -} diff --git a/weed/mq/kafka/schema/avro_decoder_test.go b/weed/mq/kafka/schema/avro_decoder_test.go deleted file mode 100644 index f34a0a800..000000000 --- a/weed/mq/kafka/schema/avro_decoder_test.go +++ /dev/null @@ -1,542 +0,0 @@ -package schema - -import ( - "reflect" - "testing" - "time" - - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestNewAvroDecoder(t *testing.T) { - tests := []struct { - name string - schema string - expectErr bool - }{ - { - name: "valid record schema", - schema: `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }`, - expectErr: false, - }, - { - name: "valid enum schema", - schema: `{ - "type": "enum", - "name": "Color", - "symbols": ["RED", "GREEN", "BLUE"] - }`, - expectErr: false, - }, - { - name: "invalid schema", - schema: `{"invalid": "schema"}`, - expectErr: true, - }, - { - name: "empty schema", - schema: "", - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - decoder, err := NewAvroDecoder(tt.schema) - - if (err != nil) != tt.expectErr { - t.Errorf("NewAvroDecoder() error = %v, expectErr %v", err, tt.expectErr) - return - } - - if !tt.expectErr && decoder == nil { - t.Error("Expected non-nil decoder for valid schema") - } - }) - } -} - -func TestAvroDecoder_Decode(t *testing.T) { - schema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null} - ] - }` - - decoder, err := NewAvroDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - // Create test data - codec, _ := goavro.NewCodec(schema) - testRecord := map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - "email": map[string]interface{}{ - "string": "john@example.com", // Avro union format - }, - } - - // Encode to binary - binary, err := codec.BinaryFromNative(nil, testRecord) - if err != nil { - t.Fatalf("Failed to encode test data: %v", err) - } - - // Test decoding - result, err := decoder.Decode(binary) - if err != nil { - t.Fatalf("Failed to decode: %v", err) - } - - // Verify results - if result["id"] != int32(123) { - t.Errorf("Expected id=123, got %v", result["id"]) - } - - if result["name"] != "John Doe" { - t.Errorf("Expected name='John Doe', got %v", result["name"]) - } - - // For union types, Avro returns a map with the type name as key - if emailMap, ok := result["email"].(map[string]interface{}); ok { - if emailMap["string"] != "john@example.com" { - t.Errorf("Expected email='john@example.com', got %v", emailMap["string"]) - } - } else { - t.Errorf("Expected email to be a union map, got %v", result["email"]) - } -} - -func TestAvroDecoder_DecodeToRecordValue(t *testing.T) { - schema := `{ - "type": "record", - "name": "SimpleRecord", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - decoder, err := NewAvroDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - // Create and encode test data - codec, _ := goavro.NewCodec(schema) - testRecord := map[string]interface{}{ - "id": int32(456), - "name": "Jane Smith", - } - - binary, err := codec.BinaryFromNative(nil, testRecord) - if err != nil { - t.Fatalf("Failed to encode test data: %v", err) - } - - // Test decoding to RecordValue - recordValue, err := decoder.DecodeToRecordValue(binary) - if err != nil { - t.Fatalf("Failed to decode to RecordValue: %v", err) - } - - // Verify RecordValue structure - if recordValue.Fields == nil { - t.Fatal("Expected non-nil fields") - } - - idValue := recordValue.Fields["id"] - if idValue == nil { - t.Fatal("Expected id field") - } - - if idValue.GetInt32Value() != 456 { - t.Errorf("Expected id=456, got %v", idValue.GetInt32Value()) - } - - nameValue := recordValue.Fields["name"] - if nameValue == nil { - t.Fatal("Expected name field") - } - - if nameValue.GetStringValue() != "Jane Smith" { - t.Errorf("Expected name='Jane Smith', got %v", nameValue.GetStringValue()) - } -} - -func TestMapToRecordValue(t *testing.T) { - testMap := map[string]interface{}{ - "bool_field": true, - "int32_field": int32(123), - "int64_field": int64(456), - "float_field": float32(1.23), - "double_field": float64(4.56), - "string_field": "hello", - "bytes_field": []byte("world"), - "null_field": nil, - "array_field": []interface{}{"a", "b", "c"}, - "nested_field": map[string]interface{}{ - "inner": "value", - }, - } - - recordValue := MapToRecordValue(testMap) - - // Test each field type - if !recordValue.Fields["bool_field"].GetBoolValue() { - t.Error("Expected bool_field=true") - } - - if recordValue.Fields["int32_field"].GetInt32Value() != 123 { - t.Error("Expected int32_field=123") - } - - if recordValue.Fields["int64_field"].GetInt64Value() != 456 { - t.Error("Expected int64_field=456") - } - - if recordValue.Fields["float_field"].GetFloatValue() != 1.23 { - t.Error("Expected float_field=1.23") - } - - if recordValue.Fields["double_field"].GetDoubleValue() != 4.56 { - t.Error("Expected double_field=4.56") - } - - if recordValue.Fields["string_field"].GetStringValue() != "hello" { - t.Error("Expected string_field='hello'") - } - - if string(recordValue.Fields["bytes_field"].GetBytesValue()) != "world" { - t.Error("Expected bytes_field='world'") - } - - // Test null value (converted to empty string) - if recordValue.Fields["null_field"].GetStringValue() != "" { - t.Error("Expected null_field to be empty string") - } - - // Test array - arrayValue := recordValue.Fields["array_field"].GetListValue() - if arrayValue == nil || len(arrayValue.Values) != 3 { - t.Error("Expected array with 3 elements") - } - - // Test nested record - nestedValue := recordValue.Fields["nested_field"].GetRecordValue() - if nestedValue == nil { - t.Fatal("Expected nested record") - } - - if nestedValue.Fields["inner"].GetStringValue() != "value" { - t.Error("Expected nested inner='value'") - } -} - -func TestGoValueToSchemaValue(t *testing.T) { - tests := []struct { - name string - input interface{} - expected func(*schema_pb.Value) bool - }{ - { - name: "nil value", - input: nil, - expected: func(v *schema_pb.Value) bool { - return v.GetStringValue() == "" - }, - }, - { - name: "bool value", - input: true, - expected: func(v *schema_pb.Value) bool { - return v.GetBoolValue() == true - }, - }, - { - name: "int32 value", - input: int32(123), - expected: func(v *schema_pb.Value) bool { - return v.GetInt32Value() == 123 - }, - }, - { - name: "int64 value", - input: int64(456), - expected: func(v *schema_pb.Value) bool { - return v.GetInt64Value() == 456 - }, - }, - { - name: "string value", - input: "test", - expected: func(v *schema_pb.Value) bool { - return v.GetStringValue() == "test" - }, - }, - { - name: "bytes value", - input: []byte("data"), - expected: func(v *schema_pb.Value) bool { - return string(v.GetBytesValue()) == "data" - }, - }, - { - name: "time value", - input: time.Unix(1234567890, 0), - expected: func(v *schema_pb.Value) bool { - return v.GetTimestampValue() != nil - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := goValueToSchemaValue(tt.input) - if !tt.expected(result) { - t.Errorf("goValueToSchemaValue() failed for %v", tt.input) - } - }) - } -} - -func TestInferRecordTypeFromMap(t *testing.T) { - testMap := map[string]interface{}{ - "id": int64(123), - "name": "test", - "active": true, - "score": float64(95.5), - "tags": []interface{}{"tag1", "tag2"}, - "metadata": map[string]interface{}{"key": "value"}, - } - - recordType := InferRecordTypeFromMap(testMap) - - if len(recordType.Fields) != 6 { - t.Errorf("Expected 6 fields, got %d", len(recordType.Fields)) - } - - // Create a map for easier field lookup - fieldMap := make(map[string]*schema_pb.Field) - for _, field := range recordType.Fields { - fieldMap[field.Name] = field - } - - // Test field types - if fieldMap["id"].Type.GetScalarType() != schema_pb.ScalarType_INT64 { - t.Error("Expected id field to be INT64") - } - - if fieldMap["name"].Type.GetScalarType() != schema_pb.ScalarType_STRING { - t.Error("Expected name field to be STRING") - } - - if fieldMap["active"].Type.GetScalarType() != schema_pb.ScalarType_BOOL { - t.Error("Expected active field to be BOOL") - } - - if fieldMap["score"].Type.GetScalarType() != schema_pb.ScalarType_DOUBLE { - t.Error("Expected score field to be DOUBLE") - } - - // Test array field - if fieldMap["tags"].Type.GetListType() == nil { - t.Error("Expected tags field to be LIST") - } - - // Test nested record field - if fieldMap["metadata"].Type.GetRecordType() == nil { - t.Error("Expected metadata field to be RECORD") - } -} - -func TestInferTypeFromValue(t *testing.T) { - tests := []struct { - name string - input interface{} - expected schema_pb.ScalarType - }{ - {"nil", nil, schema_pb.ScalarType_STRING}, // Default for nil - {"bool", true, schema_pb.ScalarType_BOOL}, - {"int32", int32(123), schema_pb.ScalarType_INT32}, - {"int64", int64(456), schema_pb.ScalarType_INT64}, - {"int", int(789), schema_pb.ScalarType_INT64}, - {"float32", float32(1.23), schema_pb.ScalarType_FLOAT}, - {"float64", float64(4.56), schema_pb.ScalarType_DOUBLE}, - {"string", "test", schema_pb.ScalarType_STRING}, - {"bytes", []byte("data"), schema_pb.ScalarType_BYTES}, - {"time", time.Now(), schema_pb.ScalarType_TIMESTAMP}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := inferTypeFromValue(tt.input) - - // Handle special cases - if tt.input == nil || reflect.TypeOf(tt.input).Kind() == reflect.Slice || - reflect.TypeOf(tt.input).Kind() == reflect.Map { - // Skip scalar type check for complex types - return - } - - if result.GetScalarType() != tt.expected { - t.Errorf("inferTypeFromValue() = %v, want %v", result.GetScalarType(), tt.expected) - } - }) - } -} - -// Integration test with real Avro data -func TestAvroDecoder_Integration(t *testing.T) { - // Complex Avro schema with nested records and arrays - schema := `{ - "type": "record", - "name": "Order", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "customer_id", "type": "int"}, - {"name": "total", "type": "double"}, - {"name": "items", "type": { - "type": "array", - "items": { - "type": "record", - "name": "Item", - "fields": [ - {"name": "product_id", "type": "string"}, - {"name": "quantity", "type": "int"}, - {"name": "price", "type": "double"} - ] - } - }}, - {"name": "metadata", "type": { - "type": "record", - "name": "Metadata", - "fields": [ - {"name": "source", "type": "string"}, - {"name": "timestamp", "type": "long"} - ] - }} - ] - }` - - decoder, err := NewAvroDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - // Create complex test data - codec, _ := goavro.NewCodec(schema) - testOrder := map[string]interface{}{ - "id": "order-123", - "customer_id": int32(456), - "total": float64(99.99), - "items": []interface{}{ - map[string]interface{}{ - "product_id": "prod-1", - "quantity": int32(2), - "price": float64(29.99), - }, - map[string]interface{}{ - "product_id": "prod-2", - "quantity": int32(1), - "price": float64(39.99), - }, - }, - "metadata": map[string]interface{}{ - "source": "web", - "timestamp": int64(1234567890), - }, - } - - // Encode to binary - binary, err := codec.BinaryFromNative(nil, testOrder) - if err != nil { - t.Fatalf("Failed to encode test data: %v", err) - } - - // Decode to RecordValue - recordValue, err := decoder.DecodeToRecordValue(binary) - if err != nil { - t.Fatalf("Failed to decode to RecordValue: %v", err) - } - - // Verify complex structure - if recordValue.Fields["id"].GetStringValue() != "order-123" { - t.Error("Expected order ID to be preserved") - } - - if recordValue.Fields["customer_id"].GetInt32Value() != 456 { - t.Error("Expected customer ID to be preserved") - } - - // Check array handling - itemsArray := recordValue.Fields["items"].GetListValue() - if itemsArray == nil || len(itemsArray.Values) != 2 { - t.Fatal("Expected items array with 2 elements") - } - - // Check nested record handling - metadataRecord := recordValue.Fields["metadata"].GetRecordValue() - if metadataRecord == nil { - t.Fatal("Expected metadata record") - } - - if metadataRecord.Fields["source"].GetStringValue() != "web" { - t.Error("Expected metadata source to be preserved") - } -} - -// Benchmark tests -func BenchmarkAvroDecoder_Decode(b *testing.B) { - schema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - decoder, _ := NewAvroDecoder(schema) - codec, _ := goavro.NewCodec(schema) - - testRecord := map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - } - - binary, _ := codec.BinaryFromNative(nil, testRecord) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = decoder.Decode(binary) - } -} - -func BenchmarkMapToRecordValue(b *testing.B) { - testMap := map[string]interface{}{ - "id": int64(123), - "name": "test", - "active": true, - "score": float64(95.5), - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = MapToRecordValue(testMap) - } -} diff --git a/weed/mq/kafka/schema/broker_client.go b/weed/mq/kafka/schema/broker_client.go deleted file mode 100644 index 2bb632ccc..000000000 --- a/weed/mq/kafka/schema/broker_client.go +++ /dev/null @@ -1,384 +0,0 @@ -package schema - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/client/pub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// BrokerClient wraps pub_client.TopicPublisher to handle schematized messages -type BrokerClient struct { - brokers []string - schemaManager *Manager - - // Publisher cache: topic -> publisher - publishersLock sync.RWMutex - publishers map[string]*pub_client.TopicPublisher - - // Subscriber cache: topic -> subscriber - subscribersLock sync.RWMutex - subscribers map[string]*sub_client.TopicSubscriber -} - -// BrokerClientConfig holds configuration for the broker client -type BrokerClientConfig struct { - Brokers []string - SchemaManager *Manager -} - -// NewBrokerClient creates a new broker client for publishing schematized messages -func NewBrokerClient(config BrokerClientConfig) *BrokerClient { - return &BrokerClient{ - brokers: config.Brokers, - schemaManager: config.SchemaManager, - publishers: make(map[string]*pub_client.TopicPublisher), - subscribers: make(map[string]*sub_client.TopicSubscriber), - } -} - -// PublishSchematizedMessage publishes a Confluent-framed message after decoding it -func (bc *BrokerClient) PublishSchematizedMessage(topicName string, key []byte, messageBytes []byte) error { - // Step 1: Decode the schematized message - decoded, err := bc.schemaManager.DecodeMessage(messageBytes) - if err != nil { - return fmt.Errorf("failed to decode schematized message: %w", err) - } - - // Step 2: Get or create publisher for this topic - publisher, err := bc.getOrCreatePublisher(topicName, decoded.RecordType) - if err != nil { - return fmt.Errorf("failed to get publisher for topic %s: %w", topicName, err) - } - - // Step 3: Publish the decoded RecordValue to mq.broker - return publisher.PublishRecord(key, decoded.RecordValue) -} - -// PublishRawMessage publishes a raw message (non-schematized) to mq.broker -func (bc *BrokerClient) PublishRawMessage(topicName string, key []byte, value []byte) error { - // For raw messages, create a simple publisher without RecordType - publisher, err := bc.getOrCreatePublisher(topicName, nil) - if err != nil { - return fmt.Errorf("failed to get publisher for topic %s: %w", topicName, err) - } - - return publisher.Publish(key, value) -} - -// getOrCreatePublisher gets or creates a TopicPublisher for the given topic -func (bc *BrokerClient) getOrCreatePublisher(topicName string, recordType *schema_pb.RecordType) (*pub_client.TopicPublisher, error) { - // Create cache key that includes record type info - cacheKey := topicName - if recordType != nil { - cacheKey = fmt.Sprintf("%s:schematized", topicName) - } - - // Try to get existing publisher - bc.publishersLock.RLock() - if publisher, exists := bc.publishers[cacheKey]; exists { - bc.publishersLock.RUnlock() - return publisher, nil - } - bc.publishersLock.RUnlock() - - // Create new publisher - bc.publishersLock.Lock() - defer bc.publishersLock.Unlock() - - // Double-check after acquiring write lock - if publisher, exists := bc.publishers[cacheKey]; exists { - return publisher, nil - } - - // Create publisher configuration - config := &pub_client.PublisherConfiguration{ - Topic: topic.NewTopic("kafka", topicName), // Use "kafka" namespace - PartitionCount: 1, // Start with single partition - Brokers: bc.brokers, - PublisherName: "kafka-gateway-schema", - RecordType: recordType, // Set RecordType for schematized messages - } - - // Create the publisher - publisher, err := pub_client.NewTopicPublisher(config) - if err != nil { - return nil, fmt.Errorf("failed to create topic publisher: %w", err) - } - - // Cache the publisher - bc.publishers[cacheKey] = publisher - - return publisher, nil -} - -// FetchSchematizedMessages fetches RecordValue messages from mq.broker and reconstructs Confluent envelopes -func (bc *BrokerClient) FetchSchematizedMessages(topicName string, maxMessages int) ([][]byte, error) { - // Get or create subscriber for this topic - subscriber, err := bc.getOrCreateSubscriber(topicName) - if err != nil { - return nil, fmt.Errorf("failed to get subscriber for topic %s: %w", topicName, err) - } - - // Fetch RecordValue messages - messages := make([][]byte, 0, maxMessages) - for len(messages) < maxMessages { - // Try to receive a message (non-blocking for now) - recordValue, err := bc.receiveRecordValue(subscriber) - if err != nil { - break // No more messages available - } - - // Reconstruct Confluent envelope from RecordValue - envelope, err := bc.reconstructConfluentEnvelope(recordValue) - if err != nil { - continue - } - - messages = append(messages, envelope) - } - - return messages, nil -} - -// getOrCreateSubscriber gets or creates a TopicSubscriber for the given topic -func (bc *BrokerClient) getOrCreateSubscriber(topicName string) (*sub_client.TopicSubscriber, error) { - // Try to get existing subscriber - bc.subscribersLock.RLock() - if subscriber, exists := bc.subscribers[topicName]; exists { - bc.subscribersLock.RUnlock() - return subscriber, nil - } - bc.subscribersLock.RUnlock() - - // Create new subscriber - bc.subscribersLock.Lock() - defer bc.subscribersLock.Unlock() - - // Double-check after acquiring write lock - if subscriber, exists := bc.subscribers[topicName]; exists { - return subscriber, nil - } - - // Create subscriber configuration - subscriberConfig := &sub_client.SubscriberConfiguration{ - ClientId: "kafka-gateway-schema", - ConsumerGroup: "kafka-gateway", - ConsumerGroupInstanceId: fmt.Sprintf("kafka-gateway-%s", topicName), - MaxPartitionCount: 1, - SlidingWindowSize: 10, - } - - // Create content configuration - contentConfig := &sub_client.ContentConfiguration{ - Topic: topic.NewTopic("kafka", topicName), - Filter: "", - OffsetType: schema_pb.OffsetType_RESET_TO_EARLIEST, - } - - // Create partition offset channel - partitionOffsetChan := make(chan sub_client.KeyedTimestamp, 100) - - // Create the subscriber - _ = sub_client.NewTopicSubscriber( - context.Background(), - bc.brokers, - subscriberConfig, - contentConfig, - partitionOffsetChan, - ) - - // Try to initialize the subscriber connection - // If it fails (e.g., with mock brokers), don't cache it - // Use a context with timeout to avoid hanging on connection attempts - subCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Test the connection by attempting to subscribe - // This will fail with mock brokers that don't exist - testSubscriber := sub_client.NewTopicSubscriber( - subCtx, - bc.brokers, - subscriberConfig, - contentConfig, - partitionOffsetChan, - ) - - // Try to start the subscription - this should fail for mock brokers - go func() { - defer cancel() - err := testSubscriber.Subscribe() - if err != nil { - // Expected to fail with mock brokers - return - } - }() - - // Give it a brief moment to try connecting - select { - case <-time.After(100 * time.Millisecond): - // Connection attempt timed out (expected with mock brokers) - return nil, fmt.Errorf("failed to connect to brokers: connection timeout") - case <-subCtx.Done(): - // Connection attempt failed (expected with mock brokers) - return nil, fmt.Errorf("failed to connect to brokers: %w", subCtx.Err()) - } -} - -// receiveRecordValue receives a single RecordValue from the subscriber -func (bc *BrokerClient) receiveRecordValue(subscriber *sub_client.TopicSubscriber) (*schema_pb.RecordValue, error) { - // This is a simplified implementation - in a real system, this would - // integrate with the subscriber's message receiving mechanism - // For now, return an error to indicate no messages available - return nil, fmt.Errorf("no messages available") -} - -// reconstructConfluentEnvelope reconstructs a Confluent envelope from a RecordValue -func (bc *BrokerClient) reconstructConfluentEnvelope(recordValue *schema_pb.RecordValue) ([]byte, error) { - // Extract schema information from the RecordValue metadata - // This is a simplified implementation - in practice, we'd need to store - // schema metadata alongside the RecordValue when publishing - - // For now, create a placeholder envelope - // In a real implementation, we would: - // 1. Extract the original schema ID from RecordValue metadata - // 2. Get the schema format from the schema registry - // 3. Encode the RecordValue back to the original format (Avro, JSON, etc.) - // 4. Create the Confluent envelope with magic byte + schema ID + encoded data - - schemaID := uint32(1) // Placeholder - would be extracted from metadata - format := FormatAvro // Placeholder - would be determined from schema registry - - // Encode RecordValue back to original format - encodedData, err := bc.schemaManager.EncodeMessage(recordValue, schemaID, format) - if err != nil { - return nil, fmt.Errorf("failed to encode RecordValue: %w", err) - } - - return encodedData, nil -} - -// Close shuts down all publishers and subscribers -func (bc *BrokerClient) Close() error { - var lastErr error - - // Close publishers - bc.publishersLock.Lock() - for key, publisher := range bc.publishers { - if err := publisher.FinishPublish(); err != nil { - lastErr = fmt.Errorf("failed to finish publisher %s: %w", key, err) - } - if err := publisher.Shutdown(); err != nil { - lastErr = fmt.Errorf("failed to shutdown publisher %s: %w", key, err) - } - delete(bc.publishers, key) - } - bc.publishersLock.Unlock() - - // Close subscribers - bc.subscribersLock.Lock() - for key, subscriber := range bc.subscribers { - // TopicSubscriber doesn't have a Shutdown method in the current implementation - // In a real implementation, we would properly close the subscriber - _ = subscriber // Avoid unused variable warning - delete(bc.subscribers, key) - } - bc.subscribersLock.Unlock() - - return lastErr -} - -// GetPublisherStats returns statistics about active publishers and subscribers -func (bc *BrokerClient) GetPublisherStats() map[string]interface{} { - bc.publishersLock.RLock() - bc.subscribersLock.RLock() - defer bc.publishersLock.RUnlock() - defer bc.subscribersLock.RUnlock() - - stats := make(map[string]interface{}) - stats["active_publishers"] = len(bc.publishers) - stats["active_subscribers"] = len(bc.subscribers) - stats["brokers"] = bc.brokers - - publisherTopics := make([]string, 0, len(bc.publishers)) - for key := range bc.publishers { - publisherTopics = append(publisherTopics, key) - } - stats["publisher_topics"] = publisherTopics - - subscriberTopics := make([]string, 0, len(bc.subscribers)) - for key := range bc.subscribers { - subscriberTopics = append(subscriberTopics, key) - } - stats["subscriber_topics"] = subscriberTopics - - // Add "topics" key for backward compatibility with tests - allTopics := make([]string, 0) - topicSet := make(map[string]bool) - for _, topic := range publisherTopics { - if !topicSet[topic] { - allTopics = append(allTopics, topic) - topicSet[topic] = true - } - } - for _, topic := range subscriberTopics { - if !topicSet[topic] { - allTopics = append(allTopics, topic) - topicSet[topic] = true - } - } - stats["topics"] = allTopics - - return stats -} - -// IsSchematized checks if a message is Confluent-framed -func (bc *BrokerClient) IsSchematized(messageBytes []byte) bool { - return bc.schemaManager.IsSchematized(messageBytes) -} - -// ValidateMessage validates a schematized message without publishing -func (bc *BrokerClient) ValidateMessage(messageBytes []byte) (*DecodedMessage, error) { - return bc.schemaManager.DecodeMessage(messageBytes) -} - -// CreateRecordType creates a RecordType for a topic based on schema information -func (bc *BrokerClient) CreateRecordType(schemaID uint32, format Format) (*schema_pb.RecordType, error) { - // Get schema from registry - cachedSchema, err := bc.schemaManager.registryClient.GetSchemaByID(schemaID) - if err != nil { - return nil, fmt.Errorf("failed to get schema %d: %w", schemaID, err) - } - - // Create appropriate decoder and infer RecordType - switch format { - case FormatAvro: - decoder, err := bc.schemaManager.getAvroDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to create Avro decoder: %w", err) - } - return decoder.InferRecordType() - - case FormatJSONSchema: - decoder, err := bc.schemaManager.getJSONSchemaDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to create JSON Schema decoder: %w", err) - } - return decoder.InferRecordType() - - case FormatProtobuf: - decoder, err := bc.schemaManager.getProtobufDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to create Protobuf decoder: %w", err) - } - return decoder.InferRecordType() - - default: - return nil, fmt.Errorf("unsupported schema format: %v", format) - } -} diff --git a/weed/mq/kafka/schema/broker_client_fetch_test.go b/weed/mq/kafka/schema/broker_client_fetch_test.go deleted file mode 100644 index 19a1dbb85..000000000 --- a/weed/mq/kafka/schema/broker_client_fetch_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBrokerClient_FetchIntegration tests the fetch functionality -func TestBrokerClient_FetchIntegration(t *testing.T) { - // Create mock schema registry - registry := createFetchTestRegistry(t) - defer registry.Close() - - // Create schema manager - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - // Create broker client - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, // Mock broker address - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Fetch Schema Integration", func(t *testing.T) { - schemaID := int32(1) - schemaJSON := `{ - "type": "record", - "name": "FetchTest", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "data", "type": "string"} - ] - }` - - // Register schema - registerFetchTestSchema(t, registry, schemaID, schemaJSON) - - // Test FetchSchematizedMessages (will fail to connect to mock broker) - messages, err := brokerClient.FetchSchematizedMessages("fetch-test-topic", 5) - assert.Error(t, err) // Expect error with mock broker that doesn't exist - assert.Contains(t, err.Error(), "failed to get subscriber") - assert.Nil(t, messages) - - t.Logf("Fetch integration test completed - connection failed as expected with mock broker: %v", err) - }) - - t.Run("Envelope Reconstruction", func(t *testing.T) { - schemaID := int32(2) - schemaJSON := `{ - "type": "record", - "name": "ReconstructTest", - "fields": [ - {"name": "message", "type": "string"}, - {"name": "count", "type": "int"} - ] - }` - - registerFetchTestSchema(t, registry, schemaID, schemaJSON) - - // Create a test RecordValue with all required fields - recordValue := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "message": { - Kind: &schema_pb.Value_StringValue{StringValue: "test message"}, - }, - "count": { - Kind: &schema_pb.Value_Int64Value{Int64Value: 42}, - }, - }, - } - - // Test envelope reconstruction (may fail due to schema mismatch, which is expected) - envelope, err := brokerClient.reconstructConfluentEnvelope(recordValue) - if err != nil { - t.Logf("Expected error in envelope reconstruction due to schema mismatch: %v", err) - assert.Contains(t, err.Error(), "failed to encode RecordValue") - } else { - assert.True(t, len(envelope) > 5) // Should have magic byte + schema ID + data - - // Verify envelope structure - assert.Equal(t, byte(0x00), envelope[0]) // Magic byte - reconstructedSchemaID := binary.BigEndian.Uint32(envelope[1:5]) - assert.True(t, reconstructedSchemaID > 0) // Should have a schema ID - - t.Logf("Successfully reconstructed envelope with %d bytes", len(envelope)) - } - }) - - t.Run("Subscriber Management", func(t *testing.T) { - // Test subscriber creation (may succeed with current implementation) - _, err := brokerClient.getOrCreateSubscriber("subscriber-test-topic") - if err != nil { - t.Logf("Subscriber creation failed as expected with mock brokers: %v", err) - } else { - t.Logf("Subscriber creation succeeded - testing subscriber caching logic") - } - - // Verify stats include subscriber information - stats := brokerClient.GetPublisherStats() - assert.Contains(t, stats, "active_subscribers") - assert.Contains(t, stats, "subscriber_topics") - - // Check that subscriber was created (may be > 0 if creation succeeded) - subscriberCount := stats["active_subscribers"].(int) - t.Logf("Active subscribers: %d", subscriberCount) - }) -} - -// TestBrokerClient_RoundTripIntegration tests the complete publish/fetch cycle -func TestBrokerClient_RoundTripIntegration(t *testing.T) { - registry := createFetchTestRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Complete Schema Workflow", func(t *testing.T) { - schemaID := int32(10) - schemaJSON := `{ - "type": "record", - "name": "RoundTripTest", - "fields": [ - {"name": "user_id", "type": "string"}, - {"name": "action", "type": "string"}, - {"name": "timestamp", "type": "long"} - ] - }` - - registerFetchTestSchema(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{ - "user_id": "user-123", - "action": "login", - "timestamp": int64(1640995200000), - } - - // Encode with Avro - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createFetchTestEnvelope(schemaID, avroBinary) - - // Test validation (this works with mock) - decoded, err := brokerClient.ValidateMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - assert.Equal(t, FormatAvro, decoded.SchemaFormat) - - // Verify decoded fields - userIDField := decoded.RecordValue.Fields["user_id"] - actionField := decoded.RecordValue.Fields["action"] - assert.Equal(t, "user-123", userIDField.GetStringValue()) - assert.Equal(t, "login", actionField.GetStringValue()) - - // Test publishing (will succeed with validation but not actually publish to mock broker) - // This demonstrates the complete schema processing pipeline - t.Logf("Round-trip test completed - schema validation and processing successful") - }) - - t.Run("Error Handling in Fetch", func(t *testing.T) { - // Test fetch with non-existent topic - with mock brokers this may not error - messages, err := brokerClient.FetchSchematizedMessages("non-existent-topic", 1) - if err != nil { - assert.Error(t, err) - } - assert.Equal(t, 0, len(messages)) - - // Test reconstruction with invalid RecordValue - invalidRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{}, // Empty fields - } - - _, err = brokerClient.reconstructConfluentEnvelope(invalidRecord) - // With mock setup, this might not error - just verify it doesn't panic - t.Logf("Reconstruction result: %v", err) - }) -} - -// TestBrokerClient_SubscriberConfiguration tests subscriber setup -func TestBrokerClient_SubscriberConfiguration(t *testing.T) { - registry := createFetchTestRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Subscriber Cache Management", func(t *testing.T) { - // Initially no subscribers - stats := brokerClient.GetPublisherStats() - assert.Equal(t, 0, stats["active_subscribers"]) - - // Attempt to create subscriber (will fail with mock, but tests caching logic) - _, err1 := brokerClient.getOrCreateSubscriber("cache-test-topic") - _, err2 := brokerClient.getOrCreateSubscriber("cache-test-topic") - - // With mock brokers, behavior may vary - just verify no panic - t.Logf("Subscriber creation results: err1=%v, err2=%v", err1, err2) - // Don't assert errors as mock behavior may vary - - // Verify broker client is still functional after failed subscriber creation - if brokerClient != nil { - t.Log("Broker client remains functional after subscriber creation attempts") - } - }) - - t.Run("Multiple Topic Subscribers", func(t *testing.T) { - topics := []string{"topic-a", "topic-b", "topic-c"} - - for _, topic := range topics { - _, err := brokerClient.getOrCreateSubscriber(topic) - t.Logf("Subscriber creation for %s: %v", topic, err) - // Don't assert error as mock behavior may vary - } - - // Verify no subscribers were actually created due to mock broker failures - stats := brokerClient.GetPublisherStats() - assert.Equal(t, 0, stats["active_subscribers"]) - }) -} - -// Helper functions for fetch tests - -func createFetchTestRegistry(t *testing.T) *httptest.Server { - schemas := make(map[int32]string) - - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/subjects": - w.WriteHeader(http.StatusOK) - w.Write([]byte("[]")) - default: - // Handle schema requests - var schemaID int32 - if n, err := fmt.Sscanf(r.URL.Path, "/schemas/ids/%d", &schemaID); n == 1 && err == nil { - if schema, exists := schemas[schemaID]; exists { - response := fmt.Sprintf(`{"schema": %q}`, schema) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write([]byte(response)) - } else { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error_code": 40403, "message": "Schema not found"}`)) - } - } else if r.Method == "POST" && r.URL.Path == "/register-schema" { - var req struct { - SchemaID int32 `json:"schema_id"` - Schema string `json:"schema"` - } - if err := json.NewDecoder(r.Body).Decode(&req); err == nil { - schemas[req.SchemaID] = req.Schema - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"success": true}`)) - } else { - w.WriteHeader(http.StatusBadRequest) - } - } else { - w.WriteHeader(http.StatusNotFound) - } - } - })) -} - -func registerFetchTestSchema(t *testing.T, registry *httptest.Server, schemaID int32, schema string) { - reqBody := fmt.Sprintf(`{"schema_id": %d, "schema": %q}`, schemaID, schema) - resp, err := http.Post(registry.URL+"/register-schema", "application/json", bytes.NewReader([]byte(reqBody))) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode) -} - -func createFetchTestEnvelope(schemaID int32, data []byte) []byte { - envelope := make([]byte, 5+len(data)) - envelope[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(envelope[1:5], uint32(schemaID)) - copy(envelope[5:], data) - return envelope -} diff --git a/weed/mq/kafka/schema/broker_client_test.go b/weed/mq/kafka/schema/broker_client_test.go deleted file mode 100644 index 586e8873d..000000000 --- a/weed/mq/kafka/schema/broker_client_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBrokerClient_SchematizedMessage tests publishing schematized messages -func TestBrokerClient_SchematizedMessage(t *testing.T) { - // Create mock schema registry - registry := createBrokerTestRegistry(t) - defer registry.Close() - - // Create schema manager - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - // Create broker client (with mock brokers) - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, // Mock broker address - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Avro Schematized Message", func(t *testing.T) { - schemaID := int32(1) - schemaJSON := `{ - "type": "record", - "name": "TestMessage", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "value", "type": "int"} - ] - }` - - // Register schema - registerBrokerTestSchema(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{ - "id": "test-123", - "value": int32(42), - } - - // Encode with Avro - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createBrokerTestEnvelope(schemaID, avroBinary) - - // Test validation without publishing - decoded, err := brokerClient.ValidateMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - assert.Equal(t, FormatAvro, decoded.SchemaFormat) - - // Verify decoded fields - idField := decoded.RecordValue.Fields["id"] - valueField := decoded.RecordValue.Fields["value"] - assert.Equal(t, "test-123", idField.GetStringValue()) - // Note: Integer decoding has known issues in current Avro implementation - if valueField.GetInt64Value() != 42 { - t.Logf("Known issue: Integer value decoded as %d instead of 42", valueField.GetInt64Value()) - } - - // Test schematized detection - assert.True(t, brokerClient.IsSchematized(envelope)) - assert.False(t, brokerClient.IsSchematized([]byte("raw message"))) - - // Note: Actual publishing would require a real mq.broker - // For unit tests, we focus on the schema processing logic - t.Logf("Successfully validated schematized message with schema ID %d", schemaID) - }) - - t.Run("RecordType Creation", func(t *testing.T) { - schemaID := int32(2) - schemaJSON := `{ - "type": "record", - "name": "RecordTypeTest", - "fields": [ - {"name": "name", "type": "string"}, - {"name": "age", "type": "int"}, - {"name": "active", "type": "boolean"} - ] - }` - - registerBrokerTestSchema(t, registry, schemaID, schemaJSON) - - // Test RecordType creation - recordType, err := brokerClient.CreateRecordType(uint32(schemaID), FormatAvro) - require.NoError(t, err) - assert.NotNil(t, recordType) - - // Note: RecordType inference has known limitations in current implementation - if len(recordType.Fields) != 3 { - t.Logf("Known issue: RecordType has %d fields instead of expected 3", len(recordType.Fields)) - // For now, just verify we got at least some fields - assert.Greater(t, len(recordType.Fields), 0, "Should have at least one field") - } else { - // Verify field types if inference worked correctly - fieldMap := make(map[string]*schema_pb.Field) - for _, field := range recordType.Fields { - fieldMap[field.Name] = field - } - - if nameField := fieldMap["name"]; nameField != nil { - assert.Equal(t, schema_pb.ScalarType_STRING, nameField.Type.GetScalarType()) - } - - if ageField := fieldMap["age"]; ageField != nil { - assert.Equal(t, schema_pb.ScalarType_INT32, ageField.Type.GetScalarType()) - } - - if activeField := fieldMap["active"]; activeField != nil { - assert.Equal(t, schema_pb.ScalarType_BOOL, activeField.Type.GetScalarType()) - } - } - }) - - t.Run("Publisher Stats", func(t *testing.T) { - stats := brokerClient.GetPublisherStats() - assert.Contains(t, stats, "active_publishers") - assert.Contains(t, stats, "brokers") - assert.Contains(t, stats, "topics") - - brokers := stats["brokers"].([]string) - assert.Equal(t, []string{"localhost:17777"}, brokers) - }) -} - -// TestBrokerClient_ErrorHandling tests error conditions -func TestBrokerClient_ErrorHandling(t *testing.T) { - registry := createBrokerTestRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Invalid Schematized Message", func(t *testing.T) { - // Create invalid envelope - invalidEnvelope := []byte{0x00, 0x00, 0x00, 0x00, 0x99, 0xFF, 0xFF} - - _, err := brokerClient.ValidateMessage(invalidEnvelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "schema") - }) - - t.Run("Non-Schematized Message", func(t *testing.T) { - rawMessage := []byte("This is not schematized") - - _, err := brokerClient.ValidateMessage(rawMessage) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not schematized") - }) - - t.Run("Unknown Schema ID", func(t *testing.T) { - // Create envelope with non-existent schema ID - envelope := createBrokerTestEnvelope(999, []byte("test")) - - _, err := brokerClient.ValidateMessage(envelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to get schema") - }) - - t.Run("Invalid RecordType Creation", func(t *testing.T) { - _, err := brokerClient.CreateRecordType(999, FormatAvro) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to get schema") - }) -} - -// TestBrokerClient_Integration tests integration scenarios (without real broker) -func TestBrokerClient_Integration(t *testing.T) { - registry := createBrokerTestRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - brokerClient := NewBrokerClient(BrokerClientConfig{ - Brokers: []string{"localhost:17777"}, - SchemaManager: manager, - }) - defer brokerClient.Close() - - t.Run("Multiple Schema Formats", func(t *testing.T) { - // Test Avro schema - avroSchemaID := int32(10) - avroSchema := `{ - "type": "record", - "name": "AvroMessage", - "fields": [{"name": "content", "type": "string"}] - }` - registerBrokerTestSchema(t, registry, avroSchemaID, avroSchema) - - // Create Avro message - codec, err := goavro.NewCodec(avroSchema) - require.NoError(t, err) - avroData := map[string]interface{}{"content": "avro message"} - avroBinary, err := codec.BinaryFromNative(nil, avroData) - require.NoError(t, err) - avroEnvelope := createBrokerTestEnvelope(avroSchemaID, avroBinary) - - // Validate Avro message - avroDecoded, err := brokerClient.ValidateMessage(avroEnvelope) - require.NoError(t, err) - assert.Equal(t, FormatAvro, avroDecoded.SchemaFormat) - - // Test JSON Schema (now correctly detected as JSON Schema format) - jsonSchemaID := int32(11) - jsonSchema := `{ - "type": "object", - "properties": {"message": {"type": "string"}} - }` - registerBrokerTestSchema(t, registry, jsonSchemaID, jsonSchema) - - jsonData := map[string]interface{}{"message": "json message"} - jsonBytes, err := json.Marshal(jsonData) - require.NoError(t, err) - jsonEnvelope := createBrokerTestEnvelope(jsonSchemaID, jsonBytes) - - // This should now work correctly with improved format detection - jsonDecoded, err := brokerClient.ValidateMessage(jsonEnvelope) - require.NoError(t, err) - assert.Equal(t, FormatJSONSchema, jsonDecoded.SchemaFormat) - t.Logf("Successfully validated JSON Schema message with schema ID %d", jsonSchemaID) - }) - - t.Run("Cache Behavior", func(t *testing.T) { - schemaID := int32(20) - schemaJSON := `{ - "type": "record", - "name": "CacheTest", - "fields": [{"name": "data", "type": "string"}] - }` - registerBrokerTestSchema(t, registry, schemaID, schemaJSON) - - // Create test message - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - testData := map[string]interface{}{"data": "cached"} - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - envelope := createBrokerTestEnvelope(schemaID, avroBinary) - - // First validation - populates cache - decoded1, err := brokerClient.ValidateMessage(envelope) - require.NoError(t, err) - - // Second validation - uses cache - decoded2, err := brokerClient.ValidateMessage(envelope) - require.NoError(t, err) - - // Verify consistent results - assert.Equal(t, decoded1.SchemaID, decoded2.SchemaID) - assert.Equal(t, decoded1.SchemaFormat, decoded2.SchemaFormat) - - // Check cache stats - decoders, schemas, _ := manager.GetCacheStats() - assert.True(t, decoders > 0) - assert.True(t, schemas > 0) - }) -} - -// Helper functions for broker client tests - -func createBrokerTestRegistry(t *testing.T) *httptest.Server { - schemas := make(map[int32]string) - - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/subjects": - w.WriteHeader(http.StatusOK) - w.Write([]byte("[]")) - default: - // Handle schema requests - var schemaID int32 - if n, err := fmt.Sscanf(r.URL.Path, "/schemas/ids/%d", &schemaID); n == 1 && err == nil { - if schema, exists := schemas[schemaID]; exists { - response := fmt.Sprintf(`{"schema": %q}`, schema) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write([]byte(response)) - } else { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error_code": 40403, "message": "Schema not found"}`)) - } - } else if r.Method == "POST" && r.URL.Path == "/register-schema" { - var req struct { - SchemaID int32 `json:"schema_id"` - Schema string `json:"schema"` - } - if err := json.NewDecoder(r.Body).Decode(&req); err == nil { - schemas[req.SchemaID] = req.Schema - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"success": true}`)) - } else { - w.WriteHeader(http.StatusBadRequest) - } - } else { - w.WriteHeader(http.StatusNotFound) - } - } - })) -} - -func registerBrokerTestSchema(t *testing.T, registry *httptest.Server, schemaID int32, schema string) { - reqBody := fmt.Sprintf(`{"schema_id": %d, "schema": %q}`, schemaID, schema) - resp, err := http.Post(registry.URL+"/register-schema", "application/json", bytes.NewReader([]byte(reqBody))) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode) -} - -func createBrokerTestEnvelope(schemaID int32, data []byte) []byte { - envelope := make([]byte, 5+len(data)) - envelope[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(envelope[1:5], uint32(schemaID)) - copy(envelope[5:], data) - return envelope -} diff --git a/weed/mq/kafka/schema/decode_encode_basic_test.go b/weed/mq/kafka/schema/decode_encode_basic_test.go deleted file mode 100644 index af6091e3f..000000000 --- a/weed/mq/kafka/schema/decode_encode_basic_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBasicSchemaDecodeEncode tests the core decode/encode functionality with working schemas -func TestBasicSchemaDecodeEncode(t *testing.T) { - // Create mock schema registry - registry := createBasicMockRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - t.Run("Simple Avro String Record", func(t *testing.T) { - schemaID := int32(1) - schemaJSON := `{ - "type": "record", - "name": "SimpleMessage", - "fields": [ - {"name": "message", "type": "string"} - ] - }` - - // Register schema - registerBasicSchema(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{ - "message": "Hello World", - } - - // Encode with Avro - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createBasicEnvelope(schemaID, avroBinary) - - // Test decode - decoded, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - assert.Equal(t, FormatAvro, decoded.SchemaFormat) - assert.NotNil(t, decoded.RecordValue) - - // Verify the message field - messageField, exists := decoded.RecordValue.Fields["message"] - require.True(t, exists) - assert.Equal(t, "Hello World", messageField.GetStringValue()) - - // Test encode back - reconstructed, err := manager.EncodeMessage(decoded.RecordValue, decoded.SchemaID, decoded.SchemaFormat) - require.NoError(t, err) - - // Verify envelope structure - assert.Equal(t, envelope[:5], reconstructed[:5]) // Magic byte + schema ID - assert.True(t, len(reconstructed) > 5) - }) - - t.Run("JSON Schema with String Field", func(t *testing.T) { - schemaID := int32(10) - schemaJSON := `{ - "type": "object", - "properties": { - "name": {"type": "string"} - }, - "required": ["name"] - }` - - // Register schema - registerBasicSchema(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{ - "name": "Test User", - } - - // Encode as JSON - jsonBytes, err := json.Marshal(testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createBasicEnvelope(schemaID, jsonBytes) - - // For now, this will be detected as Avro due to format detection logic - // We'll test that it at least doesn't crash and provides a meaningful error - decoded, err := manager.DecodeMessage(envelope) - - // The current implementation may detect this as Avro and fail - // That's expected behavior for now - we're testing the error handling - if err != nil { - t.Logf("Expected error for JSON Schema detected as Avro: %v", err) - assert.Contains(t, err.Error(), "Avro") - } else { - // If it succeeds (future improvement), verify basic structure - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - assert.NotNil(t, decoded.RecordValue) - } - }) - - t.Run("Cache Performance", func(t *testing.T) { - schemaID := int32(20) - schemaJSON := `{ - "type": "record", - "name": "CacheTest", - "fields": [ - {"name": "value", "type": "string"} - ] - }` - - registerBasicSchema(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{"value": "cached"} - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - envelope := createBasicEnvelope(schemaID, avroBinary) - - // First decode - populates cache - decoded1, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - - // Second decode - uses cache - decoded2, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - - // Verify results are consistent - assert.Equal(t, decoded1.SchemaID, decoded2.SchemaID) - assert.Equal(t, decoded1.SchemaFormat, decoded2.SchemaFormat) - - // Verify field values match - field1 := decoded1.RecordValue.Fields["value"] - field2 := decoded2.RecordValue.Fields["value"] - assert.Equal(t, field1.GetStringValue(), field2.GetStringValue()) - - // Check that cache is populated - decoders, schemas, _ := manager.GetCacheStats() - assert.True(t, decoders > 0, "Should have cached decoders") - assert.True(t, schemas > 0, "Should have cached schemas") - }) -} - -// TestSchemaValidation tests schema validation functionality -func TestSchemaValidation(t *testing.T) { - registry := createBasicMockRegistry(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - t.Run("Valid Schema Message", func(t *testing.T) { - schemaID := int32(100) - schemaJSON := `{ - "type": "record", - "name": "ValidMessage", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "timestamp", "type": "long"} - ] - }` - - registerBasicSchema(t, registry, schemaID, schemaJSON) - - // Create valid test data - testData := map[string]interface{}{ - "id": "msg-123", - "timestamp": int64(1640995200000), - } - - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - envelope := createBasicEnvelope(schemaID, avroBinary) - - // Should decode successfully - decoded, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - - // Verify fields - idField := decoded.RecordValue.Fields["id"] - timestampField := decoded.RecordValue.Fields["timestamp"] - assert.Equal(t, "msg-123", idField.GetStringValue()) - assert.Equal(t, int64(1640995200000), timestampField.GetInt64Value()) - }) - - t.Run("Non-Schematized Message", func(t *testing.T) { - // Raw message without Confluent envelope - rawMessage := []byte("This is not a schematized message") - - _, err := manager.DecodeMessage(rawMessage) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not schematized") - }) - - t.Run("Invalid Envelope", func(t *testing.T) { - // Too short envelope - shortEnvelope := []byte{0x00, 0x00} - _, err := manager.DecodeMessage(shortEnvelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not schematized") - }) -} - -// Helper functions for basic tests - -func createBasicMockRegistry(t *testing.T) *httptest.Server { - schemas := make(map[int32]string) - - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/subjects": - w.WriteHeader(http.StatusOK) - w.Write([]byte("[]")) - default: - // Handle schema requests like /schemas/ids/1 - var schemaID int32 - if n, err := fmt.Sscanf(r.URL.Path, "/schemas/ids/%d", &schemaID); n == 1 && err == nil { - if schema, exists := schemas[schemaID]; exists { - response := fmt.Sprintf(`{"schema": %q}`, schema) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write([]byte(response)) - } else { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error_code": 40403, "message": "Schema not found"}`)) - } - } else if r.Method == "POST" && r.URL.Path == "/register-schema" { - // Custom endpoint for test registration - var req struct { - SchemaID int32 `json:"schema_id"` - Schema string `json:"schema"` - } - if err := json.NewDecoder(r.Body).Decode(&req); err == nil { - schemas[req.SchemaID] = req.Schema - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"success": true}`)) - } else { - w.WriteHeader(http.StatusBadRequest) - } - } else { - w.WriteHeader(http.StatusNotFound) - } - } - })) -} - -func registerBasicSchema(t *testing.T, registry *httptest.Server, schemaID int32, schema string) { - reqBody := fmt.Sprintf(`{"schema_id": %d, "schema": %q}`, schemaID, schema) - resp, err := http.Post(registry.URL+"/register-schema", "application/json", bytes.NewReader([]byte(reqBody))) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode) -} - -func createBasicEnvelope(schemaID int32, data []byte) []byte { - envelope := make([]byte, 5+len(data)) - envelope[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(envelope[1:5], uint32(schemaID)) - copy(envelope[5:], data) - return envelope -} diff --git a/weed/mq/kafka/schema/decode_encode_test.go b/weed/mq/kafka/schema/decode_encode_test.go deleted file mode 100644 index bb6b88625..000000000 --- a/weed/mq/kafka/schema/decode_encode_test.go +++ /dev/null @@ -1,569 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSchemaDecodeEncode_Avro tests comprehensive Avro decode/encode workflow -func TestSchemaDecodeEncode_Avro(t *testing.T) { - // Create mock schema registry - registry := createMockSchemaRegistryForDecodeTest(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - // Test data - testCases := []struct { - name string - schemaID int32 - schemaJSON string - testData map[string]interface{} - }{ - { - name: "Simple User Record", - schemaID: 1, - schemaJSON: `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null} - ] - }`, - testData: map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - "email": map[string]interface{}{"string": "john@example.com"}, - }, - }, - { - name: "Complex Record with Arrays", - schemaID: 2, - schemaJSON: `{ - "type": "record", - "name": "Order", - "fields": [ - {"name": "order_id", "type": "string"}, - {"name": "items", "type": {"type": "array", "items": "string"}}, - {"name": "total", "type": "double"}, - {"name": "metadata", "type": {"type": "map", "values": "string"}} - ] - }`, - testData: map[string]interface{}{ - "order_id": "ORD-001", - "items": []interface{}{"item1", "item2", "item3"}, - "total": 99.99, - "metadata": map[string]interface{}{ - "source": "web", - "campaign": "summer2024", - }, - }, - }, - { - name: "Union Types", - schemaID: 3, - schemaJSON: `{ - "type": "record", - "name": "Event", - "fields": [ - {"name": "event_id", "type": "string"}, - {"name": "payload", "type": ["null", "string", "int"]}, - {"name": "timestamp", "type": "long"} - ] - }`, - testData: map[string]interface{}{ - "event_id": "evt-123", - "payload": map[string]interface{}{"int": int32(42)}, - "timestamp": int64(1640995200000), - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Register schema in mock registry - registerSchemaInMock(t, registry, tc.schemaID, tc.schemaJSON) - - // Create Avro codec - codec, err := goavro.NewCodec(tc.schemaJSON) - require.NoError(t, err) - - // Encode test data to Avro binary - avroBinary, err := codec.BinaryFromNative(nil, tc.testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createConfluentEnvelope(tc.schemaID, avroBinary) - - // Test decode - decoded, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(tc.schemaID), decoded.SchemaID) - assert.Equal(t, FormatAvro, decoded.SchemaFormat) - assert.NotNil(t, decoded.RecordValue) - - // Verify decoded fields match original data - verifyDecodedFields(t, tc.testData, decoded.RecordValue.Fields) - - // Test re-encoding (round-trip) - reconstructed, err := manager.EncodeMessage(decoded.RecordValue, decoded.SchemaID, decoded.SchemaFormat) - require.NoError(t, err) - - // Verify reconstructed envelope - assert.Equal(t, envelope[:5], reconstructed[:5]) // Magic byte + schema ID - - // Decode reconstructed data to verify round-trip integrity - decodedAgain, err := manager.DecodeMessage(reconstructed) - require.NoError(t, err) - assert.Equal(t, decoded.SchemaID, decodedAgain.SchemaID) - assert.Equal(t, decoded.SchemaFormat, decodedAgain.SchemaFormat) - - // // Verify fields are identical after round-trip - // verifyRecordValuesEqual(t, decoded.RecordValue, decodedAgain.RecordValue) - }) - } -} - -// TestSchemaDecodeEncode_JSONSchema tests JSON Schema decode/encode workflow -func TestSchemaDecodeEncode_JSONSchema(t *testing.T) { - registry := createMockSchemaRegistryForDecodeTest(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - testCases := []struct { - name string - schemaID int32 - schemaJSON string - testData map[string]interface{} - }{ - { - name: "Product Schema", - schemaID: 10, - schemaJSON: `{ - "type": "object", - "properties": { - "product_id": {"type": "string"}, - "name": {"type": "string"}, - "price": {"type": "number"}, - "in_stock": {"type": "boolean"}, - "tags": { - "type": "array", - "items": {"type": "string"} - } - }, - "required": ["product_id", "name", "price"] - }`, - testData: map[string]interface{}{ - "product_id": "PROD-123", - "name": "Awesome Widget", - "price": 29.99, - "in_stock": true, - "tags": []interface{}{"electronics", "gadget"}, - }, - }, - { - name: "Nested Object Schema", - schemaID: 11, - schemaJSON: `{ - "type": "object", - "properties": { - "customer": { - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "address": { - "type": "object", - "properties": { - "street": {"type": "string"}, - "city": {"type": "string"}, - "zip": {"type": "string"} - } - } - } - }, - "order_date": {"type": "string", "format": "date"} - } - }`, - testData: map[string]interface{}{ - "customer": map[string]interface{}{ - "id": float64(456), // JSON numbers are float64 - "name": "Jane Smith", - "address": map[string]interface{}{ - "street": "123 Main St", - "city": "Anytown", - "zip": "12345", - }, - }, - "order_date": "2024-01-15", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Register schema in mock registry - registerSchemaInMock(t, registry, tc.schemaID, tc.schemaJSON) - - // Encode test data to JSON - jsonBytes, err := json.Marshal(tc.testData) - require.NoError(t, err) - - // Create Confluent envelope - envelope := createConfluentEnvelope(tc.schemaID, jsonBytes) - - // Test decode - decoded, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - assert.Equal(t, uint32(tc.schemaID), decoded.SchemaID) - assert.Equal(t, FormatJSONSchema, decoded.SchemaFormat) - assert.NotNil(t, decoded.RecordValue) - - // Test encode back to Confluent envelope - reconstructed, err := manager.EncodeMessage(decoded.RecordValue, decoded.SchemaID, decoded.SchemaFormat) - require.NoError(t, err) - - // Verify reconstructed envelope has correct header - assert.Equal(t, envelope[:5], reconstructed[:5]) // Magic byte + schema ID - - // Decode reconstructed data to verify round-trip integrity - decodedAgain, err := manager.DecodeMessage(reconstructed) - require.NoError(t, err) - assert.Equal(t, decoded.SchemaID, decodedAgain.SchemaID) - assert.Equal(t, decoded.SchemaFormat, decodedAgain.SchemaFormat) - - // Verify fields are identical after round-trip - verifyRecordValuesEqual(t, decoded.RecordValue, decodedAgain.RecordValue) - }) - } -} - -// TestSchemaDecodeEncode_Protobuf tests Protobuf decode/encode workflow -func TestSchemaDecodeEncode_Protobuf(t *testing.T) { - registry := createMockSchemaRegistryForDecodeTest(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - // Test that Protobuf text schema parsing and decoding works - schemaID := int32(20) - protoSchema := `syntax = "proto3"; message TestMessage { string name = 1; int32 id = 2; }` - - // Register schema in mock registry - registerSchemaInMock(t, registry, schemaID, protoSchema) - - // Create a Protobuf message: name="test", id=123 - protobufData := []byte{0x0a, 0x04, 0x74, 0x65, 0x73, 0x74, 0x10, 0x7b} - envelope := createConfluentEnvelope(schemaID, protobufData) - - // Test decode - should work with text .proto schema parsing - decoded, err := manager.DecodeMessage(envelope) - - // Should successfully decode now that text .proto parsing is implemented - require.NoError(t, err) - assert.NotNil(t, decoded) - assert.Equal(t, uint32(schemaID), decoded.SchemaID) - assert.Equal(t, FormatProtobuf, decoded.SchemaFormat) - assert.NotNil(t, decoded.RecordValue) - - // Verify the decoded fields - assert.Contains(t, decoded.RecordValue.Fields, "name") - assert.Contains(t, decoded.RecordValue.Fields, "id") -} - -// TestSchemaDecodeEncode_ErrorHandling tests various error conditions -func TestSchemaDecodeEncode_ErrorHandling(t *testing.T) { - registry := createMockSchemaRegistryForDecodeTest(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - t.Run("Invalid Confluent Envelope", func(t *testing.T) { - // Too short envelope - _, err := manager.DecodeMessage([]byte{0x00, 0x00}) - assert.Error(t, err) - assert.Contains(t, err.Error(), "message is not schematized") - - // Wrong magic byte - wrongMagic := []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x41, 0x42} - _, err = manager.DecodeMessage(wrongMagic) - assert.Error(t, err) - assert.Contains(t, err.Error(), "message is not schematized") - }) - - t.Run("Schema Not Found", func(t *testing.T) { - // Create envelope with non-existent schema ID - envelope := createConfluentEnvelope(999, []byte("test")) - _, err := manager.DecodeMessage(envelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to get schema 999") - }) - - t.Run("Invalid Avro Data", func(t *testing.T) { - schemaID := int32(100) - schemaJSON := `{"type": "record", "name": "Test", "fields": [{"name": "id", "type": "int"}]}` - registerSchemaInMock(t, registry, schemaID, schemaJSON) - - // Create envelope with invalid Avro data that will fail decoding - invalidAvroData := []byte{0xFF, 0xFF, 0xFF, 0xFF} // Invalid Avro binary data - envelope := createConfluentEnvelope(schemaID, invalidAvroData) - _, err := manager.DecodeMessage(envelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to decode Avro") - }) - - t.Run("Invalid JSON Data", func(t *testing.T) { - schemaID := int32(101) - schemaJSON := `{"type": "object", "properties": {"name": {"type": "string"}}}` - registerSchemaInMock(t, registry, schemaID, schemaJSON) - - // Create envelope with invalid JSON data - envelope := createConfluentEnvelope(schemaID, []byte("{invalid json")) - _, err := manager.DecodeMessage(envelope) - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to decode") - }) -} - -// TestSchemaDecodeEncode_CachePerformance tests caching behavior -func TestSchemaDecodeEncode_CachePerformance(t *testing.T) { - registry := createMockSchemaRegistryForDecodeTest(t) - defer registry.Close() - - manager, err := NewManager(ManagerConfig{ - RegistryURL: registry.URL, - }) - require.NoError(t, err) - - schemaID := int32(200) - schemaJSON := `{"type": "record", "name": "CacheTest", "fields": [{"name": "value", "type": "string"}]}` - registerSchemaInMock(t, registry, schemaID, schemaJSON) - - // Create test data - testData := map[string]interface{}{"value": "test"} - codec, err := goavro.NewCodec(schemaJSON) - require.NoError(t, err) - avroBinary, err := codec.BinaryFromNative(nil, testData) - require.NoError(t, err) - envelope := createConfluentEnvelope(schemaID, avroBinary) - - // First decode - should populate cache - decoded1, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - - // Second decode - should use cache - decoded2, err := manager.DecodeMessage(envelope) - require.NoError(t, err) - - // Verify both results are identical - assert.Equal(t, decoded1.SchemaID, decoded2.SchemaID) - assert.Equal(t, decoded1.SchemaFormat, decoded2.SchemaFormat) - verifyRecordValuesEqual(t, decoded1.RecordValue, decoded2.RecordValue) - - // Check cache stats - decoders, schemas, subjects := manager.GetCacheStats() - assert.True(t, decoders > 0) - assert.True(t, schemas > 0) - assert.True(t, subjects >= 0) -} - -// Helper functions - -func createMockSchemaRegistryForDecodeTest(t *testing.T) *httptest.Server { - schemas := make(map[int32]string) - - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/subjects": - w.WriteHeader(http.StatusOK) - w.Write([]byte("[]")) - default: - // Handle schema requests like /schemas/ids/1 - var schemaID int32 - if n, err := fmt.Sscanf(r.URL.Path, "/schemas/ids/%d", &schemaID); n == 1 && err == nil { - if schema, exists := schemas[schemaID]; exists { - response := fmt.Sprintf(`{"schema": %q}`, schema) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write([]byte(response)) - } else { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error_code": 40403, "message": "Schema not found"}`)) - } - } else if r.Method == "POST" && r.URL.Path == "/register-schema" { - // Custom endpoint for test registration - var req struct { - SchemaID int32 `json:"schema_id"` - Schema string `json:"schema"` - } - if err := json.NewDecoder(r.Body).Decode(&req); err == nil { - schemas[req.SchemaID] = req.Schema - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"success": true}`)) - } else { - w.WriteHeader(http.StatusBadRequest) - } - } else { - w.WriteHeader(http.StatusNotFound) - } - } - })) -} - -func registerSchemaInMock(t *testing.T, registry *httptest.Server, schemaID int32, schema string) { - reqBody := fmt.Sprintf(`{"schema_id": %d, "schema": %q}`, schemaID, schema) - resp, err := http.Post(registry.URL+"/register-schema", "application/json", bytes.NewReader([]byte(reqBody))) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode) -} - -func createConfluentEnvelope(schemaID int32, data []byte) []byte { - envelope := make([]byte, 5+len(data)) - envelope[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(envelope[1:5], uint32(schemaID)) - copy(envelope[5:], data) - return envelope -} - -func verifyDecodedFields(t *testing.T, expected map[string]interface{}, actual map[string]*schema_pb.Value) { - for key, expectedValue := range expected { - actualValue, exists := actual[key] - require.True(t, exists, "Field %s should exist", key) - - switch v := expectedValue.(type) { - case int32: - // Check both Int32Value and Int64Value since Avro integers can be stored as either - if actualValue.GetInt32Value() != 0 { - assert.Equal(t, v, actualValue.GetInt32Value(), "Field %s should match", key) - } else { - assert.Equal(t, int64(v), actualValue.GetInt64Value(), "Field %s should match", key) - } - case string: - assert.Equal(t, v, actualValue.GetStringValue(), "Field %s should match", key) - case float64: - assert.Equal(t, v, actualValue.GetDoubleValue(), "Field %s should match", key) - case bool: - assert.Equal(t, v, actualValue.GetBoolValue(), "Field %s should match", key) - case []interface{}: - listValue := actualValue.GetListValue() - require.NotNil(t, listValue, "Field %s should be a list", key) - assert.Equal(t, len(v), len(listValue.Values), "List %s should have correct length", key) - case map[string]interface{}: - // Check if this is an Avro union type (single key-value pair with type name) - if len(v) == 1 { - for unionType, unionValue := range v { - // Handle Avro union types - they are now stored as records - switch unionType { - case "int": - if intVal, ok := unionValue.(int32); ok { - // Union values are now stored as records with the union type as field name - recordValue := actualValue.GetRecordValue() - require.NotNil(t, recordValue, "Field %s should be a union record", key) - unionField := recordValue.Fields[unionType] - require.NotNil(t, unionField, "Union field %s should exist", unionType) - assert.Equal(t, intVal, unionField.GetInt32Value(), "Field %s should match", key) - } - case "string": - if strVal, ok := unionValue.(string); ok { - recordValue := actualValue.GetRecordValue() - require.NotNil(t, recordValue, "Field %s should be a union record", key) - unionField := recordValue.Fields[unionType] - require.NotNil(t, unionField, "Union field %s should exist", unionType) - assert.Equal(t, strVal, unionField.GetStringValue(), "Field %s should match", key) - } - case "long": - if longVal, ok := unionValue.(int64); ok { - recordValue := actualValue.GetRecordValue() - require.NotNil(t, recordValue, "Field %s should be a union record", key) - unionField := recordValue.Fields[unionType] - require.NotNil(t, unionField, "Union field %s should exist", unionType) - assert.Equal(t, longVal, unionField.GetInt64Value(), "Field %s should match", key) - } - default: - // If not a recognized union type, treat as regular nested record - recordValue := actualValue.GetRecordValue() - require.NotNil(t, recordValue, "Field %s should be a record", key) - verifyDecodedFields(t, v, recordValue.Fields) - } - break // Only one iteration for single-key map - } - } else { - // Handle regular maps/objects - recordValue := actualValue.GetRecordValue() - require.NotNil(t, recordValue, "Field %s should be a record", key) - verifyDecodedFields(t, v, recordValue.Fields) - } - } - } -} - -func verifyRecordValuesEqual(t *testing.T, expected, actual *schema_pb.RecordValue) { - require.Equal(t, len(expected.Fields), len(actual.Fields), "Record should have same number of fields") - - for key, expectedValue := range expected.Fields { - actualValue, exists := actual.Fields[key] - require.True(t, exists, "Field %s should exist", key) - - // Compare values based on type - switch expectedValue.Kind.(type) { - case *schema_pb.Value_StringValue: - assert.Equal(t, expectedValue.GetStringValue(), actualValue.GetStringValue()) - case *schema_pb.Value_Int64Value: - assert.Equal(t, expectedValue.GetInt64Value(), actualValue.GetInt64Value()) - case *schema_pb.Value_DoubleValue: - assert.Equal(t, expectedValue.GetDoubleValue(), actualValue.GetDoubleValue()) - case *schema_pb.Value_BoolValue: - assert.Equal(t, expectedValue.GetBoolValue(), actualValue.GetBoolValue()) - case *schema_pb.Value_ListValue: - expectedList := expectedValue.GetListValue() - actualList := actualValue.GetListValue() - require.Equal(t, len(expectedList.Values), len(actualList.Values)) - for i, expectedItem := range expectedList.Values { - verifyValuesEqual(t, expectedItem, actualList.Values[i]) - } - case *schema_pb.Value_RecordValue: - verifyRecordValuesEqual(t, expectedValue.GetRecordValue(), actualValue.GetRecordValue()) - } - } -} - -func verifyValuesEqual(t *testing.T, expected, actual *schema_pb.Value) { - switch expected.Kind.(type) { - case *schema_pb.Value_StringValue: - assert.Equal(t, expected.GetStringValue(), actual.GetStringValue()) - case *schema_pb.Value_Int64Value: - assert.Equal(t, expected.GetInt64Value(), actual.GetInt64Value()) - case *schema_pb.Value_DoubleValue: - assert.Equal(t, expected.GetDoubleValue(), actual.GetDoubleValue()) - case *schema_pb.Value_BoolValue: - assert.Equal(t, expected.GetBoolValue(), actual.GetBoolValue()) - default: - t.Errorf("Unsupported value type for comparison") - } -} diff --git a/weed/mq/kafka/schema/envelope.go b/weed/mq/kafka/schema/envelope.go deleted file mode 100644 index b20d44006..000000000 --- a/weed/mq/kafka/schema/envelope.go +++ /dev/null @@ -1,259 +0,0 @@ -package schema - -import ( - "encoding/binary" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// Format represents the schema format type -type Format int - -const ( - FormatUnknown Format = iota - FormatAvro - FormatProtobuf - FormatJSONSchema -) - -func (f Format) String() string { - switch f { - case FormatAvro: - return "AVRO" - case FormatProtobuf: - return "PROTOBUF" - case FormatJSONSchema: - return "JSON_SCHEMA" - default: - return "UNKNOWN" - } -} - -// ConfluentEnvelope represents the parsed Confluent Schema Registry envelope -type ConfluentEnvelope struct { - Format Format - SchemaID uint32 - Indexes []int // For Protobuf nested message resolution - Payload []byte // The actual encoded data - OriginalBytes []byte // The complete original envelope bytes -} - -// ParseConfluentEnvelope parses a Confluent Schema Registry framed message -// Returns the envelope details and whether the message was successfully parsed -func ParseConfluentEnvelope(data []byte) (*ConfluentEnvelope, bool) { - if len(data) < 5 { - return nil, false // Too short to contain magic byte + schema ID - } - - // Check for Confluent magic byte (0x00) - if data[0] != 0x00 { - return nil, false // Not a Confluent-framed message - } - - // Extract schema ID (big-endian uint32) - schemaID := binary.BigEndian.Uint32(data[1:5]) - - envelope := &ConfluentEnvelope{ - Format: FormatAvro, // Default assumption; will be refined by schema registry lookup - SchemaID: schemaID, - Indexes: nil, - Payload: data[5:], // Default: payload starts after schema ID - OriginalBytes: data, // Store the complete original envelope - } - - // Note: Format detection should be done by the schema registry lookup - // For now, we'll default to Avro and let the manager determine the actual format - // based on the schema registry information - - return envelope, true -} - -// ParseConfluentProtobufEnvelope parses a Confluent Protobuf envelope with indexes -// This is a specialized version for Protobuf that handles message indexes -// -// Note: This function uses heuristics to distinguish between index varints and -// payload data, which may not be 100% reliable in all cases. For production use, -// consider using ParseConfluentProtobufEnvelopeWithIndexCount if you know the -// expected number of indexes. -func ParseConfluentProtobufEnvelope(data []byte) (*ConfluentEnvelope, bool) { - // For now, assume no indexes to avoid parsing issues - // This can be enhanced later when we have better schema information - return ParseConfluentProtobufEnvelopeWithIndexCount(data, 0) -} - -// ParseConfluentProtobufEnvelopeWithIndexCount parses a Confluent Protobuf envelope -// when you know the expected number of indexes -func ParseConfluentProtobufEnvelopeWithIndexCount(data []byte, expectedIndexCount int) (*ConfluentEnvelope, bool) { - if len(data) < 5 { - return nil, false - } - - // Check for Confluent magic byte - if data[0] != 0x00 { - return nil, false - } - - // Extract schema ID (big-endian uint32) - schemaID := binary.BigEndian.Uint32(data[1:5]) - - envelope := &ConfluentEnvelope{ - Format: FormatProtobuf, - SchemaID: schemaID, - Indexes: nil, - Payload: data[5:], // Default: payload starts after schema ID - OriginalBytes: data, - } - - // Parse the expected number of indexes - offset := 5 - for i := 0; i < expectedIndexCount && offset < len(data); i++ { - index, bytesRead := readVarint(data[offset:]) - if bytesRead == 0 { - // Invalid varint, stop parsing - break - } - envelope.Indexes = append(envelope.Indexes, int(index)) - offset += bytesRead - } - - envelope.Payload = data[offset:] - return envelope, true -} - -// IsSchematized checks if the given bytes represent a Confluent-framed message -func IsSchematized(data []byte) bool { - _, ok := ParseConfluentEnvelope(data) - return ok -} - -// ExtractSchemaID extracts just the schema ID without full parsing (for quick checks) -func ExtractSchemaID(data []byte) (uint32, bool) { - if len(data) < 5 || data[0] != 0x00 { - return 0, false - } - return binary.BigEndian.Uint32(data[1:5]), true -} - -// CreateConfluentEnvelope creates a Confluent-framed message from components -// This will be useful for reconstructing messages on the Fetch path -func CreateConfluentEnvelope(format Format, schemaID uint32, indexes []int, payload []byte) []byte { - // Start with magic byte + schema ID (5 bytes minimum) - // Validate sizes to prevent overflow - const maxSize = 1 << 30 // 1 GB limit - indexSize := len(indexes) * 4 - totalCapacity := 5 + len(payload) + indexSize - if len(payload) > maxSize || indexSize > maxSize || totalCapacity < 0 || totalCapacity > maxSize { - glog.Errorf("Envelope size too large: payload=%d, indexes=%d", len(payload), len(indexes)) - return nil - } - result := make([]byte, 5, totalCapacity) - result[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(result[1:5], schemaID) - - // For Protobuf, add indexes as varints - if format == FormatProtobuf && len(indexes) > 0 { - for _, index := range indexes { - varintBytes := encodeVarint(uint64(index)) - result = append(result, varintBytes...) - } - } - - // Append the actual payload - result = append(result, payload...) - - return result -} - -// ValidateEnvelope performs basic validation on a parsed envelope -func (e *ConfluentEnvelope) Validate() error { - if e.SchemaID == 0 { - return fmt.Errorf("invalid schema ID: 0") - } - - if len(e.Payload) == 0 { - return fmt.Errorf("empty payload") - } - - // Format-specific validation - switch e.Format { - case FormatAvro: - // Avro payloads should be valid binary data - // More specific validation will be done by the Avro decoder - case FormatProtobuf: - // Protobuf validation will be implemented in Phase 5 - case FormatJSONSchema: - // JSON Schema validation will be implemented in Phase 6 - default: - return fmt.Errorf("unsupported format: %v", e.Format) - } - - return nil -} - -// Metadata returns a map of envelope metadata for storage -func (e *ConfluentEnvelope) Metadata() map[string]string { - metadata := map[string]string{ - "schema_format": e.Format.String(), - "schema_id": fmt.Sprintf("%d", e.SchemaID), - } - - if len(e.Indexes) > 0 { - // Store indexes for Protobuf reconstruction - indexStr := "" - for i, idx := range e.Indexes { - if i > 0 { - indexStr += "," - } - indexStr += fmt.Sprintf("%d", idx) - } - metadata["protobuf_indexes"] = indexStr - } - - return metadata -} - -// encodeVarint encodes a uint64 as a varint -func encodeVarint(value uint64) []byte { - if value == 0 { - return []byte{0} - } - - var result []byte - for value > 0 { - b := byte(value & 0x7F) - value >>= 7 - - if value > 0 { - b |= 0x80 // Set continuation bit - } - - result = append(result, b) - } - - return result -} - -// readVarint reads a varint from the byte slice and returns the value and bytes consumed -func readVarint(data []byte) (uint64, int) { - var result uint64 - var shift uint - - for i, b := range data { - if i >= 10 { // Prevent overflow (max varint is 10 bytes) - return 0, 0 - } - - result |= uint64(b&0x7F) << shift - - if b&0x80 == 0 { - // Last byte (MSB is 0) - return result, i + 1 - } - - shift += 7 - } - - // Incomplete varint - return 0, 0 -} diff --git a/weed/mq/kafka/schema/envelope_test.go b/weed/mq/kafka/schema/envelope_test.go deleted file mode 100644 index 4a209779e..000000000 --- a/weed/mq/kafka/schema/envelope_test.go +++ /dev/null @@ -1,320 +0,0 @@ -package schema - -import ( - "encoding/binary" - "testing" -) - -func TestParseConfluentEnvelope(t *testing.T) { - tests := []struct { - name string - input []byte - expectOK bool - expectID uint32 - expectFormat Format - }{ - { - name: "valid Avro message", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, // schema ID 1 + "Hello" - expectOK: true, - expectID: 1, - expectFormat: FormatAvro, - }, - { - name: "valid message with larger schema ID", - input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, // schema ID 1234 + "foo" - expectOK: true, - expectID: 1234, - expectFormat: FormatAvro, - }, - { - name: "too short message", - input: []byte{0x00, 0x00, 0x00}, - expectOK: false, - }, - { - name: "no magic byte", - input: []byte{0x01, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - expectOK: false, - }, - { - name: "empty message", - input: []byte{}, - expectOK: false, - }, - { - name: "minimal valid message", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01}, // schema ID 1, empty payload - expectOK: true, - expectID: 1, - expectFormat: FormatAvro, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - envelope, ok := ParseConfluentEnvelope(tt.input) - - if ok != tt.expectOK { - t.Errorf("ParseConfluentEnvelope() ok = %v, want %v", ok, tt.expectOK) - return - } - - if !tt.expectOK { - return // No need to check further if we expected failure - } - - if envelope.SchemaID != tt.expectID { - t.Errorf("ParseConfluentEnvelope() schemaID = %v, want %v", envelope.SchemaID, tt.expectID) - } - - if envelope.Format != tt.expectFormat { - t.Errorf("ParseConfluentEnvelope() format = %v, want %v", envelope.Format, tt.expectFormat) - } - - // Verify payload extraction - expectedPayloadLen := len(tt.input) - 5 // 5 bytes for magic + schema ID - if len(envelope.Payload) != expectedPayloadLen { - t.Errorf("ParseConfluentEnvelope() payload length = %v, want %v", len(envelope.Payload), expectedPayloadLen) - } - }) - } -} - -func TestIsSchematized(t *testing.T) { - tests := []struct { - name string - input []byte - expect bool - }{ - { - name: "schematized message", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - expect: true, - }, - { - name: "non-schematized message", - input: []byte{0x48, 0x65, 0x6c, 0x6c, 0x6f}, // Just "Hello" - expect: false, - }, - { - name: "empty message", - input: []byte{}, - expect: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := IsSchematized(tt.input) - if result != tt.expect { - t.Errorf("IsSchematized() = %v, want %v", result, tt.expect) - } - }) - } -} - -func TestExtractSchemaID(t *testing.T) { - tests := []struct { - name string - input []byte - expectID uint32 - expectOK bool - }{ - { - name: "valid schema ID", - input: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - expectID: 1, - expectOK: true, - }, - { - name: "large schema ID", - input: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x02, 0x66, 0x6f, 0x6f}, - expectID: 1234, - expectOK: true, - }, - { - name: "no magic byte", - input: []byte{0x01, 0x00, 0x00, 0x00, 0x01}, - expectID: 0, - expectOK: false, - }, - { - name: "too short", - input: []byte{0x00, 0x00}, - expectID: 0, - expectOK: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - id, ok := ExtractSchemaID(tt.input) - - if ok != tt.expectOK { - t.Errorf("ExtractSchemaID() ok = %v, want %v", ok, tt.expectOK) - } - - if id != tt.expectID { - t.Errorf("ExtractSchemaID() id = %v, want %v", id, tt.expectID) - } - }) - } -} - -func TestCreateConfluentEnvelope(t *testing.T) { - tests := []struct { - name string - format Format - schemaID uint32 - indexes []int - payload []byte - expected []byte - }{ - { - name: "simple Avro message", - format: FormatAvro, - schemaID: 1, - indexes: nil, - payload: []byte("Hello"), - expected: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - }, - { - name: "large schema ID", - format: FormatAvro, - schemaID: 1234, - indexes: nil, - payload: []byte("foo"), - expected: []byte{0x00, 0x00, 0x00, 0x04, 0xd2, 0x66, 0x6f, 0x6f}, - }, - { - name: "empty payload", - format: FormatAvro, - schemaID: 5, - indexes: nil, - payload: []byte{}, - expected: []byte{0x00, 0x00, 0x00, 0x00, 0x05}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := CreateConfluentEnvelope(tt.format, tt.schemaID, tt.indexes, tt.payload) - - if len(result) != len(tt.expected) { - t.Errorf("CreateConfluentEnvelope() length = %v, want %v", len(result), len(tt.expected)) - return - } - - for i, b := range result { - if b != tt.expected[i] { - t.Errorf("CreateConfluentEnvelope() byte[%d] = %v, want %v", i, b, tt.expected[i]) - } - } - }) - } -} - -func TestEnvelopeValidate(t *testing.T) { - tests := []struct { - name string - envelope *ConfluentEnvelope - expectErr bool - }{ - { - name: "valid Avro envelope", - envelope: &ConfluentEnvelope{ - Format: FormatAvro, - SchemaID: 1, - Payload: []byte("Hello"), - }, - expectErr: false, - }, - { - name: "zero schema ID", - envelope: &ConfluentEnvelope{ - Format: FormatAvro, - SchemaID: 0, - Payload: []byte("Hello"), - }, - expectErr: true, - }, - { - name: "empty payload", - envelope: &ConfluentEnvelope{ - Format: FormatAvro, - SchemaID: 1, - Payload: []byte{}, - }, - expectErr: true, - }, - { - name: "unknown format", - envelope: &ConfluentEnvelope{ - Format: FormatUnknown, - SchemaID: 1, - Payload: []byte("Hello"), - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.envelope.Validate() - - if (err != nil) != tt.expectErr { - t.Errorf("Envelope.Validate() error = %v, expectErr %v", err, tt.expectErr) - } - }) - } -} - -func TestEnvelopeMetadata(t *testing.T) { - envelope := &ConfluentEnvelope{ - Format: FormatAvro, - SchemaID: 123, - Indexes: []int{1, 2, 3}, - Payload: []byte("test"), - } - - metadata := envelope.Metadata() - - if metadata["schema_format"] != "AVRO" { - t.Errorf("Expected schema_format=AVRO, got %s", metadata["schema_format"]) - } - - if metadata["schema_id"] != "123" { - t.Errorf("Expected schema_id=123, got %s", metadata["schema_id"]) - } - - if metadata["protobuf_indexes"] != "1,2,3" { - t.Errorf("Expected protobuf_indexes=1,2,3, got %s", metadata["protobuf_indexes"]) - } -} - -// Benchmark tests for performance -func BenchmarkParseConfluentEnvelope(b *testing.B) { - // Create a test message - testMsg := make([]byte, 1024) - testMsg[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(testMsg[1:5], 123) // Schema ID - // Fill rest with dummy data - for i := 5; i < len(testMsg); i++ { - testMsg[i] = byte(i % 256) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = ParseConfluentEnvelope(testMsg) - } -} - -func BenchmarkIsSchematized(b *testing.B) { - testMsg := []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = IsSchematized(testMsg) - } -} diff --git a/weed/mq/kafka/schema/envelope_varint_test.go b/weed/mq/kafka/schema/envelope_varint_test.go deleted file mode 100644 index 92004c3d6..000000000 --- a/weed/mq/kafka/schema/envelope_varint_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncodeDecodeVarint(t *testing.T) { - testCases := []struct { - name string - value uint64 - }{ - {"zero", 0}, - {"small", 1}, - {"medium", 127}, - {"large", 128}, - {"very_large", 16384}, - {"max_uint32", 4294967295}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Encode the value - encoded := encodeVarint(tc.value) - require.NotEmpty(t, encoded) - - // Decode it back - decoded, bytesRead := readVarint(encoded) - require.Equal(t, len(encoded), bytesRead, "Should consume all encoded bytes") - assert.Equal(t, tc.value, decoded, "Decoded value should match original") - }) - } -} - -func TestCreateConfluentEnvelopeWithProtobufIndexes(t *testing.T) { - testCases := []struct { - name string - format Format - schemaID uint32 - indexes []int - payload []byte - }{ - { - name: "avro_no_indexes", - format: FormatAvro, - schemaID: 123, - indexes: nil, - payload: []byte("avro payload"), - }, - { - name: "protobuf_no_indexes", - format: FormatProtobuf, - schemaID: 456, - indexes: nil, - payload: []byte("protobuf payload"), - }, - { - name: "protobuf_single_index", - format: FormatProtobuf, - schemaID: 789, - indexes: []int{1}, - payload: []byte("protobuf with index"), - }, - { - name: "protobuf_multiple_indexes", - format: FormatProtobuf, - schemaID: 101112, - indexes: []int{0, 1, 2, 3}, - payload: []byte("protobuf with multiple indexes"), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create the envelope - envelope := CreateConfluentEnvelope(tc.format, tc.schemaID, tc.indexes, tc.payload) - - // Verify basic structure - require.True(t, len(envelope) >= 5, "Envelope should be at least 5 bytes") - assert.Equal(t, byte(0x00), envelope[0], "Magic byte should be 0x00") - - // Extract and verify schema ID - extractedSchemaID, ok := ExtractSchemaID(envelope) - require.True(t, ok, "Should be able to extract schema ID") - assert.Equal(t, tc.schemaID, extractedSchemaID, "Schema ID should match") - - // Parse the envelope based on format - if tc.format == FormatProtobuf && len(tc.indexes) > 0 { - // Use Protobuf-specific parser with known index count - parsed, ok := ParseConfluentProtobufEnvelopeWithIndexCount(envelope, len(tc.indexes)) - require.True(t, ok, "Should be able to parse Protobuf envelope") - assert.Equal(t, tc.format, parsed.Format) - assert.Equal(t, tc.schemaID, parsed.SchemaID) - assert.Equal(t, tc.indexes, parsed.Indexes, "Indexes should match") - assert.Equal(t, tc.payload, parsed.Payload, "Payload should match") - } else { - // Use generic parser - parsed, ok := ParseConfluentEnvelope(envelope) - require.True(t, ok, "Should be able to parse envelope") - assert.Equal(t, tc.schemaID, parsed.SchemaID) - - if tc.format == FormatProtobuf && len(tc.indexes) == 0 { - // For Protobuf without indexes, payload should match - assert.Equal(t, tc.payload, parsed.Payload, "Payload should match") - } else if tc.format == FormatAvro { - // For Avro, payload should match (no indexes) - assert.Equal(t, tc.payload, parsed.Payload, "Payload should match") - } - } - }) - } -} - -func TestProtobufEnvelopeRoundTrip(t *testing.T) { - // Use more realistic index values (typically small numbers for message types) - originalIndexes := []int{0, 1, 2, 3} - originalPayload := []byte("test protobuf message data") - schemaID := uint32(12345) - - // Create envelope - envelope := CreateConfluentEnvelope(FormatProtobuf, schemaID, originalIndexes, originalPayload) - - // Parse it back with known index count - parsed, ok := ParseConfluentProtobufEnvelopeWithIndexCount(envelope, len(originalIndexes)) - require.True(t, ok, "Should be able to parse created envelope") - - // Verify all fields - assert.Equal(t, FormatProtobuf, parsed.Format) - assert.Equal(t, schemaID, parsed.SchemaID) - assert.Equal(t, originalIndexes, parsed.Indexes) - assert.Equal(t, originalPayload, parsed.Payload) - assert.Equal(t, envelope, parsed.OriginalBytes) -} - -func TestVarintEdgeCases(t *testing.T) { - t.Run("empty_data", func(t *testing.T) { - value, bytesRead := readVarint([]byte{}) - assert.Equal(t, uint64(0), value) - assert.Equal(t, 0, bytesRead) - }) - - t.Run("incomplete_varint", func(t *testing.T) { - // Create an incomplete varint (continuation bit set but no more bytes) - incompleteVarint := []byte{0x80} // Continuation bit set, but no more bytes - value, bytesRead := readVarint(incompleteVarint) - assert.Equal(t, uint64(0), value) - assert.Equal(t, 0, bytesRead) - }) - - t.Run("max_varint_length", func(t *testing.T) { - // Create a varint that's too long (more than 10 bytes) - tooLongVarint := make([]byte, 11) - for i := 0; i < 10; i++ { - tooLongVarint[i] = 0x80 // All continuation bits - } - tooLongVarint[10] = 0x01 // Final byte - - value, bytesRead := readVarint(tooLongVarint) - assert.Equal(t, uint64(0), value) - assert.Equal(t, 0, bytesRead) - }) -} - -func TestProtobufEnvelopeValidation(t *testing.T) { - t.Run("valid_envelope", func(t *testing.T) { - indexes := []int{1, 2} - envelope := CreateConfluentEnvelope(FormatProtobuf, 123, indexes, []byte("payload")) - parsed, ok := ParseConfluentProtobufEnvelopeWithIndexCount(envelope, len(indexes)) - require.True(t, ok) - - err := parsed.Validate() - assert.NoError(t, err) - }) - - t.Run("zero_schema_id", func(t *testing.T) { - indexes := []int{1} - envelope := CreateConfluentEnvelope(FormatProtobuf, 0, indexes, []byte("payload")) - parsed, ok := ParseConfluentProtobufEnvelopeWithIndexCount(envelope, len(indexes)) - require.True(t, ok) - - err := parsed.Validate() - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid schema ID: 0") - }) - - t.Run("empty_payload", func(t *testing.T) { - indexes := []int{1} - envelope := CreateConfluentEnvelope(FormatProtobuf, 123, indexes, []byte{}) - parsed, ok := ParseConfluentProtobufEnvelopeWithIndexCount(envelope, len(indexes)) - require.True(t, ok) - - err := parsed.Validate() - assert.Error(t, err) - assert.Contains(t, err.Error(), "empty payload") - }) -} diff --git a/weed/mq/kafka/schema/evolution.go b/weed/mq/kafka/schema/evolution.go deleted file mode 100644 index 73b56fc03..000000000 --- a/weed/mq/kafka/schema/evolution.go +++ /dev/null @@ -1,522 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/linkedin/goavro/v2" -) - -// CompatibilityLevel defines the schema compatibility level -type CompatibilityLevel string - -const ( - CompatibilityNone CompatibilityLevel = "NONE" - CompatibilityBackward CompatibilityLevel = "BACKWARD" - CompatibilityForward CompatibilityLevel = "FORWARD" - CompatibilityFull CompatibilityLevel = "FULL" -) - -// SchemaEvolutionChecker handles schema compatibility checking and evolution -type SchemaEvolutionChecker struct { - // Cache for parsed schemas to avoid re-parsing - schemaCache map[string]interface{} -} - -// NewSchemaEvolutionChecker creates a new schema evolution checker -func NewSchemaEvolutionChecker() *SchemaEvolutionChecker { - return &SchemaEvolutionChecker{ - schemaCache: make(map[string]interface{}), - } -} - -// CompatibilityResult represents the result of a compatibility check -type CompatibilityResult struct { - Compatible bool - Issues []string - Level CompatibilityLevel -} - -// CheckCompatibility checks if two schemas are compatible according to the specified level -func (checker *SchemaEvolutionChecker) CheckCompatibility( - oldSchemaStr, newSchemaStr string, - format Format, - level CompatibilityLevel, -) (*CompatibilityResult, error) { - - result := &CompatibilityResult{ - Compatible: true, - Issues: []string{}, - Level: level, - } - - if level == CompatibilityNone { - return result, nil - } - - switch format { - case FormatAvro: - return checker.checkAvroCompatibility(oldSchemaStr, newSchemaStr, level) - case FormatProtobuf: - return checker.checkProtobufCompatibility(oldSchemaStr, newSchemaStr, level) - case FormatJSONSchema: - return checker.checkJSONSchemaCompatibility(oldSchemaStr, newSchemaStr, level) - default: - return nil, fmt.Errorf("unsupported schema format for compatibility check: %s", format) - } -} - -// checkAvroCompatibility checks Avro schema compatibility -func (checker *SchemaEvolutionChecker) checkAvroCompatibility( - oldSchemaStr, newSchemaStr string, - level CompatibilityLevel, -) (*CompatibilityResult, error) { - - result := &CompatibilityResult{ - Compatible: true, - Issues: []string{}, - Level: level, - } - - // Parse old schema - oldSchema, err := goavro.NewCodec(oldSchemaStr) - if err != nil { - return nil, fmt.Errorf("failed to parse old Avro schema: %w", err) - } - - // Parse new schema - newSchema, err := goavro.NewCodec(newSchemaStr) - if err != nil { - return nil, fmt.Errorf("failed to parse new Avro schema: %w", err) - } - - // Parse schema structures for detailed analysis - var oldSchemaMap, newSchemaMap map[string]interface{} - if err := json.Unmarshal([]byte(oldSchemaStr), &oldSchemaMap); err != nil { - return nil, fmt.Errorf("failed to parse old schema JSON: %w", err) - } - if err := json.Unmarshal([]byte(newSchemaStr), &newSchemaMap); err != nil { - return nil, fmt.Errorf("failed to parse new schema JSON: %w", err) - } - - // Check compatibility based on level - switch level { - case CompatibilityBackward: - checker.checkAvroBackwardCompatibility(oldSchemaMap, newSchemaMap, result) - case CompatibilityForward: - checker.checkAvroForwardCompatibility(oldSchemaMap, newSchemaMap, result) - case CompatibilityFull: - checker.checkAvroBackwardCompatibility(oldSchemaMap, newSchemaMap, result) - if result.Compatible { - checker.checkAvroForwardCompatibility(oldSchemaMap, newSchemaMap, result) - } - } - - // Additional validation: try to create test data and check if it can be read - if result.Compatible { - if err := checker.validateAvroDataCompatibility(oldSchema, newSchema, level); err != nil { - result.Compatible = false - result.Issues = append(result.Issues, fmt.Sprintf("Data compatibility test failed: %v", err)) - } - } - - return result, nil -} - -// checkAvroBackwardCompatibility checks if new schema can read data written with old schema -func (checker *SchemaEvolutionChecker) checkAvroBackwardCompatibility( - oldSchema, newSchema map[string]interface{}, - result *CompatibilityResult, -) { - // Check if fields were removed without defaults - oldFields := checker.extractAvroFields(oldSchema) - newFields := checker.extractAvroFields(newSchema) - - for fieldName, oldField := range oldFields { - if newField, exists := newFields[fieldName]; !exists { - // Field was removed - this breaks backward compatibility - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Field '%s' was removed, breaking backward compatibility", fieldName)) - } else { - // Field exists, check type compatibility - if !checker.areAvroTypesCompatible(oldField["type"], newField["type"], true) { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Field '%s' type changed incompatibly", fieldName)) - } - } - } - - // Check if new required fields were added without defaults - for fieldName, newField := range newFields { - if _, exists := oldFields[fieldName]; !exists { - // New field added - if _, hasDefault := newField["default"]; !hasDefault { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("New required field '%s' added without default value", fieldName)) - } - } - } -} - -// checkAvroForwardCompatibility checks if old schema can read data written with new schema -func (checker *SchemaEvolutionChecker) checkAvroForwardCompatibility( - oldSchema, newSchema map[string]interface{}, - result *CompatibilityResult, -) { - // Check if fields were added without defaults in old schema - oldFields := checker.extractAvroFields(oldSchema) - newFields := checker.extractAvroFields(newSchema) - - for fieldName, newField := range newFields { - if _, exists := oldFields[fieldName]; !exists { - // New field added - for forward compatibility, the new field should have a default - // so that old schema can ignore it when reading data written with new schema - if _, hasDefault := newField["default"]; !hasDefault { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("New field '%s' cannot be read by old schema (no default)", fieldName)) - } - } else { - // Field exists, check type compatibility (reverse direction) - oldField := oldFields[fieldName] - if !checker.areAvroTypesCompatible(newField["type"], oldField["type"], false) { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Field '%s' type change breaks forward compatibility", fieldName)) - } - } - } - - // Check if fields were removed - for fieldName := range oldFields { - if _, exists := newFields[fieldName]; !exists { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Field '%s' was removed, breaking forward compatibility", fieldName)) - } - } -} - -// extractAvroFields extracts field information from an Avro schema -func (checker *SchemaEvolutionChecker) extractAvroFields(schema map[string]interface{}) map[string]map[string]interface{} { - fields := make(map[string]map[string]interface{}) - - if fieldsArray, ok := schema["fields"].([]interface{}); ok { - for _, fieldInterface := range fieldsArray { - if field, ok := fieldInterface.(map[string]interface{}); ok { - if name, ok := field["name"].(string); ok { - fields[name] = field - } - } - } - } - - return fields -} - -// areAvroTypesCompatible checks if two Avro types are compatible -func (checker *SchemaEvolutionChecker) areAvroTypesCompatible(oldType, newType interface{}, backward bool) bool { - // Simplified type compatibility check - // In a full implementation, this would handle complex types, unions, etc. - - oldTypeStr := fmt.Sprintf("%v", oldType) - newTypeStr := fmt.Sprintf("%v", newType) - - // Same type is always compatible - if oldTypeStr == newTypeStr { - return true - } - - // Check for promotable types (e.g., int -> long, float -> double) - if backward { - return checker.isPromotableType(oldTypeStr, newTypeStr) - } else { - return checker.isPromotableType(newTypeStr, oldTypeStr) - } -} - -// isPromotableType checks if a type can be promoted to another -func (checker *SchemaEvolutionChecker) isPromotableType(from, to string) bool { - promotions := map[string][]string{ - "int": {"long", "float", "double"}, - "long": {"float", "double"}, - "float": {"double"}, - "string": {"bytes"}, - "bytes": {"string"}, - } - - if validPromotions, exists := promotions[from]; exists { - for _, validTo := range validPromotions { - if to == validTo { - return true - } - } - } - - return false -} - -// validateAvroDataCompatibility validates compatibility by testing with actual data -func (checker *SchemaEvolutionChecker) validateAvroDataCompatibility( - oldSchema, newSchema *goavro.Codec, - level CompatibilityLevel, -) error { - // Create test data with old schema - testData := map[string]interface{}{ - "test_field": "test_value", - } - - // Try to encode with old schema - encoded, err := oldSchema.BinaryFromNative(nil, testData) - if err != nil { - // If we can't create test data, skip validation - return nil - } - - // Try to decode with new schema (backward compatibility) - if level == CompatibilityBackward || level == CompatibilityFull { - _, _, err := newSchema.NativeFromBinary(encoded) - if err != nil { - return fmt.Errorf("backward compatibility failed: %w", err) - } - } - - // Try to encode with new schema and decode with old (forward compatibility) - if level == CompatibilityForward || level == CompatibilityFull { - newEncoded, err := newSchema.BinaryFromNative(nil, testData) - if err == nil { - _, _, err = oldSchema.NativeFromBinary(newEncoded) - if err != nil { - return fmt.Errorf("forward compatibility failed: %w", err) - } - } - } - - return nil -} - -// checkProtobufCompatibility checks Protobuf schema compatibility -func (checker *SchemaEvolutionChecker) checkProtobufCompatibility( - oldSchemaStr, newSchemaStr string, - level CompatibilityLevel, -) (*CompatibilityResult, error) { - - result := &CompatibilityResult{ - Compatible: true, - Issues: []string{}, - Level: level, - } - - // For now, implement basic Protobuf compatibility rules - // In a full implementation, this would parse .proto files and check field numbers, types, etc. - - // Basic check: if schemas are identical, they're compatible - if oldSchemaStr == newSchemaStr { - return result, nil - } - - // For protobuf, we need to parse the schema and check: - // - Field numbers haven't changed - // - Required fields haven't been removed - // - Field types are compatible - - // Simplified implementation - mark as compatible with warning - result.Issues = append(result.Issues, "Protobuf compatibility checking is simplified - manual review recommended") - - return result, nil -} - -// checkJSONSchemaCompatibility checks JSON Schema compatibility -func (checker *SchemaEvolutionChecker) checkJSONSchemaCompatibility( - oldSchemaStr, newSchemaStr string, - level CompatibilityLevel, -) (*CompatibilityResult, error) { - - result := &CompatibilityResult{ - Compatible: true, - Issues: []string{}, - Level: level, - } - - // Parse JSON schemas - var oldSchema, newSchema map[string]interface{} - if err := json.Unmarshal([]byte(oldSchemaStr), &oldSchema); err != nil { - return nil, fmt.Errorf("failed to parse old JSON schema: %w", err) - } - if err := json.Unmarshal([]byte(newSchemaStr), &newSchema); err != nil { - return nil, fmt.Errorf("failed to parse new JSON schema: %w", err) - } - - // Check compatibility based on level - switch level { - case CompatibilityBackward: - checker.checkJSONSchemaBackwardCompatibility(oldSchema, newSchema, result) - case CompatibilityForward: - checker.checkJSONSchemaForwardCompatibility(oldSchema, newSchema, result) - case CompatibilityFull: - checker.checkJSONSchemaBackwardCompatibility(oldSchema, newSchema, result) - if result.Compatible { - checker.checkJSONSchemaForwardCompatibility(oldSchema, newSchema, result) - } - } - - return result, nil -} - -// checkJSONSchemaBackwardCompatibility checks JSON Schema backward compatibility -func (checker *SchemaEvolutionChecker) checkJSONSchemaBackwardCompatibility( - oldSchema, newSchema map[string]interface{}, - result *CompatibilityResult, -) { - // Check if required fields were added - oldRequired := checker.extractJSONSchemaRequired(oldSchema) - newRequired := checker.extractJSONSchemaRequired(newSchema) - - for _, field := range newRequired { - if !contains(oldRequired, field) { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("New required field '%s' breaks backward compatibility", field)) - } - } - - // Check if properties were removed - oldProperties := checker.extractJSONSchemaProperties(oldSchema) - newProperties := checker.extractJSONSchemaProperties(newSchema) - - for propName := range oldProperties { - if _, exists := newProperties[propName]; !exists { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Property '%s' was removed, breaking backward compatibility", propName)) - } - } -} - -// checkJSONSchemaForwardCompatibility checks JSON Schema forward compatibility -func (checker *SchemaEvolutionChecker) checkJSONSchemaForwardCompatibility( - oldSchema, newSchema map[string]interface{}, - result *CompatibilityResult, -) { - // Check if required fields were removed - oldRequired := checker.extractJSONSchemaRequired(oldSchema) - newRequired := checker.extractJSONSchemaRequired(newSchema) - - for _, field := range oldRequired { - if !contains(newRequired, field) { - result.Compatible = false - result.Issues = append(result.Issues, - fmt.Sprintf("Required field '%s' was removed, breaking forward compatibility", field)) - } - } - - // Check if properties were added - oldProperties := checker.extractJSONSchemaProperties(oldSchema) - newProperties := checker.extractJSONSchemaProperties(newSchema) - - for propName := range newProperties { - if _, exists := oldProperties[propName]; !exists { - result.Issues = append(result.Issues, - fmt.Sprintf("New property '%s' added - ensure old schema can handle it", propName)) - } - } -} - -// extractJSONSchemaRequired extracts required fields from JSON Schema -func (checker *SchemaEvolutionChecker) extractJSONSchemaRequired(schema map[string]interface{}) []string { - if required, ok := schema["required"].([]interface{}); ok { - var fields []string - for _, field := range required { - if fieldStr, ok := field.(string); ok { - fields = append(fields, fieldStr) - } - } - return fields - } - return []string{} -} - -// extractJSONSchemaProperties extracts properties from JSON Schema -func (checker *SchemaEvolutionChecker) extractJSONSchemaProperties(schema map[string]interface{}) map[string]interface{} { - if properties, ok := schema["properties"].(map[string]interface{}); ok { - return properties - } - return make(map[string]interface{}) -} - -// contains checks if a slice contains a string -func contains(slice []string, item string) bool { - for _, s := range slice { - if s == item { - return true - } - } - return false -} - -// GetCompatibilityLevel returns the compatibility level for a subject -func (checker *SchemaEvolutionChecker) GetCompatibilityLevel(subject string) CompatibilityLevel { - // In a real implementation, this would query the schema registry - // For now, return a default level - return CompatibilityBackward -} - -// SetCompatibilityLevel sets the compatibility level for a subject -func (checker *SchemaEvolutionChecker) SetCompatibilityLevel(subject string, level CompatibilityLevel) error { - // In a real implementation, this would update the schema registry - return nil -} - -// CanEvolve checks if a schema can be evolved according to the compatibility rules -func (checker *SchemaEvolutionChecker) CanEvolve( - subject string, - currentSchemaStr, newSchemaStr string, - format Format, -) (*CompatibilityResult, error) { - - level := checker.GetCompatibilityLevel(subject) - return checker.CheckCompatibility(currentSchemaStr, newSchemaStr, format, level) -} - -// SuggestEvolution suggests how to evolve a schema to maintain compatibility -func (checker *SchemaEvolutionChecker) SuggestEvolution( - oldSchemaStr, newSchemaStr string, - format Format, - level CompatibilityLevel, -) ([]string, error) { - - suggestions := []string{} - - result, err := checker.CheckCompatibility(oldSchemaStr, newSchemaStr, format, level) - if err != nil { - return nil, err - } - - if result.Compatible { - suggestions = append(suggestions, "Schema evolution is compatible") - return suggestions, nil - } - - // Analyze issues and provide suggestions - for _, issue := range result.Issues { - if strings.Contains(issue, "required field") && strings.Contains(issue, "added") { - suggestions = append(suggestions, "Add default values to new required fields") - } - if strings.Contains(issue, "removed") { - suggestions = append(suggestions, "Consider deprecating fields instead of removing them") - } - if strings.Contains(issue, "type changed") { - suggestions = append(suggestions, "Use type promotion or union types for type changes") - } - } - - if len(suggestions) == 0 { - suggestions = append(suggestions, "Manual schema review required - compatibility issues detected") - } - - return suggestions, nil -} diff --git a/weed/mq/kafka/schema/evolution_test.go b/weed/mq/kafka/schema/evolution_test.go deleted file mode 100644 index 37279ce2b..000000000 --- a/weed/mq/kafka/schema/evolution_test.go +++ /dev/null @@ -1,556 +0,0 @@ -package schema - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestSchemaEvolutionChecker_AvroBackwardCompatibility tests Avro backward compatibility -func TestSchemaEvolutionChecker_AvroBackwardCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Compatible - Add optional field", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - assert.Empty(t, result.Issues) - }) - - t.Run("Incompatible - Remove field", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "Field 'email' was removed") - }) - - t.Run("Incompatible - Add required field", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "New required field 'email' added without default") - }) - - t.Run("Compatible - Type promotion", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "score", "type": "int"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "score", "type": "long"} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) -} - -// TestSchemaEvolutionChecker_AvroForwardCompatibility tests Avro forward compatibility -func TestSchemaEvolutionChecker_AvroForwardCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Compatible - Remove optional field", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityForward) - require.NoError(t, err) - assert.False(t, result.Compatible) // Forward compatibility is stricter - assert.Contains(t, result.Issues[0], "Field 'email' was removed") - }) - - t.Run("Incompatible - Add field without default in old schema", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityForward) - require.NoError(t, err) - // This should be compatible in forward direction since new field has default - // But our simplified implementation might flag it - // The exact behavior depends on implementation details - _ = result // Use the result to avoid unused variable error - }) -} - -// TestSchemaEvolutionChecker_AvroFullCompatibility tests Avro full compatibility -func TestSchemaEvolutionChecker_AvroFullCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Compatible - Add optional field with default", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityFull) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) - - t.Run("Incompatible - Remove field", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityFull) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.True(t, len(result.Issues) > 0) - }) -} - -// TestSchemaEvolutionChecker_JSONSchemaCompatibility tests JSON Schema compatibility -func TestSchemaEvolutionChecker_JSONSchemaCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Compatible - Add optional property", func(t *testing.T) { - oldSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - }, - "required": ["id", "name"] - }` - - newSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "email": {"type": "string"} - }, - "required": ["id", "name"] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatJSONSchema, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) - - t.Run("Incompatible - Add required property", func(t *testing.T) { - oldSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - }, - "required": ["id", "name"] - }` - - newSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "email": {"type": "string"} - }, - "required": ["id", "name", "email"] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatJSONSchema, CompatibilityBackward) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "New required field 'email'") - }) - - t.Run("Incompatible - Remove property", func(t *testing.T) { - oldSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "email": {"type": "string"} - }, - "required": ["id", "name"] - }` - - newSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - }, - "required": ["id", "name"] - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatJSONSchema, CompatibilityBackward) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "Property 'email' was removed") - }) -} - -// TestSchemaEvolutionChecker_ProtobufCompatibility tests Protobuf compatibility -func TestSchemaEvolutionChecker_ProtobufCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Simplified Protobuf check", func(t *testing.T) { - oldSchema := `syntax = "proto3"; - message User { - int32 id = 1; - string name = 2; - }` - - newSchema := `syntax = "proto3"; - message User { - int32 id = 1; - string name = 2; - string email = 3; - }` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatProtobuf, CompatibilityBackward) - require.NoError(t, err) - // Our simplified implementation marks as compatible with warning - assert.True(t, result.Compatible) - assert.Contains(t, result.Issues[0], "simplified") - }) -} - -// TestSchemaEvolutionChecker_NoCompatibility tests no compatibility checking -func TestSchemaEvolutionChecker_NoCompatibility(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - oldSchema := `{"type": "string"}` - newSchema := `{"type": "integer"}` - - result, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityNone) - require.NoError(t, err) - assert.True(t, result.Compatible) - assert.Empty(t, result.Issues) -} - -// TestSchemaEvolutionChecker_TypePromotion tests type promotion rules -func TestSchemaEvolutionChecker_TypePromotion(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - tests := []struct { - from string - to string - promotable bool - }{ - {"int", "long", true}, - {"int", "float", true}, - {"int", "double", true}, - {"long", "float", true}, - {"long", "double", true}, - {"float", "double", true}, - {"string", "bytes", true}, - {"bytes", "string", true}, - {"long", "int", false}, - {"double", "float", false}, - {"string", "int", false}, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("%s_to_%s", test.from, test.to), func(t *testing.T) { - result := checker.isPromotableType(test.from, test.to) - assert.Equal(t, test.promotable, result) - }) - } -} - -// TestSchemaEvolutionChecker_SuggestEvolution tests evolution suggestions -func TestSchemaEvolutionChecker_SuggestEvolution(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Compatible schema", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string", "default": ""} - ] - }` - - suggestions, err := checker.SuggestEvolution(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.Contains(t, suggestions[0], "compatible") - }) - - t.Run("Incompatible schema with suggestions", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"} - ] - }` - - suggestions, err := checker.SuggestEvolution(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, len(suggestions) > 0) - // Should suggest not removing fields - found := false - for _, suggestion := range suggestions { - if strings.Contains(suggestion, "deprecating") { - found = true - break - } - } - assert.True(t, found) - }) -} - -// TestSchemaEvolutionChecker_CanEvolve tests the CanEvolve method -func TestSchemaEvolutionChecker_CanEvolve(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string", "default": ""} - ] - }` - - result, err := checker.CanEvolve("user-topic", oldSchema, newSchema, FormatAvro) - require.NoError(t, err) - assert.True(t, result.Compatible) -} - -// TestSchemaEvolutionChecker_ExtractFields tests field extraction utilities -func TestSchemaEvolutionChecker_ExtractFields(t *testing.T) { - checker := NewSchemaEvolutionChecker() - - t.Run("Extract Avro fields", func(t *testing.T) { - schema := map[string]interface{}{ - "fields": []interface{}{ - map[string]interface{}{ - "name": "id", - "type": "int", - }, - map[string]interface{}{ - "name": "name", - "type": "string", - "default": "", - }, - }, - } - - fields := checker.extractAvroFields(schema) - assert.Len(t, fields, 2) - assert.Contains(t, fields, "id") - assert.Contains(t, fields, "name") - assert.Equal(t, "int", fields["id"]["type"]) - assert.Equal(t, "", fields["name"]["default"]) - }) - - t.Run("Extract JSON Schema required fields", func(t *testing.T) { - schema := map[string]interface{}{ - "required": []interface{}{"id", "name"}, - } - - required := checker.extractJSONSchemaRequired(schema) - assert.Len(t, required, 2) - assert.Contains(t, required, "id") - assert.Contains(t, required, "name") - }) - - t.Run("Extract JSON Schema properties", func(t *testing.T) { - schema := map[string]interface{}{ - "properties": map[string]interface{}{ - "id": map[string]interface{}{"type": "integer"}, - "name": map[string]interface{}{"type": "string"}, - }, - } - - properties := checker.extractJSONSchemaProperties(schema) - assert.Len(t, properties, 2) - assert.Contains(t, properties, "id") - assert.Contains(t, properties, "name") - }) -} - -// BenchmarkSchemaCompatibilityCheck benchmarks compatibility checking performance -func BenchmarkSchemaCompatibilityCheck(b *testing.B) { - checker := NewSchemaEvolutionChecker() - - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""}, - {"name": "age", "type": "int", "default": 0} - ] - }` - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := checker.CheckCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/weed/mq/kafka/schema/integration_test.go b/weed/mq/kafka/schema/integration_test.go deleted file mode 100644 index 5677131c1..000000000 --- a/weed/mq/kafka/schema/integration_test.go +++ /dev/null @@ -1,643 +0,0 @@ -package schema - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/linkedin/goavro/v2" -) - -// TestFullIntegration_AvroWorkflow tests the complete Avro workflow -func TestFullIntegration_AvroWorkflow(t *testing.T) { - // Create comprehensive mock schema registry - server := createMockSchemaRegistry(t) - defer server.Close() - - // Create manager with realistic configuration - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - EnableMirroring: false, - CacheTTL: "5m", - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Test 1: Producer workflow - encode schematized message - t.Run("Producer_Workflow", func(t *testing.T) { - // Create realistic user data (with proper Avro union handling) - userData := map[string]interface{}{ - "id": int32(12345), - "name": "Alice Johnson", - "email": map[string]interface{}{"string": "alice@example.com"}, // Avro union - "age": map[string]interface{}{"int": int32(28)}, // Avro union - "preferences": map[string]interface{}{ - "Preferences": map[string]interface{}{ // Avro union with record type - "notifications": true, - "theme": "dark", - }, - }, - } - - // Create Avro message (simulate what a Kafka producer would send) - avroSchema := getUserAvroSchema() - codec, err := goavro.NewCodec(avroSchema) - if err != nil { - t.Fatalf("Failed to create Avro codec: %v", err) - } - - avroBinary, err := codec.BinaryFromNative(nil, userData) - if err != nil { - t.Fatalf("Failed to encode Avro data: %v", err) - } - - // Create Confluent envelope (what Kafka Gateway receives) - confluentMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - // Decode message (Produce path processing) - decodedMsg, err := manager.DecodeMessage(confluentMsg) - if err != nil { - t.Fatalf("Failed to decode message: %v", err) - } - - // Verify decoded data - if decodedMsg.SchemaID != 1 { - t.Errorf("Expected schema ID 1, got %d", decodedMsg.SchemaID) - } - - if decodedMsg.SchemaFormat != FormatAvro { - t.Errorf("Expected Avro format, got %v", decodedMsg.SchemaFormat) - } - - // Verify field values - fields := decodedMsg.RecordValue.Fields - if fields["id"].GetInt32Value() != 12345 { - t.Errorf("Expected id=12345, got %v", fields["id"].GetInt32Value()) - } - - if fields["name"].GetStringValue() != "Alice Johnson" { - t.Errorf("Expected name='Alice Johnson', got %v", fields["name"].GetStringValue()) - } - - t.Logf("Successfully processed producer message with %d fields", len(fields)) - }) - - // Test 2: Consumer workflow - reconstruct original message - t.Run("Consumer_Workflow", func(t *testing.T) { - // Create test RecordValue (simulate what's stored in SeaweedMQ) - testData := map[string]interface{}{ - "id": int32(67890), - "name": "Bob Smith", - "email": map[string]interface{}{"string": "bob@example.com"}, - "age": map[string]interface{}{"int": int32(35)}, // Avro union - } - recordValue := MapToRecordValue(testData) - - // Reconstruct message (Fetch path processing) - reconstructedMsg, err := manager.EncodeMessage(recordValue, 1, FormatAvro) - if err != nil { - t.Fatalf("Failed to reconstruct message: %v", err) - } - - // Verify reconstructed message can be parsed - envelope, ok := ParseConfluentEnvelope(reconstructedMsg) - if !ok { - t.Fatal("Failed to parse reconstructed envelope") - } - - if envelope.SchemaID != 1 { - t.Errorf("Expected schema ID 1, got %d", envelope.SchemaID) - } - - // Verify the payload can be decoded by Avro - avroSchema := getUserAvroSchema() - codec, err := goavro.NewCodec(avroSchema) - if err != nil { - t.Fatalf("Failed to create Avro codec: %v", err) - } - - decodedData, _, err := codec.NativeFromBinary(envelope.Payload) - if err != nil { - t.Fatalf("Failed to decode reconstructed Avro data: %v", err) - } - - // Verify data integrity - decodedMap := decodedData.(map[string]interface{}) - if decodedMap["id"] != int32(67890) { - t.Errorf("Expected id=67890, got %v", decodedMap["id"]) - } - - if decodedMap["name"] != "Bob Smith" { - t.Errorf("Expected name='Bob Smith', got %v", decodedMap["name"]) - } - - t.Logf("Successfully reconstructed consumer message: %d bytes", len(reconstructedMsg)) - }) - - // Test 3: Round-trip integrity - t.Run("Round_Trip_Integrity", func(t *testing.T) { - originalData := map[string]interface{}{ - "id": int32(99999), - "name": "Charlie Brown", - "email": map[string]interface{}{"string": "charlie@example.com"}, - "age": map[string]interface{}{"int": int32(42)}, // Avro union - "preferences": map[string]interface{}{ - "Preferences": map[string]interface{}{ // Avro union with record type - "notifications": true, - "theme": "dark", - }, - }, - } - - // Encode -> Decode -> Encode -> Decode - avroSchema := getUserAvroSchema() - codec, _ := goavro.NewCodec(avroSchema) - - // Step 1: Original -> Confluent - avroBinary, _ := codec.BinaryFromNative(nil, originalData) - confluentMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - // Step 2: Confluent -> RecordValue - decodedMsg, _ := manager.DecodeMessage(confluentMsg) - - // Step 3: RecordValue -> Confluent - reconstructedMsg, encodeErr := manager.EncodeMessage(decodedMsg.RecordValue, 1, FormatAvro) - if encodeErr != nil { - t.Fatalf("Failed to encode message: %v", encodeErr) - } - - // Verify the reconstructed message is valid - if len(reconstructedMsg) == 0 { - t.Fatal("Reconstructed message is empty") - } - - // Step 4: Confluent -> Verify - finalDecodedMsg, err := manager.DecodeMessage(reconstructedMsg) - if err != nil { - // Debug: Check if the reconstructed message is properly formatted - envelope, ok := ParseConfluentEnvelope(reconstructedMsg) - if !ok { - t.Fatalf("Round-trip failed: reconstructed message is not a valid Confluent envelope") - } - t.Logf("Debug: Envelope SchemaID=%d, Format=%v, PayloadLen=%d", - envelope.SchemaID, envelope.Format, len(envelope.Payload)) - t.Fatalf("Round-trip failed: %v", err) - } - - // Verify data integrity through complete round-trip - finalFields := finalDecodedMsg.RecordValue.Fields - if finalFields["id"].GetInt32Value() != 99999 { - t.Error("Round-trip failed for id field") - } - - if finalFields["name"].GetStringValue() != "Charlie Brown" { - t.Error("Round-trip failed for name field") - } - - t.Log("Round-trip integrity test passed") - }) -} - -// TestFullIntegration_MultiFormatSupport tests all schema formats together -func TestFullIntegration_MultiFormatSupport(t *testing.T) { - server := createMockSchemaRegistry(t) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - testCases := []struct { - name string - format Format - schemaID uint32 - testData interface{} - }{ - { - name: "Avro_Format", - format: FormatAvro, - schemaID: 1, - testData: map[string]interface{}{ - "id": int32(123), - "name": "Avro User", - }, - }, - { - name: "JSON_Schema_Format", - format: FormatJSONSchema, - schemaID: 3, - testData: map[string]interface{}{ - "id": float64(456), // JSON numbers are float64 - "name": "JSON User", - "active": true, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create RecordValue from test data - recordValue := MapToRecordValue(tc.testData.(map[string]interface{})) - - // Test encoding - encoded, err := manager.EncodeMessage(recordValue, tc.schemaID, tc.format) - if err != nil { - if tc.format == FormatProtobuf { - // Protobuf encoding may fail due to incomplete implementation - t.Skipf("Protobuf encoding not fully implemented: %v", err) - } else { - t.Fatalf("Failed to encode %s message: %v", tc.name, err) - } - } - - // Test decoding - decoded, err := manager.DecodeMessage(encoded) - if err != nil { - t.Fatalf("Failed to decode %s message: %v", tc.name, err) - } - - // Verify format - if decoded.SchemaFormat != tc.format { - t.Errorf("Expected format %v, got %v", tc.format, decoded.SchemaFormat) - } - - // Verify schema ID - if decoded.SchemaID != tc.schemaID { - t.Errorf("Expected schema ID %d, got %d", tc.schemaID, decoded.SchemaID) - } - - t.Logf("Successfully processed %s format", tc.name) - }) - } -} - -// TestIntegration_CachePerformance tests caching behavior under load -func TestIntegration_CachePerformance(t *testing.T) { - server := createMockSchemaRegistry(t) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Create test message - testData := map[string]interface{}{ - "id": int32(1), - "name": "Cache Test", - } - - avroSchema := getUserAvroSchema() - codec, _ := goavro.NewCodec(avroSchema) - avroBinary, _ := codec.BinaryFromNative(nil, testData) - testMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - // First decode (should hit registry) - start := time.Now() - _, err = manager.DecodeMessage(testMsg) - if err != nil { - t.Fatalf("First decode failed: %v", err) - } - firstDuration := time.Since(start) - - // Subsequent decodes (should hit cache) - start = time.Now() - for i := 0; i < 100; i++ { - _, err = manager.DecodeMessage(testMsg) - if err != nil { - t.Fatalf("Cached decode failed: %v", err) - } - } - cachedDuration := time.Since(start) - - // Verify cache performance improvement - avgCachedTime := cachedDuration / 100 - if avgCachedTime >= firstDuration { - t.Logf("Warning: Cache may not be effective. First: %v, Avg Cached: %v", - firstDuration, avgCachedTime) - } - - // Check cache stats - decoders, schemas, subjects := manager.GetCacheStats() - if decoders == 0 || schemas == 0 { - t.Error("Expected non-zero cache stats") - } - - t.Logf("Cache performance: First decode: %v, Average cached: %v", - firstDuration, avgCachedTime) - t.Logf("Cache stats: %d decoders, %d schemas, %d subjects", - decoders, schemas, subjects) -} - -// TestIntegration_ErrorHandling tests error scenarios -func TestIntegration_ErrorHandling(t *testing.T) { - server := createMockSchemaRegistry(t) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationStrict, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - testCases := []struct { - name string - message []byte - expectError bool - errorType string - }{ - { - name: "Non_Schematized_Message", - message: []byte("plain text message"), - expectError: true, - errorType: "not schematized", - }, - { - name: "Invalid_Schema_ID", - message: CreateConfluentEnvelope(FormatAvro, 999, nil, []byte("payload")), - expectError: true, - errorType: "schema not found", - }, - { - name: "Empty_Payload", - message: CreateConfluentEnvelope(FormatAvro, 1, nil, []byte{}), - expectError: true, - errorType: "empty payload", - }, - { - name: "Corrupted_Avro_Data", - message: CreateConfluentEnvelope(FormatAvro, 1, nil, []byte("invalid avro")), - expectError: true, - errorType: "decode failed", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - _, err := manager.DecodeMessage(tc.message) - - if (err != nil) != tc.expectError { - t.Errorf("Expected error: %v, got error: %v", tc.expectError, err != nil) - } - - if tc.expectError && err != nil { - t.Logf("Expected error occurred: %v", err) - } - }) - } -} - -// TestIntegration_SchemaEvolution tests schema evolution scenarios -func TestIntegration_SchemaEvolution(t *testing.T) { - server := createMockSchemaRegistryWithEvolution(t) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Test decoding messages with different schema versions - t.Run("Schema_V1_Message", func(t *testing.T) { - // Create message with schema v1 (basic user) - userData := map[string]interface{}{ - "id": int32(1), - "name": "User V1", - } - - avroSchema := getUserAvroSchemaV1() - codec, _ := goavro.NewCodec(avroSchema) - avroBinary, _ := codec.BinaryFromNative(nil, userData) - msg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - decoded, err := manager.DecodeMessage(msg) - if err != nil { - t.Fatalf("Failed to decode v1 message: %v", err) - } - - if decoded.Version != 1 { - t.Errorf("Expected version 1, got %d", decoded.Version) - } - }) - - t.Run("Schema_V2_Message", func(t *testing.T) { - // Create message with schema v2 (user with email) - userData := map[string]interface{}{ - "id": int32(2), - "name": "User V2", - "email": map[string]interface{}{"string": "user@example.com"}, - } - - avroSchema := getUserAvroSchemaV2() - codec, _ := goavro.NewCodec(avroSchema) - avroBinary, _ := codec.BinaryFromNative(nil, userData) - msg := CreateConfluentEnvelope(FormatAvro, 2, nil, avroBinary) - - decoded, err := manager.DecodeMessage(msg) - if err != nil { - t.Fatalf("Failed to decode v2 message: %v", err) - } - - if decoded.Version != 2 { - t.Errorf("Expected version 2, got %d", decoded.Version) - } - }) -} - -// Helper functions for creating mock schema registries - -func createMockSchemaRegistry(t *testing.T) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/subjects": - // List subjects - subjects := []string{"user-value", "product-value", "order-value"} - json.NewEncoder(w).Encode(subjects) - - case "/schemas/ids/1": - // Avro user schema - response := map[string]interface{}{ - "schema": getUserAvroSchema(), - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - - case "/schemas/ids/2": - // Protobuf schema (simplified) - response := map[string]interface{}{ - "schema": "syntax = \"proto3\"; message User { int32 id = 1; string name = 2; }", - "subject": "user-value", - "version": 2, - } - json.NewEncoder(w).Encode(response) - - case "/schemas/ids/3": - // JSON Schema - response := map[string]interface{}{ - "schema": getUserJSONSchema(), - "subject": "user-value", - "version": 3, - } - json.NewEncoder(w).Encode(response) - - default: - w.WriteHeader(http.StatusNotFound) - } - })) -} - -func createMockSchemaRegistryWithEvolution(t *testing.T) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/schemas/ids/1": - // Schema v1 - response := map[string]interface{}{ - "schema": getUserAvroSchemaV1(), - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - - case "/schemas/ids/2": - // Schema v2 (evolved) - response := map[string]interface{}{ - "schema": getUserAvroSchemaV2(), - "subject": "user-value", - "version": 2, - } - json.NewEncoder(w).Encode(response) - - default: - w.WriteHeader(http.StatusNotFound) - } - })) -} - -// Schema definitions for testing - -func getUserAvroSchema() string { - return `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null}, - {"name": "age", "type": ["null", "int"], "default": null}, - {"name": "preferences", "type": ["null", { - "type": "record", - "name": "Preferences", - "fields": [ - {"name": "notifications", "type": "boolean", "default": true}, - {"name": "theme", "type": "string", "default": "light"} - ] - }], "default": null} - ] - }` -} - -func getUserAvroSchemaV1() string { - return `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` -} - -func getUserAvroSchemaV2() string { - return `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": ["null", "string"], "default": null} - ] - }` -} - -func getUserJSONSchema() string { - return `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "active": {"type": "boolean"} - }, - "required": ["id", "name"] - }` -} - -// Benchmark tests for integration scenarios - -func BenchmarkIntegration_AvroDecoding(b *testing.B) { - server := createMockSchemaRegistry(nil) - defer server.Close() - - config := ManagerConfig{RegistryURL: server.URL} - manager, _ := NewManager(config) - - // Create test message - testData := map[string]interface{}{ - "id": int32(1), - "name": "Benchmark User", - } - - avroSchema := getUserAvroSchema() - codec, _ := goavro.NewCodec(avroSchema) - avroBinary, _ := codec.BinaryFromNative(nil, testData) - testMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = manager.DecodeMessage(testMsg) - } -} - -func BenchmarkIntegration_JSONSchemaDecoding(b *testing.B) { - server := createMockSchemaRegistry(nil) - defer server.Close() - - config := ManagerConfig{RegistryURL: server.URL} - manager, _ := NewManager(config) - - // Create test message - jsonData := []byte(`{"id": 1, "name": "Benchmark User", "active": true}`) - testMsg := CreateConfluentEnvelope(FormatJSONSchema, 3, nil, jsonData) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = manager.DecodeMessage(testMsg) - } -} diff --git a/weed/mq/kafka/schema/json_schema_decoder.go b/weed/mq/kafka/schema/json_schema_decoder.go deleted file mode 100644 index 7c5caec3c..000000000 --- a/weed/mq/kafka/schema/json_schema_decoder.go +++ /dev/null @@ -1,506 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/xeipuuv/gojsonschema" -) - -// JSONSchemaDecoder handles JSON Schema validation and conversion to SeaweedMQ format -type JSONSchemaDecoder struct { - schema *gojsonschema.Schema - schemaDoc map[string]interface{} // Parsed schema document for type inference - schemaJSON string // Original schema JSON -} - -// NewJSONSchemaDecoder creates a new JSON Schema decoder from a schema string -func NewJSONSchemaDecoder(schemaJSON string) (*JSONSchemaDecoder, error) { - // Parse the schema JSON - var schemaDoc map[string]interface{} - if err := json.Unmarshal([]byte(schemaJSON), &schemaDoc); err != nil { - return nil, fmt.Errorf("failed to parse JSON schema: %w", err) - } - - // Create JSON Schema validator - schemaLoader := gojsonschema.NewStringLoader(schemaJSON) - schema, err := gojsonschema.NewSchema(schemaLoader) - if err != nil { - return nil, fmt.Errorf("failed to create JSON schema validator: %w", err) - } - - return &JSONSchemaDecoder{ - schema: schema, - schemaDoc: schemaDoc, - schemaJSON: schemaJSON, - }, nil -} - -// Decode decodes and validates JSON data against the schema, returning a Go map -// Uses json.Number to preserve integer precision (important for large int64 like timestamps) -func (jsd *JSONSchemaDecoder) Decode(data []byte) (map[string]interface{}, error) { - // Parse JSON data with Number support to preserve large integers - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.UseNumber() - - var jsonData interface{} - if err := decoder.Decode(&jsonData); err != nil { - return nil, fmt.Errorf("failed to parse JSON data: %w", err) - } - - // Validate against schema - documentLoader := gojsonschema.NewGoLoader(jsonData) - result, err := jsd.schema.Validate(documentLoader) - if err != nil { - return nil, fmt.Errorf("failed to validate JSON data: %w", err) - } - - if !result.Valid() { - // Collect validation errors - var errorMsgs []string - for _, desc := range result.Errors() { - errorMsgs = append(errorMsgs, desc.String()) - } - return nil, fmt.Errorf("JSON data validation failed: %v", errorMsgs) - } - - // Convert to map[string]interface{} for consistency - switch v := jsonData.(type) { - case map[string]interface{}: - return v, nil - case []interface{}: - // Handle array at root level by wrapping in a map - return map[string]interface{}{"items": v}, nil - default: - // Handle primitive values at root level - return map[string]interface{}{"value": v}, nil - } -} - -// DecodeToRecordValue decodes JSON data directly to SeaweedMQ RecordValue -// Preserves large integers (like nanosecond timestamps) with full precision -func (jsd *JSONSchemaDecoder) DecodeToRecordValue(data []byte) (*schema_pb.RecordValue, error) { - // Decode with json.Number for precision - jsonMap, err := jsd.Decode(data) - if err != nil { - return nil, err - } - - // Convert with schema-aware type conversion - return jsd.mapToRecordValueWithSchema(jsonMap), nil -} - -// mapToRecordValueWithSchema converts a map to RecordValue using schema type information -func (jsd *JSONSchemaDecoder) mapToRecordValueWithSchema(m map[string]interface{}) *schema_pb.RecordValue { - fields := make(map[string]*schema_pb.Value) - properties, _ := jsd.schemaDoc["properties"].(map[string]interface{}) - - for key, value := range m { - // Check if we have schema information for this field - if fieldSchema, exists := properties[key]; exists { - if fieldSchemaMap, ok := fieldSchema.(map[string]interface{}); ok { - fields[key] = jsd.goValueToSchemaValueWithType(value, fieldSchemaMap) - continue - } - } - // Fallback to default conversion - fields[key] = goValueToSchemaValue(value) - } - - return &schema_pb.RecordValue{ - Fields: fields, - } -} - -// goValueToSchemaValueWithType converts a Go value to SchemaValue using schema type hints -func (jsd *JSONSchemaDecoder) goValueToSchemaValueWithType(value interface{}, schemaDoc map[string]interface{}) *schema_pb.Value { - if value == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - } - } - - schemaType, _ := schemaDoc["type"].(string) - - // Handle numbers from JSON that should be integers - if schemaType == "integer" { - switch v := value.(type) { - case json.Number: - // Preserve precision by parsing as int64 - if intVal, err := v.Int64(); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: intVal}, - } - } - // Fallback to float conversion if int64 parsing fails - if floatVal, err := v.Float64(); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(floatVal)}, - } - } - case float64: - // JSON unmarshals all numbers as float64, convert to int64 for integer types - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(v)}, - } - case int64: - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: v}, - } - case int: - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(v)}, - } - } - } - - // Handle json.Number for other numeric types - if numVal, ok := value.(json.Number); ok { - // Try int64 first - if intVal, err := numVal.Int64(); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: intVal}, - } - } - // Fallback to float64 - if floatVal, err := numVal.Float64(); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: floatVal}, - } - } - } - - // Handle nested objects - if schemaType == "object" { - if nestedMap, ok := value.(map[string]interface{}); ok { - nestedProperties, _ := schemaDoc["properties"].(map[string]interface{}) - nestedFields := make(map[string]*schema_pb.Value) - - for key, val := range nestedMap { - if fieldSchema, exists := nestedProperties[key]; exists { - if fieldSchemaMap, ok := fieldSchema.(map[string]interface{}); ok { - nestedFields[key] = jsd.goValueToSchemaValueWithType(val, fieldSchemaMap) - continue - } - } - // Fallback - nestedFields[key] = goValueToSchemaValue(val) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_RecordValue{ - RecordValue: &schema_pb.RecordValue{ - Fields: nestedFields, - }, - }, - } - } - } - - // For other types, use default conversion - return goValueToSchemaValue(value) -} - -// InferRecordType infers a SeaweedMQ RecordType from the JSON Schema -func (jsd *JSONSchemaDecoder) InferRecordType() (*schema_pb.RecordType, error) { - return jsd.jsonSchemaToRecordType(jsd.schemaDoc), nil -} - -// ValidateOnly validates JSON data against the schema without decoding -func (jsd *JSONSchemaDecoder) ValidateOnly(data []byte) error { - _, err := jsd.Decode(data) - return err -} - -// jsonSchemaToRecordType converts a JSON Schema to SeaweedMQ RecordType -func (jsd *JSONSchemaDecoder) jsonSchemaToRecordType(schemaDoc map[string]interface{}) *schema_pb.RecordType { - schemaType, _ := schemaDoc["type"].(string) - - if schemaType == "object" { - return jsd.objectSchemaToRecordType(schemaDoc) - } - - // For non-object schemas, create a wrapper record - return &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "value", - FieldIndex: 0, - Type: jsd.jsonSchemaTypeToType(schemaDoc), - IsRequired: true, - IsRepeated: false, - }, - }, - } -} - -// objectSchemaToRecordType converts an object JSON Schema to RecordType -func (jsd *JSONSchemaDecoder) objectSchemaToRecordType(schemaDoc map[string]interface{}) *schema_pb.RecordType { - properties, _ := schemaDoc["properties"].(map[string]interface{}) - required, _ := schemaDoc["required"].([]interface{}) - - // Create set of required fields for quick lookup - requiredFields := make(map[string]bool) - for _, req := range required { - if reqStr, ok := req.(string); ok { - requiredFields[reqStr] = true - } - } - - fields := make([]*schema_pb.Field, 0, len(properties)) - fieldIndex := int32(0) - - for fieldName, fieldSchema := range properties { - fieldSchemaMap, ok := fieldSchema.(map[string]interface{}) - if !ok { - continue - } - - field := &schema_pb.Field{ - Name: fieldName, - FieldIndex: fieldIndex, - Type: jsd.jsonSchemaTypeToType(fieldSchemaMap), - IsRequired: requiredFields[fieldName], - IsRepeated: jsd.isArrayType(fieldSchemaMap), - } - - fields = append(fields, field) - fieldIndex++ - } - - return &schema_pb.RecordType{ - Fields: fields, - } -} - -// jsonSchemaTypeToType converts a JSON Schema type to SeaweedMQ Type -func (jsd *JSONSchemaDecoder) jsonSchemaTypeToType(schemaDoc map[string]interface{}) *schema_pb.Type { - schemaType, _ := schemaDoc["type"].(string) - - switch schemaType { - case "boolean": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BOOL, - }, - } - case "integer": - // Check for format hints - format, _ := schemaDoc["format"].(string) - switch format { - case "int32": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - } - default: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - } - } - case "number": - // Check for format hints - format, _ := schemaDoc["format"].(string) - switch format { - case "float": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_FLOAT, - }, - } - default: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_DOUBLE, - }, - } - } - case "string": - // Check for format hints - format, _ := schemaDoc["format"].(string) - switch format { - case "date-time": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_TIMESTAMP, - }, - } - case "byte", "binary": - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - } - default: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } - case "array": - items, _ := schemaDoc["items"].(map[string]interface{}) - elementType := jsd.jsonSchemaTypeToType(items) - return &schema_pb.Type{ - Kind: &schema_pb.Type_ListType{ - ListType: &schema_pb.ListType{ - ElementType: elementType, - }, - }, - } - case "object": - nestedRecordType := jsd.objectSchemaToRecordType(schemaDoc) - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: nestedRecordType, - }, - } - default: - // Handle union types (oneOf, anyOf, allOf) - if oneOf, exists := schemaDoc["oneOf"].([]interface{}); exists && len(oneOf) > 0 { - // For unions, use the first type as default - if firstType, ok := oneOf[0].(map[string]interface{}); ok { - return jsd.jsonSchemaTypeToType(firstType) - } - } - - // Default to string for unknown types - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } -} - -// isArrayType checks if a JSON Schema represents an array type -func (jsd *JSONSchemaDecoder) isArrayType(schemaDoc map[string]interface{}) bool { - schemaType, _ := schemaDoc["type"].(string) - return schemaType == "array" -} - -// EncodeFromRecordValue encodes a RecordValue back to JSON format -func (jsd *JSONSchemaDecoder) EncodeFromRecordValue(recordValue *schema_pb.RecordValue) ([]byte, error) { - // Convert RecordValue back to Go map - goMap := recordValueToMap(recordValue) - - // Encode to JSON - jsonData, err := json.Marshal(goMap) - if err != nil { - return nil, fmt.Errorf("failed to encode to JSON: %w", err) - } - - // Validate the generated JSON against the schema - if err := jsd.ValidateOnly(jsonData); err != nil { - return nil, fmt.Errorf("generated JSON failed schema validation: %w", err) - } - - return jsonData, nil -} - -// GetSchemaInfo returns information about the JSON Schema -func (jsd *JSONSchemaDecoder) GetSchemaInfo() map[string]interface{} { - info := make(map[string]interface{}) - - if title, exists := jsd.schemaDoc["title"]; exists { - info["title"] = title - } - - if description, exists := jsd.schemaDoc["description"]; exists { - info["description"] = description - } - - if schemaVersion, exists := jsd.schemaDoc["$schema"]; exists { - info["schema_version"] = schemaVersion - } - - if schemaType, exists := jsd.schemaDoc["type"]; exists { - info["type"] = schemaType - } - - return info -} - -// Enhanced JSON value conversion with better type handling -func (jsd *JSONSchemaDecoder) convertJSONValue(value interface{}, expectedType string) interface{} { - if value == nil { - return nil - } - - switch expectedType { - case "integer": - switch v := value.(type) { - case float64: - return int64(v) - case string: - if i, err := strconv.ParseInt(v, 10, 64); err == nil { - return i - } - } - case "number": - switch v := value.(type) { - case string: - if f, err := strconv.ParseFloat(v, 64); err == nil { - return f - } - } - case "boolean": - switch v := value.(type) { - case string: - if b, err := strconv.ParseBool(v); err == nil { - return b - } - } - case "string": - // Handle date-time format conversion - if str, ok := value.(string); ok { - // Try to parse as RFC3339 timestamp - if t, err := time.Parse(time.RFC3339, str); err == nil { - return t - } - } - } - - return value -} - -// ValidateAndNormalize validates JSON data and normalizes types according to schema -func (jsd *JSONSchemaDecoder) ValidateAndNormalize(data []byte) ([]byte, error) { - // First decode normally - jsonMap, err := jsd.Decode(data) - if err != nil { - return nil, err - } - - // Normalize types based on schema - normalized := jsd.normalizeMapTypes(jsonMap, jsd.schemaDoc) - - // Re-encode with normalized types - return json.Marshal(normalized) -} - -// normalizeMapTypes normalizes map values according to JSON Schema types -func (jsd *JSONSchemaDecoder) normalizeMapTypes(data map[string]interface{}, schemaDoc map[string]interface{}) map[string]interface{} { - properties, _ := schemaDoc["properties"].(map[string]interface{}) - result := make(map[string]interface{}) - - for key, value := range data { - if fieldSchema, exists := properties[key]; exists { - if fieldSchemaMap, ok := fieldSchema.(map[string]interface{}); ok { - fieldType, _ := fieldSchemaMap["type"].(string) - result[key] = jsd.convertJSONValue(value, fieldType) - continue - } - } - result[key] = value - } - - return result -} diff --git a/weed/mq/kafka/schema/json_schema_decoder_test.go b/weed/mq/kafka/schema/json_schema_decoder_test.go deleted file mode 100644 index 28f762757..000000000 --- a/weed/mq/kafka/schema/json_schema_decoder_test.go +++ /dev/null @@ -1,544 +0,0 @@ -package schema - -import ( - "encoding/json" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestNewJSONSchemaDecoder(t *testing.T) { - tests := []struct { - name string - schema string - expectErr bool - }{ - { - name: "valid object schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "active": {"type": "boolean"} - }, - "required": ["id", "name"] - }`, - expectErr: false, - }, - { - name: "valid array schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "array", - "items": { - "type": "string" - } - }`, - expectErr: false, - }, - { - name: "valid string schema with format", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "string", - "format": "date-time" - }`, - expectErr: false, - }, - { - name: "invalid JSON", - schema: `{"invalid": json}`, - expectErr: true, - }, - { - name: "empty schema", - schema: "", - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - decoder, err := NewJSONSchemaDecoder(tt.schema) - - if (err != nil) != tt.expectErr { - t.Errorf("NewJSONSchemaDecoder() error = %v, expectErr %v", err, tt.expectErr) - return - } - - if !tt.expectErr && decoder == nil { - t.Error("Expected non-nil decoder for valid schema") - } - }) - } -} - -func TestJSONSchemaDecoder_Decode(t *testing.T) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "email": {"type": "string", "format": "email"}, - "age": {"type": "integer", "minimum": 0}, - "active": {"type": "boolean"} - }, - "required": ["id", "name"] - }` - - decoder, err := NewJSONSchemaDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - tests := []struct { - name string - jsonData string - expectErr bool - }{ - { - name: "valid complete data", - jsonData: `{ - "id": 123, - "name": "John Doe", - "email": "john@example.com", - "age": 30, - "active": true - }`, - expectErr: false, - }, - { - name: "valid minimal data", - jsonData: `{ - "id": 456, - "name": "Jane Smith" - }`, - expectErr: false, - }, - { - name: "missing required field", - jsonData: `{ - "name": "Missing ID" - }`, - expectErr: true, - }, - { - name: "invalid type", - jsonData: `{ - "id": "not-a-number", - "name": "John Doe" - }`, - expectErr: true, - }, - { - name: "invalid email format", - jsonData: `{ - "id": 123, - "name": "John Doe", - "email": "not-an-email" - }`, - expectErr: true, - }, - { - name: "negative age", - jsonData: `{ - "id": 123, - "name": "John Doe", - "age": -5 - }`, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := decoder.Decode([]byte(tt.jsonData)) - - if (err != nil) != tt.expectErr { - t.Errorf("Decode() error = %v, expectErr %v", err, tt.expectErr) - return - } - - if !tt.expectErr { - if result == nil { - t.Error("Expected non-nil result for valid data") - } - - // Verify some basic fields - if id, exists := result["id"]; exists { - // Numbers are now json.Number for precision - if _, ok := id.(json.Number); !ok { - t.Errorf("Expected id to be json.Number, got %T", id) - } - } - - if name, exists := result["name"]; exists { - if _, ok := name.(string); !ok { - t.Errorf("Expected name to be string, got %T", name) - } - } - } - }) - } -} - -func TestJSONSchemaDecoder_DecodeToRecordValue(t *testing.T) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "tags": { - "type": "array", - "items": {"type": "string"} - } - } - }` - - decoder, err := NewJSONSchemaDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - jsonData := `{ - "id": 789, - "name": "Test User", - "tags": ["tag1", "tag2", "tag3"] - }` - - recordValue, err := decoder.DecodeToRecordValue([]byte(jsonData)) - if err != nil { - t.Fatalf("Failed to decode to RecordValue: %v", err) - } - - // Verify RecordValue structure - if recordValue.Fields == nil { - t.Fatal("Expected non-nil fields") - } - - // Check id field - idValue := recordValue.Fields["id"] - if idValue == nil { - t.Fatal("Expected id field") - } - // JSON numbers are decoded as float64 by default - // The MapToRecordValue function should handle this conversion - expectedID := int64(789) - actualID := idValue.GetInt64Value() - if actualID != expectedID { - // Try checking if it was stored as float64 instead - if floatVal := idValue.GetDoubleValue(); floatVal == 789.0 { - t.Logf("ID was stored as float64: %v", floatVal) - } else { - t.Errorf("Expected id=789, got int64=%v, float64=%v", actualID, floatVal) - } - } - - // Check name field - nameValue := recordValue.Fields["name"] - if nameValue == nil { - t.Fatal("Expected name field") - } - if nameValue.GetStringValue() != "Test User" { - t.Errorf("Expected name='Test User', got %v", nameValue.GetStringValue()) - } - - // Check tags array - tagsValue := recordValue.Fields["tags"] - if tagsValue == nil { - t.Fatal("Expected tags field") - } - tagsList := tagsValue.GetListValue() - if tagsList == nil || len(tagsList.Values) != 3 { - t.Errorf("Expected tags array with 3 elements, got %v", tagsList) - } -} - -func TestJSONSchemaDecoder_InferRecordType(t *testing.T) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer", "format": "int32"}, - "name": {"type": "string"}, - "score": {"type": "number", "format": "float"}, - "timestamp": {"type": "string", "format": "date-time"}, - "data": {"type": "string", "format": "byte"}, - "active": {"type": "boolean"}, - "tags": { - "type": "array", - "items": {"type": "string"} - }, - "metadata": { - "type": "object", - "properties": { - "source": {"type": "string"} - } - } - }, - "required": ["id", "name"] - }` - - decoder, err := NewJSONSchemaDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - recordType, err := decoder.InferRecordType() - if err != nil { - t.Fatalf("Failed to infer RecordType: %v", err) - } - - if len(recordType.Fields) != 8 { - t.Errorf("Expected 8 fields, got %d", len(recordType.Fields)) - } - - // Create a map for easier field lookup - fieldMap := make(map[string]*schema_pb.Field) - for _, field := range recordType.Fields { - fieldMap[field.Name] = field - } - - // Test specific field types - if fieldMap["id"].Type.GetScalarType() != schema_pb.ScalarType_INT32 { - t.Error("Expected id field to be INT32") - } - - if fieldMap["name"].Type.GetScalarType() != schema_pb.ScalarType_STRING { - t.Error("Expected name field to be STRING") - } - - if fieldMap["score"].Type.GetScalarType() != schema_pb.ScalarType_FLOAT { - t.Error("Expected score field to be FLOAT") - } - - if fieldMap["timestamp"].Type.GetScalarType() != schema_pb.ScalarType_TIMESTAMP { - t.Error("Expected timestamp field to be TIMESTAMP") - } - - if fieldMap["data"].Type.GetScalarType() != schema_pb.ScalarType_BYTES { - t.Error("Expected data field to be BYTES") - } - - if fieldMap["active"].Type.GetScalarType() != schema_pb.ScalarType_BOOL { - t.Error("Expected active field to be BOOL") - } - - // Test array field - if fieldMap["tags"].Type.GetListType() == nil { - t.Error("Expected tags field to be LIST") - } - - // Test nested object field - if fieldMap["metadata"].Type.GetRecordType() == nil { - t.Error("Expected metadata field to be RECORD") - } - - // Test required fields - if !fieldMap["id"].IsRequired { - t.Error("Expected id field to be required") - } - - if !fieldMap["name"].IsRequired { - t.Error("Expected name field to be required") - } - - if fieldMap["active"].IsRequired { - t.Error("Expected active field to be optional") - } -} - -func TestJSONSchemaDecoder_EncodeFromRecordValue(t *testing.T) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "active": {"type": "boolean"} - }, - "required": ["id", "name"] - }` - - decoder, err := NewJSONSchemaDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - // Create test RecordValue - testMap := map[string]interface{}{ - "id": int64(123), - "name": "Test User", - "active": true, - } - recordValue := MapToRecordValue(testMap) - - // Encode back to JSON - jsonData, err := decoder.EncodeFromRecordValue(recordValue) - if err != nil { - t.Fatalf("Failed to encode RecordValue: %v", err) - } - - // Verify the JSON is valid and contains expected data - var result map[string]interface{} - if err := json.Unmarshal(jsonData, &result); err != nil { - t.Fatalf("Failed to parse generated JSON: %v", err) - } - - if result["id"] != float64(123) { // JSON numbers are float64 - t.Errorf("Expected id=123, got %v", result["id"]) - } - - if result["name"] != "Test User" { - t.Errorf("Expected name='Test User', got %v", result["name"]) - } - - if result["active"] != true { - t.Errorf("Expected active=true, got %v", result["active"]) - } -} - -func TestJSONSchemaDecoder_ArrayAndPrimitiveSchemas(t *testing.T) { - tests := []struct { - name string - schema string - jsonData string - expectOK bool - }{ - { - name: "array schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "array", - "items": {"type": "string"} - }`, - jsonData: `["item1", "item2", "item3"]`, - expectOK: true, - }, - { - name: "string schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "string" - }`, - jsonData: `"hello world"`, - expectOK: true, - }, - { - name: "number schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "number" - }`, - jsonData: `42.5`, - expectOK: true, - }, - { - name: "boolean schema", - schema: `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "boolean" - }`, - jsonData: `true`, - expectOK: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - decoder, err := NewJSONSchemaDecoder(tt.schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - result, err := decoder.Decode([]byte(tt.jsonData)) - - if (err == nil) != tt.expectOK { - t.Errorf("Decode() error = %v, expectOK %v", err, tt.expectOK) - return - } - - if tt.expectOK && result == nil { - t.Error("Expected non-nil result for valid data") - } - }) - } -} - -func TestJSONSchemaDecoder_GetSchemaInfo(t *testing.T) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "User Schema", - "description": "A schema for user objects", - "type": "object", - "properties": { - "id": {"type": "integer"} - } - }` - - decoder, err := NewJSONSchemaDecoder(schema) - if err != nil { - t.Fatalf("Failed to create decoder: %v", err) - } - - info := decoder.GetSchemaInfo() - - if info["title"] != "User Schema" { - t.Errorf("Expected title='User Schema', got %v", info["title"]) - } - - if info["description"] != "A schema for user objects" { - t.Errorf("Expected description='A schema for user objects', got %v", info["description"]) - } - - if info["schema_version"] != "http://json-schema.org/draft-07/schema#" { - t.Errorf("Expected schema_version='http://json-schema.org/draft-07/schema#', got %v", info["schema_version"]) - } - - if info["type"] != "object" { - t.Errorf("Expected type='object', got %v", info["type"]) - } -} - -// Benchmark tests -func BenchmarkJSONSchemaDecoder_Decode(b *testing.B) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - } - }` - - decoder, _ := NewJSONSchemaDecoder(schema) - jsonData := []byte(`{"id": 123, "name": "John Doe"}`) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = decoder.Decode(jsonData) - } -} - -func BenchmarkJSONSchemaDecoder_DecodeToRecordValue(b *testing.B) { - schema := `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - } - }` - - decoder, _ := NewJSONSchemaDecoder(schema) - jsonData := []byte(`{"id": 123, "name": "John Doe"}`) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = decoder.DecodeToRecordValue(jsonData) - } -} diff --git a/weed/mq/kafka/schema/loadtest_decode_test.go b/weed/mq/kafka/schema/loadtest_decode_test.go deleted file mode 100644 index de94f8cb3..000000000 --- a/weed/mq/kafka/schema/loadtest_decode_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package schema - -import ( - "encoding/binary" - "encoding/json" - "testing" - "time" - - "github.com/linkedin/goavro/v2" - schema_pb "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// LoadTestMessage represents the test message structure -type LoadTestMessage struct { - ID string `json:"id"` - Timestamp int64 `json:"timestamp"` - ProducerID int `json:"producer_id"` - Counter int64 `json:"counter"` - UserID string `json:"user_id"` - EventType string `json:"event_type"` - Properties map[string]string `json:"properties"` -} - -const ( - // LoadTest schemas matching the loadtest client - loadTestAvroSchema = `{ - "type": "record", - "name": "LoadTestMessage", - "namespace": "com.seaweedfs.loadtest", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "timestamp", "type": "long"}, - {"name": "producer_id", "type": "int"}, - {"name": "counter", "type": "long"}, - {"name": "user_id", "type": "string"}, - {"name": "event_type", "type": "string"}, - {"name": "properties", "type": {"type": "map", "values": "string"}} - ] - }` - - loadTestJSONSchema = `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "LoadTestMessage", - "type": "object", - "properties": { - "id": {"type": "string"}, - "timestamp": {"type": "integer"}, - "producer_id": {"type": "integer"}, - "counter": {"type": "integer"}, - "user_id": {"type": "string"}, - "event_type": {"type": "string"}, - "properties": { - "type": "object", - "additionalProperties": {"type": "string"} - } - }, - "required": ["id", "timestamp", "producer_id", "counter", "user_id", "event_type"] - }` - - loadTestProtobufSchema = `syntax = "proto3"; - -package com.seaweedfs.loadtest; - -message LoadTestMessage { - string id = 1; - int64 timestamp = 2; - int32 producer_id = 3; - int64 counter = 4; - string user_id = 5; - string event_type = 6; - map properties = 7; -}` -) - -// createTestMessage creates a sample load test message -func createTestMessage() *LoadTestMessage { - return &LoadTestMessage{ - ID: "msg-test-123", - Timestamp: time.Now().UnixNano(), - ProducerID: 0, - Counter: 42, - UserID: "user-789", - EventType: "click", - Properties: map[string]string{ - "browser": "chrome", - "version": "1.0", - }, - } -} - -// createConfluentWireFormat wraps payload with Confluent wire format -func createConfluentWireFormat(schemaID uint32, payload []byte) []byte { - wireFormat := make([]byte, 5+len(payload)) - wireFormat[0] = 0x00 // Magic byte - binary.BigEndian.PutUint32(wireFormat[1:5], schemaID) - copy(wireFormat[5:], payload) - return wireFormat -} - -// TestAvroLoadTestDecoding tests Avro decoding with load test schema -func TestAvroLoadTestDecoding(t *testing.T) { - msg := createTestMessage() - - // Create Avro codec - codec, err := goavro.NewCodec(loadTestAvroSchema) - if err != nil { - t.Fatalf("Failed to create Avro codec: %v", err) - } - - // Convert message to map for Avro encoding - msgMap := map[string]interface{}{ - "id": msg.ID, - "timestamp": msg.Timestamp, - "producer_id": int32(msg.ProducerID), // Avro uses int32 for "int" - "counter": msg.Counter, - "user_id": msg.UserID, - "event_type": msg.EventType, - "properties": msg.Properties, - } - - // Encode as Avro binary - avroBytes, err := codec.BinaryFromNative(nil, msgMap) - if err != nil { - t.Fatalf("Failed to encode Avro message: %v", err) - } - - t.Logf("Avro encoded size: %d bytes", len(avroBytes)) - - // Wrap in Confluent wire format - schemaID := uint32(1) - wireFormat := createConfluentWireFormat(schemaID, avroBytes) - - t.Logf("Confluent wire format size: %d bytes", len(wireFormat)) - - // Parse envelope - envelope, ok := ParseConfluentEnvelope(wireFormat) - if !ok { - t.Fatalf("Failed to parse Confluent envelope") - } - - if envelope.SchemaID != schemaID { - t.Errorf("Expected schema ID %d, got %d", schemaID, envelope.SchemaID) - } - - // Create decoder - decoder, err := NewAvroDecoder(loadTestAvroSchema) - if err != nil { - t.Fatalf("Failed to create Avro decoder: %v", err) - } - - // Decode - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - t.Fatalf("Failed to decode Avro message: %v", err) - } - - // Verify fields - if recordValue.Fields == nil { - t.Fatal("RecordValue fields is nil") - } - - // Check specific fields - verifyField(t, recordValue, "id", msg.ID) - verifyField(t, recordValue, "timestamp", msg.Timestamp) - verifyField(t, recordValue, "producer_id", int64(msg.ProducerID)) - verifyField(t, recordValue, "counter", msg.Counter) - verifyField(t, recordValue, "user_id", msg.UserID) - verifyField(t, recordValue, "event_type", msg.EventType) - - t.Logf("โœ… Avro decoding successful: %d fields", len(recordValue.Fields)) -} - -// TestJSONSchemaLoadTestDecoding tests JSON Schema decoding with load test schema -func TestJSONSchemaLoadTestDecoding(t *testing.T) { - msg := createTestMessage() - - // Encode as JSON - jsonBytes, err := json.Marshal(msg) - if err != nil { - t.Fatalf("Failed to encode JSON message: %v", err) - } - - t.Logf("JSON encoded size: %d bytes", len(jsonBytes)) - t.Logf("JSON content: %s", string(jsonBytes)) - - // Wrap in Confluent wire format - schemaID := uint32(3) - wireFormat := createConfluentWireFormat(schemaID, jsonBytes) - - t.Logf("Confluent wire format size: %d bytes", len(wireFormat)) - - // Parse envelope - envelope, ok := ParseConfluentEnvelope(wireFormat) - if !ok { - t.Fatalf("Failed to parse Confluent envelope") - } - - if envelope.SchemaID != schemaID { - t.Errorf("Expected schema ID %d, got %d", schemaID, envelope.SchemaID) - } - - // Create JSON Schema decoder - decoder, err := NewJSONSchemaDecoder(loadTestJSONSchema) - if err != nil { - t.Fatalf("Failed to create JSON Schema decoder: %v", err) - } - - // Decode - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - t.Fatalf("Failed to decode JSON Schema message: %v", err) - } - - // Verify fields - if recordValue.Fields == nil { - t.Fatal("RecordValue fields is nil") - } - - // Check specific fields - verifyField(t, recordValue, "id", msg.ID) - verifyField(t, recordValue, "timestamp", msg.Timestamp) - verifyField(t, recordValue, "producer_id", int64(msg.ProducerID)) - verifyField(t, recordValue, "counter", msg.Counter) - verifyField(t, recordValue, "user_id", msg.UserID) - verifyField(t, recordValue, "event_type", msg.EventType) - - t.Logf("โœ… JSON Schema decoding successful: %d fields", len(recordValue.Fields)) -} - -// TestProtobufLoadTestDecoding tests Protobuf decoding with load test schema -func TestProtobufLoadTestDecoding(t *testing.T) { - msg := createTestMessage() - - // For Protobuf, we need to first compile the schema and then encode - // For now, let's test JSON encoding with Protobuf schema (common pattern) - jsonBytes, err := json.Marshal(msg) - if err != nil { - t.Fatalf("Failed to encode JSON message: %v", err) - } - - t.Logf("JSON (for Protobuf) encoded size: %d bytes", len(jsonBytes)) - t.Logf("JSON content: %s", string(jsonBytes)) - - // Wrap in Confluent wire format - schemaID := uint32(5) - wireFormat := createConfluentWireFormat(schemaID, jsonBytes) - - t.Logf("Confluent wire format size: %d bytes", len(wireFormat)) - - // Parse envelope - envelope, ok := ParseConfluentEnvelope(wireFormat) - if !ok { - t.Fatalf("Failed to parse Confluent envelope") - } - - if envelope.SchemaID != schemaID { - t.Errorf("Expected schema ID %d, got %d", schemaID, envelope.SchemaID) - } - - // Create Protobuf decoder from text schema - decoder, err := NewProtobufDecoderFromString(loadTestProtobufSchema) - if err != nil { - t.Fatalf("Failed to create Protobuf decoder: %v", err) - } - - // Try to decode - this will likely fail because JSON is not valid Protobuf binary - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - t.Logf("โš ๏ธ Expected failure: Protobuf decoder cannot decode JSON: %v", err) - t.Logf("This confirms the issue: producer sends JSON but gateway expects Protobuf binary") - return - } - - // If we get here, something unexpected happened - t.Logf("Unexpectedly succeeded in decoding JSON as Protobuf") - if recordValue.Fields != nil { - t.Logf("RecordValue has %d fields", len(recordValue.Fields)) - } -} - -// verifyField checks if a field exists in RecordValue with expected value -func verifyField(t *testing.T, rv *schema_pb.RecordValue, fieldName string, expectedValue interface{}) { - field, exists := rv.Fields[fieldName] - if !exists { - t.Errorf("Field '%s' not found in RecordValue", fieldName) - return - } - - switch expected := expectedValue.(type) { - case string: - if field.GetStringValue() != expected { - t.Errorf("Field '%s': expected '%s', got '%s'", fieldName, expected, field.GetStringValue()) - } - case int64: - if field.GetInt64Value() != expected { - t.Errorf("Field '%s': expected %d, got %d", fieldName, expected, field.GetInt64Value()) - } - case int: - if field.GetInt64Value() != int64(expected) { - t.Errorf("Field '%s': expected %d, got %d", fieldName, expected, field.GetInt64Value()) - } - default: - t.Logf("Field '%s' has unexpected type", fieldName) - } -} diff --git a/weed/mq/kafka/schema/manager.go b/weed/mq/kafka/schema/manager.go deleted file mode 100644 index 7006b0322..000000000 --- a/weed/mq/kafka/schema/manager.go +++ /dev/null @@ -1,787 +0,0 @@ -package schema - -import ( - "fmt" - "strings" - "sync" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/dynamicpb" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// Manager coordinates schema operations for the Kafka Gateway -type Manager struct { - registryClient *RegistryClient - - // Decoder cache - avroDecoders map[uint32]*AvroDecoder // schema ID -> decoder - protobufDecoders map[uint32]*ProtobufDecoder // schema ID -> decoder - jsonSchemaDecoders map[uint32]*JSONSchemaDecoder // schema ID -> decoder - decoderMu sync.RWMutex - - // Schema evolution checker - evolutionChecker *SchemaEvolutionChecker - - // Configuration - config ManagerConfig -} - -// ManagerConfig holds configuration for the schema manager -type ManagerConfig struct { - RegistryURL string - RegistryUsername string - RegistryPassword string - CacheTTL string - ValidationMode ValidationMode - EnableMirroring bool - MirrorPath string // Path in SeaweedFS Filer to mirror schemas -} - -// ValidationMode defines how strict schema validation should be -type ValidationMode int - -const ( - ValidationPermissive ValidationMode = iota // Allow unknown fields, best-effort decoding - ValidationStrict // Reject messages that don't match schema exactly -) - -// DecodedMessage represents a decoded Kafka message with schema information -type DecodedMessage struct { - // Original envelope information - Envelope *ConfluentEnvelope - - // Schema information - SchemaID uint32 - SchemaFormat Format - Subject string - Version int - - // Decoded data - RecordValue *schema_pb.RecordValue - RecordType *schema_pb.RecordType - - // Metadata for storage - Metadata map[string]string -} - -// NewManager creates a new schema manager -func NewManager(config ManagerConfig) (*Manager, error) { - registryConfig := RegistryConfig{ - URL: config.RegistryURL, - Username: config.RegistryUsername, - Password: config.RegistryPassword, - } - - registryClient := NewRegistryClient(registryConfig) - - return &Manager{ - registryClient: registryClient, - avroDecoders: make(map[uint32]*AvroDecoder), - protobufDecoders: make(map[uint32]*ProtobufDecoder), - jsonSchemaDecoders: make(map[uint32]*JSONSchemaDecoder), - evolutionChecker: NewSchemaEvolutionChecker(), - config: config, - }, nil -} - -// NewManagerWithHealthCheck creates a new schema manager and validates connectivity -func NewManagerWithHealthCheck(config ManagerConfig) (*Manager, error) { - manager, err := NewManager(config) - if err != nil { - return nil, err - } - - // Test connectivity - if err := manager.registryClient.HealthCheck(); err != nil { - return nil, fmt.Errorf("schema registry health check failed: %w", err) - } - - return manager, nil -} - -// DecodeMessage decodes a Kafka message if it contains schema information -func (m *Manager) DecodeMessage(messageBytes []byte) (*DecodedMessage, error) { - // Step 1: Check if message is schematized - envelope, isSchematized := ParseConfluentEnvelope(messageBytes) - if !isSchematized { - return nil, fmt.Errorf("message is not schematized") - } - - // Step 2: Validate envelope - if err := envelope.Validate(); err != nil { - return nil, fmt.Errorf("invalid envelope: %w", err) - } - - // Step 3: Get schema from registry - cachedSchema, err := m.registryClient.GetSchemaByID(envelope.SchemaID) - if err != nil { - return nil, fmt.Errorf("failed to get schema %d: %w", envelope.SchemaID, err) - } - - // Step 4: Decode based on format - var recordValue *schema_pb.RecordValue - var recordType *schema_pb.RecordType - - switch cachedSchema.Format { - case FormatAvro: - recordValue, recordType, err = m.decodeAvroMessage(envelope, cachedSchema) - if err != nil { - return nil, fmt.Errorf("failed to decode Avro message: %w", err) - } - case FormatProtobuf: - recordValue, recordType, err = m.decodeProtobufMessage(envelope, cachedSchema) - if err != nil { - return nil, fmt.Errorf("failed to decode Protobuf message: %w", err) - } - case FormatJSONSchema: - recordValue, recordType, err = m.decodeJSONSchemaMessage(envelope, cachedSchema) - if err != nil { - return nil, fmt.Errorf("failed to decode JSON Schema message: %w", err) - } - default: - return nil, fmt.Errorf("unsupported schema format: %v", cachedSchema.Format) - } - - // Step 5: Create decoded message - decodedMsg := &DecodedMessage{ - Envelope: envelope, - SchemaID: envelope.SchemaID, - SchemaFormat: cachedSchema.Format, - Subject: cachedSchema.Subject, - Version: cachedSchema.Version, - RecordValue: recordValue, - RecordType: recordType, - Metadata: m.createMetadata(envelope, cachedSchema), - } - - return decodedMsg, nil -} - -// decodeAvroMessage decodes an Avro message using cached or new decoder -func (m *Manager) decodeAvroMessage(envelope *ConfluentEnvelope, cachedSchema *CachedSchema) (*schema_pb.RecordValue, *schema_pb.RecordType, error) { - // Get or create Avro decoder - decoder, err := m.getAvroDecoder(envelope.SchemaID, cachedSchema.Schema) - if err != nil { - return nil, nil, fmt.Errorf("failed to get Avro decoder: %w", err) - } - - // Decode to RecordValue - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - if m.config.ValidationMode == ValidationStrict { - return nil, nil, fmt.Errorf("strict validation failed: %w", err) - } - // In permissive mode, try to decode as much as possible - // For now, return the error - we could implement partial decoding later - return nil, nil, fmt.Errorf("permissive decoding failed: %w", err) - } - - // Infer or get RecordType - recordType, err := decoder.InferRecordType() - if err != nil { - // Fall back to inferring from the decoded map - if decodedMap, decodeErr := decoder.Decode(envelope.Payload); decodeErr == nil { - recordType = InferRecordTypeFromMap(decodedMap) - } else { - return nil, nil, fmt.Errorf("failed to infer record type: %w", err) - } - } - - return recordValue, recordType, nil -} - -// decodeProtobufMessage decodes a Protobuf message using cached or new decoder -func (m *Manager) decodeProtobufMessage(envelope *ConfluentEnvelope, cachedSchema *CachedSchema) (*schema_pb.RecordValue, *schema_pb.RecordType, error) { - // Get or create Protobuf decoder - decoder, err := m.getProtobufDecoder(envelope.SchemaID, cachedSchema.Schema) - if err != nil { - return nil, nil, fmt.Errorf("failed to get Protobuf decoder: %w", err) - } - - // Decode to RecordValue - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - if m.config.ValidationMode == ValidationStrict { - return nil, nil, fmt.Errorf("strict validation failed: %w", err) - } - // In permissive mode, try to decode as much as possible - return nil, nil, fmt.Errorf("permissive decoding failed: %w", err) - } - - // Get RecordType from descriptor - recordType, err := decoder.InferRecordType() - if err != nil { - // Fall back to inferring from the decoded map - if decodedMap, decodeErr := decoder.Decode(envelope.Payload); decodeErr == nil { - recordType = InferRecordTypeFromMap(decodedMap) - } else { - return nil, nil, fmt.Errorf("failed to infer record type: %w", err) - } - } - - return recordValue, recordType, nil -} - -// decodeJSONSchemaMessage decodes a JSON Schema message using cached or new decoder -func (m *Manager) decodeJSONSchemaMessage(envelope *ConfluentEnvelope, cachedSchema *CachedSchema) (*schema_pb.RecordValue, *schema_pb.RecordType, error) { - // Get or create JSON Schema decoder - decoder, err := m.getJSONSchemaDecoder(envelope.SchemaID, cachedSchema.Schema) - if err != nil { - return nil, nil, fmt.Errorf("failed to get JSON Schema decoder: %w", err) - } - - // Decode to RecordValue - recordValue, err := decoder.DecodeToRecordValue(envelope.Payload) - if err != nil { - if m.config.ValidationMode == ValidationStrict { - return nil, nil, fmt.Errorf("strict validation failed: %w", err) - } - // In permissive mode, try to decode as much as possible - return nil, nil, fmt.Errorf("permissive decoding failed: %w", err) - } - - // Get RecordType from schema - recordType, err := decoder.InferRecordType() - if err != nil { - // Fall back to inferring from the decoded map - if decodedMap, decodeErr := decoder.Decode(envelope.Payload); decodeErr == nil { - recordType = InferRecordTypeFromMap(decodedMap) - } else { - return nil, nil, fmt.Errorf("failed to infer record type: %w", err) - } - } - - return recordValue, recordType, nil -} - -// getAvroDecoder gets or creates an Avro decoder for the given schema -func (m *Manager) getAvroDecoder(schemaID uint32, schemaStr string) (*AvroDecoder, error) { - // Check cache first - m.decoderMu.RLock() - if decoder, exists := m.avroDecoders[schemaID]; exists { - m.decoderMu.RUnlock() - return decoder, nil - } - m.decoderMu.RUnlock() - - // Create new decoder - decoder, err := NewAvroDecoder(schemaStr) - if err != nil { - return nil, err - } - - // Cache the decoder - m.decoderMu.Lock() - m.avroDecoders[schemaID] = decoder - m.decoderMu.Unlock() - - return decoder, nil -} - -// getProtobufDecoder gets or creates a Protobuf decoder for the given schema -func (m *Manager) getProtobufDecoder(schemaID uint32, schemaStr string) (*ProtobufDecoder, error) { - // Check cache first - m.decoderMu.RLock() - if decoder, exists := m.protobufDecoders[schemaID]; exists { - m.decoderMu.RUnlock() - return decoder, nil - } - m.decoderMu.RUnlock() - - // In Confluent Schema Registry, Protobuf schemas can be stored as: - // 1. Text .proto format (most common) - // 2. Binary FileDescriptorSet - // Try to detect which format we have - var decoder *ProtobufDecoder - var err error - - // Check if it looks like text .proto (contains "syntax", "message", etc.) - if strings.Contains(schemaStr, "syntax") || strings.Contains(schemaStr, "message") { - // Parse as text .proto - decoder, err = NewProtobufDecoderFromString(schemaStr) - } else { - // Try binary format - schemaBytes := []byte(schemaStr) - decoder, err = NewProtobufDecoder(schemaBytes) - } - - if err != nil { - return nil, err - } - - // Cache the decoder - m.decoderMu.Lock() - m.protobufDecoders[schemaID] = decoder - m.decoderMu.Unlock() - - return decoder, nil -} - -// getJSONSchemaDecoder gets or creates a JSON Schema decoder for the given schema -func (m *Manager) getJSONSchemaDecoder(schemaID uint32, schemaStr string) (*JSONSchemaDecoder, error) { - // Check cache first - m.decoderMu.RLock() - if decoder, exists := m.jsonSchemaDecoders[schemaID]; exists { - m.decoderMu.RUnlock() - return decoder, nil - } - m.decoderMu.RUnlock() - - // Create new decoder - decoder, err := NewJSONSchemaDecoder(schemaStr) - if err != nil { - return nil, err - } - - // Cache the decoder - m.decoderMu.Lock() - m.jsonSchemaDecoders[schemaID] = decoder - m.decoderMu.Unlock() - - return decoder, nil -} - -// createMetadata creates metadata for storage in SeaweedMQ -func (m *Manager) createMetadata(envelope *ConfluentEnvelope, cachedSchema *CachedSchema) map[string]string { - metadata := envelope.Metadata() - - // Add schema registry information - metadata["schema_subject"] = cachedSchema.Subject - metadata["schema_version"] = fmt.Sprintf("%d", cachedSchema.Version) - metadata["registry_url"] = m.registryClient.baseURL - - // Add decoding information - metadata["decoded_at"] = fmt.Sprintf("%d", cachedSchema.CachedAt.Unix()) - metadata["validation_mode"] = fmt.Sprintf("%d", m.config.ValidationMode) - - return metadata -} - -// IsSchematized checks if a message contains schema information -func (m *Manager) IsSchematized(messageBytes []byte) bool { - return IsSchematized(messageBytes) -} - -// GetSchemaInfo extracts basic schema information without full decoding -func (m *Manager) GetSchemaInfo(messageBytes []byte) (uint32, Format, error) { - envelope, ok := ParseConfluentEnvelope(messageBytes) - if !ok { - return 0, FormatUnknown, fmt.Errorf("not a schematized message") - } - - // Get basic schema info from cache or registry - cachedSchema, err := m.registryClient.GetSchemaByID(envelope.SchemaID) - if err != nil { - return 0, FormatUnknown, fmt.Errorf("failed to get schema info: %w", err) - } - - return envelope.SchemaID, cachedSchema.Format, nil -} - -// RegisterSchema registers a new schema with the registry -func (m *Manager) RegisterSchema(subject, schema string) (uint32, error) { - return m.registryClient.RegisterSchema(subject, schema) -} - -// CheckCompatibility checks if a schema is compatible with existing versions -func (m *Manager) CheckCompatibility(subject, schema string) (bool, error) { - return m.registryClient.CheckCompatibility(subject, schema) -} - -// ListSubjects returns all subjects in the registry -func (m *Manager) ListSubjects() ([]string, error) { - return m.registryClient.ListSubjects() -} - -// ClearCache clears all cached decoders and registry data -func (m *Manager) ClearCache() { - m.decoderMu.Lock() - m.avroDecoders = make(map[uint32]*AvroDecoder) - m.protobufDecoders = make(map[uint32]*ProtobufDecoder) - m.jsonSchemaDecoders = make(map[uint32]*JSONSchemaDecoder) - m.decoderMu.Unlock() - - m.registryClient.ClearCache() -} - -// GetCacheStats returns cache statistics -func (m *Manager) GetCacheStats() (decoders, schemas, subjects int) { - m.decoderMu.RLock() - decoders = len(m.avroDecoders) + len(m.protobufDecoders) + len(m.jsonSchemaDecoders) - m.decoderMu.RUnlock() - - schemas, subjects, _ = m.registryClient.GetCacheStats() - return -} - -// EncodeMessage encodes a RecordValue back to Confluent format (for Fetch path) -func (m *Manager) EncodeMessage(recordValue *schema_pb.RecordValue, schemaID uint32, format Format) ([]byte, error) { - switch format { - case FormatAvro: - return m.encodeAvroMessage(recordValue, schemaID) - case FormatProtobuf: - return m.encodeProtobufMessage(recordValue, schemaID) - case FormatJSONSchema: - return m.encodeJSONSchemaMessage(recordValue, schemaID) - default: - return nil, fmt.Errorf("unsupported format for encoding: %v", format) - } -} - -// encodeAvroMessage encodes a RecordValue back to Avro binary format -func (m *Manager) encodeAvroMessage(recordValue *schema_pb.RecordValue, schemaID uint32) ([]byte, error) { - // Get schema from registry - cachedSchema, err := m.registryClient.GetSchemaByID(schemaID) - if err != nil { - return nil, fmt.Errorf("failed to get schema for encoding: %w", err) - } - - // Get decoder (which contains the codec) - decoder, err := m.getAvroDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to get decoder for encoding: %w", err) - } - - // Convert RecordValue back to Go map with Avro union format preservation - goMap := recordValueToMapWithAvroContext(recordValue, true) - - // Encode using Avro codec - binary, err := decoder.codec.BinaryFromNative(nil, goMap) - if err != nil { - return nil, fmt.Errorf("failed to encode to Avro binary: %w", err) - } - - // Create Confluent envelope - envelope := CreateConfluentEnvelope(FormatAvro, schemaID, nil, binary) - - return envelope, nil -} - -// encodeProtobufMessage encodes a RecordValue back to Protobuf binary format -func (m *Manager) encodeProtobufMessage(recordValue *schema_pb.RecordValue, schemaID uint32) ([]byte, error) { - // Get schema from registry - cachedSchema, err := m.registryClient.GetSchemaByID(schemaID) - if err != nil { - return nil, fmt.Errorf("failed to get schema for encoding: %w", err) - } - - // Get decoder (which contains the descriptor) - decoder, err := m.getProtobufDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to get decoder for encoding: %w", err) - } - - // Convert RecordValue back to Go map - goMap := recordValueToMap(recordValue) - - // Create a new message instance and populate it - msg := decoder.msgType.New() - if err := m.populateProtobufMessage(msg, goMap, decoder.descriptor); err != nil { - return nil, fmt.Errorf("failed to populate Protobuf message: %w", err) - } - - // Encode using Protobuf - binary, err := proto.Marshal(msg.Interface()) - if err != nil { - return nil, fmt.Errorf("failed to encode to Protobuf binary: %w", err) - } - - // Create Confluent envelope (with indexes if needed) - envelope := CreateConfluentEnvelope(FormatProtobuf, schemaID, nil, binary) - - return envelope, nil -} - -// encodeJSONSchemaMessage encodes a RecordValue back to JSON Schema format -func (m *Manager) encodeJSONSchemaMessage(recordValue *schema_pb.RecordValue, schemaID uint32) ([]byte, error) { - // Get schema from registry - cachedSchema, err := m.registryClient.GetSchemaByID(schemaID) - if err != nil { - return nil, fmt.Errorf("failed to get schema for encoding: %w", err) - } - - // Get decoder (which contains the schema validator) - decoder, err := m.getJSONSchemaDecoder(schemaID, cachedSchema.Schema) - if err != nil { - return nil, fmt.Errorf("failed to get decoder for encoding: %w", err) - } - - // Encode using JSON Schema decoder - jsonData, err := decoder.EncodeFromRecordValue(recordValue) - if err != nil { - return nil, fmt.Errorf("failed to encode to JSON: %w", err) - } - - // Create Confluent envelope - envelope := CreateConfluentEnvelope(FormatJSONSchema, schemaID, nil, jsonData) - - return envelope, nil -} - -// populateProtobufMessage populates a Protobuf message from a Go map -func (m *Manager) populateProtobufMessage(msg protoreflect.Message, data map[string]interface{}, desc protoreflect.MessageDescriptor) error { - for key, value := range data { - // Find the field descriptor - fieldDesc := desc.Fields().ByName(protoreflect.Name(key)) - if fieldDesc == nil { - // Skip unknown fields in permissive mode - continue - } - - // Handle map fields specially - if fieldDesc.IsMap() { - if mapData, ok := value.(map[string]interface{}); ok { - mapValue := msg.Mutable(fieldDesc).Map() - for mk, mv := range mapData { - // Convert map key (always string for our schema) - mapKey := protoreflect.ValueOfString(mk).MapKey() - - // Convert map value based on value type - valueDesc := fieldDesc.MapValue() - mvProto, err := m.goValueToProtoValue(mv, valueDesc) - if err != nil { - return fmt.Errorf("failed to convert map value for key %s: %w", mk, err) - } - mapValue.Set(mapKey, mvProto) - } - continue - } - } - - // Convert and set the value - protoValue, err := m.goValueToProtoValue(value, fieldDesc) - if err != nil { - return fmt.Errorf("failed to convert field %s: %w", key, err) - } - - msg.Set(fieldDesc, protoValue) - } - - return nil -} - -// goValueToProtoValue converts a Go value to a Protobuf Value -func (m *Manager) goValueToProtoValue(value interface{}, fieldDesc protoreflect.FieldDescriptor) (protoreflect.Value, error) { - if value == nil { - return protoreflect.Value{}, nil - } - - switch fieldDesc.Kind() { - case protoreflect.BoolKind: - if b, ok := value.(bool); ok { - return protoreflect.ValueOfBool(b), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if i, ok := value.(int32); ok { - return protoreflect.ValueOfInt32(i), nil - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if i, ok := value.(int64); ok { - return protoreflect.ValueOfInt64(i), nil - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if i, ok := value.(uint32); ok { - return protoreflect.ValueOfUint32(i), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if i, ok := value.(uint64); ok { - return protoreflect.ValueOfUint64(i), nil - } - case protoreflect.FloatKind: - if f, ok := value.(float32); ok { - return protoreflect.ValueOfFloat32(f), nil - } - case protoreflect.DoubleKind: - if f, ok := value.(float64); ok { - return protoreflect.ValueOfFloat64(f), nil - } - case protoreflect.StringKind: - if s, ok := value.(string); ok { - return protoreflect.ValueOfString(s), nil - } - case protoreflect.BytesKind: - if b, ok := value.([]byte); ok { - return protoreflect.ValueOfBytes(b), nil - } - case protoreflect.EnumKind: - if i, ok := value.(int32); ok { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(i)), nil - } - case protoreflect.MessageKind: - if nestedMap, ok := value.(map[string]interface{}); ok { - // Handle nested messages - nestedMsg := dynamicpb.NewMessage(fieldDesc.Message()) - if err := m.populateProtobufMessage(nestedMsg, nestedMap, fieldDesc.Message()); err != nil { - return protoreflect.Value{}, err - } - return protoreflect.ValueOfMessage(nestedMsg), nil - } - } - - return protoreflect.Value{}, fmt.Errorf("unsupported value type %T for field kind %v", value, fieldDesc.Kind()) -} - -// recordValueToMap converts a RecordValue back to a Go map for encoding -func recordValueToMap(recordValue *schema_pb.RecordValue) map[string]interface{} { - return recordValueToMapWithAvroContext(recordValue, false) -} - -// recordValueToMapWithAvroContext converts a RecordValue back to a Go map for encoding -// with optional Avro union format preservation -func recordValueToMapWithAvroContext(recordValue *schema_pb.RecordValue, preserveAvroUnions bool) map[string]interface{} { - result := make(map[string]interface{}) - - for key, value := range recordValue.Fields { - result[key] = schemaValueToGoValueWithAvroContext(value, preserveAvroUnions) - } - - return result -} - -// schemaValueToGoValue converts a schema Value back to a Go value -func schemaValueToGoValue(value *schema_pb.Value) interface{} { - return schemaValueToGoValueWithAvroContext(value, false) -} - -// schemaValueToGoValueWithAvroContext converts a schema Value back to a Go value -// with optional Avro union format preservation -func schemaValueToGoValueWithAvroContext(value *schema_pb.Value, preserveAvroUnions bool) interface{} { - switch v := value.Kind.(type) { - case *schema_pb.Value_BoolValue: - return v.BoolValue - case *schema_pb.Value_Int32Value: - return v.Int32Value - case *schema_pb.Value_Int64Value: - return v.Int64Value - case *schema_pb.Value_FloatValue: - return v.FloatValue - case *schema_pb.Value_DoubleValue: - return v.DoubleValue - case *schema_pb.Value_StringValue: - return v.StringValue - case *schema_pb.Value_BytesValue: - return v.BytesValue - case *schema_pb.Value_ListValue: - result := make([]interface{}, len(v.ListValue.Values)) - for i, item := range v.ListValue.Values { - result[i] = schemaValueToGoValueWithAvroContext(item, preserveAvroUnions) - } - return result - case *schema_pb.Value_RecordValue: - recordMap := recordValueToMapWithAvroContext(v.RecordValue, preserveAvroUnions) - - // Check if this record represents an Avro union - if preserveAvroUnions && isAvroUnionRecord(v.RecordValue) { - // Return the union map directly since it's already in the correct format - return recordMap - } - - return recordMap - case *schema_pb.Value_TimestampValue: - // Convert back to time if needed, or return as int64 - return v.TimestampValue.TimestampMicros - default: - // Default to string representation - return fmt.Sprintf("%v", value) - } -} - -// isAvroUnionRecord checks if a RecordValue represents an Avro union -func isAvroUnionRecord(record *schema_pb.RecordValue) bool { - // A record represents an Avro union if it has exactly one field - // and the field name is an Avro type name - if len(record.Fields) != 1 { - return false - } - - for key := range record.Fields { - return isAvroUnionTypeName(key) - } - - return false -} - -// isAvroUnionTypeName checks if a string is a valid Avro union type name -func isAvroUnionTypeName(name string) bool { - switch name { - case "null", "boolean", "int", "long", "float", "double", "bytes", "string": - return true - } - return false -} - -// CheckSchemaCompatibility checks if two schemas are compatible -func (m *Manager) CheckSchemaCompatibility( - oldSchemaStr, newSchemaStr string, - format Format, - level CompatibilityLevel, -) (*CompatibilityResult, error) { - return m.evolutionChecker.CheckCompatibility(oldSchemaStr, newSchemaStr, format, level) -} - -// CanEvolveSchema checks if a schema can be evolved for a given subject -func (m *Manager) CanEvolveSchema( - subject string, - currentSchemaStr, newSchemaStr string, - format Format, -) (*CompatibilityResult, error) { - return m.evolutionChecker.CanEvolve(subject, currentSchemaStr, newSchemaStr, format) -} - -// SuggestSchemaEvolution provides suggestions for schema evolution -func (m *Manager) SuggestSchemaEvolution( - oldSchemaStr, newSchemaStr string, - format Format, - level CompatibilityLevel, -) ([]string, error) { - return m.evolutionChecker.SuggestEvolution(oldSchemaStr, newSchemaStr, format, level) -} - -// ValidateSchemaEvolution validates a schema evolution before applying it -func (m *Manager) ValidateSchemaEvolution( - subject string, - newSchemaStr string, - format Format, -) error { - // Get the current schema for the subject - currentSchema, err := m.registryClient.GetLatestSchema(subject) - if err != nil { - // If no current schema exists, any schema is valid - return nil - } - - // Check compatibility - result, err := m.CanEvolveSchema(subject, currentSchema.Schema, newSchemaStr, format) - if err != nil { - return fmt.Errorf("failed to check schema compatibility: %w", err) - } - - if !result.Compatible { - return fmt.Errorf("schema evolution is not compatible: %v", result.Issues) - } - - return nil -} - -// GetCompatibilityLevel gets the compatibility level for a subject -func (m *Manager) GetCompatibilityLevel(subject string) CompatibilityLevel { - return m.evolutionChecker.GetCompatibilityLevel(subject) -} - -// SetCompatibilityLevel sets the compatibility level for a subject -func (m *Manager) SetCompatibilityLevel(subject string, level CompatibilityLevel) error { - return m.evolutionChecker.SetCompatibilityLevel(subject, level) -} - -// GetSchemaByID retrieves a schema by its ID -func (m *Manager) GetSchemaByID(schemaID uint32) (*CachedSchema, error) { - return m.registryClient.GetSchemaByID(schemaID) -} - -// GetLatestSchema retrieves the latest schema for a subject -func (m *Manager) GetLatestSchema(subject string) (*CachedSubject, error) { - return m.registryClient.GetLatestSchema(subject) -} diff --git a/weed/mq/kafka/schema/manager_evolution_test.go b/weed/mq/kafka/schema/manager_evolution_test.go deleted file mode 100644 index 232c0e1e7..000000000 --- a/weed/mq/kafka/schema/manager_evolution_test.go +++ /dev/null @@ -1,344 +0,0 @@ -package schema - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestManager_SchemaEvolution tests schema evolution integration in the manager -func TestManager_SchemaEvolution(t *testing.T) { - // Create a manager without registry (for testing evolution logic only) - manager := &Manager{ - evolutionChecker: NewSchemaEvolutionChecker(), - } - - t.Run("Compatible Avro evolution", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - assert.Empty(t, result.Issues) - }) - - t.Run("Incompatible Avro evolution", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - result, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.NotEmpty(t, result.Issues) - assert.Contains(t, result.Issues[0], "Field 'email' was removed") - }) - - t.Run("Schema evolution suggestions", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - suggestions, err := manager.SuggestSchemaEvolution(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.NotEmpty(t, suggestions) - - // Should suggest adding default values - found := false - for _, suggestion := range suggestions { - if strings.Contains(suggestion, "default") { - found = true - break - } - } - assert.True(t, found, "Should suggest adding default values, got: %v", suggestions) - }) - - t.Run("JSON Schema evolution", func(t *testing.T) { - oldSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"} - }, - "required": ["id", "name"] - }` - - newSchema := `{ - "type": "object", - "properties": { - "id": {"type": "integer"}, - "name": {"type": "string"}, - "email": {"type": "string"} - }, - "required": ["id", "name"] - }` - - result, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatJSONSchema, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) - - t.Run("Full compatibility check", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityFull) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) - - t.Run("Type promotion compatibility", func(t *testing.T) { - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "score", "type": "int"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "score", "type": "long"} - ] - }` - - result, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) -} - -// TestManager_CompatibilityLevels tests compatibility level management -func TestManager_CompatibilityLevels(t *testing.T) { - manager := &Manager{ - evolutionChecker: NewSchemaEvolutionChecker(), - } - - t.Run("Get default compatibility level", func(t *testing.T) { - level := manager.GetCompatibilityLevel("test-subject") - assert.Equal(t, CompatibilityBackward, level) - }) - - t.Run("Set compatibility level", func(t *testing.T) { - err := manager.SetCompatibilityLevel("test-subject", CompatibilityFull) - assert.NoError(t, err) - }) -} - -// TestManager_CanEvolveSchema tests the CanEvolveSchema method -func TestManager_CanEvolveSchema(t *testing.T) { - manager := &Manager{ - evolutionChecker: NewSchemaEvolutionChecker(), - } - - t.Run("Compatible evolution", func(t *testing.T) { - currentSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - result, err := manager.CanEvolveSchema("test-subject", currentSchema, newSchema, FormatAvro) - require.NoError(t, err) - assert.True(t, result.Compatible) - }) - - t.Run("Incompatible evolution", func(t *testing.T) { - currentSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string"} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - result, err := manager.CanEvolveSchema("test-subject", currentSchema, newSchema, FormatAvro) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "Field 'email' was removed") - }) -} - -// TestManager_SchemaEvolutionWorkflow tests a complete schema evolution workflow -func TestManager_SchemaEvolutionWorkflow(t *testing.T) { - manager := &Manager{ - evolutionChecker: NewSchemaEvolutionChecker(), - } - - t.Run("Complete evolution workflow", func(t *testing.T) { - // Step 1: Define initial schema - initialSchema := `{ - "type": "record", - "name": "UserEvent", - "fields": [ - {"name": "userId", "type": "int"}, - {"name": "action", "type": "string"} - ] - }` - - // Step 2: Propose schema evolution (compatible) - evolvedSchema := `{ - "type": "record", - "name": "UserEvent", - "fields": [ - {"name": "userId", "type": "int"}, - {"name": "action", "type": "string"}, - {"name": "timestamp", "type": "long", "default": 0} - ] - }` - - // Check compatibility explicitly - result, err := manager.CanEvolveSchema("user-events", initialSchema, evolvedSchema, FormatAvro) - require.NoError(t, err) - assert.True(t, result.Compatible) - - // Step 3: Try incompatible evolution - incompatibleSchema := `{ - "type": "record", - "name": "UserEvent", - "fields": [ - {"name": "userId", "type": "int"} - ] - }` - - result, err = manager.CanEvolveSchema("user-events", initialSchema, incompatibleSchema, FormatAvro) - require.NoError(t, err) - assert.False(t, result.Compatible) - assert.Contains(t, result.Issues[0], "Field 'action' was removed") - - // Step 4: Get suggestions for incompatible evolution - suggestions, err := manager.SuggestSchemaEvolution(initialSchema, incompatibleSchema, FormatAvro, CompatibilityBackward) - require.NoError(t, err) - assert.NotEmpty(t, suggestions) - }) -} - -// BenchmarkSchemaEvolution benchmarks schema evolution operations -func BenchmarkSchemaEvolution(b *testing.B) { - manager := &Manager{ - evolutionChecker: NewSchemaEvolutionChecker(), - } - - oldSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""} - ] - }` - - newSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"}, - {"name": "email", "type": "string", "default": ""}, - {"name": "age", "type": "int", "default": 0} - ] - }` - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := manager.CheckSchemaCompatibility(oldSchema, newSchema, FormatAvro, CompatibilityBackward) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/weed/mq/kafka/schema/manager_test.go b/weed/mq/kafka/schema/manager_test.go deleted file mode 100644 index eec2a479e..000000000 --- a/weed/mq/kafka/schema/manager_test.go +++ /dev/null @@ -1,331 +0,0 @@ -package schema - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" -) - -func TestManager_DecodeMessage(t *testing.T) { - // Create mock schema registry - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/schemas/ids/1" { - response := map[string]interface{}{ - "schema": `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - // Create manager - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Create test Avro message - avroSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - codec, err := goavro.NewCodec(avroSchema) - if err != nil { - t.Fatalf("Failed to create Avro codec: %v", err) - } - - // Create test data - testRecord := map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - } - - // Encode to Avro binary - avroBinary, err := codec.BinaryFromNative(nil, testRecord) - if err != nil { - t.Fatalf("Failed to encode Avro data: %v", err) - } - - // Create Confluent envelope - confluentMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - // Test decoding - decodedMsg, err := manager.DecodeMessage(confluentMsg) - if err != nil { - t.Fatalf("Failed to decode message: %v", err) - } - - // Verify decoded message - if decodedMsg.SchemaID != 1 { - t.Errorf("Expected schema ID 1, got %d", decodedMsg.SchemaID) - } - - if decodedMsg.SchemaFormat != FormatAvro { - t.Errorf("Expected Avro format, got %v", decodedMsg.SchemaFormat) - } - - if decodedMsg.Subject != "user-value" { - t.Errorf("Expected subject 'user-value', got %s", decodedMsg.Subject) - } - - // Verify decoded data - if decodedMsg.RecordValue == nil { - t.Fatal("Expected non-nil RecordValue") - } - - idValue := decodedMsg.RecordValue.Fields["id"] - if idValue == nil || idValue.GetInt32Value() != 123 { - t.Errorf("Expected id=123, got %v", idValue) - } - - nameValue := decodedMsg.RecordValue.Fields["name"] - if nameValue == nil || nameValue.GetStringValue() != "John Doe" { - t.Errorf("Expected name='John Doe', got %v", nameValue) - } -} - -func TestManager_IsSchematized(t *testing.T) { - config := ManagerConfig{ - RegistryURL: "http://localhost:8081", // Not used for this test - } - - manager, err := NewManager(config) - if err != nil { - // Skip test if we can't connect to registry - t.Skip("Skipping test - no registry available") - } - - tests := []struct { - name string - message []byte - expected bool - }{ - { - name: "schematized message", - message: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x48, 0x65, 0x6c, 0x6c, 0x6f}, - expected: true, - }, - { - name: "non-schematized message", - message: []byte{0x48, 0x65, 0x6c, 0x6c, 0x6f}, // Just "Hello" - expected: false, - }, - { - name: "empty message", - message: []byte{}, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := manager.IsSchematized(tt.message) - if result != tt.expected { - t.Errorf("IsSchematized() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestManager_GetSchemaInfo(t *testing.T) { - // Create mock schema registry - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/schemas/ids/42" { - response := map[string]interface{}{ - "schema": `{ - "type": "record", - "name": "Product", - "fields": [ - {"name": "id", "type": "string"}, - {"name": "price", "type": "double"} - ] - }`, - "subject": "product-value", - "version": 3, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Create test message with schema ID 42 - testMsg := CreateConfluentEnvelope(FormatAvro, 42, nil, []byte("test-payload")) - - schemaID, format, err := manager.GetSchemaInfo(testMsg) - if err != nil { - t.Fatalf("Failed to get schema info: %v", err) - } - - if schemaID != 42 { - t.Errorf("Expected schema ID 42, got %d", schemaID) - } - - if format != FormatAvro { - t.Errorf("Expected Avro format, got %v", format) - } -} - -func TestManager_CacheManagement(t *testing.T) { - config := ManagerConfig{ - RegistryURL: "http://localhost:8081", // Not used for this test - } - - manager, err := NewManager(config) - if err != nil { - t.Skip("Skipping test - no registry available") - } - - // Check initial cache stats - decoders, schemas, subjects := manager.GetCacheStats() - if decoders != 0 || schemas != 0 || subjects != 0 { - t.Errorf("Expected empty cache initially, got decoders=%d, schemas=%d, subjects=%d", - decoders, schemas, subjects) - } - - // Clear cache (should be no-op on empty cache) - manager.ClearCache() - - // Verify still empty - decoders, schemas, subjects = manager.GetCacheStats() - if decoders != 0 || schemas != 0 || subjects != 0 { - t.Errorf("Expected empty cache after clear, got decoders=%d, schemas=%d, subjects=%d", - decoders, schemas, subjects) - } -} - -func TestManager_EncodeMessage(t *testing.T) { - // Create mock schema registry - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/schemas/ids/1" { - response := map[string]interface{}{ - "schema": `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := ManagerConfig{ - RegistryURL: server.URL, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Create test RecordValue - testMap := map[string]interface{}{ - "id": int32(456), - "name": "Jane Smith", - } - recordValue := MapToRecordValue(testMap) - - // Test encoding - encoded, err := manager.EncodeMessage(recordValue, 1, FormatAvro) - if err != nil { - t.Fatalf("Failed to encode message: %v", err) - } - - // Verify it's a valid Confluent envelope - envelope, ok := ParseConfluentEnvelope(encoded) - if !ok { - t.Fatal("Encoded message is not a valid Confluent envelope") - } - - if envelope.SchemaID != 1 { - t.Errorf("Expected schema ID 1, got %d", envelope.SchemaID) - } - - if envelope.Format != FormatAvro { - t.Errorf("Expected Avro format, got %v", envelope.Format) - } - - // Test round-trip: decode the encoded message - decodedMsg, err := manager.DecodeMessage(encoded) - if err != nil { - t.Fatalf("Failed to decode round-trip message: %v", err) - } - - // Verify round-trip data integrity - if decodedMsg.RecordValue.Fields["id"].GetInt32Value() != 456 { - t.Error("Round-trip failed for id field") - } - - if decodedMsg.RecordValue.Fields["name"].GetStringValue() != "Jane Smith" { - t.Error("Round-trip failed for name field") - } -} - -// Benchmark tests -func BenchmarkManager_DecodeMessage(b *testing.B) { - // Setup (similar to TestManager_DecodeMessage but simplified) - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - response := map[string]interface{}{ - "schema": `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - })) - defer server.Close() - - config := ManagerConfig{RegistryURL: server.URL} - manager, _ := NewManager(config) - - // Create test message - codec, _ := goavro.NewCodec(`{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`) - avroBinary, _ := codec.BinaryFromNative(nil, map[string]interface{}{"id": int32(123)}) - testMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = manager.DecodeMessage(testMsg) - } -} diff --git a/weed/mq/kafka/schema/protobuf_decoder.go b/weed/mq/kafka/schema/protobuf_decoder.go deleted file mode 100644 index 02de896a0..000000000 --- a/weed/mq/kafka/schema/protobuf_decoder.go +++ /dev/null @@ -1,359 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - - "github.com/jhump/protoreflect/desc/protoparse" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/dynamicpb" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// ProtobufDecoder handles Protobuf schema decoding and conversion to SeaweedMQ format -type ProtobufDecoder struct { - descriptor protoreflect.MessageDescriptor - msgType protoreflect.MessageType -} - -// NewProtobufDecoder creates a new Protobuf decoder from a schema descriptor -func NewProtobufDecoder(schemaBytes []byte) (*ProtobufDecoder, error) { - // Parse the binary descriptor using the descriptor parser - parser := NewProtobufDescriptorParser() - - // For now, we need to extract the message name from the schema bytes - // In a real implementation, this would be provided by the Schema Registry - // For this phase, we'll try to find the first message in the descriptor - schema, err := parser.ParseBinaryDescriptor(schemaBytes, "") - if err != nil { - return nil, fmt.Errorf("failed to parse binary descriptor: %w", err) - } - - // Create the decoder using the parsed descriptor - if schema.MessageDescriptor == nil { - return nil, fmt.Errorf("no message descriptor found in schema") - } - - return NewProtobufDecoderFromDescriptor(schema.MessageDescriptor), nil -} - -// NewProtobufDecoderFromDescriptor creates a Protobuf decoder from a message descriptor -// This is used for testing and when we have pre-built descriptors -func NewProtobufDecoderFromDescriptor(msgDesc protoreflect.MessageDescriptor) *ProtobufDecoder { - msgType := dynamicpb.NewMessageType(msgDesc) - - return &ProtobufDecoder{ - descriptor: msgDesc, - msgType: msgType, - } -} - -// NewProtobufDecoderFromString creates a Protobuf decoder from a schema string -// This parses text .proto format from Schema Registry -func NewProtobufDecoderFromString(schemaStr string) (*ProtobufDecoder, error) { - // Use protoparse to parse the text .proto schema - parser := protoparse.Parser{ - Accessor: protoparse.FileContentsFromMap(map[string]string{ - "schema.proto": schemaStr, - }), - } - - // Parse the schema - fileDescs, err := parser.ParseFiles("schema.proto") - if err != nil { - return nil, fmt.Errorf("failed to parse .proto schema: %w", err) - } - - if len(fileDescs) == 0 { - return nil, fmt.Errorf("no file descriptors found in schema") - } - - fileDesc := fileDescs[0] - - // Convert to protoreflect FileDescriptor - fileDescProto := fileDesc.AsFileDescriptorProto() - - // Create a FileDescriptor from the proto - protoFileDesc, err := protodesc.NewFile(fileDescProto, nil) - if err != nil { - return nil, fmt.Errorf("failed to create file descriptor: %w", err) - } - - // Find the first message in the file - messages := protoFileDesc.Messages() - if messages.Len() == 0 { - return nil, fmt.Errorf("no message types found in schema") - } - - // Get the first message descriptor - msgDesc := messages.Get(0) - - return NewProtobufDecoderFromDescriptor(msgDesc), nil -} - -// Decode decodes Protobuf binary data to a Go map representation -// Also supports JSON fallback for compatibility with producers that don't yet support Protobuf binary -func (pd *ProtobufDecoder) Decode(data []byte) (map[string]interface{}, error) { - // Create a new message instance - msg := pd.msgType.New() - - // Try to unmarshal as Protobuf binary first - if err := proto.Unmarshal(data, msg.Interface()); err != nil { - // Fallback: Try JSON decoding (for compatibility with producers that send JSON) - var jsonMap map[string]interface{} - if jsonErr := json.Unmarshal(data, &jsonMap); jsonErr == nil { - // Successfully decoded as JSON - return it - // Note: This is a compatibility fallback, proper Protobuf binary is preferred - return jsonMap, nil - } - // Both failed - return the original Protobuf error - return nil, fmt.Errorf("failed to unmarshal Protobuf data: %w", err) - } - - // Convert to map representation - return pd.messageToMap(msg), nil -} - -// DecodeToRecordValue decodes Protobuf data directly to SeaweedMQ RecordValue -func (pd *ProtobufDecoder) DecodeToRecordValue(data []byte) (*schema_pb.RecordValue, error) { - msgMap, err := pd.Decode(data) - if err != nil { - return nil, err - } - - return MapToRecordValue(msgMap), nil -} - -// InferRecordType infers a SeaweedMQ RecordType from the Protobuf descriptor -func (pd *ProtobufDecoder) InferRecordType() (*schema_pb.RecordType, error) { - return pd.descriptorToRecordType(pd.descriptor), nil -} - -// messageToMap converts a Protobuf message to a Go map -func (pd *ProtobufDecoder) messageToMap(msg protoreflect.Message) map[string]interface{} { - result := make(map[string]interface{}) - - msg.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - fieldName := string(fd.Name()) - result[fieldName] = pd.valueToInterface(fd, v) - return true - }) - - return result -} - -// valueToInterface converts a Protobuf value to a Go interface{} -func (pd *ProtobufDecoder) valueToInterface(fd protoreflect.FieldDescriptor, v protoreflect.Value) interface{} { - if fd.IsList() { - // Handle repeated fields - list := v.List() - result := make([]interface{}, list.Len()) - for i := 0; i < list.Len(); i++ { - result[i] = pd.scalarValueToInterface(fd, list.Get(i)) - } - return result - } - - if fd.IsMap() { - // Handle map fields - mapVal := v.Map() - result := make(map[string]interface{}) - mapVal.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - keyStr := fmt.Sprintf("%v", k.Interface()) - result[keyStr] = pd.scalarValueToInterface(fd.MapValue(), v) - return true - }) - return result - } - - return pd.scalarValueToInterface(fd, v) -} - -// scalarValueToInterface converts a scalar Protobuf value to Go interface{} -func (pd *ProtobufDecoder) scalarValueToInterface(fd protoreflect.FieldDescriptor, v protoreflect.Value) interface{} { - switch fd.Kind() { - case protoreflect.BoolKind: - return v.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return int32(v.Int()) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return v.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return uint32(v.Uint()) - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return v.Uint() - case protoreflect.FloatKind: - return float32(v.Float()) - case protoreflect.DoubleKind: - return v.Float() - case protoreflect.StringKind: - return v.String() - case protoreflect.BytesKind: - return v.Bytes() - case protoreflect.EnumKind: - return int32(v.Enum()) - case protoreflect.MessageKind: - // Handle nested messages - nestedMsg := v.Message() - return pd.messageToMap(nestedMsg) - default: - // Fallback to string representation - return fmt.Sprintf("%v", v.Interface()) - } -} - -// descriptorToRecordType converts a Protobuf descriptor to SeaweedMQ RecordType -func (pd *ProtobufDecoder) descriptorToRecordType(desc protoreflect.MessageDescriptor) *schema_pb.RecordType { - fields := make([]*schema_pb.Field, 0, desc.Fields().Len()) - - for i := 0; i < desc.Fields().Len(); i++ { - fd := desc.Fields().Get(i) - - field := &schema_pb.Field{ - Name: string(fd.Name()), - FieldIndex: int32(fd.Number() - 1), // Protobuf field numbers start at 1 - Type: pd.fieldDescriptorToType(fd), - IsRequired: fd.Cardinality() == protoreflect.Required, - IsRepeated: fd.IsList(), - } - - fields = append(fields, field) - } - - return &schema_pb.RecordType{ - Fields: fields, - } -} - -// fieldDescriptorToType converts a Protobuf field descriptor to SeaweedMQ Type -func (pd *ProtobufDecoder) fieldDescriptorToType(fd protoreflect.FieldDescriptor) *schema_pb.Type { - if fd.IsList() { - // Handle repeated fields - elementType := pd.scalarKindToType(fd.Kind(), fd.Message()) - return &schema_pb.Type{ - Kind: &schema_pb.Type_ListType{ - ListType: &schema_pb.ListType{ - ElementType: elementType, - }, - }, - } - } - - if fd.IsMap() { - // Handle map fields - for simplicity, treat as record with key/value fields - keyType := pd.scalarKindToType(fd.MapKey().Kind(), nil) - valueType := pd.scalarKindToType(fd.MapValue().Kind(), fd.MapValue().Message()) - - mapRecordType := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "key", - FieldIndex: 0, - Type: keyType, - IsRequired: true, - }, - { - Name: "value", - FieldIndex: 1, - Type: valueType, - IsRequired: false, - }, - }, - } - - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: mapRecordType, - }, - } - } - - return pd.scalarKindToType(fd.Kind(), fd.Message()) -} - -// scalarKindToType converts a Protobuf kind to SeaweedMQ scalar type -func (pd *ProtobufDecoder) scalarKindToType(kind protoreflect.Kind, msgDesc protoreflect.MessageDescriptor) *schema_pb.Type { - switch kind { - case protoreflect.BoolKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BOOL, - }, - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, - }, - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, - }, - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, // Map uint32 to int32 for simplicity - }, - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT64, // Map uint64 to int64 for simplicity - }, - } - case protoreflect.FloatKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_FLOAT, - }, - } - case protoreflect.DoubleKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_DOUBLE, - }, - } - case protoreflect.StringKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - case protoreflect.BytesKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_BYTES, - }, - } - case protoreflect.EnumKind: - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_INT32, // Enums as int32 - }, - } - case protoreflect.MessageKind: - if msgDesc != nil { - // Handle nested messages - nestedRecordType := pd.descriptorToRecordType(msgDesc) - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: nestedRecordType, - }, - } - } - fallthrough - default: - // Default to string for unknown types - return &schema_pb.Type{ - Kind: &schema_pb.Type_ScalarType{ - ScalarType: schema_pb.ScalarType_STRING, - }, - } - } -} diff --git a/weed/mq/kafka/schema/protobuf_decoder_test.go b/weed/mq/kafka/schema/protobuf_decoder_test.go deleted file mode 100644 index 4514a6589..000000000 --- a/weed/mq/kafka/schema/protobuf_decoder_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package schema - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/descriptorpb" -) - -// TestProtobufDecoder_BasicDecoding tests basic protobuf decoding functionality -func TestProtobufDecoder_BasicDecoding(t *testing.T) { - // Create a test FileDescriptorSet with a simple message - fds := createTestFileDescriptorSet(t, "TestMessage", []TestField{ - {Name: "name", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - {Name: "id", Number: 2, Type: descriptorpb.FieldDescriptorProto_TYPE_INT32, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - t.Run("NewProtobufDecoder with binary descriptor", func(t *testing.T) { - // This should now work with our integrated descriptor parser - decoder, err := NewProtobufDecoder(binaryData) - - // Phase E3: Descriptor resolution now works! - if err != nil { - // If it fails, it should be due to remaining implementation issues - assert.True(t, - strings.Contains(err.Error(), "failed to build file descriptor") || - strings.Contains(err.Error(), "message descriptor resolution not fully implemented"), - "Expected descriptor resolution error, got: %s", err.Error()) - assert.Nil(t, decoder) - } else { - // Success! Decoder creation is working - assert.NotNil(t, decoder) - assert.NotNil(t, decoder.descriptor) - t.Log("Protobuf decoder creation succeeded - Phase E3 is working!") - } - }) - - t.Run("NewProtobufDecoder with empty message name", func(t *testing.T) { - // Test the findFirstMessageName functionality - parser := NewProtobufDescriptorParser() - schema, err := parser.ParseBinaryDescriptor(binaryData, "") - - // Phase E3: Should find the first message name and may succeed - if err != nil { - // If it fails, it should be due to remaining implementation issues - assert.True(t, - strings.Contains(err.Error(), "failed to build file descriptor") || - strings.Contains(err.Error(), "message descriptor resolution not fully implemented"), - "Expected descriptor resolution error, got: %s", err.Error()) - } else { - // Success! Empty message name resolution is working - assert.NotNil(t, schema) - assert.Equal(t, "TestMessage", schema.MessageName) - t.Log("Empty message name resolution succeeded - Phase E3 is working!") - } - }) -} - -// TestProtobufDecoder_Integration tests integration with the descriptor parser -func TestProtobufDecoder_Integration(t *testing.T) { - // Create a more complex test descriptor - fds := createComplexTestFileDescriptorSet(t) - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - t.Run("Parse complex descriptor", func(t *testing.T) { - parser := NewProtobufDescriptorParser() - - // Test with empty message name - should find first message - schema, err := parser.ParseBinaryDescriptor(binaryData, "") - // Phase E3: May succeed or fail depending on message complexity - if err != nil { - assert.True(t, - strings.Contains(err.Error(), "failed to build file descriptor") || - strings.Contains(err.Error(), "cannot resolve type"), - "Expected descriptor building error, got: %s", err.Error()) - } else { - assert.NotNil(t, schema) - assert.NotEmpty(t, schema.MessageName) - t.Log("Empty message name resolution succeeded!") - } - - // Test with specific message name - schema2, err2 := parser.ParseBinaryDescriptor(binaryData, "ComplexMessage") - // Phase E3: May succeed or fail depending on message complexity - if err2 != nil { - assert.True(t, - strings.Contains(err2.Error(), "failed to build file descriptor") || - strings.Contains(err2.Error(), "cannot resolve type"), - "Expected descriptor building error, got: %s", err2.Error()) - } else { - assert.NotNil(t, schema2) - assert.Equal(t, "ComplexMessage", schema2.MessageName) - t.Log("Complex message resolution succeeded!") - } - }) -} - -// TestProtobufDecoder_Caching tests that decoder creation uses caching properly -func TestProtobufDecoder_Caching(t *testing.T) { - fds := createTestFileDescriptorSet(t, "CacheTestMessage", []TestField{ - {Name: "value", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - t.Run("Decoder creation uses cache", func(t *testing.T) { - // First attempt - _, err1 := NewProtobufDecoder(binaryData) - assert.Error(t, err1) - - // Second attempt - should use cached parsing - _, err2 := NewProtobufDecoder(binaryData) - assert.Error(t, err2) - - // Errors should be identical (indicating cache usage) - assert.Equal(t, err1.Error(), err2.Error()) - }) -} - -// Helper function to create a complex test FileDescriptorSet -func createComplexTestFileDescriptorSet(t *testing.T) *descriptorpb.FileDescriptorSet { - // Create a file descriptor with multiple messages - fileDesc := &descriptorpb.FileDescriptorProto{ - Name: proto.String("test_complex.proto"), - Package: proto.String("test"), - MessageType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("ComplexMessage"), - Field: []*descriptorpb.FieldDescriptorProto{ - { - Name: proto.String("simple_field"), - Number: proto.Int32(1), - Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), - }, - { - Name: proto.String("repeated_field"), - Number: proto.Int32(2), - Type: descriptorpb.FieldDescriptorProto_TYPE_INT32.Enum(), - Label: descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum(), - }, - }, - }, - { - Name: proto.String("SimpleMessage"), - Field: []*descriptorpb.FieldDescriptorProto{ - { - Name: proto.String("id"), - Number: proto.Int32(1), - Type: descriptorpb.FieldDescriptorProto_TYPE_INT64.Enum(), - }, - }, - }, - }, - } - - return &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{fileDesc}, - } -} - -// TestProtobufDecoder_ErrorHandling tests error handling in various scenarios -func TestProtobufDecoder_ErrorHandling(t *testing.T) { - t.Run("Invalid binary data", func(t *testing.T) { - invalidData := []byte("not a protobuf descriptor") - decoder, err := NewProtobufDecoder(invalidData) - - assert.Error(t, err) - assert.Nil(t, decoder) - assert.Contains(t, err.Error(), "failed to parse binary descriptor") - }) - - t.Run("Empty binary data", func(t *testing.T) { - emptyData := []byte{} - decoder, err := NewProtobufDecoder(emptyData) - - assert.Error(t, err) - assert.Nil(t, decoder) - }) - - t.Run("FileDescriptorSet with no messages", func(t *testing.T) { - // Create an empty FileDescriptorSet - fds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{ - { - Name: proto.String("empty.proto"), - Package: proto.String("empty"), - // No MessageType defined - }, - }, - } - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - decoder, err := NewProtobufDecoder(binaryData) - assert.Error(t, err) - assert.Nil(t, decoder) - assert.Contains(t, err.Error(), "no messages found") - }) -} diff --git a/weed/mq/kafka/schema/protobuf_descriptor.go b/weed/mq/kafka/schema/protobuf_descriptor.go deleted file mode 100644 index a0f584114..000000000 --- a/weed/mq/kafka/schema/protobuf_descriptor.go +++ /dev/null @@ -1,485 +0,0 @@ -package schema - -import ( - "fmt" - "sync" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/types/descriptorpb" - "google.golang.org/protobuf/types/dynamicpb" -) - -// ProtobufSchema represents a parsed Protobuf schema with message type information -type ProtobufSchema struct { - FileDescriptorSet *descriptorpb.FileDescriptorSet - MessageDescriptor protoreflect.MessageDescriptor - MessageName string - PackageName string - Dependencies []string -} - -// ProtobufDescriptorParser handles parsing of Confluent Schema Registry Protobuf descriptors -type ProtobufDescriptorParser struct { - mu sync.RWMutex - // Cache for parsed descriptors to avoid re-parsing - descriptorCache map[string]*ProtobufSchema -} - -// NewProtobufDescriptorParser creates a new parser instance -func NewProtobufDescriptorParser() *ProtobufDescriptorParser { - return &ProtobufDescriptorParser{ - descriptorCache: make(map[string]*ProtobufSchema), - } -} - -// ParseBinaryDescriptor parses a Confluent Schema Registry Protobuf binary descriptor -// The input is typically a serialized FileDescriptorSet from the schema registry -func (p *ProtobufDescriptorParser) ParseBinaryDescriptor(binaryData []byte, messageName string) (*ProtobufSchema, error) { - // Check cache first - cacheKey := fmt.Sprintf("%x:%s", binaryData[:min(32, len(binaryData))], messageName) - p.mu.RLock() - if cached, exists := p.descriptorCache[cacheKey]; exists { - p.mu.RUnlock() - // If we have a cached schema but no message descriptor, return the same error - if cached.MessageDescriptor == nil { - return cached, fmt.Errorf("failed to find message descriptor for %s: message descriptor resolution not fully implemented in Phase E1 - found message %s in package %s", messageName, messageName, cached.PackageName) - } - return cached, nil - } - p.mu.RUnlock() - - // Parse the FileDescriptorSet from binary data - var fileDescriptorSet descriptorpb.FileDescriptorSet - if err := proto.Unmarshal(binaryData, &fileDescriptorSet); err != nil { - return nil, fmt.Errorf("failed to unmarshal FileDescriptorSet: %w", err) - } - - // Validate the descriptor set - if err := p.validateDescriptorSet(&fileDescriptorSet); err != nil { - return nil, fmt.Errorf("invalid descriptor set: %w", err) - } - - // If no message name provided, try to find the first available message - if messageName == "" { - messageName = p.findFirstMessageName(&fileDescriptorSet) - if messageName == "" { - return nil, fmt.Errorf("no messages found in FileDescriptorSet") - } - } - - // Find the target message descriptor - messageDesc, packageName, err := p.findMessageDescriptor(&fileDescriptorSet, messageName) - if err != nil { - // For Phase E1, we still cache the FileDescriptorSet even if message resolution fails - // This allows us to test caching behavior and avoid re-parsing the same binary data - schema := &ProtobufSchema{ - FileDescriptorSet: &fileDescriptorSet, - MessageDescriptor: nil, // Not resolved in Phase E1 - MessageName: messageName, - PackageName: packageName, - Dependencies: p.extractDependencies(&fileDescriptorSet), - } - p.mu.Lock() - p.descriptorCache[cacheKey] = schema - p.mu.Unlock() - return schema, fmt.Errorf("failed to find message descriptor for %s: %w", messageName, err) - } - - // Extract dependencies - dependencies := p.extractDependencies(&fileDescriptorSet) - - // Create the schema object - schema := &ProtobufSchema{ - FileDescriptorSet: &fileDescriptorSet, - MessageDescriptor: messageDesc, - MessageName: messageName, - PackageName: packageName, - Dependencies: dependencies, - } - - // Cache the result - p.mu.Lock() - p.descriptorCache[cacheKey] = schema - p.mu.Unlock() - - return schema, nil -} - -// validateDescriptorSet performs basic validation on the FileDescriptorSet -func (p *ProtobufDescriptorParser) validateDescriptorSet(fds *descriptorpb.FileDescriptorSet) error { - if len(fds.File) == 0 { - return fmt.Errorf("FileDescriptorSet contains no files") - } - - for i, file := range fds.File { - if file.Name == nil { - return fmt.Errorf("file descriptor %d has no name", i) - } - if file.Package == nil { - return fmt.Errorf("file descriptor %s has no package", *file.Name) - } - } - - return nil -} - -// findFirstMessageName finds the first message name in the FileDescriptorSet -func (p *ProtobufDescriptorParser) findFirstMessageName(fds *descriptorpb.FileDescriptorSet) string { - for _, file := range fds.File { - if len(file.MessageType) > 0 { - return file.MessageType[0].GetName() - } - } - return "" -} - -// findMessageDescriptor locates a specific message descriptor within the FileDescriptorSet -func (p *ProtobufDescriptorParser) findMessageDescriptor(fds *descriptorpb.FileDescriptorSet, messageName string) (protoreflect.MessageDescriptor, string, error) { - // This is a simplified implementation for Phase E1 - // In a complete implementation, we would: - // 1. Build a complete descriptor registry from the FileDescriptorSet - // 2. Resolve all imports and dependencies - // 3. Handle nested message types and packages correctly - // 4. Support fully qualified message names - - for _, file := range fds.File { - packageName := "" - if file.Package != nil { - packageName = *file.Package - } - - // Search for the message in this file - for _, messageType := range file.MessageType { - if messageType.Name != nil && *messageType.Name == messageName { - // Try to build a proper descriptor from the FileDescriptorProto - fileDesc, err := p.buildFileDescriptor(file) - if err != nil { - return nil, packageName, fmt.Errorf("failed to build file descriptor: %w", err) - } - - // Find the message descriptor in the built file - msgDesc := p.findMessageInFileDescriptor(fileDesc, messageName) - if msgDesc != nil { - return msgDesc, packageName, nil - } - - return nil, packageName, fmt.Errorf("message descriptor built but not found: %s", messageName) - } - - // Search nested messages (simplified) - if nestedDesc := p.searchNestedMessages(messageType, messageName); nestedDesc != nil { - // Try to build descriptor for nested message - fileDesc, err := p.buildFileDescriptor(file) - if err != nil { - return nil, packageName, fmt.Errorf("failed to build file descriptor for nested message: %w", err) - } - - msgDesc := p.findMessageInFileDescriptor(fileDesc, messageName) - if msgDesc != nil { - return msgDesc, packageName, nil - } - - return nil, packageName, fmt.Errorf("nested message descriptor built but not found: %s", messageName) - } - } - } - - return nil, "", fmt.Errorf("message %s not found in descriptor set", messageName) -} - -// buildFileDescriptor builds a protoreflect.FileDescriptor from a FileDescriptorProto -func (p *ProtobufDescriptorParser) buildFileDescriptor(fileProto *descriptorpb.FileDescriptorProto) (protoreflect.FileDescriptor, error) { - // Create a local registry to avoid conflicts - localFiles := &protoregistry.Files{} - - // Build the file descriptor using protodesc - fileDesc, err := protodesc.NewFile(fileProto, localFiles) - if err != nil { - return nil, fmt.Errorf("failed to create file descriptor: %w", err) - } - - return fileDesc, nil -} - -// findMessageInFileDescriptor searches for a message descriptor within a file descriptor -func (p *ProtobufDescriptorParser) findMessageInFileDescriptor(fileDesc protoreflect.FileDescriptor, messageName string) protoreflect.MessageDescriptor { - // Search top-level messages - messages := fileDesc.Messages() - for i := 0; i < messages.Len(); i++ { - msgDesc := messages.Get(i) - if string(msgDesc.Name()) == messageName { - return msgDesc - } - - // Search nested messages - if nestedDesc := p.findNestedMessageDescriptor(msgDesc, messageName); nestedDesc != nil { - return nestedDesc - } - } - - return nil -} - -// findNestedMessageDescriptor recursively searches for nested messages -func (p *ProtobufDescriptorParser) findNestedMessageDescriptor(msgDesc protoreflect.MessageDescriptor, messageName string) protoreflect.MessageDescriptor { - nestedMessages := msgDesc.Messages() - for i := 0; i < nestedMessages.Len(); i++ { - nestedDesc := nestedMessages.Get(i) - if string(nestedDesc.Name()) == messageName { - return nestedDesc - } - - // Recursively search deeper nested messages - if deeperNested := p.findNestedMessageDescriptor(nestedDesc, messageName); deeperNested != nil { - return deeperNested - } - } - - return nil -} - -// searchNestedMessages recursively searches for nested message types -func (p *ProtobufDescriptorParser) searchNestedMessages(messageType *descriptorpb.DescriptorProto, targetName string) *descriptorpb.DescriptorProto { - for _, nested := range messageType.NestedType { - if nested.Name != nil && *nested.Name == targetName { - return nested - } - // Recursively search deeper nesting - if found := p.searchNestedMessages(nested, targetName); found != nil { - return found - } - } - return nil -} - -// extractDependencies extracts the list of dependencies from the FileDescriptorSet -func (p *ProtobufDescriptorParser) extractDependencies(fds *descriptorpb.FileDescriptorSet) []string { - dependencySet := make(map[string]bool) - - for _, file := range fds.File { - for _, dep := range file.Dependency { - dependencySet[dep] = true - } - } - - dependencies := make([]string, 0, len(dependencySet)) - for dep := range dependencySet { - dependencies = append(dependencies, dep) - } - - return dependencies -} - -// GetMessageFields returns information about the fields in the message -func (s *ProtobufSchema) GetMessageFields() ([]FieldInfo, error) { - if s.FileDescriptorSet == nil { - return nil, fmt.Errorf("no FileDescriptorSet available") - } - - // Find the message descriptor for this schema - messageDesc := s.findMessageDescriptor(s.MessageName) - if messageDesc == nil { - return nil, fmt.Errorf("message %s not found in descriptor set", s.MessageName) - } - - // Extract field information - fields := make([]FieldInfo, 0, len(messageDesc.Field)) - for _, field := range messageDesc.Field { - fieldInfo := FieldInfo{ - Name: field.GetName(), - Number: field.GetNumber(), - Type: s.fieldTypeToString(field.GetType()), - Label: s.fieldLabelToString(field.GetLabel()), - } - - // Set TypeName for message/enum types - if field.GetTypeName() != "" { - fieldInfo.TypeName = field.GetTypeName() - } - - fields = append(fields, fieldInfo) - } - - return fields, nil -} - -// FieldInfo represents information about a Protobuf field -type FieldInfo struct { - Name string - Number int32 - Type string - Label string // optional, required, repeated - TypeName string // for message/enum types -} - -// GetFieldByName returns information about a specific field -func (s *ProtobufSchema) GetFieldByName(fieldName string) (*FieldInfo, error) { - fields, err := s.GetMessageFields() - if err != nil { - return nil, err - } - - for _, field := range fields { - if field.Name == fieldName { - return &field, nil - } - } - - return nil, fmt.Errorf("field %s not found", fieldName) -} - -// GetFieldByNumber returns information about a field by its number -func (s *ProtobufSchema) GetFieldByNumber(fieldNumber int32) (*FieldInfo, error) { - fields, err := s.GetMessageFields() - if err != nil { - return nil, err - } - - for _, field := range fields { - if field.Number == fieldNumber { - return &field, nil - } - } - - return nil, fmt.Errorf("field number %d not found", fieldNumber) -} - -// findMessageDescriptor finds a message descriptor by name in the FileDescriptorSet -func (s *ProtobufSchema) findMessageDescriptor(messageName string) *descriptorpb.DescriptorProto { - if s.FileDescriptorSet == nil { - return nil - } - - for _, file := range s.FileDescriptorSet.File { - // Check top-level messages - for _, message := range file.MessageType { - if message.GetName() == messageName { - return message - } - // Check nested messages - if nested := searchNestedMessages(message, messageName); nested != nil { - return nested - } - } - } - - return nil -} - -// searchNestedMessages recursively searches for nested message types -func searchNestedMessages(messageType *descriptorpb.DescriptorProto, targetName string) *descriptorpb.DescriptorProto { - for _, nested := range messageType.NestedType { - if nested.Name != nil && *nested.Name == targetName { - return nested - } - // Recursively search deeper nesting - if found := searchNestedMessages(nested, targetName); found != nil { - return found - } - } - return nil -} - -// fieldTypeToString converts a FieldDescriptorProto_Type to string -func (s *ProtobufSchema) fieldTypeToString(fieldType descriptorpb.FieldDescriptorProto_Type) string { - switch fieldType { - case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE: - return "double" - case descriptorpb.FieldDescriptorProto_TYPE_FLOAT: - return "float" - case descriptorpb.FieldDescriptorProto_TYPE_INT64: - return "int64" - case descriptorpb.FieldDescriptorProto_TYPE_UINT64: - return "uint64" - case descriptorpb.FieldDescriptorProto_TYPE_INT32: - return "int32" - case descriptorpb.FieldDescriptorProto_TYPE_FIXED64: - return "fixed64" - case descriptorpb.FieldDescriptorProto_TYPE_FIXED32: - return "fixed32" - case descriptorpb.FieldDescriptorProto_TYPE_BOOL: - return "bool" - case descriptorpb.FieldDescriptorProto_TYPE_STRING: - return "string" - case descriptorpb.FieldDescriptorProto_TYPE_GROUP: - return "group" - case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE: - return "message" - case descriptorpb.FieldDescriptorProto_TYPE_BYTES: - return "bytes" - case descriptorpb.FieldDescriptorProto_TYPE_UINT32: - return "uint32" - case descriptorpb.FieldDescriptorProto_TYPE_ENUM: - return "enum" - case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32: - return "sfixed32" - case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64: - return "sfixed64" - case descriptorpb.FieldDescriptorProto_TYPE_SINT32: - return "sint32" - case descriptorpb.FieldDescriptorProto_TYPE_SINT64: - return "sint64" - default: - return "unknown" - } -} - -// fieldLabelToString converts a FieldDescriptorProto_Label to string -func (s *ProtobufSchema) fieldLabelToString(label descriptorpb.FieldDescriptorProto_Label) string { - switch label { - case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL: - return "optional" - case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED: - return "required" - case descriptorpb.FieldDescriptorProto_LABEL_REPEATED: - return "repeated" - default: - return "unknown" - } -} - -// ValidateMessage validates that a message conforms to the schema -func (s *ProtobufSchema) ValidateMessage(messageData []byte) error { - if s.MessageDescriptor == nil { - return fmt.Errorf("no message descriptor available for validation") - } - - // Create a dynamic message from the descriptor - msgType := dynamicpb.NewMessageType(s.MessageDescriptor) - msg := msgType.New() - - // Try to unmarshal the message data - if err := proto.Unmarshal(messageData, msg.Interface()); err != nil { - return fmt.Errorf("message validation failed: %w", err) - } - - // Basic validation passed - the message can be unmarshaled with the schema - return nil -} - -// ClearCache clears the descriptor cache -func (p *ProtobufDescriptorParser) ClearCache() { - p.mu.Lock() - defer p.mu.Unlock() - p.descriptorCache = make(map[string]*ProtobufSchema) -} - -// GetCacheStats returns statistics about the descriptor cache -func (p *ProtobufDescriptorParser) GetCacheStats() map[string]interface{} { - p.mu.RLock() - defer p.mu.RUnlock() - return map[string]interface{}{ - "cached_descriptors": len(p.descriptorCache), - } -} - -// Helper function for min -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/weed/mq/kafka/schema/protobuf_descriptor_test.go b/weed/mq/kafka/schema/protobuf_descriptor_test.go deleted file mode 100644 index d1d923243..000000000 --- a/weed/mq/kafka/schema/protobuf_descriptor_test.go +++ /dev/null @@ -1,411 +0,0 @@ -package schema - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/descriptorpb" -) - -// TestProtobufDescriptorParser_BasicParsing tests basic descriptor parsing functionality -func TestProtobufDescriptorParser_BasicParsing(t *testing.T) { - parser := NewProtobufDescriptorParser() - - t.Run("Parse Simple Message Descriptor", func(t *testing.T) { - // Create a simple FileDescriptorSet for testing - fds := createTestFileDescriptorSet(t, "TestMessage", []TestField{ - {Name: "id", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_INT32, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - {Name: "name", Number: 2, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - // Parse the descriptor - schema, err := parser.ParseBinaryDescriptor(binaryData, "TestMessage") - - // Phase E3: Descriptor resolution now works! - if err != nil { - // If it fails, it should be due to remaining implementation issues - assert.True(t, - strings.Contains(err.Error(), "message descriptor resolution not fully implemented") || - strings.Contains(err.Error(), "failed to build file descriptor"), - "Expected descriptor resolution error, got: %s", err.Error()) - } else { - // Success! Descriptor resolution is working - assert.NotNil(t, schema) - assert.NotNil(t, schema.MessageDescriptor) - assert.Equal(t, "TestMessage", schema.MessageName) - t.Log("Simple message descriptor resolution succeeded - Phase E3 is working!") - } - }) - - t.Run("Parse Complex Message Descriptor", func(t *testing.T) { - // Create a more complex FileDescriptorSet - fds := createTestFileDescriptorSet(t, "ComplexMessage", []TestField{ - {Name: "user_id", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - {Name: "metadata", Number: 2, Type: descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, TypeName: "Metadata", Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - {Name: "tags", Number: 3, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_REPEATED}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - // Parse the descriptor - schema, err := parser.ParseBinaryDescriptor(binaryData, "ComplexMessage") - - // Phase E3: May succeed or fail depending on message type resolution - if err != nil { - // If it fails, it should be due to unresolved message types (Metadata) - assert.True(t, - strings.Contains(err.Error(), "failed to build file descriptor") || - strings.Contains(err.Error(), "not found") || - strings.Contains(err.Error(), "cannot resolve type"), - "Expected type resolution error, got: %s", err.Error()) - } else { - // Success! Complex descriptor resolution is working - assert.NotNil(t, schema) - assert.NotNil(t, schema.MessageDescriptor) - assert.Equal(t, "ComplexMessage", schema.MessageName) - t.Log("Complex message descriptor resolution succeeded - Phase E3 is working!") - } - }) - - t.Run("Cache Functionality", func(t *testing.T) { - // Create a fresh parser for this test to avoid interference - freshParser := NewProtobufDescriptorParser() - - fds := createTestFileDescriptorSet(t, "CacheTest", []TestField{ - {Name: "value", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - // First parse - schema1, err1 := freshParser.ParseBinaryDescriptor(binaryData, "CacheTest") - - // Second parse (should use cache) - schema2, err2 := freshParser.ParseBinaryDescriptor(binaryData, "CacheTest") - - // Both should have the same result (success or failure) - assert.Equal(t, err1 == nil, err2 == nil, "Both calls should have same success/failure status") - - if err1 == nil && err2 == nil { - // Success case - both schemas should be identical (from cache) - assert.Equal(t, schema1, schema2, "Cached schema should be identical") - assert.NotNil(t, schema1.MessageDescriptor) - t.Log("Cache functionality working with successful descriptor resolution!") - } else { - // Error case - errors should be identical (indicating cache usage) - assert.Equal(t, err1.Error(), err2.Error(), "Cached errors should be identical") - } - - // Check cache stats - should be 1 since descriptor was cached - stats := freshParser.GetCacheStats() - assert.Equal(t, 1, stats["cached_descriptors"]) - }) -} - -// TestProtobufDescriptorParser_Validation tests descriptor validation -func TestProtobufDescriptorParser_Validation(t *testing.T) { - parser := NewProtobufDescriptorParser() - - t.Run("Invalid Binary Data", func(t *testing.T) { - invalidData := []byte("not a protobuf descriptor") - - _, err := parser.ParseBinaryDescriptor(invalidData, "TestMessage") - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to unmarshal FileDescriptorSet") - }) - - t.Run("Empty FileDescriptorSet", func(t *testing.T) { - emptyFds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{}, - } - - binaryData, err := proto.Marshal(emptyFds) - require.NoError(t, err) - - _, err = parser.ParseBinaryDescriptor(binaryData, "TestMessage") - assert.Error(t, err) - assert.Contains(t, err.Error(), "FileDescriptorSet contains no files") - }) - - t.Run("FileDescriptor Without Name", func(t *testing.T) { - invalidFds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{ - { - // Missing Name field - Package: proto.String("test.package"), - }, - }, - } - - binaryData, err := proto.Marshal(invalidFds) - require.NoError(t, err) - - _, err = parser.ParseBinaryDescriptor(binaryData, "TestMessage") - assert.Error(t, err) - assert.Contains(t, err.Error(), "file descriptor 0 has no name") - }) - - t.Run("FileDescriptor Without Package", func(t *testing.T) { - invalidFds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{ - { - Name: proto.String("test.proto"), - // Missing Package field - }, - }, - } - - binaryData, err := proto.Marshal(invalidFds) - require.NoError(t, err) - - _, err = parser.ParseBinaryDescriptor(binaryData, "TestMessage") - assert.Error(t, err) - assert.Contains(t, err.Error(), "file descriptor test.proto has no package") - }) -} - -// TestProtobufDescriptorParser_MessageSearch tests message finding functionality -func TestProtobufDescriptorParser_MessageSearch(t *testing.T) { - parser := NewProtobufDescriptorParser() - - t.Run("Message Not Found", func(t *testing.T) { - fds := createTestFileDescriptorSet(t, "ExistingMessage", []TestField{ - {Name: "field1", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - _, err = parser.ParseBinaryDescriptor(binaryData, "NonExistentMessage") - assert.Error(t, err) - assert.Contains(t, err.Error(), "message NonExistentMessage not found") - }) - - t.Run("Nested Message Search", func(t *testing.T) { - // Create FileDescriptorSet with nested messages - fds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{ - { - Name: proto.String("test.proto"), - Package: proto.String("test.package"), - MessageType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("OuterMessage"), - NestedType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("NestedMessage"), - Field: []*descriptorpb.FieldDescriptorProto{ - { - Name: proto.String("nested_field"), - Number: proto.Int32(1), - Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), - Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), - }, - }, - }, - }, - }, - }, - }, - }, - } - - binaryData, err := proto.Marshal(fds) - require.NoError(t, err) - - _, err = parser.ParseBinaryDescriptor(binaryData, "NestedMessage") - // Nested message search now works! May succeed or fail on descriptor building - if err != nil { - // If it fails, it should be due to descriptor building issues - assert.True(t, - strings.Contains(err.Error(), "failed to build file descriptor") || - strings.Contains(err.Error(), "invalid cardinality") || - strings.Contains(err.Error(), "nested message descriptor resolution not fully implemented"), - "Expected descriptor building error, got: %s", err.Error()) - } else { - // Success! Nested message resolution is working - t.Log("Nested message resolution succeeded - Phase E3 is working!") - } - }) -} - -// TestProtobufDescriptorParser_Dependencies tests dependency extraction -func TestProtobufDescriptorParser_Dependencies(t *testing.T) { - parser := NewProtobufDescriptorParser() - - t.Run("Extract Dependencies", func(t *testing.T) { - // Create FileDescriptorSet with dependencies - fds := &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{ - { - Name: proto.String("main.proto"), - Package: proto.String("main.package"), - Dependency: []string{ - "google/protobuf/timestamp.proto", - "common/types.proto", - }, - MessageType: []*descriptorpb.DescriptorProto{ - { - Name: proto.String("MainMessage"), - Field: []*descriptorpb.FieldDescriptorProto{ - { - Name: proto.String("id"), - Number: proto.Int32(1), - Type: descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(), - }, - }, - }, - }, - }, - }, - } - - _, err := proto.Marshal(fds) - require.NoError(t, err) - - // Parse and check dependencies (even though parsing fails, we can test dependency extraction) - dependencies := parser.extractDependencies(fds) - assert.Len(t, dependencies, 2) - assert.Contains(t, dependencies, "google/protobuf/timestamp.proto") - assert.Contains(t, dependencies, "common/types.proto") - }) -} - -// TestProtobufSchema_Methods tests ProtobufSchema methods -func TestProtobufSchema_Methods(t *testing.T) { - // Create a basic schema for testing - fds := createTestFileDescriptorSet(t, "TestSchema", []TestField{ - {Name: "field1", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - schema := &ProtobufSchema{ - FileDescriptorSet: fds, - MessageDescriptor: nil, // Not implemented in Phase E1 - MessageName: "TestSchema", - PackageName: "test.package", - Dependencies: []string{"common.proto"}, - } - - t.Run("GetMessageFields Implemented", func(t *testing.T) { - fields, err := schema.GetMessageFields() - assert.NoError(t, err) - assert.Len(t, fields, 1) - assert.Equal(t, "field1", fields[0].Name) - assert.Equal(t, int32(1), fields[0].Number) - assert.Equal(t, "string", fields[0].Type) - assert.Equal(t, "optional", fields[0].Label) - }) - - t.Run("GetFieldByName Implemented", func(t *testing.T) { - field, err := schema.GetFieldByName("field1") - assert.NoError(t, err) - assert.Equal(t, "field1", field.Name) - assert.Equal(t, int32(1), field.Number) - assert.Equal(t, "string", field.Type) - assert.Equal(t, "optional", field.Label) - }) - - t.Run("GetFieldByNumber Implemented", func(t *testing.T) { - field, err := schema.GetFieldByNumber(1) - assert.NoError(t, err) - assert.Equal(t, "field1", field.Name) - assert.Equal(t, int32(1), field.Number) - assert.Equal(t, "string", field.Type) - assert.Equal(t, "optional", field.Label) - }) - - t.Run("ValidateMessage Requires MessageDescriptor", func(t *testing.T) { - err := schema.ValidateMessage([]byte("test message")) - assert.Error(t, err) - assert.Contains(t, err.Error(), "no message descriptor available for validation") - }) -} - -// TestProtobufDescriptorParser_CacheManagement tests cache management -func TestProtobufDescriptorParser_CacheManagement(t *testing.T) { - parser := NewProtobufDescriptorParser() - - // Add some entries to cache - fds1 := createTestFileDescriptorSet(t, "Message1", []TestField{ - {Name: "field1", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_STRING, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - fds2 := createTestFileDescriptorSet(t, "Message2", []TestField{ - {Name: "field2", Number: 1, Type: descriptorpb.FieldDescriptorProto_TYPE_INT32, Label: descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL}, - }) - - binaryData1, _ := proto.Marshal(fds1) - binaryData2, _ := proto.Marshal(fds2) - - // Parse both (will fail but add to cache) - parser.ParseBinaryDescriptor(binaryData1, "Message1") - parser.ParseBinaryDescriptor(binaryData2, "Message2") - - // Check cache has entries (descriptors cached even though resolution failed) - stats := parser.GetCacheStats() - assert.Equal(t, 2, stats["cached_descriptors"]) - - // Clear cache - parser.ClearCache() - - // Check cache is empty - stats = parser.GetCacheStats() - assert.Equal(t, 0, stats["cached_descriptors"]) -} - -// Helper types and functions for testing - -type TestField struct { - Name string - Number int32 - Type descriptorpb.FieldDescriptorProto_Type - Label descriptorpb.FieldDescriptorProto_Label - TypeName string -} - -func createTestFileDescriptorSet(t *testing.T, messageName string, fields []TestField) *descriptorpb.FileDescriptorSet { - // Create field descriptors - fieldDescriptors := make([]*descriptorpb.FieldDescriptorProto, len(fields)) - for i, field := range fields { - fieldDesc := &descriptorpb.FieldDescriptorProto{ - Name: proto.String(field.Name), - Number: proto.Int32(field.Number), - Type: field.Type.Enum(), - } - - if field.Label != descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL { - fieldDesc.Label = field.Label.Enum() - } - - if field.TypeName != "" { - fieldDesc.TypeName = proto.String(field.TypeName) - } - - fieldDescriptors[i] = fieldDesc - } - - // Create message descriptor - messageDesc := &descriptorpb.DescriptorProto{ - Name: proto.String(messageName), - Field: fieldDescriptors, - } - - // Create file descriptor - fileDesc := &descriptorpb.FileDescriptorProto{ - Name: proto.String("test.proto"), - Package: proto.String("test.package"), - MessageType: []*descriptorpb.DescriptorProto{messageDesc}, - } - - // Create FileDescriptorSet - return &descriptorpb.FileDescriptorSet{ - File: []*descriptorpb.FileDescriptorProto{fileDesc}, - } -} diff --git a/weed/mq/kafka/schema/reconstruction_test.go b/weed/mq/kafka/schema/reconstruction_test.go deleted file mode 100644 index 291bfaa61..000000000 --- a/weed/mq/kafka/schema/reconstruction_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package schema - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/linkedin/goavro/v2" -) - -func TestSchemaReconstruction_Avro(t *testing.T) { - // Create mock schema registry - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/schemas/ids/1" { - response := map[string]interface{}{ - "schema": `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - // Create manager - config := ManagerConfig{ - RegistryURL: server.URL, - ValidationMode: ValidationPermissive, - } - - manager, err := NewManager(config) - if err != nil { - t.Fatalf("Failed to create manager: %v", err) - } - - // Create test Avro message - avroSchema := `{ - "type": "record", - "name": "User", - "fields": [ - {"name": "id", "type": "int"}, - {"name": "name", "type": "string"} - ] - }` - - codec, err := goavro.NewCodec(avroSchema) - if err != nil { - t.Fatalf("Failed to create Avro codec: %v", err) - } - - // Create original test data - originalRecord := map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - } - - // Encode to Avro binary - avroBinary, err := codec.BinaryFromNative(nil, originalRecord) - if err != nil { - t.Fatalf("Failed to encode Avro data: %v", err) - } - - // Create original Confluent message - originalMsg := CreateConfluentEnvelope(FormatAvro, 1, nil, avroBinary) - - // Debug: Check the created message - t.Logf("Original Avro binary length: %d", len(avroBinary)) - t.Logf("Original Confluent message length: %d", len(originalMsg)) - - // Debug: Parse the envelope manually to see what's happening - envelope, ok := ParseConfluentEnvelope(originalMsg) - if !ok { - t.Fatal("Failed to parse Confluent envelope") - } - t.Logf("Parsed envelope - SchemaID: %d, Format: %v, Payload length: %d", - envelope.SchemaID, envelope.Format, len(envelope.Payload)) - - // Step 1: Decode the original message (simulate Produce path) - decodedMsg, err := manager.DecodeMessage(originalMsg) - if err != nil { - t.Fatalf("Failed to decode message: %v", err) - } - - // Step 2: Reconstruct the message (simulate Fetch path) - reconstructedMsg, err := manager.EncodeMessage(decodedMsg.RecordValue, 1, FormatAvro) - if err != nil { - t.Fatalf("Failed to reconstruct message: %v", err) - } - - // Step 3: Verify the reconstructed message can be decoded again - finalDecodedMsg, err := manager.DecodeMessage(reconstructedMsg) - if err != nil { - t.Fatalf("Failed to decode reconstructed message: %v", err) - } - - // Verify data integrity through the round trip - if finalDecodedMsg.RecordValue.Fields["id"].GetInt32Value() != 123 { - t.Errorf("Expected id=123, got %v", finalDecodedMsg.RecordValue.Fields["id"].GetInt32Value()) - } - - if finalDecodedMsg.RecordValue.Fields["name"].GetStringValue() != "John Doe" { - t.Errorf("Expected name='John Doe', got %v", finalDecodedMsg.RecordValue.Fields["name"].GetStringValue()) - } - - // Verify schema information is preserved - if finalDecodedMsg.SchemaID != 1 { - t.Errorf("Expected schema ID 1, got %d", finalDecodedMsg.SchemaID) - } - - if finalDecodedMsg.SchemaFormat != FormatAvro { - t.Errorf("Expected Avro format, got %v", finalDecodedMsg.SchemaFormat) - } - - t.Logf("Successfully completed round-trip: Original -> Decode -> Encode -> Decode") - t.Logf("Original message size: %d bytes", len(originalMsg)) - t.Logf("Reconstructed message size: %d bytes", len(reconstructedMsg)) -} - -func TestSchemaReconstruction_MultipleFormats(t *testing.T) { - // Test that the reconstruction framework can handle multiple schema formats - - testCases := []struct { - name string - format Format - }{ - {"Avro", FormatAvro}, - {"Protobuf", FormatProtobuf}, - {"JSON Schema", FormatJSONSchema}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create test RecordValue - testMap := map[string]interface{}{ - "id": int32(456), - "name": "Jane Smith", - } - recordValue := MapToRecordValue(testMap) - - // Create mock manager (without registry for this test) - config := ManagerConfig{ - RegistryURL: "http://localhost:8081", // Not used for this test - } - - manager, err := NewManager(config) - if err != nil { - t.Skip("Skipping test - no registry available") - } - - // Test encoding (will fail for Protobuf/JSON Schema in Phase 7, which is expected) - _, err = manager.EncodeMessage(recordValue, 1, tc.format) - - switch tc.format { - case FormatAvro: - // Avro should work (but will fail due to no registry) - if err == nil { - t.Error("Expected error for Avro without registry setup") - } - case FormatProtobuf: - // Protobuf should fail gracefully - if err == nil { - t.Error("Expected error for Protobuf in Phase 7") - } - if err.Error() != "failed to get schema for encoding: schema registry health check failed with status 404" { - // This is expected - we don't have a real registry - } - case FormatJSONSchema: - // JSON Schema should fail gracefully - if err == nil { - t.Error("Expected error for JSON Schema in Phase 7") - } - expectedErr := "JSON Schema encoding not yet implemented (Phase 7)" - if err.Error() != "failed to get schema for encoding: schema registry health check failed with status 404" { - // This is also expected due to registry issues - } - _ = expectedErr // Use the variable to avoid unused warning - } - }) - } -} - -func TestConfluentEnvelope_RoundTrip(t *testing.T) { - // Test that Confluent envelope creation and parsing work correctly - - testCases := []struct { - name string - format Format - schemaID uint32 - indexes []int - payload []byte - }{ - { - name: "Avro message", - format: FormatAvro, - schemaID: 1, - indexes: nil, - payload: []byte("avro-payload"), - }, - { - name: "Protobuf message with indexes", - format: FormatProtobuf, - schemaID: 2, - indexes: nil, // TODO: Implement proper Protobuf index handling - payload: []byte("protobuf-payload"), - }, - { - name: "JSON Schema message", - format: FormatJSONSchema, - schemaID: 3, - indexes: nil, - payload: []byte("json-payload"), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create envelope - envelopeBytes := CreateConfluentEnvelope(tc.format, tc.schemaID, tc.indexes, tc.payload) - - // Parse envelope - parsedEnvelope, ok := ParseConfluentEnvelope(envelopeBytes) - if !ok { - t.Fatal("Failed to parse created envelope") - } - - // Verify schema ID - if parsedEnvelope.SchemaID != tc.schemaID { - t.Errorf("Expected schema ID %d, got %d", tc.schemaID, parsedEnvelope.SchemaID) - } - - // Verify payload - if string(parsedEnvelope.Payload) != string(tc.payload) { - t.Errorf("Expected payload %s, got %s", string(tc.payload), string(parsedEnvelope.Payload)) - } - - // For Protobuf, verify indexes (if any) - if tc.format == FormatProtobuf && len(tc.indexes) > 0 { - if len(parsedEnvelope.Indexes) != len(tc.indexes) { - t.Errorf("Expected %d indexes, got %d", len(tc.indexes), len(parsedEnvelope.Indexes)) - } else { - for i, expectedIndex := range tc.indexes { - if parsedEnvelope.Indexes[i] != expectedIndex { - t.Errorf("Expected index[%d]=%d, got %d", i, expectedIndex, parsedEnvelope.Indexes[i]) - } - } - } - } - - t.Logf("Successfully round-tripped %s envelope: %d bytes", tc.name, len(envelopeBytes)) - }) - } -} - -func TestSchemaMetadata_Preservation(t *testing.T) { - // Test that schema metadata is properly preserved through the reconstruction process - - envelope := &ConfluentEnvelope{ - Format: FormatAvro, - SchemaID: 42, - Indexes: []int{1, 2, 3}, - Payload: []byte("test-payload"), - } - - // Get metadata - metadata := envelope.Metadata() - - // Verify metadata contents - expectedMetadata := map[string]string{ - "schema_format": "AVRO", - "schema_id": "42", - "protobuf_indexes": "1,2,3", - } - - for key, expectedValue := range expectedMetadata { - if metadata[key] != expectedValue { - t.Errorf("Expected metadata[%s]=%s, got %s", key, expectedValue, metadata[key]) - } - } - - // Test metadata reconstruction - reconstructedFormat := FormatUnknown - switch metadata["schema_format"] { - case "AVRO": - reconstructedFormat = FormatAvro - case "PROTOBUF": - reconstructedFormat = FormatProtobuf - case "JSON_SCHEMA": - reconstructedFormat = FormatJSONSchema - } - - if reconstructedFormat != envelope.Format { - t.Errorf("Failed to reconstruct format from metadata: expected %v, got %v", - envelope.Format, reconstructedFormat) - } - - t.Log("Successfully preserved and reconstructed schema metadata") -} - -// Benchmark tests for reconstruction performance -func BenchmarkSchemaReconstruction_Avro(b *testing.B) { - // Setup - testMap := map[string]interface{}{ - "id": int32(123), - "name": "John Doe", - } - recordValue := MapToRecordValue(testMap) - - config := ManagerConfig{ - RegistryURL: "http://localhost:8081", - } - - manager, err := NewManager(config) - if err != nil { - b.Skip("Skipping benchmark - no registry available") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - // This will fail without proper registry setup, but measures the overhead - _, _ = manager.EncodeMessage(recordValue, 1, FormatAvro) - } -} - -func BenchmarkConfluentEnvelope_Creation(b *testing.B) { - payload := []byte("test-payload-for-benchmarking") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = CreateConfluentEnvelope(FormatAvro, 1, nil, payload) - } -} - -func BenchmarkConfluentEnvelope_Parsing(b *testing.B) { - envelope := CreateConfluentEnvelope(FormatAvro, 1, nil, []byte("test-payload")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = ParseConfluentEnvelope(envelope) - } -} diff --git a/weed/mq/kafka/schema/registry_client.go b/weed/mq/kafka/schema/registry_client.go deleted file mode 100644 index 8be7fbb79..000000000 --- a/weed/mq/kafka/schema/registry_client.go +++ /dev/null @@ -1,381 +0,0 @@ -package schema - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "sync" - "time" -) - -// RegistryClient provides access to a Confluent Schema Registry -type RegistryClient struct { - baseURL string - httpClient *http.Client - - // Caching - schemaCache map[uint32]*CachedSchema // schema ID -> schema - subjectCache map[string]*CachedSubject // subject -> latest version info - negativeCache map[string]time.Time // subject -> time when 404 was cached - cacheMu sync.RWMutex - cacheTTL time.Duration - negativeCacheTTL time.Duration // TTL for negative (404) cache entries -} - -// CachedSchema represents a cached schema with metadata -type CachedSchema struct { - ID uint32 `json:"id"` - Schema string `json:"schema"` - Subject string `json:"subject"` - Version int `json:"version"` - Format Format `json:"-"` // Derived from schema content - CachedAt time.Time `json:"-"` -} - -// CachedSubject represents cached subject information -type CachedSubject struct { - Subject string `json:"subject"` - LatestID uint32 `json:"id"` - Version int `json:"version"` - Schema string `json:"schema"` - CachedAt time.Time `json:"-"` -} - -// RegistryConfig holds configuration for the Schema Registry client -type RegistryConfig struct { - URL string - Username string // Optional basic auth - Password string // Optional basic auth - Timeout time.Duration - CacheTTL time.Duration - MaxRetries int -} - -// NewRegistryClient creates a new Schema Registry client -func NewRegistryClient(config RegistryConfig) *RegistryClient { - if config.Timeout == 0 { - config.Timeout = 30 * time.Second - } - if config.CacheTTL == 0 { - config.CacheTTL = 5 * time.Minute - } - - httpClient := &http.Client{ - Timeout: config.Timeout, - } - - return &RegistryClient{ - baseURL: config.URL, - httpClient: httpClient, - schemaCache: make(map[uint32]*CachedSchema), - subjectCache: make(map[string]*CachedSubject), - negativeCache: make(map[string]time.Time), - cacheTTL: config.CacheTTL, - negativeCacheTTL: 2 * time.Minute, // Cache 404s for 2 minutes - } -} - -// GetSchemaByID retrieves a schema by its ID -func (rc *RegistryClient) GetSchemaByID(schemaID uint32) (*CachedSchema, error) { - // Check cache first - rc.cacheMu.RLock() - if cached, exists := rc.schemaCache[schemaID]; exists { - if time.Since(cached.CachedAt) < rc.cacheTTL { - rc.cacheMu.RUnlock() - return cached, nil - } - } - rc.cacheMu.RUnlock() - - // Fetch from registry - url := fmt.Sprintf("%s/schemas/ids/%d", rc.baseURL, schemaID) - resp, err := rc.httpClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to fetch schema %d: %w", schemaID, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("schema registry error %d: %s", resp.StatusCode, string(body)) - } - - var schemaResp struct { - Schema string `json:"schema"` - Subject string `json:"subject"` - Version int `json:"version"` - } - - if err := json.NewDecoder(resp.Body).Decode(&schemaResp); err != nil { - return nil, fmt.Errorf("failed to decode schema response: %w", err) - } - - // Determine format from schema content - format := rc.detectSchemaFormat(schemaResp.Schema) - - cached := &CachedSchema{ - ID: schemaID, - Schema: schemaResp.Schema, - Subject: schemaResp.Subject, - Version: schemaResp.Version, - Format: format, - CachedAt: time.Now(), - } - - // Update cache - rc.cacheMu.Lock() - rc.schemaCache[schemaID] = cached - rc.cacheMu.Unlock() - - return cached, nil -} - -// GetLatestSchema retrieves the latest schema for a subject -func (rc *RegistryClient) GetLatestSchema(subject string) (*CachedSubject, error) { - // Check positive cache first - rc.cacheMu.RLock() - if cached, exists := rc.subjectCache[subject]; exists { - if time.Since(cached.CachedAt) < rc.cacheTTL { - rc.cacheMu.RUnlock() - return cached, nil - } - } - - // Check negative cache (404 cache) - if cachedAt, exists := rc.negativeCache[subject]; exists { - if time.Since(cachedAt) < rc.negativeCacheTTL { - rc.cacheMu.RUnlock() - return nil, fmt.Errorf("schema registry error 404: subject not found (cached)") - } - } - rc.cacheMu.RUnlock() - - // Fetch from registry - url := fmt.Sprintf("%s/subjects/%s/versions/latest", rc.baseURL, subject) - resp, err := rc.httpClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to fetch latest schema for %s: %w", subject, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - - // Cache 404 responses to avoid repeated lookups - if resp.StatusCode == http.StatusNotFound { - rc.cacheMu.Lock() - rc.negativeCache[subject] = time.Now() - rc.cacheMu.Unlock() - } - - return nil, fmt.Errorf("schema registry error %d: %s", resp.StatusCode, string(body)) - } - - var schemaResp struct { - ID uint32 `json:"id"` - Schema string `json:"schema"` - Subject string `json:"subject"` - Version int `json:"version"` - } - - if err := json.NewDecoder(resp.Body).Decode(&schemaResp); err != nil { - return nil, fmt.Errorf("failed to decode schema response: %w", err) - } - - cached := &CachedSubject{ - Subject: subject, - LatestID: schemaResp.ID, - Version: schemaResp.Version, - Schema: schemaResp.Schema, - CachedAt: time.Now(), - } - - // Update cache and clear negative cache entry - rc.cacheMu.Lock() - rc.subjectCache[subject] = cached - delete(rc.negativeCache, subject) // Clear any cached 404 - rc.cacheMu.Unlock() - - return cached, nil -} - -// RegisterSchema registers a new schema for a subject -func (rc *RegistryClient) RegisterSchema(subject, schema string) (uint32, error) { - url := fmt.Sprintf("%s/subjects/%s/versions", rc.baseURL, subject) - - reqBody := map[string]string{ - "schema": schema, - } - - jsonData, err := json.Marshal(reqBody) - if err != nil { - return 0, fmt.Errorf("failed to marshal schema request: %w", err) - } - - resp, err := rc.httpClient.Post(url, "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return 0, fmt.Errorf("failed to register schema: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return 0, fmt.Errorf("schema registry error %d: %s", resp.StatusCode, string(body)) - } - - var regResp struct { - ID uint32 `json:"id"` - } - - if err := json.NewDecoder(resp.Body).Decode(®Resp); err != nil { - return 0, fmt.Errorf("failed to decode registration response: %w", err) - } - - // Invalidate caches for this subject - rc.cacheMu.Lock() - delete(rc.subjectCache, subject) - delete(rc.negativeCache, subject) // Clear any cached 404 - // Note: we don't cache the new schema here since we don't have full metadata - rc.cacheMu.Unlock() - - return regResp.ID, nil -} - -// CheckCompatibility checks if a schema is compatible with the subject -func (rc *RegistryClient) CheckCompatibility(subject, schema string) (bool, error) { - url := fmt.Sprintf("%s/compatibility/subjects/%s/versions/latest", rc.baseURL, subject) - - reqBody := map[string]string{ - "schema": schema, - } - - jsonData, err := json.Marshal(reqBody) - if err != nil { - return false, fmt.Errorf("failed to marshal compatibility request: %w", err) - } - - resp, err := rc.httpClient.Post(url, "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return false, fmt.Errorf("failed to check compatibility: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return false, fmt.Errorf("schema registry error %d: %s", resp.StatusCode, string(body)) - } - - var compatResp struct { - IsCompatible bool `json:"is_compatible"` - } - - if err := json.NewDecoder(resp.Body).Decode(&compatResp); err != nil { - return false, fmt.Errorf("failed to decode compatibility response: %w", err) - } - - return compatResp.IsCompatible, nil -} - -// ListSubjects returns all subjects in the registry -func (rc *RegistryClient) ListSubjects() ([]string, error) { - url := fmt.Sprintf("%s/subjects", rc.baseURL) - resp, err := rc.httpClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to list subjects: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("schema registry error %d: %s", resp.StatusCode, string(body)) - } - - var subjects []string - if err := json.NewDecoder(resp.Body).Decode(&subjects); err != nil { - return nil, fmt.Errorf("failed to decode subjects response: %w", err) - } - - return subjects, nil -} - -// ClearCache clears all cached schemas and subjects -func (rc *RegistryClient) ClearCache() { - rc.cacheMu.Lock() - defer rc.cacheMu.Unlock() - - rc.schemaCache = make(map[uint32]*CachedSchema) - rc.subjectCache = make(map[string]*CachedSubject) - rc.negativeCache = make(map[string]time.Time) -} - -// GetCacheStats returns cache statistics -func (rc *RegistryClient) GetCacheStats() (schemaCount, subjectCount, negativeCacheCount int) { - rc.cacheMu.RLock() - defer rc.cacheMu.RUnlock() - - return len(rc.schemaCache), len(rc.subjectCache), len(rc.negativeCache) -} - -// detectSchemaFormat attempts to determine the schema format from content -func (rc *RegistryClient) detectSchemaFormat(schema string) Format { - // Try to parse as JSON first (Avro schemas are JSON) - var jsonObj interface{} - if err := json.Unmarshal([]byte(schema), &jsonObj); err == nil { - // Check for Avro-specific fields - if schemaMap, ok := jsonObj.(map[string]interface{}); ok { - if schemaType, exists := schemaMap["type"]; exists { - if typeStr, ok := schemaType.(string); ok { - // Common Avro types - avroTypes := []string{"record", "enum", "array", "map", "union", "fixed"} - for _, avroType := range avroTypes { - if typeStr == avroType { - return FormatAvro - } - } - // Common JSON Schema types (that are not Avro types) - // Note: "string" is ambiguous - it could be Avro primitive or JSON Schema - // We need to check other indicators first - jsonSchemaTypes := []string{"object", "number", "integer", "boolean", "null"} - for _, jsonSchemaType := range jsonSchemaTypes { - if typeStr == jsonSchemaType { - return FormatJSONSchema - } - } - } - } - // Check for JSON Schema indicators - if _, exists := schemaMap["$schema"]; exists { - return FormatJSONSchema - } - // Check for JSON Schema properties field - if _, exists := schemaMap["properties"]; exists { - return FormatJSONSchema - } - } - // Default JSON-based schema to Avro only if it doesn't look like JSON Schema - return FormatAvro - } - - // Check for Protobuf (typically not JSON) - // Protobuf schemas in Schema Registry are usually stored as descriptors - // For now, assume non-JSON schemas are Protobuf - return FormatProtobuf -} - -// HealthCheck verifies the registry is accessible -func (rc *RegistryClient) HealthCheck() error { - url := fmt.Sprintf("%s/subjects", rc.baseURL) - resp, err := rc.httpClient.Get(url) - if err != nil { - return fmt.Errorf("schema registry health check failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("schema registry health check failed with status %d", resp.StatusCode) - } - - return nil -} diff --git a/weed/mq/kafka/schema/registry_client_test.go b/weed/mq/kafka/schema/registry_client_test.go deleted file mode 100644 index 45728959c..000000000 --- a/weed/mq/kafka/schema/registry_client_test.go +++ /dev/null @@ -1,362 +0,0 @@ -package schema - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func TestNewRegistryClient(t *testing.T) { - config := RegistryConfig{ - URL: "http://localhost:8081", - } - - client := NewRegistryClient(config) - - if client.baseURL != config.URL { - t.Errorf("Expected baseURL %s, got %s", config.URL, client.baseURL) - } - - if client.cacheTTL != 5*time.Minute { - t.Errorf("Expected default cacheTTL 5m, got %v", client.cacheTTL) - } - - if client.httpClient.Timeout != 30*time.Second { - t.Errorf("Expected default timeout 30s, got %v", client.httpClient.Timeout) - } -} - -func TestRegistryClient_GetSchemaByID(t *testing.T) { - // Mock server - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/schemas/ids/1" { - response := map[string]interface{}{ - "schema": `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - } else if r.URL.Path == "/schemas/ids/999" { - w.WriteHeader(http.StatusNotFound) - w.Write([]byte(`{"error_code":40403,"message":"Schema not found"}`)) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := RegistryConfig{ - URL: server.URL, - CacheTTL: 1 * time.Minute, - } - client := NewRegistryClient(config) - - t.Run("successful fetch", func(t *testing.T) { - schema, err := client.GetSchemaByID(1) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if schema.ID != 1 { - t.Errorf("Expected schema ID 1, got %d", schema.ID) - } - - if schema.Subject != "user-value" { - t.Errorf("Expected subject 'user-value', got %s", schema.Subject) - } - - if schema.Format != FormatAvro { - t.Errorf("Expected Avro format, got %v", schema.Format) - } - }) - - t.Run("schema not found", func(t *testing.T) { - _, err := client.GetSchemaByID(999) - if err == nil { - t.Fatal("Expected error for non-existent schema") - } - }) - - t.Run("cache hit", func(t *testing.T) { - // First call should cache the result - schema1, err := client.GetSchemaByID(1) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - // Second call should hit cache (same timestamp) - schema2, err := client.GetSchemaByID(1) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if schema1.CachedAt != schema2.CachedAt { - t.Error("Expected cache hit with same timestamp") - } - }) -} - -func TestRegistryClient_GetLatestSchema(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/subjects/user-value/versions/latest" { - response := map[string]interface{}{ - "id": uint32(1), - "schema": `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - schema, err := client.GetLatestSchema("user-value") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if schema.LatestID != 1 { - t.Errorf("Expected schema ID 1, got %d", schema.LatestID) - } - - if schema.Subject != "user-value" { - t.Errorf("Expected subject 'user-value', got %s", schema.Subject) - } -} - -func TestRegistryClient_RegisterSchema(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" && r.URL.Path == "/subjects/test-value/versions" { - response := map[string]interface{}{ - "id": uint32(123), - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - schemaStr := `{"type":"record","name":"Test","fields":[{"name":"id","type":"int"}]}` - id, err := client.RegisterSchema("test-value", schemaStr) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if id != 123 { - t.Errorf("Expected schema ID 123, got %d", id) - } -} - -func TestRegistryClient_CheckCompatibility(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" && r.URL.Path == "/compatibility/subjects/test-value/versions/latest" { - response := map[string]interface{}{ - "is_compatible": true, - } - json.NewEncoder(w).Encode(response) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - schemaStr := `{"type":"record","name":"Test","fields":[{"name":"id","type":"int"}]}` - compatible, err := client.CheckCompatibility("test-value", schemaStr) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if !compatible { - t.Error("Expected schema to be compatible") - } -} - -func TestRegistryClient_ListSubjects(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/subjects" { - subjects := []string{"user-value", "order-value", "product-key"} - json.NewEncoder(w).Encode(subjects) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - subjects, err := client.ListSubjects() - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - expectedSubjects := []string{"user-value", "order-value", "product-key"} - if len(subjects) != len(expectedSubjects) { - t.Errorf("Expected %d subjects, got %d", len(expectedSubjects), len(subjects)) - } - - for i, expected := range expectedSubjects { - if subjects[i] != expected { - t.Errorf("Expected subject %s, got %s", expected, subjects[i]) - } - } -} - -func TestRegistryClient_DetectSchemaFormat(t *testing.T) { - config := RegistryConfig{URL: "http://localhost:8081"} - client := NewRegistryClient(config) - - tests := []struct { - name string - schema string - expected Format - }{ - { - name: "Avro record schema", - schema: `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`, - expected: FormatAvro, - }, - { - name: "Avro enum schema", - schema: `{"type":"enum","name":"Color","symbols":["RED","GREEN","BLUE"]}`, - expected: FormatAvro, - }, - { - name: "JSON Schema", - schema: `{"$schema":"http://json-schema.org/draft-07/schema#","type":"object"}`, - expected: FormatJSONSchema, - }, - { - name: "Protobuf (non-JSON)", - schema: "syntax = \"proto3\"; message User { int32 id = 1; }", - expected: FormatProtobuf, - }, - { - name: "Simple Avro primitive", - schema: `{"type":"string"}`, - expected: FormatAvro, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - format := client.detectSchemaFormat(tt.schema) - if format != tt.expected { - t.Errorf("Expected format %v, got %v", tt.expected, format) - } - }) - } -} - -func TestRegistryClient_CacheManagement(t *testing.T) { - config := RegistryConfig{ - URL: "http://localhost:8081", - CacheTTL: 100 * time.Millisecond, // Short TTL for testing - } - client := NewRegistryClient(config) - - // Add some cache entries manually - client.schemaCache[1] = &CachedSchema{ - ID: 1, - Schema: "test", - CachedAt: time.Now(), - } - client.subjectCache["test"] = &CachedSubject{ - Subject: "test", - CachedAt: time.Now(), - } - - // Check cache stats - schemaCount, subjectCount, _ := client.GetCacheStats() - if schemaCount != 1 || subjectCount != 1 { - t.Errorf("Expected 1 schema and 1 subject in cache, got %d and %d", schemaCount, subjectCount) - } - - // Clear cache - client.ClearCache() - schemaCount, subjectCount, _ = client.GetCacheStats() - if schemaCount != 0 || subjectCount != 0 { - t.Errorf("Expected empty cache after clear, got %d schemas and %d subjects", schemaCount, subjectCount) - } -} - -func TestRegistryClient_HealthCheck(t *testing.T) { - t.Run("healthy registry", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/subjects" { - json.NewEncoder(w).Encode([]string{}) - } - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - err := client.HealthCheck() - if err != nil { - t.Errorf("Expected healthy registry, got error: %v", err) - } - }) - - t.Run("unhealthy registry", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - err := client.HealthCheck() - if err == nil { - t.Error("Expected error for unhealthy registry") - } - }) -} - -// Benchmark tests -func BenchmarkRegistryClient_GetSchemaByID(b *testing.B) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - response := map[string]interface{}{ - "schema": `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}`, - "subject": "user-value", - "version": 1, - } - json.NewEncoder(w).Encode(response) - })) - defer server.Close() - - config := RegistryConfig{URL: server.URL} - client := NewRegistryClient(config) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = client.GetSchemaByID(1) - } -} - -func BenchmarkRegistryClient_DetectSchemaFormat(b *testing.B) { - config := RegistryConfig{URL: "http://localhost:8081"} - client := NewRegistryClient(config) - - avroSchema := `{"type":"record","name":"User","fields":[{"name":"id","type":"int"}]}` - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = client.detectSchemaFormat(avroSchema) - } -} diff --git a/weed/mq/logstore/log_to_parquet.go b/weed/mq/logstore/log_to_parquet.go deleted file mode 100644 index bfd5ff10e..000000000 --- a/weed/mq/logstore/log_to_parquet.go +++ /dev/null @@ -1,612 +0,0 @@ -package logstore - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/parquet-go/parquet-go" - "github.com/parquet-go/parquet-go/compress/zstd" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "google.golang.org/protobuf/proto" -) - -const ( - SW_COLUMN_NAME_TS = "_ts_ns" - SW_COLUMN_NAME_KEY = "_key" - SW_COLUMN_NAME_OFFSET = "_offset" - SW_COLUMN_NAME_VALUE = "_value" -) - -func CompactTopicPartitions(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration, recordType *schema_pb.RecordType, preference *operation.StoragePreference) error { - // list the topic partition versions - topicVersions, err := collectTopicVersions(filerClient, t, timeAgo) - if err != nil { - return fmt.Errorf("list topic files: %w", err) - } - - // compact the partitions - for _, topicVersion := range topicVersions { - partitions, err := collectTopicVersionsPartitions(filerClient, t, topicVersion) - if err != nil { - return fmt.Errorf("list partitions %s/%s/%s: %v", t.Namespace, t.Name, topicVersion, err) - } - for _, partition := range partitions { - err := compactTopicPartition(filerClient, t, timeAgo, recordType, partition, preference) - if err != nil { - return fmt.Errorf("compact partition %s/%s/%s/%s: %v", t.Namespace, t.Name, topicVersion, partition, err) - } - } - } - return nil -} - -func collectTopicVersions(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration) (partitionVersions []time.Time, err error) { - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(t.Dir()), "", func(entry *filer_pb.Entry, isLast bool) error { - t, err := topic.ParseTopicVersion(entry.Name) - if err != nil { - // skip non-partition directories - return nil - } - if t.Unix() < time.Now().Unix()-int64(timeAgo/time.Second) { - partitionVersions = append(partitionVersions, t) - } - return nil - }) - return -} - -func collectTopicVersionsPartitions(filerClient filer_pb.FilerClient, t topic.Topic, topicVersion time.Time) (partitions []topic.Partition, err error) { - version := topicVersion.Format(topic.PartitionGenerationFormat) - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(t.Dir()).Child(version), "", func(entry *filer_pb.Entry, isLast bool) error { - if !entry.IsDirectory { - return nil - } - start, stop := topic.ParsePartitionBoundary(entry.Name) - if start != stop { - partitions = append(partitions, topic.Partition{ - RangeStart: start, - RangeStop: stop, - RingSize: topic.PartitionCount, - UnixTimeNs: topicVersion.UnixNano(), - }) - } - return nil - }) - return -} - -func compactTopicPartition(filerClient filer_pb.FilerClient, t topic.Topic, timeAgo time.Duration, recordType *schema_pb.RecordType, partition topic.Partition, preference *operation.StoragePreference) error { - partitionDir := topic.PartitionDir(t, partition) - - // compact the partition directory - return compactTopicPartitionDir(filerClient, t.Name, partitionDir, timeAgo, recordType, preference) -} - -func compactTopicPartitionDir(filerClient filer_pb.FilerClient, topicName, partitionDir string, timeAgo time.Duration, recordType *schema_pb.RecordType, preference *operation.StoragePreference) error { - // read all existing parquet files - minTsNs, maxTsNs, err := readAllParquetFiles(filerClient, partitionDir) - if err != nil { - return err - } - - // read all log files - logFiles, err := readAllLogFiles(filerClient, partitionDir, timeAgo, minTsNs, maxTsNs) - if err != nil { - return err - } - if len(logFiles) == 0 { - return nil - } - - // divide log files into groups of 128MB - logFileGroups := groupFilesBySize(logFiles, 128*1024*1024) - - // write to parquet file - parquetLevels, err := schema.ToParquetLevels(recordType) - if err != nil { - return fmt.Errorf("ToParquetLevels failed %+v: %v", recordType, err) - } - - // create a parquet schema - parquetSchema, err := schema.ToParquetSchema(topicName, recordType) - if err != nil { - return fmt.Errorf("ToParquetSchema failed: %w", err) - } - - // TODO parallelize the writing - for _, logFileGroup := range logFileGroups { - if err = writeLogFilesToParquet(filerClient, partitionDir, recordType, logFileGroup, parquetSchema, parquetLevels, preference); err != nil { - return err - } - } - - return nil -} - -func groupFilesBySize(logFiles []*filer_pb.Entry, maxGroupSize int64) (logFileGroups [][]*filer_pb.Entry) { - var logFileGroup []*filer_pb.Entry - var groupSize int64 - for _, logFile := range logFiles { - if groupSize+int64(logFile.Attributes.FileSize) > maxGroupSize { - logFileGroups = append(logFileGroups, logFileGroup) - logFileGroup = nil - groupSize = 0 - } - logFileGroup = append(logFileGroup, logFile) - groupSize += int64(logFile.Attributes.FileSize) - } - if len(logFileGroup) > 0 { - logFileGroups = append(logFileGroups, logFileGroup) - } - return -} - -func readAllLogFiles(filerClient filer_pb.FilerClient, partitionDir string, timeAgo time.Duration, minTsNs, maxTsNs int64) (logFiles []*filer_pb.Entry, err error) { - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionDir), "", func(entry *filer_pb.Entry, isLast bool) error { - if strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - if entry.Attributes.Crtime > time.Now().Unix()-int64(timeAgo/time.Second) { - return nil - } - logTime, err := time.Parse(topic.TIME_FORMAT, entry.Name) - if err != nil { - // glog.Warningf("parse log time %s: %v", entry.Name, err) - return nil - } - if maxTsNs > 0 && logTime.UnixNano() <= maxTsNs { - return nil - } - logFiles = append(logFiles, entry) - return nil - }) - return -} - -func readAllParquetFiles(filerClient filer_pb.FilerClient, partitionDir string) (minTsNs, maxTsNs int64, err error) { - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionDir), "", func(entry *filer_pb.Entry, isLast bool) error { - if !strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - if len(entry.Extended) == 0 { - return nil - } - - // read min ts - minTsBytes := entry.Extended[mq.ExtendedAttrTimestampMin] - if len(minTsBytes) != 8 { - return nil - } - minTs := int64(binary.BigEndian.Uint64(minTsBytes)) - if minTsNs == 0 || minTs < minTsNs { - minTsNs = minTs - } - - // read max ts - maxTsBytes := entry.Extended[mq.ExtendedAttrTimestampMax] - if len(maxTsBytes) != 8 { - return nil - } - maxTs := int64(binary.BigEndian.Uint64(maxTsBytes)) - if maxTsNs == 0 || maxTs > maxTsNs { - maxTsNs = maxTs - } - return nil - }) - return -} - -// isSchemalessRecordType checks if the recordType represents a schema-less topic -// Schema-less topics only have system fields: _ts_ns, _key, and _value -func isSchemalessRecordType(recordType *schema_pb.RecordType) bool { - if recordType == nil { - return false - } - - // Count only non-system data fields (exclude _ts_ns and _key which are always present) - // Schema-less topics should only have _value as the data field - hasValue := false - dataFieldCount := 0 - - for _, field := range recordType.Fields { - switch field.Name { - case SW_COLUMN_NAME_TS, SW_COLUMN_NAME_KEY, SW_COLUMN_NAME_OFFSET: - // System fields - ignore - continue - case SW_COLUMN_NAME_VALUE: - hasValue = true - dataFieldCount++ - default: - // Any other field means it's not schema-less - dataFieldCount++ - } - } - - // Schema-less = only has _value field as the data field (plus system fields) - return hasValue && dataFieldCount == 1 -} - -func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir string, recordType *schema_pb.RecordType, logFileGroups []*filer_pb.Entry, parquetSchema *parquet.Schema, parquetLevels *schema.ParquetLevels, preference *operation.StoragePreference) (err error) { - - tempFile, err := os.CreateTemp(".", "t*.parquet") - if err != nil { - return fmt.Errorf("create temp file: %w", err) - } - defer func() { - tempFile.Close() - os.Remove(tempFile.Name()) - }() - - // Enable column statistics for fast aggregation queries - writer := parquet.NewWriter(tempFile, parquetSchema, - parquet.Compression(&zstd.Codec{Level: zstd.DefaultLevel}), - parquet.DataPageStatistics(true), // Enable column statistics - ) - rowBuilder := parquet.NewRowBuilder(parquetSchema) - - var startTsNs, stopTsNs int64 - var minOffset, maxOffset int64 - var hasOffsets bool - isSchemaless := isSchemalessRecordType(recordType) - - for _, logFile := range logFileGroups { - var rows []parquet.Row - if err := iterateLogEntries(filerClient, logFile, func(entry *filer_pb.LogEntry) error { - - // Skip control entries without actual data (same logic as read operations) - if isControlEntry(entry) { - return nil - } - - if startTsNs == 0 { - startTsNs = entry.TsNs - } - stopTsNs = entry.TsNs - - // Track offset ranges for Kafka integration - if entry.Offset > 0 { - if !hasOffsets { - minOffset = entry.Offset - maxOffset = entry.Offset - hasOffsets = true - } else { - if entry.Offset < minOffset { - minOffset = entry.Offset - } - if entry.Offset > maxOffset { - maxOffset = entry.Offset - } - } - } - - // write to parquet file - rowBuilder.Reset() - - record := &schema_pb.RecordValue{} - - if isSchemaless { - // For schema-less topics, put raw entry.Data into _value field - record.Fields = make(map[string]*schema_pb.Value) - record.Fields[SW_COLUMN_NAME_VALUE] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{ - BytesValue: entry.Data, - }, - } - } else { - // For schematized topics, unmarshal entry.Data as RecordValue - if err := proto.Unmarshal(entry.Data, record); err != nil { - return fmt.Errorf("unmarshal record value: %w", err) - } - - // Initialize Fields map if nil (prevents nil map assignment panic) - if record.Fields == nil { - record.Fields = make(map[string]*schema_pb.Value) - } - - // Add offset field to parquet records for native offset support - // ASSUMPTION: LogEntry.Offset field is populated by broker during message publishing - record.Fields[SW_COLUMN_NAME_OFFSET] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{ - Int64Value: entry.Offset, - }, - } - } - - // Add system columns (for both schematized and schema-less topics) - record.Fields[SW_COLUMN_NAME_TS] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{ - Int64Value: entry.TsNs, - }, - } - - // Handle nil key bytes to prevent growslice panic in parquet-go - keyBytes := entry.Key - if keyBytes == nil { - keyBytes = []byte{} // Use empty slice instead of nil - } - record.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{ - BytesValue: keyBytes, - }, - } - - if err := schema.AddRecordValue(rowBuilder, recordType, parquetLevels, record); err != nil { - return fmt.Errorf("add record value: %w", err) - } - - // Build row and normalize any nil ByteArray values to empty slices - row := rowBuilder.Row() - for i, value := range row { - if value.Kind() == parquet.ByteArray { - if value.ByteArray() == nil { - row[i] = parquet.ByteArrayValue([]byte{}) - } - } - } - - rows = append(rows, row) - - return nil - - }); err != nil { - return fmt.Errorf("iterate log entry %v/%v: %w", partitionDir, logFile.Name, err) - } - - // Nil ByteArray handling is done during row creation - - // Write all rows in a single call - if _, err := writer.WriteRows(rows); err != nil { - return fmt.Errorf("write rows: %w", err) - } - } - - if err := writer.Close(); err != nil { - return fmt.Errorf("close writer: %w", err) - } - - // write to parquet file to partitionDir - parquetFileName := fmt.Sprintf("%s.parquet", time.Unix(0, startTsNs).UTC().Format("2006-01-02-15-04-05")) - - // Collect source log file names and buffer_start metadata for deduplication - var sourceLogFiles []string - var earliestBufferStart int64 - for _, logFile := range logFileGroups { - sourceLogFiles = append(sourceLogFiles, logFile.Name) - - // Extract buffer_start from log file metadata - if bufferStart := getBufferStartFromLogFile(logFile); bufferStart > 0 { - if earliestBufferStart == 0 || bufferStart < earliestBufferStart { - earliestBufferStart = bufferStart - } - } - } - - if err := saveParquetFileToPartitionDir(filerClient, tempFile, partitionDir, parquetFileName, preference, startTsNs, stopTsNs, sourceLogFiles, earliestBufferStart, minOffset, maxOffset, hasOffsets); err != nil { - return fmt.Errorf("save parquet file %s: %v", parquetFileName, err) - } - - return nil - -} - -func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile *os.File, partitionDir, parquetFileName string, preference *operation.StoragePreference, startTsNs, stopTsNs int64, sourceLogFiles []string, earliestBufferStart int64, minOffset, maxOffset int64, hasOffsets bool) error { - uploader, err := operation.NewUploader() - if err != nil { - return fmt.Errorf("new uploader: %w", err) - } - - // get file size - fileInfo, err := sourceFile.Stat() - if err != nil { - return fmt.Errorf("stat source file: %w", err) - } - - // upload file in chunks - chunkSize := int64(4 * 1024 * 1024) - chunkCount := (fileInfo.Size() + chunkSize - 1) / chunkSize - entry := &filer_pb.Entry{ - Name: parquetFileName, - Attributes: &filer_pb.FuseAttributes{ - Crtime: time.Now().Unix(), - Mtime: time.Now().Unix(), - FileMode: uint32(os.FileMode(0644)), - FileSize: uint64(fileInfo.Size()), - Mime: "application/vnd.apache.parquet", - }, - } - entry.Extended = make(map[string][]byte) - minTsBytes := make([]byte, 8) - binary.BigEndian.PutUint64(minTsBytes, uint64(startTsNs)) - entry.Extended[mq.ExtendedAttrTimestampMin] = minTsBytes - maxTsBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxTsBytes, uint64(stopTsNs)) - entry.Extended[mq.ExtendedAttrTimestampMax] = maxTsBytes - - // Add offset range metadata for Kafka integration (same as regular log files) - if hasOffsets && minOffset > 0 && maxOffset >= minOffset { - minOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(minOffsetBytes, uint64(minOffset)) - entry.Extended[mq.ExtendedAttrOffsetMin] = minOffsetBytes - - maxOffsetBytes := make([]byte, 8) - binary.BigEndian.PutUint64(maxOffsetBytes, uint64(maxOffset)) - entry.Extended[mq.ExtendedAttrOffsetMax] = maxOffsetBytes - } - - // Store source log files for deduplication (JSON-encoded list) - if len(sourceLogFiles) > 0 { - sourceLogFilesJson, _ := json.Marshal(sourceLogFiles) - entry.Extended[mq.ExtendedAttrSources] = sourceLogFilesJson - } - - // Store earliest buffer_start for precise broker deduplication - if earliestBufferStart > 0 { - bufferStartBytes := make([]byte, 8) - binary.BigEndian.PutUint64(bufferStartBytes, uint64(earliestBufferStart)) - entry.Extended[mq.ExtendedAttrBufferStart] = bufferStartBytes - } - - for i := int64(0); i < chunkCount; i++ { - fileId, uploadResult, err, _ := uploader.UploadWithRetry( - filerClient, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: preference.Replication, - Collection: preference.Collection, - TtlSec: 0, // TODO set ttl - DiskType: preference.DiskType, - Path: partitionDir + "/" + parquetFileName, - }, - &operation.UploadOption{ - Filename: parquetFileName, - Cipher: false, - IsInputCompressed: false, - MimeType: "application/vnd.apache.parquet", - PairMap: nil, - }, - func(host, fileId string) string { - return fmt.Sprintf("http://%s/%s", host, fileId) - }, - io.NewSectionReader(sourceFile, i*chunkSize, chunkSize), - ) - if err != nil { - return fmt.Errorf("upload chunk %d: %v", i, err) - } - if uploadResult.Error != "" { - return fmt.Errorf("upload result: %v", uploadResult.Error) - } - entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(fileId, i*chunkSize, time.Now().UnixNano())) - } - - // write the entry to partitionDir - if err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer_pb.CreateEntry(context.Background(), client, &filer_pb.CreateEntryRequest{ - Directory: partitionDir, - Entry: entry, - }) - }); err != nil { - return fmt.Errorf("create entry: %w", err) - } - - return nil -} - -func iterateLogEntries(filerClient filer_pb.FilerClient, logFile *filer_pb.Entry, eachLogEntryFn func(entry *filer_pb.LogEntry) error) error { - lookupFn := filer.LookupFn(filerClient) - _, err := eachFile(logFile, lookupFn, func(logEntry *filer_pb.LogEntry) (isDone bool, err error) { - if err := eachLogEntryFn(logEntry); err != nil { - return true, err - } - return false, nil - }) - return err -} - -func eachFile(entry *filer_pb.Entry, lookupFileIdFn func(ctx context.Context, fileId string) (targetUrls []string, err error), eachLogEntryFn log_buffer.EachLogEntryFuncType) (processedTsNs int64, err error) { - if len(entry.Content) > 0 { - // skip .offset files - return - } - var urlStrings []string - for _, chunk := range entry.Chunks { - if chunk.Size == 0 { - continue - } - if chunk.IsChunkManifest { - return - } - urlStrings, err = lookupFileIdFn(context.Background(), chunk.FileId) - if err != nil { - err = fmt.Errorf("lookup %s: %v", chunk.FileId, err) - return - } - if len(urlStrings) == 0 { - err = fmt.Errorf("no url found for %s", chunk.FileId) - return - } - - // try one of the urlString until util.Get(urlString) succeeds - var processed bool - for _, urlString := range urlStrings { - var data []byte - if data, _, err = util_http.Get(urlString); err == nil { - processed = true - if processedTsNs, err = eachChunk(data, eachLogEntryFn); err != nil { - return - } - break - } - } - if !processed { - err = fmt.Errorf("no data processed for %s %s", entry.Name, chunk.FileId) - return - } - - } - return -} - -func eachChunk(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType) (processedTsNs int64, err error) { - for pos := 0; pos+4 < len(buf); { - - size := util.BytesToUint32(buf[pos : pos+4]) - if pos+4+int(size) > len(buf) { - err = fmt.Errorf("reach each log chunk: read [%d,%d) from [0,%d)", pos, pos+int(size)+4, len(buf)) - return - } - entryData := buf[pos+4 : pos+4+int(size)] - - logEntry := &filer_pb.LogEntry{} - if err = proto.Unmarshal(entryData, logEntry); err != nil { - pos += 4 + int(size) - err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err) - return - } - - if _, err = eachLogEntryFn(logEntry); err != nil { - err = fmt.Errorf("process log entry %v: %w", logEntry, err) - return - } - - processedTsNs = logEntry.TsNs - - pos += 4 + int(size) - - } - - return -} - -// getBufferStartFromLogFile extracts the buffer_start index from log file extended metadata -func getBufferStartFromLogFile(logFile *filer_pb.Entry) int64 { - if logFile.Extended == nil { - return 0 - } - - // Parse buffer_start binary format - if startData, exists := logFile.Extended["buffer_start"]; exists { - if len(startData) == 8 { - startIndex := int64(binary.BigEndian.Uint64(startData)) - if startIndex > 0 { - return startIndex - } - } - } - - return 0 -} diff --git a/weed/mq/logstore/merged_read.go b/weed/mq/logstore/merged_read.go deleted file mode 100644 index c2e8e3caf..000000000 --- a/weed/mq/logstore/merged_read.go +++ /dev/null @@ -1,50 +0,0 @@ -package logstore - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -func GenMergedReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType { - fromParquetFn := GenParquetReadFunc(filerClient, t, p) - readLogDirectFn := GenLogOnDiskReadFunc(filerClient, t, p) - // Reversed order: live logs first (recent), then Parquet files (historical) - // This provides better performance for real-time analytics queries - return mergeReadFuncs(readLogDirectFn, fromParquetFn) -} - -func mergeReadFuncs(readLogDirectFn, fromParquetFn log_buffer.LogReadFromDiskFuncType) log_buffer.LogReadFromDiskFuncType { - // CRITICAL FIX: Removed stateful closure variables (exhaustedLiveLogs, lastProcessedPosition) - // These caused the function to skip disk reads on subsequent calls, leading to - // Schema Registry timeout when data was flushed after the first read attempt. - // The function must be stateless and check for data on EVERY call. - return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) { - // Always try reading from live logs first (recent data) - lastReadPosition, isDone, err = readLogDirectFn(startPosition, stopTsNs, eachLogEntryFn) - if isDone { - // For very early timestamps (like timestamp=1 for RESET_TO_EARLIEST), - // we want to continue to read from in-memory data - isDone = false - } - if err != nil { - return - } - - // If live logs returned data, update startPosition for parquet read - if lastReadPosition.Offset > startPosition.Offset || lastReadPosition.Time.After(startPosition.Time) { - startPosition = lastReadPosition - } - - // Then try reading from Parquet files (historical data) - lastReadPosition, isDone, err = fromParquetFn(startPosition, stopTsNs, eachLogEntryFn) - - if isDone { - // For very early timestamps (like timestamp=1 for RESET_TO_EARLIEST), - // parquet files won't exist, but we want to continue to in-memory data reading - isDone = false - } - - return - } -} diff --git a/weed/mq/logstore/read_log_from_disk.go b/weed/mq/logstore/read_log_from_disk.go deleted file mode 100644 index 86c8b40cc..000000000 --- a/weed/mq/logstore/read_log_from_disk.go +++ /dev/null @@ -1,340 +0,0 @@ -package logstore - -import ( - "context" - "encoding/binary" - "fmt" - "math" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "google.golang.org/protobuf/proto" -) - -func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType { - partitionDir := topic.PartitionDir(t, p) - - // Create a small cache for recently-read file chunks (3 files, 60s TTL) - // This significantly reduces Filer load when multiple consumers are catching up - fileCache := log_buffer.NewDiskBufferCache(3, 60*time.Second) - - lookupFileIdFn := filer.LookupFn(filerClient) - - eachChunkFn := func(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64, startOffset int64, isOffsetBased bool) (processedTsNs int64, err error) { - entriesSkipped := 0 - entriesProcessed := 0 - for pos := 0; pos+4 < len(buf); { - - size := util.BytesToUint32(buf[pos : pos+4]) - if pos+4+int(size) > len(buf) { - err = fmt.Errorf("GenLogOnDiskReadFunc: read [%d,%d) from [0,%d)", pos, pos+int(size)+4, len(buf)) - return - } - entryData := buf[pos+4 : pos+4+int(size)] - - logEntry := &filer_pb.LogEntry{} - if err = proto.Unmarshal(entryData, logEntry); err != nil { - pos += 4 + int(size) - err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err) - return - } - - // Filter by offset if this is an offset-based subscription - if isOffsetBased { - if logEntry.Offset < startOffset { - entriesSkipped++ - pos += 4 + int(size) - continue - } - } else { - // Filter by timestamp for timestamp-based subscriptions - if logEntry.TsNs <= starTsNs { - pos += 4 + int(size) - continue - } - if stopTsNs != 0 && logEntry.TsNs > stopTsNs { - println("stopTsNs", stopTsNs, "logEntry.TsNs", logEntry.TsNs) - return - } - } - - // fmt.Printf(" read logEntry: %v, ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC()) - if _, err = eachLogEntryFn(logEntry); err != nil { - err = fmt.Errorf("process log entry %v: %w", logEntry, err) - return - } - - processedTsNs = logEntry.TsNs - entriesProcessed++ - - pos += 4 + int(size) - - } - - return - } - - eachFileFn := func(entry *filer_pb.Entry, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64, startOffset int64, isOffsetBased bool) (processedTsNs int64, err error) { - if len(entry.Content) > 0 { - // skip .offset files - return - } - var urlStrings []string - for _, chunk := range entry.Chunks { - if chunk.Size == 0 { - continue - } - if chunk.IsChunkManifest { - glog.Warningf("this should not happen. unexpected chunk manifest in %s/%s", partitionDir, entry.Name) - return - } - urlStrings, err = lookupFileIdFn(context.Background(), chunk.FileId) - if err != nil { - glog.V(1).Infof("lookup %s failed: %v", chunk.FileId, err) - err = fmt.Errorf("lookup %s: %v", chunk.FileId, err) - return - } - if len(urlStrings) == 0 { - glog.V(1).Infof("no url found for %s", chunk.FileId) - err = fmt.Errorf("no url found for %s", chunk.FileId) - return - } - glog.V(2).Infof("lookup %s returned %d URLs", chunk.FileId, len(urlStrings)) - - // Try to get data from cache first - cacheKey := fmt.Sprintf("%s/%s/%d/%s", t.Name, p.String(), p.RangeStart, chunk.FileId) - if cachedData, _, found := fileCache.Get(cacheKey); found { - if cachedData == nil { - // Negative cache hit - data doesn't exist - continue - } - // Positive cache hit - data exists - if processedTsNs, err = eachChunkFn(cachedData, eachLogEntryFn, starTsNs, stopTsNs, startOffset, isOffsetBased); err != nil { - glog.V(1).Infof("eachChunkFn failed on cached data: %v", err) - return - } - continue - } - - // Cache miss - try one of the urlString until util.Get(urlString) succeeds - var processed bool - for _, urlString := range urlStrings { - // TODO optimization opportunity: reuse the buffer - var data []byte - glog.V(2).Infof("trying to fetch data from %s", urlString) - if data, _, err = util_http.Get(urlString); err == nil { - glog.V(2).Infof("successfully fetched %d bytes from %s", len(data), urlString) - processed = true - - // Store in cache for future reads - fileCache.Put(cacheKey, data, startOffset) - - if processedTsNs, err = eachChunkFn(data, eachLogEntryFn, starTsNs, stopTsNs, startOffset, isOffsetBased); err != nil { - glog.V(1).Infof("eachChunkFn failed: %v", err) - return - } - break - } else { - glog.V(2).Infof("failed to fetch from %s: %v", urlString, err) - } - } - if !processed { - // Store negative cache entry - data doesn't exist or all URLs failed - fileCache.Put(cacheKey, nil, startOffset) - glog.V(1).Infof("no data processed for %s %s - all URLs failed", entry.Name, chunk.FileId) - err = fmt.Errorf("no data processed for %s %s", entry.Name, chunk.FileId) - return - } - - } - return - } - - return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) { - startFileName := startPosition.Time.UTC().Format(topic.TIME_FORMAT) - startTsNs := startPosition.Time.UnixNano() - stopTime := time.Unix(0, stopTsNs) - var processedTsNs int64 - - // Check if this is an offset-based subscription - isOffsetBased := startPosition.IsOffsetBased - var startOffset int64 - if isOffsetBased { - startOffset = startPosition.Offset - // CRITICAL FIX: For offset-based reads, ignore startFileName (which is based on Time) - // and list all files from the beginning to find the right offset - startFileName = "" - glog.V(1).Infof("disk read start: topic=%s partition=%s startOffset=%d", - t.Name, p, startOffset) - } - - // OPTIMIZATION: For offset-based reads, collect all files with their offset ranges first - // Then use binary search to find the right file, and skip files that don't contain the offset - var candidateFiles []*filer_pb.Entry - var foundStartFile bool - - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // First pass: collect all relevant files with their metadata - glog.V(2).Infof("listing directory %s for offset %d startFileName=%q", partitionDir, startOffset, startFileName) - return filer_pb.SeaweedList(context.Background(), client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error { - - if entry.IsDirectory { - return nil - } - if strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - if strings.HasSuffix(entry.Name, ".offset") { - return nil - } - if stopTsNs != 0 && entry.Name > stopTime.UTC().Format(topic.TIME_FORMAT) { - return nil - } - - // OPTIMIZATION: For offset-based reads, check if this file contains the requested offset - if isOffsetBased { - glog.V(3).Infof("found file %s", entry.Name) - // Check if file has offset range metadata - if minOffsetBytes, hasMin := entry.Extended["offset_min"]; hasMin && len(minOffsetBytes) == 8 { - if maxOffsetBytes, hasMax := entry.Extended["offset_max"]; hasMax && len(maxOffsetBytes) == 8 { - fileMinOffset := int64(binary.BigEndian.Uint64(minOffsetBytes)) - fileMaxOffset := int64(binary.BigEndian.Uint64(maxOffsetBytes)) - - // Skip files that don't contain our offset range - if startOffset > fileMaxOffset { - return nil - } - - // If we haven't found the start file yet, check if this file contains it - if !foundStartFile && startOffset >= fileMinOffset && startOffset <= fileMaxOffset { - foundStartFile = true - } - } - } - // If file doesn't have offset metadata, include it (might be old format) - } else { - // Timestamp-based filtering - topicName := t.Name - if dotIndex := strings.LastIndex(topicName, "."); dotIndex != -1 { - topicName = topicName[dotIndex+1:] - } - isSystemTopic := strings.HasPrefix(topicName, "_") - if !isSystemTopic && startPosition.Time.Unix() > 86400 && entry.Name < startPosition.Time.UTC().Format(topic.TIME_FORMAT) { - return nil - } - } - - // Add file to candidates for processing - candidateFiles = append(candidateFiles, entry) - glog.V(3).Infof("added candidate file %s (total=%d)", entry.Name, len(candidateFiles)) - return nil - - }, startFileName, true, math.MaxInt32) - }) - - if err != nil { - glog.Errorf("failed to list directory %s: %v", partitionDir, err) - return - } - - glog.V(2).Infof("found %d candidate files for topic=%s partition=%s offset=%d", - len(candidateFiles), t.Name, p, startOffset) - - if len(candidateFiles) == 0 { - glog.V(2).Infof("no files found in %s", partitionDir) - return startPosition, isDone, nil - } - - // OPTIMIZATION: For offset-based reads with many files, use binary search to find start file - if isOffsetBased && len(candidateFiles) > 10 { - // Binary search to find the first file that might contain our offset - left, right := 0, len(candidateFiles)-1 - startIdx := 0 - - for left <= right { - mid := (left + right) / 2 - entry := candidateFiles[mid] - - if minOffsetBytes, hasMin := entry.Extended["offset_min"]; hasMin && len(minOffsetBytes) == 8 { - if maxOffsetBytes, hasMax := entry.Extended["offset_max"]; hasMax && len(maxOffsetBytes) == 8 { - fileMinOffset := int64(binary.BigEndian.Uint64(minOffsetBytes)) - fileMaxOffset := int64(binary.BigEndian.Uint64(maxOffsetBytes)) - - if startOffset < fileMinOffset { - // Our offset is before this file, search left - right = mid - 1 - } else if startOffset > fileMaxOffset { - // Our offset is after this file, search right - left = mid + 1 - startIdx = left - } else { - // Found the file containing our offset - startIdx = mid - break - } - } else { - break - } - } else { - break - } - } - - // Process files starting from the found index - candidateFiles = candidateFiles[startIdx:] - } - - // Second pass: process the filtered files - // CRITICAL: For offset-based reads, process ALL candidate files in one call - // This prevents multiple ReadFromDiskFn calls with 1.127s overhead each - var filesProcessed int - var lastProcessedOffset int64 - for _, entry := range candidateFiles { - var fileTsNs int64 - if fileTsNs, err = eachFileFn(entry, eachLogEntryFn, startTsNs, stopTsNs, startOffset, isOffsetBased); err != nil { - return lastReadPosition, isDone, err - } - if fileTsNs > 0 { - processedTsNs = fileTsNs - filesProcessed++ - } - - // For offset-based reads, track the last processed offset - // We need to continue reading ALL files to avoid multiple disk read calls - if isOffsetBased { - // Extract the last offset from the file's extended attributes - if maxOffsetBytes, hasMax := entry.Extended["offset_max"]; hasMax && len(maxOffsetBytes) == 8 { - fileMaxOffset := int64(binary.BigEndian.Uint64(maxOffsetBytes)) - if fileMaxOffset > lastProcessedOffset { - lastProcessedOffset = fileMaxOffset - } - } - } - } - - if isOffsetBased && filesProcessed > 0 { - // Return a position that indicates we've read all disk data up to lastProcessedOffset - // This prevents the subscription from calling ReadFromDiskFn again for these offsets - lastReadPosition = log_buffer.NewMessagePositionFromOffset(lastProcessedOffset + 1) - } else { - // CRITICAL FIX: If no files were processed (e.g., all data already consumed), - // return the requested offset to prevent busy loop - if isOffsetBased { - // For offset-based reads with no data, return the requested offset - // This signals "I've checked, there's no data at this offset, move forward" - lastReadPosition = log_buffer.NewMessagePositionFromOffset(startOffset) - } else { - // For timestamp-based reads, return error (-2) - lastReadPosition = log_buffer.NewMessagePosition(processedTsNs, -2) - } - } - return - } -} diff --git a/weed/mq/logstore/read_parquet_to_log.go b/weed/mq/logstore/read_parquet_to_log.go deleted file mode 100644 index 01191eaad..000000000 --- a/weed/mq/logstore/read_parquet_to_log.go +++ /dev/null @@ -1,222 +0,0 @@ -package logstore - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "math" - "strings" - - "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "google.golang.org/protobuf/proto" -) - -var ( - chunkCache = chunk_cache.NewChunkCacheInMemory(256) // 256 entries, 8MB max per entry -) - -// isControlEntry checks if a log entry is a control entry without actual data -// Based on MQ system analysis, control entries are: -// 1. DataMessages with populated Ctrl field (publisher close signals) -// 2. Entries with empty keys (as filtered by subscriber) -// 3. Entries with no data -func isControlEntry(logEntry *filer_pb.LogEntry) bool { - // Skip entries with no data - if len(logEntry.Data) == 0 { - return true - } - - // Skip entries with empty keys (same logic as subscriber) - if len(logEntry.Key) == 0 { - return true - } - - // Check if this is a DataMessage with control field populated - dataMessage := &mq_pb.DataMessage{} - if err := proto.Unmarshal(logEntry.Data, dataMessage); err == nil { - // If it has a control field, it's a control message - if dataMessage.Ctrl != nil { - return true - } - } - - return false -} - -func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic.Partition) log_buffer.LogReadFromDiskFuncType { - partitionDir := topic.PartitionDir(t, p) - - lookupFileIdFn := filer.LookupFn(filerClient) - - // read topic conf from filer - var topicConf *mq_pb.ConfigureTopicResponse - var err error - if err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - topicConf, err = t.ReadConfFile(client) - return err - }); err != nil { - // Return a no-op function for test environments or when topic config can't be read - return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (log_buffer.MessagePosition, bool, error) { - return startPosition, true, nil - } - } - // Get schema - prefer flat schema if available - var recordType *schema_pb.RecordType - if topicConf.GetMessageRecordType() != nil { - // New flat schema format - use directly - recordType = topicConf.GetMessageRecordType() - } - - if recordType == nil || len(recordType.Fields) == 0 { - // Return a no-op function if no schema is available - return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (log_buffer.MessagePosition, bool, error) { - return startPosition, true, nil - } - } - recordType = schema.NewRecordTypeBuilder(recordType). - WithField(SW_COLUMN_NAME_TS, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - WithField(SW_COLUMN_NAME_OFFSET, schema.TypeInt64). - RecordTypeEnd() - - parquetLevels, err := schema.ToParquetLevels(recordType) - if err != nil { - return nil - } - - // eachFileFn reads a parquet file and calls eachLogEntryFn for each log entry - eachFileFn := func(entry *filer_pb.Entry, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) { - // create readerAt for the parquet file - fileSize := filer.FileSize(entry) - visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(context.Background(), lookupFileIdFn, entry.Chunks, 0, int64(fileSize)) - chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize)) - readerCache := filer.NewReaderCache(32, chunkCache, lookupFileIdFn) - readerAt := filer.NewChunkReaderAtFromClient(context.Background(), readerCache, chunkViews, int64(fileSize)) - - // create parquet reader - parquetReader := parquet.NewReader(readerAt) - rows := make([]parquet.Row, 128) - for { - rowCount, readErr := parquetReader.ReadRows(rows) - - // Process the rows first, even if EOF is returned - for i := 0; i < rowCount; i++ { - row := rows[i] - // convert parquet row to schema_pb.RecordValue - recordValue, err := schema.ToRecordValue(recordType, parquetLevels, row) - if err != nil { - return processedTsNs, fmt.Errorf("ToRecordValue failed: %w", err) - } - processedTsNs = recordValue.Fields[SW_COLUMN_NAME_TS].GetInt64Value() - if processedTsNs <= starTsNs { - continue - } - if stopTsNs != 0 && processedTsNs >= stopTsNs { - return processedTsNs, nil - } - - data, marshalErr := proto.Marshal(recordValue) - if marshalErr != nil { - return processedTsNs, fmt.Errorf("marshal record value: %w", marshalErr) - } - - // Get offset from parquet, default to 0 if not present (backward compatibility) - var offset int64 = 0 - if offsetValue, exists := recordValue.Fields[SW_COLUMN_NAME_OFFSET]; exists { - offset = offsetValue.GetInt64Value() - } - - logEntry := &filer_pb.LogEntry{ - Key: recordValue.Fields[SW_COLUMN_NAME_KEY].GetBytesValue(), - TsNs: processedTsNs, - Data: data, - Offset: offset, - } - - // Skip control entries without actual data - if isControlEntry(logEntry) { - continue - } - - // fmt.Printf(" parquet entry %s ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC()) - - if _, err = eachLogEntryFn(logEntry); err != nil { - return processedTsNs, fmt.Errorf("process log entry %v: %w", logEntry, err) - } - } - - // Check for end conditions after processing rows - if readErr != nil { - if readErr == io.EOF { - return processedTsNs, nil - } - return processedTsNs, readErr - } - if rowCount == 0 { - return processedTsNs, nil - } - } - } - - return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) { - startFileName := startPosition.Time.UTC().Format(topic.TIME_FORMAT) - startTsNs := startPosition.Time.UnixNano() - var processedTsNs int64 - - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - - return filer_pb.SeaweedList(context.Background(), client, partitionDir, "", func(entry *filer_pb.Entry, isLast bool) error { - if entry.IsDirectory { - return nil - } - if !strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - if len(entry.Extended) == 0 { - return nil - } - - // read minTs from the parquet file - minTsBytes := entry.Extended[mq.ExtendedAttrTimestampMin] - if len(minTsBytes) != 8 { - return nil - } - minTsNs := int64(binary.BigEndian.Uint64(minTsBytes)) - - // read max ts - maxTsBytes := entry.Extended[mq.ExtendedAttrTimestampMax] - if len(maxTsBytes) != 8 { - return nil - } - maxTsNs := int64(binary.BigEndian.Uint64(maxTsBytes)) - - if stopTsNs != 0 && stopTsNs <= minTsNs { - isDone = true - return nil - } - - if maxTsNs < startTsNs { - return nil - } - - if processedTsNs, err = eachFileFn(entry, eachLogEntryFn, startTsNs, stopTsNs); err != nil { - return err - } - return nil - - }, startFileName, true, math.MaxInt32) - }) - lastReadPosition = log_buffer.NewMessagePosition(processedTsNs, -2) - return - } -} diff --git a/weed/mq/logstore/write_rows_no_panic_test.go b/weed/mq/logstore/write_rows_no_panic_test.go deleted file mode 100644 index 4e40b6d09..000000000 --- a/weed/mq/logstore/write_rows_no_panic_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logstore - -import ( - "os" - "testing" - - parquet "github.com/parquet-go/parquet-go" - "github.com/parquet-go/parquet-go/compress/zstd" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestWriteRowsNoPanic builds a representative schema and rows and ensures WriteRows completes without panic. -func TestWriteRowsNoPanic(t *testing.T) { - // Build schema similar to ecommerce.user_events - recordType := schema.RecordTypeBegin(). - WithField("id", schema.TypeInt64). - WithField("user_id", schema.TypeInt64). - WithField("user_type", schema.TypeString). - WithField("action", schema.TypeString). - WithField("status", schema.TypeString). - WithField("amount", schema.TypeDouble). - WithField("timestamp", schema.TypeString). - WithField("metadata", schema.TypeString). - RecordTypeEnd() - - // Add log columns - recordType = schema.NewRecordTypeBuilder(recordType). - WithField(SW_COLUMN_NAME_TS, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - RecordTypeEnd() - - ps, err := schema.ToParquetSchema("synthetic", recordType) - if err != nil { - t.Fatalf("schema: %v", err) - } - levels, err := schema.ToParquetLevels(recordType) - if err != nil { - t.Fatalf("levels: %v", err) - } - - tmp, err := os.CreateTemp(".", "synthetic*.parquet") - if err != nil { - t.Fatalf("tmp: %v", err) - } - defer func() { - tmp.Close() - os.Remove(tmp.Name()) - }() - - w := parquet.NewWriter(tmp, ps, - parquet.Compression(&zstd.Codec{Level: zstd.DefaultLevel}), - parquet.DataPageStatistics(true), - ) - defer w.Close() - - rb := parquet.NewRowBuilder(ps) - var rows []parquet.Row - - // Build a few hundred rows with various optional/missing values and nil/empty keys - for i := 0; i < 200; i++ { - rb.Reset() - - rec := &schema_pb.RecordValue{Fields: map[string]*schema_pb.Value{}} - // Required-like fields present - rec.Fields["id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: int64(1000 + i)}} - rec.Fields["user_id"] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: int64(i)}} - rec.Fields["user_type"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "standard"}} - rec.Fields["action"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "click"}} - rec.Fields["status"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "active"}} - - // Optional fields vary: sometimes omitted, sometimes empty - if i%3 == 0 { - rec.Fields["amount"] = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: float64(i)}} - } - if i%4 == 0 { - rec.Fields["metadata"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: ""}} - } - if i%5 == 0 { - rec.Fields["timestamp"] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "2025-09-03T15:36:29Z"}} - } - - // Log columns - rec.Fields[SW_COLUMN_NAME_TS] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: int64(1756913789000000000 + i)}} - var keyBytes []byte - if i%7 == 0 { - keyBytes = nil // ensure nil-keys are handled - } else if i%7 == 1 { - keyBytes = []byte{} // empty - } else { - keyBytes = []byte("key-") - } - rec.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: keyBytes}} - - if err := schema.AddRecordValue(rb, recordType, levels, rec); err != nil { - t.Fatalf("add record: %v", err) - } - rows = append(rows, rb.Row()) - } - - deferredPanicked := false - defer func() { - if r := recover(); r != nil { - deferredPanicked = true - t.Fatalf("unexpected panic: %v", r) - } - }() - - if _, err := w.WriteRows(rows); err != nil { - t.Fatalf("WriteRows: %v", err) - } - if err := w.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - if deferredPanicked { - t.Fatal("panicked") - } -} diff --git a/weed/mq/metadata_constants.go b/weed/mq/metadata_constants.go deleted file mode 100644 index 18ba98a31..000000000 --- a/weed/mq/metadata_constants.go +++ /dev/null @@ -1,21 +0,0 @@ -package mq - -// Extended attribute keys for SeaweedMQ file metadata -// These constants are used across different packages (broker, logstore, kafka, query) -const ( - // Timestamp range metadata - ExtendedAttrTimestampMin = "ts_min" // 8-byte binary (BigEndian) minimum timestamp in nanoseconds - ExtendedAttrTimestampMax = "ts_max" // 8-byte binary (BigEndian) maximum timestamp in nanoseconds - - // Offset range metadata for Kafka integration - ExtendedAttrOffsetMin = "offset_min" // 8-byte binary (BigEndian) minimum Kafka offset - ExtendedAttrOffsetMax = "offset_max" // 8-byte binary (BigEndian) maximum Kafka offset - - // Buffer tracking metadata - ExtendedAttrBufferStart = "buffer_start" // 8-byte binary (BigEndian) buffer start index - - // Source file tracking for parquet deduplication - ExtendedAttrSources = "sources" // JSON-encoded list of source log files -) - - diff --git a/weed/mq/offset/benchmark_test.go b/weed/mq/offset/benchmark_test.go deleted file mode 100644 index 0fdacf127..000000000 --- a/weed/mq/offset/benchmark_test.go +++ /dev/null @@ -1,452 +0,0 @@ -package offset - -import ( - "fmt" - "os" - "testing" - "time" - - _ "github.com/mattn/go-sqlite3" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// BenchmarkOffsetAssignment benchmarks sequential offset assignment -func BenchmarkOffsetAssignment(b *testing.B) { - storage := NewInMemoryOffsetStorage() - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - b.Fatalf("Failed to create partition manager: %v", err) - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - manager.AssignOffset() - } - }) -} - -// BenchmarkBatchOffsetAssignment benchmarks batch offset assignment -func BenchmarkBatchOffsetAssignment(b *testing.B) { - storage := NewInMemoryOffsetStorage() - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - b.Fatalf("Failed to create partition manager: %v", err) - } - - batchSizes := []int64{1, 10, 100, 1000} - - for _, batchSize := range batchSizes { - b.Run(fmt.Sprintf("BatchSize%d", batchSize), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - manager.AssignOffsets(batchSize) - } - }) - } -} - -// BenchmarkSQLOffsetStorage benchmarks SQL storage operations -func BenchmarkSQLOffsetStorage(b *testing.B) { - // Create temporary database - tmpFile, err := os.CreateTemp("", "benchmark_*.db") - if err != nil { - b.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - b.Fatalf("Failed to create database: %v", err) - } - defer db.Close() - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - b.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - partitionKey := partitionKey(partition) - - b.Run("SaveCheckpoint", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.SaveCheckpoint("test-namespace", "test-topic", partition, int64(i)) - } - }) - - b.Run("LoadCheckpoint", func(b *testing.B) { - storage.SaveCheckpoint("test-namespace", "test-topic", partition, 1000) - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.LoadCheckpoint("test-namespace", "test-topic", partition) - } - }) - - b.Run("SaveOffsetMapping", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), 100) - } - }) - - // Pre-populate for read benchmarks - for i := 0; i < 1000; i++ { - storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), 100) - } - - b.Run("GetHighestOffset", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.GetHighestOffset("test-namespace", "test-topic", partition) - } - }) - - b.Run("LoadOffsetMappings", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.LoadOffsetMappings(partitionKey) - } - }) - - b.Run("GetOffsetMappingsByRange", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := int64(i % 900) - end := start + 100 - storage.GetOffsetMappingsByRange(partitionKey, start, end) - } - }) - - b.Run("GetPartitionStats", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - storage.GetPartitionStats(partitionKey) - } - }) -} - -// BenchmarkInMemoryVsSQL compares in-memory and SQL storage performance -func BenchmarkInMemoryVsSQL(b *testing.B) { - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // In-memory storage benchmark - b.Run("InMemory", func(b *testing.B) { - storage := NewInMemoryOffsetStorage() - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - b.Fatalf("Failed to create partition manager: %v", err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - manager.AssignOffset() - } - }) - - // SQL storage benchmark - b.Run("SQL", func(b *testing.B) { - tmpFile, err := os.CreateTemp("", "benchmark_sql_*.db") - if err != nil { - b.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - b.Fatalf("Failed to create database: %v", err) - } - defer db.Close() - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - b.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - b.Fatalf("Failed to create partition manager: %v", err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - manager.AssignOffset() - } - }) -} - -// BenchmarkOffsetSubscription benchmarks subscription operations -func BenchmarkOffsetSubscription(b *testing.B) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // Pre-assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 10000) - - b.Run("CreateSubscription", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - subscriptionID := fmt.Sprintf("bench-sub-%d", i) - _, err := subscriber.CreateSubscription( - subscriptionID, - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - b.Fatalf("Failed to create subscription: %v", err) - } - subscriber.CloseSubscription(subscriptionID) - } - }) - - // Create subscription for other benchmarks - sub, err := subscriber.CreateSubscription( - "bench-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - b.Fatalf("Failed to create subscription: %v", err) - } - - b.Run("GetOffsetRange", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - sub.GetOffsetRange(100) - } - }) - - b.Run("AdvanceOffset", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - sub.AdvanceOffset() - } - }) - - b.Run("GetLag", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - sub.GetLag() - } - }) - - b.Run("SeekToOffset", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - offset := int64(i % 9000) // Stay within bounds - sub.SeekToOffset(offset) - } - }) -} - -// BenchmarkSMQOffsetIntegration benchmarks the full integration layer -func BenchmarkSMQOffsetIntegration(b *testing.B) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - b.Run("PublishRecord", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - key := fmt.Sprintf("key-%d", i) - integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{}) - } - }) - - b.Run("PublishRecordBatch", func(b *testing.B) { - batchSizes := []int{1, 10, 100} - - for _, batchSize := range batchSizes { - b.Run(fmt.Sprintf("BatchSize%d", batchSize), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - records := make([]PublishRecordRequest, batchSize) - for j := 0; j < batchSize; j++ { - records[j] = PublishRecordRequest{ - Key: []byte(fmt.Sprintf("batch-%d-key-%d", i, j)), - Value: &schema_pb.RecordValue{}, - } - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - } - }) - } - }) - - // Pre-populate for subscription benchmarks - records := make([]PublishRecordRequest, 1000) - for i := 0; i < 1000; i++ { - records[i] = PublishRecordRequest{ - Key: []byte(fmt.Sprintf("pre-key-%d", i)), - Value: &schema_pb.RecordValue{}, - } - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - b.Run("CreateSubscription", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - subscriptionID := fmt.Sprintf("integration-sub-%d", i) - _, err := integration.CreateSubscription( - subscriptionID, - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - b.Fatalf("Failed to create subscription: %v", err) - } - integration.CloseSubscription(subscriptionID) - } - }) - - b.Run("GetHighWaterMark", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - integration.GetHighWaterMark("test-namespace", "test-topic", partition) - } - }) - - b.Run("GetPartitionOffsetInfo", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition) - } - }) -} - -// BenchmarkConcurrentOperations benchmarks concurrent offset operations -func BenchmarkConcurrentOperations(b *testing.B) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - b.Run("ConcurrentPublish", func(b *testing.B) { - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - i := 0 - for pb.Next() { - key := fmt.Sprintf("concurrent-key-%d", i) - integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{}) - i++ - } - }) - }) - - // Pre-populate for concurrent reads - for i := 0; i < 1000; i++ { - key := fmt.Sprintf("read-key-%d", i) - integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{}) - } - - b.Run("ConcurrentRead", func(b *testing.B) { - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - integration.GetHighWaterMark("test-namespace", "test-topic", partition) - } - }) - }) - - b.Run("ConcurrentMixed", func(b *testing.B) { - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - i := 0 - for pb.Next() { - if i%10 == 0 { - // 10% writes - key := fmt.Sprintf("mixed-key-%d", i) - integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{}) - } else { - // 90% reads - integration.GetHighWaterMark("test-namespace", "test-topic", partition) - } - i++ - } - }) - }) -} - -// BenchmarkMemoryUsage benchmarks memory usage patterns -func BenchmarkMemoryUsage(b *testing.B) { - b.Run("InMemoryStorage", func(b *testing.B) { - storage := NewInMemoryOffsetStorage() - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - b.Fatalf("Failed to create partition manager: %v", err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - manager.AssignOffset() - // Note: Checkpointing now happens automatically in background every 2 seconds - } - - // Clean up background goroutine - manager.Close() - }) -} diff --git a/weed/mq/offset/consumer_group_storage.go b/weed/mq/offset/consumer_group_storage.go deleted file mode 100644 index 74c2db908..000000000 --- a/weed/mq/offset/consumer_group_storage.go +++ /dev/null @@ -1,181 +0,0 @@ -package offset - -import ( - "context" - "encoding/json" - "fmt" - "io" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// ConsumerGroupPosition represents a consumer's position in a partition -// This can be either a timestamp or an offset -type ConsumerGroupPosition struct { - Type string `json:"type"` // "offset" or "timestamp" - Value int64 `json:"value"` // The actual offset or timestamp value - OffsetType string `json:"offset_type"` // Optional: OffsetType enum name (e.g., "EXACT_OFFSET") - CommittedAt int64 `json:"committed_at"` // Unix timestamp in milliseconds when committed - Metadata string `json:"metadata"` // Optional: application-specific metadata -} - -// ConsumerGroupOffsetStorage handles consumer group offset persistence -// Each consumer group gets its own offset file in a dedicated consumers/ subfolder: -// Path: /topics/{namespace}/{topic}/{version}/{partition}/consumers/{consumer_group}.offset -type ConsumerGroupOffsetStorage interface { - // SaveConsumerGroupOffset saves the committed offset for a consumer group - SaveConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string, offset int64) error - - // SaveConsumerGroupPosition saves the committed position (offset or timestamp) for a consumer group - SaveConsumerGroupPosition(t topic.Topic, p topic.Partition, consumerGroup string, position *ConsumerGroupPosition) error - - // LoadConsumerGroupOffset loads the committed offset for a consumer group (backward compatible) - LoadConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) (int64, error) - - // LoadConsumerGroupPosition loads the committed position for a consumer group - LoadConsumerGroupPosition(t topic.Topic, p topic.Partition, consumerGroup string) (*ConsumerGroupPosition, error) - - // ListConsumerGroups returns all consumer groups for a topic partition - ListConsumerGroups(t topic.Topic, p topic.Partition) ([]string, error) - - // DeleteConsumerGroupOffset removes the offset file for a consumer group - DeleteConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) error -} - -// FilerConsumerGroupOffsetStorage implements ConsumerGroupOffsetStorage using SeaweedFS filer -type FilerConsumerGroupOffsetStorage struct { - filerClientAccessor *filer_client.FilerClientAccessor -} - -// NewFilerConsumerGroupOffsetStorageWithAccessor creates storage using a shared filer client accessor -func NewFilerConsumerGroupOffsetStorageWithAccessor(filerClientAccessor *filer_client.FilerClientAccessor) *FilerConsumerGroupOffsetStorage { - return &FilerConsumerGroupOffsetStorage{ - filerClientAccessor: filerClientAccessor, - } -} - -// SaveConsumerGroupOffset saves the committed offset for a consumer group -// Stores as: /topics/{namespace}/{topic}/{version}/{partition}/consumers/{consumer_group}.offset -// This is a convenience method that wraps SaveConsumerGroupPosition -func (f *FilerConsumerGroupOffsetStorage) SaveConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string, offset int64) error { - position := &ConsumerGroupPosition{ - Type: "offset", - Value: offset, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET.String(), - CommittedAt: time.Now().UnixMilli(), - } - return f.SaveConsumerGroupPosition(t, p, consumerGroup, position) -} - -// SaveConsumerGroupPosition saves the committed position (offset or timestamp) for a consumer group -// Stores as JSON: /topics/{namespace}/{topic}/{version}/{partition}/consumers/{consumer_group}.offset -func (f *FilerConsumerGroupOffsetStorage) SaveConsumerGroupPosition(t topic.Topic, p topic.Partition, consumerGroup string, position *ConsumerGroupPosition) error { - partitionDir := topic.PartitionDir(t, p) - consumersDir := fmt.Sprintf("%s/consumers", partitionDir) - offsetFileName := fmt.Sprintf("%s.offset", consumerGroup) - - // Marshal position to JSON - jsonBytes, err := json.Marshal(position) - if err != nil { - return fmt.Errorf("failed to marshal position to JSON: %w", err) - } - - return f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer.SaveInsideFiler(client, consumersDir, offsetFileName, jsonBytes) - }) -} - -// LoadConsumerGroupOffset loads the committed offset for a consumer group -// This method provides backward compatibility and returns just the offset value -func (f *FilerConsumerGroupOffsetStorage) LoadConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) (int64, error) { - position, err := f.LoadConsumerGroupPosition(t, p, consumerGroup) - if err != nil { - return -1, err - } - return position.Value, nil -} - -// LoadConsumerGroupPosition loads the committed position for a consumer group -func (f *FilerConsumerGroupOffsetStorage) LoadConsumerGroupPosition(t topic.Topic, p topic.Partition, consumerGroup string) (*ConsumerGroupPosition, error) { - partitionDir := topic.PartitionDir(t, p) - consumersDir := fmt.Sprintf("%s/consumers", partitionDir) - offsetFileName := fmt.Sprintf("%s.offset", consumerGroup) - - var position *ConsumerGroupPosition - err := f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - data, err := filer.ReadInsideFiler(client, consumersDir, offsetFileName) - if err != nil { - return err - } - - // Parse JSON format - position = &ConsumerGroupPosition{} - if err := json.Unmarshal(data, position); err != nil { - return fmt.Errorf("invalid consumer group offset file format: %w", err) - } - - return nil - }) - - if err != nil { - return nil, err - } - - return position, nil -} - -// ListConsumerGroups returns all consumer groups for a topic partition -func (f *FilerConsumerGroupOffsetStorage) ListConsumerGroups(t topic.Topic, p topic.Partition) ([]string, error) { - partitionDir := topic.PartitionDir(t, p) - consumersDir := fmt.Sprintf("%s/consumers", partitionDir) - var consumerGroups []string - - err := f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Use ListEntries to get directory contents - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: consumersDir, - }) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - break - } - return err - } - - entry := resp.Entry - if entry != nil && !entry.IsDirectory && entry.Name != "" { - // Check if this is a consumer group offset file (ends with .offset) - if len(entry.Name) > 7 && entry.Name[len(entry.Name)-7:] == ".offset" { - // Extract consumer group name (remove .offset suffix) - consumerGroup := entry.Name[:len(entry.Name)-7] - consumerGroups = append(consumerGroups, consumerGroup) - } - } - } - return nil - }) - - return consumerGroups, err -} - -// DeleteConsumerGroupOffset removes the offset file for a consumer group -func (f *FilerConsumerGroupOffsetStorage) DeleteConsumerGroupOffset(t topic.Topic, p topic.Partition, consumerGroup string) error { - partitionDir := topic.PartitionDir(t, p) - consumersDir := fmt.Sprintf("%s/consumers", partitionDir) - offsetFileName := fmt.Sprintf("%s.offset", consumerGroup) - - return f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer_pb.DoRemove(context.Background(), client, consumersDir, offsetFileName, false, false, false, false, nil) - }) -} diff --git a/weed/mq/offset/consumer_group_storage_test.go b/weed/mq/offset/consumer_group_storage_test.go deleted file mode 100644 index ff1163e93..000000000 --- a/weed/mq/offset/consumer_group_storage_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package offset - -import ( - "encoding/json" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestConsumerGroupPosition_JSON(t *testing.T) { - tests := []struct { - name string - position *ConsumerGroupPosition - }{ - { - name: "offset-based position", - position: &ConsumerGroupPosition{ - Type: "offset", - Value: 12345, - OffsetType: schema_pb.OffsetType_EXACT_OFFSET.String(), - CommittedAt: time.Now().UnixMilli(), - Metadata: "test metadata", - }, - }, - { - name: "timestamp-based position", - position: &ConsumerGroupPosition{ - Type: "timestamp", - Value: time.Now().UnixNano(), - OffsetType: schema_pb.OffsetType_EXACT_TS_NS.String(), - CommittedAt: time.Now().UnixMilli(), - Metadata: "checkpoint at 2024-10-05", - }, - }, - { - name: "minimal position", - position: &ConsumerGroupPosition{ - Type: "offset", - Value: 42, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Marshal to JSON - jsonBytes, err := json.Marshal(tt.position) - if err != nil { - t.Fatalf("Failed to marshal: %v", err) - } - - t.Logf("JSON: %s", string(jsonBytes)) - - // Unmarshal from JSON - var decoded ConsumerGroupPosition - if err := json.Unmarshal(jsonBytes, &decoded); err != nil { - t.Fatalf("Failed to unmarshal: %v", err) - } - - // Verify fields - if decoded.Type != tt.position.Type { - t.Errorf("Type mismatch: got %s, want %s", decoded.Type, tt.position.Type) - } - if decoded.Value != tt.position.Value { - t.Errorf("Value mismatch: got %d, want %d", decoded.Value, tt.position.Value) - } - if decoded.OffsetType != tt.position.OffsetType { - t.Errorf("OffsetType mismatch: got %s, want %s", decoded.OffsetType, tt.position.OffsetType) - } - if decoded.Metadata != tt.position.Metadata { - t.Errorf("Metadata mismatch: got %s, want %s", decoded.Metadata, tt.position.Metadata) - } - }) - } -} - -func TestConsumerGroupPosition_JSONExamples(t *testing.T) { - // Test JSON format examples - jsonExamples := []string{ - `{"type":"offset","value":12345}`, - `{"type":"timestamp","value":1696521600000000000}`, - `{"type":"offset","value":42,"offset_type":"EXACT_OFFSET","committed_at":1696521600000,"metadata":"test"}`, - } - - for i, jsonStr := range jsonExamples { - var position ConsumerGroupPosition - if err := json.Unmarshal([]byte(jsonStr), &position); err != nil { - t.Errorf("Example %d: Failed to parse JSON: %v", i, err) - continue - } - - t.Logf("Example %d: Type=%s, Value=%d", i, position.Type, position.Value) - - // Verify required fields - if position.Type == "" { - t.Errorf("Example %d: Type is empty", i) - } - if position.Value == 0 { - t.Errorf("Example %d: Value is zero", i) - } - } -} - -func TestConsumerGroupPosition_TypeValidation(t *testing.T) { - validTypes := []string{"offset", "timestamp"} - - for _, typ := range validTypes { - position := &ConsumerGroupPosition{ - Type: typ, - Value: 100, - } - - jsonBytes, err := json.Marshal(position) - if err != nil { - t.Fatalf("Failed to marshal position with type '%s': %v", typ, err) - } - - var decoded ConsumerGroupPosition - if err := json.Unmarshal(jsonBytes, &decoded); err != nil { - t.Fatalf("Failed to unmarshal position with type '%s': %v", typ, err) - } - - if decoded.Type != typ { - t.Errorf("Type mismatch: got '%s', want '%s'", decoded.Type, typ) - } - } -} diff --git a/weed/mq/offset/end_to_end_test.go b/weed/mq/offset/end_to_end_test.go deleted file mode 100644 index f2b57b843..000000000 --- a/weed/mq/offset/end_to_end_test.go +++ /dev/null @@ -1,473 +0,0 @@ -package offset - -import ( - "fmt" - "os" - "testing" - "time" - - _ "github.com/mattn/go-sqlite3" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestEndToEndOffsetFlow tests the complete offset management flow -func TestEndToEndOffsetFlow(t *testing.T) { - // Create temporary database - tmpFile, err := os.CreateTemp("", "e2e_offset_test_*.db") - if err != nil { - t.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - // Create database with migrations - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to create database: %v", err) - } - defer db.Close() - - // Create SQL storage - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - // Create SMQ offset integration - integration := NewSMQOffsetIntegration(storage) - - // Test partition - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - t.Run("PublishAndAssignOffsets", func(t *testing.T) { - // Simulate publishing messages with offset assignment - records := []PublishRecordRequest{ - {Key: []byte("user1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("user2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("user3"), Value: &schema_pb.RecordValue{}}, - } - - response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - if err != nil { - t.Fatalf("Failed to publish record batch: %v", err) - } - - if response.BaseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", response.BaseOffset) - } - - if response.LastOffset != 2 { - t.Errorf("Expected last offset 2, got %d", response.LastOffset) - } - - // Verify high water mark - hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark: %v", err) - } - - if hwm != 3 { - t.Errorf("Expected high water mark 3, got %d", hwm) - } - }) - - t.Run("CreateAndUseSubscription", func(t *testing.T) { - // Create subscription from earliest - sub, err := integration.CreateSubscription( - "e2e-test-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Subscribe to records - responses, err := integration.SubscribeRecords(sub, 2) - if err != nil { - t.Fatalf("Failed to subscribe to records: %v", err) - } - - if len(responses) != 2 { - t.Errorf("Expected 2 responses, got %d", len(responses)) - } - - // Check subscription advancement - if sub.CurrentOffset != 2 { - t.Errorf("Expected current offset 2, got %d", sub.CurrentOffset) - } - - // Get subscription lag - lag, err := sub.GetLag() - if err != nil { - t.Fatalf("Failed to get lag: %v", err) - } - - if lag != 1 { // 3 (hwm) - 2 (current) = 1 - t.Errorf("Expected lag 1, got %d", lag) - } - }) - - t.Run("OffsetSeekingAndRanges", func(t *testing.T) { - // Create subscription at specific offset - sub, err := integration.CreateSubscription( - "seek-test-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_EXACT_OFFSET, - 1, - ) - if err != nil { - t.Fatalf("Failed to create subscription at offset 1: %v", err) - } - - // Verify starting position - if sub.CurrentOffset != 1 { - t.Errorf("Expected current offset 1, got %d", sub.CurrentOffset) - } - - // Get offset range - offsetRange, err := sub.GetOffsetRange(2) - if err != nil { - t.Fatalf("Failed to get offset range: %v", err) - } - - if offsetRange.StartOffset != 1 { - t.Errorf("Expected start offset 1, got %d", offsetRange.StartOffset) - } - - if offsetRange.Count != 2 { - t.Errorf("Expected count 2, got %d", offsetRange.Count) - } - - // Seek to different offset - err = sub.SeekToOffset(0) - if err != nil { - t.Fatalf("Failed to seek to offset 0: %v", err) - } - - if sub.CurrentOffset != 0 { - t.Errorf("Expected current offset 0 after seek, got %d", sub.CurrentOffset) - } - }) - - t.Run("PartitionInformationAndMetrics", func(t *testing.T) { - // Get partition offset info - info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get partition offset info: %v", err) - } - - if info.EarliestOffset != 0 { - t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset) - } - - if info.LatestOffset != 2 { - t.Errorf("Expected latest offset 2, got %d", info.LatestOffset) - } - - if info.HighWaterMark != 3 { - t.Errorf("Expected high water mark 3, got %d", info.HighWaterMark) - } - - if info.ActiveSubscriptions != 2 { // Two subscriptions created above - t.Errorf("Expected 2 active subscriptions, got %d", info.ActiveSubscriptions) - } - - // Get offset metrics - metrics := integration.GetOffsetMetrics() - if metrics.PartitionCount != 1 { - t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount) - } - - if metrics.ActiveSubscriptions != 2 { - t.Errorf("Expected 2 active subscriptions in metrics, got %d", metrics.ActiveSubscriptions) - } - }) -} - -// TestOffsetPersistenceAcrossRestarts tests that offsets persist across system restarts -func TestOffsetPersistenceAcrossRestarts(t *testing.T) { - // Create temporary database - tmpFile, err := os.CreateTemp("", "persistence_test_*.db") - if err != nil { - t.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - var lastOffset int64 - - // First session: Create database and assign offsets - { - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to create database: %v", err) - } - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - - integration := NewSMQOffsetIntegration(storage) - - // Publish some records - records := []PublishRecordRequest{ - {Key: []byte("msg1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("msg2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("msg3"), Value: &schema_pb.RecordValue{}}, - } - - response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - if err != nil { - t.Fatalf("Failed to publish records: %v", err) - } - - lastOffset = response.LastOffset - - // Close connections - Close integration first to trigger final checkpoint - integration.Close() - storage.Close() - db.Close() - } - - // Second session: Reopen database and verify persistence - { - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to reopen database: %v", err) - } - defer db.Close() - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - integration := NewSMQOffsetIntegration(storage) - - // Verify high water mark persisted - hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark after restart: %v", err) - } - - if hwm != lastOffset+1 { - t.Errorf("Expected high water mark %d after restart, got %d", lastOffset+1, hwm) - } - - // Assign new offsets and verify continuity - newResponse, err := integration.PublishRecord("test-namespace", "test-topic", partition, []byte("msg4"), &schema_pb.RecordValue{}) - if err != nil { - t.Fatalf("Failed to publish new record after restart: %v", err) - } - - expectedNextOffset := lastOffset + 1 - if newResponse.BaseOffset != expectedNextOffset { - t.Errorf("Expected next offset %d after restart, got %d", expectedNextOffset, newResponse.BaseOffset) - } - } -} - -// TestConcurrentOffsetOperations tests concurrent offset operations -func TestConcurrentOffsetOperations(t *testing.T) { - // Create temporary database - tmpFile, err := os.CreateTemp("", "concurrent_test_*.db") - if err != nil { - t.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to create database: %v", err) - } - defer db.Close() - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - integration := NewSMQOffsetIntegration(storage) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - // Concurrent publishers - const numPublishers = 5 - const recordsPerPublisher = 10 - - done := make(chan bool, numPublishers) - - for i := 0; i < numPublishers; i++ { - go func(publisherID int) { - defer func() { done <- true }() - - for j := 0; j < recordsPerPublisher; j++ { - key := fmt.Sprintf("publisher-%d-msg-%d", publisherID, j) - _, err := integration.PublishRecord("test-namespace", "test-topic", partition, []byte(key), &schema_pb.RecordValue{}) - if err != nil { - t.Errorf("Publisher %d failed to publish message %d: %v", publisherID, j, err) - return - } - } - }(i) - } - - // Wait for all publishers to complete - for i := 0; i < numPublishers; i++ { - <-done - } - - // Verify total records - hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark: %v", err) - } - - expectedTotal := int64(numPublishers * recordsPerPublisher) - if hwm != expectedTotal { - t.Errorf("Expected high water mark %d, got %d", expectedTotal, hwm) - } - - // Verify no duplicate offsets - info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get partition info: %v", err) - } - - if info.RecordCount != expectedTotal { - t.Errorf("Expected record count %d, got %d", expectedTotal, info.RecordCount) - } -} - -// TestOffsetValidationAndErrorHandling tests error conditions and validation -func TestOffsetValidationAndErrorHandling(t *testing.T) { - // Create temporary database - tmpFile, err := os.CreateTemp("", "validation_test_*.db") - if err != nil { - t.Fatalf("Failed to create temp database: %v", err) - } - tmpFile.Close() - defer os.Remove(tmpFile.Name()) - - db, err := CreateDatabase(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to create database: %v", err) - } - defer db.Close() - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - integration := NewSMQOffsetIntegration(storage) - - partition := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - t.Run("InvalidOffsetSubscription", func(t *testing.T) { - // Try to create subscription with invalid offset - _, err := integration.CreateSubscription( - "invalid-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_EXACT_OFFSET, - 100, // Beyond any existing data - ) - if err == nil { - t.Error("Expected error for subscription beyond high water mark") - } - }) - - t.Run("NegativeOffsetValidation", func(t *testing.T) { - // Try to create subscription with negative offset - _, err := integration.CreateSubscription( - "negative-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_EXACT_OFFSET, - -1, - ) - if err == nil { - t.Error("Expected error for negative offset") - } - }) - - t.Run("DuplicateSubscriptionID", func(t *testing.T) { - // Create first subscription - _, err := integration.CreateSubscription( - "duplicate-id", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create first subscription: %v", err) - } - - // Try to create duplicate - _, err = integration.CreateSubscription( - "duplicate-id", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err == nil { - t.Error("Expected error for duplicate subscription ID") - } - }) - - t.Run("OffsetRangeValidation", func(t *testing.T) { - // Add some data first - integration.PublishRecord("test-namespace", "test-topic", partition, []byte("test"), &schema_pb.RecordValue{}) - - // Test invalid range validation - err := integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 5, 10) // Beyond high water mark - if err == nil { - t.Error("Expected error for range beyond high water mark") - } - - err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 10, 5) // End before start - if err == nil { - t.Error("Expected error for end offset before start offset") - } - - err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, -1, 5) // Negative start - if err == nil { - t.Error("Expected error for negative start offset") - } - }) -} diff --git a/weed/mq/offset/filer_storage.go b/weed/mq/offset/filer_storage.go deleted file mode 100644 index 81be78470..000000000 --- a/weed/mq/offset/filer_storage.go +++ /dev/null @@ -1,100 +0,0 @@ -package offset - -import ( - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// FilerOffsetStorage implements OffsetStorage using SeaweedFS filer -// Stores offset data as files in the same directory structure as SMQ -// Path: /topics/{namespace}/{topic}/{version}/{partition}/checkpoint.offset -// The namespace and topic are derived from the actual partition information -type FilerOffsetStorage struct { - filerClientAccessor *filer_client.FilerClientAccessor -} - -// NewFilerOffsetStorageWithAccessor creates a new filer-based offset storage using existing filer client accessor -func NewFilerOffsetStorageWithAccessor(filerClientAccessor *filer_client.FilerClientAccessor) *FilerOffsetStorage { - return &FilerOffsetStorage{ - filerClientAccessor: filerClientAccessor, - } -} - -// SaveCheckpoint saves the checkpoint for a partition -// Stores as: /topics/{namespace}/{topic}/{version}/{partition}/checkpoint.offset -func (f *FilerOffsetStorage) SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, offset int64) error { - partitionDir := f.getPartitionDir(namespace, topicName, partition) - fileName := "checkpoint.offset" - - // Use SMQ's 8-byte offset format - offsetBytes := make([]byte, 8) - util.Uint64toBytes(offsetBytes, uint64(offset)) - - return f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer.SaveInsideFiler(client, partitionDir, fileName, offsetBytes) - }) -} - -// LoadCheckpoint loads the checkpoint for a partition -func (f *FilerOffsetStorage) LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - partitionDir := f.getPartitionDir(namespace, topicName, partition) - fileName := "checkpoint.offset" - - var offset int64 = -1 - err := f.filerClientAccessor.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - data, err := filer.ReadInsideFiler(client, partitionDir, fileName) - if err != nil { - return err - } - if len(data) != 8 { - return fmt.Errorf("invalid checkpoint file format: expected 8 bytes, got %d", len(data)) - } - offset = int64(util.BytesToUint64(data)) - return nil - }) - - if err != nil { - return -1, err - } - - return offset, nil -} - -// GetHighestOffset returns the highest offset stored for a partition -// For filer storage, this is the same as the checkpoint since we don't store individual records -func (f *FilerOffsetStorage) GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - return f.LoadCheckpoint(namespace, topicName, partition) -} - -// Reset clears all data for testing -func (f *FilerOffsetStorage) Reset() error { - // For testing, we could delete all offset files, but this is dangerous - // Instead, just return success - individual tests should clean up their own data - return nil -} - -// Helper methods - -// getPartitionDir returns the directory path for a partition following SMQ convention -// Format: /topics/{namespace}/{topic}/{version}/{partition} -func (f *FilerOffsetStorage) getPartitionDir(namespace, topicName string, partition *schema_pb.Partition) string { - // Generate version from UnixTimeNs - version := time.Unix(0, partition.UnixTimeNs).UTC().Format("v2006-01-02-15-04-05") - - // Generate partition range string - partitionRange := fmt.Sprintf("%04d-%04d", partition.RangeStart, partition.RangeStop) - - return fmt.Sprintf("%s/%s/%s/%s/%s", filer.TopicsDir, namespace, topicName, version, partitionRange) -} - -// getPartitionKey generates a unique key for a partition -func (f *FilerOffsetStorage) getPartitionKey(partition *schema_pb.Partition) string { - return fmt.Sprintf("ring:%d:range:%d-%d:time:%d", - partition.RingSize, partition.RangeStart, partition.RangeStop, partition.UnixTimeNs) -} diff --git a/weed/mq/offset/integration.go b/weed/mq/offset/integration.go deleted file mode 100644 index 53bc113e7..000000000 --- a/weed/mq/offset/integration.go +++ /dev/null @@ -1,387 +0,0 @@ -package offset - -import ( - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// SMQOffsetIntegration provides integration between offset management and SMQ broker -type SMQOffsetIntegration struct { - mu sync.RWMutex - registry *PartitionOffsetRegistry - offsetAssigner *OffsetAssigner - offsetSubscriber *OffsetSubscriber - offsetSeeker *OffsetSeeker -} - -// NewSMQOffsetIntegration creates a new SMQ offset integration -func NewSMQOffsetIntegration(storage OffsetStorage) *SMQOffsetIntegration { - registry := NewPartitionOffsetRegistry(storage) - assigner := &OffsetAssigner{registry: registry} - - return &SMQOffsetIntegration{ - registry: registry, - offsetAssigner: assigner, - offsetSubscriber: NewOffsetSubscriber(registry), - offsetSeeker: NewOffsetSeeker(registry), - } -} - -// Close stops all background checkpoint goroutines and performs final checkpoints -func (integration *SMQOffsetIntegration) Close() error { - return integration.registry.Close() -} - -// PublishRecord publishes a record and assigns it an offset -func (integration *SMQOffsetIntegration) PublishRecord( - namespace, topicName string, - partition *schema_pb.Partition, - key []byte, - value *schema_pb.RecordValue, -) (*mq_agent_pb.PublishRecordResponse, error) { - - // Assign offset for this record - result := integration.offsetAssigner.AssignSingleOffset(namespace, topicName, partition) - if result.Error != nil { - return &mq_agent_pb.PublishRecordResponse{ - Error: fmt.Sprintf("Failed to assign offset: %v", result.Error), - }, nil - } - - assignment := result.Assignment - - // Note: Removed in-memory mapping storage to prevent memory leaks - // Record-to-offset mappings are now handled by persistent storage layer - - // Return response with offset information - return &mq_agent_pb.PublishRecordResponse{ - AckSequence: assignment.Offset, // Use offset as ack sequence for now - BaseOffset: assignment.Offset, - LastOffset: assignment.Offset, - Error: "", - }, nil -} - -// PublishRecordBatch publishes a batch of records and assigns them offsets -func (integration *SMQOffsetIntegration) PublishRecordBatch( - namespace, topicName string, - partition *schema_pb.Partition, - records []PublishRecordRequest, -) (*mq_agent_pb.PublishRecordResponse, error) { - - if len(records) == 0 { - return &mq_agent_pb.PublishRecordResponse{ - Error: "Empty record batch", - }, nil - } - - // Assign batch of offsets - result := integration.offsetAssigner.AssignBatchOffsets(namespace, topicName, partition, int64(len(records))) - if result.Error != nil { - return &mq_agent_pb.PublishRecordResponse{ - Error: fmt.Sprintf("Failed to assign batch offsets: %v", result.Error), - }, nil - } - - batch := result.Batch - - // Note: Removed in-memory mapping storage to prevent memory leaks - // Batch record-to-offset mappings are now handled by persistent storage layer - - return &mq_agent_pb.PublishRecordResponse{ - AckSequence: batch.LastOffset, // Use last offset as ack sequence - BaseOffset: batch.BaseOffset, - LastOffset: batch.LastOffset, - Error: "", - }, nil -} - -// CreateSubscription creates an offset-based subscription -func (integration *SMQOffsetIntegration) CreateSubscription( - subscriptionID string, - namespace, topicName string, - partition *schema_pb.Partition, - offsetType schema_pb.OffsetType, - startOffset int64, -) (*OffsetSubscription, error) { - - return integration.offsetSubscriber.CreateSubscription( - subscriptionID, - namespace, topicName, - partition, - offsetType, - startOffset, - ) -} - -// SubscribeRecords subscribes to records starting from a specific offset -func (integration *SMQOffsetIntegration) SubscribeRecords( - subscription *OffsetSubscription, - maxRecords int64, -) ([]*mq_agent_pb.SubscribeRecordResponse, error) { - - if !subscription.IsActive { - return nil, fmt.Errorf("subscription is not active") - } - - // Get the range of offsets to read - offsetRange, err := subscription.GetOffsetRange(maxRecords) - if err != nil { - return nil, fmt.Errorf("failed to get offset range: %w", err) - } - - if offsetRange.Count == 0 { - // No records available - return []*mq_agent_pb.SubscribeRecordResponse{}, nil - } - - // TODO: This is where we would integrate with SMQ's actual storage layer - // For now, return mock responses with offset information - responses := make([]*mq_agent_pb.SubscribeRecordResponse, offsetRange.Count) - - for i := int64(0); i < offsetRange.Count; i++ { - offset := offsetRange.StartOffset + i - - responses[i] = &mq_agent_pb.SubscribeRecordResponse{ - Key: []byte(fmt.Sprintf("key-%d", offset)), - Value: &schema_pb.RecordValue{}, // Mock value - TsNs: offset * 1000000, // Mock timestamp based on offset - Offset: offset, - IsEndOfStream: false, - IsEndOfTopic: false, - Error: "", - } - } - - // Advance the subscription - subscription.AdvanceOffsetBy(offsetRange.Count) - - return responses, nil -} - -// GetHighWaterMark returns the high water mark for a partition -func (integration *SMQOffsetIntegration) GetHighWaterMark(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - return integration.offsetAssigner.GetHighWaterMark(namespace, topicName, partition) -} - -// SeekSubscription seeks a subscription to a specific offset -func (integration *SMQOffsetIntegration) SeekSubscription( - subscriptionID string, - offset int64, -) error { - - subscription, err := integration.offsetSubscriber.GetSubscription(subscriptionID) - if err != nil { - return fmt.Errorf("subscription not found: %w", err) - } - - return subscription.SeekToOffset(offset) -} - -// GetSubscriptionLag returns the lag for a subscription -func (integration *SMQOffsetIntegration) GetSubscriptionLag(subscriptionID string) (int64, error) { - subscription, err := integration.offsetSubscriber.GetSubscription(subscriptionID) - if err != nil { - return 0, fmt.Errorf("subscription not found: %w", err) - } - - return subscription.GetLag() -} - -// CloseSubscription closes a subscription -func (integration *SMQOffsetIntegration) CloseSubscription(subscriptionID string) error { - return integration.offsetSubscriber.CloseSubscription(subscriptionID) -} - -// ValidateOffsetRange validates an offset range for a partition -func (integration *SMQOffsetIntegration) ValidateOffsetRange( - namespace, topicName string, - partition *schema_pb.Partition, - startOffset, endOffset int64, -) error { - - return integration.offsetSeeker.ValidateOffsetRange(namespace, topicName, partition, startOffset, endOffset) -} - -// GetAvailableOffsetRange returns the available offset range for a partition -func (integration *SMQOffsetIntegration) GetAvailableOffsetRange(namespace, topicName string, partition *schema_pb.Partition) (*OffsetRange, error) { - return integration.offsetSeeker.GetAvailableOffsetRange(namespace, topicName, partition) -} - -// PublishRecordRequest represents a record to be published -type PublishRecordRequest struct { - Key []byte - Value *schema_pb.RecordValue -} - -// OffsetMetrics provides metrics about offset usage -type OffsetMetrics struct { - PartitionCount int64 - TotalOffsets int64 - ActiveSubscriptions int64 - AverageLatency float64 -} - -// GetOffsetMetrics returns metrics about offset usage -func (integration *SMQOffsetIntegration) GetOffsetMetrics() *OffsetMetrics { - integration.mu.RLock() - defer integration.mu.RUnlock() - - // Count active subscriptions - activeSubscriptions := int64(0) - for _, subscription := range integration.offsetSubscriber.subscriptions { - if subscription.IsActive { - activeSubscriptions++ - } - } - - // Calculate total offsets from all partition managers instead of in-memory map - var totalOffsets int64 - for _, manager := range integration.offsetAssigner.registry.managers { - totalOffsets += manager.GetHighWaterMark() - } - - return &OffsetMetrics{ - PartitionCount: int64(len(integration.offsetAssigner.registry.managers)), - TotalOffsets: totalOffsets, // Now calculated from storage, not memory maps - ActiveSubscriptions: activeSubscriptions, - AverageLatency: 0.0, // TODO: Implement latency tracking - } -} - -// OffsetInfo provides detailed information about an offset -type OffsetInfo struct { - Offset int64 - Timestamp int64 - Partition *schema_pb.Partition - Exists bool -} - -// GetOffsetInfo returns detailed information about a specific offset -func (integration *SMQOffsetIntegration) GetOffsetInfo( - namespace, topicName string, - partition *schema_pb.Partition, - offset int64, -) (*OffsetInfo, error) { - - hwm, err := integration.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return nil, fmt.Errorf("failed to get high water mark: %w", err) - } - - exists := offset >= 0 && offset < hwm - - // TODO: Get actual timestamp from storage - timestamp := int64(0) - // Note: Timestamp lookup from in-memory map removed to prevent memory leaks - // For now, use a placeholder timestamp. In production, this should come from - // persistent storage if timestamp tracking is needed. - if exists { - timestamp = time.Now().UnixNano() // Placeholder - should come from storage - } - - return &OffsetInfo{ - Offset: offset, - Timestamp: timestamp, - Partition: partition, - Exists: exists, - }, nil -} - -// PartitionOffsetInfo provides offset information for a partition -type PartitionOffsetInfo struct { - Partition *schema_pb.Partition - EarliestOffset int64 - LatestOffset int64 - HighWaterMark int64 - RecordCount int64 - ActiveSubscriptions int64 -} - -// GetPartitionOffsetInfo returns comprehensive offset information for a partition -func (integration *SMQOffsetIntegration) GetPartitionOffsetInfo(namespace, topicName string, partition *schema_pb.Partition) (*PartitionOffsetInfo, error) { - hwm, err := integration.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return nil, fmt.Errorf("failed to get high water mark: %w", err) - } - - earliestOffset := int64(0) - latestOffset := hwm - 1 - if hwm == 0 { - latestOffset = -1 // No records - } - - // Count active subscriptions for this partition - activeSubscriptions := int64(0) - integration.mu.RLock() - for _, subscription := range integration.offsetSubscriber.subscriptions { - if subscription.IsActive && partitionKey(subscription.Partition) == partitionKey(partition) { - activeSubscriptions++ - } - } - integration.mu.RUnlock() - - return &PartitionOffsetInfo{ - Partition: partition, - EarliestOffset: earliestOffset, - LatestOffset: latestOffset, - HighWaterMark: hwm, - RecordCount: hwm, - ActiveSubscriptions: activeSubscriptions, - }, nil -} - -// GetSubscription retrieves an existing subscription -func (integration *SMQOffsetIntegration) GetSubscription(subscriptionID string) (*OffsetSubscription, error) { - return integration.offsetSubscriber.GetSubscription(subscriptionID) -} - -// ListActiveSubscriptions returns all active subscriptions -func (integration *SMQOffsetIntegration) ListActiveSubscriptions() ([]*OffsetSubscription, error) { - integration.mu.RLock() - defer integration.mu.RUnlock() - - result := make([]*OffsetSubscription, 0) - for _, subscription := range integration.offsetSubscriber.subscriptions { - if subscription.IsActive { - result = append(result, subscription) - } - } - - return result, nil -} - -// AssignSingleOffset assigns a single offset for a partition -func (integration *SMQOffsetIntegration) AssignSingleOffset(namespace, topicName string, partition *schema_pb.Partition) *AssignmentResult { - return integration.offsetAssigner.AssignSingleOffset(namespace, topicName, partition) -} - -// AssignBatchOffsets assigns a batch of offsets for a partition -func (integration *SMQOffsetIntegration) AssignBatchOffsets(namespace, topicName string, partition *schema_pb.Partition, count int64) *AssignmentResult { - return integration.offsetAssigner.AssignBatchOffsets(namespace, topicName, partition, count) -} - -// Reset resets the integration layer state (for testing) -func (integration *SMQOffsetIntegration) Reset() { - integration.mu.Lock() - defer integration.mu.Unlock() - - // Note: No in-memory maps to clear (removed to prevent memory leaks) - - // Close all subscriptions - for _, subscription := range integration.offsetSubscriber.subscriptions { - subscription.IsActive = false - } - integration.offsetSubscriber.subscriptions = make(map[string]*OffsetSubscription) - - // Reset the registries by creating new ones with the same storage - // This ensures that partition managers start fresh - registry := NewPartitionOffsetRegistry(integration.offsetAssigner.registry.storage) - integration.offsetAssigner.registry = registry - integration.offsetSubscriber.offsetRegistry = registry - integration.offsetSeeker.offsetRegistry = registry -} diff --git a/weed/mq/offset/integration_test.go b/weed/mq/offset/integration_test.go deleted file mode 100644 index 35299be65..000000000 --- a/weed/mq/offset/integration_test.go +++ /dev/null @@ -1,544 +0,0 @@ -package offset - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestSMQOffsetIntegration_PublishRecord(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish a single record - response, err := integration.PublishRecord( - "test-namespace", "test-topic", - partition, - []byte("test-key"), - &schema_pb.RecordValue{}, - ) - - if err != nil { - t.Fatalf("Failed to publish record: %v", err) - } - - if response.Error != "" { - t.Errorf("Expected no error, got: %s", response.Error) - } - - if response.BaseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", response.BaseOffset) - } - - if response.LastOffset != 0 { - t.Errorf("Expected last offset 0, got %d", response.LastOffset) - } -} - -func TestSMQOffsetIntegration_PublishRecordBatch(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Create batch of records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - } - - // Publish batch - response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - if err != nil { - t.Fatalf("Failed to publish record batch: %v", err) - } - - if response.Error != "" { - t.Errorf("Expected no error, got: %s", response.Error) - } - - if response.BaseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", response.BaseOffset) - } - - if response.LastOffset != 2 { - t.Errorf("Expected last offset 2, got %d", response.LastOffset) - } - - // Verify high water mark - hwm, err := integration.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark: %v", err) - } - - if hwm != 3 { - t.Errorf("Expected high water mark 3, got %d", hwm) - } -} - -func TestSMQOffsetIntegration_EmptyBatch(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish empty batch - response, err := integration.PublishRecordBatch("test-namespace", "test-topic", partition, []PublishRecordRequest{}) - if err != nil { - t.Fatalf("Failed to publish empty batch: %v", err) - } - - if response.Error == "" { - t.Error("Expected error for empty batch") - } -} - -func TestSMQOffsetIntegration_CreateSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish some records first - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscription - sub, err := integration.CreateSubscription( - "test-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - if sub.ID != "test-sub" { - t.Errorf("Expected subscription ID 'test-sub', got %s", sub.ID) - } - - if sub.StartOffset != 0 { - t.Errorf("Expected start offset 0, got %d", sub.StartOffset) - } -} - -func TestSMQOffsetIntegration_SubscribeRecords(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish some records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscription - sub, err := integration.CreateSubscription( - "test-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Subscribe to records - responses, err := integration.SubscribeRecords(sub, 2) - if err != nil { - t.Fatalf("Failed to subscribe to records: %v", err) - } - - if len(responses) != 2 { - t.Errorf("Expected 2 responses, got %d", len(responses)) - } - - // Check offset progression - if responses[0].Offset != 0 { - t.Errorf("Expected first record offset 0, got %d", responses[0].Offset) - } - - if responses[1].Offset != 1 { - t.Errorf("Expected second record offset 1, got %d", responses[1].Offset) - } - - // Check subscription advancement - if sub.CurrentOffset != 2 { - t.Errorf("Expected subscription current offset 2, got %d", sub.CurrentOffset) - } -} - -func TestSMQOffsetIntegration_SubscribeEmptyPartition(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Create subscription on empty partition - sub, err := integration.CreateSubscription( - "empty-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Subscribe to records (should return empty) - responses, err := integration.SubscribeRecords(sub, 10) - if err != nil { - t.Fatalf("Failed to subscribe to empty partition: %v", err) - } - - if len(responses) != 0 { - t.Errorf("Expected 0 responses from empty partition, got %d", len(responses)) - } -} - -func TestSMQOffsetIntegration_SeekSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key4"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key5"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscription - sub, err := integration.CreateSubscription( - "seek-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Seek to offset 3 - err = integration.SeekSubscription("seek-sub", 3) - if err != nil { - t.Fatalf("Failed to seek subscription: %v", err) - } - - if sub.CurrentOffset != 3 { - t.Errorf("Expected current offset 3 after seek, got %d", sub.CurrentOffset) - } - - // Subscribe from new position - responses, err := integration.SubscribeRecords(sub, 2) - if err != nil { - t.Fatalf("Failed to subscribe after seek: %v", err) - } - - if len(responses) != 2 { - t.Errorf("Expected 2 responses after seek, got %d", len(responses)) - } - - if responses[0].Offset != 3 { - t.Errorf("Expected first record offset 3 after seek, got %d", responses[0].Offset) - } -} - -func TestSMQOffsetIntegration_GetSubscriptionLag(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscription at offset 1 - sub, err := integration.CreateSubscription( - "lag-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_EXACT_OFFSET, - 1, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Get lag - lag, err := integration.GetSubscriptionLag("lag-sub") - if err != nil { - t.Fatalf("Failed to get subscription lag: %v", err) - } - - expectedLag := int64(3 - 1) // hwm - current - if lag != expectedLag { - t.Errorf("Expected lag %d, got %d", expectedLag, lag) - } - - // Advance subscription and check lag again - integration.SubscribeRecords(sub, 1) - - lag, err = integration.GetSubscriptionLag("lag-sub") - if err != nil { - t.Fatalf("Failed to get lag after advance: %v", err) - } - - expectedLag = int64(3 - 2) // hwm - current - if lag != expectedLag { - t.Errorf("Expected lag %d after advance, got %d", expectedLag, lag) - } -} - -func TestSMQOffsetIntegration_CloseSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Create subscription - _, err := integration.CreateSubscription( - "close-sub", - "test-namespace", "test-topic", - partition, - schema_pb.OffsetType_RESET_TO_EARLIEST, - 0, - ) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Close subscription - err = integration.CloseSubscription("close-sub") - if err != nil { - t.Fatalf("Failed to close subscription: %v", err) - } - - // Try to get lag (should fail) - _, err = integration.GetSubscriptionLag("close-sub") - if err == nil { - t.Error("Expected error when getting lag for closed subscription") - } -} - -func TestSMQOffsetIntegration_ValidateOffsetRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Publish some records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Test valid range - err := integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 0, 2) - if err != nil { - t.Errorf("Valid range should not return error: %v", err) - } - - // Test invalid range (beyond hwm) - err = integration.ValidateOffsetRange("test-namespace", "test-topic", partition, 0, 5) - if err == nil { - t.Error("Expected error for range beyond high water mark") - } -} - -func TestSMQOffsetIntegration_GetAvailableOffsetRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Test empty partition - offsetRange, err := integration.GetAvailableOffsetRange("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get available range for empty partition: %v", err) - } - - if offsetRange.Count != 0 { - t.Errorf("Expected empty range for empty partition, got count %d", offsetRange.Count) - } - - // Publish records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Test with data - offsetRange, err = integration.GetAvailableOffsetRange("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get available range: %v", err) - } - - if offsetRange.StartOffset != 0 { - t.Errorf("Expected start offset 0, got %d", offsetRange.StartOffset) - } - - if offsetRange.EndOffset != 1 { - t.Errorf("Expected end offset 1, got %d", offsetRange.EndOffset) - } - - if offsetRange.Count != 2 { - t.Errorf("Expected count 2, got %d", offsetRange.Count) - } -} - -func TestSMQOffsetIntegration_GetOffsetMetrics(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Initial metrics - metrics := integration.GetOffsetMetrics() - if metrics.TotalOffsets != 0 { - t.Errorf("Expected 0 total offsets initially, got %d", metrics.TotalOffsets) - } - - if metrics.ActiveSubscriptions != 0 { - t.Errorf("Expected 0 active subscriptions initially, got %d", metrics.ActiveSubscriptions) - } - - // Publish records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscriptions - integration.CreateSubscription("sub1", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - integration.CreateSubscription("sub2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - - // Check updated metrics - metrics = integration.GetOffsetMetrics() - if metrics.TotalOffsets != 2 { - t.Errorf("Expected 2 total offsets, got %d", metrics.TotalOffsets) - } - - if metrics.ActiveSubscriptions != 2 { - t.Errorf("Expected 2 active subscriptions, got %d", metrics.ActiveSubscriptions) - } - - if metrics.PartitionCount != 1 { - t.Errorf("Expected 1 partition, got %d", metrics.PartitionCount) - } -} - -func TestSMQOffsetIntegration_GetOffsetInfo(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Test non-existent offset - info, err := integration.GetOffsetInfo("test-namespace", "test-topic", partition, 0) - if err != nil { - t.Fatalf("Failed to get offset info: %v", err) - } - - if info.Exists { - t.Error("Offset should not exist in empty partition") - } - - // Publish record - integration.PublishRecord("test-namespace", "test-topic", partition, []byte("key1"), &schema_pb.RecordValue{}) - - // Test existing offset - info, err = integration.GetOffsetInfo("test-namespace", "test-topic", partition, 0) - if err != nil { - t.Fatalf("Failed to get offset info for existing offset: %v", err) - } - - if !info.Exists { - t.Error("Offset should exist after publishing") - } - - if info.Offset != 0 { - t.Errorf("Expected offset 0, got %d", info.Offset) - } -} - -func TestSMQOffsetIntegration_GetPartitionOffsetInfo(t *testing.T) { - storage := NewInMemoryOffsetStorage() - integration := NewSMQOffsetIntegration(storage) - partition := createTestPartition() - - // Test empty partition - info, err := integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get partition offset info: %v", err) - } - - if info.EarliestOffset != 0 { - t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset) - } - - if info.LatestOffset != -1 { - t.Errorf("Expected latest offset -1 for empty partition, got %d", info.LatestOffset) - } - - if info.HighWaterMark != 0 { - t.Errorf("Expected high water mark 0, got %d", info.HighWaterMark) - } - - if info.RecordCount != 0 { - t.Errorf("Expected record count 0, got %d", info.RecordCount) - } - - // Publish records - records := []PublishRecordRequest{ - {Key: []byte("key1"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key2"), Value: &schema_pb.RecordValue{}}, - {Key: []byte("key3"), Value: &schema_pb.RecordValue{}}, - } - integration.PublishRecordBatch("test-namespace", "test-topic", partition, records) - - // Create subscription - integration.CreateSubscription("test-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - - // Test with data - info, err = integration.GetPartitionOffsetInfo("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get partition offset info with data: %v", err) - } - - if info.EarliestOffset != 0 { - t.Errorf("Expected earliest offset 0, got %d", info.EarliestOffset) - } - - if info.LatestOffset != 2 { - t.Errorf("Expected latest offset 2, got %d", info.LatestOffset) - } - - if info.HighWaterMark != 3 { - t.Errorf("Expected high water mark 3, got %d", info.HighWaterMark) - } - - if info.RecordCount != 3 { - t.Errorf("Expected record count 3, got %d", info.RecordCount) - } - - if info.ActiveSubscriptions != 1 { - t.Errorf("Expected 1 active subscription, got %d", info.ActiveSubscriptions) - } -} diff --git a/weed/mq/offset/manager.go b/weed/mq/offset/manager.go deleted file mode 100644 index 53388d82f..000000000 --- a/weed/mq/offset/manager.go +++ /dev/null @@ -1,385 +0,0 @@ -package offset - -import ( - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// PartitionOffsetManager manages sequential offset assignment for a single partition -type PartitionOffsetManager struct { - mu sync.RWMutex - namespace string - topicName string - partition *schema_pb.Partition - nextOffset int64 - - // Checkpointing for recovery - lastCheckpoint int64 - lastCheckpointedOffset int64 - storage OffsetStorage - - // Background checkpointing - stopCheckpoint chan struct{} -} - -// OffsetStorage interface for persisting offset state -type OffsetStorage interface { - // SaveCheckpoint persists the current offset state for recovery - // Takes topic information along with partition to determine the correct storage location - SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, offset int64) error - - // LoadCheckpoint retrieves the last saved offset state - LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) - - // GetHighestOffset scans storage to find the highest assigned offset - GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) -} - -// NewPartitionOffsetManager creates a new offset manager for a partition -func NewPartitionOffsetManager(namespace, topicName string, partition *schema_pb.Partition, storage OffsetStorage) (*PartitionOffsetManager, error) { - manager := &PartitionOffsetManager{ - namespace: namespace, - topicName: topicName, - partition: partition, - storage: storage, - stopCheckpoint: make(chan struct{}), - } - - // Recover offset state - if err := manager.recover(); err != nil { - return nil, fmt.Errorf("failed to recover offset state: %w", err) - } - - // Start background checkpoint goroutine - go manager.runPeriodicCheckpoint() - - return manager, nil -} - -// Close stops the background checkpoint goroutine and performs a final checkpoint -func (m *PartitionOffsetManager) Close() error { - close(m.stopCheckpoint) - - // Perform final checkpoint - m.mu.RLock() - currentOffset := m.nextOffset - 1 // Last assigned offset - lastCheckpointed := m.lastCheckpointedOffset - m.mu.RUnlock() - - if currentOffset >= 0 && currentOffset > lastCheckpointed { - return m.storage.SaveCheckpoint(m.namespace, m.topicName, m.partition, currentOffset) - } - return nil -} - -// AssignOffset assigns the next sequential offset -func (m *PartitionOffsetManager) AssignOffset() int64 { - m.mu.Lock() - offset := m.nextOffset - m.nextOffset++ - m.mu.Unlock() - - return offset -} - -// AssignOffsets assigns a batch of sequential offsets -func (m *PartitionOffsetManager) AssignOffsets(count int64) (baseOffset int64, lastOffset int64) { - m.mu.Lock() - baseOffset = m.nextOffset - lastOffset = m.nextOffset + count - 1 - m.nextOffset += count - m.mu.Unlock() - - return baseOffset, lastOffset -} - -// GetNextOffset returns the next offset that will be assigned -func (m *PartitionOffsetManager) GetNextOffset() int64 { - m.mu.RLock() - defer m.mu.RUnlock() - return m.nextOffset -} - -// GetHighWaterMark returns the high water mark (next offset) -func (m *PartitionOffsetManager) GetHighWaterMark() int64 { - return m.GetNextOffset() -} - -// recover restores offset state from storage -func (m *PartitionOffsetManager) recover() error { - var checkpointOffset int64 = -1 - var highestOffset int64 = -1 - - // Try to load checkpoint - if offset, err := m.storage.LoadCheckpoint(m.namespace, m.topicName, m.partition); err == nil && offset >= 0 { - checkpointOffset = offset - } - - // Try to scan storage for highest offset - if offset, err := m.storage.GetHighestOffset(m.namespace, m.topicName, m.partition); err == nil && offset >= 0 { - highestOffset = offset - } - - // Use the higher of checkpoint or storage scan - if checkpointOffset >= 0 && highestOffset >= 0 { - if highestOffset > checkpointOffset { - m.nextOffset = highestOffset + 1 - m.lastCheckpoint = highestOffset - m.lastCheckpointedOffset = highestOffset - } else { - m.nextOffset = checkpointOffset + 1 - m.lastCheckpoint = checkpointOffset - m.lastCheckpointedOffset = checkpointOffset - } - } else if checkpointOffset >= 0 { - m.nextOffset = checkpointOffset + 1 - m.lastCheckpoint = checkpointOffset - m.lastCheckpointedOffset = checkpointOffset - } else if highestOffset >= 0 { - m.nextOffset = highestOffset + 1 - m.lastCheckpoint = highestOffset - m.lastCheckpointedOffset = highestOffset - } else { - // No data exists, start from 0 - m.nextOffset = 0 - m.lastCheckpoint = -1 - m.lastCheckpointedOffset = -1 - } - - return nil -} - -// runPeriodicCheckpoint runs in the background and checkpoints every 2 seconds if the offset changed -func (m *PartitionOffsetManager) runPeriodicCheckpoint() { - ticker := time.NewTicker(2 * time.Second) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - m.performCheckpointIfChanged() - case <-m.stopCheckpoint: - return - } - } -} - -// performCheckpointIfChanged saves checkpoint only if offset has changed since last checkpoint -func (m *PartitionOffsetManager) performCheckpointIfChanged() { - m.mu.RLock() - currentOffset := m.nextOffset - 1 // Last assigned offset - lastCheckpointed := m.lastCheckpointedOffset - m.mu.RUnlock() - - // Skip if no messages have been assigned, or no change since last checkpoint - if currentOffset < 0 || currentOffset == lastCheckpointed { - return - } - - // Perform checkpoint - if err := m.storage.SaveCheckpoint(m.namespace, m.topicName, m.partition, currentOffset); err != nil { - // Log error but don't fail - checkpointing is for optimization - fmt.Printf("Failed to checkpoint offset %d for %s/%s: %v\n", currentOffset, m.namespace, m.topicName, err) - return - } - - // Update last checkpointed offset - m.mu.Lock() - m.lastCheckpointedOffset = currentOffset - m.lastCheckpoint = currentOffset - m.mu.Unlock() -} - -// PartitionOffsetRegistry manages offset managers for multiple partitions -type PartitionOffsetRegistry struct { - mu sync.RWMutex - managers map[string]*PartitionOffsetManager - storage OffsetStorage -} - -// NewPartitionOffsetRegistry creates a new registry -func NewPartitionOffsetRegistry(storage OffsetStorage) *PartitionOffsetRegistry { - return &PartitionOffsetRegistry{ - managers: make(map[string]*PartitionOffsetManager), - storage: storage, - } -} - -// GetManager returns the offset manager for a partition, creating it if needed -func (r *PartitionOffsetRegistry) GetManager(namespace, topicName string, partition *schema_pb.Partition) (*PartitionOffsetManager, error) { - // CRITICAL FIX: Use TopicPartitionKey to ensure each topic has its own offset manager - key := TopicPartitionKey(namespace, topicName, partition) - - r.mu.RLock() - manager, exists := r.managers[key] - r.mu.RUnlock() - - if exists { - return manager, nil - } - - // Create new manager - r.mu.Lock() - defer r.mu.Unlock() - - // Double-check after acquiring write lock - if manager, exists := r.managers[key]; exists { - return manager, nil - } - - manager, err := NewPartitionOffsetManager(namespace, topicName, partition, r.storage) - if err != nil { - return nil, err - } - - r.managers[key] = manager - return manager, nil -} - -// AssignOffset assigns an offset for the given partition -func (r *PartitionOffsetRegistry) AssignOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - manager, err := r.GetManager(namespace, topicName, partition) - if err != nil { - return 0, err - } - - assignedOffset := manager.AssignOffset() - - return assignedOffset, nil -} - -// AssignOffsets assigns a batch of offsets for the given partition -func (r *PartitionOffsetRegistry) AssignOffsets(namespace, topicName string, partition *schema_pb.Partition, count int64) (baseOffset, lastOffset int64, err error) { - manager, err := r.GetManager(namespace, topicName, partition) - if err != nil { - return 0, 0, err - } - - baseOffset, lastOffset = manager.AssignOffsets(count) - return baseOffset, lastOffset, nil -} - -// GetHighWaterMark returns the high water mark for a partition -func (r *PartitionOffsetRegistry) GetHighWaterMark(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - manager, err := r.GetManager(namespace, topicName, partition) - if err != nil { - return 0, err - } - - return manager.GetHighWaterMark(), nil -} - -// Close stops all partition managers and performs final checkpoints -func (r *PartitionOffsetRegistry) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - - var firstErr error - for _, manager := range r.managers { - if err := manager.Close(); err != nil && firstErr == nil { - firstErr = err - } - } - - return firstErr -} - -// TopicPartitionKey generates a unique key for a topic-partition combination -// This is the canonical key format used across the offset management system -func TopicPartitionKey(namespace, topicName string, partition *schema_pb.Partition) string { - return fmt.Sprintf("%s/%s/ring:%d:range:%d-%d", - namespace, topicName, - partition.RingSize, partition.RangeStart, partition.RangeStop) -} - -// PartitionKey generates a unique key for a partition (without topic context) -// Note: UnixTimeNs is intentionally excluded from the key because it represents -// partition creation time, not partition identity. Using it would cause offset -// tracking to reset whenever a partition is recreated or looked up again. -// DEPRECATED: Use TopicPartitionKey for production code to avoid key collisions -func PartitionKey(partition *schema_pb.Partition) string { - return fmt.Sprintf("ring:%d:range:%d-%d", - partition.RingSize, partition.RangeStart, partition.RangeStop) -} - -// partitionKey is the internal lowercase version for backward compatibility within this package -func partitionKey(partition *schema_pb.Partition) string { - return PartitionKey(partition) -} - -// OffsetAssignment represents an assigned offset with metadata -type OffsetAssignment struct { - Offset int64 - Timestamp int64 - Partition *schema_pb.Partition -} - -// BatchOffsetAssignment represents a batch of assigned offsets -type BatchOffsetAssignment struct { - BaseOffset int64 - LastOffset int64 - Count int64 - Timestamp int64 - Partition *schema_pb.Partition -} - -// AssignmentResult contains the result of offset assignment -type AssignmentResult struct { - Assignment *OffsetAssignment - Batch *BatchOffsetAssignment - Error error -} - -// OffsetAssigner provides high-level offset assignment operations -type OffsetAssigner struct { - registry *PartitionOffsetRegistry -} - -// NewOffsetAssigner creates a new offset assigner -func NewOffsetAssigner(storage OffsetStorage) *OffsetAssigner { - return &OffsetAssigner{ - registry: NewPartitionOffsetRegistry(storage), - } -} - -// AssignSingleOffset assigns a single offset with timestamp -func (a *OffsetAssigner) AssignSingleOffset(namespace, topicName string, partition *schema_pb.Partition) *AssignmentResult { - offset, err := a.registry.AssignOffset(namespace, topicName, partition) - if err != nil { - return &AssignmentResult{Error: err} - } - - return &AssignmentResult{ - Assignment: &OffsetAssignment{ - Offset: offset, - Timestamp: time.Now().UnixNano(), - Partition: partition, - }, - } -} - -// AssignBatchOffsets assigns a batch of offsets with timestamp -func (a *OffsetAssigner) AssignBatchOffsets(namespace, topicName string, partition *schema_pb.Partition, count int64) *AssignmentResult { - baseOffset, lastOffset, err := a.registry.AssignOffsets(namespace, topicName, partition, count) - if err != nil { - return &AssignmentResult{Error: err} - } - - return &AssignmentResult{ - Batch: &BatchOffsetAssignment{ - BaseOffset: baseOffset, - LastOffset: lastOffset, - Count: count, - Timestamp: time.Now().UnixNano(), - Partition: partition, - }, - } -} - -// GetHighWaterMark returns the high water mark for a partition -func (a *OffsetAssigner) GetHighWaterMark(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - return a.registry.GetHighWaterMark(namespace, topicName, partition) -} diff --git a/weed/mq/offset/manager_test.go b/weed/mq/offset/manager_test.go deleted file mode 100644 index 0db301e84..000000000 --- a/weed/mq/offset/manager_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package offset - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func createTestPartition() *schema_pb.Partition { - return &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } -} - -func TestPartitionOffsetManager_BasicAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - partition := createTestPartition() - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - t.Fatalf("Failed to create offset manager: %v", err) - } - - // Test sequential offset assignment - for i := int64(0); i < 10; i++ { - offset := manager.AssignOffset() - if offset != i { - t.Errorf("Expected offset %d, got %d", i, offset) - } - } - - // Test high water mark - hwm := manager.GetHighWaterMark() - if hwm != 10 { - t.Errorf("Expected high water mark 10, got %d", hwm) - } -} - -func TestPartitionOffsetManager_BatchAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - partition := createTestPartition() - - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - t.Fatalf("Failed to create offset manager: %v", err) - } - - // Assign batch of 5 offsets - baseOffset, lastOffset := manager.AssignOffsets(5) - if baseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", baseOffset) - } - if lastOffset != 4 { - t.Errorf("Expected last offset 4, got %d", lastOffset) - } - - // Assign another batch - baseOffset, lastOffset = manager.AssignOffsets(3) - if baseOffset != 5 { - t.Errorf("Expected base offset 5, got %d", baseOffset) - } - if lastOffset != 7 { - t.Errorf("Expected last offset 7, got %d", lastOffset) - } - - // Check high water mark - hwm := manager.GetHighWaterMark() - if hwm != 8 { - t.Errorf("Expected high water mark 8, got %d", hwm) - } -} - -func TestPartitionOffsetManager_Recovery(t *testing.T) { - storage := NewInMemoryOffsetStorage() - partition := createTestPartition() - - // Create manager and assign some offsets - manager1, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - t.Fatalf("Failed to create offset manager: %v", err) - } - - // Assign offsets and simulate records - for i := 0; i < 150; i++ { // More than checkpoint interval - offset := manager1.AssignOffset() - storage.AddRecord("test-namespace", "test-topic", partition, offset) - } - - // Wait for checkpoint to complete - time.Sleep(100 * time.Millisecond) - - // Create new manager (simulates restart) - manager2, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - t.Fatalf("Failed to create offset manager after recovery: %v", err) - } - - // Next offset should continue from checkpoint + 1 - // With checkpoint interval 100, checkpoint happens at offset 100 - // So recovery should start from 101, but we assigned 150 offsets (0-149) - // The checkpoint should be at 100, so next offset should be 101 - // But since we have records up to 149, it should recover from storage scan - nextOffset := manager2.AssignOffset() - if nextOffset != 150 { - t.Errorf("Expected next offset 150 after recovery, got %d", nextOffset) - } -} - -func TestPartitionOffsetManager_RecoveryFromStorage(t *testing.T) { - storage := NewInMemoryOffsetStorage() - partition := createTestPartition() - - // Simulate existing records in storage without checkpoint - for i := int64(0); i < 50; i++ { - storage.AddRecord("test-namespace", "test-topic", partition, i) - } - - // Create manager - should recover from storage scan - manager, err := NewPartitionOffsetManager("test-namespace", "test-topic", partition, storage) - if err != nil { - t.Fatalf("Failed to create offset manager: %v", err) - } - - // Next offset should be 50 - nextOffset := manager.AssignOffset() - if nextOffset != 50 { - t.Errorf("Expected next offset 50 after storage recovery, got %d", nextOffset) - } -} - -func TestPartitionOffsetRegistry_MultiplePartitions(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - - // Create different partitions - partition1 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } - - partition2 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 32, - RangeStop: 63, - UnixTimeNs: time.Now().UnixNano(), - } - - // Assign offsets to different partitions - offset1, err := registry.AssignOffset("test-namespace", "test-topic", partition1) - if err != nil { - t.Fatalf("Failed to assign offset to partition1: %v", err) - } - if offset1 != 0 { - t.Errorf("Expected offset 0 for partition1, got %d", offset1) - } - - offset2, err := registry.AssignOffset("test-namespace", "test-topic", partition2) - if err != nil { - t.Fatalf("Failed to assign offset to partition2: %v", err) - } - if offset2 != 0 { - t.Errorf("Expected offset 0 for partition2, got %d", offset2) - } - - // Assign more offsets to partition1 - offset1_2, err := registry.AssignOffset("test-namespace", "test-topic", partition1) - if err != nil { - t.Fatalf("Failed to assign second offset to partition1: %v", err) - } - if offset1_2 != 1 { - t.Errorf("Expected offset 1 for partition1, got %d", offset1_2) - } - - // Partition2 should still be at 0 for next assignment - offset2_2, err := registry.AssignOffset("test-namespace", "test-topic", partition2) - if err != nil { - t.Fatalf("Failed to assign second offset to partition2: %v", err) - } - if offset2_2 != 1 { - t.Errorf("Expected offset 1 for partition2, got %d", offset2_2) - } -} - -func TestPartitionOffsetRegistry_BatchAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - partition := createTestPartition() - - // Assign batch of offsets - baseOffset, lastOffset, err := registry.AssignOffsets("test-namespace", "test-topic", partition, 10) - if err != nil { - t.Fatalf("Failed to assign batch offsets: %v", err) - } - - if baseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", baseOffset) - } - if lastOffset != 9 { - t.Errorf("Expected last offset 9, got %d", lastOffset) - } - - // Get high water mark - hwm, err := registry.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark: %v", err) - } - if hwm != 10 { - t.Errorf("Expected high water mark 10, got %d", hwm) - } -} - -func TestOffsetAssigner_SingleAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - assigner := NewOffsetAssigner(storage) - partition := createTestPartition() - - // Assign single offset - result := assigner.AssignSingleOffset("test-namespace", "test-topic", partition) - if result.Error != nil { - t.Fatalf("Failed to assign single offset: %v", result.Error) - } - - if result.Assignment == nil { - t.Fatal("Assignment result is nil") - } - - if result.Assignment.Offset != 0 { - t.Errorf("Expected offset 0, got %d", result.Assignment.Offset) - } - - if result.Assignment.Partition != partition { - t.Error("Partition mismatch in assignment") - } - - if result.Assignment.Timestamp <= 0 { - t.Error("Timestamp should be set") - } -} - -func TestOffsetAssigner_BatchAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - assigner := NewOffsetAssigner(storage) - partition := createTestPartition() - - // Assign batch of offsets - result := assigner.AssignBatchOffsets("test-namespace", "test-topic", partition, 5) - if result.Error != nil { - t.Fatalf("Failed to assign batch offsets: %v", result.Error) - } - - if result.Batch == nil { - t.Fatal("Batch result is nil") - } - - if result.Batch.BaseOffset != 0 { - t.Errorf("Expected base offset 0, got %d", result.Batch.BaseOffset) - } - - if result.Batch.LastOffset != 4 { - t.Errorf("Expected last offset 4, got %d", result.Batch.LastOffset) - } - - if result.Batch.Count != 5 { - t.Errorf("Expected count 5, got %d", result.Batch.Count) - } - - if result.Batch.Timestamp <= 0 { - t.Error("Timestamp should be set") - } -} - -func TestOffsetAssigner_HighWaterMark(t *testing.T) { - storage := NewInMemoryOffsetStorage() - assigner := NewOffsetAssigner(storage) - partition := createTestPartition() - - // Initially should be 0 - hwm, err := assigner.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get initial high water mark: %v", err) - } - if hwm != 0 { - t.Errorf("Expected initial high water mark 0, got %d", hwm) - } - - // Assign some offsets - assigner.AssignBatchOffsets("test-namespace", "test-topic", partition, 10) - - // High water mark should be updated - hwm, err = assigner.GetHighWaterMark("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get high water mark after assignment: %v", err) - } - if hwm != 10 { - t.Errorf("Expected high water mark 10, got %d", hwm) - } -} - -func TestPartitionKey(t *testing.T) { - partition1 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: 1234567890, - } - - partition2 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: 1234567890, - } - - partition3 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 32, - RangeStop: 63, - UnixTimeNs: 1234567890, - } - - key1 := partitionKey(partition1) - key2 := partitionKey(partition2) - key3 := partitionKey(partition3) - - // Same partitions should have same key - if key1 != key2 { - t.Errorf("Same partitions should have same key: %s vs %s", key1, key2) - } - - // Different partitions should have different keys - if key1 == key3 { - t.Errorf("Different partitions should have different keys: %s vs %s", key1, key3) - } -} - -func TestConcurrentOffsetAssignment(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - partition := createTestPartition() - - const numGoroutines = 10 - const offsetsPerGoroutine = 100 - - results := make(chan int64, numGoroutines*offsetsPerGoroutine) - - // Start concurrent offset assignments - for i := 0; i < numGoroutines; i++ { - go func() { - for j := 0; j < offsetsPerGoroutine; j++ { - offset, err := registry.AssignOffset("test-namespace", "test-topic", partition) - if err != nil { - t.Errorf("Failed to assign offset: %v", err) - return - } - results <- offset - } - }() - } - - // Collect all results - offsets := make(map[int64]bool) - for i := 0; i < numGoroutines*offsetsPerGoroutine; i++ { - offset := <-results - if offsets[offset] { - t.Errorf("Duplicate offset assigned: %d", offset) - } - offsets[offset] = true - } - - // Verify we got all expected offsets - expectedCount := numGoroutines * offsetsPerGoroutine - if len(offsets) != expectedCount { - t.Errorf("Expected %d unique offsets, got %d", expectedCount, len(offsets)) - } - - // Verify offsets are in expected range - for offset := range offsets { - if offset < 0 || offset >= int64(expectedCount) { - t.Errorf("Offset %d is out of expected range [0, %d)", offset, expectedCount) - } - } -} diff --git a/weed/mq/offset/memory_storage_test.go b/weed/mq/offset/memory_storage_test.go deleted file mode 100644 index 4434e1eb6..000000000 --- a/weed/mq/offset/memory_storage_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package offset - -import ( - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// recordEntry holds a record with timestamp for TTL cleanup -type recordEntry struct { - exists bool - timestamp time.Time -} - -// InMemoryOffsetStorage provides an in-memory implementation of OffsetStorage for testing ONLY -// WARNING: This should NEVER be used in production - use FilerOffsetStorage or SQLOffsetStorage instead -type InMemoryOffsetStorage struct { - mu sync.RWMutex - checkpoints map[string]int64 // partition key -> offset - records map[string]map[int64]*recordEntry // partition key -> offset -> entry with timestamp - - // Memory leak protection - maxRecordsPerPartition int // Maximum records to keep per partition - recordTTL time.Duration // TTL for record entries - lastCleanup time.Time // Last cleanup time - cleanupInterval time.Duration // How often to run cleanup -} - -// NewInMemoryOffsetStorage creates a new in-memory storage with memory leak protection -// FOR TESTING ONLY - do not use in production -func NewInMemoryOffsetStorage() *InMemoryOffsetStorage { - return &InMemoryOffsetStorage{ - checkpoints: make(map[string]int64), - records: make(map[string]map[int64]*recordEntry), - maxRecordsPerPartition: 10000, // Limit to 10K records per partition - recordTTL: 1 * time.Hour, // Records expire after 1 hour - cleanupInterval: 5 * time.Minute, // Cleanup every 5 minutes - lastCleanup: time.Now(), - } -} - -// SaveCheckpoint saves the checkpoint for a partition -func (s *InMemoryOffsetStorage) SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, offset int64) error { - s.mu.Lock() - defer s.mu.Unlock() - - // Use TopicPartitionKey for consistency with other storage implementations - key := TopicPartitionKey(namespace, topicName, partition) - s.checkpoints[key] = offset - return nil -} - -// LoadCheckpoint loads the checkpoint for a partition -func (s *InMemoryOffsetStorage) LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - // Use TopicPartitionKey to match SaveCheckpoint - key := TopicPartitionKey(namespace, topicName, partition) - offset, exists := s.checkpoints[key] - if !exists { - return -1, fmt.Errorf("no checkpoint found") - } - - return offset, nil -} - -// GetHighestOffset finds the highest offset in storage for a partition -func (s *InMemoryOffsetStorage) GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - // Use TopicPartitionKey to match SaveCheckpoint - key := TopicPartitionKey(namespace, topicName, partition) - offsets, exists := s.records[key] - if !exists || len(offsets) == 0 { - return -1, fmt.Errorf("no records found") - } - - var highest int64 = -1 - for offset, entry := range offsets { - if entry.exists && offset > highest { - highest = offset - } - } - - return highest, nil -} - -// AddRecord simulates storing a record with an offset (for testing) -func (s *InMemoryOffsetStorage) AddRecord(namespace, topicName string, partition *schema_pb.Partition, offset int64) { - s.mu.Lock() - defer s.mu.Unlock() - - // Use TopicPartitionKey to match GetHighestOffset - key := TopicPartitionKey(namespace, topicName, partition) - if s.records[key] == nil { - s.records[key] = make(map[int64]*recordEntry) - } - - // Add record with current timestamp - s.records[key][offset] = &recordEntry{ - exists: true, - timestamp: time.Now(), - } - - // Trigger cleanup if needed (memory leak protection) - s.cleanupIfNeeded() -} - -// GetRecordCount returns the number of records for a partition (for testing) -func (s *InMemoryOffsetStorage) GetRecordCount(namespace, topicName string, partition *schema_pb.Partition) int { - s.mu.RLock() - defer s.mu.RUnlock() - - // Use TopicPartitionKey to match GetHighestOffset - key := TopicPartitionKey(namespace, topicName, partition) - if offsets, exists := s.records[key]; exists { - count := 0 - for _, entry := range offsets { - if entry.exists { - count++ - } - } - return count - } - return 0 -} - -// Clear removes all data (for testing) -func (s *InMemoryOffsetStorage) Clear() { - s.mu.Lock() - defer s.mu.Unlock() - - s.checkpoints = make(map[string]int64) - s.records = make(map[string]map[int64]*recordEntry) - s.lastCleanup = time.Now() -} - -// Reset removes all data (implements resettable interface for shutdown) -func (s *InMemoryOffsetStorage) Reset() error { - s.Clear() - return nil -} - -// cleanupIfNeeded performs memory leak protection cleanup -// This method assumes the caller already holds the write lock -func (s *InMemoryOffsetStorage) cleanupIfNeeded() { - now := time.Now() - - // Only cleanup if enough time has passed - if now.Sub(s.lastCleanup) < s.cleanupInterval { - return - } - - s.lastCleanup = now - cutoff := now.Add(-s.recordTTL) - - // Clean up expired records and enforce size limits - for partitionKey, offsets := range s.records { - // Remove expired records - for offset, entry := range offsets { - if entry.timestamp.Before(cutoff) { - delete(offsets, offset) - } - } - - // Enforce size limit per partition - if len(offsets) > s.maxRecordsPerPartition { - // Keep only the most recent records - type offsetTime struct { - offset int64 - time time.Time - } - - var entries []offsetTime - for offset, entry := range offsets { - entries = append(entries, offsetTime{offset: offset, time: entry.timestamp}) - } - - // Sort by timestamp (newest first) - for i := 0; i < len(entries)-1; i++ { - for j := i + 1; j < len(entries); j++ { - if entries[i].time.Before(entries[j].time) { - entries[i], entries[j] = entries[j], entries[i] - } - } - } - - // Keep only the newest maxRecordsPerPartition entries - newOffsets := make(map[int64]*recordEntry) - for i := 0; i < s.maxRecordsPerPartition && i < len(entries); i++ { - offset := entries[i].offset - newOffsets[offset] = offsets[offset] - } - - s.records[partitionKey] = newOffsets - } - - // Remove empty partition maps - if len(offsets) == 0 { - delete(s.records, partitionKey) - } - } -} - -// GetMemoryStats returns memory usage statistics for monitoring -func (s *InMemoryOffsetStorage) GetMemoryStats() map[string]interface{} { - s.mu.RLock() - defer s.mu.RUnlock() - - totalRecords := 0 - partitionCount := len(s.records) - - for _, offsets := range s.records { - totalRecords += len(offsets) - } - - return map[string]interface{}{ - "total_partitions": partitionCount, - "total_records": totalRecords, - "max_records_per_partition": s.maxRecordsPerPartition, - "record_ttl_hours": s.recordTTL.Hours(), - "last_cleanup": s.lastCleanup, - } -} diff --git a/weed/mq/offset/migration.go b/weed/mq/offset/migration.go deleted file mode 100644 index 106129206..000000000 --- a/weed/mq/offset/migration.go +++ /dev/null @@ -1,302 +0,0 @@ -package offset - -import ( - "database/sql" - "fmt" - "time" -) - -// MigrationVersion represents a database migration version -type MigrationVersion struct { - Version int - Description string - SQL string -} - -// GetMigrations returns all available migrations for offset storage -func GetMigrations() []MigrationVersion { - return []MigrationVersion{ - { - Version: 1, - Description: "Create initial offset storage tables", - SQL: ` - -- Partition offset checkpoints table - -- TODO: Add _index as computed column when supported by database - CREATE TABLE IF NOT EXISTS partition_offset_checkpoints ( - partition_key TEXT PRIMARY KEY, - ring_size INTEGER NOT NULL, - range_start INTEGER NOT NULL, - range_stop INTEGER NOT NULL, - unix_time_ns INTEGER NOT NULL, - checkpoint_offset INTEGER NOT NULL, - updated_at INTEGER NOT NULL - ); - - -- Offset mappings table for detailed tracking - -- TODO: Add _index as computed column when supported by database - CREATE TABLE IF NOT EXISTS offset_mappings ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - partition_key TEXT NOT NULL, - kafka_offset INTEGER NOT NULL, - smq_timestamp INTEGER NOT NULL, - message_size INTEGER NOT NULL, - created_at INTEGER NOT NULL, - UNIQUE(partition_key, kafka_offset) - ); - - -- Schema migrations tracking table - CREATE TABLE IF NOT EXISTS schema_migrations ( - version INTEGER PRIMARY KEY, - description TEXT NOT NULL, - applied_at INTEGER NOT NULL - ); - `, - }, - { - Version: 2, - Description: "Add indexes for performance optimization", - SQL: ` - -- Indexes for performance - CREATE INDEX IF NOT EXISTS idx_partition_offset_checkpoints_partition - ON partition_offset_checkpoints(partition_key); - - CREATE INDEX IF NOT EXISTS idx_offset_mappings_partition_offset - ON offset_mappings(partition_key, kafka_offset); - - CREATE INDEX IF NOT EXISTS idx_offset_mappings_timestamp - ON offset_mappings(partition_key, smq_timestamp); - - CREATE INDEX IF NOT EXISTS idx_offset_mappings_created_at - ON offset_mappings(created_at); - `, - }, - { - Version: 3, - Description: "Add partition metadata table for enhanced tracking", - SQL: ` - -- Partition metadata table - CREATE TABLE IF NOT EXISTS partition_metadata ( - partition_key TEXT PRIMARY KEY, - ring_size INTEGER NOT NULL, - range_start INTEGER NOT NULL, - range_stop INTEGER NOT NULL, - unix_time_ns INTEGER NOT NULL, - created_at INTEGER NOT NULL, - last_activity_at INTEGER NOT NULL, - record_count INTEGER DEFAULT 0, - total_size INTEGER DEFAULT 0 - ); - - -- Index for partition metadata - CREATE INDEX IF NOT EXISTS idx_partition_metadata_activity - ON partition_metadata(last_activity_at); - `, - }, - } -} - -// MigrationManager handles database schema migrations -type MigrationManager struct { - db *sql.DB -} - -// NewMigrationManager creates a new migration manager -func NewMigrationManager(db *sql.DB) *MigrationManager { - return &MigrationManager{db: db} -} - -// GetCurrentVersion returns the current schema version -func (m *MigrationManager) GetCurrentVersion() (int, error) { - // First, ensure the migrations table exists - _, err := m.db.Exec(` - CREATE TABLE IF NOT EXISTS schema_migrations ( - version INTEGER PRIMARY KEY, - description TEXT NOT NULL, - applied_at INTEGER NOT NULL - ) - `) - if err != nil { - return 0, fmt.Errorf("failed to create migrations table: %w", err) - } - - var version sql.NullInt64 - err = m.db.QueryRow("SELECT MAX(version) FROM schema_migrations").Scan(&version) - if err != nil { - return 0, fmt.Errorf("failed to get current version: %w", err) - } - - if !version.Valid { - return 0, nil // No migrations applied yet - } - - return int(version.Int64), nil -} - -// ApplyMigrations applies all pending migrations -func (m *MigrationManager) ApplyMigrations() error { - currentVersion, err := m.GetCurrentVersion() - if err != nil { - return fmt.Errorf("failed to get current version: %w", err) - } - - migrations := GetMigrations() - - for _, migration := range migrations { - if migration.Version <= currentVersion { - continue // Already applied - } - - fmt.Printf("Applying migration %d: %s\n", migration.Version, migration.Description) - - // Begin transaction - tx, err := m.db.Begin() - if err != nil { - return fmt.Errorf("failed to begin transaction for migration %d: %w", migration.Version, err) - } - - // Execute migration SQL - _, err = tx.Exec(migration.SQL) - if err != nil { - tx.Rollback() - return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err) - } - - // Record migration as applied - _, err = tx.Exec( - "INSERT INTO schema_migrations (version, description, applied_at) VALUES (?, ?, ?)", - migration.Version, - migration.Description, - getCurrentTimestamp(), - ) - if err != nil { - tx.Rollback() - return fmt.Errorf("failed to record migration %d: %w", migration.Version, err) - } - - // Commit transaction - err = tx.Commit() - if err != nil { - return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err) - } - - fmt.Printf("Successfully applied migration %d\n", migration.Version) - } - - return nil -} - -// RollbackMigration rolls back a specific migration (if supported) -func (m *MigrationManager) RollbackMigration(version int) error { - // TODO: Implement rollback functionality - // ASSUMPTION: For now, rollbacks are not supported as they require careful planning - return fmt.Errorf("migration rollbacks not implemented - manual intervention required") -} - -// GetAppliedMigrations returns a list of all applied migrations -func (m *MigrationManager) GetAppliedMigrations() ([]AppliedMigration, error) { - rows, err := m.db.Query(` - SELECT version, description, applied_at - FROM schema_migrations - ORDER BY version - `) - if err != nil { - return nil, fmt.Errorf("failed to query applied migrations: %w", err) - } - defer rows.Close() - - var migrations []AppliedMigration - for rows.Next() { - var migration AppliedMigration - err := rows.Scan(&migration.Version, &migration.Description, &migration.AppliedAt) - if err != nil { - return nil, fmt.Errorf("failed to scan migration: %w", err) - } - migrations = append(migrations, migration) - } - - return migrations, nil -} - -// ValidateSchema validates that the database schema is up to date -func (m *MigrationManager) ValidateSchema() error { - currentVersion, err := m.GetCurrentVersion() - if err != nil { - return fmt.Errorf("failed to get current version: %w", err) - } - - migrations := GetMigrations() - if len(migrations) == 0 { - return nil - } - - latestVersion := migrations[len(migrations)-1].Version - if currentVersion < latestVersion { - return fmt.Errorf("schema is outdated: current version %d, latest version %d", currentVersion, latestVersion) - } - - return nil -} - -// AppliedMigration represents a migration that has been applied -type AppliedMigration struct { - Version int - Description string - AppliedAt int64 -} - -// getCurrentTimestamp returns the current timestamp in nanoseconds -func getCurrentTimestamp() int64 { - return time.Now().UnixNano() -} - -// CreateDatabase creates and initializes a new offset storage database -func CreateDatabase(dbPath string) (*sql.DB, error) { - // TODO: Support different database types (PostgreSQL, MySQL, etc.) - // ASSUMPTION: Using SQLite for now, can be extended for other databases - - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return nil, fmt.Errorf("failed to open database: %w", err) - } - - // Configure SQLite for better performance - pragmas := []string{ - "PRAGMA journal_mode=WAL", // Write-Ahead Logging for better concurrency - "PRAGMA synchronous=NORMAL", // Balance between safety and performance - "PRAGMA cache_size=10000", // Increase cache size - "PRAGMA foreign_keys=ON", // Enable foreign key constraints - "PRAGMA temp_store=MEMORY", // Store temporary tables in memory - } - - for _, pragma := range pragmas { - _, err := db.Exec(pragma) - if err != nil { - db.Close() - return nil, fmt.Errorf("failed to set pragma %s: %w", pragma, err) - } - } - - // Apply migrations - migrationManager := NewMigrationManager(db) - err = migrationManager.ApplyMigrations() - if err != nil { - db.Close() - return nil, fmt.Errorf("failed to apply migrations: %w", err) - } - - return db, nil -} - -// BackupDatabase creates a backup of the offset storage database -func BackupDatabase(sourceDB *sql.DB, backupPath string) error { - // TODO: Implement database backup functionality - // ASSUMPTION: This would use database-specific backup mechanisms - return fmt.Errorf("database backup not implemented yet") -} - -// RestoreDatabase restores a database from a backup -func RestoreDatabase(backupPath, targetPath string) error { - // TODO: Implement database restore functionality - // ASSUMPTION: This would use database-specific restore mechanisms - return fmt.Errorf("database restore not implemented yet") -} diff --git a/weed/mq/offset/sql_storage.go b/weed/mq/offset/sql_storage.go deleted file mode 100644 index c3107e5a4..000000000 --- a/weed/mq/offset/sql_storage.go +++ /dev/null @@ -1,394 +0,0 @@ -package offset - -import ( - "database/sql" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// OffsetEntry represents a mapping between Kafka offset and SMQ timestamp -type OffsetEntry struct { - KafkaOffset int64 - SMQTimestamp int64 - MessageSize int32 -} - -// SQLOffsetStorage implements OffsetStorage using SQL database with _index column -type SQLOffsetStorage struct { - db *sql.DB -} - -// NewSQLOffsetStorage creates a new SQL-based offset storage -func NewSQLOffsetStorage(db *sql.DB) (*SQLOffsetStorage, error) { - storage := &SQLOffsetStorage{db: db} - - // Initialize database schema - if err := storage.initializeSchema(); err != nil { - return nil, fmt.Errorf("failed to initialize schema: %w", err) - } - - return storage, nil -} - -// initializeSchema creates the necessary tables for offset storage -func (s *SQLOffsetStorage) initializeSchema() error { - // TODO: Create offset storage tables with _index as hidden column - // ASSUMPTION: Using SQLite-compatible syntax, may need adaptation for other databases - - queries := []string{ - // Partition offset checkpoints table - // TODO: Add _index as computed column when supported by database - // ASSUMPTION: Using regular columns for now, _index concept preserved for future enhancement - `CREATE TABLE IF NOT EXISTS partition_offset_checkpoints ( - partition_key TEXT PRIMARY KEY, - ring_size INTEGER NOT NULL, - range_start INTEGER NOT NULL, - range_stop INTEGER NOT NULL, - unix_time_ns INTEGER NOT NULL, - checkpoint_offset INTEGER NOT NULL, - updated_at INTEGER NOT NULL - )`, - - // Offset mappings table for detailed tracking - // TODO: Add _index as computed column when supported by database - `CREATE TABLE IF NOT EXISTS offset_mappings ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - partition_key TEXT NOT NULL, - kafka_offset INTEGER NOT NULL, - smq_timestamp INTEGER NOT NULL, - message_size INTEGER NOT NULL, - created_at INTEGER NOT NULL, - UNIQUE(partition_key, kafka_offset) - )`, - - // Indexes for performance - `CREATE INDEX IF NOT EXISTS idx_partition_offset_checkpoints_partition - ON partition_offset_checkpoints(partition_key)`, - - `CREATE INDEX IF NOT EXISTS idx_offset_mappings_partition_offset - ON offset_mappings(partition_key, kafka_offset)`, - - `CREATE INDEX IF NOT EXISTS idx_offset_mappings_timestamp - ON offset_mappings(partition_key, smq_timestamp)`, - } - - for _, query := range queries { - if _, err := s.db.Exec(query); err != nil { - return fmt.Errorf("failed to execute schema query: %w", err) - } - } - - return nil -} - -// SaveCheckpoint saves the checkpoint for a partition -func (s *SQLOffsetStorage) SaveCheckpoint(namespace, topicName string, partition *schema_pb.Partition, offset int64) error { - // Use TopicPartitionKey to ensure each topic has isolated checkpoint storage - partitionKey := TopicPartitionKey(namespace, topicName, partition) - now := time.Now().UnixNano() - - // TODO: Use UPSERT for better performance - // ASSUMPTION: SQLite REPLACE syntax, may need adaptation for other databases - query := ` - REPLACE INTO partition_offset_checkpoints - (partition_key, ring_size, range_start, range_stop, unix_time_ns, checkpoint_offset, updated_at) - VALUES (?, ?, ?, ?, ?, ?, ?) - ` - - _, err := s.db.Exec(query, - partitionKey, - partition.RingSize, - partition.RangeStart, - partition.RangeStop, - partition.UnixTimeNs, - offset, - now, - ) - - if err != nil { - return fmt.Errorf("failed to save checkpoint: %w", err) - } - - return nil -} - -// LoadCheckpoint loads the checkpoint for a partition -func (s *SQLOffsetStorage) LoadCheckpoint(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - // Use TopicPartitionKey to match SaveCheckpoint - partitionKey := TopicPartitionKey(namespace, topicName, partition) - - query := ` - SELECT checkpoint_offset - FROM partition_offset_checkpoints - WHERE partition_key = ? - ` - - var checkpointOffset int64 - err := s.db.QueryRow(query, partitionKey).Scan(&checkpointOffset) - - if err == sql.ErrNoRows { - return -1, fmt.Errorf("no checkpoint found") - } - - if err != nil { - return -1, fmt.Errorf("failed to load checkpoint: %w", err) - } - - return checkpointOffset, nil -} - -// GetHighestOffset finds the highest offset in storage for a partition -func (s *SQLOffsetStorage) GetHighestOffset(namespace, topicName string, partition *schema_pb.Partition) (int64, error) { - // Use TopicPartitionKey to match SaveCheckpoint - partitionKey := TopicPartitionKey(namespace, topicName, partition) - - // TODO: Use _index column for efficient querying - // ASSUMPTION: kafka_offset represents the sequential offset we're tracking - query := ` - SELECT MAX(kafka_offset) - FROM offset_mappings - WHERE partition_key = ? - ` - - var highestOffset sql.NullInt64 - err := s.db.QueryRow(query, partitionKey).Scan(&highestOffset) - - if err != nil { - return -1, fmt.Errorf("failed to get highest offset: %w", err) - } - - if !highestOffset.Valid { - return -1, fmt.Errorf("no records found") - } - - return highestOffset.Int64, nil -} - -// SaveOffsetMapping stores an offset mapping (extends OffsetStorage interface) -func (s *SQLOffsetStorage) SaveOffsetMapping(partitionKey string, kafkaOffset, smqTimestamp int64, size int32) error { - now := time.Now().UnixNano() - - // TODO: Handle duplicate key conflicts gracefully - // ASSUMPTION: Using INSERT OR REPLACE for conflict resolution - query := ` - INSERT OR REPLACE INTO offset_mappings - (partition_key, kafka_offset, smq_timestamp, message_size, created_at) - VALUES (?, ?, ?, ?, ?) - ` - - _, err := s.db.Exec(query, partitionKey, kafkaOffset, smqTimestamp, size, now) - if err != nil { - return fmt.Errorf("failed to save offset mapping: %w", err) - } - - return nil -} - -// LoadOffsetMappings retrieves all offset mappings for a partition -func (s *SQLOffsetStorage) LoadOffsetMappings(partitionKey string) ([]OffsetEntry, error) { - // TODO: Add pagination for large result sets - // ASSUMPTION: Loading all mappings for now, should be paginated in production - query := ` - SELECT kafka_offset, smq_timestamp, message_size - FROM offset_mappings - WHERE partition_key = ? - ORDER BY kafka_offset ASC - ` - - rows, err := s.db.Query(query, partitionKey) - if err != nil { - return nil, fmt.Errorf("failed to query offset mappings: %w", err) - } - defer rows.Close() - - var entries []OffsetEntry - for rows.Next() { - var entry OffsetEntry - err := rows.Scan(&entry.KafkaOffset, &entry.SMQTimestamp, &entry.MessageSize) - if err != nil { - return nil, fmt.Errorf("failed to scan offset entry: %w", err) - } - entries = append(entries, entry) - } - - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("error iterating offset mappings: %w", err) - } - - return entries, nil -} - -// GetOffsetMappingsByRange retrieves offset mappings within a specific range -func (s *SQLOffsetStorage) GetOffsetMappingsByRange(partitionKey string, startOffset, endOffset int64) ([]OffsetEntry, error) { - // TODO: Use _index column for efficient range queries - query := ` - SELECT kafka_offset, smq_timestamp, message_size - FROM offset_mappings - WHERE partition_key = ? AND kafka_offset >= ? AND kafka_offset <= ? - ORDER BY kafka_offset ASC - ` - - rows, err := s.db.Query(query, partitionKey, startOffset, endOffset) - if err != nil { - return nil, fmt.Errorf("failed to query offset range: %w", err) - } - defer rows.Close() - - var entries []OffsetEntry - for rows.Next() { - var entry OffsetEntry - err := rows.Scan(&entry.KafkaOffset, &entry.SMQTimestamp, &entry.MessageSize) - if err != nil { - return nil, fmt.Errorf("failed to scan offset entry: %w", err) - } - entries = append(entries, entry) - } - - return entries, nil -} - -// GetPartitionStats returns statistics about a partition's offset usage -func (s *SQLOffsetStorage) GetPartitionStats(partitionKey string) (*PartitionStats, error) { - query := ` - SELECT - COUNT(*) as record_count, - MIN(kafka_offset) as earliest_offset, - MAX(kafka_offset) as latest_offset, - SUM(message_size) as total_size, - MIN(created_at) as first_record_time, - MAX(created_at) as last_record_time - FROM offset_mappings - WHERE partition_key = ? - ` - - var stats PartitionStats - var earliestOffset, latestOffset sql.NullInt64 - var totalSize sql.NullInt64 - var firstRecordTime, lastRecordTime sql.NullInt64 - - err := s.db.QueryRow(query, partitionKey).Scan( - &stats.RecordCount, - &earliestOffset, - &latestOffset, - &totalSize, - &firstRecordTime, - &lastRecordTime, - ) - - if err != nil { - return nil, fmt.Errorf("failed to get partition stats: %w", err) - } - - stats.PartitionKey = partitionKey - - if earliestOffset.Valid { - stats.EarliestOffset = earliestOffset.Int64 - } else { - stats.EarliestOffset = -1 - } - - if latestOffset.Valid { - stats.LatestOffset = latestOffset.Int64 - stats.HighWaterMark = latestOffset.Int64 + 1 - } else { - stats.LatestOffset = -1 - stats.HighWaterMark = 0 - } - - if firstRecordTime.Valid { - stats.FirstRecordTime = firstRecordTime.Int64 - } - - if lastRecordTime.Valid { - stats.LastRecordTime = lastRecordTime.Int64 - } - - if totalSize.Valid { - stats.TotalSize = totalSize.Int64 - } - - return &stats, nil -} - -// CleanupOldMappings removes offset mappings older than the specified time -func (s *SQLOffsetStorage) CleanupOldMappings(olderThanNs int64) error { - // TODO: Add configurable cleanup policies - // ASSUMPTION: Simple time-based cleanup, could be enhanced with retention policies - query := ` - DELETE FROM offset_mappings - WHERE created_at < ? - ` - - result, err := s.db.Exec(query, olderThanNs) - if err != nil { - return fmt.Errorf("failed to cleanup old mappings: %w", err) - } - - rowsAffected, _ := result.RowsAffected() - if rowsAffected > 0 { - // Log cleanup activity - fmt.Printf("Cleaned up %d old offset mappings\n", rowsAffected) - } - - return nil -} - -// Close closes the database connection -func (s *SQLOffsetStorage) Close() error { - if s.db != nil { - return s.db.Close() - } - return nil -} - -// PartitionStats provides statistics about a partition's offset usage -type PartitionStats struct { - PartitionKey string - RecordCount int64 - EarliestOffset int64 - LatestOffset int64 - HighWaterMark int64 - TotalSize int64 - FirstRecordTime int64 - LastRecordTime int64 -} - -// GetAllPartitions returns a list of all partitions with offset data -func (s *SQLOffsetStorage) GetAllPartitions() ([]string, error) { - query := ` - SELECT DISTINCT partition_key - FROM offset_mappings - ORDER BY partition_key - ` - - rows, err := s.db.Query(query) - if err != nil { - return nil, fmt.Errorf("failed to get all partitions: %w", err) - } - defer rows.Close() - - var partitions []string - for rows.Next() { - var partitionKey string - if err := rows.Scan(&partitionKey); err != nil { - return nil, fmt.Errorf("failed to scan partition key: %w", err) - } - partitions = append(partitions, partitionKey) - } - - return partitions, nil -} - -// Vacuum performs database maintenance operations -func (s *SQLOffsetStorage) Vacuum() error { - // TODO: Add database-specific optimization commands - // ASSUMPTION: SQLite VACUUM command, may need adaptation for other databases - _, err := s.db.Exec("VACUUM") - if err != nil { - return fmt.Errorf("failed to vacuum database: %w", err) - } - - return nil -} diff --git a/weed/mq/offset/sql_storage_test.go b/weed/mq/offset/sql_storage_test.go deleted file mode 100644 index 661f317de..000000000 --- a/weed/mq/offset/sql_storage_test.go +++ /dev/null @@ -1,516 +0,0 @@ -package offset - -import ( - "database/sql" - "os" - "testing" - "time" - - _ "github.com/mattn/go-sqlite3" // SQLite driver - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func createTestDB(t *testing.T) *sql.DB { - // Create temporary database file - tmpFile, err := os.CreateTemp("", "offset_test_*.db") - if err != nil { - t.Fatalf("Failed to create temp database file: %v", err) - } - tmpFile.Close() - - // Clean up the file when test completes - t.Cleanup(func() { - os.Remove(tmpFile.Name()) - }) - - db, err := sql.Open("sqlite3", tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to open database: %v", err) - } - - t.Cleanup(func() { - db.Close() - }) - - return db -} - -func createTestPartitionForSQL() *schema_pb.Partition { - return &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: time.Now().UnixNano(), - } -} - -func TestSQLOffsetStorage_InitializeSchema(t *testing.T) { - db := createTestDB(t) - - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - // Verify tables were created - tables := []string{ - "partition_offset_checkpoints", - "offset_mappings", - } - - for _, table := range tables { - var count int - err := db.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?", table).Scan(&count) - if err != nil { - t.Fatalf("Failed to check table %s: %v", table, err) - } - - if count != 1 { - t.Errorf("Table %s was not created", table) - } - } -} - -func TestSQLOffsetStorage_SaveLoadCheckpoint(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - - // Test saving checkpoint - err = storage.SaveCheckpoint("test-namespace", "test-topic", partition, 100) - if err != nil { - t.Fatalf("Failed to save checkpoint: %v", err) - } - - // Test loading checkpoint - checkpoint, err := storage.LoadCheckpoint("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to load checkpoint: %v", err) - } - - if checkpoint != 100 { - t.Errorf("Expected checkpoint 100, got %d", checkpoint) - } - - // Test updating checkpoint - err = storage.SaveCheckpoint("test-namespace", "test-topic", partition, 200) - if err != nil { - t.Fatalf("Failed to update checkpoint: %v", err) - } - - checkpoint, err = storage.LoadCheckpoint("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to load updated checkpoint: %v", err) - } - - if checkpoint != 200 { - t.Errorf("Expected updated checkpoint 200, got %d", checkpoint) - } -} - -func TestSQLOffsetStorage_LoadCheckpointNotFound(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - - // Test loading non-existent checkpoint - _, err = storage.LoadCheckpoint("test-namespace", "test-topic", partition) - if err == nil { - t.Error("Expected error for non-existent checkpoint") - } -} - -func TestSQLOffsetStorage_SaveLoadOffsetMappings(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - - // Save multiple offset mappings - mappings := []struct { - offset int64 - timestamp int64 - size int32 - }{ - {0, 1000, 100}, - {1, 2000, 150}, - {2, 3000, 200}, - } - - for _, mapping := range mappings { - err := storage.SaveOffsetMapping(partitionKey, mapping.offset, mapping.timestamp, mapping.size) - if err != nil { - t.Fatalf("Failed to save offset mapping: %v", err) - } - } - - // Load offset mappings - entries, err := storage.LoadOffsetMappings(partitionKey) - if err != nil { - t.Fatalf("Failed to load offset mappings: %v", err) - } - - if len(entries) != len(mappings) { - t.Errorf("Expected %d entries, got %d", len(mappings), len(entries)) - } - - // Verify entries are sorted by offset - for i, entry := range entries { - expected := mappings[i] - if entry.KafkaOffset != expected.offset { - t.Errorf("Entry %d: expected offset %d, got %d", i, expected.offset, entry.KafkaOffset) - } - if entry.SMQTimestamp != expected.timestamp { - t.Errorf("Entry %d: expected timestamp %d, got %d", i, expected.timestamp, entry.SMQTimestamp) - } - if entry.MessageSize != expected.size { - t.Errorf("Entry %d: expected size %d, got %d", i, expected.size, entry.MessageSize) - } - } -} - -func TestSQLOffsetStorage_GetHighestOffset(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := TopicPartitionKey("test-namespace", "test-topic", partition) - - // Test empty partition - _, err = storage.GetHighestOffset("test-namespace", "test-topic", partition) - if err == nil { - t.Error("Expected error for empty partition") - } - - // Add some offset mappings - offsets := []int64{5, 1, 3, 2, 4} - for _, offset := range offsets { - err := storage.SaveOffsetMapping(partitionKey, offset, offset*1000, 100) - if err != nil { - t.Fatalf("Failed to save offset mapping: %v", err) - } - } - - // Get highest offset - highest, err := storage.GetHighestOffset("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get highest offset: %v", err) - } - - if highest != 5 { - t.Errorf("Expected highest offset 5, got %d", highest) - } -} - -func TestSQLOffsetStorage_GetOffsetMappingsByRange(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - - // Add offset mappings - for i := int64(0); i < 10; i++ { - err := storage.SaveOffsetMapping(partitionKey, i, i*1000, 100) - if err != nil { - t.Fatalf("Failed to save offset mapping: %v", err) - } - } - - // Get range of offsets - entries, err := storage.GetOffsetMappingsByRange(partitionKey, 3, 7) - if err != nil { - t.Fatalf("Failed to get offset range: %v", err) - } - - expectedCount := 5 // offsets 3, 4, 5, 6, 7 - if len(entries) != expectedCount { - t.Errorf("Expected %d entries, got %d", expectedCount, len(entries)) - } - - // Verify range - for i, entry := range entries { - expectedOffset := int64(3 + i) - if entry.KafkaOffset != expectedOffset { - t.Errorf("Entry %d: expected offset %d, got %d", i, expectedOffset, entry.KafkaOffset) - } - } -} - -func TestSQLOffsetStorage_GetPartitionStats(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - - // Test empty partition stats - stats, err := storage.GetPartitionStats(partitionKey) - if err != nil { - t.Fatalf("Failed to get empty partition stats: %v", err) - } - - if stats.RecordCount != 0 { - t.Errorf("Expected record count 0, got %d", stats.RecordCount) - } - - if stats.EarliestOffset != -1 { - t.Errorf("Expected earliest offset -1, got %d", stats.EarliestOffset) - } - - // Add some data - sizes := []int32{100, 150, 200} - for i, size := range sizes { - err := storage.SaveOffsetMapping(partitionKey, int64(i), int64(i*1000), size) - if err != nil { - t.Fatalf("Failed to save offset mapping: %v", err) - } - } - - // Get stats with data - stats, err = storage.GetPartitionStats(partitionKey) - if err != nil { - t.Fatalf("Failed to get partition stats: %v", err) - } - - if stats.RecordCount != 3 { - t.Errorf("Expected record count 3, got %d", stats.RecordCount) - } - - if stats.EarliestOffset != 0 { - t.Errorf("Expected earliest offset 0, got %d", stats.EarliestOffset) - } - - if stats.LatestOffset != 2 { - t.Errorf("Expected latest offset 2, got %d", stats.LatestOffset) - } - - if stats.HighWaterMark != 3 { - t.Errorf("Expected high water mark 3, got %d", stats.HighWaterMark) - } - - expectedTotalSize := int64(100 + 150 + 200) - if stats.TotalSize != expectedTotalSize { - t.Errorf("Expected total size %d, got %d", expectedTotalSize, stats.TotalSize) - } -} - -func TestSQLOffsetStorage_GetAllPartitions(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - // Test empty database - partitions, err := storage.GetAllPartitions() - if err != nil { - t.Fatalf("Failed to get all partitions: %v", err) - } - - if len(partitions) != 0 { - t.Errorf("Expected 0 partitions, got %d", len(partitions)) - } - - // Add data for multiple partitions - partition1 := createTestPartitionForSQL() - partition2 := &schema_pb.Partition{ - RingSize: 1024, - RangeStart: 32, - RangeStop: 63, - UnixTimeNs: time.Now().UnixNano(), - } - - partitionKey1 := partitionKey(partition1) - partitionKey2 := partitionKey(partition2) - - storage.SaveOffsetMapping(partitionKey1, 0, 1000, 100) - storage.SaveOffsetMapping(partitionKey2, 0, 2000, 150) - - // Get all partitions - partitions, err = storage.GetAllPartitions() - if err != nil { - t.Fatalf("Failed to get all partitions: %v", err) - } - - if len(partitions) != 2 { - t.Errorf("Expected 2 partitions, got %d", len(partitions)) - } - - // Verify partition keys are present - partitionMap := make(map[string]bool) - for _, p := range partitions { - partitionMap[p] = true - } - - if !partitionMap[partitionKey1] { - t.Errorf("Partition key %s not found", partitionKey1) - } - - if !partitionMap[partitionKey2] { - t.Errorf("Partition key %s not found", partitionKey2) - } -} - -func TestSQLOffsetStorage_CleanupOldMappings(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - - // Add mappings with different timestamps - now := time.Now().UnixNano() - - // Add old mapping by directly inserting with old timestamp - oldTime := now - (24 * time.Hour).Nanoseconds() // 24 hours ago - _, err = db.Exec(` - INSERT INTO offset_mappings - (partition_key, kafka_offset, smq_timestamp, message_size, created_at) - VALUES (?, ?, ?, ?, ?) - `, partitionKey, 0, oldTime, 100, oldTime) - if err != nil { - t.Fatalf("Failed to insert old mapping: %v", err) - } - - // Add recent mapping - storage.SaveOffsetMapping(partitionKey, 1, now, 150) - - // Verify both mappings exist - entries, err := storage.LoadOffsetMappings(partitionKey) - if err != nil { - t.Fatalf("Failed to load mappings: %v", err) - } - - if len(entries) != 2 { - t.Errorf("Expected 2 mappings before cleanup, got %d", len(entries)) - } - - // Cleanup old mappings (older than 12 hours) - cutoffTime := now - (12 * time.Hour).Nanoseconds() - err = storage.CleanupOldMappings(cutoffTime) - if err != nil { - t.Fatalf("Failed to cleanup old mappings: %v", err) - } - - // Verify only recent mapping remains - entries, err = storage.LoadOffsetMappings(partitionKey) - if err != nil { - t.Fatalf("Failed to load mappings after cleanup: %v", err) - } - - if len(entries) != 1 { - t.Errorf("Expected 1 mapping after cleanup, got %d", len(entries)) - } - - if entries[0].KafkaOffset != 1 { - t.Errorf("Expected remaining mapping offset 1, got %d", entries[0].KafkaOffset) - } -} - -func TestSQLOffsetStorage_Vacuum(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - // Vacuum should not fail on empty database - err = storage.Vacuum() - if err != nil { - t.Fatalf("Failed to vacuum database: %v", err) - } - - // Add some data and vacuum again - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - storage.SaveOffsetMapping(partitionKey, 0, 1000, 100) - - err = storage.Vacuum() - if err != nil { - t.Fatalf("Failed to vacuum database with data: %v", err) - } -} - -func TestSQLOffsetStorage_ConcurrentAccess(t *testing.T) { - db := createTestDB(t) - storage, err := NewSQLOffsetStorage(db) - if err != nil { - t.Fatalf("Failed to create SQL storage: %v", err) - } - defer storage.Close() - - partition := createTestPartitionForSQL() - partitionKey := partitionKey(partition) - - // Test concurrent writes - const numGoroutines = 10 - const offsetsPerGoroutine = 10 - - done := make(chan bool, numGoroutines) - - for i := 0; i < numGoroutines; i++ { - go func(goroutineID int) { - defer func() { done <- true }() - - for j := 0; j < offsetsPerGoroutine; j++ { - offset := int64(goroutineID*offsetsPerGoroutine + j) - err := storage.SaveOffsetMapping(partitionKey, offset, offset*1000, 100) - if err != nil { - t.Errorf("Failed to save offset mapping %d: %v", offset, err) - return - } - } - }(i) - } - - // Wait for all goroutines to complete - for i := 0; i < numGoroutines; i++ { - <-done - } - - // Verify all mappings were saved - entries, err := storage.LoadOffsetMappings(partitionKey) - if err != nil { - t.Fatalf("Failed to load mappings: %v", err) - } - - expectedCount := numGoroutines * offsetsPerGoroutine - if len(entries) != expectedCount { - t.Errorf("Expected %d mappings, got %d", expectedCount, len(entries)) - } -} diff --git a/weed/mq/offset/storage.go b/weed/mq/offset/storage.go deleted file mode 100644 index b3eaddd6b..000000000 --- a/weed/mq/offset/storage.go +++ /dev/null @@ -1,5 +0,0 @@ -package offset - -// Note: OffsetStorage interface is defined in manager.go -// Production implementations: FilerOffsetStorage (filer_storage.go), SQLOffsetStorage (sql_storage.go) -// Test implementation: InMemoryOffsetStorage (storage_test.go) diff --git a/weed/mq/offset/subscriber.go b/weed/mq/offset/subscriber.go deleted file mode 100644 index d39932aae..000000000 --- a/weed/mq/offset/subscriber.go +++ /dev/null @@ -1,355 +0,0 @@ -package offset - -import ( - "fmt" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// OffsetSubscriber handles offset-based subscription logic -type OffsetSubscriber struct { - mu sync.RWMutex - offsetRegistry *PartitionOffsetRegistry - subscriptions map[string]*OffsetSubscription -} - -// OffsetSubscription represents an active offset-based subscription -type OffsetSubscription struct { - ID string - Namespace string - TopicName string - Partition *schema_pb.Partition - StartOffset int64 - CurrentOffset int64 - OffsetType schema_pb.OffsetType - IsActive bool - offsetRegistry *PartitionOffsetRegistry -} - -// NewOffsetSubscriber creates a new offset-based subscriber -func NewOffsetSubscriber(offsetRegistry *PartitionOffsetRegistry) *OffsetSubscriber { - return &OffsetSubscriber{ - offsetRegistry: offsetRegistry, - subscriptions: make(map[string]*OffsetSubscription), - } -} - -// CreateSubscription creates a new offset-based subscription -func (s *OffsetSubscriber) CreateSubscription( - subscriptionID string, - namespace, topicName string, - partition *schema_pb.Partition, - offsetType schema_pb.OffsetType, - startOffset int64, -) (*OffsetSubscription, error) { - - s.mu.Lock() - defer s.mu.Unlock() - - // Check if subscription already exists - if _, exists := s.subscriptions[subscriptionID]; exists { - return nil, fmt.Errorf("subscription %s already exists", subscriptionID) - } - - // Resolve the actual start offset based on type - actualStartOffset, err := s.resolveStartOffset(namespace, topicName, partition, offsetType, startOffset) - if err != nil { - return nil, fmt.Errorf("failed to resolve start offset: %w", err) - } - - subscription := &OffsetSubscription{ - ID: subscriptionID, - Namespace: namespace, - TopicName: topicName, - Partition: partition, - StartOffset: actualStartOffset, - CurrentOffset: actualStartOffset, - OffsetType: offsetType, - IsActive: true, - offsetRegistry: s.offsetRegistry, - } - - s.subscriptions[subscriptionID] = subscription - return subscription, nil -} - -// GetSubscription retrieves an existing subscription -func (s *OffsetSubscriber) GetSubscription(subscriptionID string) (*OffsetSubscription, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - subscription, exists := s.subscriptions[subscriptionID] - if !exists { - return nil, fmt.Errorf("subscription %s not found", subscriptionID) - } - - return subscription, nil -} - -// CloseSubscription closes and removes a subscription -func (s *OffsetSubscriber) CloseSubscription(subscriptionID string) error { - s.mu.Lock() - defer s.mu.Unlock() - - subscription, exists := s.subscriptions[subscriptionID] - if !exists { - return fmt.Errorf("subscription %s not found", subscriptionID) - } - - subscription.IsActive = false - delete(s.subscriptions, subscriptionID) - return nil -} - -// resolveStartOffset resolves the actual start offset based on OffsetType -func (s *OffsetSubscriber) resolveStartOffset( - namespace, topicName string, - partition *schema_pb.Partition, - offsetType schema_pb.OffsetType, - requestedOffset int64, -) (int64, error) { - - switch offsetType { - case schema_pb.OffsetType_EXACT_OFFSET: - // Validate that the requested offset exists - return s.validateAndGetOffset(namespace, topicName, partition, requestedOffset) - - case schema_pb.OffsetType_RESET_TO_OFFSET: - // Use the requested offset, even if it doesn't exist yet - return requestedOffset, nil - - case schema_pb.OffsetType_RESET_TO_EARLIEST: - // Start from offset 0 - return 0, nil - - case schema_pb.OffsetType_RESET_TO_LATEST: - // Start from the current high water mark - hwm, err := s.offsetRegistry.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return 0, err - } - return hwm, nil - - case schema_pb.OffsetType_RESUME_OR_EARLIEST: - // Try to resume from a saved position, fallback to earliest - // For now, just use earliest (consumer group position tracking will be added later) - return 0, nil - - case schema_pb.OffsetType_RESUME_OR_LATEST: - // Try to resume from a saved position, fallback to latest - // For now, just use latest - hwm, err := s.offsetRegistry.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return 0, err - } - return hwm, nil - - default: - return 0, fmt.Errorf("unsupported offset type: %v", offsetType) - } -} - -// validateAndGetOffset validates that an offset exists and returns it -func (s *OffsetSubscriber) validateAndGetOffset(namespace, topicName string, partition *schema_pb.Partition, offset int64) (int64, error) { - if offset < 0 { - return 0, fmt.Errorf("offset cannot be negative: %d", offset) - } - - // Get the current high water mark - hwm, err := s.offsetRegistry.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return 0, fmt.Errorf("failed to get high water mark: %w", err) - } - - // Check if offset is within valid range - if offset >= hwm { - return 0, fmt.Errorf("offset %d is beyond high water mark %d", offset, hwm) - } - - return offset, nil -} - -// SeekToOffset seeks a subscription to a specific offset -func (sub *OffsetSubscription) SeekToOffset(offset int64) error { - if !sub.IsActive { - return fmt.Errorf("subscription is not active") - } - - // Validate the offset - if offset < 0 { - return fmt.Errorf("offset cannot be negative: %d", offset) - } - - hwm, err := sub.offsetRegistry.GetHighWaterMark(sub.Namespace, sub.TopicName, sub.Partition) - if err != nil { - return fmt.Errorf("failed to get high water mark: %w", err) - } - - if offset > hwm { - return fmt.Errorf("offset %d is beyond high water mark %d", offset, hwm) - } - - sub.CurrentOffset = offset - return nil -} - -// GetNextOffset returns the next offset to read -func (sub *OffsetSubscription) GetNextOffset() int64 { - return sub.CurrentOffset -} - -// AdvanceOffset advances the subscription to the next offset -func (sub *OffsetSubscription) AdvanceOffset() { - sub.CurrentOffset++ -} - -// GetLag returns the lag between current position and high water mark -func (sub *OffsetSubscription) GetLag() (int64, error) { - if !sub.IsActive { - return 0, fmt.Errorf("subscription is not active") - } - - hwm, err := sub.offsetRegistry.GetHighWaterMark(sub.Namespace, sub.TopicName, sub.Partition) - if err != nil { - return 0, fmt.Errorf("failed to get high water mark: %w", err) - } - - lag := hwm - sub.CurrentOffset - if lag < 0 { - lag = 0 - } - - return lag, nil -} - -// IsAtEnd checks if the subscription has reached the end of available data -func (sub *OffsetSubscription) IsAtEnd() (bool, error) { - if !sub.IsActive { - return true, fmt.Errorf("subscription is not active") - } - - hwm, err := sub.offsetRegistry.GetHighWaterMark(sub.Namespace, sub.TopicName, sub.Partition) - if err != nil { - return false, fmt.Errorf("failed to get high water mark: %w", err) - } - - return sub.CurrentOffset >= hwm, nil -} - -// OffsetRange represents a range of offsets -type OffsetRange struct { - StartOffset int64 - EndOffset int64 - Count int64 -} - -// GetOffsetRange returns a range of offsets for batch reading -func (sub *OffsetSubscription) GetOffsetRange(maxCount int64) (*OffsetRange, error) { - if !sub.IsActive { - return nil, fmt.Errorf("subscription is not active") - } - - hwm, err := sub.offsetRegistry.GetHighWaterMark(sub.Namespace, sub.TopicName, sub.Partition) - if err != nil { - return nil, fmt.Errorf("failed to get high water mark: %w", err) - } - - startOffset := sub.CurrentOffset - endOffset := startOffset + maxCount - 1 - - // Don't go beyond high water mark - if endOffset >= hwm { - endOffset = hwm - 1 - } - - // If start is already at or beyond HWM, return empty range - if startOffset >= hwm { - return &OffsetRange{ - StartOffset: startOffset, - EndOffset: startOffset - 1, // Empty range - Count: 0, - }, nil - } - - count := endOffset - startOffset + 1 - return &OffsetRange{ - StartOffset: startOffset, - EndOffset: endOffset, - Count: count, - }, nil -} - -// AdvanceOffsetBy advances the subscription by a specific number of offsets -func (sub *OffsetSubscription) AdvanceOffsetBy(count int64) { - sub.CurrentOffset += count -} - -// OffsetSeeker provides utilities for offset-based seeking -type OffsetSeeker struct { - offsetRegistry *PartitionOffsetRegistry -} - -// NewOffsetSeeker creates a new offset seeker -func NewOffsetSeeker(offsetRegistry *PartitionOffsetRegistry) *OffsetSeeker { - return &OffsetSeeker{ - offsetRegistry: offsetRegistry, - } -} - -// SeekToTimestamp finds the offset closest to a given timestamp -// This bridges offset-based and timestamp-based seeking -func (seeker *OffsetSeeker) SeekToTimestamp(partition *schema_pb.Partition, timestamp int64) (int64, error) { - // TODO: This requires integration with the storage layer to map timestamps to offsets - // For now, return an error indicating this feature needs implementation - return 0, fmt.Errorf("timestamp-to-offset mapping not implemented yet") -} - -// ValidateOffsetRange validates that an offset range is valid -func (seeker *OffsetSeeker) ValidateOffsetRange(namespace, topicName string, partition *schema_pb.Partition, startOffset, endOffset int64) error { - if startOffset < 0 { - return fmt.Errorf("start offset cannot be negative: %d", startOffset) - } - - if endOffset < startOffset { - return fmt.Errorf("end offset %d cannot be less than start offset %d", endOffset, startOffset) - } - - hwm, err := seeker.offsetRegistry.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return fmt.Errorf("failed to get high water mark: %w", err) - } - - if startOffset >= hwm { - return fmt.Errorf("start offset %d is beyond high water mark %d", startOffset, hwm) - } - - if endOffset >= hwm { - return fmt.Errorf("end offset %d is beyond high water mark %d", endOffset, hwm) - } - - return nil -} - -// GetAvailableOffsetRange returns the range of available offsets for a partition -func (seeker *OffsetSeeker) GetAvailableOffsetRange(namespace, topicName string, partition *schema_pb.Partition) (*OffsetRange, error) { - hwm, err := seeker.offsetRegistry.GetHighWaterMark(namespace, topicName, partition) - if err != nil { - return nil, fmt.Errorf("failed to get high water mark: %w", err) - } - - if hwm == 0 { - // No data available - return &OffsetRange{ - StartOffset: 0, - EndOffset: -1, - Count: 0, - }, nil - } - - return &OffsetRange{ - StartOffset: 0, - EndOffset: hwm - 1, - Count: hwm, - }, nil -} diff --git a/weed/mq/offset/subscriber_test.go b/weed/mq/offset/subscriber_test.go deleted file mode 100644 index 1ab97dadc..000000000 --- a/weed/mq/offset/subscriber_test.go +++ /dev/null @@ -1,457 +0,0 @@ -package offset - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestOffsetSubscriber_CreateSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign some offsets first - registry.AssignOffsets("test-namespace", "test-topic", partition, 10) - - // Test EXACT_OFFSET subscription - sub, err := subscriber.CreateSubscription("test-sub-1", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5) - if err != nil { - t.Fatalf("Failed to create EXACT_OFFSET subscription: %v", err) - } - - if sub.StartOffset != 5 { - t.Errorf("Expected start offset 5, got %d", sub.StartOffset) - } - if sub.CurrentOffset != 5 { - t.Errorf("Expected current offset 5, got %d", sub.CurrentOffset) - } - - // Test RESET_TO_LATEST subscription - sub2, err := subscriber.CreateSubscription("test-sub-2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0) - if err != nil { - t.Fatalf("Failed to create RESET_TO_LATEST subscription: %v", err) - } - - if sub2.StartOffset != 10 { // Should be at high water mark - t.Errorf("Expected start offset 10, got %d", sub2.StartOffset) - } -} - -func TestOffsetSubscriber_InvalidSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign some offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 5) - - // Test invalid offset (beyond high water mark) - _, err := subscriber.CreateSubscription("invalid-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 10) - if err == nil { - t.Error("Expected error for offset beyond high water mark") - } - - // Test negative offset - _, err = subscriber.CreateSubscription("invalid-sub-2", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, -1) - if err == nil { - t.Error("Expected error for negative offset") - } -} - -func TestOffsetSubscriber_DuplicateSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Create first subscription - _, err := subscriber.CreateSubscription("duplicate-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err != nil { - t.Fatalf("Failed to create first subscription: %v", err) - } - - // Try to create duplicate - _, err = subscriber.CreateSubscription("duplicate-sub", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err == nil { - t.Error("Expected error for duplicate subscription ID") - } -} - -func TestOffsetSubscription_SeekToOffset(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 20) - - // Create subscription - sub, err := subscriber.CreateSubscription("seek-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Test valid seek - err = sub.SeekToOffset(10) - if err != nil { - t.Fatalf("Failed to seek to offset 10: %v", err) - } - - if sub.CurrentOffset != 10 { - t.Errorf("Expected current offset 10, got %d", sub.CurrentOffset) - } - - // Test invalid seek (beyond high water mark) - err = sub.SeekToOffset(25) - if err == nil { - t.Error("Expected error for seek beyond high water mark") - } - - // Test negative seek - err = sub.SeekToOffset(-1) - if err == nil { - t.Error("Expected error for negative seek offset") - } -} - -func TestOffsetSubscription_AdvanceOffset(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Create subscription - sub, err := subscriber.CreateSubscription("advance-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Test single advance - initialOffset := sub.GetNextOffset() - sub.AdvanceOffset() - - if sub.GetNextOffset() != initialOffset+1 { - t.Errorf("Expected offset %d, got %d", initialOffset+1, sub.GetNextOffset()) - } - - // Test batch advance - sub.AdvanceOffsetBy(5) - - if sub.GetNextOffset() != initialOffset+6 { - t.Errorf("Expected offset %d, got %d", initialOffset+6, sub.GetNextOffset()) - } -} - -func TestOffsetSubscription_GetLag(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 15) - - // Create subscription at offset 5 - sub, err := subscriber.CreateSubscription("lag-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Check initial lag - lag, err := sub.GetLag() - if err != nil { - t.Fatalf("Failed to get lag: %v", err) - } - - expectedLag := int64(15 - 5) // hwm - current - if lag != expectedLag { - t.Errorf("Expected lag %d, got %d", expectedLag, lag) - } - - // Advance and check lag again - sub.AdvanceOffsetBy(3) - - lag, err = sub.GetLag() - if err != nil { - t.Fatalf("Failed to get lag after advance: %v", err) - } - - expectedLag = int64(15 - 8) // hwm - current - if lag != expectedLag { - t.Errorf("Expected lag %d after advance, got %d", expectedLag, lag) - } -} - -func TestOffsetSubscription_IsAtEnd(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 10) - - // Create subscription at end - sub, err := subscriber.CreateSubscription("end-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Should be at end - atEnd, err := sub.IsAtEnd() - if err != nil { - t.Fatalf("Failed to check if at end: %v", err) - } - - if !atEnd { - t.Error("Expected subscription to be at end") - } - - // Seek to middle and check again - sub.SeekToOffset(5) - - atEnd, err = sub.IsAtEnd() - if err != nil { - t.Fatalf("Failed to check if at end after seek: %v", err) - } - - if atEnd { - t.Error("Expected subscription not to be at end after seek") - } -} - -func TestOffsetSubscription_GetOffsetRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 20) - - // Create subscription - sub, err := subscriber.CreateSubscription("range-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_EXACT_OFFSET, 5) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Test normal range - offsetRange, err := sub.GetOffsetRange(10) - if err != nil { - t.Fatalf("Failed to get offset range: %v", err) - } - - if offsetRange.StartOffset != 5 { - t.Errorf("Expected start offset 5, got %d", offsetRange.StartOffset) - } - if offsetRange.EndOffset != 14 { - t.Errorf("Expected end offset 14, got %d", offsetRange.EndOffset) - } - if offsetRange.Count != 10 { - t.Errorf("Expected count 10, got %d", offsetRange.Count) - } - - // Test range that exceeds high water mark - sub.SeekToOffset(15) - offsetRange, err = sub.GetOffsetRange(10) - if err != nil { - t.Fatalf("Failed to get offset range near end: %v", err) - } - - if offsetRange.StartOffset != 15 { - t.Errorf("Expected start offset 15, got %d", offsetRange.StartOffset) - } - if offsetRange.EndOffset != 19 { // Should be capped at hwm-1 - t.Errorf("Expected end offset 19, got %d", offsetRange.EndOffset) - } - if offsetRange.Count != 5 { - t.Errorf("Expected count 5, got %d", offsetRange.Count) - } -} - -func TestOffsetSubscription_EmptyRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 10) - - // Create subscription at end - sub, err := subscriber.CreateSubscription("empty-range-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_LATEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Request range when at end - offsetRange, err := sub.GetOffsetRange(5) - if err != nil { - t.Fatalf("Failed to get offset range at end: %v", err) - } - - if offsetRange.Count != 0 { - t.Errorf("Expected empty range (count 0), got count %d", offsetRange.Count) - } - - if offsetRange.StartOffset != 10 { - t.Errorf("Expected start offset 10, got %d", offsetRange.StartOffset) - } - - if offsetRange.EndOffset != 9 { // Empty range: end < start - t.Errorf("Expected end offset 9 (empty range), got %d", offsetRange.EndOffset) - } -} - -func TestOffsetSeeker_ValidateOffsetRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - seeker := NewOffsetSeeker(registry) - partition := createTestPartition() - - // Assign offsets - registry.AssignOffsets("test-namespace", "test-topic", partition, 15) - - // Test valid range - err := seeker.ValidateOffsetRange("test-namespace", "test-topic", partition, 5, 10) - if err != nil { - t.Errorf("Valid range should not return error: %v", err) - } - - // Test invalid ranges - testCases := []struct { - name string - startOffset int64 - endOffset int64 - expectError bool - }{ - {"negative start", -1, 5, true}, - {"end before start", 10, 5, true}, - {"start beyond hwm", 20, 25, true}, - {"valid range", 0, 14, false}, - {"single offset", 5, 5, false}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - err := seeker.ValidateOffsetRange("test-namespace", "test-topic", partition, tc.startOffset, tc.endOffset) - if tc.expectError && err == nil { - t.Error("Expected error but got none") - } - if !tc.expectError && err != nil { - t.Errorf("Expected no error but got: %v", err) - } - }) - } -} - -func TestOffsetSeeker_GetAvailableOffsetRange(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - seeker := NewOffsetSeeker(registry) - partition := createTestPartition() - - // Test empty partition - offsetRange, err := seeker.GetAvailableOffsetRange("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get available range for empty partition: %v", err) - } - - if offsetRange.Count != 0 { - t.Errorf("Expected empty range for empty partition, got count %d", offsetRange.Count) - } - - // Assign offsets and test again - registry.AssignOffsets("test-namespace", "test-topic", partition, 25) - - offsetRange, err = seeker.GetAvailableOffsetRange("test-namespace", "test-topic", partition) - if err != nil { - t.Fatalf("Failed to get available range: %v", err) - } - - if offsetRange.StartOffset != 0 { - t.Errorf("Expected start offset 0, got %d", offsetRange.StartOffset) - } - if offsetRange.EndOffset != 24 { - t.Errorf("Expected end offset 24, got %d", offsetRange.EndOffset) - } - if offsetRange.Count != 25 { - t.Errorf("Expected count 25, got %d", offsetRange.Count) - } -} - -func TestOffsetSubscriber_CloseSubscription(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Create subscription - sub, err := subscriber.CreateSubscription("close-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - // Verify subscription exists - _, err = subscriber.GetSubscription("close-test") - if err != nil { - t.Fatalf("Subscription should exist: %v", err) - } - - // Close subscription - err = subscriber.CloseSubscription("close-test") - if err != nil { - t.Fatalf("Failed to close subscription: %v", err) - } - - // Verify subscription is gone - _, err = subscriber.GetSubscription("close-test") - if err == nil { - t.Error("Subscription should not exist after close") - } - - // Verify subscription is marked inactive - if sub.IsActive { - t.Error("Subscription should be marked inactive after close") - } -} - -func TestOffsetSubscription_InactiveOperations(t *testing.T) { - storage := NewInMemoryOffsetStorage() - registry := NewPartitionOffsetRegistry(storage) - subscriber := NewOffsetSubscriber(registry) - partition := createTestPartition() - - // Create and close subscription - sub, err := subscriber.CreateSubscription("inactive-test", "test-namespace", "test-topic", partition, schema_pb.OffsetType_RESET_TO_EARLIEST, 0) - if err != nil { - t.Fatalf("Failed to create subscription: %v", err) - } - - subscriber.CloseSubscription("inactive-test") - - // Test operations on inactive subscription - err = sub.SeekToOffset(5) - if err == nil { - t.Error("Expected error for seek on inactive subscription") - } - - _, err = sub.GetLag() - if err == nil { - t.Error("Expected error for GetLag on inactive subscription") - } - - _, err = sub.IsAtEnd() - if err == nil { - t.Error("Expected error for IsAtEnd on inactive subscription") - } - - _, err = sub.GetOffsetRange(10) - if err == nil { - t.Error("Expected error for GetOffsetRange on inactive subscription") - } -} diff --git a/weed/mq/pub_balancer/allocate.go b/weed/mq/pub_balancer/allocate.go deleted file mode 100644 index 09124284b..000000000 --- a/weed/mq/pub_balancer/allocate.go +++ /dev/null @@ -1,128 +0,0 @@ -package pub_balancer - -import ( - "math/rand/v2" - "time" - - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func AllocateTopicPartitions(brokers cmap.ConcurrentMap[string, *BrokerStats], partitionCount int32) (assignments []*mq_pb.BrokerPartitionAssignment) { - // divide the ring into partitions - now := time.Now().UnixNano() - rangeSize := MaxPartitionCount / partitionCount - for i := int32(0); i < partitionCount; i++ { - assignment := &mq_pb.BrokerPartitionAssignment{ - Partition: &schema_pb.Partition{ - RingSize: MaxPartitionCount, - RangeStart: int32(i * rangeSize), - RangeStop: int32((i + 1) * rangeSize), - UnixTimeNs: now, - }, - } - if i == partitionCount-1 { - assignment.Partition.RangeStop = MaxPartitionCount - } - assignments = append(assignments, assignment) - } - - EnsureAssignmentsToActiveBrokers(brokers, 1, assignments) - - glog.V(0).Infof("allocate topic partitions %d: %v", len(assignments), assignments) - return -} - -// randomly pick n brokers, which may contain duplicates -// TODO pick brokers based on the broker stats -func pickBrokers(brokers cmap.ConcurrentMap[string, *BrokerStats], count int32) []string { - candidates := make([]string, 0, brokers.Count()) - for brokerStatsItem := range brokers.IterBuffered() { - candidates = append(candidates, brokerStatsItem.Key) - } - pickedBrokers := make([]string, 0, count) - for i := int32(0); i < count; i++ { - p := rand.IntN(len(candidates)) - pickedBrokers = append(pickedBrokers, candidates[p]) - } - return pickedBrokers -} - -// reservoir sampling select N brokers from the active brokers, with exclusion of the excluded broker -func pickBrokersExcluded(brokers []string, count int, excludedLeadBroker string, excludedBroker string) []string { - pickedBrokers := make([]string, 0, count) - for i, broker := range brokers { - if broker == excludedBroker { - continue - } - if len(pickedBrokers) < count { - pickedBrokers = append(pickedBrokers, broker) - } else { - j := rand.IntN(i + 1) - if j < count { - pickedBrokers[j] = broker - } - } - } - - // shuffle the picked brokers - count = len(pickedBrokers) - for i := 0; i < count; i++ { - j := rand.IntN(count) - pickedBrokers[i], pickedBrokers[j] = pickedBrokers[j], pickedBrokers[i] - } - - return pickedBrokers -} - -// EnsureAssignmentsToActiveBrokers ensures the assignments are assigned to active brokers -func EnsureAssignmentsToActiveBrokers(activeBrokers cmap.ConcurrentMap[string, *BrokerStats], followerCount int, assignments []*mq_pb.BrokerPartitionAssignment) (hasChanges bool) { - glog.V(4).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v", activeBrokers.Count(), followerCount, assignments) - - candidates := make([]string, 0, activeBrokers.Count()) - for brokerStatsItem := range activeBrokers.IterBuffered() { - candidates = append(candidates, brokerStatsItem.Key) - } - - for _, assignment := range assignments { - // count how many brokers are needed - count := 0 - if assignment.LeaderBroker == "" { - count++ - } else if _, found := activeBrokers.Get(assignment.LeaderBroker); !found { - assignment.LeaderBroker = "" - count++ - } - if assignment.FollowerBroker == "" { - count++ - } else if _, found := activeBrokers.Get(assignment.FollowerBroker); !found { - assignment.FollowerBroker = "" - count++ - } - - if count > 0 { - pickedBrokers := pickBrokersExcluded(candidates, count, assignment.LeaderBroker, assignment.FollowerBroker) - i := 0 - if assignment.LeaderBroker == "" { - if i < len(pickedBrokers) { - assignment.LeaderBroker = pickedBrokers[i] - i++ - hasChanges = true - } - } - if assignment.FollowerBroker == "" { - if i < len(pickedBrokers) { - assignment.FollowerBroker = pickedBrokers[i] - i++ - hasChanges = true - } - } - } - - } - - glog.V(4).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v hasChanges: %v", activeBrokers.Count(), followerCount, assignments, hasChanges) - return -} diff --git a/weed/mq/pub_balancer/allocate_test.go b/weed/mq/pub_balancer/allocate_test.go deleted file mode 100644 index fc747634e..000000000 --- a/weed/mq/pub_balancer/allocate_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package pub_balancer - -import ( - "fmt" - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "testing" -) - -func Test_allocateOneBroker(t *testing.T) { - brokers := cmap.New[*BrokerStats]() - brokers.SetIfAbsent("localhost:17777", &BrokerStats{ - TopicPartitionCount: 0, - CpuUsagePercent: 0, - }) - - tests := []struct { - name string - args args - wantAssignments []*mq_pb.BrokerPartitionAssignment - }{ - { - name: "test only one broker", - args: args{ - brokers: brokers, - partitionCount: 1, - }, - wantAssignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:17777", - Partition: &schema_pb.Partition{ - RingSize: MaxPartitionCount, - RangeStart: 0, - RangeStop: MaxPartitionCount, - }, - }, - }, - }, - } - testThem(t, tests) -} - -type args struct { - brokers cmap.ConcurrentMap[string, *BrokerStats] - partitionCount int32 -} - -func testThem(t *testing.T, tests []struct { - name string - args args - wantAssignments []*mq_pb.BrokerPartitionAssignment -}) { - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotAssignments := AllocateTopicPartitions(tt.args.brokers, tt.args.partitionCount) - assert.Equal(t, len(tt.wantAssignments), len(gotAssignments)) - for i, gotAssignment := range gotAssignments { - assert.Equal(t, tt.wantAssignments[i].LeaderBroker, gotAssignment.LeaderBroker) - assert.Equal(t, tt.wantAssignments[i].Partition.RangeStart, gotAssignment.Partition.RangeStart) - assert.Equal(t, tt.wantAssignments[i].Partition.RangeStop, gotAssignment.Partition.RangeStop) - assert.Equal(t, tt.wantAssignments[i].Partition.RingSize, gotAssignment.Partition.RingSize) - } - }) - } -} - -func TestEnsureAssignmentsToActiveBrokersX(t *testing.T) { - type args struct { - activeBrokers cmap.ConcurrentMap[string, *BrokerStats] - followerCount int - assignments []*mq_pb.BrokerPartitionAssignment - } - activeBrokers := cmap.New[*BrokerStats]() - activeBrokers.SetIfAbsent("localhost:1", &BrokerStats{}) - activeBrokers.SetIfAbsent("localhost:2", &BrokerStats{}) - activeBrokers.SetIfAbsent("localhost:3", &BrokerStats{}) - activeBrokers.SetIfAbsent("localhost:4", &BrokerStats{}) - activeBrokers.SetIfAbsent("localhost:5", &BrokerStats{}) - activeBrokers.SetIfAbsent("localhost:6", &BrokerStats{}) - lowActiveBrokers := cmap.New[*BrokerStats]() - lowActiveBrokers.SetIfAbsent("localhost:1", &BrokerStats{}) - lowActiveBrokers.SetIfAbsent("localhost:2", &BrokerStats{}) - singleActiveBroker := cmap.New[*BrokerStats]() - singleActiveBroker.SetIfAbsent("localhost:1", &BrokerStats{}) - tests := []struct { - name string - args args - hasChanges bool - }{ - { - name: "test empty leader", - args: args{ - activeBrokers: activeBrokers, - followerCount: 1, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "", - Partition: &schema_pb.Partition{}, - FollowerBroker: "localhost:2", - }, - }, - }, - hasChanges: true, - }, - { - name: "test empty follower", - args: args{ - activeBrokers: activeBrokers, - followerCount: 1, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:1", - Partition: &schema_pb.Partition{}, - FollowerBroker: "", - }, - }, - }, - hasChanges: true, - }, - { - name: "test dead follower", - args: args{ - activeBrokers: activeBrokers, - followerCount: 1, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:1", - Partition: &schema_pb.Partition{}, - FollowerBroker: "localhost:200", - }, - }, - }, - hasChanges: true, - }, - { - name: "test dead leader and follower", - args: args{ - activeBrokers: activeBrokers, - followerCount: 1, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:100", - Partition: &schema_pb.Partition{}, - FollowerBroker: "localhost:200", - }, - }, - }, - hasChanges: true, - }, - { - name: "test low active brokers", - args: args{ - activeBrokers: lowActiveBrokers, - followerCount: 3, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:1", - Partition: &schema_pb.Partition{}, - FollowerBroker: "localhost:2", - }, - }, - }, - hasChanges: false, - }, - { - name: "test low active brokers with one follower", - args: args{ - activeBrokers: lowActiveBrokers, - followerCount: 1, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:1", - Partition: &schema_pb.Partition{}, - }, - }, - }, - hasChanges: true, - }, - { - name: "test single active broker", - args: args{ - activeBrokers: singleActiveBroker, - followerCount: 3, - assignments: []*mq_pb.BrokerPartitionAssignment{ - { - LeaderBroker: "localhost:1", - Partition: &schema_pb.Partition{}, - FollowerBroker: "localhost:2", - }, - }, - }, - hasChanges: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fmt.Printf("%v before %v\n", tt.name, tt.args.assignments) - hasChanges := EnsureAssignmentsToActiveBrokers(tt.args.activeBrokers, tt.args.followerCount, tt.args.assignments) - assert.Equalf(t, tt.hasChanges, hasChanges, "EnsureAssignmentsToActiveBrokers(%v, %v, %v)", tt.args.activeBrokers, tt.args.followerCount, tt.args.assignments) - fmt.Printf("%v after %v\n", tt.name, tt.args.assignments) - }) - } -} diff --git a/weed/mq/pub_balancer/balance.go b/weed/mq/pub_balancer/balance.go deleted file mode 100644 index b4f1e20cd..000000000 --- a/weed/mq/pub_balancer/balance.go +++ /dev/null @@ -1,73 +0,0 @@ -package pub_balancer - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "google.golang.org/grpc" -) - -/* -* Assuming a topic has [x,y] number of partitions when publishing, and there are b number of brokers. -* and p is the number of partitions per topic. -* if the broker number b <= x, then p = x. -* if the broker number x < b < y, then x <= p <= b. -* if the broker number b >= y, x <= p <= y - -Balance topic partitions to brokers -=================================== - -When the goal is to make sure that low traffic partitions can be merged, (and p >= x, and after last rebalance interval): -1. Calculate the average load(throughput) of partitions per topic. -2. If any two neighboring partitions have a load that is less than the average load, merge them. -3. If min(b, y) < p, then merge two neighboring partitions that have the least combined load. - -When the goal is to make sure that high traffic partitions can be split, (and p < y and p < b, and after last rebalance interval): -1. Calculate the average number of partitions per broker. -2. If any partition has a load that is more than the average load, split it into two partitions. - -When the goal is to make sure that each broker has the same number of partitions: -1. Calculate the average number of partitions per broker. -2. For the brokers that have more than the average number of partitions, move the partitions to the brokers that have less than the average number of partitions. - -*/ - -type BalanceAction interface { -} -type BalanceActionMerge struct { - Before []topic.TopicPartition - After topic.TopicPartition -} -type BalanceActionSplit struct { - Before topic.TopicPartition - After []topic.TopicPartition -} - -type BalanceActionMove struct { - TopicPartition topic.TopicPartition - SourceBroker string - TargetBroker string -} - -type BalanceActionCreate struct { - TopicPartition topic.TopicPartition - TargetBroker string -} - -// BalancePublishers check the stats of all brokers, -// and balance the publishers to the brokers. -func (balancer *PubBalancer) BalancePublishers() []BalanceAction { - action := BalanceTopicPartitionOnBrokers(balancer.Brokers) - return []BalanceAction{action} -} - -func (balancer *PubBalancer) ExecuteBalanceAction(actions []BalanceAction, grpcDialOption grpc.DialOption) (err error) { - for _, action := range actions { - switch action.(type) { - case *BalanceActionMove: - err = balancer.ExecuteBalanceActionMove(action.(*BalanceActionMove), grpcDialOption) - } - if err != nil { - return err - } - } - return nil -} diff --git a/weed/mq/pub_balancer/balance_action.go b/weed/mq/pub_balancer/balance_action.go deleted file mode 100644 index a2d888b2a..000000000 --- a/weed/mq/pub_balancer/balance_action.go +++ /dev/null @@ -1,58 +0,0 @@ -package pub_balancer - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "google.golang.org/grpc" -) - -// PubBalancer <= PublisherToPubBalancer() <= Broker <=> Publish() -// ExecuteBalanceActionMove from PubBalancer => AssignTopicPartitions() => Broker => Publish() - -func (balancer *PubBalancer) ExecuteBalanceActionMove(move *BalanceActionMove, grpcDialOption grpc.DialOption) error { - if _, found := balancer.Brokers.Get(move.SourceBroker); !found { - return fmt.Errorf("source broker %s not found", move.SourceBroker) - } - if _, found := balancer.Brokers.Get(move.TargetBroker); !found { - return fmt.Errorf("target broker %s not found", move.TargetBroker) - } - - err := pb.WithBrokerGrpcClient(false, move.TargetBroker, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - _, err := client.AssignTopicPartitions(context.Background(), &mq_pb.AssignTopicPartitionsRequest{ - Topic: move.TopicPartition.Topic.ToPbTopic(), - BrokerPartitionAssignments: []*mq_pb.BrokerPartitionAssignment{ - { - Partition: move.TopicPartition.ToPbPartition(), - }, - }, - IsLeader: true, - IsDraining: false, - }) - return err - }) - if err != nil { - return fmt.Errorf("assign topic partition %v to %s: %v", move.TopicPartition, move.TargetBroker, err) - } - - err = pb.WithBrokerGrpcClient(false, move.SourceBroker, grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { - _, err := client.AssignTopicPartitions(context.Background(), &mq_pb.AssignTopicPartitionsRequest{ - Topic: move.TopicPartition.Topic.ToPbTopic(), - BrokerPartitionAssignments: []*mq_pb.BrokerPartitionAssignment{ - { - Partition: move.TopicPartition.ToPbPartition(), - }, - }, - IsLeader: true, - IsDraining: true, - }) - return err - }) - if err != nil { - return fmt.Errorf("assign topic partition %v to %s: %v", move.TopicPartition, move.SourceBroker, err) - } - - return nil - -} diff --git a/weed/mq/pub_balancer/balance_action_split.go b/weed/mq/pub_balancer/balance_action_split.go deleted file mode 100644 index 6d317ffb9..000000000 --- a/weed/mq/pub_balancer/balance_action_split.go +++ /dev/null @@ -1,43 +0,0 @@ -package pub_balancer - -/* -Sequence of operations to ensure ordering - -Assuming Publisher P10 is publishing to Topic Partition TP10, and Subscriber S10 is subscribing to Topic TP10. -After splitting Topic TP10 into Topic Partition TP11 and Topic Partition TP21, -Publisher P11 is publishing to Topic Partition TP11, and Publisher P21 is publishing to Topic Partition TP21. -Subscriber S12 is subscribing to Topic Partition TP11, and Subscriber S21 is subscribing to Topic Partition TP21. - -(The last digit is ephoch generation number, which is increasing when the topic partitioning is changed.) - -The diagram is as follows: -P10 -> TP10 -> S10 - || - \/ -P11 -> TP11 -> S11 -P21 -> TP21 -> S21 - -The following is the sequence of events: -1. Create Topic Partition TP11 and TP21 -2. Close Publisher(s) P10 -3. Close Subscriber(s) S10 -4. Close Topic Partition TP10 -5. Start Publisher P11, P21 -6. Start Subscriber S11, S21 - -The dependency is as follows: - 2 => 3 => 4 - | | - v v - 1 => (5 | 6) - -And also: -2 => 5 -3 => 6 - -For brokers: -1. Close all publishers for a topic partition -2. Close all subscribers for a topic partition -3. Close the topic partition - -*/ diff --git a/weed/mq/pub_balancer/balance_brokers.go b/weed/mq/pub_balancer/balance_brokers.go deleted file mode 100644 index 54dd4cb35..000000000 --- a/weed/mq/pub_balancer/balance_brokers.go +++ /dev/null @@ -1,53 +0,0 @@ -package pub_balancer - -import ( - "math/rand/v2" - - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" -) - -func BalanceTopicPartitionOnBrokers(brokers cmap.ConcurrentMap[string, *BrokerStats]) BalanceAction { - // 1. calculate the average number of partitions per broker - var totalPartitionCount int32 - var totalBrokerCount int32 - for brokerStats := range brokers.IterBuffered() { - totalBrokerCount++ - totalPartitionCount += brokerStats.Val.TopicPartitionCount - } - averagePartitionCountPerBroker := totalPartitionCount / totalBrokerCount - minPartitionCountPerBroker := averagePartitionCountPerBroker - maxPartitionCountPerBroker := averagePartitionCountPerBroker - var sourceBroker, targetBroker string - var candidatePartition *topic.TopicPartition - for brokerStats := range brokers.IterBuffered() { - if minPartitionCountPerBroker > brokerStats.Val.TopicPartitionCount { - minPartitionCountPerBroker = brokerStats.Val.TopicPartitionCount - targetBroker = brokerStats.Key - } - if maxPartitionCountPerBroker < brokerStats.Val.TopicPartitionCount { - maxPartitionCountPerBroker = brokerStats.Val.TopicPartitionCount - sourceBroker = brokerStats.Key - // select a random partition from the source broker - randomPartitionIndex := rand.IntN(int(brokerStats.Val.TopicPartitionCount)) - index := 0 - for topicPartitionStats := range brokerStats.Val.TopicPartitionStats.IterBuffered() { - if index == randomPartitionIndex { - candidatePartition = &topicPartitionStats.Val.TopicPartition - break - } else { - index++ - } - } - } - } - if minPartitionCountPerBroker >= maxPartitionCountPerBroker-1 { - return nil - } - // 2. move the partitions from the source broker to the target broker - return &BalanceActionMove{ - TopicPartition: *candidatePartition, - SourceBroker: sourceBroker, - TargetBroker: targetBroker, - } -} diff --git a/weed/mq/pub_balancer/balance_brokers_test.go b/weed/mq/pub_balancer/balance_brokers_test.go deleted file mode 100644 index 58731c24c..000000000 --- a/weed/mq/pub_balancer/balance_brokers_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package pub_balancer - -import ( - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "reflect" - "testing" -) - -func TestBalanceTopicPartitionOnBrokers(t *testing.T) { - - brokers := cmap.New[*BrokerStats]() - broker1Stats := &BrokerStats{ - TopicPartitionCount: 1, - CpuUsagePercent: 1, - TopicPartitionStats: cmap.New[*TopicPartitionStats](), - } - broker1Stats.TopicPartitionStats.Set("topic1:0", &TopicPartitionStats{ - TopicPartition: topic.TopicPartition{ - Topic: topic.Topic{Namespace: "topic1", Name: "topic1"}, - Partition: topic.Partition{RangeStart: 0, RangeStop: 512, RingSize: 1024}, - }, - }) - broker2Stats := &BrokerStats{ - TopicPartitionCount: 2, - CpuUsagePercent: 1, - TopicPartitionStats: cmap.New[*TopicPartitionStats](), - } - broker2Stats.TopicPartitionStats.Set("topic1:1", &TopicPartitionStats{ - TopicPartition: topic.TopicPartition{ - Topic: topic.Topic{Namespace: "topic1", Name: "topic1"}, - Partition: topic.Partition{RangeStart: 512, RangeStop: 1024, RingSize: 1024}, - }, - }) - broker2Stats.TopicPartitionStats.Set("topic2:0", &TopicPartitionStats{ - TopicPartition: topic.TopicPartition{ - Topic: topic.Topic{Namespace: "topic2", Name: "topic2"}, - Partition: topic.Partition{RangeStart: 0, RangeStop: 1024, RingSize: 1024}, - }, - }) - brokers.Set("broker1", broker1Stats) - brokers.Set("broker2", broker2Stats) - - type args struct { - brokers cmap.ConcurrentMap[string, *BrokerStats] - } - tests := []struct { - name string - args args - want BalanceAction - }{ - { - name: "test", - args: args{ - brokers: brokers, - }, - want: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := BalanceTopicPartitionOnBrokers(tt.args.brokers); !reflect.DeepEqual(got, tt.want) { - t.Errorf("BalanceTopicPartitionOnBrokers() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/weed/mq/pub_balancer/broker_stats.go b/weed/mq/pub_balancer/broker_stats.go deleted file mode 100644 index da016b7fd..000000000 --- a/weed/mq/pub_balancer/broker_stats.go +++ /dev/null @@ -1,89 +0,0 @@ -package pub_balancer - -import ( - "fmt" - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type BrokerStats struct { - TopicPartitionCount int32 - PublisherCount int32 - SubscriberCount int32 - CpuUsagePercent int32 - TopicPartitionStats cmap.ConcurrentMap[string, *TopicPartitionStats] // key: topic_partition - Topics []topic.Topic -} -type TopicPartitionStats struct { - topic.TopicPartition - PublisherCount int32 - SubscriberCount int32 -} - -func NewBrokerStats() *BrokerStats { - return &BrokerStats{ - TopicPartitionStats: cmap.New[*TopicPartitionStats](), - } -} -func (bs *BrokerStats) String() string { - return fmt.Sprintf("BrokerStats{TopicPartitionCount:%d, Publishers:%d, Subscribers:%d CpuUsagePercent:%d, Stats:%+v}", - bs.TopicPartitionCount, bs.PublisherCount, bs.SubscriberCount, bs.CpuUsagePercent, bs.TopicPartitionStats.Items()) -} - -func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) { - bs.TopicPartitionCount = int32(len(stats.Stats)) - bs.CpuUsagePercent = stats.CpuUsagePercent - - var publisherCount, subscriberCount int32 - currentTopicPartitions := bs.TopicPartitionStats.Items() - for _, topicPartitionStats := range stats.Stats { - tps := &TopicPartitionStats{ - TopicPartition: topic.TopicPartition{ - Topic: topic.Topic{Namespace: topicPartitionStats.Topic.Namespace, Name: topicPartitionStats.Topic.Name}, - Partition: topic.Partition{ - RangeStart: topicPartitionStats.Partition.RangeStart, - RangeStop: topicPartitionStats.Partition.RangeStop, - RingSize: topicPartitionStats.Partition.RingSize, - UnixTimeNs: topicPartitionStats.Partition.UnixTimeNs, - }, - }, - PublisherCount: topicPartitionStats.PublisherCount, - SubscriberCount: topicPartitionStats.SubscriberCount, - } - publisherCount += topicPartitionStats.PublisherCount - subscriberCount += topicPartitionStats.SubscriberCount - key := tps.TopicPartition.TopicPartitionId() - bs.TopicPartitionStats.Set(key, tps) - delete(currentTopicPartitions, key) - } - // remove the topic partitions that are not in the stats - for key := range currentTopicPartitions { - bs.TopicPartitionStats.Remove(key) - } - bs.PublisherCount = publisherCount - bs.SubscriberCount = subscriberCount -} - -func (bs *BrokerStats) RegisterAssignment(t *schema_pb.Topic, partition *schema_pb.Partition, isAdd bool) { - tps := &TopicPartitionStats{ - TopicPartition: topic.TopicPartition{ - Topic: topic.Topic{Namespace: t.Namespace, Name: t.Name}, - Partition: topic.Partition{ - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - RingSize: partition.RingSize, - UnixTimeNs: partition.UnixTimeNs, - }, - }, - PublisherCount: 0, - SubscriberCount: 0, - } - key := tps.TopicPartition.TopicPartitionId() - if isAdd { - bs.TopicPartitionStats.SetIfAbsent(key, tps) - } else { - bs.TopicPartitionStats.Remove(key) - } -} diff --git a/weed/mq/pub_balancer/lookup.go b/weed/mq/pub_balancer/lookup.go deleted file mode 100644 index 5f9c7f32f..000000000 --- a/weed/mq/pub_balancer/lookup.go +++ /dev/null @@ -1,36 +0,0 @@ -package pub_balancer - -import ( - "errors" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -var ( - ErrNoBroker = errors.New("no broker") -) - -func (balancer *PubBalancer) LookupTopicPartitions(topic *schema_pb.Topic) (assignments []*mq_pb.BrokerPartitionAssignment) { - // find existing topic partition assignments - for brokerStatsItem := range balancer.Brokers.IterBuffered() { - broker, brokerStats := brokerStatsItem.Key, brokerStatsItem.Val - for topicPartitionStatsItem := range brokerStats.TopicPartitionStats.IterBuffered() { - topicPartitionStat := topicPartitionStatsItem.Val - if topicPartitionStat.TopicPartition.Namespace == topic.Namespace && - topicPartitionStat.TopicPartition.Name == topic.Name { - assignment := &mq_pb.BrokerPartitionAssignment{ - Partition: &schema_pb.Partition{ - RingSize: MaxPartitionCount, - RangeStart: topicPartitionStat.RangeStart, - RangeStop: topicPartitionStat.RangeStop, - UnixTimeNs: topicPartitionStat.UnixTimeNs, - }, - } - // TODO fix follower setting - assignment.LeaderBroker = broker - assignments = append(assignments, assignment) - } - } - } - return -} diff --git a/weed/mq/pub_balancer/partition_list_broker.go b/weed/mq/pub_balancer/partition_list_broker.go deleted file mode 100644 index 34bdfd286..000000000 --- a/weed/mq/pub_balancer/partition_list_broker.go +++ /dev/null @@ -1,60 +0,0 @@ -package pub_balancer - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type PartitionSlotToBroker struct { - RangeStart int32 - RangeStop int32 - UnixTimeNs int64 - AssignedBroker string - FollowerBroker string -} - -type PartitionSlotToBrokerList struct { - PartitionSlots []*PartitionSlotToBroker - RingSize int32 -} - -func NewPartitionSlotToBrokerList(ringSize int32) *PartitionSlotToBrokerList { - return &PartitionSlotToBrokerList{ - RingSize: ringSize, - } -} - -func (ps *PartitionSlotToBrokerList) AddBroker(partition *schema_pb.Partition, broker string, follower string) { - for _, partitionSlot := range ps.PartitionSlots { - if partitionSlot.RangeStart == partition.RangeStart && partitionSlot.RangeStop == partition.RangeStop { - if partitionSlot.AssignedBroker != "" && partitionSlot.AssignedBroker != broker { - glog.V(0).Infof("partition %s broker change: %s => %s", partition, partitionSlot.AssignedBroker, broker) - partitionSlot.AssignedBroker = broker - } - if partitionSlot.FollowerBroker != "" && partitionSlot.FollowerBroker != follower { - glog.V(0).Infof("partition %s follower change: %s => %s", partition, partitionSlot.FollowerBroker, follower) - partitionSlot.FollowerBroker = follower - } - - return - } - } - ps.PartitionSlots = append(ps.PartitionSlots, &PartitionSlotToBroker{ - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - UnixTimeNs: partition.UnixTimeNs, - AssignedBroker: broker, - FollowerBroker: follower, - }) -} -func (ps *PartitionSlotToBrokerList) RemoveBroker(broker string) { - ps.ReplaceBroker(broker, "") -} - -func (ps *PartitionSlotToBrokerList) ReplaceBroker(oldBroker string, newBroker string) { - for _, partitionSlot := range ps.PartitionSlots { - if partitionSlot.AssignedBroker == oldBroker { - partitionSlot.AssignedBroker = newBroker - } - } -} diff --git a/weed/mq/pub_balancer/pub_balancer.go b/weed/mq/pub_balancer/pub_balancer.go deleted file mode 100644 index 9457b76fe..000000000 --- a/weed/mq/pub_balancer/pub_balancer.go +++ /dev/null @@ -1,102 +0,0 @@ -package pub_balancer - -import ( - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -const ( - MaxPartitionCount = 8 * 9 * 5 * 7 //2520 - LockBrokerBalancer = "broker_balancer" -) - -// PubBalancer collects stats from all brokers. -// -// When publishers wants to create topics, it picks brokers to assign the topic partitions. -// When consumers wants to subscribe topics, it tells which brokers are serving the topic partitions. -// -// When a partition needs to be split or merged, or a partition needs to be moved to another broker, -// the balancer will let the broker tell the consumer instance to stop processing the partition. -// The existing consumer instance will flush the internal state, and then stop processing. -// Then the balancer will tell the brokers to start sending new messages in the new/moved partition to the consumer instances. -// -// Failover to standby consumer instances: -// -// A consumer group can have min and max number of consumer instances. -// For consumer instances joined after the max number, they will be in standby mode. -// -// When a consumer instance is down, the broker will notice this and inform the balancer. -// The balancer will then tell the broker to send the partition to another standby consumer instance. -type PubBalancer struct { - Brokers cmap.ConcurrentMap[string, *BrokerStats] // key: broker address - // Collected from all brokers when they connect to the broker leader - TopicToBrokers cmap.ConcurrentMap[string, *PartitionSlotToBrokerList] // key: topic name - OnPartitionChange func(topic *schema_pb.Topic, assignments []*mq_pb.BrokerPartitionAssignment) -} - -func NewPubBalancer() *PubBalancer { - return &PubBalancer{ - Brokers: cmap.New[*BrokerStats](), - TopicToBrokers: cmap.New[*PartitionSlotToBrokerList](), - } -} - -func (balancer *PubBalancer) AddBroker(broker string) (brokerStats *BrokerStats) { - var found bool - brokerStats, found = balancer.Brokers.Get(broker) - if !found { - brokerStats = NewBrokerStats() - if !balancer.Brokers.SetIfAbsent(broker, brokerStats) { - brokerStats, _ = balancer.Brokers.Get(broker) - } - } - balancer.onPubAddBroker(broker, brokerStats) - return brokerStats -} - -func (balancer *PubBalancer) RemoveBroker(broker string, stats *BrokerStats) { - balancer.Brokers.Remove(broker) - - // update TopicToBrokers - for _, topic := range stats.Topics { - partitionSlotToBrokerList, found := balancer.TopicToBrokers.Get(topic.String()) - if !found { - continue - } - pickedBroker := pickBrokers(balancer.Brokers, 1) - if len(pickedBroker) == 0 { - partitionSlotToBrokerList.RemoveBroker(broker) - } else { - partitionSlotToBrokerList.ReplaceBroker(broker, pickedBroker[0]) - } - } - balancer.onPubRemoveBroker(broker, stats) -} - -func (balancer *PubBalancer) OnBrokerStatsUpdated(broker string, brokerStats *BrokerStats, receivedStats *mq_pb.BrokerStats) { - brokerStats.UpdateStats(receivedStats) - - // update TopicToBrokers - for _, topicPartitionStats := range receivedStats.Stats { - topicKey := topic.FromPbTopic(topicPartitionStats.Topic).String() - partition := topicPartitionStats.Partition - partitionSlotToBrokerList, found := balancer.TopicToBrokers.Get(topicKey) - if !found { - partitionSlotToBrokerList = NewPartitionSlotToBrokerList(MaxPartitionCount) - if !balancer.TopicToBrokers.SetIfAbsent(topicKey, partitionSlotToBrokerList) { - partitionSlotToBrokerList, _ = balancer.TopicToBrokers.Get(topicKey) - } - } - partitionSlotToBrokerList.AddBroker(partition, broker, topicPartitionStats.Follower) - } -} - -// OnPubAddBroker is called when a broker is added for a publisher coordinator -func (balancer *PubBalancer) onPubAddBroker(broker string, brokerStats *BrokerStats) { -} - -// OnPubRemoveBroker is called when a broker is removed for a publisher coordinator -func (balancer *PubBalancer) onPubRemoveBroker(broker string, brokerStats *BrokerStats) { -} diff --git a/weed/mq/pub_balancer/repair.go b/weed/mq/pub_balancer/repair.go deleted file mode 100644 index 9af81d27f..000000000 --- a/weed/mq/pub_balancer/repair.go +++ /dev/null @@ -1,123 +0,0 @@ -package pub_balancer - -import ( - "math/rand/v2" - "sort" - - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "modernc.org/mathutil" -) - -func (balancer *PubBalancer) RepairTopics() []BalanceAction { - action := BalanceTopicPartitionOnBrokers(balancer.Brokers) - return []BalanceAction{action} -} - -type TopicPartitionInfo struct { - Broker string -} - -// RepairMissingTopicPartitions check the stats of all brokers, -// and repair the missing topic partitions on the brokers. -func RepairMissingTopicPartitions(brokers cmap.ConcurrentMap[string, *BrokerStats]) (actions []BalanceAction) { - - // find all topic partitions - topicToTopicPartitions := make(map[topic.Topic]map[topic.Partition]*TopicPartitionInfo) - for brokerStatsItem := range brokers.IterBuffered() { - broker, brokerStats := brokerStatsItem.Key, brokerStatsItem.Val - for topicPartitionStatsItem := range brokerStats.TopicPartitionStats.IterBuffered() { - topicPartitionStat := topicPartitionStatsItem.Val - topicPartitionToInfo, found := topicToTopicPartitions[topicPartitionStat.Topic] - if !found { - topicPartitionToInfo = make(map[topic.Partition]*TopicPartitionInfo) - topicToTopicPartitions[topicPartitionStat.Topic] = topicPartitionToInfo - } - tpi, found := topicPartitionToInfo[topicPartitionStat.Partition] - if !found { - tpi = &TopicPartitionInfo{} - topicPartitionToInfo[topicPartitionStat.Partition] = tpi - } - tpi.Broker = broker - } - } - - // collect all brokers as candidates - candidates := make([]string, 0, brokers.Count()) - for brokerStatsItem := range brokers.IterBuffered() { - candidates = append(candidates, brokerStatsItem.Key) - } - - // find the missing topic partitions - for t, topicPartitionToInfo := range topicToTopicPartitions { - missingPartitions := EachTopicRepairMissingTopicPartitions(t, topicPartitionToInfo) - for _, partition := range missingPartitions { - actions = append(actions, BalanceActionCreate{ - TopicPartition: topic.TopicPartition{ - Topic: t, - Partition: partition, - }, - TargetBroker: candidates[rand.IntN(len(candidates))], - }) - } - } - - return actions -} - -func EachTopicRepairMissingTopicPartitions(t topic.Topic, info map[topic.Partition]*TopicPartitionInfo) (missingPartitions []topic.Partition) { - - // find the missing topic partitions - var partitions []topic.Partition - for partition := range info { - partitions = append(partitions, partition) - } - return findMissingPartitions(partitions, MaxPartitionCount) -} - -// findMissingPartitions find the missing partitions -func findMissingPartitions(partitions []topic.Partition, ringSize int32) (missingPartitions []topic.Partition) { - // sort the partitions by range start - sort.Slice(partitions, func(i, j int) bool { - return partitions[i].RangeStart < partitions[j].RangeStart - }) - - // calculate the average partition size - var covered int32 - for _, partition := range partitions { - covered += partition.RangeStop - partition.RangeStart - } - averagePartitionSize := covered / int32(len(partitions)) - - // find the missing partitions - var coveredWatermark int32 - i := 0 - for i < len(partitions) { - partition := partitions[i] - if partition.RangeStart > coveredWatermark { - upperBound := mathutil.MinInt32(coveredWatermark+averagePartitionSize, partition.RangeStart) - missingPartitions = append(missingPartitions, topic.Partition{ - RangeStart: coveredWatermark, - RangeStop: upperBound, - RingSize: ringSize, - }) - coveredWatermark = upperBound - if coveredWatermark == partition.RangeStop { - i++ - } - } else { - coveredWatermark = partition.RangeStop - i++ - } - } - for coveredWatermark < ringSize { - upperBound := mathutil.MinInt32(coveredWatermark+averagePartitionSize, ringSize) - missingPartitions = append(missingPartitions, topic.Partition{ - RangeStart: coveredWatermark, - RangeStop: upperBound, - RingSize: ringSize, - }) - coveredWatermark = upperBound - } - return missingPartitions -} diff --git a/weed/mq/pub_balancer/repair_test.go b/weed/mq/pub_balancer/repair_test.go deleted file mode 100644 index 08465c7e8..000000000 --- a/weed/mq/pub_balancer/repair_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package pub_balancer - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "reflect" - "testing" -) - -func Test_findMissingPartitions(t *testing.T) { - type args struct { - partitions []topic.Partition - } - tests := []struct { - name string - args args - wantMissingPartitions []topic.Partition - }{ - { - name: "one partition", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 1024}, - }, - }, - wantMissingPartitions: nil, - }, - { - name: "two partitions", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 512}, - {RingSize: 1024, RangeStart: 512, RangeStop: 1024}, - }, - }, - wantMissingPartitions: nil, - }, - { - name: "four partitions, missing last two", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 256}, - {RingSize: 1024, RangeStart: 256, RangeStop: 512}, - }, - }, - wantMissingPartitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 512, RangeStop: 768}, - {RingSize: 1024, RangeStart: 768, RangeStop: 1024}, - }, - }, - { - name: "four partitions, missing first two", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 512, RangeStop: 768}, - {RingSize: 1024, RangeStart: 768, RangeStop: 1024}, - }, - }, - wantMissingPartitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 256}, - {RingSize: 1024, RangeStart: 256, RangeStop: 512}, - }, - }, - { - name: "four partitions, missing middle two", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 256}, - {RingSize: 1024, RangeStart: 768, RangeStop: 1024}, - }, - }, - wantMissingPartitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 256, RangeStop: 512}, - {RingSize: 1024, RangeStart: 512, RangeStop: 768}, - }, - }, - { - name: "four partitions, missing three", - args: args{ - partitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 512, RangeStop: 768}, - }, - }, - wantMissingPartitions: []topic.Partition{ - {RingSize: 1024, RangeStart: 0, RangeStop: 256}, - {RingSize: 1024, RangeStart: 256, RangeStop: 512}, - {RingSize: 1024, RangeStart: 768, RangeStop: 1024}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotMissingPartitions := findMissingPartitions(tt.args.partitions, 1024); !reflect.DeepEqual(gotMissingPartitions, tt.wantMissingPartitions) { - t.Errorf("findMissingPartitions() = %v, want %v", gotMissingPartitions, tt.wantMissingPartitions) - } - }) - } -} diff --git a/weed/mq/schema/flat_schema_utils.go b/weed/mq/schema/flat_schema_utils.go deleted file mode 100644 index 93a241cec..000000000 --- a/weed/mq/schema/flat_schema_utils.go +++ /dev/null @@ -1,206 +0,0 @@ -package schema - -import ( - "fmt" - "sort" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// SplitFlatSchemaToKeyValue takes a flat RecordType and key column names, -// returns separate key and value RecordTypes -func SplitFlatSchemaToKeyValue(flatSchema *schema_pb.RecordType, keyColumns []string) (*schema_pb.RecordType, *schema_pb.RecordType, error) { - if flatSchema == nil { - return nil, nil, nil - } - - // Create maps for fast lookup - keyColumnSet := make(map[string]bool) - for _, col := range keyColumns { - keyColumnSet[col] = true - } - - var keyFields []*schema_pb.Field - var valueFields []*schema_pb.Field - - // Split fields based on key columns - for _, field := range flatSchema.Fields { - if keyColumnSet[field.Name] { - // Create key field with reindexed field index - keyField := &schema_pb.Field{ - Name: field.Name, - FieldIndex: int32(len(keyFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - keyFields = append(keyFields, keyField) - } else { - // Create value field with reindexed field index - valueField := &schema_pb.Field{ - Name: field.Name, - FieldIndex: int32(len(valueFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - valueFields = append(valueFields, valueField) - } - } - - // Validate that all key columns were found - if len(keyFields) != len(keyColumns) { - missingCols := []string{} - for _, col := range keyColumns { - found := false - for _, field := range keyFields { - if field.Name == col { - found = true - break - } - } - if !found { - missingCols = append(missingCols, col) - } - } - if len(missingCols) > 0 { - return nil, nil, fmt.Errorf("key columns not found in schema: %v", missingCols) - } - } - - var keyRecordType *schema_pb.RecordType - if len(keyFields) > 0 { - keyRecordType = &schema_pb.RecordType{Fields: keyFields} - } - - var valueRecordType *schema_pb.RecordType - if len(valueFields) > 0 { - valueRecordType = &schema_pb.RecordType{Fields: valueFields} - } - - return keyRecordType, valueRecordType, nil -} - -// CombineFlatSchemaFromKeyValue creates a flat RecordType by combining key and value schemas -// Key fields are placed first, then value fields -func CombineFlatSchemaFromKeyValue(keySchema *schema_pb.RecordType, valueSchema *schema_pb.RecordType) (*schema_pb.RecordType, []string) { - var combinedFields []*schema_pb.Field - var keyColumns []string - - // Add key fields first - if keySchema != nil { - for _, field := range keySchema.Fields { - combinedField := &schema_pb.Field{ - Name: field.Name, - FieldIndex: int32(len(combinedFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - combinedFields = append(combinedFields, combinedField) - keyColumns = append(keyColumns, field.Name) - } - } - - // Add value fields - if valueSchema != nil { - for _, field := range valueSchema.Fields { - // Check for name conflicts - fieldName := field.Name - for _, keyCol := range keyColumns { - if fieldName == keyCol { - // This shouldn't happen in well-formed schemas, but handle gracefully - fieldName = "value_" + fieldName - break - } - } - - combinedField := &schema_pb.Field{ - Name: fieldName, - FieldIndex: int32(len(combinedFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - combinedFields = append(combinedFields, combinedField) - } - } - - if len(combinedFields) == 0 { - return nil, keyColumns - } - - return &schema_pb.RecordType{Fields: combinedFields}, keyColumns -} - -// ExtractKeyColumnsFromCombinedSchema tries to infer key columns from a combined schema -// that was created using CreateCombinedRecordType (with key_ prefixes) -func ExtractKeyColumnsFromCombinedSchema(combinedSchema *schema_pb.RecordType) (flatSchema *schema_pb.RecordType, keyColumns []string) { - if combinedSchema == nil { - return nil, nil - } - - var flatFields []*schema_pb.Field - var keyColumns_ []string - - for _, field := range combinedSchema.Fields { - if strings.HasPrefix(field.Name, "key_") { - // This is a key field - remove the prefix - originalName := strings.TrimPrefix(field.Name, "key_") - flatField := &schema_pb.Field{ - Name: originalName, - FieldIndex: int32(len(flatFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - flatFields = append(flatFields, flatField) - keyColumns_ = append(keyColumns_, originalName) - } else { - // This is a value field - flatField := &schema_pb.Field{ - Name: field.Name, - FieldIndex: int32(len(flatFields)), - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - flatFields = append(flatFields, flatField) - } - } - - // Sort key columns to ensure deterministic order - sort.Strings(keyColumns_) - - if len(flatFields) == 0 { - return nil, keyColumns_ - } - - return &schema_pb.RecordType{Fields: flatFields}, keyColumns_ -} - -// ValidateKeyColumns checks that all key columns exist in the schema -func ValidateKeyColumns(schema *schema_pb.RecordType, keyColumns []string) error { - if schema == nil || len(keyColumns) == 0 { - return nil - } - - fieldNames := make(map[string]bool) - for _, field := range schema.Fields { - fieldNames[field.Name] = true - } - - var missingColumns []string - for _, keyCol := range keyColumns { - if !fieldNames[keyCol] { - missingColumns = append(missingColumns, keyCol) - } - } - - if len(missingColumns) > 0 { - return fmt.Errorf("key columns not found in schema: %v", missingColumns) - } - - return nil -} diff --git a/weed/mq/schema/flat_schema_utils_test.go b/weed/mq/schema/flat_schema_utils_test.go deleted file mode 100644 index 2bce9014c..000000000 --- a/weed/mq/schema/flat_schema_utils_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package schema - -import ( - "reflect" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestSplitFlatSchemaToKeyValue(t *testing.T) { - // Create a test flat schema - flatSchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "user_id", - FieldIndex: 0, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - { - Name: "session_id", - FieldIndex: 1, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "event_type", - FieldIndex: 2, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "timestamp", - FieldIndex: 3, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - }, - } - - keyColumns := []string{"user_id", "session_id"} - - keySchema, valueSchema, err := SplitFlatSchemaToKeyValue(flatSchema, keyColumns) - if err != nil { - t.Fatalf("SplitFlatSchemaToKeyValue failed: %v", err) - } - - // Verify key schema - if keySchema == nil { - t.Fatal("Expected key schema, got nil") - } - if len(keySchema.Fields) != 2 { - t.Errorf("Expected 2 key fields, got %d", len(keySchema.Fields)) - } - if keySchema.Fields[0].Name != "user_id" || keySchema.Fields[1].Name != "session_id" { - t.Errorf("Key field names incorrect: %v", []string{keySchema.Fields[0].Name, keySchema.Fields[1].Name}) - } - - // Verify value schema - if valueSchema == nil { - t.Fatal("Expected value schema, got nil") - } - if len(valueSchema.Fields) != 2 { - t.Errorf("Expected 2 value fields, got %d", len(valueSchema.Fields)) - } - if valueSchema.Fields[0].Name != "event_type" || valueSchema.Fields[1].Name != "timestamp" { - t.Errorf("Value field names incorrect: %v", []string{valueSchema.Fields[0].Name, valueSchema.Fields[1].Name}) - } - - // Verify field indices are reindexed - for i, field := range keySchema.Fields { - if field.FieldIndex != int32(i) { - t.Errorf("Key field %s has incorrect index %d, expected %d", field.Name, field.FieldIndex, i) - } - } - for i, field := range valueSchema.Fields { - if field.FieldIndex != int32(i) { - t.Errorf("Value field %s has incorrect index %d, expected %d", field.Name, field.FieldIndex, i) - } - } -} - -func TestSplitFlatSchemaToKeyValueMissingColumns(t *testing.T) { - flatSchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "field1", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - }, - } - - keyColumns := []string{"field1", "missing_field"} - - _, _, err := SplitFlatSchemaToKeyValue(flatSchema, keyColumns) - if err == nil { - t.Error("Expected error for missing key column, got nil") - } - if !contains(err.Error(), "missing_field") { - t.Errorf("Error should mention missing_field: %v", err) - } -} - -func TestCombineFlatSchemaFromKeyValue(t *testing.T) { - keySchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "user_id", - FieldIndex: 0, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - { - Name: "session_id", - FieldIndex: 1, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - }, - } - - valueSchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "event_type", - FieldIndex: 0, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "timestamp", - FieldIndex: 1, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - }, - } - - flatSchema, keyColumns := CombineFlatSchemaFromKeyValue(keySchema, valueSchema) - - // Verify combined schema - if flatSchema == nil { - t.Fatal("Expected flat schema, got nil") - } - if len(flatSchema.Fields) != 4 { - t.Errorf("Expected 4 fields, got %d", len(flatSchema.Fields)) - } - - // Verify key columns - expectedKeyColumns := []string{"user_id", "session_id"} - if !reflect.DeepEqual(keyColumns, expectedKeyColumns) { - t.Errorf("Expected key columns %v, got %v", expectedKeyColumns, keyColumns) - } - - // Verify field order (key fields first) - expectedNames := []string{"user_id", "session_id", "event_type", "timestamp"} - actualNames := make([]string, len(flatSchema.Fields)) - for i, field := range flatSchema.Fields { - actualNames[i] = field.Name - } - if !reflect.DeepEqual(actualNames, expectedNames) { - t.Errorf("Expected field names %v, got %v", expectedNames, actualNames) - } - - // Verify field indices are sequential - for i, field := range flatSchema.Fields { - if field.FieldIndex != int32(i) { - t.Errorf("Field %s has incorrect index %d, expected %d", field.Name, field.FieldIndex, i) - } - } -} - -func TestExtractKeyColumnsFromCombinedSchema(t *testing.T) { - // Create a combined schema with key_ prefixes (as created by CreateCombinedRecordType) - combinedSchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "key_user_id", - FieldIndex: 0, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - { - Name: "key_session_id", - FieldIndex: 1, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "event_type", - FieldIndex: 2, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "timestamp", - FieldIndex: 3, - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, - }, - }, - } - - flatSchema, keyColumns := ExtractKeyColumnsFromCombinedSchema(combinedSchema) - - // Verify flat schema - if flatSchema == nil { - t.Fatal("Expected flat schema, got nil") - } - if len(flatSchema.Fields) != 4 { - t.Errorf("Expected 4 fields, got %d", len(flatSchema.Fields)) - } - - // Verify key columns (should be sorted) - expectedKeyColumns := []string{"session_id", "user_id"} - if !reflect.DeepEqual(keyColumns, expectedKeyColumns) { - t.Errorf("Expected key columns %v, got %v", expectedKeyColumns, keyColumns) - } - - // Verify field names (key_ prefixes removed) - expectedNames := []string{"user_id", "session_id", "event_type", "timestamp"} - actualNames := make([]string, len(flatSchema.Fields)) - for i, field := range flatSchema.Fields { - actualNames[i] = field.Name - } - if !reflect.DeepEqual(actualNames, expectedNames) { - t.Errorf("Expected field names %v, got %v", expectedNames, actualNames) - } -} - -func TestValidateKeyColumns(t *testing.T) { - schema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "field1", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "field2", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}}, - }, - } - - // Valid key columns - err := ValidateKeyColumns(schema, []string{"field1"}) - if err != nil { - t.Errorf("Expected no error for valid key columns, got: %v", err) - } - - // Invalid key columns - err = ValidateKeyColumns(schema, []string{"field1", "missing_field"}) - if err == nil { - t.Error("Expected error for invalid key columns, got nil") - } - - // Nil schema should not error - err = ValidateKeyColumns(nil, []string{"any_field"}) - if err != nil { - t.Errorf("Expected no error for nil schema, got: %v", err) - } - - // Empty key columns should not error - err = ValidateKeyColumns(schema, []string{}) - if err != nil { - t.Errorf("Expected no error for empty key columns, got: %v", err) - } -} - -// Helper function to check if string contains substring -func contains(str, substr string) bool { - return len(str) >= len(substr) && - (len(substr) == 0 || str[len(str)-len(substr):] == substr || - str[:len(substr)] == substr || - len(str) > len(substr) && (str[len(str)-len(substr)-1:len(str)-len(substr)] == " " || str[len(str)-len(substr)-1] == ' ') && str[len(str)-len(substr):] == substr || - findInString(str, substr)) -} - -func findInString(str, substr string) bool { - for i := 0; i <= len(str)-len(substr); i++ { - if str[i:i+len(substr)] == substr { - return true - } - } - return false -} diff --git a/weed/mq/schema/schema.go b/weed/mq/schema/schema.go deleted file mode 100644 index 04d0a7b5e..000000000 --- a/weed/mq/schema/schema.go +++ /dev/null @@ -1,58 +0,0 @@ -package schema - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type Schema struct { - Namespace string - Name string - RevisionId uint32 - RecordType *schema_pb.RecordType - fieldMap map[string]*schema_pb.Field -} - -func NewSchema(namespace string, name string, recordType *schema_pb.RecordType) *Schema { - fieldMap := make(map[string]*schema_pb.Field) - for _, field := range recordType.Fields { - fieldMap[field.Name] = field - } - return &Schema{ - Namespace: namespace, - Name: name, - RecordType: recordType, - fieldMap: fieldMap, - } -} - -func (s *Schema) GetField(name string) (*schema_pb.Field, bool) { - field, ok := s.fieldMap[name] - return field, ok -} - -func TypeToString(t *schema_pb.Type) string { - switch t.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch t.GetScalarType() { - case schema_pb.ScalarType_BOOL: - return "bool" - case schema_pb.ScalarType_INT32: - return "int32" - case schema_pb.ScalarType_INT64: - return "int64" - case schema_pb.ScalarType_FLOAT: - return "float" - case schema_pb.ScalarType_DOUBLE: - return "double" - case schema_pb.ScalarType_BYTES: - return "bytes" - case schema_pb.ScalarType_STRING: - return "string" - } - case *schema_pb.Type_ListType: - return "list" - case *schema_pb.Type_RecordType: - return "record" - } - return "unknown" -} diff --git a/weed/mq/schema/schema_builder.go b/weed/mq/schema/schema_builder.go deleted file mode 100644 index 13f8af185..000000000 --- a/weed/mq/schema/schema_builder.go +++ /dev/null @@ -1,67 +0,0 @@ -package schema - -import ( - "sort" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -var ( - // Basic scalar types - TypeBoolean = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BOOL}} - TypeInt32 = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT32}} - TypeInt64 = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT64}} - TypeFloat = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_FLOAT}} - TypeDouble = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DOUBLE}} - TypeBytes = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BYTES}} - TypeString = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_STRING}} - - // Parquet logical types - TypeTimestamp = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_TIMESTAMP}} - TypeDate = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DATE}} - TypeDecimal = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DECIMAL}} - TypeTime = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_TIME}} -) - -type RecordTypeBuilder struct { - recordType *schema_pb.RecordType -} - -// RecordTypeBegin creates a new RecordTypeBuilder, it should be followed by a series of WithField methods and RecordTypeEnd -func RecordTypeBegin() *RecordTypeBuilder { - return &RecordTypeBuilder{recordType: &schema_pb.RecordType{}} -} - -// RecordTypeEnd finishes the building of a RecordValue -func (rtb *RecordTypeBuilder) RecordTypeEnd() *schema_pb.RecordType { - // be consistent with parquet.node.go `func (g Group) Fields() []Field` - sort.Slice(rtb.recordType.Fields, func(i, j int) bool { - return rtb.recordType.Fields[i].Name < rtb.recordType.Fields[j].Name - }) - return rtb.recordType -} - -// NewRecordTypeBuilder creates a new RecordTypeBuilder from an existing RecordType, it should be followed by a series of WithField methods and RecordTypeEnd -func NewRecordTypeBuilder(recordType *schema_pb.RecordType) (rtb *RecordTypeBuilder) { - return &RecordTypeBuilder{recordType: recordType} -} - -func (rtb *RecordTypeBuilder) WithField(name string, scalarType *schema_pb.Type) *RecordTypeBuilder { - rtb.recordType.Fields = append(rtb.recordType.Fields, &schema_pb.Field{ - Name: name, - Type: scalarType, - }) - return rtb -} - -func (rtb *RecordTypeBuilder) WithRecordField(name string, recordType *schema_pb.RecordType) *RecordTypeBuilder { - rtb.recordType.Fields = append(rtb.recordType.Fields, &schema_pb.Field{ - Name: name, - Type: &schema_pb.Type{Kind: &schema_pb.Type_RecordType{RecordType: recordType}}, - }) - return rtb -} - -func ListOf(elementType *schema_pb.Type) *schema_pb.Type { - return &schema_pb.Type{Kind: &schema_pb.Type_ListType{ListType: &schema_pb.ListType{ElementType: elementType}}} -} diff --git a/weed/mq/schema/schema_test.go b/weed/mq/schema/schema_test.go deleted file mode 100644 index f7dc8ff55..000000000 --- a/weed/mq/schema/schema_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package schema - -import ( - "encoding/json" - "github.com/golang/protobuf/proto" - . "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestEnumScalarType(t *testing.T) { - tests := []struct { - name string - enum ScalarType - expected int32 - }{ - {"Boolean", ScalarType_BOOL, 0}, - {"Integer", ScalarType_INT32, 1}, - {"Long", ScalarType_INT64, 3}, - {"Float", ScalarType_FLOAT, 4}, - {"Double", ScalarType_DOUBLE, 5}, - {"Bytes", ScalarType_BYTES, 6}, - {"String", ScalarType_STRING, 7}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, int32(tt.enum)) - }) - } -} - -func TestField(t *testing.T) { - field := &Field{ - Name: "field_name", - Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}}, - FieldIndex: 1, - IsRepeated: false, - } - assert.NotNil(t, field) -} - -func TestRecordType(t *testing.T) { - subRecord := &RecordType{ - Fields: []*Field{ - { - Name: "field_1", - Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}}, - FieldIndex: 1, - IsRepeated: false, - }, - { - Name: "field_2", - Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_STRING}}, - FieldIndex: 2, - IsRepeated: false, - }, - }, - } - record := &RecordType{ - Fields: []*Field{ - { - Name: "field_key", - Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}}, - FieldIndex: 1, - IsRepeated: false, - }, - { - Name: "field_record", - Type: &Type{Kind: &Type_RecordType{RecordType: subRecord}}, - FieldIndex: 2, - IsRepeated: false, - }, - }, - } - - // serialize record to protobuf text marshalling - text := proto.MarshalTextString(record) - println(text) - - bytes, _ := json.Marshal(record) - println(string(bytes)) - - assert.NotNil(t, record) -} diff --git a/weed/mq/schema/struct_to_schema.go b/weed/mq/schema/struct_to_schema.go deleted file mode 100644 index 2f0f2180b..000000000 --- a/weed/mq/schema/struct_to_schema.go +++ /dev/null @@ -1,105 +0,0 @@ -package schema - -import ( - "reflect" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func StructToSchema(instance any) *schema_pb.RecordType { - myType := reflect.TypeOf(instance) - if myType.Kind() != reflect.Struct { - return nil - } - st := reflectTypeToSchemaType(myType) - return st.GetRecordType() -} - -// CreateCombinedRecordType creates a combined RecordType that includes fields from both key and value schemas -// Key fields are prefixed with "key_" to distinguish them from value fields -func CreateCombinedRecordType(keyRecordType *schema_pb.RecordType, valueRecordType *schema_pb.RecordType) *schema_pb.RecordType { - var combinedFields []*schema_pb.Field - - // Add key fields with "key_" prefix - if keyRecordType != nil { - for _, field := range keyRecordType.Fields { - keyField := &schema_pb.Field{ - Name: "key_" + field.Name, - FieldIndex: field.FieldIndex, // Will be reindexed later - Type: field.Type, - IsRepeated: field.IsRepeated, - IsRequired: field.IsRequired, - } - combinedFields = append(combinedFields, keyField) - } - } - - // Add value fields (no prefix) - if valueRecordType != nil { - for _, field := range valueRecordType.Fields { - combinedFields = append(combinedFields, field) - } - } - - // Reindex all fields to have sequential indices - for i, field := range combinedFields { - field.FieldIndex = int32(i) - } - - return &schema_pb.RecordType{ - Fields: combinedFields, - } -} - -func reflectTypeToSchemaType(t reflect.Type) *schema_pb.Type { - switch t.Kind() { - case reflect.Bool: - return TypeBoolean - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: - return TypeInt32 - case reflect.Int64: - return TypeInt64 - case reflect.Float32: - return TypeFloat - case reflect.Float64: - return TypeDouble - case reflect.String: - return TypeString - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - return TypeBytes - default: - if st := reflectTypeToSchemaType(t.Elem()); st != nil { - return &schema_pb.Type{ - Kind: &schema_pb.Type_ListType{ - ListType: &schema_pb.ListType{ - ElementType: st, - }, - }, - } - } - } - case reflect.Struct: - recordType := &schema_pb.RecordType{} - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - fieldType := field.Type - fieldName := field.Name - schemaField := reflectTypeToSchemaType(fieldType) - if schemaField == nil { - return nil - } - recordType.Fields = append(recordType.Fields, &schema_pb.Field{ - Name: fieldName, - Type: schemaField, - }) - } - return &schema_pb.Type{ - Kind: &schema_pb.Type_RecordType{ - RecordType: recordType, - }, - } - } - return nil -} diff --git a/weed/mq/schema/struct_to_schema_test.go b/weed/mq/schema/struct_to_schema_test.go deleted file mode 100644 index fae27ecef..000000000 --- a/weed/mq/schema/struct_to_schema_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package schema - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestStructToSchema(t *testing.T) { - type args struct { - instance any - } - tests := []struct { - name string - args args - want *schema_pb.RecordType - }{ - { - name: "scalar type", - args: args{ - instance: 1, - }, - want: nil, - }, - { - name: "simple struct type", - args: args{ - instance: struct { - Field1 int - Field2 string - }{}, - }, - want: RecordTypeBegin(). - WithField("Field1", TypeInt32). - WithField("Field2", TypeString). - RecordTypeEnd(), - }, - { - name: "simple list", - args: args{ - instance: struct { - Field1 []int - Field2 string - }{}, - }, - want: RecordTypeBegin(). - WithField("Field1", ListOf(TypeInt32)). - WithField("Field2", TypeString). - RecordTypeEnd(), - }, - { - name: "simple []byte", - args: args{ - instance: struct { - Field2 []byte - }{}, - }, - want: RecordTypeBegin(). - WithField("Field2", TypeBytes). - RecordTypeEnd(), - }, - { - name: "nested simpe structs", - args: args{ - instance: struct { - Field1 int - Field2 struct { - Field3 string - Field4 int - } - }{}, - }, - want: RecordTypeBegin(). - WithField("Field1", TypeInt32). - WithRecordField("Field2", - RecordTypeBegin(). - WithField("Field3", TypeString). - WithField("Field4", TypeInt32). - RecordTypeEnd(), - ). - RecordTypeEnd(), - }, - { - name: "nested struct type", - args: args{ - instance: struct { - Field1 int - Field2 struct { - Field3 string - Field4 []int - Field5 struct { - Field6 string - Field7 []byte - } - } - }{}, - }, - want: RecordTypeBegin(). - WithField("Field1", TypeInt32). - WithRecordField("Field2", RecordTypeBegin(). - WithField("Field3", TypeString). - WithField("Field4", ListOf(TypeInt32)). - WithRecordField("Field5", - RecordTypeBegin(). - WithField("Field6", TypeString). - WithField("Field7", TypeBytes). - RecordTypeEnd(), - ).RecordTypeEnd(), - ). - RecordTypeEnd(), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equalf(t, tt.want, StructToSchema(tt.args.instance), "StructToSchema(%v)", tt.args.instance) - }) - } -} diff --git a/weed/mq/schema/to_parquet_levels.go b/weed/mq/schema/to_parquet_levels.go deleted file mode 100644 index f9fc073bb..000000000 --- a/weed/mq/schema/to_parquet_levels.go +++ /dev/null @@ -1,58 +0,0 @@ -package schema - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type ParquetLevels struct { - startColumnIndex int - endColumnIndex int - definitionDepth int - levels map[string]*ParquetLevels -} - -func ToParquetLevels(recordType *schema_pb.RecordType) (*ParquetLevels, error) { - return toRecordTypeLevels(recordType, 0, 0) -} - -func toFieldTypeLevels(fieldType *schema_pb.Type, startColumnIndex, definitionDepth int) (*ParquetLevels, error) { - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - return toFieldTypeScalarLevels(fieldType.GetScalarType(), startColumnIndex, definitionDepth) - case *schema_pb.Type_RecordType: - return toRecordTypeLevels(fieldType.GetRecordType(), startColumnIndex, definitionDepth) - case *schema_pb.Type_ListType: - return toFieldTypeListLevels(fieldType.GetListType(), startColumnIndex, definitionDepth) - } - return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind) -} - -func toFieldTypeListLevels(listType *schema_pb.ListType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) { - return toFieldTypeLevels(listType.ElementType, startColumnIndex, definitionDepth) -} - -func toFieldTypeScalarLevels(scalarType schema_pb.ScalarType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) { - return &ParquetLevels{ - startColumnIndex: startColumnIndex, - endColumnIndex: startColumnIndex + 1, - definitionDepth: definitionDepth, - }, nil -} -func toRecordTypeLevels(recordType *schema_pb.RecordType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) { - recordTypeLevels := &ParquetLevels{ - startColumnIndex: startColumnIndex, - definitionDepth: definitionDepth, - levels: make(map[string]*ParquetLevels), - } - for _, field := range recordType.Fields { - fieldTypeLevels, err := toFieldTypeLevels(field.Type, startColumnIndex, definitionDepth+1) - if err != nil { - return nil, err - } - recordTypeLevels.levels[field.Name] = fieldTypeLevels - startColumnIndex = fieldTypeLevels.endColumnIndex - } - recordTypeLevels.endColumnIndex = startColumnIndex - return recordTypeLevels, nil -} diff --git a/weed/mq/schema/to_parquet_levels_test.go b/weed/mq/schema/to_parquet_levels_test.go deleted file mode 100644 index 5200c0e02..000000000 --- a/weed/mq/schema/to_parquet_levels_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package schema - -import ( - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestToParquetLevels(t *testing.T) { - type args struct { - recordType *schema_pb.RecordType - } - tests := []struct { - name string - args args - want *ParquetLevels - }{ - { - name: "nested type", - args: args{ - RecordTypeBegin(). - WithField("ID", TypeInt64). - WithField("CreatedAt", TypeInt64). - WithRecordField("Person", - RecordTypeBegin(). - WithField("zName", TypeString). - WithField("emails", ListOf(TypeString)). - RecordTypeEnd()). - WithField("Company", TypeString). - WithRecordField("Address", - RecordTypeBegin(). - WithField("Street", TypeString). - WithField("City", TypeString). - RecordTypeEnd()). - RecordTypeEnd(), - }, - want: &ParquetLevels{ - startColumnIndex: 0, - endColumnIndex: 7, - definitionDepth: 0, - levels: map[string]*ParquetLevels{ - "Address": { - startColumnIndex: 0, - endColumnIndex: 2, - definitionDepth: 1, - levels: map[string]*ParquetLevels{ - "City": { - startColumnIndex: 0, - endColumnIndex: 1, - definitionDepth: 2, - }, - "Street": { - startColumnIndex: 1, - endColumnIndex: 2, - definitionDepth: 2, - }, - }, - }, - "Company": { - startColumnIndex: 2, - endColumnIndex: 3, - definitionDepth: 1, - }, - "CreatedAt": { - startColumnIndex: 3, - endColumnIndex: 4, - definitionDepth: 1, - }, - "ID": { - startColumnIndex: 4, - endColumnIndex: 5, - definitionDepth: 1, - }, - "Person": { - startColumnIndex: 5, - endColumnIndex: 7, - definitionDepth: 1, - levels: map[string]*ParquetLevels{ - "emails": { - startColumnIndex: 5, - endColumnIndex: 6, - definitionDepth: 2, - }, - "zName": { - startColumnIndex: 6, - endColumnIndex: 7, - definitionDepth: 2, - }, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ToParquetLevels(tt.args.recordType) - assert.Nil(t, err) - assert.Equalf(t, tt.want, got, "ToParquetLevels(%v)", tt.args.recordType) - }) - } -} diff --git a/weed/mq/schema/to_parquet_schema.go b/weed/mq/schema/to_parquet_schema.go deleted file mode 100644 index 71bbf81ed..000000000 --- a/weed/mq/schema/to_parquet_schema.go +++ /dev/null @@ -1,117 +0,0 @@ -package schema - -import ( - "fmt" - - parquet "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func ToParquetSchema(topicName string, recordType *schema_pb.RecordType) (*parquet.Schema, error) { - rootNode, err := toParquetFieldTypeRecord(recordType) - if err != nil { - return nil, fmt.Errorf("failed to convert record type to parquet schema: %w", err) - } - - // Fields are sorted by name, so the value should be sorted also - // the sorting is inside parquet.`func (g Group) Fields() []Field` - return parquet.NewSchema(topicName, rootNode), nil -} - -func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err error) { - // This is the old function - now defaults to Optional for backward compatibility - return toParquetFieldTypeWithRequirement(fieldType, false) -} - -func toParquetFieldTypeList(listType *schema_pb.ListType) (parquet.Node, error) { - elementType, err := toParquetFieldType(listType.ElementType) - if err != nil { - return nil, err - } - return parquet.Repeated(elementType), nil -} - -func toParquetFieldTypeScalar(scalarType schema_pb.ScalarType) (parquet.Node, error) { - switch scalarType { - case schema_pb.ScalarType_BOOL: - return parquet.Leaf(parquet.BooleanType), nil - case schema_pb.ScalarType_INT32: - return parquet.Leaf(parquet.Int32Type), nil - case schema_pb.ScalarType_INT64: - return parquet.Leaf(parquet.Int64Type), nil - case schema_pb.ScalarType_FLOAT: - return parquet.Leaf(parquet.FloatType), nil - case schema_pb.ScalarType_DOUBLE: - return parquet.Leaf(parquet.DoubleType), nil - case schema_pb.ScalarType_BYTES: - return parquet.Leaf(parquet.ByteArrayType), nil - case schema_pb.ScalarType_STRING: - return parquet.Leaf(parquet.ByteArrayType), nil - // Parquet logical types - map to their physical storage types - case schema_pb.ScalarType_TIMESTAMP: - // Stored as INT64 (microseconds since Unix epoch) - return parquet.Leaf(parquet.Int64Type), nil - case schema_pb.ScalarType_DATE: - // Stored as INT32 (days since Unix epoch) - return parquet.Leaf(parquet.Int32Type), nil - case schema_pb.ScalarType_DECIMAL: - // Use maximum precision/scale to accommodate any decimal value - // Per Parquet spec: precision โ‰ค9โ†’INT32, โ‰ค18โ†’INT64, >18โ†’FixedLenByteArray - // Using precision=38 (max for most systems), scale=18 for flexibility - // Individual values can have smaller precision/scale, but schema supports maximum - return parquet.Decimal(18, 38, parquet.FixedLenByteArrayType(16)), nil - case schema_pb.ScalarType_TIME: - // Stored as INT64 (microseconds since midnight) - return parquet.Leaf(parquet.Int64Type), nil - default: - return nil, fmt.Errorf("unknown scalar type: %v", scalarType) - } -} -func toParquetFieldTypeRecord(recordType *schema_pb.RecordType) (parquet.Node, error) { - recordNode := parquet.Group{} - for _, field := range recordType.Fields { - parquetFieldType, err := toParquetFieldTypeWithRequirement(field.Type, field.IsRequired) - if err != nil { - return nil, err - } - recordNode[field.Name] = parquetFieldType - } - return recordNode, nil -} - -// toParquetFieldTypeWithRequirement creates parquet field type respecting required/optional constraints -func toParquetFieldTypeWithRequirement(fieldType *schema_pb.Type, isRequired bool) (dataType parquet.Node, err error) { - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - dataType, err = toParquetFieldTypeScalar(fieldType.GetScalarType()) - if err != nil { - return nil, err - } - if isRequired { - // Required fields are NOT wrapped in Optional - return dataType, nil - } else { - // Optional fields are wrapped in Optional - return parquet.Optional(dataType), nil - } - case *schema_pb.Type_RecordType: - dataType, err = toParquetFieldTypeRecord(fieldType.GetRecordType()) - if err != nil { - return nil, err - } - if isRequired { - return dataType, nil - } else { - return parquet.Optional(dataType), nil - } - case *schema_pb.Type_ListType: - dataType, err = toParquetFieldTypeList(fieldType.GetListType()) - if err != nil { - return nil, err - } - // Lists are typically optional by nature - return dataType, nil - default: - return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind) - } -} diff --git a/weed/mq/schema/to_parquet_value.go b/weed/mq/schema/to_parquet_value.go deleted file mode 100644 index 5573c2a38..000000000 --- a/weed/mq/schema/to_parquet_value.go +++ /dev/null @@ -1,346 +0,0 @@ -package schema - -import ( - "fmt" - "strconv" - - parquet "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func rowBuilderVisit(rowBuilder *parquet.RowBuilder, fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *schema_pb.Value) (err error) { - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - // If value is missing, write NULL at the correct column to keep rows aligned - if fieldValue == nil || fieldValue.Kind == nil { - rowBuilder.Add(levels.startColumnIndex, parquet.NullValue()) - return nil - } - var parquetValue parquet.Value - parquetValue, err = toParquetValueForType(fieldType, fieldValue) - if err != nil { - return - } - - // Safety check: prevent nil byte arrays from reaching parquet library - if parquetValue.Kind() == parquet.ByteArray { - byteData := parquetValue.ByteArray() - if byteData == nil { - parquetValue = parquet.ByteArrayValue([]byte{}) - } - } - - rowBuilder.Add(levels.startColumnIndex, parquetValue) - case *schema_pb.Type_ListType: - // Advance to list position even if value is missing - rowBuilder.Next(levels.startColumnIndex) - if fieldValue == nil || fieldValue.GetListValue() == nil { - return nil - } - - elementType := fieldType.GetListType().ElementType - for _, value := range fieldValue.GetListValue().Values { - if err = rowBuilderVisit(rowBuilder, elementType, levels, value); err != nil { - return - } - } - } - return -} - -func AddRecordValue(rowBuilder *parquet.RowBuilder, recordType *schema_pb.RecordType, parquetLevels *ParquetLevels, recordValue *schema_pb.RecordValue) error { - visitor := func(fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *schema_pb.Value) (err error) { - return rowBuilderVisit(rowBuilder, fieldType, levels, fieldValue) - } - fieldType := &schema_pb.Type{Kind: &schema_pb.Type_RecordType{RecordType: recordType}} - fieldValue := &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: recordValue}} - return doVisitValue(fieldType, parquetLevels, fieldValue, visitor) -} - -// typeValueVisitor is a function that is called for each value in a schema_pb.Value -// Find the column index. -// intended to be used in RowBuilder.Add(columnIndex, value) -type typeValueVisitor func(fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *schema_pb.Value) (err error) - -// endIndex is exclusive -// same logic as RowBuilder.configure in row_builder.go -func doVisitValue(fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *schema_pb.Value, visitor typeValueVisitor) (err error) { - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - return visitor(fieldType, levels, fieldValue) - case *schema_pb.Type_ListType: - return visitor(fieldType, levels, fieldValue) - case *schema_pb.Type_RecordType: - for _, field := range fieldType.GetRecordType().Fields { - var fv *schema_pb.Value - if fieldValue != nil && fieldValue.GetRecordValue() != nil { - var found bool - fv, found = fieldValue.GetRecordValue().Fields[field.Name] - if !found { - // pass nil so visitor can emit NULL for alignment - fv = nil - } - } - fieldLevels := levels.levels[field.Name] - err = doVisitValue(field.Type, fieldLevels, fv, visitor) - if err != nil { - return - } - } - return - } - return -} - -func toParquetValue(value *schema_pb.Value) (parquet.Value, error) { - // Safety check for nil value - if value == nil || value.Kind == nil { - return parquet.NullValue(), fmt.Errorf("nil value or nil value kind") - } - - switch value.Kind.(type) { - case *schema_pb.Value_BoolValue: - return parquet.BooleanValue(value.GetBoolValue()), nil - case *schema_pb.Value_Int32Value: - return parquet.Int32Value(value.GetInt32Value()), nil - case *schema_pb.Value_Int64Value: - return parquet.Int64Value(value.GetInt64Value()), nil - case *schema_pb.Value_FloatValue: - return parquet.FloatValue(value.GetFloatValue()), nil - case *schema_pb.Value_DoubleValue: - return parquet.DoubleValue(value.GetDoubleValue()), nil - case *schema_pb.Value_BytesValue: - // Handle nil byte slices to prevent growslice panic in parquet-go - byteData := value.GetBytesValue() - if byteData == nil { - byteData = []byte{} // Use empty slice instead of nil - } - return parquet.ByteArrayValue(byteData), nil - case *schema_pb.Value_StringValue: - // Convert string to bytes, ensuring we never pass nil - stringData := value.GetStringValue() - return parquet.ByteArrayValue([]byte(stringData)), nil - // Parquet logical types with safe conversion (preventing commit 7a4aeec60 panic) - case *schema_pb.Value_TimestampValue: - timestampValue := value.GetTimestampValue() - if timestampValue == nil { - return parquet.NullValue(), nil - } - return parquet.Int64Value(timestampValue.TimestampMicros), nil - case *schema_pb.Value_DateValue: - dateValue := value.GetDateValue() - if dateValue == nil { - return parquet.NullValue(), nil - } - return parquet.Int32Value(dateValue.DaysSinceEpoch), nil - case *schema_pb.Value_DecimalValue: - decimalValue := value.GetDecimalValue() - if decimalValue == nil || decimalValue.Value == nil || len(decimalValue.Value) == 0 { - return parquet.NullValue(), nil - } - - // Validate input data - reject unreasonably large values instead of corrupting data - if len(decimalValue.Value) > 64 { - // Reject extremely large decimal values (>512 bits) as likely corrupted data - // Better to fail fast than silently corrupt financial/scientific data - return parquet.NullValue(), fmt.Errorf("decimal value too large: %d bytes (max 64)", len(decimalValue.Value)) - } - - // Convert to FixedLenByteArray to match schema (DECIMAL with FixedLenByteArray physical type) - // This accommodates any precision up to 38 digits (16 bytes = 128 bits) - - // Pad or truncate to exactly 16 bytes for FixedLenByteArray - fixedBytes := make([]byte, 16) - if len(decimalValue.Value) <= 16 { - // Right-align the value (big-endian) - copy(fixedBytes[16-len(decimalValue.Value):], decimalValue.Value) - } else { - // Truncate if too large, taking the least significant bytes - copy(fixedBytes, decimalValue.Value[len(decimalValue.Value)-16:]) - } - - return parquet.FixedLenByteArrayValue(fixedBytes), nil - case *schema_pb.Value_TimeValue: - timeValue := value.GetTimeValue() - if timeValue == nil { - return parquet.NullValue(), nil - } - return parquet.Int64Value(timeValue.TimeMicros), nil - default: - return parquet.NullValue(), fmt.Errorf("unknown value type: %T", value.Kind) - } -} - -// toParquetValueForType coerces a schema_pb.Value into a parquet.Value that matches the declared field type. -func toParquetValueForType(fieldType *schema_pb.Type, value *schema_pb.Value) (parquet.Value, error) { - switch t := fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch t.ScalarType { - case schema_pb.ScalarType_BOOL: - switch v := value.Kind.(type) { - case *schema_pb.Value_BoolValue: - return parquet.BooleanValue(v.BoolValue), nil - case *schema_pb.Value_StringValue: - if b, err := strconv.ParseBool(v.StringValue); err == nil { - return parquet.BooleanValue(b), nil - } - return parquet.BooleanValue(false), nil - default: - return parquet.BooleanValue(false), nil - } - - case schema_pb.ScalarType_INT32: - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return parquet.Int32Value(v.Int32Value), nil - case *schema_pb.Value_Int64Value: - return parquet.Int32Value(int32(v.Int64Value)), nil - case *schema_pb.Value_DoubleValue: - return parquet.Int32Value(int32(v.DoubleValue)), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 32); err == nil { - return parquet.Int32Value(int32(i)), nil - } - return parquet.Int32Value(0), nil - default: - return parquet.Int32Value(0), nil - } - - case schema_pb.ScalarType_INT64: - switch v := value.Kind.(type) { - case *schema_pb.Value_Int64Value: - return parquet.Int64Value(v.Int64Value), nil - case *schema_pb.Value_Int32Value: - return parquet.Int64Value(int64(v.Int32Value)), nil - case *schema_pb.Value_DoubleValue: - return parquet.Int64Value(int64(v.DoubleValue)), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 64); err == nil { - return parquet.Int64Value(i), nil - } - return parquet.Int64Value(0), nil - default: - return parquet.Int64Value(0), nil - } - - case schema_pb.ScalarType_FLOAT: - switch v := value.Kind.(type) { - case *schema_pb.Value_FloatValue: - return parquet.FloatValue(v.FloatValue), nil - case *schema_pb.Value_DoubleValue: - return parquet.FloatValue(float32(v.DoubleValue)), nil - case *schema_pb.Value_Int64Value: - return parquet.FloatValue(float32(v.Int64Value)), nil - case *schema_pb.Value_StringValue: - if f, err := strconv.ParseFloat(v.StringValue, 32); err == nil { - return parquet.FloatValue(float32(f)), nil - } - return parquet.FloatValue(0), nil - default: - return parquet.FloatValue(0), nil - } - - case schema_pb.ScalarType_DOUBLE: - switch v := value.Kind.(type) { - case *schema_pb.Value_DoubleValue: - return parquet.DoubleValue(v.DoubleValue), nil - case *schema_pb.Value_Int64Value: - return parquet.DoubleValue(float64(v.Int64Value)), nil - case *schema_pb.Value_Int32Value: - return parquet.DoubleValue(float64(v.Int32Value)), nil - case *schema_pb.Value_StringValue: - if f, err := strconv.ParseFloat(v.StringValue, 64); err == nil { - return parquet.DoubleValue(f), nil - } - return parquet.DoubleValue(0), nil - default: - return parquet.DoubleValue(0), nil - } - - case schema_pb.ScalarType_BYTES: - switch v := value.Kind.(type) { - case *schema_pb.Value_BytesValue: - b := v.BytesValue - if b == nil { - b = []byte{} - } - return parquet.ByteArrayValue(b), nil - case *schema_pb.Value_StringValue: - return parquet.ByteArrayValue([]byte(v.StringValue)), nil - case *schema_pb.Value_Int64Value: - return parquet.ByteArrayValue([]byte(strconv.FormatInt(v.Int64Value, 10))), nil - case *schema_pb.Value_Int32Value: - return parquet.ByteArrayValue([]byte(strconv.FormatInt(int64(v.Int32Value), 10))), nil - case *schema_pb.Value_DoubleValue: - return parquet.ByteArrayValue([]byte(strconv.FormatFloat(v.DoubleValue, 'f', -1, 64))), nil - case *schema_pb.Value_FloatValue: - return parquet.ByteArrayValue([]byte(strconv.FormatFloat(float64(v.FloatValue), 'f', -1, 32))), nil - case *schema_pb.Value_BoolValue: - if v.BoolValue { - return parquet.ByteArrayValue([]byte("true")), nil - } - return parquet.ByteArrayValue([]byte("false")), nil - default: - return parquet.ByteArrayValue([]byte{}), nil - } - - case schema_pb.ScalarType_STRING: - // Same as bytes but semantically string - switch v := value.Kind.(type) { - case *schema_pb.Value_StringValue: - return parquet.ByteArrayValue([]byte(v.StringValue)), nil - default: - // Fallback through bytes coercion - b, _ := toParquetValueForType(&schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BYTES}}, value) - return b, nil - } - - case schema_pb.ScalarType_TIMESTAMP: - switch v := value.Kind.(type) { - case *schema_pb.Value_Int64Value: - return parquet.Int64Value(v.Int64Value), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 64); err == nil { - return parquet.Int64Value(i), nil - } - return parquet.Int64Value(0), nil - default: - return parquet.Int64Value(0), nil - } - - case schema_pb.ScalarType_DATE: - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return parquet.Int32Value(v.Int32Value), nil - case *schema_pb.Value_Int64Value: - return parquet.Int32Value(int32(v.Int64Value)), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 32); err == nil { - return parquet.Int32Value(int32(i)), nil - } - return parquet.Int32Value(0), nil - default: - return parquet.Int32Value(0), nil - } - - case schema_pb.ScalarType_DECIMAL: - // Reuse existing conversion path (FixedLenByteArray 16) - return toParquetValue(value) - - case schema_pb.ScalarType_TIME: - switch v := value.Kind.(type) { - case *schema_pb.Value_Int64Value: - return parquet.Int64Value(v.Int64Value), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 64); err == nil { - return parquet.Int64Value(i), nil - } - return parquet.Int64Value(0), nil - default: - return parquet.Int64Value(0), nil - } - } - } - // Fallback to generic conversion - return toParquetValue(value) -} diff --git a/weed/mq/schema/to_parquet_value_test.go b/weed/mq/schema/to_parquet_value_test.go deleted file mode 100644 index 71bd94ba5..000000000 --- a/weed/mq/schema/to_parquet_value_test.go +++ /dev/null @@ -1,666 +0,0 @@ -package schema - -import ( - "math/big" - "testing" - "time" - - "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestToParquetValue_BasicTypes(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "BoolValue true", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_BoolValue{BoolValue: true}, - }, - expected: parquet.BooleanValue(true), - }, - { - name: "Int32Value", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_Int32Value{Int32Value: 42}, - }, - expected: parquet.Int32Value(42), - }, - { - name: "Int64Value", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: 12345678901234}, - }, - expected: parquet.Int64Value(12345678901234), - }, - { - name: "FloatValue", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_FloatValue{FloatValue: 3.14159}, - }, - expected: parquet.FloatValue(3.14159), - }, - { - name: "DoubleValue", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: 2.718281828}, - }, - expected: parquet.DoubleValue(2.718281828), - }, - { - name: "BytesValue", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: []byte("hello world")}, - }, - expected: parquet.ByteArrayValue([]byte("hello world")), - }, - { - name: "BytesValue empty", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: []byte{}}, - }, - expected: parquet.ByteArrayValue([]byte{}), - }, - { - name: "StringValue", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: "test string"}, - }, - expected: parquet.ByteArrayValue([]byte("test string")), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestToParquetValue_TimestampValue(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "Valid TimestampValue UTC", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: 1704067200000000, // 2024-01-01 00:00:00 UTC in microseconds - IsUtc: true, - }, - }, - }, - expected: parquet.Int64Value(1704067200000000), - }, - { - name: "Valid TimestampValue local", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: 1704067200000000, - IsUtc: false, - }, - }, - }, - expected: parquet.Int64Value(1704067200000000), - }, - { - name: "TimestampValue zero", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: 0, - IsUtc: true, - }, - }, - }, - expected: parquet.Int64Value(0), - }, - { - name: "TimestampValue negative (before epoch)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: -1000000, // 1 second before epoch - IsUtc: true, - }, - }, - }, - expected: parquet.Int64Value(-1000000), - }, - { - name: "TimestampValue nil pointer", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: nil, - }, - }, - expected: parquet.NullValue(), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestToParquetValue_DateValue(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "Valid DateValue (2024-01-01)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DateValue{ - DateValue: &schema_pb.DateValue{ - DaysSinceEpoch: 19723, // 2024-01-01 = 19723 days since epoch - }, - }, - }, - expected: parquet.Int32Value(19723), - }, - { - name: "DateValue epoch (1970-01-01)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DateValue{ - DateValue: &schema_pb.DateValue{ - DaysSinceEpoch: 0, - }, - }, - }, - expected: parquet.Int32Value(0), - }, - { - name: "DateValue before epoch", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DateValue{ - DateValue: &schema_pb.DateValue{ - DaysSinceEpoch: -365, // 1969-01-01 - }, - }, - }, - expected: parquet.Int32Value(-365), - }, - { - name: "DateValue nil pointer", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DateValue{ - DateValue: nil, - }, - }, - expected: parquet.NullValue(), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestToParquetValue_DecimalValue(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "Small Decimal (precision <= 9) - positive", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(12345)), // 123.45 with scale 2 - Precision: 5, - Scale: 2, - }, - }, - }, - expected: createFixedLenByteArray(encodeBigIntToBytes(big.NewInt(12345))), // FixedLenByteArray conversion - }, - { - name: "Small Decimal (precision <= 9) - negative", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(-12345)), - Precision: 5, - Scale: 2, - }, - }, - }, - expected: createFixedLenByteArray(encodeBigIntToBytes(big.NewInt(-12345))), // FixedLenByteArray conversion - }, - { - name: "Medium Decimal (9 < precision <= 18)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(123456789012345)), - Precision: 15, - Scale: 2, - }, - }, - }, - expected: createFixedLenByteArray(encodeBigIntToBytes(big.NewInt(123456789012345))), // FixedLenByteArray conversion - }, - { - name: "Large Decimal (precision > 18)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}, // Large number as bytes - Precision: 25, - Scale: 5, - }, - }, - }, - expected: createFixedLenByteArray([]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}), // FixedLenByteArray conversion - }, - { - name: "Decimal with zero precision", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(0)), - Precision: 0, - Scale: 0, - }, - }, - }, - expected: createFixedLenByteArray(encodeBigIntToBytes(big.NewInt(0))), // Zero as FixedLenByteArray - }, - { - name: "Decimal nil pointer", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: nil, - }, - }, - expected: parquet.NullValue(), - }, - { - name: "Decimal with nil Value bytes", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: nil, // This was the original panic cause - Precision: 5, - Scale: 2, - }, - }, - }, - expected: parquet.NullValue(), - }, - { - name: "Decimal with empty Value bytes", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: []byte{}, // Empty slice - Precision: 5, - Scale: 2, - }, - }, - }, - expected: parquet.NullValue(), // Returns null for empty bytes - }, - { - name: "Decimal out of int32 range (stored as binary)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(999999999999)), // Too large for int32 - Precision: 5, // But precision says int32 - Scale: 0, - }, - }, - }, - expected: createFixedLenByteArray(encodeBigIntToBytes(big.NewInt(999999999999))), // FixedLenByteArray - }, - { - name: "Decimal out of int64 range (stored as binary)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: func() []byte { - // Create a number larger than int64 max - bigNum := new(big.Int) - bigNum.SetString("99999999999999999999999999999", 10) - return encodeBigIntToBytes(bigNum) - }(), - Precision: 15, // Says int64 but value is too large - Scale: 0, - }, - }, - }, - expected: createFixedLenByteArray(func() []byte { - bigNum := new(big.Int) - bigNum.SetString("99999999999999999999999999999", 10) - return encodeBigIntToBytes(bigNum) - }()), // Large number as FixedLenByteArray (truncated to 16 bytes) - }, - { - name: "Decimal extremely large value (should be rejected)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: make([]byte, 100), // 100 bytes > 64 byte limit - Precision: 100, - Scale: 0, - }, - }, - }, - expected: parquet.NullValue(), - wantErr: true, // Should return error instead of corrupting data - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestToParquetValue_TimeValue(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "Valid TimeValue (12:34:56.789)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimeValue{ - TimeValue: &schema_pb.TimeValue{ - TimeMicros: 45296789000, // 12:34:56.789 in microseconds since midnight - }, - }, - }, - expected: parquet.Int64Value(45296789000), - }, - { - name: "TimeValue midnight", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimeValue{ - TimeValue: &schema_pb.TimeValue{ - TimeMicros: 0, - }, - }, - }, - expected: parquet.Int64Value(0), - }, - { - name: "TimeValue end of day (23:59:59.999999)", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimeValue{ - TimeValue: &schema_pb.TimeValue{ - TimeMicros: 86399999999, // 23:59:59.999999 - }, - }, - }, - expected: parquet.Int64Value(86399999999), - }, - { - name: "TimeValue nil pointer", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_TimeValue{ - TimeValue: nil, - }, - }, - expected: parquet.NullValue(), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -func TestToParquetValue_EdgeCases(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected parquet.Value - wantErr bool - }{ - { - name: "Nil value", - value: &schema_pb.Value{ - Kind: nil, - }, - wantErr: true, - }, - { - name: "Completely nil value", - value: nil, - wantErr: true, - }, - { - name: "BytesValue with nil slice", - value: &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: nil}, - }, - expected: parquet.ByteArrayValue([]byte{}), // Should convert nil to empty slice - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := toParquetValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("toParquetValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !tt.wantErr && !parquetValuesEqual(result, tt.expected) { - t.Errorf("toParquetValue() = %v, want %v", result, tt.expected) - } - }) - } -} - -// Helper function to encode a big.Int to bytes using two's complement representation -func encodeBigIntToBytes(n *big.Int) []byte { - if n.Sign() == 0 { - return []byte{0} - } - - // For positive numbers, just use Bytes() - if n.Sign() > 0 { - return n.Bytes() - } - - // For negative numbers, we need two's complement representation - bitLen := n.BitLen() - if bitLen%8 != 0 { - bitLen += 8 - (bitLen % 8) // Round up to byte boundary - } - byteLen := bitLen / 8 - if byteLen == 0 { - byteLen = 1 - } - - // Calculate 2^(byteLen*8) - modulus := new(big.Int).Lsh(big.NewInt(1), uint(byteLen*8)) - - // Convert negative to positive representation: n + 2^(byteLen*8) - positive := new(big.Int).Add(n, modulus) - - bytes := positive.Bytes() - - // Pad with leading zeros if needed - if len(bytes) < byteLen { - padded := make([]byte, byteLen) - copy(padded[byteLen-len(bytes):], bytes) - return padded - } - - return bytes -} - -// Helper function to create a FixedLenByteArray(16) matching our conversion logic -func createFixedLenByteArray(inputBytes []byte) parquet.Value { - fixedBytes := make([]byte, 16) - if len(inputBytes) <= 16 { - // Right-align the value (big-endian) - same as our conversion logic - copy(fixedBytes[16-len(inputBytes):], inputBytes) - } else { - // Truncate if too large, taking the least significant bytes - copy(fixedBytes, inputBytes[len(inputBytes)-16:]) - } - return parquet.FixedLenByteArrayValue(fixedBytes) -} - -// Helper function to compare parquet values -func parquetValuesEqual(a, b parquet.Value) bool { - // Handle both being null - if a.IsNull() && b.IsNull() { - return true - } - if a.IsNull() != b.IsNull() { - return false - } - - // Compare kind first - if a.Kind() != b.Kind() { - return false - } - - // Compare based on type - switch a.Kind() { - case parquet.Boolean: - return a.Boolean() == b.Boolean() - case parquet.Int32: - return a.Int32() == b.Int32() - case parquet.Int64: - return a.Int64() == b.Int64() - case parquet.Float: - return a.Float() == b.Float() - case parquet.Double: - return a.Double() == b.Double() - case parquet.ByteArray: - aBytes := a.ByteArray() - bBytes := b.ByteArray() - if len(aBytes) != len(bBytes) { - return false - } - for i, v := range aBytes { - if v != bBytes[i] { - return false - } - } - return true - case parquet.FixedLenByteArray: - aBytes := a.ByteArray() // FixedLenByteArray also uses ByteArray() method - bBytes := b.ByteArray() - if len(aBytes) != len(bBytes) { - return false - } - for i, v := range aBytes { - if v != bBytes[i] { - return false - } - } - return true - default: - return false - } -} - -// Benchmark tests -func BenchmarkToParquetValue_BasicTypes(b *testing.B) { - value := &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: 12345678901234}, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = toParquetValue(value) - } -} - -func BenchmarkToParquetValue_TimestampValue(b *testing.B) { - value := &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: time.Now().UnixMicro(), - IsUtc: true, - }, - }, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = toParquetValue(value) - } -} - -func BenchmarkToParquetValue_DecimalValue(b *testing.B) { - value := &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: encodeBigIntToBytes(big.NewInt(123456789012345)), - Precision: 15, - Scale: 2, - }, - }, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = toParquetValue(value) - } -} diff --git a/weed/mq/schema/to_schema_value.go b/weed/mq/schema/to_schema_value.go deleted file mode 100644 index 50e86d233..000000000 --- a/weed/mq/schema/to_schema_value.go +++ /dev/null @@ -1,146 +0,0 @@ -package schema - -import ( - "bytes" - "fmt" - - "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// ToRecordValue converts a parquet.Row to a schema_pb.RecordValue -// This does not work or did not test with nested structures. -// Using this may fail to convert the parquet.Row to schema_pb.RecordValue -func ToRecordValue(recordType *schema_pb.RecordType, parquetLevels *ParquetLevels, row parquet.Row) (*schema_pb.RecordValue, error) { - values := []parquet.Value(row) - recordValue, _, err := toRecordValue(recordType, parquetLevels, values, 0) - if err != nil { - return nil, err - } - return recordValue.GetRecordValue(), nil -} - -func ToValue(t *schema_pb.Type, levels *ParquetLevels, values []parquet.Value, valueIndex int) (value *schema_pb.Value, endValueIndex int, err error) { - switch t.Kind.(type) { - case *schema_pb.Type_ScalarType: - return toScalarValue(t.GetScalarType(), levels, values, valueIndex) - case *schema_pb.Type_ListType: - return toListValue(t.GetListType(), levels, values, valueIndex) - case *schema_pb.Type_RecordType: - return toRecordValue(t.GetRecordType(), levels, values, valueIndex) - } - return nil, valueIndex, fmt.Errorf("unsupported type: %v", t) -} - -func toRecordValue(recordType *schema_pb.RecordType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (*schema_pb.Value, int, error) { - recordValue := schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)} - for _, field := range recordType.Fields { - fieldLevels := levels.levels[field.Name] - fieldValue, endValueIndex, err := ToValue(field.Type, fieldLevels, values, valueIndex) - if err != nil { - return nil, 0, err - } - valueIndex = endValueIndex - recordValue.Fields[field.Name] = fieldValue - } - return &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: &recordValue}}, valueIndex, nil -} - -func toListValue(listType *schema_pb.ListType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (listValue *schema_pb.Value, endValueIndex int, err error) { - listValues := make([]*schema_pb.Value, 0) - var value *schema_pb.Value - for valueIndex < len(values) { - if values[valueIndex].Column() != levels.startColumnIndex { - break - } - value, valueIndex, err = ToValue(listType.ElementType, levels, values, valueIndex) - if err != nil { - return nil, valueIndex, err - } - listValues = append(listValues, value) - } - return &schema_pb.Value{Kind: &schema_pb.Value_ListValue{ListValue: &schema_pb.ListValue{Values: listValues}}}, valueIndex, nil -} - -func toScalarValue(scalarType schema_pb.ScalarType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (*schema_pb.Value, int, error) { - value := values[valueIndex] - if value.Column() != levels.startColumnIndex { - return nil, valueIndex, nil - } - switch scalarType { - case schema_pb.ScalarType_BOOL: - return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex + 1, nil - case schema_pb.ScalarType_INT32: - return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex + 1, nil - case schema_pb.ScalarType_INT64: - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex + 1, nil - case schema_pb.ScalarType_FLOAT: - return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex + 1, nil - case schema_pb.ScalarType_DOUBLE: - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex + 1, nil - case schema_pb.ScalarType_BYTES: - // Handle nil byte arrays from parquet to prevent growslice panic - byteData := value.ByteArray() - if byteData == nil { - byteData = []byte{} // Use empty slice instead of nil - } - return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: byteData}}, valueIndex + 1, nil - case schema_pb.ScalarType_STRING: - // Handle nil byte arrays from parquet to prevent string conversion issues - byteData := value.ByteArray() - if byteData == nil { - byteData = []byte{} // Use empty slice instead of nil - } - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(byteData)}}, valueIndex + 1, nil - // Parquet logical types - convert from their physical storage back to logical values - case schema_pb.ScalarType_TIMESTAMP: - // Stored as INT64, convert back to TimestampValue - return &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: value.Int64(), - IsUtc: true, // Default to UTC for compatibility - }, - }, - }, valueIndex + 1, nil - case schema_pb.ScalarType_DATE: - // Stored as INT32, convert back to DateValue - return &schema_pb.Value{ - Kind: &schema_pb.Value_DateValue{ - DateValue: &schema_pb.DateValue{ - DaysSinceEpoch: value.Int32(), - }, - }, - }, valueIndex + 1, nil - case schema_pb.ScalarType_DECIMAL: - // Stored as FixedLenByteArray, convert back to DecimalValue - fixedBytes := value.ByteArray() // FixedLenByteArray also uses ByteArray() method - if fixedBytes == nil { - fixedBytes = []byte{} // Use empty slice instead of nil - } - // Remove leading zeros to get the minimal representation - trimmedBytes := bytes.TrimLeft(fixedBytes, "\x00") - if len(trimmedBytes) == 0 { - trimmedBytes = []byte{0} // Ensure we have at least one byte for zero - } - return &schema_pb.Value{ - Kind: &schema_pb.Value_DecimalValue{ - DecimalValue: &schema_pb.DecimalValue{ - Value: trimmedBytes, - Precision: 38, // Maximum precision supported by schema - Scale: 18, // Maximum scale supported by schema - }, - }, - }, valueIndex + 1, nil - case schema_pb.ScalarType_TIME: - // Stored as INT64, convert back to TimeValue - return &schema_pb.Value{ - Kind: &schema_pb.Value_TimeValue{ - TimeValue: &schema_pb.TimeValue{ - TimeMicros: value.Int64(), - }, - }, - }, valueIndex + 1, nil - } - return nil, valueIndex, fmt.Errorf("unsupported scalar type: %v", scalarType) -} diff --git a/weed/mq/schema/value_builder.go b/weed/mq/schema/value_builder.go deleted file mode 100644 index 39dad050a..000000000 --- a/weed/mq/schema/value_builder.go +++ /dev/null @@ -1,113 +0,0 @@ -package schema - -import "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - -// RecordValueBuilder helps in constructing RecordValue protobuf messages -type RecordValueBuilder struct { - recordValue *schema_pb.RecordValue -} - -// RecordBegin creates a new RecordValueBuilder instance -func RecordBegin() *RecordValueBuilder { - return &RecordValueBuilder{recordValue: &schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)}} -} - -// RecordEnd returns the constructed RecordValue message -func (rvb *RecordValueBuilder) RecordEnd() *schema_pb.RecordValue { - return rvb.recordValue -} - -func (rvb *RecordValueBuilder) SetBool(key string, value bool) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetInt32(key string, value int32) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetInt64(key string, value int64) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetFloat(key string, value float32) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetDouble(key string, value float64) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetBytes(key string, value []byte) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetString(key string, value string) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: value}} - return rvb -} -func (rvb *RecordValueBuilder) SetRecord(key string, value *schema_pb.RecordValue) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: value}} - return rvb -} - -func (rvb *RecordValueBuilder) addListValue(key string, values []*schema_pb.Value) *RecordValueBuilder { - rvb.recordValue.Fields[key] = &schema_pb.Value{Kind: &schema_pb.Value_ListValue{ListValue: &schema_pb.ListValue{Values: values}}} - return rvb -} - -func (rvb *RecordValueBuilder) SetBoolList(key string, values ...bool) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetInt32List(key string, values ...int32) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetInt64List(key string, values ...int64) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetFloatList(key string, values ...float32) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetDoubleList(key string, values ...float64) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetBytesList(key string, values ...[]byte) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetStringList(key string, values ...string) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v}}) - } - return rvb.addListValue(key, listValues) -} -func (rvb *RecordValueBuilder) SetRecordList(key string, values ...*schema_pb.RecordValue) *RecordValueBuilder { - var listValues []*schema_pb.Value - for _, v := range values { - listValues = append(listValues, &schema_pb.Value{Kind: &schema_pb.Value_RecordValue{RecordValue: v}}) - } - return rvb.addListValue(key, listValues) -} diff --git a/weed/mq/schema/write_parquet_test.go b/weed/mq/schema/write_parquet_test.go deleted file mode 100644 index b7ecdcfc7..000000000 --- a/weed/mq/schema/write_parquet_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package schema - -import ( - "fmt" - "io" - "os" - "testing" - - "github.com/parquet-go/parquet-go" - "github.com/parquet-go/parquet-go/compress/zstd" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestWriteReadParquet(t *testing.T) { - // create a schema_pb.RecordType - recordType := RecordTypeBegin(). - WithField("ID", TypeInt64). - WithField("CreatedAt", TypeInt64). - WithRecordField("Person", - RecordTypeBegin(). - WithField("zName", TypeString). - WithField("emails", ListOf(TypeString)). - RecordTypeEnd()). - WithField("Company", TypeString). - WithRecordField("Address", - RecordTypeBegin(). - WithField("Street", TypeString). - WithField("City", TypeString). - RecordTypeEnd()). - RecordTypeEnd() - fmt.Printf("RecordType: %v\n", recordType) - - // create a parquet schema - parquetSchema, err := ToParquetSchema("example", recordType) - if err != nil { - t.Fatalf("ToParquetSchema failed: %v", err) - } - fmt.Printf("ParquetSchema: %v\n", parquetSchema) - - fmt.Printf("Go Type: %+v\n", parquetSchema.GoType()) - - filename := "example.parquet" - - count := 3 - - testWritingParquetFile(t, count, filename, parquetSchema, recordType) - - total := testReadingParquetFile(t, filename, parquetSchema, recordType) - - if total != count { - t.Fatalf("total != 128*1024: %v", total) - } - - if err = os.Remove(filename); err != nil { - t.Fatalf("os.Remove failed: %v", err) - } - -} - -func testWritingParquetFile(t *testing.T, count int, filename string, parquetSchema *parquet.Schema, recordType *schema_pb.RecordType) { - parquetLevels, err := ToParquetLevels(recordType) - if err != nil { - t.Fatalf("ToParquetLevels failed: %v", err) - } - - // create a parquet file - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0664) - if err != nil { - t.Fatalf("os.Open failed: %v", err) - } - defer file.Close() - writer := parquet.NewWriter(file, parquetSchema, parquet.Compression(&zstd.Codec{Level: zstd.DefaultLevel})) - rowBuilder := parquet.NewRowBuilder(parquetSchema) - for i := 0; i < count; i++ { - rowBuilder.Reset() - // generate random data - recordValue := RecordBegin(). - SetInt64("ID", 1+int64(i)). - SetInt64("CreatedAt", 2+2*int64(i)). - SetRecord("Person", - RecordBegin(). - SetString("zName", fmt.Sprintf("john_%d", i)). - SetStringList("emails", - fmt.Sprintf("john_%d@a.com", i), - fmt.Sprintf("john_%d@b.com", i), - fmt.Sprintf("john_%d@c.com", i), - fmt.Sprintf("john_%d@d.com", i), - fmt.Sprintf("john_%d@e.com", i)). - RecordEnd()). - SetString("Company", fmt.Sprintf("company_%d", i)). - RecordEnd() - AddRecordValue(rowBuilder, recordType, parquetLevels, recordValue) - - if count < 10 { - fmt.Printf("Write RecordValue: %v\n", recordValue) - } - - row := rowBuilder.Row() - - if count < 10 { - fmt.Printf("Build Row: %+v\n", row) - } - - if err != nil { - t.Fatalf("rowBuilder.Build failed: %v", err) - } - - if _, err = writer.WriteRows([]parquet.Row{row}); err != nil { - t.Fatalf("writer.Write failed: %v", err) - } - } - if err = writer.Close(); err != nil { - t.Fatalf("writer.WriteStop failed: %v", err) - } -} - -func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parquet.Schema, recordType *schema_pb.RecordType) (total int) { - parquetLevels, err := ToParquetLevels(recordType) - if err != nil { - t.Fatalf("ToParquetLevels failed: %v", err) - } - - // read the parquet file - file, err := os.Open(filename) - if err != nil { - t.Fatalf("os.Open failed: %v", err) - } - defer file.Close() - - // Get file info to determine size - fileInfo, err := file.Stat() - if err != nil { - t.Fatalf("file.Stat failed: %v", err) - } - - // Create a parquet file from the opened file - parquetFile, err := parquet.OpenFile(file, fileInfo.Size()) - if err != nil { - t.Fatalf("parquet.OpenFile failed: %v", err) - } - - reader := parquet.NewReader(parquetFile) - rows := make([]parquet.Row, 128) - for { - rowCount, err := reader.ReadRows(rows) - - // Process the rows first, even if EOF is returned - for i := 0; i < rowCount; i++ { - row := rows[i] - // convert parquet row to schema_pb.RecordValue - recordValue, err := ToRecordValue(recordType, parquetLevels, row) - if err != nil { - t.Fatalf("ToRecordValue failed: %v", err) - } - if rowCount < 10 { - fmt.Printf("Read RecordValue: %v\n", recordValue) - } - } - total += rowCount - - // Check for end conditions after processing rows - if err != nil { - if err == io.EOF { - break - } - t.Fatalf("reader.Read failed: %v", err) - } - if rowCount == 0 { - break - } - } - fmt.Printf("total: %v\n", total) - return -} diff --git a/weed/mq/segment/message_serde.go b/weed/mq/segment/message_serde.go deleted file mode 100644 index 66a76c57d..000000000 --- a/weed/mq/segment/message_serde.go +++ /dev/null @@ -1,109 +0,0 @@ -package segment - -import ( - flatbuffers "github.com/google/flatbuffers/go" - "github.com/seaweedfs/seaweedfs/weed/pb/message_fbs" -) - -type MessageBatchBuilder struct { - b *flatbuffers.Builder - producerId int32 - producerEpoch int32 - segmentId int32 - flags int32 - messageOffsets []flatbuffers.UOffsetT - segmentSeqBase int64 - segmentSeqLast int64 - tsMsBase int64 - tsMsLast int64 -} - -func NewMessageBatchBuilder(b *flatbuffers.Builder, - producerId int32, - producerEpoch int32, - segmentId int32, - flags int32) *MessageBatchBuilder { - - b.Reset() - - return &MessageBatchBuilder{ - b: b, - producerId: producerId, - producerEpoch: producerEpoch, - segmentId: segmentId, - flags: flags, - } -} - -func (builder *MessageBatchBuilder) AddMessage(segmentSeq int64, tsMs int64, properties map[string][]byte, key []byte, value []byte) { - if builder.segmentSeqBase == 0 { - builder.segmentSeqBase = segmentSeq - } - builder.segmentSeqLast = segmentSeq - if builder.tsMsBase == 0 { - builder.tsMsBase = tsMs - } - builder.tsMsLast = tsMs - - var names, values, pairs []flatbuffers.UOffsetT - for k, v := range properties { - names = append(names, builder.b.CreateString(k)) - values = append(values, builder.b.CreateByteVector(v)) - } - for i, _ := range names { - message_fbs.NameValueStart(builder.b) - message_fbs.NameValueAddName(builder.b, names[i]) - message_fbs.NameValueAddValue(builder.b, values[i]) - pair := message_fbs.NameValueEnd(builder.b) - pairs = append(pairs, pair) - } - - message_fbs.MessageStartPropertiesVector(builder.b, len(properties)) - for i := len(pairs) - 1; i >= 0; i-- { - builder.b.PrependUOffsetT(pairs[i]) - } - propOffset := builder.b.EndVector(len(properties)) - - keyOffset := builder.b.CreateByteVector(key) - valueOffset := builder.b.CreateByteVector(value) - - message_fbs.MessageStart(builder.b) - message_fbs.MessageAddSeqDelta(builder.b, int32(segmentSeq-builder.segmentSeqBase)) - message_fbs.MessageAddTsMsDelta(builder.b, int32(tsMs-builder.tsMsBase)) - - message_fbs.MessageAddProperties(builder.b, propOffset) - message_fbs.MessageAddKey(builder.b, keyOffset) - message_fbs.MessageAddData(builder.b, valueOffset) - messageOffset := message_fbs.MessageEnd(builder.b) - - builder.messageOffsets = append(builder.messageOffsets, messageOffset) - -} - -func (builder *MessageBatchBuilder) BuildMessageBatch() { - message_fbs.MessageBatchStartMessagesVector(builder.b, len(builder.messageOffsets)) - for i := len(builder.messageOffsets) - 1; i >= 0; i-- { - builder.b.PrependUOffsetT(builder.messageOffsets[i]) - } - messagesOffset := builder.b.EndVector(len(builder.messageOffsets)) - - message_fbs.MessageBatchStart(builder.b) - message_fbs.MessageBatchAddProducerId(builder.b, builder.producerId) - message_fbs.MessageBatchAddProducerEpoch(builder.b, builder.producerEpoch) - message_fbs.MessageBatchAddSegmentId(builder.b, builder.segmentId) - message_fbs.MessageBatchAddFlags(builder.b, builder.flags) - message_fbs.MessageBatchAddSegmentSeqBase(builder.b, builder.segmentSeqBase) - message_fbs.MessageBatchAddSegmentSeqMaxDelta(builder.b, int32(builder.segmentSeqLast-builder.segmentSeqBase)) - message_fbs.MessageBatchAddTsMsBase(builder.b, builder.tsMsBase) - message_fbs.MessageBatchAddTsMsMaxDelta(builder.b, int32(builder.tsMsLast-builder.tsMsBase)) - - message_fbs.MessageBatchAddMessages(builder.b, messagesOffset) - - messageBatch := message_fbs.MessageBatchEnd(builder.b) - - builder.b.Finish(messageBatch) -} - -func (builder *MessageBatchBuilder) GetBytes() []byte { - return builder.b.FinishedBytes() -} diff --git a/weed/mq/segment/message_serde_test.go b/weed/mq/segment/message_serde_test.go deleted file mode 100644 index a54ce5708..000000000 --- a/weed/mq/segment/message_serde_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package segment - -import ( - flatbuffers "github.com/google/flatbuffers/go" - "github.com/seaweedfs/seaweedfs/weed/pb/message_fbs" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestMessageSerde(t *testing.T) { - b := flatbuffers.NewBuilder(1024) - - prop := make(map[string][]byte) - prop["n1"] = []byte("v1") - prop["n2"] = []byte("v2") - - bb := NewMessageBatchBuilder(b, 1, 2, 3, 4) - - bb.AddMessage(5, 6, prop, []byte("the primary key"), []byte("body is here")) - bb.AddMessage(5, 7, prop, []byte("the primary 2"), []byte("body is 2")) - - bb.BuildMessageBatch() - - buf := bb.GetBytes() - - println("serialized size", len(buf)) - - mb := message_fbs.GetRootAsMessageBatch(buf, 0) - - assert.Equal(t, int32(1), mb.ProducerId()) - assert.Equal(t, int32(2), mb.ProducerEpoch()) - assert.Equal(t, int32(3), mb.SegmentId()) - assert.Equal(t, int32(4), mb.Flags()) - assert.Equal(t, int64(5), mb.SegmentSeqBase()) - assert.Equal(t, int32(0), mb.SegmentSeqMaxDelta()) - assert.Equal(t, int64(6), mb.TsMsBase()) - assert.Equal(t, int32(1), mb.TsMsMaxDelta()) - - assert.Equal(t, 2, mb.MessagesLength()) - - m := &message_fbs.Message{} - mb.Messages(m, 0) - - /* - // the vector seems not consistent - nv := &message_fbs.NameValue{} - m.Properties(nv, 0) - assert.Equal(t, "n1", string(nv.Name())) - assert.Equal(t, "v1", string(nv.Value())) - m.Properties(nv, 1) - assert.Equal(t, "n2", string(nv.Name())) - assert.Equal(t, "v2", string(nv.Value())) - */ - assert.Equal(t, []byte("the primary key"), m.Key()) - assert.Equal(t, []byte("body is here"), m.Data()) - - assert.Equal(t, int32(0), m.SeqDelta()) - assert.Equal(t, int32(0), m.TsMsDelta()) - -} diff --git a/weed/mq/segment/segment_serde.go b/weed/mq/segment/segment_serde.go deleted file mode 100644 index e076271d6..000000000 --- a/weed/mq/segment/segment_serde.go +++ /dev/null @@ -1 +0,0 @@ -package segment diff --git a/weed/mq/sub_coordinator/consumer_group.go b/weed/mq/sub_coordinator/consumer_group.go deleted file mode 100644 index ba94f34b4..000000000 --- a/weed/mq/sub_coordinator/consumer_group.go +++ /dev/null @@ -1,112 +0,0 @@ -package sub_coordinator - -import ( - "fmt" - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "time" -) - -type ConsumerGroup struct { - topic topic.Topic - // map a consumer group instance id to a consumer group instance - ConsumerGroupInstances cmap.ConcurrentMap[string, *ConsumerGroupInstance] - Market *Market - reBalanceTimer *time.Timer - filerClientAccessor *filer_client.FilerClientAccessor - stopCh chan struct{} -} - -func NewConsumerGroup(t *schema_pb.Topic, reblanceSeconds int32, filerClientAccessor *filer_client.FilerClientAccessor) *ConsumerGroup { - cg := &ConsumerGroup{ - topic: topic.FromPbTopic(t), - ConsumerGroupInstances: cmap.New[*ConsumerGroupInstance](), - filerClientAccessor: filerClientAccessor, - stopCh: make(chan struct{}), - } - if conf, err := cg.filerClientAccessor.ReadTopicConfFromFiler(cg.topic); err == nil { - var partitions []topic.Partition - for _, assignment := range conf.BrokerPartitionAssignments { - partitions = append(partitions, topic.FromPbPartition(assignment.Partition)) - } - cg.Market = NewMarket(partitions, time.Duration(reblanceSeconds)*time.Second) - } else { - glog.V(0).Infof("fail to read topic conf from filer: %v", err) - return nil - } - - go func() { - for { - select { - case adjustment := <-cg.Market.AdjustmentChan: - cgi, found := cg.ConsumerGroupInstances.Get(string(adjustment.consumer)) - if !found { - glog.V(0).Infof("consumer group instance %s not found", adjustment.consumer) - continue - } - if adjustment.isAssign { - if conf, err := cg.filerClientAccessor.ReadTopicConfFromFiler(cg.topic); err == nil { - for _, assignment := range conf.BrokerPartitionAssignments { - if adjustment.partition.Equals(topic.FromPbPartition(assignment.Partition)) { - cgi.ResponseChan <- &mq_pb.SubscriberToSubCoordinatorResponse{ - Message: &mq_pb.SubscriberToSubCoordinatorResponse_Assignment_{ - Assignment: &mq_pb.SubscriberToSubCoordinatorResponse_Assignment{ - PartitionAssignment: &mq_pb.BrokerPartitionAssignment{ - Partition: adjustment.partition.ToPbPartition(), - LeaderBroker: assignment.LeaderBroker, - FollowerBroker: assignment.FollowerBroker, - }, - }, - }, - } - glog.V(0).Infof("send assignment %v to %s", adjustment.partition, adjustment.consumer) - break - } - } - } - } else { - cgi.ResponseChan <- &mq_pb.SubscriberToSubCoordinatorResponse{ - Message: &mq_pb.SubscriberToSubCoordinatorResponse_UnAssignment_{ - UnAssignment: &mq_pb.SubscriberToSubCoordinatorResponse_UnAssignment{ - Partition: adjustment.partition.ToPbPartition(), - }, - }, - } - glog.V(0).Infof("send unassignment %v to %s", adjustment.partition, adjustment.consumer) - } - case <-cg.stopCh: - return - } - } - }() - - return cg -} - -func (cg *ConsumerGroup) AckAssignment(cgi *ConsumerGroupInstance, assignment *mq_pb.SubscriberToSubCoordinatorRequest_AckAssignmentMessage) { - fmt.Printf("ack assignment %v\n", assignment) - cg.Market.ConfirmAdjustment(&Adjustment{ - consumer: cgi.InstanceId, - partition: topic.FromPbPartition(assignment.Partition), - isAssign: true, - }) -} -func (cg *ConsumerGroup) AckUnAssignment(cgi *ConsumerGroupInstance, assignment *mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) { - fmt.Printf("ack unassignment %v\n", assignment) - cg.Market.ConfirmAdjustment(&Adjustment{ - consumer: cgi.InstanceId, - partition: topic.FromPbPartition(assignment.Partition), - isAssign: false, - }) -} - -func (cg *ConsumerGroup) OnPartitionListChange(assignments []*mq_pb.BrokerPartitionAssignment) { -} - -func (cg *ConsumerGroup) Shutdown() { - close(cg.stopCh) -} diff --git a/weed/mq/sub_coordinator/consumer_group_instance.go b/weed/mq/sub_coordinator/consumer_group_instance.go deleted file mode 100644 index 74a35b40a..000000000 --- a/weed/mq/sub_coordinator/consumer_group_instance.go +++ /dev/null @@ -1,23 +0,0 @@ -package sub_coordinator - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" -) - -type ConsumerGroupInstanceId string - -type ConsumerGroupInstance struct { - InstanceId ConsumerGroupInstanceId - AssignedPartitions []topic.Partition - ResponseChan chan *mq_pb.SubscriberToSubCoordinatorResponse - MaxPartitionCount int32 -} - -func NewConsumerGroupInstance(instanceId string, maxPartitionCount int32) *ConsumerGroupInstance { - return &ConsumerGroupInstance{ - InstanceId: ConsumerGroupInstanceId(instanceId), - ResponseChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1), - MaxPartitionCount: maxPartitionCount, - } -} diff --git a/weed/mq/sub_coordinator/inflight_message_tracker.go b/weed/mq/sub_coordinator/inflight_message_tracker.go deleted file mode 100644 index 8ecbb2ccd..000000000 --- a/weed/mq/sub_coordinator/inflight_message_tracker.go +++ /dev/null @@ -1,171 +0,0 @@ -package sub_coordinator - -import ( - "sort" - "sync" -) - -type InflightMessageTracker struct { - messages map[string]int64 - mu sync.Mutex - timestamps *RingBuffer -} - -func NewInflightMessageTracker(capacity int) *InflightMessageTracker { - return &InflightMessageTracker{ - messages: make(map[string]int64), - timestamps: NewRingBuffer(capacity), - } -} - -// EnflightMessage tracks the message with the key and timestamp. -// These messages are sent to the consumer group instances and waiting for ack. -func (imt *InflightMessageTracker) EnflightMessage(key []byte, tsNs int64) { - // fmt.Printf("EnflightMessage(%s,%d)\n", string(key), tsNs) - imt.mu.Lock() - defer imt.mu.Unlock() - imt.messages[string(key)] = tsNs - imt.timestamps.EnflightTimestamp(tsNs) -} - -// IsMessageAcknowledged returns true if the message has been acknowledged. -// If the message is older than the oldest inflight messages, returns false. -// returns false if the message is inflight. -// Otherwise, returns false if the message is old and can be ignored. -func (imt *InflightMessageTracker) IsMessageAcknowledged(key []byte, tsNs int64) bool { - imt.mu.Lock() - defer imt.mu.Unlock() - - if tsNs <= imt.timestamps.OldestAckedTimestamp() { - return true - } - if tsNs > imt.timestamps.Latest() { - return false - } - - if _, found := imt.messages[string(key)]; found { - return false - } - - return true -} - -// AcknowledgeMessage acknowledges the message with the key and timestamp. -func (imt *InflightMessageTracker) AcknowledgeMessage(key []byte, tsNs int64) bool { - // fmt.Printf("AcknowledgeMessage(%s,%d)\n", string(key), tsNs) - imt.mu.Lock() - defer imt.mu.Unlock() - timestamp, exists := imt.messages[string(key)] - if !exists || timestamp != tsNs { - return false - } - delete(imt.messages, string(key)) - // Remove the specific timestamp from the ring buffer. - imt.timestamps.AckTimestamp(tsNs) - return true -} - -func (imt *InflightMessageTracker) GetOldestAckedTimestamp() int64 { - return imt.timestamps.OldestAckedTimestamp() -} - -// IsInflight returns true if the message with the key is inflight. -func (imt *InflightMessageTracker) IsInflight(key []byte) bool { - imt.mu.Lock() - defer imt.mu.Unlock() - _, found := imt.messages[string(key)] - return found -} - -// Cleanup clears all in-flight messages. This should be called when a subscriber disconnects -// to prevent messages from being stuck in the in-flight state indefinitely. -func (imt *InflightMessageTracker) Cleanup() int { - imt.mu.Lock() - defer imt.mu.Unlock() - count := len(imt.messages) - // Clear all in-flight messages - imt.messages = make(map[string]int64) - return count -} - -type TimestampStatus struct { - Timestamp int64 - Acked bool -} - -// RingBuffer represents a circular buffer to hold timestamps. -type RingBuffer struct { - buffer []*TimestampStatus - head int - size int - maxTimestamp int64 - maxAllAckedTs int64 -} - -// NewRingBuffer creates a new RingBuffer of the given capacity. -func NewRingBuffer(capacity int) *RingBuffer { - return &RingBuffer{ - buffer: newBuffer(capacity), - } -} - -func newBuffer(capacity int) []*TimestampStatus { - buffer := make([]*TimestampStatus, capacity) - for i := range buffer { - buffer[i] = &TimestampStatus{} - } - return buffer -} - -// EnflightTimestamp adds a new timestamp to the ring buffer. -func (rb *RingBuffer) EnflightTimestamp(timestamp int64) { - if rb.size < len(rb.buffer) { - rb.size++ - } else { - newBuf := newBuffer(2 * len(rb.buffer)) - for i := 0; i < rb.size; i++ { - newBuf[i] = rb.buffer[(rb.head+len(rb.buffer)-rb.size+i)%len(rb.buffer)] - } - rb.buffer = newBuf - rb.head = rb.size - rb.size++ - } - head := rb.buffer[rb.head] - head.Timestamp = timestamp - head.Acked = false - rb.head = (rb.head + 1) % len(rb.buffer) - if timestamp > rb.maxTimestamp { - rb.maxTimestamp = timestamp - } -} - -// AckTimestamp removes the specified timestamp from the ring buffer. -func (rb *RingBuffer) AckTimestamp(timestamp int64) { - // Perform binary search - index := sort.Search(rb.size, func(i int) bool { - return rb.buffer[(rb.head+len(rb.buffer)-rb.size+i)%len(rb.buffer)].Timestamp >= timestamp - }) - actualIndex := (rb.head + len(rb.buffer) - rb.size + index) % len(rb.buffer) - - rb.buffer[actualIndex].Acked = true - - // Remove all the continuously acknowledged timestamps from the buffer - startPos := (rb.head + len(rb.buffer) - rb.size) % len(rb.buffer) - for i := 0; i < len(rb.buffer) && rb.buffer[(startPos+i)%len(rb.buffer)].Acked; i++ { - t := rb.buffer[(startPos+i)%len(rb.buffer)] - if rb.maxAllAckedTs < t.Timestamp { - rb.size-- - rb.maxAllAckedTs = t.Timestamp - } - } -} - -// OldestAckedTimestamp returns the oldest that is already acked timestamp in the ring buffer. -func (rb *RingBuffer) OldestAckedTimestamp() int64 { - return rb.maxAllAckedTs -} - -// Latest returns the most recently known timestamp in the ring buffer. -func (rb *RingBuffer) Latest() int64 { - return rb.maxTimestamp -} diff --git a/weed/mq/sub_coordinator/inflight_message_tracker_test.go b/weed/mq/sub_coordinator/inflight_message_tracker_test.go deleted file mode 100644 index 5b7a1bdd8..000000000 --- a/weed/mq/sub_coordinator/inflight_message_tracker_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package sub_coordinator - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestRingBuffer(t *testing.T) { - // Initialize a RingBuffer with capacity 5 - rb := NewRingBuffer(5) - - // Add timestamps to the buffer - timestamps := []int64{100, 200, 300, 400, 500} - for _, ts := range timestamps { - rb.EnflightTimestamp(ts) - } - - // Test Add method and buffer size - expectedSize := 5 - if rb.size != expectedSize { - t.Errorf("Expected buffer size %d, got %d", expectedSize, rb.size) - } - - assert.Equal(t, int64(0), rb.OldestAckedTimestamp()) - assert.Equal(t, int64(500), rb.Latest()) - - rb.AckTimestamp(200) - assert.Equal(t, int64(0), rb.OldestAckedTimestamp()) - rb.AckTimestamp(100) - assert.Equal(t, int64(200), rb.OldestAckedTimestamp()) - - rb.EnflightTimestamp(int64(600)) - rb.EnflightTimestamp(int64(700)) - - rb.AckTimestamp(500) - assert.Equal(t, int64(200), rb.OldestAckedTimestamp()) - rb.AckTimestamp(400) - assert.Equal(t, int64(200), rb.OldestAckedTimestamp()) - rb.AckTimestamp(300) - assert.Equal(t, int64(500), rb.OldestAckedTimestamp()) - - assert.Equal(t, int64(700), rb.Latest()) -} - -func TestInflightMessageTracker(t *testing.T) { - // Initialize an InflightMessageTracker with capacity 5 - tracker := NewInflightMessageTracker(5) - - // Add inflight messages - key := []byte("1") - timestamp := int64(1) - tracker.EnflightMessage(key, timestamp) - - // Test IsMessageAcknowledged method - isOld := tracker.IsMessageAcknowledged(key, timestamp-10) - if !isOld { - t.Error("Expected message to be old") - } - - // Test AcknowledgeMessage method - acked := tracker.AcknowledgeMessage(key, timestamp) - if !acked { - t.Error("Expected message to be acked") - } - if _, exists := tracker.messages[string(key)]; exists { - t.Error("Expected message to be deleted after ack") - } - if tracker.timestamps.size != 0 { - t.Error("Expected buffer size to be 0 after ack") - } - assert.Equal(t, timestamp, tracker.GetOldestAckedTimestamp()) -} - -func TestInflightMessageTracker2(t *testing.T) { - // Initialize an InflightMessageTracker with initial capacity 1 - tracker := NewInflightMessageTracker(1) - - tracker.EnflightMessage([]byte("1"), int64(1)) - tracker.EnflightMessage([]byte("2"), int64(2)) - tracker.EnflightMessage([]byte("3"), int64(3)) - tracker.EnflightMessage([]byte("4"), int64(4)) - tracker.EnflightMessage([]byte("5"), int64(5)) - assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1))) - assert.Equal(t, int64(1), tracker.GetOldestAckedTimestamp()) - - // Test IsMessageAcknowledged method - isAcked := tracker.IsMessageAcknowledged([]byte("2"), int64(2)) - if isAcked { - t.Error("Expected message to be not acked") - } - - // Test AcknowledgeMessage method - assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2))) - assert.Equal(t, int64(2), tracker.GetOldestAckedTimestamp()) - -} - -func TestInflightMessageTracker3(t *testing.T) { - // Initialize an InflightMessageTracker with initial capacity 1 - tracker := NewInflightMessageTracker(1) - - tracker.EnflightMessage([]byte("1"), int64(1)) - tracker.EnflightMessage([]byte("2"), int64(2)) - tracker.EnflightMessage([]byte("3"), int64(3)) - assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1))) - tracker.EnflightMessage([]byte("4"), int64(4)) - tracker.EnflightMessage([]byte("5"), int64(5)) - assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2))) - assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3))) - tracker.EnflightMessage([]byte("6"), int64(6)) - tracker.EnflightMessage([]byte("7"), int64(7)) - assert.True(t, tracker.AcknowledgeMessage([]byte("4"), int64(4))) - assert.True(t, tracker.AcknowledgeMessage([]byte("5"), int64(5))) - assert.True(t, tracker.AcknowledgeMessage([]byte("6"), int64(6))) - assert.Equal(t, int64(6), tracker.GetOldestAckedTimestamp()) - assert.True(t, tracker.AcknowledgeMessage([]byte("7"), int64(7))) - assert.Equal(t, int64(7), tracker.GetOldestAckedTimestamp()) - -} - -func TestInflightMessageTracker4(t *testing.T) { - // Initialize an InflightMessageTracker with initial capacity 1 - tracker := NewInflightMessageTracker(1) - - tracker.EnflightMessage([]byte("1"), int64(1)) - tracker.EnflightMessage([]byte("2"), int64(2)) - assert.True(t, tracker.AcknowledgeMessage([]byte("1"), int64(1))) - assert.True(t, tracker.AcknowledgeMessage([]byte("2"), int64(2))) - tracker.EnflightMessage([]byte("3"), int64(3)) - assert.True(t, tracker.AcknowledgeMessage([]byte("3"), int64(3))) - assert.Equal(t, int64(3), tracker.GetOldestAckedTimestamp()) - -} diff --git a/weed/mq/sub_coordinator/market.go b/weed/mq/sub_coordinator/market.go deleted file mode 100644 index df07edfd5..000000000 --- a/weed/mq/sub_coordinator/market.go +++ /dev/null @@ -1,367 +0,0 @@ -package sub_coordinator - -import ( - "errors" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "sync" - "time" -) - -/* -Market is a data structure that keeps track of the state of the consumer group instances and the partitions. - -When rebalancing, the market will try to balance the load of the partitions among the consumer group instances. -For each loop, the market will: -* If a consumer group instance has more partitions than the average, it will unassign some partitions. -* If a consumer group instance has less partitions than the average, it will assign some partitions. - -Trigger rebalance when: -* A new consumer group instance is added -* Some partitions are unassigned from a consumer group instance. - -If multiple rebalance requests are received, after a certain period, the market will only process the latest request. - -However, if the number of unassigned partition is increased to exactly the total number of partitions, -and total partitions are less than or equal to the sum of the max partition count of all consumer group instances, -the market will process the request immediately. -This is to ensure a partition can be migrated to another consumer group instance as soon as possible. - -Emit these adjustments to the subscriber coordinator: -* Assign a partition to a consumer group instance -* Unassign a partition from a consumer group instance - -Because the adjustment is sent to the subscriber coordinator, the market will keep track of the inflight adjustments. -The subscriber coordinator will send back the response to the market when the adjustment is processed. -If the adjustment is older than a certain time(inflightAdjustmentTTL), it would be considered expired. -Otherwise, the adjustment is considered inflight, so it would be used when calculating the load. - -Later features: -* A consumer group instance is not keeping up with the load. - -Since a coordinator, and thus the market, may be restarted or moved to another node, the market should be able to recover the state from the subscriber coordinator. -The subscriber coordinator should be able to send the current state of the consumer group instances and the partitions to the market. - -*/ - -type PartitionSlot struct { - Partition topic.Partition - AssignedTo *ConsumerGroupInstance // Track the consumer assigned to this partition slot -} - -type Adjustment struct { - isAssign bool - partition topic.Partition - consumer ConsumerGroupInstanceId - ts time.Time -} - -type Market struct { - mu sync.Mutex - partitions map[topic.Partition]*PartitionSlot - consumerInstances map[ConsumerGroupInstanceId]*ConsumerGroupInstance - AdjustmentChan chan *Adjustment - inflightAdjustments []*Adjustment - inflightAdjustmentTTL time.Duration - lastBalancedTime time.Time - stopChan chan struct{} - balanceRequestChan chan struct{} - hasBalanceRequest bool -} - -func NewMarket(partitions []topic.Partition, inflightAdjustmentTTL time.Duration) *Market { - partitionMap := make(map[topic.Partition]*PartitionSlot) - for _, partition := range partitions { - partitionMap[partition] = &PartitionSlot{ - Partition: partition, - } - } - m := &Market{ - partitions: partitionMap, - consumerInstances: make(map[ConsumerGroupInstanceId]*ConsumerGroupInstance), - AdjustmentChan: make(chan *Adjustment, 100), - inflightAdjustmentTTL: inflightAdjustmentTTL, - stopChan: make(chan struct{}), - balanceRequestChan: make(chan struct{}), - } - m.lastBalancedTime = time.Now() - go m.loopBalanceLoad() - - return m -} - -func (m *Market) ShutdownMarket() { - close(m.stopChan) - close(m.AdjustmentChan) -} - -func (m *Market) AddConsumerInstance(consumer *ConsumerGroupInstance) error { - m.mu.Lock() - defer m.mu.Unlock() - - if _, exists := m.consumerInstances[consumer.InstanceId]; exists { - return errors.New("consumer instance already exists") - } - - m.consumerInstances[consumer.InstanceId] = consumer - m.balanceRequestChan <- struct{}{} - - return nil -} - -func (m *Market) RemoveConsumerInstance(consumerId ConsumerGroupInstanceId) error { - m.mu.Lock() - defer m.mu.Unlock() - - consumer, exists := m.consumerInstances[consumerId] - if !exists { - return nil - } - delete(m.consumerInstances, consumerId) - - for _, partition := range consumer.AssignedPartitions { - if partitionSlot, exists := m.partitions[partition]; exists { - partitionSlot.AssignedTo = nil - } - } - m.balanceRequestChan <- struct{}{} - - return nil -} - -func (m *Market) assignPartitionToConsumer(partition *PartitionSlot) { - var bestConsumer *ConsumerGroupInstance - var minLoad = int(^uint(0) >> 1) // Max int value - - inflightConsumerAdjustments := make(map[ConsumerGroupInstanceId]int) - for _, adjustment := range m.inflightAdjustments { - if adjustment.isAssign { - inflightConsumerAdjustments[adjustment.consumer]++ - } else { - inflightConsumerAdjustments[adjustment.consumer]-- - } - } - for _, consumer := range m.consumerInstances { - consumerLoad := len(consumer.AssignedPartitions) - if inflightAdjustments, exists := inflightConsumerAdjustments[consumer.InstanceId]; exists { - consumerLoad += inflightAdjustments - } - // fmt.Printf("Consumer %+v has load %d, max %d, min %d\n", consumer.InstanceId, consumerLoad, consumer.MaxPartitionCount, minLoad) - if consumerLoad < int(consumer.MaxPartitionCount) { - if consumerLoad < minLoad { - bestConsumer = consumer - minLoad = consumerLoad - // fmt.Printf("picked: Consumer %+v has load %d, max %d, min %d\n", consumer.InstanceId, consumerLoad, consumer.MaxPartitionCount, minLoad) - } - } - } - - if bestConsumer != nil { - // change consumer assigned partitions later when the adjustment is confirmed - adjustment := &Adjustment{ - isAssign: true, - partition: partition.Partition, - consumer: bestConsumer.InstanceId, - ts: time.Now(), - } - m.AdjustmentChan <- adjustment - m.inflightAdjustments = append(m.inflightAdjustments, adjustment) - m.lastBalancedTime = adjustment.ts - } -} - -func (m *Market) loopBalanceLoad() { - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if m.hasBalanceRequest { - m.hasBalanceRequest = false - inflightAdjustments := make([]*Adjustment, 0, len(m.inflightAdjustments)) - for _, adjustment := range m.inflightAdjustments { - if adjustment.ts.Add(m.inflightAdjustmentTTL).After(time.Now()) { - inflightAdjustments = append(inflightAdjustments, adjustment) - } - } - m.inflightAdjustments = inflightAdjustments - - m.doBalanceLoad() - // println("Balance load completed.") - m.Status() - } - case <-m.balanceRequestChan: - m.hasBalanceRequest = true - case <-m.stopChan: - return - } - } -} - -// doBalanceLoad will balance the load of the partitions among the consumer group instances. -// It will try to unassign partitions from the consumer group instances that have more partitions than the average. -// It will try to assign partitions to the consumer group instances that have less partitions than the average. -func (m *Market) doBalanceLoad() { - if len(m.consumerInstances) == 0 { - return - } - - // find the average load for all consumers - averageLoad := m.findAverageLoad() - - // find the consumers with the higher load than average - if m.adjustBusyConsumers(averageLoad) { - return - } - - // find partitions with no consumer assigned - m.adjustUnassignedPartitions() -} -func (m *Market) findAverageLoad() (averageLoad float32) { - var totalLoad int - for _, consumer := range m.consumerInstances { - totalLoad += len(consumer.AssignedPartitions) - } - for _, adjustment := range m.inflightAdjustments { - if adjustment.isAssign { - totalLoad++ - } else { - totalLoad-- - } - } - averageLoad = float32(totalLoad) / float32(len(m.consumerInstances)) - return -} - -func (m *Market) adjustBusyConsumers(averageLoad float32) (hasAdjustments bool) { - inflightConsumerAdjustments := make(map[ConsumerGroupInstanceId]int) - for _, adjustment := range m.inflightAdjustments { - if adjustment.isAssign { - inflightConsumerAdjustments[adjustment.consumer]++ - } else { - inflightConsumerAdjustments[adjustment.consumer]-- - } - } - for _, consumer := range m.consumerInstances { - consumerLoad := len(consumer.AssignedPartitions) - if inflightAdjustment, exists := inflightConsumerAdjustments[consumer.InstanceId]; exists { - consumerLoad += inflightAdjustment - } - delta := int(float32(consumerLoad) - averageLoad) - if delta <= 0 { - continue - } - adjustTime := time.Now() - for i := 0; i < delta; i++ { - adjustment := &Adjustment{ - isAssign: false, - partition: consumer.AssignedPartitions[i], - consumer: consumer.InstanceId, - ts: adjustTime, - } - m.AdjustmentChan <- adjustment - m.inflightAdjustments = append(m.inflightAdjustments, adjustment) - m.lastBalancedTime = adjustment.ts - } - hasAdjustments = true - } - return -} - -func (m *Market) adjustUnassignedPartitions() { - inflightPartitionAdjustments := make(map[topic.Partition]bool) - for _, adjustment := range m.inflightAdjustments { - inflightPartitionAdjustments[adjustment.partition] = true - } - for _, partitionSlot := range m.partitions { - if partitionSlot.AssignedTo == nil { - if _, exists := inflightPartitionAdjustments[partitionSlot.Partition]; exists { - continue - } - // fmt.Printf("Assigning partition %+v to consumer\n", partitionSlot.Partition) - m.assignPartitionToConsumer(partitionSlot) - } - } -} - -func (m *Market) ConfirmAdjustment(adjustment *Adjustment) { - if adjustment.isAssign { - m.confirmAssignPartition(adjustment.partition, adjustment.consumer) - } else { - m.unassignPartitionSlot(adjustment.partition) - } - glog.V(1).Infof("ConfirmAdjustment %+v", adjustment) - m.Status() -} - -func (m *Market) unassignPartitionSlot(partition topic.Partition) { - m.mu.Lock() - defer m.mu.Unlock() - - partitionSlot, exists := m.partitions[partition] - if !exists { - glog.V(0).Infof("partition %+v slot is not tracked", partition) - return - } - - if partitionSlot.AssignedTo == nil { - glog.V(0).Infof("partition %+v slot is not assigned to any consumer", partition) - return - } - - consumer := partitionSlot.AssignedTo - for i, p := range consumer.AssignedPartitions { - if p == partition { - consumer.AssignedPartitions = append(consumer.AssignedPartitions[:i], consumer.AssignedPartitions[i+1:]...) - partitionSlot.AssignedTo = nil - m.balanceRequestChan <- struct{}{} - return - } - } - - glog.V(0).Infof("partition %+v slot not found in assigned consumer", partition) - -} - -func (m *Market) confirmAssignPartition(partition topic.Partition, consumerInstanceId ConsumerGroupInstanceId) { - m.mu.Lock() - defer m.mu.Unlock() - - partitionSlot, exists := m.partitions[partition] - if !exists { - glog.V(0).Infof("partition %+v slot is not tracked", partition) - return - } - - if partitionSlot.AssignedTo != nil { - glog.V(0).Infof("partition %+v slot is already assigned to %+v", partition, partitionSlot.AssignedTo.InstanceId) - return - } - - consumerInstance, exists := m.consumerInstances[consumerInstanceId] - if !exists { - glog.V(0).Infof("consumer %+v is not tracked", consumerInstanceId) - return - } - - partitionSlot.AssignedTo = consumerInstance - consumerInstance.AssignedPartitions = append(consumerInstance.AssignedPartitions, partition) - -} - -func (m *Market) Status() { - m.mu.Lock() - defer m.mu.Unlock() - - glog.V(1).Infof("Market has %d partitions and %d consumer instances", len(m.partitions), len(m.consumerInstances)) - for partition, slot := range m.partitions { - if slot.AssignedTo == nil { - glog.V(1).Infof("Partition %+v is not assigned to any consumer", partition) - } else { - glog.V(1).Infof("Partition %+v is assigned to consumer %+v", partition, slot.AssignedTo.InstanceId) - } - } - for _, consumer := range m.consumerInstances { - glog.V(1).Infof("Consumer %+v has %d partitions", consumer.InstanceId, len(consumer.AssignedPartitions)) - } -} diff --git a/weed/mq/sub_coordinator/market_test.go b/weed/mq/sub_coordinator/market_test.go deleted file mode 100644 index 150a88a8d..000000000 --- a/weed/mq/sub_coordinator/market_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package sub_coordinator - -import ( - "fmt" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/stretchr/testify/assert" -) - -var partitions = []topic.Partition{ - { - RangeStart: 0, - RangeStop: 1, - RingSize: 3, - UnixTimeNs: 0, - }, - { - RangeStart: 1, - RangeStop: 2, - RingSize: 3, - UnixTimeNs: 0, - }, - { - RangeStart: 2, - RangeStop: 3, - RingSize: 3, - UnixTimeNs: 0, - }, -} - -func TestAddConsumerInstance(t *testing.T) { - market := NewMarket(partitions, 10*time.Second) - - consumer := &ConsumerGroupInstance{ - InstanceId: "first", - MaxPartitionCount: 2, - } - err := market.AddConsumerInstance(consumer) - - assert.Nil(t, err) - time.Sleep(1 * time.Second) // Allow time for background rebalancing - market.ShutdownMarket() - for adjustment := range market.AdjustmentChan { - fmt.Printf("%+v\n", adjustment) - } -} - -func TestMultipleConsumerInstances(t *testing.T) { - market := NewMarket(partitions, 10*time.Second) - - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "first", - MaxPartitionCount: 2, - }) - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "second", - MaxPartitionCount: 2, - }) - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "third", - MaxPartitionCount: 2, - }) - - time.Sleep(1 * time.Second) // Allow time for background rebalancing - market.ShutdownMarket() - for adjustment := range market.AdjustmentChan { - fmt.Printf("%+v\n", adjustment) - } -} - -func TestConfirmAdjustment(t *testing.T) { - market := NewMarket(partitions, 1*time.Second) - - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "first", - MaxPartitionCount: 2, - }) - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "second", - MaxPartitionCount: 2, - }) - market.AddConsumerInstance(&ConsumerGroupInstance{ - InstanceId: "third", - MaxPartitionCount: 2, - }) - - go func() { - time.Sleep(5 * time.Second) // Allow time for background rebalancing - market.ShutdownMarket() - }() - go func() { - time.Sleep(2 * time.Second) - market.RemoveConsumerInstance("third") - }() - - for adjustment := range market.AdjustmentChan { - fmt.Printf("%+v\n", adjustment) - market.ConfirmAdjustment(adjustment) - } - -} diff --git a/weed/mq/sub_coordinator/partition_consumer_mapping.go b/weed/mq/sub_coordinator/partition_consumer_mapping.go deleted file mode 100644 index e900e4a33..000000000 --- a/weed/mq/sub_coordinator/partition_consumer_mapping.go +++ /dev/null @@ -1,129 +0,0 @@ -package sub_coordinator - -import ( - "fmt" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "time" -) - -type PartitionConsumerMapping struct { - currentMapping *PartitionSlotToConsumerInstanceList - prevMappings []*PartitionSlotToConsumerInstanceList -} - -// Balance goal: -// 1. max processing power utilization -// 2. allow one consumer instance to be down unexpectedly -// without affecting the processing power utilization - -func (pcm *PartitionConsumerMapping) BalanceToConsumerInstances(partitionSlotToBrokerList *pub_balancer.PartitionSlotToBrokerList, consumerInstances []*ConsumerGroupInstance) { - if len(partitionSlotToBrokerList.PartitionSlots) == 0 || len(consumerInstances) == 0 { - return - } - newMapping := NewPartitionSlotToConsumerInstanceList(partitionSlotToBrokerList.RingSize, time.Now()) - var prevMapping *PartitionSlotToConsumerInstanceList - if len(pcm.prevMappings) > 0 { - prevMapping = pcm.prevMappings[len(pcm.prevMappings)-1] - } else { - prevMapping = nil - } - newMapping.PartitionSlots = doBalanceSticky(partitionSlotToBrokerList.PartitionSlots, consumerInstances, prevMapping) - if pcm.currentMapping != nil { - pcm.prevMappings = append(pcm.prevMappings, pcm.currentMapping) - if len(pcm.prevMappings) > 10 { - pcm.prevMappings = pcm.prevMappings[1:] - } - } - pcm.currentMapping = newMapping -} - -func doBalanceSticky(partitions []*pub_balancer.PartitionSlotToBroker, consumerInstances []*ConsumerGroupInstance, prevMapping *PartitionSlotToConsumerInstanceList) (partitionSlots []*PartitionSlotToConsumerInstance) { - // collect previous consumer instance ids - prevConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{}) - if prevMapping != nil { - for _, prevPartitionSlot := range prevMapping.PartitionSlots { - if prevPartitionSlot.AssignedInstanceId != "" { - prevConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId] = struct{}{} - } - } - } - // collect current consumer instance ids - currConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{}) - for _, consumerInstance := range consumerInstances { - currConsumerInstanceIds[consumerInstance.InstanceId] = struct{}{} - } - - // check deleted consumer instances - deletedConsumerInstanceIds := make(map[ConsumerGroupInstanceId]struct{}) - for consumerInstanceId := range prevConsumerInstanceIds { - if _, ok := currConsumerInstanceIds[consumerInstanceId]; !ok { - deletedConsumerInstanceIds[consumerInstanceId] = struct{}{} - } - } - - // convert partition slots from list to a map - prevPartitionSlotMap := make(map[string]*PartitionSlotToConsumerInstance) - if prevMapping != nil { - for _, partitionSlot := range prevMapping.PartitionSlots { - key := fmt.Sprintf("%d-%d", partitionSlot.RangeStart, partitionSlot.RangeStop) - prevPartitionSlotMap[key] = partitionSlot - } - } - - // make a copy of old mapping, skipping the deleted consumer instances - newPartitionSlots := make([]*PartitionSlotToConsumerInstance, 0, len(partitions)) - for _, partition := range partitions { - newPartitionSlots = append(newPartitionSlots, &PartitionSlotToConsumerInstance{ - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - UnixTimeNs: partition.UnixTimeNs, - Broker: partition.AssignedBroker, - FollowerBroker: partition.FollowerBroker, - }) - } - for _, newPartitionSlot := range newPartitionSlots { - key := fmt.Sprintf("%d-%d", newPartitionSlot.RangeStart, newPartitionSlot.RangeStop) - if prevPartitionSlot, ok := prevPartitionSlotMap[key]; ok { - if _, ok := deletedConsumerInstanceIds[prevPartitionSlot.AssignedInstanceId]; !ok { - newPartitionSlot.AssignedInstanceId = prevPartitionSlot.AssignedInstanceId - } - } - } - - // for all consumer instances, count the average number of partitions - // that are assigned to them - consumerInstancePartitionCount := make(map[ConsumerGroupInstanceId]int) - for _, newPartitionSlot := range newPartitionSlots { - if newPartitionSlot.AssignedInstanceId != "" { - consumerInstancePartitionCount[newPartitionSlot.AssignedInstanceId]++ - } - } - // average number of partitions that are assigned to each consumer instance - averageConsumerInstanceLoad := float32(len(partitions)) / float32(len(consumerInstances)) - - // assign unassigned partition slots to consumer instances that is underloaded - consumerInstanceIdsIndex := 0 - for _, newPartitionSlot := range newPartitionSlots { - if newPartitionSlot.AssignedInstanceId == "" { - for avoidDeadLoop := len(consumerInstances); avoidDeadLoop > 0; avoidDeadLoop-- { - consumerInstance := consumerInstances[consumerInstanceIdsIndex] - if float32(consumerInstancePartitionCount[consumerInstance.InstanceId]) < averageConsumerInstanceLoad { - newPartitionSlot.AssignedInstanceId = consumerInstance.InstanceId - consumerInstancePartitionCount[consumerInstance.InstanceId]++ - consumerInstanceIdsIndex++ - if consumerInstanceIdsIndex >= len(consumerInstances) { - consumerInstanceIdsIndex = 0 - } - break - } else { - consumerInstanceIdsIndex++ - if consumerInstanceIdsIndex >= len(consumerInstances) { - consumerInstanceIdsIndex = 0 - } - } - } - } - } - - return newPartitionSlots -} diff --git a/weed/mq/sub_coordinator/partition_consumer_mapping_test.go b/weed/mq/sub_coordinator/partition_consumer_mapping_test.go deleted file mode 100644 index 415eb27bd..000000000 --- a/weed/mq/sub_coordinator/partition_consumer_mapping_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package sub_coordinator - -import ( - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "reflect" - "testing" -) - -func Test_doBalanceSticky(t *testing.T) { - type args struct { - partitions []*pub_balancer.PartitionSlotToBroker - consumerInstanceIds []*ConsumerGroupInstance - prevMapping *PartitionSlotToConsumerInstanceList - } - tests := []struct { - name string - args args - wantPartitionSlots []*PartitionSlotToConsumerInstance - }{ - { - name: "1 consumer instance, 1 partition", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - }, - prevMapping: nil, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-1", - }, - }, - }, - { - name: "2 consumer instances, 1 partition", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - }, - prevMapping: nil, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-1", - }, - }, - }, - { - name: "1 consumer instance, 2 partitions", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - }, - prevMapping: nil, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-1", - }, - }, - }, - { - name: "2 consumer instances, 2 partitions", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - }, - prevMapping: nil, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - { - name: "2 consumer instances, 2 partitions, 1 deleted consumer instance", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - }, - prevMapping: &PartitionSlotToConsumerInstanceList{ - PartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-3", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - { - name: "2 consumer instances, 2 partitions, 1 new consumer instance", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-3", - MaxPartitionCount: 1, - }, - }, - prevMapping: &PartitionSlotToConsumerInstanceList{ - PartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-3", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-3", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - { - name: "2 consumer instances, 2 partitions, 1 new partition", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - { - RangeStart: 100, - RangeStop: 150, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - }, - prevMapping: &PartitionSlotToConsumerInstanceList{ - PartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - { - RangeStart: 100, - RangeStop: 150, - AssignedInstanceId: "consumer-instance-1", - }, - }, - }, - { - name: "2 consumer instances, 2 partitions, 1 new partition, 1 new consumer instance", - args: args{ - partitions: []*pub_balancer.PartitionSlotToBroker{ - { - RangeStart: 0, - RangeStop: 50, - }, - { - RangeStart: 50, - RangeStop: 100, - }, - { - RangeStart: 100, - RangeStop: 150, - }, - }, - consumerInstanceIds: []*ConsumerGroupInstance{ - { - InstanceId: "consumer-instance-1", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-2", - MaxPartitionCount: 1, - }, - { - InstanceId: "consumer-instance-3", - MaxPartitionCount: 1, - }, - }, - prevMapping: &PartitionSlotToConsumerInstanceList{ - PartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - }, - }, - }, - wantPartitionSlots: []*PartitionSlotToConsumerInstance{ - { - RangeStart: 0, - RangeStop: 50, - AssignedInstanceId: "consumer-instance-1", - }, - { - RangeStart: 50, - RangeStop: 100, - AssignedInstanceId: "consumer-instance-2", - }, - { - RangeStart: 100, - RangeStop: 150, - AssignedInstanceId: "consumer-instance-3", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotPartitionSlots := doBalanceSticky(tt.args.partitions, tt.args.consumerInstanceIds, tt.args.prevMapping); !reflect.DeepEqual(gotPartitionSlots, tt.wantPartitionSlots) { - t.Errorf("doBalanceSticky() = %v, want %v", gotPartitionSlots, tt.wantPartitionSlots) - } - }) - } -} diff --git a/weed/mq/sub_coordinator/partition_list.go b/weed/mq/sub_coordinator/partition_list.go deleted file mode 100644 index 16bf1ff0c..000000000 --- a/weed/mq/sub_coordinator/partition_list.go +++ /dev/null @@ -1,25 +0,0 @@ -package sub_coordinator - -import "time" - -type PartitionSlotToConsumerInstance struct { - RangeStart int32 - RangeStop int32 - UnixTimeNs int64 - Broker string - AssignedInstanceId ConsumerGroupInstanceId - FollowerBroker string -} - -type PartitionSlotToConsumerInstanceList struct { - PartitionSlots []*PartitionSlotToConsumerInstance - RingSize int32 - Version int64 -} - -func NewPartitionSlotToConsumerInstanceList(ringSize int32, version time.Time) *PartitionSlotToConsumerInstanceList { - return &PartitionSlotToConsumerInstanceList{ - RingSize: ringSize, - Version: version.UnixNano(), - } -} diff --git a/weed/mq/sub_coordinator/sub_coordinator.go b/weed/mq/sub_coordinator/sub_coordinator.go deleted file mode 100644 index df86da95f..000000000 --- a/weed/mq/sub_coordinator/sub_coordinator.go +++ /dev/null @@ -1,109 +0,0 @@ -package sub_coordinator - -import ( - "fmt" - - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -type TopicConsumerGroups struct { - // map a consumer group name to a consumer group - ConsumerGroups cmap.ConcurrentMap[string, *ConsumerGroup] -} - -// SubCoordinator coordinates the instances in the consumer group for one topic. -// It is responsible for: -// 1. (Maybe) assigning partitions when a consumer instance is up/down. - -type SubCoordinator struct { - // map topic name to consumer groups - TopicSubscribers cmap.ConcurrentMap[string, *TopicConsumerGroups] - FilerClientAccessor *filer_client.FilerClientAccessor -} - -func NewSubCoordinator() *SubCoordinator { - return &SubCoordinator{ - TopicSubscribers: cmap.New[*TopicConsumerGroups](), - } -} - -func (c *SubCoordinator) GetTopicConsumerGroups(topic *schema_pb.Topic, createIfMissing bool) *TopicConsumerGroups { - topicName := toTopicName(topic) - tcg, _ := c.TopicSubscribers.Get(topicName) - if tcg == nil && createIfMissing { - tcg = &TopicConsumerGroups{ - ConsumerGroups: cmap.New[*ConsumerGroup](), - } - if !c.TopicSubscribers.SetIfAbsent(topicName, tcg) { - tcg, _ = c.TopicSubscribers.Get(topicName) - } - } - return tcg -} -func (c *SubCoordinator) RemoveTopic(topic *schema_pb.Topic) { - topicName := toTopicName(topic) - c.TopicSubscribers.Remove(topicName) -} - -func toTopicName(topic *schema_pb.Topic) string { - topicName := topic.Namespace + "." + topic.Name - return topicName -} - -func (c *SubCoordinator) AddSubscriber(initMessage *mq_pb.SubscriberToSubCoordinatorRequest_InitMessage) (*ConsumerGroup, *ConsumerGroupInstance, error) { - tcg := c.GetTopicConsumerGroups(initMessage.Topic, true) - cg, _ := tcg.ConsumerGroups.Get(initMessage.ConsumerGroup) - if cg == nil { - cg = NewConsumerGroup(initMessage.Topic, initMessage.RebalanceSeconds, c.FilerClientAccessor) - if cg != nil { - tcg.ConsumerGroups.SetIfAbsent(initMessage.ConsumerGroup, cg) - } - cg, _ = tcg.ConsumerGroups.Get(initMessage.ConsumerGroup) - } - if cg == nil { - return nil, nil, fmt.Errorf("fail to create consumer group %s: topic %s not found", initMessage.ConsumerGroup, initMessage.Topic) - } - cgi, _ := cg.ConsumerGroupInstances.Get(initMessage.ConsumerGroupInstanceId) - if cgi == nil { - cgi = NewConsumerGroupInstance(initMessage.ConsumerGroupInstanceId, initMessage.MaxPartitionCount) - if !cg.ConsumerGroupInstances.SetIfAbsent(initMessage.ConsumerGroupInstanceId, cgi) { - cgi, _ = cg.ConsumerGroupInstances.Get(initMessage.ConsumerGroupInstanceId) - } - } - cgi.MaxPartitionCount = initMessage.MaxPartitionCount - cg.Market.AddConsumerInstance(cgi) - return cg, cgi, nil -} - -func (c *SubCoordinator) RemoveSubscriber(initMessage *mq_pb.SubscriberToSubCoordinatorRequest_InitMessage) { - tcg := c.GetTopicConsumerGroups(initMessage.Topic, false) - if tcg == nil { - return - } - cg, _ := tcg.ConsumerGroups.Get(initMessage.ConsumerGroup) - if cg == nil { - return - } - cg.ConsumerGroupInstances.Remove(initMessage.ConsumerGroupInstanceId) - cg.Market.RemoveConsumerInstance(ConsumerGroupInstanceId(initMessage.ConsumerGroupInstanceId)) - if cg.ConsumerGroupInstances.Count() == 0 { - tcg.ConsumerGroups.Remove(initMessage.ConsumerGroup) - cg.Shutdown() - } - if tcg.ConsumerGroups.Count() == 0 { - c.RemoveTopic(initMessage.Topic) - } -} - -func (c *SubCoordinator) OnPartitionChange(topic *schema_pb.Topic, assignments []*mq_pb.BrokerPartitionAssignment) { - tcg, _ := c.TopicSubscribers.Get(toTopicName(topic)) - if tcg == nil { - return - } - for _, cg := range tcg.ConsumerGroups.Items() { - cg.OnPartitionListChange(assignments) - } -} diff --git a/weed/mq/topic/local_manager.go b/weed/mq/topic/local_manager.go deleted file mode 100644 index bc33fdab0..000000000 --- a/weed/mq/topic/local_manager.go +++ /dev/null @@ -1,208 +0,0 @@ -package topic - -import ( - "context" - "time" - - cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/shirou/gopsutil/v4/cpu" -) - -// LocalTopicManager manages topics on local broker -type LocalTopicManager struct { - topics cmap.ConcurrentMap[string, *LocalTopic] - cleanupDone chan struct{} // Signal cleanup goroutine to stop - cleanupTimer *time.Ticker -} - -// NewLocalTopicManager creates a new LocalTopicManager -func NewLocalTopicManager() *LocalTopicManager { - return &LocalTopicManager{ - topics: cmap.New[*LocalTopic](), - cleanupDone: make(chan struct{}), - } -} - -// StartIdlePartitionCleanup starts a background goroutine that periodically -// cleans up idle partitions (partitions with no publishers and no subscribers) -func (manager *LocalTopicManager) StartIdlePartitionCleanup(ctx context.Context, checkInterval, idleTimeout time.Duration) { - manager.cleanupTimer = time.NewTicker(checkInterval) - - go func() { - defer close(manager.cleanupDone) - defer manager.cleanupTimer.Stop() - - glog.V(1).Infof("Idle partition cleanup started: check every %v, cleanup after %v idle", checkInterval, idleTimeout) - - for { - select { - case <-ctx.Done(): - glog.V(1).Info("Idle partition cleanup stopped") - return - case <-manager.cleanupTimer.C: - manager.cleanupIdlePartitions(idleTimeout) - } - } - }() -} - -// cleanupIdlePartitions removes idle partitions from memory -func (manager *LocalTopicManager) cleanupIdlePartitions(idleTimeout time.Duration) { - cleanedCount := 0 - - // Iterate through all topics - manager.topics.IterCb(func(topicKey string, localTopic *LocalTopic) { - localTopic.partitionLock.Lock() - defer localTopic.partitionLock.Unlock() - - // Check each partition - for i := len(localTopic.Partitions) - 1; i >= 0; i-- { - partition := localTopic.Partitions[i] - - if partition.ShouldCleanup(idleTimeout) { - glog.V(1).Infof("Cleaning up idle partition %s (idle for %v, publishers=%d, subscribers=%d)", - partition.Partition.String(), - partition.GetIdleDuration(), - partition.Publishers.Size(), - partition.Subscribers.Size()) - - // Shutdown the partition (closes LogBuffer, etc.) - partition.Shutdown() - - // Remove from slice - localTopic.Partitions = append(localTopic.Partitions[:i], localTopic.Partitions[i+1:]...) - cleanedCount++ - } - } - - // If topic has no partitions left, remove it - if len(localTopic.Partitions) == 0 { - glog.V(1).Infof("Removing empty topic %s", topicKey) - manager.topics.Remove(topicKey) - } - }) - - if cleanedCount > 0 { - glog.V(0).Infof("Cleaned up %d idle partition(s)", cleanedCount) - } -} - -// WaitForCleanupShutdown waits for the cleanup goroutine to finish -func (manager *LocalTopicManager) WaitForCleanupShutdown() { - <-manager.cleanupDone - glog.V(1).Info("Idle partition cleanup shutdown complete") -} - -// AddLocalPartition adds a topic to the local topic manager -func (manager *LocalTopicManager) AddLocalPartition(topic Topic, localPartition *LocalPartition) { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - localTopic = NewLocalTopic(topic) - } - if !manager.topics.SetIfAbsent(topic.String(), localTopic) { - localTopic, _ = manager.topics.Get(topic.String()) - } - localTopic.addPartition(localPartition) -} - -// GetLocalPartition gets a topic from the local topic manager -func (manager *LocalTopicManager) GetLocalPartition(topic Topic, partition Partition) *LocalPartition { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - return nil - } - result := localTopic.findPartition(partition) - return result -} - -// RemoveTopic removes a topic from the local topic manager -func (manager *LocalTopicManager) RemoveTopic(topic Topic) { - manager.topics.Remove(topic.String()) -} - -func (manager *LocalTopicManager) RemoveLocalPartition(topic Topic, partition Partition) (removed bool) { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - return false - } - return localTopic.removePartition(partition) -} - -func (manager *LocalTopicManager) ClosePublishers(topic Topic, unixTsNs int64) (removed bool) { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - return false - } - return localTopic.closePartitionPublishers(unixTsNs) -} - -func (manager *LocalTopicManager) CloseSubscribers(topic Topic, unixTsNs int64) (removed bool) { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - return false - } - return localTopic.closePartitionSubscribers(unixTsNs) -} - -// ListTopicsInMemory returns all topics currently tracked in memory -func (manager *LocalTopicManager) ListTopicsInMemory() []Topic { - var topics []Topic - for item := range manager.topics.IterBuffered() { - topics = append(topics, item.Val.Topic) - } - return topics -} - -// TopicExistsInMemory checks if a topic exists in memory (not flushed data) -func (manager *LocalTopicManager) TopicExistsInMemory(topic Topic) bool { - _, exists := manager.topics.Get(topic.String()) - return exists -} - -func (manager *LocalTopicManager) CollectStats(duration time.Duration) *mq_pb.BrokerStats { - stats := &mq_pb.BrokerStats{ - Stats: make(map[string]*mq_pb.TopicPartitionStats), - } - - // collect current broker's cpu usage - // this needs to be in front, so the following stats can be more accurate - usages, err := cpu.Percent(duration, false) - if err == nil && len(usages) > 0 { - stats.CpuUsagePercent = int32(usages[0]) - } - - // collect current broker's topics and partitions - manager.topics.IterCb(func(topic string, localTopic *LocalTopic) { - for _, localPartition := range localTopic.Partitions { - topicPartition := &TopicPartition{ - Topic: Topic{Namespace: localTopic.Namespace, Name: localTopic.Name}, - Partition: localPartition.Partition, - } - stats.Stats[topicPartition.TopicPartitionId()] = &mq_pb.TopicPartitionStats{ - Topic: &schema_pb.Topic{ - Namespace: string(localTopic.Namespace), - Name: localTopic.Name, - }, - Partition: localPartition.Partition.ToPbPartition(), - PublisherCount: int32(localPartition.Publishers.Size()), - SubscriberCount: int32(localPartition.Subscribers.Size()), - Follower: localPartition.Follower, - } - // fmt.Printf("collect topic %+v partition %+v\n", topicPartition, localPartition.Partition) - } - }) - - return stats - -} - -func (manager *LocalTopicManager) WaitUntilNoPublishers(topic Topic) { - localTopic, ok := manager.topics.Get(topic.String()) - if !ok { - return - } - localTopic.WaitUntilNoPublishers() -} diff --git a/weed/mq/topic/local_partition.go b/weed/mq/topic/local_partition.go deleted file mode 100644 index 5f5c2278f..000000000 --- a/weed/mq/topic/local_partition.go +++ /dev/null @@ -1,401 +0,0 @@ -package topic - -import ( - "context" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type LocalPartition struct { - ListenersWaits int64 - AckTsNs int64 - - // notifying clients - ListenersLock sync.Mutex - ListenersCond *sync.Cond - - Partition - LogBuffer *log_buffer.LogBuffer - Publishers *LocalPartitionPublishers - Subscribers *LocalPartitionSubscribers - - publishFolloweMeStream mq_pb.SeaweedMessaging_PublishFollowMeClient - followerGrpcConnection *grpc.ClientConn - Follower string - - // Track last activity for idle cleanup - lastActivityTime atomic.Int64 // Unix nano timestamp -} - -var TIME_FORMAT = "2006-01-02-15-04-05" -var PartitionGenerationFormat = "v2006-01-02-15-04-05" - -func NewLocalPartition(partition Partition, logFlushInterval int, logFlushFn log_buffer.LogFlushFuncType, readFromDiskFn log_buffer.LogReadFromDiskFuncType) *LocalPartition { - lp := &LocalPartition{ - Partition: partition, - Publishers: NewLocalPartitionPublishers(), - Subscribers: NewLocalPartitionSubscribers(), - } - lp.ListenersCond = sync.NewCond(&lp.ListenersLock) - lp.lastActivityTime.Store(time.Now().UnixNano()) // Initialize with current time - - // Ensure a minimum flush interval to prevent busy-loop when set to 0 - // A flush interval of 0 would cause time.Sleep(0) creating a CPU-consuming busy loop - flushInterval := time.Duration(logFlushInterval) * time.Second - if flushInterval == 0 { - flushInterval = 1 * time.Second // Minimum 1 second to avoid busy-loop, allow near-immediate flushing - } - - lp.LogBuffer = log_buffer.NewLogBuffer(fmt.Sprintf("%d/%04d-%04d", partition.UnixTimeNs, partition.RangeStart, partition.RangeStop), - flushInterval, logFlushFn, readFromDiskFn, func() { - if atomic.LoadInt64(&lp.ListenersWaits) > 0 { - lp.ListenersCond.Broadcast() - } - }) - return lp -} - -func (p *LocalPartition) Publish(message *mq_pb.DataMessage) error { - p.LogBuffer.AddToBuffer(message) - p.UpdateActivity() // Track publish activity for idle cleanup - - // maybe send to the follower - if p.publishFolloweMeStream != nil { - // println("recv", string(message.Key), message.TsNs) - if followErr := p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{ - Message: &mq_pb.PublishFollowMeRequest_Data{ - Data: message, - }, - }); followErr != nil { - return fmt.Errorf("send to follower %s: %v", p.Follower, followErr) - } - } else { - atomic.StoreInt64(&p.AckTsNs, message.TsNs) - } - - return nil -} - -func (p *LocalPartition) Subscribe(clientName string, startPosition log_buffer.MessagePosition, - onNoMessageFn func() bool, eachMessageFn log_buffer.EachLogEntryFuncType) error { - var processedPosition log_buffer.MessagePosition - var readPersistedLogErr error - var readInMemoryLogErr error - var isDone bool - - p.UpdateActivity() // Track subscribe activity for idle cleanup - - // CRITICAL FIX: Use offset-based functions if startPosition is offset-based - // This allows reading historical data by offset, not just by timestamp - if startPosition.IsOffsetBased { - // Wrap eachMessageFn to match the signature expected by LoopProcessLogDataWithOffset - // Also update activity when messages are processed - eachMessageWithOffsetFn := func(logEntry *filer_pb.LogEntry, offset int64) (bool, error) { - p.UpdateActivity() // Track message read activity - return eachMessageFn(logEntry) - } - - // Always attempt initial disk read for historical data - // This is fast if no data on disk, and ensures we don't miss old data - // The memory read loop below handles new data with instant notifications - glog.V(2).Infof("%s reading historical data from disk starting at offset %d", clientName, startPosition.Offset) - processedPosition, isDone, readPersistedLogErr = p.LogBuffer.ReadFromDiskFn(startPosition, 0, eachMessageFn) - if readPersistedLogErr != nil { - glog.V(2).Infof("%s read %v persisted log: %v", clientName, p.Partition, readPersistedLogErr) - return readPersistedLogErr - } - if isDone { - return nil - } - - // Update position after reading from disk - if processedPosition.Time.UnixNano() != 0 || processedPosition.IsOffsetBased { - startPosition = processedPosition - } - - // Step 2: Enter the main loop - read from in-memory buffer, occasionally checking disk - for { - // Read from in-memory buffer (this is the hot path - handles streaming data) - glog.V(4).Infof("SUBSCRIBE: Reading from in-memory buffer for %s at offset %d", clientName, startPosition.Offset) - processedPosition, isDone, readInMemoryLogErr = p.LogBuffer.LoopProcessLogDataWithOffset(clientName, startPosition, 0, onNoMessageFn, eachMessageWithOffsetFn) - - if isDone { - return nil - } - - // Update position - // CRITICAL FIX: For offset-based reads, Time is zero, so check Offset instead - if processedPosition.Time.UnixNano() != 0 || processedPosition.IsOffsetBased { - startPosition = processedPosition - } - - // If we get ResumeFromDiskError, it means data was flushed to disk - // Read from disk ONCE to catch up, then continue with in-memory buffer - if readInMemoryLogErr == log_buffer.ResumeFromDiskError { - glog.V(4).Infof("SUBSCRIBE: ResumeFromDiskError - reading flushed data from disk for %s at offset %d", clientName, startPosition.Offset) - processedPosition, isDone, readPersistedLogErr = p.LogBuffer.ReadFromDiskFn(startPosition, 0, eachMessageFn) - if readPersistedLogErr != nil { - glog.V(2).Infof("%s read %v persisted log after flush: %v", clientName, p.Partition, readPersistedLogErr) - return readPersistedLogErr - } - if isDone { - return nil - } - - // Update position and continue the loop (back to in-memory buffer) - // CRITICAL FIX: For offset-based reads, Time is zero, so check Offset instead - if processedPosition.Time.UnixNano() != 0 || processedPosition.IsOffsetBased { - startPosition = processedPosition - } - // Loop continues - back to reading from in-memory buffer - continue - } - - // Any other error is a real error - if readInMemoryLogErr != nil { - glog.V(2).Infof("%s read %v in memory log: %v", clientName, p.Partition, readInMemoryLogErr) - return readInMemoryLogErr - } - - // If we get here with no error and not done, something is wrong - glog.V(1).Infof("SUBSCRIBE: Unexpected state for %s - no error but not done, continuing", clientName) - } - } - - // Original timestamp-based subscription logic - for { - processedPosition, isDone, readPersistedLogErr = p.LogBuffer.ReadFromDiskFn(startPosition, 0, eachMessageFn) - if readPersistedLogErr != nil { - glog.V(0).Infof("%s read %v persisted log: %v", clientName, p.Partition, readPersistedLogErr) - return readPersistedLogErr - } - if isDone { - return nil - } - - // CRITICAL FIX: For offset-based reads, Time is zero, so check Offset instead - if processedPosition.Time.UnixNano() != 0 || processedPosition.IsOffsetBased { - startPosition = processedPosition - } - processedPosition, isDone, readInMemoryLogErr = p.LogBuffer.LoopProcessLogData(clientName, startPosition, 0, onNoMessageFn, eachMessageFn) - if isDone { - return nil - } - // CRITICAL FIX: For offset-based reads, Time is zero, so check Offset instead - if processedPosition.Time.UnixNano() != 0 || processedPosition.IsOffsetBased { - startPosition = processedPosition - } - - if readInMemoryLogErr == log_buffer.ResumeFromDiskError { - continue - } - if readInMemoryLogErr != nil { - glog.V(0).Infof("%s read %v in memory log: %v", clientName, p.Partition, readInMemoryLogErr) - return readInMemoryLogErr - } - } -} - -func (p *LocalPartition) GetEarliestMessageTimeInMemory() time.Time { - return p.LogBuffer.GetEarliestTime() -} - -func (p *LocalPartition) HasData() bool { - return !p.LogBuffer.GetEarliestTime().IsZero() -} - -func (p *LocalPartition) GetEarliestInMemoryMessagePosition() log_buffer.MessagePosition { - return p.LogBuffer.GetEarliestPosition() -} - -func (p *LocalPartition) closePublishers() { - p.Publishers.SignalShutdown() -} -func (p *LocalPartition) closeSubscribers() { - p.Subscribers.SignalShutdown() -} - -func (p *LocalPartition) WaitUntilNoPublishers() { - for { - if p.Publishers.Size() == 0 { - return - } - time.Sleep(113 * time.Millisecond) - } -} - -func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessageRequest_InitMessage, grpcDialOption grpc.DialOption) (err error) { - if p.publishFolloweMeStream != nil { - return nil - } - if initMessage.FollowerBroker == "" { - return nil - } - - p.Follower = initMessage.FollowerBroker - ctx := context.Background() - p.followerGrpcConnection, err = pb.GrpcDial(ctx, p.Follower, true, grpcDialOption) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", p.Follower, err) - } - followerClient := mq_pb.NewSeaweedMessagingClient(p.followerGrpcConnection) - p.publishFolloweMeStream, err = followerClient.PublishFollowMe(ctx) - if err != nil { - return fmt.Errorf("fail to create publish client: %w", err) - } - if err = p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{ - Message: &mq_pb.PublishFollowMeRequest_Init{ - Init: &mq_pb.PublishFollowMeRequest_InitMessage{ - Topic: initMessage.Topic, - Partition: initMessage.Partition, - }, - }, - }); err != nil { - return err - } - - // start receiving ack from follower - go func() { - defer func() { - // println("stop receiving ack from follower") - }() - - for { - ack, err := p.publishFolloweMeStream.Recv() - if err != nil { - e, _ := status.FromError(err) - if e.Code() == codes.Canceled { - glog.V(0).Infof("local partition %v follower %v stopped", p.Partition, p.Follower) - return - } - glog.Errorf("Receiving local partition %v follower %s ack: %v", p.Partition, p.Follower, err) - return - } - atomic.StoreInt64(&p.AckTsNs, ack.AckTsNs) - // println("recv ack", ack.AckTsNs) - } - }() - return nil -} - -func (p *LocalPartition) MaybeShutdownLocalPartition() (hasShutdown bool) { - - if p.Publishers.Size() == 0 && p.Subscribers.Size() == 0 { - p.LogBuffer.ShutdownLogBuffer() - for !p.LogBuffer.IsAllFlushed() { - time.Sleep(113 * time.Millisecond) - } - if p.publishFolloweMeStream != nil { - // send close to the follower - if followErr := p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{ - Message: &mq_pb.PublishFollowMeRequest_Close{ - Close: &mq_pb.PublishFollowMeRequest_CloseMessage{}, - }, - }); followErr != nil { - glog.Errorf("Error closing follower stream: %v", followErr) - } - glog.V(4).Infof("closing grpcConnection to follower") - p.followerGrpcConnection.Close() - p.publishFolloweMeStream = nil - p.Follower = "" - } - - hasShutdown = true - } - - glog.V(0).Infof("local partition %v Publisher:%d Subscriber:%d follower:%s shutdown %v", p.Partition, p.Publishers.Size(), p.Subscribers.Size(), p.Follower, hasShutdown) - return -} - -// MaybeShutdownLocalPartitionForTopic is a topic-aware version that considers system topic retention -func (p *LocalPartition) MaybeShutdownLocalPartitionForTopic(topicName string) (hasShutdown bool) { - // For system topics like _schemas, be more conservative about shutdown - if isSystemTopic(topicName) { - glog.V(0).Infof("System topic %s - skipping aggressive shutdown for partition %v (Publishers:%d Subscribers:%d)", - topicName, p.Partition, p.Publishers.Size(), p.Subscribers.Size()) - return false - } - - // For regular topics, use the standard shutdown logic - return p.MaybeShutdownLocalPartition() -} - -// isSystemTopic checks if a topic should have special retention behavior -func isSystemTopic(topicName string) bool { - systemTopics := []string{ - "_schemas", // Schema Registry topic - "__consumer_offsets", // Kafka consumer offsets topic - "__transaction_state", // Kafka transaction state topic - } - - for _, systemTopic := range systemTopics { - if topicName == systemTopic { - return true - } - } - - // Also check for topics with system prefixes - return strings.HasPrefix(topicName, "_") || strings.HasPrefix(topicName, "__") -} - -func (p *LocalPartition) Shutdown() { - p.closePublishers() - p.closeSubscribers() - p.LogBuffer.ShutdownLogBuffer() - glog.V(0).Infof("local partition %v shutting down", p.Partition) -} - -func (p *LocalPartition) NotifyLogFlushed(flushTsNs int64) { - if p.publishFolloweMeStream != nil { - if followErr := p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{ - Message: &mq_pb.PublishFollowMeRequest_Flush{ - Flush: &mq_pb.PublishFollowMeRequest_FlushMessage{ - TsNs: flushTsNs, - }, - }, - }); followErr != nil { - glog.Errorf("send follower %s flush message: %v", p.Follower, followErr) - } - // println("notifying", p.Follower, "flushed at", flushTsNs) - } -} - -// UpdateActivity updates the last activity timestamp for this partition -// Should be called whenever a publisher publishes or a subscriber reads -func (p *LocalPartition) UpdateActivity() { - p.lastActivityTime.Store(time.Now().UnixNano()) -} - -// IsIdle returns true if the partition has no publishers and no subscribers -func (p *LocalPartition) IsIdle() bool { - return p.Publishers.Size() == 0 && p.Subscribers.Size() == 0 -} - -// GetIdleDuration returns how long the partition has been idle -func (p *LocalPartition) GetIdleDuration() time.Duration { - lastActivity := p.lastActivityTime.Load() - return time.Since(time.Unix(0, lastActivity)) -} - -// ShouldCleanup returns true if the partition should be cleaned up -// A partition should be cleaned up if: -// 1. It has no publishers and no subscribers -// 2. It has been idle for longer than the idle timeout -func (p *LocalPartition) ShouldCleanup(idleTimeout time.Duration) bool { - if !p.IsIdle() { - return false - } - return p.GetIdleDuration() > idleTimeout -} diff --git a/weed/mq/topic/local_partition_offset.go b/weed/mq/topic/local_partition_offset.go deleted file mode 100644 index e15234ca0..000000000 --- a/weed/mq/topic/local_partition_offset.go +++ /dev/null @@ -1,106 +0,0 @@ -package topic - -import ( - "fmt" - "sync/atomic" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// OffsetAssignmentFunc is a function type for assigning offsets to messages -type OffsetAssignmentFunc func() (int64, error) - -// PublishWithOffset publishes a message with offset assignment -// This method is used by the Kafka gateway integration for sequential offset assignment -func (p *LocalPartition) PublishWithOffset(message *mq_pb.DataMessage, assignOffsetFn OffsetAssignmentFunc) (int64, error) { - // Assign offset for this message - offset, err := assignOffsetFn() - if err != nil { - return 0, fmt.Errorf("failed to assign offset: %w", err) - } - - // Add message to buffer with offset - err = p.addToBufferWithOffset(message, offset) - if err != nil { - return 0, fmt.Errorf("failed to add message to buffer: %w", err) - } - - // Send to follower if needed (same logic as original Publish) - if p.publishFolloweMeStream != nil { - if followErr := p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{ - Message: &mq_pb.PublishFollowMeRequest_Data{ - Data: message, - }, - }); followErr != nil { - return 0, fmt.Errorf("send to follower %s: %v", p.Follower, followErr) - } - } else { - atomic.StoreInt64(&p.AckTsNs, message.TsNs) - } - - return offset, nil -} - -// addToBufferWithOffset adds a message to the log buffer with a pre-assigned offset -func (p *LocalPartition) addToBufferWithOffset(message *mq_pb.DataMessage, offset int64) error { - // Ensure we have a timestamp - processingTsNs := message.TsNs - if processingTsNs == 0 { - processingTsNs = time.Now().UnixNano() - } - - // Build a LogEntry that preserves the assigned sequential offset - logEntry := &filer_pb.LogEntry{ - TsNs: processingTsNs, - PartitionKeyHash: util.HashToInt32(message.Key), - Data: message.Value, - Key: message.Key, - Offset: offset, - } - - // Add the entry to the buffer in a way that preserves offset on disk and in-memory - p.LogBuffer.AddLogEntryToBuffer(logEntry) - - return nil -} - -// GetOffsetInfo returns offset information for this partition -// Used for debugging and monitoring partition offset state -func (p *LocalPartition) GetOffsetInfo() map[string]interface{} { - return map[string]interface{}{ - "partition_ring_size": p.RingSize, - "partition_range_start": p.RangeStart, - "partition_range_stop": p.RangeStop, - "partition_unix_time": p.UnixTimeNs, - "buffer_name": p.LogBuffer.GetName(), - "buffer_offset": p.LogBuffer.GetOffset(), - } -} - -// OffsetAwarePublisher wraps a LocalPartition with offset assignment capability -type OffsetAwarePublisher struct { - partition *LocalPartition - assignOffsetFn OffsetAssignmentFunc -} - -// NewOffsetAwarePublisher creates a new offset-aware publisher -func NewOffsetAwarePublisher(partition *LocalPartition, assignOffsetFn OffsetAssignmentFunc) *OffsetAwarePublisher { - return &OffsetAwarePublisher{ - partition: partition, - assignOffsetFn: assignOffsetFn, - } -} - -// Publish publishes a message with automatic offset assignment -func (oap *OffsetAwarePublisher) Publish(message *mq_pb.DataMessage) error { - _, err := oap.partition.PublishWithOffset(message, oap.assignOffsetFn) - return err -} - -// GetPartition returns the underlying partition -func (oap *OffsetAwarePublisher) GetPartition() *LocalPartition { - return oap.partition -} diff --git a/weed/mq/topic/local_partition_publishers.go b/weed/mq/topic/local_partition_publishers.go deleted file mode 100644 index 5142a66f2..000000000 --- a/weed/mq/topic/local_partition_publishers.go +++ /dev/null @@ -1,115 +0,0 @@ -package topic - -import ( - "sync" - "sync/atomic" - "time" -) - -type LocalPartitionPublishers struct { - publishers map[string]*LocalPublisher - publishersLock sync.RWMutex -} -type LocalPublisher struct { - connectTimeNs int64 // accessed atomically - lastSeenTimeNs int64 // accessed atomically - lastPublishedOffset int64 // accessed atomically - offset of last message published - lastAckedOffset int64 // accessed atomically - offset of last message acknowledged by broker -} - -func NewLocalPublisher() *LocalPublisher { - now := time.Now().UnixNano() - publisher := &LocalPublisher{} - atomic.StoreInt64(&publisher.connectTimeNs, now) - atomic.StoreInt64(&publisher.lastSeenTimeNs, now) - atomic.StoreInt64(&publisher.lastPublishedOffset, 0) - atomic.StoreInt64(&publisher.lastAckedOffset, 0) - return publisher -} -func (p *LocalPublisher) SignalShutdown() { -} - -// UpdateLastSeen updates the last activity time for this publisher -func (p *LocalPublisher) UpdateLastSeen() { - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// UpdatePublishedOffset updates the offset of the last message published by this publisher -func (p *LocalPublisher) UpdatePublishedOffset(offset int64) { - atomic.StoreInt64(&p.lastPublishedOffset, offset) - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// UpdateAckedOffset updates the offset of the last message acknowledged by the broker for this publisher -func (p *LocalPublisher) UpdateAckedOffset(offset int64) { - atomic.StoreInt64(&p.lastAckedOffset, offset) - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// GetTimestamps returns the connect and last seen timestamps safely -func (p *LocalPublisher) GetTimestamps() (connectTimeNs, lastSeenTimeNs int64) { - return atomic.LoadInt64(&p.connectTimeNs), atomic.LoadInt64(&p.lastSeenTimeNs) -} - -// GetOffsets returns the published and acknowledged offsets safely -func (p *LocalPublisher) GetOffsets() (lastPublishedOffset, lastAckedOffset int64) { - return atomic.LoadInt64(&p.lastPublishedOffset), atomic.LoadInt64(&p.lastAckedOffset) -} - -func NewLocalPartitionPublishers() *LocalPartitionPublishers { - return &LocalPartitionPublishers{ - publishers: make(map[string]*LocalPublisher), - } -} - -func (p *LocalPartitionPublishers) AddPublisher(clientName string, publisher *LocalPublisher) { - p.publishersLock.Lock() - defer p.publishersLock.Unlock() - - p.publishers[clientName] = publisher -} - -func (p *LocalPartitionPublishers) RemovePublisher(clientName string) { - p.publishersLock.Lock() - defer p.publishersLock.Unlock() - - delete(p.publishers, clientName) -} - -func (p *LocalPartitionPublishers) SignalShutdown() { - p.publishersLock.RLock() - defer p.publishersLock.RUnlock() - - for _, publisher := range p.publishers { - publisher.SignalShutdown() - } -} - -func (p *LocalPartitionPublishers) Size() int { - p.publishersLock.RLock() - defer p.publishersLock.RUnlock() - - return len(p.publishers) -} - -// GetPublisherNames returns the names of all publishers -func (p *LocalPartitionPublishers) GetPublisherNames() []string { - p.publishersLock.RLock() - defer p.publishersLock.RUnlock() - - names := make([]string, 0, len(p.publishers)) - for name := range p.publishers { - names = append(names, name) - } - return names -} - -// ForEachPublisher iterates over all publishers -func (p *LocalPartitionPublishers) ForEachPublisher(fn func(name string, publisher *LocalPublisher)) { - p.publishersLock.RLock() - defer p.publishersLock.RUnlock() - - for name, publisher := range p.publishers { - fn(name, publisher) - } -} diff --git a/weed/mq/topic/local_partition_subscribe_test.go b/weed/mq/topic/local_partition_subscribe_test.go deleted file mode 100644 index 3f49432e5..000000000 --- a/weed/mq/topic/local_partition_subscribe_test.go +++ /dev/null @@ -1,566 +0,0 @@ -package topic - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" -) - -// MockLogBuffer provides a controllable log buffer for testing -type MockLogBuffer struct { - // In-memory data - memoryEntries []*filer_pb.LogEntry - memoryStartTime time.Time - memoryStopTime time.Time - memoryStartOffset int64 - memoryStopOffset int64 - - // Disk data - diskEntries []*filer_pb.LogEntry - diskStartTime time.Time - diskStopTime time.Time - diskStartOffset int64 - diskStopOffset int64 - - // Behavior control - diskReadDelay time.Duration - memoryReadDelay time.Duration - diskReadError error - memoryReadError error -} - -// MockReadFromDiskFn simulates reading from disk -func (m *MockLogBuffer) MockReadFromDiskFn(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (log_buffer.MessagePosition, bool, error) { - if m.diskReadDelay > 0 { - time.Sleep(m.diskReadDelay) - } - - if m.diskReadError != nil { - return startPosition, false, m.diskReadError - } - - isOffsetBased := startPosition.IsOffsetBased - lastPosition := startPosition - isDone := false - - for _, entry := range m.diskEntries { - // Filter based on mode - if isOffsetBased { - if entry.Offset < startPosition.Offset { - continue - } - } else { - entryTime := time.Unix(0, entry.TsNs) - if entryTime.Before(startPosition.Time) { - continue - } - } - - // Apply stopTsNs filter - if stopTsNs > 0 && entry.TsNs > stopTsNs { - isDone = true - break - } - - // Call handler - done, err := eachLogEntryFn(entry) - if err != nil { - return lastPosition, false, err - } - if done { - isDone = true - break - } - - // Update position - if isOffsetBased { - lastPosition = log_buffer.NewMessagePosition(entry.TsNs, entry.Offset+1) - } else { - lastPosition = log_buffer.NewMessagePosition(entry.TsNs, entry.Offset) - } - } - - return lastPosition, isDone, nil -} - -// MockLoopProcessLogDataWithOffset simulates reading from memory with offset -func (m *MockLogBuffer) MockLoopProcessLogDataWithOffset(readerName string, startPosition log_buffer.MessagePosition, stopTsNs int64, waitForDataFn func() bool, eachLogDataFn log_buffer.EachLogEntryWithOffsetFuncType) (log_buffer.MessagePosition, bool, error) { - if m.memoryReadDelay > 0 { - time.Sleep(m.memoryReadDelay) - } - - if m.memoryReadError != nil { - return startPosition, false, m.memoryReadError - } - - lastPosition := startPosition - isDone := false - - // Check if requested offset is in memory - if startPosition.Offset < m.memoryStartOffset { - // Data is on disk - return startPosition, false, log_buffer.ResumeFromDiskError - } - - for _, entry := range m.memoryEntries { - // Filter by offset - if entry.Offset < startPosition.Offset { - continue - } - - // Apply stopTsNs filter - if stopTsNs > 0 && entry.TsNs > stopTsNs { - isDone = true - break - } - - // Call handler - done, err := eachLogDataFn(entry, entry.Offset) - if err != nil { - return lastPosition, false, err - } - if done { - isDone = true - break - } - - // Update position - lastPosition = log_buffer.NewMessagePosition(entry.TsNs, entry.Offset+1) - } - - return lastPosition, isDone, nil -} - -// Helper to create test entries -func createTestEntry(offset int64, timestamp time.Time, key, value string) *filer_pb.LogEntry { - return &filer_pb.LogEntry{ - TsNs: timestamp.UnixNano(), - Offset: offset, - Key: []byte(key), - Data: []byte(value), - } -} - -// TestOffsetBasedSubscribe_AllDataInMemory tests reading when all data is in memory -func TestOffsetBasedSubscribe_AllDataInMemory(t *testing.T) { - baseTime := time.Now() - - mock := &MockLogBuffer{ - memoryEntries: []*filer_pb.LogEntry{ - createTestEntry(0, baseTime, "key0", "value0"), - createTestEntry(1, baseTime.Add(1*time.Second), "key1", "value1"), - createTestEntry(2, baseTime.Add(2*time.Second), "key2", "value2"), - createTestEntry(3, baseTime.Add(3*time.Second), "key3", "value3"), - }, - memoryStartOffset: 0, - memoryStopOffset: 3, - diskEntries: []*filer_pb.LogEntry{}, // No disk data - } - - // Test reading from offset 0 - t.Run("ReadFromOffset0", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(0) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - // Simulate the Subscribe logic - // 1. Try disk read first - pos, done, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - if done { - t.Fatal("Should not be done after disk read") - } - - // 2. Read from memory - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", pos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != nil && err != log_buffer.ResumeFromDiskError { - t.Fatalf("Memory read failed: %v", err) - } - - // Verify we got all offsets in order - expected := []int64{0, 1, 2, 3} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d", len(expected), len(receivedOffsets)) - } - for i, offset := range receivedOffsets { - if offset != expected[i] { - t.Errorf("Offset[%d]: expected %d, got %d", i, expected[i], offset) - } - } - }) - - // Test reading from offset 2 - t.Run("ReadFromOffset2", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(2) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // Should skip disk and go straight to memory - pos, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", pos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != nil && err != log_buffer.ResumeFromDiskError { - t.Fatalf("Memory read failed: %v", err) - } - - // Verify we got offsets 2, 3 - expected := []int64{2, 3} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d", len(expected), len(receivedOffsets)) - } - for i, offset := range receivedOffsets { - if offset != expected[i] { - t.Errorf("Offset[%d]: expected %d, got %d", i, expected[i], offset) - } - } - }) -} - -// TestOffsetBasedSubscribe_DataOnDisk tests reading when data is on disk -func TestOffsetBasedSubscribe_DataOnDisk(t *testing.T) { - baseTime := time.Now() - - mock := &MockLogBuffer{ - // Offsets 0-9 on disk - diskEntries: []*filer_pb.LogEntry{ - createTestEntry(0, baseTime, "key0", "value0"), - createTestEntry(1, baseTime.Add(1*time.Second), "key1", "value1"), - createTestEntry(2, baseTime.Add(2*time.Second), "key2", "value2"), - createTestEntry(3, baseTime.Add(3*time.Second), "key3", "value3"), - createTestEntry(4, baseTime.Add(4*time.Second), "key4", "value4"), - createTestEntry(5, baseTime.Add(5*time.Second), "key5", "value5"), - createTestEntry(6, baseTime.Add(6*time.Second), "key6", "value6"), - createTestEntry(7, baseTime.Add(7*time.Second), "key7", "value7"), - createTestEntry(8, baseTime.Add(8*time.Second), "key8", "value8"), - createTestEntry(9, baseTime.Add(9*time.Second), "key9", "value9"), - }, - diskStartOffset: 0, - diskStopOffset: 9, - // Offsets 10-12 in memory - memoryEntries: []*filer_pb.LogEntry{ - createTestEntry(10, baseTime.Add(10*time.Second), "key10", "value10"), - createTestEntry(11, baseTime.Add(11*time.Second), "key11", "value11"), - createTestEntry(12, baseTime.Add(12*time.Second), "key12", "value12"), - }, - memoryStartOffset: 10, - memoryStopOffset: 12, - } - - // Test reading from offset 0 (on disk) - t.Run("ReadFromOffset0_OnDisk", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(0) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // 1. Read from disk (should get 0-9) - pos, done, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - if done { - t.Fatal("Should not be done after disk read") - } - - // 2. Read from memory (should get 10-12) - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", pos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != nil && err != log_buffer.ResumeFromDiskError { - t.Fatalf("Memory read failed: %v", err) - } - - // Verify we got all offsets 0-12 in order - expected := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d: %v", len(expected), len(receivedOffsets), receivedOffsets) - } - for i, offset := range receivedOffsets { - if i < len(expected) && offset != expected[i] { - t.Errorf("Offset[%d]: expected %d, got %d", i, expected[i], offset) - } - } - }) - - // Test reading from offset 5 (on disk, middle) - t.Run("ReadFromOffset5_OnDisk", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(5) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // 1. Read from disk (should get 5-9) - pos, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - - // 2. Read from memory (should get 10-12) - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", pos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != nil && err != log_buffer.ResumeFromDiskError { - t.Fatalf("Memory read failed: %v", err) - } - - // Verify we got offsets 5-12 - expected := []int64{5, 6, 7, 8, 9, 10, 11, 12} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d: %v", len(expected), len(receivedOffsets), receivedOffsets) - } - for i, offset := range receivedOffsets { - if i < len(expected) && offset != expected[i] { - t.Errorf("Offset[%d]: expected %d, got %d", i, expected[i], offset) - } - } - }) - - // Test reading from offset 11 (in memory) - t.Run("ReadFromOffset11_InMemory", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(11) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // 1. Try disk read (should get nothing) - pos, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - - // 2. Read from memory (should get 11-12) - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", pos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != nil && err != log_buffer.ResumeFromDiskError { - t.Fatalf("Memory read failed: %v", err) - } - - // Verify we got offsets 11-12 - expected := []int64{11, 12} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d: %v", len(expected), len(receivedOffsets), receivedOffsets) - } - for i, offset := range receivedOffsets { - if i < len(expected) && offset != expected[i] { - t.Errorf("Offset[%d]: expected %d, got %d", i, expected[i], offset) - } - } - }) -} - -// TestTimestampBasedSubscribe tests timestamp-based reading -func TestTimestampBasedSubscribe(t *testing.T) { - baseTime := time.Now() - - mock := &MockLogBuffer{ - diskEntries: []*filer_pb.LogEntry{ - createTestEntry(0, baseTime, "key0", "value0"), - createTestEntry(1, baseTime.Add(10*time.Second), "key1", "value1"), - createTestEntry(2, baseTime.Add(20*time.Second), "key2", "value2"), - }, - memoryEntries: []*filer_pb.LogEntry{ - createTestEntry(3, baseTime.Add(30*time.Second), "key3", "value3"), - createTestEntry(4, baseTime.Add(40*time.Second), "key4", "value4"), - }, - } - - // Test reading from beginning - t.Run("ReadFromBeginning", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePosition(baseTime.UnixNano(), -1) // Timestamp-based - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - // Read from disk - _, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - - // In real scenario, would then read from memory using LoopProcessLogData - // For this test, just verify disk gave us 0-2 - expected := []int64{0, 1, 2} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d", len(expected), len(receivedOffsets)) - } - }) - - // Test reading from middle timestamp - t.Run("ReadFromMiddleTimestamp", func(t *testing.T) { - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePosition(baseTime.Add(15*time.Second).UnixNano(), -1) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - // Read from disk - _, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Disk read failed: %v", err) - } - - // Should get offset 2 only (timestamp at 20s >= 15s, offset 1 at 10s is excluded) - expected := []int64{2} - if len(receivedOffsets) != len(expected) { - t.Errorf("Expected %d offsets, got %d: %v", len(expected), len(receivedOffsets), receivedOffsets) - } - }) -} - -// TestConcurrentSubscribers tests multiple concurrent subscribers -func TestConcurrentSubscribers(t *testing.T) { - baseTime := time.Now() - - mock := &MockLogBuffer{ - diskEntries: []*filer_pb.LogEntry{ - createTestEntry(0, baseTime, "key0", "value0"), - createTestEntry(1, baseTime.Add(1*time.Second), "key1", "value1"), - createTestEntry(2, baseTime.Add(2*time.Second), "key2", "value2"), - }, - memoryEntries: []*filer_pb.LogEntry{ - createTestEntry(3, baseTime.Add(3*time.Second), "key3", "value3"), - createTestEntry(4, baseTime.Add(4*time.Second), "key4", "value4"), - }, - memoryStartOffset: 3, - memoryStopOffset: 4, - } - - var wg sync.WaitGroup - results := make(map[string][]int64) - var mu sync.Mutex - - // Spawn 3 concurrent subscribers - for i := 0; i < 3; i++ { - wg.Add(1) - subscriberName := fmt.Sprintf("subscriber-%d", i) - - go func(name string) { - defer wg.Done() - - var receivedOffsets []int64 - startPos := log_buffer.NewMessagePositionFromOffset(0) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - receivedOffsets = append(receivedOffsets, entry.Offset) - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // Read from disk - pos, _, _ := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - - // Read from memory - mock.MockLoopProcessLogDataWithOffset(name, pos, 0, func() bool { return true }, eachLogWithOffsetFn) - - mu.Lock() - results[name] = receivedOffsets - mu.Unlock() - }(subscriberName) - } - - wg.Wait() - - // Verify all subscribers got the same data - expected := []int64{0, 1, 2, 3, 4} - for name, offsets := range results { - if len(offsets) != len(expected) { - t.Errorf("%s: Expected %d offsets, got %d", name, len(expected), len(offsets)) - continue - } - for i, offset := range offsets { - if offset != expected[i] { - t.Errorf("%s: Offset[%d]: expected %d, got %d", name, i, expected[i], offset) - } - } - } -} - -// TestResumeFromDiskError tests handling of ResumeFromDiskError -func TestResumeFromDiskError(t *testing.T) { - baseTime := time.Now() - - mock := &MockLogBuffer{ - diskEntries: []*filer_pb.LogEntry{ - createTestEntry(0, baseTime, "key0", "value0"), - createTestEntry(1, baseTime.Add(1*time.Second), "key1", "value1"), - }, - memoryEntries: []*filer_pb.LogEntry{ - createTestEntry(10, baseTime.Add(10*time.Second), "key10", "value10"), - }, - memoryStartOffset: 10, - memoryStopOffset: 10, - } - - // Try to read offset 5, which is between disk (0-1) and memory (10) - // This should trigger ResumeFromDiskError from memory read - startPos := log_buffer.NewMessagePositionFromOffset(5) - - eachLogFn := func(entry *filer_pb.LogEntry) (bool, error) { - return false, nil - } - - eachLogWithOffsetFn := func(entry *filer_pb.LogEntry, offset int64) (bool, error) { - return eachLogFn(entry) - } - - // Disk read should return no data (offset 5 > disk end) - _, _, err := mock.MockReadFromDiskFn(startPos, 0, eachLogFn) - if err != nil { - t.Fatalf("Unexpected disk read error: %v", err) - } - - // Memory read should return ResumeFromDiskError (offset 5 < memory start) - _, _, err = mock.MockLoopProcessLogDataWithOffset("test", startPos, 0, func() bool { return true }, eachLogWithOffsetFn) - if err != log_buffer.ResumeFromDiskError { - t.Errorf("Expected ResumeFromDiskError, got: %v", err) - } -} diff --git a/weed/mq/topic/local_partition_subscribers.go b/weed/mq/topic/local_partition_subscribers.go deleted file mode 100644 index 9c5d44adf..000000000 --- a/weed/mq/topic/local_partition_subscribers.go +++ /dev/null @@ -1,124 +0,0 @@ -package topic - -import ( - "sync" - "sync/atomic" - "time" -) - -type LocalPartitionSubscribers struct { - Subscribers map[string]*LocalSubscriber - SubscribersLock sync.RWMutex -} -type LocalSubscriber struct { - connectTimeNs int64 // accessed atomically - lastSeenTimeNs int64 // accessed atomically - lastReceivedOffset int64 // accessed atomically - offset of last message received - lastAckedOffset int64 // accessed atomically - offset of last message acknowledged - stopCh chan struct{} -} - -func NewLocalSubscriber() *LocalSubscriber { - now := time.Now().UnixNano() - subscriber := &LocalSubscriber{ - stopCh: make(chan struct{}, 1), - } - atomic.StoreInt64(&subscriber.connectTimeNs, now) - atomic.StoreInt64(&subscriber.lastSeenTimeNs, now) - atomic.StoreInt64(&subscriber.lastReceivedOffset, 0) - atomic.StoreInt64(&subscriber.lastAckedOffset, 0) - return subscriber -} -func (p *LocalSubscriber) SignalShutdown() { - close(p.stopCh) -} - -// UpdateLastSeen updates the last activity time for this subscriber -func (p *LocalSubscriber) UpdateLastSeen() { - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// UpdateReceivedOffset updates the offset of the last message received by this subscriber -func (p *LocalSubscriber) UpdateReceivedOffset(offset int64) { - atomic.StoreInt64(&p.lastReceivedOffset, offset) - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// UpdateAckedOffset updates the offset of the last message acknowledged by this subscriber -func (p *LocalSubscriber) UpdateAckedOffset(offset int64) { - atomic.StoreInt64(&p.lastAckedOffset, offset) - atomic.StoreInt64(&p.lastSeenTimeNs, time.Now().UnixNano()) -} - -// GetTimestamps returns the connect and last seen timestamps safely -func (p *LocalSubscriber) GetTimestamps() (connectTimeNs, lastSeenTimeNs int64) { - return atomic.LoadInt64(&p.connectTimeNs), atomic.LoadInt64(&p.lastSeenTimeNs) -} - -// GetOffsets returns the received and acknowledged offsets safely -func (p *LocalSubscriber) GetOffsets() (lastReceivedOffset, lastAckedOffset int64) { - return atomic.LoadInt64(&p.lastReceivedOffset), atomic.LoadInt64(&p.lastAckedOffset) -} - -// GetCurrentOffset returns the acknowledged offset (for compatibility) -func (p *LocalSubscriber) GetCurrentOffset() int64 { - return atomic.LoadInt64(&p.lastAckedOffset) -} - -func NewLocalPartitionSubscribers() *LocalPartitionSubscribers { - return &LocalPartitionSubscribers{ - Subscribers: make(map[string]*LocalSubscriber), - } -} - -func (p *LocalPartitionSubscribers) AddSubscriber(clientName string, Subscriber *LocalSubscriber) { - p.SubscribersLock.Lock() - defer p.SubscribersLock.Unlock() - - p.Subscribers[clientName] = Subscriber -} - -func (p *LocalPartitionSubscribers) RemoveSubscriber(clientName string) { - p.SubscribersLock.Lock() - defer p.SubscribersLock.Unlock() - - delete(p.Subscribers, clientName) -} - -func (p *LocalPartitionSubscribers) SignalShutdown() { - p.SubscribersLock.RLock() - defer p.SubscribersLock.RUnlock() - - for _, Subscriber := range p.Subscribers { - Subscriber.SignalShutdown() - } -} - -func (p *LocalPartitionSubscribers) Size() int { - p.SubscribersLock.RLock() - defer p.SubscribersLock.RUnlock() - - return len(p.Subscribers) -} - -// GetSubscriberNames returns the names of all subscribers -func (p *LocalPartitionSubscribers) GetSubscriberNames() []string { - p.SubscribersLock.RLock() - defer p.SubscribersLock.RUnlock() - - names := make([]string, 0, len(p.Subscribers)) - for name := range p.Subscribers { - names = append(names, name) - } - return names -} - -// ForEachSubscriber iterates over all subscribers -func (p *LocalPartitionSubscribers) ForEachSubscriber(fn func(name string, subscriber *LocalSubscriber)) { - p.SubscribersLock.RLock() - defer p.SubscribersLock.RUnlock() - - for name, subscriber := range p.Subscribers { - fn(name, subscriber) - } -} diff --git a/weed/mq/topic/local_topic.go b/weed/mq/topic/local_topic.go deleted file mode 100644 index 5a5086322..000000000 --- a/weed/mq/topic/local_topic.go +++ /dev/null @@ -1,113 +0,0 @@ -package topic - -import ( - "sync" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -type LocalTopic struct { - Topic - Partitions []*LocalPartition - partitionLock sync.RWMutex -} - -func NewLocalTopic(topic Topic) *LocalTopic { - return &LocalTopic{ - Topic: topic, - Partitions: make([]*LocalPartition, 0), - } -} - -func (localTopic *LocalTopic) findPartition(partition Partition) *LocalPartition { - localTopic.partitionLock.RLock() - defer localTopic.partitionLock.RUnlock() - - glog.V(4).Infof("findPartition searching for %s in %d partitions", partition.String(), len(localTopic.Partitions)) - for i, localPartition := range localTopic.Partitions { - glog.V(4).Infof("Comparing partition[%d]: %s with target %s", i, localPartition.Partition.String(), partition.String()) - if localPartition.Partition.LogicalEquals(partition) { - glog.V(4).Infof("Found matching partition at index %d", i) - return localPartition - } - } - glog.V(4).Infof("No matching partition found for %s", partition.String()) - return nil -} -func (localTopic *LocalTopic) removePartition(partition Partition) bool { - localTopic.partitionLock.Lock() - defer localTopic.partitionLock.Unlock() - - foundPartitionIndex := -1 - for i, localPartition := range localTopic.Partitions { - if localPartition.Partition.LogicalEquals(partition) { - foundPartitionIndex = i - localPartition.Shutdown() - break - } - } - if foundPartitionIndex == -1 { - return false - } - localTopic.Partitions = append(localTopic.Partitions[:foundPartitionIndex], localTopic.Partitions[foundPartitionIndex+1:]...) - return true -} -func (localTopic *LocalTopic) addPartition(localPartition *LocalPartition) { - localTopic.partitionLock.Lock() - defer localTopic.partitionLock.Unlock() - for _, partition := range localTopic.Partitions { - if localPartition.Partition.LogicalEquals(partition.Partition) { - return - } - } - localTopic.Partitions = append(localTopic.Partitions, localPartition) -} - -func (localTopic *LocalTopic) closePartitionPublishers(unixTsNs int64) bool { - var wg sync.WaitGroup - for _, localPartition := range localTopic.Partitions { - if localPartition.UnixTimeNs != unixTsNs { - continue - } - wg.Add(1) - go func(localPartition *LocalPartition) { - defer wg.Done() - localPartition.closePublishers() - }(localPartition) - } - wg.Wait() - return true -} - -func (localTopic *LocalTopic) closePartitionSubscribers(unixTsNs int64) bool { - var wg sync.WaitGroup - for _, localPartition := range localTopic.Partitions { - if localPartition.UnixTimeNs != unixTsNs { - continue - } - wg.Add(1) - go func(localPartition *LocalPartition) { - defer wg.Done() - localPartition.closeSubscribers() - }(localPartition) - } - wg.Wait() - return true -} - -func (localTopic *LocalTopic) WaitUntilNoPublishers() { - for { - var wg sync.WaitGroup - for _, localPartition := range localTopic.Partitions { - wg.Add(1) - go func(localPartition *LocalPartition) { - defer wg.Done() - localPartition.WaitUntilNoPublishers() - }(localPartition) - } - wg.Wait() - if len(localTopic.Partitions) == 0 { - return - } - } -} diff --git a/weed/mq/topic/partition.go b/weed/mq/topic/partition.go deleted file mode 100644 index 658ec85c4..000000000 --- a/weed/mq/topic/partition.go +++ /dev/null @@ -1,116 +0,0 @@ -package topic - -import ( - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -const PartitionCount = 4096 - -type Partition struct { - RangeStart int32 - RangeStop int32 // exclusive - RingSize int32 - UnixTimeNs int64 // in nanoseconds -} - -func NewPartition(rangeStart, rangeStop, ringSize int32, unixTimeNs int64) *Partition { - return &Partition{ - RangeStart: rangeStart, - RangeStop: rangeStop, - RingSize: ringSize, - UnixTimeNs: unixTimeNs, - } -} - -func (partition Partition) Equals(other Partition) bool { - if partition.RangeStart != other.RangeStart { - return false - } - if partition.RangeStop != other.RangeStop { - return false - } - if partition.RingSize != other.RingSize { - return false - } - if partition.UnixTimeNs != other.UnixTimeNs { - return false - } - return true -} - -// LogicalEquals compares only the partition boundaries (RangeStart, RangeStop) -// This is useful when comparing partitions that may have different timestamps or ring sizes -// but represent the same logical partition range -func (partition Partition) LogicalEquals(other Partition) bool { - return partition.RangeStart == other.RangeStart && partition.RangeStop == other.RangeStop -} - -func FromPbPartition(partition *schema_pb.Partition) Partition { - return Partition{ - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - RingSize: partition.RingSize, - UnixTimeNs: partition.UnixTimeNs, - } -} - -func SplitPartitions(targetCount int32, ts int64) []*Partition { - partitions := make([]*Partition, 0, targetCount) - partitionSize := PartitionCount / targetCount - for i := int32(0); i < targetCount; i++ { - partitionStop := (i + 1) * partitionSize - if i == targetCount-1 { - partitionStop = PartitionCount - } - partitions = append(partitions, &Partition{ - RangeStart: i * partitionSize, - RangeStop: partitionStop, - RingSize: PartitionCount, - UnixTimeNs: ts, - }) - } - return partitions -} - -func (partition Partition) ToPbPartition() *schema_pb.Partition { - return &schema_pb.Partition{ - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - RingSize: partition.RingSize, - UnixTimeNs: partition.UnixTimeNs, - } -} - -func (partition Partition) Overlaps(partition2 Partition) bool { - if partition.RangeStart >= partition2.RangeStop { - return false - } - if partition.RangeStop <= partition2.RangeStart { - return false - } - return true -} - -func (partition Partition) String() string { - return fmt.Sprintf("%04d-%04d", partition.RangeStart, partition.RangeStop) -} - -func ParseTopicVersion(name string) (t time.Time, err error) { - return time.Parse(PartitionGenerationFormat, name) -} - -func ParsePartitionBoundary(name string) (start, stop int32) { - _, err := fmt.Sscanf(name, "%04d-%04d", &start, &stop) - if err != nil { - return 0, 0 - } - return start, stop -} - -func PartitionDir(t Topic, p Partition) string { - partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(PartitionGenerationFormat) - return fmt.Sprintf("%s/%s/%04d-%04d", t.Dir(), partitionGeneration, p.RangeStart, p.RangeStop) -} diff --git a/weed/mq/topic/topic.go b/weed/mq/topic/topic.go deleted file mode 100644 index 6fb0f0ce9..000000000 --- a/weed/mq/topic/topic.go +++ /dev/null @@ -1,169 +0,0 @@ -package topic - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - jsonpb "google.golang.org/protobuf/encoding/protojson" -) - -type Topic struct { - Namespace string - Name string -} - -func NewTopic(namespace string, name string) Topic { - return Topic{ - Namespace: namespace, - Name: name, - } -} -func FromPbTopic(topic *schema_pb.Topic) Topic { - return Topic{ - Namespace: topic.Namespace, - Name: topic.Name, - } -} - -func (t Topic) ToPbTopic() *schema_pb.Topic { - return &schema_pb.Topic{ - Namespace: t.Namespace, - Name: t.Name, - } -} - -func (t Topic) String() string { - return fmt.Sprintf("%s.%s", t.Namespace, t.Name) -} - -func (t Topic) Dir() string { - return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name) -} - -func (t Topic) ReadConfFile(client filer_pb.SeaweedFilerClient) (*mq_pb.ConfigureTopicResponse, error) { - data, err := filer.ReadInsideFiler(client, t.Dir(), filer.TopicConfFile) - if errors.Is(err, filer_pb.ErrNotFound) { - return nil, err - } - if err != nil { - return nil, fmt.Errorf("read topic.conf of %v: %w", t, err) - } - // parse into filer conf object - conf := &mq_pb.ConfigureTopicResponse{} - if err = jsonpb.Unmarshal(data, conf); err != nil { - return nil, fmt.Errorf("unmarshal topic %v conf: %w", t, err) - } - return conf, nil -} - -// ReadConfFileWithMetadata reads the topic configuration and returns it along with file metadata -func (t Topic) ReadConfFileWithMetadata(client filer_pb.SeaweedFilerClient) (*mq_pb.ConfigureTopicResponse, int64, int64, error) { - // Use LookupDirectoryEntry to get both content and metadata - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: t.Dir(), - Name: filer.TopicConfFile, - } - - resp, err := filer_pb.LookupEntry(context.Background(), client, request) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - return nil, 0, 0, err - } - return nil, 0, 0, fmt.Errorf("lookup topic.conf of %v: %w", t, err) - } - - // Get file metadata - var createdAtNs, modifiedAtNs int64 - if resp.Entry.Attributes != nil { - createdAtNs = resp.Entry.Attributes.Crtime * 1e9 // convert seconds to nanoseconds - modifiedAtNs = resp.Entry.Attributes.Mtime * 1e9 // convert seconds to nanoseconds - } - - // Parse the configuration - conf := &mq_pb.ConfigureTopicResponse{} - if err = jsonpb.Unmarshal(resp.Entry.Content, conf); err != nil { - return nil, 0, 0, fmt.Errorf("unmarshal topic %v conf: %w", t, err) - } - - return conf, createdAtNs, modifiedAtNs, nil -} - -func (t Topic) WriteConfFile(client filer_pb.SeaweedFilerClient, conf *mq_pb.ConfigureTopicResponse) error { - var buf bytes.Buffer - filer.ProtoToText(&buf, conf) - if err := filer.SaveInsideFiler(client, t.Dir(), filer.TopicConfFile, buf.Bytes()); err != nil { - return fmt.Errorf("save topic %v conf: %w", t, err) - } - return nil -} - -// DiscoverPartitions discovers all partition directories for a topic by scanning the filesystem -// This centralizes partition discovery logic used across query engine, shell commands, etc. -func (t Topic) DiscoverPartitions(ctx context.Context, filerClient filer_pb.FilerClient) ([]string, error) { - var partitionPaths []string - - // Scan the topic directory for version directories (e.g., v2025-09-01-07-16-34) - err := filer_pb.ReadDirAllEntries(ctx, filerClient, util.FullPath(t.Dir()), "", func(versionEntry *filer_pb.Entry, isLast bool) error { - if !versionEntry.IsDirectory { - return nil // Skip non-directories - } - - // Parse version timestamp from directory name (e.g., "v2025-09-01-07-16-34") - if !IsValidVersionDirectory(versionEntry.Name) { - // Skip directories that don't match the version format - return nil - } - - // Scan partition directories within this version (e.g., 0000-0630) - versionDir := fmt.Sprintf("%s/%s", t.Dir(), versionEntry.Name) - return filer_pb.ReadDirAllEntries(ctx, filerClient, util.FullPath(versionDir), "", func(partitionEntry *filer_pb.Entry, isLast bool) error { - if !partitionEntry.IsDirectory { - return nil // Skip non-directories - } - - // Parse partition boundary from directory name (e.g., "0000-0630") - if !IsValidPartitionDirectory(partitionEntry.Name) { - return nil // Skip invalid partition names - } - - // Add this partition path to the list - partitionPath := fmt.Sprintf("%s/%s", versionDir, partitionEntry.Name) - partitionPaths = append(partitionPaths, partitionPath) - return nil - }) - }) - - return partitionPaths, err -} - -// IsValidVersionDirectory checks if a directory name matches the topic version format -// Format: v2025-09-01-07-16-34 -func IsValidVersionDirectory(name string) bool { - if !strings.HasPrefix(name, "v") || len(name) != 20 { - return false - } - - // Try to parse the timestamp part - timestampStr := name[1:] // Remove 'v' prefix - _, err := time.Parse("2006-01-02-15-04-05", timestampStr) - return err == nil -} - -// IsValidPartitionDirectory checks if a directory name matches the partition boundary format -// Format: 0000-0630 (rangeStart-rangeStop) -func IsValidPartitionDirectory(name string) bool { - // Use existing ParsePartitionBoundary function to validate - start, stop := ParsePartitionBoundary(name) - - // Valid partition ranges should have start < stop (and not both be 0, which indicates parse error) - return start < stop && start >= 0 -} diff --git a/weed/mq/topic/topic_partition.go b/weed/mq/topic/topic_partition.go deleted file mode 100644 index b14bc9c46..000000000 --- a/weed/mq/topic/topic_partition.go +++ /dev/null @@ -1,12 +0,0 @@ -package topic - -import "fmt" - -type TopicPartition struct { - Topic - Partition -} - -func (tp *TopicPartition) TopicPartitionId() string { - return fmt.Sprintf("%v.%v-%04d-%04d", tp.Namespace, tp.Topic, tp.RangeStart, tp.RangeStop) -} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index fedcf3566..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -8,10 +8,10 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func init() { @@ -49,7 +49,7 @@ func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queue sess, err := session.NewSession(config) if err != nil { - return fmt.Errorf("create aws session: %w", err) + return fmt.Errorf("create aws session: %v", err) } k.svc = sqs.New(sess) @@ -70,10 +70,7 @@ func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queue func (k *AwsSqsPub) SendMessage(key string, message proto.Message) (err error) { - text, err := proto.Marshal(message) - if err != nil { - return fmt.Errorf("send message marshal %+v: %v", message, err) - } + text := proto.MarshalTextString(message) _, err = k.svc.SendMessage(&sqs.SendMessageInput{ DelaySeconds: aws.Int64(10), @@ -83,7 +80,7 @@ func (k *AwsSqsPub) SendMessage(key string, message proto.Message) (err error) { StringValue: aws.String(key), }, }, - MessageBody: aws.String(string(text)), + MessageBody: aws.String(text), QueueUrl: &k.queueUrl, }) diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 1c620f2e6..541a453e9 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -1,9 +1,9 @@ package notification import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) type MessageQueue interface { diff --git a/weed/notification/gocdk_pub_sub/doc.go b/weed/notification/gocdk_pub_sub/doc.go index 831ef289d..d7fbb9f78 100644 --- a/weed/notification/gocdk_pub_sub/doc.go +++ b/weed/notification/gocdk_pub_sub/doc.go @@ -1,7 +1,9 @@ /* + Package gocdk_pub_sub is for Azure Service Bus and RabbitMQ. The referenced "gocloud.dev/pubsub" library is too big when compiled. So this is only compiled in "make full_install". + */ package gocdk_pub_sub diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index 131345f9c..f31b6997e 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -20,19 +20,18 @@ package gocdk_pub_sub import ( "context" "fmt" - amqp "github.com/rabbitmq/amqp091-go" + "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" "gocloud.dev/pubsub/rabbitpubsub" - "google.golang.org/protobuf/proto" "net/url" "path" - "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -50,44 +49,33 @@ func getPath(rawUrl string) string { } type GoCDKPubSub struct { - topicURL string - topic *pubsub.Topic - topicLock sync.RWMutex + topicURL string + topic *pubsub.Topic } func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) setTopic(topic *pubsub.Topic) { - k.topicLock.Lock() - k.topic = topic - k.topicLock.Unlock() - k.doReconnect() -} - func (k *GoCDKPubSub) doReconnect() { var conn *amqp.Connection - k.topicLock.RLock() - defer k.topicLock.RUnlock() if k.topic.As(&conn) { - go func(c *amqp.Connection) { - <-c.NotifyClose(make(chan *amqp.Error)) - c.Close() - k.topicLock.RLock() + go func() { + <-conn.NotifyClose(make(chan *amqp.Error)) + conn.Close() k.topic.Shutdown(context.Background()) - k.topicLock.RUnlock() for { glog.Info("Try reconnect") conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) if err == nil { - k.setTopic(rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil)) + k.topic = rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil) + k.doReconnect() break } glog.Error(err) time.Sleep(time.Second) } - }(conn) + }() } } @@ -98,7 +86,8 @@ func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string if err != nil { glog.Fatalf("Failed to open topic: %v", err) } - k.setTopic(topic) + k.topic = topic + k.doReconnect() return nil } @@ -107,8 +96,6 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { if err != nil { return err } - k.topicLock.RLock() - defer k.topicLock.RUnlock() err = k.topic.Send(context.Background(), &pubsub.Message{ Body: bytes, Metadata: map[string]string{"key": key}, diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index f5593fa48..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -6,11 +6,11 @@ import ( "os" "cloud.google.com/go/pubsub" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" "google.golang.org/api/option" - "google.golang.org/protobuf/proto" ) func init() { diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index 64cb4eaa9..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -2,10 +2,10 @@ package kafka import ( "github.com/Shopify/sarama" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func init() { diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index cc3557fee..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -1,10 +1,10 @@ package kafka import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func init() { diff --git a/weed/notification/webhook/filter.go b/weed/notification/webhook/filter.go deleted file mode 100644 index f346d6c93..000000000 --- a/weed/notification/webhook/filter.go +++ /dev/null @@ -1,64 +0,0 @@ -package webhook - -import ( - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -type filter struct { - eventTypes map[eventType]bool - pathPrefixes []string -} - -func newFilter(cfg *config) *filter { - f := &filter{ - eventTypes: make(map[eventType]bool), - pathPrefixes: cfg.pathPrefixes, - } - - if len(cfg.eventTypes) == 0 { - f.eventTypes[eventTypeCreate] = true - f.eventTypes[eventTypeDelete] = true - f.eventTypes[eventTypeUpdate] = true - f.eventTypes[eventTypeRename] = true - } else { - for _, et := range cfg.eventTypes { - t := eventType(et) - if !t.valid() { - glog.Warningf("invalid event type: %v", t) - - continue - } - - f.eventTypes[t] = true - } - } - - return f -} - -func (f *filter) shouldPublish(key string, notification *filer_pb.EventNotification) bool { - if !f.matchesPath(key) { - return false - } - - eventType := detectEventType(notification) - - return f.eventTypes[eventType] -} - -func (f *filter) matchesPath(key string) bool { - if len(f.pathPrefixes) == 0 { - return true - } - - for _, prefix := range f.pathPrefixes { - if strings.HasPrefix(key, prefix) { - return true - } - } - - return false -} diff --git a/weed/notification/webhook/filter_test.go b/weed/notification/webhook/filter_test.go deleted file mode 100644 index e95a085fe..000000000 --- a/weed/notification/webhook/filter_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package webhook - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -func TestFilterEventTypes(t *testing.T) { - tests := []struct { - name string - eventTypes []string - notification *filer_pb.EventNotification - expectedType eventType - shouldPublish bool - }{ - { - name: "create event - allowed", - eventTypes: []string{"create", "delete"}, - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - }, - expectedType: eventTypeCreate, - shouldPublish: true, - }, - { - name: "create event - not allowed", - eventTypes: []string{"delete", "update"}, - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - }, - expectedType: eventTypeCreate, - shouldPublish: false, - }, - { - name: "delete event - allowed", - eventTypes: []string{"create", "delete"}, - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "test.txt"}, - }, - expectedType: eventTypeDelete, - shouldPublish: true, - }, - { - name: "update event - allowed", - eventTypes: []string{"update"}, - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "test.txt"}, - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - }, - expectedType: eventTypeUpdate, - shouldPublish: true, - }, - { - name: "rename event - allowed", - eventTypes: []string{"rename"}, - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "old.txt"}, - NewEntry: &filer_pb.Entry{Name: "new.txt"}, - NewParentPath: "/new/path", - }, - expectedType: eventTypeRename, - shouldPublish: true, - }, - { - name: "rename event - not allowed", - eventTypes: []string{"create", "delete", "update"}, - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "old.txt"}, - NewEntry: &filer_pb.Entry{Name: "new.txt"}, - NewParentPath: "/new/path", - }, - expectedType: eventTypeRename, - shouldPublish: false, - }, - { - name: "all events allowed when empty", - eventTypes: []string{}, - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - }, - expectedType: eventTypeCreate, - shouldPublish: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := &config{eventTypes: tt.eventTypes} - f := newFilter(cfg) - - eventType := detectEventType(tt.notification) - if eventType != tt.expectedType { - t.Errorf("detectEventType() = %v, want %v", eventType, tt.expectedType) - } - - shouldPublish := f.shouldPublish("/test/path", tt.notification) - if shouldPublish != tt.shouldPublish { - t.Errorf("shouldPublish() = %v, want %v", shouldPublish, tt.shouldPublish) - } - }) - } -} - -func TestFilterPathPrefixes(t *testing.T) { - tests := []struct { - name string - pathPrefixes []string - key string - shouldPublish bool - }{ - { - name: "matches single prefix", - pathPrefixes: []string{"/data/"}, - key: "/data/file.txt", - shouldPublish: true, - }, - { - name: "matches one of multiple prefixes", - pathPrefixes: []string{"/data/", "/logs/", "/tmp/"}, - key: "/logs/app.log", - shouldPublish: true, - }, - { - name: "no match", - pathPrefixes: []string{"/data/", "/logs/"}, - key: "/other/file.txt", - shouldPublish: false, - }, - { - name: "empty prefixes allows all", - pathPrefixes: []string{}, - key: "/any/path/file.txt", - shouldPublish: true, - }, - { - name: "exact prefix match", - pathPrefixes: []string{"/data"}, - key: "/data", - shouldPublish: true, - }, - { - name: "partial match not allowed", - pathPrefixes: []string{"/data/"}, - key: "/database/file.txt", - shouldPublish: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := &config{ - pathPrefixes: tt.pathPrefixes, - eventTypes: []string{"create"}, - } - f := newFilter(cfg) - - notification := &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - } - - shouldPublish := f.shouldPublish(tt.key, notification) - if shouldPublish != tt.shouldPublish { - t.Errorf("shouldPublish() = %v, want %v", shouldPublish, tt.shouldPublish) - } - }) - } -} - -func TestFilterCombined(t *testing.T) { - cfg := &config{ - eventTypes: []string{"create", "update"}, - pathPrefixes: []string{"/data/", "/logs/"}, - } - f := newFilter(cfg) - - tests := []struct { - name string - key string - notification *filer_pb.EventNotification - shouldPublish bool - }{ - { - name: "allowed event and path", - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: true, - }, - { - name: "allowed event but wrong path", - key: "/other/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - { - name: "wrong event but allowed path", - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - { - name: "wrong event and wrong path", - key: "/other/file.txt", - notification: &filer_pb.EventNotification{ - OldEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - shouldPublish := f.shouldPublish(tt.key, tt.notification) - if shouldPublish != tt.shouldPublish { - t.Errorf("shouldPublish() = %v, want %v", shouldPublish, tt.shouldPublish) - } - }) - } -} diff --git a/weed/notification/webhook/http.go b/weed/notification/webhook/http.go deleted file mode 100644 index 6b1a0e26d..000000000 --- a/weed/notification/webhook/http.go +++ /dev/null @@ -1,93 +0,0 @@ -package webhook - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" -) - -type httpClient struct { - endpoint string - token string - timeout time.Duration -} - -func newHTTPClient(cfg *config) (*httpClient, error) { - return &httpClient{ - endpoint: cfg.endpoint, - token: cfg.authBearerToken, - timeout: time.Duration(cfg.timeoutSeconds) * time.Second, - }, nil -} - -func (h *httpClient) sendMessage(message *webhookMessage) error { - // Serialize the protobuf message to JSON for HTTP payload - notificationData, err := json.Marshal(message.Notification) - if err != nil { - return fmt.Errorf("failed to marshal notification: %w", err) - } - - payload := map[string]interface{}{ - "key": message.Key, - "event_type": message.EventType, - "message": json.RawMessage(notificationData), - } - - jsonData, err := json.Marshal(payload) - if err != nil { - return fmt.Errorf("failed to marshal message: %w", err) - } - - req, err := http.NewRequest(http.MethodPost, h.endpoint, bytes.NewBuffer(jsonData)) - if err != nil { - return fmt.Errorf("failed to create request: %w", err) - } - - req.Header.Set("Content-Type", "application/json") - if h.token != "" { - req.Header.Set("Authorization", "Bearer "+h.token) - } - - if h.timeout > 0 { - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - req = req.WithContext(ctx) - } - - resp, err := util_http.Do(req) - if err != nil { - if err = drainResponse(resp); err != nil { - glog.Errorf("failed to drain response: %v", err) - } - - return fmt.Errorf("failed to send request: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("webhook returned status code: %d", resp.StatusCode) - } - - return nil -} - -func drainResponse(resp *http.Response) error { - if resp == nil || resp.Body == nil { - return nil - } - - _, err := io.ReadAll(resp.Body) - - return errors.Join( - err, - resp.Body.Close(), - ) -} diff --git a/weed/notification/webhook/http_test.go b/weed/notification/webhook/http_test.go deleted file mode 100644 index f7ef006ae..000000000 --- a/weed/notification/webhook/http_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package webhook - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" -) - -func init() { - util_http.InitGlobalHttpClient() -} - -func TestHttpClientSendMessage(t *testing.T) { - var receivedPayload map[string]interface{} - var receivedHeaders http.Header - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedHeaders = r.Header - body, _ := io.ReadAll(r.Body) - if err := json.Unmarshal(body, &receivedPayload); err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - cfg := &config{ - endpoint: server.URL, - authBearerToken: "test-token", - } - - client, err := newHTTPClient(cfg) - if err != nil { - t.Fatalf("Failed to create HTTP client: %v", err) - } - - message := &filer_pb.EventNotification{ - OldEntry: nil, - NewEntry: &filer_pb.Entry{ - Name: "test.txt", - IsDirectory: false, - }, - } - - err = client.sendMessage(newWebhookMessage("/test/path", message)) - if err != nil { - t.Fatalf("Failed to send message: %v", err) - } - - if receivedPayload["key"] != "/test/path" { - t.Errorf("Expected key '/test/path', got %v", receivedPayload["key"]) - } - - if receivedPayload["event_type"] != "create" { - t.Errorf("Expected event_type 'create', got %v", receivedPayload["event_type"]) - } - - if receivedPayload["message"] == nil { - t.Error("Expected message to be present") - } - - if receivedHeaders.Get("Content-Type") != "application/json" { - t.Errorf("Expected Content-Type 'application/json', got %s", receivedHeaders.Get("Content-Type")) - } - - expectedAuth := "Bearer test-token" - if receivedHeaders.Get("Authorization") != expectedAuth { - t.Errorf("Expected Authorization '%s', got %s", expectedAuth, receivedHeaders.Get("Authorization")) - } -} - -func TestHttpClientSendMessageWithoutToken(t *testing.T) { - var receivedHeaders http.Header - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedHeaders = r.Header - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - cfg := &config{ - endpoint: server.URL, - authBearerToken: "", - } - - client, err := newHTTPClient(cfg) - if err != nil { - t.Fatalf("Failed to create HTTP client: %v", err) - } - - message := &filer_pb.EventNotification{} - - err = client.sendMessage(newWebhookMessage("/test/path", message)) - if err != nil { - t.Fatalf("Failed to send message: %v", err) - } - - if receivedHeaders.Get("Authorization") != "" { - t.Errorf("Expected no Authorization header, got %s", receivedHeaders.Get("Authorization")) - } -} - -func TestHttpClientSendMessageServerError(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - })) - defer server.Close() - - cfg := &config{ - endpoint: server.URL, - authBearerToken: "test-token", - } - - client, err := newHTTPClient(cfg) - if err != nil { - t.Fatalf("Failed to create HTTP client: %v", err) - } - - message := &filer_pb.EventNotification{} - - err = client.sendMessage(newWebhookMessage("/test/path", message)) - if err == nil { - t.Error("Expected error for server error response") - } -} - -func TestHttpClientSendMessageNetworkError(t *testing.T) { - cfg := &config{ - endpoint: "http://localhost:99999", - authBearerToken: "", - } - - client, err := newHTTPClient(cfg) - if err != nil { - t.Fatalf("Failed to create HTTP client: %v", err) - } - - message := &filer_pb.EventNotification{} - - err = client.sendMessage(newWebhookMessage("/test/path", message)) - if err == nil { - t.Error("Expected error for network failure") - } -} diff --git a/weed/notification/webhook/types.go b/weed/notification/webhook/types.go deleted file mode 100644 index 5cd79c7da..000000000 --- a/weed/notification/webhook/types.go +++ /dev/null @@ -1,182 +0,0 @@ -package webhook - -import ( - "fmt" - "net/url" - "slices" - "strconv" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" -) - -const ( - queueName = "webhook" - pubSubTopicName = "webhook_topic" - deadLetterTopic = "webhook_dead_letter" -) - -type eventType string - -const ( - eventTypeCreate eventType = "create" - eventTypeDelete eventType = "delete" - eventTypeUpdate eventType = "update" - eventTypeRename eventType = "rename" -) - -func (e eventType) valid() bool { - return slices.Contains([]eventType{ - eventTypeCreate, - eventTypeDelete, - eventTypeUpdate, - eventTypeRename, - }, - e, - ) -} - -var ( - pubSubHandlerNameTemplate = func(n int) string { - return "webhook_handler_" + strconv.Itoa(n) - } -) - -type client interface { - sendMessage(message *webhookMessage) error -} - -type webhookMessage struct { - Key string `json:"key"` - EventType string `json:"event_type"` - Notification *filer_pb.EventNotification `json:"message_data"` -} - -func newWebhookMessage(key string, message proto.Message) *webhookMessage { - notification, ok := message.(*filer_pb.EventNotification) - if !ok { - return nil - } - - eventType := string(detectEventType(notification)) - - return &webhookMessage{ - Key: key, - EventType: eventType, - Notification: notification, - } -} - -type config struct { - endpoint string - authBearerToken string - timeoutSeconds int - - maxRetries int - backoffSeconds int - maxBackoffSeconds int - nWorkers int - bufferSize int - - eventTypes []string - pathPrefixes []string -} - -func newConfigWithDefaults(configuration util.Configuration, prefix string) *config { - c := &config{ - endpoint: configuration.GetString(prefix + "endpoint"), - authBearerToken: configuration.GetString(prefix + "bearer_token"), - timeoutSeconds: 10, - maxRetries: 3, - backoffSeconds: 3, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10_000, - } - - if bufferSize := configuration.GetInt(prefix + "buffer_size"); bufferSize > 0 { - c.bufferSize = bufferSize - } - if workers := configuration.GetInt(prefix + "workers"); workers > 0 { - c.nWorkers = workers - } - if maxRetries := configuration.GetInt(prefix + "max_retries"); maxRetries > 0 { - c.maxRetries = maxRetries - } - if backoffSeconds := configuration.GetInt(prefix + "backoff_seconds"); backoffSeconds > 0 { - c.backoffSeconds = backoffSeconds - } - if maxBackoffSeconds := configuration.GetInt(prefix + "max_backoff_seconds"); maxBackoffSeconds > 0 { - c.maxBackoffSeconds = maxBackoffSeconds - } - if timeout := configuration.GetInt(prefix + "timeout_seconds"); timeout > 0 { - c.timeoutSeconds = timeout - } - - c.eventTypes = configuration.GetStringSlice(prefix + "event_types") - c.pathPrefixes = configuration.GetStringSlice(prefix + "path_prefixes") - - return c -} - -func (c *config) validate() error { - if c.endpoint == "" { - return fmt.Errorf("webhook endpoint is required") - } - - _, err := url.Parse(c.endpoint) - if err != nil { - return fmt.Errorf("invalid webhook endpoint: %w", err) - } - - if c.timeoutSeconds < 1 || c.timeoutSeconds > 300 { - return fmt.Errorf("timeout must be between 1 and 300 seconds, got %d", c.timeoutSeconds) - } - - if c.maxRetries < 0 || c.maxRetries > 10 { - return fmt.Errorf("max retries must be between 0 and 10, got %d", c.maxRetries) - } - - if c.backoffSeconds < 1 || c.backoffSeconds > 60 { - return fmt.Errorf("backoff seconds must be between 1 and 60, got %d", c.backoffSeconds) - } - - if c.maxBackoffSeconds < c.backoffSeconds || c.maxBackoffSeconds > 300 { - return fmt.Errorf("max backoff seconds must be between %d and 300, got %d", c.backoffSeconds, c.maxBackoffSeconds) - } - - if c.nWorkers < 1 || c.nWorkers > 100 { - return fmt.Errorf("workers must be between 1 and 100, got %d", c.nWorkers) - } - - if c.bufferSize < 100 || c.bufferSize > 1_000_000 { - return fmt.Errorf("buffer size must be between 100 and 1,000,000, got %d", c.bufferSize) - } - - return nil -} - -func detectEventType(notification *filer_pb.EventNotification) eventType { - hasOldEntry := notification.OldEntry != nil - hasNewEntry := notification.NewEntry != nil - hasNewParentPath := notification.NewParentPath != "" - - if !hasOldEntry && hasNewEntry { - return eventTypeCreate - } - - if hasOldEntry && !hasNewEntry { - return eventTypeDelete - } - - if hasOldEntry && hasNewEntry { - if hasNewParentPath { - return eventTypeRename - } - - return eventTypeUpdate - } - - return eventTypeUpdate -} diff --git a/weed/notification/webhook/webhook_queue.go b/weed/notification/webhook/webhook_queue.go deleted file mode 100644 index e034e9537..000000000 --- a/weed/notification/webhook/webhook_queue.go +++ /dev/null @@ -1,230 +0,0 @@ -package webhook - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/ThreeDotsLabs/watermill" - "github.com/ThreeDotsLabs/watermill/message" - "github.com/ThreeDotsLabs/watermill/message/router/middleware" - "github.com/ThreeDotsLabs/watermill/message/router/plugin" - "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" -) - -func init() { - notification.MessageQueues = append(notification.MessageQueues, &Queue{}) -} - -type Queue struct { - router *message.Router - queueChannel *gochannel.GoChannel - config *config - client client - filter *filter - - ctx context.Context - cancel context.CancelFunc -} - -func (w *Queue) GetName() string { - return queueName -} - -func (w *Queue) SendMessage(key string, msg proto.Message) error { - eventNotification, ok := msg.(*filer_pb.EventNotification) - if !ok { - return nil - } - - if w.filter != nil && !w.filter.shouldPublish(key, eventNotification) { - return nil - } - - m := newWebhookMessage(key, msg) - if m == nil { - return nil - } - - wMsg, err := m.toWaterMillMessage() - if err != nil { - return err - } - - return w.queueChannel.Publish(pubSubTopicName, wMsg) -} - -func (w *webhookMessage) toWaterMillMessage() (*message.Message, error) { - payload, err := proto.Marshal(w.Notification) - if err != nil { - return nil, err - } - - msg := message.NewMessage(watermill.NewUUID(), payload) - // Set event type and key as metadata - msg.Metadata.Set("event_type", w.EventType) - msg.Metadata.Set("key", w.Key) - - return msg, nil -} - -func (w *Queue) Initialize(configuration util.Configuration, prefix string) error { - c := newConfigWithDefaults(configuration, prefix) - - if err := c.validate(); err != nil { - return err - } - - return w.initialize(c) -} - -func (w *Queue) initialize(cfg *config) error { - w.ctx, w.cancel = context.WithCancel(context.Background()) - w.config = cfg - w.filter = newFilter(cfg) - - hClient, err := newHTTPClient(cfg) - if err != nil { - return fmt.Errorf("failed to create webhook http client: %w", err) - } - w.client = hClient - - if err = w.setupWatermillQueue(cfg); err != nil { - return fmt.Errorf("failed to setup watermill queue: %w", err) - } - if err = w.logDeadLetterMessages(); err != nil { - return err - } - - return nil -} - -func (w *Queue) setupWatermillQueue(cfg *config) error { - logger := watermill.NewStdLogger(false, false) - pubSubConfig := gochannel.Config{ - OutputChannelBuffer: int64(cfg.bufferSize), - Persistent: false, - } - w.queueChannel = gochannel.NewGoChannel(pubSubConfig, logger) - - router, err := message.NewRouter( - message.RouterConfig{ - CloseTimeout: 60 * time.Second, - }, - logger, - ) - if err != nil { - return fmt.Errorf("failed to create router: %w", err) - } - w.router = router - - retryMiddleware := middleware.Retry{ - MaxRetries: cfg.maxRetries, - InitialInterval: time.Duration(cfg.backoffSeconds) * time.Second, - MaxInterval: time.Duration(cfg.maxBackoffSeconds) * time.Second, - Multiplier: 2.0, - RandomizationFactor: 0.3, - Logger: logger, - }.Middleware - - poisonQueue, err := middleware.PoisonQueue(w.queueChannel, deadLetterTopic) - if err != nil { - return fmt.Errorf("failed to create poison queue: %w", err) - } - - router.AddPlugin(plugin.SignalsHandler) - router.AddMiddleware(retryMiddleware, poisonQueue) - - for i := 0; i < cfg.nWorkers; i++ { - router.AddNoPublisherHandler( - pubSubHandlerNameTemplate(i), - pubSubTopicName, - w.queueChannel, - w.handleWebhook, - ) - } - - go func() { - // cancels the queue context so the dead letter logger exists in case context not canceled by the shutdown signal already - defer w.cancel() - - if err := router.Run(w.ctx); err != nil && !errors.Is(err, context.Canceled) { - glog.Errorf("webhook pubsub worker stopped with error: %v", err) - } - - glog.Info("webhook pubsub worker stopped") - }() - - return nil -} - -func (w *Queue) handleWebhook(msg *message.Message) error { - var n filer_pb.EventNotification - if err := proto.Unmarshal(msg.Payload, &n); err != nil { - glog.Errorf("failed to unmarshal protobuf message: %v", err) - return err - } - - // Reconstruct webhook message from metadata and payload - webhookMsg := &webhookMessage{ - Key: msg.Metadata.Get("key"), - EventType: msg.Metadata.Get("event_type"), - Notification: &n, - } - - if err := w.client.sendMessage(webhookMsg); err != nil { - glog.Errorf("failed to send message to webhook %s: %v", webhookMsg.Key, err) - return err - } - - return nil -} - -func (w *Queue) logDeadLetterMessages() error { - ch, err := w.queueChannel.Subscribe(w.ctx, deadLetterTopic) - if err != nil { - return err - } - - go func() { - for { - select { - case msg, ok := <-ch: - if !ok { - glog.Info("dead letter channel closed") - return - } - if msg == nil { - glog.Errorf("received nil message from dead letter channel") - continue - } - key := "unknown" - if msg.Metadata != nil { - if keyValue, exists := msg.Metadata["key"]; exists { - key = keyValue - } - } - payload := "" - if msg.Payload != nil { - var n filer_pb.EventNotification - if err := proto.Unmarshal(msg.Payload, &n); err != nil { - payload = fmt.Sprintf("failed to unmarshal payload: %v", err) - } else { - payload = n.String() - } - } - glog.Errorf("received dead letter message: %s, key: %s", payload, key) - case <-w.ctx.Done(): - return - } - } - }() - - return nil -} diff --git a/weed/notification/webhook/webhook_queue_test.go b/weed/notification/webhook/webhook_queue_test.go deleted file mode 100644 index 52a290149..000000000 --- a/weed/notification/webhook/webhook_queue_test.go +++ /dev/null @@ -1,536 +0,0 @@ -package webhook - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "google.golang.org/protobuf/proto" -) - -func TestConfigValidation(t *testing.T) { - tests := []struct { - name string - config *config - wantErr bool - errMsg string - }{ - { - name: "valid config", - config: &config{ - endpoint: "https://example.com/webhook", - authBearerToken: "test-token", - timeoutSeconds: 30, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10000, - }, - wantErr: false, - }, - { - name: "empty endpoint", - config: &config{ - endpoint: "", - timeoutSeconds: 30, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10000, - }, - wantErr: true, - errMsg: "endpoint is required", - }, - { - name: "invalid URL", - config: &config{ - endpoint: "://invalid-url", - timeoutSeconds: 30, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10000, - }, - wantErr: true, - errMsg: "invalid webhook endpoint", - }, - { - name: "timeout too large", - config: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 301, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10000, - }, - wantErr: true, - errMsg: "timeout must be between", - }, - { - name: "too many retries", - config: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 30, - maxRetries: 11, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 10000, - }, - wantErr: true, - errMsg: "max retries must be between", - }, - { - name: "too many workers", - config: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 30, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 101, - bufferSize: 10000, - }, - wantErr: true, - errMsg: "workers must be between", - }, - { - name: "buffer too large", - config: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 30, - maxRetries: 3, - backoffSeconds: 5, - maxBackoffSeconds: 30, - nWorkers: 5, - bufferSize: 1000001, - }, - wantErr: true, - errMsg: "buffer size must be between", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.validate() - if (err != nil) != tt.wantErr { - t.Errorf("validate() error = %v, wantErr %v", err, tt.wantErr) - } - if err != nil && tt.errMsg != "" { - if err.Error() == "" || !strings.Contains(err.Error(), tt.errMsg) { - t.Errorf("validate() error message = %v, want to contain %v", err.Error(), tt.errMsg) - } - } - }) - } -} - -func TestWebhookMessageSerialization(t *testing.T) { - msg := &filer_pb.EventNotification{ - OldEntry: nil, - NewEntry: &filer_pb.Entry{ - Name: "test.txt", - IsDirectory: false, - }, - } - - webhookMsg := newWebhookMessage("/test/path", msg) - - wmMsg, err := webhookMsg.toWaterMillMessage() - if err != nil { - t.Fatalf("Failed to convert to watermill message: %v", err) - } - - // Unmarshal the protobuf payload directly - var eventNotification filer_pb.EventNotification - err = proto.Unmarshal(wmMsg.Payload, &eventNotification) - if err != nil { - t.Fatalf("Failed to unmarshal protobuf message: %v", err) - } - - // Check metadata - if wmMsg.Metadata.Get("key") != "/test/path" { - t.Errorf("Expected key '/test/path', got %v", wmMsg.Metadata.Get("key")) - } - - if wmMsg.Metadata.Get("event_type") != "create" { - t.Errorf("Expected event type 'create', got %v", wmMsg.Metadata.Get("event_type")) - } - - if eventNotification.NewEntry.Name != "test.txt" { - t.Errorf("Expected file name 'test.txt', got %v", eventNotification.NewEntry.Name) - } -} - -func TestQueueInitialize(t *testing.T) { - cfg := &config{ - endpoint: "https://example.com/webhook", - authBearerToken: "test-token", - timeoutSeconds: 10, - maxRetries: 3, - backoffSeconds: 3, - maxBackoffSeconds: 60, - nWorkers: 1, - bufferSize: 100, - } - - q := &Queue{} - err := q.initialize(cfg) - if err != nil { - t.Errorf("Initialize() error = %v", err) - } - - defer func() { - if q.cancel != nil { - q.cancel() - } - time.Sleep(100 * time.Millisecond) - if q.router != nil { - q.router.Close() - } - }() - - if q.router == nil { - t.Error("Expected router to be initialized") - } - if q.queueChannel == nil { - t.Error("Expected queueChannel to be initialized") - } - if q.client == nil { - t.Error("Expected client to be initialized") - } - if q.config == nil { - t.Error("Expected config to be initialized") - } -} - -// TestQueueSendMessage test sending messages to the queue -func TestQueueSendMessage(t *testing.T) { - cfg := &config{ - endpoint: "https://example.com/webhook", - authBearerToken: "test-token", - timeoutSeconds: 1, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - } - - q := &Queue{} - err := q.initialize(cfg) - if err != nil { - t.Fatalf("Failed to initialize queue: %v", err) - } - - defer func() { - if q.cancel != nil { - q.cancel() - } - time.Sleep(100 * time.Millisecond) - if q.router != nil { - q.router.Close() - } - }() - - msg := &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{ - Name: "test.txt", - }, - } - - err = q.SendMessage("/test/path", msg) - if err != nil { - t.Errorf("SendMessage() error = %v", err) - } -} - -func TestQueueHandleWebhook(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - cfg := &config{ - endpoint: server.URL, - authBearerToken: "test-token", - timeoutSeconds: 1, - maxRetries: 0, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - } - - client, _ := newHTTPClient(cfg) - q := &Queue{ - client: client, - } - - message := newWebhookMessage("/test/path", &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{ - Name: "test.txt", - }, - }) - - wmMsg, err := message.toWaterMillMessage() - if err != nil { - t.Fatalf("Failed to create watermill message: %v", err) - } - - err = q.handleWebhook(wmMsg) - if err != nil { - t.Errorf("handleWebhook() error = %v", err) - } -} - -func TestQueueEndToEnd(t *testing.T) { - // Simplified test - just verify the queue can be created and message can be sent - // without needing full end-to-end processing - cfg := &config{ - endpoint: "https://example.com/webhook", - authBearerToken: "test-token", - timeoutSeconds: 1, - maxRetries: 0, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - } - - q := &Queue{} - err := q.initialize(cfg) - if err != nil { - t.Fatalf("Failed to initialize queue: %v", err) - } - - defer func() { - if q.cancel != nil { - q.cancel() - } - time.Sleep(100 * time.Millisecond) - if q.router != nil { - q.router.Close() - } - }() - - msg := &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{ - Name: "test.txt", - }, - } - - err = q.SendMessage("/test/path", msg) - if err != nil { - t.Errorf("SendMessage() error = %v", err) - } -} - -func TestQueueRetryMechanism(t *testing.T) { - cfg := &config{ - endpoint: "https://example.com/webhook", - authBearerToken: "test-token", - timeoutSeconds: 1, - maxRetries: 3, // Test that this config is used - backoffSeconds: 2, - maxBackoffSeconds: 10, - nWorkers: 1, - bufferSize: 10, - } - - q := &Queue{} - err := q.initialize(cfg) - if err != nil { - t.Fatalf("Failed to initialize queue: %v", err) - } - - defer func() { - if q.cancel != nil { - q.cancel() - } - time.Sleep(100 * time.Millisecond) - if q.router != nil { - q.router.Close() - } - }() - - // Verify that the queue is properly configured for retries - if q.config.maxRetries != 3 { - t.Errorf("Expected maxRetries=3, got %d", q.config.maxRetries) - } - - if q.config.backoffSeconds != 2 { - t.Errorf("Expected backoffSeconds=2, got %d", q.config.backoffSeconds) - } - - if q.config.maxBackoffSeconds != 10 { - t.Errorf("Expected maxBackoffSeconds=10, got %d", q.config.maxBackoffSeconds) - } - - // Test that we can send a message (retry behavior is handled by Watermill middleware) - msg := &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "test.txt"}, - } - - err = q.SendMessage("/test/retry", msg) - if err != nil { - t.Errorf("SendMessage() error = %v", err) - } -} - -func TestQueueSendMessageWithFilter(t *testing.T) { - tests := []struct { - name string - cfg *config - key string - notification *filer_pb.EventNotification - shouldPublish bool - }{ - { - name: "allowed event type", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - eventTypes: []string{"create"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: true, - }, - { - name: "filtered event type", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - eventTypes: []string{"update", "rename"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - { - name: "allowed path prefix", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - pathPrefixes: []string{"/data/"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: true, - }, - { - name: "filtered path prefix", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - pathPrefixes: []string{"/logs/"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - { - name: "combined filters - both pass", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - eventTypes: []string{"create", "delete"}, - pathPrefixes: []string{"/data/", "/logs/"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: true, - }, - { - name: "combined filters - event fails", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - eventTypes: []string{"update", "delete"}, - pathPrefixes: []string{"/data/", "/logs/"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - { - name: "combined filters - path fails", - cfg: &config{ - endpoint: "https://example.com/webhook", - timeoutSeconds: 10, - maxRetries: 1, - backoffSeconds: 1, - maxBackoffSeconds: 1, - nWorkers: 1, - bufferSize: 10, - eventTypes: []string{"create", "delete"}, - pathPrefixes: []string{"/logs/"}, - }, - key: "/data/file.txt", - notification: &filer_pb.EventNotification{ - NewEntry: &filer_pb.Entry{Name: "file.txt"}, - }, - shouldPublish: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - shouldPublish := newFilter(tt.cfg).shouldPublish(tt.key, tt.notification) - if shouldPublish != tt.shouldPublish { - t.Errorf("Expected shouldPublish=%v, got %v", tt.shouldPublish, shouldPublish) - } - }) - } -} diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 61fd2de48..b716300e2 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,13 +3,12 @@ package operation import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "google.golang.org/grpc" - "sync" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" ) type VolumeAssignRequest struct { @@ -35,111 +34,7 @@ type AssignResult struct { Replicas []Location `json:"replicas,omitempty"` } -// This is a proxy to the master server, only for assigning volume ids. -// It runs via grpc to the master server in streaming mode. -// The connection to the master would only be re-established when the last connection has error. -type AssignProxy struct { - grpcConnection *grpc.ClientConn - pool chan *singleThreadAssignProxy -} - -func NewAssignProxy(masterFn GetMasterFn, grpcDialOption grpc.DialOption, concurrency int) (ap *AssignProxy, err error) { - ap = &AssignProxy{ - pool: make(chan *singleThreadAssignProxy, concurrency), - } - ap.grpcConnection, err = pb.GrpcDial(context.Background(), masterFn(context.Background()).ToGrpcAddress(), true, grpcDialOption) - if err != nil { - return nil, fmt.Errorf("fail to dial %s: %v", masterFn(context.Background()).ToGrpcAddress(), err) - } - for i := 0; i < concurrency; i++ { - ap.pool <- &singleThreadAssignProxy{} - } - return ap, nil -} - -func (ap *AssignProxy) Assign(primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (ret *AssignResult, err error) { - p := <-ap.pool - defer func() { - ap.pool <- p - }() - - return p.doAssign(ap.grpcConnection, primaryRequest, alternativeRequests...) -} - -type singleThreadAssignProxy struct { - assignClient master_pb.Seaweed_StreamAssignClient - sync.Mutex -} - -func (ap *singleThreadAssignProxy) doAssign(grpcConnection *grpc.ClientConn, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (ret *AssignResult, err error) { - ap.Lock() - defer ap.Unlock() - - if ap.assignClient == nil { - client := master_pb.NewSeaweedClient(grpcConnection) - ap.assignClient, err = client.StreamAssign(context.Background()) - if err != nil { - ap.assignClient = nil - return nil, fmt.Errorf("fail to create stream assign client: %w", err) - } - } - - var requests []*VolumeAssignRequest - requests = append(requests, primaryRequest) - requests = append(requests, alternativeRequests...) - ret = &AssignResult{} - - for _, request := range requests { - if request == nil { - continue - } - req := &master_pb.AssignRequest{ - Count: request.Count, - Replication: request.Replication, - Collection: request.Collection, - Ttl: request.Ttl, - DiskType: request.DiskType, - DataCenter: request.DataCenter, - Rack: request.Rack, - DataNode: request.DataNode, - WritableVolumeCount: request.WritableVolumeCount, - } - if err = ap.assignClient.Send(req); err != nil { - return nil, fmt.Errorf("StreamAssignSend: %w", err) - } - resp, grpcErr := ap.assignClient.Recv() - if grpcErr != nil { - return nil, grpcErr - } - if resp.Error != "" { - return nil, fmt.Errorf("StreamAssignRecv: %v", resp.Error) - } - - ret.Count = resp.Count - ret.Fid = resp.Fid - ret.Url = resp.Location.Url - ret.PublicUrl = resp.Location.PublicUrl - ret.GrpcPort = int(resp.Location.GrpcPort) - ret.Error = resp.Error - ret.Auth = security.EncodedJwt(resp.Auth) - for _, r := range resp.Replicas { - ret.Replicas = append(ret.Replicas, Location{ - Url: r.Url, - PublicUrl: r.PublicUrl, - DataCenter: r.DataCenter, - }) - } - - if ret.Count <= 0 { - continue - } - break - } - - return -} - -func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -153,7 +48,8 @@ func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialO continue } - lastError = WithMasterServerClient(false, masterFn(ctx), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + lastError = WithMasterServerClient(false, masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + req := &master_pb.AssignRequest{ Count: request.Count, Replication: request.Replication, @@ -165,7 +61,7 @@ func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialO DataNode: request.DataNode, WritableVolumeCount: request.WritableVolumeCount, } - resp, grpcErr := masterClient.Assign(ctx, req) + resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { return grpcErr } @@ -183,9 +79,8 @@ func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialO ret.Auth = security.EncodedJwt(resp.Auth) for _, r := range resp.Replicas { ret.Replicas = append(ret.Replicas, Location{ - Url: r.Url, - PublicUrl: r.PublicUrl, - DataCenter: r.DataCenter, + Url: r.Url, + PublicUrl: r.PublicUrl, }) } @@ -194,7 +89,6 @@ func Assign(ctx context.Context, masterFn GetMasterFn, grpcDialOption grpc.DialO }) if lastError != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorChunkAssign).Inc() continue } @@ -241,10 +135,8 @@ type StorageOption struct { Rack string DataNode string TtlSeconds int32 - VolumeGrowthCount uint32 - MaxFileNameLength uint32 Fsync bool - SaveInside bool + VolumeGrowthCount uint32 } func (so *StorageOption) TtlString() string { diff --git a/weed/operation/assign_file_id_test.go b/weed/operation/assign_file_id_test.go deleted file mode 100644 index b2ec7d92a..000000000 --- a/weed/operation/assign_file_id_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package operation - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "google.golang.org/grpc" - "testing" - "time" -) - -func BenchmarkWithConcurrency(b *testing.B) { - concurrencyLevels := []int{1, 10, 100, 1000} - - ap, _ := NewAssignProxy(func(_ context.Context) pb.ServerAddress { - return pb.ServerAddress("localhost:9333") - }, grpc.WithInsecure(), 16) - - for _, concurrency := range concurrencyLevels { - b.Run( - fmt.Sprintf("Concurrency-%d", concurrency), - func(b *testing.B) { - for i := 0; i < b.N; i++ { - done := make(chan struct{}) - startTime := time.Now() - - for j := 0; j < concurrency; j++ { - go func() { - - ap.Assign(&VolumeAssignRequest{ - Count: 1, - }) - - done <- struct{}{} - }() - } - - for j := 0; j < concurrency; j++ { - <-done - } - - duration := time.Since(startTime) - b.Logf("Concurrency: %d, Duration: %v", concurrency, duration) - } - }, - ) - } -} - -func BenchmarkStreamAssign(b *testing.B) { - ap, _ := NewAssignProxy(func(_ context.Context) pb.ServerAddress { - return pb.ServerAddress("localhost:9333") - }, grpc.WithInsecure(), 16) - for i := 0; i < b.N; i++ { - ap.Assign(&VolumeAssignRequest{ - Count: 1, - }) - } -} - -func BenchmarkUnaryAssign(b *testing.B) { - for i := 0; i < b.N; i++ { - Assign(context.Background(), func(_ context.Context) pb.ServerAddress { - return pb.ServerAddress("localhost:9333") - }, grpc.WithInsecure(), &VolumeAssignRequest{ - Count: 1, - }) - } -} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index b0c6c651f..45068bbcc 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -1,7 +1,6 @@ package operation import ( - "context" "encoding/json" "errors" "fmt" @@ -12,10 +11,9 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -80,10 +78,10 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFileIds(masterFn, usePublicUrl, grpcDialOption, fileIds) + results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) - return fmt.Errorf("chunk delete: %w", err) + return fmt.Errorf("chunk delete: %v", err) } for _, result := range results { if result.Error != "" { @@ -96,7 +94,7 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g } func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (written int64, e error) { - req, err := http.NewRequest(http.MethodGet, fileUrl, nil) + req, err := http.NewRequest("GET", fileUrl, nil) if err != nil { return written, err } @@ -104,11 +102,14 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64, jwt string) (wri req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } - resp, err := util_http.Do(req) + resp, err := util.Do(req) if err != nil { return written, err } - defer util_http.CloseResponse(resp) + defer func() { + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() switch resp.StatusCode { case http.StatusRequestedRangeNotSatisfiable: @@ -175,7 +176,7 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { for ; chunkIndex < len(cf.chunkList); chunkIndex++ { ci := cf.chunkList[chunkIndex] // if we need read date from local volume server first? - fileUrl, jwt, lookupError := LookupFileId(func(_ context.Context) pb.ServerAddress { + fileUrl, jwt, lookupError := LookupFileId(func() pb.ServerAddress { return cf.master }, cf.grpcDialOption, ci.Fid) if lookupError != nil { diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 419223165..587cf1d01 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,13 +4,13 @@ import ( "context" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "google.golang.org/grpc" "net/http" "strings" "sync" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -28,8 +28,8 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { return fid[:commaIndex], fid[commaIndex+1:], nil } -// DeleteFileIds batch deletes a list of fileIds -func DeleteFileIds(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { +// DeleteFiles batch deletes a list of fileIds +func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { lookupFunc := func(vids []string) (results map[string]*LookupResult, err error) { results, err = LookupVolumeIds(masterFn, grpcDialOption, vids) @@ -43,11 +43,11 @@ func DeleteFileIds(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc. return } - return DeleteFileIdsWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) + return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) } -func DeleteFileIdsWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]*LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { +func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]*LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { var ret []*volume_server_pb.DeleteResult @@ -102,7 +102,7 @@ func DeleteFileIdsWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []s go func(server pb.ServerAddress, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFileIdsAtOneVolumeServer(server, grpcDialOption, fidList, false); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, false); deleteErr != nil { err = deleteErr } else if deleteResults != nil { resultChan <- deleteResults @@ -120,8 +120,8 @@ func DeleteFileIdsWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []s return ret, err } -// DeleteFileIdsAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFileIdsAtOneVolumeServer(volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) { +// DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc +func DeleteFilesAtOneVolumeServer(volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) { err = WithVolumeServerClient(false, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index ecd8117ee..9b68d2286 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -3,25 +3,25 @@ package operation import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) func WithVolumeServerClient(streamingMode bool, volumeServer pb.ServerAddress, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { - return pb.WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { + return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) - }, volumeServer.ToGrpcAddress(), false, grpcDialOption) + }, volumeServer.ToGrpcAddress(), grpcDialOption) } func WithMasterServerClient(streamingMode bool, masterServer pb.ServerAddress, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - return pb.WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { + return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, masterServer.ToGrpcAddress(), false, grpcDialOption) + }, masterServer.ToGrpcAddress(), grpcDialOption) } diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index 9e9c719b5..1eb5dd320 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -4,20 +4,19 @@ import ( "context" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "google.golang.org/grpc" - "math/rand/v2" + "math/rand" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) type Location struct { - Url string `json:"url,omitempty"` - PublicUrl string `json:"publicUrl,omitempty"` - DataCenter string `json:"dataCenter,omitempty"` - GrpcPort int `json:"grpcPort,omitempty"` + Url string `json:"url,omitempty"` + PublicUrl string `json:"publicUrl,omitempty"` + GrpcPort int `json:"grpcPort,omitempty"` } func (l *Location) ServerAddress() pb.ServerAddress { @@ -51,7 +50,7 @@ func LookupFileId(masterFn GetMasterFn, grpcDialOption grpc.DialOption, fileId s if len(lookup.Locations) == 0 { return "", jwt, errors.New("File Not Found") } - return "http://" + lookup.Locations[rand.IntN(len(lookup.Locations))].Url + "/" + fileId, lookup.Jwt, nil + return "http://" + lookup.Locations[rand.Intn(len(lookup.Locations))].Url + "/" + fileId, lookup.Jwt, nil } func LookupVolumeId(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid string) (*LookupResult, error) { @@ -80,7 +79,7 @@ func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids //only query unknown_vids - err := WithMasterServerClient(false, masterFn(context.Background()), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(false, masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeOrFileIds: unknown_vids, @@ -95,10 +94,9 @@ func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids var locations []Location for _, loc := range vidLocations.Locations { locations = append(locations, Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - DataCenter: loc.DataCenter, - GrpcPort: int(loc.GrpcPort), + Url: loc.Url, + PublicUrl: loc.PublicUrl, + GrpcPort: int(loc.GrpcPort), }) } if vidLocations.Error != "" { diff --git a/weed/operation/lookup_vid_cache.go b/weed/operation/lookup_vid_cache.go index 248fc17de..ccc1f2beb 100644 --- a/weed/operation/lookup_vid_cache.go +++ b/weed/operation/lookup_vid_cache.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" ) var ErrorNotFound = errors.New("not found") diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go index 339d4507e..2b44b3b26 100644 --- a/weed/operation/needle_parse_test.go +++ b/weed/operation/needle_parse_test.go @@ -2,7 +2,6 @@ package operation import ( "bytes" - "context" "fmt" "io" "net/http" @@ -10,8 +9,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) type MockClient struct { @@ -39,13 +38,17 @@ If the content is already compressed, need to know the content size. */ func TestCreateNeedleFromRequest(t *testing.T) { - mockClient := &MockClient{} - uploader := newUploader(mockClient) + mc := &MockClient{} + tmp := HttpClient + HttpClient = mc + defer func() { + HttpClient = tmp + }() { - mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { assert.Equal(t, nil, err, "upload: %v", err) - assert.Equal(t, "text/plain; charset=utf-8", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime)) assert.Equal(t, true, n.IsCompressed(), "this should be compressed") assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize) @@ -59,7 +62,7 @@ func TestCreateNeedleFromRequest(t *testing.T) { PairMap: nil, Jwt: "", } - uploadResult, err, data := uploader.Upload(context.Background(), bytes.NewReader([]byte(textContent)), uploadOption) + uploadResult, err, data := Upload(bytes.NewReader([]byte(textContent)), uploadOption) if len(data) != len(textContent) { t.Errorf("data actual %d expected %d", len(data), len(textContent)) } @@ -70,7 +73,7 @@ func TestCreateNeedleFromRequest(t *testing.T) { } { - mockClient.needleHandling = func(n *needle.Needle, originalSize int, err error) { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { assert.Equal(t, nil, err, "upload: %v", err) assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) assert.Equal(t, true, n.IsCompressed(), "this should be compressed") @@ -87,7 +90,7 @@ func TestCreateNeedleFromRequest(t *testing.T) { PairMap: nil, Jwt: "", } - uploader.Upload(context.Background(), bytes.NewReader(gzippedData), uploadOption) + Upload(bytes.NewReader(gzippedData), uploadOption) } /* diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 1efa42b2f..648df174a 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,10 +1,8 @@ package operation import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "io" - "math/rand/v2" "mime" "net/url" "os" @@ -14,20 +12,24 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" ) type FilePart struct { - Reader io.Reader - FileName string - FileSize int64 - MimeType string - ModTime int64 //in seconds - Pref StoragePreference - Server string //this comes from assign result - Fid string //this comes from assign result, but customizable - Fsync bool + Reader io.Reader + FileName string + FileSize int64 + MimeType string + ModTime int64 //in seconds + Replication string + Collection string + DataCenter string + Ttl string + DiskType string + Server string //this comes from assign result + Fid string //this comes from assign result, but customizable + Fsync bool } type SubmitResult struct { @@ -38,31 +40,22 @@ type SubmitResult struct { Error string `json:"error,omitempty"` } -type StoragePreference struct { - Replication string - Collection string - DataCenter string - Ttl string - DiskType string - MaxMB int -} +type GetMasterFn func() pb.ServerAddress -type GetMasterFn func(ctx context.Context) pb.ServerAddress - -func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []*FilePart, pref StoragePreference, usePublicUrl bool) ([]SubmitResult, error) { +func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName } ar := &VolumeAssignRequest{ Count: uint64(len(files)), - Replication: pref.Replication, - Collection: pref.Collection, - DataCenter: pref.DataCenter, - Ttl: pref.Ttl, - DiskType: pref.DiskType, + Replication: replication, + Collection: collection, + DataCenter: dataCenter, + Ttl: ttl, + DiskType: diskType, } - ret, err := Assign(context.Background(), masterFn, grpcDialOption, ar) + ret, err := Assign(masterFn, grpcDialOption, ar) if err != nil { for index := range files { results[index].Error = err.Error() @@ -78,8 +71,12 @@ func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []* if usePublicUrl { file.Server = ret.PublicUrl } - file.Pref = pref - results[index].Size, err = file.Upload(pref.MaxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption) + file.Replication = replication + file.Collection = collection + file.DataCenter = dataCenter + file.Ttl = ttl + file.DiskType = diskType + results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -89,8 +86,8 @@ func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []* return results, nil } -func NewFileParts(fullPathFilenames []string) (ret []*FilePart, err error) { - ret = make([]*FilePart, len(fullPathFilenames)) +func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) { + ret = make([]FilePart, len(fullPathFilenames)) for index, file := range fullPathFilenames { if ret[index], err = newFilePart(file); err != nil { return @@ -98,8 +95,7 @@ func NewFileParts(fullPathFilenames []string) (ret []*FilePart, err error) { } return } -func newFilePart(fullPathFilename string) (ret *FilePart, err error) { - ret = &FilePart{} +func newFilePart(fullPathFilename string) (ret FilePart, err error) { fh, openErr := os.Open(fullPathFilename) if openErr != nil { glog.V(0).Info("Failed to open file: ", fullPathFilename) @@ -123,7 +119,7 @@ func newFilePart(fullPathFilename string) (ret *FilePart, err error) { return ret, nil } -func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { +func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) @@ -147,29 +143,29 @@ func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, j var ret *AssignResult var id string - if fi.Pref.DataCenter != "" { + if fi.DataCenter != "" { ar := &VolumeAssignRequest{ Count: uint64(chunks), - Replication: fi.Pref.Replication, - Collection: fi.Pref.Collection, - Ttl: fi.Pref.Ttl, - DiskType: fi.Pref.DiskType, + Replication: fi.Replication, + Collection: fi.Collection, + Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(context.Background(), masterFn, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { return } } for i := int64(0); i < chunks; i++ { - if fi.Pref.DataCenter == "" { + if fi.DataCenter == "" { ar := &VolumeAssignRequest{ Count: 1, - Replication: fi.Pref.Replication, - Collection: fi.Pref.Collection, - Ttl: fi.Pref.Ttl, - DiskType: fi.Pref.DiskType, + Replication: fi.Replication, + Collection: fi.Collection, + Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(context.Background(), masterFn, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { // delete all uploaded chunks cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) @@ -182,8 +178,11 @@ func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, j id += "_" + strconv.FormatInt(i, 10) } } - fileUrl := genFileUrl(ret, id, usePublicUrl) - count, e := uploadOneChunk( + fileUrl := "http://" + ret.Url + "/" + id + if usePublicUrl { + fileUrl = "http://" + ret.PublicUrl + "/" + id + } + count, e := upload_one_chunk( baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), masterFn, fileUrl, @@ -202,7 +201,7 @@ func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, j ) retSize += count } - err = uploadChunkedFileManifest(fileUrl, &cm, jwt) + err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) @@ -217,13 +216,7 @@ func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, j PairMap: nil, Jwt: jwt, } - - uploader, e := NewUploader() - if e != nil { - return 0, e - } - - ret, e, _ := uploader.Upload(context.Background(), fi.Reader, uploadOption) + ret, e, _ := Upload(fi.Reader, uploadOption) if e != nil { return 0, e } @@ -232,23 +225,7 @@ func (fi *FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, j return } -func genFileUrl(ret *AssignResult, id string, usePublicUrl bool) string { - fileUrl := "http://" + ret.Url + "/" + id - if usePublicUrl { - fileUrl = "http://" + ret.PublicUrl + "/" + id - } - for _, replica := range ret.Replicas { - if rand.IntN(len(ret.Replicas)+1) == 0 { - fileUrl = "http://" + replica.Url + "/" + id - if usePublicUrl { - fileUrl = "http://" + replica.PublicUrl + "/" + id - } - } - } - return fileUrl -} - -func uploadOneChunk(filename string, reader io.Reader, masterFn GetMasterFn, +func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") @@ -261,20 +238,14 @@ func uploadOneChunk(filename string, reader io.Reader, masterFn GetMasterFn, PairMap: nil, Jwt: jwt, } - - uploader, uploaderError := NewUploader() - if uploaderError != nil { - return 0, uploaderError - } - - uploadResult, uploadError, _ := uploader.Upload(context.Background(), reader, uploadOption) + uploadResult, uploadError, _ := Upload(reader, uploadOption) if uploadError != nil { return 0, uploadError } return uploadResult.Size, nil } -func uploadChunkedFileManifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error { +func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error { buf, e := manifest.Marshal() if e != nil { return e @@ -293,12 +264,6 @@ func uploadChunkedFileManifest(fileUrl string, manifest *ChunkManifest, jwt secu PairMap: nil, Jwt: jwt, } - - uploader, e := NewUploader() - if e != nil { - return e - } - - _, e = uploader.UploadData(context.Background(), buf, uploadOption) + _, e = UploadData(buf, uploadOption) return e } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index cb091aed8..de71a198d 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -2,8 +2,8 @@ package operation import ( "context" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "google.golang.org/grpc" ) diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index 8decc2df9..d3449873b 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -3,14 +3,13 @@ package operation import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" "io" - "github.com/seaweedfs/seaweedfs/weed/pb" - "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { @@ -54,10 +53,6 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia needleHeader := resp.NeedleHeader needleBody := resp.NeedleBody - version := needle.Version(resp.Version) - if version == 0 { - version = needle.GetCurrentVersion() - } if len(needleHeader) == 0 { continue @@ -77,7 +72,7 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia n := new(needle.Needle) n.ParseNeedleHeader(needleHeader) - err = n.ReadNeedleBodyBytes(needleBody, version) + err = n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion) if err != nil { return err } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index f469b2273..3d41d2eb5 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,29 +2,21 @@ package operation import ( "bytes" - "context" "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "io" "mime" "mime/multipart" + "net" "net/http" "net/textproto" "path/filepath" "strings" - "sync" "time" - - "github.com/seaweedfs/seaweedfs/weed/util/request_id" - "github.com/valyala/bytebufferpool" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client" ) type UploadOption struct { @@ -35,9 +27,6 @@ type UploadOption struct { MimeType string PairMap map[string]string Jwt security.EncodedJwt - RetryForever bool - Md5 string - BytesBuffer *bytes.Buffer } type UploadResult struct { @@ -52,13 +41,13 @@ type UploadResult struct { RetryCount int `json:"-"` } -func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsNs int64) *filer_pb.FileChunk { +func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { fid, _ := filer_pb.ToFileIdObject(fileId) return &filer_pb.FileChunk{ FileId: fileId, Offset: offset, Size: uint64(uploadResult.Size), - ModifiedTsNs: tsNs, + Mtime: time.Now().UnixNano(), ETag: uploadResult.ContentMd5, CipherKey: uploadResult.CipherKey, IsCompressed: uploadResult.Gzip > 0, @@ -66,158 +55,70 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64, tsN } } -// ToPbFileChunkWithSSE creates a FileChunk with SSE metadata -func (uploadResult *UploadResult) ToPbFileChunkWithSSE(fileId string, offset int64, tsNs int64, sseType filer_pb.SSEType, sseMetadata []byte) *filer_pb.FileChunk { - fid, _ := filer_pb.ToFileIdObject(fileId) - chunk := &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(uploadResult.Size), - ModifiedTsNs: tsNs, - ETag: uploadResult.ContentMd5, - CipherKey: uploadResult.CipherKey, - IsCompressed: uploadResult.Gzip > 0, - Fid: fid, - } - - // Add SSE metadata if provided - chunk.SseType = sseType - if len(sseMetadata) > 0 { - chunk.SseMetadata = sseMetadata - } - - return chunk -} - -var ( - fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "") - uploader *Uploader - uploaderErr error - once sync.Once -) - // HTTPClient interface for testing type HTTPClient interface { Do(req *http.Request) (*http.Response, error) } -// Uploader -type Uploader struct { - httpClient HTTPClient +var ( + HttpClient HTTPClient +) + +func init() { + HttpClient = &http.Client{Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 10 * time.Second, + }).DialContext, + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} } -func NewUploader() (*Uploader, error) { - once.Do(func() { - // With Dial context - var httpClient *util_http_client.HTTPClient - httpClient, uploaderErr = util_http.NewGlobalHttpClient(util_http_client.AddDialContext) - if uploaderErr != nil { - uploaderErr = fmt.Errorf("error initializing the loader: %s", uploaderErr) - } - if httpClient != nil { - uploader = newUploader(httpClient) - } - }) - return uploader, uploaderErr -} - -func newUploader(httpClient HTTPClient) *Uploader { - return &Uploader{ - httpClient: httpClient, - } -} - -// UploadWithRetry will retry both assigning volume request and uploading content -// The option parameter does not need to specify UploadUrl and Jwt, which will come from assigning volume. -func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assignRequest *filer_pb.AssignVolumeRequest, uploadOption *UploadOption, genFileUrlFn func(host, fileId string) string, reader io.Reader) (fileId string, uploadResult *UploadResult, err error, data []byte) { - doUploadFunc := func() error { - - var host string - var auth security.EncodedJwt - - // grpc assign volume - if grpcAssignErr := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, assignErr := client.AssignVolume(context.Background(), assignRequest) - if assignErr != nil { - glog.V(0).Infof("assign volume failure %v: %v", assignRequest, assignErr) - return assignErr - } - if resp.Error != "" { - return fmt.Errorf("assign volume failure %v: %v", assignRequest, resp.Error) - } - - fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth) - loc := resp.Location - host = filerClient.AdjustedUrl(loc) - - return nil - }); grpcAssignErr != nil { - return fmt.Errorf("filerGrpcAddress assign volume: %w", grpcAssignErr) - } - - uploadOption.UploadUrl = genFileUrlFn(host, fileId) - uploadOption.Jwt = auth - - var uploadErr error - uploadResult, uploadErr, data = uploader.doUpload(context.Background(), reader, uploadOption) - return uploadErr - } - if uploadOption.RetryForever { - util.RetryUntil("uploadWithRetryForever", doUploadFunc, func(err error) (shouldContinue bool) { - glog.V(0).Infof("upload content: %v", err) - return true - }) - } else { - uploadErrList := []string{"transport", "is read only"} - err = util.MultiRetry("uploadWithRetry", uploadErrList, doUploadFunc) - } - - return -} +var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", "") // Upload sends a POST request to a volume server to upload the content with adjustable compression level -func (uploader *Uploader) UploadData(ctx context.Context, data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { - uploadResult, err = uploader.retriedUploadData(ctx, data, option) +func UploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { + uploadResult, err = retriedUploadData(data, option) return } // Upload sends a POST request to a volume server to upload the content with fast compression -func (uploader *Uploader) Upload(ctx context.Context, reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) { - uploadResult, err, data = uploader.doUpload(ctx, reader, option) +func Upload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) { + uploadResult, err, data = doUpload(reader, option) return } -func (uploader *Uploader) doUpload(ctx context.Context, reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) { +func doUpload(reader io.Reader, option *UploadOption) (uploadResult *UploadResult, err error, data []byte) { bytesReader, ok := reader.(*util.BytesReader) if ok { data = bytesReader.Bytes } else { data, err = io.ReadAll(reader) if err != nil { - err = fmt.Errorf("read input: %w", err) + err = fmt.Errorf("read input: %v", err) return } } - uploadResult, uploadErr := uploader.retriedUploadData(ctx, data, option) + uploadResult, uploadErr := retriedUploadData(data, option) return uploadResult, uploadErr, data } -func (uploader *Uploader) retriedUploadData(ctx context.Context, data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { +func retriedUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { for i := 0; i < 3; i++ { - if i > 0 { - time.Sleep(time.Millisecond * time.Duration(237*(i+1))) - } - uploadResult, err = uploader.doUploadData(ctx, data, option) + uploadResult, err = doUploadData(data, option) if err == nil { uploadResult.RetryCount = i return + } else { + glog.Warningf("uploading to %s: %v", option.UploadUrl, err) } - glog.WarningfCtx(ctx, "uploading %d to %s: %v", i, option.UploadUrl, err) + time.Sleep(time.Millisecond * time.Duration(237*(i+1))) } return } -func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { +func doUploadData(data []byte, option *UploadOption) (uploadResult *UploadResult, err error) { contentIsGzipped := option.IsInputCompressed shouldGzipNow := false if !option.IsInputCompressed { @@ -233,9 +134,6 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option } else if !iAmSure && option.MimeType == "" && len(data) > 16*1024 { var compressed []byte compressed, err = util.GzipData(data[0:128]) - if err != nil { - return - } shouldGzipNow = len(compressed)*10 < 128*9 // can not compress to less than 90% } } @@ -246,7 +144,7 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option // this could be double copying clearDataLen = len(data) clearData := data - if shouldGzipNow { + if shouldGzipNow && !option.Cipher { compressed, compressErr := util.GzipData(data) // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed)) if compressErr == nil { @@ -266,14 +164,14 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option // encrypt cipherKey := util.GenCipherKey() - encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey) if encryptionErr != nil { - err = fmt.Errorf("encrypt input: %w", encryptionErr) + err = fmt.Errorf("encrypt input: %v", encryptionErr) return } // upload data - uploadResult, err = uploader.upload_content(ctx, func(w io.Writer) (err error) { + uploadResult, err = upload_content(func(w io.Writer) (err error) { _, err = w.Write(encryptedData) return }, len(encryptedData), &UploadOption{ @@ -292,12 +190,9 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option uploadResult.Mime = option.MimeType uploadResult.CipherKey = cipherKey uploadResult.Size = uint32(clearDataLen) - if contentIsGzipped { - uploadResult.Gzip = 1 - } } else { // upload data - uploadResult, err = uploader.upload_content(ctx, func(w io.Writer) (err error) { + uploadResult, err = upload_content(func(w io.Writer) (err error) { _, err = w.Write(data) return }, len(data), &UploadOption{ @@ -308,8 +203,6 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option MimeType: option.MimeType, PairMap: option.PairMap, Jwt: option.Jwt, - Md5: option.Md5, - BytesBuffer: option.BytesBuffer, }) if uploadResult == nil { return @@ -323,18 +216,10 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option return uploadResult, err } -func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) { - var body_writer *multipart.Writer - var reqReader *bytes.Reader - var buf *bytebufferpool.ByteBuffer - if option.BytesBuffer == nil { - buf = GetBuffer() - defer PutBuffer(buf) - body_writer = multipart.NewWriter(buf) - } else { - option.BytesBuffer.Reset() - body_writer = multipart.NewWriter(option.BytesBuffer) - } +func upload_content(fillBufferFunction func(w io.Writer) error, originalDataSize int, option *UploadOption) (*UploadResult, error) { + buf := GetBuffer() + defer PutBuffer(buf) + body_writer := multipart.NewWriter(buf) h := make(textproto.MIMEHeader) filename := fileNameEscaper.Replace(option.Filename) h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, filename)) @@ -348,32 +233,25 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction if option.IsInputCompressed { h.Set("Content-Encoding", "gzip") } - if option.Md5 != "" { - h.Set("Content-MD5", option.Md5) - } file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { - glog.V(0).InfolnCtx(ctx, "error creating form file", cp_err.Error()) + glog.V(0).Infoln("error creating form file", cp_err.Error()) return nil, cp_err } if err := fillBufferFunction(file_writer); err != nil { - glog.V(0).InfolnCtx(ctx, "error copying data", err) + glog.V(0).Infoln("error copying data", err) return nil, err } content_type := body_writer.FormDataContentType() if err := body_writer.Close(); err != nil { - glog.V(0).InfolnCtx(ctx, "error closing body", err) + glog.V(0).Infoln("error closing body", err) return nil, err } - if option.BytesBuffer == nil { - reqReader = bytes.NewReader(buf.Bytes()) - } else { - reqReader = bytes.NewReader(option.BytesBuffer.Bytes()) - } - req, postErr := http.NewRequest(http.MethodPost, option.UploadUrl, reqReader) + + req, postErr := http.NewRequest("POST", option.UploadUrl, bytes.NewReader(buf.Bytes())) if postErr != nil { - glog.V(1).InfofCtx(ctx, "create upload request %s: %v", option.UploadUrl, postErr) + glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr) return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr) } req.Header.Set("Content-Type", content_type) @@ -383,25 +261,20 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction if option.Jwt != "" { req.Header.Set("Authorization", "BEARER "+string(option.Jwt)) } - - request_id.InjectToRequest(ctx, req) - // print("+") - resp, post_err := uploader.httpClient.Do(req) - defer util_http.CloseResponse(resp) + resp, post_err := HttpClient.Do(req) if post_err != nil { if strings.Contains(post_err.Error(), "connection reset by peer") || strings.Contains(post_err.Error(), "use of closed network connection") { - glog.V(1).InfofCtx(ctx, "repeat error upload request %s: %v", option.UploadUrl, postErr) - stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc() - resp, post_err = uploader.httpClient.Do(req) - defer util_http.CloseResponse(resp) + glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr) + resp, post_err = HttpClient.Do(req) } } if post_err != nil { return nil, fmt.Errorf("upload %s %d bytes to %v: %v", option.Filename, originalDataSize, option.UploadUrl, post_err) } // print("-") + defer util.CloseResponse(resp) var ret UploadResult etag := getEtag(resp) @@ -412,13 +285,13 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction resp_body, ra_err := io.ReadAll(resp.Body) if ra_err != nil { - return nil, fmt.Errorf("read response body %v: %w", option.UploadUrl, ra_err) + return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err) } unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.ErrorfCtx(ctx, "unmarshal %s: %v", option.UploadUrl, string(resp_body)) - return nil, fmt.Errorf("unmarshal %v: %w", option.UploadUrl, unmarshal_err) + glog.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body)) + return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err) } if ret.Error != "" { return nil, fmt.Errorf("unmarshalled error %v: %v", option.UploadUrl, ret.Error) diff --git a/weed/pb/Makefile b/weed/pb/Makefile index e5db76426..a8992bde2 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -10,12 +10,6 @@ gen: protoc iam.proto --go_out=./iam_pb --go-grpc_out=./iam_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative protoc mount.proto --go_out=./mount_pb --go-grpc_out=./mount_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative protoc s3.proto --go_out=./s3_pb --go-grpc_out=./s3_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative - protoc mq_broker.proto --go_out=./mq_pb --go-grpc_out=./mq_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative - protoc mq_schema.proto --go_out=./schema_pb --go-grpc_out=./schema_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative - protoc mq_agent.proto --go_out=./mq_agent_pb --go-grpc_out=./mq_agent_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative - protoc worker.proto --go_out=./worker_pb --go-grpc_out=./worker_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative + protoc messaging.proto --go_out=./messaging_pb --go-grpc_out=./messaging_pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative # protoc filer.proto --java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto - -fbs: - flatc --go -o . --go-namespace message_fbs message.fbs diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 9257996ed..bd0932cb8 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package filer_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -54,15 +54,18 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } - rpc TraverseBfsMetadata (TraverseBfsMetadataRequest) returns (stream TraverseBfsMetadataResponse) { - } - rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { } rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { } + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + rpc KvGet (KvGetRequest) returns (KvGetResponse) { } @@ -71,16 +74,6 @@ service SeaweedFiler { rpc CacheRemoteObjectToLocalCluster (CacheRemoteObjectToLocalClusterRequest) returns (CacheRemoteObjectToLocalClusterResponse) { } - - rpc DistributedLock(LockRequest) returns (LockResponse) { - } - rpc DistributedUnlock(UnlockRequest) returns (UnlockResponse) { - } - rpc FindLockOwner(FindLockOwnerRequest) returns (FindLockOwnerResponse) { - } - // distributed lock management internal use only - rpc TransferLocks(TransferLocksRequest) returns (TransferLocksResponse) { - } } ////////////////////////////////////////////////// @@ -125,7 +118,6 @@ message Entry { RemoteEntry remote_entry = 10; int64 quota = 11; // for bucket only. Positive/Negative means enabled/disabled. - int64 worm_enforced_at_ts_ns = 12; } message FullEntry { @@ -142,18 +134,11 @@ message EventNotification { repeated int32 signatures = 6; } -enum SSEType { - NONE = 0; // No server-side encryption - SSE_C = 1; // Server-Side Encryption with Customer-Provided Keys - SSE_KMS = 2; // Server-Side Encryption with KMS-Managed Keys - SSE_S3 = 3; // Server-Side Encryption with S3-Managed Keys -} - message FileChunk { string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; - int64 modified_ts_ns = 4; + int64 mtime = 4; string e_tag = 5; string source_file_id = 6; // to be deprecated FileId fid = 7; @@ -161,8 +146,6 @@ message FileChunk { bytes cipher_key = 9; bool is_compressed = 10; bool is_chunk_manifest = 11; // content is a list of FileChunks - SSEType sse_type = 12; // Server-side encryption type - bytes sse_metadata = 13; // Serialized SSE metadata for this chunk (SSE-C, SSE-KMS, or SSE-S3) } message FileChunkManifest { @@ -231,7 +214,6 @@ message DeleteEntryRequest { bool ignore_recursive_error = 6; bool is_from_other_cluster = 7; repeated int32 signatures = 8; - int64 if_not_modified_after = 9; } message DeleteEntryResponse { @@ -295,7 +277,6 @@ message Location { string url = 1; string public_url = 2; uint32 grpc_port = 3; - string data_center = 4; } message LookupVolumeResponse { map locations_map = 1; @@ -355,8 +336,6 @@ message GetFilerConfigurationResponse { string version = 11; string cluster_id = 12; string filer_group = 13; - int32 major_version = 14; - int32 minor_version = 15; } message SubscribeMetadataRequest { @@ -367,8 +346,6 @@ message SubscribeMetadataRequest { repeated string path_prefixes = 6; int32 client_id = 7; int64 until_ns = 8; - int32 client_epoch = 9; - repeated string directories = 10; // exact directory to watch } message SubscribeMetadataResponse { string directory = 1; @@ -376,21 +353,10 @@ message SubscribeMetadataResponse { int64 ts_ns = 3; } -message TraverseBfsMetadataRequest { - string directory = 1; - repeated string excluded_prefixes = 2; -} -message TraverseBfsMetadataResponse { - string directory = 1; - Entry entry = 2; -} - message LogEntry { int64 ts_ns = 1; int32 partition_key_hash = 2; bytes data = 3; - bytes key = 4; - int64 offset = 5; // Sequential offset within partition } message KeepConnectedRequest { @@ -451,11 +417,6 @@ message FilerConf { string data_center = 9; string rack = 10; string data_node = 11; - uint32 max_file_name_length = 12; - bool disable_chunk_deletion = 13; - bool worm = 14; - uint64 worm_grace_period_seconds = 15; - uint64 worm_retention_time_seconds = 16; } repeated PathConf locations = 2; } @@ -470,47 +431,3 @@ message CacheRemoteObjectToLocalClusterRequest { message CacheRemoteObjectToLocalClusterResponse { Entry entry = 1; } - -///////////////////////// -// distributed lock management -///////////////////////// -message LockRequest { - string name = 1; - int64 seconds_to_lock = 2; - string renew_token = 3; - bool is_moved = 4; - string owner = 5; -} -message LockResponse { - string renew_token = 1; - string lock_owner = 2; - string lock_host_moved_to = 3; - string error = 4; -} -message UnlockRequest { - string name = 1; - string renew_token = 2; - bool is_moved = 3; -} -message UnlockResponse { - string error = 1; - string moved_to = 2; -} -message FindLockOwnerRequest { - string name = 1; - bool is_moved = 2; -} -message FindLockOwnerResponse { - string owner = 1; -} -message Lock { - string name = 1; - string renew_token = 2; - int64 expired_at_ns = 3; - string owner = 4; -} -message TransferLocksRequest { - repeated Lock locks = 1; -} -message TransferLocksResponse { -} diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 31de4e652..4ac560855 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: filer.proto package filer_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -21,71 +20,22 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type SSEType int32 - -const ( - SSEType_NONE SSEType = 0 // No server-side encryption - SSEType_SSE_C SSEType = 1 // Server-Side Encryption with Customer-Provided Keys - SSEType_SSE_KMS SSEType = 2 // Server-Side Encryption with KMS-Managed Keys - SSEType_SSE_S3 SSEType = 3 // Server-Side Encryption with S3-Managed Keys -) - -// Enum value maps for SSEType. -var ( - SSEType_name = map[int32]string{ - 0: "NONE", - 1: "SSE_C", - 2: "SSE_KMS", - 3: "SSE_S3", - } - SSEType_value = map[string]int32{ - "NONE": 0, - "SSE_C": 1, - "SSE_KMS": 2, - "SSE_S3": 3, - } -) - -func (x SSEType) Enum() *SSEType { - p := new(SSEType) - *p = x - return p -} - -func (x SSEType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SSEType) Descriptor() protoreflect.EnumDescriptor { - return file_filer_proto_enumTypes[0].Descriptor() -} - -func (SSEType) Type() protoreflect.EnumType { - return &file_filer_proto_enumTypes[0] -} - -func (x SSEType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SSEType.Descriptor instead. -func (SSEType) EnumDescriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{0} -} - type LookupDirectoryEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (x *LookupDirectoryEntryRequest) Reset() { *x = LookupDirectoryEntryRequest{} - mi := &file_filer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupDirectoryEntryRequest) String() string { @@ -96,7 +46,7 @@ func (*LookupDirectoryEntryRequest) ProtoMessage() {} func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -126,17 +76,20 @@ func (x *LookupDirectoryEntryRequest) GetName() string { } type LookupDirectoryEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` } func (x *LookupDirectoryEntryResponse) Reset() { *x = LookupDirectoryEntryResponse{} - mi := &file_filer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupDirectoryEntryResponse) String() string { @@ -147,7 +100,7 @@ func (*LookupDirectoryEntryResponse) ProtoMessage() {} func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -170,21 +123,24 @@ func (x *LookupDirectoryEntryResponse) GetEntry() *Entry { } type ListEntriesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` - StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"` - InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"` - Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"` + InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"` + Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` } func (x *ListEntriesRequest) Reset() { *x = ListEntriesRequest{} - mi := &file_filer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListEntriesRequest) String() string { @@ -195,7 +151,7 @@ func (*ListEntriesRequest) ProtoMessage() {} func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -246,17 +202,20 @@ func (x *ListEntriesRequest) GetLimit() uint32 { } type ListEntriesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` } func (x *ListEntriesResponse) Reset() { *x = ListEntriesResponse{} - mi := &file_filer_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListEntriesResponse) String() string { @@ -267,7 +226,7 @@ func (*ListEntriesResponse) ProtoMessage() {} func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -290,21 +249,24 @@ func (x *ListEntriesResponse) GetEntry() *Entry { } type RemoteEntry struct { - state protoimpl.MessageState `protogen:"open.v1"` - StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName,proto3" json:"storage_name,omitempty"` - LastLocalSyncTsNs int64 `protobuf:"varint,2,opt,name=last_local_sync_ts_ns,json=lastLocalSyncTsNs,proto3" json:"last_local_sync_ts_ns,omitempty"` - RemoteETag string `protobuf:"bytes,3,opt,name=remote_e_tag,json=remoteETag,proto3" json:"remote_e_tag,omitempty"` - RemoteMtime int64 `protobuf:"varint,4,opt,name=remote_mtime,json=remoteMtime,proto3" json:"remote_mtime,omitempty"` - RemoteSize int64 `protobuf:"varint,5,opt,name=remote_size,json=remoteSize,proto3" json:"remote_size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName,proto3" json:"storage_name,omitempty"` + LastLocalSyncTsNs int64 `protobuf:"varint,2,opt,name=last_local_sync_ts_ns,json=lastLocalSyncTsNs,proto3" json:"last_local_sync_ts_ns,omitempty"` + RemoteETag string `protobuf:"bytes,3,opt,name=remote_e_tag,json=remoteETag,proto3" json:"remote_e_tag,omitempty"` + RemoteMtime int64 `protobuf:"varint,4,opt,name=remote_mtime,json=remoteMtime,proto3" json:"remote_mtime,omitempty"` + RemoteSize int64 `protobuf:"varint,5,opt,name=remote_size,json=remoteSize,proto3" json:"remote_size,omitempty"` } func (x *RemoteEntry) Reset() { *x = RemoteEntry{} - mi := &file_filer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RemoteEntry) String() string { @@ -315,7 +277,7 @@ func (*RemoteEntry) ProtoMessage() {} func (x *RemoteEntry) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -366,27 +328,29 @@ func (x *RemoteEntry) GetRemoteSize() int64 { } type Entry struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` - Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` - Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` - Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - HardLinkId []byte `protobuf:"bytes,7,opt,name=hard_link_id,json=hardLinkId,proto3" json:"hard_link_id,omitempty"` - HardLinkCounter int32 `protobuf:"varint,8,opt,name=hard_link_counter,json=hardLinkCounter,proto3" json:"hard_link_counter,omitempty"` // only exists in hard link meta data - Content []byte `protobuf:"bytes,9,opt,name=content,proto3" json:"content,omitempty"` // if not empty, the file content - RemoteEntry *RemoteEntry `protobuf:"bytes,10,opt,name=remote_entry,json=remoteEntry,proto3" json:"remote_entry,omitempty"` - Quota int64 `protobuf:"varint,11,opt,name=quota,proto3" json:"quota,omitempty"` // for bucket only. Positive/Negative means enabled/disabled. - WormEnforcedAtTsNs int64 `protobuf:"varint,12,opt,name=worm_enforced_at_ts_ns,json=wormEnforcedAtTsNs,proto3" json:"worm_enforced_at_ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` + Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HardLinkId []byte `protobuf:"bytes,7,opt,name=hard_link_id,json=hardLinkId,proto3" json:"hard_link_id,omitempty"` + HardLinkCounter int32 `protobuf:"varint,8,opt,name=hard_link_counter,json=hardLinkCounter,proto3" json:"hard_link_counter,omitempty"` // only exists in hard link meta data + Content []byte `protobuf:"bytes,9,opt,name=content,proto3" json:"content,omitempty"` // if not empty, the file content + RemoteEntry *RemoteEntry `protobuf:"bytes,10,opt,name=remote_entry,json=remoteEntry,proto3" json:"remote_entry,omitempty"` + Quota int64 `protobuf:"varint,11,opt,name=quota,proto3" json:"quota,omitempty"` // for bucket only. Positive/Negative means enabled/disabled. } func (x *Entry) Reset() { *x = Entry{} - mi := &file_filer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Entry) String() string { @@ -397,7 +361,7 @@ func (*Entry) ProtoMessage() {} func (x *Entry) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -482,26 +446,22 @@ func (x *Entry) GetQuota() int64 { return 0 } -func (x *Entry) GetWormEnforcedAtTsNs() int64 { - if x != nil { - return x.WormEnforcedAtTsNs - } - return 0 -} - type FullEntry struct { - state protoimpl.MessageState `protogen:"open.v1"` - Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` } func (x *FullEntry) Reset() { *x = FullEntry{} - mi := &file_filer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FullEntry) String() string { @@ -512,7 +472,7 @@ func (*FullEntry) ProtoMessage() {} func (x *FullEntry) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -542,22 +502,25 @@ func (x *FullEntry) GetEntry() *Entry { } type EventNotification struct { - state protoimpl.MessageState `protogen:"open.v1"` - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"` - NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"` - IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` - Signatures []int32 `protobuf:"varint,6,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,6,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } func (x *EventNotification) Reset() { *x = EventNotification{} - mi := &file_filer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *EventNotification) String() string { @@ -568,7 +531,7 @@ func (*EventNotification) ProtoMessage() {} func (x *EventNotification) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -626,29 +589,30 @@ func (x *EventNotification) GetSignatures() []int32 { } type FileChunk struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated - Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` - Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - ModifiedTsNs int64 `protobuf:"varint,4,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"` - ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` - SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated - Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` - SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"` - CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` - IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"` - IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks - SseType SSEType `protobuf:"varint,12,opt,name=sse_type,json=sseType,proto3,enum=filer_pb.SSEType" json:"sse_type,omitempty"` // Server-side encryption type - SseMetadata []byte `protobuf:"bytes,13,opt,name=sse_metadata,json=sseMetadata,proto3" json:"sse_metadata,omitempty"` // Serialized SSE metadata for this chunk (SSE-C, SSE-KMS, or SSE-S3) - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"` + ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` + SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated + Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` + SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"` + IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks } func (x *FileChunk) Reset() { *x = FileChunk{} - mi := &file_filer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileChunk) String() string { @@ -659,7 +623,7 @@ func (*FileChunk) ProtoMessage() {} func (x *FileChunk) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -695,9 +659,9 @@ func (x *FileChunk) GetSize() uint64 { return 0 } -func (x *FileChunk) GetModifiedTsNs() int64 { +func (x *FileChunk) GetMtime() int64 { if x != nil { - return x.ModifiedTsNs + return x.Mtime } return 0 } @@ -751,32 +715,21 @@ func (x *FileChunk) GetIsChunkManifest() bool { return false } -func (x *FileChunk) GetSseType() SSEType { - if x != nil { - return x.SseType - } - return SSEType_NONE -} - -func (x *FileChunk) GetSseMetadata() []byte { - if x != nil { - return x.SseMetadata - } - return nil -} - type FileChunkManifest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Chunks []*FileChunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Chunks []*FileChunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` } func (x *FileChunkManifest) Reset() { *x = FileChunkManifest{} - mi := &file_filer_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileChunkManifest) String() string { @@ -787,7 +740,7 @@ func (*FileChunkManifest) ProtoMessage() {} func (x *FileChunkManifest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -810,19 +763,22 @@ func (x *FileChunkManifest) GetChunks() []*FileChunk { } type FileId struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"` } func (x *FileId) Reset() { *x = FileId{} - mi := &file_filer_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FileId) String() string { @@ -833,7 +789,7 @@ func (*FileId) ProtoMessage() {} func (x *FileId) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -870,30 +826,33 @@ func (x *FileId) GetCookie() uint32 { } type FuseAttributes struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` - Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds - FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` - Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"` - Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds - Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"` - TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` - UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs - GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs - SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"` - Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"` - Rdev uint32 `protobuf:"varint,16,opt,name=rdev,proto3" json:"rdev,omitempty"` - Inode uint64 `protobuf:"varint,17,opt,name=inode,proto3" json:"inode,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds + FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` + Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"` + Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds + Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"` + TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs + GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs + SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"` + Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"` + Rdev uint32 `protobuf:"varint,16,opt,name=rdev,proto3" json:"rdev,omitempty"` + Inode uint64 `protobuf:"varint,17,opt,name=inode,proto3" json:"inode,omitempty"` } func (x *FuseAttributes) Reset() { *x = FuseAttributes{} - mi := &file_filer_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FuseAttributes) String() string { @@ -904,7 +863,7 @@ func (*FuseAttributes) ProtoMessage() {} func (x *FuseAttributes) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1018,22 +977,25 @@ func (x *FuseAttributes) GetInode() uint64 { } type CreateEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` - OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"` - IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` - Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - SkipCheckParentDirectory bool `protobuf:"varint,6,opt,name=skip_check_parent_directory,json=skipCheckParentDirectory,proto3" json:"skip_check_parent_directory,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` + SkipCheckParentDirectory bool `protobuf:"varint,6,opt,name=skip_check_parent_directory,json=skipCheckParentDirectory,proto3" json:"skip_check_parent_directory,omitempty"` } func (x *CreateEntryRequest) Reset() { *x = CreateEntryRequest{} - mi := &file_filer_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateEntryRequest) String() string { @@ -1044,7 +1006,7 @@ func (*CreateEntryRequest) ProtoMessage() {} func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1102,17 +1064,20 @@ func (x *CreateEntryRequest) GetSkipCheckParentDirectory() bool { } type CreateEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *CreateEntryResponse) Reset() { *x = CreateEntryResponse{} - mi := &file_filer_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateEntryResponse) String() string { @@ -1123,7 +1088,7 @@ func (*CreateEntryResponse) ProtoMessage() {} func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[13] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1146,20 +1111,23 @@ func (x *CreateEntryResponse) GetError() string { } type UpdateEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` - IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` - Signatures []int32 `protobuf:"varint,4,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,4,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } func (x *UpdateEntryRequest) Reset() { *x = UpdateEntryRequest{} - mi := &file_filer_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *UpdateEntryRequest) String() string { @@ -1170,7 +1138,7 @@ func (*UpdateEntryRequest) ProtoMessage() {} func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[14] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1214,16 +1182,18 @@ func (x *UpdateEntryRequest) GetSignatures() []int32 { } type UpdateEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *UpdateEntryResponse) Reset() { *x = UpdateEntryResponse{} - mi := &file_filer_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *UpdateEntryResponse) String() string { @@ -1234,7 +1204,7 @@ func (*UpdateEntryResponse) ProtoMessage() {} func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[15] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1250,19 +1220,22 @@ func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { } type AppendToEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"` - Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` } func (x *AppendToEntryRequest) Reset() { *x = AppendToEntryRequest{} - mi := &file_filer_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AppendToEntryRequest) String() string { @@ -1273,7 +1246,7 @@ func (*AppendToEntryRequest) ProtoMessage() {} func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[16] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1310,16 +1283,18 @@ func (x *AppendToEntryRequest) GetChunks() []*FileChunk { } type AppendToEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *AppendToEntryResponse) Reset() { *x = AppendToEntryResponse{} - mi := &file_filer_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AppendToEntryResponse) String() string { @@ -1330,7 +1305,7 @@ func (*AppendToEntryResponse) ProtoMessage() {} func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[17] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1346,25 +1321,27 @@ func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { } type DeleteEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // bool is_directory = 3; IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"` IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"` IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"` IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` Signatures []int32 `protobuf:"varint,8,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - IfNotModifiedAfter int64 `protobuf:"varint,9,opt,name=if_not_modified_after,json=ifNotModifiedAfter,proto3" json:"if_not_modified_after,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *DeleteEntryRequest) Reset() { *x = DeleteEntryRequest{} - mi := &file_filer_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteEntryRequest) String() string { @@ -1375,7 +1352,7 @@ func (*DeleteEntryRequest) ProtoMessage() {} func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[18] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1439,25 +1416,21 @@ func (x *DeleteEntryRequest) GetSignatures() []int32 { return nil } -func (x *DeleteEntryRequest) GetIfNotModifiedAfter() int64 { - if x != nil { - return x.IfNotModifiedAfter - } - return 0 -} - type DeleteEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *DeleteEntryResponse) Reset() { *x = DeleteEntryResponse{} - mi := &file_filer_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteEntryResponse) String() string { @@ -1468,7 +1441,7 @@ func (*DeleteEntryResponse) ProtoMessage() {} func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[19] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1491,21 +1464,24 @@ func (x *DeleteEntryResponse) GetError() string { } type AtomicRenameEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` - OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` - NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` - NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` - Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` + Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } func (x *AtomicRenameEntryRequest) Reset() { *x = AtomicRenameEntryRequest{} - mi := &file_filer_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AtomicRenameEntryRequest) String() string { @@ -1516,7 +1492,7 @@ func (*AtomicRenameEntryRequest) ProtoMessage() {} func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[20] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1567,16 +1543,18 @@ func (x *AtomicRenameEntryRequest) GetSignatures() []int32 { } type AtomicRenameEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *AtomicRenameEntryResponse) Reset() { *x = AtomicRenameEntryResponse{} - mi := &file_filer_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AtomicRenameEntryResponse) String() string { @@ -1587,7 +1565,7 @@ func (*AtomicRenameEntryResponse) ProtoMessage() {} func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[21] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1603,21 +1581,24 @@ func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { } type StreamRenameEntryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` - OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` - NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` - NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` - Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` + Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } func (x *StreamRenameEntryRequest) Reset() { *x = StreamRenameEntryRequest{} - mi := &file_filer_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StreamRenameEntryRequest) String() string { @@ -1628,7 +1609,7 @@ func (*StreamRenameEntryRequest) ProtoMessage() {} func (x *StreamRenameEntryRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[22] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1679,19 +1660,22 @@ func (x *StreamRenameEntryRequest) GetSignatures() []int32 { } type StreamRenameEntryResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` - TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` + TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` } func (x *StreamRenameEntryResponse) Reset() { *x = StreamRenameEntryResponse{} - mi := &file_filer_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StreamRenameEntryResponse) String() string { @@ -1702,7 +1686,7 @@ func (*StreamRenameEntryResponse) ProtoMessage() {} func (x *StreamRenameEntryResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[23] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1739,25 +1723,28 @@ func (x *StreamRenameEntryResponse) GetTsNs() int64 { } type AssignVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` - TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"` - Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,9,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` - DiskType string `protobuf:"bytes,8,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,9,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` + DiskType string `protobuf:"bytes,8,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *AssignVolumeRequest) Reset() { *x = AssignVolumeRequest{} - mi := &file_filer_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AssignVolumeRequest) String() string { @@ -1768,7 +1755,7 @@ func (*AssignVolumeRequest) ProtoMessage() {} func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[24] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1847,23 +1834,26 @@ func (x *AssignVolumeRequest) GetDiskType() string { } type AssignVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` - Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"` - Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"` - Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"` - Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"` - Location *Location `protobuf:"bytes,9,opt,name=location,proto3" json:"location,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"` + Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"` + Location *Location `protobuf:"bytes,9,opt,name=location,proto3" json:"location,omitempty"` } func (x *AssignVolumeResponse) Reset() { *x = AssignVolumeResponse{} - mi := &file_filer_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AssignVolumeResponse) String() string { @@ -1874,7 +1864,7 @@ func (*AssignVolumeResponse) ProtoMessage() {} func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[25] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1939,17 +1929,20 @@ func (x *AssignVolumeResponse) GetLocation() *Location { } type LookupVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } func (x *LookupVolumeRequest) Reset() { *x = LookupVolumeRequest{} - mi := &file_filer_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupVolumeRequest) String() string { @@ -1960,7 +1953,7 @@ func (*LookupVolumeRequest) ProtoMessage() {} func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[26] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1983,17 +1976,20 @@ func (x *LookupVolumeRequest) GetVolumeIds() []string { } type Locations struct { - state protoimpl.MessageState `protogen:"open.v1"` - Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` } func (x *Locations) Reset() { *x = Locations{} - mi := &file_filer_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Locations) String() string { @@ -2004,7 +2000,7 @@ func (*Locations) ProtoMessage() {} func (x *Locations) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[27] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2027,20 +2023,22 @@ func (x *Locations) GetLocations() []*Location { } type Location struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` - GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` } func (x *Location) Reset() { *x = Location{} - mi := &file_filer_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Location) String() string { @@ -2051,7 +2049,7 @@ func (*Location) ProtoMessage() {} func (x *Location) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[28] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2087,25 +2085,21 @@ func (x *Location) GetGrpcPort() uint32 { return 0 } -func (x *Location) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - type LookupVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *LookupVolumeResponse) Reset() { *x = LookupVolumeResponse{} - mi := &file_filer_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupVolumeResponse) String() string { @@ -2116,7 +2110,7 @@ func (*LookupVolumeResponse) ProtoMessage() {} func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[29] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2139,17 +2133,20 @@ func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { } type Collection struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *Collection) Reset() { *x = Collection{} - mi := &file_filer_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Collection) String() string { @@ -2160,7 +2157,7 @@ func (*Collection) ProtoMessage() {} func (x *Collection) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[30] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2183,18 +2180,21 @@ func (x *Collection) GetName() string { } type CollectionListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` - IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` } func (x *CollectionListRequest) Reset() { *x = CollectionListRequest{} - mi := &file_filer_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionListRequest) String() string { @@ -2205,7 +2205,7 @@ func (*CollectionListRequest) ProtoMessage() {} func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[31] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2235,17 +2235,20 @@ func (x *CollectionListRequest) GetIncludeEcVolumes() bool { } type CollectionListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } func (x *CollectionListResponse) Reset() { *x = CollectionListResponse{} - mi := &file_filer_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionListResponse) String() string { @@ -2256,7 +2259,7 @@ func (*CollectionListResponse) ProtoMessage() {} func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[32] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2279,17 +2282,20 @@ func (x *CollectionListResponse) GetCollections() []*Collection { } type DeleteCollectionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *DeleteCollectionRequest) Reset() { *x = DeleteCollectionRequest{} - mi := &file_filer_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteCollectionRequest) String() string { @@ -2300,7 +2306,7 @@ func (*DeleteCollectionRequest) ProtoMessage() {} func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[33] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2323,16 +2329,18 @@ func (x *DeleteCollectionRequest) GetCollection() string { } type DeleteCollectionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *DeleteCollectionResponse) Reset() { *x = DeleteCollectionResponse{} - mi := &file_filer_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteCollectionResponse) String() string { @@ -2343,7 +2351,7 @@ func (*DeleteCollectionResponse) ProtoMessage() {} func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[34] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2359,20 +2367,23 @@ func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { } type StatisticsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` - DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *StatisticsRequest) Reset() { *x = StatisticsRequest{} - mi := &file_filer_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatisticsRequest) String() string { @@ -2383,7 +2394,7 @@ func (*StatisticsRequest) ProtoMessage() {} func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[35] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2427,19 +2438,22 @@ func (x *StatisticsRequest) GetDiskType() string { } type StatisticsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` } func (x *StatisticsResponse) Reset() { *x = StatisticsResponse{} - mi := &file_filer_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatisticsResponse) String() string { @@ -2450,7 +2464,7 @@ func (*StatisticsResponse) ProtoMessage() {} func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[36] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2487,18 +2501,21 @@ func (x *StatisticsResponse) GetFileCount() uint64 { } type PingRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself - TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself + TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` } func (x *PingRequest) Reset() { *x = PingRequest{} - mi := &file_filer_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingRequest) String() string { @@ -2509,7 +2526,7 @@ func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[37] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2539,19 +2556,22 @@ func (x *PingRequest) GetTargetType() string { } type PingResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` - RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` - StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` + RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` + StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` } func (x *PingResponse) Reset() { *x = PingResponse{} - mi := &file_filer_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingResponse) String() string { @@ -2562,7 +2582,7 @@ func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[38] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2599,16 +2619,18 @@ func (x *PingResponse) GetStopTimeNs() int64 { } type GetFilerConfigurationRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *GetFilerConfigurationRequest) Reset() { *x = GetFilerConfigurationRequest{} - mi := &file_filer_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetFilerConfigurationRequest) String() string { @@ -2619,7 +2641,7 @@ func (*GetFilerConfigurationRequest) ProtoMessage() {} func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[39] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2635,30 +2657,31 @@ func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { } type GetFilerConfigurationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"` - DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"` - Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"` - Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"` - MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` - MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"` - Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"` - ClusterId string `protobuf:"bytes,12,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - FilerGroup string `protobuf:"bytes,13,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` - MajorVersion int32 `protobuf:"varint,14,opt,name=major_version,json=majorVersion,proto3" json:"major_version,omitempty"` - MinorVersion int32 `protobuf:"varint,15,opt,name=minor_version,json=minorVersion,proto3" json:"minor_version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"` + Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"` + MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"` + Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"` + ClusterId string `protobuf:"bytes,12,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + FilerGroup string `protobuf:"bytes,13,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` } func (x *GetFilerConfigurationResponse) Reset() { *x = GetFilerConfigurationResponse{} - mi := &file_filer_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetFilerConfigurationResponse) String() string { @@ -2669,7 +2692,7 @@ func (*GetFilerConfigurationResponse) ProtoMessage() {} func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[40] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2768,40 +2791,27 @@ func (x *GetFilerConfigurationResponse) GetFilerGroup() string { return "" } -func (x *GetFilerConfigurationResponse) GetMajorVersion() int32 { - if x != nil { - return x.MajorVersion - } - return 0 -} - -func (x *GetFilerConfigurationResponse) GetMinorVersion() int32 { - if x != nil { - return x.MinorVersion - } - return 0 -} - type SubscribeMetadataRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` - PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` - SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` - Signature int32 `protobuf:"varint,4,opt,name=signature,proto3" json:"signature,omitempty"` - PathPrefixes []string `protobuf:"bytes,6,rep,name=path_prefixes,json=pathPrefixes,proto3" json:"path_prefixes,omitempty"` - ClientId int32 `protobuf:"varint,7,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - UntilNs int64 `protobuf:"varint,8,opt,name=until_ns,json=untilNs,proto3" json:"until_ns,omitempty"` - ClientEpoch int32 `protobuf:"varint,9,opt,name=client_epoch,json=clientEpoch,proto3" json:"client_epoch,omitempty"` - Directories []string `protobuf:"bytes,10,rep,name=directories,proto3" json:"directories,omitempty"` // exact directory to watch - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` + PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` + SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + Signature int32 `protobuf:"varint,4,opt,name=signature,proto3" json:"signature,omitempty"` + PathPrefixes []string `protobuf:"bytes,6,rep,name=path_prefixes,json=pathPrefixes,proto3" json:"path_prefixes,omitempty"` + ClientId int32 `protobuf:"varint,7,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + UntilNs int64 `protobuf:"varint,8,opt,name=until_ns,json=untilNs,proto3" json:"until_ns,omitempty"` } func (x *SubscribeMetadataRequest) Reset() { *x = SubscribeMetadataRequest{} - mi := &file_filer_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SubscribeMetadataRequest) String() string { @@ -2812,7 +2822,7 @@ func (*SubscribeMetadataRequest) ProtoMessage() {} func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[41] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2876,34 +2886,23 @@ func (x *SubscribeMetadataRequest) GetUntilNs() int64 { return 0 } -func (x *SubscribeMetadataRequest) GetClientEpoch() int32 { - if x != nil { - return x.ClientEpoch - } - return 0 -} - -func (x *SubscribeMetadataRequest) GetDirectories() []string { - if x != nil { - return x.Directories - } - return nil -} - type SubscribeMetadataResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` - TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` + TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` } func (x *SubscribeMetadataResponse) Reset() { *x = SubscribeMetadataResponse{} - mi := &file_filer_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SubscribeMetadataResponse) String() string { @@ -2914,7 +2913,7 @@ func (*SubscribeMetadataResponse) ProtoMessage() {} func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message { mi := &file_filer_proto_msgTypes[42] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2950,126 +2949,23 @@ func (x *SubscribeMetadataResponse) GetTsNs() int64 { return 0 } -type TraverseBfsMetadataRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - ExcludedPrefixes []string `protobuf:"bytes,2,rep,name=excluded_prefixes,json=excludedPrefixes,proto3" json:"excluded_prefixes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TraverseBfsMetadataRequest) Reset() { - *x = TraverseBfsMetadataRequest{} - mi := &file_filer_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TraverseBfsMetadataRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraverseBfsMetadataRequest) ProtoMessage() {} - -func (x *TraverseBfsMetadataRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[43] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraverseBfsMetadataRequest.ProtoReflect.Descriptor instead. -func (*TraverseBfsMetadataRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{43} -} - -func (x *TraverseBfsMetadataRequest) GetDirectory() string { - if x != nil { - return x.Directory - } - return "" -} - -func (x *TraverseBfsMetadataRequest) GetExcludedPrefixes() []string { - if x != nil { - return x.ExcludedPrefixes - } - return nil -} - -type TraverseBfsMetadataResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TraverseBfsMetadataResponse) Reset() { - *x = TraverseBfsMetadataResponse{} - mi := &file_filer_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TraverseBfsMetadataResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraverseBfsMetadataResponse) ProtoMessage() {} - -func (x *TraverseBfsMetadataResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[44] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraverseBfsMetadataResponse.ProtoReflect.Descriptor instead. -func (*TraverseBfsMetadataResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{44} -} - -func (x *TraverseBfsMetadataResponse) GetDirectory() string { - if x != nil { - return x.Directory - } - return "" -} - -func (x *TraverseBfsMetadataResponse) GetEntry() *Entry { - if x != nil { - return x.Entry - } - return nil -} - type LogEntry struct { - state protoimpl.MessageState `protogen:"open.v1"` - TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - Key []byte `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` - Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"` // Sequential offset within partition - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` + PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } func (x *LogEntry) Reset() { *x = LogEntry{} - mi := &file_filer_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LogEntry) String() string { @@ -3079,8 +2975,8 @@ func (x *LogEntry) String() string { func (*LogEntry) ProtoMessage() {} func (x *LogEntry) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[45] - if x != nil { + mi := &file_filer_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3092,7 +2988,7 @@ func (x *LogEntry) ProtoReflect() protoreflect.Message { // Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. func (*LogEntry) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{45} + return file_filer_proto_rawDescGZIP(), []int{43} } func (x *LogEntry) GetTsNs() int64 { @@ -3116,34 +3012,23 @@ func (x *LogEntry) GetData() []byte { return nil } -func (x *LogEntry) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *LogEntry) GetOffset() int64 { - if x != nil { - return x.Offset - } - return 0 -} - type KeepConnectedRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` + Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` } func (x *KeepConnectedRequest) Reset() { *x = KeepConnectedRequest{} - mi := &file_filer_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KeepConnectedRequest) String() string { @@ -3153,8 +3038,8 @@ func (x *KeepConnectedRequest) String() string { func (*KeepConnectedRequest) ProtoMessage() {} func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[46] - if x != nil { + mi := &file_filer_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3166,7 +3051,7 @@ func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{46} + return file_filer_proto_rawDescGZIP(), []int{44} } func (x *KeepConnectedRequest) GetName() string { @@ -3191,16 +3076,18 @@ func (x *KeepConnectedRequest) GetResources() []string { } type KeepConnectedResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *KeepConnectedResponse) Reset() { *x = KeepConnectedResponse{} - mi := &file_filer_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KeepConnectedResponse) String() string { @@ -3210,8 +3097,8 @@ func (x *KeepConnectedResponse) String() string { func (*KeepConnectedResponse) ProtoMessage() {} func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[47] - if x != nil { + mi := &file_filer_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3223,21 +3110,24 @@ func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead. func (*KeepConnectedResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{47} + return file_filer_proto_rawDescGZIP(), []int{45} } type LocateBrokerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` } func (x *LocateBrokerRequest) Reset() { *x = LocateBrokerRequest{} - mi := &file_filer_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LocateBrokerRequest) String() string { @@ -3247,8 +3137,8 @@ func (x *LocateBrokerRequest) String() string { func (*LocateBrokerRequest) ProtoMessage() {} func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[48] - if x != nil { + mi := &file_filer_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3260,7 +3150,7 @@ func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead. func (*LocateBrokerRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{48} + return file_filer_proto_rawDescGZIP(), []int{46} } func (x *LocateBrokerRequest) GetResource() string { @@ -3271,18 +3161,21 @@ func (x *LocateBrokerRequest) GetResource() string { } type LocateBrokerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"` - Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"` + Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` } func (x *LocateBrokerResponse) Reset() { *x = LocateBrokerResponse{} - mi := &file_filer_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LocateBrokerResponse) String() string { @@ -3292,8 +3185,8 @@ func (x *LocateBrokerResponse) String() string { func (*LocateBrokerResponse) ProtoMessage() {} func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[49] - if x != nil { + mi := &file_filer_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3305,7 +3198,7 @@ func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead. func (*LocateBrokerResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{49} + return file_filer_proto_rawDescGZIP(), []int{47} } func (x *LocateBrokerResponse) GetFound() bool { @@ -3322,21 +3215,24 @@ func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource { return nil } -// /////////////////////// +///////////////////////// // Key-Value operations -// /////////////////////// +///////////////////////// type KvGetRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` } func (x *KvGetRequest) Reset() { *x = KvGetRequest{} - mi := &file_filer_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KvGetRequest) String() string { @@ -3346,8 +3242,8 @@ func (x *KvGetRequest) String() string { func (*KvGetRequest) ProtoMessage() {} func (x *KvGetRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[50] - if x != nil { + mi := &file_filer_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3359,7 +3255,7 @@ func (x *KvGetRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use KvGetRequest.ProtoReflect.Descriptor instead. func (*KvGetRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{50} + return file_filer_proto_rawDescGZIP(), []int{48} } func (x *KvGetRequest) GetKey() []byte { @@ -3370,18 +3266,21 @@ func (x *KvGetRequest) GetKey() []byte { } type KvGetResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } func (x *KvGetResponse) Reset() { *x = KvGetResponse{} - mi := &file_filer_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KvGetResponse) String() string { @@ -3391,8 +3290,8 @@ func (x *KvGetResponse) String() string { func (*KvGetResponse) ProtoMessage() {} func (x *KvGetResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[51] - if x != nil { + mi := &file_filer_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3404,7 +3303,7 @@ func (x *KvGetResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use KvGetResponse.ProtoReflect.Descriptor instead. func (*KvGetResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{51} + return file_filer_proto_rawDescGZIP(), []int{49} } func (x *KvGetResponse) GetValue() []byte { @@ -3422,18 +3321,21 @@ func (x *KvGetResponse) GetError() string { } type KvPutRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (x *KvPutRequest) Reset() { *x = KvPutRequest{} - mi := &file_filer_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KvPutRequest) String() string { @@ -3443,8 +3345,8 @@ func (x *KvPutRequest) String() string { func (*KvPutRequest) ProtoMessage() {} func (x *KvPutRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[52] - if x != nil { + mi := &file_filer_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3456,7 +3358,7 @@ func (x *KvPutRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use KvPutRequest.ProtoReflect.Descriptor instead. func (*KvPutRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{52} + return file_filer_proto_rawDescGZIP(), []int{50} } func (x *KvPutRequest) GetKey() []byte { @@ -3474,17 +3376,20 @@ func (x *KvPutRequest) GetValue() []byte { } type KvPutResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *KvPutResponse) Reset() { *x = KvPutResponse{} - mi := &file_filer_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KvPutResponse) String() string { @@ -3494,8 +3399,8 @@ func (x *KvPutResponse) String() string { func (*KvPutResponse) ProtoMessage() {} func (x *KvPutResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[53] - if x != nil { + mi := &file_filer_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3507,7 +3412,7 @@ func (x *KvPutResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use KvPutResponse.ProtoReflect.Descriptor instead. func (*KvPutResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{53} + return file_filer_proto_rawDescGZIP(), []int{51} } func (x *KvPutResponse) GetError() string { @@ -3517,22 +3422,25 @@ func (x *KvPutResponse) GetError() string { return "" } -// /////////////////////// +///////////////////////// // path-based configurations -// /////////////////////// +///////////////////////// type FilerConf struct { - state protoimpl.MessageState `protogen:"open.v1"` - Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` } func (x *FilerConf) Reset() { *x = FilerConf{} - mi := &file_filer_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FilerConf) String() string { @@ -3542,8 +3450,8 @@ func (x *FilerConf) String() string { func (*FilerConf) ProtoMessage() {} func (x *FilerConf) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[54] - if x != nil { + mi := &file_filer_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3555,7 +3463,7 @@ func (x *FilerConf) ProtoReflect() protoreflect.Message { // Deprecated: Use FilerConf.ProtoReflect.Descriptor instead. func (*FilerConf) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{54} + return file_filer_proto_rawDescGZIP(), []int{52} } func (x *FilerConf) GetVersion() int32 { @@ -3572,22 +3480,25 @@ func (x *FilerConf) GetLocations() []*FilerConf_PathConf { return nil } -// /////////////////////// +///////////////////////// // Remote Storage related -// /////////////////////// +///////////////////////// type CacheRemoteObjectToLocalClusterRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (x *CacheRemoteObjectToLocalClusterRequest) Reset() { *x = CacheRemoteObjectToLocalClusterRequest{} - mi := &file_filer_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CacheRemoteObjectToLocalClusterRequest) String() string { @@ -3597,8 +3508,8 @@ func (x *CacheRemoteObjectToLocalClusterRequest) String() string { func (*CacheRemoteObjectToLocalClusterRequest) ProtoMessage() {} func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[55] - if x != nil { + mi := &file_filer_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3610,7 +3521,7 @@ func (x *CacheRemoteObjectToLocalClusterRequest) ProtoReflect() protoreflect.Mes // Deprecated: Use CacheRemoteObjectToLocalClusterRequest.ProtoReflect.Descriptor instead. func (*CacheRemoteObjectToLocalClusterRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{55} + return file_filer_proto_rawDescGZIP(), []int{53} } func (x *CacheRemoteObjectToLocalClusterRequest) GetDirectory() string { @@ -3628,17 +3539,20 @@ func (x *CacheRemoteObjectToLocalClusterRequest) GetName() string { } type CacheRemoteObjectToLocalClusterResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` } func (x *CacheRemoteObjectToLocalClusterResponse) Reset() { *x = CacheRemoteObjectToLocalClusterResponse{} - mi := &file_filer_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CacheRemoteObjectToLocalClusterResponse) String() string { @@ -3648,8 +3562,8 @@ func (x *CacheRemoteObjectToLocalClusterResponse) String() string { func (*CacheRemoteObjectToLocalClusterResponse) ProtoMessage() {} func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[56] - if x != nil { + mi := &file_filer_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3661,7 +3575,7 @@ func (x *CacheRemoteObjectToLocalClusterResponse) ProtoReflect() protoreflect.Me // Deprecated: Use CacheRemoteObjectToLocalClusterResponse.ProtoReflect.Descriptor instead. func (*CacheRemoteObjectToLocalClusterResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{56} + return file_filer_proto_rawDescGZIP(), []int{54} } func (x *CacheRemoteObjectToLocalClusterResponse) GetEntry() *Entry { @@ -3671,524 +3585,24 @@ func (x *CacheRemoteObjectToLocalClusterResponse) GetEntry() *Entry { return nil } -// /////////////////////// -// distributed lock management -// /////////////////////// -type LockRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - SecondsToLock int64 `protobuf:"varint,2,opt,name=seconds_to_lock,json=secondsToLock,proto3" json:"seconds_to_lock,omitempty"` - RenewToken string `protobuf:"bytes,3,opt,name=renew_token,json=renewToken,proto3" json:"renew_token,omitempty"` - IsMoved bool `protobuf:"varint,4,opt,name=is_moved,json=isMoved,proto3" json:"is_moved,omitempty"` - Owner string `protobuf:"bytes,5,opt,name=owner,proto3" json:"owner,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *LockRequest) Reset() { - *x = LockRequest{} - mi := &file_filer_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *LockRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LockRequest) ProtoMessage() {} - -func (x *LockRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[57] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LockRequest.ProtoReflect.Descriptor instead. -func (*LockRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{57} -} - -func (x *LockRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *LockRequest) GetSecondsToLock() int64 { - if x != nil { - return x.SecondsToLock - } - return 0 -} - -func (x *LockRequest) GetRenewToken() string { - if x != nil { - return x.RenewToken - } - return "" -} - -func (x *LockRequest) GetIsMoved() bool { - if x != nil { - return x.IsMoved - } - return false -} - -func (x *LockRequest) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -type LockResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - RenewToken string `protobuf:"bytes,1,opt,name=renew_token,json=renewToken,proto3" json:"renew_token,omitempty"` - LockOwner string `protobuf:"bytes,2,opt,name=lock_owner,json=lockOwner,proto3" json:"lock_owner,omitempty"` - LockHostMovedTo string `protobuf:"bytes,3,opt,name=lock_host_moved_to,json=lockHostMovedTo,proto3" json:"lock_host_moved_to,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *LockResponse) Reset() { - *x = LockResponse{} - mi := &file_filer_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *LockResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LockResponse) ProtoMessage() {} - -func (x *LockResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[58] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LockResponse.ProtoReflect.Descriptor instead. -func (*LockResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{58} -} - -func (x *LockResponse) GetRenewToken() string { - if x != nil { - return x.RenewToken - } - return "" -} - -func (x *LockResponse) GetLockOwner() string { - if x != nil { - return x.LockOwner - } - return "" -} - -func (x *LockResponse) GetLockHostMovedTo() string { - if x != nil { - return x.LockHostMovedTo - } - return "" -} - -func (x *LockResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -type UnlockRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RenewToken string `protobuf:"bytes,2,opt,name=renew_token,json=renewToken,proto3" json:"renew_token,omitempty"` - IsMoved bool `protobuf:"varint,3,opt,name=is_moved,json=isMoved,proto3" json:"is_moved,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UnlockRequest) Reset() { - *x = UnlockRequest{} - mi := &file_filer_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UnlockRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UnlockRequest) ProtoMessage() {} - -func (x *UnlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[59] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UnlockRequest.ProtoReflect.Descriptor instead. -func (*UnlockRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{59} -} - -func (x *UnlockRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UnlockRequest) GetRenewToken() string { - if x != nil { - return x.RenewToken - } - return "" -} - -func (x *UnlockRequest) GetIsMoved() bool { - if x != nil { - return x.IsMoved - } - return false -} - -type UnlockResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - MovedTo string `protobuf:"bytes,2,opt,name=moved_to,json=movedTo,proto3" json:"moved_to,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UnlockResponse) Reset() { - *x = UnlockResponse{} - mi := &file_filer_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UnlockResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UnlockResponse) ProtoMessage() {} - -func (x *UnlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[60] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UnlockResponse.ProtoReflect.Descriptor instead. -func (*UnlockResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{60} -} - -func (x *UnlockResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *UnlockResponse) GetMovedTo() string { - if x != nil { - return x.MovedTo - } - return "" -} - -type FindLockOwnerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - IsMoved bool `protobuf:"varint,2,opt,name=is_moved,json=isMoved,proto3" json:"is_moved,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FindLockOwnerRequest) Reset() { - *x = FindLockOwnerRequest{} - mi := &file_filer_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FindLockOwnerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FindLockOwnerRequest) ProtoMessage() {} - -func (x *FindLockOwnerRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[61] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FindLockOwnerRequest.ProtoReflect.Descriptor instead. -func (*FindLockOwnerRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{61} -} - -func (x *FindLockOwnerRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FindLockOwnerRequest) GetIsMoved() bool { - if x != nil { - return x.IsMoved - } - return false -} - -type FindLockOwnerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FindLockOwnerResponse) Reset() { - *x = FindLockOwnerResponse{} - mi := &file_filer_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FindLockOwnerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FindLockOwnerResponse) ProtoMessage() {} - -func (x *FindLockOwnerResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[62] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FindLockOwnerResponse.ProtoReflect.Descriptor instead. -func (*FindLockOwnerResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{62} -} - -func (x *FindLockOwnerResponse) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -type Lock struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RenewToken string `protobuf:"bytes,2,opt,name=renew_token,json=renewToken,proto3" json:"renew_token,omitempty"` - ExpiredAtNs int64 `protobuf:"varint,3,opt,name=expired_at_ns,json=expiredAtNs,proto3" json:"expired_at_ns,omitempty"` - Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Lock) Reset() { - *x = Lock{} - mi := &file_filer_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Lock) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Lock) ProtoMessage() {} - -func (x *Lock) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[63] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Lock.ProtoReflect.Descriptor instead. -func (*Lock) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{63} -} - -func (x *Lock) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Lock) GetRenewToken() string { - if x != nil { - return x.RenewToken - } - return "" -} - -func (x *Lock) GetExpiredAtNs() int64 { - if x != nil { - return x.ExpiredAtNs - } - return 0 -} - -func (x *Lock) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -type TransferLocksRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Locks []*Lock `protobuf:"bytes,1,rep,name=locks,proto3" json:"locks,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TransferLocksRequest) Reset() { - *x = TransferLocksRequest{} - mi := &file_filer_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TransferLocksRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransferLocksRequest) ProtoMessage() {} - -func (x *TransferLocksRequest) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[64] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransferLocksRequest.ProtoReflect.Descriptor instead. -func (*TransferLocksRequest) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{64} -} - -func (x *TransferLocksRequest) GetLocks() []*Lock { - if x != nil { - return x.Locks - } - return nil -} - -type TransferLocksResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TransferLocksResponse) Reset() { - *x = TransferLocksResponse{} - mi := &file_filer_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TransferLocksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransferLocksResponse) ProtoMessage() {} - -func (x *TransferLocksResponse) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[65] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransferLocksResponse.ProtoReflect.Descriptor instead. -func (*TransferLocksResponse) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{65} -} - // if found, send the exact address // if not found, send the full list of existing brokers type LocateBrokerResponse_Resource struct { - state protoimpl.MessageState `protogen:"open.v1"` - GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` - ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` + ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"` } func (x *LocateBrokerResponse_Resource) Reset() { *x = LocateBrokerResponse_Resource{} - mi := &file_filer_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LocateBrokerResponse_Resource) String() string { @@ -4198,8 +3612,8 @@ func (x *LocateBrokerResponse_Resource) String() string { func (*LocateBrokerResponse_Resource) ProtoMessage() {} func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[68] - if x != nil { + mi := &file_filer_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4211,7 +3625,7 @@ func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message { // Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead. func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{49, 0} + return file_filer_proto_rawDescGZIP(), []int{47, 0} } func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string { @@ -4229,32 +3643,30 @@ func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 { } type FilerConf_PathConf struct { - state protoimpl.MessageState `protogen:"open.v1"` - LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` - DiskType string `protobuf:"bytes,5,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"` - VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"` - ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - DataCenter string `protobuf:"bytes,9,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,10,opt,name=rack,proto3" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,11,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` - MaxFileNameLength uint32 `protobuf:"varint,12,opt,name=max_file_name_length,json=maxFileNameLength,proto3" json:"max_file_name_length,omitempty"` - DisableChunkDeletion bool `protobuf:"varint,13,opt,name=disable_chunk_deletion,json=disableChunkDeletion,proto3" json:"disable_chunk_deletion,omitempty"` - Worm bool `protobuf:"varint,14,opt,name=worm,proto3" json:"worm,omitempty"` - WormGracePeriodSeconds uint64 `protobuf:"varint,15,opt,name=worm_grace_period_seconds,json=wormGracePeriodSeconds,proto3" json:"worm_grace_period_seconds,omitempty"` - WormRetentionTimeSeconds uint64 `protobuf:"varint,16,opt,name=worm_retention_time_seconds,json=wormRetentionTimeSeconds,proto3" json:"worm_retention_time_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,5,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` + Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"` + VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"` + ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + DataCenter string `protobuf:"bytes,9,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,10,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,11,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` } func (x *FilerConf_PathConf) Reset() { *x = FilerConf_PathConf{} - mi := &file_filer_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FilerConf_PathConf) String() string { @@ -4264,8 +3676,8 @@ func (x *FilerConf_PathConf) String() string { func (*FilerConf_PathConf) ProtoMessage() {} func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message { - mi := &file_filer_proto_msgTypes[69] - if x != nil { + mi := &file_filer_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4277,7 +3689,7 @@ func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message { // Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead. func (*FilerConf_PathConf) Descriptor() ([]byte, []int) { - return file_filer_proto_rawDescGZIP(), []int{54, 0} + return file_filer_proto_rawDescGZIP(), []int{52, 0} } func (x *FilerConf_PathConf) GetLocationPrefix() string { @@ -4357,618 +3769,768 @@ func (x *FilerConf_PathConf) GetDataNode() string { return "" } -func (x *FilerConf_PathConf) GetMaxFileNameLength() uint32 { - if x != nil { - return x.MaxFileNameLength - } - return 0 -} - -func (x *FilerConf_PathConf) GetDisableChunkDeletion() bool { - if x != nil { - return x.DisableChunkDeletion - } - return false -} - -func (x *FilerConf_PathConf) GetWorm() bool { - if x != nil { - return x.Worm - } - return false -} - -func (x *FilerConf_PathConf) GetWormGracePeriodSeconds() uint64 { - if x != nil { - return x.WormGracePeriodSeconds - } - return 0 -} - -func (x *FilerConf_PathConf) GetWormRetentionTimeSeconds() uint64 { - if x != nil { - return x.WormRetentionTimeSeconds - } - return 0 -} - var File_filer_proto protoreflect.FileDescriptor -const file_filer_proto_rawDesc = "" + - "\n" + - "\vfiler.proto\x12\bfiler_pb\"O\n" + - "\x1bLookupDirectoryEntryRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\"E\n" + - "\x1cLookupDirectoryEntryResponse\x12%\n" + - "\x05entry\x18\x01 \x01(\v2\x0f.filer_pb.EntryR\x05entry\"\xbe\x01\n" + - "\x12ListEntriesRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12\x16\n" + - "\x06prefix\x18\x02 \x01(\tR\x06prefix\x12,\n" + - "\x11startFromFileName\x18\x03 \x01(\tR\x11startFromFileName\x12.\n" + - "\x12inclusiveStartFrom\x18\x04 \x01(\bR\x12inclusiveStartFrom\x12\x14\n" + - "\x05limit\x18\x05 \x01(\rR\x05limit\"<\n" + - "\x13ListEntriesResponse\x12%\n" + - "\x05entry\x18\x01 \x01(\v2\x0f.filer_pb.EntryR\x05entry\"\xc8\x01\n" + - "\vRemoteEntry\x12!\n" + - "\fstorage_name\x18\x01 \x01(\tR\vstorageName\x120\n" + - "\x15last_local_sync_ts_ns\x18\x02 \x01(\x03R\x11lastLocalSyncTsNs\x12 \n" + - "\fremote_e_tag\x18\x03 \x01(\tR\n" + - "remoteETag\x12!\n" + - "\fremote_mtime\x18\x04 \x01(\x03R\vremoteMtime\x12\x1f\n" + - "\vremote_size\x18\x05 \x01(\x03R\n" + - "remoteSize\"\x89\x04\n" + - "\x05Entry\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + - "\fis_directory\x18\x02 \x01(\bR\visDirectory\x12+\n" + - "\x06chunks\x18\x03 \x03(\v2\x13.filer_pb.FileChunkR\x06chunks\x128\n" + - "\n" + - "attributes\x18\x04 \x01(\v2\x18.filer_pb.FuseAttributesR\n" + - "attributes\x129\n" + - "\bextended\x18\x05 \x03(\v2\x1d.filer_pb.Entry.ExtendedEntryR\bextended\x12 \n" + - "\fhard_link_id\x18\a \x01(\fR\n" + - "hardLinkId\x12*\n" + - "\x11hard_link_counter\x18\b \x01(\x05R\x0fhardLinkCounter\x12\x18\n" + - "\acontent\x18\t \x01(\fR\acontent\x128\n" + - "\fremote_entry\x18\n" + - " \x01(\v2\x15.filer_pb.RemoteEntryR\vremoteEntry\x12\x14\n" + - "\x05quota\x18\v \x01(\x03R\x05quota\x122\n" + - "\x16worm_enforced_at_ts_ns\x18\f \x01(\x03R\x12wormEnforcedAtTsNs\x1a;\n" + - "\rExtendedEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01\"D\n" + - "\tFullEntry\x12\x10\n" + - "\x03dir\x18\x01 \x01(\tR\x03dir\x12%\n" + - "\x05entry\x18\x02 \x01(\v2\x0f.filer_pb.EntryR\x05entry\"\x8f\x02\n" + - "\x11EventNotification\x12,\n" + - "\told_entry\x18\x01 \x01(\v2\x0f.filer_pb.EntryR\boldEntry\x12,\n" + - "\tnew_entry\x18\x02 \x01(\v2\x0f.filer_pb.EntryR\bnewEntry\x12#\n" + - "\rdelete_chunks\x18\x03 \x01(\bR\fdeleteChunks\x12&\n" + - "\x0fnew_parent_path\x18\x04 \x01(\tR\rnewParentPath\x121\n" + - "\x15is_from_other_cluster\x18\x05 \x01(\bR\x12isFromOtherCluster\x12\x1e\n" + - "\n" + - "signatures\x18\x06 \x03(\x05R\n" + - "signatures\"\xc7\x03\n" + - "\tFileChunk\x12\x17\n" + - "\afile_id\x18\x01 \x01(\tR\x06fileId\x12\x16\n" + - "\x06offset\x18\x02 \x01(\x03R\x06offset\x12\x12\n" + - "\x04size\x18\x03 \x01(\x04R\x04size\x12$\n" + - "\x0emodified_ts_ns\x18\x04 \x01(\x03R\fmodifiedTsNs\x12\x13\n" + - "\x05e_tag\x18\x05 \x01(\tR\x04eTag\x12$\n" + - "\x0esource_file_id\x18\x06 \x01(\tR\fsourceFileId\x12\"\n" + - "\x03fid\x18\a \x01(\v2\x10.filer_pb.FileIdR\x03fid\x12/\n" + - "\n" + - "source_fid\x18\b \x01(\v2\x10.filer_pb.FileIdR\tsourceFid\x12\x1d\n" + - "\n" + - "cipher_key\x18\t \x01(\fR\tcipherKey\x12#\n" + - "\ris_compressed\x18\n" + - " \x01(\bR\fisCompressed\x12*\n" + - "\x11is_chunk_manifest\x18\v \x01(\bR\x0fisChunkManifest\x12,\n" + - "\bsse_type\x18\f \x01(\x0e2\x11.filer_pb.SSETypeR\asseType\x12!\n" + - "\fsse_metadata\x18\r \x01(\fR\vsseMetadata\"@\n" + - "\x11FileChunkManifest\x12+\n" + - "\x06chunks\x18\x01 \x03(\v2\x13.filer_pb.FileChunkR\x06chunks\"X\n" + - "\x06FileId\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x19\n" + - "\bfile_key\x18\x02 \x01(\x04R\afileKey\x12\x16\n" + - "\x06cookie\x18\x03 \x01(\aR\x06cookie\"\xe8\x02\n" + - "\x0eFuseAttributes\x12\x1b\n" + - "\tfile_size\x18\x01 \x01(\x04R\bfileSize\x12\x14\n" + - "\x05mtime\x18\x02 \x01(\x03R\x05mtime\x12\x1b\n" + - "\tfile_mode\x18\x03 \x01(\rR\bfileMode\x12\x10\n" + - "\x03uid\x18\x04 \x01(\rR\x03uid\x12\x10\n" + - "\x03gid\x18\x05 \x01(\rR\x03gid\x12\x16\n" + - "\x06crtime\x18\x06 \x01(\x03R\x06crtime\x12\x12\n" + - "\x04mime\x18\a \x01(\tR\x04mime\x12\x17\n" + - "\attl_sec\x18\n" + - " \x01(\x05R\x06ttlSec\x12\x1b\n" + - "\tuser_name\x18\v \x01(\tR\buserName\x12\x1d\n" + - "\n" + - "group_name\x18\f \x03(\tR\tgroupName\x12%\n" + - "\x0esymlink_target\x18\r \x01(\tR\rsymlinkTarget\x12\x10\n" + - "\x03md5\x18\x0e \x01(\fR\x03md5\x12\x12\n" + - "\x04rdev\x18\x10 \x01(\rR\x04rdev\x12\x14\n" + - "\x05inode\x18\x11 \x01(\x04R\x05inode\"\x82\x02\n" + - "\x12CreateEntryRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12%\n" + - "\x05entry\x18\x02 \x01(\v2\x0f.filer_pb.EntryR\x05entry\x12\x15\n" + - "\x06o_excl\x18\x03 \x01(\bR\x05oExcl\x121\n" + - "\x15is_from_other_cluster\x18\x04 \x01(\bR\x12isFromOtherCluster\x12\x1e\n" + - "\n" + - "signatures\x18\x05 \x03(\x05R\n" + - "signatures\x12=\n" + - "\x1bskip_check_parent_directory\x18\x06 \x01(\bR\x18skipCheckParentDirectory\"+\n" + - "\x13CreateEntryResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"\xac\x01\n" + - "\x12UpdateEntryRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12%\n" + - "\x05entry\x18\x02 \x01(\v2\x0f.filer_pb.EntryR\x05entry\x121\n" + - "\x15is_from_other_cluster\x18\x03 \x01(\bR\x12isFromOtherCluster\x12\x1e\n" + - "\n" + - "signatures\x18\x04 \x03(\x05R\n" + - "signatures\"\x15\n" + - "\x13UpdateEntryResponse\"\x80\x01\n" + - "\x14AppendToEntryRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12\x1d\n" + - "\n" + - "entry_name\x18\x02 \x01(\tR\tentryName\x12+\n" + - "\x06chunks\x18\x03 \x03(\v2\x13.filer_pb.FileChunkR\x06chunks\"\x17\n" + - "\x15AppendToEntryResponse\"\xcb\x02\n" + - "\x12DeleteEntryRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12$\n" + - "\x0eis_delete_data\x18\x04 \x01(\bR\fisDeleteData\x12!\n" + - "\fis_recursive\x18\x05 \x01(\bR\visRecursive\x124\n" + - "\x16ignore_recursive_error\x18\x06 \x01(\bR\x14ignoreRecursiveError\x121\n" + - "\x15is_from_other_cluster\x18\a \x01(\bR\x12isFromOtherCluster\x12\x1e\n" + - "\n" + - "signatures\x18\b \x03(\x05R\n" + - "signatures\x121\n" + - "\x15if_not_modified_after\x18\t \x01(\x03R\x12ifNotModifiedAfter\"+\n" + - "\x13DeleteEntryResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"\xba\x01\n" + - "\x18AtomicRenameEntryRequest\x12#\n" + - "\rold_directory\x18\x01 \x01(\tR\foldDirectory\x12\x19\n" + - "\bold_name\x18\x02 \x01(\tR\aoldName\x12#\n" + - "\rnew_directory\x18\x03 \x01(\tR\fnewDirectory\x12\x19\n" + - "\bnew_name\x18\x04 \x01(\tR\anewName\x12\x1e\n" + - "\n" + - "signatures\x18\x05 \x03(\x05R\n" + - "signatures\"\x1b\n" + - "\x19AtomicRenameEntryResponse\"\xba\x01\n" + - "\x18StreamRenameEntryRequest\x12#\n" + - "\rold_directory\x18\x01 \x01(\tR\foldDirectory\x12\x19\n" + - "\bold_name\x18\x02 \x01(\tR\aoldName\x12#\n" + - "\rnew_directory\x18\x03 \x01(\tR\fnewDirectory\x12\x19\n" + - "\bnew_name\x18\x04 \x01(\tR\anewName\x12\x1e\n" + - "\n" + - "signatures\x18\x05 \x03(\x05R\n" + - "signatures\"\x9a\x01\n" + - "\x19StreamRenameEntryResponse\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12J\n" + - "\x12event_notification\x18\x02 \x01(\v2\x1b.filer_pb.EventNotificationR\x11eventNotification\x12\x13\n" + - "\x05ts_ns\x18\x03 \x01(\x03R\x04tsNs\"\x89\x02\n" + - "\x13AssignVolumeRequest\x12\x14\n" + - "\x05count\x18\x01 \x01(\x05R\x05count\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12 \n" + - "\vreplication\x18\x03 \x01(\tR\vreplication\x12\x17\n" + - "\attl_sec\x18\x04 \x01(\x05R\x06ttlSec\x12\x1f\n" + - "\vdata_center\x18\x05 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04path\x18\x06 \x01(\tR\x04path\x12\x12\n" + - "\x04rack\x18\a \x01(\tR\x04rack\x12\x1b\n" + - "\tdata_node\x18\t \x01(\tR\bdataNode\x12\x1b\n" + - "\tdisk_type\x18\b \x01(\tR\bdiskType\"\xe1\x01\n" + - "\x14AssignVolumeResponse\x12\x17\n" + - "\afile_id\x18\x01 \x01(\tR\x06fileId\x12\x14\n" + - "\x05count\x18\x04 \x01(\x05R\x05count\x12\x12\n" + - "\x04auth\x18\x05 \x01(\tR\x04auth\x12\x1e\n" + - "\n" + - "collection\x18\x06 \x01(\tR\n" + - "collection\x12 \n" + - "\vreplication\x18\a \x01(\tR\vreplication\x12\x14\n" + - "\x05error\x18\b \x01(\tR\x05error\x12.\n" + - "\blocation\x18\t \x01(\v2\x12.filer_pb.LocationR\blocation\"4\n" + - "\x13LookupVolumeRequest\x12\x1d\n" + - "\n" + - "volume_ids\x18\x01 \x03(\tR\tvolumeIds\"=\n" + - "\tLocations\x120\n" + - "\tlocations\x18\x01 \x03(\v2\x12.filer_pb.LocationR\tlocations\"y\n" + - "\bLocation\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" + - "\n" + - "public_url\x18\x02 \x01(\tR\tpublicUrl\x12\x1b\n" + - "\tgrpc_port\x18\x03 \x01(\rR\bgrpcPort\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\"\xc3\x01\n" + - "\x14LookupVolumeResponse\x12U\n" + - "\rlocations_map\x18\x01 \x03(\v20.filer_pb.LookupVolumeResponse.LocationsMapEntryR\flocationsMap\x1aT\n" + - "\x11LocationsMapEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12)\n" + - "\x05value\x18\x02 \x01(\v2\x13.filer_pb.LocationsR\x05value:\x028\x01\" \n" + - "\n" + - "Collection\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\"{\n" + - "\x15CollectionListRequest\x124\n" + - "\x16include_normal_volumes\x18\x01 \x01(\bR\x14includeNormalVolumes\x12,\n" + - "\x12include_ec_volumes\x18\x02 \x01(\bR\x10includeEcVolumes\"P\n" + - "\x16CollectionListResponse\x126\n" + - "\vcollections\x18\x01 \x03(\v2\x14.filer_pb.CollectionR\vcollections\"9\n" + - "\x17DeleteCollectionRequest\x12\x1e\n" + - "\n" + - "collection\x18\x01 \x01(\tR\n" + - "collection\"\x1a\n" + - "\x18DeleteCollectionResponse\"\x84\x01\n" + - "\x11StatisticsRequest\x12 \n" + - "\vreplication\x18\x01 \x01(\tR\vreplication\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x10\n" + - "\x03ttl\x18\x03 \x01(\tR\x03ttl\x12\x1b\n" + - "\tdisk_type\x18\x04 \x01(\tR\bdiskType\"o\n" + - "\x12StatisticsResponse\x12\x1d\n" + - "\n" + - "total_size\x18\x04 \x01(\x04R\ttotalSize\x12\x1b\n" + - "\tused_size\x18\x05 \x01(\x04R\busedSize\x12\x1d\n" + - "\n" + - "file_count\x18\x06 \x01(\x04R\tfileCount\"F\n" + - "\vPingRequest\x12\x16\n" + - "\x06target\x18\x01 \x01(\tR\x06target\x12\x1f\n" + - "\vtarget_type\x18\x02 \x01(\tR\n" + - "targetType\"z\n" + - "\fPingResponse\x12\"\n" + - "\rstart_time_ns\x18\x01 \x01(\x03R\vstartTimeNs\x12$\n" + - "\x0eremote_time_ns\x18\x02 \x01(\x03R\fremoteTimeNs\x12 \n" + - "\fstop_time_ns\x18\x03 \x01(\x03R\n" + - "stopTimeNs\"\x1e\n" + - "\x1cGetFilerConfigurationRequest\"\xe8\x03\n" + - "\x1dGetFilerConfigurationResponse\x12\x18\n" + - "\amasters\x18\x01 \x03(\tR\amasters\x12 \n" + - "\vreplication\x18\x02 \x01(\tR\vreplication\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12\x15\n" + - "\x06max_mb\x18\x04 \x01(\rR\x05maxMb\x12\x1f\n" + - "\vdir_buckets\x18\x05 \x01(\tR\n" + - "dirBuckets\x12\x16\n" + - "\x06cipher\x18\a \x01(\bR\x06cipher\x12\x1c\n" + - "\tsignature\x18\b \x01(\x05R\tsignature\x12'\n" + - "\x0fmetrics_address\x18\t \x01(\tR\x0emetricsAddress\x120\n" + - "\x14metrics_interval_sec\x18\n" + - " \x01(\x05R\x12metricsIntervalSec\x12\x18\n" + - "\aversion\x18\v \x01(\tR\aversion\x12\x1d\n" + - "\n" + - "cluster_id\x18\f \x01(\tR\tclusterId\x12\x1f\n" + - "\vfiler_group\x18\r \x01(\tR\n" + - "filerGroup\x12#\n" + - "\rmajor_version\x18\x0e \x01(\x05R\fmajorVersion\x12#\n" + - "\rminor_version\x18\x0f \x01(\x05R\fminorVersion\"\xb7\x02\n" + - "\x18SubscribeMetadataRequest\x12\x1f\n" + - "\vclient_name\x18\x01 \x01(\tR\n" + - "clientName\x12\x1f\n" + - "\vpath_prefix\x18\x02 \x01(\tR\n" + - "pathPrefix\x12\x19\n" + - "\bsince_ns\x18\x03 \x01(\x03R\asinceNs\x12\x1c\n" + - "\tsignature\x18\x04 \x01(\x05R\tsignature\x12#\n" + - "\rpath_prefixes\x18\x06 \x03(\tR\fpathPrefixes\x12\x1b\n" + - "\tclient_id\x18\a \x01(\x05R\bclientId\x12\x19\n" + - "\buntil_ns\x18\b \x01(\x03R\auntilNs\x12!\n" + - "\fclient_epoch\x18\t \x01(\x05R\vclientEpoch\x12 \n" + - "\vdirectories\x18\n" + - " \x03(\tR\vdirectories\"\x9a\x01\n" + - "\x19SubscribeMetadataResponse\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12J\n" + - "\x12event_notification\x18\x02 \x01(\v2\x1b.filer_pb.EventNotificationR\x11eventNotification\x12\x13\n" + - "\x05ts_ns\x18\x03 \x01(\x03R\x04tsNs\"g\n" + - "\x1aTraverseBfsMetadataRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12+\n" + - "\x11excluded_prefixes\x18\x02 \x03(\tR\x10excludedPrefixes\"b\n" + - "\x1bTraverseBfsMetadataResponse\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12%\n" + - "\x05entry\x18\x02 \x01(\v2\x0f.filer_pb.EntryR\x05entry\"\x8b\x01\n" + - "\bLogEntry\x12\x13\n" + - "\x05ts_ns\x18\x01 \x01(\x03R\x04tsNs\x12,\n" + - "\x12partition_key_hash\x18\x02 \x01(\x05R\x10partitionKeyHash\x12\x12\n" + - "\x04data\x18\x03 \x01(\fR\x04data\x12\x10\n" + - "\x03key\x18\x04 \x01(\fR\x03key\x12\x16\n" + - "\x06offset\x18\x05 \x01(\x03R\x06offset\"e\n" + - "\x14KeepConnectedRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x1b\n" + - "\tgrpc_port\x18\x02 \x01(\rR\bgrpcPort\x12\x1c\n" + - "\tresources\x18\x03 \x03(\tR\tresources\"\x17\n" + - "\x15KeepConnectedResponse\"1\n" + - "\x13LocateBrokerRequest\x12\x1a\n" + - "\bresource\x18\x01 \x01(\tR\bresource\"\xcd\x01\n" + - "\x14LocateBrokerResponse\x12\x14\n" + - "\x05found\x18\x01 \x01(\bR\x05found\x12E\n" + - "\tresources\x18\x02 \x03(\v2'.filer_pb.LocateBrokerResponse.ResourceR\tresources\x1aX\n" + - "\bResource\x12%\n" + - "\x0egrpc_addresses\x18\x01 \x01(\tR\rgrpcAddresses\x12%\n" + - "\x0eresource_count\x18\x02 \x01(\x05R\rresourceCount\" \n" + - "\fKvGetRequest\x12\x10\n" + - "\x03key\x18\x01 \x01(\fR\x03key\";\n" + - "\rKvGetResponse\x12\x14\n" + - "\x05value\x18\x01 \x01(\fR\x05value\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\"6\n" + - "\fKvPutRequest\x12\x10\n" + - "\x03key\x18\x01 \x01(\fR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\fR\x05value\"%\n" + - "\rKvPutResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"\xb2\x05\n" + - "\tFilerConf\x12\x18\n" + - "\aversion\x18\x01 \x01(\x05R\aversion\x12:\n" + - "\tlocations\x18\x02 \x03(\v2\x1c.filer_pb.FilerConf.PathConfR\tlocations\x1a\xce\x04\n" + - "\bPathConf\x12'\n" + - "\x0flocation_prefix\x18\x01 \x01(\tR\x0elocationPrefix\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12 \n" + - "\vreplication\x18\x03 \x01(\tR\vreplication\x12\x10\n" + - "\x03ttl\x18\x04 \x01(\tR\x03ttl\x12\x1b\n" + - "\tdisk_type\x18\x05 \x01(\tR\bdiskType\x12\x14\n" + - "\x05fsync\x18\x06 \x01(\bR\x05fsync\x12.\n" + - "\x13volume_growth_count\x18\a \x01(\rR\x11volumeGrowthCount\x12\x1b\n" + - "\tread_only\x18\b \x01(\bR\breadOnly\x12\x1f\n" + - "\vdata_center\x18\t \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\n" + - " \x01(\tR\x04rack\x12\x1b\n" + - "\tdata_node\x18\v \x01(\tR\bdataNode\x12/\n" + - "\x14max_file_name_length\x18\f \x01(\rR\x11maxFileNameLength\x124\n" + - "\x16disable_chunk_deletion\x18\r \x01(\bR\x14disableChunkDeletion\x12\x12\n" + - "\x04worm\x18\x0e \x01(\bR\x04worm\x129\n" + - "\x19worm_grace_period_seconds\x18\x0f \x01(\x04R\x16wormGracePeriodSeconds\x12=\n" + - "\x1bworm_retention_time_seconds\x18\x10 \x01(\x04R\x18wormRetentionTimeSeconds\"Z\n" + - "&CacheRemoteObjectToLocalClusterRequest\x12\x1c\n" + - "\tdirectory\x18\x01 \x01(\tR\tdirectory\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\"P\n" + - "'CacheRemoteObjectToLocalClusterResponse\x12%\n" + - "\x05entry\x18\x01 \x01(\v2\x0f.filer_pb.EntryR\x05entry\"\x9b\x01\n" + - "\vLockRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12&\n" + - "\x0fseconds_to_lock\x18\x02 \x01(\x03R\rsecondsToLock\x12\x1f\n" + - "\vrenew_token\x18\x03 \x01(\tR\n" + - "renewToken\x12\x19\n" + - "\bis_moved\x18\x04 \x01(\bR\aisMoved\x12\x14\n" + - "\x05owner\x18\x05 \x01(\tR\x05owner\"\x91\x01\n" + - "\fLockResponse\x12\x1f\n" + - "\vrenew_token\x18\x01 \x01(\tR\n" + - "renewToken\x12\x1d\n" + - "\n" + - "lock_owner\x18\x02 \x01(\tR\tlockOwner\x12+\n" + - "\x12lock_host_moved_to\x18\x03 \x01(\tR\x0flockHostMovedTo\x12\x14\n" + - "\x05error\x18\x04 \x01(\tR\x05error\"_\n" + - "\rUnlockRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n" + - "\vrenew_token\x18\x02 \x01(\tR\n" + - "renewToken\x12\x19\n" + - "\bis_moved\x18\x03 \x01(\bR\aisMoved\"A\n" + - "\x0eUnlockResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\x12\x19\n" + - "\bmoved_to\x18\x02 \x01(\tR\amovedTo\"E\n" + - "\x14FindLockOwnerRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x19\n" + - "\bis_moved\x18\x02 \x01(\bR\aisMoved\"-\n" + - "\x15FindLockOwnerResponse\x12\x14\n" + - "\x05owner\x18\x01 \x01(\tR\x05owner\"u\n" + - "\x04Lock\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n" + - "\vrenew_token\x18\x02 \x01(\tR\n" + - "renewToken\x12\"\n" + - "\rexpired_at_ns\x18\x03 \x01(\x03R\vexpiredAtNs\x12\x14\n" + - "\x05owner\x18\x04 \x01(\tR\x05owner\"<\n" + - "\x14TransferLocksRequest\x12$\n" + - "\x05locks\x18\x01 \x03(\v2\x0e.filer_pb.LockR\x05locks\"\x17\n" + - "\x15TransferLocksResponse*7\n" + - "\aSSEType\x12\b\n" + - "\x04NONE\x10\x00\x12\t\n" + - "\x05SSE_C\x10\x01\x12\v\n" + - "\aSSE_KMS\x10\x02\x12\n" + - "\n" + - "\x06SSE_S3\x10\x032\xf7\x10\n" + - "\fSeaweedFiler\x12g\n" + - "\x14LookupDirectoryEntry\x12%.filer_pb.LookupDirectoryEntryRequest\x1a&.filer_pb.LookupDirectoryEntryResponse\"\x00\x12N\n" + - "\vListEntries\x12\x1c.filer_pb.ListEntriesRequest\x1a\x1d.filer_pb.ListEntriesResponse\"\x000\x01\x12L\n" + - "\vCreateEntry\x12\x1c.filer_pb.CreateEntryRequest\x1a\x1d.filer_pb.CreateEntryResponse\"\x00\x12L\n" + - "\vUpdateEntry\x12\x1c.filer_pb.UpdateEntryRequest\x1a\x1d.filer_pb.UpdateEntryResponse\"\x00\x12R\n" + - "\rAppendToEntry\x12\x1e.filer_pb.AppendToEntryRequest\x1a\x1f.filer_pb.AppendToEntryResponse\"\x00\x12L\n" + - "\vDeleteEntry\x12\x1c.filer_pb.DeleteEntryRequest\x1a\x1d.filer_pb.DeleteEntryResponse\"\x00\x12^\n" + - "\x11AtomicRenameEntry\x12\".filer_pb.AtomicRenameEntryRequest\x1a#.filer_pb.AtomicRenameEntryResponse\"\x00\x12`\n" + - "\x11StreamRenameEntry\x12\".filer_pb.StreamRenameEntryRequest\x1a#.filer_pb.StreamRenameEntryResponse\"\x000\x01\x12O\n" + - "\fAssignVolume\x12\x1d.filer_pb.AssignVolumeRequest\x1a\x1e.filer_pb.AssignVolumeResponse\"\x00\x12O\n" + - "\fLookupVolume\x12\x1d.filer_pb.LookupVolumeRequest\x1a\x1e.filer_pb.LookupVolumeResponse\"\x00\x12U\n" + - "\x0eCollectionList\x12\x1f.filer_pb.CollectionListRequest\x1a .filer_pb.CollectionListResponse\"\x00\x12[\n" + - "\x10DeleteCollection\x12!.filer_pb.DeleteCollectionRequest\x1a\".filer_pb.DeleteCollectionResponse\"\x00\x12I\n" + - "\n" + - "Statistics\x12\x1b.filer_pb.StatisticsRequest\x1a\x1c.filer_pb.StatisticsResponse\"\x00\x127\n" + - "\x04Ping\x12\x15.filer_pb.PingRequest\x1a\x16.filer_pb.PingResponse\"\x00\x12j\n" + - "\x15GetFilerConfiguration\x12&.filer_pb.GetFilerConfigurationRequest\x1a'.filer_pb.GetFilerConfigurationResponse\"\x00\x12f\n" + - "\x13TraverseBfsMetadata\x12$.filer_pb.TraverseBfsMetadataRequest\x1a%.filer_pb.TraverseBfsMetadataResponse\"\x000\x01\x12`\n" + - "\x11SubscribeMetadata\x12\".filer_pb.SubscribeMetadataRequest\x1a#.filer_pb.SubscribeMetadataResponse\"\x000\x01\x12e\n" + - "\x16SubscribeLocalMetadata\x12\".filer_pb.SubscribeMetadataRequest\x1a#.filer_pb.SubscribeMetadataResponse\"\x000\x01\x12:\n" + - "\x05KvGet\x12\x16.filer_pb.KvGetRequest\x1a\x17.filer_pb.KvGetResponse\"\x00\x12:\n" + - "\x05KvPut\x12\x16.filer_pb.KvPutRequest\x1a\x17.filer_pb.KvPutResponse\"\x00\x12\x88\x01\n" + - "\x1fCacheRemoteObjectToLocalCluster\x120.filer_pb.CacheRemoteObjectToLocalClusterRequest\x1a1.filer_pb.CacheRemoteObjectToLocalClusterResponse\"\x00\x12B\n" + - "\x0fDistributedLock\x12\x15.filer_pb.LockRequest\x1a\x16.filer_pb.LockResponse\"\x00\x12H\n" + - "\x11DistributedUnlock\x12\x17.filer_pb.UnlockRequest\x1a\x18.filer_pb.UnlockResponse\"\x00\x12R\n" + - "\rFindLockOwner\x12\x1e.filer_pb.FindLockOwnerRequest\x1a\x1f.filer_pb.FindLockOwnerResponse\"\x00\x12R\n" + - "\rTransferLocks\x12\x1e.filer_pb.TransferLocksRequest\x1a\x1f.filer_pb.TransferLocksResponse\"\x00BO\n" + - "\x10seaweedfs.clientB\n" + - "FilerProtoZ/github.com/seaweedfs/seaweedfs/weed/pb/filer_pbb\x06proto3" +var file_filer_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xc8, + 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x30, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x54, + 0x73, 0x4e, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x5f, + 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x45, 0x54, 0x61, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, + 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x4d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xd5, 0x03, 0x0a, 0x05, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, + 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0c, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0a, 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x2a, + 0x0a, 0x11, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x4c, + 0x69, 0x6e, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x44, 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, + 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x8f, 0x02, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, + 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x09, 0x6e, + 0x65, 0x77, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6e, 0x65, 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, + 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x09, 0x46, 0x69, + 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, + 0x03, 0x66, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x03, 0x66, 0x69, + 0x64, 0x12, 0x2f, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x64, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x43, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x69, 0x73, 0x5f, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x69, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, + 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, + 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x22, 0xe8, + 0x02, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x69, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, + 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, + 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35, 0x12, + 0x12, 0x0a, 0x04, 0x72, 0x64, 0x65, 0x76, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, + 0x64, 0x65, 0x76, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x82, 0x02, 0x0a, 0x12, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, + 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6f, 0x45, 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, + 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, + 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, + 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x3d, 0x0a, 0x1b, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0x2b, + 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xac, 0x01, 0x0a, 0x12, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, + 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, + 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x80, 0x01, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02, + 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, + 0x34, 0x0a, 0x16, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, + 0x69, 0x76, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, + 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xba, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xba, 0x01, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, + 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, + 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x9a, 0x01, 0x0a, + 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x13, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, + 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, + 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, + 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, + 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, + 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x09, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x58, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6d, + 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x20, + 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x50, 0x0a, + 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, + 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x46, + 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, + 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, + 0x4e, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x9e, 0x03, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x20, + 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, 0x72, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, + 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x22, 0xf2, 0x01, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x4e, 0x73, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, + 0x17, 0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, + 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, + 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, + 0x0d, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, + 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xbd, 0x03, 0x0a, 0x09, 0x46, 0x69, + 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, + 0x6e, 0x66, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd9, 0x02, + 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, + 0x72, 0x6f, 0x77, 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, + 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, + 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0x5a, 0x0a, 0x26, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x27, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x32, 0x82, 0x0f, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x60, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, + 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x37, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, + 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, + 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x47, + 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x88, 0x01, 0x0a, 0x1f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, + 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, + 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, + 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_filer_proto_rawDescOnce sync.Once - file_filer_proto_rawDescData []byte + file_filer_proto_rawDescData = file_filer_proto_rawDesc ) func file_filer_proto_rawDescGZIP() []byte { file_filer_proto_rawDescOnce.Do(func() { - file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_filer_proto_rawDesc), len(file_filer_proto_rawDesc))) + file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData) }) return file_filer_proto_rawDescData } -var file_filer_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 70) -var file_filer_proto_goTypes = []any{ - (SSEType)(0), // 0: filer_pb.SSEType - (*LookupDirectoryEntryRequest)(nil), // 1: filer_pb.LookupDirectoryEntryRequest - (*LookupDirectoryEntryResponse)(nil), // 2: filer_pb.LookupDirectoryEntryResponse - (*ListEntriesRequest)(nil), // 3: filer_pb.ListEntriesRequest - (*ListEntriesResponse)(nil), // 4: filer_pb.ListEntriesResponse - (*RemoteEntry)(nil), // 5: filer_pb.RemoteEntry - (*Entry)(nil), // 6: filer_pb.Entry - (*FullEntry)(nil), // 7: filer_pb.FullEntry - (*EventNotification)(nil), // 8: filer_pb.EventNotification - (*FileChunk)(nil), // 9: filer_pb.FileChunk - (*FileChunkManifest)(nil), // 10: filer_pb.FileChunkManifest - (*FileId)(nil), // 11: filer_pb.FileId - (*FuseAttributes)(nil), // 12: filer_pb.FuseAttributes - (*CreateEntryRequest)(nil), // 13: filer_pb.CreateEntryRequest - (*CreateEntryResponse)(nil), // 14: filer_pb.CreateEntryResponse - (*UpdateEntryRequest)(nil), // 15: filer_pb.UpdateEntryRequest - (*UpdateEntryResponse)(nil), // 16: filer_pb.UpdateEntryResponse - (*AppendToEntryRequest)(nil), // 17: filer_pb.AppendToEntryRequest - (*AppendToEntryResponse)(nil), // 18: filer_pb.AppendToEntryResponse - (*DeleteEntryRequest)(nil), // 19: filer_pb.DeleteEntryRequest - (*DeleteEntryResponse)(nil), // 20: filer_pb.DeleteEntryResponse - (*AtomicRenameEntryRequest)(nil), // 21: filer_pb.AtomicRenameEntryRequest - (*AtomicRenameEntryResponse)(nil), // 22: filer_pb.AtomicRenameEntryResponse - (*StreamRenameEntryRequest)(nil), // 23: filer_pb.StreamRenameEntryRequest - (*StreamRenameEntryResponse)(nil), // 24: filer_pb.StreamRenameEntryResponse - (*AssignVolumeRequest)(nil), // 25: filer_pb.AssignVolumeRequest - (*AssignVolumeResponse)(nil), // 26: filer_pb.AssignVolumeResponse - (*LookupVolumeRequest)(nil), // 27: filer_pb.LookupVolumeRequest - (*Locations)(nil), // 28: filer_pb.Locations - (*Location)(nil), // 29: filer_pb.Location - (*LookupVolumeResponse)(nil), // 30: filer_pb.LookupVolumeResponse - (*Collection)(nil), // 31: filer_pb.Collection - (*CollectionListRequest)(nil), // 32: filer_pb.CollectionListRequest - (*CollectionListResponse)(nil), // 33: filer_pb.CollectionListResponse - (*DeleteCollectionRequest)(nil), // 34: filer_pb.DeleteCollectionRequest - (*DeleteCollectionResponse)(nil), // 35: filer_pb.DeleteCollectionResponse - (*StatisticsRequest)(nil), // 36: filer_pb.StatisticsRequest - (*StatisticsResponse)(nil), // 37: filer_pb.StatisticsResponse - (*PingRequest)(nil), // 38: filer_pb.PingRequest - (*PingResponse)(nil), // 39: filer_pb.PingResponse - (*GetFilerConfigurationRequest)(nil), // 40: filer_pb.GetFilerConfigurationRequest - (*GetFilerConfigurationResponse)(nil), // 41: filer_pb.GetFilerConfigurationResponse - (*SubscribeMetadataRequest)(nil), // 42: filer_pb.SubscribeMetadataRequest - (*SubscribeMetadataResponse)(nil), // 43: filer_pb.SubscribeMetadataResponse - (*TraverseBfsMetadataRequest)(nil), // 44: filer_pb.TraverseBfsMetadataRequest - (*TraverseBfsMetadataResponse)(nil), // 45: filer_pb.TraverseBfsMetadataResponse - (*LogEntry)(nil), // 46: filer_pb.LogEntry - (*KeepConnectedRequest)(nil), // 47: filer_pb.KeepConnectedRequest - (*KeepConnectedResponse)(nil), // 48: filer_pb.KeepConnectedResponse - (*LocateBrokerRequest)(nil), // 49: filer_pb.LocateBrokerRequest - (*LocateBrokerResponse)(nil), // 50: filer_pb.LocateBrokerResponse - (*KvGetRequest)(nil), // 51: filer_pb.KvGetRequest - (*KvGetResponse)(nil), // 52: filer_pb.KvGetResponse - (*KvPutRequest)(nil), // 53: filer_pb.KvPutRequest - (*KvPutResponse)(nil), // 54: filer_pb.KvPutResponse - (*FilerConf)(nil), // 55: filer_pb.FilerConf - (*CacheRemoteObjectToLocalClusterRequest)(nil), // 56: filer_pb.CacheRemoteObjectToLocalClusterRequest - (*CacheRemoteObjectToLocalClusterResponse)(nil), // 57: filer_pb.CacheRemoteObjectToLocalClusterResponse - (*LockRequest)(nil), // 58: filer_pb.LockRequest - (*LockResponse)(nil), // 59: filer_pb.LockResponse - (*UnlockRequest)(nil), // 60: filer_pb.UnlockRequest - (*UnlockResponse)(nil), // 61: filer_pb.UnlockResponse - (*FindLockOwnerRequest)(nil), // 62: filer_pb.FindLockOwnerRequest - (*FindLockOwnerResponse)(nil), // 63: filer_pb.FindLockOwnerResponse - (*Lock)(nil), // 64: filer_pb.Lock - (*TransferLocksRequest)(nil), // 65: filer_pb.TransferLocksRequest - (*TransferLocksResponse)(nil), // 66: filer_pb.TransferLocksResponse - nil, // 67: filer_pb.Entry.ExtendedEntry - nil, // 68: filer_pb.LookupVolumeResponse.LocationsMapEntry - (*LocateBrokerResponse_Resource)(nil), // 69: filer_pb.LocateBrokerResponse.Resource - (*FilerConf_PathConf)(nil), // 70: filer_pb.FilerConf.PathConf +var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 59) +var file_filer_proto_goTypes = []interface{}{ + (*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest + (*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse + (*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest + (*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse + (*RemoteEntry)(nil), // 4: filer_pb.RemoteEntry + (*Entry)(nil), // 5: filer_pb.Entry + (*FullEntry)(nil), // 6: filer_pb.FullEntry + (*EventNotification)(nil), // 7: filer_pb.EventNotification + (*FileChunk)(nil), // 8: filer_pb.FileChunk + (*FileChunkManifest)(nil), // 9: filer_pb.FileChunkManifest + (*FileId)(nil), // 10: filer_pb.FileId + (*FuseAttributes)(nil), // 11: filer_pb.FuseAttributes + (*CreateEntryRequest)(nil), // 12: filer_pb.CreateEntryRequest + (*CreateEntryResponse)(nil), // 13: filer_pb.CreateEntryResponse + (*UpdateEntryRequest)(nil), // 14: filer_pb.UpdateEntryRequest + (*UpdateEntryResponse)(nil), // 15: filer_pb.UpdateEntryResponse + (*AppendToEntryRequest)(nil), // 16: filer_pb.AppendToEntryRequest + (*AppendToEntryResponse)(nil), // 17: filer_pb.AppendToEntryResponse + (*DeleteEntryRequest)(nil), // 18: filer_pb.DeleteEntryRequest + (*DeleteEntryResponse)(nil), // 19: filer_pb.DeleteEntryResponse + (*AtomicRenameEntryRequest)(nil), // 20: filer_pb.AtomicRenameEntryRequest + (*AtomicRenameEntryResponse)(nil), // 21: filer_pb.AtomicRenameEntryResponse + (*StreamRenameEntryRequest)(nil), // 22: filer_pb.StreamRenameEntryRequest + (*StreamRenameEntryResponse)(nil), // 23: filer_pb.StreamRenameEntryResponse + (*AssignVolumeRequest)(nil), // 24: filer_pb.AssignVolumeRequest + (*AssignVolumeResponse)(nil), // 25: filer_pb.AssignVolumeResponse + (*LookupVolumeRequest)(nil), // 26: filer_pb.LookupVolumeRequest + (*Locations)(nil), // 27: filer_pb.Locations + (*Location)(nil), // 28: filer_pb.Location + (*LookupVolumeResponse)(nil), // 29: filer_pb.LookupVolumeResponse + (*Collection)(nil), // 30: filer_pb.Collection + (*CollectionListRequest)(nil), // 31: filer_pb.CollectionListRequest + (*CollectionListResponse)(nil), // 32: filer_pb.CollectionListResponse + (*DeleteCollectionRequest)(nil), // 33: filer_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 34: filer_pb.DeleteCollectionResponse + (*StatisticsRequest)(nil), // 35: filer_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 36: filer_pb.StatisticsResponse + (*PingRequest)(nil), // 37: filer_pb.PingRequest + (*PingResponse)(nil), // 38: filer_pb.PingResponse + (*GetFilerConfigurationRequest)(nil), // 39: filer_pb.GetFilerConfigurationRequest + (*GetFilerConfigurationResponse)(nil), // 40: filer_pb.GetFilerConfigurationResponse + (*SubscribeMetadataRequest)(nil), // 41: filer_pb.SubscribeMetadataRequest + (*SubscribeMetadataResponse)(nil), // 42: filer_pb.SubscribeMetadataResponse + (*LogEntry)(nil), // 43: filer_pb.LogEntry + (*KeepConnectedRequest)(nil), // 44: filer_pb.KeepConnectedRequest + (*KeepConnectedResponse)(nil), // 45: filer_pb.KeepConnectedResponse + (*LocateBrokerRequest)(nil), // 46: filer_pb.LocateBrokerRequest + (*LocateBrokerResponse)(nil), // 47: filer_pb.LocateBrokerResponse + (*KvGetRequest)(nil), // 48: filer_pb.KvGetRequest + (*KvGetResponse)(nil), // 49: filer_pb.KvGetResponse + (*KvPutRequest)(nil), // 50: filer_pb.KvPutRequest + (*KvPutResponse)(nil), // 51: filer_pb.KvPutResponse + (*FilerConf)(nil), // 52: filer_pb.FilerConf + (*CacheRemoteObjectToLocalClusterRequest)(nil), // 53: filer_pb.CacheRemoteObjectToLocalClusterRequest + (*CacheRemoteObjectToLocalClusterResponse)(nil), // 54: filer_pb.CacheRemoteObjectToLocalClusterResponse + nil, // 55: filer_pb.Entry.ExtendedEntry + nil, // 56: filer_pb.LookupVolumeResponse.LocationsMapEntry + (*LocateBrokerResponse_Resource)(nil), // 57: filer_pb.LocateBrokerResponse.Resource + (*FilerConf_PathConf)(nil), // 58: filer_pb.FilerConf.PathConf } var file_filer_proto_depIdxs = []int32{ - 6, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry - 6, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry - 9, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk - 12, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes - 67, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry - 5, // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry - 6, // 6: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry - 6, // 7: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry - 6, // 8: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry - 11, // 9: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId - 11, // 10: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId - 0, // 11: filer_pb.FileChunk.sse_type:type_name -> filer_pb.SSEType - 9, // 12: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk - 6, // 13: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry - 6, // 14: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry - 9, // 15: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk - 8, // 16: filer_pb.StreamRenameEntryResponse.event_notification:type_name -> filer_pb.EventNotification - 29, // 17: filer_pb.AssignVolumeResponse.location:type_name -> filer_pb.Location - 29, // 18: filer_pb.Locations.locations:type_name -> filer_pb.Location - 68, // 19: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry - 31, // 20: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection - 8, // 21: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification - 6, // 22: filer_pb.TraverseBfsMetadataResponse.entry:type_name -> filer_pb.Entry - 69, // 23: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource - 70, // 24: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf - 6, // 25: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry - 64, // 26: filer_pb.TransferLocksRequest.locks:type_name -> filer_pb.Lock - 28, // 27: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations - 1, // 28: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest - 3, // 29: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest - 13, // 30: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest - 15, // 31: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest - 17, // 32: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest - 19, // 33: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest - 21, // 34: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest - 23, // 35: filer_pb.SeaweedFiler.StreamRenameEntry:input_type -> filer_pb.StreamRenameEntryRequest - 25, // 36: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest - 27, // 37: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest - 32, // 38: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest - 34, // 39: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest - 36, // 40: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest - 38, // 41: filer_pb.SeaweedFiler.Ping:input_type -> filer_pb.PingRequest - 40, // 42: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest - 44, // 43: filer_pb.SeaweedFiler.TraverseBfsMetadata:input_type -> filer_pb.TraverseBfsMetadataRequest - 42, // 44: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest - 42, // 45: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest - 51, // 46: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest - 53, // 47: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest - 56, // 48: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest - 58, // 49: filer_pb.SeaweedFiler.DistributedLock:input_type -> filer_pb.LockRequest - 60, // 50: filer_pb.SeaweedFiler.DistributedUnlock:input_type -> filer_pb.UnlockRequest - 62, // 51: filer_pb.SeaweedFiler.FindLockOwner:input_type -> filer_pb.FindLockOwnerRequest - 65, // 52: filer_pb.SeaweedFiler.TransferLocks:input_type -> filer_pb.TransferLocksRequest - 2, // 53: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse - 4, // 54: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse - 14, // 55: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse - 16, // 56: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse - 18, // 57: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse - 20, // 58: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse - 22, // 59: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse - 24, // 60: filer_pb.SeaweedFiler.StreamRenameEntry:output_type -> filer_pb.StreamRenameEntryResponse - 26, // 61: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse - 30, // 62: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse - 33, // 63: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse - 35, // 64: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse - 37, // 65: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse - 39, // 66: filer_pb.SeaweedFiler.Ping:output_type -> filer_pb.PingResponse - 41, // 67: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse - 45, // 68: filer_pb.SeaweedFiler.TraverseBfsMetadata:output_type -> filer_pb.TraverseBfsMetadataResponse - 43, // 69: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse - 43, // 70: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse - 52, // 71: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse - 54, // 72: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse - 57, // 73: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse - 59, // 74: filer_pb.SeaweedFiler.DistributedLock:output_type -> filer_pb.LockResponse - 61, // 75: filer_pb.SeaweedFiler.DistributedUnlock:output_type -> filer_pb.UnlockResponse - 63, // 76: filer_pb.SeaweedFiler.FindLockOwner:output_type -> filer_pb.FindLockOwnerResponse - 66, // 77: filer_pb.SeaweedFiler.TransferLocks:output_type -> filer_pb.TransferLocksResponse - 53, // [53:78] is the sub-list for method output_type - 28, // [28:53] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 5, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry + 5, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry + 8, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk + 11, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes + 55, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry + 4, // 5: filer_pb.Entry.remote_entry:type_name -> filer_pb.RemoteEntry + 5, // 6: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry + 5, // 7: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry + 5, // 8: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry + 10, // 9: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId + 10, // 10: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId + 8, // 11: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk + 5, // 12: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry + 5, // 13: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry + 8, // 14: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk + 7, // 15: filer_pb.StreamRenameEntryResponse.event_notification:type_name -> filer_pb.EventNotification + 28, // 16: filer_pb.AssignVolumeResponse.location:type_name -> filer_pb.Location + 28, // 17: filer_pb.Locations.locations:type_name -> filer_pb.Location + 56, // 18: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry + 30, // 19: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection + 7, // 20: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification + 57, // 21: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource + 58, // 22: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf + 5, // 23: filer_pb.CacheRemoteObjectToLocalClusterResponse.entry:type_name -> filer_pb.Entry + 27, // 24: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations + 0, // 25: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest + 2, // 26: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest + 12, // 27: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest + 14, // 28: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest + 16, // 29: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest + 18, // 30: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest + 20, // 31: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest + 22, // 32: filer_pb.SeaweedFiler.StreamRenameEntry:input_type -> filer_pb.StreamRenameEntryRequest + 24, // 33: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest + 26, // 34: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest + 31, // 35: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest + 33, // 36: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest + 35, // 37: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest + 37, // 38: filer_pb.SeaweedFiler.Ping:input_type -> filer_pb.PingRequest + 39, // 39: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest + 41, // 40: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 41, // 41: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 44, // 42: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest + 46, // 43: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest + 48, // 44: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest + 50, // 45: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest + 53, // 46: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:input_type -> filer_pb.CacheRemoteObjectToLocalClusterRequest + 1, // 47: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse + 3, // 48: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse + 13, // 49: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse + 15, // 50: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse + 17, // 51: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse + 19, // 52: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse + 21, // 53: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse + 23, // 54: filer_pb.SeaweedFiler.StreamRenameEntry:output_type -> filer_pb.StreamRenameEntryResponse + 25, // 55: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse + 29, // 56: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse + 32, // 57: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse + 34, // 58: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse + 36, // 59: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse + 38, // 60: filer_pb.SeaweedFiler.Ping:output_type -> filer_pb.PingResponse + 40, // 61: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse + 42, // 62: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 42, // 63: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 45, // 64: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse + 47, // 65: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse + 49, // 66: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse + 51, // 67: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse + 54, // 68: filer_pb.SeaweedFiler.CacheRemoteObjectToLocalCluster:output_type -> filer_pb.CacheRemoteObjectToLocalClusterResponse + 47, // [47:69] is the sub-list for method output_type + 25, // [25:47] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_filer_proto_init() } @@ -4976,22 +4538,708 @@ func file_filer_proto_init() { if File_filer_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FullEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunkManifest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FuseAttributes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamRenameEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamRenameEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CacheRemoteObjectToLocalClusterRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CacheRemoteObjectToLocalClusterResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse_Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf_PathConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_filer_proto_rawDesc), len(file_filer_proto_rawDesc)), - NumEnums: 1, - NumMessages: 70, + RawDescriptor: file_filer_proto_rawDesc, + NumEnums: 0, + NumMessages: 59, NumExtensions: 0, NumServices: 1, }, GoTypes: file_filer_proto_goTypes, DependencyIndexes: file_filer_proto_depIdxs, - EnumInfos: file_filer_proto_enumTypes, MessageInfos: file_filer_proto_msgTypes, }.Build() File_filer_proto = out.File + file_filer_proto_rawDesc = nil file_filer_proto_goTypes = nil file_filer_proto_depIdxs = nil } diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go index 80adab292..266bdae51 100644 --- a/weed/pb/filer_pb/filer_client.go +++ b/weed/pb/filer_pb/filer_client.go @@ -10,8 +10,8 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -20,12 +20,11 @@ var ( ) type FilerClient interface { - WithFilerClient(streamingMode bool, fn func(SeaweedFilerClient) error) error // 15 implementation + WithFilerClient(streamingMode bool, fn func(SeaweedFilerClient) error) error AdjustedUrl(location *Location) string - GetDataCenter() string } -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { +func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { dir, name := fullFilePath.DirAndName() @@ -37,9 +36,9 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath util.Fu } // glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := LookupEntry(ctx, client, request) + resp, err := LookupEntry(client, request) if err != nil { - glog.V(3).InfofCtx(ctx, "read %s %v: %v", fullFilePath, resp, err) + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) return err } @@ -55,9 +54,9 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath util.Fu return } -type EachEntryFunction func(entry *Entry, isLast bool) error +type EachEntryFunciton func(entry *Entry, isLast bool) error -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction) (err error) { +func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton) (err error) { var counter uint32 var startFrom string @@ -69,13 +68,13 @@ func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath var paginationLimit uint32 = 10000 - if err = doList(ctx, filerClient, fullDirPath, prefix, counterFunc, "", false, paginationLimit); err != nil { + if err = doList(filerClient, fullDirPath, prefix, counterFunc, "", false, paginationLimit); err != nil { return err } for counter == paginationLimit { counter = 0 - if err = doList(ctx, filerClient, fullDirPath, prefix, counterFunc, startFrom, false, paginationLimit); err != nil { + if err = doList(filerClient, fullDirPath, prefix, counterFunc, startFrom, false, paginationLimit); err != nil { return err } } @@ -83,23 +82,23 @@ func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath return nil } -func List(ctx context.Context, filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) { +func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { - return doSeaweedList(ctx, client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) }) } -func doList(ctx context.Context, filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) { +func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { - return doSeaweedList(ctx, client, fullDirPath, prefix, fn, startFrom, inclusive, limit) + return doSeaweedList(client, fullDirPath, prefix, fn, startFrom, inclusive, limit) }) } -func SeaweedList(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) { - return doSeaweedList(ctx, client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) +func SeaweedList(client SeaweedFilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) } -func doSeaweedList(ctx context.Context, client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunction, startFrom string, inclusive bool, limit uint32) (err error) { +func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { // Redundancy limit to make it correctly judge whether it is the last file. redLimit := limit @@ -117,8 +116,8 @@ func doSeaweedList(ctx context.Context, client SeaweedFilerClient, fullDirPath u InclusiveStartFrom: inclusive, } - glog.V(4).InfofCtx(ctx, "read directory: %v", request) - ctx, cancel := context.WithCancel(ctx) + glog.V(4).Infof("read directory: %v", request) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := client.ListEntries(ctx, request) if err != nil { @@ -156,7 +155,7 @@ func doSeaweedList(ctx context.Context, client SeaweedFilerClient, fullDirPath u return nil } -func Exists(ctx context.Context, filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { err = filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { @@ -165,14 +164,14 @@ func Exists(ctx context.Context, filerClient FilerClient, parentDirectoryPath st Name: entryName, } - glog.V(4).InfofCtx(ctx, "exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := LookupEntry(ctx, client, request) + glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + resp, err := LookupEntry(client, request) if err != nil { if err == ErrNotFound { exists = false return nil } - glog.V(0).InfofCtx(ctx, "exists entry %v: %v", request, err) + glog.V(0).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -184,7 +183,7 @@ func Exists(ctx context.Context, filerClient FilerClient, parentDirectoryPath st return } -func Touch(ctx context.Context, filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) { +func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { @@ -193,9 +192,9 @@ func Touch(ctx context.Context, filerClient FilerClient, parentDirectoryPath str Entry: entry, } - glog.V(4).InfofCtx(ctx, "touch entry %v/%v: %v", parentDirectoryPath, entryName, request) - if err := UpdateEntry(ctx, client, request); err != nil { - glog.V(0).InfofCtx(ctx, "touch exists entry %v: %v", request, err) + glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) + if err := UpdateEntry(client, request); err != nil { + glog.V(0).Infof("touch exists entry %v: %v", request, err) return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -204,13 +203,13 @@ func Touch(ctx context.Context, filerClient FilerClient, parentDirectoryPath str } -func Mkdir(ctx context.Context, filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { +func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { - return DoMkdir(ctx, client, parentDirectoryPath, dirName, fn) + return DoMkdir(client, parentDirectoryPath, dirName, fn) }) } -func DoMkdir(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { +func DoMkdir(client SeaweedFilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { entry := &Entry{ Name: dirName, IsDirectory: true, @@ -232,16 +231,16 @@ func DoMkdir(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath Entry: entry, } - glog.V(1).InfofCtx(ctx, "mkdir: %v", request) - if err := CreateEntry(ctx, client, request); err != nil { - glog.V(0).InfofCtx(ctx, "mkdir %v: %v", request, err) + glog.V(1).Infof("mkdir: %v", request) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } return nil } -func MkFile(ctx context.Context, filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk, fn func(entry *Entry)) error { +func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk, fn func(entry *Entry)) error { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { entry := &Entry{ @@ -266,9 +265,9 @@ func MkFile(ctx context.Context, filerClient FilerClient, parentDirectoryPath st Entry: entry, } - glog.V(1).InfofCtx(ctx, "create file: %s/%s", parentDirectoryPath, fileName) - if err := CreateEntry(ctx, client, request); err != nil { - glog.V(0).InfofCtx(ctx, "create file %v:%v", request, err) + glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -276,13 +275,13 @@ func MkFile(ctx context.Context, filerClient FilerClient, parentDirectoryPath st }) } -func Remove(ctx context.Context, filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error { +func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error { return filerClient.WithFilerClient(false, func(client SeaweedFilerClient) error { - return DoRemove(ctx, client, parentDirectoryPath, name, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster, signatures) + return DoRemove(client, parentDirectoryPath, name, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster, signatures) }) } -func DoRemove(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath string, name string, isDeleteData bool, isRecursive bool, ignoreRecursiveErr bool, isFromOtherCluster bool, signatures []int32) error { +func DoRemove(client SeaweedFilerClient, parentDirectoryPath string, name string, isDeleteData bool, isRecursive bool, ignoreRecursiveErr bool, isFromOtherCluster bool, signatures []int32) error { deleteEntryRequest := &DeleteEntryRequest{ Directory: parentDirectoryPath, Name: name, @@ -292,7 +291,7 @@ func DoRemove(ctx context.Context, client SeaweedFilerClient, parentDirectoryPat IsFromOtherCluster: isFromOtherCluster, Signatures: signatures, } - if resp, err := client.DeleteEntry(ctx, deleteEntryRequest); err != nil { + if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil { if strings.Contains(err.Error(), ErrNotFound.Error()) { return nil } diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go index eb2e9ccee..4e5b65f12 100644 --- a/weed/pb/filer_pb/filer_client_bfs.go +++ b/weed/pb/filer_pb/filer_client_bfs.go @@ -1,58 +1,51 @@ package filer_pb import ( - "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "io" "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" ) func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + K := 5 var jobQueueWg sync.WaitGroup - queue := util.NewQueue[util.FullPath]() + queue := util.NewQueue() jobQueueWg.Add(1) queue.Enqueue(parentPath) - terminates := make([]chan bool, K) + var isTerminating bool for i := 0; i < K; i++ { - terminates[i] = make(chan bool) - go func(j int) { + go func() { for { - select { - case <-terminates[j]: - return - default: - t := queue.Dequeue() - if t == "" { - time.Sleep(329 * time.Millisecond) - continue - } - dir := t - processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn) - if processErr != nil { - err = processErr - } - jobQueueWg.Done() + if isTerminating { + break } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(util.FullPath) + processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() } - }(i) + }() } jobQueueWg.Wait() - for i := 0; i < K; i++ { - close(terminates[i]) - } + isTerminating = true return } -func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue[util.FullPath], jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) { +func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) { - return ReadDirAllEntries(context.Background(), filerClient, parentPath, "", func(entry *Entry, isLast bool) error { + return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error { fn(parentPath, entry) @@ -68,28 +61,3 @@ func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queu }) } - -func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64, fn func(parentPath util.FullPath, entry *Entry) error) (err error) { - glog.V(0).Infof("TraverseBfsMetadata %v if before %v", dir, time.Unix(0, olderThanTsNs)) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := client.TraverseBfsMetadata(ctx, &TraverseBfsMetadataRequest{ - Directory: string(dir), - }) - if err != nil { - return fmt.Errorf("traverse bfs metadata: %w", err) - } - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("traverse bfs metadata: %w", err) - } - if err := fn(util.FullPath(resp.Directory), resp.Entry); err != nil { - return err - } - } - return nil -} diff --git a/weed/pb/filer_pb/filer_grpc.pb.go b/weed/pb/filer_pb/filer_grpc.pb.go index bc892380c..4a5f47d71 100644 --- a/weed/pb/filer_pb/filer_grpc.pb.go +++ b/weed/pb/filer_pb/filer_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: filer.proto package filer_pb @@ -15,49 +11,21 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SeaweedFiler_LookupDirectoryEntry_FullMethodName = "/filer_pb.SeaweedFiler/LookupDirectoryEntry" - SeaweedFiler_ListEntries_FullMethodName = "/filer_pb.SeaweedFiler/ListEntries" - SeaweedFiler_CreateEntry_FullMethodName = "/filer_pb.SeaweedFiler/CreateEntry" - SeaweedFiler_UpdateEntry_FullMethodName = "/filer_pb.SeaweedFiler/UpdateEntry" - SeaweedFiler_AppendToEntry_FullMethodName = "/filer_pb.SeaweedFiler/AppendToEntry" - SeaweedFiler_DeleteEntry_FullMethodName = "/filer_pb.SeaweedFiler/DeleteEntry" - SeaweedFiler_AtomicRenameEntry_FullMethodName = "/filer_pb.SeaweedFiler/AtomicRenameEntry" - SeaweedFiler_StreamRenameEntry_FullMethodName = "/filer_pb.SeaweedFiler/StreamRenameEntry" - SeaweedFiler_AssignVolume_FullMethodName = "/filer_pb.SeaweedFiler/AssignVolume" - SeaweedFiler_LookupVolume_FullMethodName = "/filer_pb.SeaweedFiler/LookupVolume" - SeaweedFiler_CollectionList_FullMethodName = "/filer_pb.SeaweedFiler/CollectionList" - SeaweedFiler_DeleteCollection_FullMethodName = "/filer_pb.SeaweedFiler/DeleteCollection" - SeaweedFiler_Statistics_FullMethodName = "/filer_pb.SeaweedFiler/Statistics" - SeaweedFiler_Ping_FullMethodName = "/filer_pb.SeaweedFiler/Ping" - SeaweedFiler_GetFilerConfiguration_FullMethodName = "/filer_pb.SeaweedFiler/GetFilerConfiguration" - SeaweedFiler_TraverseBfsMetadata_FullMethodName = "/filer_pb.SeaweedFiler/TraverseBfsMetadata" - SeaweedFiler_SubscribeMetadata_FullMethodName = "/filer_pb.SeaweedFiler/SubscribeMetadata" - SeaweedFiler_SubscribeLocalMetadata_FullMethodName = "/filer_pb.SeaweedFiler/SubscribeLocalMetadata" - SeaweedFiler_KvGet_FullMethodName = "/filer_pb.SeaweedFiler/KvGet" - SeaweedFiler_KvPut_FullMethodName = "/filer_pb.SeaweedFiler/KvPut" - SeaweedFiler_CacheRemoteObjectToLocalCluster_FullMethodName = "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster" - SeaweedFiler_DistributedLock_FullMethodName = "/filer_pb.SeaweedFiler/DistributedLock" - SeaweedFiler_DistributedUnlock_FullMethodName = "/filer_pb.SeaweedFiler/DistributedUnlock" - SeaweedFiler_FindLockOwner_FullMethodName = "/filer_pb.SeaweedFiler/FindLockOwner" - SeaweedFiler_TransferLocks_FullMethodName = "/filer_pb.SeaweedFiler/TransferLocks" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // SeaweedFilerClient is the client API for SeaweedFiler service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) - ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListEntriesResponse], error) + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) - StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamRenameEntryResponse], error) + StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (SeaweedFiler_StreamRenameEntryClient, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) @@ -65,17 +33,13 @@ type SeaweedFilerClient interface { Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) - TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TraverseBfsMetadataResponse], error) - SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) - SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) + SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) + SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) + KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) + LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error) - DistributedLock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) - DistributedUnlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) - FindLockOwner(ctx context.Context, in *FindLockOwnerRequest, opts ...grpc.CallOption) (*FindLockOwnerResponse, error) - // distributed lock management internal use only - TransferLocks(ctx context.Context, in *TransferLocksRequest, opts ...grpc.CallOption) (*TransferLocksResponse, error) } type seaweedFilerClient struct { @@ -87,22 +51,20 @@ func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient { } func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LookupDirectoryEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_LookupDirectoryEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ListEntriesResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[0], SeaweedFiler_ListEntries_FullMethodName, cOpts...) +func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[ListEntriesRequest, ListEntriesResponse]{ClientStream: stream} + x := &seaweedFilerListEntriesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -112,13 +74,26 @@ func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesReq return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_ListEntriesClient = grpc.ServerStreamingClient[ListEntriesResponse] +type SeaweedFiler_ListEntriesClient interface { + Recv() (*ListEntriesResponse, error) + grpc.ClientStream +} + +type seaweedFilerListEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { + m := new(ListEntriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CreateEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_CreateEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...) if err != nil { return nil, err } @@ -126,9 +101,8 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq } func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(UpdateEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_UpdateEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...) if err != nil { return nil, err } @@ -136,9 +110,8 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq } func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AppendToEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_AppendToEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...) if err != nil { return nil, err } @@ -146,9 +119,8 @@ func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntr } func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_DeleteEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...) if err != nil { return nil, err } @@ -156,22 +128,20 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq } func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AtomicRenameEntryResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_AtomicRenameEntry_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[StreamRenameEntryResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[1], SeaweedFiler_StreamRenameEntry_FullMethodName, cOpts...) +func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRenameEntryRequest, opts ...grpc.CallOption) (SeaweedFiler_StreamRenameEntryClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[1], "/filer_pb.SeaweedFiler/StreamRenameEntry", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[StreamRenameEntryRequest, StreamRenameEntryResponse]{ClientStream: stream} + x := &seaweedFilerStreamRenameEntryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -181,13 +151,26 @@ func (c *seaweedFilerClient) StreamRenameEntry(ctx context.Context, in *StreamRe return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_StreamRenameEntryClient = grpc.ServerStreamingClient[StreamRenameEntryResponse] +type SeaweedFiler_StreamRenameEntryClient interface { + Recv() (*StreamRenameEntryResponse, error) + grpc.ClientStream +} + +type seaweedFilerStreamRenameEntryClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerStreamRenameEntryClient) Recv() (*StreamRenameEntryResponse, error) { + m := new(StreamRenameEntryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AssignVolumeResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_AssignVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...) if err != nil { return nil, err } @@ -195,9 +178,8 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR } func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LookupVolumeResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_LookupVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -205,9 +187,8 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR } func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CollectionListResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_CollectionList_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -215,9 +196,8 @@ func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionL } func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteCollectionResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_DeleteCollection_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -225,9 +205,8 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol } func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatisticsResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_Statistics_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -235,9 +214,8 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque } func (c *seaweedFilerClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PingResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_Ping_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Ping", in, out, opts...) if err != nil { return nil, err } @@ -245,22 +223,20 @@ func (c *seaweedFilerClient) Ping(ctx context.Context, in *PingRequest, opts ... } func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetFilerConfigurationResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_GetFilerConfiguration_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *TraverseBfsMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TraverseBfsMetadataResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], SeaweedFiler_TraverseBfsMetadata_FullMethodName, cOpts...) +func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[TraverseBfsMetadataRequest, TraverseBfsMetadataResponse]{ClientStream: stream} + x := &seaweedFilerSubscribeMetadataClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -270,16 +246,29 @@ func (c *seaweedFilerClient) TraverseBfsMetadata(ctx context.Context, in *Traver return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_TraverseBfsMetadataClient = grpc.ServerStreamingClient[TraverseBfsMetadataResponse] +type SeaweedFiler_SubscribeMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} -func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], SeaweedFiler_SubscribeMetadata_FullMethodName, cOpts...) +type seaweedFilerSubscribeMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[3], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ClientStream: stream} + x := &seaweedFilerSubscribeLocalMetadataClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -289,32 +278,66 @@ func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *Subscrib return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_SubscribeMetadataClient = grpc.ServerStreamingClient[SubscribeMetadataResponse] +type SeaweedFiler_SubscribeLocalMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} -func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SubscribeMetadataResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[4], SeaweedFiler_SubscribeLocalMetadata_FullMethodName, cOpts...) +type seaweedFilerSubscribeLocalMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedFiler_ServiceDesc.Streams[4], "/filer_pb.SeaweedFiler/KeepConnected", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } + x := &seaweedFilerKeepConnectedClient{stream} return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_SubscribeLocalMetadataClient = grpc.ServerStreamingClient[SubscribeMetadataResponse] +type SeaweedFiler_KeepConnectedClient interface { + Send(*KeepConnectedRequest) error + Recv() (*KeepConnectedResponse, error) + grpc.ClientStream +} + +type seaweedFilerKeepConnectedClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) { + m := new(KeepConnectedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) { + out := new(LocateBrokerResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(KvGetResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_KvGet_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvGet", in, out, opts...) if err != nil { return nil, err } @@ -322,9 +345,8 @@ func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts . } func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(KvPutResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_KvPut_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvPut", in, out, opts...) if err != nil { return nil, err } @@ -332,49 +354,8 @@ func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts . } func (c *seaweedFilerClient) CacheRemoteObjectToLocalCluster(ctx context.Context, in *CacheRemoteObjectToLocalClusterRequest, opts ...grpc.CallOption) (*CacheRemoteObjectToLocalClusterResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CacheRemoteObjectToLocalClusterResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_CacheRemoteObjectToLocalCluster_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedFilerClient) DistributedLock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(LockResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_DistributedLock_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedFilerClient) DistributedUnlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(UnlockResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_DistributedUnlock_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedFilerClient) FindLockOwner(ctx context.Context, in *FindLockOwnerRequest, opts ...grpc.CallOption) (*FindLockOwnerResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(FindLockOwnerResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_FindLockOwner_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedFilerClient) TransferLocks(ctx context.Context, in *TransferLocksRequest, opts ...grpc.CallOption) (*TransferLocksResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(TransferLocksResponse) - err := c.cc.Invoke(ctx, SeaweedFiler_TransferLocks_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster", in, out, opts...) if err != nil { return nil, err } @@ -383,16 +364,16 @@ func (c *seaweedFilerClient) TransferLocks(ctx context.Context, in *TransferLock // SeaweedFilerServer is the server API for SeaweedFiler service. // All implementations must embed UnimplementedSeaweedFilerServer -// for forward compatibility. +// for forward compatibility type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) - ListEntries(*ListEntriesRequest, grpc.ServerStreamingServer[ListEntriesResponse]) error + ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) - StreamRenameEntry(*StreamRenameEntryRequest, grpc.ServerStreamingServer[StreamRenameEntryResponse]) error + StreamRenameEntry(*StreamRenameEntryRequest, SeaweedFiler_StreamRenameEntryServer) error AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) @@ -400,31 +381,24 @@ type SeaweedFilerServer interface { Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) Ping(context.Context, *PingRequest) (*PingResponse, error) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) - TraverseBfsMetadata(*TraverseBfsMetadataRequest, grpc.ServerStreamingServer[TraverseBfsMetadataResponse]) error - SubscribeMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error - SubscribeLocalMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error + SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error + SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error + KeepConnected(SeaweedFiler_KeepConnectedServer) error + LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error) - DistributedLock(context.Context, *LockRequest) (*LockResponse, error) - DistributedUnlock(context.Context, *UnlockRequest) (*UnlockResponse, error) - FindLockOwner(context.Context, *FindLockOwnerRequest) (*FindLockOwnerResponse, error) - // distributed lock management internal use only - TransferLocks(context.Context, *TransferLocksRequest) (*TransferLocksResponse, error) mustEmbedUnimplementedSeaweedFilerServer() } -// UnimplementedSeaweedFilerServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedFilerServer struct{} +// UnimplementedSeaweedFilerServer must be embedded to have forward compatible implementations. +type UnimplementedSeaweedFilerServer struct { +} func (UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented") } -func (UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, grpc.ServerStreamingServer[ListEntriesResponse]) error { +func (UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error { return status.Errorf(codes.Unimplemented, "method ListEntries not implemented") } func (UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) { @@ -442,7 +416,7 @@ func (UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntry func (UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented") } -func (UnimplementedSeaweedFilerServer) StreamRenameEntry(*StreamRenameEntryRequest, grpc.ServerStreamingServer[StreamRenameEntryResponse]) error { +func (UnimplementedSeaweedFilerServer) StreamRenameEntry(*StreamRenameEntryRequest, SeaweedFiler_StreamRenameEntryServer) error { return status.Errorf(codes.Unimplemented, "method StreamRenameEntry not implemented") } func (UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) { @@ -466,15 +440,18 @@ func (UnimplementedSeaweedFilerServer) Ping(context.Context, *PingRequest) (*Pin func (UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented") } -func (UnimplementedSeaweedFilerServer) TraverseBfsMetadata(*TraverseBfsMetadataRequest, grpc.ServerStreamingServer[TraverseBfsMetadataResponse]) error { - return status.Errorf(codes.Unimplemented, "method TraverseBfsMetadata not implemented") -} -func (UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error { +func (UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error { return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented") } -func (UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, grpc.ServerStreamingServer[SubscribeMetadataResponse]) error { +func (UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error { return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented") } +func (UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented") +} func (UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method KvGet not implemented") } @@ -484,20 +461,7 @@ func (UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*K func (UnimplementedSeaweedFilerServer) CacheRemoteObjectToLocalCluster(context.Context, *CacheRemoteObjectToLocalClusterRequest) (*CacheRemoteObjectToLocalClusterResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CacheRemoteObjectToLocalCluster not implemented") } -func (UnimplementedSeaweedFilerServer) DistributedLock(context.Context, *LockRequest) (*LockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DistributedLock not implemented") -} -func (UnimplementedSeaweedFilerServer) DistributedUnlock(context.Context, *UnlockRequest) (*UnlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DistributedUnlock not implemented") -} -func (UnimplementedSeaweedFilerServer) FindLockOwner(context.Context, *FindLockOwnerRequest) (*FindLockOwnerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FindLockOwner not implemented") -} -func (UnimplementedSeaweedFilerServer) TransferLocks(context.Context, *TransferLocksRequest) (*TransferLocksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TransferLocks not implemented") -} func (UnimplementedSeaweedFilerServer) mustEmbedUnimplementedSeaweedFilerServer() {} -func (UnimplementedSeaweedFilerServer) testEmbeddedByValue() {} // UnsafeSeaweedFilerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to SeaweedFilerServer will @@ -507,13 +471,6 @@ type UnsafeSeaweedFilerServer interface { } func RegisterSeaweedFilerServer(s grpc.ServiceRegistrar, srv SeaweedFilerServer) { - // If the following call pancis, it indicates UnimplementedSeaweedFilerServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&SeaweedFiler_ServiceDesc, srv) } @@ -527,7 +484,7 @@ func _SeaweedFiler_LookupDirectoryEntry_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_LookupDirectoryEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/LookupDirectoryEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).LookupDirectoryEntry(ctx, req.(*LookupDirectoryEntryRequest)) @@ -540,11 +497,21 @@ func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SeaweedFilerServer).ListEntries(m, &grpc.GenericServerStream[ListEntriesRequest, ListEntriesResponse]{ServerStream: stream}) + return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_ListEntriesServer = grpc.ServerStreamingServer[ListEntriesResponse] +type SeaweedFiler_ListEntriesServer interface { + Send(*ListEntriesResponse) error + grpc.ServerStream +} + +type seaweedFilerListEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error { + return x.ServerStream.SendMsg(m) +} func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateEntryRequest) @@ -556,7 +523,7 @@ func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_CreateEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/CreateEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).CreateEntry(ctx, req.(*CreateEntryRequest)) @@ -574,7 +541,7 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_UpdateEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/UpdateEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).UpdateEntry(ctx, req.(*UpdateEntryRequest)) @@ -592,7 +559,7 @@ func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_AppendToEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest)) @@ -610,7 +577,7 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_DeleteEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/DeleteEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).DeleteEntry(ctx, req.(*DeleteEntryRequest)) @@ -628,7 +595,7 @@ func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_AtomicRenameEntry_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest)) @@ -641,11 +608,21 @@ func _SeaweedFiler_StreamRenameEntry_Handler(srv interface{}, stream grpc.Server if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SeaweedFilerServer).StreamRenameEntry(m, &grpc.GenericServerStream[StreamRenameEntryRequest, StreamRenameEntryResponse]{ServerStream: stream}) + return srv.(SeaweedFilerServer).StreamRenameEntry(m, &seaweedFilerStreamRenameEntryServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_StreamRenameEntryServer = grpc.ServerStreamingServer[StreamRenameEntryResponse] +type SeaweedFiler_StreamRenameEntryServer interface { + Send(*StreamRenameEntryResponse) error + grpc.ServerStream +} + +type seaweedFilerStreamRenameEntryServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerStreamRenameEntryServer) Send(m *StreamRenameEntryResponse) error { + return x.ServerStream.SendMsg(m) +} func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AssignVolumeRequest) @@ -657,7 +634,7 @@ func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_AssignVolume_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/AssignVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).AssignVolume(ctx, req.(*AssignVolumeRequest)) @@ -675,7 +652,7 @@ func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_LookupVolume_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/LookupVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).LookupVolume(ctx, req.(*LookupVolumeRequest)) @@ -693,7 +670,7 @@ func _SeaweedFiler_CollectionList_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_CollectionList_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/CollectionList", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).CollectionList(ctx, req.(*CollectionListRequest)) @@ -711,7 +688,7 @@ func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_DeleteCollection_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/DeleteCollection", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) @@ -729,7 +706,7 @@ func _SeaweedFiler_Statistics_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_Statistics_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/Statistics", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).Statistics(ctx, req.(*StatisticsRequest)) @@ -747,7 +724,7 @@ func _SeaweedFiler_Ping_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_Ping_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/Ping", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).Ping(ctx, req.(*PingRequest)) @@ -765,7 +742,7 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_GetFilerConfiguration_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/GetFilerConfiguration", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, req.(*GetFilerConfigurationRequest)) @@ -773,38 +750,91 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_TraverseBfsMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(TraverseBfsMetadataRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(SeaweedFilerServer).TraverseBfsMetadata(m, &grpc.GenericServerStream[TraverseBfsMetadataRequest, TraverseBfsMetadataResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_TraverseBfsMetadataServer = grpc.ServerStreamingServer[TraverseBfsMetadataResponse] - func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SubscribeMetadataRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SeaweedFilerServer).SubscribeMetadata(m, &grpc.GenericServerStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ServerStream: stream}) + return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_SubscribeMetadataServer = grpc.ServerStreamingServer[SubscribeMetadataResponse] +type SeaweedFiler_SubscribeMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SubscribeMetadataRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &grpc.GenericServerStream[SubscribeMetadataRequest, SubscribeMetadataResponse]{ServerStream: stream}) + return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedFiler_SubscribeLocalMetadataServer = grpc.ServerStreamingServer[SubscribeMetadataResponse] +type SeaweedFiler_SubscribeLocalMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeLocalMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream}) +} + +type SeaweedFiler_KeepConnectedServer interface { + Send(*KeepConnectedResponse) error + Recv() (*KeepConnectedRequest, error) + grpc.ServerStream +} + +type seaweedFilerKeepConnectedServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocateBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).LocateBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/LocateBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(KvGetRequest) @@ -816,7 +846,7 @@ func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_KvGet_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/KvGet", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).KvGet(ctx, req.(*KvGetRequest)) @@ -834,7 +864,7 @@ func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_KvPut_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/KvPut", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).KvPut(ctx, req.(*KvPutRequest)) @@ -852,7 +882,7 @@ func _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler(srv interface{}, ctx } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedFiler_CacheRemoteObjectToLocalCluster_FullMethodName, + FullMethod: "/filer_pb.SeaweedFiler/CacheRemoteObjectToLocalCluster", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedFilerServer).CacheRemoteObjectToLocalCluster(ctx, req.(*CacheRemoteObjectToLocalClusterRequest)) @@ -860,78 +890,6 @@ func _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler(srv interface{}, ctx return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_DistributedLock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).DistributedLock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedFiler_DistributedLock_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).DistributedLock(ctx, req.(*LockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedFiler_DistributedUnlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UnlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).DistributedUnlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedFiler_DistributedUnlock_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).DistributedUnlock(ctx, req.(*UnlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedFiler_FindLockOwner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FindLockOwnerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).FindLockOwner(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedFiler_FindLockOwner_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).FindLockOwner(ctx, req.(*FindLockOwnerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedFiler_TransferLocks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TransferLocksRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).TransferLocks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedFiler_TransferLocks_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).TransferLocks(ctx, req.(*TransferLocksRequest)) - } - return interceptor(ctx, in, info, handler) -} - // SeaweedFiler_ServiceDesc is the grpc.ServiceDesc for SeaweedFiler service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -991,6 +949,10 @@ var SeaweedFiler_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetFilerConfiguration", Handler: _SeaweedFiler_GetFilerConfiguration_Handler, }, + { + MethodName: "LocateBroker", + Handler: _SeaweedFiler_LocateBroker_Handler, + }, { MethodName: "KvGet", Handler: _SeaweedFiler_KvGet_Handler, @@ -1003,22 +965,6 @@ var SeaweedFiler_ServiceDesc = grpc.ServiceDesc{ MethodName: "CacheRemoteObjectToLocalCluster", Handler: _SeaweedFiler_CacheRemoteObjectToLocalCluster_Handler, }, - { - MethodName: "DistributedLock", - Handler: _SeaweedFiler_DistributedLock_Handler, - }, - { - MethodName: "DistributedUnlock", - Handler: _SeaweedFiler_DistributedUnlock_Handler, - }, - { - MethodName: "FindLockOwner", - Handler: _SeaweedFiler_FindLockOwner_Handler, - }, - { - MethodName: "TransferLocks", - Handler: _SeaweedFiler_TransferLocks_Handler, - }, }, Streams: []grpc.StreamDesc{ { @@ -1031,11 +977,6 @@ var SeaweedFiler_ServiceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_StreamRenameEntry_Handler, ServerStreams: true, }, - { - StreamName: "TraverseBfsMetadata", - Handler: _SeaweedFiler_TraverseBfsMetadata_Handler, - ServerStreams: true, - }, { StreamName: "SubscribeMetadata", Handler: _SeaweedFiler_SubscribeMetadata_Handler, @@ -1046,6 +987,12 @@ var SeaweedFiler_ServiceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler, ServerStreams: true, }, + { + StreamName: "KeepConnected", + Handler: _SeaweedFiler_KeepConnected_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "filer.proto", } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index b5fd4e1e0..2c9526b80 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -6,18 +6,15 @@ import ( "fmt" "os" "strings" - "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/golang/protobuf/proto" "github.com/viant/ptrie" - "google.golang.org/protobuf/proto" ) -const cutoffTimeNewEmptyDir = 3 - func (entry *Entry) IsInRemoteOnly() bool { - return len(entry.GetChunks()) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0 + return len(entry.Chunks) == 0 && entry.RemoteEntry != nil && entry.RemoteEntry.RemoteSize > 0 } func (entry *Entry) IsDirectoryKeyObject() bool { @@ -31,10 +28,6 @@ func (entry *Entry) FileMode() (fileMode os.FileMode) { return } -func (entry *Entry) IsOlderDir() bool { - return entry.IsDirectory && entry.Attributes != nil && entry.Attributes.Mime == "" && entry.Attributes.GetCrtime() <= time.Now().Unix()-cutoffTimeNewEmptyDir -} - func ToFileIdObject(fileIdStr string) (*FileId, error) { t, err := needle.ParseFileIdFromString(fileIdStr) if err != nil { @@ -108,36 +101,36 @@ func AfterEntryDeserialization(chunks []*FileChunk) { } } -func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error { - resp, err := client.CreateEntry(ctx, request) +func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(context.Background(), request) if err != nil { - glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) - return fmt.Errorf("CreateEntry: %w", err) + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry: %v", err) } if resp.Error != "" { - glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) return fmt.Errorf("CreateEntry : %v", resp.Error) } return nil } -func UpdateEntry(ctx context.Context, client SeaweedFilerClient, request *UpdateEntryRequest) error { - _, err := client.UpdateEntry(ctx, request) +func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error { + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(1).InfofCtx(ctx, "update entry %s/%s :%v", request.Directory, request.Entry.Name, err) - return fmt.Errorf("UpdateEntry: %w", err) + glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err) + return fmt.Errorf("UpdateEntry: %v", err) } return nil } -func LookupEntry(ctx context.Context, client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { - resp, err := client.LookupDirectoryEntry(ctx, request) +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { return nil, ErrNotFound } - glog.V(3).InfofCtx(ctx, "read %s/%v: %v", request.Directory, request.Name, err) - return nil, fmt.Errorf("LookupEntry1: %w", err) + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + return nil, fmt.Errorf("LookupEntry1: %v", err) } if resp.Entry == nil { return nil, ErrNotFound @@ -150,22 +143,18 @@ var ErrNotFound = errors.New("filer: no entry is found in filer store") func IsEmpty(event *SubscribeMetadataResponse) bool { return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry == nil } - func IsCreate(event *SubscribeMetadataResponse) bool { return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry == nil } - func IsUpdate(event *SubscribeMetadataResponse) bool { return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry != nil && event.Directory == event.EventNotification.NewParentPath && event.EventNotification.NewEntry.Name == event.EventNotification.OldEntry.Name } - func IsDelete(event *SubscribeMetadataResponse) bool { return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry != nil } - func IsRename(event *SubscribeMetadataResponse) bool { return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry != nil && diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go index 4e0b53017..0009afdbe 100644 --- a/weed/pb/filer_pb/filer_pb_helper_test.go +++ b/weed/pb/filer_pb/filer_pb_helper_test.go @@ -3,7 +3,7 @@ package filer_pb import ( "testing" - "google.golang.org/protobuf/proto" + "github.com/golang/protobuf/proto" ) func TestFileIdSize(t *testing.T) { diff --git a/weed/pb/filer_pb_tail.go b/weed/pb/filer_pb_tail.go index f5ffac129..2d6a7898b 100644 --- a/weed/pb/filer_pb_tail.go +++ b/weed/pb/filer_pb_tail.go @@ -3,9 +3,9 @@ package pb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" "io" "time" @@ -17,62 +17,51 @@ const ( TrivialOnError EventErrorType = iota FatalOnError RetryForeverOnError - DontLogError ) -// MetadataFollowOption is used to control the behavior of the metadata following -// process. Part of it is used as a cursor to resume the following process. -type MetadataFollowOption struct { - ClientName string - ClientId int32 - ClientEpoch int32 - SelfSignature int32 - PathPrefix string - AdditionalPathPrefixes []string - DirectoriesToWatch []string - StartTsNs int64 - StopTsNs int64 - EventErrorType EventErrorType -} - type ProcessMetadataFunc func(resp *filer_pb.SubscribeMetadataResponse) error -func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption, option *MetadataFollowOption, processEventFn ProcessMetadataFunc) error { +func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption, clientName string, clientId int32, + pathPrefix string, additionalPathPrefixes []string, lastTsNs int64, untilTsNs int64, selfSignature int32, + processEventFn ProcessMetadataFunc, eventErrorType EventErrorType) error { - err := WithFilerClient(true, option.SelfSignature, filerAddress, grpcDialOption, makeSubscribeMetadataFunc(option, processEventFn)) + err := WithFilerClient(true, filerAddress, grpcDialOption, makeSubscribeMetadataFunc(clientName, clientId, + pathPrefix, additionalPathPrefixes, &lastTsNs, untilTsNs, selfSignature, processEventFn, eventErrorType)) if err != nil { - return fmt.Errorf("subscribing filer meta change: %w", err) + return fmt.Errorf("subscribing filer meta change: %v", err) } return err } -func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient, option *MetadataFollowOption, processEventFn ProcessMetadataFunc) error { +func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient, + clientName string, clientId int32, pathPrefix string, lastTsNs *int64, untilTsNs int64, selfSignature int32, + processEventFn ProcessMetadataFunc, eventErrorType EventErrorType) error { - err := filerClient.WithFilerClient(true, makeSubscribeMetadataFunc(option, processEventFn)) + err := filerClient.WithFilerClient(true, makeSubscribeMetadataFunc(clientName, clientId, + pathPrefix, nil, lastTsNs, untilTsNs, selfSignature, processEventFn, eventErrorType)) if err != nil { - return fmt.Errorf("subscribing filer meta change: %w", err) + return fmt.Errorf("subscribing filer meta change: %v", err) } return nil } -func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn ProcessMetadataFunc) func(client filer_pb.SeaweedFilerClient) error { +func makeSubscribeMetadataFunc(clientName string, clientId int32, pathPrefix string, additionalPathPrefixes []string, lastTsNs *int64, untilTsNs int64, selfSignature int32, + processEventFn ProcessMetadataFunc, eventErrorType EventErrorType) func(client filer_pb.SeaweedFilerClient) error { return func(client filer_pb.SeaweedFilerClient) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ - ClientName: option.ClientName, - PathPrefix: option.PathPrefix, - PathPrefixes: option.AdditionalPathPrefixes, - Directories: option.DirectoriesToWatch, - SinceNs: option.StartTsNs, - Signature: option.SelfSignature, - ClientId: option.ClientId, - ClientEpoch: option.ClientEpoch, - UntilNs: option.StopTsNs, + ClientName: clientName, + PathPrefix: pathPrefix, + PathPrefixes: additionalPathPrefixes, + SinceNs: *lastTsNs, + Signature: selfSignature, + ClientId: clientId, + UntilNs: untilTsNs, }) if err != nil { - return fmt.Errorf("subscribe: %w", err) + return fmt.Errorf("subscribe: %v", err) } for { @@ -85,25 +74,23 @@ func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn Proc } if err := processEventFn(resp); err != nil { - switch option.EventErrorType { + switch eventErrorType { case TrivialOnError: glog.Errorf("process %v: %v", resp, err) case FatalOnError: glog.Fatalf("process %v: %v", resp, err) case RetryForeverOnError: - util.RetryUntil("followMetaUpdates", func() error { + util.RetryForever("followMetaUpdates", func() error { return processEventFn(resp) }, func(err error) bool { glog.Errorf("process %v: %v", resp, err) return true }) - case DontLogError: - // pass default: glog.Errorf("process %v: %v", resp, err) } } - option.StartTsNs = resp.TsNs + *lastTsNs = resp.TsNs } } } diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go index e822c36c8..c7cb82a22 100644 --- a/weed/pb/grpc_client_server.go +++ b/weed/pb/grpc_client_server.go @@ -3,28 +3,22 @@ package pb import ( "context" "fmt" - "math/rand/v2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "math/rand" "net/http" "strconv" "strings" "sync" "time" - "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/util/request_id" - "google.golang.org/grpc/metadata" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" ) const ( @@ -62,7 +56,6 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { }), grpc.MaxRecvMsgSize(Max_Message_Size), grpc.MaxSendMsgSize(Max_Message_Size), - grpc.UnaryInterceptor(requestIDUnaryInterceptor()), ) for _, opt := range opts { if opt != nil { @@ -72,17 +65,15 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { return grpc.NewServer(options...) } -func GrpcDial(ctx context.Context, address string, waitForReady bool, opts ...grpc.DialOption) (*grpc.ClientConn, error) { +func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { // opts = append(opts, grpc.WithBlock()) // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) var options []grpc.DialOption - options = append(options, - // grpc.WithTransportCredentials(insecure.NewCredentials()), + // grpc.WithInsecure(), grpc.WithDefaultCallOptions( grpc.MaxCallSendMsgSize(Max_Message_Size), grpc.MaxCallRecvMsgSize(Max_Message_Size), - grpc.WaitForReady(waitForReady), ), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 30 * time.Second, // client ping server if no activity for this long @@ -97,7 +88,7 @@ func GrpcDial(ctx context.Context, address string, waitForReady bool, opts ...gr return grpc.DialContext(ctx, address, options...) } -func getOrCreateConnection(address string, waitForReady bool, opts ...grpc.DialOption) (*versionedGrpcClient, error) { +func getOrCreateConnection(address string, opts ...grpc.DialOption) (*versionedGrpcClient, error) { grpcClientsLock.Lock() defer grpcClientsLock.Unlock() @@ -108,7 +99,7 @@ func getOrCreateConnection(address string, waitForReady bool, opts ...grpc.DialO } ctx := context.Background() - grpcConnection, err := GrpcDial(ctx, address, waitForReady, opts...) + grpcConnection, err := GrpcDial(ctx, address, opts...) if err != nil { return nil, fmt.Errorf("fail to dial %s: %v", address, err) } @@ -123,41 +114,11 @@ func getOrCreateConnection(address string, waitForReady bool, opts ...grpc.DialO return vgc, nil } -func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor { - return func( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (interface{}, error) { - incomingMd, _ := metadata.FromIncomingContext(ctx) - idList := incomingMd.Get(request_id.AmzRequestIDHeader) - var reqID string - if len(idList) > 0 { - reqID = idList[0] - } - if reqID == "" { - reqID = uuid.New().String() - } - - ctx = metadata.NewOutgoingContext(ctx, - metadata.New(map[string]string{ - request_id.AmzRequestIDHeader: reqID, - })) - - ctx = request_id.Set(ctx, reqID) - - grpc.SetTrailer(ctx, metadata.Pairs(request_id.AmzRequestIDHeader, reqID)) - - return handler(ctx, req) - } -} - // WithGrpcClient In streamingMode, always use a fresh connection. Otherwise, try to reuse an existing connection. -func WithGrpcClient(streamingMode bool, signature int32, fn func(*grpc.ClientConn) error, address string, waitForReady bool, opts ...grpc.DialOption) error { +func WithGrpcClient(streamingMode bool, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { if !streamingMode { - vgc, err := getOrCreateConnection(address, waitForReady, opts...) + vgc, err := getOrCreateConnection(address, opts...) if err != nil { return fmt.Errorf("getOrCreateConnection %s: %v", address, err) } @@ -177,12 +138,7 @@ func WithGrpcClient(streamingMode bool, signature int32, fn func(*grpc.ClientCon } return executionErr } else { - ctx := context.Background() - if signature != 0 { - md := metadata.New(map[string]string{"sw-client-id": fmt.Sprintf("%d", signature)}) - ctx = metadata.NewOutgoingContext(ctx, md) - } - grpcConnection, err := GrpcDial(ctx, address, waitForReady, opts...) + grpcConnection, err := GrpcDial(context.Background(), address, opts...) if err != nil { return fmt.Errorf("fail to dial %s: %v", address, err) } @@ -200,7 +156,7 @@ func ParseServerAddress(server string, deltaPort int) (newServerAddress string, host, port, parseErr := hostAndPort(server) if parseErr != nil { - return "", fmt.Errorf("server port parse error: %w", parseErr) + return "", fmt.Errorf("server port parse error: %v", parseErr) } newPort := int(port) + deltaPort @@ -215,7 +171,7 @@ func hostAndPort(address string) (host string, port uint64, err error) { } port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64) if err != nil { - return "", 0, fmt.Errorf("server port parse error: %w", err) + return "", 0, fmt.Errorf("server port parse error: %v", err) } return address[:colonIndex], port, err @@ -244,29 +200,29 @@ func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) { return util.JoinHostPort(host, port) } -func WithMasterClient(streamingMode bool, master ServerAddress, grpcDialOption grpc.DialOption, waitForReady bool, fn func(client master_pb.SeaweedClient) error) error { - return WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { +func WithMasterClient(streamingMode bool, master ServerAddress, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + return WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, master.ToGrpcAddress(), waitForReady, grpcDialOption) + }, master.ToGrpcAddress(), grpcDialOption) } func WithVolumeServerClient(streamingMode bool, volumeServer ServerAddress, grpcDialOption grpc.DialOption, fn func(client volume_server_pb.VolumeServerClient) error) error { - return WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { + return WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) - }, volumeServer.ToGrpcAddress(), false, grpcDialOption) + }, volumeServer.ToGrpcAddress(), grpcDialOption) } func WithOneOfGrpcMasterClients(streamingMode bool, masterGrpcAddresses map[string]ServerAddress, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) (err error) { for _, masterGrpcAddress := range masterGrpcAddresses { - err = WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { + err = WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, masterGrpcAddress.ToGrpcAddress(), false, grpcDialOption) + }, masterGrpcAddress.ToGrpcAddress(), grpcDialOption) if err == nil { return nil } @@ -275,37 +231,37 @@ func WithOneOfGrpcMasterClients(streamingMode bool, masterGrpcAddresses map[stri return err } -func WithBrokerGrpcClient(streamingMode bool, brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client mq_pb.SeaweedMessagingClient) error) error { +func WithBrokerGrpcClient(streamingMode bool, brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error { - return WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { - client := mq_pb.NewSeaweedMessagingClient(grpcConnection) + return WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { + client := messaging_pb.NewSeaweedMessagingClient(grpcConnection) return fn(client) - }, brokerGrpcAddress, false, grpcDialOption) + }, brokerGrpcAddress, grpcDialOption) } -func WithFilerClient(streamingMode bool, signature int32, filer ServerAddress, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { +func WithFilerClient(streamingMode bool, filer ServerAddress, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { - return WithGrpcFilerClient(streamingMode, signature, filer, grpcDialOption, fn) + return WithGrpcFilerClient(streamingMode, filer, grpcDialOption, fn) } -func WithGrpcFilerClient(streamingMode bool, signature int32, filerAddress ServerAddress, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { +func WithGrpcFilerClient(streamingMode bool, filerGrpcAddress ServerAddress, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { - return WithGrpcClient(streamingMode, signature, func(grpcConnection *grpc.ClientConn) error { + return WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, filerAddress.ToGrpcAddress(), false, grpcDialOption) + }, filerGrpcAddress.ToGrpcAddress(), grpcDialOption) } func WithOneOfGrpcFilerClients(streamingMode bool, filerAddresses []ServerAddress, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) (err error) { for _, filerAddress := range filerAddresses { - err = WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { + err = WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, filerAddress.ToGrpcAddress(), false, grpcDialOption) + }, filerAddress.ToGrpcAddress(), grpcDialOption) if err == nil { return nil } @@ -313,10 +269,3 @@ func WithOneOfGrpcFilerClients(streamingMode bool, filerAddresses []ServerAddres return err } - -func WithWorkerClient(streamingMode bool, workerAddress string, grpcDialOption grpc.DialOption, fn func(client worker_pb.WorkerServiceClient) error) error { - return WithGrpcClient(streamingMode, 0, func(grpcConnection *grpc.ClientConn) error { - client := worker_pb.NewWorkerServiceClient(grpcConnection) - return fn(client) - }, workerAddress, false, grpcDialOption) -} diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto index 99bb65ef2..558bd2b70 100644 --- a/weed/pb/iam.proto +++ b/weed/pb/iam.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package iam_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "IamProto"; @@ -16,14 +16,12 @@ service SeaweedIdentityAccessManagement { message S3ApiConfiguration { repeated Identity identities = 1; - repeated Account accounts = 2; } message Identity { string name = 1; repeated Credential credentials = 2; repeated string actions = 3; - Account account = 4; } message Credential { @@ -33,12 +31,6 @@ message Credential { // bool is_disabled = 4; } -message Account { - string id = 1; - string display_name = 2; - string email_address = 3; -} - /* message Policy { repeated Statement statements = 1; @@ -56,4 +48,4 @@ message Resource { string bucket = 1; // string path = 2; } -*/ +*/ \ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go index 4eabf8dc3..89a4f1584 100644 --- a/weed/pb/iam_pb/iam.pb.go +++ b/weed/pb/iam_pb/iam.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: iam.proto package iam_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,18 +21,20 @@ const ( ) type S3ApiConfiguration struct { - state protoimpl.MessageState `protogen:"open.v1"` - Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"` - Accounts []*Account `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"` } func (x *S3ApiConfiguration) Reset() { *x = S3ApiConfiguration{} - mi := &file_iam_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *S3ApiConfiguration) String() string { @@ -44,7 +45,7 @@ func (*S3ApiConfiguration) ProtoMessage() {} func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message { mi := &file_iam_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,28 +67,23 @@ func (x *S3ApiConfiguration) GetIdentities() []*Identity { return nil } -func (x *S3ApiConfiguration) GetAccounts() []*Account { - if x != nil { - return x.Accounts - } - return nil -} - type Identity struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"` - Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` - Account *Account `protobuf:"bytes,4,opt,name=account,proto3" json:"account,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` } func (x *Identity) Reset() { *x = Identity{} - mi := &file_iam_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Identity) String() string { @@ -98,7 +94,7 @@ func (*Identity) ProtoMessage() {} func (x *Identity) ProtoReflect() protoreflect.Message { mi := &file_iam_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -134,26 +130,22 @@ func (x *Identity) GetActions() []string { return nil } -func (x *Identity) GetAccount() *Account { - if x != nil { - return x.Account - } - return nil -} - type Credential struct { - state protoimpl.MessageState `protogen:"open.v1"` - AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` - SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` } func (x *Credential) Reset() { *x = Credential{} - mi := &file_iam_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Credential) String() string { @@ -164,7 +156,7 @@ func (*Credential) ProtoMessage() {} func (x *Credential) ProtoReflect() protoreflect.Message { mi := &file_iam_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -193,123 +185,62 @@ func (x *Credential) GetSecretKey() string { return "" } -type Account struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - EmailAddress string `protobuf:"bytes,3,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Account) Reset() { - *x = Account{} - mi := &file_iam_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Account) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Account) ProtoMessage() {} - -func (x *Account) ProtoReflect() protoreflect.Message { - mi := &file_iam_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Account.ProtoReflect.Descriptor instead. -func (*Account) Descriptor() ([]byte, []int) { - return file_iam_proto_rawDescGZIP(), []int{3} -} - -func (x *Account) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Account) GetDisplayName() string { - if x != nil { - return x.DisplayName - } - return "" -} - -func (x *Account) GetEmailAddress() string { - if x != nil { - return x.EmailAddress - } - return "" -} - var File_iam_proto protoreflect.FileDescriptor -const file_iam_proto_rawDesc = "" + - "\n" + - "\tiam.proto\x12\x06iam_pb\"s\n" + - "\x12S3ApiConfiguration\x120\n" + - "\n" + - "identities\x18\x01 \x03(\v2\x10.iam_pb.IdentityR\n" + - "identities\x12+\n" + - "\baccounts\x18\x02 \x03(\v2\x0f.iam_pb.AccountR\baccounts\"\x99\x01\n" + - "\bIdentity\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x124\n" + - "\vcredentials\x18\x02 \x03(\v2\x12.iam_pb.CredentialR\vcredentials\x12\x18\n" + - "\aactions\x18\x03 \x03(\tR\aactions\x12)\n" + - "\aaccount\x18\x04 \x01(\v2\x0f.iam_pb.AccountR\aaccount\"J\n" + - "\n" + - "Credential\x12\x1d\n" + - "\n" + - "access_key\x18\x01 \x01(\tR\taccessKey\x12\x1d\n" + - "\n" + - "secret_key\x18\x02 \x01(\tR\tsecretKey\"a\n" + - "\aAccount\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + - "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12#\n" + - "\remail_address\x18\x03 \x01(\tR\femailAddress2!\n" + - "\x1fSeaweedIdentityAccessManagementBK\n" + - "\x10seaweedfs.clientB\bIamProtoZ-github.com/seaweedfs/seaweedfs/weed/pb/iam_pbb\x06proto3" +var file_iam_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d, + 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, + 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, + 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, + 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_iam_proto_rawDescOnce sync.Once - file_iam_proto_rawDescData []byte + file_iam_proto_rawDescData = file_iam_proto_rawDesc ) func file_iam_proto_rawDescGZIP() []byte { file_iam_proto_rawDescOnce.Do(func() { - file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc))) + file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData) }) return file_iam_proto_rawDescData } -var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_iam_proto_goTypes = []any{ +var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_iam_proto_goTypes = []interface{}{ (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration (*Identity)(nil), // 1: iam_pb.Identity (*Credential)(nil), // 2: iam_pb.Credential - (*Account)(nil), // 3: iam_pb.Account } var file_iam_proto_depIdxs = []int32{ 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity - 3, // 1: iam_pb.S3ApiConfiguration.accounts:type_name -> iam_pb.Account - 2, // 2: iam_pb.Identity.credentials:type_name -> iam_pb.Credential - 3, // 3: iam_pb.Identity.account:type_name -> iam_pb.Account - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 2, // 1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_iam_proto_init() } @@ -317,13 +248,51 @@ func file_iam_proto_init() { if File_iam_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ApiConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Credential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc)), + RawDescriptor: file_iam_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, @@ -332,6 +301,7 @@ func file_iam_proto_init() { MessageInfos: file_iam_proto_msgTypes, }.Build() File_iam_proto = out.File + file_iam_proto_rawDesc = nil file_iam_proto_goTypes = nil file_iam_proto_depIdxs = nil } diff --git a/weed/pb/iam_pb/iam_grpc.pb.go b/weed/pb/iam_pb/iam_grpc.pb.go index 5ca4a2293..b9438a295 100644 --- a/weed/pb/iam_pb/iam_grpc.pb.go +++ b/weed/pb/iam_pb/iam_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: iam.proto package iam_pb @@ -12,8 +8,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service. // @@ -31,21 +27,17 @@ func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) Seawe // SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service. // All implementations must embed UnimplementedSeaweedIdentityAccessManagementServer -// for forward compatibility. +// for forward compatibility type SeaweedIdentityAccessManagementServer interface { mustEmbedUnimplementedSeaweedIdentityAccessManagementServer() } -// UnimplementedSeaweedIdentityAccessManagementServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedIdentityAccessManagementServer struct{} +// UnimplementedSeaweedIdentityAccessManagementServer must be embedded to have forward compatible implementations. +type UnimplementedSeaweedIdentityAccessManagementServer struct { +} func (UnimplementedSeaweedIdentityAccessManagementServer) mustEmbedUnimplementedSeaweedIdentityAccessManagementServer() { } -func (UnimplementedSeaweedIdentityAccessManagementServer) testEmbeddedByValue() {} // UnsafeSeaweedIdentityAccessManagementServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to SeaweedIdentityAccessManagementServer will @@ -55,13 +47,6 @@ type UnsafeSeaweedIdentityAccessManagementServer interface { } func RegisterSeaweedIdentityAccessManagementServer(s grpc.ServiceRegistrar, srv SeaweedIdentityAccessManagementServer) { - // If the following call pancis, it indicates UnimplementedSeaweedIdentityAccessManagementServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&SeaweedIdentityAccessManagement_ServiceDesc, srv) } diff --git a/weed/pb/master.proto b/weed/pb/master.proto index f8049c466..84ec7374b 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package master_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb"; ////////////////////////////////////////////////// @@ -15,8 +15,6 @@ service Seaweed { } rpc Assign (AssignRequest) returns (AssignResponse) { } - rpc StreamAssign (stream AssignRequest) returns (stream AssignResponse) { - } rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { @@ -29,12 +27,6 @@ service Seaweed { } rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) { } - rpc DisableVacuum (DisableVacuumRequest) returns (DisableVacuumResponse) { - } - rpc EnableVacuum (EnableVacuumRequest) returns (EnableVacuumResponse) { - } - rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { - } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { } rpc ListClusterNodes (ListClusterNodesRequest) returns (ListClusterNodesResponse) { @@ -51,8 +43,6 @@ service Seaweed { } rpc RaftRemoveServer (RaftRemoveServerRequest) returns (RaftRemoveServerResponse) { } - rpc VolumeGrow (VolumeGrowRequest) returns (VolumeGrowResponse) { - } } ////////////////////////////////////////////////// @@ -90,7 +80,6 @@ message HeartbeatResponse { uint32 metrics_interval_seconds = 4; repeated StorageBackend storage_backends = 5; repeated string duplicated_uuids = 6; - bool preallocate = 7; } message VolumeInformationMessage { @@ -109,7 +98,6 @@ message VolumeInformationMessage { string remote_storage_name = 13; string remote_storage_key = 14; string disk_type = 15; - uint32 disk_id = 16; } message VolumeShortInformationMessage { @@ -119,7 +107,6 @@ message VolumeShortInformationMessage { uint32 version = 9; uint32 ttl = 10; string disk_type = 15; - uint32 disk_id = 16; } message VolumeEcShardInformationMessage { @@ -127,9 +114,6 @@ message VolumeEcShardInformationMessage { string collection = 2; uint32 ec_index_bits = 3; string disk_type = 4; - uint64 expire_at_sec = 5; // used to record the destruction time of ec volume - uint32 disk_id = 6; - repeated int64 shard_sizes = 7; // optimized: sizes for shards in order of set bits in ec_index_bits } message StorageBackend { @@ -155,8 +139,6 @@ message KeepConnectedRequest { string client_address = 3; string version = 4; string filer_group = 5; - string data_center = 6; - string rack = 7; } message VolumeLocation { @@ -174,6 +156,7 @@ message VolumeLocation { message ClusterNodeUpdate { string node_type = 1; string address = 2; + bool is_leader = 3; bool is_add = 4; string filer_group = 5; int64 created_at_ns = 6; @@ -202,7 +185,6 @@ message Location { string url = 1; string public_url = 2; uint32 grpc_port = 3; - string data_center = 4; } message AssignRequest { @@ -214,22 +196,9 @@ message AssignRequest { string rack = 6; string data_node = 7; uint32 memory_map_max_size_mb = 8; - uint32 writable_volume_count = 9; + uint32 Writable_volume_count = 9; string disk_type = 10; } - -message VolumeGrowRequest { - uint32 writable_volume_count = 1; - string replication = 2; - string collection = 3; - string ttl = 4; - string data_center = 5; - string rack = 6; - string data_node = 7; - uint32 memory_map_max_size_mb = 8; - string disk_type = 9; -} - message AssignResponse { string fid = 1; uint64 count = 4; @@ -283,7 +252,6 @@ message DiskInfo { repeated VolumeInformationMessage volume_infos = 6; repeated VolumeEcShardInformationMessage ec_shard_infos = 7; int64 remote_volume_count = 8; - uint32 disk_id = 9; } message DataNodeInfo { string id = 1; @@ -332,30 +300,6 @@ message VacuumVolumeRequest { message VacuumVolumeResponse { } -message DisableVacuumRequest { -} -message DisableVacuumResponse { -} - -message EnableVacuumRequest { -} -message EnableVacuumResponse { -} - -message VolumeMarkReadonlyRequest { - string ip = 1; - uint32 port = 2; - uint32 volume_id = 4; - string collection = 5; - uint32 replica_placement = 6; - uint32 version = 7; - uint32 ttl = 8; - string disk_type = 9; - bool is_readonly = 10; -} -message VolumeMarkReadonlyResponse { -} - message GetMasterConfigurationRequest { } message GetMasterConfigurationResponse { @@ -371,15 +315,13 @@ message GetMasterConfigurationResponse { message ListClusterNodesRequest { string client_type = 1; string filer_group = 2; - int32 limit = 4; } message ListClusterNodesResponse { message ClusterNode { string address = 1; string version = 2; + bool is_leader = 3; int64 created_at_ns = 4; - string data_center = 5; - string rack = 6; } repeated ClusterNode cluster_nodes = 1; } @@ -435,11 +377,7 @@ message RaftListClusterServersResponse { message ClusterServers { string id = 1; string address = 2; - string suffrage = 3; - bool isLeader = 4; + string suffrage = 3; // } repeated ClusterServers cluster_servers = 1; } - -message VolumeGrowResponse { -} \ No newline at end of file diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 19df43d71..4ffa7fb69 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: master.proto package master_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,7 +21,10 @@ const ( ) type Heartbeat struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` @@ -41,18 +43,18 @@ type Heartbeat struct { NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"` DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"` HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"` - MaxVolumeCounts map[string]uint32 `protobuf:"bytes,4,rep,name=max_volume_counts,json=maxVolumeCounts,proto3" json:"max_volume_counts,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MaxVolumeCounts map[string]uint32 `protobuf:"bytes,4,rep,name=max_volume_counts,json=maxVolumeCounts,proto3" json:"max_volume_counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` GrpcPort uint32 `protobuf:"varint,20,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` LocationUuids []string `protobuf:"bytes,21,rep,name=location_uuids,json=locationUuids,proto3" json:"location_uuids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *Heartbeat) Reset() { *x = Heartbeat{} - mi := &file_master_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Heartbeat) String() string { @@ -63,7 +65,7 @@ func (*Heartbeat) ProtoMessage() {} func (x *Heartbeat) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -205,23 +207,25 @@ func (x *Heartbeat) GetLocationUuids() []string { } type HeartbeatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` - Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` - MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` - StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` - DuplicatedUuids []string `protobuf:"bytes,6,rep,name=duplicated_uuids,json=duplicatedUuids,proto3" json:"duplicated_uuids,omitempty"` - Preallocate bool `protobuf:"varint,7,opt,name=preallocate,proto3" json:"preallocate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` + DuplicatedUuids []string `protobuf:"bytes,6,rep,name=duplicated_uuids,json=duplicatedUuids,proto3" json:"duplicated_uuids,omitempty"` } func (x *HeartbeatResponse) Reset() { *x = HeartbeatResponse{} - mi := &file_master_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *HeartbeatResponse) String() string { @@ -232,7 +236,7 @@ func (*HeartbeatResponse) ProtoMessage() {} func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -289,40 +293,35 @@ func (x *HeartbeatResponse) GetDuplicatedUuids() []string { return nil } -func (x *HeartbeatResponse) GetPreallocate() bool { - if x != nil { - return x.Preallocate - } - return false -} - type VolumeInformationMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Size uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"` - DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` - CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"` - RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"` - RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"` - DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - DiskId uint32 `protobuf:"varint,16,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Size uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"` + RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"` + RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *VolumeInformationMessage) Reset() { *x = VolumeInformationMessage{} - mi := &file_master_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeInformationMessage) String() string { @@ -333,7 +332,7 @@ func (*VolumeInformationMessage) ProtoMessage() {} func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -453,31 +452,26 @@ func (x *VolumeInformationMessage) GetDiskType() string { return "" } -func (x *VolumeInformationMessage) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - type VolumeShortInformationMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` - DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - DiskId uint32 `protobuf:"varint,16,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *VolumeShortInformationMessage) Reset() { *x = VolumeShortInformationMessage{} - mi := &file_master_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeShortInformationMessage) String() string { @@ -488,7 +482,7 @@ func (*VolumeShortInformationMessage) ProtoMessage() {} func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -545,31 +539,24 @@ func (x *VolumeShortInformationMessage) GetDiskType() string { return "" } -func (x *VolumeShortInformationMessage) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - type VolumeEcShardInformationMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"` - DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - ExpireAtSec uint64 `protobuf:"varint,5,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // used to record the destruction time of ec volume - DiskId uint32 `protobuf:"varint,6,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` - ShardSizes []int64 `protobuf:"varint,7,rep,packed,name=shard_sizes,json=shardSizes,proto3" json:"shard_sizes,omitempty"` // optimized: sizes for shards in order of set bits in ec_index_bits - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *VolumeEcShardInformationMessage) Reset() { *x = VolumeEcShardInformationMessage{} - mi := &file_master_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardInformationMessage) String() string { @@ -580,7 +567,7 @@ func (*VolumeEcShardInformationMessage) ProtoMessage() {} func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -623,41 +610,23 @@ func (x *VolumeEcShardInformationMessage) GetDiskType() string { return "" } -func (x *VolumeEcShardInformationMessage) GetExpireAtSec() uint64 { - if x != nil { - return x.ExpireAtSec - } - return 0 -} - -func (x *VolumeEcShardInformationMessage) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - -func (x *VolumeEcShardInformationMessage) GetShardSizes() []int64 { - if x != nil { - return x.ShardSizes - } - return nil -} - type StorageBackend struct { - state protoimpl.MessageState `protogen:"open.v1"` - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *StorageBackend) Reset() { *x = StorageBackend{} - mi := &file_master_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StorageBackend) String() string { @@ -668,7 +637,7 @@ func (*StorageBackend) ProtoMessage() {} func (x *StorageBackend) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -705,16 +674,18 @@ func (x *StorageBackend) GetProperties() map[string]string { } type Empty struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Empty) Reset() { *x = Empty{} - mi := &file_master_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Empty) String() string { @@ -725,7 +696,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -741,17 +712,20 @@ func (*Empty) Descriptor() ([]byte, []int) { } type SuperBlockExtra struct { - state protoimpl.MessageState `protogen:"open.v1"` - ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"` } func (x *SuperBlockExtra) Reset() { *x = SuperBlockExtra{} - mi := &file_master_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SuperBlockExtra) String() string { @@ -762,7 +736,7 @@ func (*SuperBlockExtra) ProtoMessage() {} func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -785,22 +759,23 @@ func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { } type KeepConnectedRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` - ClientAddress string `protobuf:"bytes,3,opt,name=client_address,json=clientAddress,proto3" json:"client_address,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - FilerGroup string `protobuf:"bytes,5,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` - DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` + ClientAddress string `protobuf:"bytes,3,opt,name=client_address,json=clientAddress,proto3" json:"client_address,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + FilerGroup string `protobuf:"bytes,5,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` } func (x *KeepConnectedRequest) Reset() { *x = KeepConnectedRequest{} - mi := &file_master_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KeepConnectedRequest) String() string { @@ -811,7 +786,7 @@ func (*KeepConnectedRequest) ProtoMessage() {} func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -854,40 +829,29 @@ func (x *KeepConnectedRequest) GetFilerGroup() string { return "" } -func (x *KeepConnectedRequest) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *KeepConnectedRequest) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - type VolumeLocation struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` - NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"` - Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself - DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // optional when DataCenter is in use - GrpcPort uint32 `protobuf:"varint,7,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - NewEcVids []uint32 `protobuf:"varint,8,rep,packed,name=new_ec_vids,json=newEcVids,proto3" json:"new_ec_vids,omitempty"` - DeletedEcVids []uint32 `protobuf:"varint,9,rep,packed,name=deleted_ec_vids,json=deletedEcVids,proto3" json:"deleted_ec_vids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"` + DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // optional when DataCenter is in use + GrpcPort uint32 `protobuf:"varint,7,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` + NewEcVids []uint32 `protobuf:"varint,8,rep,packed,name=new_ec_vids,json=newEcVids,proto3" json:"new_ec_vids,omitempty"` + DeletedEcVids []uint32 `protobuf:"varint,9,rep,packed,name=deleted_ec_vids,json=deletedEcVids,proto3" json:"deleted_ec_vids,omitempty"` } func (x *VolumeLocation) Reset() { *x = VolumeLocation{} - mi := &file_master_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeLocation) String() string { @@ -898,7 +862,7 @@ func (*VolumeLocation) ProtoMessage() {} func (x *VolumeLocation) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -977,21 +941,25 @@ func (x *VolumeLocation) GetDeletedEcVids() []uint32 { } type ClusterNodeUpdate struct { - state protoimpl.MessageState `protogen:"open.v1"` - NodeType string `protobuf:"bytes,1,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - IsAdd bool `protobuf:"varint,4,opt,name=is_add,json=isAdd,proto3" json:"is_add,omitempty"` - FilerGroup string `protobuf:"bytes,5,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` - CreatedAtNs int64 `protobuf:"varint,6,opt,name=created_at_ns,json=createdAtNs,proto3" json:"created_at_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeType string `protobuf:"bytes,1,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + IsLeader bool `protobuf:"varint,3,opt,name=is_leader,json=isLeader,proto3" json:"is_leader,omitempty"` + IsAdd bool `protobuf:"varint,4,opt,name=is_add,json=isAdd,proto3" json:"is_add,omitempty"` + FilerGroup string `protobuf:"bytes,5,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` + CreatedAtNs int64 `protobuf:"varint,6,opt,name=created_at_ns,json=createdAtNs,proto3" json:"created_at_ns,omitempty"` } func (x *ClusterNodeUpdate) Reset() { *x = ClusterNodeUpdate{} - mi := &file_master_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ClusterNodeUpdate) String() string { @@ -1002,7 +970,7 @@ func (*ClusterNodeUpdate) ProtoMessage() {} func (x *ClusterNodeUpdate) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1031,6 +999,13 @@ func (x *ClusterNodeUpdate) GetAddress() string { return "" } +func (x *ClusterNodeUpdate) GetIsLeader() bool { + if x != nil { + return x.IsLeader + } + return false +} + func (x *ClusterNodeUpdate) GetIsAdd() bool { if x != nil { return x.IsAdd @@ -1053,18 +1028,21 @@ func (x *ClusterNodeUpdate) GetCreatedAtNs() int64 { } type KeepConnectedResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeLocation *VolumeLocation `protobuf:"bytes,1,opt,name=volume_location,json=volumeLocation,proto3" json:"volume_location,omitempty"` - ClusterNodeUpdate *ClusterNodeUpdate `protobuf:"bytes,2,opt,name=cluster_node_update,json=clusterNodeUpdate,proto3" json:"cluster_node_update,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeLocation *VolumeLocation `protobuf:"bytes,1,opt,name=volume_location,json=volumeLocation,proto3" json:"volume_location,omitempty"` + ClusterNodeUpdate *ClusterNodeUpdate `protobuf:"bytes,2,opt,name=cluster_node_update,json=clusterNodeUpdate,proto3" json:"cluster_node_update,omitempty"` } func (x *KeepConnectedResponse) Reset() { *x = KeepConnectedResponse{} - mi := &file_master_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *KeepConnectedResponse) String() string { @@ -1075,7 +1053,7 @@ func (*KeepConnectedResponse) ProtoMessage() {} func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1105,18 +1083,21 @@ func (x *KeepConnectedResponse) GetClusterNodeUpdate() *ClusterNodeUpdate { } type LookupVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeOrFileIds []string `protobuf:"bytes,1,rep,name=volume_or_file_ids,json=volumeOrFileIds,proto3" json:"volume_or_file_ids,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided. - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeOrFileIds []string `protobuf:"bytes,1,rep,name=volume_or_file_ids,json=volumeOrFileIds,proto3" json:"volume_or_file_ids,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided. } func (x *LookupVolumeRequest) Reset() { *x = LookupVolumeRequest{} - mi := &file_master_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupVolumeRequest) String() string { @@ -1127,7 +1108,7 @@ func (*LookupVolumeRequest) ProtoMessage() {} func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1157,17 +1138,20 @@ func (x *LookupVolumeRequest) GetCollection() string { } type LookupVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *LookupVolumeResponse) Reset() { *x = LookupVolumeResponse{} - mi := &file_master_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupVolumeResponse) String() string { @@ -1178,7 +1162,7 @@ func (*LookupVolumeResponse) ProtoMessage() {} func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[13] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1201,20 +1185,22 @@ func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_Vo } type Location struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` - GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` } func (x *Location) Reset() { *x = Location{} - mi := &file_master_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Location) String() string { @@ -1225,7 +1211,7 @@ func (*Location) ProtoMessage() {} func (x *Location) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[14] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1261,34 +1247,30 @@ func (x *Location) GetGrpcPort() uint32 { return 0 } -func (x *Location) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - type AssignRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` - WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=writable_volume_count,json=writableVolumeCount,proto3" json:"writable_volume_count,omitempty"` - DiskType string `protobuf:"bytes,10,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"` + DiskType string `protobuf:"bytes,10,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *AssignRequest) Reset() { *x = AssignRequest{} - mi := &file_master_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AssignRequest) String() string { @@ -1299,7 +1281,7 @@ func (*AssignRequest) ProtoMessage() {} func (x *AssignRequest) ProtoReflect() protoreflect.Message { mi := &file_master_proto_msgTypes[15] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1384,131 +1366,26 @@ func (x *AssignRequest) GetDiskType() string { return "" } -type VolumeGrowRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WritableVolumeCount uint32 `protobuf:"varint,1,opt,name=writable_volume_count,json=writableVolumeCount,proto3" json:"writable_volume_count,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` - DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeGrowRequest) Reset() { - *x = VolumeGrowRequest{} - mi := &file_master_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeGrowRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeGrowRequest) ProtoMessage() {} - -func (x *VolumeGrowRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeGrowRequest.ProtoReflect.Descriptor instead. -func (*VolumeGrowRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{16} -} - -func (x *VolumeGrowRequest) GetWritableVolumeCount() uint32 { - if x != nil { - return x.WritableVolumeCount - } - return 0 -} - -func (x *VolumeGrowRequest) GetReplication() string { - if x != nil { - return x.Replication - } - return "" -} - -func (x *VolumeGrowRequest) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *VolumeGrowRequest) GetTtl() string { - if x != nil { - return x.Ttl - } - return "" -} - -func (x *VolumeGrowRequest) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *VolumeGrowRequest) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - -func (x *VolumeGrowRequest) GetDataNode() string { - if x != nil { - return x.DataNode - } - return "" -} - -func (x *VolumeGrowRequest) GetMemoryMapMaxSizeMb() uint32 { - if x != nil { - return x.MemoryMapMaxSizeMb - } - return 0 -} - -func (x *VolumeGrowRequest) GetDiskType() string { - if x != nil { - return x.DiskType - } - return "" -} - type AssignResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"` - Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"` - Replicas []*Location `protobuf:"bytes,7,rep,name=replicas,proto3" json:"replicas,omitempty"` - Location *Location `protobuf:"bytes,8,opt,name=location,proto3" json:"location,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"` + Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"` + Replicas []*Location `protobuf:"bytes,7,rep,name=replicas,proto3" json:"replicas,omitempty"` + Location *Location `protobuf:"bytes,8,opt,name=location,proto3" json:"location,omitempty"` } func (x *AssignResponse) Reset() { *x = AssignResponse{} - mi := &file_master_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AssignResponse) String() string { @@ -1518,8 +1395,8 @@ func (x *AssignResponse) String() string { func (*AssignResponse) ProtoMessage() {} func (x *AssignResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[17] - if x != nil { + mi := &file_master_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1531,7 +1408,7 @@ func (x *AssignResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead. func (*AssignResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{17} + return file_master_proto_rawDescGZIP(), []int{16} } func (x *AssignResponse) GetFid() string { @@ -1577,20 +1454,23 @@ func (x *AssignResponse) GetLocation() *Location { } type StatisticsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` - DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *StatisticsRequest) Reset() { *x = StatisticsRequest{} - mi := &file_master_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatisticsRequest) String() string { @@ -1600,8 +1480,8 @@ func (x *StatisticsRequest) String() string { func (*StatisticsRequest) ProtoMessage() {} func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[18] - if x != nil { + mi := &file_master_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1613,7 +1493,7 @@ func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. func (*StatisticsRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{18} + return file_master_proto_rawDescGZIP(), []int{17} } func (x *StatisticsRequest) GetReplication() string { @@ -1645,19 +1525,22 @@ func (x *StatisticsRequest) GetDiskType() string { } type StatisticsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` } func (x *StatisticsResponse) Reset() { *x = StatisticsResponse{} - mi := &file_master_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *StatisticsResponse) String() string { @@ -1667,8 +1550,8 @@ func (x *StatisticsResponse) String() string { func (*StatisticsResponse) ProtoMessage() {} func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[19] - if x != nil { + mi := &file_master_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,7 +1563,7 @@ func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. func (*StatisticsResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{19} + return file_master_proto_rawDescGZIP(), []int{18} } func (x *StatisticsResponse) GetTotalSize() uint64 { @@ -1704,19 +1587,24 @@ func (x *StatisticsResponse) GetFileCount() uint64 { return 0 } +// // collection related +// type Collection struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *Collection) Reset() { *x = Collection{} - mi := &file_master_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Collection) String() string { @@ -1726,8 +1614,8 @@ func (x *Collection) String() string { func (*Collection) ProtoMessage() {} func (x *Collection) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[20] - if x != nil { + mi := &file_master_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1739,7 +1627,7 @@ func (x *Collection) ProtoReflect() protoreflect.Message { // Deprecated: Use Collection.ProtoReflect.Descriptor instead. func (*Collection) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{20} + return file_master_proto_rawDescGZIP(), []int{19} } func (x *Collection) GetName() string { @@ -1750,18 +1638,21 @@ func (x *Collection) GetName() string { } type CollectionListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` - IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` } func (x *CollectionListRequest) Reset() { *x = CollectionListRequest{} - mi := &file_master_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionListRequest) String() string { @@ -1771,8 +1662,8 @@ func (x *CollectionListRequest) String() string { func (*CollectionListRequest) ProtoMessage() {} func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[21] - if x != nil { + mi := &file_master_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1784,7 +1675,7 @@ func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. func (*CollectionListRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{21} + return file_master_proto_rawDescGZIP(), []int{20} } func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { @@ -1802,17 +1693,20 @@ func (x *CollectionListRequest) GetIncludeEcVolumes() bool { } type CollectionListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } func (x *CollectionListResponse) Reset() { *x = CollectionListResponse{} - mi := &file_master_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionListResponse) String() string { @@ -1822,8 +1716,8 @@ func (x *CollectionListResponse) String() string { func (*CollectionListResponse) ProtoMessage() {} func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[22] - if x != nil { + mi := &file_master_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1835,7 +1729,7 @@ func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. func (*CollectionListResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{22} + return file_master_proto_rawDescGZIP(), []int{21} } func (x *CollectionListResponse) GetCollections() []*Collection { @@ -1846,17 +1740,20 @@ func (x *CollectionListResponse) GetCollections() []*Collection { } type CollectionDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *CollectionDeleteRequest) Reset() { *x = CollectionDeleteRequest{} - mi := &file_master_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionDeleteRequest) String() string { @@ -1866,8 +1763,8 @@ func (x *CollectionDeleteRequest) String() string { func (*CollectionDeleteRequest) ProtoMessage() {} func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[23] - if x != nil { + mi := &file_master_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1879,7 +1776,7 @@ func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead. func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{23} + return file_master_proto_rawDescGZIP(), []int{22} } func (x *CollectionDeleteRequest) GetName() string { @@ -1890,16 +1787,18 @@ func (x *CollectionDeleteRequest) GetName() string { } type CollectionDeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *CollectionDeleteResponse) Reset() { *x = CollectionDeleteResponse{} - mi := &file_master_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CollectionDeleteResponse) String() string { @@ -1909,8 +1808,8 @@ func (x *CollectionDeleteResponse) String() string { func (*CollectionDeleteResponse) ProtoMessage() {} func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[24] - if x != nil { + mi := &file_master_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1922,12 +1821,17 @@ func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead. func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{24} + return file_master_proto_rawDescGZIP(), []int{23} } +// // volume related +// type DiskInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` VolumeCount int64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` MaxVolumeCount int64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` @@ -1936,16 +1840,15 @@ type DiskInfo struct { VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"` EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"` RemoteVolumeCount int64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` - DiskId uint32 `protobuf:"varint,9,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *DiskInfo) Reset() { *x = DiskInfo{} - mi := &file_master_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DiskInfo) String() string { @@ -1955,8 +1858,8 @@ func (x *DiskInfo) String() string { func (*DiskInfo) ProtoMessage() {} func (x *DiskInfo) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[25] - if x != nil { + mi := &file_master_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1968,7 +1871,7 @@ func (x *DiskInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DiskInfo.ProtoReflect.Descriptor instead. func (*DiskInfo) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{25} + return file_master_proto_rawDescGZIP(), []int{24} } func (x *DiskInfo) GetType() string { @@ -2027,27 +1930,23 @@ func (x *DiskInfo) GetRemoteVolumeCount() int64 { return 0 } -func (x *DiskInfo) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - type DataNodeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - DiskInfos map[string]*DiskInfo `protobuf:"bytes,2,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,2,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + GrpcPort uint32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` } func (x *DataNodeInfo) Reset() { *x = DataNodeInfo{} - mi := &file_master_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DataNodeInfo) String() string { @@ -2057,8 +1956,8 @@ func (x *DataNodeInfo) String() string { func (*DataNodeInfo) ProtoMessage() {} func (x *DataNodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[26] - if x != nil { + mi := &file_master_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2070,7 +1969,7 @@ func (x *DataNodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead. func (*DataNodeInfo) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{26} + return file_master_proto_rawDescGZIP(), []int{25} } func (x *DataNodeInfo) GetId() string { @@ -2095,19 +1994,22 @@ func (x *DataNodeInfo) GetGrpcPort() uint32 { } type RackInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - DataNodeInfos []*DataNodeInfo `protobuf:"bytes,2,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"` - DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,2,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *RackInfo) Reset() { *x = RackInfo{} - mi := &file_master_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RackInfo) String() string { @@ -2117,8 +2019,8 @@ func (x *RackInfo) String() string { func (*RackInfo) ProtoMessage() {} func (x *RackInfo) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[27] - if x != nil { + mi := &file_master_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2130,7 +2032,7 @@ func (x *RackInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RackInfo.ProtoReflect.Descriptor instead. func (*RackInfo) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{27} + return file_master_proto_rawDescGZIP(), []int{26} } func (x *RackInfo) GetId() string { @@ -2155,19 +2057,22 @@ func (x *RackInfo) GetDiskInfos() map[string]*DiskInfo { } type DataCenterInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RackInfos []*RackInfo `protobuf:"bytes,2,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"` - DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,2,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *DataCenterInfo) Reset() { *x = DataCenterInfo{} - mi := &file_master_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DataCenterInfo) String() string { @@ -2177,8 +2082,8 @@ func (x *DataCenterInfo) String() string { func (*DataCenterInfo) ProtoMessage() {} func (x *DataCenterInfo) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[28] - if x != nil { + mi := &file_master_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2190,7 +2095,7 @@ func (x *DataCenterInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead. func (*DataCenterInfo) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{28} + return file_master_proto_rawDescGZIP(), []int{27} } func (x *DataCenterInfo) GetId() string { @@ -2215,19 +2120,22 @@ func (x *DataCenterInfo) GetDiskInfos() map[string]*DiskInfo { } type TopologyInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - DataCenterInfos []*DataCenterInfo `protobuf:"bytes,2,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"` - DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,2,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *TopologyInfo) Reset() { *x = TopologyInfo{} - mi := &file_master_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *TopologyInfo) String() string { @@ -2237,8 +2145,8 @@ func (x *TopologyInfo) String() string { func (*TopologyInfo) ProtoMessage() {} func (x *TopologyInfo) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[29] - if x != nil { + mi := &file_master_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2250,7 +2158,7 @@ func (x *TopologyInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead. func (*TopologyInfo) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{29} + return file_master_proto_rawDescGZIP(), []int{28} } func (x *TopologyInfo) GetId() string { @@ -2275,16 +2183,18 @@ func (x *TopologyInfo) GetDiskInfos() map[string]*DiskInfo { } type VolumeListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeListRequest) Reset() { *x = VolumeListRequest{} - mi := &file_master_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeListRequest) String() string { @@ -2294,8 +2204,8 @@ func (x *VolumeListRequest) String() string { func (*VolumeListRequest) ProtoMessage() {} func (x *VolumeListRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[30] - if x != nil { + mi := &file_master_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2307,22 +2217,25 @@ func (x *VolumeListRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead. func (*VolumeListRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{30} + return file_master_proto_rawDescGZIP(), []int{29} } type VolumeListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"` - VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"` + VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"` } func (x *VolumeListResponse) Reset() { *x = VolumeListResponse{} - mi := &file_master_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeListResponse) String() string { @@ -2332,8 +2245,8 @@ func (x *VolumeListResponse) String() string { func (*VolumeListResponse) ProtoMessage() {} func (x *VolumeListResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[31] - if x != nil { + mi := &file_master_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2345,7 +2258,7 @@ func (x *VolumeListResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead. func (*VolumeListResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{31} + return file_master_proto_rawDescGZIP(), []int{30} } func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo { @@ -2363,17 +2276,20 @@ func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { } type LookupEcVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *LookupEcVolumeRequest) Reset() { *x = LookupEcVolumeRequest{} - mi := &file_master_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupEcVolumeRequest) String() string { @@ -2383,8 +2299,8 @@ func (x *LookupEcVolumeRequest) String() string { func (*LookupEcVolumeRequest) ProtoMessage() {} func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[32] - if x != nil { + mi := &file_master_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2396,7 +2312,7 @@ func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead. func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{32} + return file_master_proto_rawDescGZIP(), []int{31} } func (x *LookupEcVolumeRequest) GetVolumeId() uint32 { @@ -2407,18 +2323,21 @@ func (x *LookupEcVolumeRequest) GetVolumeId() uint32 { } type LookupEcVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *LookupEcVolumeResponse) Reset() { *x = LookupEcVolumeResponse{} - mi := &file_master_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupEcVolumeResponse) String() string { @@ -2428,8 +2347,8 @@ func (x *LookupEcVolumeResponse) String() string { func (*LookupEcVolumeResponse) ProtoMessage() {} func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[33] - if x != nil { + mi := &file_master_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2441,7 +2360,7 @@ func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead. func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{33} + return file_master_proto_rawDescGZIP(), []int{32} } func (x *LookupEcVolumeResponse) GetVolumeId() uint32 { @@ -2459,19 +2378,22 @@ func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse } type VacuumVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - GarbageThreshold float32 `protobuf:"fixed32,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` - VolumeId uint32 `protobuf:"varint,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageThreshold float32 `protobuf:"fixed32,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` + VolumeId uint32 `protobuf:"varint,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VacuumVolumeRequest) Reset() { *x = VacuumVolumeRequest{} - mi := &file_master_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeRequest) String() string { @@ -2481,8 +2403,8 @@ func (x *VacuumVolumeRequest) String() string { func (*VacuumVolumeRequest) ProtoMessage() {} func (x *VacuumVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[34] - if x != nil { + mi := &file_master_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2494,7 +2416,7 @@ func (x *VacuumVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeRequest.ProtoReflect.Descriptor instead. func (*VacuumVolumeRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{34} + return file_master_proto_rawDescGZIP(), []int{33} } func (x *VacuumVolumeRequest) GetGarbageThreshold() float32 { @@ -2519,16 +2441,18 @@ func (x *VacuumVolumeRequest) GetCollection() string { } type VacuumVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VacuumVolumeResponse) Reset() { *x = VacuumVolumeResponse{} - mi := &file_master_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeResponse) String() string { @@ -2538,8 +2462,8 @@ func (x *VacuumVolumeResponse) String() string { func (*VacuumVolumeResponse) ProtoMessage() {} func (x *VacuumVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[35] - if x != nil { + mi := &file_master_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2551,308 +2475,22 @@ func (x *VacuumVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VacuumVolumeResponse.ProtoReflect.Descriptor instead. func (*VacuumVolumeResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{35} -} - -type DisableVacuumRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DisableVacuumRequest) Reset() { - *x = DisableVacuumRequest{} - mi := &file_master_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DisableVacuumRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DisableVacuumRequest) ProtoMessage() {} - -func (x *DisableVacuumRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[36] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DisableVacuumRequest.ProtoReflect.Descriptor instead. -func (*DisableVacuumRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{36} -} - -type DisableVacuumResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DisableVacuumResponse) Reset() { - *x = DisableVacuumResponse{} - mi := &file_master_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DisableVacuumResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DisableVacuumResponse) ProtoMessage() {} - -func (x *DisableVacuumResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[37] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DisableVacuumResponse.ProtoReflect.Descriptor instead. -func (*DisableVacuumResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{37} -} - -type EnableVacuumRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnableVacuumRequest) Reset() { - *x = EnableVacuumRequest{} - mi := &file_master_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnableVacuumRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnableVacuumRequest) ProtoMessage() {} - -func (x *EnableVacuumRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[38] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnableVacuumRequest.ProtoReflect.Descriptor instead. -func (*EnableVacuumRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{38} -} - -type EnableVacuumResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnableVacuumResponse) Reset() { - *x = EnableVacuumResponse{} - mi := &file_master_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnableVacuumResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnableVacuumResponse) ProtoMessage() {} - -func (x *EnableVacuumResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[39] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnableVacuumResponse.ProtoReflect.Descriptor instead. -func (*EnableVacuumResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{39} -} - -type VolumeMarkReadonlyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - VolumeId uint32 `protobuf:"varint,4,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,6,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,8,opt,name=ttl,proto3" json:"ttl,omitempty"` - DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - IsReadonly bool `protobuf:"varint,10,opt,name=is_readonly,json=isReadonly,proto3" json:"is_readonly,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeMarkReadonlyRequest) Reset() { - *x = VolumeMarkReadonlyRequest{} - mi := &file_master_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeMarkReadonlyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeMarkReadonlyRequest) ProtoMessage() {} - -func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[40] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. -func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{40} -} - -func (x *VolumeMarkReadonlyRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *VolumeMarkReadonlyRequest) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *VolumeMarkReadonlyRequest) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *VolumeMarkReadonlyRequest) GetReplicaPlacement() uint32 { - if x != nil { - return x.ReplicaPlacement - } - return 0 -} - -func (x *VolumeMarkReadonlyRequest) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *VolumeMarkReadonlyRequest) GetTtl() uint32 { - if x != nil { - return x.Ttl - } - return 0 -} - -func (x *VolumeMarkReadonlyRequest) GetDiskType() string { - if x != nil { - return x.DiskType - } - return "" -} - -func (x *VolumeMarkReadonlyRequest) GetIsReadonly() bool { - if x != nil { - return x.IsReadonly - } - return false -} - -type VolumeMarkReadonlyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeMarkReadonlyResponse) Reset() { - *x = VolumeMarkReadonlyResponse{} - mi := &file_master_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeMarkReadonlyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeMarkReadonlyResponse) ProtoMessage() {} - -func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[41] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. -func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{41} + return file_master_proto_rawDescGZIP(), []int{34} } type GetMasterConfigurationRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *GetMasterConfigurationRequest) Reset() { *x = GetMasterConfigurationRequest{} - mi := &file_master_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMasterConfigurationRequest) String() string { @@ -2862,8 +2500,8 @@ func (x *GetMasterConfigurationRequest) String() string { func (*GetMasterConfigurationRequest) ProtoMessage() {} func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[42] - if x != nil { + mi := &file_master_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2875,27 +2513,30 @@ func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead. func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{42} + return file_master_proto_rawDescGZIP(), []int{35} } type GetMasterConfigurationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` - StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` - DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` - Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` - VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"` - VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` + DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` + VolumeSizeLimitMB uint32 `protobuf:"varint,6,opt,name=volume_size_limit_m_b,json=volumeSizeLimitMB,proto3" json:"volume_size_limit_m_b,omitempty"` + VolumePreallocate bool `protobuf:"varint,7,opt,name=volume_preallocate,json=volumePreallocate,proto3" json:"volume_preallocate,omitempty"` } func (x *GetMasterConfigurationResponse) Reset() { *x = GetMasterConfigurationResponse{} - mi := &file_master_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMasterConfigurationResponse) String() string { @@ -2905,8 +2546,8 @@ func (x *GetMasterConfigurationResponse) String() string { func (*GetMasterConfigurationResponse) ProtoMessage() {} func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[43] - if x != nil { + mi := &file_master_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2918,7 +2559,7 @@ func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead. func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{43} + return file_master_proto_rawDescGZIP(), []int{36} } func (x *GetMasterConfigurationResponse) GetMetricsAddress() string { @@ -2971,19 +2612,21 @@ func (x *GetMasterConfigurationResponse) GetVolumePreallocate() bool { } type ListClusterNodesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` - FilerGroup string `protobuf:"bytes,2,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` - Limit int32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` + FilerGroup string `protobuf:"bytes,2,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` } func (x *ListClusterNodesRequest) Reset() { *x = ListClusterNodesRequest{} - mi := &file_master_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListClusterNodesRequest) String() string { @@ -2993,8 +2636,8 @@ func (x *ListClusterNodesRequest) String() string { func (*ListClusterNodesRequest) ProtoMessage() {} func (x *ListClusterNodesRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[44] - if x != nil { + mi := &file_master_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3006,7 +2649,7 @@ func (x *ListClusterNodesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListClusterNodesRequest.ProtoReflect.Descriptor instead. func (*ListClusterNodesRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{44} + return file_master_proto_rawDescGZIP(), []int{37} } func (x *ListClusterNodesRequest) GetClientType() string { @@ -3023,25 +2666,21 @@ func (x *ListClusterNodesRequest) GetFilerGroup() string { return "" } -func (x *ListClusterNodesRequest) GetLimit() int32 { - if x != nil { - return x.Limit - } - return 0 -} - type ListClusterNodesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ClusterNodes []*ListClusterNodesResponse_ClusterNode `protobuf:"bytes,1,rep,name=cluster_nodes,json=clusterNodes,proto3" json:"cluster_nodes,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterNodes []*ListClusterNodesResponse_ClusterNode `protobuf:"bytes,1,rep,name=cluster_nodes,json=clusterNodes,proto3" json:"cluster_nodes,omitempty"` } func (x *ListClusterNodesResponse) Reset() { *x = ListClusterNodesResponse{} - mi := &file_master_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListClusterNodesResponse) String() string { @@ -3051,8 +2690,8 @@ func (x *ListClusterNodesResponse) String() string { func (*ListClusterNodesResponse) ProtoMessage() {} func (x *ListClusterNodesResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[45] - if x != nil { + mi := &file_master_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3064,7 +2703,7 @@ func (x *ListClusterNodesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListClusterNodesResponse.ProtoReflect.Descriptor instead. func (*ListClusterNodesResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{45} + return file_master_proto_rawDescGZIP(), []int{38} } func (x *ListClusterNodesResponse) GetClusterNodes() []*ListClusterNodesResponse_ClusterNode { @@ -3075,21 +2714,24 @@ func (x *ListClusterNodesResponse) GetClusterNodes() []*ListClusterNodesResponse } type LeaseAdminTokenRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` - PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` - LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` - ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` - Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` + ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` } func (x *LeaseAdminTokenRequest) Reset() { *x = LeaseAdminTokenRequest{} - mi := &file_master_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LeaseAdminTokenRequest) String() string { @@ -3099,8 +2741,8 @@ func (x *LeaseAdminTokenRequest) String() string { func (*LeaseAdminTokenRequest) ProtoMessage() {} func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[46] - if x != nil { + mi := &file_master_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3112,7 +2754,7 @@ func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead. func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{46} + return file_master_proto_rawDescGZIP(), []int{39} } func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 { @@ -3151,18 +2793,21 @@ func (x *LeaseAdminTokenRequest) GetMessage() string { } type LeaseAdminTokenResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"` - LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"` + LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"` } func (x *LeaseAdminTokenResponse) Reset() { *x = LeaseAdminTokenResponse{} - mi := &file_master_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LeaseAdminTokenResponse) String() string { @@ -3172,8 +2817,8 @@ func (x *LeaseAdminTokenResponse) String() string { func (*LeaseAdminTokenResponse) ProtoMessage() {} func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[47] - if x != nil { + mi := &file_master_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3185,7 +2830,7 @@ func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead. func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{47} + return file_master_proto_rawDescGZIP(), []int{40} } func (x *LeaseAdminTokenResponse) GetToken() int64 { @@ -3203,19 +2848,22 @@ func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 { } type ReleaseAdminTokenRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` - PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` - LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` } func (x *ReleaseAdminTokenRequest) Reset() { *x = ReleaseAdminTokenRequest{} - mi := &file_master_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReleaseAdminTokenRequest) String() string { @@ -3225,8 +2873,8 @@ func (x *ReleaseAdminTokenRequest) String() string { func (*ReleaseAdminTokenRequest) ProtoMessage() {} func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[48] - if x != nil { + mi := &file_master_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3238,7 +2886,7 @@ func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead. func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{48} + return file_master_proto_rawDescGZIP(), []int{41} } func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 { @@ -3263,16 +2911,18 @@ func (x *ReleaseAdminTokenRequest) GetLockName() string { } type ReleaseAdminTokenResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *ReleaseAdminTokenResponse) Reset() { *x = ReleaseAdminTokenResponse{} - mi := &file_master_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReleaseAdminTokenResponse) String() string { @@ -3282,8 +2932,8 @@ func (x *ReleaseAdminTokenResponse) String() string { func (*ReleaseAdminTokenResponse) ProtoMessage() {} func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[49] - if x != nil { + mi := &file_master_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3295,22 +2945,25 @@ func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead. func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{49} + return file_master_proto_rawDescGZIP(), []int{42} } type PingRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself - TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself + TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` } func (x *PingRequest) Reset() { *x = PingRequest{} - mi := &file_master_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingRequest) String() string { @@ -3320,8 +2973,8 @@ func (x *PingRequest) String() string { func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[50] - if x != nil { + mi := &file_master_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3333,7 +2986,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. func (*PingRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{50} + return file_master_proto_rawDescGZIP(), []int{43} } func (x *PingRequest) GetTarget() string { @@ -3351,19 +3004,22 @@ func (x *PingRequest) GetTargetType() string { } type PingResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` - RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` - StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` + RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` + StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` } func (x *PingResponse) Reset() { *x = PingResponse{} - mi := &file_master_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingResponse) String() string { @@ -3373,8 +3029,8 @@ func (x *PingResponse) String() string { func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[51] - if x != nil { + mi := &file_master_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3386,7 +3042,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. func (*PingResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{51} + return file_master_proto_rawDescGZIP(), []int{44} } func (x *PingResponse) GetStartTimeNs() int64 { @@ -3411,19 +3067,22 @@ func (x *PingResponse) GetStopTimeNs() int64 { } type RaftAddServerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - Voter bool `protobuf:"varint,3,opt,name=voter,proto3" json:"voter,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + Voter bool `protobuf:"varint,3,opt,name=voter,proto3" json:"voter,omitempty"` } func (x *RaftAddServerRequest) Reset() { *x = RaftAddServerRequest{} - mi := &file_master_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftAddServerRequest) String() string { @@ -3433,8 +3092,8 @@ func (x *RaftAddServerRequest) String() string { func (*RaftAddServerRequest) ProtoMessage() {} func (x *RaftAddServerRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[52] - if x != nil { + mi := &file_master_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3446,7 +3105,7 @@ func (x *RaftAddServerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftAddServerRequest.ProtoReflect.Descriptor instead. func (*RaftAddServerRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{52} + return file_master_proto_rawDescGZIP(), []int{45} } func (x *RaftAddServerRequest) GetId() string { @@ -3471,16 +3130,18 @@ func (x *RaftAddServerRequest) GetVoter() bool { } type RaftAddServerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *RaftAddServerResponse) Reset() { *x = RaftAddServerResponse{} - mi := &file_master_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftAddServerResponse) String() string { @@ -3490,8 +3151,8 @@ func (x *RaftAddServerResponse) String() string { func (*RaftAddServerResponse) ProtoMessage() {} func (x *RaftAddServerResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[53] - if x != nil { + mi := &file_master_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3503,22 +3164,25 @@ func (x *RaftAddServerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftAddServerResponse.ProtoReflect.Descriptor instead. func (*RaftAddServerResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{53} + return file_master_proto_rawDescGZIP(), []int{46} } type RaftRemoveServerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` } func (x *RaftRemoveServerRequest) Reset() { *x = RaftRemoveServerRequest{} - mi := &file_master_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftRemoveServerRequest) String() string { @@ -3528,8 +3192,8 @@ func (x *RaftRemoveServerRequest) String() string { func (*RaftRemoveServerRequest) ProtoMessage() {} func (x *RaftRemoveServerRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[54] - if x != nil { + mi := &file_master_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3541,7 +3205,7 @@ func (x *RaftRemoveServerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftRemoveServerRequest.ProtoReflect.Descriptor instead. func (*RaftRemoveServerRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{54} + return file_master_proto_rawDescGZIP(), []int{47} } func (x *RaftRemoveServerRequest) GetId() string { @@ -3559,16 +3223,18 @@ func (x *RaftRemoveServerRequest) GetForce() bool { } type RaftRemoveServerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *RaftRemoveServerResponse) Reset() { *x = RaftRemoveServerResponse{} - mi := &file_master_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftRemoveServerResponse) String() string { @@ -3578,8 +3244,8 @@ func (x *RaftRemoveServerResponse) String() string { func (*RaftRemoveServerResponse) ProtoMessage() {} func (x *RaftRemoveServerResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[55] - if x != nil { + mi := &file_master_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3591,20 +3257,22 @@ func (x *RaftRemoveServerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftRemoveServerResponse.ProtoReflect.Descriptor instead. func (*RaftRemoveServerResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{55} + return file_master_proto_rawDescGZIP(), []int{48} } type RaftListClusterServersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *RaftListClusterServersRequest) Reset() { *x = RaftListClusterServersRequest{} - mi := &file_master_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftListClusterServersRequest) String() string { @@ -3614,8 +3282,8 @@ func (x *RaftListClusterServersRequest) String() string { func (*RaftListClusterServersRequest) ProtoMessage() {} func (x *RaftListClusterServersRequest) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[56] - if x != nil { + mi := &file_master_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3627,21 +3295,24 @@ func (x *RaftListClusterServersRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftListClusterServersRequest.ProtoReflect.Descriptor instead. func (*RaftListClusterServersRequest) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{56} + return file_master_proto_rawDescGZIP(), []int{49} } type RaftListClusterServersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ClusterServers []*RaftListClusterServersResponse_ClusterServers `protobuf:"bytes,1,rep,name=cluster_servers,json=clusterServers,proto3" json:"cluster_servers,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *RaftListClusterServersResponse) Reset() { *x = RaftListClusterServersResponse{} - mi := &file_master_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftListClusterServersResponse) String() string { @@ -3651,8 +3322,8 @@ func (x *RaftListClusterServersResponse) String() string { func (*RaftListClusterServersResponse) ProtoMessage() {} func (x *RaftListClusterServersResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[57] - if x != nil { + mi := &file_master_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3664,7 +3335,7 @@ func (x *RaftListClusterServersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RaftListClusterServersResponse.ProtoReflect.Descriptor instead. func (*RaftListClusterServersResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{57} + return file_master_proto_rawDescGZIP(), []int{50} } func (x *RaftListClusterServersResponse) GetClusterServers() []*RaftListClusterServersResponse_ClusterServers { @@ -3674,56 +3345,23 @@ func (x *RaftListClusterServersResponse) GetClusterServers() []*RaftListClusterS return nil } -type VolumeGrowResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeGrowResponse) Reset() { - *x = VolumeGrowResponse{} - mi := &file_master_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeGrowResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeGrowResponse) ProtoMessage() {} - -func (x *VolumeGrowResponse) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[58] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeGrowResponse.ProtoReflect.Descriptor instead. -func (*VolumeGrowResponse) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{58} -} - type SuperBlockExtra_ErasureCoding struct { - state protoimpl.MessageState `protogen:"open.v1"` - Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"` - Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"` - VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"` + Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"` + VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } func (x *SuperBlockExtra_ErasureCoding) Reset() { *x = SuperBlockExtra_ErasureCoding{} - mi := &file_master_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SuperBlockExtra_ErasureCoding) String() string { @@ -3733,8 +3371,8 @@ func (x *SuperBlockExtra_ErasureCoding) String() string { func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[61] - if x != nil { + mi := &file_master_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3771,20 +3409,23 @@ func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { } type LookupVolumeResponse_VolumeIdLocation struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeOrFileId string `protobuf:"bytes,1,opt,name=volume_or_file_id,json=volumeOrFileId,proto3" json:"volume_or_file_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` - Auth string `protobuf:"bytes,4,opt,name=auth,proto3" json:"auth,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeOrFileId string `protobuf:"bytes,1,opt,name=volume_or_file_id,json=volumeOrFileId,proto3" json:"volume_or_file_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + Auth string `protobuf:"bytes,4,opt,name=auth,proto3" json:"auth,omitempty"` } func (x *LookupVolumeResponse_VolumeIdLocation) Reset() { *x = LookupVolumeResponse_VolumeIdLocation{} - mi := &file_master_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupVolumeResponse_VolumeIdLocation) String() string { @@ -3794,8 +3435,8 @@ func (x *LookupVolumeResponse_VolumeIdLocation) String() string { func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[62] - if x != nil { + mi := &file_master_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3839,18 +3480,21 @@ func (x *LookupVolumeResponse_VolumeIdLocation) GetAuth() string { } type LookupEcVolumeResponse_EcShardIdLocation struct { - state protoimpl.MessageState `protogen:"open.v1"` - ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` } func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() { *x = LookupEcVolumeResponse_EcShardIdLocation{} - mi := &file_master_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { @@ -3860,8 +3504,8 @@ func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[67] - if x != nil { + mi := &file_master_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3873,7 +3517,7 @@ func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.M // Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead. func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{33, 0} + return file_master_proto_rawDescGZIP(), []int{32, 0} } func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { @@ -3891,21 +3535,23 @@ func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { } type ListClusterNodesResponse_ClusterNode struct { - state protoimpl.MessageState `protogen:"open.v1"` - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - CreatedAtNs int64 `protobuf:"varint,4,opt,name=created_at_ns,json=createdAtNs,proto3" json:"created_at_ns,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + IsLeader bool `protobuf:"varint,3,opt,name=is_leader,json=isLeader,proto3" json:"is_leader,omitempty"` + CreatedAtNs int64 `protobuf:"varint,4,opt,name=created_at_ns,json=createdAtNs,proto3" json:"created_at_ns,omitempty"` } func (x *ListClusterNodesResponse_ClusterNode) Reset() { *x = ListClusterNodesResponse_ClusterNode{} - mi := &file_master_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ListClusterNodesResponse_ClusterNode) String() string { @@ -3915,8 +3561,8 @@ func (x *ListClusterNodesResponse_ClusterNode) String() string { func (*ListClusterNodesResponse_ClusterNode) ProtoMessage() {} func (x *ListClusterNodesResponse_ClusterNode) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[68] - if x != nil { + mi := &file_master_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3928,7 +3574,7 @@ func (x *ListClusterNodesResponse_ClusterNode) ProtoReflect() protoreflect.Messa // Deprecated: Use ListClusterNodesResponse_ClusterNode.ProtoReflect.Descriptor instead. func (*ListClusterNodesResponse_ClusterNode) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{45, 0} + return file_master_proto_rawDescGZIP(), []int{38, 0} } func (x *ListClusterNodesResponse_ClusterNode) GetAddress() string { @@ -3945,6 +3591,13 @@ func (x *ListClusterNodesResponse_ClusterNode) GetVersion() string { return "" } +func (x *ListClusterNodesResponse_ClusterNode) GetIsLeader() bool { + if x != nil { + return x.IsLeader + } + return false +} + func (x *ListClusterNodesResponse_ClusterNode) GetCreatedAtNs() int64 { if x != nil { return x.CreatedAtNs @@ -3952,35 +3605,23 @@ func (x *ListClusterNodesResponse_ClusterNode) GetCreatedAtNs() int64 { return 0 } -func (x *ListClusterNodesResponse_ClusterNode) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *ListClusterNodesResponse_ClusterNode) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - type RaftListClusterServersResponse_ClusterServers struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - Suffrage string `protobuf:"bytes,3,opt,name=suffrage,proto3" json:"suffrage,omitempty"` - IsLeader bool `protobuf:"varint,4,opt,name=isLeader,proto3" json:"isLeader,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + Suffrage string `protobuf:"bytes,3,opt,name=suffrage,proto3" json:"suffrage,omitempty"` // } func (x *RaftListClusterServersResponse_ClusterServers) Reset() { *x = RaftListClusterServersResponse_ClusterServers{} - mi := &file_master_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RaftListClusterServersResponse_ClusterServers) String() string { @@ -3990,8 +3631,8 @@ func (x *RaftListClusterServersResponse_ClusterServers) String() string { func (*RaftListClusterServersResponse_ClusterServers) ProtoMessage() {} func (x *RaftListClusterServersResponse_ClusterServers) ProtoReflect() protoreflect.Message { - mi := &file_master_proto_msgTypes[69] - if x != nil { + mi := &file_master_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4003,7 +3644,7 @@ func (x *RaftListClusterServersResponse_ClusterServers) ProtoReflect() protorefl // Deprecated: Use RaftListClusterServersResponse_ClusterServers.ProtoReflect.Descriptor instead. func (*RaftListClusterServersResponse_ClusterServers) Descriptor() ([]byte, []int) { - return file_master_proto_rawDescGZIP(), []int{57, 0} + return file_master_proto_rawDescGZIP(), []int{50, 0} } func (x *RaftListClusterServersResponse_ClusterServers) GetId() string { @@ -4027,410 +3668,678 @@ func (x *RaftListClusterServersResponse_ClusterServers) GetSuffrage() string { return "" } -func (x *RaftListClusterServersResponse_ClusterServers) GetIsLeader() bool { - if x != nil { - return x.IsLeader - } - return false -} - var File_master_proto protoreflect.FileDescriptor -const file_master_proto_rawDesc = "" + - "\n" + - "\fmaster.proto\x12\tmaster_pb\"\xc0\a\n" + - "\tHeartbeat\x12\x0e\n" + - "\x02ip\x18\x01 \x01(\tR\x02ip\x12\x12\n" + - "\x04port\x18\x02 \x01(\rR\x04port\x12\x1d\n" + - "\n" + - "public_url\x18\x03 \x01(\tR\tpublicUrl\x12 \n" + - "\fmax_file_key\x18\x05 \x01(\x04R\n" + - "maxFileKey\x12\x1f\n" + - "\vdata_center\x18\x06 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\a \x01(\tR\x04rack\x12\x1d\n" + - "\n" + - "admin_port\x18\b \x01(\rR\tadminPort\x12=\n" + - "\avolumes\x18\t \x03(\v2#.master_pb.VolumeInformationMessageR\avolumes\x12I\n" + - "\vnew_volumes\x18\n" + - " \x03(\v2(.master_pb.VolumeShortInformationMessageR\n" + - "newVolumes\x12Q\n" + - "\x0fdeleted_volumes\x18\v \x03(\v2(.master_pb.VolumeShortInformationMessageR\x0edeletedVolumes\x12$\n" + - "\x0ehas_no_volumes\x18\f \x01(\bR\fhasNoVolumes\x12G\n" + - "\tec_shards\x18\x10 \x03(\v2*.master_pb.VolumeEcShardInformationMessageR\becShards\x12N\n" + - "\rnew_ec_shards\x18\x11 \x03(\v2*.master_pb.VolumeEcShardInformationMessageR\vnewEcShards\x12V\n" + - "\x11deleted_ec_shards\x18\x12 \x03(\v2*.master_pb.VolumeEcShardInformationMessageR\x0fdeletedEcShards\x12'\n" + - "\x10has_no_ec_shards\x18\x13 \x01(\bR\rhasNoEcShards\x12U\n" + - "\x11max_volume_counts\x18\x04 \x03(\v2).master_pb.Heartbeat.MaxVolumeCountsEntryR\x0fmaxVolumeCounts\x12\x1b\n" + - "\tgrpc_port\x18\x14 \x01(\rR\bgrpcPort\x12%\n" + - "\x0elocation_uuids\x18\x15 \x03(\tR\rlocationUuids\x1aB\n" + - "\x14MaxVolumeCountsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\rR\x05value:\x028\x01\"\xcd\x02\n" + - "\x11HeartbeatResponse\x12*\n" + - "\x11volume_size_limit\x18\x01 \x01(\x04R\x0fvolumeSizeLimit\x12\x16\n" + - "\x06leader\x18\x02 \x01(\tR\x06leader\x12'\n" + - "\x0fmetrics_address\x18\x03 \x01(\tR\x0emetricsAddress\x128\n" + - "\x18metrics_interval_seconds\x18\x04 \x01(\rR\x16metricsIntervalSeconds\x12D\n" + - "\x10storage_backends\x18\x05 \x03(\v2\x19.master_pb.StorageBackendR\x0fstorageBackends\x12)\n" + - "\x10duplicated_uuids\x18\x06 \x03(\tR\x0fduplicatedUuids\x12 \n" + - "\vpreallocate\x18\a \x01(\bR\vpreallocate\"\xb1\x04\n" + - "\x18VolumeInformationMessage\x12\x0e\n" + - "\x02id\x18\x01 \x01(\rR\x02id\x12\x12\n" + - "\x04size\x18\x02 \x01(\x04R\x04size\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12\x1d\n" + - "\n" + - "file_count\x18\x04 \x01(\x04R\tfileCount\x12!\n" + - "\fdelete_count\x18\x05 \x01(\x04R\vdeleteCount\x12,\n" + - "\x12deleted_byte_count\x18\x06 \x01(\x04R\x10deletedByteCount\x12\x1b\n" + - "\tread_only\x18\a \x01(\bR\breadOnly\x12+\n" + - "\x11replica_placement\x18\b \x01(\rR\x10replicaPlacement\x12\x18\n" + - "\aversion\x18\t \x01(\rR\aversion\x12\x10\n" + - "\x03ttl\x18\n" + - " \x01(\rR\x03ttl\x12)\n" + - "\x10compact_revision\x18\v \x01(\rR\x0fcompactRevision\x12,\n" + - "\x12modified_at_second\x18\f \x01(\x03R\x10modifiedAtSecond\x12.\n" + - "\x13remote_storage_name\x18\r \x01(\tR\x11remoteStorageName\x12,\n" + - "\x12remote_storage_key\x18\x0e \x01(\tR\x10remoteStorageKey\x12\x1b\n" + - "\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" + - "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xde\x01\n" + - "\x1dVolumeShortInformationMessage\x12\x0e\n" + - "\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12+\n" + - "\x11replica_placement\x18\b \x01(\rR\x10replicaPlacement\x12\x18\n" + - "\aversion\x18\t \x01(\rR\aversion\x12\x10\n" + - "\x03ttl\x18\n" + - " \x01(\rR\x03ttl\x12\x1b\n" + - "\tdisk_type\x18\x0f \x01(\tR\bdiskType\x12\x17\n" + - "\adisk_id\x18\x10 \x01(\rR\x06diskId\"\xf0\x01\n" + - "\x1fVolumeEcShardInformationMessage\x12\x0e\n" + - "\x02id\x18\x01 \x01(\rR\x02id\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\"\n" + - "\rec_index_bits\x18\x03 \x01(\rR\vecIndexBits\x12\x1b\n" + - "\tdisk_type\x18\x04 \x01(\tR\bdiskType\x12\"\n" + - "\rexpire_at_sec\x18\x05 \x01(\x04R\vexpireAtSec\x12\x17\n" + - "\adisk_id\x18\x06 \x01(\rR\x06diskId\x12\x1f\n" + - "\vshard_sizes\x18\a \x03(\x03R\n" + - "shardSizes\"\xbe\x01\n" + - "\x0eStorageBackend\x12\x12\n" + - "\x04type\x18\x01 \x01(\tR\x04type\x12\x0e\n" + - "\x02id\x18\x02 \x01(\tR\x02id\x12I\n" + - "\n" + - "properties\x18\x03 \x03(\v2).master_pb.StorageBackend.PropertiesEntryR\n" + - "properties\x1a=\n" + - "\x0fPropertiesEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\a\n" + - "\x05Empty\"\xbe\x01\n" + - "\x0fSuperBlockExtra\x12O\n" + - "\x0eerasure_coding\x18\x01 \x01(\v2(.master_pb.SuperBlockExtra.ErasureCodingR\rerasureCoding\x1aZ\n" + - "\rErasureCoding\x12\x12\n" + - "\x04data\x18\x01 \x01(\rR\x04data\x12\x16\n" + - "\x06parity\x18\x02 \x01(\rR\x06parity\x12\x1d\n" + - "\n" + - "volume_ids\x18\x03 \x03(\rR\tvolumeIds\"\xce\x01\n" + - "\x14KeepConnectedRequest\x12\x1f\n" + - "\vclient_type\x18\x01 \x01(\tR\n" + - "clientType\x12%\n" + - "\x0eclient_address\x18\x03 \x01(\tR\rclientAddress\x12\x18\n" + - "\aversion\x18\x04 \x01(\tR\aversion\x12\x1f\n" + - "\vfiler_group\x18\x05 \x01(\tR\n" + - "filerGroup\x12\x1f\n" + - "\vdata_center\x18\x06 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\a \x01(\tR\x04rack\"\x9d\x02\n" + - "\x0eVolumeLocation\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" + - "\n" + - "public_url\x18\x02 \x01(\tR\tpublicUrl\x12\x19\n" + - "\bnew_vids\x18\x03 \x03(\rR\anewVids\x12!\n" + - "\fdeleted_vids\x18\x04 \x03(\rR\vdeletedVids\x12\x16\n" + - "\x06leader\x18\x05 \x01(\tR\x06leader\x12\x1f\n" + - "\vdata_center\x18\x06 \x01(\tR\n" + - "dataCenter\x12\x1b\n" + - "\tgrpc_port\x18\a \x01(\rR\bgrpcPort\x12\x1e\n" + - "\vnew_ec_vids\x18\b \x03(\rR\tnewEcVids\x12&\n" + - "\x0fdeleted_ec_vids\x18\t \x03(\rR\rdeletedEcVids\"\xa6\x01\n" + - "\x11ClusterNodeUpdate\x12\x1b\n" + - "\tnode_type\x18\x01 \x01(\tR\bnodeType\x12\x18\n" + - "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x15\n" + - "\x06is_add\x18\x04 \x01(\bR\x05isAdd\x12\x1f\n" + - "\vfiler_group\x18\x05 \x01(\tR\n" + - "filerGroup\x12\"\n" + - "\rcreated_at_ns\x18\x06 \x01(\x03R\vcreatedAtNs\"\xa9\x01\n" + - "\x15KeepConnectedResponse\x12B\n" + - "\x0fvolume_location\x18\x01 \x01(\v2\x19.master_pb.VolumeLocationR\x0evolumeLocation\x12L\n" + - "\x13cluster_node_update\x18\x02 \x01(\v2\x1c.master_pb.ClusterNodeUpdateR\x11clusterNodeUpdate\"b\n" + - "\x13LookupVolumeRequest\x12+\n" + - "\x12volume_or_file_ids\x18\x01 \x03(\tR\x0fvolumeOrFileIds\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\"\x95\x02\n" + - "\x14LookupVolumeResponse\x12`\n" + - "\x13volume_id_locations\x18\x01 \x03(\v20.master_pb.LookupVolumeResponse.VolumeIdLocationR\x11volumeIdLocations\x1a\x9a\x01\n" + - "\x10VolumeIdLocation\x12)\n" + - "\x11volume_or_file_id\x18\x01 \x01(\tR\x0evolumeOrFileId\x121\n" + - "\tlocations\x18\x02 \x03(\v2\x13.master_pb.LocationR\tlocations\x12\x14\n" + - "\x05error\x18\x03 \x01(\tR\x05error\x12\x12\n" + - "\x04auth\x18\x04 \x01(\tR\x04auth\"y\n" + - "\bLocation\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" + - "\n" + - "public_url\x18\x02 \x01(\tR\tpublicUrl\x12\x1b\n" + - "\tgrpc_port\x18\x03 \x01(\rR\bgrpcPort\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\"\xd0\x02\n" + - "\rAssignRequest\x12\x14\n" + - "\x05count\x18\x01 \x01(\x04R\x05count\x12 \n" + - "\vreplication\x18\x02 \x01(\tR\vreplication\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12\x10\n" + - "\x03ttl\x18\x04 \x01(\tR\x03ttl\x12\x1f\n" + - "\vdata_center\x18\x05 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\x06 \x01(\tR\x04rack\x12\x1b\n" + - "\tdata_node\x18\a \x01(\tR\bdataNode\x122\n" + - "\x16memory_map_max_size_mb\x18\b \x01(\rR\x12memoryMapMaxSizeMb\x122\n" + - "\x15writable_volume_count\x18\t \x01(\rR\x13writableVolumeCount\x12\x1b\n" + - "\tdisk_type\x18\n" + - " \x01(\tR\bdiskType\"\xbe\x02\n" + - "\x11VolumeGrowRequest\x122\n" + - "\x15writable_volume_count\x18\x01 \x01(\rR\x13writableVolumeCount\x12 \n" + - "\vreplication\x18\x02 \x01(\tR\vreplication\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12\x10\n" + - "\x03ttl\x18\x04 \x01(\tR\x03ttl\x12\x1f\n" + - "\vdata_center\x18\x05 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\x06 \x01(\tR\x04rack\x12\x1b\n" + - "\tdata_node\x18\a \x01(\tR\bdataNode\x122\n" + - "\x16memory_map_max_size_mb\x18\b \x01(\rR\x12memoryMapMaxSizeMb\x12\x1b\n" + - "\tdisk_type\x18\t \x01(\tR\bdiskType\"\xc4\x01\n" + - "\x0eAssignResponse\x12\x10\n" + - "\x03fid\x18\x01 \x01(\tR\x03fid\x12\x14\n" + - "\x05count\x18\x04 \x01(\x04R\x05count\x12\x14\n" + - "\x05error\x18\x05 \x01(\tR\x05error\x12\x12\n" + - "\x04auth\x18\x06 \x01(\tR\x04auth\x12/\n" + - "\breplicas\x18\a \x03(\v2\x13.master_pb.LocationR\breplicas\x12/\n" + - "\blocation\x18\b \x01(\v2\x13.master_pb.LocationR\blocation\"\x84\x01\n" + - "\x11StatisticsRequest\x12 \n" + - "\vreplication\x18\x01 \x01(\tR\vreplication\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x10\n" + - "\x03ttl\x18\x03 \x01(\tR\x03ttl\x12\x1b\n" + - "\tdisk_type\x18\x04 \x01(\tR\bdiskType\"o\n" + - "\x12StatisticsResponse\x12\x1d\n" + - "\n" + - "total_size\x18\x04 \x01(\x04R\ttotalSize\x12\x1b\n" + - "\tused_size\x18\x05 \x01(\x04R\busedSize\x12\x1d\n" + - "\n" + - "file_count\x18\x06 \x01(\x04R\tfileCount\" \n" + - "\n" + - "Collection\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\"{\n" + - "\x15CollectionListRequest\x124\n" + - "\x16include_normal_volumes\x18\x01 \x01(\bR\x14includeNormalVolumes\x12,\n" + - "\x12include_ec_volumes\x18\x02 \x01(\bR\x10includeEcVolumes\"Q\n" + - "\x16CollectionListResponse\x127\n" + - "\vcollections\x18\x01 \x03(\v2\x15.master_pb.CollectionR\vcollections\"-\n" + - "\x17CollectionDeleteRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\"\x1a\n" + - "\x18CollectionDeleteResponse\"\xaa\x03\n" + - "\bDiskInfo\x12\x12\n" + - "\x04type\x18\x01 \x01(\tR\x04type\x12!\n" + - "\fvolume_count\x18\x02 \x01(\x03R\vvolumeCount\x12(\n" + - "\x10max_volume_count\x18\x03 \x01(\x03R\x0emaxVolumeCount\x12*\n" + - "\x11free_volume_count\x18\x04 \x01(\x03R\x0ffreeVolumeCount\x12.\n" + - "\x13active_volume_count\x18\x05 \x01(\x03R\x11activeVolumeCount\x12F\n" + - "\fvolume_infos\x18\x06 \x03(\v2#.master_pb.VolumeInformationMessageR\vvolumeInfos\x12P\n" + - "\x0eec_shard_infos\x18\a \x03(\v2*.master_pb.VolumeEcShardInformationMessageR\fecShardInfos\x12.\n" + - "\x13remote_volume_count\x18\b \x01(\x03R\x11remoteVolumeCount\x12\x17\n" + - "\adisk_id\x18\t \x01(\rR\x06diskId\"\xd4\x01\n" + - "\fDataNodeInfo\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12D\n" + - "\tdiskInfos\x18\x02 \x03(\v2&.master_pb.DataNodeInfo.DiskInfosEntryR\tdiskInfos\x12\x1b\n" + - "\tgrpc_port\x18\x03 \x01(\rR\bgrpcPort\x1aQ\n" + - "\x0eDiskInfosEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12)\n" + - "\x05value\x18\x02 \x01(\v2\x13.master_pb.DiskInfoR\x05value:\x028\x01\"\xf0\x01\n" + - "\bRackInfo\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12?\n" + - "\x0fdata_node_infos\x18\x02 \x03(\v2\x17.master_pb.DataNodeInfoR\rdataNodeInfos\x12@\n" + - "\tdiskInfos\x18\x03 \x03(\v2\".master_pb.RackInfo.DiskInfosEntryR\tdiskInfos\x1aQ\n" + - "\x0eDiskInfosEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12)\n" + - "\x05value\x18\x02 \x01(\v2\x13.master_pb.DiskInfoR\x05value:\x028\x01\"\xef\x01\n" + - "\x0eDataCenterInfo\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x122\n" + - "\n" + - "rack_infos\x18\x02 \x03(\v2\x13.master_pb.RackInfoR\trackInfos\x12F\n" + - "\tdiskInfos\x18\x03 \x03(\v2(.master_pb.DataCenterInfo.DiskInfosEntryR\tdiskInfos\x1aQ\n" + - "\x0eDiskInfosEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12)\n" + - "\x05value\x18\x02 \x01(\v2\x13.master_pb.DiskInfoR\x05value:\x028\x01\"\xfe\x01\n" + - "\fTopologyInfo\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12E\n" + - "\x11data_center_infos\x18\x02 \x03(\v2\x19.master_pb.DataCenterInfoR\x0fdataCenterInfos\x12D\n" + - "\tdiskInfos\x18\x03 \x03(\v2&.master_pb.TopologyInfo.DiskInfosEntryR\tdiskInfos\x1aQ\n" + - "\x0eDiskInfosEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12)\n" + - "\x05value\x18\x02 \x01(\v2\x13.master_pb.DiskInfoR\x05value:\x028\x01\"\x13\n" + - "\x11VolumeListRequest\"\x83\x01\n" + - "\x12VolumeListResponse\x12<\n" + - "\rtopology_info\x18\x01 \x01(\v2\x17.master_pb.TopologyInfoR\ftopologyInfo\x12/\n" + - "\x14volume_size_limit_mb\x18\x02 \x01(\x04R\x11volumeSizeLimitMb\"4\n" + - "\x15LookupEcVolumeRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\xfb\x01\n" + - "\x16LookupEcVolumeResponse\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12a\n" + - "\x12shard_id_locations\x18\x02 \x03(\v23.master_pb.LookupEcVolumeResponse.EcShardIdLocationR\x10shardIdLocations\x1aa\n" + - "\x11EcShardIdLocation\x12\x19\n" + - "\bshard_id\x18\x01 \x01(\rR\ashardId\x121\n" + - "\tlocations\x18\x02 \x03(\v2\x13.master_pb.LocationR\tlocations\"\x7f\n" + - "\x13VacuumVolumeRequest\x12+\n" + - "\x11garbage_threshold\x18\x01 \x01(\x02R\x10garbageThreshold\x12\x1b\n" + - "\tvolume_id\x18\x02 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\"\x16\n" + - "\x14VacuumVolumeResponse\"\x16\n" + - "\x14DisableVacuumRequest\"\x17\n" + - "\x15DisableVacuumResponse\"\x15\n" + - "\x13EnableVacuumRequest\"\x16\n" + - "\x14EnableVacuumResponse\"\x93\x02\n" + - "\x19VolumeMarkReadonlyRequest\x12\x0e\n" + - "\x02ip\x18\x01 \x01(\tR\x02ip\x12\x12\n" + - "\x04port\x18\x02 \x01(\rR\x04port\x12\x1b\n" + - "\tvolume_id\x18\x04 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x05 \x01(\tR\n" + - "collection\x12+\n" + - "\x11replica_placement\x18\x06 \x01(\rR\x10replicaPlacement\x12\x18\n" + - "\aversion\x18\a \x01(\rR\aversion\x12\x10\n" + - "\x03ttl\x18\b \x01(\rR\x03ttl\x12\x1b\n" + - "\tdisk_type\x18\t \x01(\tR\bdiskType\x12\x1f\n" + - "\vis_readonly\x18\n" + - " \x01(\bR\n" + - "isReadonly\"\x1c\n" + - "\x1aVolumeMarkReadonlyResponse\"\x1f\n" + - "\x1dGetMasterConfigurationRequest\"\xf3\x02\n" + - "\x1eGetMasterConfigurationResponse\x12'\n" + - "\x0fmetrics_address\x18\x01 \x01(\tR\x0emetricsAddress\x128\n" + - "\x18metrics_interval_seconds\x18\x02 \x01(\rR\x16metricsIntervalSeconds\x12D\n" + - "\x10storage_backends\x18\x03 \x03(\v2\x19.master_pb.StorageBackendR\x0fstorageBackends\x12/\n" + - "\x13default_replication\x18\x04 \x01(\tR\x12defaultReplication\x12\x16\n" + - "\x06leader\x18\x05 \x01(\tR\x06leader\x120\n" + - "\x15volume_size_limit_m_b\x18\x06 \x01(\rR\x11volumeSizeLimitMB\x12-\n" + - "\x12volume_preallocate\x18\a \x01(\bR\x11volumePreallocate\"q\n" + - "\x17ListClusterNodesRequest\x12\x1f\n" + - "\vclient_type\x18\x01 \x01(\tR\n" + - "clientType\x12\x1f\n" + - "\vfiler_group\x18\x02 \x01(\tR\n" + - "filerGroup\x12\x14\n" + - "\x05limit\x18\x04 \x01(\x05R\x05limit\"\x8d\x02\n" + - "\x18ListClusterNodesResponse\x12T\n" + - "\rcluster_nodes\x18\x01 \x03(\v2/.master_pb.ListClusterNodesResponse.ClusterNodeR\fclusterNodes\x1a\x9a\x01\n" + - "\vClusterNode\x12\x18\n" + - "\aaddress\x18\x01 \x01(\tR\aaddress\x12\x18\n" + - "\aversion\x18\x02 \x01(\tR\aversion\x12\"\n" + - "\rcreated_at_ns\x18\x04 \x01(\x03R\vcreatedAtNs\x12\x1f\n" + - "\vdata_center\x18\x05 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\x06 \x01(\tR\x04rack\"\xc5\x01\n" + - "\x16LeaseAdminTokenRequest\x12%\n" + - "\x0eprevious_token\x18\x01 \x01(\x03R\rpreviousToken\x12,\n" + - "\x12previous_lock_time\x18\x02 \x01(\x03R\x10previousLockTime\x12\x1b\n" + - "\tlock_name\x18\x03 \x01(\tR\blockName\x12\x1f\n" + - "\vclient_name\x18\x04 \x01(\tR\n" + - "clientName\x12\x18\n" + - "\amessage\x18\x05 \x01(\tR\amessage\"M\n" + - "\x17LeaseAdminTokenResponse\x12\x14\n" + - "\x05token\x18\x01 \x01(\x03R\x05token\x12\x1c\n" + - "\n" + - "lock_ts_ns\x18\x02 \x01(\x03R\blockTsNs\"\x8c\x01\n" + - "\x18ReleaseAdminTokenRequest\x12%\n" + - "\x0eprevious_token\x18\x01 \x01(\x03R\rpreviousToken\x12,\n" + - "\x12previous_lock_time\x18\x02 \x01(\x03R\x10previousLockTime\x12\x1b\n" + - "\tlock_name\x18\x03 \x01(\tR\blockName\"\x1b\n" + - "\x19ReleaseAdminTokenResponse\"F\n" + - "\vPingRequest\x12\x16\n" + - "\x06target\x18\x01 \x01(\tR\x06target\x12\x1f\n" + - "\vtarget_type\x18\x02 \x01(\tR\n" + - "targetType\"z\n" + - "\fPingResponse\x12\"\n" + - "\rstart_time_ns\x18\x01 \x01(\x03R\vstartTimeNs\x12$\n" + - "\x0eremote_time_ns\x18\x02 \x01(\x03R\fremoteTimeNs\x12 \n" + - "\fstop_time_ns\x18\x03 \x01(\x03R\n" + - "stopTimeNs\"V\n" + - "\x14RaftAddServerRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + - "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x14\n" + - "\x05voter\x18\x03 \x01(\bR\x05voter\"\x17\n" + - "\x15RaftAddServerResponse\"?\n" + - "\x17RaftRemoveServerRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + - "\x05force\x18\x02 \x01(\bR\x05force\"\x1a\n" + - "\x18RaftRemoveServerResponse\"\x1f\n" + - "\x1dRaftListClusterServersRequest\"\xf7\x01\n" + - "\x1eRaftListClusterServersResponse\x12a\n" + - "\x0fcluster_servers\x18\x01 \x03(\v28.master_pb.RaftListClusterServersResponse.ClusterServersR\x0eclusterServers\x1ar\n" + - "\x0eClusterServers\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + - "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x1a\n" + - "\bsuffrage\x18\x03 \x01(\tR\bsuffrage\x12\x1a\n" + - "\bisLeader\x18\x04 \x01(\bR\bisLeader\"\x14\n" + - "\x12VolumeGrowResponse2\xd5\x0f\n" + - "\aSeaweed\x12I\n" + - "\rSendHeartbeat\x12\x14.master_pb.Heartbeat\x1a\x1c.master_pb.HeartbeatResponse\"\x00(\x010\x01\x12X\n" + - "\rKeepConnected\x12\x1f.master_pb.KeepConnectedRequest\x1a .master_pb.KeepConnectedResponse\"\x00(\x010\x01\x12Q\n" + - "\fLookupVolume\x12\x1e.master_pb.LookupVolumeRequest\x1a\x1f.master_pb.LookupVolumeResponse\"\x00\x12?\n" + - "\x06Assign\x12\x18.master_pb.AssignRequest\x1a\x19.master_pb.AssignResponse\"\x00\x12I\n" + - "\fStreamAssign\x12\x18.master_pb.AssignRequest\x1a\x19.master_pb.AssignResponse\"\x00(\x010\x01\x12K\n" + - "\n" + - "Statistics\x12\x1c.master_pb.StatisticsRequest\x1a\x1d.master_pb.StatisticsResponse\"\x00\x12W\n" + - "\x0eCollectionList\x12 .master_pb.CollectionListRequest\x1a!.master_pb.CollectionListResponse\"\x00\x12]\n" + - "\x10CollectionDelete\x12\".master_pb.CollectionDeleteRequest\x1a#.master_pb.CollectionDeleteResponse\"\x00\x12K\n" + - "\n" + - "VolumeList\x12\x1c.master_pb.VolumeListRequest\x1a\x1d.master_pb.VolumeListResponse\"\x00\x12W\n" + - "\x0eLookupEcVolume\x12 .master_pb.LookupEcVolumeRequest\x1a!.master_pb.LookupEcVolumeResponse\"\x00\x12Q\n" + - "\fVacuumVolume\x12\x1e.master_pb.VacuumVolumeRequest\x1a\x1f.master_pb.VacuumVolumeResponse\"\x00\x12T\n" + - "\rDisableVacuum\x12\x1f.master_pb.DisableVacuumRequest\x1a .master_pb.DisableVacuumResponse\"\x00\x12Q\n" + - "\fEnableVacuum\x12\x1e.master_pb.EnableVacuumRequest\x1a\x1f.master_pb.EnableVacuumResponse\"\x00\x12c\n" + - "\x12VolumeMarkReadonly\x12$.master_pb.VolumeMarkReadonlyRequest\x1a%.master_pb.VolumeMarkReadonlyResponse\"\x00\x12o\n" + - "\x16GetMasterConfiguration\x12(.master_pb.GetMasterConfigurationRequest\x1a).master_pb.GetMasterConfigurationResponse\"\x00\x12]\n" + - "\x10ListClusterNodes\x12\".master_pb.ListClusterNodesRequest\x1a#.master_pb.ListClusterNodesResponse\"\x00\x12Z\n" + - "\x0fLeaseAdminToken\x12!.master_pb.LeaseAdminTokenRequest\x1a\".master_pb.LeaseAdminTokenResponse\"\x00\x12`\n" + - "\x11ReleaseAdminToken\x12#.master_pb.ReleaseAdminTokenRequest\x1a$.master_pb.ReleaseAdminTokenResponse\"\x00\x129\n" + - "\x04Ping\x12\x16.master_pb.PingRequest\x1a\x17.master_pb.PingResponse\"\x00\x12o\n" + - "\x16RaftListClusterServers\x12(.master_pb.RaftListClusterServersRequest\x1a).master_pb.RaftListClusterServersResponse\"\x00\x12T\n" + - "\rRaftAddServer\x12\x1f.master_pb.RaftAddServerRequest\x1a .master_pb.RaftAddServerResponse\"\x00\x12]\n" + - "\x10RaftRemoveServer\x12\".master_pb.RaftRemoveServerRequest\x1a#.master_pb.RaftRemoveServerResponse\"\x00\x12K\n" + - "\n" + - "VolumeGrow\x12\x1c.master_pb.VolumeGrowRequest\x1a\x1d.master_pb.VolumeGrowResponse\"\x00B2Z0github.com/seaweedfs/seaweedfs/weed/pb/master_pbb\x06proto3" +var file_master_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0xc0, 0x07, 0x0a, 0x09, 0x48, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, + 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a, + 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, + 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x08, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d, + 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65, + 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x55, 0x0a, + 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x2e, 0x4d, + 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x55, 0x75, 0x69, 0x64, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4d, 0x61, 0x78, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xab, 0x02, 0x0a, + 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x75, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x55, 0x75, 0x69, 0x64, 0x73, 0x22, 0x98, 0x04, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, + 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, + 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, + 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x92, 0x01, + 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, + 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x62, 0x69, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x63, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x42, 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01, 0x0a, + 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, + 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x99, 0x01, + 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, + 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x9d, 0x02, 0x0a, 0x0e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x19, 0x0a, + 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x07, 0x6e, 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0b, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x69, 0x64, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x45, 0x63, 0x56, 0x69, 0x64, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x63, 0x5f, + 0x76, 0x69, 0x64, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x45, 0x63, 0x56, 0x69, 0x64, 0x73, 0x22, 0xc3, 0x01, 0x0a, 0x11, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x41, 0x64, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0d, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, + 0xa9, 0x01, 0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0f, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, + 0x13, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, + 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x62, 0x0a, 0x13, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6f, 0x72, 0x5f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x95, 0x02, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x9a, 0x01, 0x0a, 0x10, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x29, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4f, 0x72, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x58, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, + 0x74, 0x22, 0xd0, 0x02, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, + 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, + 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, + 0x65, 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x2f, 0x0a, 0x08, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x11, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, + 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, + 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x17, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x91, 0x03, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, + 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x73, 0x12, 0x50, 0x0a, 0x0e, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0c, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, + 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd4, 0x01, 0x0a, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x44, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, + 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, 0x08, + 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3f, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x64, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, + 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xef, + 0x01, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x72, 0x61, 0x63, 0x6b, + 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, + 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xfe, 0x01, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x45, 0x0a, 0x11, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, + 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, + 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, + 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, + 0x0d, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x74, + 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x14, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4d, 0x62, 0x22, 0x34, 0x0a, 0x15, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x16, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x12, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, + 0x11, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x12, 0x31, 0x0a, + 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x7f, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x67, 0x61, 0x72, 0x62, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x10, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x0a, 0x1d, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf3, 0x02, 0x0a, 0x1e, 0x47, + 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, + 0x30, 0x0a, 0x15, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x5f, 0x62, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4d, + 0x42, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x22, 0x5b, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0xf5, 0x01, + 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x1a, 0x82, 0x01, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, + 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, + 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, + 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4d, 0x0a, + 0x17, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, + 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, + 0x18, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, + 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x7a, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x74, + 0x6f, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x22, 0x56, 0x0a, 0x14, + 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x6f, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, + 0x6f, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x0a, + 0x17, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x1a, + 0x0a, 0x18, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x0a, 0x1d, 0x52, 0x61, + 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x1e, + 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, + 0x0a, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x1a, 0x56, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x32, 0xaf, 0x0c, 0x0a, 0x07, 0x53, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, + 0x12, 0x58, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, + 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, + 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, + 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, + 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, + 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, + 0x10, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, + 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, + 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x50, 0x69, + 0x6e, 0x67, 0x12, 0x16, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, + 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, + 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, + 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x66, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x52, 0x61, 0x66, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, + 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, + 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_master_proto_rawDescOnce sync.Once - file_master_proto_rawDescData []byte + file_master_proto_rawDescData = file_master_proto_rawDesc ) func file_master_proto_rawDescGZIP() []byte { file_master_proto_rawDescOnce.Do(func() { - file_master_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_master_proto_rawDesc), len(file_master_proto_rawDesc))) + file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData) }) return file_master_proto_rawDescData } -var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 70) -var file_master_proto_goTypes = []any{ +var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 62) +var file_master_proto_goTypes = []interface{}{ (*Heartbeat)(nil), // 0: master_pb.Heartbeat (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse (*VolumeInformationMessage)(nil), // 2: master_pb.VolumeInformationMessage @@ -4447,60 +4356,52 @@ var file_master_proto_goTypes = []any{ (*LookupVolumeResponse)(nil), // 13: master_pb.LookupVolumeResponse (*Location)(nil), // 14: master_pb.Location (*AssignRequest)(nil), // 15: master_pb.AssignRequest - (*VolumeGrowRequest)(nil), // 16: master_pb.VolumeGrowRequest - (*AssignResponse)(nil), // 17: master_pb.AssignResponse - (*StatisticsRequest)(nil), // 18: master_pb.StatisticsRequest - (*StatisticsResponse)(nil), // 19: master_pb.StatisticsResponse - (*Collection)(nil), // 20: master_pb.Collection - (*CollectionListRequest)(nil), // 21: master_pb.CollectionListRequest - (*CollectionListResponse)(nil), // 22: master_pb.CollectionListResponse - (*CollectionDeleteRequest)(nil), // 23: master_pb.CollectionDeleteRequest - (*CollectionDeleteResponse)(nil), // 24: master_pb.CollectionDeleteResponse - (*DiskInfo)(nil), // 25: master_pb.DiskInfo - (*DataNodeInfo)(nil), // 26: master_pb.DataNodeInfo - (*RackInfo)(nil), // 27: master_pb.RackInfo - (*DataCenterInfo)(nil), // 28: master_pb.DataCenterInfo - (*TopologyInfo)(nil), // 29: master_pb.TopologyInfo - (*VolumeListRequest)(nil), // 30: master_pb.VolumeListRequest - (*VolumeListResponse)(nil), // 31: master_pb.VolumeListResponse - (*LookupEcVolumeRequest)(nil), // 32: master_pb.LookupEcVolumeRequest - (*LookupEcVolumeResponse)(nil), // 33: master_pb.LookupEcVolumeResponse - (*VacuumVolumeRequest)(nil), // 34: master_pb.VacuumVolumeRequest - (*VacuumVolumeResponse)(nil), // 35: master_pb.VacuumVolumeResponse - (*DisableVacuumRequest)(nil), // 36: master_pb.DisableVacuumRequest - (*DisableVacuumResponse)(nil), // 37: master_pb.DisableVacuumResponse - (*EnableVacuumRequest)(nil), // 38: master_pb.EnableVacuumRequest - (*EnableVacuumResponse)(nil), // 39: master_pb.EnableVacuumResponse - (*VolumeMarkReadonlyRequest)(nil), // 40: master_pb.VolumeMarkReadonlyRequest - (*VolumeMarkReadonlyResponse)(nil), // 41: master_pb.VolumeMarkReadonlyResponse - (*GetMasterConfigurationRequest)(nil), // 42: master_pb.GetMasterConfigurationRequest - (*GetMasterConfigurationResponse)(nil), // 43: master_pb.GetMasterConfigurationResponse - (*ListClusterNodesRequest)(nil), // 44: master_pb.ListClusterNodesRequest - (*ListClusterNodesResponse)(nil), // 45: master_pb.ListClusterNodesResponse - (*LeaseAdminTokenRequest)(nil), // 46: master_pb.LeaseAdminTokenRequest - (*LeaseAdminTokenResponse)(nil), // 47: master_pb.LeaseAdminTokenResponse - (*ReleaseAdminTokenRequest)(nil), // 48: master_pb.ReleaseAdminTokenRequest - (*ReleaseAdminTokenResponse)(nil), // 49: master_pb.ReleaseAdminTokenResponse - (*PingRequest)(nil), // 50: master_pb.PingRequest - (*PingResponse)(nil), // 51: master_pb.PingResponse - (*RaftAddServerRequest)(nil), // 52: master_pb.RaftAddServerRequest - (*RaftAddServerResponse)(nil), // 53: master_pb.RaftAddServerResponse - (*RaftRemoveServerRequest)(nil), // 54: master_pb.RaftRemoveServerRequest - (*RaftRemoveServerResponse)(nil), // 55: master_pb.RaftRemoveServerResponse - (*RaftListClusterServersRequest)(nil), // 56: master_pb.RaftListClusterServersRequest - (*RaftListClusterServersResponse)(nil), // 57: master_pb.RaftListClusterServersResponse - (*VolumeGrowResponse)(nil), // 58: master_pb.VolumeGrowResponse - nil, // 59: master_pb.Heartbeat.MaxVolumeCountsEntry - nil, // 60: master_pb.StorageBackend.PropertiesEntry - (*SuperBlockExtra_ErasureCoding)(nil), // 61: master_pb.SuperBlockExtra.ErasureCoding - (*LookupVolumeResponse_VolumeIdLocation)(nil), // 62: master_pb.LookupVolumeResponse.VolumeIdLocation - nil, // 63: master_pb.DataNodeInfo.DiskInfosEntry - nil, // 64: master_pb.RackInfo.DiskInfosEntry - nil, // 65: master_pb.DataCenterInfo.DiskInfosEntry - nil, // 66: master_pb.TopologyInfo.DiskInfosEntry - (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 67: master_pb.LookupEcVolumeResponse.EcShardIdLocation - (*ListClusterNodesResponse_ClusterNode)(nil), // 68: master_pb.ListClusterNodesResponse.ClusterNode - (*RaftListClusterServersResponse_ClusterServers)(nil), // 69: master_pb.RaftListClusterServersResponse.ClusterServers + (*AssignResponse)(nil), // 16: master_pb.AssignResponse + (*StatisticsRequest)(nil), // 17: master_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 18: master_pb.StatisticsResponse + (*Collection)(nil), // 19: master_pb.Collection + (*CollectionListRequest)(nil), // 20: master_pb.CollectionListRequest + (*CollectionListResponse)(nil), // 21: master_pb.CollectionListResponse + (*CollectionDeleteRequest)(nil), // 22: master_pb.CollectionDeleteRequest + (*CollectionDeleteResponse)(nil), // 23: master_pb.CollectionDeleteResponse + (*DiskInfo)(nil), // 24: master_pb.DiskInfo + (*DataNodeInfo)(nil), // 25: master_pb.DataNodeInfo + (*RackInfo)(nil), // 26: master_pb.RackInfo + (*DataCenterInfo)(nil), // 27: master_pb.DataCenterInfo + (*TopologyInfo)(nil), // 28: master_pb.TopologyInfo + (*VolumeListRequest)(nil), // 29: master_pb.VolumeListRequest + (*VolumeListResponse)(nil), // 30: master_pb.VolumeListResponse + (*LookupEcVolumeRequest)(nil), // 31: master_pb.LookupEcVolumeRequest + (*LookupEcVolumeResponse)(nil), // 32: master_pb.LookupEcVolumeResponse + (*VacuumVolumeRequest)(nil), // 33: master_pb.VacuumVolumeRequest + (*VacuumVolumeResponse)(nil), // 34: master_pb.VacuumVolumeResponse + (*GetMasterConfigurationRequest)(nil), // 35: master_pb.GetMasterConfigurationRequest + (*GetMasterConfigurationResponse)(nil), // 36: master_pb.GetMasterConfigurationResponse + (*ListClusterNodesRequest)(nil), // 37: master_pb.ListClusterNodesRequest + (*ListClusterNodesResponse)(nil), // 38: master_pb.ListClusterNodesResponse + (*LeaseAdminTokenRequest)(nil), // 39: master_pb.LeaseAdminTokenRequest + (*LeaseAdminTokenResponse)(nil), // 40: master_pb.LeaseAdminTokenResponse + (*ReleaseAdminTokenRequest)(nil), // 41: master_pb.ReleaseAdminTokenRequest + (*ReleaseAdminTokenResponse)(nil), // 42: master_pb.ReleaseAdminTokenResponse + (*PingRequest)(nil), // 43: master_pb.PingRequest + (*PingResponse)(nil), // 44: master_pb.PingResponse + (*RaftAddServerRequest)(nil), // 45: master_pb.RaftAddServerRequest + (*RaftAddServerResponse)(nil), // 46: master_pb.RaftAddServerResponse + (*RaftRemoveServerRequest)(nil), // 47: master_pb.RaftRemoveServerRequest + (*RaftRemoveServerResponse)(nil), // 48: master_pb.RaftRemoveServerResponse + (*RaftListClusterServersRequest)(nil), // 49: master_pb.RaftListClusterServersRequest + (*RaftListClusterServersResponse)(nil), // 50: master_pb.RaftListClusterServersResponse + nil, // 51: master_pb.Heartbeat.MaxVolumeCountsEntry + nil, // 52: master_pb.StorageBackend.PropertiesEntry + (*SuperBlockExtra_ErasureCoding)(nil), // 53: master_pb.SuperBlockExtra.ErasureCoding + (*LookupVolumeResponse_VolumeIdLocation)(nil), // 54: master_pb.LookupVolumeResponse.VolumeIdLocation + nil, // 55: master_pb.DataNodeInfo.DiskInfosEntry + nil, // 56: master_pb.RackInfo.DiskInfosEntry + nil, // 57: master_pb.DataCenterInfo.DiskInfosEntry + nil, // 58: master_pb.TopologyInfo.DiskInfosEntry + (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 59: master_pb.LookupEcVolumeResponse.EcShardIdLocation + (*ListClusterNodesResponse_ClusterNode)(nil), // 60: master_pb.ListClusterNodesResponse.ClusterNode + (*RaftListClusterServersResponse_ClusterServers)(nil), // 61: master_pb.RaftListClusterServersResponse.ClusterServers } var file_master_proto_depIdxs = []int32{ 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage @@ -4509,84 +4410,74 @@ var file_master_proto_depIdxs = []int32{ 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage - 59, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry + 51, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry 5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend - 60, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry - 61, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding + 52, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry + 53, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding 9, // 10: master_pb.KeepConnectedResponse.volume_location:type_name -> master_pb.VolumeLocation 10, // 11: master_pb.KeepConnectedResponse.cluster_node_update:type_name -> master_pb.ClusterNodeUpdate - 62, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation + 54, // 12: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation 14, // 13: master_pb.AssignResponse.replicas:type_name -> master_pb.Location 14, // 14: master_pb.AssignResponse.location:type_name -> master_pb.Location - 20, // 15: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection + 19, // 15: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection 2, // 16: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage 4, // 17: master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage - 63, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry - 26, // 19: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo - 64, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry - 27, // 21: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo - 65, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry - 28, // 23: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo - 66, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry - 29, // 25: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo - 67, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation + 55, // 18: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry + 25, // 19: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo + 56, // 20: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry + 26, // 21: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo + 57, // 22: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry + 27, // 23: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo + 58, // 24: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry + 28, // 25: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo + 59, // 26: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation 5, // 27: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend - 68, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode - 69, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers + 60, // 28: master_pb.ListClusterNodesResponse.cluster_nodes:type_name -> master_pb.ListClusterNodesResponse.ClusterNode + 61, // 29: master_pb.RaftListClusterServersResponse.cluster_servers:type_name -> master_pb.RaftListClusterServersResponse.ClusterServers 14, // 30: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location - 25, // 31: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 32: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 33: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo - 25, // 34: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 24, // 31: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 24, // 32: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 24, // 33: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 24, // 34: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo 14, // 35: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location 0, // 36: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat 8, // 37: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest 12, // 38: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest 15, // 39: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest - 15, // 40: master_pb.Seaweed.StreamAssign:input_type -> master_pb.AssignRequest - 18, // 41: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest - 21, // 42: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest - 23, // 43: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest - 30, // 44: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest - 32, // 45: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest - 34, // 46: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest - 36, // 47: master_pb.Seaweed.DisableVacuum:input_type -> master_pb.DisableVacuumRequest - 38, // 48: master_pb.Seaweed.EnableVacuum:input_type -> master_pb.EnableVacuumRequest - 40, // 49: master_pb.Seaweed.VolumeMarkReadonly:input_type -> master_pb.VolumeMarkReadonlyRequest - 42, // 50: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest - 44, // 51: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest - 46, // 52: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest - 48, // 53: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest - 50, // 54: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest - 56, // 55: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest - 52, // 56: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest - 54, // 57: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest - 16, // 58: master_pb.Seaweed.VolumeGrow:input_type -> master_pb.VolumeGrowRequest - 1, // 59: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse - 11, // 60: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse - 13, // 61: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse - 17, // 62: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse - 17, // 63: master_pb.Seaweed.StreamAssign:output_type -> master_pb.AssignResponse - 19, // 64: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse - 22, // 65: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse - 24, // 66: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse - 31, // 67: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse - 33, // 68: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse - 35, // 69: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse - 37, // 70: master_pb.Seaweed.DisableVacuum:output_type -> master_pb.DisableVacuumResponse - 39, // 71: master_pb.Seaweed.EnableVacuum:output_type -> master_pb.EnableVacuumResponse - 41, // 72: master_pb.Seaweed.VolumeMarkReadonly:output_type -> master_pb.VolumeMarkReadonlyResponse - 43, // 73: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse - 45, // 74: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse - 47, // 75: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse - 49, // 76: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse - 51, // 77: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse - 57, // 78: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse - 53, // 79: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse - 55, // 80: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse - 58, // 81: master_pb.Seaweed.VolumeGrow:output_type -> master_pb.VolumeGrowResponse - 59, // [59:82] is the sub-list for method output_type - 36, // [36:59] is the sub-list for method input_type + 17, // 40: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest + 20, // 41: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest + 22, // 42: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest + 29, // 43: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest + 31, // 44: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest + 33, // 45: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest + 35, // 46: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest + 37, // 47: master_pb.Seaweed.ListClusterNodes:input_type -> master_pb.ListClusterNodesRequest + 39, // 48: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest + 41, // 49: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest + 43, // 50: master_pb.Seaweed.Ping:input_type -> master_pb.PingRequest + 49, // 51: master_pb.Seaweed.RaftListClusterServers:input_type -> master_pb.RaftListClusterServersRequest + 45, // 52: master_pb.Seaweed.RaftAddServer:input_type -> master_pb.RaftAddServerRequest + 47, // 53: master_pb.Seaweed.RaftRemoveServer:input_type -> master_pb.RaftRemoveServerRequest + 1, // 54: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse + 11, // 55: master_pb.Seaweed.KeepConnected:output_type -> master_pb.KeepConnectedResponse + 13, // 56: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse + 16, // 57: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse + 18, // 58: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse + 21, // 59: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse + 23, // 60: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse + 30, // 61: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse + 32, // 62: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse + 34, // 63: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse + 36, // 64: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse + 38, // 65: master_pb.Seaweed.ListClusterNodes:output_type -> master_pb.ListClusterNodesResponse + 40, // 66: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse + 42, // 67: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse + 44, // 68: master_pb.Seaweed.Ping:output_type -> master_pb.PingResponse + 50, // 69: master_pb.Seaweed.RaftListClusterServers:output_type -> master_pb.RaftListClusterServersResponse + 46, // 70: master_pb.Seaweed.RaftAddServer:output_type -> master_pb.RaftAddServerResponse + 48, // 71: master_pb.Seaweed.RaftRemoveServer:output_type -> master_pb.RaftRemoveServerResponse + 54, // [54:72] is the sub-list for method output_type + 36, // [36:54] is the sub-list for method input_type 36, // [36:36] is the sub-list for extension type_name 36, // [36:36] is the sub-list for extension extendee 0, // [0:36] is the sub-list for field type_name @@ -4597,13 +4488,687 @@ func file_master_proto_init() { if File_master_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Heartbeat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeartbeatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeShortInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageBackend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterNodeUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataNodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RackInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataCenterInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListClusterNodesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListClusterNodesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftAddServerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftAddServerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftRemoveServerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftRemoveServerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftListClusterServersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftListClusterServersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra_ErasureCoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListClusterNodesResponse_ClusterNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RaftListClusterServersResponse_ClusterServers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_master_proto_rawDesc), len(file_master_proto_rawDesc)), + RawDescriptor: file_master_proto_rawDesc, NumEnums: 0, - NumMessages: 70, + NumMessages: 62, NumExtensions: 0, NumServices: 1, }, @@ -4612,6 +5177,7 @@ func file_master_proto_init() { MessageInfos: file_master_proto_msgTypes, }.Build() File_master_proto = out.File + file_master_proto_rawDesc = nil file_master_proto_goTypes = nil file_master_proto_depIdxs = nil } diff --git a/weed/pb/master_pb/master_grpc.pb.go b/weed/pb/master_pb/master_grpc.pb.go index 3062c5a5a..f8b11e8c5 100644 --- a/weed/pb/master_pb/master_grpc.pb.go +++ b/weed/pb/master_pb/master_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: master.proto package master_pb @@ -15,53 +11,23 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - Seaweed_SendHeartbeat_FullMethodName = "/master_pb.Seaweed/SendHeartbeat" - Seaweed_KeepConnected_FullMethodName = "/master_pb.Seaweed/KeepConnected" - Seaweed_LookupVolume_FullMethodName = "/master_pb.Seaweed/LookupVolume" - Seaweed_Assign_FullMethodName = "/master_pb.Seaweed/Assign" - Seaweed_StreamAssign_FullMethodName = "/master_pb.Seaweed/StreamAssign" - Seaweed_Statistics_FullMethodName = "/master_pb.Seaweed/Statistics" - Seaweed_CollectionList_FullMethodName = "/master_pb.Seaweed/CollectionList" - Seaweed_CollectionDelete_FullMethodName = "/master_pb.Seaweed/CollectionDelete" - Seaweed_VolumeList_FullMethodName = "/master_pb.Seaweed/VolumeList" - Seaweed_LookupEcVolume_FullMethodName = "/master_pb.Seaweed/LookupEcVolume" - Seaweed_VacuumVolume_FullMethodName = "/master_pb.Seaweed/VacuumVolume" - Seaweed_DisableVacuum_FullMethodName = "/master_pb.Seaweed/DisableVacuum" - Seaweed_EnableVacuum_FullMethodName = "/master_pb.Seaweed/EnableVacuum" - Seaweed_VolumeMarkReadonly_FullMethodName = "/master_pb.Seaweed/VolumeMarkReadonly" - Seaweed_GetMasterConfiguration_FullMethodName = "/master_pb.Seaweed/GetMasterConfiguration" - Seaweed_ListClusterNodes_FullMethodName = "/master_pb.Seaweed/ListClusterNodes" - Seaweed_LeaseAdminToken_FullMethodName = "/master_pb.Seaweed/LeaseAdminToken" - Seaweed_ReleaseAdminToken_FullMethodName = "/master_pb.Seaweed/ReleaseAdminToken" - Seaweed_Ping_FullMethodName = "/master_pb.Seaweed/Ping" - Seaweed_RaftListClusterServers_FullMethodName = "/master_pb.Seaweed/RaftListClusterServers" - Seaweed_RaftAddServer_FullMethodName = "/master_pb.Seaweed/RaftAddServer" - Seaweed_RaftRemoveServer_FullMethodName = "/master_pb.Seaweed/RaftRemoveServer" - Seaweed_VolumeGrow_FullMethodName = "/master_pb.Seaweed/VolumeGrow" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // SeaweedClient is the client API for Seaweed service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type SeaweedClient interface { - SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse], error) - KeepConnected(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse], error) + SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) + KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) - StreamAssign(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[AssignRequest, AssignResponse], error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) - DisableVacuum(ctx context.Context, in *DisableVacuumRequest, opts ...grpc.CallOption) (*DisableVacuumResponse, error) - EnableVacuum(ctx context.Context, in *EnableVacuumRequest, opts ...grpc.CallOption) (*EnableVacuumResponse, error) - VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) ListClusterNodes(ctx context.Context, in *ListClusterNodesRequest, opts ...grpc.CallOption) (*ListClusterNodesResponse, error) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) @@ -70,7 +36,6 @@ type SeaweedClient interface { RaftListClusterServers(ctx context.Context, in *RaftListClusterServersRequest, opts ...grpc.CallOption) (*RaftListClusterServersResponse, error) RaftAddServer(ctx context.Context, in *RaftAddServerRequest, opts ...grpc.CallOption) (*RaftAddServerResponse, error) RaftRemoveServer(ctx context.Context, in *RaftRemoveServerRequest, opts ...grpc.CallOption) (*RaftRemoveServerResponse, error) - VolumeGrow(ctx context.Context, in *VolumeGrowRequest, opts ...grpc.CallOption) (*VolumeGrowResponse, error) } type seaweedClient struct { @@ -81,36 +46,71 @@ func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient { return &seaweedClient{cc} } -func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[0], Seaweed_SendHeartbeat_FullMethodName, cOpts...) +func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) { + stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[Heartbeat, HeartbeatResponse]{ClientStream: stream} + x := &seaweedSendHeartbeatClient{stream} return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_SendHeartbeatClient = grpc.BidiStreamingClient[Heartbeat, HeartbeatResponse] +type Seaweed_SendHeartbeatClient interface { + Send(*Heartbeat) error + Recv() (*HeartbeatResponse, error) + grpc.ClientStream +} -func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[1], Seaweed_KeepConnected_FullMethodName, cOpts...) +type seaweedSendHeartbeatClient struct { + grpc.ClientStream +} + +func (x *seaweedSendHeartbeatClient) Send(m *Heartbeat) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) { + m := new(HeartbeatResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) { + stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[KeepConnectedRequest, KeepConnectedResponse]{ClientStream: stream} + x := &seaweedKeepConnectedClient{stream} return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_KeepConnectedClient = grpc.BidiStreamingClient[KeepConnectedRequest, KeepConnectedResponse] +type Seaweed_KeepConnectedClient interface { + Send(*KeepConnectedRequest) error + Recv() (*KeepConnectedResponse, error) + grpc.ClientStream +} + +type seaweedKeepConnectedClient struct { + grpc.ClientStream +} + +func (x *seaweedKeepConnectedClient) Send(m *KeepConnectedRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedKeepConnectedClient) Recv() (*KeepConnectedResponse, error) { + m := new(KeepConnectedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LookupVolumeResponse) - err := c.cc.Invoke(ctx, Seaweed_LookupVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -118,32 +118,17 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques } func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AssignResponse) - err := c.cc.Invoke(ctx, Seaweed_Assign_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *seaweedClient) StreamAssign(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[AssignRequest, AssignResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &Seaweed_ServiceDesc.Streams[2], Seaweed_StreamAssign_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[AssignRequest, AssignResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_StreamAssignClient = grpc.BidiStreamingClient[AssignRequest, AssignResponse] - func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(StatisticsResponse) - err := c.cc.Invoke(ctx, Seaweed_Statistics_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -151,9 +136,8 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o } func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CollectionListResponse) - err := c.cc.Invoke(ctx, Seaweed_CollectionList_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -161,9 +145,8 @@ func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRe } func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CollectionDeleteResponse) - err := c.cc.Invoke(ctx, Seaweed_CollectionDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...) if err != nil { return nil, err } @@ -171,9 +154,8 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele } func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeListResponse) - err := c.cc.Invoke(ctx, Seaweed_VolumeList_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...) if err != nil { return nil, err } @@ -181,9 +163,8 @@ func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, o } func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LookupEcVolumeResponse) - err := c.cc.Invoke(ctx, Seaweed_LookupEcVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...) if err != nil { return nil, err } @@ -191,39 +172,8 @@ func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRe } func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeResponse) - err := c.cc.Invoke(ctx, Seaweed_VacuumVolume_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedClient) DisableVacuum(ctx context.Context, in *DisableVacuumRequest, opts ...grpc.CallOption) (*DisableVacuumResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(DisableVacuumResponse) - err := c.cc.Invoke(ctx, Seaweed_DisableVacuum_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedClient) EnableVacuum(ctx context.Context, in *EnableVacuumRequest, opts ...grpc.CallOption) (*EnableVacuumResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(EnableVacuumResponse) - err := c.cc.Invoke(ctx, Seaweed_EnableVacuum_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(VolumeMarkReadonlyResponse) - err := c.cc.Invoke(ctx, Seaweed_VolumeMarkReadonly_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VacuumVolume", in, out, opts...) if err != nil { return nil, err } @@ -231,9 +181,8 @@ func (c *seaweedClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkRe } func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetMasterConfigurationResponse) - err := c.cc.Invoke(ctx, Seaweed_GetMasterConfiguration_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...) if err != nil { return nil, err } @@ -241,9 +190,8 @@ func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMaste } func (c *seaweedClient) ListClusterNodes(ctx context.Context, in *ListClusterNodesRequest, opts ...grpc.CallOption) (*ListClusterNodesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ListClusterNodesResponse) - err := c.cc.Invoke(ctx, Seaweed_ListClusterNodes_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListClusterNodes", in, out, opts...) if err != nil { return nil, err } @@ -251,9 +199,8 @@ func (c *seaweedClient) ListClusterNodes(ctx context.Context, in *ListClusterNod } func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(LeaseAdminTokenResponse) - err := c.cc.Invoke(ctx, Seaweed_LeaseAdminToken_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...) if err != nil { return nil, err } @@ -261,9 +208,8 @@ func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminToken } func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReleaseAdminTokenResponse) - err := c.cc.Invoke(ctx, Seaweed_ReleaseAdminToken_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...) if err != nil { return nil, err } @@ -271,9 +217,8 @@ func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminT } func (c *seaweedClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PingResponse) - err := c.cc.Invoke(ctx, Seaweed_Ping_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Ping", in, out, opts...) if err != nil { return nil, err } @@ -281,9 +226,8 @@ func (c *seaweedClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc. } func (c *seaweedClient) RaftListClusterServers(ctx context.Context, in *RaftListClusterServersRequest, opts ...grpc.CallOption) (*RaftListClusterServersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RaftListClusterServersResponse) - err := c.cc.Invoke(ctx, Seaweed_RaftListClusterServers_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/RaftListClusterServers", in, out, opts...) if err != nil { return nil, err } @@ -291,9 +235,8 @@ func (c *seaweedClient) RaftListClusterServers(ctx context.Context, in *RaftList } func (c *seaweedClient) RaftAddServer(ctx context.Context, in *RaftAddServerRequest, opts ...grpc.CallOption) (*RaftAddServerResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RaftAddServerResponse) - err := c.cc.Invoke(ctx, Seaweed_RaftAddServer_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/RaftAddServer", in, out, opts...) if err != nil { return nil, err } @@ -301,19 +244,8 @@ func (c *seaweedClient) RaftAddServer(ctx context.Context, in *RaftAddServerRequ } func (c *seaweedClient) RaftRemoveServer(ctx context.Context, in *RaftRemoveServerRequest, opts ...grpc.CallOption) (*RaftRemoveServerResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(RaftRemoveServerResponse) - err := c.cc.Invoke(ctx, Seaweed_RaftRemoveServer_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedClient) VolumeGrow(ctx context.Context, in *VolumeGrowRequest, opts ...grpc.CallOption) (*VolumeGrowResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(VolumeGrowResponse) - err := c.cc.Invoke(ctx, Seaweed_VolumeGrow_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/RaftRemoveServer", in, out, opts...) if err != nil { return nil, err } @@ -322,22 +254,18 @@ func (c *seaweedClient) VolumeGrow(ctx context.Context, in *VolumeGrowRequest, o // SeaweedServer is the server API for Seaweed service. // All implementations must embed UnimplementedSeaweedServer -// for forward compatibility. +// for forward compatibility type SeaweedServer interface { - SendHeartbeat(grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse]) error - KeepConnected(grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse]) error + SendHeartbeat(Seaweed_SendHeartbeatServer) error + KeepConnected(Seaweed_KeepConnectedServer) error LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) Assign(context.Context, *AssignRequest) (*AssignResponse, error) - StreamAssign(grpc.BidiStreamingServer[AssignRequest, AssignResponse]) error Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) - DisableVacuum(context.Context, *DisableVacuumRequest) (*DisableVacuumResponse, error) - EnableVacuum(context.Context, *EnableVacuumRequest) (*EnableVacuumResponse, error) - VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) ListClusterNodes(context.Context, *ListClusterNodesRequest) (*ListClusterNodesResponse, error) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) @@ -346,21 +274,17 @@ type SeaweedServer interface { RaftListClusterServers(context.Context, *RaftListClusterServersRequest) (*RaftListClusterServersResponse, error) RaftAddServer(context.Context, *RaftAddServerRequest) (*RaftAddServerResponse, error) RaftRemoveServer(context.Context, *RaftRemoveServerRequest) (*RaftRemoveServerResponse, error) - VolumeGrow(context.Context, *VolumeGrowRequest) (*VolumeGrowResponse, error) mustEmbedUnimplementedSeaweedServer() } -// UnimplementedSeaweedServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedServer struct{} +// UnimplementedSeaweedServer must be embedded to have forward compatible implementations. +type UnimplementedSeaweedServer struct { +} -func (UnimplementedSeaweedServer) SendHeartbeat(grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse]) error { +func (UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error { return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented") } -func (UnimplementedSeaweedServer) KeepConnected(grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse]) error { +func (UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error { return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") } func (UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { @@ -369,9 +293,6 @@ func (UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeReq func (UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented") } -func (UnimplementedSeaweedServer) StreamAssign(grpc.BidiStreamingServer[AssignRequest, AssignResponse]) error { - return status.Errorf(codes.Unimplemented, "method StreamAssign not implemented") -} func (UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") } @@ -390,15 +311,6 @@ func (UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolum func (UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented") } -func (UnimplementedSeaweedServer) DisableVacuum(context.Context, *DisableVacuumRequest) (*DisableVacuumResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DisableVacuum not implemented") -} -func (UnimplementedSeaweedServer) EnableVacuum(context.Context, *EnableVacuumRequest) (*EnableVacuumResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EnableVacuum not implemented") -} -func (UnimplementedSeaweedServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented") -} func (UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented") } @@ -423,11 +335,7 @@ func (UnimplementedSeaweedServer) RaftAddServer(context.Context, *RaftAddServerR func (UnimplementedSeaweedServer) RaftRemoveServer(context.Context, *RaftRemoveServerRequest) (*RaftRemoveServerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RaftRemoveServer not implemented") } -func (UnimplementedSeaweedServer) VolumeGrow(context.Context, *VolumeGrowRequest) (*VolumeGrowResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VolumeGrow not implemented") -} func (UnimplementedSeaweedServer) mustEmbedUnimplementedSeaweedServer() {} -func (UnimplementedSeaweedServer) testEmbeddedByValue() {} // UnsafeSeaweedServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to SeaweedServer will @@ -437,29 +345,60 @@ type UnsafeSeaweedServer interface { } func RegisterSeaweedServer(s grpc.ServiceRegistrar, srv SeaweedServer) { - // If the following call pancis, it indicates UnimplementedSeaweedServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&Seaweed_ServiceDesc, srv) } func _Seaweed_SendHeartbeat_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedServer).SendHeartbeat(&grpc.GenericServerStream[Heartbeat, HeartbeatResponse]{ServerStream: stream}) + return srv.(SeaweedServer).SendHeartbeat(&seaweedSendHeartbeatServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_SendHeartbeatServer = grpc.BidiStreamingServer[Heartbeat, HeartbeatResponse] +type Seaweed_SendHeartbeatServer interface { + Send(*HeartbeatResponse) error + Recv() (*Heartbeat, error) + grpc.ServerStream +} + +type seaweedSendHeartbeatServer struct { + grpc.ServerStream +} + +func (x *seaweedSendHeartbeatServer) Send(m *HeartbeatResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedSendHeartbeatServer) Recv() (*Heartbeat, error) { + m := new(Heartbeat) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func _Seaweed_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedServer).KeepConnected(&grpc.GenericServerStream[KeepConnectedRequest, KeepConnectedResponse]{ServerStream: stream}) + return srv.(SeaweedServer).KeepConnected(&seaweedKeepConnectedServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_KeepConnectedServer = grpc.BidiStreamingServer[KeepConnectedRequest, KeepConnectedResponse] +type Seaweed_KeepConnectedServer interface { + Send(*KeepConnectedResponse) error + Recv() (*KeepConnectedRequest, error) + grpc.ServerStream +} + +type seaweedKeepConnectedServer struct { + grpc.ServerStream +} + +func (x *seaweedKeepConnectedServer) Send(m *KeepConnectedResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func _Seaweed_LookupVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LookupVolumeRequest) @@ -471,7 +410,7 @@ func _Seaweed_LookupVolume_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_LookupVolume_FullMethodName, + FullMethod: "/master_pb.Seaweed/LookupVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).LookupVolume(ctx, req.(*LookupVolumeRequest)) @@ -489,7 +428,7 @@ func _Seaweed_Assign_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_Assign_FullMethodName, + FullMethod: "/master_pb.Seaweed/Assign", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).Assign(ctx, req.(*AssignRequest)) @@ -497,13 +436,6 @@ func _Seaweed_Assign_Handler(srv interface{}, ctx context.Context, dec func(inte return interceptor(ctx, in, info, handler) } -func _Seaweed_StreamAssign_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedServer).StreamAssign(&grpc.GenericServerStream[AssignRequest, AssignResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type Seaweed_StreamAssignServer = grpc.BidiStreamingServer[AssignRequest, AssignResponse] - func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StatisticsRequest) if err := dec(in); err != nil { @@ -514,7 +446,7 @@ func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_Statistics_FullMethodName, + FullMethod: "/master_pb.Seaweed/Statistics", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).Statistics(ctx, req.(*StatisticsRequest)) @@ -532,7 +464,7 @@ func _Seaweed_CollectionList_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_CollectionList_FullMethodName, + FullMethod: "/master_pb.Seaweed/CollectionList", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).CollectionList(ctx, req.(*CollectionListRequest)) @@ -550,7 +482,7 @@ func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_CollectionDelete_FullMethodName, + FullMethod: "/master_pb.Seaweed/CollectionDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).CollectionDelete(ctx, req.(*CollectionDeleteRequest)) @@ -568,7 +500,7 @@ func _Seaweed_VolumeList_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_VolumeList_FullMethodName, + FullMethod: "/master_pb.Seaweed/VolumeList", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).VolumeList(ctx, req.(*VolumeListRequest)) @@ -586,7 +518,7 @@ func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_LookupEcVolume_FullMethodName, + FullMethod: "/master_pb.Seaweed/LookupEcVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).LookupEcVolume(ctx, req.(*LookupEcVolumeRequest)) @@ -604,7 +536,7 @@ func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_VacuumVolume_FullMethodName, + FullMethod: "/master_pb.Seaweed/VacuumVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).VacuumVolume(ctx, req.(*VacuumVolumeRequest)) @@ -612,60 +544,6 @@ func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _Seaweed_DisableVacuum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DisableVacuumRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedServer).DisableVacuum(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Seaweed_DisableVacuum_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedServer).DisableVacuum(ctx, req.(*DisableVacuumRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Seaweed_EnableVacuum_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EnableVacuumRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedServer).EnableVacuum(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Seaweed_EnableVacuum_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedServer).EnableVacuum(ctx, req.(*EnableVacuumRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Seaweed_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeMarkReadonlyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedServer).VolumeMarkReadonly(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Seaweed_VolumeMarkReadonly_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetMasterConfigurationRequest) if err := dec(in); err != nil { @@ -676,7 +554,7 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_GetMasterConfiguration_FullMethodName, + FullMethod: "/master_pb.Seaweed/GetMasterConfiguration", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).GetMasterConfiguration(ctx, req.(*GetMasterConfigurationRequest)) @@ -694,7 +572,7 @@ func _Seaweed_ListClusterNodes_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_ListClusterNodes_FullMethodName, + FullMethod: "/master_pb.Seaweed/ListClusterNodes", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).ListClusterNodes(ctx, req.(*ListClusterNodesRequest)) @@ -712,7 +590,7 @@ func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_LeaseAdminToken_FullMethodName, + FullMethod: "/master_pb.Seaweed/LeaseAdminToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest)) @@ -730,7 +608,7 @@ func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_ReleaseAdminToken_FullMethodName, + FullMethod: "/master_pb.Seaweed/ReleaseAdminToken", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest)) @@ -748,7 +626,7 @@ func _Seaweed_Ping_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_Ping_FullMethodName, + FullMethod: "/master_pb.Seaweed/Ping", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).Ping(ctx, req.(*PingRequest)) @@ -766,7 +644,7 @@ func _Seaweed_RaftListClusterServers_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_RaftListClusterServers_FullMethodName, + FullMethod: "/master_pb.Seaweed/RaftListClusterServers", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).RaftListClusterServers(ctx, req.(*RaftListClusterServersRequest)) @@ -784,7 +662,7 @@ func _Seaweed_RaftAddServer_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_RaftAddServer_FullMethodName, + FullMethod: "/master_pb.Seaweed/RaftAddServer", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).RaftAddServer(ctx, req.(*RaftAddServerRequest)) @@ -802,7 +680,7 @@ func _Seaweed_RaftRemoveServer_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: Seaweed_RaftRemoveServer_FullMethodName, + FullMethod: "/master_pb.Seaweed/RaftRemoveServer", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedServer).RaftRemoveServer(ctx, req.(*RaftRemoveServerRequest)) @@ -810,24 +688,6 @@ func _Seaweed_RaftRemoveServer_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Seaweed_VolumeGrow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeGrowRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedServer).VolumeGrow(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Seaweed_VolumeGrow_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedServer).VolumeGrow(ctx, req.(*VolumeGrowRequest)) - } - return interceptor(ctx, in, info, handler) -} - // Seaweed_ServiceDesc is the grpc.ServiceDesc for Seaweed service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -867,18 +727,6 @@ var Seaweed_ServiceDesc = grpc.ServiceDesc{ MethodName: "VacuumVolume", Handler: _Seaweed_VacuumVolume_Handler, }, - { - MethodName: "DisableVacuum", - Handler: _Seaweed_DisableVacuum_Handler, - }, - { - MethodName: "EnableVacuum", - Handler: _Seaweed_EnableVacuum_Handler, - }, - { - MethodName: "VolumeMarkReadonly", - Handler: _Seaweed_VolumeMarkReadonly_Handler, - }, { MethodName: "GetMasterConfiguration", Handler: _Seaweed_GetMasterConfiguration_Handler, @@ -911,10 +759,6 @@ var Seaweed_ServiceDesc = grpc.ServiceDesc{ MethodName: "RaftRemoveServer", Handler: _Seaweed_RaftRemoveServer_Handler, }, - { - MethodName: "VolumeGrow", - Handler: _Seaweed_VolumeGrow_Handler, - }, }, Streams: []grpc.StreamDesc{ { @@ -929,12 +773,6 @@ var Seaweed_ServiceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, - { - StreamName: "StreamAssign", - Handler: _Seaweed_StreamAssign_Handler, - ServerStreams: true, - ClientStreams: true, - }, }, Metadata: "master.proto", } diff --git a/weed/pb/master_pb/master_helper.go b/weed/pb/master_pb/master_helper.go deleted file mode 100644 index 52006fdee..000000000 --- a/weed/pb/master_pb/master_helper.go +++ /dev/null @@ -1,5 +0,0 @@ -package master_pb - -func (v *VolumeLocation) IsEmptyUrl() bool { - return v.Url == "" || v.Url == ":0" -} diff --git a/weed/pb/message.fbs b/weed/pb/message.fbs deleted file mode 100644 index 170551df7..000000000 --- a/weed/pb/message.fbs +++ /dev/null @@ -1,23 +0,0 @@ -table NameValue { - name:string (key); - value:string; -} -table Message { - seq_delta:int32 (id:0); - ts_ms_delta:int32 (id:1); - properties:[NameValue] (id:2); - key:string (id:3); // bytes - data:string (id:4); // bytes -} - -table MessageBatch { - producer_id:int32 (id:0); - producer_epoch:int32 (id:1); - segment_id:int32 (id:2); - flags: int32 (id:3); - segment_seq_base:int64 (id:4); - segment_seq_max_delta:int32 (id:5); - ts_ms_base:int64 (id:6); - ts_ms_max_delta:int32 (id:7); - messages: [Message] (id:8); -} diff --git a/weed/pb/message_fbs/Message.go b/weed/pb/message_fbs/Message.go deleted file mode 100644 index e9ef83616..000000000 --- a/weed/pb/message_fbs/Message.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package message_fbs - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type Message struct { - _tab flatbuffers.Table -} - -func GetRootAsMessage(buf []byte, offset flatbuffers.UOffsetT) *Message { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &Message{} - x.Init(buf, n+offset) - return x -} - -func GetSizePrefixedRootAsMessage(buf []byte, offset flatbuffers.UOffsetT) *Message { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &Message{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func (rcv *Message) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *Message) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *Message) SeqDelta() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *Message) MutateSeqDelta(n int32) bool { - return rcv._tab.MutateInt32Slot(4, n) -} - -func (rcv *Message) TsMsDelta() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *Message) MutateTsMsDelta(n int32) bool { - return rcv._tab.MutateInt32Slot(6, n) -} - -func (rcv *Message) Properties(obj *NameValue, j int) bool { - o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) - if o != 0 { - x := rcv._tab.Vector(o) - x += flatbuffers.UOffsetT(j) * 4 - x = rcv._tab.Indirect(x) - obj.Init(rcv._tab.Bytes, x) - return true - } - return false -} - -func (rcv *Message) PropertiesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) - if o != 0 { - return rcv._tab.VectorLen(o) - } - return 0 -} - -func (rcv *Message) Key() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *Message) Data() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func MessageStart(builder *flatbuffers.Builder) { - builder.StartObject(5) -} -func MessageAddSeqDelta(builder *flatbuffers.Builder, seqDelta int32) { - builder.PrependInt32Slot(0, seqDelta, 0) -} -func MessageAddTsMsDelta(builder *flatbuffers.Builder, tsMsDelta int32) { - builder.PrependInt32Slot(1, tsMsDelta, 0) -} -func MessageAddProperties(builder *flatbuffers.Builder, properties flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(properties), 0) -} -func MessageStartPropertiesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) -} -func MessageAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(key), 0) -} -func MessageAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(data), 0) -} -func MessageEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/weed/pb/message_fbs/MessageBatch.go b/weed/pb/message_fbs/MessageBatch.go deleted file mode 100644 index 19d6a4816..000000000 --- a/weed/pb/message_fbs/MessageBatch.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package message_fbs - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type MessageBatch struct { - _tab flatbuffers.Table -} - -func GetRootAsMessageBatch(buf []byte, offset flatbuffers.UOffsetT) *MessageBatch { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &MessageBatch{} - x.Init(buf, n+offset) - return x -} - -func GetSizePrefixedRootAsMessageBatch(buf []byte, offset flatbuffers.UOffsetT) *MessageBatch { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &MessageBatch{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func (rcv *MessageBatch) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *MessageBatch) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *MessageBatch) ProducerId() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateProducerId(n int32) bool { - return rcv._tab.MutateInt32Slot(4, n) -} - -func (rcv *MessageBatch) ProducerEpoch() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateProducerEpoch(n int32) bool { - return rcv._tab.MutateInt32Slot(6, n) -} - -func (rcv *MessageBatch) SegmentId() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateSegmentId(n int32) bool { - return rcv._tab.MutateInt32Slot(8, n) -} - -func (rcv *MessageBatch) Flags() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateFlags(n int32) bool { - return rcv._tab.MutateInt32Slot(10, n) -} - -func (rcv *MessageBatch) SegmentSeqBase() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) - if o != 0 { - return rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateSegmentSeqBase(n int64) bool { - return rcv._tab.MutateInt64Slot(12, n) -} - -func (rcv *MessageBatch) SegmentSeqMaxDelta() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateSegmentSeqMaxDelta(n int32) bool { - return rcv._tab.MutateInt32Slot(14, n) -} - -func (rcv *MessageBatch) TsMsBase() int64 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) - if o != 0 { - return rcv._tab.GetInt64(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateTsMsBase(n int64) bool { - return rcv._tab.MutateInt64Slot(16, n) -} - -func (rcv *MessageBatch) TsMsMaxDelta() int32 { - o := flatbuffers.UOffsetT(rcv._tab.Offset(18)) - if o != 0 { - return rcv._tab.GetInt32(o + rcv._tab.Pos) - } - return 0 -} - -func (rcv *MessageBatch) MutateTsMsMaxDelta(n int32) bool { - return rcv._tab.MutateInt32Slot(18, n) -} - -func (rcv *MessageBatch) Messages(obj *Message, j int) bool { - o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) - if o != 0 { - x := rcv._tab.Vector(o) - x += flatbuffers.UOffsetT(j) * 4 - x = rcv._tab.Indirect(x) - obj.Init(rcv._tab.Bytes, x) - return true - } - return false -} - -func (rcv *MessageBatch) MessagesLength() int { - o := flatbuffers.UOffsetT(rcv._tab.Offset(20)) - if o != 0 { - return rcv._tab.VectorLen(o) - } - return 0 -} - -func MessageBatchStart(builder *flatbuffers.Builder) { - builder.StartObject(9) -} -func MessageBatchAddProducerId(builder *flatbuffers.Builder, producerId int32) { - builder.PrependInt32Slot(0, producerId, 0) -} -func MessageBatchAddProducerEpoch(builder *flatbuffers.Builder, producerEpoch int32) { - builder.PrependInt32Slot(1, producerEpoch, 0) -} -func MessageBatchAddSegmentId(builder *flatbuffers.Builder, segmentId int32) { - builder.PrependInt32Slot(2, segmentId, 0) -} -func MessageBatchAddFlags(builder *flatbuffers.Builder, flags int32) { - builder.PrependInt32Slot(3, flags, 0) -} -func MessageBatchAddSegmentSeqBase(builder *flatbuffers.Builder, segmentSeqBase int64) { - builder.PrependInt64Slot(4, segmentSeqBase, 0) -} -func MessageBatchAddSegmentSeqMaxDelta(builder *flatbuffers.Builder, segmentSeqMaxDelta int32) { - builder.PrependInt32Slot(5, segmentSeqMaxDelta, 0) -} -func MessageBatchAddTsMsBase(builder *flatbuffers.Builder, tsMsBase int64) { - builder.PrependInt64Slot(6, tsMsBase, 0) -} -func MessageBatchAddTsMsMaxDelta(builder *flatbuffers.Builder, tsMsMaxDelta int32) { - builder.PrependInt32Slot(7, tsMsMaxDelta, 0) -} -func MessageBatchAddMessages(builder *flatbuffers.Builder, messages flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(8, flatbuffers.UOffsetT(messages), 0) -} -func MessageBatchStartMessagesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { - return builder.StartVector(4, numElems, 4) -} -func MessageBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/weed/pb/message_fbs/NameValue.go b/weed/pb/message_fbs/NameValue.go deleted file mode 100644 index b5dfdad16..000000000 --- a/weed/pb/message_fbs/NameValue.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package message_fbs - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -type NameValue struct { - _tab flatbuffers.Table -} - -func GetRootAsNameValue(buf []byte, offset flatbuffers.UOffsetT) *NameValue { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &NameValue{} - x.Init(buf, n+offset) - return x -} - -func GetSizePrefixedRootAsNameValue(buf []byte, offset flatbuffers.UOffsetT) *NameValue { - n := flatbuffers.GetUOffsetT(buf[offset+flatbuffers.SizeUint32:]) - x := &NameValue{} - x.Init(buf, n+offset+flatbuffers.SizeUint32) - return x -} - -func (rcv *NameValue) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *NameValue) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *NameValue) Name() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func (rcv *NameValue) Value() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -func NameValueStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func NameValueAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0) -} -func NameValueAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0) -} -func NameValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto new file mode 100644 index 000000000..04446ad16 --- /dev/null +++ b/weed/pb/messaging.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package messaging_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "MessagingProto"; + +////////////////////////////////////////////////// + +service SeaweedMessaging { + + rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) { + } + + rpc Publish (stream PublishRequest) returns (stream PublishResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) { + } + + rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) { + } + +} + +////////////////////////////////////////////////// + +message SubscriberMessage { + message InitMessage { + string namespace = 1; + string topic = 2; + int32 partition = 3; + enum StartPosition { + LATEST = 0; // Start at the newest message + EARLIEST = 1; // Start at the oldest message + TIMESTAMP = 2; // Start after a specified timestamp, exclusive + } + StartPosition startPosition = 4; // Where to begin consuming from + int64 timestampNs = 5; // timestamp in nano seconds + string subscriber_id = 6; // uniquely identify a subscriber to track consumption + } + InitMessage init = 1; + message AckMessage { + int64 message_id = 1; + } + AckMessage ack = 2; + bool is_close = 3; +} + +message Message { + int64 event_time_ns = 1 [jstype = JS_STRING]; + bytes key = 2; // Message key + bytes value = 3; // Message payload + map headers = 4; // Message headers + bool is_close = 5; +} + +message BrokerMessage { + Message data = 1; +} + +message PublishRequest { + message InitMessage { + string namespace = 1; // only needed on the initial request + string topic = 2; // only needed on the initial request + int32 partition = 3; + } + InitMessage init = 1; + Message data = 2; +} + +message PublishResponse { + message ConfigMessage { + int32 partition_count = 1; + } + ConfigMessage config = 1; + message RedirectMessage { + string new_broker = 1; + } + RedirectMessage redirect = 2; + bool is_closed = 3; +} + +message DeleteTopicRequest { + string namespace = 1; + string topic = 2; +} +message DeleteTopicResponse { +} + +message ConfigureTopicRequest { + string namespace = 1; + string topic = 2; + TopicConfiguration configuration = 3; +} +message ConfigureTopicResponse { +} + +message GetTopicConfigurationRequest { + string namespace = 1; + string topic = 2; +} +message GetTopicConfigurationResponse { + TopicConfiguration configuration = 1; +} + +message FindBrokerRequest { + string namespace = 1; + string topic = 2; + int32 parition = 3; +} + +message FindBrokerResponse { + string broker = 1; +} + +message TopicConfiguration { + int32 partition_count = 1; + string collection = 2; + string replication = 3; + bool is_transient = 4; + enum Partitioning { + NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin + KeyHash = 1; // hash by key value + RoundRobin = 2; // round robin pick one partition + } + Partitioning partitoning = 5; +} diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go new file mode 100644 index 000000000..5b9ca1ee3 --- /dev/null +++ b/weed/pb/messaging_pb/messaging.pb.go @@ -0,0 +1,1719 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.17.3 +// source: messaging.proto + +package messaging_pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SubscriberMessage_InitMessage_StartPosition int32 + +const ( + SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message + SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message + SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive +) + +// Enum value maps for SubscriberMessage_InitMessage_StartPosition. +var ( + SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{ + 0: "LATEST", + 1: "EARLIEST", + 2: "TIMESTAMP", + } + SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{ + "LATEST": 0, + "EARLIEST": 1, + "TIMESTAMP": 2, + } +) + +func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition { + p := new(SubscriberMessage_InitMessage_StartPosition) + *p = x + return p +} + +func (x SubscriberMessage_InitMessage_StartPosition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[0].Descriptor() +} + +func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[0] +} + +func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead. +func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0} +} + +type TopicConfiguration_Partitioning int32 + +const ( + TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // If not null, hash by key value. If null, round robin + TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value + TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // round robin pick one partition +) + +// Enum value maps for TopicConfiguration_Partitioning. +var ( + TopicConfiguration_Partitioning_name = map[int32]string{ + 0: "NonNullKeyHash", + 1: "KeyHash", + 2: "RoundRobin", + } + TopicConfiguration_Partitioning_value = map[string]int32{ + "NonNullKeyHash": 0, + "KeyHash": 1, + "RoundRobin": 2, + } +) + +func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning { + p := new(TopicConfiguration_Partitioning) + *p = x + return p +} + +func (x TopicConfiguration_Partitioning) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[1].Descriptor() +} + +func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[1] +} + +func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead. +func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13, 0} +} + +type SubscriberMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"` + IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *SubscriberMessage) Reset() { + *x = SubscriberMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage) ProtoMessage() {} + +func (x *SubscriberMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage { + if x != nil { + return x.Ack + } + return nil +} + +func (x *SubscriberMessage) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload + Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers + IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{1} +} + +func (x *Message) GetEventTimeNs() int64 { + if x != nil { + return x.EventTimeNs + } + return 0 +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Message) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *Message) GetHeaders() map[string][]byte { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Message) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type BrokerMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *BrokerMessage) Reset() { + *x = BrokerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BrokerMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BrokerMessage) ProtoMessage() {} + +func (x *BrokerMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead. +func (*BrokerMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{2} +} + +func (x *BrokerMessage) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3} +} + +func (x *PublishRequest) GetInit() *PublishRequest_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *PublishRequest) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"` + IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. +func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4} +} + +func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage { + if x != nil { + return x.Config + } + return nil +} + +func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage { + if x != nil { + return x.Redirect + } + return nil +} + +func (x *PublishResponse) GetIsClosed() bool { + if x != nil { + return x.IsClosed + } + return false +} + +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type DeleteTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteTopicResponse) Reset() { + *x = DeleteTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicResponse) ProtoMessage() {} + +func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead. +func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{6} +} + +type ConfigureTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *ConfigureTopicRequest) Reset() { + *x = ConfigureTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicRequest) ProtoMessage() {} + +func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead. +func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{7} +} + +func (x *ConfigureTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ConfigureTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type ConfigureTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureTopicResponse) Reset() { + *x = ConfigureTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicResponse) ProtoMessage() {} + +func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead. +func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{8} +} + +type GetTopicConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicConfigurationRequest) Reset() { + *x = GetTopicConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationRequest) ProtoMessage() {} + +func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead. +func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{9} +} + +func (x *GetTopicConfigurationRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetTopicConfigurationRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type GetTopicConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *GetTopicConfigurationResponse) Reset() { + *x = GetTopicConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationResponse) ProtoMessage() {} + +func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead. +func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{10} +} + +func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type FindBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"` +} + +func (x *FindBrokerRequest) Reset() { + *x = FindBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerRequest) ProtoMessage() {} + +func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead. +func (*FindBrokerRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{11} +} + +func (x *FindBrokerRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *FindBrokerRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *FindBrokerRequest) GetParition() int32 { + if x != nil { + return x.Parition + } + return 0 +} + +type FindBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` +} + +func (x *FindBrokerResponse) Reset() { + *x = FindBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerResponse) ProtoMessage() {} + +func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead. +func (*FindBrokerResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{12} +} + +func (x *FindBrokerResponse) GetBroker() string { + if x != nil { + return x.Broker + } + return "" +} + +type TopicConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"` + Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"` +} + +func (x *TopicConfiguration) Reset() { + *x = TopicConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopicConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopicConfiguration) ProtoMessage() {} + +func (x *TopicConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead. +func (*TopicConfiguration) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13} +} + +func (x *TopicConfiguration) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +func (x *TopicConfiguration) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *TopicConfiguration) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *TopicConfiguration) GetIsTransient() bool { + if x != nil { + return x.IsTransient + } + return false +} + +func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning { + if x != nil { + return x.Partitoning + } + return TopicConfiguration_NonNullKeyHash +} + +type SubscriberMessage_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` + StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from + TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nano seconds + SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption +} + +func (x *SubscriberMessage_InitMessage) Reset() { + *x = SubscriberMessage_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_InitMessage) ProtoMessage() {} + +func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SubscriberMessage_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition { + if x != nil { + return x.StartPosition + } + return SubscriberMessage_InitMessage_LATEST +} + +func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 { + if x != nil { + return x.TimestampNs + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetSubscriberId() string { + if x != nil { + return x.SubscriberId + } + return "" +} + +type SubscriberMessage_AckMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` +} + +func (x *SubscriberMessage_AckMessage) Reset() { + *x = SubscriberMessage_AckMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_AckMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_AckMessage) ProtoMessage() {} + +func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *SubscriberMessage_AckMessage) GetMessageId() int64 { + if x != nil { + return x.MessageId + } + return 0 +} + +type PublishRequest_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` +} + +func (x *PublishRequest_InitMessage) Reset() { + *x = PublishRequest_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest_InitMessage) ProtoMessage() {} + +func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead. +func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *PublishRequest_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PublishRequest_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +type PublishResponse_ConfigMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` +} + +func (x *PublishResponse_ConfigMessage) Reset() { + *x = PublishResponse_ConfigMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_ConfigMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_ConfigMessage) ProtoMessage() {} + +func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead. +func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +type PublishResponse_RedirectMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"` +} + +func (x *PublishResponse_RedirectMessage) Reset() { + *x = PublishResponse_RedirectMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_RedirectMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_RedirectMessage) ProtoMessage() {} + +func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead. +func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *PublishResponse_RedirectMessage) GetNewBroker() string { + if x != nil { + return x.NewBroker + } + return "" +} + +var File_messaging_proto protoreflect.FileDescriptor + +var file_messaging_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, + 0x9e, 0x04, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x03, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, + 0xc1, 0x02, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5f, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, + 0x54, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45, + 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, + 0x50, 0x10, 0x02, 0x1a, 0x2b, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, + 0x22, 0xee, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0d, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x02, 0x30, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, + 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01, + 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x29, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5f, 0x0a, 0x0b, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x02, 0x0a, 0x0f, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x1a, 0x38, 0x0a, 0x0d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x0f, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, + 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, + 0x77, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69, + 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, + 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f, + 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32, + 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a, + 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messaging_proto_rawDescOnce sync.Once + file_messaging_proto_rawDescData = file_messaging_proto_rawDesc +) + +func file_messaging_proto_rawDescGZIP() []byte { + file_messaging_proto_rawDescOnce.Do(func() { + file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData) + }) + return file_messaging_proto_rawDescData +} + +var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_messaging_proto_goTypes = []interface{}{ + (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition + (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning + (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage + (*Message)(nil), // 3: messaging_pb.Message + (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage + (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest + (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse + (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest + (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse + (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest + (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse + (*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest + (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse + (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest + (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse + (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration + (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage + (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage + nil, // 18: messaging_pb.Message.HeadersEntry + (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage + (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage + (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage +} +var file_messaging_proto_depIdxs = []int32{ + 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage + 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage + 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry + 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message + 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage + 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message + 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage + 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage + 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration + 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration + 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> messaging_pb.TopicConfiguration.Partitioning + 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition + 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage + 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest + 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest + 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest + 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest + 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest + 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage + 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse + 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse + 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse + 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse + 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse + 18, // [18:24] is the sub-list for method output_type + 12, // [12:18] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_messaging_proto_init() } +func file_messaging_proto_init() { + if File_messaging_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BrokerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopicConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_AckMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_ConfigMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_RedirectMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messaging_proto_rawDesc, + NumEnums: 2, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_messaging_proto_goTypes, + DependencyIndexes: file_messaging_proto_depIdxs, + EnumInfos: file_messaging_proto_enumTypes, + MessageInfos: file_messaging_proto_msgTypes, + }.Build() + File_messaging_proto = out.File + file_messaging_proto_rawDesc = nil + file_messaging_proto_goTypes = nil + file_messaging_proto_depIdxs = nil +} diff --git a/weed/pb/messaging_pb/messaging_grpc.pb.go b/weed/pb/messaging_pb/messaging_grpc.pb.go new file mode 100644 index 000000000..234cffa95 --- /dev/null +++ b/weed/pb/messaging_pb/messaging_grpc.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package messaging_pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// SeaweedMessagingClient is the client API for SeaweedMessaging service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SeaweedMessagingClient interface { + Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) + Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) + FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) +} + +type seaweedMessagingClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient { + return &seaweedMessagingClient{cc} +} + +func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingSubscribeClient{stream} + return x, nil +} + +type SeaweedMessaging_SubscribeClient interface { + Send(*SubscriberMessage) error + Recv() (*BrokerMessage, error) + grpc.ClientStream +} + +type seaweedMessagingSubscribeClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) { + m := new(BrokerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) { + stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingPublishClient{stream} + return x, nil +} + +type SeaweedMessaging_PublishClient interface { + Send(*PublishRequest) error + Recv() (*PublishResponse, error) + grpc.ClientStream +} + +type seaweedMessagingPublishClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) { + m := new(PublishResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) { + out := new(GetTopicConfigurationResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) { + out := new(FindBrokerResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedMessagingServer is the server API for SeaweedMessaging service. +// All implementations must embed UnimplementedSeaweedMessagingServer +// for forward compatibility +type SeaweedMessagingServer interface { + Subscribe(SeaweedMessaging_SubscribeServer) error + Publish(SeaweedMessaging_PublishServer) error + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) + FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) + mustEmbedUnimplementedSeaweedMessagingServer() +} + +// UnimplementedSeaweedMessagingServer must be embedded to have forward compatible implementations. +type UnimplementedSeaweedMessagingServer struct { +} + +func (UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error { + return status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented") +} +func (UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented") +} +func (UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented") +} +func (UnimplementedSeaweedMessagingServer) mustEmbedUnimplementedSeaweedMessagingServer() {} + +// UnsafeSeaweedMessagingServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SeaweedMessagingServer will +// result in compilation errors. +type UnsafeSeaweedMessagingServer interface { + mustEmbedUnimplementedSeaweedMessagingServer() +} + +func RegisterSeaweedMessagingServer(s grpc.ServiceRegistrar, srv SeaweedMessagingServer) { + s.RegisterService(&SeaweedMessaging_ServiceDesc, srv) +} + +func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream}) +} + +type SeaweedMessaging_SubscribeServer interface { + Send(*BrokerMessage) error + Recv() (*SubscriberMessage, error) + grpc.ServerStream +} + +type seaweedMessagingSubscribeServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) { + m := new(SubscriberMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream}) +} + +type SeaweedMessaging_PublishServer interface { + Send(*PublishResponse) error + Recv() (*PublishRequest, error) + grpc.ServerStream +} + +type seaweedMessagingPublishServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) { + m := new(PublishRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FindBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).FindBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SeaweedMessaging_ServiceDesc is the grpc.ServiceDesc for SeaweedMessaging service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SeaweedMessaging_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "messaging_pb.SeaweedMessaging", + HandlerType: (*SeaweedMessagingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteTopic", + Handler: _SeaweedMessaging_DeleteTopic_Handler, + }, + { + MethodName: "ConfigureTopic", + Handler: _SeaweedMessaging_ConfigureTopic_Handler, + }, + { + MethodName: "GetTopicConfiguration", + Handler: _SeaweedMessaging_GetTopicConfiguration_Handler, + }, + { + MethodName: "FindBroker", + Handler: _SeaweedMessaging_FindBroker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _SeaweedMessaging_Subscribe_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Publish", + Handler: _SeaweedMessaging_Publish_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messaging.proto", +} diff --git a/weed/pb/mount.proto b/weed/pb/mount.proto index 4df2990e4..ec0847f12 100644 --- a/weed/pb/mount.proto +++ b/weed/pb/mount.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package messaging_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/mount_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/mount_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "MountProto"; diff --git a/weed/pb/mount_pb/mount.pb.go b/weed/pb/mount_pb/mount.pb.go index 0d9fde354..cbaf533fe 100644 --- a/weed/pb/mount_pb/mount.pb.go +++ b/weed/pb/mount_pb/mount.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: mount.proto package mount_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,17 +21,20 @@ const ( ) type ConfigureRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - CollectionCapacity int64 `protobuf:"varint,1,opt,name=collection_capacity,json=collectionCapacity,proto3" json:"collection_capacity,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CollectionCapacity int64 `protobuf:"varint,1,opt,name=collection_capacity,json=collectionCapacity,proto3" json:"collection_capacity,omitempty"` } func (x *ConfigureRequest) Reset() { *x = ConfigureRequest{} - mi := &file_mount_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_mount_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ConfigureRequest) String() string { @@ -43,7 +45,7 @@ func (*ConfigureRequest) ProtoMessage() {} func (x *ConfigureRequest) ProtoReflect() protoreflect.Message { mi := &file_mount_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,16 +68,18 @@ func (x *ConfigureRequest) GetCollectionCapacity() int64 { } type ConfigureResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *ConfigureResponse) Reset() { *x = ConfigureResponse{} - mi := &file_mount_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_mount_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ConfigureResponse) String() string { @@ -86,7 +90,7 @@ func (*ConfigureResponse) ProtoMessage() {} func (x *ConfigureResponse) ProtoReflect() protoreflect.Message { mi := &file_mount_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -103,31 +107,42 @@ func (*ConfigureResponse) Descriptor() ([]byte, []int) { var File_mount_proto protoreflect.FileDescriptor -const file_mount_proto_rawDesc = "" + - "\n" + - "\vmount.proto\x12\fmessaging_pb\"C\n" + - "\x10ConfigureRequest\x12/\n" + - "\x13collection_capacity\x18\x01 \x01(\x03R\x12collectionCapacity\"\x13\n" + - "\x11ConfigureResponse2^\n" + - "\fSeaweedMount\x12N\n" + - "\tConfigure\x12\x1e.messaging_pb.ConfigureRequest\x1a\x1f.messaging_pb.ConfigureResponse\"\x00BO\n" + - "\x10seaweedfs.clientB\n" + - "MountProtoZ/github.com/seaweedfs/seaweedfs/weed/pb/mount_pbb\x06proto3" +var file_mount_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x43, 0x0a, 0x10, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x61, + 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, + 0x22, 0x13, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5e, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, + 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_mount_proto_rawDescOnce sync.Once - file_mount_proto_rawDescData []byte + file_mount_proto_rawDescData = file_mount_proto_rawDesc ) func file_mount_proto_rawDescGZIP() []byte { file_mount_proto_rawDescOnce.Do(func() { - file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mount_proto_rawDesc), len(file_mount_proto_rawDesc))) + file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_mount_proto_rawDescData) }) return file_mount_proto_rawDescData } var file_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_mount_proto_goTypes = []any{ +var file_mount_proto_goTypes = []interface{}{ (*ConfigureRequest)(nil), // 0: messaging_pb.ConfigureRequest (*ConfigureResponse)(nil), // 1: messaging_pb.ConfigureResponse } @@ -146,11 +161,37 @@ func file_mount_proto_init() { if File_mount_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mount_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_mount_proto_rawDesc), len(file_mount_proto_rawDesc)), + RawDescriptor: file_mount_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -161,6 +202,7 @@ func file_mount_proto_init() { MessageInfos: file_mount_proto_msgTypes, }.Build() File_mount_proto = out.File + file_mount_proto_rawDesc = nil file_mount_proto_goTypes = nil file_mount_proto_depIdxs = nil } diff --git a/weed/pb/mount_pb/mount_grpc.pb.go b/weed/pb/mount_pb/mount_grpc.pb.go index 599a8807a..41737aa21 100644 --- a/weed/pb/mount_pb/mount_grpc.pb.go +++ b/weed/pb/mount_pb/mount_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: mount.proto package mount_pb @@ -15,12 +11,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SeaweedMount_Configure_FullMethodName = "/messaging_pb.SeaweedMount/Configure" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // SeaweedMountClient is the client API for SeaweedMount service. // @@ -38,9 +30,8 @@ func NewSeaweedMountClient(cc grpc.ClientConnInterface) SeaweedMountClient { } func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest, opts ...grpc.CallOption) (*ConfigureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ConfigureResponse) - err := c.cc.Invoke(ctx, SeaweedMount_Configure_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMount/Configure", in, out, opts...) if err != nil { return nil, err } @@ -49,24 +40,20 @@ func (c *seaweedMountClient) Configure(ctx context.Context, in *ConfigureRequest // SeaweedMountServer is the server API for SeaweedMount service. // All implementations must embed UnimplementedSeaweedMountServer -// for forward compatibility. +// for forward compatibility type SeaweedMountServer interface { Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) mustEmbedUnimplementedSeaweedMountServer() } -// UnimplementedSeaweedMountServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedMountServer struct{} +// UnimplementedSeaweedMountServer must be embedded to have forward compatible implementations. +type UnimplementedSeaweedMountServer struct { +} func (UnimplementedSeaweedMountServer) Configure(context.Context, *ConfigureRequest) (*ConfigureResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") } func (UnimplementedSeaweedMountServer) mustEmbedUnimplementedSeaweedMountServer() {} -func (UnimplementedSeaweedMountServer) testEmbeddedByValue() {} // UnsafeSeaweedMountServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to SeaweedMountServer will @@ -76,13 +63,6 @@ type UnsafeSeaweedMountServer interface { } func RegisterSeaweedMountServer(s grpc.ServiceRegistrar, srv SeaweedMountServer) { - // If the following call pancis, it indicates UnimplementedSeaweedMountServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&SeaweedMount_ServiceDesc, srv) } @@ -96,7 +76,7 @@ func _SeaweedMount_Configure_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedMount_Configure_FullMethodName, + FullMethod: "/messaging_pb.SeaweedMount/Configure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedMountServer).Configure(ctx, req.(*ConfigureRequest)) diff --git a/weed/pb/mq_agent.proto b/weed/pb/mq_agent.proto deleted file mode 100644 index 6457cbcd8..000000000 --- a/weed/pb/mq_agent.proto +++ /dev/null @@ -1,85 +0,0 @@ -syntax = "proto3"; - -package messaging_pb; - -import "mq_schema.proto"; - -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb"; -option java_package = "seaweedfs.mq_agent"; -option java_outer_classname = "MessageQueueAgentProto"; - -////////////////////////////////////////////////// - -service SeaweedMessagingAgent { - - // Publishing - rpc StartPublishSession (StartPublishSessionRequest) returns (StartPublishSessionResponse) { - } - rpc ClosePublishSession (ClosePublishSessionRequest) returns (ClosePublishSessionResponse) { - } - rpc PublishRecord (stream PublishRecordRequest) returns (stream PublishRecordResponse) { - } - - // Subscribing - rpc SubscribeRecord (stream SubscribeRecordRequest) returns (stream SubscribeRecordResponse) { - } - -} - -////////////////////////////////////////////////// -message StartPublishSessionRequest { - schema_pb.Topic topic = 1; - int32 partition_count = 2; - schema_pb.RecordType record_type = 3; - string publisher_name = 4; -} -message StartPublishSessionResponse { - string error = 1; - int64 session_id = 2; -} -message ClosePublishSessionRequest { - int64 session_id = 1; -} -message ClosePublishSessionResponse { - string error = 1; -} - -////////////////////////////////////////////////// -message PublishRecordRequest { - int64 session_id = 1; // session_id is required for the first record - bytes key = 2; - schema_pb.RecordValue value = 3; -} -message PublishRecordResponse { - int64 ack_sequence = 1; - string error = 2; - int64 base_offset = 3; // First offset assigned to this batch - int64 last_offset = 4; // Last offset assigned to this batch -} -////////////////////////////////////////////////// -message SubscribeRecordRequest { - message InitSubscribeRecordRequest { - string consumer_group = 1; - string consumer_group_instance_id = 2; - schema_pb.Topic topic = 4; - repeated schema_pb.PartitionOffset partition_offsets = 5; - schema_pb.OffsetType offset_type = 6; - int64 offset_ts_ns = 7; - string filter = 10; - int32 max_subscribed_partitions = 11; - int32 sliding_window_size = 12; - } - InitSubscribeRecordRequest init = 1; - int64 ack_sequence = 2; - bytes ack_key = 3; -} -message SubscribeRecordResponse { - bytes key = 2; - schema_pb.RecordValue value = 3; - int64 ts_ns = 4; - string error = 5; - bool is_end_of_stream = 6; - bool is_end_of_topic = 7; - int64 offset = 8; // Sequential offset within partition -} -////////////////////////////////////////////////// diff --git a/weed/pb/mq_agent_pb/mq_agent.pb.go b/weed/pb/mq_agent_pb/mq_agent.pb.go deleted file mode 100644 index bc321e957..000000000 --- a/weed/pb/mq_agent_pb/mq_agent.pb.go +++ /dev/null @@ -1,763 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 -// source: mq_agent.proto - -package mq_agent_pb - -import ( - schema_pb "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// //////////////////////////////////////////////// -type StartPublishSessionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionCount int32 `protobuf:"varint,2,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` - RecordType *schema_pb.RecordType `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"` - PublisherName string `protobuf:"bytes,4,opt,name=publisher_name,json=publisherName,proto3" json:"publisher_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartPublishSessionRequest) Reset() { - *x = StartPublishSessionRequest{} - mi := &file_mq_agent_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartPublishSessionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartPublishSessionRequest) ProtoMessage() {} - -func (x *StartPublishSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartPublishSessionRequest.ProtoReflect.Descriptor instead. -func (*StartPublishSessionRequest) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{0} -} - -func (x *StartPublishSessionRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *StartPublishSessionRequest) GetPartitionCount() int32 { - if x != nil { - return x.PartitionCount - } - return 0 -} - -func (x *StartPublishSessionRequest) GetRecordType() *schema_pb.RecordType { - if x != nil { - return x.RecordType - } - return nil -} - -func (x *StartPublishSessionRequest) GetPublisherName() string { - if x != nil { - return x.PublisherName - } - return "" -} - -type StartPublishSessionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - SessionId int64 `protobuf:"varint,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *StartPublishSessionResponse) Reset() { - *x = StartPublishSessionResponse{} - mi := &file_mq_agent_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StartPublishSessionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartPublishSessionResponse) ProtoMessage() {} - -func (x *StartPublishSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartPublishSessionResponse.ProtoReflect.Descriptor instead. -func (*StartPublishSessionResponse) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{1} -} - -func (x *StartPublishSessionResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *StartPublishSessionResponse) GetSessionId() int64 { - if x != nil { - return x.SessionId - } - return 0 -} - -type ClosePublishSessionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SessionId int64 `protobuf:"varint,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ClosePublishSessionRequest) Reset() { - *x = ClosePublishSessionRequest{} - mi := &file_mq_agent_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ClosePublishSessionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClosePublishSessionRequest) ProtoMessage() {} - -func (x *ClosePublishSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClosePublishSessionRequest.ProtoReflect.Descriptor instead. -func (*ClosePublishSessionRequest) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{2} -} - -func (x *ClosePublishSessionRequest) GetSessionId() int64 { - if x != nil { - return x.SessionId - } - return 0 -} - -type ClosePublishSessionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ClosePublishSessionResponse) Reset() { - *x = ClosePublishSessionResponse{} - mi := &file_mq_agent_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ClosePublishSessionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClosePublishSessionResponse) ProtoMessage() {} - -func (x *ClosePublishSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClosePublishSessionResponse.ProtoReflect.Descriptor instead. -func (*ClosePublishSessionResponse) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{3} -} - -func (x *ClosePublishSessionResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -// //////////////////////////////////////////////// -type PublishRecordRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SessionId int64 `protobuf:"varint,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` // session_id is required for the first record - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *schema_pb.RecordValue `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishRecordRequest) Reset() { - *x = PublishRecordRequest{} - mi := &file_mq_agent_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishRecordRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishRecordRequest) ProtoMessage() {} - -func (x *PublishRecordRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishRecordRequest.ProtoReflect.Descriptor instead. -func (*PublishRecordRequest) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{4} -} - -func (x *PublishRecordRequest) GetSessionId() int64 { - if x != nil { - return x.SessionId - } - return 0 -} - -func (x *PublishRecordRequest) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *PublishRecordRequest) GetValue() *schema_pb.RecordValue { - if x != nil { - return x.Value - } - return nil -} - -type PublishRecordResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - AckSequence int64 `protobuf:"varint,1,opt,name=ack_sequence,json=ackSequence,proto3" json:"ack_sequence,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - BaseOffset int64 `protobuf:"varint,3,opt,name=base_offset,json=baseOffset,proto3" json:"base_offset,omitempty"` // First offset assigned to this batch - LastOffset int64 `protobuf:"varint,4,opt,name=last_offset,json=lastOffset,proto3" json:"last_offset,omitempty"` // Last offset assigned to this batch - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishRecordResponse) Reset() { - *x = PublishRecordResponse{} - mi := &file_mq_agent_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishRecordResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishRecordResponse) ProtoMessage() {} - -func (x *PublishRecordResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishRecordResponse.ProtoReflect.Descriptor instead. -func (*PublishRecordResponse) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{5} -} - -func (x *PublishRecordResponse) GetAckSequence() int64 { - if x != nil { - return x.AckSequence - } - return 0 -} - -func (x *PublishRecordResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *PublishRecordResponse) GetBaseOffset() int64 { - if x != nil { - return x.BaseOffset - } - return 0 -} - -func (x *PublishRecordResponse) GetLastOffset() int64 { - if x != nil { - return x.LastOffset - } - return 0 -} - -// //////////////////////////////////////////////// -type SubscribeRecordRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Init *SubscribeRecordRequest_InitSubscribeRecordRequest `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` - AckSequence int64 `protobuf:"varint,2,opt,name=ack_sequence,json=ackSequence,proto3" json:"ack_sequence,omitempty"` - AckKey []byte `protobuf:"bytes,3,opt,name=ack_key,json=ackKey,proto3" json:"ack_key,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeRecordRequest) Reset() { - *x = SubscribeRecordRequest{} - mi := &file_mq_agent_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeRecordRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeRecordRequest) ProtoMessage() {} - -func (x *SubscribeRecordRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeRecordRequest.ProtoReflect.Descriptor instead. -func (*SubscribeRecordRequest) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{6} -} - -func (x *SubscribeRecordRequest) GetInit() *SubscribeRecordRequest_InitSubscribeRecordRequest { - if x != nil { - return x.Init - } - return nil -} - -func (x *SubscribeRecordRequest) GetAckSequence() int64 { - if x != nil { - return x.AckSequence - } - return 0 -} - -func (x *SubscribeRecordRequest) GetAckKey() []byte { - if x != nil { - return x.AckKey - } - return nil -} - -type SubscribeRecordResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value *schema_pb.RecordValue `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - TsNs int64 `protobuf:"varint,4,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - IsEndOfStream bool `protobuf:"varint,6,opt,name=is_end_of_stream,json=isEndOfStream,proto3" json:"is_end_of_stream,omitempty"` - IsEndOfTopic bool `protobuf:"varint,7,opt,name=is_end_of_topic,json=isEndOfTopic,proto3" json:"is_end_of_topic,omitempty"` - Offset int64 `protobuf:"varint,8,opt,name=offset,proto3" json:"offset,omitempty"` // Sequential offset within partition - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeRecordResponse) Reset() { - *x = SubscribeRecordResponse{} - mi := &file_mq_agent_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeRecordResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeRecordResponse) ProtoMessage() {} - -func (x *SubscribeRecordResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeRecordResponse.ProtoReflect.Descriptor instead. -func (*SubscribeRecordResponse) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{7} -} - -func (x *SubscribeRecordResponse) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *SubscribeRecordResponse) GetValue() *schema_pb.RecordValue { - if x != nil { - return x.Value - } - return nil -} - -func (x *SubscribeRecordResponse) GetTsNs() int64 { - if x != nil { - return x.TsNs - } - return 0 -} - -func (x *SubscribeRecordResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *SubscribeRecordResponse) GetIsEndOfStream() bool { - if x != nil { - return x.IsEndOfStream - } - return false -} - -func (x *SubscribeRecordResponse) GetIsEndOfTopic() bool { - if x != nil { - return x.IsEndOfTopic - } - return false -} - -func (x *SubscribeRecordResponse) GetOffset() int64 { - if x != nil { - return x.Offset - } - return 0 -} - -type SubscribeRecordRequest_InitSubscribeRecordRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ConsumerGroup string `protobuf:"bytes,1,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - ConsumerGroupInstanceId string `protobuf:"bytes,2,opt,name=consumer_group_instance_id,json=consumerGroupInstanceId,proto3" json:"consumer_group_instance_id,omitempty"` - Topic *schema_pb.Topic `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionOffsets []*schema_pb.PartitionOffset `protobuf:"bytes,5,rep,name=partition_offsets,json=partitionOffsets,proto3" json:"partition_offsets,omitempty"` - OffsetType schema_pb.OffsetType `protobuf:"varint,6,opt,name=offset_type,json=offsetType,proto3,enum=schema_pb.OffsetType" json:"offset_type,omitempty"` - OffsetTsNs int64 `protobuf:"varint,7,opt,name=offset_ts_ns,json=offsetTsNs,proto3" json:"offset_ts_ns,omitempty"` - Filter string `protobuf:"bytes,10,opt,name=filter,proto3" json:"filter,omitempty"` - MaxSubscribedPartitions int32 `protobuf:"varint,11,opt,name=max_subscribed_partitions,json=maxSubscribedPartitions,proto3" json:"max_subscribed_partitions,omitempty"` - SlidingWindowSize int32 `protobuf:"varint,12,opt,name=sliding_window_size,json=slidingWindowSize,proto3" json:"sliding_window_size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) Reset() { - *x = SubscribeRecordRequest_InitSubscribeRecordRequest{} - mi := &file_mq_agent_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeRecordRequest_InitSubscribeRecordRequest) ProtoMessage() {} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_agent_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeRecordRequest_InitSubscribeRecordRequest.ProtoReflect.Descriptor instead. -func (*SubscribeRecordRequest_InitSubscribeRecordRequest) Descriptor() ([]byte, []int) { - return file_mq_agent_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetConsumerGroupInstanceId() string { - if x != nil { - return x.ConsumerGroupInstanceId - } - return "" -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetPartitionOffsets() []*schema_pb.PartitionOffset { - if x != nil { - return x.PartitionOffsets - } - return nil -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetOffsetType() schema_pb.OffsetType { - if x != nil { - return x.OffsetType - } - return schema_pb.OffsetType(0) -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetOffsetTsNs() int64 { - if x != nil { - return x.OffsetTsNs - } - return 0 -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetMaxSubscribedPartitions() int32 { - if x != nil { - return x.MaxSubscribedPartitions - } - return 0 -} - -func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetSlidingWindowSize() int32 { - if x != nil { - return x.SlidingWindowSize - } - return 0 -} - -var File_mq_agent_proto protoreflect.FileDescriptor - -const file_mq_agent_proto_rawDesc = "" + - "\n" + - "\x0emq_agent.proto\x12\fmessaging_pb\x1a\x0fmq_schema.proto\"\xcc\x01\n" + - "\x1aStartPublishSessionRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12'\n" + - "\x0fpartition_count\x18\x02 \x01(\x05R\x0epartitionCount\x126\n" + - "\vrecord_type\x18\x03 \x01(\v2\x15.schema_pb.RecordTypeR\n" + - "recordType\x12%\n" + - "\x0epublisher_name\x18\x04 \x01(\tR\rpublisherName\"R\n" + - "\x1bStartPublishSessionResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\x12\x1d\n" + - "\n" + - "session_id\x18\x02 \x01(\x03R\tsessionId\";\n" + - "\x1aClosePublishSessionRequest\x12\x1d\n" + - "\n" + - "session_id\x18\x01 \x01(\x03R\tsessionId\"3\n" + - "\x1bClosePublishSessionResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"u\n" + - "\x14PublishRecordRequest\x12\x1d\n" + - "\n" + - "session_id\x18\x01 \x01(\x03R\tsessionId\x12\x10\n" + - "\x03key\x18\x02 \x01(\fR\x03key\x12,\n" + - "\x05value\x18\x03 \x01(\v2\x16.schema_pb.RecordValueR\x05value\"\x92\x01\n" + - "\x15PublishRecordResponse\x12!\n" + - "\fack_sequence\x18\x01 \x01(\x03R\vackSequence\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\x12\x1f\n" + - "\vbase_offset\x18\x03 \x01(\x03R\n" + - "baseOffset\x12\x1f\n" + - "\vlast_offset\x18\x04 \x01(\x03R\n" + - "lastOffset\"\xfb\x04\n" + - "\x16SubscribeRecordRequest\x12S\n" + - "\x04init\x18\x01 \x01(\v2?.messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequestR\x04init\x12!\n" + - "\fack_sequence\x18\x02 \x01(\x03R\vackSequence\x12\x17\n" + - "\aack_key\x18\x03 \x01(\fR\x06ackKey\x1a\xcf\x03\n" + - "\x1aInitSubscribeRecordRequest\x12%\n" + - "\x0econsumer_group\x18\x01 \x01(\tR\rconsumerGroup\x12;\n" + - "\x1aconsumer_group_instance_id\x18\x02 \x01(\tR\x17consumerGroupInstanceId\x12&\n" + - "\x05topic\x18\x04 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12G\n" + - "\x11partition_offsets\x18\x05 \x03(\v2\x1a.schema_pb.PartitionOffsetR\x10partitionOffsets\x126\n" + - "\voffset_type\x18\x06 \x01(\x0e2\x15.schema_pb.OffsetTypeR\n" + - "offsetType\x12 \n" + - "\foffset_ts_ns\x18\a \x01(\x03R\n" + - "offsetTsNs\x12\x16\n" + - "\x06filter\x18\n" + - " \x01(\tR\x06filter\x12:\n" + - "\x19max_subscribed_partitions\x18\v \x01(\x05R\x17maxSubscribedPartitions\x12.\n" + - "\x13sliding_window_size\x18\f \x01(\x05R\x11slidingWindowSize\"\xec\x01\n" + - "\x17SubscribeRecordResponse\x12\x10\n" + - "\x03key\x18\x02 \x01(\fR\x03key\x12,\n" + - "\x05value\x18\x03 \x01(\v2\x16.schema_pb.RecordValueR\x05value\x12\x13\n" + - "\x05ts_ns\x18\x04 \x01(\x03R\x04tsNs\x12\x14\n" + - "\x05error\x18\x05 \x01(\tR\x05error\x12'\n" + - "\x10is_end_of_stream\x18\x06 \x01(\bR\risEndOfStream\x12%\n" + - "\x0fis_end_of_topic\x18\a \x01(\bR\fisEndOfTopic\x12\x16\n" + - "\x06offset\x18\b \x01(\x03R\x06offset2\xb9\x03\n" + - "\x15SeaweedMessagingAgent\x12l\n" + - "\x13StartPublishSession\x12(.messaging_pb.StartPublishSessionRequest\x1a).messaging_pb.StartPublishSessionResponse\"\x00\x12l\n" + - "\x13ClosePublishSession\x12(.messaging_pb.ClosePublishSessionRequest\x1a).messaging_pb.ClosePublishSessionResponse\"\x00\x12^\n" + - "\rPublishRecord\x12\".messaging_pb.PublishRecordRequest\x1a#.messaging_pb.PublishRecordResponse\"\x00(\x010\x01\x12d\n" + - "\x0fSubscribeRecord\x12$.messaging_pb.SubscribeRecordRequest\x1a%.messaging_pb.SubscribeRecordResponse\"\x00(\x010\x01B`\n" + - "\x12seaweedfs.mq_agentB\x16MessageQueueAgentProtoZ2github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pbb\x06proto3" - -var ( - file_mq_agent_proto_rawDescOnce sync.Once - file_mq_agent_proto_rawDescData []byte -) - -func file_mq_agent_proto_rawDescGZIP() []byte { - file_mq_agent_proto_rawDescOnce.Do(func() { - file_mq_agent_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mq_agent_proto_rawDesc), len(file_mq_agent_proto_rawDesc))) - }) - return file_mq_agent_proto_rawDescData -} - -var file_mq_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_mq_agent_proto_goTypes = []any{ - (*StartPublishSessionRequest)(nil), // 0: messaging_pb.StartPublishSessionRequest - (*StartPublishSessionResponse)(nil), // 1: messaging_pb.StartPublishSessionResponse - (*ClosePublishSessionRequest)(nil), // 2: messaging_pb.ClosePublishSessionRequest - (*ClosePublishSessionResponse)(nil), // 3: messaging_pb.ClosePublishSessionResponse - (*PublishRecordRequest)(nil), // 4: messaging_pb.PublishRecordRequest - (*PublishRecordResponse)(nil), // 5: messaging_pb.PublishRecordResponse - (*SubscribeRecordRequest)(nil), // 6: messaging_pb.SubscribeRecordRequest - (*SubscribeRecordResponse)(nil), // 7: messaging_pb.SubscribeRecordResponse - (*SubscribeRecordRequest_InitSubscribeRecordRequest)(nil), // 8: messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequest - (*schema_pb.Topic)(nil), // 9: schema_pb.Topic - (*schema_pb.RecordType)(nil), // 10: schema_pb.RecordType - (*schema_pb.RecordValue)(nil), // 11: schema_pb.RecordValue - (*schema_pb.PartitionOffset)(nil), // 12: schema_pb.PartitionOffset - (schema_pb.OffsetType)(0), // 13: schema_pb.OffsetType -} -var file_mq_agent_proto_depIdxs = []int32{ - 9, // 0: messaging_pb.StartPublishSessionRequest.topic:type_name -> schema_pb.Topic - 10, // 1: messaging_pb.StartPublishSessionRequest.record_type:type_name -> schema_pb.RecordType - 11, // 2: messaging_pb.PublishRecordRequest.value:type_name -> schema_pb.RecordValue - 8, // 3: messaging_pb.SubscribeRecordRequest.init:type_name -> messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequest - 11, // 4: messaging_pb.SubscribeRecordResponse.value:type_name -> schema_pb.RecordValue - 9, // 5: messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequest.topic:type_name -> schema_pb.Topic - 12, // 6: messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequest.partition_offsets:type_name -> schema_pb.PartitionOffset - 13, // 7: messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequest.offset_type:type_name -> schema_pb.OffsetType - 0, // 8: messaging_pb.SeaweedMessagingAgent.StartPublishSession:input_type -> messaging_pb.StartPublishSessionRequest - 2, // 9: messaging_pb.SeaweedMessagingAgent.ClosePublishSession:input_type -> messaging_pb.ClosePublishSessionRequest - 4, // 10: messaging_pb.SeaweedMessagingAgent.PublishRecord:input_type -> messaging_pb.PublishRecordRequest - 6, // 11: messaging_pb.SeaweedMessagingAgent.SubscribeRecord:input_type -> messaging_pb.SubscribeRecordRequest - 1, // 12: messaging_pb.SeaweedMessagingAgent.StartPublishSession:output_type -> messaging_pb.StartPublishSessionResponse - 3, // 13: messaging_pb.SeaweedMessagingAgent.ClosePublishSession:output_type -> messaging_pb.ClosePublishSessionResponse - 5, // 14: messaging_pb.SeaweedMessagingAgent.PublishRecord:output_type -> messaging_pb.PublishRecordResponse - 7, // 15: messaging_pb.SeaweedMessagingAgent.SubscribeRecord:output_type -> messaging_pb.SubscribeRecordResponse - 12, // [12:16] is the sub-list for method output_type - 8, // [8:12] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name -} - -func init() { file_mq_agent_proto_init() } -func file_mq_agent_proto_init() { - if File_mq_agent_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_mq_agent_proto_rawDesc), len(file_mq_agent_proto_rawDesc)), - NumEnums: 0, - NumMessages: 9, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_mq_agent_proto_goTypes, - DependencyIndexes: file_mq_agent_proto_depIdxs, - MessageInfos: file_mq_agent_proto_msgTypes, - }.Build() - File_mq_agent_proto = out.File - file_mq_agent_proto_goTypes = nil - file_mq_agent_proto_depIdxs = nil -} diff --git a/weed/pb/mq_agent_pb/mq_agent_grpc.pb.go b/weed/pb/mq_agent_pb/mq_agent_grpc.pb.go deleted file mode 100644 index 5b020bd73..000000000 --- a/weed/pb/mq_agent_pb/mq_agent_grpc.pb.go +++ /dev/null @@ -1,228 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: mq_agent.proto - -package mq_agent_pb - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SeaweedMessagingAgent_StartPublishSession_FullMethodName = "/messaging_pb.SeaweedMessagingAgent/StartPublishSession" - SeaweedMessagingAgent_ClosePublishSession_FullMethodName = "/messaging_pb.SeaweedMessagingAgent/ClosePublishSession" - SeaweedMessagingAgent_PublishRecord_FullMethodName = "/messaging_pb.SeaweedMessagingAgent/PublishRecord" - SeaweedMessagingAgent_SubscribeRecord_FullMethodName = "/messaging_pb.SeaweedMessagingAgent/SubscribeRecord" -) - -// SeaweedMessagingAgentClient is the client API for SeaweedMessagingAgent service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SeaweedMessagingAgentClient interface { - // Publishing - StartPublishSession(ctx context.Context, in *StartPublishSessionRequest, opts ...grpc.CallOption) (*StartPublishSessionResponse, error) - ClosePublishSession(ctx context.Context, in *ClosePublishSessionRequest, opts ...grpc.CallOption) (*ClosePublishSessionResponse, error) - PublishRecord(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishRecordRequest, PublishRecordResponse], error) - // Subscribing - SubscribeRecord(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeRecordRequest, SubscribeRecordResponse], error) -} - -type seaweedMessagingAgentClient struct { - cc grpc.ClientConnInterface -} - -func NewSeaweedMessagingAgentClient(cc grpc.ClientConnInterface) SeaweedMessagingAgentClient { - return &seaweedMessagingAgentClient{cc} -} - -func (c *seaweedMessagingAgentClient) StartPublishSession(ctx context.Context, in *StartPublishSessionRequest, opts ...grpc.CallOption) (*StartPublishSessionResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(StartPublishSessionResponse) - err := c.cc.Invoke(ctx, SeaweedMessagingAgent_StartPublishSession_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingAgentClient) ClosePublishSession(ctx context.Context, in *ClosePublishSessionRequest, opts ...grpc.CallOption) (*ClosePublishSessionResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ClosePublishSessionResponse) - err := c.cc.Invoke(ctx, SeaweedMessagingAgent_ClosePublishSession_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingAgentClient) PublishRecord(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishRecordRequest, PublishRecordResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessagingAgent_ServiceDesc.Streams[0], SeaweedMessagingAgent_PublishRecord_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[PublishRecordRequest, PublishRecordResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessagingAgent_PublishRecordClient = grpc.BidiStreamingClient[PublishRecordRequest, PublishRecordResponse] - -func (c *seaweedMessagingAgentClient) SubscribeRecord(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeRecordRequest, SubscribeRecordResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessagingAgent_ServiceDesc.Streams[1], SeaweedMessagingAgent_SubscribeRecord_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[SubscribeRecordRequest, SubscribeRecordResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessagingAgent_SubscribeRecordClient = grpc.BidiStreamingClient[SubscribeRecordRequest, SubscribeRecordResponse] - -// SeaweedMessagingAgentServer is the server API for SeaweedMessagingAgent service. -// All implementations must embed UnimplementedSeaweedMessagingAgentServer -// for forward compatibility. -type SeaweedMessagingAgentServer interface { - // Publishing - StartPublishSession(context.Context, *StartPublishSessionRequest) (*StartPublishSessionResponse, error) - ClosePublishSession(context.Context, *ClosePublishSessionRequest) (*ClosePublishSessionResponse, error) - PublishRecord(grpc.BidiStreamingServer[PublishRecordRequest, PublishRecordResponse]) error - // Subscribing - SubscribeRecord(grpc.BidiStreamingServer[SubscribeRecordRequest, SubscribeRecordResponse]) error - mustEmbedUnimplementedSeaweedMessagingAgentServer() -} - -// UnimplementedSeaweedMessagingAgentServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedMessagingAgentServer struct{} - -func (UnimplementedSeaweedMessagingAgentServer) StartPublishSession(context.Context, *StartPublishSessionRequest) (*StartPublishSessionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartPublishSession not implemented") -} -func (UnimplementedSeaweedMessagingAgentServer) ClosePublishSession(context.Context, *ClosePublishSessionRequest) (*ClosePublishSessionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClosePublishSession not implemented") -} -func (UnimplementedSeaweedMessagingAgentServer) PublishRecord(grpc.BidiStreamingServer[PublishRecordRequest, PublishRecordResponse]) error { - return status.Errorf(codes.Unimplemented, "method PublishRecord not implemented") -} -func (UnimplementedSeaweedMessagingAgentServer) SubscribeRecord(grpc.BidiStreamingServer[SubscribeRecordRequest, SubscribeRecordResponse]) error { - return status.Errorf(codes.Unimplemented, "method SubscribeRecord not implemented") -} -func (UnimplementedSeaweedMessagingAgentServer) mustEmbedUnimplementedSeaweedMessagingAgentServer() {} -func (UnimplementedSeaweedMessagingAgentServer) testEmbeddedByValue() {} - -// UnsafeSeaweedMessagingAgentServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SeaweedMessagingAgentServer will -// result in compilation errors. -type UnsafeSeaweedMessagingAgentServer interface { - mustEmbedUnimplementedSeaweedMessagingAgentServer() -} - -func RegisterSeaweedMessagingAgentServer(s grpc.ServiceRegistrar, srv SeaweedMessagingAgentServer) { - // If the following call pancis, it indicates UnimplementedSeaweedMessagingAgentServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SeaweedMessagingAgent_ServiceDesc, srv) -} - -func _SeaweedMessagingAgent_StartPublishSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartPublishSessionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingAgentServer).StartPublishSession(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessagingAgent_StartPublishSession_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingAgentServer).StartPublishSession(ctx, req.(*StartPublishSessionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessagingAgent_ClosePublishSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClosePublishSessionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingAgentServer).ClosePublishSession(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessagingAgent_ClosePublishSession_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingAgentServer).ClosePublishSession(ctx, req.(*ClosePublishSessionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessagingAgent_PublishRecord_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingAgentServer).PublishRecord(&grpc.GenericServerStream[PublishRecordRequest, PublishRecordResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessagingAgent_PublishRecordServer = grpc.BidiStreamingServer[PublishRecordRequest, PublishRecordResponse] - -func _SeaweedMessagingAgent_SubscribeRecord_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingAgentServer).SubscribeRecord(&grpc.GenericServerStream[SubscribeRecordRequest, SubscribeRecordResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessagingAgent_SubscribeRecordServer = grpc.BidiStreamingServer[SubscribeRecordRequest, SubscribeRecordResponse] - -// SeaweedMessagingAgent_ServiceDesc is the grpc.ServiceDesc for SeaweedMessagingAgent service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SeaweedMessagingAgent_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "messaging_pb.SeaweedMessagingAgent", - HandlerType: (*SeaweedMessagingAgentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "StartPublishSession", - Handler: _SeaweedMessagingAgent_StartPublishSession_Handler, - }, - { - MethodName: "ClosePublishSession", - Handler: _SeaweedMessagingAgent_ClosePublishSession_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "PublishRecord", - Handler: _SeaweedMessagingAgent_PublishRecord_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SubscribeRecord", - Handler: _SeaweedMessagingAgent_SubscribeRecord_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "mq_agent.proto", -} diff --git a/weed/pb/mq_agent_pb/publish_response_test.go b/weed/pb/mq_agent_pb/publish_response_test.go deleted file mode 100644 index 1f2e767e4..000000000 --- a/weed/pb/mq_agent_pb/publish_response_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package mq_agent_pb - -import ( - "testing" - "google.golang.org/protobuf/proto" -) - -func TestPublishRecordResponseSerialization(t *testing.T) { - // Test that PublishRecordResponse can serialize/deserialize with new offset fields - original := &PublishRecordResponse{ - AckSequence: 123, - Error: "", - BaseOffset: 1000, // New field - LastOffset: 1005, // New field - } - - // Test proto marshaling/unmarshaling - data, err := proto.Marshal(original) - if err != nil { - t.Fatalf("Failed to marshal PublishRecordResponse: %v", err) - } - - restored := &PublishRecordResponse{} - err = proto.Unmarshal(data, restored) - if err != nil { - t.Fatalf("Failed to unmarshal PublishRecordResponse: %v", err) - } - - // Verify all fields are preserved - if restored.AckSequence != original.AckSequence { - t.Errorf("AckSequence = %d, want %d", restored.AckSequence, original.AckSequence) - } - if restored.BaseOffset != original.BaseOffset { - t.Errorf("BaseOffset = %d, want %d", restored.BaseOffset, original.BaseOffset) - } - if restored.LastOffset != original.LastOffset { - t.Errorf("LastOffset = %d, want %d", restored.LastOffset, original.LastOffset) - } -} - -func TestSubscribeRecordResponseSerialization(t *testing.T) { - // Test that SubscribeRecordResponse can serialize/deserialize with new offset field - original := &SubscribeRecordResponse{ - Key: []byte("test-key"), - TsNs: 1234567890, - Error: "", - IsEndOfStream: false, - IsEndOfTopic: false, - Offset: 42, // New field - } - - // Test proto marshaling/unmarshaling - data, err := proto.Marshal(original) - if err != nil { - t.Fatalf("Failed to marshal SubscribeRecordResponse: %v", err) - } - - restored := &SubscribeRecordResponse{} - err = proto.Unmarshal(data, restored) - if err != nil { - t.Fatalf("Failed to unmarshal SubscribeRecordResponse: %v", err) - } - - // Verify all fields are preserved - if restored.TsNs != original.TsNs { - t.Errorf("TsNs = %d, want %d", restored.TsNs, original.TsNs) - } - if restored.Offset != original.Offset { - t.Errorf("Offset = %d, want %d", restored.Offset, original.Offset) - } - if string(restored.Key) != string(original.Key) { - t.Errorf("Key = %s, want %s", string(restored.Key), string(original.Key)) - } -} - -func TestPublishRecordResponseBackwardCompatibility(t *testing.T) { - // Test that PublishRecordResponse without offset fields still works - original := &PublishRecordResponse{ - AckSequence: 123, - Error: "", - // BaseOffset and LastOffset not set (defaults to 0) - } - - data, err := proto.Marshal(original) - if err != nil { - t.Fatalf("Failed to marshal PublishRecordResponse: %v", err) - } - - restored := &PublishRecordResponse{} - err = proto.Unmarshal(data, restored) - if err != nil { - t.Fatalf("Failed to unmarshal PublishRecordResponse: %v", err) - } - - // Offset fields should default to 0 - if restored.BaseOffset != 0 { - t.Errorf("BaseOffset = %d, want 0", restored.BaseOffset) - } - if restored.LastOffset != 0 { - t.Errorf("LastOffset = %d, want 0", restored.LastOffset) - } -} diff --git a/weed/pb/mq_broker.proto b/weed/pb/mq_broker.proto deleted file mode 100644 index 47e4aaa8c..000000000 --- a/weed/pb/mq_broker.proto +++ /dev/null @@ -1,509 +0,0 @@ -syntax = "proto3"; - -package messaging_pb; - -import "mq_schema.proto"; -import "filer.proto"; - -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"; -option java_package = "seaweedfs.mq"; -option java_outer_classname = "MessageQueueProto"; - -////////////////////////////////////////////////// - -service SeaweedMessaging { - - // control plane - rpc FindBrokerLeader (FindBrokerLeaderRequest) returns (FindBrokerLeaderResponse) { - } - - // control plane for balancer - rpc PublisherToPubBalancer (stream PublisherToPubBalancerRequest) returns (stream PublisherToPubBalancerResponse) { - } - rpc BalanceTopics (BalanceTopicsRequest) returns (BalanceTopicsResponse) { - } - - // control plane for topic partitions - rpc ListTopics (ListTopicsRequest) returns (ListTopicsResponse) { - } - rpc TopicExists (TopicExistsRequest) returns (TopicExistsResponse) { - } - rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { - } - rpc LookupTopicBrokers (LookupTopicBrokersRequest) returns (LookupTopicBrokersResponse) { - } - rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) { - } - rpc GetTopicPublishers (GetTopicPublishersRequest) returns (GetTopicPublishersResponse) { - } - rpc GetTopicSubscribers (GetTopicSubscribersRequest) returns (GetTopicSubscribersResponse) { - } - - // invoked by the balancer, running on each broker - rpc AssignTopicPartitions (AssignTopicPartitionsRequest) returns (AssignTopicPartitionsResponse) { - } - rpc ClosePublishers(ClosePublishersRequest) returns (ClosePublishersResponse) { - } - rpc CloseSubscribers(CloseSubscribersRequest) returns (CloseSubscribersResponse) { - } - - // subscriber connects to broker balancer, which coordinates with the subscribers - rpc SubscriberToSubCoordinator (stream SubscriberToSubCoordinatorRequest) returns (stream SubscriberToSubCoordinatorResponse) { - } - - // data plane for each topic partition - rpc PublishMessage (stream PublishMessageRequest) returns (stream PublishMessageResponse) { - } - rpc SubscribeMessage (stream SubscribeMessageRequest) returns (stream SubscribeMessageResponse) { - } - // The lead broker asks a follower broker to follow itself - rpc PublishFollowMe (stream PublishFollowMeRequest) returns (stream PublishFollowMeResponse) { - } - rpc SubscribeFollowMe (stream SubscribeFollowMeRequest) returns (SubscribeFollowMeResponse) { - } - - // Stateless fetch API (Kafka-style) - request/response pattern - // This is the recommended API for Kafka gateway and other stateless clients - // No streaming, no session state - each request is completely independent - rpc FetchMessage (FetchMessageRequest) returns (FetchMessageResponse) { - } - - // SQL query support - get unflushed messages from broker's in-memory buffer (streaming) - rpc GetUnflushedMessages (GetUnflushedMessagesRequest) returns (stream GetUnflushedMessagesResponse) { - } - - // Get comprehensive partition range information (offsets, timestamps, and other fields) - rpc GetPartitionRangeInfo (GetPartitionRangeInfoRequest) returns (GetPartitionRangeInfoResponse) { - } - - // Removed Kafka Gateway Registration - no longer needed -} - -////////////////////////////////////////////////// - -message FindBrokerLeaderRequest { - string filer_group = 1; -} - -message FindBrokerLeaderResponse { - string broker = 1; -} - -////////////////////////////////////////////////// -message BrokerStats { - int32 cpu_usage_percent = 1; - map stats = 2; -} -message TopicPartitionStats { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - int32 publisher_count = 3; - int32 subscriber_count = 4; - string follower = 5; -} - - -message PublisherToPubBalancerRequest { - message InitMessage { - string broker = 1; - } - oneof message { - InitMessage init = 1; - BrokerStats stats = 2; - } -} -message PublisherToPubBalancerResponse { -} - -message BalanceTopicsRequest { -} -message BalanceTopicsResponse { -} - -////////////////////////////////////////////////// -message TopicRetention { - int64 retention_seconds = 1; // retention duration in seconds - bool enabled = 2; // whether retention is enabled -} - -message ConfigureTopicRequest { - schema_pb.Topic topic = 1; - int32 partition_count = 2; - TopicRetention retention = 3; - schema_pb.RecordType message_record_type = 4; // Complete flat schema for the message - repeated string key_columns = 5; // Names of columns that form the key - string schema_format = 6; // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless -} -message ConfigureTopicResponse { - repeated BrokerPartitionAssignment broker_partition_assignments = 2; - TopicRetention retention = 3; - schema_pb.RecordType message_record_type = 4; // Complete flat schema for the message - repeated string key_columns = 5; // Names of columns that form the key - string schema_format = 6; // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless -} -message ListTopicsRequest { -} -message ListTopicsResponse { - repeated schema_pb.Topic topics = 1; -} -message TopicExistsRequest { - schema_pb.Topic topic = 1; -} -message TopicExistsResponse { - bool exists = 1; -} -message LookupTopicBrokersRequest { - schema_pb.Topic topic = 1; -} -message LookupTopicBrokersResponse { - schema_pb.Topic topic = 1; - repeated BrokerPartitionAssignment broker_partition_assignments = 2; -} -message BrokerPartitionAssignment { - schema_pb.Partition partition = 1; - string leader_broker = 2; - string follower_broker = 3; -} -message GetTopicConfigurationRequest { - schema_pb.Topic topic = 1; -} -message GetTopicConfigurationResponse { - schema_pb.Topic topic = 1; - int32 partition_count = 2; - repeated BrokerPartitionAssignment broker_partition_assignments = 3; - int64 created_at_ns = 4; - int64 last_updated_ns = 5; - TopicRetention retention = 6; - schema_pb.RecordType message_record_type = 7; // Complete flat schema for the message - repeated string key_columns = 8; // Names of columns that form the key - string schema_format = 9; // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless -} - -message GetTopicPublishersRequest { - schema_pb.Topic topic = 1; -} -message GetTopicPublishersResponse { - repeated TopicPublisher publishers = 1; -} - -message GetTopicSubscribersRequest { - schema_pb.Topic topic = 1; -} -message GetTopicSubscribersResponse { - repeated TopicSubscriber subscribers = 1; -} - -message TopicPublisher { - string publisher_name = 1; - string client_id = 2; - schema_pb.Partition partition = 3; - int64 connect_time_ns = 4; - int64 last_seen_time_ns = 5; - string broker = 6; - bool is_active = 7; - int64 last_published_offset = 8; - int64 last_acked_offset = 9; -} - -message TopicSubscriber { - string consumer_group = 1; - string consumer_id = 2; - string client_id = 3; - schema_pb.Partition partition = 4; - int64 connect_time_ns = 5; - int64 last_seen_time_ns = 6; - string broker = 7; - bool is_active = 8; - int64 current_offset = 9; // last acknowledged offset - int64 last_received_offset = 10; -} - -message AssignTopicPartitionsRequest { - schema_pb.Topic topic = 1; - repeated BrokerPartitionAssignment broker_partition_assignments = 2; - bool is_leader = 3; - bool is_draining = 4; -} -message AssignTopicPartitionsResponse { -} - -message SubscriberToSubCoordinatorRequest { - message InitMessage { - string consumer_group = 1; - string consumer_group_instance_id = 2; - schema_pb.Topic topic = 3; - // The consumer group instance will be assigned at most max_partition_count partitions. - // If the number of partitions is less than the sum of max_partition_count, - // the consumer group instance may be assigned partitions less than max_partition_count. - // Default is 1. - int32 max_partition_count = 4; - // If consumer group instance changes, wait for rebalance_seconds before reassigning partitions - // Exception: if adding a new consumer group instance and sum of max_partition_count equals the number of partitions, - // the rebalance will happen immediately. - // Default is 10 seconds. - int32 rebalance_seconds = 5; - } - message AckUnAssignmentMessage { - schema_pb.Partition partition = 1; - } - message AckAssignmentMessage { - schema_pb.Partition partition = 1; - } - oneof message { - InitMessage init = 1; - AckAssignmentMessage ack_assignment = 2; - AckUnAssignmentMessage ack_un_assignment = 3; - } -} -message SubscriberToSubCoordinatorResponse { - message Assignment { - BrokerPartitionAssignment partition_assignment = 1; - } - message UnAssignment { - schema_pb.Partition partition = 1; - } - oneof message { - Assignment assignment = 1; - UnAssignment un_assignment = 2; - } -} - -////////////////////////////////////////////////// -message ControlMessage { - bool is_close = 1; - string publisher_name = 2; -} -message DataMessage { - bytes key = 1; - bytes value = 2; - int64 ts_ns = 3; - ControlMessage ctrl = 4; -} -message PublishMessageRequest { - message InitMessage { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - int32 ack_interval = 3; - string follower_broker = 4; - string publisher_name = 5; // for debugging - } - oneof message { - InitMessage init = 1; - DataMessage data = 2; - } -} -message PublishMessageResponse { - int64 ack_ts_ns = 1; // Acknowledgment timestamp in nanoseconds - string error = 2; - bool should_close = 3; - int32 error_code = 4; // Structured error code for reliable error mapping - int64 assigned_offset = 5; // The actual offset assigned by SeaweedMQ for this message -} -message PublishFollowMeRequest { - message InitMessage { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - } - message FlushMessage { - int64 ts_ns = 1; - } - message CloseMessage { - } - oneof message { - InitMessage init = 1; - DataMessage data = 2; - FlushMessage flush = 3; - CloseMessage close = 4; - } -} -message PublishFollowMeResponse { - int64 ack_ts_ns = 1; -} -message SubscribeMessageRequest { - message InitMessage { - string consumer_group = 1; - string consumer_id = 2; - string client_id = 3; - schema_pb.Topic topic = 4; - schema_pb.PartitionOffset partition_offset = 5; - schema_pb.OffsetType offset_type = 6; - string filter = 10; - string follower_broker = 11; - int32 sliding_window_size = 12; - } - message AckMessage { - int64 ts_ns = 1; // Timestamp in nanoseconds for acknowledgment tracking - bytes key = 2; - } - message SeekMessage { - int64 offset = 1; // New offset to seek to - schema_pb.OffsetType offset_type = 2; // EXACT_OFFSET, RESET_TO_LATEST, etc. - } - oneof message { - InitMessage init = 1; - AckMessage ack = 2; - SeekMessage seek = 3; - } -} -message SubscribeMessageResponse { - message SubscribeCtrlMessage { - string error = 1; - bool is_end_of_stream = 2; - bool is_end_of_topic = 3; - } - oneof message { - SubscribeCtrlMessage ctrl = 1; - DataMessage data = 2; - } -} -message SubscribeFollowMeRequest { - message InitMessage { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - string consumer_group = 3; - } - message AckMessage { - int64 ts_ns = 1; - } - message CloseMessage { - } - oneof message { - InitMessage init = 1; - AckMessage ack = 2; - CloseMessage close = 3; - } -} -message SubscribeFollowMeResponse { - int64 ack_ts_ns = 1; -} - -////////////////////////////////////////////////// -// Stateless Fetch API (Kafka-style) -// Unlike SubscribeMessage which maintains long-lived Subscribe loops, -// FetchMessage is completely stateless - each request is independent. -// This eliminates concurrent access issues and stream corruption. -// -// Key differences from SubscribeMessage: -// 1. Request/Response pattern (not streaming) -// 2. No session state maintained -// 3. Each fetch is independent -// 4. Natural support for concurrent reads at different offsets -// 5. Client manages offset tracking (like Kafka) -////////////////////////////////////////////////// - -message FetchMessageRequest { - // Topic and partition to fetch from - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - - // Starting offset for this fetch - int64 start_offset = 3; - - // Maximum number of bytes to return (limit response size) - int32 max_bytes = 4; - - // Maximum number of messages to return - int32 max_messages = 5; - - // Maximum time to wait for data if partition is empty (milliseconds) - // 0 = return immediately, >0 = wait up to this long - int32 max_wait_ms = 6; - - // Minimum bytes before responding (0 = respond immediately) - // This allows batching for efficiency - int32 min_bytes = 7; - - // Consumer identity (for monitoring/debugging) - string consumer_group = 8; - string consumer_id = 9; -} - -message FetchMessageResponse { - // Messages fetched (may be empty if no data available) - repeated DataMessage messages = 1; - - // Metadata about partition state - int64 high_water_mark = 2; // Highest offset available - int64 log_start_offset = 3; // Earliest offset available - bool end_of_partition = 4; // True if no more data available - - // Error handling - string error = 5; - int32 error_code = 6; - - // Next offset to fetch (for client convenience) - // Client should fetch from this offset next - int64 next_offset = 7; -} - -message ClosePublishersRequest { - schema_pb.Topic topic = 1; - int64 unix_time_ns = 2; -} -message ClosePublishersResponse { -} -message CloseSubscribersRequest { - schema_pb.Topic topic = 1; - int64 unix_time_ns = 2; -} -message CloseSubscribersResponse { -} - -////////////////////////////////////////////////// -// SQL query support messages - -message GetUnflushedMessagesRequest { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; - int64 start_buffer_offset = 3; // Filter by buffer offset (messages from buffers >= this offset) -} - -message GetUnflushedMessagesResponse { - filer_pb.LogEntry message = 1; // Single message per response (streaming) - string error = 2; // Error message if any - bool end_of_stream = 3; // Indicates this is the final response -} - -////////////////////////////////////////////////// -// Partition range information messages - -message GetPartitionRangeInfoRequest { - schema_pb.Topic topic = 1; - schema_pb.Partition partition = 2; -} - -message GetPartitionRangeInfoResponse { - // Offset range information - OffsetRangeInfo offset_range = 1; - - // Timestamp range information - TimestampRangeInfo timestamp_range = 2; - - // Future: ID range information (for ordered IDs, UUIDs, etc.) - // IdRangeInfo id_range = 3; - - // Partition metadata - int64 record_count = 10; - int64 active_subscriptions = 11; - string error = 12; -} - -message OffsetRangeInfo { - int64 earliest_offset = 1; - int64 latest_offset = 2; - int64 high_water_mark = 3; -} - -message TimestampRangeInfo { - int64 earliest_timestamp_ns = 1; // Earliest message timestamp in nanoseconds - int64 latest_timestamp_ns = 2; // Latest message timestamp in nanoseconds -} - -// Future extension for ID ranges -// message IdRangeInfo { -// string earliest_id = 1; -// string latest_id = 2; -// string id_type = 3; // "uuid", "sequential", "custom", etc. -// } - -// Removed Kafka Gateway Registration messages - no longer needed diff --git a/weed/pb/mq_pb/mq_broker.pb.go b/weed/pb/mq_pb/mq_broker.pb.go deleted file mode 100644 index 7e7f706cb..000000000 --- a/weed/pb/mq_pb/mq_broker.pb.go +++ /dev/null @@ -1,4838 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 -// source: mq_broker.proto - -package mq_pb - -import ( - filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - schema_pb "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type FindBrokerLeaderRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - FilerGroup string `protobuf:"bytes,1,opt,name=filer_group,json=filerGroup,proto3" json:"filer_group,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FindBrokerLeaderRequest) Reset() { - *x = FindBrokerLeaderRequest{} - mi := &file_mq_broker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FindBrokerLeaderRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FindBrokerLeaderRequest) ProtoMessage() {} - -func (x *FindBrokerLeaderRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FindBrokerLeaderRequest.ProtoReflect.Descriptor instead. -func (*FindBrokerLeaderRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{0} -} - -func (x *FindBrokerLeaderRequest) GetFilerGroup() string { - if x != nil { - return x.FilerGroup - } - return "" -} - -type FindBrokerLeaderResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FindBrokerLeaderResponse) Reset() { - *x = FindBrokerLeaderResponse{} - mi := &file_mq_broker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FindBrokerLeaderResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FindBrokerLeaderResponse) ProtoMessage() {} - -func (x *FindBrokerLeaderResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FindBrokerLeaderResponse.ProtoReflect.Descriptor instead. -func (*FindBrokerLeaderResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{1} -} - -func (x *FindBrokerLeaderResponse) GetBroker() string { - if x != nil { - return x.Broker - } - return "" -} - -// //////////////////////////////////////////////// -type BrokerStats struct { - state protoimpl.MessageState `protogen:"open.v1"` - CpuUsagePercent int32 `protobuf:"varint,1,opt,name=cpu_usage_percent,json=cpuUsagePercent,proto3" json:"cpu_usage_percent,omitempty"` - Stats map[string]*TopicPartitionStats `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BrokerStats) Reset() { - *x = BrokerStats{} - mi := &file_mq_broker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BrokerStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BrokerStats) ProtoMessage() {} - -func (x *BrokerStats) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BrokerStats.ProtoReflect.Descriptor instead. -func (*BrokerStats) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{2} -} - -func (x *BrokerStats) GetCpuUsagePercent() int32 { - if x != nil { - return x.CpuUsagePercent - } - return 0 -} - -func (x *BrokerStats) GetStats() map[string]*TopicPartitionStats { - if x != nil { - return x.Stats - } - return nil -} - -type TopicPartitionStats struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - PublisherCount int32 `protobuf:"varint,3,opt,name=publisher_count,json=publisherCount,proto3" json:"publisher_count,omitempty"` - SubscriberCount int32 `protobuf:"varint,4,opt,name=subscriber_count,json=subscriberCount,proto3" json:"subscriber_count,omitempty"` - Follower string `protobuf:"bytes,5,opt,name=follower,proto3" json:"follower,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicPartitionStats) Reset() { - *x = TopicPartitionStats{} - mi := &file_mq_broker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicPartitionStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicPartitionStats) ProtoMessage() {} - -func (x *TopicPartitionStats) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicPartitionStats.ProtoReflect.Descriptor instead. -func (*TopicPartitionStats) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{3} -} - -func (x *TopicPartitionStats) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *TopicPartitionStats) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *TopicPartitionStats) GetPublisherCount() int32 { - if x != nil { - return x.PublisherCount - } - return 0 -} - -func (x *TopicPartitionStats) GetSubscriberCount() int32 { - if x != nil { - return x.SubscriberCount - } - return 0 -} - -func (x *TopicPartitionStats) GetFollower() string { - if x != nil { - return x.Follower - } - return "" -} - -type PublisherToPubBalancerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *PublisherToPubBalancerRequest_Init - // *PublisherToPubBalancerRequest_Stats - Message isPublisherToPubBalancerRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublisherToPubBalancerRequest) Reset() { - *x = PublisherToPubBalancerRequest{} - mi := &file_mq_broker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublisherToPubBalancerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublisherToPubBalancerRequest) ProtoMessage() {} - -func (x *PublisherToPubBalancerRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublisherToPubBalancerRequest.ProtoReflect.Descriptor instead. -func (*PublisherToPubBalancerRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{4} -} - -func (x *PublisherToPubBalancerRequest) GetMessage() isPublisherToPubBalancerRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *PublisherToPubBalancerRequest) GetInit() *PublisherToPubBalancerRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*PublisherToPubBalancerRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *PublisherToPubBalancerRequest) GetStats() *BrokerStats { - if x != nil { - if x, ok := x.Message.(*PublisherToPubBalancerRequest_Stats); ok { - return x.Stats - } - } - return nil -} - -type isPublisherToPubBalancerRequest_Message interface { - isPublisherToPubBalancerRequest_Message() -} - -type PublisherToPubBalancerRequest_Init struct { - Init *PublisherToPubBalancerRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type PublisherToPubBalancerRequest_Stats struct { - Stats *BrokerStats `protobuf:"bytes,2,opt,name=stats,proto3,oneof"` -} - -func (*PublisherToPubBalancerRequest_Init) isPublisherToPubBalancerRequest_Message() {} - -func (*PublisherToPubBalancerRequest_Stats) isPublisherToPubBalancerRequest_Message() {} - -type PublisherToPubBalancerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublisherToPubBalancerResponse) Reset() { - *x = PublisherToPubBalancerResponse{} - mi := &file_mq_broker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublisherToPubBalancerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublisherToPubBalancerResponse) ProtoMessage() {} - -func (x *PublisherToPubBalancerResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublisherToPubBalancerResponse.ProtoReflect.Descriptor instead. -func (*PublisherToPubBalancerResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{5} -} - -type BalanceTopicsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BalanceTopicsRequest) Reset() { - *x = BalanceTopicsRequest{} - mi := &file_mq_broker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BalanceTopicsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BalanceTopicsRequest) ProtoMessage() {} - -func (x *BalanceTopicsRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BalanceTopicsRequest.ProtoReflect.Descriptor instead. -func (*BalanceTopicsRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{6} -} - -type BalanceTopicsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BalanceTopicsResponse) Reset() { - *x = BalanceTopicsResponse{} - mi := &file_mq_broker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BalanceTopicsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BalanceTopicsResponse) ProtoMessage() {} - -func (x *BalanceTopicsResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BalanceTopicsResponse.ProtoReflect.Descriptor instead. -func (*BalanceTopicsResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{7} -} - -// //////////////////////////////////////////////// -type TopicRetention struct { - state protoimpl.MessageState `protogen:"open.v1"` - RetentionSeconds int64 `protobuf:"varint,1,opt,name=retention_seconds,json=retentionSeconds,proto3" json:"retention_seconds,omitempty"` // retention duration in seconds - Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` // whether retention is enabled - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicRetention) Reset() { - *x = TopicRetention{} - mi := &file_mq_broker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicRetention) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicRetention) ProtoMessage() {} - -func (x *TopicRetention) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicRetention.ProtoReflect.Descriptor instead. -func (*TopicRetention) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{8} -} - -func (x *TopicRetention) GetRetentionSeconds() int64 { - if x != nil { - return x.RetentionSeconds - } - return 0 -} - -func (x *TopicRetention) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -type ConfigureTopicRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionCount int32 `protobuf:"varint,2,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` - Retention *TopicRetention `protobuf:"bytes,3,opt,name=retention,proto3" json:"retention,omitempty"` - MessageRecordType *schema_pb.RecordType `protobuf:"bytes,4,opt,name=message_record_type,json=messageRecordType,proto3" json:"message_record_type,omitempty"` // Complete flat schema for the message - KeyColumns []string `protobuf:"bytes,5,rep,name=key_columns,json=keyColumns,proto3" json:"key_columns,omitempty"` // Names of columns that form the key - SchemaFormat string `protobuf:"bytes,6,opt,name=schema_format,json=schemaFormat,proto3" json:"schema_format,omitempty"` // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConfigureTopicRequest) Reset() { - *x = ConfigureTopicRequest{} - mi := &file_mq_broker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConfigureTopicRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigureTopicRequest) ProtoMessage() {} - -func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead. -func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{9} -} - -func (x *ConfigureTopicRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *ConfigureTopicRequest) GetPartitionCount() int32 { - if x != nil { - return x.PartitionCount - } - return 0 -} - -func (x *ConfigureTopicRequest) GetRetention() *TopicRetention { - if x != nil { - return x.Retention - } - return nil -} - -func (x *ConfigureTopicRequest) GetMessageRecordType() *schema_pb.RecordType { - if x != nil { - return x.MessageRecordType - } - return nil -} - -func (x *ConfigureTopicRequest) GetKeyColumns() []string { - if x != nil { - return x.KeyColumns - } - return nil -} - -func (x *ConfigureTopicRequest) GetSchemaFormat() string { - if x != nil { - return x.SchemaFormat - } - return "" -} - -type ConfigureTopicResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - BrokerPartitionAssignments []*BrokerPartitionAssignment `protobuf:"bytes,2,rep,name=broker_partition_assignments,json=brokerPartitionAssignments,proto3" json:"broker_partition_assignments,omitempty"` - Retention *TopicRetention `protobuf:"bytes,3,opt,name=retention,proto3" json:"retention,omitempty"` - MessageRecordType *schema_pb.RecordType `protobuf:"bytes,4,opt,name=message_record_type,json=messageRecordType,proto3" json:"message_record_type,omitempty"` // Complete flat schema for the message - KeyColumns []string `protobuf:"bytes,5,rep,name=key_columns,json=keyColumns,proto3" json:"key_columns,omitempty"` // Names of columns that form the key - SchemaFormat string `protobuf:"bytes,6,opt,name=schema_format,json=schemaFormat,proto3" json:"schema_format,omitempty"` // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ConfigureTopicResponse) Reset() { - *x = ConfigureTopicResponse{} - mi := &file_mq_broker_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ConfigureTopicResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigureTopicResponse) ProtoMessage() {} - -func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead. -func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{10} -} - -func (x *ConfigureTopicResponse) GetBrokerPartitionAssignments() []*BrokerPartitionAssignment { - if x != nil { - return x.BrokerPartitionAssignments - } - return nil -} - -func (x *ConfigureTopicResponse) GetRetention() *TopicRetention { - if x != nil { - return x.Retention - } - return nil -} - -func (x *ConfigureTopicResponse) GetMessageRecordType() *schema_pb.RecordType { - if x != nil { - return x.MessageRecordType - } - return nil -} - -func (x *ConfigureTopicResponse) GetKeyColumns() []string { - if x != nil { - return x.KeyColumns - } - return nil -} - -func (x *ConfigureTopicResponse) GetSchemaFormat() string { - if x != nil { - return x.SchemaFormat - } - return "" -} - -type ListTopicsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListTopicsRequest) Reset() { - *x = ListTopicsRequest{} - mi := &file_mq_broker_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListTopicsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicsRequest) ProtoMessage() {} - -func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead. -func (*ListTopicsRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{11} -} - -type ListTopicsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topics []*schema_pb.Topic `protobuf:"bytes,1,rep,name=topics,proto3" json:"topics,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListTopicsResponse) Reset() { - *x = ListTopicsResponse{} - mi := &file_mq_broker_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListTopicsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListTopicsResponse) ProtoMessage() {} - -func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead. -func (*ListTopicsResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{12} -} - -func (x *ListTopicsResponse) GetTopics() []*schema_pb.Topic { - if x != nil { - return x.Topics - } - return nil -} - -type TopicExistsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicExistsRequest) Reset() { - *x = TopicExistsRequest{} - mi := &file_mq_broker_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicExistsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicExistsRequest) ProtoMessage() {} - -func (x *TopicExistsRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicExistsRequest.ProtoReflect.Descriptor instead. -func (*TopicExistsRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{13} -} - -func (x *TopicExistsRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -type TopicExistsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicExistsResponse) Reset() { - *x = TopicExistsResponse{} - mi := &file_mq_broker_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicExistsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicExistsResponse) ProtoMessage() {} - -func (x *TopicExistsResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicExistsResponse.ProtoReflect.Descriptor instead. -func (*TopicExistsResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{14} -} - -func (x *TopicExistsResponse) GetExists() bool { - if x != nil { - return x.Exists - } - return false -} - -type LookupTopicBrokersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *LookupTopicBrokersRequest) Reset() { - *x = LookupTopicBrokersRequest{} - mi := &file_mq_broker_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *LookupTopicBrokersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LookupTopicBrokersRequest) ProtoMessage() {} - -func (x *LookupTopicBrokersRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LookupTopicBrokersRequest.ProtoReflect.Descriptor instead. -func (*LookupTopicBrokersRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{15} -} - -func (x *LookupTopicBrokersRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -type LookupTopicBrokersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - BrokerPartitionAssignments []*BrokerPartitionAssignment `protobuf:"bytes,2,rep,name=broker_partition_assignments,json=brokerPartitionAssignments,proto3" json:"broker_partition_assignments,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *LookupTopicBrokersResponse) Reset() { - *x = LookupTopicBrokersResponse{} - mi := &file_mq_broker_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *LookupTopicBrokersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LookupTopicBrokersResponse) ProtoMessage() {} - -func (x *LookupTopicBrokersResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LookupTopicBrokersResponse.ProtoReflect.Descriptor instead. -func (*LookupTopicBrokersResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{16} -} - -func (x *LookupTopicBrokersResponse) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *LookupTopicBrokersResponse) GetBrokerPartitionAssignments() []*BrokerPartitionAssignment { - if x != nil { - return x.BrokerPartitionAssignments - } - return nil -} - -type BrokerPartitionAssignment struct { - state protoimpl.MessageState `protogen:"open.v1"` - Partition *schema_pb.Partition `protobuf:"bytes,1,opt,name=partition,proto3" json:"partition,omitempty"` - LeaderBroker string `protobuf:"bytes,2,opt,name=leader_broker,json=leaderBroker,proto3" json:"leader_broker,omitempty"` - FollowerBroker string `protobuf:"bytes,3,opt,name=follower_broker,json=followerBroker,proto3" json:"follower_broker,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BrokerPartitionAssignment) Reset() { - *x = BrokerPartitionAssignment{} - mi := &file_mq_broker_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BrokerPartitionAssignment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BrokerPartitionAssignment) ProtoMessage() {} - -func (x *BrokerPartitionAssignment) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BrokerPartitionAssignment.ProtoReflect.Descriptor instead. -func (*BrokerPartitionAssignment) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{17} -} - -func (x *BrokerPartitionAssignment) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *BrokerPartitionAssignment) GetLeaderBroker() string { - if x != nil { - return x.LeaderBroker - } - return "" -} - -func (x *BrokerPartitionAssignment) GetFollowerBroker() string { - if x != nil { - return x.FollowerBroker - } - return "" -} - -type GetTopicConfigurationRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicConfigurationRequest) Reset() { - *x = GetTopicConfigurationRequest{} - mi := &file_mq_broker_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicConfigurationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicConfigurationRequest) ProtoMessage() {} - -func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead. -func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{18} -} - -func (x *GetTopicConfigurationRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -type GetTopicConfigurationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionCount int32 `protobuf:"varint,2,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` - BrokerPartitionAssignments []*BrokerPartitionAssignment `protobuf:"bytes,3,rep,name=broker_partition_assignments,json=brokerPartitionAssignments,proto3" json:"broker_partition_assignments,omitempty"` - CreatedAtNs int64 `protobuf:"varint,4,opt,name=created_at_ns,json=createdAtNs,proto3" json:"created_at_ns,omitempty"` - LastUpdatedNs int64 `protobuf:"varint,5,opt,name=last_updated_ns,json=lastUpdatedNs,proto3" json:"last_updated_ns,omitempty"` - Retention *TopicRetention `protobuf:"bytes,6,opt,name=retention,proto3" json:"retention,omitempty"` - MessageRecordType *schema_pb.RecordType `protobuf:"bytes,7,opt,name=message_record_type,json=messageRecordType,proto3" json:"message_record_type,omitempty"` // Complete flat schema for the message - KeyColumns []string `protobuf:"bytes,8,rep,name=key_columns,json=keyColumns,proto3" json:"key_columns,omitempty"` // Names of columns that form the key - SchemaFormat string `protobuf:"bytes,9,opt,name=schema_format,json=schemaFormat,proto3" json:"schema_format,omitempty"` // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicConfigurationResponse) Reset() { - *x = GetTopicConfigurationResponse{} - mi := &file_mq_broker_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicConfigurationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicConfigurationResponse) ProtoMessage() {} - -func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead. -func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{19} -} - -func (x *GetTopicConfigurationResponse) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *GetTopicConfigurationResponse) GetPartitionCount() int32 { - if x != nil { - return x.PartitionCount - } - return 0 -} - -func (x *GetTopicConfigurationResponse) GetBrokerPartitionAssignments() []*BrokerPartitionAssignment { - if x != nil { - return x.BrokerPartitionAssignments - } - return nil -} - -func (x *GetTopicConfigurationResponse) GetCreatedAtNs() int64 { - if x != nil { - return x.CreatedAtNs - } - return 0 -} - -func (x *GetTopicConfigurationResponse) GetLastUpdatedNs() int64 { - if x != nil { - return x.LastUpdatedNs - } - return 0 -} - -func (x *GetTopicConfigurationResponse) GetRetention() *TopicRetention { - if x != nil { - return x.Retention - } - return nil -} - -func (x *GetTopicConfigurationResponse) GetMessageRecordType() *schema_pb.RecordType { - if x != nil { - return x.MessageRecordType - } - return nil -} - -func (x *GetTopicConfigurationResponse) GetKeyColumns() []string { - if x != nil { - return x.KeyColumns - } - return nil -} - -func (x *GetTopicConfigurationResponse) GetSchemaFormat() string { - if x != nil { - return x.SchemaFormat - } - return "" -} - -type GetTopicPublishersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicPublishersRequest) Reset() { - *x = GetTopicPublishersRequest{} - mi := &file_mq_broker_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicPublishersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicPublishersRequest) ProtoMessage() {} - -func (x *GetTopicPublishersRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicPublishersRequest.ProtoReflect.Descriptor instead. -func (*GetTopicPublishersRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{20} -} - -func (x *GetTopicPublishersRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -type GetTopicPublishersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Publishers []*TopicPublisher `protobuf:"bytes,1,rep,name=publishers,proto3" json:"publishers,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicPublishersResponse) Reset() { - *x = GetTopicPublishersResponse{} - mi := &file_mq_broker_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicPublishersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicPublishersResponse) ProtoMessage() {} - -func (x *GetTopicPublishersResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicPublishersResponse.ProtoReflect.Descriptor instead. -func (*GetTopicPublishersResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{21} -} - -func (x *GetTopicPublishersResponse) GetPublishers() []*TopicPublisher { - if x != nil { - return x.Publishers - } - return nil -} - -type GetTopicSubscribersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicSubscribersRequest) Reset() { - *x = GetTopicSubscribersRequest{} - mi := &file_mq_broker_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicSubscribersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicSubscribersRequest) ProtoMessage() {} - -func (x *GetTopicSubscribersRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicSubscribersRequest.ProtoReflect.Descriptor instead. -func (*GetTopicSubscribersRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{22} -} - -func (x *GetTopicSubscribersRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -type GetTopicSubscribersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Subscribers []*TopicSubscriber `protobuf:"bytes,1,rep,name=subscribers,proto3" json:"subscribers,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetTopicSubscribersResponse) Reset() { - *x = GetTopicSubscribersResponse{} - mi := &file_mq_broker_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetTopicSubscribersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTopicSubscribersResponse) ProtoMessage() {} - -func (x *GetTopicSubscribersResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[23] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTopicSubscribersResponse.ProtoReflect.Descriptor instead. -func (*GetTopicSubscribersResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{23} -} - -func (x *GetTopicSubscribersResponse) GetSubscribers() []*TopicSubscriber { - if x != nil { - return x.Subscribers - } - return nil -} - -type TopicPublisher struct { - state protoimpl.MessageState `protogen:"open.v1"` - PublisherName string `protobuf:"bytes,1,opt,name=publisher_name,json=publisherName,proto3" json:"publisher_name,omitempty"` - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,3,opt,name=partition,proto3" json:"partition,omitempty"` - ConnectTimeNs int64 `protobuf:"varint,4,opt,name=connect_time_ns,json=connectTimeNs,proto3" json:"connect_time_ns,omitempty"` - LastSeenTimeNs int64 `protobuf:"varint,5,opt,name=last_seen_time_ns,json=lastSeenTimeNs,proto3" json:"last_seen_time_ns,omitempty"` - Broker string `protobuf:"bytes,6,opt,name=broker,proto3" json:"broker,omitempty"` - IsActive bool `protobuf:"varint,7,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` - LastPublishedOffset int64 `protobuf:"varint,8,opt,name=last_published_offset,json=lastPublishedOffset,proto3" json:"last_published_offset,omitempty"` - LastAckedOffset int64 `protobuf:"varint,9,opt,name=last_acked_offset,json=lastAckedOffset,proto3" json:"last_acked_offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicPublisher) Reset() { - *x = TopicPublisher{} - mi := &file_mq_broker_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicPublisher) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicPublisher) ProtoMessage() {} - -func (x *TopicPublisher) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicPublisher.ProtoReflect.Descriptor instead. -func (*TopicPublisher) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{24} -} - -func (x *TopicPublisher) GetPublisherName() string { - if x != nil { - return x.PublisherName - } - return "" -} - -func (x *TopicPublisher) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *TopicPublisher) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *TopicPublisher) GetConnectTimeNs() int64 { - if x != nil { - return x.ConnectTimeNs - } - return 0 -} - -func (x *TopicPublisher) GetLastSeenTimeNs() int64 { - if x != nil { - return x.LastSeenTimeNs - } - return 0 -} - -func (x *TopicPublisher) GetBroker() string { - if x != nil { - return x.Broker - } - return "" -} - -func (x *TopicPublisher) GetIsActive() bool { - if x != nil { - return x.IsActive - } - return false -} - -func (x *TopicPublisher) GetLastPublishedOffset() int64 { - if x != nil { - return x.LastPublishedOffset - } - return 0 -} - -func (x *TopicPublisher) GetLastAckedOffset() int64 { - if x != nil { - return x.LastAckedOffset - } - return 0 -} - -type TopicSubscriber struct { - state protoimpl.MessageState `protogen:"open.v1"` - ConsumerGroup string `protobuf:"bytes,1,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"` - ConnectTimeNs int64 `protobuf:"varint,5,opt,name=connect_time_ns,json=connectTimeNs,proto3" json:"connect_time_ns,omitempty"` - LastSeenTimeNs int64 `protobuf:"varint,6,opt,name=last_seen_time_ns,json=lastSeenTimeNs,proto3" json:"last_seen_time_ns,omitempty"` - Broker string `protobuf:"bytes,7,opt,name=broker,proto3" json:"broker,omitempty"` - IsActive bool `protobuf:"varint,8,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` - CurrentOffset int64 `protobuf:"varint,9,opt,name=current_offset,json=currentOffset,proto3" json:"current_offset,omitempty"` // last acknowledged offset - LastReceivedOffset int64 `protobuf:"varint,10,opt,name=last_received_offset,json=lastReceivedOffset,proto3" json:"last_received_offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TopicSubscriber) Reset() { - *x = TopicSubscriber{} - mi := &file_mq_broker_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TopicSubscriber) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TopicSubscriber) ProtoMessage() {} - -func (x *TopicSubscriber) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TopicSubscriber.ProtoReflect.Descriptor instead. -func (*TopicSubscriber) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{25} -} - -func (x *TopicSubscriber) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -func (x *TopicSubscriber) GetConsumerId() string { - if x != nil { - return x.ConsumerId - } - return "" -} - -func (x *TopicSubscriber) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *TopicSubscriber) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *TopicSubscriber) GetConnectTimeNs() int64 { - if x != nil { - return x.ConnectTimeNs - } - return 0 -} - -func (x *TopicSubscriber) GetLastSeenTimeNs() int64 { - if x != nil { - return x.LastSeenTimeNs - } - return 0 -} - -func (x *TopicSubscriber) GetBroker() string { - if x != nil { - return x.Broker - } - return "" -} - -func (x *TopicSubscriber) GetIsActive() bool { - if x != nil { - return x.IsActive - } - return false -} - -func (x *TopicSubscriber) GetCurrentOffset() int64 { - if x != nil { - return x.CurrentOffset - } - return 0 -} - -func (x *TopicSubscriber) GetLastReceivedOffset() int64 { - if x != nil { - return x.LastReceivedOffset - } - return 0 -} - -type AssignTopicPartitionsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - BrokerPartitionAssignments []*BrokerPartitionAssignment `protobuf:"bytes,2,rep,name=broker_partition_assignments,json=brokerPartitionAssignments,proto3" json:"broker_partition_assignments,omitempty"` - IsLeader bool `protobuf:"varint,3,opt,name=is_leader,json=isLeader,proto3" json:"is_leader,omitempty"` - IsDraining bool `protobuf:"varint,4,opt,name=is_draining,json=isDraining,proto3" json:"is_draining,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *AssignTopicPartitionsRequest) Reset() { - *x = AssignTopicPartitionsRequest{} - mi := &file_mq_broker_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AssignTopicPartitionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AssignTopicPartitionsRequest) ProtoMessage() {} - -func (x *AssignTopicPartitionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AssignTopicPartitionsRequest.ProtoReflect.Descriptor instead. -func (*AssignTopicPartitionsRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{26} -} - -func (x *AssignTopicPartitionsRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *AssignTopicPartitionsRequest) GetBrokerPartitionAssignments() []*BrokerPartitionAssignment { - if x != nil { - return x.BrokerPartitionAssignments - } - return nil -} - -func (x *AssignTopicPartitionsRequest) GetIsLeader() bool { - if x != nil { - return x.IsLeader - } - return false -} - -func (x *AssignTopicPartitionsRequest) GetIsDraining() bool { - if x != nil { - return x.IsDraining - } - return false -} - -type AssignTopicPartitionsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *AssignTopicPartitionsResponse) Reset() { - *x = AssignTopicPartitionsResponse{} - mi := &file_mq_broker_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AssignTopicPartitionsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AssignTopicPartitionsResponse) ProtoMessage() {} - -func (x *AssignTopicPartitionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AssignTopicPartitionsResponse.ProtoReflect.Descriptor instead. -func (*AssignTopicPartitionsResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{27} -} - -type SubscriberToSubCoordinatorRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *SubscriberToSubCoordinatorRequest_Init - // *SubscriberToSubCoordinatorRequest_AckAssignment - // *SubscriberToSubCoordinatorRequest_AckUnAssignment - Message isSubscriberToSubCoordinatorRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorRequest) Reset() { - *x = SubscriberToSubCoordinatorRequest{} - mi := &file_mq_broker_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorRequest) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorRequest.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{28} -} - -func (x *SubscriberToSubCoordinatorRequest) GetMessage() isSubscriberToSubCoordinatorRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *SubscriberToSubCoordinatorRequest) GetInit() *SubscriberToSubCoordinatorRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*SubscriberToSubCoordinatorRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *SubscriberToSubCoordinatorRequest) GetAckAssignment() *SubscriberToSubCoordinatorRequest_AckAssignmentMessage { - if x != nil { - if x, ok := x.Message.(*SubscriberToSubCoordinatorRequest_AckAssignment); ok { - return x.AckAssignment - } - } - return nil -} - -func (x *SubscriberToSubCoordinatorRequest) GetAckUnAssignment() *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage { - if x != nil { - if x, ok := x.Message.(*SubscriberToSubCoordinatorRequest_AckUnAssignment); ok { - return x.AckUnAssignment - } - } - return nil -} - -type isSubscriberToSubCoordinatorRequest_Message interface { - isSubscriberToSubCoordinatorRequest_Message() -} - -type SubscriberToSubCoordinatorRequest_Init struct { - Init *SubscriberToSubCoordinatorRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type SubscriberToSubCoordinatorRequest_AckAssignment struct { - AckAssignment *SubscriberToSubCoordinatorRequest_AckAssignmentMessage `protobuf:"bytes,2,opt,name=ack_assignment,json=ackAssignment,proto3,oneof"` -} - -type SubscriberToSubCoordinatorRequest_AckUnAssignment struct { - AckUnAssignment *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage `protobuf:"bytes,3,opt,name=ack_un_assignment,json=ackUnAssignment,proto3,oneof"` -} - -func (*SubscriberToSubCoordinatorRequest_Init) isSubscriberToSubCoordinatorRequest_Message() {} - -func (*SubscriberToSubCoordinatorRequest_AckAssignment) isSubscriberToSubCoordinatorRequest_Message() { -} - -func (*SubscriberToSubCoordinatorRequest_AckUnAssignment) isSubscriberToSubCoordinatorRequest_Message() { -} - -type SubscriberToSubCoordinatorResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *SubscriberToSubCoordinatorResponse_Assignment_ - // *SubscriberToSubCoordinatorResponse_UnAssignment_ - Message isSubscriberToSubCoordinatorResponse_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorResponse) Reset() { - *x = SubscriberToSubCoordinatorResponse{} - mi := &file_mq_broker_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorResponse) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorResponse.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{29} -} - -func (x *SubscriberToSubCoordinatorResponse) GetMessage() isSubscriberToSubCoordinatorResponse_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *SubscriberToSubCoordinatorResponse) GetAssignment() *SubscriberToSubCoordinatorResponse_Assignment { - if x != nil { - if x, ok := x.Message.(*SubscriberToSubCoordinatorResponse_Assignment_); ok { - return x.Assignment - } - } - return nil -} - -func (x *SubscriberToSubCoordinatorResponse) GetUnAssignment() *SubscriberToSubCoordinatorResponse_UnAssignment { - if x != nil { - if x, ok := x.Message.(*SubscriberToSubCoordinatorResponse_UnAssignment_); ok { - return x.UnAssignment - } - } - return nil -} - -type isSubscriberToSubCoordinatorResponse_Message interface { - isSubscriberToSubCoordinatorResponse_Message() -} - -type SubscriberToSubCoordinatorResponse_Assignment_ struct { - Assignment *SubscriberToSubCoordinatorResponse_Assignment `protobuf:"bytes,1,opt,name=assignment,proto3,oneof"` -} - -type SubscriberToSubCoordinatorResponse_UnAssignment_ struct { - UnAssignment *SubscriberToSubCoordinatorResponse_UnAssignment `protobuf:"bytes,2,opt,name=un_assignment,json=unAssignment,proto3,oneof"` -} - -func (*SubscriberToSubCoordinatorResponse_Assignment_) isSubscriberToSubCoordinatorResponse_Message() { -} - -func (*SubscriberToSubCoordinatorResponse_UnAssignment_) isSubscriberToSubCoordinatorResponse_Message() { -} - -// //////////////////////////////////////////////// -type ControlMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - IsClose bool `protobuf:"varint,1,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` - PublisherName string `protobuf:"bytes,2,opt,name=publisher_name,json=publisherName,proto3" json:"publisher_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ControlMessage) Reset() { - *x = ControlMessage{} - mi := &file_mq_broker_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ControlMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ControlMessage) ProtoMessage() {} - -func (x *ControlMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ControlMessage.ProtoReflect.Descriptor instead. -func (*ControlMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{30} -} - -func (x *ControlMessage) GetIsClose() bool { - if x != nil { - return x.IsClose - } - return false -} - -func (x *ControlMessage) GetPublisherName() string { - if x != nil { - return x.PublisherName - } - return "" -} - -type DataMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - Ctrl *ControlMessage `protobuf:"bytes,4,opt,name=ctrl,proto3" json:"ctrl,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DataMessage) Reset() { - *x = DataMessage{} - mi := &file_mq_broker_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DataMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DataMessage) ProtoMessage() {} - -func (x *DataMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[31] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DataMessage.ProtoReflect.Descriptor instead. -func (*DataMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{31} -} - -func (x *DataMessage) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *DataMessage) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -func (x *DataMessage) GetTsNs() int64 { - if x != nil { - return x.TsNs - } - return 0 -} - -func (x *DataMessage) GetCtrl() *ControlMessage { - if x != nil { - return x.Ctrl - } - return nil -} - -type PublishMessageRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *PublishMessageRequest_Init - // *PublishMessageRequest_Data - Message isPublishMessageRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishMessageRequest) Reset() { - *x = PublishMessageRequest{} - mi := &file_mq_broker_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishMessageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishMessageRequest) ProtoMessage() {} - -func (x *PublishMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[32] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishMessageRequest.ProtoReflect.Descriptor instead. -func (*PublishMessageRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{32} -} - -func (x *PublishMessageRequest) GetMessage() isPublishMessageRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *PublishMessageRequest) GetInit() *PublishMessageRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*PublishMessageRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *PublishMessageRequest) GetData() *DataMessage { - if x != nil { - if x, ok := x.Message.(*PublishMessageRequest_Data); ok { - return x.Data - } - } - return nil -} - -type isPublishMessageRequest_Message interface { - isPublishMessageRequest_Message() -} - -type PublishMessageRequest_Init struct { - Init *PublishMessageRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type PublishMessageRequest_Data struct { - Data *DataMessage `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -func (*PublishMessageRequest_Init) isPublishMessageRequest_Message() {} - -func (*PublishMessageRequest_Data) isPublishMessageRequest_Message() {} - -type PublishMessageResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - AckTsNs int64 `protobuf:"varint,1,opt,name=ack_ts_ns,json=ackTsNs,proto3" json:"ack_ts_ns,omitempty"` // Acknowledgment timestamp in nanoseconds - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - ShouldClose bool `protobuf:"varint,3,opt,name=should_close,json=shouldClose,proto3" json:"should_close,omitempty"` - ErrorCode int32 `protobuf:"varint,4,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` // Structured error code for reliable error mapping - AssignedOffset int64 `protobuf:"varint,5,opt,name=assigned_offset,json=assignedOffset,proto3" json:"assigned_offset,omitempty"` // The actual offset assigned by SeaweedMQ for this message - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishMessageResponse) Reset() { - *x = PublishMessageResponse{} - mi := &file_mq_broker_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishMessageResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishMessageResponse) ProtoMessage() {} - -func (x *PublishMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[33] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishMessageResponse.ProtoReflect.Descriptor instead. -func (*PublishMessageResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{33} -} - -func (x *PublishMessageResponse) GetAckTsNs() int64 { - if x != nil { - return x.AckTsNs - } - return 0 -} - -func (x *PublishMessageResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *PublishMessageResponse) GetShouldClose() bool { - if x != nil { - return x.ShouldClose - } - return false -} - -func (x *PublishMessageResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -func (x *PublishMessageResponse) GetAssignedOffset() int64 { - if x != nil { - return x.AssignedOffset - } - return 0 -} - -type PublishFollowMeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *PublishFollowMeRequest_Init - // *PublishFollowMeRequest_Data - // *PublishFollowMeRequest_Flush - // *PublishFollowMeRequest_Close - Message isPublishFollowMeRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishFollowMeRequest) Reset() { - *x = PublishFollowMeRequest{} - mi := &file_mq_broker_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishFollowMeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishFollowMeRequest) ProtoMessage() {} - -func (x *PublishFollowMeRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[34] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishFollowMeRequest.ProtoReflect.Descriptor instead. -func (*PublishFollowMeRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{34} -} - -func (x *PublishFollowMeRequest) GetMessage() isPublishFollowMeRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *PublishFollowMeRequest) GetInit() *PublishFollowMeRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*PublishFollowMeRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *PublishFollowMeRequest) GetData() *DataMessage { - if x != nil { - if x, ok := x.Message.(*PublishFollowMeRequest_Data); ok { - return x.Data - } - } - return nil -} - -func (x *PublishFollowMeRequest) GetFlush() *PublishFollowMeRequest_FlushMessage { - if x != nil { - if x, ok := x.Message.(*PublishFollowMeRequest_Flush); ok { - return x.Flush - } - } - return nil -} - -func (x *PublishFollowMeRequest) GetClose() *PublishFollowMeRequest_CloseMessage { - if x != nil { - if x, ok := x.Message.(*PublishFollowMeRequest_Close); ok { - return x.Close - } - } - return nil -} - -type isPublishFollowMeRequest_Message interface { - isPublishFollowMeRequest_Message() -} - -type PublishFollowMeRequest_Init struct { - Init *PublishFollowMeRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type PublishFollowMeRequest_Data struct { - Data *DataMessage `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -type PublishFollowMeRequest_Flush struct { - Flush *PublishFollowMeRequest_FlushMessage `protobuf:"bytes,3,opt,name=flush,proto3,oneof"` -} - -type PublishFollowMeRequest_Close struct { - Close *PublishFollowMeRequest_CloseMessage `protobuf:"bytes,4,opt,name=close,proto3,oneof"` -} - -func (*PublishFollowMeRequest_Init) isPublishFollowMeRequest_Message() {} - -func (*PublishFollowMeRequest_Data) isPublishFollowMeRequest_Message() {} - -func (*PublishFollowMeRequest_Flush) isPublishFollowMeRequest_Message() {} - -func (*PublishFollowMeRequest_Close) isPublishFollowMeRequest_Message() {} - -type PublishFollowMeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - AckTsNs int64 `protobuf:"varint,1,opt,name=ack_ts_ns,json=ackTsNs,proto3" json:"ack_ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishFollowMeResponse) Reset() { - *x = PublishFollowMeResponse{} - mi := &file_mq_broker_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishFollowMeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishFollowMeResponse) ProtoMessage() {} - -func (x *PublishFollowMeResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[35] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishFollowMeResponse.ProtoReflect.Descriptor instead. -func (*PublishFollowMeResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{35} -} - -func (x *PublishFollowMeResponse) GetAckTsNs() int64 { - if x != nil { - return x.AckTsNs - } - return 0 -} - -type SubscribeMessageRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *SubscribeMessageRequest_Init - // *SubscribeMessageRequest_Ack - // *SubscribeMessageRequest_Seek - Message isSubscribeMessageRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageRequest) Reset() { - *x = SubscribeMessageRequest{} - mi := &file_mq_broker_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageRequest) ProtoMessage() {} - -func (x *SubscribeMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[36] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageRequest.ProtoReflect.Descriptor instead. -func (*SubscribeMessageRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{36} -} - -func (x *SubscribeMessageRequest) GetMessage() isSubscribeMessageRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *SubscribeMessageRequest) GetInit() *SubscribeMessageRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeMessageRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *SubscribeMessageRequest) GetAck() *SubscribeMessageRequest_AckMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeMessageRequest_Ack); ok { - return x.Ack - } - } - return nil -} - -func (x *SubscribeMessageRequest) GetSeek() *SubscribeMessageRequest_SeekMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeMessageRequest_Seek); ok { - return x.Seek - } - } - return nil -} - -type isSubscribeMessageRequest_Message interface { - isSubscribeMessageRequest_Message() -} - -type SubscribeMessageRequest_Init struct { - Init *SubscribeMessageRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type SubscribeMessageRequest_Ack struct { - Ack *SubscribeMessageRequest_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3,oneof"` -} - -type SubscribeMessageRequest_Seek struct { - Seek *SubscribeMessageRequest_SeekMessage `protobuf:"bytes,3,opt,name=seek,proto3,oneof"` -} - -func (*SubscribeMessageRequest_Init) isSubscribeMessageRequest_Message() {} - -func (*SubscribeMessageRequest_Ack) isSubscribeMessageRequest_Message() {} - -func (*SubscribeMessageRequest_Seek) isSubscribeMessageRequest_Message() {} - -type SubscribeMessageResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *SubscribeMessageResponse_Ctrl - // *SubscribeMessageResponse_Data - Message isSubscribeMessageResponse_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageResponse) Reset() { - *x = SubscribeMessageResponse{} - mi := &file_mq_broker_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageResponse) ProtoMessage() {} - -func (x *SubscribeMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[37] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageResponse.ProtoReflect.Descriptor instead. -func (*SubscribeMessageResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{37} -} - -func (x *SubscribeMessageResponse) GetMessage() isSubscribeMessageResponse_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *SubscribeMessageResponse) GetCtrl() *SubscribeMessageResponse_SubscribeCtrlMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeMessageResponse_Ctrl); ok { - return x.Ctrl - } - } - return nil -} - -func (x *SubscribeMessageResponse) GetData() *DataMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeMessageResponse_Data); ok { - return x.Data - } - } - return nil -} - -type isSubscribeMessageResponse_Message interface { - isSubscribeMessageResponse_Message() -} - -type SubscribeMessageResponse_Ctrl struct { - Ctrl *SubscribeMessageResponse_SubscribeCtrlMessage `protobuf:"bytes,1,opt,name=ctrl,proto3,oneof"` -} - -type SubscribeMessageResponse_Data struct { - Data *DataMessage `protobuf:"bytes,2,opt,name=data,proto3,oneof"` -} - -func (*SubscribeMessageResponse_Ctrl) isSubscribeMessageResponse_Message() {} - -func (*SubscribeMessageResponse_Data) isSubscribeMessageResponse_Message() {} - -type SubscribeFollowMeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Message: - // - // *SubscribeFollowMeRequest_Init - // *SubscribeFollowMeRequest_Ack - // *SubscribeFollowMeRequest_Close - Message isSubscribeFollowMeRequest_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeFollowMeRequest) Reset() { - *x = SubscribeFollowMeRequest{} - mi := &file_mq_broker_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeFollowMeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeFollowMeRequest) ProtoMessage() {} - -func (x *SubscribeFollowMeRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[38] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeFollowMeRequest.ProtoReflect.Descriptor instead. -func (*SubscribeFollowMeRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{38} -} - -func (x *SubscribeFollowMeRequest) GetMessage() isSubscribeFollowMeRequest_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *SubscribeFollowMeRequest) GetInit() *SubscribeFollowMeRequest_InitMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeFollowMeRequest_Init); ok { - return x.Init - } - } - return nil -} - -func (x *SubscribeFollowMeRequest) GetAck() *SubscribeFollowMeRequest_AckMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeFollowMeRequest_Ack); ok { - return x.Ack - } - } - return nil -} - -func (x *SubscribeFollowMeRequest) GetClose() *SubscribeFollowMeRequest_CloseMessage { - if x != nil { - if x, ok := x.Message.(*SubscribeFollowMeRequest_Close); ok { - return x.Close - } - } - return nil -} - -type isSubscribeFollowMeRequest_Message interface { - isSubscribeFollowMeRequest_Message() -} - -type SubscribeFollowMeRequest_Init struct { - Init *SubscribeFollowMeRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3,oneof"` -} - -type SubscribeFollowMeRequest_Ack struct { - Ack *SubscribeFollowMeRequest_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3,oneof"` -} - -type SubscribeFollowMeRequest_Close struct { - Close *SubscribeFollowMeRequest_CloseMessage `protobuf:"bytes,3,opt,name=close,proto3,oneof"` -} - -func (*SubscribeFollowMeRequest_Init) isSubscribeFollowMeRequest_Message() {} - -func (*SubscribeFollowMeRequest_Ack) isSubscribeFollowMeRequest_Message() {} - -func (*SubscribeFollowMeRequest_Close) isSubscribeFollowMeRequest_Message() {} - -type SubscribeFollowMeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - AckTsNs int64 `protobuf:"varint,1,opt,name=ack_ts_ns,json=ackTsNs,proto3" json:"ack_ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeFollowMeResponse) Reset() { - *x = SubscribeFollowMeResponse{} - mi := &file_mq_broker_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeFollowMeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeFollowMeResponse) ProtoMessage() {} - -func (x *SubscribeFollowMeResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[39] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeFollowMeResponse.ProtoReflect.Descriptor instead. -func (*SubscribeFollowMeResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{39} -} - -func (x *SubscribeFollowMeResponse) GetAckTsNs() int64 { - if x != nil { - return x.AckTsNs - } - return 0 -} - -type FetchMessageRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Topic and partition to fetch from - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - // Starting offset for this fetch - StartOffset int64 `protobuf:"varint,3,opt,name=start_offset,json=startOffset,proto3" json:"start_offset,omitempty"` - // Maximum number of bytes to return (limit response size) - MaxBytes int32 `protobuf:"varint,4,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - // Maximum number of messages to return - MaxMessages int32 `protobuf:"varint,5,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` - // Maximum time to wait for data if partition is empty (milliseconds) - // 0 = return immediately, >0 = wait up to this long - MaxWaitMs int32 `protobuf:"varint,6,opt,name=max_wait_ms,json=maxWaitMs,proto3" json:"max_wait_ms,omitempty"` - // Minimum bytes before responding (0 = respond immediately) - // This allows batching for efficiency - MinBytes int32 `protobuf:"varint,7,opt,name=min_bytes,json=minBytes,proto3" json:"min_bytes,omitempty"` - // Consumer identity (for monitoring/debugging) - ConsumerGroup string `protobuf:"bytes,8,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - ConsumerId string `protobuf:"bytes,9,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FetchMessageRequest) Reset() { - *x = FetchMessageRequest{} - mi := &file_mq_broker_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FetchMessageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FetchMessageRequest) ProtoMessage() {} - -func (x *FetchMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[40] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FetchMessageRequest.ProtoReflect.Descriptor instead. -func (*FetchMessageRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{40} -} - -func (x *FetchMessageRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *FetchMessageRequest) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *FetchMessageRequest) GetStartOffset() int64 { - if x != nil { - return x.StartOffset - } - return 0 -} - -func (x *FetchMessageRequest) GetMaxBytes() int32 { - if x != nil { - return x.MaxBytes - } - return 0 -} - -func (x *FetchMessageRequest) GetMaxMessages() int32 { - if x != nil { - return x.MaxMessages - } - return 0 -} - -func (x *FetchMessageRequest) GetMaxWaitMs() int32 { - if x != nil { - return x.MaxWaitMs - } - return 0 -} - -func (x *FetchMessageRequest) GetMinBytes() int32 { - if x != nil { - return x.MinBytes - } - return 0 -} - -func (x *FetchMessageRequest) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -func (x *FetchMessageRequest) GetConsumerId() string { - if x != nil { - return x.ConsumerId - } - return "" -} - -type FetchMessageResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Messages fetched (may be empty if no data available) - Messages []*DataMessage `protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"` - // Metadata about partition state - HighWaterMark int64 `protobuf:"varint,2,opt,name=high_water_mark,json=highWaterMark,proto3" json:"high_water_mark,omitempty"` // Highest offset available - LogStartOffset int64 `protobuf:"varint,3,opt,name=log_start_offset,json=logStartOffset,proto3" json:"log_start_offset,omitempty"` // Earliest offset available - EndOfPartition bool `protobuf:"varint,4,opt,name=end_of_partition,json=endOfPartition,proto3" json:"end_of_partition,omitempty"` // True if no more data available - // Error handling - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - ErrorCode int32 `protobuf:"varint,6,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - // Next offset to fetch (for client convenience) - // Client should fetch from this offset next - NextOffset int64 `protobuf:"varint,7,opt,name=next_offset,json=nextOffset,proto3" json:"next_offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FetchMessageResponse) Reset() { - *x = FetchMessageResponse{} - mi := &file_mq_broker_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FetchMessageResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FetchMessageResponse) ProtoMessage() {} - -func (x *FetchMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[41] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FetchMessageResponse.ProtoReflect.Descriptor instead. -func (*FetchMessageResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{41} -} - -func (x *FetchMessageResponse) GetMessages() []*DataMessage { - if x != nil { - return x.Messages - } - return nil -} - -func (x *FetchMessageResponse) GetHighWaterMark() int64 { - if x != nil { - return x.HighWaterMark - } - return 0 -} - -func (x *FetchMessageResponse) GetLogStartOffset() int64 { - if x != nil { - return x.LogStartOffset - } - return 0 -} - -func (x *FetchMessageResponse) GetEndOfPartition() bool { - if x != nil { - return x.EndOfPartition - } - return false -} - -func (x *FetchMessageResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *FetchMessageResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -func (x *FetchMessageResponse) GetNextOffset() int64 { - if x != nil { - return x.NextOffset - } - return 0 -} - -type ClosePublishersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - UnixTimeNs int64 `protobuf:"varint,2,opt,name=unix_time_ns,json=unixTimeNs,proto3" json:"unix_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ClosePublishersRequest) Reset() { - *x = ClosePublishersRequest{} - mi := &file_mq_broker_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ClosePublishersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClosePublishersRequest) ProtoMessage() {} - -func (x *ClosePublishersRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[42] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClosePublishersRequest.ProtoReflect.Descriptor instead. -func (*ClosePublishersRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{42} -} - -func (x *ClosePublishersRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *ClosePublishersRequest) GetUnixTimeNs() int64 { - if x != nil { - return x.UnixTimeNs - } - return 0 -} - -type ClosePublishersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ClosePublishersResponse) Reset() { - *x = ClosePublishersResponse{} - mi := &file_mq_broker_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ClosePublishersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClosePublishersResponse) ProtoMessage() {} - -func (x *ClosePublishersResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[43] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClosePublishersResponse.ProtoReflect.Descriptor instead. -func (*ClosePublishersResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{43} -} - -type CloseSubscribersRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - UnixTimeNs int64 `protobuf:"varint,2,opt,name=unix_time_ns,json=unixTimeNs,proto3" json:"unix_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseSubscribersRequest) Reset() { - *x = CloseSubscribersRequest{} - mi := &file_mq_broker_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseSubscribersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseSubscribersRequest) ProtoMessage() {} - -func (x *CloseSubscribersRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[44] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseSubscribersRequest.ProtoReflect.Descriptor instead. -func (*CloseSubscribersRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{44} -} - -func (x *CloseSubscribersRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *CloseSubscribersRequest) GetUnixTimeNs() int64 { - if x != nil { - return x.UnixTimeNs - } - return 0 -} - -type CloseSubscribersResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CloseSubscribersResponse) Reset() { - *x = CloseSubscribersResponse{} - mi := &file_mq_broker_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CloseSubscribersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CloseSubscribersResponse) ProtoMessage() {} - -func (x *CloseSubscribersResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[45] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CloseSubscribersResponse.ProtoReflect.Descriptor instead. -func (*CloseSubscribersResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{45} -} - -type GetUnflushedMessagesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - StartBufferOffset int64 `protobuf:"varint,3,opt,name=start_buffer_offset,json=startBufferOffset,proto3" json:"start_buffer_offset,omitempty"` // Filter by buffer offset (messages from buffers >= this offset) - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetUnflushedMessagesRequest) Reset() { - *x = GetUnflushedMessagesRequest{} - mi := &file_mq_broker_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetUnflushedMessagesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetUnflushedMessagesRequest) ProtoMessage() {} - -func (x *GetUnflushedMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[46] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetUnflushedMessagesRequest.ProtoReflect.Descriptor instead. -func (*GetUnflushedMessagesRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{46} -} - -func (x *GetUnflushedMessagesRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *GetUnflushedMessagesRequest) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *GetUnflushedMessagesRequest) GetStartBufferOffset() int64 { - if x != nil { - return x.StartBufferOffset - } - return 0 -} - -type GetUnflushedMessagesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Message *filer_pb.LogEntry `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` // Single message per response (streaming) - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` // Error message if any - EndOfStream bool `protobuf:"varint,3,opt,name=end_of_stream,json=endOfStream,proto3" json:"end_of_stream,omitempty"` // Indicates this is the final response - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetUnflushedMessagesResponse) Reset() { - *x = GetUnflushedMessagesResponse{} - mi := &file_mq_broker_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetUnflushedMessagesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetUnflushedMessagesResponse) ProtoMessage() {} - -func (x *GetUnflushedMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[47] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetUnflushedMessagesResponse.ProtoReflect.Descriptor instead. -func (*GetUnflushedMessagesResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{47} -} - -func (x *GetUnflushedMessagesResponse) GetMessage() *filer_pb.LogEntry { - if x != nil { - return x.Message - } - return nil -} - -func (x *GetUnflushedMessagesResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *GetUnflushedMessagesResponse) GetEndOfStream() bool { - if x != nil { - return x.EndOfStream - } - return false -} - -type GetPartitionRangeInfoRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetPartitionRangeInfoRequest) Reset() { - *x = GetPartitionRangeInfoRequest{} - mi := &file_mq_broker_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPartitionRangeInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPartitionRangeInfoRequest) ProtoMessage() {} - -func (x *GetPartitionRangeInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[48] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPartitionRangeInfoRequest.ProtoReflect.Descriptor instead. -func (*GetPartitionRangeInfoRequest) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{48} -} - -func (x *GetPartitionRangeInfoRequest) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *GetPartitionRangeInfoRequest) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -type GetPartitionRangeInfoResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Offset range information - OffsetRange *OffsetRangeInfo `protobuf:"bytes,1,opt,name=offset_range,json=offsetRange,proto3" json:"offset_range,omitempty"` - // Timestamp range information - TimestampRange *TimestampRangeInfo `protobuf:"bytes,2,opt,name=timestamp_range,json=timestampRange,proto3" json:"timestamp_range,omitempty"` - // Partition metadata - RecordCount int64 `protobuf:"varint,10,opt,name=record_count,json=recordCount,proto3" json:"record_count,omitempty"` - ActiveSubscriptions int64 `protobuf:"varint,11,opt,name=active_subscriptions,json=activeSubscriptions,proto3" json:"active_subscriptions,omitempty"` - Error string `protobuf:"bytes,12,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetPartitionRangeInfoResponse) Reset() { - *x = GetPartitionRangeInfoResponse{} - mi := &file_mq_broker_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPartitionRangeInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPartitionRangeInfoResponse) ProtoMessage() {} - -func (x *GetPartitionRangeInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[49] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPartitionRangeInfoResponse.ProtoReflect.Descriptor instead. -func (*GetPartitionRangeInfoResponse) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{49} -} - -func (x *GetPartitionRangeInfoResponse) GetOffsetRange() *OffsetRangeInfo { - if x != nil { - return x.OffsetRange - } - return nil -} - -func (x *GetPartitionRangeInfoResponse) GetTimestampRange() *TimestampRangeInfo { - if x != nil { - return x.TimestampRange - } - return nil -} - -func (x *GetPartitionRangeInfoResponse) GetRecordCount() int64 { - if x != nil { - return x.RecordCount - } - return 0 -} - -func (x *GetPartitionRangeInfoResponse) GetActiveSubscriptions() int64 { - if x != nil { - return x.ActiveSubscriptions - } - return 0 -} - -func (x *GetPartitionRangeInfoResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -type OffsetRangeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - EarliestOffset int64 `protobuf:"varint,1,opt,name=earliest_offset,json=earliestOffset,proto3" json:"earliest_offset,omitempty"` - LatestOffset int64 `protobuf:"varint,2,opt,name=latest_offset,json=latestOffset,proto3" json:"latest_offset,omitempty"` - HighWaterMark int64 `protobuf:"varint,3,opt,name=high_water_mark,json=highWaterMark,proto3" json:"high_water_mark,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OffsetRangeInfo) Reset() { - *x = OffsetRangeInfo{} - mi := &file_mq_broker_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OffsetRangeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffsetRangeInfo) ProtoMessage() {} - -func (x *OffsetRangeInfo) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[50] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffsetRangeInfo.ProtoReflect.Descriptor instead. -func (*OffsetRangeInfo) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{50} -} - -func (x *OffsetRangeInfo) GetEarliestOffset() int64 { - if x != nil { - return x.EarliestOffset - } - return 0 -} - -func (x *OffsetRangeInfo) GetLatestOffset() int64 { - if x != nil { - return x.LatestOffset - } - return 0 -} - -func (x *OffsetRangeInfo) GetHighWaterMark() int64 { - if x != nil { - return x.HighWaterMark - } - return 0 -} - -type TimestampRangeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - EarliestTimestampNs int64 `protobuf:"varint,1,opt,name=earliest_timestamp_ns,json=earliestTimestampNs,proto3" json:"earliest_timestamp_ns,omitempty"` // Earliest message timestamp in nanoseconds - LatestTimestampNs int64 `protobuf:"varint,2,opt,name=latest_timestamp_ns,json=latestTimestampNs,proto3" json:"latest_timestamp_ns,omitempty"` // Latest message timestamp in nanoseconds - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TimestampRangeInfo) Reset() { - *x = TimestampRangeInfo{} - mi := &file_mq_broker_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TimestampRangeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimestampRangeInfo) ProtoMessage() {} - -func (x *TimestampRangeInfo) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[51] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimestampRangeInfo.ProtoReflect.Descriptor instead. -func (*TimestampRangeInfo) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{51} -} - -func (x *TimestampRangeInfo) GetEarliestTimestampNs() int64 { - if x != nil { - return x.EarliestTimestampNs - } - return 0 -} - -func (x *TimestampRangeInfo) GetLatestTimestampNs() int64 { - if x != nil { - return x.LatestTimestampNs - } - return 0 -} - -type PublisherToPubBalancerRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublisherToPubBalancerRequest_InitMessage) Reset() { - *x = PublisherToPubBalancerRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublisherToPubBalancerRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublisherToPubBalancerRequest_InitMessage) ProtoMessage() {} - -func (x *PublisherToPubBalancerRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[53] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublisherToPubBalancerRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*PublisherToPubBalancerRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *PublisherToPubBalancerRequest_InitMessage) GetBroker() string { - if x != nil { - return x.Broker - } - return "" -} - -type SubscriberToSubCoordinatorRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - ConsumerGroup string `protobuf:"bytes,1,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - ConsumerGroupInstanceId string `protobuf:"bytes,2,opt,name=consumer_group_instance_id,json=consumerGroupInstanceId,proto3" json:"consumer_group_instance_id,omitempty"` - Topic *schema_pb.Topic `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - // The consumer group instance will be assigned at most max_partition_count partitions. - // If the number of partitions is less than the sum of max_partition_count, - // the consumer group instance may be assigned partitions less than max_partition_count. - // Default is 1. - MaxPartitionCount int32 `protobuf:"varint,4,opt,name=max_partition_count,json=maxPartitionCount,proto3" json:"max_partition_count,omitempty"` - // If consumer group instance changes, wait for rebalance_seconds before reassigning partitions - // Exception: if adding a new consumer group instance and sum of max_partition_count equals the number of partitions, - // the rebalance will happen immediately. - // Default is 10 seconds. - RebalanceSeconds int32 `protobuf:"varint,5,opt,name=rebalance_seconds,json=rebalanceSeconds,proto3" json:"rebalance_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) Reset() { - *x = SubscriberToSubCoordinatorRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorRequest_InitMessage) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[54] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{28, 0} -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) GetConsumerGroupInstanceId() string { - if x != nil { - return x.ConsumerGroupInstanceId - } - return "" -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) GetMaxPartitionCount() int32 { - if x != nil { - return x.MaxPartitionCount - } - return 0 -} - -func (x *SubscriberToSubCoordinatorRequest_InitMessage) GetRebalanceSeconds() int32 { - if x != nil { - return x.RebalanceSeconds - } - return 0 -} - -type SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Partition *schema_pb.Partition `protobuf:"bytes,1,opt,name=partition,proto3" json:"partition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) Reset() { - *x = SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{} - mi := &file_mq_broker_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[55] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{28, 1} -} - -func (x *SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -type SubscriberToSubCoordinatorRequest_AckAssignmentMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Partition *schema_pb.Partition `protobuf:"bytes,1,opt,name=partition,proto3" json:"partition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorRequest_AckAssignmentMessage) Reset() { - *x = SubscriberToSubCoordinatorRequest_AckAssignmentMessage{} - mi := &file_mq_broker_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorRequest_AckAssignmentMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorRequest_AckAssignmentMessage) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorRequest_AckAssignmentMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[56] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorRequest_AckAssignmentMessage.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorRequest_AckAssignmentMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{28, 2} -} - -func (x *SubscriberToSubCoordinatorRequest_AckAssignmentMessage) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -type SubscriberToSubCoordinatorResponse_Assignment struct { - state protoimpl.MessageState `protogen:"open.v1"` - PartitionAssignment *BrokerPartitionAssignment `protobuf:"bytes,1,opt,name=partition_assignment,json=partitionAssignment,proto3" json:"partition_assignment,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorResponse_Assignment) Reset() { - *x = SubscriberToSubCoordinatorResponse_Assignment{} - mi := &file_mq_broker_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorResponse_Assignment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorResponse_Assignment) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorResponse_Assignment) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[57] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorResponse_Assignment.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorResponse_Assignment) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{29, 0} -} - -func (x *SubscriberToSubCoordinatorResponse_Assignment) GetPartitionAssignment() *BrokerPartitionAssignment { - if x != nil { - return x.PartitionAssignment - } - return nil -} - -type SubscriberToSubCoordinatorResponse_UnAssignment struct { - state protoimpl.MessageState `protogen:"open.v1"` - Partition *schema_pb.Partition `protobuf:"bytes,1,opt,name=partition,proto3" json:"partition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscriberToSubCoordinatorResponse_UnAssignment) Reset() { - *x = SubscriberToSubCoordinatorResponse_UnAssignment{} - mi := &file_mq_broker_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscriberToSubCoordinatorResponse_UnAssignment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriberToSubCoordinatorResponse_UnAssignment) ProtoMessage() {} - -func (x *SubscriberToSubCoordinatorResponse_UnAssignment) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[58] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriberToSubCoordinatorResponse_UnAssignment.ProtoReflect.Descriptor instead. -func (*SubscriberToSubCoordinatorResponse_UnAssignment) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{29, 1} -} - -func (x *SubscriberToSubCoordinatorResponse_UnAssignment) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -type PublishMessageRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - AckInterval int32 `protobuf:"varint,3,opt,name=ack_interval,json=ackInterval,proto3" json:"ack_interval,omitempty"` - FollowerBroker string `protobuf:"bytes,4,opt,name=follower_broker,json=followerBroker,proto3" json:"follower_broker,omitempty"` - PublisherName string `protobuf:"bytes,5,opt,name=publisher_name,json=publisherName,proto3" json:"publisher_name,omitempty"` // for debugging - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishMessageRequest_InitMessage) Reset() { - *x = PublishMessageRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishMessageRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishMessageRequest_InitMessage) ProtoMessage() {} - -func (x *PublishMessageRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[59] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishMessageRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*PublishMessageRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{32, 0} -} - -func (x *PublishMessageRequest_InitMessage) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *PublishMessageRequest_InitMessage) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *PublishMessageRequest_InitMessage) GetAckInterval() int32 { - if x != nil { - return x.AckInterval - } - return 0 -} - -func (x *PublishMessageRequest_InitMessage) GetFollowerBroker() string { - if x != nil { - return x.FollowerBroker - } - return "" -} - -func (x *PublishMessageRequest_InitMessage) GetPublisherName() string { - if x != nil { - return x.PublisherName - } - return "" -} - -type PublishFollowMeRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishFollowMeRequest_InitMessage) Reset() { - *x = PublishFollowMeRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishFollowMeRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishFollowMeRequest_InitMessage) ProtoMessage() {} - -func (x *PublishFollowMeRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[60] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishFollowMeRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*PublishFollowMeRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{34, 0} -} - -func (x *PublishFollowMeRequest_InitMessage) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *PublishFollowMeRequest_InitMessage) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -type PublishFollowMeRequest_FlushMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishFollowMeRequest_FlushMessage) Reset() { - *x = PublishFollowMeRequest_FlushMessage{} - mi := &file_mq_broker_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishFollowMeRequest_FlushMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishFollowMeRequest_FlushMessage) ProtoMessage() {} - -func (x *PublishFollowMeRequest_FlushMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[61] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishFollowMeRequest_FlushMessage.ProtoReflect.Descriptor instead. -func (*PublishFollowMeRequest_FlushMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{34, 1} -} - -func (x *PublishFollowMeRequest_FlushMessage) GetTsNs() int64 { - if x != nil { - return x.TsNs - } - return 0 -} - -type PublishFollowMeRequest_CloseMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishFollowMeRequest_CloseMessage) Reset() { - *x = PublishFollowMeRequest_CloseMessage{} - mi := &file_mq_broker_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishFollowMeRequest_CloseMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishFollowMeRequest_CloseMessage) ProtoMessage() {} - -func (x *PublishFollowMeRequest_CloseMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[62] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishFollowMeRequest_CloseMessage.ProtoReflect.Descriptor instead. -func (*PublishFollowMeRequest_CloseMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{34, 2} -} - -type SubscribeMessageRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - ConsumerGroup string `protobuf:"bytes,1,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - ConsumerId string `protobuf:"bytes,2,opt,name=consumer_id,json=consumerId,proto3" json:"consumer_id,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - Topic *schema_pb.Topic `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionOffset *schema_pb.PartitionOffset `protobuf:"bytes,5,opt,name=partition_offset,json=partitionOffset,proto3" json:"partition_offset,omitempty"` - OffsetType schema_pb.OffsetType `protobuf:"varint,6,opt,name=offset_type,json=offsetType,proto3,enum=schema_pb.OffsetType" json:"offset_type,omitempty"` - Filter string `protobuf:"bytes,10,opt,name=filter,proto3" json:"filter,omitempty"` - FollowerBroker string `protobuf:"bytes,11,opt,name=follower_broker,json=followerBroker,proto3" json:"follower_broker,omitempty"` - SlidingWindowSize int32 `protobuf:"varint,12,opt,name=sliding_window_size,json=slidingWindowSize,proto3" json:"sliding_window_size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageRequest_InitMessage) Reset() { - *x = SubscribeMessageRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageRequest_InitMessage) ProtoMessage() {} - -func (x *SubscribeMessageRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[63] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*SubscribeMessageRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{36, 0} -} - -func (x *SubscribeMessageRequest_InitMessage) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -func (x *SubscribeMessageRequest_InitMessage) GetConsumerId() string { - if x != nil { - return x.ConsumerId - } - return "" -} - -func (x *SubscribeMessageRequest_InitMessage) GetClientId() string { - if x != nil { - return x.ClientId - } - return "" -} - -func (x *SubscribeMessageRequest_InitMessage) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *SubscribeMessageRequest_InitMessage) GetPartitionOffset() *schema_pb.PartitionOffset { - if x != nil { - return x.PartitionOffset - } - return nil -} - -func (x *SubscribeMessageRequest_InitMessage) GetOffsetType() schema_pb.OffsetType { - if x != nil { - return x.OffsetType - } - return schema_pb.OffsetType(0) -} - -func (x *SubscribeMessageRequest_InitMessage) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *SubscribeMessageRequest_InitMessage) GetFollowerBroker() string { - if x != nil { - return x.FollowerBroker - } - return "" -} - -func (x *SubscribeMessageRequest_InitMessage) GetSlidingWindowSize() int32 { - if x != nil { - return x.SlidingWindowSize - } - return 0 -} - -type SubscribeMessageRequest_AckMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` // Timestamp in nanoseconds for acknowledgment tracking - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageRequest_AckMessage) Reset() { - *x = SubscribeMessageRequest_AckMessage{} - mi := &file_mq_broker_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageRequest_AckMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageRequest_AckMessage) ProtoMessage() {} - -func (x *SubscribeMessageRequest_AckMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[64] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageRequest_AckMessage.ProtoReflect.Descriptor instead. -func (*SubscribeMessageRequest_AckMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{36, 1} -} - -func (x *SubscribeMessageRequest_AckMessage) GetTsNs() int64 { - if x != nil { - return x.TsNs - } - return 0 -} - -func (x *SubscribeMessageRequest_AckMessage) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -type SubscribeMessageRequest_SeekMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` // New offset to seek to - OffsetType schema_pb.OffsetType `protobuf:"varint,2,opt,name=offset_type,json=offsetType,proto3,enum=schema_pb.OffsetType" json:"offset_type,omitempty"` // EXACT_OFFSET, RESET_TO_LATEST, etc. - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageRequest_SeekMessage) Reset() { - *x = SubscribeMessageRequest_SeekMessage{} - mi := &file_mq_broker_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageRequest_SeekMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageRequest_SeekMessage) ProtoMessage() {} - -func (x *SubscribeMessageRequest_SeekMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[65] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageRequest_SeekMessage.ProtoReflect.Descriptor instead. -func (*SubscribeMessageRequest_SeekMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{36, 2} -} - -func (x *SubscribeMessageRequest_SeekMessage) GetOffset() int64 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *SubscribeMessageRequest_SeekMessage) GetOffsetType() schema_pb.OffsetType { - if x != nil { - return x.OffsetType - } - return schema_pb.OffsetType(0) -} - -type SubscribeMessageResponse_SubscribeCtrlMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - IsEndOfStream bool `protobuf:"varint,2,opt,name=is_end_of_stream,json=isEndOfStream,proto3" json:"is_end_of_stream,omitempty"` - IsEndOfTopic bool `protobuf:"varint,3,opt,name=is_end_of_topic,json=isEndOfTopic,proto3" json:"is_end_of_topic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) Reset() { - *x = SubscribeMessageResponse_SubscribeCtrlMessage{} - mi := &file_mq_broker_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeMessageResponse_SubscribeCtrlMessage) ProtoMessage() {} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[66] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeMessageResponse_SubscribeCtrlMessage.ProtoReflect.Descriptor instead. -func (*SubscribeMessageResponse_SubscribeCtrlMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{37, 0} -} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) GetIsEndOfStream() bool { - if x != nil { - return x.IsEndOfStream - } - return false -} - -func (x *SubscribeMessageResponse_SubscribeCtrlMessage) GetIsEndOfTopic() bool { - if x != nil { - return x.IsEndOfTopic - } - return false -} - -type SubscribeFollowMeRequest_InitMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Partition *schema_pb.Partition `protobuf:"bytes,2,opt,name=partition,proto3" json:"partition,omitempty"` - ConsumerGroup string `protobuf:"bytes,3,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeFollowMeRequest_InitMessage) Reset() { - *x = SubscribeFollowMeRequest_InitMessage{} - mi := &file_mq_broker_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeFollowMeRequest_InitMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeFollowMeRequest_InitMessage) ProtoMessage() {} - -func (x *SubscribeFollowMeRequest_InitMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[67] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeFollowMeRequest_InitMessage.ProtoReflect.Descriptor instead. -func (*SubscribeFollowMeRequest_InitMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{38, 0} -} - -func (x *SubscribeFollowMeRequest_InitMessage) GetTopic() *schema_pb.Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *SubscribeFollowMeRequest_InitMessage) GetPartition() *schema_pb.Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *SubscribeFollowMeRequest_InitMessage) GetConsumerGroup() string { - if x != nil { - return x.ConsumerGroup - } - return "" -} - -type SubscribeFollowMeRequest_AckMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeFollowMeRequest_AckMessage) Reset() { - *x = SubscribeFollowMeRequest_AckMessage{} - mi := &file_mq_broker_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeFollowMeRequest_AckMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeFollowMeRequest_AckMessage) ProtoMessage() {} - -func (x *SubscribeFollowMeRequest_AckMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[68] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeFollowMeRequest_AckMessage.ProtoReflect.Descriptor instead. -func (*SubscribeFollowMeRequest_AckMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{38, 1} -} - -func (x *SubscribeFollowMeRequest_AckMessage) GetTsNs() int64 { - if x != nil { - return x.TsNs - } - return 0 -} - -type SubscribeFollowMeRequest_CloseMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SubscribeFollowMeRequest_CloseMessage) Reset() { - *x = SubscribeFollowMeRequest_CloseMessage{} - mi := &file_mq_broker_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SubscribeFollowMeRequest_CloseMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscribeFollowMeRequest_CloseMessage) ProtoMessage() {} - -func (x *SubscribeFollowMeRequest_CloseMessage) ProtoReflect() protoreflect.Message { - mi := &file_mq_broker_proto_msgTypes[69] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscribeFollowMeRequest_CloseMessage.ProtoReflect.Descriptor instead. -func (*SubscribeFollowMeRequest_CloseMessage) Descriptor() ([]byte, []int) { - return file_mq_broker_proto_rawDescGZIP(), []int{38, 2} -} - -var File_mq_broker_proto protoreflect.FileDescriptor - -const file_mq_broker_proto_rawDesc = "" + - "\n" + - "\x0fmq_broker.proto\x12\fmessaging_pb\x1a\x0fmq_schema.proto\x1a\vfiler.proto\":\n" + - "\x17FindBrokerLeaderRequest\x12\x1f\n" + - "\vfiler_group\x18\x01 \x01(\tR\n" + - "filerGroup\"2\n" + - "\x18FindBrokerLeaderResponse\x12\x16\n" + - "\x06broker\x18\x01 \x01(\tR\x06broker\"\xd2\x01\n" + - "\vBrokerStats\x12*\n" + - "\x11cpu_usage_percent\x18\x01 \x01(\x05R\x0fcpuUsagePercent\x12:\n" + - "\x05stats\x18\x02 \x03(\v2$.messaging_pb.BrokerStats.StatsEntryR\x05stats\x1a[\n" + - "\n" + - "StatsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x127\n" + - "\x05value\x18\x02 \x01(\v2!.messaging_pb.TopicPartitionStatsR\x05value:\x028\x01\"\xe1\x01\n" + - "\x13TopicPartitionStats\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12'\n" + - "\x0fpublisher_count\x18\x03 \x01(\x05R\x0epublisherCount\x12)\n" + - "\x10subscriber_count\x18\x04 \x01(\x05R\x0fsubscriberCount\x12\x1a\n" + - "\bfollower\x18\x05 \x01(\tR\bfollower\"\xd3\x01\n" + - "\x1dPublisherToPubBalancerRequest\x12M\n" + - "\x04init\x18\x01 \x01(\v27.messaging_pb.PublisherToPubBalancerRequest.InitMessageH\x00R\x04init\x121\n" + - "\x05stats\x18\x02 \x01(\v2\x19.messaging_pb.BrokerStatsH\x00R\x05stats\x1a%\n" + - "\vInitMessage\x12\x16\n" + - "\x06broker\x18\x01 \x01(\tR\x06brokerB\t\n" + - "\amessage\" \n" + - "\x1ePublisherToPubBalancerResponse\"\x16\n" + - "\x14BalanceTopicsRequest\"\x17\n" + - "\x15BalanceTopicsResponse\"W\n" + - "\x0eTopicRetention\x12+\n" + - "\x11retention_seconds\x18\x01 \x01(\x03R\x10retentionSeconds\x12\x18\n" + - "\aenabled\x18\x02 \x01(\bR\aenabled\"\xb1\x02\n" + - "\x15ConfigureTopicRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12'\n" + - "\x0fpartition_count\x18\x02 \x01(\x05R\x0epartitionCount\x12:\n" + - "\tretention\x18\x03 \x01(\v2\x1c.messaging_pb.TopicRetentionR\tretention\x12E\n" + - "\x13message_record_type\x18\x04 \x01(\v2\x15.schema_pb.RecordTypeR\x11messageRecordType\x12\x1f\n" + - "\vkey_columns\x18\x05 \x03(\tR\n" + - "keyColumns\x12#\n" + - "\rschema_format\x18\x06 \x01(\tR\fschemaFormat\"\xcc\x02\n" + - "\x16ConfigureTopicResponse\x12i\n" + - "\x1cbroker_partition_assignments\x18\x02 \x03(\v2'.messaging_pb.BrokerPartitionAssignmentR\x1abrokerPartitionAssignments\x12:\n" + - "\tretention\x18\x03 \x01(\v2\x1c.messaging_pb.TopicRetentionR\tretention\x12E\n" + - "\x13message_record_type\x18\x04 \x01(\v2\x15.schema_pb.RecordTypeR\x11messageRecordType\x12\x1f\n" + - "\vkey_columns\x18\x05 \x03(\tR\n" + - "keyColumns\x12#\n" + - "\rschema_format\x18\x06 \x01(\tR\fschemaFormat\"\x13\n" + - "\x11ListTopicsRequest\">\n" + - "\x12ListTopicsResponse\x12(\n" + - "\x06topics\x18\x01 \x03(\v2\x10.schema_pb.TopicR\x06topics\"<\n" + - "\x12TopicExistsRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\"-\n" + - "\x13TopicExistsResponse\x12\x16\n" + - "\x06exists\x18\x01 \x01(\bR\x06exists\"C\n" + - "\x19LookupTopicBrokersRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\"\xaf\x01\n" + - "\x1aLookupTopicBrokersResponse\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12i\n" + - "\x1cbroker_partition_assignments\x18\x02 \x03(\v2'.messaging_pb.BrokerPartitionAssignmentR\x1abrokerPartitionAssignments\"\x9d\x01\n" + - "\x19BrokerPartitionAssignment\x122\n" + - "\tpartition\x18\x01 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12#\n" + - "\rleader_broker\x18\x02 \x01(\tR\fleaderBroker\x12'\n" + - "\x0ffollower_broker\x18\x03 \x01(\tR\x0efollowerBroker\"F\n" + - "\x1cGetTopicConfigurationRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\"\xf0\x03\n" + - "\x1dGetTopicConfigurationResponse\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12'\n" + - "\x0fpartition_count\x18\x02 \x01(\x05R\x0epartitionCount\x12i\n" + - "\x1cbroker_partition_assignments\x18\x03 \x03(\v2'.messaging_pb.BrokerPartitionAssignmentR\x1abrokerPartitionAssignments\x12\"\n" + - "\rcreated_at_ns\x18\x04 \x01(\x03R\vcreatedAtNs\x12&\n" + - "\x0flast_updated_ns\x18\x05 \x01(\x03R\rlastUpdatedNs\x12:\n" + - "\tretention\x18\x06 \x01(\v2\x1c.messaging_pb.TopicRetentionR\tretention\x12E\n" + - "\x13message_record_type\x18\a \x01(\v2\x15.schema_pb.RecordTypeR\x11messageRecordType\x12\x1f\n" + - "\vkey_columns\x18\b \x03(\tR\n" + - "keyColumns\x12#\n" + - "\rschema_format\x18\t \x01(\tR\fschemaFormat\"C\n" + - "\x19GetTopicPublishersRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\"Z\n" + - "\x1aGetTopicPublishersResponse\x12<\n" + - "\n" + - "publishers\x18\x01 \x03(\v2\x1c.messaging_pb.TopicPublisherR\n" + - "publishers\"D\n" + - "\x1aGetTopicSubscribersRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\"^\n" + - "\x1bGetTopicSubscribersResponse\x12?\n" + - "\vsubscribers\x18\x01 \x03(\v2\x1d.messaging_pb.TopicSubscriberR\vsubscribers\"\xf0\x02\n" + - "\x0eTopicPublisher\x12%\n" + - "\x0epublisher_name\x18\x01 \x01(\tR\rpublisherName\x12\x1b\n" + - "\tclient_id\x18\x02 \x01(\tR\bclientId\x122\n" + - "\tpartition\x18\x03 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12&\n" + - "\x0fconnect_time_ns\x18\x04 \x01(\x03R\rconnectTimeNs\x12)\n" + - "\x11last_seen_time_ns\x18\x05 \x01(\x03R\x0elastSeenTimeNs\x12\x16\n" + - "\x06broker\x18\x06 \x01(\tR\x06broker\x12\x1b\n" + - "\tis_active\x18\a \x01(\bR\bisActive\x122\n" + - "\x15last_published_offset\x18\b \x01(\x03R\x13lastPublishedOffset\x12*\n" + - "\x11last_acked_offset\x18\t \x01(\x03R\x0flastAckedOffset\"\x8b\x03\n" + - "\x0fTopicSubscriber\x12%\n" + - "\x0econsumer_group\x18\x01 \x01(\tR\rconsumerGroup\x12\x1f\n" + - "\vconsumer_id\x18\x02 \x01(\tR\n" + - "consumerId\x12\x1b\n" + - "\tclient_id\x18\x03 \x01(\tR\bclientId\x122\n" + - "\tpartition\x18\x04 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12&\n" + - "\x0fconnect_time_ns\x18\x05 \x01(\x03R\rconnectTimeNs\x12)\n" + - "\x11last_seen_time_ns\x18\x06 \x01(\x03R\x0elastSeenTimeNs\x12\x16\n" + - "\x06broker\x18\a \x01(\tR\x06broker\x12\x1b\n" + - "\tis_active\x18\b \x01(\bR\bisActive\x12%\n" + - "\x0ecurrent_offset\x18\t \x01(\x03R\rcurrentOffset\x120\n" + - "\x14last_received_offset\x18\n" + - " \x01(\x03R\x12lastReceivedOffset\"\xef\x01\n" + - "\x1cAssignTopicPartitionsRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12i\n" + - "\x1cbroker_partition_assignments\x18\x02 \x03(\v2'.messaging_pb.BrokerPartitionAssignmentR\x1abrokerPartitionAssignments\x12\x1b\n" + - "\tis_leader\x18\x03 \x01(\bR\bisLeader\x12\x1f\n" + - "\vis_draining\x18\x04 \x01(\bR\n" + - "isDraining\"\x1f\n" + - "\x1dAssignTopicPartitionsResponse\"\xf9\x05\n" + - "!SubscriberToSubCoordinatorRequest\x12Q\n" + - "\x04init\x18\x01 \x01(\v2;.messaging_pb.SubscriberToSubCoordinatorRequest.InitMessageH\x00R\x04init\x12m\n" + - "\x0eack_assignment\x18\x02 \x01(\v2D.messaging_pb.SubscriberToSubCoordinatorRequest.AckAssignmentMessageH\x00R\rackAssignment\x12t\n" + - "\x11ack_un_assignment\x18\x03 \x01(\v2F.messaging_pb.SubscriberToSubCoordinatorRequest.AckUnAssignmentMessageH\x00R\x0fackUnAssignment\x1a\xf6\x01\n" + - "\vInitMessage\x12%\n" + - "\x0econsumer_group\x18\x01 \x01(\tR\rconsumerGroup\x12;\n" + - "\x1aconsumer_group_instance_id\x18\x02 \x01(\tR\x17consumerGroupInstanceId\x12&\n" + - "\x05topic\x18\x03 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12.\n" + - "\x13max_partition_count\x18\x04 \x01(\x05R\x11maxPartitionCount\x12+\n" + - "\x11rebalance_seconds\x18\x05 \x01(\x05R\x10rebalanceSeconds\x1aL\n" + - "\x16AckUnAssignmentMessage\x122\n" + - "\tpartition\x18\x01 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x1aJ\n" + - "\x14AckAssignmentMessage\x122\n" + - "\tpartition\x18\x01 \x01(\v2\x14.schema_pb.PartitionR\tpartitionB\t\n" + - "\amessage\"\xa2\x03\n" + - "\"SubscriberToSubCoordinatorResponse\x12]\n" + - "\n" + - "assignment\x18\x01 \x01(\v2;.messaging_pb.SubscriberToSubCoordinatorResponse.AssignmentH\x00R\n" + - "assignment\x12d\n" + - "\run_assignment\x18\x02 \x01(\v2=.messaging_pb.SubscriberToSubCoordinatorResponse.UnAssignmentH\x00R\funAssignment\x1ah\n" + - "\n" + - "Assignment\x12Z\n" + - "\x14partition_assignment\x18\x01 \x01(\v2'.messaging_pb.BrokerPartitionAssignmentR\x13partitionAssignment\x1aB\n" + - "\fUnAssignment\x122\n" + - "\tpartition\x18\x01 \x01(\v2\x14.schema_pb.PartitionR\tpartitionB\t\n" + - "\amessage\"R\n" + - "\x0eControlMessage\x12\x19\n" + - "\bis_close\x18\x01 \x01(\bR\aisClose\x12%\n" + - "\x0epublisher_name\x18\x02 \x01(\tR\rpublisherName\"|\n" + - "\vDataMessage\x12\x10\n" + - "\x03key\x18\x01 \x01(\fR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\fR\x05value\x12\x13\n" + - "\x05ts_ns\x18\x03 \x01(\x03R\x04tsNs\x120\n" + - "\x04ctrl\x18\x04 \x01(\v2\x1c.messaging_pb.ControlMessageR\x04ctrl\"\xf9\x02\n" + - "\x15PublishMessageRequest\x12E\n" + - "\x04init\x18\x01 \x01(\v2/.messaging_pb.PublishMessageRequest.InitMessageH\x00R\x04init\x12/\n" + - "\x04data\x18\x02 \x01(\v2\x19.messaging_pb.DataMessageH\x00R\x04data\x1a\xdc\x01\n" + - "\vInitMessage\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12!\n" + - "\fack_interval\x18\x03 \x01(\x05R\vackInterval\x12'\n" + - "\x0ffollower_broker\x18\x04 \x01(\tR\x0efollowerBroker\x12%\n" + - "\x0epublisher_name\x18\x05 \x01(\tR\rpublisherNameB\t\n" + - "\amessage\"\xb5\x01\n" + - "\x16PublishMessageResponse\x12\x1a\n" + - "\tack_ts_ns\x18\x01 \x01(\x03R\aackTsNs\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\x12!\n" + - "\fshould_close\x18\x03 \x01(\bR\vshouldClose\x12\x1d\n" + - "\n" + - "error_code\x18\x04 \x01(\x05R\terrorCode\x12'\n" + - "\x0fassigned_offset\x18\x05 \x01(\x03R\x0eassignedOffset\"\xd2\x03\n" + - "\x16PublishFollowMeRequest\x12F\n" + - "\x04init\x18\x01 \x01(\v20.messaging_pb.PublishFollowMeRequest.InitMessageH\x00R\x04init\x12/\n" + - "\x04data\x18\x02 \x01(\v2\x19.messaging_pb.DataMessageH\x00R\x04data\x12I\n" + - "\x05flush\x18\x03 \x01(\v21.messaging_pb.PublishFollowMeRequest.FlushMessageH\x00R\x05flush\x12I\n" + - "\x05close\x18\x04 \x01(\v21.messaging_pb.PublishFollowMeRequest.CloseMessageH\x00R\x05close\x1ai\n" + - "\vInitMessage\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x1a#\n" + - "\fFlushMessage\x12\x13\n" + - "\x05ts_ns\x18\x01 \x01(\x03R\x04tsNs\x1a\x0e\n" + - "\fCloseMessageB\t\n" + - "\amessage\"5\n" + - "\x17PublishFollowMeResponse\x12\x1a\n" + - "\tack_ts_ns\x18\x01 \x01(\x03R\aackTsNs\"\x9d\x06\n" + - "\x17SubscribeMessageRequest\x12G\n" + - "\x04init\x18\x01 \x01(\v21.messaging_pb.SubscribeMessageRequest.InitMessageH\x00R\x04init\x12D\n" + - "\x03ack\x18\x02 \x01(\v20.messaging_pb.SubscribeMessageRequest.AckMessageH\x00R\x03ack\x12G\n" + - "\x04seek\x18\x03 \x01(\v21.messaging_pb.SubscribeMessageRequest.SeekMessageH\x00R\x04seek\x1a\x8a\x03\n" + - "\vInitMessage\x12%\n" + - "\x0econsumer_group\x18\x01 \x01(\tR\rconsumerGroup\x12\x1f\n" + - "\vconsumer_id\x18\x02 \x01(\tR\n" + - "consumerId\x12\x1b\n" + - "\tclient_id\x18\x03 \x01(\tR\bclientId\x12&\n" + - "\x05topic\x18\x04 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12E\n" + - "\x10partition_offset\x18\x05 \x01(\v2\x1a.schema_pb.PartitionOffsetR\x0fpartitionOffset\x126\n" + - "\voffset_type\x18\x06 \x01(\x0e2\x15.schema_pb.OffsetTypeR\n" + - "offsetType\x12\x16\n" + - "\x06filter\x18\n" + - " \x01(\tR\x06filter\x12'\n" + - "\x0ffollower_broker\x18\v \x01(\tR\x0efollowerBroker\x12.\n" + - "\x13sliding_window_size\x18\f \x01(\x05R\x11slidingWindowSize\x1a3\n" + - "\n" + - "AckMessage\x12\x13\n" + - "\x05ts_ns\x18\x01 \x01(\x03R\x04tsNs\x12\x10\n" + - "\x03key\x18\x02 \x01(\fR\x03key\x1a]\n" + - "\vSeekMessage\x12\x16\n" + - "\x06offset\x18\x01 \x01(\x03R\x06offset\x126\n" + - "\voffset_type\x18\x02 \x01(\x0e2\x15.schema_pb.OffsetTypeR\n" + - "offsetTypeB\t\n" + - "\amessage\"\xa7\x02\n" + - "\x18SubscribeMessageResponse\x12Q\n" + - "\x04ctrl\x18\x01 \x01(\v2;.messaging_pb.SubscribeMessageResponse.SubscribeCtrlMessageH\x00R\x04ctrl\x12/\n" + - "\x04data\x18\x02 \x01(\v2\x19.messaging_pb.DataMessageH\x00R\x04data\x1a|\n" + - "\x14SubscribeCtrlMessage\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\x12'\n" + - "\x10is_end_of_stream\x18\x02 \x01(\bR\risEndOfStream\x12%\n" + - "\x0fis_end_of_topic\x18\x03 \x01(\bR\fisEndOfTopicB\t\n" + - "\amessage\"\xc9\x03\n" + - "\x18SubscribeFollowMeRequest\x12H\n" + - "\x04init\x18\x01 \x01(\v22.messaging_pb.SubscribeFollowMeRequest.InitMessageH\x00R\x04init\x12E\n" + - "\x03ack\x18\x02 \x01(\v21.messaging_pb.SubscribeFollowMeRequest.AckMessageH\x00R\x03ack\x12K\n" + - "\x05close\x18\x03 \x01(\v23.messaging_pb.SubscribeFollowMeRequest.CloseMessageH\x00R\x05close\x1a\x90\x01\n" + - "\vInitMessage\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12%\n" + - "\x0econsumer_group\x18\x03 \x01(\tR\rconsumerGroup\x1a!\n" + - "\n" + - "AckMessage\x12\x13\n" + - "\x05ts_ns\x18\x01 \x01(\x03R\x04tsNs\x1a\x0e\n" + - "\fCloseMessageB\t\n" + - "\amessage\"7\n" + - "\x19SubscribeFollowMeResponse\x12\x1a\n" + - "\tack_ts_ns\x18\x01 \x01(\x03R\aackTsNs\"\xd9\x02\n" + - "\x13FetchMessageRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12!\n" + - "\fstart_offset\x18\x03 \x01(\x03R\vstartOffset\x12\x1b\n" + - "\tmax_bytes\x18\x04 \x01(\x05R\bmaxBytes\x12!\n" + - "\fmax_messages\x18\x05 \x01(\x05R\vmaxMessages\x12\x1e\n" + - "\vmax_wait_ms\x18\x06 \x01(\x05R\tmaxWaitMs\x12\x1b\n" + - "\tmin_bytes\x18\a \x01(\x05R\bminBytes\x12%\n" + - "\x0econsumer_group\x18\b \x01(\tR\rconsumerGroup\x12\x1f\n" + - "\vconsumer_id\x18\t \x01(\tR\n" + - "consumerId\"\x9f\x02\n" + - "\x14FetchMessageResponse\x125\n" + - "\bmessages\x18\x01 \x03(\v2\x19.messaging_pb.DataMessageR\bmessages\x12&\n" + - "\x0fhigh_water_mark\x18\x02 \x01(\x03R\rhighWaterMark\x12(\n" + - "\x10log_start_offset\x18\x03 \x01(\x03R\x0elogStartOffset\x12(\n" + - "\x10end_of_partition\x18\x04 \x01(\bR\x0eendOfPartition\x12\x14\n" + - "\x05error\x18\x05 \x01(\tR\x05error\x12\x1d\n" + - "\n" + - "error_code\x18\x06 \x01(\x05R\terrorCode\x12\x1f\n" + - "\vnext_offset\x18\a \x01(\x03R\n" + - "nextOffset\"b\n" + - "\x16ClosePublishersRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12 \n" + - "\funix_time_ns\x18\x02 \x01(\x03R\n" + - "unixTimeNs\"\x19\n" + - "\x17ClosePublishersResponse\"c\n" + - "\x17CloseSubscribersRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12 \n" + - "\funix_time_ns\x18\x02 \x01(\x03R\n" + - "unixTimeNs\"\x1a\n" + - "\x18CloseSubscribersResponse\"\xa9\x01\n" + - "\x1bGetUnflushedMessagesRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12.\n" + - "\x13start_buffer_offset\x18\x03 \x01(\x03R\x11startBufferOffset\"\x86\x01\n" + - "\x1cGetUnflushedMessagesResponse\x12,\n" + - "\amessage\x18\x01 \x01(\v2\x12.filer_pb.LogEntryR\amessage\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\x12\"\n" + - "\rend_of_stream\x18\x03 \x01(\bR\vendOfStream\"z\n" + - "\x1cGetPartitionRangeInfoRequest\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x122\n" + - "\tpartition\x18\x02 \x01(\v2\x14.schema_pb.PartitionR\tpartition\"\x98\x02\n" + - "\x1dGetPartitionRangeInfoResponse\x12@\n" + - "\foffset_range\x18\x01 \x01(\v2\x1d.messaging_pb.OffsetRangeInfoR\voffsetRange\x12I\n" + - "\x0ftimestamp_range\x18\x02 \x01(\v2 .messaging_pb.TimestampRangeInfoR\x0etimestampRange\x12!\n" + - "\frecord_count\x18\n" + - " \x01(\x03R\vrecordCount\x121\n" + - "\x14active_subscriptions\x18\v \x01(\x03R\x13activeSubscriptions\x12\x14\n" + - "\x05error\x18\f \x01(\tR\x05error\"\x87\x01\n" + - "\x0fOffsetRangeInfo\x12'\n" + - "\x0fearliest_offset\x18\x01 \x01(\x03R\x0eearliestOffset\x12#\n" + - "\rlatest_offset\x18\x02 \x01(\x03R\flatestOffset\x12&\n" + - "\x0fhigh_water_mark\x18\x03 \x01(\x03R\rhighWaterMark\"x\n" + - "\x12TimestampRangeInfo\x122\n" + - "\x15earliest_timestamp_ns\x18\x01 \x01(\x03R\x13earliestTimestampNs\x12.\n" + - "\x13latest_timestamp_ns\x18\x02 \x01(\x03R\x11latestTimestampNs2\xad\x11\n" + - "\x10SeaweedMessaging\x12c\n" + - "\x10FindBrokerLeader\x12%.messaging_pb.FindBrokerLeaderRequest\x1a&.messaging_pb.FindBrokerLeaderResponse\"\x00\x12y\n" + - "\x16PublisherToPubBalancer\x12+.messaging_pb.PublisherToPubBalancerRequest\x1a,.messaging_pb.PublisherToPubBalancerResponse\"\x00(\x010\x01\x12Z\n" + - "\rBalanceTopics\x12\".messaging_pb.BalanceTopicsRequest\x1a#.messaging_pb.BalanceTopicsResponse\"\x00\x12Q\n" + - "\n" + - "ListTopics\x12\x1f.messaging_pb.ListTopicsRequest\x1a .messaging_pb.ListTopicsResponse\"\x00\x12T\n" + - "\vTopicExists\x12 .messaging_pb.TopicExistsRequest\x1a!.messaging_pb.TopicExistsResponse\"\x00\x12]\n" + - "\x0eConfigureTopic\x12#.messaging_pb.ConfigureTopicRequest\x1a$.messaging_pb.ConfigureTopicResponse\"\x00\x12i\n" + - "\x12LookupTopicBrokers\x12'.messaging_pb.LookupTopicBrokersRequest\x1a(.messaging_pb.LookupTopicBrokersResponse\"\x00\x12r\n" + - "\x15GetTopicConfiguration\x12*.messaging_pb.GetTopicConfigurationRequest\x1a+.messaging_pb.GetTopicConfigurationResponse\"\x00\x12i\n" + - "\x12GetTopicPublishers\x12'.messaging_pb.GetTopicPublishersRequest\x1a(.messaging_pb.GetTopicPublishersResponse\"\x00\x12l\n" + - "\x13GetTopicSubscribers\x12(.messaging_pb.GetTopicSubscribersRequest\x1a).messaging_pb.GetTopicSubscribersResponse\"\x00\x12r\n" + - "\x15AssignTopicPartitions\x12*.messaging_pb.AssignTopicPartitionsRequest\x1a+.messaging_pb.AssignTopicPartitionsResponse\"\x00\x12`\n" + - "\x0fClosePublishers\x12$.messaging_pb.ClosePublishersRequest\x1a%.messaging_pb.ClosePublishersResponse\"\x00\x12c\n" + - "\x10CloseSubscribers\x12%.messaging_pb.CloseSubscribersRequest\x1a&.messaging_pb.CloseSubscribersResponse\"\x00\x12\x85\x01\n" + - "\x1aSubscriberToSubCoordinator\x12/.messaging_pb.SubscriberToSubCoordinatorRequest\x1a0.messaging_pb.SubscriberToSubCoordinatorResponse\"\x00(\x010\x01\x12a\n" + - "\x0ePublishMessage\x12#.messaging_pb.PublishMessageRequest\x1a$.messaging_pb.PublishMessageResponse\"\x00(\x010\x01\x12g\n" + - "\x10SubscribeMessage\x12%.messaging_pb.SubscribeMessageRequest\x1a&.messaging_pb.SubscribeMessageResponse\"\x00(\x010\x01\x12d\n" + - "\x0fPublishFollowMe\x12$.messaging_pb.PublishFollowMeRequest\x1a%.messaging_pb.PublishFollowMeResponse\"\x00(\x010\x01\x12h\n" + - "\x11SubscribeFollowMe\x12&.messaging_pb.SubscribeFollowMeRequest\x1a'.messaging_pb.SubscribeFollowMeResponse\"\x00(\x01\x12W\n" + - "\fFetchMessage\x12!.messaging_pb.FetchMessageRequest\x1a\".messaging_pb.FetchMessageResponse\"\x00\x12q\n" + - "\x14GetUnflushedMessages\x12).messaging_pb.GetUnflushedMessagesRequest\x1a*.messaging_pb.GetUnflushedMessagesResponse\"\x000\x01\x12r\n" + - "\x15GetPartitionRangeInfo\x12*.messaging_pb.GetPartitionRangeInfoRequest\x1a+.messaging_pb.GetPartitionRangeInfoResponse\"\x00BO\n" + - "\fseaweedfs.mqB\x11MessageQueueProtoZ,github.com/seaweedfs/seaweedfs/weed/pb/mq_pbb\x06proto3" - -var ( - file_mq_broker_proto_rawDescOnce sync.Once - file_mq_broker_proto_rawDescData []byte -) - -func file_mq_broker_proto_rawDescGZIP() []byte { - file_mq_broker_proto_rawDescOnce.Do(func() { - file_mq_broker_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mq_broker_proto_rawDesc), len(file_mq_broker_proto_rawDesc))) - }) - return file_mq_broker_proto_rawDescData -} - -var file_mq_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 70) -var file_mq_broker_proto_goTypes = []any{ - (*FindBrokerLeaderRequest)(nil), // 0: messaging_pb.FindBrokerLeaderRequest - (*FindBrokerLeaderResponse)(nil), // 1: messaging_pb.FindBrokerLeaderResponse - (*BrokerStats)(nil), // 2: messaging_pb.BrokerStats - (*TopicPartitionStats)(nil), // 3: messaging_pb.TopicPartitionStats - (*PublisherToPubBalancerRequest)(nil), // 4: messaging_pb.PublisherToPubBalancerRequest - (*PublisherToPubBalancerResponse)(nil), // 5: messaging_pb.PublisherToPubBalancerResponse - (*BalanceTopicsRequest)(nil), // 6: messaging_pb.BalanceTopicsRequest - (*BalanceTopicsResponse)(nil), // 7: messaging_pb.BalanceTopicsResponse - (*TopicRetention)(nil), // 8: messaging_pb.TopicRetention - (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest - (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse - (*ListTopicsRequest)(nil), // 11: messaging_pb.ListTopicsRequest - (*ListTopicsResponse)(nil), // 12: messaging_pb.ListTopicsResponse - (*TopicExistsRequest)(nil), // 13: messaging_pb.TopicExistsRequest - (*TopicExistsResponse)(nil), // 14: messaging_pb.TopicExistsResponse - (*LookupTopicBrokersRequest)(nil), // 15: messaging_pb.LookupTopicBrokersRequest - (*LookupTopicBrokersResponse)(nil), // 16: messaging_pb.LookupTopicBrokersResponse - (*BrokerPartitionAssignment)(nil), // 17: messaging_pb.BrokerPartitionAssignment - (*GetTopicConfigurationRequest)(nil), // 18: messaging_pb.GetTopicConfigurationRequest - (*GetTopicConfigurationResponse)(nil), // 19: messaging_pb.GetTopicConfigurationResponse - (*GetTopicPublishersRequest)(nil), // 20: messaging_pb.GetTopicPublishersRequest - (*GetTopicPublishersResponse)(nil), // 21: messaging_pb.GetTopicPublishersResponse - (*GetTopicSubscribersRequest)(nil), // 22: messaging_pb.GetTopicSubscribersRequest - (*GetTopicSubscribersResponse)(nil), // 23: messaging_pb.GetTopicSubscribersResponse - (*TopicPublisher)(nil), // 24: messaging_pb.TopicPublisher - (*TopicSubscriber)(nil), // 25: messaging_pb.TopicSubscriber - (*AssignTopicPartitionsRequest)(nil), // 26: messaging_pb.AssignTopicPartitionsRequest - (*AssignTopicPartitionsResponse)(nil), // 27: messaging_pb.AssignTopicPartitionsResponse - (*SubscriberToSubCoordinatorRequest)(nil), // 28: messaging_pb.SubscriberToSubCoordinatorRequest - (*SubscriberToSubCoordinatorResponse)(nil), // 29: messaging_pb.SubscriberToSubCoordinatorResponse - (*ControlMessage)(nil), // 30: messaging_pb.ControlMessage - (*DataMessage)(nil), // 31: messaging_pb.DataMessage - (*PublishMessageRequest)(nil), // 32: messaging_pb.PublishMessageRequest - (*PublishMessageResponse)(nil), // 33: messaging_pb.PublishMessageResponse - (*PublishFollowMeRequest)(nil), // 34: messaging_pb.PublishFollowMeRequest - (*PublishFollowMeResponse)(nil), // 35: messaging_pb.PublishFollowMeResponse - (*SubscribeMessageRequest)(nil), // 36: messaging_pb.SubscribeMessageRequest - (*SubscribeMessageResponse)(nil), // 37: messaging_pb.SubscribeMessageResponse - (*SubscribeFollowMeRequest)(nil), // 38: messaging_pb.SubscribeFollowMeRequest - (*SubscribeFollowMeResponse)(nil), // 39: messaging_pb.SubscribeFollowMeResponse - (*FetchMessageRequest)(nil), // 40: messaging_pb.FetchMessageRequest - (*FetchMessageResponse)(nil), // 41: messaging_pb.FetchMessageResponse - (*ClosePublishersRequest)(nil), // 42: messaging_pb.ClosePublishersRequest - (*ClosePublishersResponse)(nil), // 43: messaging_pb.ClosePublishersResponse - (*CloseSubscribersRequest)(nil), // 44: messaging_pb.CloseSubscribersRequest - (*CloseSubscribersResponse)(nil), // 45: messaging_pb.CloseSubscribersResponse - (*GetUnflushedMessagesRequest)(nil), // 46: messaging_pb.GetUnflushedMessagesRequest - (*GetUnflushedMessagesResponse)(nil), // 47: messaging_pb.GetUnflushedMessagesResponse - (*GetPartitionRangeInfoRequest)(nil), // 48: messaging_pb.GetPartitionRangeInfoRequest - (*GetPartitionRangeInfoResponse)(nil), // 49: messaging_pb.GetPartitionRangeInfoResponse - (*OffsetRangeInfo)(nil), // 50: messaging_pb.OffsetRangeInfo - (*TimestampRangeInfo)(nil), // 51: messaging_pb.TimestampRangeInfo - nil, // 52: messaging_pb.BrokerStats.StatsEntry - (*PublisherToPubBalancerRequest_InitMessage)(nil), // 53: messaging_pb.PublisherToPubBalancerRequest.InitMessage - (*SubscriberToSubCoordinatorRequest_InitMessage)(nil), // 54: messaging_pb.SubscriberToSubCoordinatorRequest.InitMessage - (*SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage)(nil), // 55: messaging_pb.SubscriberToSubCoordinatorRequest.AckUnAssignmentMessage - (*SubscriberToSubCoordinatorRequest_AckAssignmentMessage)(nil), // 56: messaging_pb.SubscriberToSubCoordinatorRequest.AckAssignmentMessage - (*SubscriberToSubCoordinatorResponse_Assignment)(nil), // 57: messaging_pb.SubscriberToSubCoordinatorResponse.Assignment - (*SubscriberToSubCoordinatorResponse_UnAssignment)(nil), // 58: messaging_pb.SubscriberToSubCoordinatorResponse.UnAssignment - (*PublishMessageRequest_InitMessage)(nil), // 59: messaging_pb.PublishMessageRequest.InitMessage - (*PublishFollowMeRequest_InitMessage)(nil), // 60: messaging_pb.PublishFollowMeRequest.InitMessage - (*PublishFollowMeRequest_FlushMessage)(nil), // 61: messaging_pb.PublishFollowMeRequest.FlushMessage - (*PublishFollowMeRequest_CloseMessage)(nil), // 62: messaging_pb.PublishFollowMeRequest.CloseMessage - (*SubscribeMessageRequest_InitMessage)(nil), // 63: messaging_pb.SubscribeMessageRequest.InitMessage - (*SubscribeMessageRequest_AckMessage)(nil), // 64: messaging_pb.SubscribeMessageRequest.AckMessage - (*SubscribeMessageRequest_SeekMessage)(nil), // 65: messaging_pb.SubscribeMessageRequest.SeekMessage - (*SubscribeMessageResponse_SubscribeCtrlMessage)(nil), // 66: messaging_pb.SubscribeMessageResponse.SubscribeCtrlMessage - (*SubscribeFollowMeRequest_InitMessage)(nil), // 67: messaging_pb.SubscribeFollowMeRequest.InitMessage - (*SubscribeFollowMeRequest_AckMessage)(nil), // 68: messaging_pb.SubscribeFollowMeRequest.AckMessage - (*SubscribeFollowMeRequest_CloseMessage)(nil), // 69: messaging_pb.SubscribeFollowMeRequest.CloseMessage - (*schema_pb.Topic)(nil), // 70: schema_pb.Topic - (*schema_pb.Partition)(nil), // 71: schema_pb.Partition - (*schema_pb.RecordType)(nil), // 72: schema_pb.RecordType - (*filer_pb.LogEntry)(nil), // 73: filer_pb.LogEntry - (*schema_pb.PartitionOffset)(nil), // 74: schema_pb.PartitionOffset - (schema_pb.OffsetType)(0), // 75: schema_pb.OffsetType -} -var file_mq_broker_proto_depIdxs = []int32{ - 52, // 0: messaging_pb.BrokerStats.stats:type_name -> messaging_pb.BrokerStats.StatsEntry - 70, // 1: messaging_pb.TopicPartitionStats.topic:type_name -> schema_pb.Topic - 71, // 2: messaging_pb.TopicPartitionStats.partition:type_name -> schema_pb.Partition - 53, // 3: messaging_pb.PublisherToPubBalancerRequest.init:type_name -> messaging_pb.PublisherToPubBalancerRequest.InitMessage - 2, // 4: messaging_pb.PublisherToPubBalancerRequest.stats:type_name -> messaging_pb.BrokerStats - 70, // 5: messaging_pb.ConfigureTopicRequest.topic:type_name -> schema_pb.Topic - 8, // 6: messaging_pb.ConfigureTopicRequest.retention:type_name -> messaging_pb.TopicRetention - 72, // 7: messaging_pb.ConfigureTopicRequest.message_record_type:type_name -> schema_pb.RecordType - 17, // 8: messaging_pb.ConfigureTopicResponse.broker_partition_assignments:type_name -> messaging_pb.BrokerPartitionAssignment - 8, // 9: messaging_pb.ConfigureTopicResponse.retention:type_name -> messaging_pb.TopicRetention - 72, // 10: messaging_pb.ConfigureTopicResponse.message_record_type:type_name -> schema_pb.RecordType - 70, // 11: messaging_pb.ListTopicsResponse.topics:type_name -> schema_pb.Topic - 70, // 12: messaging_pb.TopicExistsRequest.topic:type_name -> schema_pb.Topic - 70, // 13: messaging_pb.LookupTopicBrokersRequest.topic:type_name -> schema_pb.Topic - 70, // 14: messaging_pb.LookupTopicBrokersResponse.topic:type_name -> schema_pb.Topic - 17, // 15: messaging_pb.LookupTopicBrokersResponse.broker_partition_assignments:type_name -> messaging_pb.BrokerPartitionAssignment - 71, // 16: messaging_pb.BrokerPartitionAssignment.partition:type_name -> schema_pb.Partition - 70, // 17: messaging_pb.GetTopicConfigurationRequest.topic:type_name -> schema_pb.Topic - 70, // 18: messaging_pb.GetTopicConfigurationResponse.topic:type_name -> schema_pb.Topic - 17, // 19: messaging_pb.GetTopicConfigurationResponse.broker_partition_assignments:type_name -> messaging_pb.BrokerPartitionAssignment - 8, // 20: messaging_pb.GetTopicConfigurationResponse.retention:type_name -> messaging_pb.TopicRetention - 72, // 21: messaging_pb.GetTopicConfigurationResponse.message_record_type:type_name -> schema_pb.RecordType - 70, // 22: messaging_pb.GetTopicPublishersRequest.topic:type_name -> schema_pb.Topic - 24, // 23: messaging_pb.GetTopicPublishersResponse.publishers:type_name -> messaging_pb.TopicPublisher - 70, // 24: messaging_pb.GetTopicSubscribersRequest.topic:type_name -> schema_pb.Topic - 25, // 25: messaging_pb.GetTopicSubscribersResponse.subscribers:type_name -> messaging_pb.TopicSubscriber - 71, // 26: messaging_pb.TopicPublisher.partition:type_name -> schema_pb.Partition - 71, // 27: messaging_pb.TopicSubscriber.partition:type_name -> schema_pb.Partition - 70, // 28: messaging_pb.AssignTopicPartitionsRequest.topic:type_name -> schema_pb.Topic - 17, // 29: messaging_pb.AssignTopicPartitionsRequest.broker_partition_assignments:type_name -> messaging_pb.BrokerPartitionAssignment - 54, // 30: messaging_pb.SubscriberToSubCoordinatorRequest.init:type_name -> messaging_pb.SubscriberToSubCoordinatorRequest.InitMessage - 56, // 31: messaging_pb.SubscriberToSubCoordinatorRequest.ack_assignment:type_name -> messaging_pb.SubscriberToSubCoordinatorRequest.AckAssignmentMessage - 55, // 32: messaging_pb.SubscriberToSubCoordinatorRequest.ack_un_assignment:type_name -> messaging_pb.SubscriberToSubCoordinatorRequest.AckUnAssignmentMessage - 57, // 33: messaging_pb.SubscriberToSubCoordinatorResponse.assignment:type_name -> messaging_pb.SubscriberToSubCoordinatorResponse.Assignment - 58, // 34: messaging_pb.SubscriberToSubCoordinatorResponse.un_assignment:type_name -> messaging_pb.SubscriberToSubCoordinatorResponse.UnAssignment - 30, // 35: messaging_pb.DataMessage.ctrl:type_name -> messaging_pb.ControlMessage - 59, // 36: messaging_pb.PublishMessageRequest.init:type_name -> messaging_pb.PublishMessageRequest.InitMessage - 31, // 37: messaging_pb.PublishMessageRequest.data:type_name -> messaging_pb.DataMessage - 60, // 38: messaging_pb.PublishFollowMeRequest.init:type_name -> messaging_pb.PublishFollowMeRequest.InitMessage - 31, // 39: messaging_pb.PublishFollowMeRequest.data:type_name -> messaging_pb.DataMessage - 61, // 40: messaging_pb.PublishFollowMeRequest.flush:type_name -> messaging_pb.PublishFollowMeRequest.FlushMessage - 62, // 41: messaging_pb.PublishFollowMeRequest.close:type_name -> messaging_pb.PublishFollowMeRequest.CloseMessage - 63, // 42: messaging_pb.SubscribeMessageRequest.init:type_name -> messaging_pb.SubscribeMessageRequest.InitMessage - 64, // 43: messaging_pb.SubscribeMessageRequest.ack:type_name -> messaging_pb.SubscribeMessageRequest.AckMessage - 65, // 44: messaging_pb.SubscribeMessageRequest.seek:type_name -> messaging_pb.SubscribeMessageRequest.SeekMessage - 66, // 45: messaging_pb.SubscribeMessageResponse.ctrl:type_name -> messaging_pb.SubscribeMessageResponse.SubscribeCtrlMessage - 31, // 46: messaging_pb.SubscribeMessageResponse.data:type_name -> messaging_pb.DataMessage - 67, // 47: messaging_pb.SubscribeFollowMeRequest.init:type_name -> messaging_pb.SubscribeFollowMeRequest.InitMessage - 68, // 48: messaging_pb.SubscribeFollowMeRequest.ack:type_name -> messaging_pb.SubscribeFollowMeRequest.AckMessage - 69, // 49: messaging_pb.SubscribeFollowMeRequest.close:type_name -> messaging_pb.SubscribeFollowMeRequest.CloseMessage - 70, // 50: messaging_pb.FetchMessageRequest.topic:type_name -> schema_pb.Topic - 71, // 51: messaging_pb.FetchMessageRequest.partition:type_name -> schema_pb.Partition - 31, // 52: messaging_pb.FetchMessageResponse.messages:type_name -> messaging_pb.DataMessage - 70, // 53: messaging_pb.ClosePublishersRequest.topic:type_name -> schema_pb.Topic - 70, // 54: messaging_pb.CloseSubscribersRequest.topic:type_name -> schema_pb.Topic - 70, // 55: messaging_pb.GetUnflushedMessagesRequest.topic:type_name -> schema_pb.Topic - 71, // 56: messaging_pb.GetUnflushedMessagesRequest.partition:type_name -> schema_pb.Partition - 73, // 57: messaging_pb.GetUnflushedMessagesResponse.message:type_name -> filer_pb.LogEntry - 70, // 58: messaging_pb.GetPartitionRangeInfoRequest.topic:type_name -> schema_pb.Topic - 71, // 59: messaging_pb.GetPartitionRangeInfoRequest.partition:type_name -> schema_pb.Partition - 50, // 60: messaging_pb.GetPartitionRangeInfoResponse.offset_range:type_name -> messaging_pb.OffsetRangeInfo - 51, // 61: messaging_pb.GetPartitionRangeInfoResponse.timestamp_range:type_name -> messaging_pb.TimestampRangeInfo - 3, // 62: messaging_pb.BrokerStats.StatsEntry.value:type_name -> messaging_pb.TopicPartitionStats - 70, // 63: messaging_pb.SubscriberToSubCoordinatorRequest.InitMessage.topic:type_name -> schema_pb.Topic - 71, // 64: messaging_pb.SubscriberToSubCoordinatorRequest.AckUnAssignmentMessage.partition:type_name -> schema_pb.Partition - 71, // 65: messaging_pb.SubscriberToSubCoordinatorRequest.AckAssignmentMessage.partition:type_name -> schema_pb.Partition - 17, // 66: messaging_pb.SubscriberToSubCoordinatorResponse.Assignment.partition_assignment:type_name -> messaging_pb.BrokerPartitionAssignment - 71, // 67: messaging_pb.SubscriberToSubCoordinatorResponse.UnAssignment.partition:type_name -> schema_pb.Partition - 70, // 68: messaging_pb.PublishMessageRequest.InitMessage.topic:type_name -> schema_pb.Topic - 71, // 69: messaging_pb.PublishMessageRequest.InitMessage.partition:type_name -> schema_pb.Partition - 70, // 70: messaging_pb.PublishFollowMeRequest.InitMessage.topic:type_name -> schema_pb.Topic - 71, // 71: messaging_pb.PublishFollowMeRequest.InitMessage.partition:type_name -> schema_pb.Partition - 70, // 72: messaging_pb.SubscribeMessageRequest.InitMessage.topic:type_name -> schema_pb.Topic - 74, // 73: messaging_pb.SubscribeMessageRequest.InitMessage.partition_offset:type_name -> schema_pb.PartitionOffset - 75, // 74: messaging_pb.SubscribeMessageRequest.InitMessage.offset_type:type_name -> schema_pb.OffsetType - 75, // 75: messaging_pb.SubscribeMessageRequest.SeekMessage.offset_type:type_name -> schema_pb.OffsetType - 70, // 76: messaging_pb.SubscribeFollowMeRequest.InitMessage.topic:type_name -> schema_pb.Topic - 71, // 77: messaging_pb.SubscribeFollowMeRequest.InitMessage.partition:type_name -> schema_pb.Partition - 0, // 78: messaging_pb.SeaweedMessaging.FindBrokerLeader:input_type -> messaging_pb.FindBrokerLeaderRequest - 4, // 79: messaging_pb.SeaweedMessaging.PublisherToPubBalancer:input_type -> messaging_pb.PublisherToPubBalancerRequest - 6, // 80: messaging_pb.SeaweedMessaging.BalanceTopics:input_type -> messaging_pb.BalanceTopicsRequest - 11, // 81: messaging_pb.SeaweedMessaging.ListTopics:input_type -> messaging_pb.ListTopicsRequest - 13, // 82: messaging_pb.SeaweedMessaging.TopicExists:input_type -> messaging_pb.TopicExistsRequest - 9, // 83: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest - 15, // 84: messaging_pb.SeaweedMessaging.LookupTopicBrokers:input_type -> messaging_pb.LookupTopicBrokersRequest - 18, // 85: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest - 20, // 86: messaging_pb.SeaweedMessaging.GetTopicPublishers:input_type -> messaging_pb.GetTopicPublishersRequest - 22, // 87: messaging_pb.SeaweedMessaging.GetTopicSubscribers:input_type -> messaging_pb.GetTopicSubscribersRequest - 26, // 88: messaging_pb.SeaweedMessaging.AssignTopicPartitions:input_type -> messaging_pb.AssignTopicPartitionsRequest - 42, // 89: messaging_pb.SeaweedMessaging.ClosePublishers:input_type -> messaging_pb.ClosePublishersRequest - 44, // 90: messaging_pb.SeaweedMessaging.CloseSubscribers:input_type -> messaging_pb.CloseSubscribersRequest - 28, // 91: messaging_pb.SeaweedMessaging.SubscriberToSubCoordinator:input_type -> messaging_pb.SubscriberToSubCoordinatorRequest - 32, // 92: messaging_pb.SeaweedMessaging.PublishMessage:input_type -> messaging_pb.PublishMessageRequest - 36, // 93: messaging_pb.SeaweedMessaging.SubscribeMessage:input_type -> messaging_pb.SubscribeMessageRequest - 34, // 94: messaging_pb.SeaweedMessaging.PublishFollowMe:input_type -> messaging_pb.PublishFollowMeRequest - 38, // 95: messaging_pb.SeaweedMessaging.SubscribeFollowMe:input_type -> messaging_pb.SubscribeFollowMeRequest - 40, // 96: messaging_pb.SeaweedMessaging.FetchMessage:input_type -> messaging_pb.FetchMessageRequest - 46, // 97: messaging_pb.SeaweedMessaging.GetUnflushedMessages:input_type -> messaging_pb.GetUnflushedMessagesRequest - 48, // 98: messaging_pb.SeaweedMessaging.GetPartitionRangeInfo:input_type -> messaging_pb.GetPartitionRangeInfoRequest - 1, // 99: messaging_pb.SeaweedMessaging.FindBrokerLeader:output_type -> messaging_pb.FindBrokerLeaderResponse - 5, // 100: messaging_pb.SeaweedMessaging.PublisherToPubBalancer:output_type -> messaging_pb.PublisherToPubBalancerResponse - 7, // 101: messaging_pb.SeaweedMessaging.BalanceTopics:output_type -> messaging_pb.BalanceTopicsResponse - 12, // 102: messaging_pb.SeaweedMessaging.ListTopics:output_type -> messaging_pb.ListTopicsResponse - 14, // 103: messaging_pb.SeaweedMessaging.TopicExists:output_type -> messaging_pb.TopicExistsResponse - 10, // 104: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse - 16, // 105: messaging_pb.SeaweedMessaging.LookupTopicBrokers:output_type -> messaging_pb.LookupTopicBrokersResponse - 19, // 106: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse - 21, // 107: messaging_pb.SeaweedMessaging.GetTopicPublishers:output_type -> messaging_pb.GetTopicPublishersResponse - 23, // 108: messaging_pb.SeaweedMessaging.GetTopicSubscribers:output_type -> messaging_pb.GetTopicSubscribersResponse - 27, // 109: messaging_pb.SeaweedMessaging.AssignTopicPartitions:output_type -> messaging_pb.AssignTopicPartitionsResponse - 43, // 110: messaging_pb.SeaweedMessaging.ClosePublishers:output_type -> messaging_pb.ClosePublishersResponse - 45, // 111: messaging_pb.SeaweedMessaging.CloseSubscribers:output_type -> messaging_pb.CloseSubscribersResponse - 29, // 112: messaging_pb.SeaweedMessaging.SubscriberToSubCoordinator:output_type -> messaging_pb.SubscriberToSubCoordinatorResponse - 33, // 113: messaging_pb.SeaweedMessaging.PublishMessage:output_type -> messaging_pb.PublishMessageResponse - 37, // 114: messaging_pb.SeaweedMessaging.SubscribeMessage:output_type -> messaging_pb.SubscribeMessageResponse - 35, // 115: messaging_pb.SeaweedMessaging.PublishFollowMe:output_type -> messaging_pb.PublishFollowMeResponse - 39, // 116: messaging_pb.SeaweedMessaging.SubscribeFollowMe:output_type -> messaging_pb.SubscribeFollowMeResponse - 41, // 117: messaging_pb.SeaweedMessaging.FetchMessage:output_type -> messaging_pb.FetchMessageResponse - 47, // 118: messaging_pb.SeaweedMessaging.GetUnflushedMessages:output_type -> messaging_pb.GetUnflushedMessagesResponse - 49, // 119: messaging_pb.SeaweedMessaging.GetPartitionRangeInfo:output_type -> messaging_pb.GetPartitionRangeInfoResponse - 99, // [99:120] is the sub-list for method output_type - 78, // [78:99] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name -} - -func init() { file_mq_broker_proto_init() } -func file_mq_broker_proto_init() { - if File_mq_broker_proto != nil { - return - } - file_mq_broker_proto_msgTypes[4].OneofWrappers = []any{ - (*PublisherToPubBalancerRequest_Init)(nil), - (*PublisherToPubBalancerRequest_Stats)(nil), - } - file_mq_broker_proto_msgTypes[28].OneofWrappers = []any{ - (*SubscriberToSubCoordinatorRequest_Init)(nil), - (*SubscriberToSubCoordinatorRequest_AckAssignment)(nil), - (*SubscriberToSubCoordinatorRequest_AckUnAssignment)(nil), - } - file_mq_broker_proto_msgTypes[29].OneofWrappers = []any{ - (*SubscriberToSubCoordinatorResponse_Assignment_)(nil), - (*SubscriberToSubCoordinatorResponse_UnAssignment_)(nil), - } - file_mq_broker_proto_msgTypes[32].OneofWrappers = []any{ - (*PublishMessageRequest_Init)(nil), - (*PublishMessageRequest_Data)(nil), - } - file_mq_broker_proto_msgTypes[34].OneofWrappers = []any{ - (*PublishFollowMeRequest_Init)(nil), - (*PublishFollowMeRequest_Data)(nil), - (*PublishFollowMeRequest_Flush)(nil), - (*PublishFollowMeRequest_Close)(nil), - } - file_mq_broker_proto_msgTypes[36].OneofWrappers = []any{ - (*SubscribeMessageRequest_Init)(nil), - (*SubscribeMessageRequest_Ack)(nil), - (*SubscribeMessageRequest_Seek)(nil), - } - file_mq_broker_proto_msgTypes[37].OneofWrappers = []any{ - (*SubscribeMessageResponse_Ctrl)(nil), - (*SubscribeMessageResponse_Data)(nil), - } - file_mq_broker_proto_msgTypes[38].OneofWrappers = []any{ - (*SubscribeFollowMeRequest_Init)(nil), - (*SubscribeFollowMeRequest_Ack)(nil), - (*SubscribeFollowMeRequest_Close)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_mq_broker_proto_rawDesc), len(file_mq_broker_proto_rawDesc)), - NumEnums: 0, - NumMessages: 70, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_mq_broker_proto_goTypes, - DependencyIndexes: file_mq_broker_proto_depIdxs, - MessageInfos: file_mq_broker_proto_msgTypes, - }.Build() - File_mq_broker_proto = out.File - file_mq_broker_proto_goTypes = nil - file_mq_broker_proto_depIdxs = nil -} diff --git a/weed/pb/mq_pb/mq_broker_grpc.pb.go b/weed/pb/mq_pb/mq_broker_grpc.pb.go deleted file mode 100644 index 77ff7df52..000000000 --- a/weed/pb/mq_pb/mq_broker_grpc.pb.go +++ /dev/null @@ -1,872 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: mq_broker.proto - -package mq_pb - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SeaweedMessaging_FindBrokerLeader_FullMethodName = "/messaging_pb.SeaweedMessaging/FindBrokerLeader" - SeaweedMessaging_PublisherToPubBalancer_FullMethodName = "/messaging_pb.SeaweedMessaging/PublisherToPubBalancer" - SeaweedMessaging_BalanceTopics_FullMethodName = "/messaging_pb.SeaweedMessaging/BalanceTopics" - SeaweedMessaging_ListTopics_FullMethodName = "/messaging_pb.SeaweedMessaging/ListTopics" - SeaweedMessaging_TopicExists_FullMethodName = "/messaging_pb.SeaweedMessaging/TopicExists" - SeaweedMessaging_ConfigureTopic_FullMethodName = "/messaging_pb.SeaweedMessaging/ConfigureTopic" - SeaweedMessaging_LookupTopicBrokers_FullMethodName = "/messaging_pb.SeaweedMessaging/LookupTopicBrokers" - SeaweedMessaging_GetTopicConfiguration_FullMethodName = "/messaging_pb.SeaweedMessaging/GetTopicConfiguration" - SeaweedMessaging_GetTopicPublishers_FullMethodName = "/messaging_pb.SeaweedMessaging/GetTopicPublishers" - SeaweedMessaging_GetTopicSubscribers_FullMethodName = "/messaging_pb.SeaweedMessaging/GetTopicSubscribers" - SeaweedMessaging_AssignTopicPartitions_FullMethodName = "/messaging_pb.SeaweedMessaging/AssignTopicPartitions" - SeaweedMessaging_ClosePublishers_FullMethodName = "/messaging_pb.SeaweedMessaging/ClosePublishers" - SeaweedMessaging_CloseSubscribers_FullMethodName = "/messaging_pb.SeaweedMessaging/CloseSubscribers" - SeaweedMessaging_SubscriberToSubCoordinator_FullMethodName = "/messaging_pb.SeaweedMessaging/SubscriberToSubCoordinator" - SeaweedMessaging_PublishMessage_FullMethodName = "/messaging_pb.SeaweedMessaging/PublishMessage" - SeaweedMessaging_SubscribeMessage_FullMethodName = "/messaging_pb.SeaweedMessaging/SubscribeMessage" - SeaweedMessaging_PublishFollowMe_FullMethodName = "/messaging_pb.SeaweedMessaging/PublishFollowMe" - SeaweedMessaging_SubscribeFollowMe_FullMethodName = "/messaging_pb.SeaweedMessaging/SubscribeFollowMe" - SeaweedMessaging_FetchMessage_FullMethodName = "/messaging_pb.SeaweedMessaging/FetchMessage" - SeaweedMessaging_GetUnflushedMessages_FullMethodName = "/messaging_pb.SeaweedMessaging/GetUnflushedMessages" - SeaweedMessaging_GetPartitionRangeInfo_FullMethodName = "/messaging_pb.SeaweedMessaging/GetPartitionRangeInfo" -) - -// SeaweedMessagingClient is the client API for SeaweedMessaging service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SeaweedMessagingClient interface { - // control plane - FindBrokerLeader(ctx context.Context, in *FindBrokerLeaderRequest, opts ...grpc.CallOption) (*FindBrokerLeaderResponse, error) - // control plane for balancer - PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse], error) - BalanceTopics(ctx context.Context, in *BalanceTopicsRequest, opts ...grpc.CallOption) (*BalanceTopicsResponse, error) - // control plane for topic partitions - ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) - TopicExists(ctx context.Context, in *TopicExistsRequest, opts ...grpc.CallOption) (*TopicExistsResponse, error) - ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) - LookupTopicBrokers(ctx context.Context, in *LookupTopicBrokersRequest, opts ...grpc.CallOption) (*LookupTopicBrokersResponse, error) - GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) - GetTopicPublishers(ctx context.Context, in *GetTopicPublishersRequest, opts ...grpc.CallOption) (*GetTopicPublishersResponse, error) - GetTopicSubscribers(ctx context.Context, in *GetTopicSubscribersRequest, opts ...grpc.CallOption) (*GetTopicSubscribersResponse, error) - // invoked by the balancer, running on each broker - AssignTopicPartitions(ctx context.Context, in *AssignTopicPartitionsRequest, opts ...grpc.CallOption) (*AssignTopicPartitionsResponse, error) - ClosePublishers(ctx context.Context, in *ClosePublishersRequest, opts ...grpc.CallOption) (*ClosePublishersResponse, error) - CloseSubscribers(ctx context.Context, in *CloseSubscribersRequest, opts ...grpc.CallOption) (*CloseSubscribersResponse, error) - // subscriber connects to broker balancer, which coordinates with the subscribers - SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse], error) - // data plane for each topic partition - PublishMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse], error) - SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse], error) - // The lead broker asks a follower broker to follow itself - PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse], error) - SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse], error) - // Stateless fetch API (Kafka-style) - request/response pattern - // This is the recommended API for Kafka gateway and other stateless clients - // No streaming, no session state - each request is completely independent - FetchMessage(ctx context.Context, in *FetchMessageRequest, opts ...grpc.CallOption) (*FetchMessageResponse, error) - // SQL query support - get unflushed messages from broker's in-memory buffer (streaming) - GetUnflushedMessages(ctx context.Context, in *GetUnflushedMessagesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetUnflushedMessagesResponse], error) - // Get comprehensive partition range information (offsets, timestamps, and other fields) - GetPartitionRangeInfo(ctx context.Context, in *GetPartitionRangeInfoRequest, opts ...grpc.CallOption) (*GetPartitionRangeInfoResponse, error) -} - -type seaweedMessagingClient struct { - cc grpc.ClientConnInterface -} - -func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient { - return &seaweedMessagingClient{cc} -} - -func (c *seaweedMessagingClient) FindBrokerLeader(ctx context.Context, in *FindBrokerLeaderRequest, opts ...grpc.CallOption) (*FindBrokerLeaderResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(FindBrokerLeaderResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_FindBrokerLeader_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) PublisherToPubBalancer(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[0], SeaweedMessaging_PublisherToPubBalancer_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublisherToPubBalancerClient = grpc.BidiStreamingClient[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse] - -func (c *seaweedMessagingClient) BalanceTopics(ctx context.Context, in *BalanceTopicsRequest, opts ...grpc.CallOption) (*BalanceTopicsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(BalanceTopicsResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_BalanceTopics_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ListTopicsResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_ListTopics_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) TopicExists(ctx context.Context, in *TopicExistsRequest, opts ...grpc.CallOption) (*TopicExistsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(TopicExistsResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_TopicExists_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ConfigureTopicResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_ConfigureTopic_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) LookupTopicBrokers(ctx context.Context, in *LookupTopicBrokersRequest, opts ...grpc.CallOption) (*LookupTopicBrokersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(LookupTopicBrokersResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_LookupTopicBrokers_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetTopicConfigurationResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_GetTopicConfiguration_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) GetTopicPublishers(ctx context.Context, in *GetTopicPublishersRequest, opts ...grpc.CallOption) (*GetTopicPublishersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetTopicPublishersResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_GetTopicPublishers_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) GetTopicSubscribers(ctx context.Context, in *GetTopicSubscribersRequest, opts ...grpc.CallOption) (*GetTopicSubscribersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetTopicSubscribersResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_GetTopicSubscribers_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) AssignTopicPartitions(ctx context.Context, in *AssignTopicPartitionsRequest, opts ...grpc.CallOption) (*AssignTopicPartitionsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(AssignTopicPartitionsResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_AssignTopicPartitions_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) ClosePublishers(ctx context.Context, in *ClosePublishersRequest, opts ...grpc.CallOption) (*ClosePublishersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ClosePublishersResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_ClosePublishers_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) CloseSubscribers(ctx context.Context, in *CloseSubscribersRequest, opts ...grpc.CallOption) (*CloseSubscribersResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CloseSubscribersResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_CloseSubscribers_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) SubscriberToSubCoordinator(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[1], SeaweedMessaging_SubscriberToSubCoordinator_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscriberToSubCoordinatorClient = grpc.BidiStreamingClient[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse] - -func (c *seaweedMessagingClient) PublishMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[2], SeaweedMessaging_PublishMessage_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[PublishMessageRequest, PublishMessageResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublishMessageClient = grpc.BidiStreamingClient[PublishMessageRequest, PublishMessageResponse] - -func (c *seaweedMessagingClient) SubscribeMessage(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[3], SeaweedMessaging_SubscribeMessage_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[SubscribeMessageRequest, SubscribeMessageResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscribeMessageClient = grpc.BidiStreamingClient[SubscribeMessageRequest, SubscribeMessageResponse] - -func (c *seaweedMessagingClient) PublishFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[4], SeaweedMessaging_PublishFollowMe_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[PublishFollowMeRequest, PublishFollowMeResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublishFollowMeClient = grpc.BidiStreamingClient[PublishFollowMeRequest, PublishFollowMeResponse] - -func (c *seaweedMessagingClient) SubscribeFollowMe(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[5], SeaweedMessaging_SubscribeFollowMe_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[SubscribeFollowMeRequest, SubscribeFollowMeResponse]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscribeFollowMeClient = grpc.ClientStreamingClient[SubscribeFollowMeRequest, SubscribeFollowMeResponse] - -func (c *seaweedMessagingClient) FetchMessage(ctx context.Context, in *FetchMessageRequest, opts ...grpc.CallOption) (*FetchMessageResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(FetchMessageResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_FetchMessage_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *seaweedMessagingClient) GetUnflushedMessages(ctx context.Context, in *GetUnflushedMessagesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetUnflushedMessagesResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SeaweedMessaging_ServiceDesc.Streams[6], SeaweedMessaging_GetUnflushedMessages_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[GetUnflushedMessagesRequest, GetUnflushedMessagesResponse]{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_GetUnflushedMessagesClient = grpc.ServerStreamingClient[GetUnflushedMessagesResponse] - -func (c *seaweedMessagingClient) GetPartitionRangeInfo(ctx context.Context, in *GetPartitionRangeInfoRequest, opts ...grpc.CallOption) (*GetPartitionRangeInfoResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPartitionRangeInfoResponse) - err := c.cc.Invoke(ctx, SeaweedMessaging_GetPartitionRangeInfo_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SeaweedMessagingServer is the server API for SeaweedMessaging service. -// All implementations must embed UnimplementedSeaweedMessagingServer -// for forward compatibility. -type SeaweedMessagingServer interface { - // control plane - FindBrokerLeader(context.Context, *FindBrokerLeaderRequest) (*FindBrokerLeaderResponse, error) - // control plane for balancer - PublisherToPubBalancer(grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]) error - BalanceTopics(context.Context, *BalanceTopicsRequest) (*BalanceTopicsResponse, error) - // control plane for topic partitions - ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) - TopicExists(context.Context, *TopicExistsRequest) (*TopicExistsResponse, error) - ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) - LookupTopicBrokers(context.Context, *LookupTopicBrokersRequest) (*LookupTopicBrokersResponse, error) - GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) - GetTopicPublishers(context.Context, *GetTopicPublishersRequest) (*GetTopicPublishersResponse, error) - GetTopicSubscribers(context.Context, *GetTopicSubscribersRequest) (*GetTopicSubscribersResponse, error) - // invoked by the balancer, running on each broker - AssignTopicPartitions(context.Context, *AssignTopicPartitionsRequest) (*AssignTopicPartitionsResponse, error) - ClosePublishers(context.Context, *ClosePublishersRequest) (*ClosePublishersResponse, error) - CloseSubscribers(context.Context, *CloseSubscribersRequest) (*CloseSubscribersResponse, error) - // subscriber connects to broker balancer, which coordinates with the subscribers - SubscriberToSubCoordinator(grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]) error - // data plane for each topic partition - PublishMessage(grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse]) error - SubscribeMessage(grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse]) error - // The lead broker asks a follower broker to follow itself - PublishFollowMe(grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse]) error - SubscribeFollowMe(grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse]) error - // Stateless fetch API (Kafka-style) - request/response pattern - // This is the recommended API for Kafka gateway and other stateless clients - // No streaming, no session state - each request is completely independent - FetchMessage(context.Context, *FetchMessageRequest) (*FetchMessageResponse, error) - // SQL query support - get unflushed messages from broker's in-memory buffer (streaming) - GetUnflushedMessages(*GetUnflushedMessagesRequest, grpc.ServerStreamingServer[GetUnflushedMessagesResponse]) error - // Get comprehensive partition range information (offsets, timestamps, and other fields) - GetPartitionRangeInfo(context.Context, *GetPartitionRangeInfoRequest) (*GetPartitionRangeInfoResponse, error) - mustEmbedUnimplementedSeaweedMessagingServer() -} - -// UnimplementedSeaweedMessagingServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedMessagingServer struct{} - -func (UnimplementedSeaweedMessagingServer) FindBrokerLeader(context.Context, *FindBrokerLeaderRequest) (*FindBrokerLeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FindBrokerLeader not implemented") -} -func (UnimplementedSeaweedMessagingServer) PublisherToPubBalancer(grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]) error { - return status.Errorf(codes.Unimplemented, "method PublisherToPubBalancer not implemented") -} -func (UnimplementedSeaweedMessagingServer) BalanceTopics(context.Context, *BalanceTopicsRequest) (*BalanceTopicsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method BalanceTopics not implemented") -} -func (UnimplementedSeaweedMessagingServer) ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListTopics not implemented") -} -func (UnimplementedSeaweedMessagingServer) TopicExists(context.Context, *TopicExistsRequest) (*TopicExistsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TopicExists not implemented") -} -func (UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented") -} -func (UnimplementedSeaweedMessagingServer) LookupTopicBrokers(context.Context, *LookupTopicBrokersRequest) (*LookupTopicBrokersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LookupTopicBrokers not implemented") -} -func (UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented") -} -func (UnimplementedSeaweedMessagingServer) GetTopicPublishers(context.Context, *GetTopicPublishersRequest) (*GetTopicPublishersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTopicPublishers not implemented") -} -func (UnimplementedSeaweedMessagingServer) GetTopicSubscribers(context.Context, *GetTopicSubscribersRequest) (*GetTopicSubscribersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTopicSubscribers not implemented") -} -func (UnimplementedSeaweedMessagingServer) AssignTopicPartitions(context.Context, *AssignTopicPartitionsRequest) (*AssignTopicPartitionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AssignTopicPartitions not implemented") -} -func (UnimplementedSeaweedMessagingServer) ClosePublishers(context.Context, *ClosePublishersRequest) (*ClosePublishersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ClosePublishers not implemented") -} -func (UnimplementedSeaweedMessagingServer) CloseSubscribers(context.Context, *CloseSubscribersRequest) (*CloseSubscribersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseSubscribers not implemented") -} -func (UnimplementedSeaweedMessagingServer) SubscriberToSubCoordinator(grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]) error { - return status.Errorf(codes.Unimplemented, "method SubscriberToSubCoordinator not implemented") -} -func (UnimplementedSeaweedMessagingServer) PublishMessage(grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse]) error { - return status.Errorf(codes.Unimplemented, "method PublishMessage not implemented") -} -func (UnimplementedSeaweedMessagingServer) SubscribeMessage(grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse]) error { - return status.Errorf(codes.Unimplemented, "method SubscribeMessage not implemented") -} -func (UnimplementedSeaweedMessagingServer) PublishFollowMe(grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse]) error { - return status.Errorf(codes.Unimplemented, "method PublishFollowMe not implemented") -} -func (UnimplementedSeaweedMessagingServer) SubscribeFollowMe(grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse]) error { - return status.Errorf(codes.Unimplemented, "method SubscribeFollowMe not implemented") -} -func (UnimplementedSeaweedMessagingServer) FetchMessage(context.Context, *FetchMessageRequest) (*FetchMessageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FetchMessage not implemented") -} -func (UnimplementedSeaweedMessagingServer) GetUnflushedMessages(*GetUnflushedMessagesRequest, grpc.ServerStreamingServer[GetUnflushedMessagesResponse]) error { - return status.Errorf(codes.Unimplemented, "method GetUnflushedMessages not implemented") -} -func (UnimplementedSeaweedMessagingServer) GetPartitionRangeInfo(context.Context, *GetPartitionRangeInfoRequest) (*GetPartitionRangeInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPartitionRangeInfo not implemented") -} -func (UnimplementedSeaweedMessagingServer) mustEmbedUnimplementedSeaweedMessagingServer() {} -func (UnimplementedSeaweedMessagingServer) testEmbeddedByValue() {} - -// UnsafeSeaweedMessagingServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SeaweedMessagingServer will -// result in compilation errors. -type UnsafeSeaweedMessagingServer interface { - mustEmbedUnimplementedSeaweedMessagingServer() -} - -func RegisterSeaweedMessagingServer(s grpc.ServiceRegistrar, srv SeaweedMessagingServer) { - // If the following call pancis, it indicates UnimplementedSeaweedMessagingServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SeaweedMessaging_ServiceDesc, srv) -} - -func _SeaweedMessaging_FindBrokerLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FindBrokerLeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).FindBrokerLeader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_FindBrokerLeader_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).FindBrokerLeader(ctx, req.(*FindBrokerLeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_PublisherToPubBalancer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).PublisherToPubBalancer(&grpc.GenericServerStream[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublisherToPubBalancerServer = grpc.BidiStreamingServer[PublisherToPubBalancerRequest, PublisherToPubBalancerResponse] - -func _SeaweedMessaging_BalanceTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BalanceTopicsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).BalanceTopics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_BalanceTopics_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).BalanceTopics(ctx, req.(*BalanceTopicsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTopicsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).ListTopics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_ListTopics_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).ListTopics(ctx, req.(*ListTopicsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_TopicExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TopicExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).TopicExists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_TopicExists_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).TopicExists(ctx, req.(*TopicExistsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ConfigureTopicRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_ConfigureTopic_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_LookupTopicBrokers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LookupTopicBrokersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).LookupTopicBrokers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_LookupTopicBrokers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).LookupTopicBrokers(ctx, req.(*LookupTopicBrokersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTopicConfigurationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_GetTopicConfiguration_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_GetTopicPublishers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTopicPublishersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).GetTopicPublishers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_GetTopicPublishers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).GetTopicPublishers(ctx, req.(*GetTopicPublishersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_GetTopicSubscribers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTopicSubscribersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).GetTopicSubscribers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_GetTopicSubscribers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).GetTopicSubscribers(ctx, req.(*GetTopicSubscribersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_AssignTopicPartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AssignTopicPartitionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).AssignTopicPartitions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_AssignTopicPartitions_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).AssignTopicPartitions(ctx, req.(*AssignTopicPartitionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_ClosePublishers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ClosePublishersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).ClosePublishers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_ClosePublishers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).ClosePublishers(ctx, req.(*ClosePublishersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_CloseSubscribers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CloseSubscribersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).CloseSubscribers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_CloseSubscribers_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).CloseSubscribers(ctx, req.(*CloseSubscribersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_SubscriberToSubCoordinator_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).SubscriberToSubCoordinator(&grpc.GenericServerStream[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscriberToSubCoordinatorServer = grpc.BidiStreamingServer[SubscriberToSubCoordinatorRequest, SubscriberToSubCoordinatorResponse] - -func _SeaweedMessaging_PublishMessage_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).PublishMessage(&grpc.GenericServerStream[PublishMessageRequest, PublishMessageResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublishMessageServer = grpc.BidiStreamingServer[PublishMessageRequest, PublishMessageResponse] - -func _SeaweedMessaging_SubscribeMessage_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).SubscribeMessage(&grpc.GenericServerStream[SubscribeMessageRequest, SubscribeMessageResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscribeMessageServer = grpc.BidiStreamingServer[SubscribeMessageRequest, SubscribeMessageResponse] - -func _SeaweedMessaging_PublishFollowMe_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).PublishFollowMe(&grpc.GenericServerStream[PublishFollowMeRequest, PublishFollowMeResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_PublishFollowMeServer = grpc.BidiStreamingServer[PublishFollowMeRequest, PublishFollowMeResponse] - -func _SeaweedMessaging_SubscribeFollowMe_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedMessagingServer).SubscribeFollowMe(&grpc.GenericServerStream[SubscribeFollowMeRequest, SubscribeFollowMeResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_SubscribeFollowMeServer = grpc.ClientStreamingServer[SubscribeFollowMeRequest, SubscribeFollowMeResponse] - -func _SeaweedMessaging_FetchMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FetchMessageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).FetchMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_FetchMessage_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).FetchMessage(ctx, req.(*FetchMessageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SeaweedMessaging_GetUnflushedMessages_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetUnflushedMessagesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(SeaweedMessagingServer).GetUnflushedMessages(m, &grpc.GenericServerStream[GetUnflushedMessagesRequest, GetUnflushedMessagesResponse]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type SeaweedMessaging_GetUnflushedMessagesServer = grpc.ServerStreamingServer[GetUnflushedMessagesResponse] - -func _SeaweedMessaging_GetPartitionRangeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPartitionRangeInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedMessagingServer).GetPartitionRangeInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SeaweedMessaging_GetPartitionRangeInfo_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedMessagingServer).GetPartitionRangeInfo(ctx, req.(*GetPartitionRangeInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// SeaweedMessaging_ServiceDesc is the grpc.ServiceDesc for SeaweedMessaging service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SeaweedMessaging_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "messaging_pb.SeaweedMessaging", - HandlerType: (*SeaweedMessagingServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "FindBrokerLeader", - Handler: _SeaweedMessaging_FindBrokerLeader_Handler, - }, - { - MethodName: "BalanceTopics", - Handler: _SeaweedMessaging_BalanceTopics_Handler, - }, - { - MethodName: "ListTopics", - Handler: _SeaweedMessaging_ListTopics_Handler, - }, - { - MethodName: "TopicExists", - Handler: _SeaweedMessaging_TopicExists_Handler, - }, - { - MethodName: "ConfigureTopic", - Handler: _SeaweedMessaging_ConfigureTopic_Handler, - }, - { - MethodName: "LookupTopicBrokers", - Handler: _SeaweedMessaging_LookupTopicBrokers_Handler, - }, - { - MethodName: "GetTopicConfiguration", - Handler: _SeaweedMessaging_GetTopicConfiguration_Handler, - }, - { - MethodName: "GetTopicPublishers", - Handler: _SeaweedMessaging_GetTopicPublishers_Handler, - }, - { - MethodName: "GetTopicSubscribers", - Handler: _SeaweedMessaging_GetTopicSubscribers_Handler, - }, - { - MethodName: "AssignTopicPartitions", - Handler: _SeaweedMessaging_AssignTopicPartitions_Handler, - }, - { - MethodName: "ClosePublishers", - Handler: _SeaweedMessaging_ClosePublishers_Handler, - }, - { - MethodName: "CloseSubscribers", - Handler: _SeaweedMessaging_CloseSubscribers_Handler, - }, - { - MethodName: "FetchMessage", - Handler: _SeaweedMessaging_FetchMessage_Handler, - }, - { - MethodName: "GetPartitionRangeInfo", - Handler: _SeaweedMessaging_GetPartitionRangeInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "PublisherToPubBalancer", - Handler: _SeaweedMessaging_PublisherToPubBalancer_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SubscriberToSubCoordinator", - Handler: _SeaweedMessaging_SubscriberToSubCoordinator_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "PublishMessage", - Handler: _SeaweedMessaging_PublishMessage_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SubscribeMessage", - Handler: _SeaweedMessaging_SubscribeMessage_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "PublishFollowMe", - Handler: _SeaweedMessaging_PublishFollowMe_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "SubscribeFollowMe", - Handler: _SeaweedMessaging_SubscribeFollowMe_Handler, - ClientStreams: true, - }, - { - StreamName: "GetUnflushedMessages", - Handler: _SeaweedMessaging_GetUnflushedMessages_Handler, - ServerStreams: true, - }, - }, - Metadata: "mq_broker.proto", -} diff --git a/weed/pb/mq_schema.proto b/weed/pb/mq_schema.proto deleted file mode 100644 index 81b523bcd..000000000 --- a/weed/pb/mq_schema.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; - -package schema_pb; - -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"; - -/////////////////////////// -// Topic definition -/////////////////////////// - -message Topic { - string namespace = 1; - string name = 2; -} -message Partition { - int32 ring_size = 1; - int32 range_start = 2; - int32 range_stop = 3; - int64 unix_time_ns = 4; -} - -message Offset { - Topic topic = 1; - repeated PartitionOffset partition_offsets = 2; -} - -enum OffsetType { - RESUME_OR_EARLIEST = 0; - RESET_TO_EARLIEST = 5; - EXACT_TS_NS = 10; - RESET_TO_LATEST = 15; - RESUME_OR_LATEST = 20; - // Offset-based positioning - EXACT_OFFSET = 25; - RESET_TO_OFFSET = 30; -} - -message PartitionOffset { - Partition partition = 1; - int64 start_ts_ns = 2; - int64 start_offset = 3; // For offset-based positioning -} - -/////////////////////////// -// schema definition -/////////////////////////// - -message RecordType { - repeated Field fields = 1; -} - -message Field { - string name = 1; - int32 field_index = 2; - Type type = 3; - bool is_repeated = 4; - bool is_required = 5; -} - -message Type { - oneof kind { - ScalarType scalar_type = 1; - RecordType record_type = 2; - ListType list_type = 3; - } -} - -enum ScalarType { - BOOL = 0; - INT32 = 1; - INT64 = 3; - FLOAT = 4; - DOUBLE = 5; - BYTES = 6; - STRING = 7; - // Parquet logical types for analytics - TIMESTAMP = 8; // UTC timestamp (microseconds since epoch) - DATE = 9; // Date (days since epoch) - DECIMAL = 10; // Arbitrary precision decimal - TIME = 11; // Time of day (microseconds) -} - -message ListType { - Type element_type = 1; -} - -/////////////////////////// -// value definition -/////////////////////////// -message RecordValue { - map fields = 1; -} -message Value { - oneof kind { - bool bool_value = 1; - int32 int32_value = 2; - int64 int64_value = 3; - float float_value = 4; - double double_value = 5; - bytes bytes_value = 6; - string string_value = 7; - // Parquet logical type values - TimestampValue timestamp_value = 8; - DateValue date_value = 9; - DecimalValue decimal_value = 10; - TimeValue time_value = 11; - // Complex types - ListValue list_value = 14; - RecordValue record_value = 15; - } -} -// Parquet logical type value messages -message TimestampValue { - int64 timestamp_micros = 1; // Microseconds since Unix epoch (UTC) - bool is_utc = 2; // True if UTC, false if local time -} - -message DateValue { - int32 days_since_epoch = 1; // Days since Unix epoch (1970-01-01) -} - -message DecimalValue { - bytes value = 1; // Arbitrary precision decimal as bytes - int32 precision = 2; // Total number of digits - int32 scale = 3; // Number of digits after decimal point -} - -message TimeValue { - int64 time_micros = 1; // Microseconds since midnight -} - -message ListValue { - repeated Value values = 1; -} diff --git a/weed/pb/proto_read_write_test.go b/weed/pb/proto_read_write_test.go index 06dc136b0..d6b2faaef 100644 --- a/weed/pb/proto_read_write_test.go +++ b/weed/pb/proto_read_write_test.go @@ -4,8 +4,8 @@ import ( "fmt" "testing" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - jsonpb "google.golang.org/protobuf/encoding/protojson" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/golang/protobuf/jsonpb" ) func TestJsonpMarshalUnmarshal(t *testing.T) { @@ -16,15 +16,15 @@ func TestJsonpMarshalUnmarshal(t *testing.T) { FileSize: 12, } - m := jsonpb.MarshalOptions{ - EmitUnpopulated: true, - Indent: " ", + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", } - if text, err := m.Marshal(tv); err != nil { + if text, err := m.MarshalToString(tv); err != nil { fmt.Printf("marshal eror: %v\n", err) } else { - fmt.Printf("marshalled: %s\n", string(text)) + fmt.Printf("marshalled: %s\n", text) } rawJson := `{ @@ -34,7 +34,7 @@ func TestJsonpMarshalUnmarshal(t *testing.T) { }` tv1 := &volume_server_pb.RemoteFile{} - if err := jsonpb.Unmarshal([]byte(rawJson), tv1); err != nil { + if err := jsonpb.UnmarshalString(rawJson, tv1); err != nil { fmt.Printf("unmarshal error: %v\n", err) } diff --git a/weed/pb/remote.proto b/weed/pb/remote.proto index 9d6d81ff5..13f7a878b 100644 --- a/weed/pb/remote.proto +++ b/weed/pb/remote.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package remote_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/remote_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -18,7 +18,6 @@ message RemoteConf { string s3_endpoint = 7; string s3_storage_class = 8; bool s3_force_path_style = 9; - bool s3_support_tagging = 13; bool s3_v4_signature = 11; string gcs_google_application_credentials = 10; @@ -30,7 +29,6 @@ message RemoteConf { string backblaze_key_id = 20; string backblaze_application_key = 21; string backblaze_endpoint = 22; - string backblaze_region = 23; string aliyun_access_key = 25; string aliyun_secret_key = 26; diff --git a/weed/pb/remote_pb/remote.pb.go b/weed/pb/remote_pb/remote.pb.go index 8ca391d3d..f746333ff 100644 --- a/weed/pb/remote_pb/remote.pb.go +++ b/weed/pb/remote_pb/remote.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: remote.proto package remote_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -21,63 +20,68 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// /////////////////////// +///////////////////////// // Remote Storage related -// /////////////////////// +///////////////////////// type RemoteConf struct { - state protoimpl.MessageState `protogen:"open.v1"` - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"` - S3SecretKey string `protobuf:"bytes,5,opt,name=s3_secret_key,json=s3SecretKey,proto3" json:"s3_secret_key,omitempty"` - S3Region string `protobuf:"bytes,6,opt,name=s3_region,json=s3Region,proto3" json:"s3_region,omitempty"` - S3Endpoint string `protobuf:"bytes,7,opt,name=s3_endpoint,json=s3Endpoint,proto3" json:"s3_endpoint,omitempty"` - S3StorageClass string `protobuf:"bytes,8,opt,name=s3_storage_class,json=s3StorageClass,proto3" json:"s3_storage_class,omitempty"` - S3ForcePathStyle bool `protobuf:"varint,9,opt,name=s3_force_path_style,json=s3ForcePathStyle,proto3" json:"s3_force_path_style,omitempty"` - S3SupportTagging bool `protobuf:"varint,13,opt,name=s3_support_tagging,json=s3SupportTagging,proto3" json:"s3_support_tagging,omitempty"` - S3V4Signature bool `protobuf:"varint,11,opt,name=s3_v4_signature,json=s3V4Signature,proto3" json:"s3_v4_signature,omitempty"` - GcsGoogleApplicationCredentials string `protobuf:"bytes,10,opt,name=gcs_google_application_credentials,json=gcsGoogleApplicationCredentials,proto3" json:"gcs_google_application_credentials,omitempty"` - GcsProjectId string `protobuf:"bytes,12,opt,name=gcs_project_id,json=gcsProjectId,proto3" json:"gcs_project_id,omitempty"` - AzureAccountName string `protobuf:"bytes,15,opt,name=azure_account_name,json=azureAccountName,proto3" json:"azure_account_name,omitempty"` - AzureAccountKey string `protobuf:"bytes,16,opt,name=azure_account_key,json=azureAccountKey,proto3" json:"azure_account_key,omitempty"` - BackblazeKeyId string `protobuf:"bytes,20,opt,name=backblaze_key_id,json=backblazeKeyId,proto3" json:"backblaze_key_id,omitempty"` - BackblazeApplicationKey string `protobuf:"bytes,21,opt,name=backblaze_application_key,json=backblazeApplicationKey,proto3" json:"backblaze_application_key,omitempty"` - BackblazeEndpoint string `protobuf:"bytes,22,opt,name=backblaze_endpoint,json=backblazeEndpoint,proto3" json:"backblaze_endpoint,omitempty"` - BackblazeRegion string `protobuf:"bytes,23,opt,name=backblaze_region,json=backblazeRegion,proto3" json:"backblaze_region,omitempty"` - AliyunAccessKey string `protobuf:"bytes,25,opt,name=aliyun_access_key,json=aliyunAccessKey,proto3" json:"aliyun_access_key,omitempty"` - AliyunSecretKey string `protobuf:"bytes,26,opt,name=aliyun_secret_key,json=aliyunSecretKey,proto3" json:"aliyun_secret_key,omitempty"` - AliyunEndpoint string `protobuf:"bytes,27,opt,name=aliyun_endpoint,json=aliyunEndpoint,proto3" json:"aliyun_endpoint,omitempty"` - AliyunRegion string `protobuf:"bytes,28,opt,name=aliyun_region,json=aliyunRegion,proto3" json:"aliyun_region,omitempty"` - TencentSecretId string `protobuf:"bytes,30,opt,name=tencent_secret_id,json=tencentSecretId,proto3" json:"tencent_secret_id,omitempty"` - TencentSecretKey string `protobuf:"bytes,31,opt,name=tencent_secret_key,json=tencentSecretKey,proto3" json:"tencent_secret_key,omitempty"` - TencentEndpoint string `protobuf:"bytes,32,opt,name=tencent_endpoint,json=tencentEndpoint,proto3" json:"tencent_endpoint,omitempty"` - BaiduAccessKey string `protobuf:"bytes,35,opt,name=baidu_access_key,json=baiduAccessKey,proto3" json:"baidu_access_key,omitempty"` - BaiduSecretKey string `protobuf:"bytes,36,opt,name=baidu_secret_key,json=baiduSecretKey,proto3" json:"baidu_secret_key,omitempty"` - BaiduEndpoint string `protobuf:"bytes,37,opt,name=baidu_endpoint,json=baiduEndpoint,proto3" json:"baidu_endpoint,omitempty"` - BaiduRegion string `protobuf:"bytes,38,opt,name=baidu_region,json=baiduRegion,proto3" json:"baidu_region,omitempty"` - WasabiAccessKey string `protobuf:"bytes,40,opt,name=wasabi_access_key,json=wasabiAccessKey,proto3" json:"wasabi_access_key,omitempty"` - WasabiSecretKey string `protobuf:"bytes,41,opt,name=wasabi_secret_key,json=wasabiSecretKey,proto3" json:"wasabi_secret_key,omitempty"` - WasabiEndpoint string `protobuf:"bytes,42,opt,name=wasabi_endpoint,json=wasabiEndpoint,proto3" json:"wasabi_endpoint,omitempty"` - WasabiRegion string `protobuf:"bytes,43,opt,name=wasabi_region,json=wasabiRegion,proto3" json:"wasabi_region,omitempty"` - FilebaseAccessKey string `protobuf:"bytes,60,opt,name=filebase_access_key,json=filebaseAccessKey,proto3" json:"filebase_access_key,omitempty"` - FilebaseSecretKey string `protobuf:"bytes,61,opt,name=filebase_secret_key,json=filebaseSecretKey,proto3" json:"filebase_secret_key,omitempty"` - FilebaseEndpoint string `protobuf:"bytes,62,opt,name=filebase_endpoint,json=filebaseEndpoint,proto3" json:"filebase_endpoint,omitempty"` - StorjAccessKey string `protobuf:"bytes,65,opt,name=storj_access_key,json=storjAccessKey,proto3" json:"storj_access_key,omitempty"` - StorjSecretKey string `protobuf:"bytes,66,opt,name=storj_secret_key,json=storjSecretKey,proto3" json:"storj_secret_key,omitempty"` - StorjEndpoint string `protobuf:"bytes,67,opt,name=storj_endpoint,json=storjEndpoint,proto3" json:"storj_endpoint,omitempty"` - ContaboAccessKey string `protobuf:"bytes,68,opt,name=contabo_access_key,json=contaboAccessKey,proto3" json:"contabo_access_key,omitempty"` - ContaboSecretKey string `protobuf:"bytes,69,opt,name=contabo_secret_key,json=contaboSecretKey,proto3" json:"contabo_secret_key,omitempty"` - ContaboEndpoint string `protobuf:"bytes,70,opt,name=contabo_endpoint,json=contaboEndpoint,proto3" json:"contabo_endpoint,omitempty"` - ContaboRegion string `protobuf:"bytes,71,opt,name=contabo_region,json=contaboRegion,proto3" json:"contabo_region,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"` + S3SecretKey string `protobuf:"bytes,5,opt,name=s3_secret_key,json=s3SecretKey,proto3" json:"s3_secret_key,omitempty"` + S3Region string `protobuf:"bytes,6,opt,name=s3_region,json=s3Region,proto3" json:"s3_region,omitempty"` + S3Endpoint string `protobuf:"bytes,7,opt,name=s3_endpoint,json=s3Endpoint,proto3" json:"s3_endpoint,omitempty"` + S3StorageClass string `protobuf:"bytes,8,opt,name=s3_storage_class,json=s3StorageClass,proto3" json:"s3_storage_class,omitempty"` + S3ForcePathStyle bool `protobuf:"varint,9,opt,name=s3_force_path_style,json=s3ForcePathStyle,proto3" json:"s3_force_path_style,omitempty"` + S3V4Signature bool `protobuf:"varint,11,opt,name=s3_v4_signature,json=s3V4Signature,proto3" json:"s3_v4_signature,omitempty"` + GcsGoogleApplicationCredentials string `protobuf:"bytes,10,opt,name=gcs_google_application_credentials,json=gcsGoogleApplicationCredentials,proto3" json:"gcs_google_application_credentials,omitempty"` + GcsProjectId string `protobuf:"bytes,12,opt,name=gcs_project_id,json=gcsProjectId,proto3" json:"gcs_project_id,omitempty"` + AzureAccountName string `protobuf:"bytes,15,opt,name=azure_account_name,json=azureAccountName,proto3" json:"azure_account_name,omitempty"` + AzureAccountKey string `protobuf:"bytes,16,opt,name=azure_account_key,json=azureAccountKey,proto3" json:"azure_account_key,omitempty"` + BackblazeKeyId string `protobuf:"bytes,20,opt,name=backblaze_key_id,json=backblazeKeyId,proto3" json:"backblaze_key_id,omitempty"` + BackblazeApplicationKey string `protobuf:"bytes,21,opt,name=backblaze_application_key,json=backblazeApplicationKey,proto3" json:"backblaze_application_key,omitempty"` + BackblazeEndpoint string `protobuf:"bytes,22,opt,name=backblaze_endpoint,json=backblazeEndpoint,proto3" json:"backblaze_endpoint,omitempty"` + AliyunAccessKey string `protobuf:"bytes,25,opt,name=aliyun_access_key,json=aliyunAccessKey,proto3" json:"aliyun_access_key,omitempty"` + AliyunSecretKey string `protobuf:"bytes,26,opt,name=aliyun_secret_key,json=aliyunSecretKey,proto3" json:"aliyun_secret_key,omitempty"` + AliyunEndpoint string `protobuf:"bytes,27,opt,name=aliyun_endpoint,json=aliyunEndpoint,proto3" json:"aliyun_endpoint,omitempty"` + AliyunRegion string `protobuf:"bytes,28,opt,name=aliyun_region,json=aliyunRegion,proto3" json:"aliyun_region,omitempty"` + TencentSecretId string `protobuf:"bytes,30,opt,name=tencent_secret_id,json=tencentSecretId,proto3" json:"tencent_secret_id,omitempty"` + TencentSecretKey string `protobuf:"bytes,31,opt,name=tencent_secret_key,json=tencentSecretKey,proto3" json:"tencent_secret_key,omitempty"` + TencentEndpoint string `protobuf:"bytes,32,opt,name=tencent_endpoint,json=tencentEndpoint,proto3" json:"tencent_endpoint,omitempty"` + BaiduAccessKey string `protobuf:"bytes,35,opt,name=baidu_access_key,json=baiduAccessKey,proto3" json:"baidu_access_key,omitempty"` + BaiduSecretKey string `protobuf:"bytes,36,opt,name=baidu_secret_key,json=baiduSecretKey,proto3" json:"baidu_secret_key,omitempty"` + BaiduEndpoint string `protobuf:"bytes,37,opt,name=baidu_endpoint,json=baiduEndpoint,proto3" json:"baidu_endpoint,omitempty"` + BaiduRegion string `protobuf:"bytes,38,opt,name=baidu_region,json=baiduRegion,proto3" json:"baidu_region,omitempty"` + WasabiAccessKey string `protobuf:"bytes,40,opt,name=wasabi_access_key,json=wasabiAccessKey,proto3" json:"wasabi_access_key,omitempty"` + WasabiSecretKey string `protobuf:"bytes,41,opt,name=wasabi_secret_key,json=wasabiSecretKey,proto3" json:"wasabi_secret_key,omitempty"` + WasabiEndpoint string `protobuf:"bytes,42,opt,name=wasabi_endpoint,json=wasabiEndpoint,proto3" json:"wasabi_endpoint,omitempty"` + WasabiRegion string `protobuf:"bytes,43,opt,name=wasabi_region,json=wasabiRegion,proto3" json:"wasabi_region,omitempty"` + HdfsNamenodes []string `protobuf:"bytes,50,rep,name=hdfs_namenodes,json=hdfsNamenodes,proto3" json:"hdfs_namenodes,omitempty"` + HdfsUsername string `protobuf:"bytes,51,opt,name=hdfs_username,json=hdfsUsername,proto3" json:"hdfs_username,omitempty"` + HdfsServicePrincipalName string `protobuf:"bytes,52,opt,name=hdfs_service_principal_name,json=hdfsServicePrincipalName,proto3" json:"hdfs_service_principal_name,omitempty"` + HdfsDataTransferProtection string `protobuf:"bytes,53,opt,name=hdfs_data_transfer_protection,json=hdfsDataTransferProtection,proto3" json:"hdfs_data_transfer_protection,omitempty"` + FilebaseAccessKey string `protobuf:"bytes,60,opt,name=filebase_access_key,json=filebaseAccessKey,proto3" json:"filebase_access_key,omitempty"` + FilebaseSecretKey string `protobuf:"bytes,61,opt,name=filebase_secret_key,json=filebaseSecretKey,proto3" json:"filebase_secret_key,omitempty"` + FilebaseEndpoint string `protobuf:"bytes,62,opt,name=filebase_endpoint,json=filebaseEndpoint,proto3" json:"filebase_endpoint,omitempty"` + StorjAccessKey string `protobuf:"bytes,65,opt,name=storj_access_key,json=storjAccessKey,proto3" json:"storj_access_key,omitempty"` + StorjSecretKey string `protobuf:"bytes,66,opt,name=storj_secret_key,json=storjSecretKey,proto3" json:"storj_secret_key,omitempty"` + StorjEndpoint string `protobuf:"bytes,67,opt,name=storj_endpoint,json=storjEndpoint,proto3" json:"storj_endpoint,omitempty"` + ContaboAccessKey string `protobuf:"bytes,68,opt,name=contabo_access_key,json=contaboAccessKey,proto3" json:"contabo_access_key,omitempty"` + ContaboSecretKey string `protobuf:"bytes,69,opt,name=contabo_secret_key,json=contaboSecretKey,proto3" json:"contabo_secret_key,omitempty"` + ContaboEndpoint string `protobuf:"bytes,70,opt,name=contabo_endpoint,json=contaboEndpoint,proto3" json:"contabo_endpoint,omitempty"` + ContaboRegion string `protobuf:"bytes,71,opt,name=contabo_region,json=contaboRegion,proto3" json:"contabo_region,omitempty"` } func (x *RemoteConf) Reset() { *x = RemoteConf{} - mi := &file_remote_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_remote_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RemoteConf) String() string { @@ -88,7 +92,7 @@ func (*RemoteConf) ProtoMessage() {} func (x *RemoteConf) ProtoReflect() protoreflect.Message { mi := &file_remote_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -159,13 +163,6 @@ func (x *RemoteConf) GetS3ForcePathStyle() bool { return false } -func (x *RemoteConf) GetS3SupportTagging() bool { - if x != nil { - return x.S3SupportTagging - } - return false -} - func (x *RemoteConf) GetS3V4Signature() bool { if x != nil { return x.S3V4Signature @@ -222,13 +219,6 @@ func (x *RemoteConf) GetBackblazeEndpoint() string { return "" } -func (x *RemoteConf) GetBackblazeRegion() string { - if x != nil { - return x.BackblazeRegion - } - return "" -} - func (x *RemoteConf) GetAliyunAccessKey() string { if x != nil { return x.AliyunAccessKey @@ -334,6 +324,34 @@ func (x *RemoteConf) GetWasabiRegion() string { return "" } +func (x *RemoteConf) GetHdfsNamenodes() []string { + if x != nil { + return x.HdfsNamenodes + } + return nil +} + +func (x *RemoteConf) GetHdfsUsername() string { + if x != nil { + return x.HdfsUsername + } + return "" +} + +func (x *RemoteConf) GetHdfsServicePrincipalName() string { + if x != nil { + return x.HdfsServicePrincipalName + } + return "" +} + +func (x *RemoteConf) GetHdfsDataTransferProtection() string { + if x != nil { + return x.HdfsDataTransferProtection + } + return "" +} + func (x *RemoteConf) GetFilebaseAccessKey() string { if x != nil { return x.FilebaseAccessKey @@ -405,18 +423,21 @@ func (x *RemoteConf) GetContaboRegion() string { } type RemoteStorageMapping struct { - state protoimpl.MessageState `protogen:"open.v1"` - Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` PrimaryBucketStorageName string `protobuf:"bytes,2,opt,name=primary_bucket_storage_name,json=primaryBucketStorageName,proto3" json:"primary_bucket_storage_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *RemoteStorageMapping) Reset() { *x = RemoteStorageMapping{} - mi := &file_remote_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_remote_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RemoteStorageMapping) String() string { @@ -427,7 +448,7 @@ func (*RemoteStorageMapping) ProtoMessage() {} func (x *RemoteStorageMapping) ProtoReflect() protoreflect.Message { mi := &file_remote_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -457,19 +478,22 @@ func (x *RemoteStorageMapping) GetPrimaryBucketStorageName() string { } type RemoteStorageLocation struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` } func (x *RemoteStorageLocation) Reset() { *x = RemoteStorageLocation{} - mi := &file_remote_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_remote_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RemoteStorageLocation) String() string { @@ -480,7 +504,7 @@ func (*RemoteStorageLocation) ProtoMessage() {} func (x *RemoteStorageLocation) ProtoReflect() protoreflect.Message { mi := &file_remote_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -518,83 +542,174 @@ func (x *RemoteStorageLocation) GetPath() string { var File_remote_proto protoreflect.FileDescriptor -const file_remote_proto_rawDesc = "" + - "\n" + - "\fremote.proto\x12\tremote_pb\"\x9b\x0e\n" + - "\n" + - "RemoteConf\x12\x12\n" + - "\x04type\x18\x01 \x01(\tR\x04type\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12\"\n" + - "\rs3_access_key\x18\x04 \x01(\tR\vs3AccessKey\x12\"\n" + - "\rs3_secret_key\x18\x05 \x01(\tR\vs3SecretKey\x12\x1b\n" + - "\ts3_region\x18\x06 \x01(\tR\bs3Region\x12\x1f\n" + - "\vs3_endpoint\x18\a \x01(\tR\n" + - "s3Endpoint\x12(\n" + - "\x10s3_storage_class\x18\b \x01(\tR\x0es3StorageClass\x12-\n" + - "\x13s3_force_path_style\x18\t \x01(\bR\x10s3ForcePathStyle\x12,\n" + - "\x12s3_support_tagging\x18\r \x01(\bR\x10s3SupportTagging\x12&\n" + - "\x0fs3_v4_signature\x18\v \x01(\bR\rs3V4Signature\x12K\n" + - "\"gcs_google_application_credentials\x18\n" + - " \x01(\tR\x1fgcsGoogleApplicationCredentials\x12$\n" + - "\x0egcs_project_id\x18\f \x01(\tR\fgcsProjectId\x12,\n" + - "\x12azure_account_name\x18\x0f \x01(\tR\x10azureAccountName\x12*\n" + - "\x11azure_account_key\x18\x10 \x01(\tR\x0fazureAccountKey\x12(\n" + - "\x10backblaze_key_id\x18\x14 \x01(\tR\x0ebackblazeKeyId\x12:\n" + - "\x19backblaze_application_key\x18\x15 \x01(\tR\x17backblazeApplicationKey\x12-\n" + - "\x12backblaze_endpoint\x18\x16 \x01(\tR\x11backblazeEndpoint\x12)\n" + - "\x10backblaze_region\x18\x17 \x01(\tR\x0fbackblazeRegion\x12*\n" + - "\x11aliyun_access_key\x18\x19 \x01(\tR\x0faliyunAccessKey\x12*\n" + - "\x11aliyun_secret_key\x18\x1a \x01(\tR\x0faliyunSecretKey\x12'\n" + - "\x0faliyun_endpoint\x18\x1b \x01(\tR\x0ealiyunEndpoint\x12#\n" + - "\raliyun_region\x18\x1c \x01(\tR\faliyunRegion\x12*\n" + - "\x11tencent_secret_id\x18\x1e \x01(\tR\x0ftencentSecretId\x12,\n" + - "\x12tencent_secret_key\x18\x1f \x01(\tR\x10tencentSecretKey\x12)\n" + - "\x10tencent_endpoint\x18 \x01(\tR\x0ftencentEndpoint\x12(\n" + - "\x10baidu_access_key\x18# \x01(\tR\x0ebaiduAccessKey\x12(\n" + - "\x10baidu_secret_key\x18$ \x01(\tR\x0ebaiduSecretKey\x12%\n" + - "\x0ebaidu_endpoint\x18% \x01(\tR\rbaiduEndpoint\x12!\n" + - "\fbaidu_region\x18& \x01(\tR\vbaiduRegion\x12*\n" + - "\x11wasabi_access_key\x18( \x01(\tR\x0fwasabiAccessKey\x12*\n" + - "\x11wasabi_secret_key\x18) \x01(\tR\x0fwasabiSecretKey\x12'\n" + - "\x0fwasabi_endpoint\x18* \x01(\tR\x0ewasabiEndpoint\x12#\n" + - "\rwasabi_region\x18+ \x01(\tR\fwasabiRegion\x12.\n" + - "\x13filebase_access_key\x18< \x01(\tR\x11filebaseAccessKey\x12.\n" + - "\x13filebase_secret_key\x18= \x01(\tR\x11filebaseSecretKey\x12+\n" + - "\x11filebase_endpoint\x18> \x01(\tR\x10filebaseEndpoint\x12(\n" + - "\x10storj_access_key\x18A \x01(\tR\x0estorjAccessKey\x12(\n" + - "\x10storj_secret_key\x18B \x01(\tR\x0estorjSecretKey\x12%\n" + - "\x0estorj_endpoint\x18C \x01(\tR\rstorjEndpoint\x12,\n" + - "\x12contabo_access_key\x18D \x01(\tR\x10contaboAccessKey\x12,\n" + - "\x12contabo_secret_key\x18E \x01(\tR\x10contaboSecretKey\x12)\n" + - "\x10contabo_endpoint\x18F \x01(\tR\x0fcontaboEndpoint\x12%\n" + - "\x0econtabo_region\x18G \x01(\tR\rcontaboRegion\"\xff\x01\n" + - "\x14RemoteStorageMapping\x12I\n" + - "\bmappings\x18\x01 \x03(\v2-.remote_pb.RemoteStorageMapping.MappingsEntryR\bmappings\x12=\n" + - "\x1bprimary_bucket_storage_name\x18\x02 \x01(\tR\x18primaryBucketStorageName\x1a]\n" + - "\rMappingsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x126\n" + - "\x05value\x18\x02 \x01(\v2 .remote_pb.RemoteStorageLocationR\x05value:\x028\x01\"W\n" + - "\x15RemoteStorageLocation\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + - "\x06bucket\x18\x02 \x01(\tR\x06bucket\x12\x12\n" + - "\x04path\x18\x03 \x01(\tR\x04pathBP\n" + - "\x10seaweedfs.clientB\n" + - "FilerProtoZ0github.com/seaweedfs/seaweedfs/weed/pb/remote_pbb\x06proto3" +var file_remote_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x22, 0x90, 0x0f, 0x0a, 0x0a, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x72, + 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x33, 0x52, + 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x33, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x33, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x33, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x33, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x2d, 0x0a, 0x13, 0x73, 0x33, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x5f, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, + 0x33, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x33, 0x5f, 0x76, 0x34, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x33, 0x56, 0x34, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4b, 0x0a, 0x22, 0x67, 0x63, 0x73, 0x5f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x1f, 0x67, 0x63, 0x73, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x67, 0x63, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x63, + 0x73, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x61, 0x7a, + 0x75, 0x72, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x7a, 0x75, 0x72, + 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x3a, + 0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x17, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, + 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, + 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x69, + 0x79, 0x75, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x6c, 0x69, 0x79, + 0x75, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, + 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, + 0x2a, 0x0a, 0x11, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6e, 0x63, + 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x74, + 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x65, 0x6e, + 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x20, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x62, 0x61, 0x69, 0x64, 0x75, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x28, + 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x69, 0x64, + 0x75, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x62, 0x61, 0x69, 0x64, 0x75, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, + 0x26, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x69, 0x64, 0x75, 0x52, 0x65, 0x67, 0x69, + 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, + 0x61, 0x73, 0x61, 0x62, 0x69, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, + 0x69, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x61, + 0x73, 0x61, 0x62, 0x69, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x72, 0x65, + 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x73, 0x61, + 0x62, 0x69, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x68, 0x64, 0x66, 0x73, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x32, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x68, 0x64, 0x66, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x33, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x64, 0x66, 0x73, 0x55, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x34, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x68, 0x64, 0x66, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x1d, 0x68, 0x64, 0x66, 0x73, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x68, 0x64, 0x66, 0x73, + 0x44, 0x61, 0x74, 0x61, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, + 0x73, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x3c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, + 0x73, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x3d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, + 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x3e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x41, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x74, 0x6f, 0x72, 0x6a, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, + 0x10, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x42, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x6a, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x43, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x44, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x62, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x12, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x45, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, + 0x6f, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x46, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x45, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, + 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x47, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x22, 0xff, 0x01, 0x0a, + 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3d, 0x0a, 0x1b, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x5d, 0x0a, 0x0d, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, + 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x50, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} var ( file_remote_proto_rawDescOnce sync.Once - file_remote_proto_rawDescData []byte + file_remote_proto_rawDescData = file_remote_proto_rawDesc ) func file_remote_proto_rawDescGZIP() []byte { file_remote_proto_rawDescOnce.Do(func() { - file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_remote_proto_rawDesc), len(file_remote_proto_rawDesc))) + file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_proto_rawDescData) }) return file_remote_proto_rawDescData } var file_remote_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_remote_proto_goTypes = []any{ +var file_remote_proto_goTypes = []interface{}{ (*RemoteConf)(nil), // 0: remote_pb.RemoteConf (*RemoteStorageMapping)(nil), // 1: remote_pb.RemoteStorageMapping (*RemoteStorageLocation)(nil), // 2: remote_pb.RemoteStorageLocation @@ -615,11 +730,49 @@ func file_remote_proto_init() { if File_remote_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_remote_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_remote_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteStorageMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_remote_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteStorageLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_remote_proto_rawDesc), len(file_remote_proto_rawDesc)), + RawDescriptor: file_remote_proto_rawDesc, NumEnums: 0, NumMessages: 4, NumExtensions: 0, @@ -630,6 +783,7 @@ func file_remote_proto_init() { MessageInfos: file_remote_proto_msgTypes, }.Build() File_remote_proto = out.File + file_remote_proto_rawDesc = nil file_remote_proto_goTypes = nil file_remote_proto_depIdxs = nil } diff --git a/weed/pb/remote_pb/remote_pb_helper.go b/weed/pb/remote_pb/remote_pb_helper.go index 665239f1a..03a635e60 100644 --- a/weed/pb/remote_pb/remote_pb_helper.go +++ b/weed/pb/remote_pb/remote_pb_helper.go @@ -1,6 +1,6 @@ package remote_pb -import "google.golang.org/protobuf/proto" +import "github.com/golang/protobuf/proto" func (fp *RemoteStorageLocation) Key() interface{} { key, _ := proto.Marshal(fp) diff --git a/weed/pb/s3.proto b/weed/pb/s3.proto index 12f2dc356..45a877fac 100644 --- a/weed/pb/s3.proto +++ b/weed/pb/s3.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package messaging_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/s3_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "S3Proto"; @@ -33,31 +33,3 @@ message S3CircuitBreakerOptions { bool enabled=1; map actions = 2; } - -////////////////////////////////////////////////// -// Bucket Metadata - -message CORSRule { - repeated string allowed_headers = 1; - repeated string allowed_methods = 2; - repeated string allowed_origins = 3; - repeated string expose_headers = 4; - int32 max_age_seconds = 5; - string id = 6; -} - -message CORSConfiguration { - repeated CORSRule cors_rules = 1; -} - -message BucketMetadata { - map tags = 1; - CORSConfiguration cors = 2; - EncryptionConfiguration encryption = 3; -} - -message EncryptionConfiguration { - string sse_algorithm = 1; // "AES256" or "aws:kms" - string kms_key_id = 2; // KMS key ID (optional for aws:kms) - bool bucket_key_enabled = 3; // S3 Bucket Keys optimization -} diff --git a/weed/pb/s3_pb/s3.pb.go b/weed/pb/s3_pb/s3.pb.go index 31b6c8e2e..c1bd23556 100644 --- a/weed/pb/s3_pb/s3.pb.go +++ b/weed/pb/s3_pb/s3.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.28.0 +// protoc v3.21.1 // source: s3.proto package s3_pb @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,17 +21,20 @@ const ( ) type S3ConfigureRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - S3ConfigurationFileContent []byte `protobuf:"bytes,1,opt,name=s3_configuration_file_content,json=s3ConfigurationFileContent,proto3" json:"s3_configuration_file_content,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + S3ConfigurationFileContent []byte `protobuf:"bytes,1,opt,name=s3_configuration_file_content,json=s3ConfigurationFileContent,proto3" json:"s3_configuration_file_content,omitempty"` } func (x *S3ConfigureRequest) Reset() { *x = S3ConfigureRequest{} - mi := &file_s3_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *S3ConfigureRequest) String() string { @@ -43,7 +45,7 @@ func (*S3ConfigureRequest) ProtoMessage() {} func (x *S3ConfigureRequest) ProtoReflect() protoreflect.Message { mi := &file_s3_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,16 +68,18 @@ func (x *S3ConfigureRequest) GetS3ConfigurationFileContent() []byte { } type S3ConfigureResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *S3ConfigureResponse) Reset() { *x = S3ConfigureResponse{} - mi := &file_s3_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *S3ConfigureResponse) String() string { @@ -86,7 +90,7 @@ func (*S3ConfigureResponse) ProtoMessage() {} func (x *S3ConfigureResponse) ProtoReflect() protoreflect.Message { mi := &file_s3_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -102,18 +106,21 @@ func (*S3ConfigureResponse) Descriptor() ([]byte, []int) { } type S3CircuitBreakerConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Global *S3CircuitBreakerOptions `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"` - Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Global *S3CircuitBreakerOptions `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"` + Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *S3CircuitBreakerConfig) Reset() { *x = S3CircuitBreakerConfig{} - mi := &file_s3_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *S3CircuitBreakerConfig) String() string { @@ -124,7 +131,7 @@ func (*S3CircuitBreakerConfig) ProtoMessage() {} func (x *S3CircuitBreakerConfig) ProtoReflect() protoreflect.Message { mi := &file_s3_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -154,18 +161,21 @@ func (x *S3CircuitBreakerConfig) GetBuckets() map[string]*S3CircuitBreakerOption } type S3CircuitBreakerOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } func (x *S3CircuitBreakerOptions) Reset() { *x = S3CircuitBreakerOptions{} - mi := &file_s3_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *S3CircuitBreakerOptions) String() string { @@ -176,7 +186,7 @@ func (*S3CircuitBreakerOptions) ProtoMessage() {} func (x *S3CircuitBreakerOptions) ProtoReflect() protoreflect.Message { mi := &file_s3_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -205,344 +215,93 @@ func (x *S3CircuitBreakerOptions) GetActions() map[string]int64 { return nil } -type CORSRule struct { - state protoimpl.MessageState `protogen:"open.v1"` - AllowedHeaders []string `protobuf:"bytes,1,rep,name=allowed_headers,json=allowedHeaders,proto3" json:"allowed_headers,omitempty"` - AllowedMethods []string `protobuf:"bytes,2,rep,name=allowed_methods,json=allowedMethods,proto3" json:"allowed_methods,omitempty"` - AllowedOrigins []string `protobuf:"bytes,3,rep,name=allowed_origins,json=allowedOrigins,proto3" json:"allowed_origins,omitempty"` - ExposeHeaders []string `protobuf:"bytes,4,rep,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` - MaxAgeSeconds int32 `protobuf:"varint,5,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` - Id string `protobuf:"bytes,6,opt,name=id,proto3" json:"id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CORSRule) Reset() { - *x = CORSRule{} - mi := &file_s3_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CORSRule) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CORSRule) ProtoMessage() {} - -func (x *CORSRule) ProtoReflect() protoreflect.Message { - mi := &file_s3_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CORSRule.ProtoReflect.Descriptor instead. -func (*CORSRule) Descriptor() ([]byte, []int) { - return file_s3_proto_rawDescGZIP(), []int{4} -} - -func (x *CORSRule) GetAllowedHeaders() []string { - if x != nil { - return x.AllowedHeaders - } - return nil -} - -func (x *CORSRule) GetAllowedMethods() []string { - if x != nil { - return x.AllowedMethods - } - return nil -} - -func (x *CORSRule) GetAllowedOrigins() []string { - if x != nil { - return x.AllowedOrigins - } - return nil -} - -func (x *CORSRule) GetExposeHeaders() []string { - if x != nil { - return x.ExposeHeaders - } - return nil -} - -func (x *CORSRule) GetMaxAgeSeconds() int32 { - if x != nil { - return x.MaxAgeSeconds - } - return 0 -} - -func (x *CORSRule) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -type CORSConfiguration struct { - state protoimpl.MessageState `protogen:"open.v1"` - CorsRules []*CORSRule `protobuf:"bytes,1,rep,name=cors_rules,json=corsRules,proto3" json:"cors_rules,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CORSConfiguration) Reset() { - *x = CORSConfiguration{} - mi := &file_s3_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CORSConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CORSConfiguration) ProtoMessage() {} - -func (x *CORSConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_s3_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CORSConfiguration.ProtoReflect.Descriptor instead. -func (*CORSConfiguration) Descriptor() ([]byte, []int) { - return file_s3_proto_rawDescGZIP(), []int{5} -} - -func (x *CORSConfiguration) GetCorsRules() []*CORSRule { - if x != nil { - return x.CorsRules - } - return nil -} - -type BucketMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - Tags map[string]string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Cors *CORSConfiguration `protobuf:"bytes,2,opt,name=cors,proto3" json:"cors,omitempty"` - Encryption *EncryptionConfiguration `protobuf:"bytes,3,opt,name=encryption,proto3" json:"encryption,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BucketMetadata) Reset() { - *x = BucketMetadata{} - mi := &file_s3_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BucketMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BucketMetadata) ProtoMessage() {} - -func (x *BucketMetadata) ProtoReflect() protoreflect.Message { - mi := &file_s3_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BucketMetadata.ProtoReflect.Descriptor instead. -func (*BucketMetadata) Descriptor() ([]byte, []int) { - return file_s3_proto_rawDescGZIP(), []int{6} -} - -func (x *BucketMetadata) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *BucketMetadata) GetCors() *CORSConfiguration { - if x != nil { - return x.Cors - } - return nil -} - -func (x *BucketMetadata) GetEncryption() *EncryptionConfiguration { - if x != nil { - return x.Encryption - } - return nil -} - -type EncryptionConfiguration struct { - state protoimpl.MessageState `protogen:"open.v1"` - SseAlgorithm string `protobuf:"bytes,1,opt,name=sse_algorithm,json=sseAlgorithm,proto3" json:"sse_algorithm,omitempty"` // "AES256" or "aws:kms" - KmsKeyId string `protobuf:"bytes,2,opt,name=kms_key_id,json=kmsKeyId,proto3" json:"kms_key_id,omitempty"` // KMS key ID (optional for aws:kms) - BucketKeyEnabled bool `protobuf:"varint,3,opt,name=bucket_key_enabled,json=bucketKeyEnabled,proto3" json:"bucket_key_enabled,omitempty"` // S3 Bucket Keys optimization - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EncryptionConfiguration) Reset() { - *x = EncryptionConfiguration{} - mi := &file_s3_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncryptionConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncryptionConfiguration) ProtoMessage() {} - -func (x *EncryptionConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_s3_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncryptionConfiguration.ProtoReflect.Descriptor instead. -func (*EncryptionConfiguration) Descriptor() ([]byte, []int) { - return file_s3_proto_rawDescGZIP(), []int{7} -} - -func (x *EncryptionConfiguration) GetSseAlgorithm() string { - if x != nil { - return x.SseAlgorithm - } - return "" -} - -func (x *EncryptionConfiguration) GetKmsKeyId() string { - if x != nil { - return x.KmsKeyId - } - return "" -} - -func (x *EncryptionConfiguration) GetBucketKeyEnabled() bool { - if x != nil { - return x.BucketKeyEnabled - } - return false -} - var File_s3_proto protoreflect.FileDescriptor -const file_s3_proto_rawDesc = "" + - "\n" + - "\bs3.proto\x12\fmessaging_pb\"W\n" + - "\x12S3ConfigureRequest\x12A\n" + - "\x1ds3_configuration_file_content\x18\x01 \x01(\fR\x1as3ConfigurationFileContent\"\x15\n" + - "\x13S3ConfigureResponse\"\x87\x02\n" + - "\x16S3CircuitBreakerConfig\x12=\n" + - "\x06global\x18\x01 \x01(\v2%.messaging_pb.S3CircuitBreakerOptionsR\x06global\x12K\n" + - "\abuckets\x18\x02 \x03(\v21.messaging_pb.S3CircuitBreakerConfig.BucketsEntryR\abuckets\x1aa\n" + - "\fBucketsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12;\n" + - "\x05value\x18\x02 \x01(\v2%.messaging_pb.S3CircuitBreakerOptionsR\x05value:\x028\x01\"\xbd\x01\n" + - "\x17S3CircuitBreakerOptions\x12\x18\n" + - "\aenabled\x18\x01 \x01(\bR\aenabled\x12L\n" + - "\aactions\x18\x02 \x03(\v22.messaging_pb.S3CircuitBreakerOptions.ActionsEntryR\aactions\x1a:\n" + - "\fActionsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01\"\xe4\x01\n" + - "\bCORSRule\x12'\n" + - "\x0fallowed_headers\x18\x01 \x03(\tR\x0eallowedHeaders\x12'\n" + - "\x0fallowed_methods\x18\x02 \x03(\tR\x0eallowedMethods\x12'\n" + - "\x0fallowed_origins\x18\x03 \x03(\tR\x0eallowedOrigins\x12%\n" + - "\x0eexpose_headers\x18\x04 \x03(\tR\rexposeHeaders\x12&\n" + - "\x0fmax_age_seconds\x18\x05 \x01(\x05R\rmaxAgeSeconds\x12\x0e\n" + - "\x02id\x18\x06 \x01(\tR\x02id\"J\n" + - "\x11CORSConfiguration\x125\n" + - "\n" + - "cors_rules\x18\x01 \x03(\v2\x16.messaging_pb.CORSRuleR\tcorsRules\"\x81\x02\n" + - "\x0eBucketMetadata\x12:\n" + - "\x04tags\x18\x01 \x03(\v2&.messaging_pb.BucketMetadata.TagsEntryR\x04tags\x123\n" + - "\x04cors\x18\x02 \x01(\v2\x1f.messaging_pb.CORSConfigurationR\x04cors\x12E\n" + - "\n" + - "encryption\x18\x03 \x01(\v2%.messaging_pb.EncryptionConfigurationR\n" + - "encryption\x1a7\n" + - "\tTagsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x8a\x01\n" + - "\x17EncryptionConfiguration\x12#\n" + - "\rsse_algorithm\x18\x01 \x01(\tR\fsseAlgorithm\x12\x1c\n" + - "\n" + - "kms_key_id\x18\x02 \x01(\tR\bkmsKeyId\x12,\n" + - "\x12bucket_key_enabled\x18\x03 \x01(\bR\x10bucketKeyEnabled2_\n" + - "\tSeaweedS3\x12R\n" + - "\tConfigure\x12 .messaging_pb.S3ConfigureRequest\x1a!.messaging_pb.S3ConfigureResponse\"\x00BI\n" + - "\x10seaweedfs.clientB\aS3ProtoZ,github.com/seaweedfs/seaweedfs/weed/pb/s3_pbb\x06proto3" +var file_s3_proto_rawDesc = []byte{ + 0x0a, 0x08, 0x73, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x57, 0x0a, 0x12, 0x53, 0x33, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, + 0x0a, 0x1d, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1a, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x16, 0x53, 0x33, 0x43, + 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, + 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x12, 0x4b, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, + 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x1a, + 0x61, 0x0a, 0x0c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x17, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, + 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x53, 0x33, 0x12, + 0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, + 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_s3_proto_rawDescOnce sync.Once - file_s3_proto_rawDescData []byte + file_s3_proto_rawDescData = file_s3_proto_rawDesc ) func file_s3_proto_rawDescGZIP() []byte { file_s3_proto_rawDescOnce.Do(func() { - file_s3_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_s3_proto_rawDesc), len(file_s3_proto_rawDesc))) + file_s3_proto_rawDescData = protoimpl.X.CompressGZIP(file_s3_proto_rawDescData) }) return file_s3_proto_rawDescData } -var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_s3_proto_goTypes = []any{ +var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_s3_proto_goTypes = []interface{}{ (*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest (*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse (*S3CircuitBreakerConfig)(nil), // 2: messaging_pb.S3CircuitBreakerConfig (*S3CircuitBreakerOptions)(nil), // 3: messaging_pb.S3CircuitBreakerOptions - (*CORSRule)(nil), // 4: messaging_pb.CORSRule - (*CORSConfiguration)(nil), // 5: messaging_pb.CORSConfiguration - (*BucketMetadata)(nil), // 6: messaging_pb.BucketMetadata - (*EncryptionConfiguration)(nil), // 7: messaging_pb.EncryptionConfiguration - nil, // 8: messaging_pb.S3CircuitBreakerConfig.BucketsEntry - nil, // 9: messaging_pb.S3CircuitBreakerOptions.ActionsEntry - nil, // 10: messaging_pb.BucketMetadata.TagsEntry + nil, // 4: messaging_pb.S3CircuitBreakerConfig.BucketsEntry + nil, // 5: messaging_pb.S3CircuitBreakerOptions.ActionsEntry } var file_s3_proto_depIdxs = []int32{ - 3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions - 8, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry - 9, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry - 4, // 3: messaging_pb.CORSConfiguration.cors_rules:type_name -> messaging_pb.CORSRule - 10, // 4: messaging_pb.BucketMetadata.tags:type_name -> messaging_pb.BucketMetadata.TagsEntry - 5, // 5: messaging_pb.BucketMetadata.cors:type_name -> messaging_pb.CORSConfiguration - 7, // 6: messaging_pb.BucketMetadata.encryption:type_name -> messaging_pb.EncryptionConfiguration - 3, // 7: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions - 0, // 8: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest - 1, // 9: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse - 9, // [9:10] is the sub-list for method output_type - 8, // [8:9] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions + 4, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry + 5, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry + 3, // 3: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions + 0, // 4: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest + 1, // 5: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_s3_proto_init() } @@ -550,13 +309,63 @@ func file_s3_proto_init() { if File_s3_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_s3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_s3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_s3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3CircuitBreakerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_s3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3CircuitBreakerOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_s3_proto_rawDesc), len(file_s3_proto_rawDesc)), + RawDescriptor: file_s3_proto_rawDesc, NumEnums: 0, - NumMessages: 11, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, @@ -565,6 +374,7 @@ func file_s3_proto_init() { MessageInfos: file_s3_proto_msgTypes, }.Build() File_s3_proto = out.File + file_s3_proto_rawDesc = nil file_s3_proto_goTypes = nil file_s3_proto_depIdxs = nil } diff --git a/weed/pb/s3_pb/s3_grpc.pb.go b/weed/pb/s3_pb/s3_grpc.pb.go index 91f3138ce..1bc956be6 100644 --- a/weed/pb/s3_pb/s3_grpc.pb.go +++ b/weed/pb/s3_pb/s3_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: s3.proto package s3_pb @@ -15,12 +11,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SeaweedS3_Configure_FullMethodName = "/messaging_pb.SeaweedS3/Configure" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // SeaweedS3Client is the client API for SeaweedS3 service. // @@ -38,9 +30,8 @@ func NewSeaweedS3Client(cc grpc.ClientConnInterface) SeaweedS3Client { } func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, opts ...grpc.CallOption) (*S3ConfigureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(S3ConfigureResponse) - err := c.cc.Invoke(ctx, SeaweedS3_Configure_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedS3/Configure", in, out, opts...) if err != nil { return nil, err } @@ -49,24 +40,20 @@ func (c *seaweedS3Client) Configure(ctx context.Context, in *S3ConfigureRequest, // SeaweedS3Server is the server API for SeaweedS3 service. // All implementations must embed UnimplementedSeaweedS3Server -// for forward compatibility. +// for forward compatibility type SeaweedS3Server interface { Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error) mustEmbedUnimplementedSeaweedS3Server() } -// UnimplementedSeaweedS3Server must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSeaweedS3Server struct{} +// UnimplementedSeaweedS3Server must be embedded to have forward compatible implementations. +type UnimplementedSeaweedS3Server struct { +} func (UnimplementedSeaweedS3Server) Configure(context.Context, *S3ConfigureRequest) (*S3ConfigureResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") } func (UnimplementedSeaweedS3Server) mustEmbedUnimplementedSeaweedS3Server() {} -func (UnimplementedSeaweedS3Server) testEmbeddedByValue() {} // UnsafeSeaweedS3Server may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to SeaweedS3Server will @@ -76,13 +63,6 @@ type UnsafeSeaweedS3Server interface { } func RegisterSeaweedS3Server(s grpc.ServiceRegistrar, srv SeaweedS3Server) { - // If the following call pancis, it indicates UnimplementedSeaweedS3Server was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&SeaweedS3_ServiceDesc, srv) } @@ -96,7 +76,7 @@ func _SeaweedS3_Configure_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: SeaweedS3_Configure_FullMethodName, + FullMethod: "/messaging_pb.SeaweedS3/Configure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SeaweedS3Server).Configure(ctx, req.(*S3ConfigureRequest)) diff --git a/weed/pb/schema_pb/mq_schema.pb.go b/weed/pb/schema_pb/mq_schema.pb.go deleted file mode 100644 index 7fbf4a4e6..000000000 --- a/weed/pb/schema_pb/mq_schema.pb.go +++ /dev/null @@ -1,1423 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 -// source: mq_schema.proto - -package schema_pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type OffsetType int32 - -const ( - OffsetType_RESUME_OR_EARLIEST OffsetType = 0 - OffsetType_RESET_TO_EARLIEST OffsetType = 5 - OffsetType_EXACT_TS_NS OffsetType = 10 - OffsetType_RESET_TO_LATEST OffsetType = 15 - OffsetType_RESUME_OR_LATEST OffsetType = 20 - // Offset-based positioning - OffsetType_EXACT_OFFSET OffsetType = 25 - OffsetType_RESET_TO_OFFSET OffsetType = 30 -) - -// Enum value maps for OffsetType. -var ( - OffsetType_name = map[int32]string{ - 0: "RESUME_OR_EARLIEST", - 5: "RESET_TO_EARLIEST", - 10: "EXACT_TS_NS", - 15: "RESET_TO_LATEST", - 20: "RESUME_OR_LATEST", - 25: "EXACT_OFFSET", - 30: "RESET_TO_OFFSET", - } - OffsetType_value = map[string]int32{ - "RESUME_OR_EARLIEST": 0, - "RESET_TO_EARLIEST": 5, - "EXACT_TS_NS": 10, - "RESET_TO_LATEST": 15, - "RESUME_OR_LATEST": 20, - "EXACT_OFFSET": 25, - "RESET_TO_OFFSET": 30, - } -) - -func (x OffsetType) Enum() *OffsetType { - p := new(OffsetType) - *p = x - return p -} - -func (x OffsetType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OffsetType) Descriptor() protoreflect.EnumDescriptor { - return file_mq_schema_proto_enumTypes[0].Descriptor() -} - -func (OffsetType) Type() protoreflect.EnumType { - return &file_mq_schema_proto_enumTypes[0] -} - -func (x OffsetType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OffsetType.Descriptor instead. -func (OffsetType) EnumDescriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{0} -} - -type ScalarType int32 - -const ( - ScalarType_BOOL ScalarType = 0 - ScalarType_INT32 ScalarType = 1 - ScalarType_INT64 ScalarType = 3 - ScalarType_FLOAT ScalarType = 4 - ScalarType_DOUBLE ScalarType = 5 - ScalarType_BYTES ScalarType = 6 - ScalarType_STRING ScalarType = 7 - // Parquet logical types for analytics - ScalarType_TIMESTAMP ScalarType = 8 // UTC timestamp (microseconds since epoch) - ScalarType_DATE ScalarType = 9 // Date (days since epoch) - ScalarType_DECIMAL ScalarType = 10 // Arbitrary precision decimal - ScalarType_TIME ScalarType = 11 // Time of day (microseconds) -) - -// Enum value maps for ScalarType. -var ( - ScalarType_name = map[int32]string{ - 0: "BOOL", - 1: "INT32", - 3: "INT64", - 4: "FLOAT", - 5: "DOUBLE", - 6: "BYTES", - 7: "STRING", - 8: "TIMESTAMP", - 9: "DATE", - 10: "DECIMAL", - 11: "TIME", - } - ScalarType_value = map[string]int32{ - "BOOL": 0, - "INT32": 1, - "INT64": 3, - "FLOAT": 4, - "DOUBLE": 5, - "BYTES": 6, - "STRING": 7, - "TIMESTAMP": 8, - "DATE": 9, - "DECIMAL": 10, - "TIME": 11, - } -) - -func (x ScalarType) Enum() *ScalarType { - p := new(ScalarType) - *p = x - return p -} - -func (x ScalarType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ScalarType) Descriptor() protoreflect.EnumDescriptor { - return file_mq_schema_proto_enumTypes[1].Descriptor() -} - -func (ScalarType) Type() protoreflect.EnumType { - return &file_mq_schema_proto_enumTypes[1] -} - -func (x ScalarType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ScalarType.Descriptor instead. -func (ScalarType) EnumDescriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{1} -} - -type Topic struct { - state protoimpl.MessageState `protogen:"open.v1"` - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Topic) Reset() { - *x = Topic{} - mi := &file_mq_schema_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Topic) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Topic) ProtoMessage() {} - -func (x *Topic) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Topic.ProtoReflect.Descriptor instead. -func (*Topic) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{0} -} - -func (x *Topic) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *Topic) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -type Partition struct { - state protoimpl.MessageState `protogen:"open.v1"` - RingSize int32 `protobuf:"varint,1,opt,name=ring_size,json=ringSize,proto3" json:"ring_size,omitempty"` - RangeStart int32 `protobuf:"varint,2,opt,name=range_start,json=rangeStart,proto3" json:"range_start,omitempty"` - RangeStop int32 `protobuf:"varint,3,opt,name=range_stop,json=rangeStop,proto3" json:"range_stop,omitempty"` - UnixTimeNs int64 `protobuf:"varint,4,opt,name=unix_time_ns,json=unixTimeNs,proto3" json:"unix_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Partition) Reset() { - *x = Partition{} - mi := &file_mq_schema_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Partition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Partition) ProtoMessage() {} - -func (x *Partition) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Partition.ProtoReflect.Descriptor instead. -func (*Partition) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{1} -} - -func (x *Partition) GetRingSize() int32 { - if x != nil { - return x.RingSize - } - return 0 -} - -func (x *Partition) GetRangeStart() int32 { - if x != nil { - return x.RangeStart - } - return 0 -} - -func (x *Partition) GetRangeStop() int32 { - if x != nil { - return x.RangeStop - } - return 0 -} - -func (x *Partition) GetUnixTimeNs() int64 { - if x != nil { - return x.UnixTimeNs - } - return 0 -} - -type Offset struct { - state protoimpl.MessageState `protogen:"open.v1"` - Topic *Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - PartitionOffsets []*PartitionOffset `protobuf:"bytes,2,rep,name=partition_offsets,json=partitionOffsets,proto3" json:"partition_offsets,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Offset) Reset() { - *x = Offset{} - mi := &file_mq_schema_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Offset) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Offset) ProtoMessage() {} - -func (x *Offset) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Offset.ProtoReflect.Descriptor instead. -func (*Offset) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{2} -} - -func (x *Offset) GetTopic() *Topic { - if x != nil { - return x.Topic - } - return nil -} - -func (x *Offset) GetPartitionOffsets() []*PartitionOffset { - if x != nil { - return x.PartitionOffsets - } - return nil -} - -type PartitionOffset struct { - state protoimpl.MessageState `protogen:"open.v1"` - Partition *Partition `protobuf:"bytes,1,opt,name=partition,proto3" json:"partition,omitempty"` - StartTsNs int64 `protobuf:"varint,2,opt,name=start_ts_ns,json=startTsNs,proto3" json:"start_ts_ns,omitempty"` - StartOffset int64 `protobuf:"varint,3,opt,name=start_offset,json=startOffset,proto3" json:"start_offset,omitempty"` // For offset-based positioning - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PartitionOffset) Reset() { - *x = PartitionOffset{} - mi := &file_mq_schema_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PartitionOffset) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PartitionOffset) ProtoMessage() {} - -func (x *PartitionOffset) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PartitionOffset.ProtoReflect.Descriptor instead. -func (*PartitionOffset) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{3} -} - -func (x *PartitionOffset) GetPartition() *Partition { - if x != nil { - return x.Partition - } - return nil -} - -func (x *PartitionOffset) GetStartTsNs() int64 { - if x != nil { - return x.StartTsNs - } - return 0 -} - -func (x *PartitionOffset) GetStartOffset() int64 { - if x != nil { - return x.StartOffset - } - return 0 -} - -type RecordType struct { - state protoimpl.MessageState `protogen:"open.v1"` - Fields []*Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordType) Reset() { - *x = RecordType{} - mi := &file_mq_schema_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordType) ProtoMessage() {} - -func (x *RecordType) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordType.ProtoReflect.Descriptor instead. -func (*RecordType) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{4} -} - -func (x *RecordType) GetFields() []*Field { - if x != nil { - return x.Fields - } - return nil -} - -type Field struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - FieldIndex int32 `protobuf:"varint,2,opt,name=field_index,json=fieldIndex,proto3" json:"field_index,omitempty"` - Type *Type `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - IsRepeated bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated,proto3" json:"is_repeated,omitempty"` - IsRequired bool `protobuf:"varint,5,opt,name=is_required,json=isRequired,proto3" json:"is_required,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Field) Reset() { - *x = Field{} - mi := &file_mq_schema_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Field) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Field) ProtoMessage() {} - -func (x *Field) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Field.ProtoReflect.Descriptor instead. -func (*Field) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{5} -} - -func (x *Field) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Field) GetFieldIndex() int32 { - if x != nil { - return x.FieldIndex - } - return 0 -} - -func (x *Field) GetType() *Type { - if x != nil { - return x.Type - } - return nil -} - -func (x *Field) GetIsRepeated() bool { - if x != nil { - return x.IsRepeated - } - return false -} - -func (x *Field) GetIsRequired() bool { - if x != nil { - return x.IsRequired - } - return false -} - -type Type struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Kind: - // - // *Type_ScalarType - // *Type_RecordType - // *Type_ListType - Kind isType_Kind `protobuf_oneof:"kind"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Type) Reset() { - *x = Type{} - mi := &file_mq_schema_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Type) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Type) ProtoMessage() {} - -func (x *Type) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Type.ProtoReflect.Descriptor instead. -func (*Type) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{6} -} - -func (x *Type) GetKind() isType_Kind { - if x != nil { - return x.Kind - } - return nil -} - -func (x *Type) GetScalarType() ScalarType { - if x != nil { - if x, ok := x.Kind.(*Type_ScalarType); ok { - return x.ScalarType - } - } - return ScalarType_BOOL -} - -func (x *Type) GetRecordType() *RecordType { - if x != nil { - if x, ok := x.Kind.(*Type_RecordType); ok { - return x.RecordType - } - } - return nil -} - -func (x *Type) GetListType() *ListType { - if x != nil { - if x, ok := x.Kind.(*Type_ListType); ok { - return x.ListType - } - } - return nil -} - -type isType_Kind interface { - isType_Kind() -} - -type Type_ScalarType struct { - ScalarType ScalarType `protobuf:"varint,1,opt,name=scalar_type,json=scalarType,proto3,enum=schema_pb.ScalarType,oneof"` -} - -type Type_RecordType struct { - RecordType *RecordType `protobuf:"bytes,2,opt,name=record_type,json=recordType,proto3,oneof"` -} - -type Type_ListType struct { - ListType *ListType `protobuf:"bytes,3,opt,name=list_type,json=listType,proto3,oneof"` -} - -func (*Type_ScalarType) isType_Kind() {} - -func (*Type_RecordType) isType_Kind() {} - -func (*Type_ListType) isType_Kind() {} - -type ListType struct { - state protoimpl.MessageState `protogen:"open.v1"` - ElementType *Type `protobuf:"bytes,1,opt,name=element_type,json=elementType,proto3" json:"element_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListType) Reset() { - *x = ListType{} - mi := &file_mq_schema_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListType) ProtoMessage() {} - -func (x *ListType) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListType.ProtoReflect.Descriptor instead. -func (*ListType) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{7} -} - -func (x *ListType) GetElementType() *Type { - if x != nil { - return x.ElementType - } - return nil -} - -// ///////////////////////// -// value definition -// ///////////////////////// -type RecordValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordValue) Reset() { - *x = RecordValue{} - mi := &file_mq_schema_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordValue) ProtoMessage() {} - -func (x *RecordValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordValue.ProtoReflect.Descriptor instead. -func (*RecordValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{8} -} - -func (x *RecordValue) GetFields() map[string]*Value { - if x != nil { - return x.Fields - } - return nil -} - -type Value struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Kind: - // - // *Value_BoolValue - // *Value_Int32Value - // *Value_Int64Value - // *Value_FloatValue - // *Value_DoubleValue - // *Value_BytesValue - // *Value_StringValue - // *Value_TimestampValue - // *Value_DateValue - // *Value_DecimalValue - // *Value_TimeValue - // *Value_ListValue - // *Value_RecordValue - Kind isValue_Kind `protobuf_oneof:"kind"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Value) Reset() { - *x = Value{} - mi := &file_mq_schema_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Value) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Value) ProtoMessage() {} - -func (x *Value) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Value.ProtoReflect.Descriptor instead. -func (*Value) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{9} -} - -func (x *Value) GetKind() isValue_Kind { - if x != nil { - return x.Kind - } - return nil -} - -func (x *Value) GetBoolValue() bool { - if x != nil { - if x, ok := x.Kind.(*Value_BoolValue); ok { - return x.BoolValue - } - } - return false -} - -func (x *Value) GetInt32Value() int32 { - if x != nil { - if x, ok := x.Kind.(*Value_Int32Value); ok { - return x.Int32Value - } - } - return 0 -} - -func (x *Value) GetInt64Value() int64 { - if x != nil { - if x, ok := x.Kind.(*Value_Int64Value); ok { - return x.Int64Value - } - } - return 0 -} - -func (x *Value) GetFloatValue() float32 { - if x != nil { - if x, ok := x.Kind.(*Value_FloatValue); ok { - return x.FloatValue - } - } - return 0 -} - -func (x *Value) GetDoubleValue() float64 { - if x != nil { - if x, ok := x.Kind.(*Value_DoubleValue); ok { - return x.DoubleValue - } - } - return 0 -} - -func (x *Value) GetBytesValue() []byte { - if x != nil { - if x, ok := x.Kind.(*Value_BytesValue); ok { - return x.BytesValue - } - } - return nil -} - -func (x *Value) GetStringValue() string { - if x != nil { - if x, ok := x.Kind.(*Value_StringValue); ok { - return x.StringValue - } - } - return "" -} - -func (x *Value) GetTimestampValue() *TimestampValue { - if x != nil { - if x, ok := x.Kind.(*Value_TimestampValue); ok { - return x.TimestampValue - } - } - return nil -} - -func (x *Value) GetDateValue() *DateValue { - if x != nil { - if x, ok := x.Kind.(*Value_DateValue); ok { - return x.DateValue - } - } - return nil -} - -func (x *Value) GetDecimalValue() *DecimalValue { - if x != nil { - if x, ok := x.Kind.(*Value_DecimalValue); ok { - return x.DecimalValue - } - } - return nil -} - -func (x *Value) GetTimeValue() *TimeValue { - if x != nil { - if x, ok := x.Kind.(*Value_TimeValue); ok { - return x.TimeValue - } - } - return nil -} - -func (x *Value) GetListValue() *ListValue { - if x != nil { - if x, ok := x.Kind.(*Value_ListValue); ok { - return x.ListValue - } - } - return nil -} - -func (x *Value) GetRecordValue() *RecordValue { - if x != nil { - if x, ok := x.Kind.(*Value_RecordValue); ok { - return x.RecordValue - } - } - return nil -} - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_Int32Value struct { - Int32Value int32 `protobuf:"varint,2,opt,name=int32_value,json=int32Value,proto3,oneof"` -} - -type Value_Int64Value struct { - Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Value_FloatValue struct { - FloatValue float32 `protobuf:"fixed32,4,opt,name=float_value,json=floatValue,proto3,oneof"` -} - -type Value_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Value_BytesValue struct { - BytesValue []byte `protobuf:"bytes,6,opt,name=bytes_value,json=bytesValue,proto3,oneof"` -} - -type Value_StringValue struct { - StringValue string `protobuf:"bytes,7,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_TimestampValue struct { - // Parquet logical type values - TimestampValue *TimestampValue `protobuf:"bytes,8,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` -} - -type Value_DateValue struct { - DateValue *DateValue `protobuf:"bytes,9,opt,name=date_value,json=dateValue,proto3,oneof"` -} - -type Value_DecimalValue struct { - DecimalValue *DecimalValue `protobuf:"bytes,10,opt,name=decimal_value,json=decimalValue,proto3,oneof"` -} - -type Value_TimeValue struct { - TimeValue *TimeValue `protobuf:"bytes,11,opt,name=time_value,json=timeValue,proto3,oneof"` -} - -type Value_ListValue struct { - // Complex types - ListValue *ListValue `protobuf:"bytes,14,opt,name=list_value,json=listValue,proto3,oneof"` -} - -type Value_RecordValue struct { - RecordValue *RecordValue `protobuf:"bytes,15,opt,name=record_value,json=recordValue,proto3,oneof"` -} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_Int32Value) isValue_Kind() {} - -func (*Value_Int64Value) isValue_Kind() {} - -func (*Value_FloatValue) isValue_Kind() {} - -func (*Value_DoubleValue) isValue_Kind() {} - -func (*Value_BytesValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_TimestampValue) isValue_Kind() {} - -func (*Value_DateValue) isValue_Kind() {} - -func (*Value_DecimalValue) isValue_Kind() {} - -func (*Value_TimeValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -func (*Value_RecordValue) isValue_Kind() {} - -// Parquet logical type value messages -type TimestampValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - TimestampMicros int64 `protobuf:"varint,1,opt,name=timestamp_micros,json=timestampMicros,proto3" json:"timestamp_micros,omitempty"` // Microseconds since Unix epoch (UTC) - IsUtc bool `protobuf:"varint,2,opt,name=is_utc,json=isUtc,proto3" json:"is_utc,omitempty"` // True if UTC, false if local time - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TimestampValue) Reset() { - *x = TimestampValue{} - mi := &file_mq_schema_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TimestampValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimestampValue) ProtoMessage() {} - -func (x *TimestampValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimestampValue.ProtoReflect.Descriptor instead. -func (*TimestampValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{10} -} - -func (x *TimestampValue) GetTimestampMicros() int64 { - if x != nil { - return x.TimestampMicros - } - return 0 -} - -func (x *TimestampValue) GetIsUtc() bool { - if x != nil { - return x.IsUtc - } - return false -} - -type DateValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - DaysSinceEpoch int32 `protobuf:"varint,1,opt,name=days_since_epoch,json=daysSinceEpoch,proto3" json:"days_since_epoch,omitempty"` // Days since Unix epoch (1970-01-01) - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DateValue) Reset() { - *x = DateValue{} - mi := &file_mq_schema_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DateValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DateValue) ProtoMessage() {} - -func (x *DateValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DateValue.ProtoReflect.Descriptor instead. -func (*DateValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{11} -} - -func (x *DateValue) GetDaysSinceEpoch() int32 { - if x != nil { - return x.DaysSinceEpoch - } - return 0 -} - -type DecimalValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` // Arbitrary precision decimal as bytes - Precision int32 `protobuf:"varint,2,opt,name=precision,proto3" json:"precision,omitempty"` // Total number of digits - Scale int32 `protobuf:"varint,3,opt,name=scale,proto3" json:"scale,omitempty"` // Number of digits after decimal point - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DecimalValue) Reset() { - *x = DecimalValue{} - mi := &file_mq_schema_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecimalValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecimalValue) ProtoMessage() {} - -func (x *DecimalValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecimalValue.ProtoReflect.Descriptor instead. -func (*DecimalValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{12} -} - -func (x *DecimalValue) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -func (x *DecimalValue) GetPrecision() int32 { - if x != nil { - return x.Precision - } - return 0 -} - -func (x *DecimalValue) GetScale() int32 { - if x != nil { - return x.Scale - } - return 0 -} - -type TimeValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - TimeMicros int64 `protobuf:"varint,1,opt,name=time_micros,json=timeMicros,proto3" json:"time_micros,omitempty"` // Microseconds since midnight - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TimeValue) Reset() { - *x = TimeValue{} - mi := &file_mq_schema_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TimeValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeValue) ProtoMessage() {} - -func (x *TimeValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeValue.ProtoReflect.Descriptor instead. -func (*TimeValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{13} -} - -func (x *TimeValue) GetTimeMicros() int64 { - if x != nil { - return x.TimeMicros - } - return 0 -} - -type ListValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListValue) Reset() { - *x = ListValue{} - mi := &file_mq_schema_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListValue) ProtoMessage() {} - -func (x *ListValue) ProtoReflect() protoreflect.Message { - mi := &file_mq_schema_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. -func (*ListValue) Descriptor() ([]byte, []int) { - return file_mq_schema_proto_rawDescGZIP(), []int{14} -} - -func (x *ListValue) GetValues() []*Value { - if x != nil { - return x.Values - } - return nil -} - -var File_mq_schema_proto protoreflect.FileDescriptor - -const file_mq_schema_proto_rawDesc = "" + - "\n" + - "\x0fmq_schema.proto\x12\tschema_pb\"9\n" + - "\x05Topic\x12\x1c\n" + - "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\"\x8a\x01\n" + - "\tPartition\x12\x1b\n" + - "\tring_size\x18\x01 \x01(\x05R\bringSize\x12\x1f\n" + - "\vrange_start\x18\x02 \x01(\x05R\n" + - "rangeStart\x12\x1d\n" + - "\n" + - "range_stop\x18\x03 \x01(\x05R\trangeStop\x12 \n" + - "\funix_time_ns\x18\x04 \x01(\x03R\n" + - "unixTimeNs\"y\n" + - "\x06Offset\x12&\n" + - "\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12G\n" + - "\x11partition_offsets\x18\x02 \x03(\v2\x1a.schema_pb.PartitionOffsetR\x10partitionOffsets\"\x88\x01\n" + - "\x0fPartitionOffset\x122\n" + - "\tpartition\x18\x01 \x01(\v2\x14.schema_pb.PartitionR\tpartition\x12\x1e\n" + - "\vstart_ts_ns\x18\x02 \x01(\x03R\tstartTsNs\x12!\n" + - "\fstart_offset\x18\x03 \x01(\x03R\vstartOffset\"6\n" + - "\n" + - "RecordType\x12(\n" + - "\x06fields\x18\x01 \x03(\v2\x10.schema_pb.FieldR\x06fields\"\xa3\x01\n" + - "\x05Field\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n" + - "\vfield_index\x18\x02 \x01(\x05R\n" + - "fieldIndex\x12#\n" + - "\x04type\x18\x03 \x01(\v2\x0f.schema_pb.TypeR\x04type\x12\x1f\n" + - "\vis_repeated\x18\x04 \x01(\bR\n" + - "isRepeated\x12\x1f\n" + - "\vis_required\x18\x05 \x01(\bR\n" + - "isRequired\"\xb6\x01\n" + - "\x04Type\x128\n" + - "\vscalar_type\x18\x01 \x01(\x0e2\x15.schema_pb.ScalarTypeH\x00R\n" + - "scalarType\x128\n" + - "\vrecord_type\x18\x02 \x01(\v2\x15.schema_pb.RecordTypeH\x00R\n" + - "recordType\x122\n" + - "\tlist_type\x18\x03 \x01(\v2\x13.schema_pb.ListTypeH\x00R\blistTypeB\x06\n" + - "\x04kind\">\n" + - "\bListType\x122\n" + - "\felement_type\x18\x01 \x01(\v2\x0f.schema_pb.TypeR\velementType\"\x96\x01\n" + - "\vRecordValue\x12:\n" + - "\x06fields\x18\x01 \x03(\v2\".schema_pb.RecordValue.FieldsEntryR\x06fields\x1aK\n" + - "\vFieldsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12&\n" + - "\x05value\x18\x02 \x01(\v2\x10.schema_pb.ValueR\x05value:\x028\x01\"\xee\x04\n" + - "\x05Value\x12\x1f\n" + - "\n" + - "bool_value\x18\x01 \x01(\bH\x00R\tboolValue\x12!\n" + - "\vint32_value\x18\x02 \x01(\x05H\x00R\n" + - "int32Value\x12!\n" + - "\vint64_value\x18\x03 \x01(\x03H\x00R\n" + - "int64Value\x12!\n" + - "\vfloat_value\x18\x04 \x01(\x02H\x00R\n" + - "floatValue\x12#\n" + - "\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12!\n" + - "\vbytes_value\x18\x06 \x01(\fH\x00R\n" + - "bytesValue\x12#\n" + - "\fstring_value\x18\a \x01(\tH\x00R\vstringValue\x12D\n" + - "\x0ftimestamp_value\x18\b \x01(\v2\x19.schema_pb.TimestampValueH\x00R\x0etimestampValue\x125\n" + - "\n" + - "date_value\x18\t \x01(\v2\x14.schema_pb.DateValueH\x00R\tdateValue\x12>\n" + - "\rdecimal_value\x18\n" + - " \x01(\v2\x17.schema_pb.DecimalValueH\x00R\fdecimalValue\x125\n" + - "\n" + - "time_value\x18\v \x01(\v2\x14.schema_pb.TimeValueH\x00R\ttimeValue\x125\n" + - "\n" + - "list_value\x18\x0e \x01(\v2\x14.schema_pb.ListValueH\x00R\tlistValue\x12;\n" + - "\frecord_value\x18\x0f \x01(\v2\x16.schema_pb.RecordValueH\x00R\vrecordValueB\x06\n" + - "\x04kind\"R\n" + - "\x0eTimestampValue\x12)\n" + - "\x10timestamp_micros\x18\x01 \x01(\x03R\x0ftimestampMicros\x12\x15\n" + - "\x06is_utc\x18\x02 \x01(\bR\x05isUtc\"5\n" + - "\tDateValue\x12(\n" + - "\x10days_since_epoch\x18\x01 \x01(\x05R\x0edaysSinceEpoch\"X\n" + - "\fDecimalValue\x12\x14\n" + - "\x05value\x18\x01 \x01(\fR\x05value\x12\x1c\n" + - "\tprecision\x18\x02 \x01(\x05R\tprecision\x12\x14\n" + - "\x05scale\x18\x03 \x01(\x05R\x05scale\",\n" + - "\tTimeValue\x12\x1f\n" + - "\vtime_micros\x18\x01 \x01(\x03R\n" + - "timeMicros\"5\n" + - "\tListValue\x12(\n" + - "\x06values\x18\x01 \x03(\v2\x10.schema_pb.ValueR\x06values*\x9e\x01\n" + - "\n" + - "OffsetType\x12\x16\n" + - "\x12RESUME_OR_EARLIEST\x10\x00\x12\x15\n" + - "\x11RESET_TO_EARLIEST\x10\x05\x12\x0f\n" + - "\vEXACT_TS_NS\x10\n" + - "\x12\x13\n" + - "\x0fRESET_TO_LATEST\x10\x0f\x12\x14\n" + - "\x10RESUME_OR_LATEST\x10\x14\x12\x10\n" + - "\fEXACT_OFFSET\x10\x19\x12\x13\n" + - "\x0fRESET_TO_OFFSET\x10\x1e*\x8a\x01\n" + - "\n" + - "ScalarType\x12\b\n" + - "\x04BOOL\x10\x00\x12\t\n" + - "\x05INT32\x10\x01\x12\t\n" + - "\x05INT64\x10\x03\x12\t\n" + - "\x05FLOAT\x10\x04\x12\n" + - "\n" + - "\x06DOUBLE\x10\x05\x12\t\n" + - "\x05BYTES\x10\x06\x12\n" + - "\n" + - "\x06STRING\x10\a\x12\r\n" + - "\tTIMESTAMP\x10\b\x12\b\n" + - "\x04DATE\x10\t\x12\v\n" + - "\aDECIMAL\x10\n" + - "\x12\b\n" + - "\x04TIME\x10\vB2Z0github.com/seaweedfs/seaweedfs/weed/pb/schema_pbb\x06proto3" - -var ( - file_mq_schema_proto_rawDescOnce sync.Once - file_mq_schema_proto_rawDescData []byte -) - -func file_mq_schema_proto_rawDescGZIP() []byte { - file_mq_schema_proto_rawDescOnce.Do(func() { - file_mq_schema_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mq_schema_proto_rawDesc), len(file_mq_schema_proto_rawDesc))) - }) - return file_mq_schema_proto_rawDescData -} - -var file_mq_schema_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_mq_schema_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_mq_schema_proto_goTypes = []any{ - (OffsetType)(0), // 0: schema_pb.OffsetType - (ScalarType)(0), // 1: schema_pb.ScalarType - (*Topic)(nil), // 2: schema_pb.Topic - (*Partition)(nil), // 3: schema_pb.Partition - (*Offset)(nil), // 4: schema_pb.Offset - (*PartitionOffset)(nil), // 5: schema_pb.PartitionOffset - (*RecordType)(nil), // 6: schema_pb.RecordType - (*Field)(nil), // 7: schema_pb.Field - (*Type)(nil), // 8: schema_pb.Type - (*ListType)(nil), // 9: schema_pb.ListType - (*RecordValue)(nil), // 10: schema_pb.RecordValue - (*Value)(nil), // 11: schema_pb.Value - (*TimestampValue)(nil), // 12: schema_pb.TimestampValue - (*DateValue)(nil), // 13: schema_pb.DateValue - (*DecimalValue)(nil), // 14: schema_pb.DecimalValue - (*TimeValue)(nil), // 15: schema_pb.TimeValue - (*ListValue)(nil), // 16: schema_pb.ListValue - nil, // 17: schema_pb.RecordValue.FieldsEntry -} -var file_mq_schema_proto_depIdxs = []int32{ - 2, // 0: schema_pb.Offset.topic:type_name -> schema_pb.Topic - 5, // 1: schema_pb.Offset.partition_offsets:type_name -> schema_pb.PartitionOffset - 3, // 2: schema_pb.PartitionOffset.partition:type_name -> schema_pb.Partition - 7, // 3: schema_pb.RecordType.fields:type_name -> schema_pb.Field - 8, // 4: schema_pb.Field.type:type_name -> schema_pb.Type - 1, // 5: schema_pb.Type.scalar_type:type_name -> schema_pb.ScalarType - 6, // 6: schema_pb.Type.record_type:type_name -> schema_pb.RecordType - 9, // 7: schema_pb.Type.list_type:type_name -> schema_pb.ListType - 8, // 8: schema_pb.ListType.element_type:type_name -> schema_pb.Type - 17, // 9: schema_pb.RecordValue.fields:type_name -> schema_pb.RecordValue.FieldsEntry - 12, // 10: schema_pb.Value.timestamp_value:type_name -> schema_pb.TimestampValue - 13, // 11: schema_pb.Value.date_value:type_name -> schema_pb.DateValue - 14, // 12: schema_pb.Value.decimal_value:type_name -> schema_pb.DecimalValue - 15, // 13: schema_pb.Value.time_value:type_name -> schema_pb.TimeValue - 16, // 14: schema_pb.Value.list_value:type_name -> schema_pb.ListValue - 10, // 15: schema_pb.Value.record_value:type_name -> schema_pb.RecordValue - 11, // 16: schema_pb.ListValue.values:type_name -> schema_pb.Value - 11, // 17: schema_pb.RecordValue.FieldsEntry.value:type_name -> schema_pb.Value - 18, // [18:18] is the sub-list for method output_type - 18, // [18:18] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name -} - -func init() { file_mq_schema_proto_init() } -func file_mq_schema_proto_init() { - if File_mq_schema_proto != nil { - return - } - file_mq_schema_proto_msgTypes[6].OneofWrappers = []any{ - (*Type_ScalarType)(nil), - (*Type_RecordType)(nil), - (*Type_ListType)(nil), - } - file_mq_schema_proto_msgTypes[9].OneofWrappers = []any{ - (*Value_BoolValue)(nil), - (*Value_Int32Value)(nil), - (*Value_Int64Value)(nil), - (*Value_FloatValue)(nil), - (*Value_DoubleValue)(nil), - (*Value_BytesValue)(nil), - (*Value_StringValue)(nil), - (*Value_TimestampValue)(nil), - (*Value_DateValue)(nil), - (*Value_DecimalValue)(nil), - (*Value_TimeValue)(nil), - (*Value_ListValue)(nil), - (*Value_RecordValue)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_mq_schema_proto_rawDesc), len(file_mq_schema_proto_rawDesc)), - NumEnums: 2, - NumMessages: 16, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_mq_schema_proto_goTypes, - DependencyIndexes: file_mq_schema_proto_depIdxs, - EnumInfos: file_mq_schema_proto_enumTypes, - MessageInfos: file_mq_schema_proto_msgTypes, - }.Build() - File_mq_schema_proto = out.File - file_mq_schema_proto_goTypes = nil - file_mq_schema_proto_depIdxs = nil -} diff --git a/weed/pb/schema_pb/offset_test.go b/weed/pb/schema_pb/offset_test.go deleted file mode 100644 index 28324836e..000000000 --- a/weed/pb/schema_pb/offset_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package schema_pb - -import ( - "testing" - "google.golang.org/protobuf/proto" -) - -func TestOffsetTypeEnums(t *testing.T) { - // Test that new offset-based enum values are defined - tests := []struct { - name string - value OffsetType - expected int32 - }{ - {"EXACT_OFFSET", OffsetType_EXACT_OFFSET, 25}, - {"RESET_TO_OFFSET", OffsetType_RESET_TO_OFFSET, 30}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if int32(tt.value) != tt.expected { - t.Errorf("OffsetType_%s = %d, want %d", tt.name, int32(tt.value), tt.expected) - } - }) - } -} - -func TestPartitionOffsetSerialization(t *testing.T) { - // Test that PartitionOffset can serialize/deserialize with new offset field - original := &PartitionOffset{ - Partition: &Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: 1234567890, - }, - StartTsNs: 1234567890, - StartOffset: 42, // New field - } - - // Test proto marshaling/unmarshaling - data, err := proto.Marshal(original) - if err != nil { - t.Fatalf("Failed to marshal PartitionOffset: %v", err) - } - - restored := &PartitionOffset{} - err = proto.Unmarshal(data, restored) - if err != nil { - t.Fatalf("Failed to unmarshal PartitionOffset: %v", err) - } - - // Verify all fields are preserved - if restored.StartTsNs != original.StartTsNs { - t.Errorf("StartTsNs = %d, want %d", restored.StartTsNs, original.StartTsNs) - } - if restored.StartOffset != original.StartOffset { - t.Errorf("StartOffset = %d, want %d", restored.StartOffset, original.StartOffset) - } - if restored.Partition.RingSize != original.Partition.RingSize { - t.Errorf("Partition.RingSize = %d, want %d", restored.Partition.RingSize, original.Partition.RingSize) - } -} - -func TestPartitionOffsetBackwardCompatibility(t *testing.T) { - // Test that PartitionOffset without StartOffset still works - original := &PartitionOffset{ - Partition: &Partition{ - RingSize: 1024, - RangeStart: 0, - RangeStop: 31, - UnixTimeNs: 1234567890, - }, - StartTsNs: 1234567890, - // StartOffset not set (defaults to 0) - } - - data, err := proto.Marshal(original) - if err != nil { - t.Fatalf("Failed to marshal PartitionOffset: %v", err) - } - - restored := &PartitionOffset{} - err = proto.Unmarshal(data, restored) - if err != nil { - t.Fatalf("Failed to unmarshal PartitionOffset: %v", err) - } - - // StartOffset should default to 0 - if restored.StartOffset != 0 { - t.Errorf("StartOffset = %d, want 0", restored.StartOffset) - } -} diff --git a/weed/pb/server_address.go b/weed/pb/server_address.go index a0aa79ae4..a66d0d831 100644 --- a/weed/pb/server_address.go +++ b/weed/pb/server_address.go @@ -2,8 +2,8 @@ package pb import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" "net" "strconv" "strings" @@ -11,7 +11,6 @@ import ( type ServerAddress string type ServerAddresses string -type ServerSrvAddress string func NewServerAddress(host string, port int, grpcPort int) ServerAddress { if grpcPort == 0 || grpcPort == port+10000 { @@ -77,42 +76,6 @@ func (sa ServerAddress) ToGrpcAddress() string { return ServerToGrpcAddress(string(sa)) } -// LookUp may return an error for some records along with successful lookups - make sure you do not -// discard `addresses` even if `err == nil` -func (r ServerSrvAddress) LookUp() (addresses []ServerAddress, err error) { - _, records, lookupErr := net.LookupSRV("", "", string(r)) - if lookupErr != nil { - err = fmt.Errorf("lookup SRV address %s: %v", r, lookupErr) - } - for _, srv := range records { - address := fmt.Sprintf("%s:%d", srv.Target, srv.Port) - addresses = append(addresses, ServerAddress(address)) - } - return -} - -// ToServiceDiscovery expects one of: a comma-separated list of ip:port, like -// -// 10.0.0.1:9999,10.0.0.2:24:9999 -// -// OR an SRV Record prepended with 'dnssrv+', like: -// -// dnssrv+_grpc._tcp.master.consul -// dnssrv+_grpc._tcp.headless.default.svc.cluster.local -// dnssrv+seaweed-master.master.consul -func (sa ServerAddresses) ToServiceDiscovery() (sd *ServerDiscovery) { - sd = &ServerDiscovery{} - prefix := "dnssrv+" - if strings.HasPrefix(string(sa), prefix) { - trimmed := strings.TrimPrefix(string(sa), prefix) - srv := ServerSrvAddress(trimmed) - sd.srvRecord = &srv - } else { - sd.list = sa.ToAddresses() - } - return -} - func (sa ServerAddresses) ToAddresses() (addresses []ServerAddress) { parts := strings.Split(string(sa), ",") for _, address := range parts { diff --git a/weed/pb/server_address_test.go b/weed/pb/server_address_test.go deleted file mode 100644 index f5a12427a..000000000 --- a/weed/pb/server_address_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package pb - -import ( - "reflect" - "testing" -) - -func TestServerAddresses_ToAddressMapOrSrv_shouldRemovePrefix(t *testing.T) { - str := ServerAddresses("dnssrv+hello.srv.consul") - - d := str.ToServiceDiscovery() - - expected := ServerSrvAddress("hello.srv.consul") - if *d.srvRecord != expected { - t.Fatalf(`ServerAddresses("dnssrv+hello.srv.consul") = %s, expected %s`, *d.srvRecord, expected) - } -} - -func TestServerAddresses_ToAddressMapOrSrv_shouldHandleIPPortList(t *testing.T) { - str := ServerAddresses("10.0.0.1:23,10.0.0.2:24") - - d := str.ToServiceDiscovery() - - if d.srvRecord != nil { - t.Fatalf(`ServerAddresses("dnssrv+hello.srv.consul") = %s, expected nil`, *d.srvRecord) - } - - expected := []ServerAddress{ - ServerAddress("10.0.0.1:23"), - ServerAddress("10.0.0.2:24"), - } - - if !reflect.DeepEqual(d.list, expected) { - t.Fatalf(`Expected %q, got %q`, expected, d.list) - } -} diff --git a/weed/pb/server_discovery.go b/weed/pb/server_discovery.go deleted file mode 100644 index 25c0360c5..000000000 --- a/weed/pb/server_discovery.go +++ /dev/null @@ -1,62 +0,0 @@ -package pb - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "reflect" -) - -// ServerDiscovery encodes a way to find at least 1 instance of a service, -// and provides utility functions to refresh the instance list -type ServerDiscovery struct { - list []ServerAddress - srvRecord *ServerSrvAddress -} - -func NewServiceDiscoveryFromMap(m map[string]ServerAddress) (sd *ServerDiscovery) { - sd = &ServerDiscovery{} - for _, s := range m { - sd.list = append(sd.list, s) - } - return sd -} - -// RefreshBySrvIfAvailable performs a DNS SRV lookup and updates list with the results -// of the lookup -func (sd *ServerDiscovery) RefreshBySrvIfAvailable() { - if sd.srvRecord == nil { - return - } - newList, err := sd.srvRecord.LookUp() - if err != nil { - glog.V(0).Infof("failed to lookup SRV for %s: %v", *sd.srvRecord, err) - } - if newList == nil || len(newList) == 0 { - glog.V(0).Infof("looked up SRV for %s, but found no well-formed names", *sd.srvRecord) - return - } - if !reflect.DeepEqual(sd.list, newList) { - sd.list = newList - } -} - -// GetInstances returns a copy of the latest known list of addresses -// call RefreshBySrvIfAvailable prior to this in order to get a more up-to-date view -func (sd *ServerDiscovery) GetInstances() (addresses []ServerAddress) { - for _, a := range sd.list { - addresses = append(addresses, a) - } - return addresses -} -func (sd *ServerDiscovery) GetInstancesAsStrings() (addresses []string) { - for _, i := range sd.list { - addresses = append(addresses, string(i)) - } - return addresses -} -func (sd *ServerDiscovery) GetInstancesAsMap() (addresses map[string]ServerAddress) { - addresses = make(map[string]ServerAddress) - for _, i := range sd.list { - addresses[string(i)] = i - } - return addresses -} diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index fcdad30ff..6b5fbe2f9 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -1,7 +1,7 @@ syntax = "proto3"; package volume_server_pb; -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"; import "remote.proto"; @@ -53,13 +53,9 @@ service VolumeServer { } rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { } - rpc ReceiveFile (stream ReceiveFileRequest) returns (ReceiveFileResponse) { - } rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) { } - rpc ReadNeedleMeta (ReadNeedleMetaRequest) returns (ReadNeedleMetaResponse) { - } rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) { } rpc ReadAllNeedles (ReadAllNeedlesRequest) returns (stream ReadAllNeedlesResponse) { @@ -89,8 +85,6 @@ service VolumeServer { } rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) { } - rpc VolumeEcShardsInfo (VolumeEcShardsInfoRequest) returns (VolumeEcShardsInfoResponse) { - } // tiered storage rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) { @@ -153,7 +147,6 @@ message VacuumVolumeCompactRequest { } message VacuumVolumeCompactResponse { int64 processed_bytes = 1; - float load_avg_1m = 2; } message VacuumVolumeCommitRequest { @@ -161,7 +154,6 @@ message VacuumVolumeCommitRequest { } message VacuumVolumeCommitResponse { bool is_read_only = 1; - uint64 volume_size = 2; } message VacuumVolumeCleanupRequest { @@ -184,7 +176,6 @@ message AllocateVolumeRequest { string ttl = 5; uint32 memory_map_max_size_mb = 6; string disk_type = 7; - uint32 version = 8; } message AllocateVolumeResponse { } @@ -200,7 +191,6 @@ message VolumeSyncStatusResponse { uint64 tail_offset = 6; uint32 compact_revision = 7; uint64 idx_file_size = 8; - uint32 version = 9; } message VolumeIncrementalCopyRequest { @@ -225,14 +215,12 @@ message VolumeUnmountResponse { message VolumeDeleteRequest { uint32 volume_id = 1; - bool only_empty = 2; } message VolumeDeleteResponse { } message VolumeMarkReadonlyRequest { uint32 volume_id = 1; - bool persist = 2; } message VolumeMarkReadonlyResponse { } @@ -256,9 +244,6 @@ message VolumeStatusRequest { } message VolumeStatusResponse { bool is_read_only = 1; - uint64 volume_size = 2; - uint64 file_count = 3; - uint64 file_deleted_count = 4; } message VolumeCopyRequest { @@ -268,7 +253,6 @@ message VolumeCopyRequest { string ttl = 4; string source_data_node = 5; string disk_type = 6; - int64 io_byte_per_second = 7; } message VolumeCopyResponse { uint64 last_append_at_ns = 1; @@ -289,48 +273,14 @@ message CopyFileResponse { int64 modified_ts_ns = 2; } -message ReceiveFileRequest { - oneof data { - ReceiveFileInfo info = 1; - bytes file_content = 2; - } -} - -message ReceiveFileInfo { - uint32 volume_id = 1; - string ext = 2; - string collection = 3; - bool is_ec_volume = 4; - uint32 shard_id = 5; - uint64 file_size = 6; -} - -message ReceiveFileResponse { - uint64 bytes_written = 1; - string error = 2; -} - message ReadNeedleBlobRequest { - uint32 volume_id = 1; - int64 offset = 3; // actual offset - int32 size = 4; -} -message ReadNeedleBlobResponse { - bytes needle_blob = 1; -} - -message ReadNeedleMetaRequest { uint32 volume_id = 1; uint64 needle_id = 2; int64 offset = 3; // actual offset int32 size = 4; } -message ReadNeedleMetaResponse { - uint32 cookie = 1; - uint64 last_modified = 2; - uint32 crc = 3; - string ttl = 4; - uint64 append_at_ns = 5; +message ReadNeedleBlobResponse { + bytes needle_blob = 1; } message WriteNeedleBlobRequest { @@ -350,11 +300,6 @@ message ReadAllNeedlesResponse { uint64 needle_id = 2; uint32 cookie = 3; bytes needle_blob = 5; - bool needle_blob_compressed = 6; - uint64 last_modified = 7; - uint32 crc = 8; - bytes name = 9; - bytes mime = 10; } message VolumeTailSenderRequest { @@ -366,7 +311,6 @@ message VolumeTailSenderResponse { bytes needle_header = 1; bytes needle_body = 2; bool is_last_chunk = 3; - uint32 version = 4; } message VolumeTailReceiverRequest { @@ -401,7 +345,6 @@ message VolumeEcShardsCopyRequest { string source_data_node = 5; bool copy_ecj_file = 6; bool copy_vif_file = 7; - uint32 disk_id = 8; // Target disk ID for storing EC shards } message VolumeEcShardsCopyResponse { } @@ -457,19 +400,6 @@ message VolumeEcShardsToVolumeRequest { message VolumeEcShardsToVolumeResponse { } -message VolumeEcShardsInfoRequest { - uint32 volume_id = 1; -} -message VolumeEcShardsInfoResponse { - repeated EcShardInfo ec_shard_infos = 1; -} - -message EcShardInfo { - uint32 shard_id = 1; - int64 size = 2; - string collection = 3; -} - message ReadVolumeFileStatusRequest { uint32 volume_id = 1; } @@ -483,8 +413,6 @@ message ReadVolumeFileStatusResponse { uint32 compaction_revision = 7; string collection = 8; string disk_type = 9; - VolumeInfo volume_info = 10; - uint32 version = 11; } message DiskStatus { @@ -521,19 +449,6 @@ message VolumeInfo { repeated RemoteFile files = 1; uint32 version = 2; string replication = 3; - uint32 bytes_offset = 4; - int64 dat_file_size = 5; // store the original dat file size - uint64 expire_at_sec = 6; // expiration time of ec volume - bool read_only = 7; -} -message OldVersionVolumeInfo { - repeated RemoteFile files = 1; - uint32 version = 2; - string replication = 3; - uint32 BytesOffset = 4; - int64 dat_file_size = 5; // store the original dat file size - uint64 DestroyTime = 6; // expiration time of ec volume - bool read_only = 7; } // tiered storage @@ -593,7 +508,6 @@ message FetchAndWriteNeedleRequest { remote_pb.RemoteStorageLocation remote_location = 16; } message FetchAndWriteNeedleResponse { - string e_tag = 1; } // select on volume servers @@ -614,7 +528,7 @@ message QueryRequest { string file_header_info = 1; // Valid values: NONE | USE | IGNORE string record_delimiter = 2; // Default: \n string field_delimiter = 3; // Default: , - string quote_character = 4; // Default: " + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " string comments = 6; // Default: # // If true, records might contain record delimiters within quote characters @@ -637,7 +551,7 @@ message QueryRequest { string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED string record_delimiter = 2; // Default: \n string field_delimiter = 3; // Default: , - string quote_character = 4; // Default: " + string quote_charactoer = 4; // Default: " string quote_escape_character = 5; // Default: " } message JSONOutput { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 503db63ef..4e4690f13 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,18 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: volume_server.proto package volume_server_pb import ( - remote_pb "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" + remote_pb "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -23,18 +22,21 @@ const ( ) type BatchDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` - SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` + SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` } func (x *BatchDeleteRequest) Reset() { *x = BatchDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BatchDeleteRequest) String() string { @@ -45,7 +47,7 @@ func (*BatchDeleteRequest) ProtoMessage() {} func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -75,17 +77,20 @@ func (x *BatchDeleteRequest) GetSkipCookieCheck() bool { } type BatchDeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } func (x *BatchDeleteResponse) Reset() { *x = BatchDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BatchDeleteResponse) String() string { @@ -96,7 +101,7 @@ func (*BatchDeleteResponse) ProtoMessage() {} func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -119,21 +124,24 @@ func (x *BatchDeleteResponse) GetResults() []*DeleteResult { } type DeleteResult struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` - Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` } func (x *DeleteResult) Reset() { *x = DeleteResult{} - mi := &file_volume_server_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteResult) String() string { @@ -144,7 +152,7 @@ func (*DeleteResult) ProtoMessage() {} func (x *DeleteResult) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -195,16 +203,18 @@ func (x *DeleteResult) GetVersion() uint32 { } type Empty struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Empty) Reset() { *x = Empty{} - mi := &file_volume_server_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Empty) String() string { @@ -215,7 +225,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -231,17 +241,20 @@ func (*Empty) Descriptor() ([]byte, []int) { } type VacuumVolumeCheckRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCheckRequest) Reset() { *x = VacuumVolumeCheckRequest{} - mi := &file_volume_server_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCheckRequest) String() string { @@ -252,7 +265,7 @@ func (*VacuumVolumeCheckRequest) ProtoMessage() {} func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -275,17 +288,20 @@ func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { } type VacuumVolumeCheckResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` } func (x *VacuumVolumeCheckResponse) Reset() { *x = VacuumVolumeCheckResponse{} - mi := &file_volume_server_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCheckResponse) String() string { @@ -296,7 +312,7 @@ func (*VacuumVolumeCheckResponse) ProtoMessage() {} func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -319,18 +335,21 @@ func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { } type VacuumVolumeCompactRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"` } func (x *VacuumVolumeCompactRequest) Reset() { *x = VacuumVolumeCompactRequest{} - mi := &file_volume_server_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCompactRequest) String() string { @@ -341,7 +360,7 @@ func (*VacuumVolumeCompactRequest) ProtoMessage() {} func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -371,18 +390,20 @@ func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 { } type VacuumVolumeCompactResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProcessedBytes int64 `protobuf:"varint,1,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` - LoadAvg_1M float32 `protobuf:"fixed32,2,opt,name=load_avg_1m,json=loadAvg1m,proto3" json:"load_avg_1m,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProcessedBytes int64 `protobuf:"varint,1,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` } func (x *VacuumVolumeCompactResponse) Reset() { *x = VacuumVolumeCompactResponse{} - mi := &file_volume_server_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCompactResponse) String() string { @@ -393,7 +414,7 @@ func (*VacuumVolumeCompactResponse) ProtoMessage() {} func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -415,25 +436,21 @@ func (x *VacuumVolumeCompactResponse) GetProcessedBytes() int64 { return 0 } -func (x *VacuumVolumeCompactResponse) GetLoadAvg_1M() float32 { - if x != nil { - return x.LoadAvg_1M - } - return 0 -} - type VacuumVolumeCommitRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCommitRequest) Reset() { *x = VacuumVolumeCommitRequest{} - mi := &file_volume_server_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCommitRequest) String() string { @@ -444,7 +461,7 @@ func (*VacuumVolumeCommitRequest) ProtoMessage() {} func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -467,18 +484,20 @@ func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 { } type VacuumVolumeCommitResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` - VolumeSize uint64 `protobuf:"varint,2,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } func (x *VacuumVolumeCommitResponse) Reset() { *x = VacuumVolumeCommitResponse{} - mi := &file_volume_server_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCommitResponse) String() string { @@ -489,7 +508,7 @@ func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -511,25 +530,21 @@ func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool { return false } -func (x *VacuumVolumeCommitResponse) GetVolumeSize() uint64 { - if x != nil { - return x.VolumeSize - } - return 0 -} - type VacuumVolumeCleanupRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VacuumVolumeCleanupRequest) Reset() { *x = VacuumVolumeCleanupRequest{} - mi := &file_volume_server_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCleanupRequest) String() string { @@ -540,7 +555,7 @@ func (*VacuumVolumeCleanupRequest) ProtoMessage() {} func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -563,16 +578,18 @@ func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { } type VacuumVolumeCleanupResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VacuumVolumeCleanupResponse) Reset() { *x = VacuumVolumeCleanupResponse{} - mi := &file_volume_server_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VacuumVolumeCleanupResponse) String() string { @@ -583,7 +600,7 @@ func (*VacuumVolumeCleanupResponse) ProtoMessage() {} func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -599,17 +616,20 @@ func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { } type DeleteCollectionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *DeleteCollectionRequest) Reset() { *x = DeleteCollectionRequest{} - mi := &file_volume_server_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteCollectionRequest) String() string { @@ -620,7 +640,7 @@ func (*DeleteCollectionRequest) ProtoMessage() {} func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -643,16 +663,18 @@ func (x *DeleteCollectionRequest) GetCollection() string { } type DeleteCollectionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *DeleteCollectionResponse) Reset() { *x = DeleteCollectionResponse{} - mi := &file_volume_server_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DeleteCollectionResponse) String() string { @@ -663,7 +685,7 @@ func (*DeleteCollectionResponse) ProtoMessage() {} func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[13] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -679,24 +701,26 @@ func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { } type AllocateVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` - DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - Version uint32 `protobuf:"varint,8,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *AllocateVolumeRequest) Reset() { *x = AllocateVolumeRequest{} - mi := &file_volume_server_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AllocateVolumeRequest) String() string { @@ -707,7 +731,7 @@ func (*AllocateVolumeRequest) ProtoMessage() {} func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[14] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -771,24 +795,19 @@ func (x *AllocateVolumeRequest) GetDiskType() string { return "" } -func (x *AllocateVolumeRequest) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - type AllocateVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *AllocateVolumeResponse) Reset() { *x = AllocateVolumeResponse{} - mi := &file_volume_server_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AllocateVolumeResponse) String() string { @@ -799,7 +818,7 @@ func (*AllocateVolumeResponse) ProtoMessage() {} func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[15] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -815,17 +834,20 @@ func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { } type VolumeSyncStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeSyncStatusRequest) Reset() { *x = VolumeSyncStatusRequest{} - mi := &file_volume_server_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeSyncStatusRequest) String() string { @@ -836,7 +858,7 @@ func (*VolumeSyncStatusRequest) ProtoMessage() {} func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[16] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -859,24 +881,26 @@ func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 { } type VolumeSyncStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` - TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"` - CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"` + CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` } func (x *VolumeSyncStatusResponse) Reset() { *x = VolumeSyncStatusResponse{} - mi := &file_volume_server_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeSyncStatusResponse) String() string { @@ -887,7 +911,7 @@ func (*VolumeSyncStatusResponse) ProtoMessage() {} func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[17] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -951,26 +975,22 @@ func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { return 0 } -func (x *VolumeSyncStatusResponse) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - type VolumeIncrementalCopyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` } func (x *VolumeIncrementalCopyRequest) Reset() { *x = VolumeIncrementalCopyRequest{} - mi := &file_volume_server_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeIncrementalCopyRequest) String() string { @@ -981,7 +1001,7 @@ func (*VolumeIncrementalCopyRequest) ProtoMessage() {} func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[18] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1011,17 +1031,20 @@ func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { } type VolumeIncrementalCopyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } func (x *VolumeIncrementalCopyResponse) Reset() { *x = VolumeIncrementalCopyResponse{} - mi := &file_volume_server_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeIncrementalCopyResponse) String() string { @@ -1032,7 +1055,7 @@ func (*VolumeIncrementalCopyResponse) ProtoMessage() {} func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[19] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1055,17 +1078,20 @@ func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte { } type VolumeMountRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMountRequest) Reset() { *x = VolumeMountRequest{} - mi := &file_volume_server_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMountRequest) String() string { @@ -1076,7 +1102,7 @@ func (*VolumeMountRequest) ProtoMessage() {} func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[20] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1099,16 +1125,18 @@ func (x *VolumeMountRequest) GetVolumeId() uint32 { } type VolumeMountResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeMountResponse) Reset() { *x = VolumeMountResponse{} - mi := &file_volume_server_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMountResponse) String() string { @@ -1119,7 +1147,7 @@ func (*VolumeMountResponse) ProtoMessage() {} func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[21] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1135,17 +1163,20 @@ func (*VolumeMountResponse) Descriptor() ([]byte, []int) { } type VolumeUnmountRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeUnmountRequest) Reset() { *x = VolumeUnmountRequest{} - mi := &file_volume_server_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeUnmountRequest) String() string { @@ -1156,7 +1187,7 @@ func (*VolumeUnmountRequest) ProtoMessage() {} func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[22] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1179,16 +1210,18 @@ func (x *VolumeUnmountRequest) GetVolumeId() uint32 { } type VolumeUnmountResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeUnmountResponse) Reset() { *x = VolumeUnmountResponse{} - mi := &file_volume_server_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeUnmountResponse) String() string { @@ -1199,7 +1232,7 @@ func (*VolumeUnmountResponse) ProtoMessage() {} func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[23] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1215,18 +1248,20 @@ func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { } type VolumeDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - OnlyEmpty bool `protobuf:"varint,2,opt,name=only_empty,json=onlyEmpty,proto3" json:"only_empty,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeDeleteRequest) Reset() { *x = VolumeDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeDeleteRequest) String() string { @@ -1237,7 +1272,7 @@ func (*VolumeDeleteRequest) ProtoMessage() {} func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[24] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1259,24 +1294,19 @@ func (x *VolumeDeleteRequest) GetVolumeId() uint32 { return 0 } -func (x *VolumeDeleteRequest) GetOnlyEmpty() bool { - if x != nil { - return x.OnlyEmpty - } - return false -} - type VolumeDeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeDeleteResponse) Reset() { *x = VolumeDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeDeleteResponse) String() string { @@ -1287,7 +1317,7 @@ func (*VolumeDeleteResponse) ProtoMessage() {} func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[25] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1303,18 +1333,20 @@ func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { } type VolumeMarkReadonlyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Persist bool `protobuf:"varint,2,opt,name=persist,proto3" json:"persist,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMarkReadonlyRequest) Reset() { *x = VolumeMarkReadonlyRequest{} - mi := &file_volume_server_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMarkReadonlyRequest) String() string { @@ -1325,7 +1357,7 @@ func (*VolumeMarkReadonlyRequest) ProtoMessage() {} func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[26] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1347,24 +1379,19 @@ func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { return 0 } -func (x *VolumeMarkReadonlyRequest) GetPersist() bool { - if x != nil { - return x.Persist - } - return false -} - type VolumeMarkReadonlyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeMarkReadonlyResponse) Reset() { *x = VolumeMarkReadonlyResponse{} - mi := &file_volume_server_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMarkReadonlyResponse) String() string { @@ -1375,7 +1402,7 @@ func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[27] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1391,17 +1418,20 @@ func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { } type VolumeMarkWritableRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeMarkWritableRequest) Reset() { *x = VolumeMarkWritableRequest{} - mi := &file_volume_server_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMarkWritableRequest) String() string { @@ -1412,7 +1442,7 @@ func (*VolumeMarkWritableRequest) ProtoMessage() {} func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[28] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1435,16 +1465,18 @@ func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 { } type VolumeMarkWritableResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeMarkWritableResponse) Reset() { *x = VolumeMarkWritableResponse{} - mi := &file_volume_server_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeMarkWritableResponse) String() string { @@ -1455,7 +1487,7 @@ func (*VolumeMarkWritableResponse) ProtoMessage() {} func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[29] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1471,18 +1503,21 @@ func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) { } type VolumeConfigureRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` } func (x *VolumeConfigureRequest) Reset() { *x = VolumeConfigureRequest{} - mi := &file_volume_server_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeConfigureRequest) String() string { @@ -1493,7 +1528,7 @@ func (*VolumeConfigureRequest) ProtoMessage() {} func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[30] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1523,17 +1558,20 @@ func (x *VolumeConfigureRequest) GetReplication() string { } type VolumeConfigureResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (x *VolumeConfigureResponse) Reset() { *x = VolumeConfigureResponse{} - mi := &file_volume_server_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeConfigureResponse) String() string { @@ -1544,7 +1582,7 @@ func (*VolumeConfigureResponse) ProtoMessage() {} func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[31] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1567,17 +1605,20 @@ func (x *VolumeConfigureResponse) GetError() string { } type VolumeStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *VolumeStatusRequest) Reset() { *x = VolumeStatusRequest{} - mi := &file_volume_server_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeStatusRequest) String() string { @@ -1588,7 +1629,7 @@ func (*VolumeStatusRequest) ProtoMessage() {} func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[32] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1611,20 +1652,20 @@ func (x *VolumeStatusRequest) GetVolumeId() uint32 { } type VolumeStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` - VolumeSize uint64 `protobuf:"varint,2,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` - FileCount uint64 `protobuf:"varint,3,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - FileDeletedCount uint64 `protobuf:"varint,4,opt,name=file_deleted_count,json=fileDeletedCount,proto3" json:"file_deleted_count,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } func (x *VolumeStatusResponse) Reset() { *x = VolumeStatusResponse{} - mi := &file_volume_server_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeStatusResponse) String() string { @@ -1635,7 +1676,7 @@ func (*VolumeStatusResponse) ProtoMessage() {} func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[33] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1657,45 +1698,26 @@ func (x *VolumeStatusResponse) GetIsReadOnly() bool { return false } -func (x *VolumeStatusResponse) GetVolumeSize() uint64 { - if x != nil { - return x.VolumeSize - } - return 0 -} - -func (x *VolumeStatusResponse) GetFileCount() uint64 { - if x != nil { - return x.FileCount - } - return 0 -} - -func (x *VolumeStatusResponse) GetFileDeletedCount() uint64 { - if x != nil { - return x.FileDeletedCount - } - return 0 -} - type VolumeCopyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` - SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` - DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - IoBytePerSecond int64 `protobuf:"varint,7,opt,name=io_byte_per_second,json=ioBytePerSecond,proto3" json:"io_byte_per_second,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *VolumeCopyRequest) Reset() { *x = VolumeCopyRequest{} - mi := &file_volume_server_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeCopyRequest) String() string { @@ -1706,7 +1728,7 @@ func (*VolumeCopyRequest) ProtoMessage() {} func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[34] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1763,26 +1785,22 @@ func (x *VolumeCopyRequest) GetDiskType() string { return "" } -func (x *VolumeCopyRequest) GetIoBytePerSecond() int64 { - if x != nil { - return x.IoBytePerSecond - } - return 0 -} - type VolumeCopyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"` - ProcessedBytes int64 `protobuf:"varint,2,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"` + ProcessedBytes int64 `protobuf:"varint,2,opt,name=processed_bytes,json=processedBytes,proto3" json:"processed_bytes,omitempty"` } func (x *VolumeCopyResponse) Reset() { *x = VolumeCopyResponse{} - mi := &file_volume_server_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeCopyResponse) String() string { @@ -1793,7 +1811,7 @@ func (*VolumeCopyResponse) ProtoMessage() {} func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[35] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1823,23 +1841,26 @@ func (x *VolumeCopyResponse) GetProcessedBytes() int64 { } type CopyFileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` - CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` - StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"` - Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` - IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` - IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` + IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"` } func (x *CopyFileRequest) Reset() { *x = CopyFileRequest{} - mi := &file_volume_server_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CopyFileRequest) String() string { @@ -1850,7 +1871,7 @@ func (*CopyFileRequest) ProtoMessage() {} func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[36] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1915,18 +1936,21 @@ func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { } type CopyFileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` - ModifiedTsNs int64 `protobuf:"varint,2,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` + ModifiedTsNs int64 `protobuf:"varint,2,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"` } func (x *CopyFileResponse) Reset() { *x = CopyFileResponse{} - mi := &file_volume_server_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CopyFileResponse) String() string { @@ -1937,7 +1961,7 @@ func (*CopyFileResponse) ProtoMessage() {} func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { mi := &file_volume_server_proto_msgTypes[37] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1966,238 +1990,24 @@ func (x *CopyFileResponse) GetModifiedTsNs() int64 { return 0 } -type ReceiveFileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Data: - // - // *ReceiveFileRequest_Info - // *ReceiveFileRequest_FileContent - Data isReceiveFileRequest_Data `protobuf_oneof:"data"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReceiveFileRequest) Reset() { - *x = ReceiveFileRequest{} - mi := &file_volume_server_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReceiveFileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReceiveFileRequest) ProtoMessage() {} - -func (x *ReceiveFileRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[38] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReceiveFileRequest.ProtoReflect.Descriptor instead. -func (*ReceiveFileRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{38} -} - -func (x *ReceiveFileRequest) GetData() isReceiveFileRequest_Data { - if x != nil { - return x.Data - } - return nil -} - -func (x *ReceiveFileRequest) GetInfo() *ReceiveFileInfo { - if x != nil { - if x, ok := x.Data.(*ReceiveFileRequest_Info); ok { - return x.Info - } - } - return nil -} - -func (x *ReceiveFileRequest) GetFileContent() []byte { - if x != nil { - if x, ok := x.Data.(*ReceiveFileRequest_FileContent); ok { - return x.FileContent - } - } - return nil -} - -type isReceiveFileRequest_Data interface { - isReceiveFileRequest_Data() -} - -type ReceiveFileRequest_Info struct { - Info *ReceiveFileInfo `protobuf:"bytes,1,opt,name=info,proto3,oneof"` -} - -type ReceiveFileRequest_FileContent struct { - FileContent []byte `protobuf:"bytes,2,opt,name=file_content,json=fileContent,proto3,oneof"` -} - -func (*ReceiveFileRequest_Info) isReceiveFileRequest_Data() {} - -func (*ReceiveFileRequest_FileContent) isReceiveFileRequest_Data() {} - -type ReceiveFileInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - IsEcVolume bool `protobuf:"varint,4,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` - ShardId uint32 `protobuf:"varint,5,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - FileSize uint64 `protobuf:"varint,6,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReceiveFileInfo) Reset() { - *x = ReceiveFileInfo{} - mi := &file_volume_server_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReceiveFileInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReceiveFileInfo) ProtoMessage() {} - -func (x *ReceiveFileInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[39] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReceiveFileInfo.ProtoReflect.Descriptor instead. -func (*ReceiveFileInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{39} -} - -func (x *ReceiveFileInfo) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *ReceiveFileInfo) GetExt() string { - if x != nil { - return x.Ext - } - return "" -} - -func (x *ReceiveFileInfo) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *ReceiveFileInfo) GetIsEcVolume() bool { - if x != nil { - return x.IsEcVolume - } - return false -} - -func (x *ReceiveFileInfo) GetShardId() uint32 { - if x != nil { - return x.ShardId - } - return 0 -} - -func (x *ReceiveFileInfo) GetFileSize() uint64 { - if x != nil { - return x.FileSize - } - return 0 -} - -type ReceiveFileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - BytesWritten uint64 `protobuf:"varint,1,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"` - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReceiveFileResponse) Reset() { - *x = ReceiveFileResponse{} - mi := &file_volume_server_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReceiveFileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReceiveFileResponse) ProtoMessage() {} - -func (x *ReceiveFileResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[40] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReceiveFileResponse.ProtoReflect.Descriptor instead. -func (*ReceiveFileResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{40} -} - -func (x *ReceiveFileResponse) GetBytesWritten() uint64 { - if x != nil { - return x.BytesWritten - } - return 0 -} - -func (x *ReceiveFileResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - type ReadNeedleBlobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset - Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset + Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` } func (x *ReadNeedleBlobRequest) Reset() { *x = ReadNeedleBlobRequest{} - mi := &file_volume_server_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadNeedleBlobRequest) String() string { @@ -2207,8 +2017,8 @@ func (x *ReadNeedleBlobRequest) String() string { func (*ReadNeedleBlobRequest) ProtoMessage() {} func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[41] - if x != nil { + mi := &file_volume_server_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2220,7 +2030,7 @@ func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{41} + return file_volume_server_proto_rawDescGZIP(), []int{38} } func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { @@ -2230,6 +2040,13 @@ func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { return 0 } +func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + func (x *ReadNeedleBlobRequest) GetOffset() int64 { if x != nil { return x.Offset @@ -2245,17 +2062,20 @@ func (x *ReadNeedleBlobRequest) GetSize() int32 { } type ReadNeedleBlobResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *ReadNeedleBlobResponse) Reset() { *x = ReadNeedleBlobResponse{} - mi := &file_volume_server_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadNeedleBlobResponse) String() string { @@ -2265,8 +2085,8 @@ func (x *ReadNeedleBlobResponse) String() string { func (*ReadNeedleBlobResponse) ProtoMessage() {} func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[42] - if x != nil { + mi := &file_volume_server_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2278,7 +2098,7 @@ func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{42} + return file_volume_server_proto_rawDescGZIP(), []int{39} } func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { @@ -2288,165 +2108,24 @@ func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { return nil } -type ReadNeedleMetaRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` - Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset - Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReadNeedleMetaRequest) Reset() { - *x = ReadNeedleMetaRequest{} - mi := &file_volume_server_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReadNeedleMetaRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadNeedleMetaRequest) ProtoMessage() {} - -func (x *ReadNeedleMetaRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[43] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadNeedleMetaRequest.ProtoReflect.Descriptor instead. -func (*ReadNeedleMetaRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{43} -} - -func (x *ReadNeedleMetaRequest) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *ReadNeedleMetaRequest) GetNeedleId() uint64 { - if x != nil { - return x.NeedleId - } - return 0 -} - -func (x *ReadNeedleMetaRequest) GetOffset() int64 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *ReadNeedleMetaRequest) GetSize() int32 { - if x != nil { - return x.Size - } - return 0 -} - -type ReadNeedleMetaResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cookie uint32 `protobuf:"varint,1,opt,name=cookie,proto3" json:"cookie,omitempty"` - LastModified uint64 `protobuf:"varint,2,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` - Crc uint32 `protobuf:"varint,3,opt,name=crc,proto3" json:"crc,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` - AppendAtNs uint64 `protobuf:"varint,5,opt,name=append_at_ns,json=appendAtNs,proto3" json:"append_at_ns,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReadNeedleMetaResponse) Reset() { - *x = ReadNeedleMetaResponse{} - mi := &file_volume_server_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReadNeedleMetaResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadNeedleMetaResponse) ProtoMessage() {} - -func (x *ReadNeedleMetaResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[44] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadNeedleMetaResponse.ProtoReflect.Descriptor instead. -func (*ReadNeedleMetaResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{44} -} - -func (x *ReadNeedleMetaResponse) GetCookie() uint32 { - if x != nil { - return x.Cookie - } - return 0 -} - -func (x *ReadNeedleMetaResponse) GetLastModified() uint64 { - if x != nil { - return x.LastModified - } - return 0 -} - -func (x *ReadNeedleMetaResponse) GetCrc() uint32 { - if x != nil { - return x.Crc - } - return 0 -} - -func (x *ReadNeedleMetaResponse) GetTtl() string { - if x != nil { - return x.Ttl - } - return "" -} - -func (x *ReadNeedleMetaResponse) GetAppendAtNs() uint64 { - if x != nil { - return x.AppendAtNs - } - return 0 -} - type WriteNeedleBlobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` - Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *WriteNeedleBlobRequest) Reset() { *x = WriteNeedleBlobRequest{} - mi := &file_volume_server_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *WriteNeedleBlobRequest) String() string { @@ -2456,8 +2135,8 @@ func (x *WriteNeedleBlobRequest) String() string { func (*WriteNeedleBlobRequest) ProtoMessage() {} func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[45] - if x != nil { + mi := &file_volume_server_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2469,7 +2148,7 @@ func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{45} + return file_volume_server_proto_rawDescGZIP(), []int{40} } func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { @@ -2501,16 +2180,18 @@ func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte { } type WriteNeedleBlobResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *WriteNeedleBlobResponse) Reset() { *x = WriteNeedleBlobResponse{} - mi := &file_volume_server_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *WriteNeedleBlobResponse) String() string { @@ -2520,8 +2201,8 @@ func (x *WriteNeedleBlobResponse) String() string { func (*WriteNeedleBlobResponse) ProtoMessage() {} func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[46] - if x != nil { + mi := &file_volume_server_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2533,21 +2214,24 @@ func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{46} + return file_volume_server_proto_rawDescGZIP(), []int{41} } type ReadAllNeedlesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeIds []uint32 `protobuf:"varint,1,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []uint32 `protobuf:"varint,1,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } func (x *ReadAllNeedlesRequest) Reset() { *x = ReadAllNeedlesRequest{} - mi := &file_volume_server_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadAllNeedlesRequest) String() string { @@ -2557,8 +2241,8 @@ func (x *ReadAllNeedlesRequest) String() string { func (*ReadAllNeedlesRequest) ProtoMessage() {} func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[47] - if x != nil { + mi := &file_volume_server_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2570,7 +2254,7 @@ func (x *ReadAllNeedlesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadAllNeedlesRequest.ProtoReflect.Descriptor instead. func (*ReadAllNeedlesRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{47} + return file_volume_server_proto_rawDescGZIP(), []int{42} } func (x *ReadAllNeedlesRequest) GetVolumeIds() []uint32 { @@ -2581,25 +2265,23 @@ func (x *ReadAllNeedlesRequest) GetVolumeIds() []uint32 { } type ReadAllNeedlesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` - Cookie uint32 `protobuf:"varint,3,opt,name=cookie,proto3" json:"cookie,omitempty"` - NeedleBlob []byte `protobuf:"bytes,5,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` - NeedleBlobCompressed bool `protobuf:"varint,6,opt,name=needle_blob_compressed,json=needleBlobCompressed,proto3" json:"needle_blob_compressed,omitempty"` - LastModified uint64 `protobuf:"varint,7,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` - Crc uint32 `protobuf:"varint,8,opt,name=crc,proto3" json:"crc,omitempty"` - Name []byte `protobuf:"bytes,9,opt,name=name,proto3" json:"name,omitempty"` - Mime []byte `protobuf:"bytes,10,opt,name=mime,proto3" json:"mime,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Cookie uint32 `protobuf:"varint,3,opt,name=cookie,proto3" json:"cookie,omitempty"` + NeedleBlob []byte `protobuf:"bytes,5,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` } func (x *ReadAllNeedlesResponse) Reset() { *x = ReadAllNeedlesResponse{} - mi := &file_volume_server_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadAllNeedlesResponse) String() string { @@ -2609,8 +2291,8 @@ func (x *ReadAllNeedlesResponse) String() string { func (*ReadAllNeedlesResponse) ProtoMessage() {} func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[48] - if x != nil { + mi := &file_volume_server_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2622,7 +2304,7 @@ func (x *ReadAllNeedlesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadAllNeedlesResponse.ProtoReflect.Descriptor instead. func (*ReadAllNeedlesResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{48} + return file_volume_server_proto_rawDescGZIP(), []int{43} } func (x *ReadAllNeedlesResponse) GetVolumeId() uint32 { @@ -2653,55 +2335,23 @@ func (x *ReadAllNeedlesResponse) GetNeedleBlob() []byte { return nil } -func (x *ReadAllNeedlesResponse) GetNeedleBlobCompressed() bool { - if x != nil { - return x.NeedleBlobCompressed - } - return false -} - -func (x *ReadAllNeedlesResponse) GetLastModified() uint64 { - if x != nil { - return x.LastModified - } - return 0 -} - -func (x *ReadAllNeedlesResponse) GetCrc() uint32 { - if x != nil { - return x.Crc - } - return 0 -} - -func (x *ReadAllNeedlesResponse) GetName() []byte { - if x != nil { - return x.Name - } - return nil -} - -func (x *ReadAllNeedlesResponse) GetMime() []byte { - if x != nil { - return x.Mime - } - return nil -} - type VolumeTailSenderRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` } func (x *VolumeTailSenderRequest) Reset() { *x = VolumeTailSenderRequest{} - mi := &file_volume_server_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTailSenderRequest) String() string { @@ -2711,8 +2361,8 @@ func (x *VolumeTailSenderRequest) String() string { func (*VolumeTailSenderRequest) ProtoMessage() {} func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[49] - if x != nil { + mi := &file_volume_server_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2724,7 +2374,7 @@ func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{49} + return file_volume_server_proto_rawDescGZIP(), []int{44} } func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { @@ -2749,20 +2399,22 @@ func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { } type VolumeTailSenderResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` - NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` - IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` - Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` + NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` + IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` } func (x *VolumeTailSenderResponse) Reset() { *x = VolumeTailSenderResponse{} - mi := &file_volume_server_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTailSenderResponse) String() string { @@ -2772,8 +2424,8 @@ func (x *VolumeTailSenderResponse) String() string { func (*VolumeTailSenderResponse) ProtoMessage() {} func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[50] - if x != nil { + mi := &file_volume_server_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2785,7 +2437,7 @@ func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{50} + return file_volume_server_proto_rawDescGZIP(), []int{45} } func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { @@ -2809,28 +2461,24 @@ func (x *VolumeTailSenderResponse) GetIsLastChunk() bool { return false } -func (x *VolumeTailSenderResponse) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - type VolumeTailReceiverRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` - SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` + SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` } func (x *VolumeTailReceiverRequest) Reset() { *x = VolumeTailReceiverRequest{} - mi := &file_volume_server_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTailReceiverRequest) String() string { @@ -2840,8 +2488,8 @@ func (x *VolumeTailReceiverRequest) String() string { func (*VolumeTailReceiverRequest) ProtoMessage() {} func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[51] - if x != nil { + mi := &file_volume_server_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2853,7 +2501,7 @@ func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{51} + return file_volume_server_proto_rawDescGZIP(), []int{46} } func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { @@ -2885,16 +2533,18 @@ func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string { } type VolumeTailReceiverResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeTailReceiverResponse) Reset() { *x = VolumeTailReceiverResponse{} - mi := &file_volume_server_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTailReceiverResponse) String() string { @@ -2904,8 +2554,8 @@ func (x *VolumeTailReceiverResponse) String() string { func (*VolumeTailReceiverResponse) ProtoMessage() {} func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[52] - if x != nil { + mi := &file_volume_server_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2917,22 +2567,25 @@ func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{52} + return file_volume_server_proto_rawDescGZIP(), []int{47} } type VolumeEcShardsGenerateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsGenerateRequest) Reset() { *x = VolumeEcShardsGenerateRequest{} - mi := &file_volume_server_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsGenerateRequest) String() string { @@ -2942,8 +2595,8 @@ func (x *VolumeEcShardsGenerateRequest) String() string { func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[53] - if x != nil { + mi := &file_volume_server_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2955,7 +2608,7 @@ func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{53} + return file_volume_server_proto_rawDescGZIP(), []int{48} } func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { @@ -2973,16 +2626,18 @@ func (x *VolumeEcShardsGenerateRequest) GetCollection() string { } type VolumeEcShardsGenerateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsGenerateResponse) Reset() { *x = VolumeEcShardsGenerateResponse{} - mi := &file_volume_server_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsGenerateResponse) String() string { @@ -2992,8 +2647,8 @@ func (x *VolumeEcShardsGenerateResponse) String() string { func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[54] - if x != nil { + mi := &file_volume_server_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3005,22 +2660,25 @@ func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{54} + return file_volume_server_proto_rawDescGZIP(), []int{49} } type VolumeEcShardsRebuildRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsRebuildRequest) Reset() { *x = VolumeEcShardsRebuildRequest{} - mi := &file_volume_server_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsRebuildRequest) String() string { @@ -3030,8 +2688,8 @@ func (x *VolumeEcShardsRebuildRequest) String() string { func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[55] - if x != nil { + mi := &file_volume_server_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3043,7 +2701,7 @@ func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{55} + return file_volume_server_proto_rawDescGZIP(), []int{50} } func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { @@ -3061,17 +2719,20 @@ func (x *VolumeEcShardsRebuildRequest) GetCollection() string { } type VolumeEcShardsRebuildResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` } func (x *VolumeEcShardsRebuildResponse) Reset() { *x = VolumeEcShardsRebuildResponse{} - mi := &file_volume_server_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsRebuildResponse) String() string { @@ -3081,8 +2742,8 @@ func (x *VolumeEcShardsRebuildResponse) String() string { func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[56] - if x != nil { + mi := &file_volume_server_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3094,7 +2755,7 @@ func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{56} + return file_volume_server_proto_rawDescGZIP(), []int{51} } func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { @@ -3105,24 +2766,26 @@ func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { } type VolumeEcShardsCopyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` - CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` - SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` - CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` - CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` - DiskId uint32 `protobuf:"varint,8,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID for storing EC shards - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` + CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` } func (x *VolumeEcShardsCopyRequest) Reset() { *x = VolumeEcShardsCopyRequest{} - mi := &file_volume_server_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsCopyRequest) String() string { @@ -3132,8 +2795,8 @@ func (x *VolumeEcShardsCopyRequest) String() string { func (*VolumeEcShardsCopyRequest) ProtoMessage() {} func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[57] - if x != nil { + mi := &file_volume_server_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3145,7 +2808,7 @@ func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{57} + return file_volume_server_proto_rawDescGZIP(), []int{52} } func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { @@ -3197,24 +2860,19 @@ func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { return false } -func (x *VolumeEcShardsCopyRequest) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - type VolumeEcShardsCopyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsCopyResponse) Reset() { *x = VolumeEcShardsCopyResponse{} - mi := &file_volume_server_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsCopyResponse) String() string { @@ -3224,8 +2882,8 @@ func (x *VolumeEcShardsCopyResponse) String() string { func (*VolumeEcShardsCopyResponse) ProtoMessage() {} func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[58] - if x != nil { + mi := &file_volume_server_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3237,23 +2895,26 @@ func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{58} + return file_volume_server_proto_rawDescGZIP(), []int{53} } type VolumeEcShardsDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsDeleteRequest) Reset() { *x = VolumeEcShardsDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsDeleteRequest) String() string { @@ -3263,8 +2924,8 @@ func (x *VolumeEcShardsDeleteRequest) String() string { func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[59] - if x != nil { + mi := &file_volume_server_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3276,7 +2937,7 @@ func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{59} + return file_volume_server_proto_rawDescGZIP(), []int{54} } func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { @@ -3301,16 +2962,18 @@ func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { } type VolumeEcShardsDeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsDeleteResponse) Reset() { *x = VolumeEcShardsDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsDeleteResponse) String() string { @@ -3320,8 +2983,8 @@ func (x *VolumeEcShardsDeleteResponse) String() string { func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[60] - if x != nil { + mi := &file_volume_server_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3333,23 +2996,26 @@ func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{60} + return file_volume_server_proto_rawDescGZIP(), []int{55} } type VolumeEcShardsMountRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsMountRequest) Reset() { *x = VolumeEcShardsMountRequest{} - mi := &file_volume_server_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsMountRequest) String() string { @@ -3359,8 +3025,8 @@ func (x *VolumeEcShardsMountRequest) String() string { func (*VolumeEcShardsMountRequest) ProtoMessage() {} func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[61] - if x != nil { + mi := &file_volume_server_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3372,7 +3038,7 @@ func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{61} + return file_volume_server_proto_rawDescGZIP(), []int{56} } func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { @@ -3397,16 +3063,18 @@ func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 { } type VolumeEcShardsMountResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsMountResponse) Reset() { *x = VolumeEcShardsMountResponse{} - mi := &file_volume_server_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsMountResponse) String() string { @@ -3416,8 +3084,8 @@ func (x *VolumeEcShardsMountResponse) String() string { func (*VolumeEcShardsMountResponse) ProtoMessage() {} func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[62] - if x != nil { + mi := &file_volume_server_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3429,22 +3097,25 @@ func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{62} + return file_volume_server_proto_rawDescGZIP(), []int{57} } type VolumeEcShardsUnmountRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` } func (x *VolumeEcShardsUnmountRequest) Reset() { *x = VolumeEcShardsUnmountRequest{} - mi := &file_volume_server_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsUnmountRequest) String() string { @@ -3454,8 +3125,8 @@ func (x *VolumeEcShardsUnmountRequest) String() string { func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[63] - if x != nil { + mi := &file_volume_server_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,7 +3138,7 @@ func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{63} + return file_volume_server_proto_rawDescGZIP(), []int{58} } func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { @@ -3485,16 +3156,18 @@ func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { } type VolumeEcShardsUnmountResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsUnmountResponse) Reset() { *x = VolumeEcShardsUnmountResponse{} - mi := &file_volume_server_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsUnmountResponse) String() string { @@ -3504,8 +3177,8 @@ func (x *VolumeEcShardsUnmountResponse) String() string { func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[64] - if x != nil { + mi := &file_volume_server_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3517,25 +3190,28 @@ func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{64} + return file_volume_server_proto_rawDescGZIP(), []int{59} } type VolumeEcShardReadRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` - Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` - FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` } func (x *VolumeEcShardReadRequest) Reset() { *x = VolumeEcShardReadRequest{} - mi := &file_volume_server_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardReadRequest) String() string { @@ -3545,8 +3221,8 @@ func (x *VolumeEcShardReadRequest) String() string { func (*VolumeEcShardReadRequest) ProtoMessage() {} func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[65] - if x != nil { + mi := &file_volume_server_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3558,7 +3234,7 @@ func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{65} + return file_volume_server_proto_rawDescGZIP(), []int{60} } func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { @@ -3597,18 +3273,21 @@ func (x *VolumeEcShardReadRequest) GetFileKey() uint64 { } type VolumeEcShardReadResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` } func (x *VolumeEcShardReadResponse) Reset() { *x = VolumeEcShardReadResponse{} - mi := &file_volume_server_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardReadResponse) String() string { @@ -3618,8 +3297,8 @@ func (x *VolumeEcShardReadResponse) String() string { func (*VolumeEcShardReadResponse) ProtoMessage() {} func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[66] - if x != nil { + mi := &file_volume_server_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3631,7 +3310,7 @@ func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{66} + return file_volume_server_proto_rawDescGZIP(), []int{61} } func (x *VolumeEcShardReadResponse) GetData() []byte { @@ -3649,20 +3328,23 @@ func (x *VolumeEcShardReadResponse) GetIsDeleted() bool { } type VolumeEcBlobDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` } func (x *VolumeEcBlobDeleteRequest) Reset() { *x = VolumeEcBlobDeleteRequest{} - mi := &file_volume_server_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcBlobDeleteRequest) String() string { @@ -3672,8 +3354,8 @@ func (x *VolumeEcBlobDeleteRequest) String() string { func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[67] - if x != nil { + mi := &file_volume_server_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3685,7 +3367,7 @@ func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{67} + return file_volume_server_proto_rawDescGZIP(), []int{62} } func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { @@ -3717,16 +3399,18 @@ func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 { } type VolumeEcBlobDeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcBlobDeleteResponse) Reset() { *x = VolumeEcBlobDeleteResponse{} - mi := &file_volume_server_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcBlobDeleteResponse) String() string { @@ -3736,8 +3420,8 @@ func (x *VolumeEcBlobDeleteResponse) String() string { func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[68] - if x != nil { + mi := &file_volume_server_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3749,22 +3433,25 @@ func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{68} + return file_volume_server_proto_rawDescGZIP(), []int{63} } type VolumeEcShardsToVolumeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` } func (x *VolumeEcShardsToVolumeRequest) Reset() { *x = VolumeEcShardsToVolumeRequest{} - mi := &file_volume_server_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsToVolumeRequest) String() string { @@ -3774,8 +3461,8 @@ func (x *VolumeEcShardsToVolumeRequest) String() string { func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[69] - if x != nil { + mi := &file_volume_server_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3787,7 +3474,7 @@ func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{69} + return file_volume_server_proto_rawDescGZIP(), []int{64} } func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { @@ -3805,16 +3492,18 @@ func (x *VolumeEcShardsToVolumeRequest) GetCollection() string { } type VolumeEcShardsToVolumeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeEcShardsToVolumeResponse) Reset() { *x = VolumeEcShardsToVolumeResponse{} - mi := &file_volume_server_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeEcShardsToVolumeResponse) String() string { @@ -3824,8 +3513,8 @@ func (x *VolumeEcShardsToVolumeResponse) String() string { func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[70] - if x != nil { + mi := &file_volume_server_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3837,169 +3526,24 @@ func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{70} -} - -type VolumeEcShardsInfoRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeEcShardsInfoRequest) Reset() { - *x = VolumeEcShardsInfoRequest{} - mi := &file_volume_server_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeEcShardsInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeEcShardsInfoRequest) ProtoMessage() {} - -func (x *VolumeEcShardsInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[71] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeEcShardsInfoRequest.ProtoReflect.Descriptor instead. -func (*VolumeEcShardsInfoRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{71} -} - -func (x *VolumeEcShardsInfoRequest) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -type VolumeEcShardsInfoResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - EcShardInfos []*EcShardInfo `protobuf:"bytes,1,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeEcShardsInfoResponse) Reset() { - *x = VolumeEcShardsInfoResponse{} - mi := &file_volume_server_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeEcShardsInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeEcShardsInfoResponse) ProtoMessage() {} - -func (x *VolumeEcShardsInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[72] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeEcShardsInfoResponse.ProtoReflect.Descriptor instead. -func (*VolumeEcShardsInfoResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{72} -} - -func (x *VolumeEcShardsInfoResponse) GetEcShardInfos() []*EcShardInfo { - if x != nil { - return x.EcShardInfos - } - return nil -} - -type EcShardInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EcShardInfo) Reset() { - *x = EcShardInfo{} - mi := &file_volume_server_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EcShardInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EcShardInfo) ProtoMessage() {} - -func (x *EcShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[73] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EcShardInfo.ProtoReflect.Descriptor instead. -func (*EcShardInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{73} -} - -func (x *EcShardInfo) GetShardId() uint32 { - if x != nil { - return x.ShardId - } - return 0 -} - -func (x *EcShardInfo) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *EcShardInfo) GetCollection() string { - if x != nil { - return x.Collection - } - return "" + return file_volume_server_proto_rawDescGZIP(), []int{65} } type ReadVolumeFileStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } func (x *ReadVolumeFileStatusRequest) Reset() { *x = ReadVolumeFileStatusRequest{} - mi := &file_volume_server_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadVolumeFileStatusRequest) String() string { @@ -4009,8 +3553,8 @@ func (x *ReadVolumeFileStatusRequest) String() string { func (*ReadVolumeFileStatusRequest) ProtoMessage() {} func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[74] - if x != nil { + mi := &file_volume_server_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4022,7 +3566,7 @@ func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{74} + return file_volume_server_proto_rawDescGZIP(), []int{66} } func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { @@ -4033,27 +3577,28 @@ func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { } type ReadVolumeFileStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` - IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` - DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` - DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` - Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` - DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - VolumeInfo *VolumeInfo `protobuf:"bytes,10,opt,name=volume_info,json=volumeInfo,proto3" json:"volume_info,omitempty"` - Version uint32 `protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` + DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` + DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` + DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *ReadVolumeFileStatusResponse) Reset() { *x = ReadVolumeFileStatusResponse{} - mi := &file_volume_server_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ReadVolumeFileStatusResponse) String() string { @@ -4063,8 +3608,8 @@ func (x *ReadVolumeFileStatusResponse) String() string { func (*ReadVolumeFileStatusResponse) ProtoMessage() {} func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[75] - if x != nil { + mi := &file_volume_server_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4076,7 +3621,7 @@ func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{75} + return file_volume_server_proto_rawDescGZIP(), []int{67} } func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { @@ -4142,38 +3687,27 @@ func (x *ReadVolumeFileStatusResponse) GetDiskType() string { return "" } -func (x *ReadVolumeFileStatusResponse) GetVolumeInfo() *VolumeInfo { - if x != nil { - return x.VolumeInfo - } - return nil -} - -func (x *ReadVolumeFileStatusResponse) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - type DiskStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` - Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` - PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` - PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` - DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` + DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } func (x *DiskStatus) Reset() { *x = DiskStatus{} - mi := &file_volume_server_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *DiskStatus) String() string { @@ -4183,8 +3717,8 @@ func (x *DiskStatus) String() string { func (*DiskStatus) ProtoMessage() {} func (x *DiskStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[76] - if x != nil { + mi := &file_volume_server_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4196,7 +3730,7 @@ func (x *DiskStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. func (*DiskStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{76} + return file_volume_server_proto_rawDescGZIP(), []int{68} } func (x *DiskStatus) GetDir() string { @@ -4249,23 +3783,26 @@ func (x *DiskStatus) GetDiskType() string { } type MemStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` - Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` - Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` - Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` - Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` + Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` + Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` } func (x *MemStatus) Reset() { *x = MemStatus{} - mi := &file_volume_server_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *MemStatus) String() string { @@ -4275,8 +3812,8 @@ func (x *MemStatus) String() string { func (*MemStatus) ProtoMessage() {} func (x *MemStatus) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[77] - if x != nil { + mi := &file_volume_server_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4288,7 +3825,7 @@ func (x *MemStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. func (*MemStatus) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{77} + return file_volume_server_proto_rawDescGZIP(), []int{69} } func (x *MemStatus) GetGoroutines() int32 { @@ -4342,23 +3879,26 @@ func (x *MemStatus) GetStack() uint64 { // tired storage on volume servers type RemoteFile struct { - state protoimpl.MessageState `protogen:"open.v1"` - BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"` - BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` - ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` - Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"` + BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` + Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"` } func (x *RemoteFile) Reset() { *x = RemoteFile{} - mi := &file_volume_server_proto_msgTypes[78] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RemoteFile) String() string { @@ -4368,8 +3908,8 @@ func (x *RemoteFile) String() string { func (*RemoteFile) ProtoMessage() {} func (x *RemoteFile) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[78] - if x != nil { + mi := &file_volume_server_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4381,7 +3921,7 @@ func (x *RemoteFile) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead. func (*RemoteFile) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{78} + return file_volume_server_proto_rawDescGZIP(), []int{70} } func (x *RemoteFile) GetBackendType() string { @@ -4434,23 +3974,22 @@ func (x *RemoteFile) GetExtension() string { } type VolumeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` - Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` - BytesOffset uint32 `protobuf:"varint,4,opt,name=bytes_offset,json=bytesOffset,proto3" json:"bytes_offset,omitempty"` - DatFileSize int64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` // store the original dat file size - ExpireAtSec uint64 `protobuf:"varint,6,opt,name=expire_at_sec,json=expireAtSec,proto3" json:"expire_at_sec,omitempty"` // expiration time of ec volume - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` } func (x *VolumeInfo) Reset() { *x = VolumeInfo{} - mi := &file_volume_server_proto_msgTypes[79] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeInfo) String() string { @@ -4460,8 +3999,8 @@ func (x *VolumeInfo) String() string { func (*VolumeInfo) ProtoMessage() {} func (x *VolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[79] - if x != nil { + mi := &file_volume_server_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4473,7 +4012,7 @@ func (x *VolumeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. func (*VolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{79} + return file_volume_server_proto_rawDescGZIP(), []int{71} } func (x *VolumeInfo) GetFiles() []*RemoteFile { @@ -4497,142 +4036,25 @@ func (x *VolumeInfo) GetReplication() string { return "" } -func (x *VolumeInfo) GetBytesOffset() uint32 { - if x != nil { - return x.BytesOffset - } - return 0 -} - -func (x *VolumeInfo) GetDatFileSize() int64 { - if x != nil { - return x.DatFileSize - } - return 0 -} - -func (x *VolumeInfo) GetExpireAtSec() uint64 { - if x != nil { - return x.ExpireAtSec - } - return 0 -} - -func (x *VolumeInfo) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -type OldVersionVolumeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` - Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` - BytesOffset uint32 `protobuf:"varint,4,opt,name=BytesOffset,proto3" json:"BytesOffset,omitempty"` - DatFileSize int64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` // store the original dat file size - DestroyTime uint64 `protobuf:"varint,6,opt,name=DestroyTime,proto3" json:"DestroyTime,omitempty"` // expiration time of ec volume - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OldVersionVolumeInfo) Reset() { - *x = OldVersionVolumeInfo{} - mi := &file_volume_server_proto_msgTypes[80] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OldVersionVolumeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OldVersionVolumeInfo) ProtoMessage() {} - -func (x *OldVersionVolumeInfo) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[80] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OldVersionVolumeInfo.ProtoReflect.Descriptor instead. -func (*OldVersionVolumeInfo) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{80} -} - -func (x *OldVersionVolumeInfo) GetFiles() []*RemoteFile { - if x != nil { - return x.Files - } - return nil -} - -func (x *OldVersionVolumeInfo) GetVersion() uint32 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *OldVersionVolumeInfo) GetReplication() string { - if x != nil { - return x.Replication - } - return "" -} - -func (x *OldVersionVolumeInfo) GetBytesOffset() uint32 { - if x != nil { - return x.BytesOffset - } - return 0 -} - -func (x *OldVersionVolumeInfo) GetDatFileSize() int64 { - if x != nil { - return x.DatFileSize - } - return 0 -} - -func (x *OldVersionVolumeInfo) GetDestroyTime() uint64 { - if x != nil { - return x.DestroyTime - } - return 0 -} - -func (x *OldVersionVolumeInfo) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - // tiered storage type VolumeTierMoveDatToRemoteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` - KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` + KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` } func (x *VolumeTierMoveDatToRemoteRequest) Reset() { *x = VolumeTierMoveDatToRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[81] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTierMoveDatToRemoteRequest) String() string { @@ -4642,8 +4064,8 @@ func (x *VolumeTierMoveDatToRemoteRequest) String() string { func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[81] - if x != nil { + mi := &file_volume_server_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4655,7 +4077,7 @@ func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{81} + return file_volume_server_proto_rawDescGZIP(), []int{72} } func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -4687,18 +4109,21 @@ func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { } type VolumeTierMoveDatToRemoteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` } func (x *VolumeTierMoveDatToRemoteResponse) Reset() { *x = VolumeTierMoveDatToRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[82] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTierMoveDatToRemoteResponse) String() string { @@ -4708,8 +4133,8 @@ func (x *VolumeTierMoveDatToRemoteResponse) String() string { func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[82] - if x != nil { + mi := &file_volume_server_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4721,7 +4146,7 @@ func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{82} + return file_volume_server_proto_rawDescGZIP(), []int{73} } func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -4739,19 +4164,22 @@ func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { } type VolumeTierMoveDatFromRemoteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` - KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` } func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { *x = VolumeTierMoveDatFromRemoteRequest{} - mi := &file_volume_server_proto_msgTypes[83] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTierMoveDatFromRemoteRequest) String() string { @@ -4761,8 +4189,8 @@ func (x *VolumeTierMoveDatFromRemoteRequest) String() string { func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[83] - if x != nil { + mi := &file_volume_server_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4774,7 +4202,7 @@ func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message // Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{83} + return file_volume_server_proto_rawDescGZIP(), []int{74} } func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -4799,18 +4227,21 @@ func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { } type VolumeTierMoveDatFromRemoteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` } func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { *x = VolumeTierMoveDatFromRemoteResponse{} - mi := &file_volume_server_proto_msgTypes[84] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeTierMoveDatFromRemoteResponse) String() string { @@ -4820,8 +4251,8 @@ func (x *VolumeTierMoveDatFromRemoteResponse) String() string { func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[84] - if x != nil { + mi := &file_volume_server_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4833,7 +4264,7 @@ func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{84} + return file_volume_server_proto_rawDescGZIP(), []int{75} } func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -4851,16 +4282,18 @@ func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { } type VolumeServerStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeServerStatusRequest) Reset() { *x = VolumeServerStatusRequest{} - mi := &file_volume_server_proto_msgTypes[85] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeServerStatusRequest) String() string { @@ -4870,8 +4303,8 @@ func (x *VolumeServerStatusRequest) String() string { func (*VolumeServerStatusRequest) ProtoMessage() {} func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[85] - if x != nil { + mi := &file_volume_server_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4883,25 +4316,28 @@ func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{85} + return file_volume_server_proto_rawDescGZIP(), []int{76} } type VolumeServerStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` - MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"` } func (x *VolumeServerStatusResponse) Reset() { *x = VolumeServerStatusResponse{} - mi := &file_volume_server_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeServerStatusResponse) String() string { @@ -4911,8 +4347,8 @@ func (x *VolumeServerStatusResponse) String() string { func (*VolumeServerStatusResponse) ProtoMessage() {} func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[86] - if x != nil { + mi := &file_volume_server_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4924,7 +4360,7 @@ func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{86} + return file_volume_server_proto_rawDescGZIP(), []int{77} } func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { @@ -4963,16 +4399,18 @@ func (x *VolumeServerStatusResponse) GetRack() string { } type VolumeServerLeaveRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeServerLeaveRequest) Reset() { *x = VolumeServerLeaveRequest{} - mi := &file_volume_server_proto_msgTypes[87] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeServerLeaveRequest) String() string { @@ -4982,8 +4420,8 @@ func (x *VolumeServerLeaveRequest) String() string { func (*VolumeServerLeaveRequest) ProtoMessage() {} func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[87] - if x != nil { + mi := &file_volume_server_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4995,20 +4433,22 @@ func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{87} + return file_volume_server_proto_rawDescGZIP(), []int{78} } type VolumeServerLeaveResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *VolumeServerLeaveResponse) Reset() { *x = VolumeServerLeaveResponse{} - mi := &file_volume_server_proto_msgTypes[88] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeServerLeaveResponse) String() string { @@ -5018,8 +4458,8 @@ func (x *VolumeServerLeaveResponse) String() string { func (*VolumeServerLeaveResponse) ProtoMessage() {} func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[88] - if x != nil { + mi := &file_volume_server_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5031,12 +4471,15 @@ func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{88} + return file_volume_server_proto_rawDescGZIP(), []int{79} } // remote storage type FetchAndWriteNeedleRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` Cookie uint32 `protobuf:"varint,3,opt,name=cookie,proto3" json:"cookie,omitempty"` @@ -5047,15 +4490,15 @@ type FetchAndWriteNeedleRequest struct { // remote conf RemoteConf *remote_pb.RemoteConf `protobuf:"bytes,15,opt,name=remote_conf,json=remoteConf,proto3" json:"remote_conf,omitempty"` RemoteLocation *remote_pb.RemoteStorageLocation `protobuf:"bytes,16,opt,name=remote_location,json=remoteLocation,proto3" json:"remote_location,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *FetchAndWriteNeedleRequest) Reset() { *x = FetchAndWriteNeedleRequest{} - mi := &file_volume_server_proto_msgTypes[89] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FetchAndWriteNeedleRequest) String() string { @@ -5065,8 +4508,8 @@ func (x *FetchAndWriteNeedleRequest) String() string { func (*FetchAndWriteNeedleRequest) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[89] - if x != nil { + mi := &file_volume_server_proto_msgTypes[80] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5078,7 +4521,7 @@ func (x *FetchAndWriteNeedleRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleRequest.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{89} + return file_volume_server_proto_rawDescGZIP(), []int{80} } func (x *FetchAndWriteNeedleRequest) GetVolumeId() uint32 { @@ -5145,17 +4588,18 @@ func (x *FetchAndWriteNeedleRequest) GetRemoteLocation() *remote_pb.RemoteStorag } type FetchAndWriteNeedleResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ETag string `protobuf:"bytes,1,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *FetchAndWriteNeedleResponse) Reset() { *x = FetchAndWriteNeedleResponse{} - mi := &file_volume_server_proto_msgTypes[90] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FetchAndWriteNeedleResponse) String() string { @@ -5165,8 +4609,8 @@ func (x *FetchAndWriteNeedleResponse) String() string { func (*FetchAndWriteNeedleResponse) ProtoMessage() {} func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[90] - if x != nil { + mi := &file_volume_server_proto_msgTypes[81] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5178,33 +4622,29 @@ func (x *FetchAndWriteNeedleResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchAndWriteNeedleResponse.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{90} -} - -func (x *FetchAndWriteNeedleResponse) GetETag() string { - if x != nil { - return x.ETag - } - return "" + return file_volume_server_proto_rawDescGZIP(), []int{81} } // select on volume servers type QueryRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"` FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"` Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"` OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *QueryRequest) Reset() { *x = QueryRequest{} - mi := &file_volume_server_proto_msgTypes[91] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest) String() string { @@ -5214,8 +4654,8 @@ func (x *QueryRequest) String() string { func (*QueryRequest) ProtoMessage() {} func (x *QueryRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[91] - if x != nil { + mi := &file_volume_server_proto_msgTypes[82] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5227,7 +4667,7 @@ func (x *QueryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. func (*QueryRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91} + return file_volume_server_proto_rawDescGZIP(), []int{82} } func (x *QueryRequest) GetSelections() []string { @@ -5266,17 +4706,20 @@ func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerializatio } type QueriedStripe struct { - state protoimpl.MessageState `protogen:"open.v1"` - Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` } func (x *QueriedStripe) Reset() { *x = QueriedStripe{} - mi := &file_volume_server_proto_msgTypes[92] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueriedStripe) String() string { @@ -5286,8 +4729,8 @@ func (x *QueriedStripe) String() string { func (*QueriedStripe) ProtoMessage() {} func (x *QueriedStripe) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[92] - if x != nil { + mi := &file_volume_server_proto_msgTypes[83] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5299,7 +4742,7 @@ func (x *QueriedStripe) ProtoReflect() protoreflect.Message { // Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. func (*QueriedStripe) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{92} + return file_volume_server_proto_rawDescGZIP(), []int{83} } func (x *QueriedStripe) GetRecords() []byte { @@ -5310,18 +4753,21 @@ func (x *QueriedStripe) GetRecords() []byte { } type VolumeNeedleStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` } func (x *VolumeNeedleStatusRequest) Reset() { *x = VolumeNeedleStatusRequest{} - mi := &file_volume_server_proto_msgTypes[93] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeNeedleStatusRequest) String() string { @@ -5331,8 +4777,8 @@ func (x *VolumeNeedleStatusRequest) String() string { func (*VolumeNeedleStatusRequest) ProtoMessage() {} func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[93] - if x != nil { + mi := &file_volume_server_proto_msgTypes[84] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5344,7 +4790,7 @@ func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{93} + return file_volume_server_proto_rawDescGZIP(), []int{84} } func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { @@ -5362,22 +4808,25 @@ func (x *VolumeNeedleStatusRequest) GetNeedleId() uint64 { } type VolumeNeedleStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` - Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"` - Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` - Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"` - Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"` + Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"` + Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` } func (x *VolumeNeedleStatusResponse) Reset() { *x = VolumeNeedleStatusResponse{} - mi := &file_volume_server_proto_msgTypes[94] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VolumeNeedleStatusResponse) String() string { @@ -5387,8 +4836,8 @@ func (x *VolumeNeedleStatusResponse) String() string { func (*VolumeNeedleStatusResponse) ProtoMessage() {} func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[94] - if x != nil { + mi := &file_volume_server_proto_msgTypes[85] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5400,7 +4849,7 @@ func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{94} + return file_volume_server_proto_rawDescGZIP(), []int{85} } func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { @@ -5446,18 +4895,21 @@ func (x *VolumeNeedleStatusResponse) GetTtl() string { } type PingRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself - TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // default to ping itself + TargetType string `protobuf:"bytes,2,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` } func (x *PingRequest) Reset() { *x = PingRequest{} - mi := &file_volume_server_proto_msgTypes[95] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingRequest) String() string { @@ -5467,8 +4919,8 @@ func (x *PingRequest) String() string { func (*PingRequest) ProtoMessage() {} func (x *PingRequest) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[95] - if x != nil { + mi := &file_volume_server_proto_msgTypes[86] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5480,7 +4932,7 @@ func (x *PingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. func (*PingRequest) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{95} + return file_volume_server_proto_rawDescGZIP(), []int{86} } func (x *PingRequest) GetTarget() string { @@ -5498,19 +4950,22 @@ func (x *PingRequest) GetTargetType() string { } type PingResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` - RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` - StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` + RemoteTimeNs int64 `protobuf:"varint,2,opt,name=remote_time_ns,json=remoteTimeNs,proto3" json:"remote_time_ns,omitempty"` + StopTimeNs int64 `protobuf:"varint,3,opt,name=stop_time_ns,json=stopTimeNs,proto3" json:"stop_time_ns,omitempty"` } func (x *PingResponse) Reset() { *x = PingResponse{} - mi := &file_volume_server_proto_msgTypes[96] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PingResponse) String() string { @@ -5520,8 +4975,8 @@ func (x *PingResponse) String() string { func (*PingResponse) ProtoMessage() {} func (x *PingResponse) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[96] - if x != nil { + mi := &file_volume_server_proto_msgTypes[87] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5533,7 +4988,7 @@ func (x *PingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. func (*PingResponse) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{96} + return file_volume_server_proto_rawDescGZIP(), []int{87} } func (x *PingResponse) GetStartTimeNs() int64 { @@ -5558,19 +5013,22 @@ func (x *PingResponse) GetStopTimeNs() int64 { } type FetchAndWriteNeedleRequest_Replica struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` - GrpcPort int32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + GrpcPort int32 `protobuf:"varint,3,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` } func (x *FetchAndWriteNeedleRequest_Replica) Reset() { *x = FetchAndWriteNeedleRequest_Replica{} - mi := &file_volume_server_proto_msgTypes[97] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *FetchAndWriteNeedleRequest_Replica) String() string { @@ -5580,8 +5038,8 @@ func (x *FetchAndWriteNeedleRequest_Replica) String() string { func (*FetchAndWriteNeedleRequest_Replica) ProtoMessage() {} func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[97] - if x != nil { + mi := &file_volume_server_proto_msgTypes[88] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5593,7 +5051,7 @@ func (x *FetchAndWriteNeedleRequest_Replica) ProtoReflect() protoreflect.Message // Deprecated: Use FetchAndWriteNeedleRequest_Replica.ProtoReflect.Descriptor instead. func (*FetchAndWriteNeedleRequest_Replica) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{89, 0} + return file_volume_server_proto_rawDescGZIP(), []int{80, 0} } func (x *FetchAndWriteNeedleRequest_Replica) GetUrl() string { @@ -5618,19 +5076,22 @@ func (x *FetchAndWriteNeedleRequest_Replica) GetGrpcPort() int32 { } type QueryRequest_Filter struct { - state protoimpl.MessageState `protogen:"open.v1"` - Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` - Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } func (x *QueryRequest_Filter) Reset() { *x = QueryRequest_Filter{} - mi := &file_volume_server_proto_msgTypes[98] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_Filter) String() string { @@ -5640,8 +5101,8 @@ func (x *QueryRequest_Filter) String() string { func (*QueryRequest_Filter) ProtoMessage() {} func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[98] - if x != nil { + mi := &file_volume_server_proto_msgTypes[89] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5653,7 +5114,7 @@ func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 0} + return file_volume_server_proto_rawDescGZIP(), []int{82, 0} } func (x *QueryRequest_Filter) GetField() string { @@ -5678,21 +5139,24 @@ func (x *QueryRequest_Filter) GetValue() string { } type QueryRequest_InputSerialization struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // NONE | GZIP | BZIP2 CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"` CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"` JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"` ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *QueryRequest_InputSerialization) Reset() { *x = QueryRequest_InputSerialization{} - mi := &file_volume_server_proto_msgTypes[99] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_InputSerialization) String() string { @@ -5702,8 +5166,8 @@ func (x *QueryRequest_InputSerialization) String() string { func (*QueryRequest_InputSerialization) ProtoMessage() {} func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[99] - if x != nil { + mi := &file_volume_server_proto_msgTypes[90] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5715,7 +5179,7 @@ func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1} + return file_volume_server_proto_rawDescGZIP(), []int{82, 1} } func (x *QueryRequest_InputSerialization) GetCompressionType() string { @@ -5747,18 +5211,21 @@ func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputS } type QueryRequest_OutputSerialization struct { - state protoimpl.MessageState `protogen:"open.v1"` - CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` - JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` + JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` } func (x *QueryRequest_OutputSerialization) Reset() { *x = QueryRequest_OutputSerialization{} - mi := &file_volume_server_proto_msgTypes[100] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_OutputSerialization) String() string { @@ -5768,8 +5235,8 @@ func (x *QueryRequest_OutputSerialization) String() string { func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[100] - if x != nil { + mi := &file_volume_server_proto_msgTypes[91] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5781,7 +5248,7 @@ func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2} + return file_volume_server_proto_rawDescGZIP(), []int{82, 2} } func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -5799,24 +5266,27 @@ func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputS } type QueryRequest_InputSerialization_CSVInput struct { - state protoimpl.MessageState `protogen:"open.v1"` - FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE - RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n - FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , - QuoteCharacter string `protobuf:"bytes,4,opt,name=quote_character,json=quoteCharacter,proto3" json:"quote_character,omitempty"` // Default: " - QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " - Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: # + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " + Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: # // If true, records might contain record delimiters within quote characters AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // default False. - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *QueryRequest_InputSerialization_CSVInput) Reset() { *x = QueryRequest_InputSerialization_CSVInput{} - mi := &file_volume_server_proto_msgTypes[101] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[92] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_InputSerialization_CSVInput) String() string { @@ -5826,8 +5296,8 @@ func (x *QueryRequest_InputSerialization_CSVInput) String() string { func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[101] - if x != nil { + mi := &file_volume_server_proto_msgTypes[92] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5839,7 +5309,7 @@ func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.M // Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 0} + return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 0} } func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -5863,9 +5333,9 @@ func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string { return "" } -func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharacter() string { +func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string { if x != nil { - return x.QuoteCharacter + return x.QuoteCharactoer } return "" } @@ -5892,17 +5362,20 @@ func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter } type QueryRequest_InputSerialization_JSONInput struct { - state protoimpl.MessageState `protogen:"open.v1"` - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES } func (x *QueryRequest_InputSerialization_JSONInput) Reset() { *x = QueryRequest_InputSerialization_JSONInput{} - mi := &file_volume_server_proto_msgTypes[102] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[93] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_InputSerialization_JSONInput) String() string { @@ -5912,8 +5385,8 @@ func (x *QueryRequest_InputSerialization_JSONInput) String() string { func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[102] - if x != nil { + mi := &file_volume_server_proto_msgTypes[93] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5925,7 +5398,7 @@ func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect. // Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 1} + return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 1} } func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -5936,16 +5409,18 @@ func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { } type QueryRequest_InputSerialization_ParquetInput struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { *x = QueryRequest_InputSerialization_ParquetInput{} - mi := &file_volume_server_proto_msgTypes[103] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[94] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_InputSerialization_ParquetInput) String() string { @@ -5955,8 +5430,8 @@ func (x *QueryRequest_InputSerialization_ParquetInput) String() string { func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[103] - if x != nil { + mi := &file_volume_server_proto_msgTypes[94] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5968,25 +5443,28 @@ func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protorefle // Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 1, 2} + return file_volume_server_proto_rawDescGZIP(), []int{82, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { - state protoimpl.MessageState `protogen:"open.v1"` - QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED - RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n - FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , - QuoteCharacter string `protobuf:"bytes,4,opt,name=quote_character,json=quoteCharacter,proto3" json:"quote_character,omitempty"` // Default: " - QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " } func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { *x = QueryRequest_OutputSerialization_CSVOutput{} - mi := &file_volume_server_proto_msgTypes[104] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[95] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { @@ -5996,8 +5474,8 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[104] - if x != nil { + mi := &file_volume_server_proto_msgTypes[95] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6009,7 +5487,7 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect // Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 0} + return file_volume_server_proto_rawDescGZIP(), []int{82, 2, 0} } func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -6033,9 +5511,9 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string return "" } -func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharacter() string { +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string { if x != nil { - return x.QuoteCharacter + return x.QuoteCharactoer } return "" } @@ -6048,17 +5526,20 @@ func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() s } type QueryRequest_OutputSerialization_JSONOutput struct { - state protoimpl.MessageState `protogen:"open.v1"` - RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` } func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { *x = QueryRequest_OutputSerialization_JSONOutput{} - mi := &file_volume_server_proto_msgTypes[105] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[96] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { @@ -6068,8 +5549,8 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { - mi := &file_volume_server_proto_msgTypes[105] - if x != nil { + mi := &file_volume_server_proto_msgTypes[96] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6081,7 +5562,7 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflec // Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return file_volume_server_proto_rawDescGZIP(), []int{91, 2, 1} + return file_volume_server_proto_rawDescGZIP(), []int{82, 2, 1} } func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -6093,526 +5574,962 @@ func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() strin var File_volume_server_proto protoreflect.FileDescriptor -const file_volume_server_proto_rawDesc = "" + - "\n" + - "\x13volume_server.proto\x12\x10volume_server_pb\x1a\fremote.proto\"[\n" + - "\x12BatchDeleteRequest\x12\x19\n" + - "\bfile_ids\x18\x01 \x03(\tR\afileIds\x12*\n" + - "\x11skip_cookie_check\x18\x02 \x01(\bR\x0fskipCookieCheck\"O\n" + - "\x13BatchDeleteResponse\x128\n" + - "\aresults\x18\x01 \x03(\v2\x1e.volume_server_pb.DeleteResultR\aresults\"\x83\x01\n" + - "\fDeleteResult\x12\x17\n" + - "\afile_id\x18\x01 \x01(\tR\x06fileId\x12\x16\n" + - "\x06status\x18\x02 \x01(\x05R\x06status\x12\x14\n" + - "\x05error\x18\x03 \x01(\tR\x05error\x12\x12\n" + - "\x04size\x18\x04 \x01(\rR\x04size\x12\x18\n" + - "\aversion\x18\x05 \x01(\rR\aversion\"\a\n" + - "\x05Empty\"7\n" + - "\x18VacuumVolumeCheckRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"@\n" + - "\x19VacuumVolumeCheckResponse\x12#\n" + - "\rgarbage_ratio\x18\x01 \x01(\x01R\fgarbageRatio\"[\n" + - "\x1aVacuumVolumeCompactRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12 \n" + - "\vpreallocate\x18\x02 \x01(\x03R\vpreallocate\"f\n" + - "\x1bVacuumVolumeCompactResponse\x12'\n" + - "\x0fprocessed_bytes\x18\x01 \x01(\x03R\x0eprocessedBytes\x12\x1e\n" + - "\vload_avg_1m\x18\x02 \x01(\x02R\tloadAvg1m\"8\n" + - "\x19VacuumVolumeCommitRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"_\n" + - "\x1aVacuumVolumeCommitResponse\x12 \n" + - "\fis_read_only\x18\x01 \x01(\bR\n" + - "isReadOnly\x12\x1f\n" + - "\vvolume_size\x18\x02 \x01(\x04R\n" + - "volumeSize\"9\n" + - "\x1aVacuumVolumeCleanupRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\x1d\n" + - "\x1bVacuumVolumeCleanupResponse\"9\n" + - "\x17DeleteCollectionRequest\x12\x1e\n" + - "\n" + - "collection\x18\x01 \x01(\tR\n" + - "collection\"\x1a\n" + - "\x18DeleteCollectionResponse\"\x95\x02\n" + - "\x15AllocateVolumeRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12 \n" + - "\vpreallocate\x18\x03 \x01(\x03R\vpreallocate\x12 \n" + - "\vreplication\x18\x04 \x01(\tR\vreplication\x12\x10\n" + - "\x03ttl\x18\x05 \x01(\tR\x03ttl\x122\n" + - "\x16memory_map_max_size_mb\x18\x06 \x01(\rR\x12memoryMapMaxSizeMb\x12\x1b\n" + - "\tdisk_type\x18\a \x01(\tR\bdiskType\x12\x18\n" + - "\aversion\x18\b \x01(\rR\aversion\"\x18\n" + - "\x16AllocateVolumeResponse\"6\n" + - "\x17VolumeSyncStatusRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\x95\x02\n" + - "\x18VolumeSyncStatusResponse\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12 \n" + - "\vreplication\x18\x04 \x01(\tR\vreplication\x12\x10\n" + - "\x03ttl\x18\x05 \x01(\tR\x03ttl\x12\x1f\n" + - "\vtail_offset\x18\x06 \x01(\x04R\n" + - "tailOffset\x12)\n" + - "\x10compact_revision\x18\a \x01(\rR\x0fcompactRevision\x12\"\n" + - "\ridx_file_size\x18\b \x01(\x04R\vidxFileSize\x12\x18\n" + - "\aversion\x18\t \x01(\rR\aversion\"V\n" + - "\x1cVolumeIncrementalCopyRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x19\n" + - "\bsince_ns\x18\x02 \x01(\x04R\asinceNs\"B\n" + - "\x1dVolumeIncrementalCopyResponse\x12!\n" + - "\ffile_content\x18\x01 \x01(\fR\vfileContent\"1\n" + - "\x12VolumeMountRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\x15\n" + - "\x13VolumeMountResponse\"3\n" + - "\x14VolumeUnmountRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\x17\n" + - "\x15VolumeUnmountResponse\"Q\n" + - "\x13VolumeDeleteRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1d\n" + - "\n" + - "only_empty\x18\x02 \x01(\bR\tonlyEmpty\"\x16\n" + - "\x14VolumeDeleteResponse\"R\n" + - "\x19VolumeMarkReadonlyRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x18\n" + - "\apersist\x18\x02 \x01(\bR\apersist\"\x1c\n" + - "\x1aVolumeMarkReadonlyResponse\"8\n" + - "\x19VolumeMarkWritableRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\x1c\n" + - "\x1aVolumeMarkWritableResponse\"W\n" + - "\x16VolumeConfigureRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12 \n" + - "\vreplication\x18\x02 \x01(\tR\vreplication\"/\n" + - "\x17VolumeConfigureResponse\x12\x14\n" + - "\x05error\x18\x01 \x01(\tR\x05error\"2\n" + - "\x13VolumeStatusRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\xa6\x01\n" + - "\x14VolumeStatusResponse\x12 \n" + - "\fis_read_only\x18\x01 \x01(\bR\n" + - "isReadOnly\x12\x1f\n" + - "\vvolume_size\x18\x02 \x01(\x04R\n" + - "volumeSize\x12\x1d\n" + - "\n" + - "file_count\x18\x03 \x01(\x04R\tfileCount\x12,\n" + - "\x12file_deleted_count\x18\x04 \x01(\x04R\x10fileDeletedCount\"\xf8\x01\n" + - "\x11VolumeCopyRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12 \n" + - "\vreplication\x18\x03 \x01(\tR\vreplication\x12\x10\n" + - "\x03ttl\x18\x04 \x01(\tR\x03ttl\x12(\n" + - "\x10source_data_node\x18\x05 \x01(\tR\x0esourceDataNode\x12\x1b\n" + - "\tdisk_type\x18\x06 \x01(\tR\bdiskType\x12+\n" + - "\x12io_byte_per_second\x18\a \x01(\x03R\x0fioBytePerSecond\"h\n" + - "\x12VolumeCopyResponse\x12)\n" + - "\x11last_append_at_ns\x18\x01 \x01(\x04R\x0elastAppendAtNs\x12'\n" + - "\x0fprocessed_bytes\x18\x02 \x01(\x03R\x0eprocessedBytes\"\x94\x02\n" + - "\x0fCopyFileRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x10\n" + - "\x03ext\x18\x02 \x01(\tR\x03ext\x12/\n" + - "\x13compaction_revision\x18\x03 \x01(\rR\x12compactionRevision\x12\x1f\n" + - "\vstop_offset\x18\x04 \x01(\x04R\n" + - "stopOffset\x12\x1e\n" + - "\n" + - "collection\x18\x05 \x01(\tR\n" + - "collection\x12 \n" + - "\fis_ec_volume\x18\x06 \x01(\bR\n" + - "isEcVolume\x12>\n" + - "\x1cignore_source_file_not_found\x18\a \x01(\bR\x18ignoreSourceFileNotFound\"[\n" + - "\x10CopyFileResponse\x12!\n" + - "\ffile_content\x18\x01 \x01(\fR\vfileContent\x12$\n" + - "\x0emodified_ts_ns\x18\x02 \x01(\x03R\fmodifiedTsNs\"z\n" + - "\x12ReceiveFileRequest\x127\n" + - "\x04info\x18\x01 \x01(\v2!.volume_server_pb.ReceiveFileInfoH\x00R\x04info\x12#\n" + - "\ffile_content\x18\x02 \x01(\fH\x00R\vfileContentB\x06\n" + - "\x04data\"\xba\x01\n" + - "\x0fReceiveFileInfo\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x10\n" + - "\x03ext\x18\x02 \x01(\tR\x03ext\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12 \n" + - "\fis_ec_volume\x18\x04 \x01(\bR\n" + - "isEcVolume\x12\x19\n" + - "\bshard_id\x18\x05 \x01(\rR\ashardId\x12\x1b\n" + - "\tfile_size\x18\x06 \x01(\x04R\bfileSize\"P\n" + - "\x13ReceiveFileResponse\x12#\n" + - "\rbytes_written\x18\x01 \x01(\x04R\fbytesWritten\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error\"`\n" + - "\x15ReadNeedleBlobRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x16\n" + - "\x06offset\x18\x03 \x01(\x03R\x06offset\x12\x12\n" + - "\x04size\x18\x04 \x01(\x05R\x04size\"9\n" + - "\x16ReadNeedleBlobResponse\x12\x1f\n" + - "\vneedle_blob\x18\x01 \x01(\fR\n" + - "needleBlob\"}\n" + - "\x15ReadNeedleMetaRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tneedle_id\x18\x02 \x01(\x04R\bneedleId\x12\x16\n" + - "\x06offset\x18\x03 \x01(\x03R\x06offset\x12\x12\n" + - "\x04size\x18\x04 \x01(\x05R\x04size\"\x9b\x01\n" + - "\x16ReadNeedleMetaResponse\x12\x16\n" + - "\x06cookie\x18\x01 \x01(\rR\x06cookie\x12#\n" + - "\rlast_modified\x18\x02 \x01(\x04R\flastModified\x12\x10\n" + - "\x03crc\x18\x03 \x01(\rR\x03crc\x12\x10\n" + - "\x03ttl\x18\x04 \x01(\tR\x03ttl\x12 \n" + - "\fappend_at_ns\x18\x05 \x01(\x04R\n" + - "appendAtNs\"\x87\x01\n" + - "\x16WriteNeedleBlobRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tneedle_id\x18\x02 \x01(\x04R\bneedleId\x12\x12\n" + - "\x04size\x18\x03 \x01(\x05R\x04size\x12\x1f\n" + - "\vneedle_blob\x18\x04 \x01(\fR\n" + - "needleBlob\"\x19\n" + - "\x17WriteNeedleBlobResponse\"6\n" + - "\x15ReadAllNeedlesRequest\x12\x1d\n" + - "\n" + - "volume_ids\x18\x01 \x03(\rR\tvolumeIds\"\xa0\x02\n" + - "\x16ReadAllNeedlesResponse\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tneedle_id\x18\x02 \x01(\x04R\bneedleId\x12\x16\n" + - "\x06cookie\x18\x03 \x01(\rR\x06cookie\x12\x1f\n" + - "\vneedle_blob\x18\x05 \x01(\fR\n" + - "needleBlob\x124\n" + - "\x16needle_blob_compressed\x18\x06 \x01(\bR\x14needleBlobCompressed\x12#\n" + - "\rlast_modified\x18\a \x01(\x04R\flastModified\x12\x10\n" + - "\x03crc\x18\b \x01(\rR\x03crc\x12\x12\n" + - "\x04name\x18\t \x01(\fR\x04name\x12\x12\n" + - "\x04mime\x18\n" + - " \x01(\fR\x04mime\"\x83\x01\n" + - "\x17VolumeTailSenderRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x19\n" + - "\bsince_ns\x18\x02 \x01(\x04R\asinceNs\x120\n" + - "\x14idle_timeout_seconds\x18\x03 \x01(\rR\x12idleTimeoutSeconds\"\x9e\x01\n" + - "\x18VolumeTailSenderResponse\x12#\n" + - "\rneedle_header\x18\x01 \x01(\fR\fneedleHeader\x12\x1f\n" + - "\vneedle_body\x18\x02 \x01(\fR\n" + - "needleBody\x12\"\n" + - "\ris_last_chunk\x18\x03 \x01(\bR\visLastChunk\x12\x18\n" + - "\aversion\x18\x04 \x01(\rR\aversion\"\xb7\x01\n" + - "\x19VolumeTailReceiverRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x19\n" + - "\bsince_ns\x18\x02 \x01(\x04R\asinceNs\x120\n" + - "\x14idle_timeout_seconds\x18\x03 \x01(\rR\x12idleTimeoutSeconds\x120\n" + - "\x14source_volume_server\x18\x04 \x01(\tR\x12sourceVolumeServer\"\x1c\n" + - "\x1aVolumeTailReceiverResponse\"\\\n" + - "\x1dVolumeEcShardsGenerateRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\" \n" + - "\x1eVolumeEcShardsGenerateResponse\"[\n" + - "\x1cVolumeEcShardsRebuildRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\"K\n" + - "\x1dVolumeEcShardsRebuildResponse\x12*\n" + - "\x11rebuilt_shard_ids\x18\x01 \x03(\rR\x0frebuiltShardIds\"\xa4\x02\n" + - "\x19VolumeEcShardsCopyRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x1b\n" + - "\tshard_ids\x18\x03 \x03(\rR\bshardIds\x12\"\n" + - "\rcopy_ecx_file\x18\x04 \x01(\bR\vcopyEcxFile\x12(\n" + - "\x10source_data_node\x18\x05 \x01(\tR\x0esourceDataNode\x12\"\n" + - "\rcopy_ecj_file\x18\x06 \x01(\bR\vcopyEcjFile\x12\"\n" + - "\rcopy_vif_file\x18\a \x01(\bR\vcopyVifFile\x12\x17\n" + - "\adisk_id\x18\b \x01(\rR\x06diskId\"\x1c\n" + - "\x1aVolumeEcShardsCopyResponse\"w\n" + - "\x1bVolumeEcShardsDeleteRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x1b\n" + - "\tshard_ids\x18\x03 \x03(\rR\bshardIds\"\x1e\n" + - "\x1cVolumeEcShardsDeleteResponse\"v\n" + - "\x1aVolumeEcShardsMountRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x1b\n" + - "\tshard_ids\x18\x03 \x03(\rR\bshardIds\"\x1d\n" + - "\x1bVolumeEcShardsMountResponse\"X\n" + - "\x1cVolumeEcShardsUnmountRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tshard_ids\x18\x03 \x03(\rR\bshardIds\"\x1f\n" + - "\x1dVolumeEcShardsUnmountResponse\"\x99\x01\n" + - "\x18VolumeEcShardReadRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x19\n" + - "\bshard_id\x18\x02 \x01(\rR\ashardId\x12\x16\n" + - "\x06offset\x18\x03 \x01(\x03R\x06offset\x12\x12\n" + - "\x04size\x18\x04 \x01(\x03R\x04size\x12\x19\n" + - "\bfile_key\x18\x05 \x01(\x04R\afileKey\"N\n" + - "\x19VolumeEcShardReadResponse\x12\x12\n" + - "\x04data\x18\x01 \x01(\fR\x04data\x12\x1d\n" + - "\n" + - "is_deleted\x18\x02 \x01(\bR\tisDeleted\"\x8d\x01\n" + - "\x19VolumeEcBlobDeleteRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12\x19\n" + - "\bfile_key\x18\x03 \x01(\x04R\afileKey\x12\x18\n" + - "\aversion\x18\x04 \x01(\rR\aversion\"\x1c\n" + - "\x1aVolumeEcBlobDeleteResponse\"\\\n" + - "\x1dVolumeEcShardsToVolumeRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\" \n" + - "\x1eVolumeEcShardsToVolumeResponse\"8\n" + - "\x19VolumeEcShardsInfoRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"a\n" + - "\x1aVolumeEcShardsInfoResponse\x12C\n" + - "\x0eec_shard_infos\x18\x01 \x03(\v2\x1d.volume_server_pb.EcShardInfoR\fecShardInfos\"\\\n" + - "\vEcShardInfo\x12\x19\n" + - "\bshard_id\x18\x01 \x01(\rR\ashardId\x12\x12\n" + - "\x04size\x18\x02 \x01(\x03R\x04size\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\":\n" + - "\x1bReadVolumeFileStatusRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\"\xe3\x03\n" + - "\x1cReadVolumeFileStatusResponse\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12;\n" + - "\x1aidx_file_timestamp_seconds\x18\x02 \x01(\x04R\x17idxFileTimestampSeconds\x12\"\n" + - "\ridx_file_size\x18\x03 \x01(\x04R\vidxFileSize\x12;\n" + - "\x1adat_file_timestamp_seconds\x18\x04 \x01(\x04R\x17datFileTimestampSeconds\x12\"\n" + - "\rdat_file_size\x18\x05 \x01(\x04R\vdatFileSize\x12\x1d\n" + - "\n" + - "file_count\x18\x06 \x01(\x04R\tfileCount\x12/\n" + - "\x13compaction_revision\x18\a \x01(\rR\x12compactionRevision\x12\x1e\n" + - "\n" + - "collection\x18\b \x01(\tR\n" + - "collection\x12\x1b\n" + - "\tdisk_type\x18\t \x01(\tR\bdiskType\x12=\n" + - "\vvolume_info\x18\n" + - " \x01(\v2\x1c.volume_server_pb.VolumeInfoR\n" + - "volumeInfo\x12\x18\n" + - "\aversion\x18\v \x01(\rR\aversion\"\xbb\x01\n" + - "\n" + - "DiskStatus\x12\x10\n" + - "\x03dir\x18\x01 \x01(\tR\x03dir\x12\x10\n" + - "\x03all\x18\x02 \x01(\x04R\x03all\x12\x12\n" + - "\x04used\x18\x03 \x01(\x04R\x04used\x12\x12\n" + - "\x04free\x18\x04 \x01(\x04R\x04free\x12!\n" + - "\fpercent_free\x18\x05 \x01(\x02R\vpercentFree\x12!\n" + - "\fpercent_used\x18\x06 \x01(\x02R\vpercentUsed\x12\x1b\n" + - "\tdisk_type\x18\a \x01(\tR\bdiskType\"\xa3\x01\n" + - "\tMemStatus\x12\x1e\n" + - "\n" + - "goroutines\x18\x01 \x01(\x05R\n" + - "goroutines\x12\x10\n" + - "\x03all\x18\x02 \x01(\x04R\x03all\x12\x12\n" + - "\x04used\x18\x03 \x01(\x04R\x04used\x12\x12\n" + - "\x04free\x18\x04 \x01(\x04R\x04free\x12\x12\n" + - "\x04self\x18\x05 \x01(\x04R\x04self\x12\x12\n" + - "\x04heap\x18\x06 \x01(\x04R\x04heap\x12\x14\n" + - "\x05stack\x18\a \x01(\x04R\x05stack\"\xd8\x01\n" + - "\n" + - "RemoteFile\x12!\n" + - "\fbackend_type\x18\x01 \x01(\tR\vbackendType\x12\x1d\n" + - "\n" + - "backend_id\x18\x02 \x01(\tR\tbackendId\x12\x10\n" + - "\x03key\x18\x03 \x01(\tR\x03key\x12\x16\n" + - "\x06offset\x18\x04 \x01(\x04R\x06offset\x12\x1b\n" + - "\tfile_size\x18\x05 \x01(\x04R\bfileSize\x12#\n" + - "\rmodified_time\x18\x06 \x01(\x04R\fmodifiedTime\x12\x1c\n" + - "\textension\x18\a \x01(\tR\textension\"\x84\x02\n" + - "\n" + - "VolumeInfo\x122\n" + - "\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" + - "\aversion\x18\x02 \x01(\rR\aversion\x12 \n" + - "\vreplication\x18\x03 \x01(\tR\vreplication\x12!\n" + - "\fbytes_offset\x18\x04 \x01(\rR\vbytesOffset\x12\"\n" + - "\rdat_file_size\x18\x05 \x01(\x03R\vdatFileSize\x12\"\n" + - "\rexpire_at_sec\x18\x06 \x01(\x04R\vexpireAtSec\x12\x1b\n" + - "\tread_only\x18\a \x01(\bR\breadOnly\"\x8b\x02\n" + - "\x14OldVersionVolumeInfo\x122\n" + - "\x05files\x18\x01 \x03(\v2\x1c.volume_server_pb.RemoteFileR\x05files\x12\x18\n" + - "\aversion\x18\x02 \x01(\rR\aversion\x12 \n" + - "\vreplication\x18\x03 \x01(\tR\vreplication\x12 \n" + - "\vBytesOffset\x18\x04 \x01(\rR\vBytesOffset\x12\"\n" + - "\rdat_file_size\x18\x05 \x01(\x03R\vdatFileSize\x12 \n" + - "\vDestroyTime\x18\x06 \x01(\x04R\vDestroyTime\x12\x1b\n" + - "\tread_only\x18\a \x01(\bR\breadOnly\"\xc8\x01\n" + - " VolumeTierMoveDatToRemoteRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x128\n" + - "\x18destination_backend_name\x18\x03 \x01(\tR\x16destinationBackendName\x12-\n" + - "\x13keep_local_dat_file\x18\x04 \x01(\bR\x10keepLocalDatFile\"s\n" + - "!VolumeTierMoveDatToRemoteResponse\x12\x1c\n" + - "\tprocessed\x18\x01 \x01(\x03R\tprocessed\x120\n" + - "\x13processedPercentage\x18\x02 \x01(\x02R\x13processedPercentage\"\x92\x01\n" + - "\"VolumeTierMoveDatFromRemoteRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x02 \x01(\tR\n" + - "collection\x12/\n" + - "\x14keep_remote_dat_file\x18\x03 \x01(\bR\x11keepRemoteDatFile\"u\n" + - "#VolumeTierMoveDatFromRemoteResponse\x12\x1c\n" + - "\tprocessed\x18\x01 \x01(\x03R\tprocessed\x120\n" + - "\x13processedPercentage\x18\x02 \x01(\x02R\x13processedPercentage\"\x1b\n" + - "\x19VolumeServerStatusRequest\"\xf0\x01\n" + - "\x1aVolumeServerStatusResponse\x12A\n" + - "\rdisk_statuses\x18\x01 \x03(\v2\x1c.volume_server_pb.DiskStatusR\fdiskStatuses\x12@\n" + - "\rmemory_status\x18\x02 \x01(\v2\x1b.volume_server_pb.MemStatusR\fmemoryStatus\x12\x18\n" + - "\aversion\x18\x03 \x01(\tR\aversion\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\x05 \x01(\tR\x04rack\"\x1a\n" + - "\x18VolumeServerLeaveRequest\"\x1b\n" + - "\x19VolumeServerLeaveResponse\"\xdc\x03\n" + - "\x1aFetchAndWriteNeedleRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tneedle_id\x18\x02 \x01(\x04R\bneedleId\x12\x16\n" + - "\x06cookie\x18\x03 \x01(\rR\x06cookie\x12\x16\n" + - "\x06offset\x18\x04 \x01(\x03R\x06offset\x12\x12\n" + - "\x04size\x18\x05 \x01(\x03R\x04size\x12P\n" + - "\breplicas\x18\x06 \x03(\v24.volume_server_pb.FetchAndWriteNeedleRequest.ReplicaR\breplicas\x12\x12\n" + - "\x04auth\x18\a \x01(\tR\x04auth\x126\n" + - "\vremote_conf\x18\x0f \x01(\v2\x15.remote_pb.RemoteConfR\n" + - "remoteConf\x12I\n" + - "\x0fremote_location\x18\x10 \x01(\v2 .remote_pb.RemoteStorageLocationR\x0eremoteLocation\x1aW\n" + - "\aReplica\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\x12\x1d\n" + - "\n" + - "public_url\x18\x02 \x01(\tR\tpublicUrl\x12\x1b\n" + - "\tgrpc_port\x18\x03 \x01(\x05R\bgrpcPort\"2\n" + - "\x1bFetchAndWriteNeedleResponse\x12\x13\n" + - "\x05e_tag\x18\x01 \x01(\tR\x04eTag\"\xf4\f\n" + - "\fQueryRequest\x12\x1e\n" + - "\n" + - "selections\x18\x01 \x03(\tR\n" + - "selections\x12\"\n" + - "\rfrom_file_ids\x18\x02 \x03(\tR\vfromFileIds\x12=\n" + - "\x06filter\x18\x03 \x01(\v2%.volume_server_pb.QueryRequest.FilterR\x06filter\x12b\n" + - "\x13input_serialization\x18\x04 \x01(\v21.volume_server_pb.QueryRequest.InputSerializationR\x12inputSerialization\x12e\n" + - "\x14output_serialization\x18\x05 \x01(\v22.volume_server_pb.QueryRequest.OutputSerializationR\x13outputSerialization\x1aN\n" + - "\x06Filter\x12\x14\n" + - "\x05field\x18\x01 \x01(\tR\x05field\x12\x18\n" + - "\aoperand\x18\x02 \x01(\tR\aoperand\x12\x14\n" + - "\x05value\x18\x03 \x01(\tR\x05value\x1a\xd3\x05\n" + - "\x12InputSerialization\x12)\n" + - "\x10compression_type\x18\x01 \x01(\tR\x0fcompressionType\x12W\n" + - "\tcsv_input\x18\x02 \x01(\v2:.volume_server_pb.QueryRequest.InputSerialization.CSVInputR\bcsvInput\x12Z\n" + - "\n" + - "json_input\x18\x03 \x01(\v2;.volume_server_pb.QueryRequest.InputSerialization.JSONInputR\tjsonInput\x12c\n" + - "\rparquet_input\x18\x04 \x01(\v2>.volume_server_pb.QueryRequest.InputSerialization.ParquetInputR\fparquetInput\x1a\xc6\x02\n" + - "\bCSVInput\x12(\n" + - "\x10file_header_info\x18\x01 \x01(\tR\x0efileHeaderInfo\x12)\n" + - "\x10record_delimiter\x18\x02 \x01(\tR\x0frecordDelimiter\x12'\n" + - "\x0ffield_delimiter\x18\x03 \x01(\tR\x0efieldDelimiter\x12'\n" + - "\x0fquote_character\x18\x04 \x01(\tR\x0equoteCharacter\x124\n" + - "\x16quote_escape_character\x18\x05 \x01(\tR\x14quoteEscapeCharacter\x12\x1a\n" + - "\bcomments\x18\x06 \x01(\tR\bcomments\x12A\n" + - "\x1dallow_quoted_record_delimiter\x18\a \x01(\bR\x1aallowQuotedRecordDelimiter\x1a\x1f\n" + - "\tJSONInput\x12\x12\n" + - "\x04type\x18\x01 \x01(\tR\x04type\x1a\x0e\n" + - "\fParquetInput\x1a\xef\x03\n" + - "\x13OutputSerialization\x12[\n" + - "\n" + - "csv_output\x18\x02 \x01(\v2<.volume_server_pb.QueryRequest.OutputSerialization.CSVOutputR\tcsvOutput\x12^\n" + - "\vjson_output\x18\x03 \x01(\v2=.volume_server_pb.QueryRequest.OutputSerialization.JSONOutputR\n" + - "jsonOutput\x1a\xe1\x01\n" + - "\tCSVOutput\x12!\n" + - "\fquote_fields\x18\x01 \x01(\tR\vquoteFields\x12)\n" + - "\x10record_delimiter\x18\x02 \x01(\tR\x0frecordDelimiter\x12'\n" + - "\x0ffield_delimiter\x18\x03 \x01(\tR\x0efieldDelimiter\x12'\n" + - "\x0fquote_character\x18\x04 \x01(\tR\x0equoteCharacter\x124\n" + - "\x16quote_escape_character\x18\x05 \x01(\tR\x14quoteEscapeCharacter\x1a7\n" + - "\n" + - "JSONOutput\x12)\n" + - "\x10record_delimiter\x18\x01 \x01(\tR\x0frecordDelimiter\")\n" + - "\rQueriedStripe\x12\x18\n" + - "\arecords\x18\x01 \x01(\fR\arecords\"U\n" + - "\x19VolumeNeedleStatusRequest\x12\x1b\n" + - "\tvolume_id\x18\x01 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tneedle_id\x18\x02 \x01(\x04R\bneedleId\"\xae\x01\n" + - "\x1aVolumeNeedleStatusResponse\x12\x1b\n" + - "\tneedle_id\x18\x01 \x01(\x04R\bneedleId\x12\x16\n" + - "\x06cookie\x18\x02 \x01(\rR\x06cookie\x12\x12\n" + - "\x04size\x18\x03 \x01(\rR\x04size\x12#\n" + - "\rlast_modified\x18\x04 \x01(\x04R\flastModified\x12\x10\n" + - "\x03crc\x18\x05 \x01(\rR\x03crc\x12\x10\n" + - "\x03ttl\x18\x06 \x01(\tR\x03ttl\"F\n" + - "\vPingRequest\x12\x16\n" + - "\x06target\x18\x01 \x01(\tR\x06target\x12\x1f\n" + - "\vtarget_type\x18\x02 \x01(\tR\n" + - "targetType\"z\n" + - "\fPingResponse\x12\"\n" + - "\rstart_time_ns\x18\x01 \x01(\x03R\vstartTimeNs\x12$\n" + - "\x0eremote_time_ns\x18\x02 \x01(\x03R\fremoteTimeNs\x12 \n" + - "\fstop_time_ns\x18\x03 \x01(\x03R\n" + - "stopTimeNs2\x8f&\n" + - "\fVolumeServer\x12\\\n" + - "\vBatchDelete\x12$.volume_server_pb.BatchDeleteRequest\x1a%.volume_server_pb.BatchDeleteResponse\"\x00\x12n\n" + - "\x11VacuumVolumeCheck\x12*.volume_server_pb.VacuumVolumeCheckRequest\x1a+.volume_server_pb.VacuumVolumeCheckResponse\"\x00\x12v\n" + - "\x13VacuumVolumeCompact\x12,.volume_server_pb.VacuumVolumeCompactRequest\x1a-.volume_server_pb.VacuumVolumeCompactResponse\"\x000\x01\x12q\n" + - "\x12VacuumVolumeCommit\x12+.volume_server_pb.VacuumVolumeCommitRequest\x1a,.volume_server_pb.VacuumVolumeCommitResponse\"\x00\x12t\n" + - "\x13VacuumVolumeCleanup\x12,.volume_server_pb.VacuumVolumeCleanupRequest\x1a-.volume_server_pb.VacuumVolumeCleanupResponse\"\x00\x12k\n" + - "\x10DeleteCollection\x12).volume_server_pb.DeleteCollectionRequest\x1a*.volume_server_pb.DeleteCollectionResponse\"\x00\x12e\n" + - "\x0eAllocateVolume\x12'.volume_server_pb.AllocateVolumeRequest\x1a(.volume_server_pb.AllocateVolumeResponse\"\x00\x12k\n" + - "\x10VolumeSyncStatus\x12).volume_server_pb.VolumeSyncStatusRequest\x1a*.volume_server_pb.VolumeSyncStatusResponse\"\x00\x12|\n" + - "\x15VolumeIncrementalCopy\x12..volume_server_pb.VolumeIncrementalCopyRequest\x1a/.volume_server_pb.VolumeIncrementalCopyResponse\"\x000\x01\x12\\\n" + - "\vVolumeMount\x12$.volume_server_pb.VolumeMountRequest\x1a%.volume_server_pb.VolumeMountResponse\"\x00\x12b\n" + - "\rVolumeUnmount\x12&.volume_server_pb.VolumeUnmountRequest\x1a'.volume_server_pb.VolumeUnmountResponse\"\x00\x12_\n" + - "\fVolumeDelete\x12%.volume_server_pb.VolumeDeleteRequest\x1a&.volume_server_pb.VolumeDeleteResponse\"\x00\x12q\n" + - "\x12VolumeMarkReadonly\x12+.volume_server_pb.VolumeMarkReadonlyRequest\x1a,.volume_server_pb.VolumeMarkReadonlyResponse\"\x00\x12q\n" + - "\x12VolumeMarkWritable\x12+.volume_server_pb.VolumeMarkWritableRequest\x1a,.volume_server_pb.VolumeMarkWritableResponse\"\x00\x12h\n" + - "\x0fVolumeConfigure\x12(.volume_server_pb.VolumeConfigureRequest\x1a).volume_server_pb.VolumeConfigureResponse\"\x00\x12_\n" + - "\fVolumeStatus\x12%.volume_server_pb.VolumeStatusRequest\x1a&.volume_server_pb.VolumeStatusResponse\"\x00\x12[\n" + - "\n" + - "VolumeCopy\x12#.volume_server_pb.VolumeCopyRequest\x1a$.volume_server_pb.VolumeCopyResponse\"\x000\x01\x12w\n" + - "\x14ReadVolumeFileStatus\x12-.volume_server_pb.ReadVolumeFileStatusRequest\x1a..volume_server_pb.ReadVolumeFileStatusResponse\"\x00\x12U\n" + - "\bCopyFile\x12!.volume_server_pb.CopyFileRequest\x1a\".volume_server_pb.CopyFileResponse\"\x000\x01\x12^\n" + - "\vReceiveFile\x12$.volume_server_pb.ReceiveFileRequest\x1a%.volume_server_pb.ReceiveFileResponse\"\x00(\x01\x12e\n" + - "\x0eReadNeedleBlob\x12'.volume_server_pb.ReadNeedleBlobRequest\x1a(.volume_server_pb.ReadNeedleBlobResponse\"\x00\x12e\n" + - "\x0eReadNeedleMeta\x12'.volume_server_pb.ReadNeedleMetaRequest\x1a(.volume_server_pb.ReadNeedleMetaResponse\"\x00\x12h\n" + - "\x0fWriteNeedleBlob\x12(.volume_server_pb.WriteNeedleBlobRequest\x1a).volume_server_pb.WriteNeedleBlobResponse\"\x00\x12g\n" + - "\x0eReadAllNeedles\x12'.volume_server_pb.ReadAllNeedlesRequest\x1a(.volume_server_pb.ReadAllNeedlesResponse\"\x000\x01\x12m\n" + - "\x10VolumeTailSender\x12).volume_server_pb.VolumeTailSenderRequest\x1a*.volume_server_pb.VolumeTailSenderResponse\"\x000\x01\x12q\n" + - "\x12VolumeTailReceiver\x12+.volume_server_pb.VolumeTailReceiverRequest\x1a,.volume_server_pb.VolumeTailReceiverResponse\"\x00\x12}\n" + - "\x16VolumeEcShardsGenerate\x12/.volume_server_pb.VolumeEcShardsGenerateRequest\x1a0.volume_server_pb.VolumeEcShardsGenerateResponse\"\x00\x12z\n" + - "\x15VolumeEcShardsRebuild\x12..volume_server_pb.VolumeEcShardsRebuildRequest\x1a/.volume_server_pb.VolumeEcShardsRebuildResponse\"\x00\x12q\n" + - "\x12VolumeEcShardsCopy\x12+.volume_server_pb.VolumeEcShardsCopyRequest\x1a,.volume_server_pb.VolumeEcShardsCopyResponse\"\x00\x12w\n" + - "\x14VolumeEcShardsDelete\x12-.volume_server_pb.VolumeEcShardsDeleteRequest\x1a..volume_server_pb.VolumeEcShardsDeleteResponse\"\x00\x12t\n" + - "\x13VolumeEcShardsMount\x12,.volume_server_pb.VolumeEcShardsMountRequest\x1a-.volume_server_pb.VolumeEcShardsMountResponse\"\x00\x12z\n" + - "\x15VolumeEcShardsUnmount\x12..volume_server_pb.VolumeEcShardsUnmountRequest\x1a/.volume_server_pb.VolumeEcShardsUnmountResponse\"\x00\x12p\n" + - "\x11VolumeEcShardRead\x12*.volume_server_pb.VolumeEcShardReadRequest\x1a+.volume_server_pb.VolumeEcShardReadResponse\"\x000\x01\x12q\n" + - "\x12VolumeEcBlobDelete\x12+.volume_server_pb.VolumeEcBlobDeleteRequest\x1a,.volume_server_pb.VolumeEcBlobDeleteResponse\"\x00\x12}\n" + - "\x16VolumeEcShardsToVolume\x12/.volume_server_pb.VolumeEcShardsToVolumeRequest\x1a0.volume_server_pb.VolumeEcShardsToVolumeResponse\"\x00\x12q\n" + - "\x12VolumeEcShardsInfo\x12+.volume_server_pb.VolumeEcShardsInfoRequest\x1a,.volume_server_pb.VolumeEcShardsInfoResponse\"\x00\x12\x88\x01\n" + - "\x19VolumeTierMoveDatToRemote\x122.volume_server_pb.VolumeTierMoveDatToRemoteRequest\x1a3.volume_server_pb.VolumeTierMoveDatToRemoteResponse\"\x000\x01\x12\x8e\x01\n" + - "\x1bVolumeTierMoveDatFromRemote\x124.volume_server_pb.VolumeTierMoveDatFromRemoteRequest\x1a5.volume_server_pb.VolumeTierMoveDatFromRemoteResponse\"\x000\x01\x12q\n" + - "\x12VolumeServerStatus\x12+.volume_server_pb.VolumeServerStatusRequest\x1a,.volume_server_pb.VolumeServerStatusResponse\"\x00\x12n\n" + - "\x11VolumeServerLeave\x12*.volume_server_pb.VolumeServerLeaveRequest\x1a+.volume_server_pb.VolumeServerLeaveResponse\"\x00\x12t\n" + - "\x13FetchAndWriteNeedle\x12,.volume_server_pb.FetchAndWriteNeedleRequest\x1a-.volume_server_pb.FetchAndWriteNeedleResponse\"\x00\x12L\n" + - "\x05Query\x12\x1e.volume_server_pb.QueryRequest\x1a\x1f.volume_server_pb.QueriedStripe\"\x000\x01\x12q\n" + - "\x12VolumeNeedleStatus\x12+.volume_server_pb.VolumeNeedleStatusRequest\x1a,.volume_server_pb.VolumeNeedleStatusResponse\"\x00\x12G\n" + - "\x04Ping\x12\x1d.volume_server_pb.PingRequest\x1a\x1e.volume_server_pb.PingResponse\"\x00B9Z7github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pbb\x06proto3" +var file_volume_server_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x1a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, + 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x19, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61, 0x72, 0x62, + 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x5b, 0x0a, + 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, + 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x1b, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfb, 0x01, + 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x41, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, + 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, + 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, + 0x61, 0x69, 0x6c, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6c, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, + 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, + 0x65, 0x4e, 0x73, 0x22, 0x42, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x33, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, + 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, + 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x16, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x14, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, + 0x6e, 0x6c, 0x79, 0x22, 0xcb, 0x01, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, + 0x4e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f, + 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x22, 0x5b, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x6f, 0x64, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x73, 0x4e, 0x73, 0x22, + 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, + 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, + 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, + 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, + 0x0a, 0x15, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x41, + 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, + 0x6f, 0x62, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, + 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, + 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x22, 0x0a, + 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, + 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x1c, 0x0a, 0x1a, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, + 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x22, 0x0a, + 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69, 0x66, 0x46, 0x69, 0x6c, + 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, + 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, + 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x3b, + 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x69, + 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0d, + 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xbb, 0x01, + 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x10, + 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x09, + 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x6f, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x67, + 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, + 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x70, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, + 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0a, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x05, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x20, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x18, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, + 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, + 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x22, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, + 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, + 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, + 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, + 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xf0, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x22, 0x1a, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xdc, 0x03, 0x0a, 0x1a, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, + 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x50, 0x0a, 0x08, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, + 0x74, 0x68, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x52, 0x0a, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, + 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x1d, + 0x0a, 0x1b, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, + 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, + 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, + 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, + 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, + 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, + 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, + 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, + 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, + 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, + 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, + 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, + 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, + 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, + 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, + 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, + 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, + 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, + 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, + 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, + 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, + 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, + 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, + 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, + 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, + 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, + 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, + 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, + 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x46, 0x0a, 0x0b, 0x50, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x20, 0x0a, + 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x32, + 0xd5, 0x23, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, + 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, + 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, + 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x0e, + 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x27, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, + 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, + 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, + 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5c, + 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x0d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, + 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, + 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, + 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x77, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, + 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x65, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, + 0x62, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x67, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x73, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x41, 0x6c, 0x6c, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, + 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, + 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, + 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, + 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x2a, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x12, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, + 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, + 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, + 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, + 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_volume_server_proto_rawDescOnce sync.Once - file_volume_server_proto_rawDescData []byte + file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc ) func file_volume_server_proto_rawDescGZIP() []byte { file_volume_server_proto_rawDescOnce.Do(func() { - file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), len(file_volume_server_proto_rawDesc))) + file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData) }) return file_volume_server_proto_rawDescData } -var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 106) -var file_volume_server_proto_goTypes = []any{ +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 97) +var file_volume_server_proto_goTypes = []interface{}{ (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult @@ -6651,190 +6568,171 @@ var file_volume_server_proto_goTypes = []any{ (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse - (*ReceiveFileRequest)(nil), // 38: volume_server_pb.ReceiveFileRequest - (*ReceiveFileInfo)(nil), // 39: volume_server_pb.ReceiveFileInfo - (*ReceiveFileResponse)(nil), // 40: volume_server_pb.ReceiveFileResponse - (*ReadNeedleBlobRequest)(nil), // 41: volume_server_pb.ReadNeedleBlobRequest - (*ReadNeedleBlobResponse)(nil), // 42: volume_server_pb.ReadNeedleBlobResponse - (*ReadNeedleMetaRequest)(nil), // 43: volume_server_pb.ReadNeedleMetaRequest - (*ReadNeedleMetaResponse)(nil), // 44: volume_server_pb.ReadNeedleMetaResponse - (*WriteNeedleBlobRequest)(nil), // 45: volume_server_pb.WriteNeedleBlobRequest - (*WriteNeedleBlobResponse)(nil), // 46: volume_server_pb.WriteNeedleBlobResponse - (*ReadAllNeedlesRequest)(nil), // 47: volume_server_pb.ReadAllNeedlesRequest - (*ReadAllNeedlesResponse)(nil), // 48: volume_server_pb.ReadAllNeedlesResponse - (*VolumeTailSenderRequest)(nil), // 49: volume_server_pb.VolumeTailSenderRequest - (*VolumeTailSenderResponse)(nil), // 50: volume_server_pb.VolumeTailSenderResponse - (*VolumeTailReceiverRequest)(nil), // 51: volume_server_pb.VolumeTailReceiverRequest - (*VolumeTailReceiverResponse)(nil), // 52: volume_server_pb.VolumeTailReceiverResponse - (*VolumeEcShardsGenerateRequest)(nil), // 53: volume_server_pb.VolumeEcShardsGenerateRequest - (*VolumeEcShardsGenerateResponse)(nil), // 54: volume_server_pb.VolumeEcShardsGenerateResponse - (*VolumeEcShardsRebuildRequest)(nil), // 55: volume_server_pb.VolumeEcShardsRebuildRequest - (*VolumeEcShardsRebuildResponse)(nil), // 56: volume_server_pb.VolumeEcShardsRebuildResponse - (*VolumeEcShardsCopyRequest)(nil), // 57: volume_server_pb.VolumeEcShardsCopyRequest - (*VolumeEcShardsCopyResponse)(nil), // 58: volume_server_pb.VolumeEcShardsCopyResponse - (*VolumeEcShardsDeleteRequest)(nil), // 59: volume_server_pb.VolumeEcShardsDeleteRequest - (*VolumeEcShardsDeleteResponse)(nil), // 60: volume_server_pb.VolumeEcShardsDeleteResponse - (*VolumeEcShardsMountRequest)(nil), // 61: volume_server_pb.VolumeEcShardsMountRequest - (*VolumeEcShardsMountResponse)(nil), // 62: volume_server_pb.VolumeEcShardsMountResponse - (*VolumeEcShardsUnmountRequest)(nil), // 63: volume_server_pb.VolumeEcShardsUnmountRequest - (*VolumeEcShardsUnmountResponse)(nil), // 64: volume_server_pb.VolumeEcShardsUnmountResponse - (*VolumeEcShardReadRequest)(nil), // 65: volume_server_pb.VolumeEcShardReadRequest - (*VolumeEcShardReadResponse)(nil), // 66: volume_server_pb.VolumeEcShardReadResponse - (*VolumeEcBlobDeleteRequest)(nil), // 67: volume_server_pb.VolumeEcBlobDeleteRequest - (*VolumeEcBlobDeleteResponse)(nil), // 68: volume_server_pb.VolumeEcBlobDeleteResponse - (*VolumeEcShardsToVolumeRequest)(nil), // 69: volume_server_pb.VolumeEcShardsToVolumeRequest - (*VolumeEcShardsToVolumeResponse)(nil), // 70: volume_server_pb.VolumeEcShardsToVolumeResponse - (*VolumeEcShardsInfoRequest)(nil), // 71: volume_server_pb.VolumeEcShardsInfoRequest - (*VolumeEcShardsInfoResponse)(nil), // 72: volume_server_pb.VolumeEcShardsInfoResponse - (*EcShardInfo)(nil), // 73: volume_server_pb.EcShardInfo - (*ReadVolumeFileStatusRequest)(nil), // 74: volume_server_pb.ReadVolumeFileStatusRequest - (*ReadVolumeFileStatusResponse)(nil), // 75: volume_server_pb.ReadVolumeFileStatusResponse - (*DiskStatus)(nil), // 76: volume_server_pb.DiskStatus - (*MemStatus)(nil), // 77: volume_server_pb.MemStatus - (*RemoteFile)(nil), // 78: volume_server_pb.RemoteFile - (*VolumeInfo)(nil), // 79: volume_server_pb.VolumeInfo - (*OldVersionVolumeInfo)(nil), // 80: volume_server_pb.OldVersionVolumeInfo - (*VolumeTierMoveDatToRemoteRequest)(nil), // 81: volume_server_pb.VolumeTierMoveDatToRemoteRequest - (*VolumeTierMoveDatToRemoteResponse)(nil), // 82: volume_server_pb.VolumeTierMoveDatToRemoteResponse - (*VolumeTierMoveDatFromRemoteRequest)(nil), // 83: volume_server_pb.VolumeTierMoveDatFromRemoteRequest - (*VolumeTierMoveDatFromRemoteResponse)(nil), // 84: volume_server_pb.VolumeTierMoveDatFromRemoteResponse - (*VolumeServerStatusRequest)(nil), // 85: volume_server_pb.VolumeServerStatusRequest - (*VolumeServerStatusResponse)(nil), // 86: volume_server_pb.VolumeServerStatusResponse - (*VolumeServerLeaveRequest)(nil), // 87: volume_server_pb.VolumeServerLeaveRequest - (*VolumeServerLeaveResponse)(nil), // 88: volume_server_pb.VolumeServerLeaveResponse - (*FetchAndWriteNeedleRequest)(nil), // 89: volume_server_pb.FetchAndWriteNeedleRequest - (*FetchAndWriteNeedleResponse)(nil), // 90: volume_server_pb.FetchAndWriteNeedleResponse - (*QueryRequest)(nil), // 91: volume_server_pb.QueryRequest - (*QueriedStripe)(nil), // 92: volume_server_pb.QueriedStripe - (*VolumeNeedleStatusRequest)(nil), // 93: volume_server_pb.VolumeNeedleStatusRequest - (*VolumeNeedleStatusResponse)(nil), // 94: volume_server_pb.VolumeNeedleStatusResponse - (*PingRequest)(nil), // 95: volume_server_pb.PingRequest - (*PingResponse)(nil), // 96: volume_server_pb.PingResponse - (*FetchAndWriteNeedleRequest_Replica)(nil), // 97: volume_server_pb.FetchAndWriteNeedleRequest.Replica - (*QueryRequest_Filter)(nil), // 98: volume_server_pb.QueryRequest.Filter - (*QueryRequest_InputSerialization)(nil), // 99: volume_server_pb.QueryRequest.InputSerialization - (*QueryRequest_OutputSerialization)(nil), // 100: volume_server_pb.QueryRequest.OutputSerialization - (*QueryRequest_InputSerialization_CSVInput)(nil), // 101: volume_server_pb.QueryRequest.InputSerialization.CSVInput - (*QueryRequest_InputSerialization_JSONInput)(nil), // 102: volume_server_pb.QueryRequest.InputSerialization.JSONInput - (*QueryRequest_InputSerialization_ParquetInput)(nil), // 103: volume_server_pb.QueryRequest.InputSerialization.ParquetInput - (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 104: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 105: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - (*remote_pb.RemoteConf)(nil), // 106: remote_pb.RemoteConf - (*remote_pb.RemoteStorageLocation)(nil), // 107: remote_pb.RemoteStorageLocation + (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest + (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse + (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest + (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse + (*ReadAllNeedlesRequest)(nil), // 42: volume_server_pb.ReadAllNeedlesRequest + (*ReadAllNeedlesResponse)(nil), // 43: volume_server_pb.ReadAllNeedlesResponse + (*VolumeTailSenderRequest)(nil), // 44: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 45: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 46: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 47: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 48: volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 49: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 50: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 51: volume_server_pb.VolumeEcShardsRebuildResponse + (*VolumeEcShardsCopyRequest)(nil), // 52: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 53: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 54: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 55: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 58: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 59: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 60: volume_server_pb.VolumeEcShardReadRequest + (*VolumeEcShardReadResponse)(nil), // 61: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 62: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 63: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 64: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 65: volume_server_pb.VolumeEcShardsToVolumeResponse + (*ReadVolumeFileStatusRequest)(nil), // 66: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 67: volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 68: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 69: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 70: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 71: volume_server_pb.VolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 74: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 75: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 76: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 77: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 78: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 79: volume_server_pb.VolumeServerLeaveResponse + (*FetchAndWriteNeedleRequest)(nil), // 80: volume_server_pb.FetchAndWriteNeedleRequest + (*FetchAndWriteNeedleResponse)(nil), // 81: volume_server_pb.FetchAndWriteNeedleResponse + (*QueryRequest)(nil), // 82: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 83: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 84: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 85: volume_server_pb.VolumeNeedleStatusResponse + (*PingRequest)(nil), // 86: volume_server_pb.PingRequest + (*PingResponse)(nil), // 87: volume_server_pb.PingResponse + (*FetchAndWriteNeedleRequest_Replica)(nil), // 88: volume_server_pb.FetchAndWriteNeedleRequest.Replica + (*QueryRequest_Filter)(nil), // 89: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 90: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 91: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 92: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 93: volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 94: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 95: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 96: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + (*remote_pb.RemoteConf)(nil), // 97: remote_pb.RemoteConf + (*remote_pb.RemoteStorageLocation)(nil), // 98: remote_pb.RemoteStorageLocation } var file_volume_server_proto_depIdxs = []int32{ - 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult - 39, // 1: volume_server_pb.ReceiveFileRequest.info:type_name -> volume_server_pb.ReceiveFileInfo - 73, // 2: volume_server_pb.VolumeEcShardsInfoResponse.ec_shard_infos:type_name -> volume_server_pb.EcShardInfo - 79, // 3: volume_server_pb.ReadVolumeFileStatusResponse.volume_info:type_name -> volume_server_pb.VolumeInfo - 78, // 4: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 78, // 5: volume_server_pb.OldVersionVolumeInfo.files:type_name -> volume_server_pb.RemoteFile - 76, // 6: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus - 77, // 7: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus - 97, // 8: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica - 106, // 9: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf - 107, // 10: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation - 98, // 11: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter - 99, // 12: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization - 100, // 13: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization - 101, // 14: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput - 102, // 15: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput - 103, // 16: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput - 104, // 17: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput - 105, // 18: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput - 0, // 19: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest - 4, // 20: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest - 6, // 21: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest - 8, // 22: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest - 10, // 23: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest - 12, // 24: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest - 14, // 25: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest - 16, // 26: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest - 18, // 27: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest - 20, // 28: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest - 22, // 29: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest - 24, // 30: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest - 26, // 31: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest - 28, // 32: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest - 30, // 33: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest - 32, // 34: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest - 34, // 35: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest - 74, // 36: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest - 36, // 37: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest - 38, // 38: volume_server_pb.VolumeServer.ReceiveFile:input_type -> volume_server_pb.ReceiveFileRequest - 41, // 39: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest - 43, // 40: volume_server_pb.VolumeServer.ReadNeedleMeta:input_type -> volume_server_pb.ReadNeedleMetaRequest - 45, // 41: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest - 47, // 42: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest - 49, // 43: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest - 51, // 44: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest - 53, // 45: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest - 55, // 46: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest - 57, // 47: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest - 59, // 48: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest - 61, // 49: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest - 63, // 50: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest - 65, // 51: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest - 67, // 52: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest - 69, // 53: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest - 71, // 54: volume_server_pb.VolumeServer.VolumeEcShardsInfo:input_type -> volume_server_pb.VolumeEcShardsInfoRequest - 81, // 55: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest - 83, // 56: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest - 85, // 57: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest - 87, // 58: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest - 89, // 59: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest - 91, // 60: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest - 93, // 61: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest - 95, // 62: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest - 1, // 63: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse - 5, // 64: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse - 7, // 65: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse - 9, // 66: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse - 11, // 67: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse - 13, // 68: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse - 15, // 69: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse - 17, // 70: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse - 19, // 71: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse - 21, // 72: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse - 23, // 73: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse - 25, // 74: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse - 27, // 75: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse - 29, // 76: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse - 31, // 77: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse - 33, // 78: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse - 35, // 79: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse - 75, // 80: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse - 37, // 81: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse - 40, // 82: volume_server_pb.VolumeServer.ReceiveFile:output_type -> volume_server_pb.ReceiveFileResponse - 42, // 83: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse - 44, // 84: volume_server_pb.VolumeServer.ReadNeedleMeta:output_type -> volume_server_pb.ReadNeedleMetaResponse - 46, // 85: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse - 48, // 86: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse - 50, // 87: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse - 52, // 88: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse - 54, // 89: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse - 56, // 90: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse - 58, // 91: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse - 60, // 92: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse - 62, // 93: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse - 64, // 94: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse - 66, // 95: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse - 68, // 96: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse - 70, // 97: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse - 72, // 98: volume_server_pb.VolumeServer.VolumeEcShardsInfo:output_type -> volume_server_pb.VolumeEcShardsInfoResponse - 82, // 99: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse - 84, // 100: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse - 86, // 101: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse - 88, // 102: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse - 90, // 103: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse - 92, // 104: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe - 94, // 105: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse - 96, // 106: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse - 63, // [63:107] is the sub-list for method output_type - 19, // [19:63] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult + 70, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 68, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 69, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 88, // 4: volume_server_pb.FetchAndWriteNeedleRequest.replicas:type_name -> volume_server_pb.FetchAndWriteNeedleRequest.Replica + 97, // 5: volume_server_pb.FetchAndWriteNeedleRequest.remote_conf:type_name -> remote_pb.RemoteConf + 98, // 6: volume_server_pb.FetchAndWriteNeedleRequest.remote_location:type_name -> remote_pb.RemoteStorageLocation + 89, // 7: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 90, // 8: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 91, // 9: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 92, // 10: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 93, // 11: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 94, // 12: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 95, // 13: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 96, // 14: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 0, // 15: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 4, // 16: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 6, // 17: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 8, // 18: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 10, // 19: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 12, // 20: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 14, // 21: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 16, // 22: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 18, // 23: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 20, // 24: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 22, // 25: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 24, // 26: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 26, // 27: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 28, // 28: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> volume_server_pb.VolumeMarkWritableRequest + 30, // 29: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 32, // 30: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest + 34, // 31: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 66, // 32: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 36, // 33: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 38, // 34: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 40, // 35: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 42, // 36: volume_server_pb.VolumeServer.ReadAllNeedles:input_type -> volume_server_pb.ReadAllNeedlesRequest + 44, // 37: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 46, // 38: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 48, // 39: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 50, // 40: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 52, // 41: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 54, // 42: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 56, // 43: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 58, // 44: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 60, // 45: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 62, // 46: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 64, // 47: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 72, // 48: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 74, // 49: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 76, // 50: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 78, // 51: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 80, // 52: volume_server_pb.VolumeServer.FetchAndWriteNeedle:input_type -> volume_server_pb.FetchAndWriteNeedleRequest + 82, // 53: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 84, // 54: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 86, // 55: volume_server_pb.VolumeServer.Ping:input_type -> volume_server_pb.PingRequest + 1, // 56: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 57: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 58: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 59: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 60: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 61: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 15, // 62: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 63: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 64: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 65: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 66: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 67: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 68: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 69: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 70: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 71: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 72: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 67, // 73: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 74: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 39, // 75: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 41, // 76: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 43, // 77: volume_server_pb.VolumeServer.ReadAllNeedles:output_type -> volume_server_pb.ReadAllNeedlesResponse + 45, // 78: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 47, // 79: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 49, // 80: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 51, // 81: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 53, // 82: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 55, // 83: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 57, // 84: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 59, // 85: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 61, // 86: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 63, // 87: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 65, // 88: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 73, // 89: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 75, // 90: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 77, // 91: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 79, // 92: volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 81, // 93: volume_server_pb.VolumeServer.FetchAndWriteNeedle:output_type -> volume_server_pb.FetchAndWriteNeedleResponse + 83, // 94: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 85, // 95: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 87, // 96: volume_server_pb.VolumeServer.Ping:output_type -> volume_server_pb.PingResponse + 56, // [56:97] is the sub-list for method output_type + 15, // [15:56] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_volume_server_proto_init() } @@ -6842,17 +6740,1179 @@ func file_volume_server_proto_init() { if File_volume_server_proto != nil { return } - file_volume_server_proto_msgTypes[38].OneofWrappers = []any{ - (*ReceiveFileRequest_Info)(nil), - (*ReceiveFileRequest_FileContent)(nil), + if !protoimpl.UnsafeEnabled { + file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadAllNeedlesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadAllNeedlesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteFile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchAndWriteNeedleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchAndWriteNeedleResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueriedStripe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FetchAndWriteNeedleRequest_Replica); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_volume_server_proto_rawDesc), len(file_volume_server_proto_rawDesc)), + RawDescriptor: file_volume_server_proto_rawDesc, NumEnums: 0, - NumMessages: 106, + NumMessages: 97, NumExtensions: 0, NumServices: 1, }, @@ -6861,6 +7921,7 @@ func file_volume_server_proto_init() { MessageInfos: file_volume_server_proto_msgTypes, }.Build() File_volume_server_proto = out.File + file_volume_server_proto_rawDesc = nil file_volume_server_proto_goTypes = nil file_volume_server_proto_depIdxs = nil } diff --git a/weed/pb/volume_server_pb/volume_server_grpc.pb.go b/weed/pb/volume_server_pb/volume_server_grpc.pb.go index f43cff84c..e1b162457 100644 --- a/weed/pb/volume_server_pb/volume_server_grpc.pb.go +++ b/weed/pb/volume_server_pb/volume_server_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: volume_server.proto package volume_server_pb @@ -15,70 +11,23 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - VolumeServer_BatchDelete_FullMethodName = "/volume_server_pb.VolumeServer/BatchDelete" - VolumeServer_VacuumVolumeCheck_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCheck" - VolumeServer_VacuumVolumeCompact_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCompact" - VolumeServer_VacuumVolumeCommit_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCommit" - VolumeServer_VacuumVolumeCleanup_FullMethodName = "/volume_server_pb.VolumeServer/VacuumVolumeCleanup" - VolumeServer_DeleteCollection_FullMethodName = "/volume_server_pb.VolumeServer/DeleteCollection" - VolumeServer_AllocateVolume_FullMethodName = "/volume_server_pb.VolumeServer/AllocateVolume" - VolumeServer_VolumeSyncStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeSyncStatus" - VolumeServer_VolumeIncrementalCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeIncrementalCopy" - VolumeServer_VolumeMount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeMount" - VolumeServer_VolumeUnmount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeUnmount" - VolumeServer_VolumeDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeDelete" - VolumeServer_VolumeMarkReadonly_FullMethodName = "/volume_server_pb.VolumeServer/VolumeMarkReadonly" - VolumeServer_VolumeMarkWritable_FullMethodName = "/volume_server_pb.VolumeServer/VolumeMarkWritable" - VolumeServer_VolumeConfigure_FullMethodName = "/volume_server_pb.VolumeServer/VolumeConfigure" - VolumeServer_VolumeStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeStatus" - VolumeServer_VolumeCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeCopy" - VolumeServer_ReadVolumeFileStatus_FullMethodName = "/volume_server_pb.VolumeServer/ReadVolumeFileStatus" - VolumeServer_CopyFile_FullMethodName = "/volume_server_pb.VolumeServer/CopyFile" - VolumeServer_ReceiveFile_FullMethodName = "/volume_server_pb.VolumeServer/ReceiveFile" - VolumeServer_ReadNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleBlob" - VolumeServer_ReadNeedleMeta_FullMethodName = "/volume_server_pb.VolumeServer/ReadNeedleMeta" - VolumeServer_WriteNeedleBlob_FullMethodName = "/volume_server_pb.VolumeServer/WriteNeedleBlob" - VolumeServer_ReadAllNeedles_FullMethodName = "/volume_server_pb.VolumeServer/ReadAllNeedles" - VolumeServer_VolumeTailSender_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTailSender" - VolumeServer_VolumeTailReceiver_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTailReceiver" - VolumeServer_VolumeEcShardsGenerate_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate" - VolumeServer_VolumeEcShardsRebuild_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild" - VolumeServer_VolumeEcShardsCopy_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsCopy" - VolumeServer_VolumeEcShardsDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsDelete" - VolumeServer_VolumeEcShardsMount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsMount" - VolumeServer_VolumeEcShardsUnmount_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount" - VolumeServer_VolumeEcShardRead_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardRead" - VolumeServer_VolumeEcBlobDelete_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcBlobDelete" - VolumeServer_VolumeEcShardsToVolume_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume" - VolumeServer_VolumeEcShardsInfo_FullMethodName = "/volume_server_pb.VolumeServer/VolumeEcShardsInfo" - VolumeServer_VolumeTierMoveDatToRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote" - VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName = "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote" - VolumeServer_VolumeServerStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeServerStatus" - VolumeServer_VolumeServerLeave_FullMethodName = "/volume_server_pb.VolumeServer/VolumeServerLeave" - VolumeServer_FetchAndWriteNeedle_FullMethodName = "/volume_server_pb.VolumeServer/FetchAndWriteNeedle" - VolumeServer_Query_FullMethodName = "/volume_server_pb.VolumeServer/Query" - VolumeServer_VolumeNeedleStatus_FullMethodName = "/volume_server_pb.VolumeServer/VolumeNeedleStatus" - VolumeServer_Ping_FullMethodName = "/volume_server_pb.VolumeServer/Ping" -) +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 // VolumeServerClient is the client API for VolumeServer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type VolumeServerClient interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VacuumVolumeCompactResponse], error) + VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) - VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeIncrementalCopyResponse], error) + VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) @@ -87,15 +36,13 @@ type VolumeServerClient interface { VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume - VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error) + VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) - CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error) - ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) + CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) - ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) - ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) - VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) + ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) + VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) @@ -104,19 +51,18 @@ type VolumeServerClient interface { VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) - VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) + VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) - VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error) // tiered storage - VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) - VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) + VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) + VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) // query - Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) } @@ -130,9 +76,8 @@ func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient { } func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(BatchDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_BatchDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...) if err != nil { return nil, err } @@ -140,22 +85,20 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq } func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCheckResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCheck_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VacuumVolumeCompactResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[0], VolumeServer_VacuumVolumeCompact_FullMethodName, cOpts...) +func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[0], "/volume_server_pb.VolumeServer/VacuumVolumeCompact", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VacuumVolumeCompactRequest, VacuumVolumeCompactResponse]{ClientStream: stream} + x := &volumeServerVacuumVolumeCompactClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -165,13 +108,26 @@ func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *Vacuum return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VacuumVolumeCompactClient = grpc.ServerStreamingClient[VacuumVolumeCompactResponse] +type VolumeServer_VacuumVolumeCompactClient interface { + Recv() (*VacuumVolumeCompactResponse, error) + grpc.ClientStream +} + +type volumeServerVacuumVolumeCompactClient struct { + grpc.ClientStream +} + +func (x *volumeServerVacuumVolumeCompactClient) Recv() (*VacuumVolumeCompactResponse, error) { + m := new(VacuumVolumeCompactResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCommitResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCommit_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...) if err != nil { return nil, err } @@ -179,9 +135,8 @@ func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumV } func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VacuumVolumeCleanupResponse) - err := c.cc.Invoke(ctx, VolumeServer_VacuumVolumeCleanup_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) if err != nil { return nil, err } @@ -189,9 +144,8 @@ func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *Vacuum } func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(DeleteCollectionResponse) - err := c.cc.Invoke(ctx, VolumeServer_DeleteCollection_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -199,9 +153,8 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol } func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(AllocateVolumeResponse) - err := c.cc.Invoke(ctx, VolumeServer_AllocateVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) if err != nil { return nil, err } @@ -209,22 +162,20 @@ func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVol } func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeSyncStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeSyncStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeIncrementalCopyResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[1], VolumeServer_VolumeIncrementalCopy_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[1], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeIncrementalCopyRequest, VolumeIncrementalCopyResponse]{ClientStream: stream} + x := &volumeServerVolumeIncrementalCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -234,13 +185,26 @@ func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *Volu return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeIncrementalCopyClient = grpc.ServerStreamingClient[VolumeIncrementalCopyResponse] +type VolumeServer_VolumeIncrementalCopyClient interface { + Recv() (*VolumeIncrementalCopyResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeIncrementalCopyClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) { + m := new(VolumeIncrementalCopyResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) if err != nil { return nil, err } @@ -248,9 +212,8 @@ func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountReq } func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeUnmountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeUnmount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) if err != nil { return nil, err } @@ -258,9 +221,8 @@ func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmoun } func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) if err != nil { return nil, err } @@ -268,9 +230,8 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR } func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMarkReadonlyResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMarkReadonly_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) if err != nil { return nil, err } @@ -278,9 +239,8 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM } func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeMarkWritableResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeMarkWritable_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...) if err != nil { return nil, err } @@ -288,9 +248,8 @@ func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeM } func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeConfigureResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeConfigure_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) if err != nil { return nil, err } @@ -298,22 +257,20 @@ func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConf } func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeCopyResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[2], VolumeServer_VolumeCopy_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeCopy", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeCopyRequest, VolumeCopyResponse]{ClientStream: stream} + x := &volumeServerVolumeCopyClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -323,26 +280,38 @@ func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyReque return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeCopyClient = grpc.ServerStreamingClient[VolumeCopyResponse] +type VolumeServer_VolumeCopyClient interface { + Recv() (*VolumeCopyResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeCopyClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeCopyClient) Recv() (*VolumeCopyResponse, error) { + m := new(VolumeCopyResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReadVolumeFileStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadVolumeFileStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CopyFileResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[3], VolumeServer_CopyFile_FullMethodName, cOpts...) +func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[3], "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[CopyFileRequest, CopyFileResponse]{ClientStream: stream} + x := &volumeServerCopyFileClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -352,36 +321,26 @@ func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_CopyFileClient = grpc.ServerStreamingClient[CopyFileResponse] - -func (c *volumeServerClient) ReceiveFile(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], VolumeServer_ReceiveFile_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[ReceiveFileRequest, ReceiveFileResponse]{ClientStream: stream} - return x, nil +type VolumeServer_CopyFileClient interface { + Recv() (*CopyFileResponse, error) + grpc.ClientStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_ReceiveFileClient = grpc.ClientStreamingClient[ReceiveFileRequest, ReceiveFileResponse] +type volumeServerCopyFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { + m := new(CopyFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ReadNeedleBlobResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadNeedleBlob_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *volumeServerClient) ReadNeedleMeta(ctx context.Context, in *ReadNeedleMetaRequest, opts ...grpc.CallOption) (*ReadNeedleMetaResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ReadNeedleMetaResponse) - err := c.cc.Invoke(ctx, VolumeServer_ReadNeedleMeta_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) if err != nil { return nil, err } @@ -389,22 +348,20 @@ func (c *volumeServerClient) ReadNeedleMeta(ctx context.Context, in *ReadNeedleM } func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(WriteNeedleBlobResponse) - err := c.cc.Invoke(ctx, VolumeServer_WriteNeedleBlob_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ReadAllNeedlesResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], VolumeServer_ReadAllNeedles_FullMethodName, cOpts...) +func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeedlesRequest, opts ...grpc.CallOption) (VolumeServer_ReadAllNeedlesClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[4], "/volume_server_pb.VolumeServer/ReadAllNeedles", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[ReadAllNeedlesRequest, ReadAllNeedlesResponse]{ClientStream: stream} + x := &volumeServerReadAllNeedlesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -414,16 +371,29 @@ func (c *volumeServerClient) ReadAllNeedles(ctx context.Context, in *ReadAllNeed return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_ReadAllNeedlesClient = grpc.ServerStreamingClient[ReadAllNeedlesResponse] +type VolumeServer_ReadAllNeedlesClient interface { + Recv() (*ReadAllNeedlesResponse, error) + grpc.ClientStream +} -func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTailSenderResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], VolumeServer_VolumeTailSender_FullMethodName, cOpts...) +type volumeServerReadAllNeedlesClient struct { + grpc.ClientStream +} + +func (x *volumeServerReadAllNeedlesClient) Recv() (*ReadAllNeedlesResponse, error) { + m := new(ReadAllNeedlesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeTailSenderRequest, VolumeTailSenderResponse]{ClientStream: stream} + x := &volumeServerVolumeTailSenderClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -433,13 +403,26 @@ func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTai return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTailSenderClient = grpc.ServerStreamingClient[VolumeTailSenderResponse] +type VolumeServer_VolumeTailSenderClient interface { + Recv() (*VolumeTailSenderResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTailSenderClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) { + m := new(VolumeTailSenderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeTailReceiverResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeTailReceiver_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) if err != nil { return nil, err } @@ -447,9 +430,8 @@ func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeT } func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsGenerateResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsGenerate_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) if err != nil { return nil, err } @@ -457,9 +439,8 @@ func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsRebuildResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsRebuild_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) if err != nil { return nil, err } @@ -467,9 +448,8 @@ func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsCopyResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsCopy_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) if err != nil { return nil, err } @@ -477,9 +457,8 @@ func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeE } func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) if err != nil { return nil, err } @@ -487,9 +466,8 @@ func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *Volum } func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsMountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsMount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) if err != nil { return nil, err } @@ -497,22 +475,20 @@ func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *Volume } func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsUnmountResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsUnmount_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeEcShardReadResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], VolumeServer_VolumeEcShardRead_FullMethodName, cOpts...) +func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[6], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeEcShardReadRequest, VolumeEcShardReadResponse]{ClientStream: stream} + x := &volumeServerVolumeEcShardReadClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -522,13 +498,26 @@ func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEc return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeEcShardReadClient = grpc.ServerStreamingClient[VolumeEcShardReadResponse] +type VolumeServer_VolumeEcShardReadClient interface { + Recv() (*VolumeEcShardReadResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeEcShardReadClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) { + m := new(VolumeEcShardReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcBlobDeleteResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcBlobDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) if err != nil { return nil, err } @@ -536,32 +525,20 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE } func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeEcShardsToVolumeResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsToVolume_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) VolumeEcShardsInfo(ctx context.Context, in *VolumeEcShardsInfoRequest, opts ...grpc.CallOption) (*VolumeEcShardsInfoResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(VolumeEcShardsInfoResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeEcShardsInfo_FullMethodName, in, out, cOpts...) +func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[7], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } - return out, nil -} - -func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], VolumeServer_VolumeTierMoveDatToRemote_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[VolumeTierMoveDatToRemoteRequest, VolumeTierMoveDatToRemoteResponse]{ClientStream: stream} + x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -571,16 +548,29 @@ func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in * return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTierMoveDatToRemoteClient = grpc.ServerStreamingClient[VolumeTierMoveDatToRemoteResponse] +type VolumeServer_VolumeTierMoveDatToRemoteClient interface { + Recv() (*VolumeTierMoveDatToRemoteResponse, error) + grpc.ClientStream +} -func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], VolumeServer_VolumeTierMoveDatFromRemote_FullMethodName, cOpts...) +type volumeServerVolumeTierMoveDatToRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { + m := new(VolumeTierMoveDatToRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[8], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[VolumeTierMoveDatFromRemoteRequest, VolumeTierMoveDatFromRemoteResponse]{ClientStream: stream} + x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -590,13 +580,26 @@ func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTierMoveDatFromRemoteClient = grpc.ServerStreamingClient[VolumeTierMoveDatFromRemoteResponse] +type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { + Recv() (*VolumeTierMoveDatFromRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatFromRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { + m := new(VolumeTierMoveDatFromRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeServerStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeServerStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) if err != nil { return nil, err } @@ -604,9 +607,8 @@ func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeS } func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeServerLeaveResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeServerLeave_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...) if err != nil { return nil, err } @@ -614,22 +616,20 @@ func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeSe } func (c *volumeServerClient) FetchAndWriteNeedle(ctx context.Context, in *FetchAndWriteNeedleRequest, opts ...grpc.CallOption) (*FetchAndWriteNeedleResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(FetchAndWriteNeedleResponse) - err := c.cc.Invoke(ctx, VolumeServer_FetchAndWriteNeedle_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[QueriedStripe], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[10], VolumeServer_Query_FullMethodName, cOpts...) +func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { + stream, err := c.cc.NewStream(ctx, &VolumeServer_ServiceDesc.Streams[9], "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[QueryRequest, QueriedStripe]{ClientStream: stream} + x := &volumeServerQueryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -639,13 +639,26 @@ func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts . return x, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_QueryClient = grpc.ServerStreamingClient[QueriedStripe] +type VolumeServer_QueryClient interface { + Recv() (*QueriedStripe, error) + grpc.ClientStream +} + +type volumeServerQueryClient struct { + grpc.ClientStream +} + +func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { + m := new(QueriedStripe) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(VolumeNeedleStatusResponse) - err := c.cc.Invoke(ctx, VolumeServer_VolumeNeedleStatus_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...) if err != nil { return nil, err } @@ -653,9 +666,8 @@ func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeN } func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(PingResponse) - err := c.cc.Invoke(ctx, VolumeServer_Ping_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/Ping", in, out, opts...) if err != nil { return nil, err } @@ -664,18 +676,18 @@ func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ... // VolumeServerServer is the server API for VolumeServer service. // All implementations must embed UnimplementedVolumeServerServer -// for forward compatibility. +// for forward compatibility type VolumeServerServer interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(*VacuumVolumeCompactRequest, grpc.ServerStreamingServer[VacuumVolumeCompactResponse]) error + VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) - VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, grpc.ServerStreamingServer[VolumeIncrementalCopyResponse]) error + VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) @@ -684,15 +696,13 @@ type VolumeServerServer interface { VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume - VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error + VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) - CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error - ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error + CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) - ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) - ReadAllNeedles(*ReadAllNeedlesRequest, grpc.ServerStreamingServer[ReadAllNeedlesResponse]) error - VolumeTailSender(*VolumeTailSenderRequest, grpc.ServerStreamingServer[VolumeTailSenderResponse]) error + ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error + VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) // erasure coding VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) @@ -701,30 +711,26 @@ type VolumeServerServer interface { VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) - VolumeEcShardRead(*VolumeEcShardReadRequest, grpc.ServerStreamingServer[VolumeEcShardReadResponse]) error + VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) - VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error) // tiered storage - VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error - VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse]) error + VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error + VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) // remote storage FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) // query - Query(*QueryRequest, grpc.ServerStreamingServer[QueriedStripe]) error + Query(*QueryRequest, VolumeServer_QueryServer) error VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) Ping(context.Context, *PingRequest) (*PingResponse, error) mustEmbedUnimplementedVolumeServerServer() } -// UnimplementedVolumeServerServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedVolumeServerServer struct{} +// UnimplementedVolumeServerServer must be embedded to have forward compatible implementations. +type UnimplementedVolumeServerServer struct { +} func (UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented") @@ -732,7 +738,7 @@ func (UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDelete func (UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented") } -func (UnimplementedVolumeServerServer) VacuumVolumeCompact(*VacuumVolumeCompactRequest, grpc.ServerStreamingServer[VacuumVolumeCompactResponse]) error { +func (UnimplementedVolumeServerServer) VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error { return status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented") } func (UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) { @@ -750,7 +756,7 @@ func (UnimplementedVolumeServerServer) AllocateVolume(context.Context, *Allocate func (UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented") } -func (UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, grpc.ServerStreamingServer[VolumeIncrementalCopyResponse]) error { +func (UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented") } func (UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) { @@ -774,31 +780,25 @@ func (UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeC func (UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented") } -func (UnimplementedVolumeServerServer) VolumeCopy(*VolumeCopyRequest, grpc.ServerStreamingServer[VolumeCopyResponse]) error { +func (UnimplementedVolumeServerServer) VolumeCopy(*VolumeCopyRequest, VolumeServer_VolumeCopyServer) error { return status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented") } func (UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented") } -func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, grpc.ServerStreamingServer[CopyFileResponse]) error { +func (UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") } -func (UnimplementedVolumeServerServer) ReceiveFile(grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse]) error { - return status.Errorf(codes.Unimplemented, "method ReceiveFile not implemented") -} func (UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented") } -func (UnimplementedVolumeServerServer) ReadNeedleMeta(context.Context, *ReadNeedleMetaRequest) (*ReadNeedleMetaResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleMeta not implemented") -} func (UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented") } -func (UnimplementedVolumeServerServer) ReadAllNeedles(*ReadAllNeedlesRequest, grpc.ServerStreamingServer[ReadAllNeedlesResponse]) error { +func (UnimplementedVolumeServerServer) ReadAllNeedles(*ReadAllNeedlesRequest, VolumeServer_ReadAllNeedlesServer) error { return status.Errorf(codes.Unimplemented, "method ReadAllNeedles not implemented") } -func (UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, grpc.ServerStreamingServer[VolumeTailSenderResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") } func (UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) { @@ -822,7 +822,7 @@ func (UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *Vol func (UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented") } -func (UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, grpc.ServerStreamingServer[VolumeEcShardReadResponse]) error { +func (UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error { return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented") } func (UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { @@ -831,13 +831,10 @@ func (UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *Volu func (UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented") } -func (UnimplementedVolumeServerServer) VolumeEcShardsInfo(context.Context, *VolumeEcShardsInfoRequest) (*VolumeEcShardsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsInfo not implemented") -} -func (UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") } -func (UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse]) error { +func (UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") } func (UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { @@ -849,7 +846,7 @@ func (UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *Volum func (UnimplementedVolumeServerServer) FetchAndWriteNeedle(context.Context, *FetchAndWriteNeedleRequest) (*FetchAndWriteNeedleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchAndWriteNeedle not implemented") } -func (UnimplementedVolumeServerServer) Query(*QueryRequest, grpc.ServerStreamingServer[QueriedStripe]) error { +func (UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { return status.Errorf(codes.Unimplemented, "method Query not implemented") } func (UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) { @@ -859,7 +856,6 @@ func (UnimplementedVolumeServerServer) Ping(context.Context, *PingRequest) (*Pin return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } func (UnimplementedVolumeServerServer) mustEmbedUnimplementedVolumeServerServer() {} -func (UnimplementedVolumeServerServer) testEmbeddedByValue() {} // UnsafeVolumeServerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to VolumeServerServer will @@ -869,13 +865,6 @@ type UnsafeVolumeServerServer interface { } func RegisterVolumeServerServer(s grpc.ServiceRegistrar, srv VolumeServerServer) { - // If the following call pancis, it indicates UnimplementedVolumeServerServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&VolumeServer_ServiceDesc, srv) } @@ -889,7 +878,7 @@ func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_BatchDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/BatchDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest)) @@ -907,7 +896,7 @@ func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCheck_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest)) @@ -920,11 +909,21 @@ func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, stream grpc.Serv if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VacuumVolumeCompact(m, &grpc.GenericServerStream[VacuumVolumeCompactRequest, VacuumVolumeCompactResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VacuumVolumeCompact(m, &volumeServerVacuumVolumeCompactServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VacuumVolumeCompactServer = grpc.ServerStreamingServer[VacuumVolumeCompactResponse] +type VolumeServer_VacuumVolumeCompactServer interface { + Send(*VacuumVolumeCompactResponse) error + grpc.ServerStream +} + +type volumeServerVacuumVolumeCompactServer struct { + grpc.ServerStream +} + +func (x *volumeServerVacuumVolumeCompactServer) Send(m *VacuumVolumeCompactResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCommitRequest) @@ -936,7 +935,7 @@ func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCommit_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest)) @@ -954,7 +953,7 @@ func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VacuumVolumeCleanup_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest)) @@ -972,7 +971,7 @@ func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_DeleteCollection_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) @@ -990,7 +989,7 @@ func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_AllocateVolume_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) @@ -1008,7 +1007,7 @@ func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeSyncStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest)) @@ -1021,11 +1020,21 @@ func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.Se if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &grpc.GenericServerStream[VolumeIncrementalCopyRequest, VolumeIncrementalCopyResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeIncrementalCopyServer = grpc.ServerStreamingServer[VolumeIncrementalCopyResponse] +type VolumeServer_VolumeIncrementalCopyServer interface { + Send(*VolumeIncrementalCopyResponse) error + grpc.ServerStream +} + +type volumeServerVolumeIncrementalCopyServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeMountRequest) @@ -1037,7 +1046,7 @@ func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest)) @@ -1055,7 +1064,7 @@ func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeUnmount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest)) @@ -1073,7 +1082,7 @@ func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest)) @@ -1091,7 +1100,7 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMarkReadonly_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) @@ -1109,7 +1118,7 @@ func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeMarkWritable_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest)) @@ -1127,7 +1136,7 @@ func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeConfigure_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) @@ -1145,7 +1154,7 @@ func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest)) @@ -1158,11 +1167,21 @@ func _VolumeServer_VolumeCopy_Handler(srv interface{}, stream grpc.ServerStream) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeCopy(m, &grpc.GenericServerStream[VolumeCopyRequest, VolumeCopyResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeCopy(m, &volumeServerVolumeCopyServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeCopyServer = grpc.ServerStreamingServer[VolumeCopyResponse] +type VolumeServer_VolumeCopyServer interface { + Send(*VolumeCopyResponse) error + grpc.ServerStream +} + +type volumeServerVolumeCopyServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeCopyServer) Send(m *VolumeCopyResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadVolumeFileStatusRequest) @@ -1174,7 +1193,7 @@ func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_ReadVolumeFileStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) @@ -1187,18 +1206,21 @@ func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) e if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).CopyFile(m, &grpc.GenericServerStream[CopyFileRequest, CopyFileResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_CopyFileServer = grpc.ServerStreamingServer[CopyFileResponse] - -func _VolumeServer_ReceiveFile_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(VolumeServerServer).ReceiveFile(&grpc.GenericServerStream[ReceiveFileRequest, ReceiveFileResponse]{ServerStream: stream}) +type VolumeServer_CopyFileServer interface { + Send(*CopyFileResponse) error + grpc.ServerStream } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_ReceiveFileServer = grpc.ClientStreamingServer[ReceiveFileRequest, ReceiveFileResponse] +type volumeServerCopyFileServer struct { + grpc.ServerStream +} + +func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadNeedleBlobRequest) @@ -1210,7 +1232,7 @@ func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_ReadNeedleBlob_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) @@ -1218,24 +1240,6 @@ func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _VolumeServer_ReadNeedleMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadNeedleMetaRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VolumeServerServer).ReadNeedleMeta(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: VolumeServer_ReadNeedleMeta_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).ReadNeedleMeta(ctx, req.(*ReadNeedleMetaRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WriteNeedleBlobRequest) if err := dec(in); err != nil { @@ -1246,7 +1250,7 @@ func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_WriteNeedleBlob_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) @@ -1259,22 +1263,42 @@ func _VolumeServer_ReadAllNeedles_Handler(srv interface{}, stream grpc.ServerStr if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).ReadAllNeedles(m, &grpc.GenericServerStream[ReadAllNeedlesRequest, ReadAllNeedlesResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).ReadAllNeedles(m, &volumeServerReadAllNeedlesServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_ReadAllNeedlesServer = grpc.ServerStreamingServer[ReadAllNeedlesResponse] +type VolumeServer_ReadAllNeedlesServer interface { + Send(*ReadAllNeedlesResponse) error + grpc.ServerStream +} + +type volumeServerReadAllNeedlesServer struct { + grpc.ServerStream +} + +func (x *volumeServerReadAllNeedlesServer) Send(m *ReadAllNeedlesResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTailSender(m, &grpc.GenericServerStream[VolumeTailSenderRequest, VolumeTailSenderResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTailSenderServer = grpc.ServerStreamingServer[VolumeTailSenderResponse] +type VolumeServer_VolumeTailSenderServer interface { + Send(*VolumeTailSenderResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTailSenderServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeTailReceiverRequest) @@ -1286,7 +1310,7 @@ func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeTailReceiver_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest)) @@ -1304,7 +1328,7 @@ func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsGenerate_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest)) @@ -1322,7 +1346,7 @@ func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsRebuild_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest)) @@ -1340,7 +1364,7 @@ func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsCopy_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest)) @@ -1358,7 +1382,7 @@ func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest)) @@ -1376,7 +1400,7 @@ func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsMount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsMount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest)) @@ -1394,7 +1418,7 @@ func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsUnmount_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest)) @@ -1407,11 +1431,21 @@ func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.Server if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeEcShardRead(m, &grpc.GenericServerStream[VolumeEcShardReadRequest, VolumeEcShardReadResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeEcShardReadServer = grpc.ServerStreamingServer[VolumeEcShardReadResponse] +type VolumeServer_VolumeEcShardReadServer interface { + Send(*VolumeEcShardReadResponse) error + grpc.ServerStream +} + +type volumeServerVolumeEcShardReadServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeEcBlobDeleteRequest) @@ -1423,7 +1457,7 @@ func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcBlobDelete_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest)) @@ -1441,7 +1475,7 @@ func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeEcShardsToVolume_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) @@ -1449,45 +1483,47 @@ func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeEcShardsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeEcShardsInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VolumeServerServer).VolumeEcShardsInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: VolumeServer_VolumeEcShardsInfo_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VolumeEcShardsInfo(ctx, req.(*VolumeEcShardsInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTierMoveDatToRemoteRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &grpc.GenericServerStream[VolumeTierMoveDatToRemoteRequest, VolumeTierMoveDatToRemoteResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTierMoveDatToRemoteServer = grpc.ServerStreamingServer[VolumeTierMoveDatToRemoteResponse] +type VolumeServer_VolumeTierMoveDatToRemoteServer interface { + Send(*VolumeTierMoveDatToRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatToRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTierMoveDatFromRemoteRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &grpc.GenericServerStream[VolumeTierMoveDatFromRemoteRequest, VolumeTierMoveDatFromRemoteResponse]{ServerStream: stream}) + return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_VolumeTierMoveDatFromRemoteServer = grpc.ServerStreamingServer[VolumeTierMoveDatFromRemoteResponse] +type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { + Send(*VolumeTierMoveDatFromRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatFromRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeServerStatusRequest) @@ -1499,7 +1535,7 @@ func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeServerStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) @@ -1517,7 +1553,7 @@ func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeServerLeave_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest)) @@ -1535,7 +1571,7 @@ func _VolumeServer_FetchAndWriteNeedle_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_FetchAndWriteNeedle_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/FetchAndWriteNeedle", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).FetchAndWriteNeedle(ctx, req.(*FetchAndWriteNeedleRequest)) @@ -1548,11 +1584,21 @@ func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) erro if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).Query(m, &grpc.GenericServerStream[QueryRequest, QueriedStripe]{ServerStream: stream}) + return srv.(VolumeServerServer).Query(m, &volumeServerQueryServer{stream}) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type VolumeServer_QueryServer = grpc.ServerStreamingServer[QueriedStripe] +type VolumeServer_QueryServer interface { + Send(*QueriedStripe) error + grpc.ServerStream +} + +type volumeServerQueryServer struct { + grpc.ServerStream +} + +func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { + return x.ServerStream.SendMsg(m) +} func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeNeedleStatusRequest) @@ -1564,7 +1610,7 @@ func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_VolumeNeedleStatus_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest)) @@ -1582,7 +1628,7 @@ func _VolumeServer_Ping_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: VolumeServer_Ping_FullMethodName, + FullMethod: "/volume_server_pb.VolumeServer/Ping", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VolumeServerServer).Ping(ctx, req.(*PingRequest)) @@ -1661,10 +1707,6 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReadNeedleBlob", Handler: _VolumeServer_ReadNeedleBlob_Handler, }, - { - MethodName: "ReadNeedleMeta", - Handler: _VolumeServer_ReadNeedleMeta_Handler, - }, { MethodName: "WriteNeedleBlob", Handler: _VolumeServer_WriteNeedleBlob_Handler, @@ -1705,10 +1747,6 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcShardsToVolume", Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, }, - { - MethodName: "VolumeEcShardsInfo", - Handler: _VolumeServer_VolumeEcShardsInfo_Handler, - }, { MethodName: "VolumeServerStatus", Handler: _VolumeServer_VolumeServerStatus_Handler, @@ -1751,11 +1789,6 @@ var VolumeServer_ServiceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_CopyFile_Handler, ServerStreams: true, }, - { - StreamName: "ReceiveFile", - Handler: _VolumeServer_ReceiveFile_Handler, - ClientStreams: true, - }, { StreamName: "ReadAllNeedles", Handler: _VolumeServer_ReadAllNeedles_Handler, diff --git a/weed/pb/worker.proto b/weed/pb/worker.proto deleted file mode 100644 index b9e3d61d0..000000000 --- a/weed/pb/worker.proto +++ /dev/null @@ -1,399 +0,0 @@ -syntax = "proto3"; - -package worker_pb; - -option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"; - -// WorkerService provides bidirectional communication between admin and worker -service WorkerService { - // WorkerStream maintains a bidirectional stream for worker communication - rpc WorkerStream(stream WorkerMessage) returns (stream AdminMessage); -} - -// WorkerMessage represents messages from worker to admin -message WorkerMessage { - string worker_id = 1; - int64 timestamp = 2; - - oneof message { - WorkerRegistration registration = 3; - WorkerHeartbeat heartbeat = 4; - TaskRequest task_request = 5; - TaskUpdate task_update = 6; - TaskComplete task_complete = 7; - WorkerShutdown shutdown = 8; - TaskLogResponse task_log_response = 9; - } -} - -// AdminMessage represents messages from admin to worker -message AdminMessage { - string admin_id = 1; - int64 timestamp = 2; - - oneof message { - RegistrationResponse registration_response = 3; - HeartbeatResponse heartbeat_response = 4; - TaskAssignment task_assignment = 5; - TaskCancellation task_cancellation = 6; - AdminShutdown admin_shutdown = 7; - TaskLogRequest task_log_request = 8; - } -} - -// WorkerRegistration message when worker connects -message WorkerRegistration { - string worker_id = 1; - string address = 2; - repeated string capabilities = 3; - int32 max_concurrent = 4; - map metadata = 5; -} - -// RegistrationResponse confirms worker registration -message RegistrationResponse { - bool success = 1; - string message = 2; - string assigned_worker_id = 3; -} - -// WorkerHeartbeat sent periodically by worker -message WorkerHeartbeat { - string worker_id = 1; - string status = 2; - int32 current_load = 3; - int32 max_concurrent = 4; - repeated string current_task_ids = 5; - int32 tasks_completed = 6; - int32 tasks_failed = 7; - int64 uptime_seconds = 8; -} - -// HeartbeatResponse acknowledges heartbeat -message HeartbeatResponse { - bool success = 1; - string message = 2; -} - -// TaskRequest from worker asking for new tasks -message TaskRequest { - string worker_id = 1; - repeated string capabilities = 2; - int32 available_slots = 3; -} - -// TaskAssignment from admin to worker -message TaskAssignment { - string task_id = 1; - string task_type = 2; - TaskParams params = 3; - int32 priority = 4; - int64 created_time = 5; - map metadata = 6; -} - -// TaskParams contains task-specific parameters with typed variants -message TaskParams { - string task_id = 1; // ActiveTopology task ID for lifecycle management - uint32 volume_id = 2; // Primary volume ID for the task - string collection = 3; // Collection name - string data_center = 4; // Primary data center - string rack = 5; // Primary rack - uint64 volume_size = 6; // Original volume size in bytes for tracking size changes - - // Unified source and target arrays for all task types - repeated TaskSource sources = 7; // Source locations (volume replicas, EC shards, etc.) - repeated TaskTarget targets = 8; // Target locations (destinations, new replicas, etc.) - - // Typed task parameters - oneof task_params { - VacuumTaskParams vacuum_params = 9; - ErasureCodingTaskParams erasure_coding_params = 10; - BalanceTaskParams balance_params = 11; - ReplicationTaskParams replication_params = 12; - } -} - -// VacuumTaskParams for vacuum operations -message VacuumTaskParams { - double garbage_threshold = 1; // Minimum garbage ratio to trigger vacuum - bool force_vacuum = 2; // Force vacuum even if below threshold - int32 batch_size = 3; // Number of files to process per batch - string working_dir = 4; // Working directory for temporary files - bool verify_checksum = 5; // Verify file checksums during vacuum -} - -// ErasureCodingTaskParams for EC encoding operations -message ErasureCodingTaskParams { - uint64 estimated_shard_size = 1; // Estimated size per shard - int32 data_shards = 2; // Number of data shards (default: 10) - int32 parity_shards = 3; // Number of parity shards (default: 4) - string working_dir = 4; // Working directory for EC processing - string master_client = 5; // Master server address - bool cleanup_source = 6; // Whether to cleanup source volume after EC -} - -// TaskSource represents a unified source location for any task type -message TaskSource { - string node = 1; // Source server address - uint32 disk_id = 2; // Source disk ID - string rack = 3; // Source rack for tracking - string data_center = 4; // Source data center for tracking - uint32 volume_id = 5; // Volume ID (for volume operations) - repeated uint32 shard_ids = 6; // Shard IDs (for EC shard operations) - uint64 estimated_size = 7; // Estimated size to be processed -} - -// TaskTarget represents a unified target location for any task type -message TaskTarget { - string node = 1; // Target server address - uint32 disk_id = 2; // Target disk ID - string rack = 3; // Target rack for tracking - string data_center = 4; // Target data center for tracking - uint32 volume_id = 5; // Volume ID (for volume operations) - repeated uint32 shard_ids = 6; // Shard IDs (for EC shard operations) - uint64 estimated_size = 7; // Estimated size to be created -} - - - -// BalanceTaskParams for volume balancing operations -message BalanceTaskParams { - bool force_move = 1; // Force move even with conflicts - int32 timeout_seconds = 2; // Operation timeout -} - -// ReplicationTaskParams for adding replicas -message ReplicationTaskParams { - int32 replica_count = 1; // Target replica count - bool verify_consistency = 2; // Verify replica consistency after creation -} - -// TaskUpdate reports task progress -message TaskUpdate { - string task_id = 1; - string worker_id = 2; - string status = 3; - float progress = 4; - string message = 5; - map metadata = 6; -} - -// TaskComplete reports task completion -message TaskComplete { - string task_id = 1; - string worker_id = 2; - bool success = 3; - string error_message = 4; - int64 completion_time = 5; - map result_metadata = 6; -} - -// TaskCancellation from admin to cancel a task -message TaskCancellation { - string task_id = 1; - string reason = 2; - bool force = 3; -} - -// WorkerShutdown notifies admin that worker is shutting down -message WorkerShutdown { - string worker_id = 1; - string reason = 2; - repeated string pending_task_ids = 3; -} - -// AdminShutdown notifies worker that admin is shutting down -message AdminShutdown { - string reason = 1; - int32 graceful_shutdown_seconds = 2; -} - -// ========== Task Log Messages ========== - -// TaskLogRequest requests logs for a specific task -message TaskLogRequest { - string task_id = 1; - string worker_id = 2; - bool include_metadata = 3; // Include task metadata - int32 max_entries = 4; // Maximum number of log entries (0 = all) - string log_level = 5; // Filter by log level (INFO, WARNING, ERROR, DEBUG) - int64 start_time = 6; // Unix timestamp for start time filter - int64 end_time = 7; // Unix timestamp for end time filter -} - -// TaskLogResponse returns task logs and metadata -message TaskLogResponse { - string task_id = 1; - string worker_id = 2; - bool success = 3; - string error_message = 4; - TaskLogMetadata metadata = 5; - repeated TaskLogEntry log_entries = 6; -} - -// TaskLogMetadata contains metadata about task execution -message TaskLogMetadata { - string task_id = 1; - string task_type = 2; - string worker_id = 3; - int64 start_time = 4; - int64 end_time = 5; - int64 duration_ms = 6; - string status = 7; - float progress = 8; - uint32 volume_id = 9; - string server = 10; - string collection = 11; - string log_file_path = 12; - int64 created_at = 13; - map custom_data = 14; -} - -// TaskLogEntry represents a single log entry -message TaskLogEntry { - int64 timestamp = 1; - string level = 2; - string message = 3; - map fields = 4; - float progress = 5; - string status = 6; -} - -// ========== Maintenance Configuration Messages ========== - -// MaintenanceConfig holds configuration for the maintenance system -message MaintenanceConfig { - bool enabled = 1; - int32 scan_interval_seconds = 2; // How often to scan for maintenance needs - int32 worker_timeout_seconds = 3; // Worker heartbeat timeout - int32 task_timeout_seconds = 4; // Individual task timeout - int32 retry_delay_seconds = 5; // Delay between retries - int32 max_retries = 6; // Default max retries for tasks - int32 cleanup_interval_seconds = 7; // How often to clean up old tasks - int32 task_retention_seconds = 8; // How long to keep completed/failed tasks - MaintenancePolicy policy = 9; -} - -// MaintenancePolicy defines policies for maintenance operations -message MaintenancePolicy { - map task_policies = 1; // Task type -> policy mapping - int32 global_max_concurrent = 2; // Overall limit across all task types - int32 default_repeat_interval_seconds = 3; // Default seconds if task doesn't specify - int32 default_check_interval_seconds = 4; // Default seconds for periodic checks -} - -// TaskPolicy represents configuration for a specific task type -message TaskPolicy { - bool enabled = 1; - int32 max_concurrent = 2; - int32 repeat_interval_seconds = 3; // Seconds to wait before repeating - int32 check_interval_seconds = 4; // Seconds between checks - - // Typed task-specific configuration (replaces generic map) - oneof task_config { - VacuumTaskConfig vacuum_config = 5; - ErasureCodingTaskConfig erasure_coding_config = 6; - BalanceTaskConfig balance_config = 7; - ReplicationTaskConfig replication_config = 8; - } -} - -// Task-specific configuration messages - -// VacuumTaskConfig contains vacuum-specific configuration -message VacuumTaskConfig { - double garbage_threshold = 1; // Minimum garbage ratio to trigger vacuum (0.0-1.0) - int32 min_volume_age_hours = 2; // Minimum age before vacuum is considered - int32 min_interval_seconds = 3; // Minimum time between vacuum operations on the same volume -} - -// ErasureCodingTaskConfig contains EC-specific configuration -message ErasureCodingTaskConfig { - double fullness_ratio = 1; // Minimum fullness ratio to trigger EC (0.0-1.0) - int32 quiet_for_seconds = 2; // Minimum quiet time before EC - int32 min_volume_size_mb = 3; // Minimum volume size for EC - string collection_filter = 4; // Only process volumes from specific collections -} - -// BalanceTaskConfig contains balance-specific configuration -message BalanceTaskConfig { - double imbalance_threshold = 1; // Threshold for triggering rebalancing (0.0-1.0) - int32 min_server_count = 2; // Minimum number of servers required for balancing -} - -// ReplicationTaskConfig contains replication-specific configuration -message ReplicationTaskConfig { - int32 target_replica_count = 1; // Target number of replicas -} - -// ========== Task Persistence Messages ========== - -// MaintenanceTaskData represents complete task state for persistence -message MaintenanceTaskData { - string id = 1; - string type = 2; - string priority = 3; - string status = 4; - uint32 volume_id = 5; - string server = 6; - string collection = 7; - TaskParams typed_params = 8; - string reason = 9; - int64 created_at = 10; - int64 scheduled_at = 11; - int64 started_at = 12; - int64 completed_at = 13; - string worker_id = 14; - string error = 15; - double progress = 16; - int32 retry_count = 17; - int32 max_retries = 18; - - // Enhanced fields for detailed task tracking - string created_by = 19; - string creation_context = 20; - repeated TaskAssignmentRecord assignment_history = 21; - string detailed_reason = 22; - map tags = 23; - TaskCreationMetrics creation_metrics = 24; -} - -// TaskAssignmentRecord tracks worker assignments for a task -message TaskAssignmentRecord { - string worker_id = 1; - string worker_address = 2; - int64 assigned_at = 3; - int64 unassigned_at = 4; // Optional: when worker was unassigned - string reason = 5; // Reason for assignment/unassignment -} - -// TaskCreationMetrics tracks why and how a task was created -message TaskCreationMetrics { - string trigger_metric = 1; // Name of metric that triggered creation - double metric_value = 2; // Value that triggered creation - double threshold = 3; // Threshold that was exceeded - VolumeHealthMetrics volume_metrics = 4; // Volume health at creation time - map additional_data = 5; // Additional context data -} - -// VolumeHealthMetrics captures volume state at task creation -message VolumeHealthMetrics { - uint64 total_size = 1; - uint64 used_size = 2; - uint64 garbage_size = 3; - double garbage_ratio = 4; - int32 file_count = 5; - int32 deleted_file_count = 6; - int64 last_modified = 7; - int32 replica_count = 8; - bool is_ec_volume = 9; - string collection = 10; -} - -// TaskStateFile wraps task data with metadata for persistence -message TaskStateFile { - MaintenanceTaskData task = 1; - int64 last_updated = 2; - string admin_version = 3; -} \ No newline at end of file diff --git a/weed/pb/worker_pb/worker.pb.go b/weed/pb/worker_pb/worker.pb.go deleted file mode 100644 index 7ff5a8a36..000000000 --- a/weed/pb/worker_pb/worker.pb.go +++ /dev/null @@ -1,3812 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.6 -// protoc v5.29.3 -// source: worker.proto - -package worker_pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// WorkerMessage represents messages from worker to admin -type WorkerMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Message: - // - // *WorkerMessage_Registration - // *WorkerMessage_Heartbeat - // *WorkerMessage_TaskRequest - // *WorkerMessage_TaskUpdate - // *WorkerMessage_TaskComplete - // *WorkerMessage_Shutdown - // *WorkerMessage_TaskLogResponse - Message isWorkerMessage_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WorkerMessage) Reset() { - *x = WorkerMessage{} - mi := &file_worker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WorkerMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkerMessage) ProtoMessage() {} - -func (x *WorkerMessage) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkerMessage.ProtoReflect.Descriptor instead. -func (*WorkerMessage) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{0} -} - -func (x *WorkerMessage) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *WorkerMessage) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *WorkerMessage) GetMessage() isWorkerMessage_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *WorkerMessage) GetRegistration() *WorkerRegistration { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_Registration); ok { - return x.Registration - } - } - return nil -} - -func (x *WorkerMessage) GetHeartbeat() *WorkerHeartbeat { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_Heartbeat); ok { - return x.Heartbeat - } - } - return nil -} - -func (x *WorkerMessage) GetTaskRequest() *TaskRequest { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_TaskRequest); ok { - return x.TaskRequest - } - } - return nil -} - -func (x *WorkerMessage) GetTaskUpdate() *TaskUpdate { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_TaskUpdate); ok { - return x.TaskUpdate - } - } - return nil -} - -func (x *WorkerMessage) GetTaskComplete() *TaskComplete { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_TaskComplete); ok { - return x.TaskComplete - } - } - return nil -} - -func (x *WorkerMessage) GetShutdown() *WorkerShutdown { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_Shutdown); ok { - return x.Shutdown - } - } - return nil -} - -func (x *WorkerMessage) GetTaskLogResponse() *TaskLogResponse { - if x != nil { - if x, ok := x.Message.(*WorkerMessage_TaskLogResponse); ok { - return x.TaskLogResponse - } - } - return nil -} - -type isWorkerMessage_Message interface { - isWorkerMessage_Message() -} - -type WorkerMessage_Registration struct { - Registration *WorkerRegistration `protobuf:"bytes,3,opt,name=registration,proto3,oneof"` -} - -type WorkerMessage_Heartbeat struct { - Heartbeat *WorkerHeartbeat `protobuf:"bytes,4,opt,name=heartbeat,proto3,oneof"` -} - -type WorkerMessage_TaskRequest struct { - TaskRequest *TaskRequest `protobuf:"bytes,5,opt,name=task_request,json=taskRequest,proto3,oneof"` -} - -type WorkerMessage_TaskUpdate struct { - TaskUpdate *TaskUpdate `protobuf:"bytes,6,opt,name=task_update,json=taskUpdate,proto3,oneof"` -} - -type WorkerMessage_TaskComplete struct { - TaskComplete *TaskComplete `protobuf:"bytes,7,opt,name=task_complete,json=taskComplete,proto3,oneof"` -} - -type WorkerMessage_Shutdown struct { - Shutdown *WorkerShutdown `protobuf:"bytes,8,opt,name=shutdown,proto3,oneof"` -} - -type WorkerMessage_TaskLogResponse struct { - TaskLogResponse *TaskLogResponse `protobuf:"bytes,9,opt,name=task_log_response,json=taskLogResponse,proto3,oneof"` -} - -func (*WorkerMessage_Registration) isWorkerMessage_Message() {} - -func (*WorkerMessage_Heartbeat) isWorkerMessage_Message() {} - -func (*WorkerMessage_TaskRequest) isWorkerMessage_Message() {} - -func (*WorkerMessage_TaskUpdate) isWorkerMessage_Message() {} - -func (*WorkerMessage_TaskComplete) isWorkerMessage_Message() {} - -func (*WorkerMessage_Shutdown) isWorkerMessage_Message() {} - -func (*WorkerMessage_TaskLogResponse) isWorkerMessage_Message() {} - -// AdminMessage represents messages from admin to worker -type AdminMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` - AdminId string `protobuf:"bytes,1,opt,name=admin_id,json=adminId,proto3" json:"admin_id,omitempty"` - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Message: - // - // *AdminMessage_RegistrationResponse - // *AdminMessage_HeartbeatResponse - // *AdminMessage_TaskAssignment - // *AdminMessage_TaskCancellation - // *AdminMessage_AdminShutdown - // *AdminMessage_TaskLogRequest - Message isAdminMessage_Message `protobuf_oneof:"message"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *AdminMessage) Reset() { - *x = AdminMessage{} - mi := &file_worker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AdminMessage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdminMessage) ProtoMessage() {} - -func (x *AdminMessage) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdminMessage.ProtoReflect.Descriptor instead. -func (*AdminMessage) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{1} -} - -func (x *AdminMessage) GetAdminId() string { - if x != nil { - return x.AdminId - } - return "" -} - -func (x *AdminMessage) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *AdminMessage) GetMessage() isAdminMessage_Message { - if x != nil { - return x.Message - } - return nil -} - -func (x *AdminMessage) GetRegistrationResponse() *RegistrationResponse { - if x != nil { - if x, ok := x.Message.(*AdminMessage_RegistrationResponse); ok { - return x.RegistrationResponse - } - } - return nil -} - -func (x *AdminMessage) GetHeartbeatResponse() *HeartbeatResponse { - if x != nil { - if x, ok := x.Message.(*AdminMessage_HeartbeatResponse); ok { - return x.HeartbeatResponse - } - } - return nil -} - -func (x *AdminMessage) GetTaskAssignment() *TaskAssignment { - if x != nil { - if x, ok := x.Message.(*AdminMessage_TaskAssignment); ok { - return x.TaskAssignment - } - } - return nil -} - -func (x *AdminMessage) GetTaskCancellation() *TaskCancellation { - if x != nil { - if x, ok := x.Message.(*AdminMessage_TaskCancellation); ok { - return x.TaskCancellation - } - } - return nil -} - -func (x *AdminMessage) GetAdminShutdown() *AdminShutdown { - if x != nil { - if x, ok := x.Message.(*AdminMessage_AdminShutdown); ok { - return x.AdminShutdown - } - } - return nil -} - -func (x *AdminMessage) GetTaskLogRequest() *TaskLogRequest { - if x != nil { - if x, ok := x.Message.(*AdminMessage_TaskLogRequest); ok { - return x.TaskLogRequest - } - } - return nil -} - -type isAdminMessage_Message interface { - isAdminMessage_Message() -} - -type AdminMessage_RegistrationResponse struct { - RegistrationResponse *RegistrationResponse `protobuf:"bytes,3,opt,name=registration_response,json=registrationResponse,proto3,oneof"` -} - -type AdminMessage_HeartbeatResponse struct { - HeartbeatResponse *HeartbeatResponse `protobuf:"bytes,4,opt,name=heartbeat_response,json=heartbeatResponse,proto3,oneof"` -} - -type AdminMessage_TaskAssignment struct { - TaskAssignment *TaskAssignment `protobuf:"bytes,5,opt,name=task_assignment,json=taskAssignment,proto3,oneof"` -} - -type AdminMessage_TaskCancellation struct { - TaskCancellation *TaskCancellation `protobuf:"bytes,6,opt,name=task_cancellation,json=taskCancellation,proto3,oneof"` -} - -type AdminMessage_AdminShutdown struct { - AdminShutdown *AdminShutdown `protobuf:"bytes,7,opt,name=admin_shutdown,json=adminShutdown,proto3,oneof"` -} - -type AdminMessage_TaskLogRequest struct { - TaskLogRequest *TaskLogRequest `protobuf:"bytes,8,opt,name=task_log_request,json=taskLogRequest,proto3,oneof"` -} - -func (*AdminMessage_RegistrationResponse) isAdminMessage_Message() {} - -func (*AdminMessage_HeartbeatResponse) isAdminMessage_Message() {} - -func (*AdminMessage_TaskAssignment) isAdminMessage_Message() {} - -func (*AdminMessage_TaskCancellation) isAdminMessage_Message() {} - -func (*AdminMessage_AdminShutdown) isAdminMessage_Message() {} - -func (*AdminMessage_TaskLogRequest) isAdminMessage_Message() {} - -// WorkerRegistration message when worker connects -type WorkerRegistration struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - Capabilities []string `protobuf:"bytes,3,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - MaxConcurrent int32 `protobuf:"varint,4,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WorkerRegistration) Reset() { - *x = WorkerRegistration{} - mi := &file_worker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WorkerRegistration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkerRegistration) ProtoMessage() {} - -func (x *WorkerRegistration) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkerRegistration.ProtoReflect.Descriptor instead. -func (*WorkerRegistration) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{2} -} - -func (x *WorkerRegistration) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *WorkerRegistration) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *WorkerRegistration) GetCapabilities() []string { - if x != nil { - return x.Capabilities - } - return nil -} - -func (x *WorkerRegistration) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *WorkerRegistration) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -// RegistrationResponse confirms worker registration -type RegistrationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - AssignedWorkerId string `protobuf:"bytes,3,opt,name=assigned_worker_id,json=assignedWorkerId,proto3" json:"assigned_worker_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RegistrationResponse) Reset() { - *x = RegistrationResponse{} - mi := &file_worker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RegistrationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegistrationResponse) ProtoMessage() {} - -func (x *RegistrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegistrationResponse.ProtoReflect.Descriptor instead. -func (*RegistrationResponse) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{3} -} - -func (x *RegistrationResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *RegistrationResponse) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *RegistrationResponse) GetAssignedWorkerId() string { - if x != nil { - return x.AssignedWorkerId - } - return "" -} - -// WorkerHeartbeat sent periodically by worker -type WorkerHeartbeat struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - CurrentLoad int32 `protobuf:"varint,3,opt,name=current_load,json=currentLoad,proto3" json:"current_load,omitempty"` - MaxConcurrent int32 `protobuf:"varint,4,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - CurrentTaskIds []string `protobuf:"bytes,5,rep,name=current_task_ids,json=currentTaskIds,proto3" json:"current_task_ids,omitempty"` - TasksCompleted int32 `protobuf:"varint,6,opt,name=tasks_completed,json=tasksCompleted,proto3" json:"tasks_completed,omitempty"` - TasksFailed int32 `protobuf:"varint,7,opt,name=tasks_failed,json=tasksFailed,proto3" json:"tasks_failed,omitempty"` - UptimeSeconds int64 `protobuf:"varint,8,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WorkerHeartbeat) Reset() { - *x = WorkerHeartbeat{} - mi := &file_worker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WorkerHeartbeat) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkerHeartbeat) ProtoMessage() {} - -func (x *WorkerHeartbeat) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkerHeartbeat.ProtoReflect.Descriptor instead. -func (*WorkerHeartbeat) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{4} -} - -func (x *WorkerHeartbeat) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *WorkerHeartbeat) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *WorkerHeartbeat) GetCurrentLoad() int32 { - if x != nil { - return x.CurrentLoad - } - return 0 -} - -func (x *WorkerHeartbeat) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *WorkerHeartbeat) GetCurrentTaskIds() []string { - if x != nil { - return x.CurrentTaskIds - } - return nil -} - -func (x *WorkerHeartbeat) GetTasksCompleted() int32 { - if x != nil { - return x.TasksCompleted - } - return 0 -} - -func (x *WorkerHeartbeat) GetTasksFailed() int32 { - if x != nil { - return x.TasksFailed - } - return 0 -} - -func (x *WorkerHeartbeat) GetUptimeSeconds() int64 { - if x != nil { - return x.UptimeSeconds - } - return 0 -} - -// HeartbeatResponse acknowledges heartbeat -type HeartbeatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *HeartbeatResponse) Reset() { - *x = HeartbeatResponse{} - mi := &file_worker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *HeartbeatResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeartbeatResponse) ProtoMessage() {} - -func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead. -func (*HeartbeatResponse) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{5} -} - -func (x *HeartbeatResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *HeartbeatResponse) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -// TaskRequest from worker asking for new tasks -type TaskRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Capabilities []string `protobuf:"bytes,2,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - AvailableSlots int32 `protobuf:"varint,3,opt,name=available_slots,json=availableSlots,proto3" json:"available_slots,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskRequest) Reset() { - *x = TaskRequest{} - mi := &file_worker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskRequest) ProtoMessage() {} - -func (x *TaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskRequest.ProtoReflect.Descriptor instead. -func (*TaskRequest) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{6} -} - -func (x *TaskRequest) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskRequest) GetCapabilities() []string { - if x != nil { - return x.Capabilities - } - return nil -} - -func (x *TaskRequest) GetAvailableSlots() int32 { - if x != nil { - return x.AvailableSlots - } - return 0 -} - -// TaskAssignment from admin to worker -type TaskAssignment struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - TaskType string `protobuf:"bytes,2,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` - Params *TaskParams `protobuf:"bytes,3,opt,name=params,proto3" json:"params,omitempty"` - Priority int32 `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"` - CreatedTime int64 `protobuf:"varint,5,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` - Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskAssignment) Reset() { - *x = TaskAssignment{} - mi := &file_worker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskAssignment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskAssignment) ProtoMessage() {} - -func (x *TaskAssignment) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskAssignment.ProtoReflect.Descriptor instead. -func (*TaskAssignment) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{7} -} - -func (x *TaskAssignment) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskAssignment) GetTaskType() string { - if x != nil { - return x.TaskType - } - return "" -} - -func (x *TaskAssignment) GetParams() *TaskParams { - if x != nil { - return x.Params - } - return nil -} - -func (x *TaskAssignment) GetPriority() int32 { - if x != nil { - return x.Priority - } - return 0 -} - -func (x *TaskAssignment) GetCreatedTime() int64 { - if x != nil { - return x.CreatedTime - } - return 0 -} - -func (x *TaskAssignment) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -// TaskParams contains task-specific parameters with typed variants -type TaskParams struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` // ActiveTopology task ID for lifecycle management - VolumeId uint32 `protobuf:"varint,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Primary volume ID for the task - Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` // Collection name - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Primary data center - Rack string `protobuf:"bytes,5,opt,name=rack,proto3" json:"rack,omitempty"` // Primary rack - VolumeSize uint64 `protobuf:"varint,6,opt,name=volume_size,json=volumeSize,proto3" json:"volume_size,omitempty"` // Original volume size in bytes for tracking size changes - // Unified source and target arrays for all task types - Sources []*TaskSource `protobuf:"bytes,7,rep,name=sources,proto3" json:"sources,omitempty"` // Source locations (volume replicas, EC shards, etc.) - Targets []*TaskTarget `protobuf:"bytes,8,rep,name=targets,proto3" json:"targets,omitempty"` // Target locations (destinations, new replicas, etc.) - // Typed task parameters - // - // Types that are valid to be assigned to TaskParams: - // - // *TaskParams_VacuumParams - // *TaskParams_ErasureCodingParams - // *TaskParams_BalanceParams - // *TaskParams_ReplicationParams - TaskParams isTaskParams_TaskParams `protobuf_oneof:"task_params"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskParams) Reset() { - *x = TaskParams{} - mi := &file_worker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskParams) ProtoMessage() {} - -func (x *TaskParams) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskParams.ProtoReflect.Descriptor instead. -func (*TaskParams) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{8} -} - -func (x *TaskParams) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskParams) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *TaskParams) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *TaskParams) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *TaskParams) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - -func (x *TaskParams) GetVolumeSize() uint64 { - if x != nil { - return x.VolumeSize - } - return 0 -} - -func (x *TaskParams) GetSources() []*TaskSource { - if x != nil { - return x.Sources - } - return nil -} - -func (x *TaskParams) GetTargets() []*TaskTarget { - if x != nil { - return x.Targets - } - return nil -} - -func (x *TaskParams) GetTaskParams() isTaskParams_TaskParams { - if x != nil { - return x.TaskParams - } - return nil -} - -func (x *TaskParams) GetVacuumParams() *VacuumTaskParams { - if x != nil { - if x, ok := x.TaskParams.(*TaskParams_VacuumParams); ok { - return x.VacuumParams - } - } - return nil -} - -func (x *TaskParams) GetErasureCodingParams() *ErasureCodingTaskParams { - if x != nil { - if x, ok := x.TaskParams.(*TaskParams_ErasureCodingParams); ok { - return x.ErasureCodingParams - } - } - return nil -} - -func (x *TaskParams) GetBalanceParams() *BalanceTaskParams { - if x != nil { - if x, ok := x.TaskParams.(*TaskParams_BalanceParams); ok { - return x.BalanceParams - } - } - return nil -} - -func (x *TaskParams) GetReplicationParams() *ReplicationTaskParams { - if x != nil { - if x, ok := x.TaskParams.(*TaskParams_ReplicationParams); ok { - return x.ReplicationParams - } - } - return nil -} - -type isTaskParams_TaskParams interface { - isTaskParams_TaskParams() -} - -type TaskParams_VacuumParams struct { - VacuumParams *VacuumTaskParams `protobuf:"bytes,9,opt,name=vacuum_params,json=vacuumParams,proto3,oneof"` -} - -type TaskParams_ErasureCodingParams struct { - ErasureCodingParams *ErasureCodingTaskParams `protobuf:"bytes,10,opt,name=erasure_coding_params,json=erasureCodingParams,proto3,oneof"` -} - -type TaskParams_BalanceParams struct { - BalanceParams *BalanceTaskParams `protobuf:"bytes,11,opt,name=balance_params,json=balanceParams,proto3,oneof"` -} - -type TaskParams_ReplicationParams struct { - ReplicationParams *ReplicationTaskParams `protobuf:"bytes,12,opt,name=replication_params,json=replicationParams,proto3,oneof"` -} - -func (*TaskParams_VacuumParams) isTaskParams_TaskParams() {} - -func (*TaskParams_ErasureCodingParams) isTaskParams_TaskParams() {} - -func (*TaskParams_BalanceParams) isTaskParams_TaskParams() {} - -func (*TaskParams_ReplicationParams) isTaskParams_TaskParams() {} - -// VacuumTaskParams for vacuum operations -type VacuumTaskParams struct { - state protoimpl.MessageState `protogen:"open.v1"` - GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum - ForceVacuum bool `protobuf:"varint,2,opt,name=force_vacuum,json=forceVacuum,proto3" json:"force_vacuum,omitempty"` // Force vacuum even if below threshold - BatchSize int32 `protobuf:"varint,3,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` // Number of files to process per batch - WorkingDir string `protobuf:"bytes,4,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for temporary files - VerifyChecksum bool `protobuf:"varint,5,opt,name=verify_checksum,json=verifyChecksum,proto3" json:"verify_checksum,omitempty"` // Verify file checksums during vacuum - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VacuumTaskParams) Reset() { - *x = VacuumTaskParams{} - mi := &file_worker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VacuumTaskParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VacuumTaskParams) ProtoMessage() {} - -func (x *VacuumTaskParams) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VacuumTaskParams.ProtoReflect.Descriptor instead. -func (*VacuumTaskParams) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{9} -} - -func (x *VacuumTaskParams) GetGarbageThreshold() float64 { - if x != nil { - return x.GarbageThreshold - } - return 0 -} - -func (x *VacuumTaskParams) GetForceVacuum() bool { - if x != nil { - return x.ForceVacuum - } - return false -} - -func (x *VacuumTaskParams) GetBatchSize() int32 { - if x != nil { - return x.BatchSize - } - return 0 -} - -func (x *VacuumTaskParams) GetWorkingDir() string { - if x != nil { - return x.WorkingDir - } - return "" -} - -func (x *VacuumTaskParams) GetVerifyChecksum() bool { - if x != nil { - return x.VerifyChecksum - } - return false -} - -// ErasureCodingTaskParams for EC encoding operations -type ErasureCodingTaskParams struct { - state protoimpl.MessageState `protogen:"open.v1"` - EstimatedShardSize uint64 `protobuf:"varint,1,opt,name=estimated_shard_size,json=estimatedShardSize,proto3" json:"estimated_shard_size,omitempty"` // Estimated size per shard - DataShards int32 `protobuf:"varint,2,opt,name=data_shards,json=dataShards,proto3" json:"data_shards,omitempty"` // Number of data shards (default: 10) - ParityShards int32 `protobuf:"varint,3,opt,name=parity_shards,json=parityShards,proto3" json:"parity_shards,omitempty"` // Number of parity shards (default: 4) - WorkingDir string `protobuf:"bytes,4,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` // Working directory for EC processing - MasterClient string `protobuf:"bytes,5,opt,name=master_client,json=masterClient,proto3" json:"master_client,omitempty"` // Master server address - CleanupSource bool `protobuf:"varint,6,opt,name=cleanup_source,json=cleanupSource,proto3" json:"cleanup_source,omitempty"` // Whether to cleanup source volume after EC - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ErasureCodingTaskParams) Reset() { - *x = ErasureCodingTaskParams{} - mi := &file_worker_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ErasureCodingTaskParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErasureCodingTaskParams) ProtoMessage() {} - -func (x *ErasureCodingTaskParams) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErasureCodingTaskParams.ProtoReflect.Descriptor instead. -func (*ErasureCodingTaskParams) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{10} -} - -func (x *ErasureCodingTaskParams) GetEstimatedShardSize() uint64 { - if x != nil { - return x.EstimatedShardSize - } - return 0 -} - -func (x *ErasureCodingTaskParams) GetDataShards() int32 { - if x != nil { - return x.DataShards - } - return 0 -} - -func (x *ErasureCodingTaskParams) GetParityShards() int32 { - if x != nil { - return x.ParityShards - } - return 0 -} - -func (x *ErasureCodingTaskParams) GetWorkingDir() string { - if x != nil { - return x.WorkingDir - } - return "" -} - -func (x *ErasureCodingTaskParams) GetMasterClient() string { - if x != nil { - return x.MasterClient - } - return "" -} - -func (x *ErasureCodingTaskParams) GetCleanupSource() bool { - if x != nil { - return x.CleanupSource - } - return false -} - -// TaskSource represents a unified source location for any task type -type TaskSource struct { - state protoimpl.MessageState `protogen:"open.v1"` - Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Source server address - DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Source disk ID - Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Source rack for tracking - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Source data center for tracking - VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Volume ID (for volume operations) - ShardIds []uint32 `protobuf:"varint,6,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // Shard IDs (for EC shard operations) - EstimatedSize uint64 `protobuf:"varint,7,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated size to be processed - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskSource) Reset() { - *x = TaskSource{} - mi := &file_worker_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskSource) ProtoMessage() {} - -func (x *TaskSource) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskSource.ProtoReflect.Descriptor instead. -func (*TaskSource) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{11} -} - -func (x *TaskSource) GetNode() string { - if x != nil { - return x.Node - } - return "" -} - -func (x *TaskSource) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - -func (x *TaskSource) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - -func (x *TaskSource) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *TaskSource) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *TaskSource) GetShardIds() []uint32 { - if x != nil { - return x.ShardIds - } - return nil -} - -func (x *TaskSource) GetEstimatedSize() uint64 { - if x != nil { - return x.EstimatedSize - } - return 0 -} - -// TaskTarget represents a unified target location for any task type -type TaskTarget struct { - state protoimpl.MessageState `protogen:"open.v1"` - Node string `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` // Target server address - DiskId uint32 `protobuf:"varint,2,opt,name=disk_id,json=diskId,proto3" json:"disk_id,omitempty"` // Target disk ID - Rack string `protobuf:"bytes,3,opt,name=rack,proto3" json:"rack,omitempty"` // Target rack for tracking - DataCenter string `protobuf:"bytes,4,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // Target data center for tracking - VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Volume ID (for volume operations) - ShardIds []uint32 `protobuf:"varint,6,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` // Shard IDs (for EC shard operations) - EstimatedSize uint64 `protobuf:"varint,7,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` // Estimated size to be created - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskTarget) Reset() { - *x = TaskTarget{} - mi := &file_worker_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskTarget) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskTarget) ProtoMessage() {} - -func (x *TaskTarget) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskTarget.ProtoReflect.Descriptor instead. -func (*TaskTarget) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{12} -} - -func (x *TaskTarget) GetNode() string { - if x != nil { - return x.Node - } - return "" -} - -func (x *TaskTarget) GetDiskId() uint32 { - if x != nil { - return x.DiskId - } - return 0 -} - -func (x *TaskTarget) GetRack() string { - if x != nil { - return x.Rack - } - return "" -} - -func (x *TaskTarget) GetDataCenter() string { - if x != nil { - return x.DataCenter - } - return "" -} - -func (x *TaskTarget) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *TaskTarget) GetShardIds() []uint32 { - if x != nil { - return x.ShardIds - } - return nil -} - -func (x *TaskTarget) GetEstimatedSize() uint64 { - if x != nil { - return x.EstimatedSize - } - return 0 -} - -// BalanceTaskParams for volume balancing operations -type BalanceTaskParams struct { - state protoimpl.MessageState `protogen:"open.v1"` - ForceMove bool `protobuf:"varint,1,opt,name=force_move,json=forceMove,proto3" json:"force_move,omitempty"` // Force move even with conflicts - TimeoutSeconds int32 `protobuf:"varint,2,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` // Operation timeout - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BalanceTaskParams) Reset() { - *x = BalanceTaskParams{} - mi := &file_worker_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BalanceTaskParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BalanceTaskParams) ProtoMessage() {} - -func (x *BalanceTaskParams) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BalanceTaskParams.ProtoReflect.Descriptor instead. -func (*BalanceTaskParams) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{13} -} - -func (x *BalanceTaskParams) GetForceMove() bool { - if x != nil { - return x.ForceMove - } - return false -} - -func (x *BalanceTaskParams) GetTimeoutSeconds() int32 { - if x != nil { - return x.TimeoutSeconds - } - return 0 -} - -// ReplicationTaskParams for adding replicas -type ReplicationTaskParams struct { - state protoimpl.MessageState `protogen:"open.v1"` - ReplicaCount int32 `protobuf:"varint,1,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // Target replica count - VerifyConsistency bool `protobuf:"varint,2,opt,name=verify_consistency,json=verifyConsistency,proto3" json:"verify_consistency,omitempty"` // Verify replica consistency after creation - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReplicationTaskParams) Reset() { - *x = ReplicationTaskParams{} - mi := &file_worker_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReplicationTaskParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplicationTaskParams) ProtoMessage() {} - -func (x *ReplicationTaskParams) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplicationTaskParams.ProtoReflect.Descriptor instead. -func (*ReplicationTaskParams) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{14} -} - -func (x *ReplicationTaskParams) GetReplicaCount() int32 { - if x != nil { - return x.ReplicaCount - } - return 0 -} - -func (x *ReplicationTaskParams) GetVerifyConsistency() bool { - if x != nil { - return x.VerifyConsistency - } - return false -} - -// TaskUpdate reports task progress -type TaskUpdate struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - Progress float32 `protobuf:"fixed32,4,opt,name=progress,proto3" json:"progress,omitempty"` - Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` - Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskUpdate) Reset() { - *x = TaskUpdate{} - mi := &file_worker_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskUpdate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskUpdate) ProtoMessage() {} - -func (x *TaskUpdate) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskUpdate.ProtoReflect.Descriptor instead. -func (*TaskUpdate) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{15} -} - -func (x *TaskUpdate) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskUpdate) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskUpdate) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *TaskUpdate) GetProgress() float32 { - if x != nil { - return x.Progress - } - return 0 -} - -func (x *TaskUpdate) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *TaskUpdate) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -// TaskComplete reports task completion -type TaskComplete struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"` - ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - CompletionTime int64 `protobuf:"varint,5,opt,name=completion_time,json=completionTime,proto3" json:"completion_time,omitempty"` - ResultMetadata map[string]string `protobuf:"bytes,6,rep,name=result_metadata,json=resultMetadata,proto3" json:"result_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskComplete) Reset() { - *x = TaskComplete{} - mi := &file_worker_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskComplete) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskComplete) ProtoMessage() {} - -func (x *TaskComplete) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskComplete.ProtoReflect.Descriptor instead. -func (*TaskComplete) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{16} -} - -func (x *TaskComplete) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskComplete) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskComplete) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *TaskComplete) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -func (x *TaskComplete) GetCompletionTime() int64 { - if x != nil { - return x.CompletionTime - } - return 0 -} - -func (x *TaskComplete) GetResultMetadata() map[string]string { - if x != nil { - return x.ResultMetadata - } - return nil -} - -// TaskCancellation from admin to cancel a task -type TaskCancellation struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskCancellation) Reset() { - *x = TaskCancellation{} - mi := &file_worker_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskCancellation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskCancellation) ProtoMessage() {} - -func (x *TaskCancellation) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskCancellation.ProtoReflect.Descriptor instead. -func (*TaskCancellation) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{17} -} - -func (x *TaskCancellation) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskCancellation) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *TaskCancellation) GetForce() bool { - if x != nil { - return x.Force - } - return false -} - -// WorkerShutdown notifies admin that worker is shutting down -type WorkerShutdown struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` - PendingTaskIds []string `protobuf:"bytes,3,rep,name=pending_task_ids,json=pendingTaskIds,proto3" json:"pending_task_ids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *WorkerShutdown) Reset() { - *x = WorkerShutdown{} - mi := &file_worker_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *WorkerShutdown) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkerShutdown) ProtoMessage() {} - -func (x *WorkerShutdown) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkerShutdown.ProtoReflect.Descriptor instead. -func (*WorkerShutdown) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{18} -} - -func (x *WorkerShutdown) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *WorkerShutdown) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *WorkerShutdown) GetPendingTaskIds() []string { - if x != nil { - return x.PendingTaskIds - } - return nil -} - -// AdminShutdown notifies worker that admin is shutting down -type AdminShutdown struct { - state protoimpl.MessageState `protogen:"open.v1"` - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - GracefulShutdownSeconds int32 `protobuf:"varint,2,opt,name=graceful_shutdown_seconds,json=gracefulShutdownSeconds,proto3" json:"graceful_shutdown_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *AdminShutdown) Reset() { - *x = AdminShutdown{} - mi := &file_worker_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AdminShutdown) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdminShutdown) ProtoMessage() {} - -func (x *AdminShutdown) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdminShutdown.ProtoReflect.Descriptor instead. -func (*AdminShutdown) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{19} -} - -func (x *AdminShutdown) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *AdminShutdown) GetGracefulShutdownSeconds() int32 { - if x != nil { - return x.GracefulShutdownSeconds - } - return 0 -} - -// TaskLogRequest requests logs for a specific task -type TaskLogRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - IncludeMetadata bool `protobuf:"varint,3,opt,name=include_metadata,json=includeMetadata,proto3" json:"include_metadata,omitempty"` // Include task metadata - MaxEntries int32 `protobuf:"varint,4,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` // Maximum number of log entries (0 = all) - LogLevel string `protobuf:"bytes,5,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"` // Filter by log level (INFO, WARNING, ERROR, DEBUG) - StartTime int64 `protobuf:"varint,6,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` // Unix timestamp for start time filter - EndTime int64 `protobuf:"varint,7,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` // Unix timestamp for end time filter - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskLogRequest) Reset() { - *x = TaskLogRequest{} - mi := &file_worker_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskLogRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskLogRequest) ProtoMessage() {} - -func (x *TaskLogRequest) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskLogRequest.ProtoReflect.Descriptor instead. -func (*TaskLogRequest) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{20} -} - -func (x *TaskLogRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskLogRequest) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskLogRequest) GetIncludeMetadata() bool { - if x != nil { - return x.IncludeMetadata - } - return false -} - -func (x *TaskLogRequest) GetMaxEntries() int32 { - if x != nil { - return x.MaxEntries - } - return 0 -} - -func (x *TaskLogRequest) GetLogLevel() string { - if x != nil { - return x.LogLevel - } - return "" -} - -func (x *TaskLogRequest) GetStartTime() int64 { - if x != nil { - return x.StartTime - } - return 0 -} - -func (x *TaskLogRequest) GetEndTime() int64 { - if x != nil { - return x.EndTime - } - return 0 -} - -// TaskLogResponse returns task logs and metadata -type TaskLogResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - WorkerId string `protobuf:"bytes,2,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"` - ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - Metadata *TaskLogMetadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` - LogEntries []*TaskLogEntry `protobuf:"bytes,6,rep,name=log_entries,json=logEntries,proto3" json:"log_entries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskLogResponse) Reset() { - *x = TaskLogResponse{} - mi := &file_worker_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskLogResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskLogResponse) ProtoMessage() {} - -func (x *TaskLogResponse) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskLogResponse.ProtoReflect.Descriptor instead. -func (*TaskLogResponse) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{21} -} - -func (x *TaskLogResponse) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskLogResponse) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskLogResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *TaskLogResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -func (x *TaskLogResponse) GetMetadata() *TaskLogMetadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *TaskLogResponse) GetLogEntries() []*TaskLogEntry { - if x != nil { - return x.LogEntries - } - return nil -} - -// TaskLogMetadata contains metadata about task execution -type TaskLogMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - TaskType string `protobuf:"bytes,2,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` - WorkerId string `protobuf:"bytes,3,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - StartTime int64 `protobuf:"varint,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime int64 `protobuf:"varint,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` - Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"` - Progress float32 `protobuf:"fixed32,8,opt,name=progress,proto3" json:"progress,omitempty"` - VolumeId uint32 `protobuf:"varint,9,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Server string `protobuf:"bytes,10,opt,name=server,proto3" json:"server,omitempty"` - Collection string `protobuf:"bytes,11,opt,name=collection,proto3" json:"collection,omitempty"` - LogFilePath string `protobuf:"bytes,12,opt,name=log_file_path,json=logFilePath,proto3" json:"log_file_path,omitempty"` - CreatedAt int64 `protobuf:"varint,13,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - CustomData map[string]string `protobuf:"bytes,14,rep,name=custom_data,json=customData,proto3" json:"custom_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskLogMetadata) Reset() { - *x = TaskLogMetadata{} - mi := &file_worker_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskLogMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskLogMetadata) ProtoMessage() {} - -func (x *TaskLogMetadata) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskLogMetadata.ProtoReflect.Descriptor instead. -func (*TaskLogMetadata) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{22} -} - -func (x *TaskLogMetadata) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *TaskLogMetadata) GetTaskType() string { - if x != nil { - return x.TaskType - } - return "" -} - -func (x *TaskLogMetadata) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskLogMetadata) GetStartTime() int64 { - if x != nil { - return x.StartTime - } - return 0 -} - -func (x *TaskLogMetadata) GetEndTime() int64 { - if x != nil { - return x.EndTime - } - return 0 -} - -func (x *TaskLogMetadata) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -func (x *TaskLogMetadata) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *TaskLogMetadata) GetProgress() float32 { - if x != nil { - return x.Progress - } - return 0 -} - -func (x *TaskLogMetadata) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *TaskLogMetadata) GetServer() string { - if x != nil { - return x.Server - } - return "" -} - -func (x *TaskLogMetadata) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *TaskLogMetadata) GetLogFilePath() string { - if x != nil { - return x.LogFilePath - } - return "" -} - -func (x *TaskLogMetadata) GetCreatedAt() int64 { - if x != nil { - return x.CreatedAt - } - return 0 -} - -func (x *TaskLogMetadata) GetCustomData() map[string]string { - if x != nil { - return x.CustomData - } - return nil -} - -// TaskLogEntry represents a single log entry -type TaskLogEntry struct { - state protoimpl.MessageState `protogen:"open.v1"` - Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Level string `protobuf:"bytes,2,opt,name=level,proto3" json:"level,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - Fields map[string]string `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Progress float32 `protobuf:"fixed32,5,opt,name=progress,proto3" json:"progress,omitempty"` - Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskLogEntry) Reset() { - *x = TaskLogEntry{} - mi := &file_worker_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskLogEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskLogEntry) ProtoMessage() {} - -func (x *TaskLogEntry) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[23] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskLogEntry.ProtoReflect.Descriptor instead. -func (*TaskLogEntry) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{23} -} - -func (x *TaskLogEntry) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -func (x *TaskLogEntry) GetLevel() string { - if x != nil { - return x.Level - } - return "" -} - -func (x *TaskLogEntry) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *TaskLogEntry) GetFields() map[string]string { - if x != nil { - return x.Fields - } - return nil -} - -func (x *TaskLogEntry) GetProgress() float32 { - if x != nil { - return x.Progress - } - return 0 -} - -func (x *TaskLogEntry) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -// MaintenanceConfig holds configuration for the maintenance system -type MaintenanceConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - ScanIntervalSeconds int32 `protobuf:"varint,2,opt,name=scan_interval_seconds,json=scanIntervalSeconds,proto3" json:"scan_interval_seconds,omitempty"` // How often to scan for maintenance needs - WorkerTimeoutSeconds int32 `protobuf:"varint,3,opt,name=worker_timeout_seconds,json=workerTimeoutSeconds,proto3" json:"worker_timeout_seconds,omitempty"` // Worker heartbeat timeout - TaskTimeoutSeconds int32 `protobuf:"varint,4,opt,name=task_timeout_seconds,json=taskTimeoutSeconds,proto3" json:"task_timeout_seconds,omitempty"` // Individual task timeout - RetryDelaySeconds int32 `protobuf:"varint,5,opt,name=retry_delay_seconds,json=retryDelaySeconds,proto3" json:"retry_delay_seconds,omitempty"` // Delay between retries - MaxRetries int32 `protobuf:"varint,6,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` // Default max retries for tasks - CleanupIntervalSeconds int32 `protobuf:"varint,7,opt,name=cleanup_interval_seconds,json=cleanupIntervalSeconds,proto3" json:"cleanup_interval_seconds,omitempty"` // How often to clean up old tasks - TaskRetentionSeconds int32 `protobuf:"varint,8,opt,name=task_retention_seconds,json=taskRetentionSeconds,proto3" json:"task_retention_seconds,omitempty"` // How long to keep completed/failed tasks - Policy *MaintenancePolicy `protobuf:"bytes,9,opt,name=policy,proto3" json:"policy,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MaintenanceConfig) Reset() { - *x = MaintenanceConfig{} - mi := &file_worker_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MaintenanceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaintenanceConfig) ProtoMessage() {} - -func (x *MaintenanceConfig) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaintenanceConfig.ProtoReflect.Descriptor instead. -func (*MaintenanceConfig) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{24} -} - -func (x *MaintenanceConfig) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *MaintenanceConfig) GetScanIntervalSeconds() int32 { - if x != nil { - return x.ScanIntervalSeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetWorkerTimeoutSeconds() int32 { - if x != nil { - return x.WorkerTimeoutSeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetTaskTimeoutSeconds() int32 { - if x != nil { - return x.TaskTimeoutSeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetRetryDelaySeconds() int32 { - if x != nil { - return x.RetryDelaySeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetMaxRetries() int32 { - if x != nil { - return x.MaxRetries - } - return 0 -} - -func (x *MaintenanceConfig) GetCleanupIntervalSeconds() int32 { - if x != nil { - return x.CleanupIntervalSeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetTaskRetentionSeconds() int32 { - if x != nil { - return x.TaskRetentionSeconds - } - return 0 -} - -func (x *MaintenanceConfig) GetPolicy() *MaintenancePolicy { - if x != nil { - return x.Policy - } - return nil -} - -// MaintenancePolicy defines policies for maintenance operations -type MaintenancePolicy struct { - state protoimpl.MessageState `protogen:"open.v1"` - TaskPolicies map[string]*TaskPolicy `protobuf:"bytes,1,rep,name=task_policies,json=taskPolicies,proto3" json:"task_policies,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Task type -> policy mapping - GlobalMaxConcurrent int32 `protobuf:"varint,2,opt,name=global_max_concurrent,json=globalMaxConcurrent,proto3" json:"global_max_concurrent,omitempty"` // Overall limit across all task types - DefaultRepeatIntervalSeconds int32 `protobuf:"varint,3,opt,name=default_repeat_interval_seconds,json=defaultRepeatIntervalSeconds,proto3" json:"default_repeat_interval_seconds,omitempty"` // Default seconds if task doesn't specify - DefaultCheckIntervalSeconds int32 `protobuf:"varint,4,opt,name=default_check_interval_seconds,json=defaultCheckIntervalSeconds,proto3" json:"default_check_interval_seconds,omitempty"` // Default seconds for periodic checks - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MaintenancePolicy) Reset() { - *x = MaintenancePolicy{} - mi := &file_worker_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MaintenancePolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaintenancePolicy) ProtoMessage() {} - -func (x *MaintenancePolicy) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaintenancePolicy.ProtoReflect.Descriptor instead. -func (*MaintenancePolicy) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{25} -} - -func (x *MaintenancePolicy) GetTaskPolicies() map[string]*TaskPolicy { - if x != nil { - return x.TaskPolicies - } - return nil -} - -func (x *MaintenancePolicy) GetGlobalMaxConcurrent() int32 { - if x != nil { - return x.GlobalMaxConcurrent - } - return 0 -} - -func (x *MaintenancePolicy) GetDefaultRepeatIntervalSeconds() int32 { - if x != nil { - return x.DefaultRepeatIntervalSeconds - } - return 0 -} - -func (x *MaintenancePolicy) GetDefaultCheckIntervalSeconds() int32 { - if x != nil { - return x.DefaultCheckIntervalSeconds - } - return 0 -} - -// TaskPolicy represents configuration for a specific task type -type TaskPolicy struct { - state protoimpl.MessageState `protogen:"open.v1"` - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - MaxConcurrent int32 `protobuf:"varint,2,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - RepeatIntervalSeconds int32 `protobuf:"varint,3,opt,name=repeat_interval_seconds,json=repeatIntervalSeconds,proto3" json:"repeat_interval_seconds,omitempty"` // Seconds to wait before repeating - CheckIntervalSeconds int32 `protobuf:"varint,4,opt,name=check_interval_seconds,json=checkIntervalSeconds,proto3" json:"check_interval_seconds,omitempty"` // Seconds between checks - // Typed task-specific configuration (replaces generic map) - // - // Types that are valid to be assigned to TaskConfig: - // - // *TaskPolicy_VacuumConfig - // *TaskPolicy_ErasureCodingConfig - // *TaskPolicy_BalanceConfig - // *TaskPolicy_ReplicationConfig - TaskConfig isTaskPolicy_TaskConfig `protobuf_oneof:"task_config"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskPolicy) Reset() { - *x = TaskPolicy{} - mi := &file_worker_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskPolicy) ProtoMessage() {} - -func (x *TaskPolicy) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskPolicy.ProtoReflect.Descriptor instead. -func (*TaskPolicy) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{26} -} - -func (x *TaskPolicy) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *TaskPolicy) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *TaskPolicy) GetRepeatIntervalSeconds() int32 { - if x != nil { - return x.RepeatIntervalSeconds - } - return 0 -} - -func (x *TaskPolicy) GetCheckIntervalSeconds() int32 { - if x != nil { - return x.CheckIntervalSeconds - } - return 0 -} - -func (x *TaskPolicy) GetTaskConfig() isTaskPolicy_TaskConfig { - if x != nil { - return x.TaskConfig - } - return nil -} - -func (x *TaskPolicy) GetVacuumConfig() *VacuumTaskConfig { - if x != nil { - if x, ok := x.TaskConfig.(*TaskPolicy_VacuumConfig); ok { - return x.VacuumConfig - } - } - return nil -} - -func (x *TaskPolicy) GetErasureCodingConfig() *ErasureCodingTaskConfig { - if x != nil { - if x, ok := x.TaskConfig.(*TaskPolicy_ErasureCodingConfig); ok { - return x.ErasureCodingConfig - } - } - return nil -} - -func (x *TaskPolicy) GetBalanceConfig() *BalanceTaskConfig { - if x != nil { - if x, ok := x.TaskConfig.(*TaskPolicy_BalanceConfig); ok { - return x.BalanceConfig - } - } - return nil -} - -func (x *TaskPolicy) GetReplicationConfig() *ReplicationTaskConfig { - if x != nil { - if x, ok := x.TaskConfig.(*TaskPolicy_ReplicationConfig); ok { - return x.ReplicationConfig - } - } - return nil -} - -type isTaskPolicy_TaskConfig interface { - isTaskPolicy_TaskConfig() -} - -type TaskPolicy_VacuumConfig struct { - VacuumConfig *VacuumTaskConfig `protobuf:"bytes,5,opt,name=vacuum_config,json=vacuumConfig,proto3,oneof"` -} - -type TaskPolicy_ErasureCodingConfig struct { - ErasureCodingConfig *ErasureCodingTaskConfig `protobuf:"bytes,6,opt,name=erasure_coding_config,json=erasureCodingConfig,proto3,oneof"` -} - -type TaskPolicy_BalanceConfig struct { - BalanceConfig *BalanceTaskConfig `protobuf:"bytes,7,opt,name=balance_config,json=balanceConfig,proto3,oneof"` -} - -type TaskPolicy_ReplicationConfig struct { - ReplicationConfig *ReplicationTaskConfig `protobuf:"bytes,8,opt,name=replication_config,json=replicationConfig,proto3,oneof"` -} - -func (*TaskPolicy_VacuumConfig) isTaskPolicy_TaskConfig() {} - -func (*TaskPolicy_ErasureCodingConfig) isTaskPolicy_TaskConfig() {} - -func (*TaskPolicy_BalanceConfig) isTaskPolicy_TaskConfig() {} - -func (*TaskPolicy_ReplicationConfig) isTaskPolicy_TaskConfig() {} - -// VacuumTaskConfig contains vacuum-specific configuration -type VacuumTaskConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - GarbageThreshold float64 `protobuf:"fixed64,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` // Minimum garbage ratio to trigger vacuum (0.0-1.0) - MinVolumeAgeHours int32 `protobuf:"varint,2,opt,name=min_volume_age_hours,json=minVolumeAgeHours,proto3" json:"min_volume_age_hours,omitempty"` // Minimum age before vacuum is considered - MinIntervalSeconds int32 `protobuf:"varint,3,opt,name=min_interval_seconds,json=minIntervalSeconds,proto3" json:"min_interval_seconds,omitempty"` // Minimum time between vacuum operations on the same volume - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VacuumTaskConfig) Reset() { - *x = VacuumTaskConfig{} - mi := &file_worker_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VacuumTaskConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VacuumTaskConfig) ProtoMessage() {} - -func (x *VacuumTaskConfig) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VacuumTaskConfig.ProtoReflect.Descriptor instead. -func (*VacuumTaskConfig) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{27} -} - -func (x *VacuumTaskConfig) GetGarbageThreshold() float64 { - if x != nil { - return x.GarbageThreshold - } - return 0 -} - -func (x *VacuumTaskConfig) GetMinVolumeAgeHours() int32 { - if x != nil { - return x.MinVolumeAgeHours - } - return 0 -} - -func (x *VacuumTaskConfig) GetMinIntervalSeconds() int32 { - if x != nil { - return x.MinIntervalSeconds - } - return 0 -} - -// ErasureCodingTaskConfig contains EC-specific configuration -type ErasureCodingTaskConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - FullnessRatio float64 `protobuf:"fixed64,1,opt,name=fullness_ratio,json=fullnessRatio,proto3" json:"fullness_ratio,omitempty"` // Minimum fullness ratio to trigger EC (0.0-1.0) - QuietForSeconds int32 `protobuf:"varint,2,opt,name=quiet_for_seconds,json=quietForSeconds,proto3" json:"quiet_for_seconds,omitempty"` // Minimum quiet time before EC - MinVolumeSizeMb int32 `protobuf:"varint,3,opt,name=min_volume_size_mb,json=minVolumeSizeMb,proto3" json:"min_volume_size_mb,omitempty"` // Minimum volume size for EC - CollectionFilter string `protobuf:"bytes,4,opt,name=collection_filter,json=collectionFilter,proto3" json:"collection_filter,omitempty"` // Only process volumes from specific collections - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ErasureCodingTaskConfig) Reset() { - *x = ErasureCodingTaskConfig{} - mi := &file_worker_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ErasureCodingTaskConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErasureCodingTaskConfig) ProtoMessage() {} - -func (x *ErasureCodingTaskConfig) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErasureCodingTaskConfig.ProtoReflect.Descriptor instead. -func (*ErasureCodingTaskConfig) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{28} -} - -func (x *ErasureCodingTaskConfig) GetFullnessRatio() float64 { - if x != nil { - return x.FullnessRatio - } - return 0 -} - -func (x *ErasureCodingTaskConfig) GetQuietForSeconds() int32 { - if x != nil { - return x.QuietForSeconds - } - return 0 -} - -func (x *ErasureCodingTaskConfig) GetMinVolumeSizeMb() int32 { - if x != nil { - return x.MinVolumeSizeMb - } - return 0 -} - -func (x *ErasureCodingTaskConfig) GetCollectionFilter() string { - if x != nil { - return x.CollectionFilter - } - return "" -} - -// BalanceTaskConfig contains balance-specific configuration -type BalanceTaskConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - ImbalanceThreshold float64 `protobuf:"fixed64,1,opt,name=imbalance_threshold,json=imbalanceThreshold,proto3" json:"imbalance_threshold,omitempty"` // Threshold for triggering rebalancing (0.0-1.0) - MinServerCount int32 `protobuf:"varint,2,opt,name=min_server_count,json=minServerCount,proto3" json:"min_server_count,omitempty"` // Minimum number of servers required for balancing - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BalanceTaskConfig) Reset() { - *x = BalanceTaskConfig{} - mi := &file_worker_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BalanceTaskConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BalanceTaskConfig) ProtoMessage() {} - -func (x *BalanceTaskConfig) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BalanceTaskConfig.ProtoReflect.Descriptor instead. -func (*BalanceTaskConfig) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{29} -} - -func (x *BalanceTaskConfig) GetImbalanceThreshold() float64 { - if x != nil { - return x.ImbalanceThreshold - } - return 0 -} - -func (x *BalanceTaskConfig) GetMinServerCount() int32 { - if x != nil { - return x.MinServerCount - } - return 0 -} - -// ReplicationTaskConfig contains replication-specific configuration -type ReplicationTaskConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - TargetReplicaCount int32 `protobuf:"varint,1,opt,name=target_replica_count,json=targetReplicaCount,proto3" json:"target_replica_count,omitempty"` // Target number of replicas - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ReplicationTaskConfig) Reset() { - *x = ReplicationTaskConfig{} - mi := &file_worker_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ReplicationTaskConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReplicationTaskConfig) ProtoMessage() {} - -func (x *ReplicationTaskConfig) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReplicationTaskConfig.ProtoReflect.Descriptor instead. -func (*ReplicationTaskConfig) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{30} -} - -func (x *ReplicationTaskConfig) GetTargetReplicaCount() int32 { - if x != nil { - return x.TargetReplicaCount - } - return 0 -} - -// MaintenanceTaskData represents complete task state for persistence -type MaintenanceTaskData struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - Priority string `protobuf:"bytes,3,opt,name=priority,proto3" json:"priority,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` - VolumeId uint32 `protobuf:"varint,5,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - Server string `protobuf:"bytes,6,opt,name=server,proto3" json:"server,omitempty"` - Collection string `protobuf:"bytes,7,opt,name=collection,proto3" json:"collection,omitempty"` - TypedParams *TaskParams `protobuf:"bytes,8,opt,name=typed_params,json=typedParams,proto3" json:"typed_params,omitempty"` - Reason string `protobuf:"bytes,9,opt,name=reason,proto3" json:"reason,omitempty"` - CreatedAt int64 `protobuf:"varint,10,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - ScheduledAt int64 `protobuf:"varint,11,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"` - StartedAt int64 `protobuf:"varint,12,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - CompletedAt int64 `protobuf:"varint,13,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` - WorkerId string `protobuf:"bytes,14,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - Error string `protobuf:"bytes,15,opt,name=error,proto3" json:"error,omitempty"` - Progress float64 `protobuf:"fixed64,16,opt,name=progress,proto3" json:"progress,omitempty"` - RetryCount int32 `protobuf:"varint,17,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"` - MaxRetries int32 `protobuf:"varint,18,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` - // Enhanced fields for detailed task tracking - CreatedBy string `protobuf:"bytes,19,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` - CreationContext string `protobuf:"bytes,20,opt,name=creation_context,json=creationContext,proto3" json:"creation_context,omitempty"` - AssignmentHistory []*TaskAssignmentRecord `protobuf:"bytes,21,rep,name=assignment_history,json=assignmentHistory,proto3" json:"assignment_history,omitempty"` - DetailedReason string `protobuf:"bytes,22,opt,name=detailed_reason,json=detailedReason,proto3" json:"detailed_reason,omitempty"` - Tags map[string]string `protobuf:"bytes,23,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - CreationMetrics *TaskCreationMetrics `protobuf:"bytes,24,opt,name=creation_metrics,json=creationMetrics,proto3" json:"creation_metrics,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *MaintenanceTaskData) Reset() { - *x = MaintenanceTaskData{} - mi := &file_worker_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MaintenanceTaskData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MaintenanceTaskData) ProtoMessage() {} - -func (x *MaintenanceTaskData) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[31] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MaintenanceTaskData.ProtoReflect.Descriptor instead. -func (*MaintenanceTaskData) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{31} -} - -func (x *MaintenanceTaskData) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *MaintenanceTaskData) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MaintenanceTaskData) GetPriority() string { - if x != nil { - return x.Priority - } - return "" -} - -func (x *MaintenanceTaskData) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *MaintenanceTaskData) GetVolumeId() uint32 { - if x != nil { - return x.VolumeId - } - return 0 -} - -func (x *MaintenanceTaskData) GetServer() string { - if x != nil { - return x.Server - } - return "" -} - -func (x *MaintenanceTaskData) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -func (x *MaintenanceTaskData) GetTypedParams() *TaskParams { - if x != nil { - return x.TypedParams - } - return nil -} - -func (x *MaintenanceTaskData) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *MaintenanceTaskData) GetCreatedAt() int64 { - if x != nil { - return x.CreatedAt - } - return 0 -} - -func (x *MaintenanceTaskData) GetScheduledAt() int64 { - if x != nil { - return x.ScheduledAt - } - return 0 -} - -func (x *MaintenanceTaskData) GetStartedAt() int64 { - if x != nil { - return x.StartedAt - } - return 0 -} - -func (x *MaintenanceTaskData) GetCompletedAt() int64 { - if x != nil { - return x.CompletedAt - } - return 0 -} - -func (x *MaintenanceTaskData) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *MaintenanceTaskData) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -func (x *MaintenanceTaskData) GetProgress() float64 { - if x != nil { - return x.Progress - } - return 0 -} - -func (x *MaintenanceTaskData) GetRetryCount() int32 { - if x != nil { - return x.RetryCount - } - return 0 -} - -func (x *MaintenanceTaskData) GetMaxRetries() int32 { - if x != nil { - return x.MaxRetries - } - return 0 -} - -func (x *MaintenanceTaskData) GetCreatedBy() string { - if x != nil { - return x.CreatedBy - } - return "" -} - -func (x *MaintenanceTaskData) GetCreationContext() string { - if x != nil { - return x.CreationContext - } - return "" -} - -func (x *MaintenanceTaskData) GetAssignmentHistory() []*TaskAssignmentRecord { - if x != nil { - return x.AssignmentHistory - } - return nil -} - -func (x *MaintenanceTaskData) GetDetailedReason() string { - if x != nil { - return x.DetailedReason - } - return "" -} - -func (x *MaintenanceTaskData) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *MaintenanceTaskData) GetCreationMetrics() *TaskCreationMetrics { - if x != nil { - return x.CreationMetrics - } - return nil -} - -// TaskAssignmentRecord tracks worker assignments for a task -type TaskAssignmentRecord struct { - state protoimpl.MessageState `protogen:"open.v1"` - WorkerId string `protobuf:"bytes,1,opt,name=worker_id,json=workerId,proto3" json:"worker_id,omitempty"` - WorkerAddress string `protobuf:"bytes,2,opt,name=worker_address,json=workerAddress,proto3" json:"worker_address,omitempty"` - AssignedAt int64 `protobuf:"varint,3,opt,name=assigned_at,json=assignedAt,proto3" json:"assigned_at,omitempty"` - UnassignedAt int64 `protobuf:"varint,4,opt,name=unassigned_at,json=unassignedAt,proto3" json:"unassigned_at,omitempty"` // Optional: when worker was unassigned - Reason string `protobuf:"bytes,5,opt,name=reason,proto3" json:"reason,omitempty"` // Reason for assignment/unassignment - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskAssignmentRecord) Reset() { - *x = TaskAssignmentRecord{} - mi := &file_worker_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskAssignmentRecord) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskAssignmentRecord) ProtoMessage() {} - -func (x *TaskAssignmentRecord) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[32] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskAssignmentRecord.ProtoReflect.Descriptor instead. -func (*TaskAssignmentRecord) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{32} -} - -func (x *TaskAssignmentRecord) GetWorkerId() string { - if x != nil { - return x.WorkerId - } - return "" -} - -func (x *TaskAssignmentRecord) GetWorkerAddress() string { - if x != nil { - return x.WorkerAddress - } - return "" -} - -func (x *TaskAssignmentRecord) GetAssignedAt() int64 { - if x != nil { - return x.AssignedAt - } - return 0 -} - -func (x *TaskAssignmentRecord) GetUnassignedAt() int64 { - if x != nil { - return x.UnassignedAt - } - return 0 -} - -func (x *TaskAssignmentRecord) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -// TaskCreationMetrics tracks why and how a task was created -type TaskCreationMetrics struct { - state protoimpl.MessageState `protogen:"open.v1"` - TriggerMetric string `protobuf:"bytes,1,opt,name=trigger_metric,json=triggerMetric,proto3" json:"trigger_metric,omitempty"` // Name of metric that triggered creation - MetricValue float64 `protobuf:"fixed64,2,opt,name=metric_value,json=metricValue,proto3" json:"metric_value,omitempty"` // Value that triggered creation - Threshold float64 `protobuf:"fixed64,3,opt,name=threshold,proto3" json:"threshold,omitempty"` // Threshold that was exceeded - VolumeMetrics *VolumeHealthMetrics `protobuf:"bytes,4,opt,name=volume_metrics,json=volumeMetrics,proto3" json:"volume_metrics,omitempty"` // Volume health at creation time - AdditionalData map[string]string `protobuf:"bytes,5,rep,name=additional_data,json=additionalData,proto3" json:"additional_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Additional context data - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskCreationMetrics) Reset() { - *x = TaskCreationMetrics{} - mi := &file_worker_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskCreationMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskCreationMetrics) ProtoMessage() {} - -func (x *TaskCreationMetrics) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[33] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskCreationMetrics.ProtoReflect.Descriptor instead. -func (*TaskCreationMetrics) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{33} -} - -func (x *TaskCreationMetrics) GetTriggerMetric() string { - if x != nil { - return x.TriggerMetric - } - return "" -} - -func (x *TaskCreationMetrics) GetMetricValue() float64 { - if x != nil { - return x.MetricValue - } - return 0 -} - -func (x *TaskCreationMetrics) GetThreshold() float64 { - if x != nil { - return x.Threshold - } - return 0 -} - -func (x *TaskCreationMetrics) GetVolumeMetrics() *VolumeHealthMetrics { - if x != nil { - return x.VolumeMetrics - } - return nil -} - -func (x *TaskCreationMetrics) GetAdditionalData() map[string]string { - if x != nil { - return x.AdditionalData - } - return nil -} - -// VolumeHealthMetrics captures volume state at task creation -type VolumeHealthMetrics struct { - state protoimpl.MessageState `protogen:"open.v1"` - TotalSize uint64 `protobuf:"varint,1,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,2,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` - GarbageSize uint64 `protobuf:"varint,3,opt,name=garbage_size,json=garbageSize,proto3" json:"garbage_size,omitempty"` - GarbageRatio float64 `protobuf:"fixed64,4,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` - FileCount int32 `protobuf:"varint,5,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` - DeletedFileCount int32 `protobuf:"varint,6,opt,name=deleted_file_count,json=deletedFileCount,proto3" json:"deleted_file_count,omitempty"` - LastModified int64 `protobuf:"varint,7,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` - ReplicaCount int32 `protobuf:"varint,8,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` - IsEcVolume bool `protobuf:"varint,9,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` - Collection string `protobuf:"bytes,10,opt,name=collection,proto3" json:"collection,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VolumeHealthMetrics) Reset() { - *x = VolumeHealthMetrics{} - mi := &file_worker_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VolumeHealthMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VolumeHealthMetrics) ProtoMessage() {} - -func (x *VolumeHealthMetrics) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[34] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VolumeHealthMetrics.ProtoReflect.Descriptor instead. -func (*VolumeHealthMetrics) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{34} -} - -func (x *VolumeHealthMetrics) GetTotalSize() uint64 { - if x != nil { - return x.TotalSize - } - return 0 -} - -func (x *VolumeHealthMetrics) GetUsedSize() uint64 { - if x != nil { - return x.UsedSize - } - return 0 -} - -func (x *VolumeHealthMetrics) GetGarbageSize() uint64 { - if x != nil { - return x.GarbageSize - } - return 0 -} - -func (x *VolumeHealthMetrics) GetGarbageRatio() float64 { - if x != nil { - return x.GarbageRatio - } - return 0 -} - -func (x *VolumeHealthMetrics) GetFileCount() int32 { - if x != nil { - return x.FileCount - } - return 0 -} - -func (x *VolumeHealthMetrics) GetDeletedFileCount() int32 { - if x != nil { - return x.DeletedFileCount - } - return 0 -} - -func (x *VolumeHealthMetrics) GetLastModified() int64 { - if x != nil { - return x.LastModified - } - return 0 -} - -func (x *VolumeHealthMetrics) GetReplicaCount() int32 { - if x != nil { - return x.ReplicaCount - } - return 0 -} - -func (x *VolumeHealthMetrics) GetIsEcVolume() bool { - if x != nil { - return x.IsEcVolume - } - return false -} - -func (x *VolumeHealthMetrics) GetCollection() string { - if x != nil { - return x.Collection - } - return "" -} - -// TaskStateFile wraps task data with metadata for persistence -type TaskStateFile struct { - state protoimpl.MessageState `protogen:"open.v1"` - Task *MaintenanceTaskData `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` - LastUpdated int64 `protobuf:"varint,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` - AdminVersion string `protobuf:"bytes,3,opt,name=admin_version,json=adminVersion,proto3" json:"admin_version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *TaskStateFile) Reset() { - *x = TaskStateFile{} - mi := &file_worker_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskStateFile) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskStateFile) ProtoMessage() {} - -func (x *TaskStateFile) ProtoReflect() protoreflect.Message { - mi := &file_worker_proto_msgTypes[35] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskStateFile.ProtoReflect.Descriptor instead. -func (*TaskStateFile) Descriptor() ([]byte, []int) { - return file_worker_proto_rawDescGZIP(), []int{35} -} - -func (x *TaskStateFile) GetTask() *MaintenanceTaskData { - if x != nil { - return x.Task - } - return nil -} - -func (x *TaskStateFile) GetLastUpdated() int64 { - if x != nil { - return x.LastUpdated - } - return 0 -} - -func (x *TaskStateFile) GetAdminVersion() string { - if x != nil { - return x.AdminVersion - } - return "" -} - -var File_worker_proto protoreflect.FileDescriptor - -const file_worker_proto_rawDesc = "" + - "\n" + - "\fworker.proto\x12\tworker_pb\"\x90\x04\n" + - "\rWorkerMessage\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x1c\n" + - "\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12C\n" + - "\fregistration\x18\x03 \x01(\v2\x1d.worker_pb.WorkerRegistrationH\x00R\fregistration\x12:\n" + - "\theartbeat\x18\x04 \x01(\v2\x1a.worker_pb.WorkerHeartbeatH\x00R\theartbeat\x12;\n" + - "\ftask_request\x18\x05 \x01(\v2\x16.worker_pb.TaskRequestH\x00R\vtaskRequest\x128\n" + - "\vtask_update\x18\x06 \x01(\v2\x15.worker_pb.TaskUpdateH\x00R\n" + - "taskUpdate\x12>\n" + - "\rtask_complete\x18\a \x01(\v2\x17.worker_pb.TaskCompleteH\x00R\ftaskComplete\x127\n" + - "\bshutdown\x18\b \x01(\v2\x19.worker_pb.WorkerShutdownH\x00R\bshutdown\x12H\n" + - "\x11task_log_response\x18\t \x01(\v2\x1a.worker_pb.TaskLogResponseH\x00R\x0ftaskLogResponseB\t\n" + - "\amessage\"\x95\x04\n" + - "\fAdminMessage\x12\x19\n" + - "\badmin_id\x18\x01 \x01(\tR\aadminId\x12\x1c\n" + - "\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\x12V\n" + - "\x15registration_response\x18\x03 \x01(\v2\x1f.worker_pb.RegistrationResponseH\x00R\x14registrationResponse\x12M\n" + - "\x12heartbeat_response\x18\x04 \x01(\v2\x1c.worker_pb.HeartbeatResponseH\x00R\x11heartbeatResponse\x12D\n" + - "\x0ftask_assignment\x18\x05 \x01(\v2\x19.worker_pb.TaskAssignmentH\x00R\x0etaskAssignment\x12J\n" + - "\x11task_cancellation\x18\x06 \x01(\v2\x1b.worker_pb.TaskCancellationH\x00R\x10taskCancellation\x12A\n" + - "\x0eadmin_shutdown\x18\a \x01(\v2\x18.worker_pb.AdminShutdownH\x00R\radminShutdown\x12E\n" + - "\x10task_log_request\x18\b \x01(\v2\x19.worker_pb.TaskLogRequestH\x00R\x0etaskLogRequestB\t\n" + - "\amessage\"\x9c\x02\n" + - "\x12WorkerRegistration\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x18\n" + - "\aaddress\x18\x02 \x01(\tR\aaddress\x12\"\n" + - "\fcapabilities\x18\x03 \x03(\tR\fcapabilities\x12%\n" + - "\x0emax_concurrent\x18\x04 \x01(\x05R\rmaxConcurrent\x12G\n" + - "\bmetadata\x18\x05 \x03(\v2+.worker_pb.WorkerRegistration.MetadataEntryR\bmetadata\x1a;\n" + - "\rMetadataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"x\n" + - "\x14RegistrationResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage\x12,\n" + - "\x12assigned_worker_id\x18\x03 \x01(\tR\x10assignedWorkerId\"\xad\x02\n" + - "\x0fWorkerHeartbeat\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x16\n" + - "\x06status\x18\x02 \x01(\tR\x06status\x12!\n" + - "\fcurrent_load\x18\x03 \x01(\x05R\vcurrentLoad\x12%\n" + - "\x0emax_concurrent\x18\x04 \x01(\x05R\rmaxConcurrent\x12(\n" + - "\x10current_task_ids\x18\x05 \x03(\tR\x0ecurrentTaskIds\x12'\n" + - "\x0ftasks_completed\x18\x06 \x01(\x05R\x0etasksCompleted\x12!\n" + - "\ftasks_failed\x18\a \x01(\x05R\vtasksFailed\x12%\n" + - "\x0euptime_seconds\x18\b \x01(\x03R\ruptimeSeconds\"G\n" + - "\x11HeartbeatResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage\"w\n" + - "\vTaskRequest\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\"\n" + - "\fcapabilities\x18\x02 \x03(\tR\fcapabilities\x12'\n" + - "\x0favailable_slots\x18\x03 \x01(\x05R\x0eavailableSlots\"\xb6\x02\n" + - "\x0eTaskAssignment\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\ttask_type\x18\x02 \x01(\tR\btaskType\x12-\n" + - "\x06params\x18\x03 \x01(\v2\x15.worker_pb.TaskParamsR\x06params\x12\x1a\n" + - "\bpriority\x18\x04 \x01(\x05R\bpriority\x12!\n" + - "\fcreated_time\x18\x05 \x01(\x03R\vcreatedTime\x12C\n" + - "\bmetadata\x18\x06 \x03(\v2'.worker_pb.TaskAssignment.MetadataEntryR\bmetadata\x1a;\n" + - "\rMetadataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xe1\x04\n" + - "\n" + - "TaskParams\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\tvolume_id\x18\x02 \x01(\rR\bvolumeId\x12\x1e\n" + - "\n" + - "collection\x18\x03 \x01(\tR\n" + - "collection\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\x12\x12\n" + - "\x04rack\x18\x05 \x01(\tR\x04rack\x12\x1f\n" + - "\vvolume_size\x18\x06 \x01(\x04R\n" + - "volumeSize\x12/\n" + - "\asources\x18\a \x03(\v2\x15.worker_pb.TaskSourceR\asources\x12/\n" + - "\atargets\x18\b \x03(\v2\x15.worker_pb.TaskTargetR\atargets\x12B\n" + - "\rvacuum_params\x18\t \x01(\v2\x1b.worker_pb.VacuumTaskParamsH\x00R\fvacuumParams\x12X\n" + - "\x15erasure_coding_params\x18\n" + - " \x01(\v2\".worker_pb.ErasureCodingTaskParamsH\x00R\x13erasureCodingParams\x12E\n" + - "\x0ebalance_params\x18\v \x01(\v2\x1c.worker_pb.BalanceTaskParamsH\x00R\rbalanceParams\x12Q\n" + - "\x12replication_params\x18\f \x01(\v2 .worker_pb.ReplicationTaskParamsH\x00R\x11replicationParamsB\r\n" + - "\vtask_params\"\xcb\x01\n" + - "\x10VacuumTaskParams\x12+\n" + - "\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12!\n" + - "\fforce_vacuum\x18\x02 \x01(\bR\vforceVacuum\x12\x1d\n" + - "\n" + - "batch_size\x18\x03 \x01(\x05R\tbatchSize\x12\x1f\n" + - "\vworking_dir\x18\x04 \x01(\tR\n" + - "workingDir\x12'\n" + - "\x0fverify_checksum\x18\x05 \x01(\bR\x0everifyChecksum\"\xfe\x01\n" + - "\x17ErasureCodingTaskParams\x120\n" + - "\x14estimated_shard_size\x18\x01 \x01(\x04R\x12estimatedShardSize\x12\x1f\n" + - "\vdata_shards\x18\x02 \x01(\x05R\n" + - "dataShards\x12#\n" + - "\rparity_shards\x18\x03 \x01(\x05R\fparityShards\x12\x1f\n" + - "\vworking_dir\x18\x04 \x01(\tR\n" + - "workingDir\x12#\n" + - "\rmaster_client\x18\x05 \x01(\tR\fmasterClient\x12%\n" + - "\x0ecleanup_source\x18\x06 \x01(\bR\rcleanupSource\"\xcf\x01\n" + - "\n" + - "TaskSource\x12\x12\n" + - "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" + - "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" + - "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\x12\x1b\n" + - "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tshard_ids\x18\x06 \x03(\rR\bshardIds\x12%\n" + - "\x0eestimated_size\x18\a \x01(\x04R\restimatedSize\"\xcf\x01\n" + - "\n" + - "TaskTarget\x12\x12\n" + - "\x04node\x18\x01 \x01(\tR\x04node\x12\x17\n" + - "\adisk_id\x18\x02 \x01(\rR\x06diskId\x12\x12\n" + - "\x04rack\x18\x03 \x01(\tR\x04rack\x12\x1f\n" + - "\vdata_center\x18\x04 \x01(\tR\n" + - "dataCenter\x12\x1b\n" + - "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x1b\n" + - "\tshard_ids\x18\x06 \x03(\rR\bshardIds\x12%\n" + - "\x0eestimated_size\x18\a \x01(\x04R\restimatedSize\"[\n" + - "\x11BalanceTaskParams\x12\x1d\n" + - "\n" + - "force_move\x18\x01 \x01(\bR\tforceMove\x12'\n" + - "\x0ftimeout_seconds\x18\x02 \x01(\x05R\x0etimeoutSeconds\"k\n" + - "\x15ReplicationTaskParams\x12#\n" + - "\rreplica_count\x18\x01 \x01(\x05R\freplicaCount\x12-\n" + - "\x12verify_consistency\x18\x02 \x01(\bR\x11verifyConsistency\"\x8e\x02\n" + - "\n" + - "TaskUpdate\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x16\n" + - "\x06status\x18\x03 \x01(\tR\x06status\x12\x1a\n" + - "\bprogress\x18\x04 \x01(\x02R\bprogress\x12\x18\n" + - "\amessage\x18\x05 \x01(\tR\amessage\x12?\n" + - "\bmetadata\x18\x06 \x03(\v2#.worker_pb.TaskUpdate.MetadataEntryR\bmetadata\x1a;\n" + - "\rMetadataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc5\x02\n" + - "\fTaskComplete\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x18\n" + - "\asuccess\x18\x03 \x01(\bR\asuccess\x12#\n" + - "\rerror_message\x18\x04 \x01(\tR\ferrorMessage\x12'\n" + - "\x0fcompletion_time\x18\x05 \x01(\x03R\x0ecompletionTime\x12T\n" + - "\x0fresult_metadata\x18\x06 \x03(\v2+.worker_pb.TaskComplete.ResultMetadataEntryR\x0eresultMetadata\x1aA\n" + - "\x13ResultMetadataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"Y\n" + - "\x10TaskCancellation\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x16\n" + - "\x06reason\x18\x02 \x01(\tR\x06reason\x12\x14\n" + - "\x05force\x18\x03 \x01(\bR\x05force\"o\n" + - "\x0eWorkerShutdown\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12\x16\n" + - "\x06reason\x18\x02 \x01(\tR\x06reason\x12(\n" + - "\x10pending_task_ids\x18\x03 \x03(\tR\x0ependingTaskIds\"c\n" + - "\rAdminShutdown\x12\x16\n" + - "\x06reason\x18\x01 \x01(\tR\x06reason\x12:\n" + - "\x19graceful_shutdown_seconds\x18\x02 \x01(\x05R\x17gracefulShutdownSeconds\"\xe9\x01\n" + - "\x0eTaskLogRequest\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12)\n" + - "\x10include_metadata\x18\x03 \x01(\bR\x0fincludeMetadata\x12\x1f\n" + - "\vmax_entries\x18\x04 \x01(\x05R\n" + - "maxEntries\x12\x1b\n" + - "\tlog_level\x18\x05 \x01(\tR\blogLevel\x12\x1d\n" + - "\n" + - "start_time\x18\x06 \x01(\x03R\tstartTime\x12\x19\n" + - "\bend_time\x18\a \x01(\x03R\aendTime\"\xf8\x01\n" + - "\x0fTaskLogResponse\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\tworker_id\x18\x02 \x01(\tR\bworkerId\x12\x18\n" + - "\asuccess\x18\x03 \x01(\bR\asuccess\x12#\n" + - "\rerror_message\x18\x04 \x01(\tR\ferrorMessage\x126\n" + - "\bmetadata\x18\x05 \x01(\v2\x1a.worker_pb.TaskLogMetadataR\bmetadata\x128\n" + - "\vlog_entries\x18\x06 \x03(\v2\x17.worker_pb.TaskLogEntryR\n" + - "logEntries\"\x97\x04\n" + - "\x0fTaskLogMetadata\x12\x17\n" + - "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + - "\ttask_type\x18\x02 \x01(\tR\btaskType\x12\x1b\n" + - "\tworker_id\x18\x03 \x01(\tR\bworkerId\x12\x1d\n" + - "\n" + - "start_time\x18\x04 \x01(\x03R\tstartTime\x12\x19\n" + - "\bend_time\x18\x05 \x01(\x03R\aendTime\x12\x1f\n" + - "\vduration_ms\x18\x06 \x01(\x03R\n" + - "durationMs\x12\x16\n" + - "\x06status\x18\a \x01(\tR\x06status\x12\x1a\n" + - "\bprogress\x18\b \x01(\x02R\bprogress\x12\x1b\n" + - "\tvolume_id\x18\t \x01(\rR\bvolumeId\x12\x16\n" + - "\x06server\x18\n" + - " \x01(\tR\x06server\x12\x1e\n" + - "\n" + - "collection\x18\v \x01(\tR\n" + - "collection\x12\"\n" + - "\rlog_file_path\x18\f \x01(\tR\vlogFilePath\x12\x1d\n" + - "\n" + - "created_at\x18\r \x01(\x03R\tcreatedAt\x12K\n" + - "\vcustom_data\x18\x0e \x03(\v2*.worker_pb.TaskLogMetadata.CustomDataEntryR\n" + - "customData\x1a=\n" + - "\x0fCustomDataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x88\x02\n" + - "\fTaskLogEntry\x12\x1c\n" + - "\ttimestamp\x18\x01 \x01(\x03R\ttimestamp\x12\x14\n" + - "\x05level\x18\x02 \x01(\tR\x05level\x12\x18\n" + - "\amessage\x18\x03 \x01(\tR\amessage\x12;\n" + - "\x06fields\x18\x04 \x03(\v2#.worker_pb.TaskLogEntry.FieldsEntryR\x06fields\x12\x1a\n" + - "\bprogress\x18\x05 \x01(\x02R\bprogress\x12\x16\n" + - "\x06status\x18\x06 \x01(\tR\x06status\x1a9\n" + - "\vFieldsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc0\x03\n" + - "\x11MaintenanceConfig\x12\x18\n" + - "\aenabled\x18\x01 \x01(\bR\aenabled\x122\n" + - "\x15scan_interval_seconds\x18\x02 \x01(\x05R\x13scanIntervalSeconds\x124\n" + - "\x16worker_timeout_seconds\x18\x03 \x01(\x05R\x14workerTimeoutSeconds\x120\n" + - "\x14task_timeout_seconds\x18\x04 \x01(\x05R\x12taskTimeoutSeconds\x12.\n" + - "\x13retry_delay_seconds\x18\x05 \x01(\x05R\x11retryDelaySeconds\x12\x1f\n" + - "\vmax_retries\x18\x06 \x01(\x05R\n" + - "maxRetries\x128\n" + - "\x18cleanup_interval_seconds\x18\a \x01(\x05R\x16cleanupIntervalSeconds\x124\n" + - "\x16task_retention_seconds\x18\b \x01(\x05R\x14taskRetentionSeconds\x124\n" + - "\x06policy\x18\t \x01(\v2\x1c.worker_pb.MaintenancePolicyR\x06policy\"\x80\x03\n" + - "\x11MaintenancePolicy\x12S\n" + - "\rtask_policies\x18\x01 \x03(\v2..worker_pb.MaintenancePolicy.TaskPoliciesEntryR\ftaskPolicies\x122\n" + - "\x15global_max_concurrent\x18\x02 \x01(\x05R\x13globalMaxConcurrent\x12E\n" + - "\x1fdefault_repeat_interval_seconds\x18\x03 \x01(\x05R\x1cdefaultRepeatIntervalSeconds\x12C\n" + - "\x1edefault_check_interval_seconds\x18\x04 \x01(\x05R\x1bdefaultCheckIntervalSeconds\x1aV\n" + - "\x11TaskPoliciesEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12+\n" + - "\x05value\x18\x02 \x01(\v2\x15.worker_pb.TaskPolicyR\x05value:\x028\x01\"\x82\x04\n" + - "\n" + - "TaskPolicy\x12\x18\n" + - "\aenabled\x18\x01 \x01(\bR\aenabled\x12%\n" + - "\x0emax_concurrent\x18\x02 \x01(\x05R\rmaxConcurrent\x126\n" + - "\x17repeat_interval_seconds\x18\x03 \x01(\x05R\x15repeatIntervalSeconds\x124\n" + - "\x16check_interval_seconds\x18\x04 \x01(\x05R\x14checkIntervalSeconds\x12B\n" + - "\rvacuum_config\x18\x05 \x01(\v2\x1b.worker_pb.VacuumTaskConfigH\x00R\fvacuumConfig\x12X\n" + - "\x15erasure_coding_config\x18\x06 \x01(\v2\".worker_pb.ErasureCodingTaskConfigH\x00R\x13erasureCodingConfig\x12E\n" + - "\x0ebalance_config\x18\a \x01(\v2\x1c.worker_pb.BalanceTaskConfigH\x00R\rbalanceConfig\x12Q\n" + - "\x12replication_config\x18\b \x01(\v2 .worker_pb.ReplicationTaskConfigH\x00R\x11replicationConfigB\r\n" + - "\vtask_config\"\xa2\x01\n" + - "\x10VacuumTaskConfig\x12+\n" + - "\x11garbage_threshold\x18\x01 \x01(\x01R\x10garbageThreshold\x12/\n" + - "\x14min_volume_age_hours\x18\x02 \x01(\x05R\x11minVolumeAgeHours\x120\n" + - "\x14min_interval_seconds\x18\x03 \x01(\x05R\x12minIntervalSeconds\"\xc6\x01\n" + - "\x17ErasureCodingTaskConfig\x12%\n" + - "\x0efullness_ratio\x18\x01 \x01(\x01R\rfullnessRatio\x12*\n" + - "\x11quiet_for_seconds\x18\x02 \x01(\x05R\x0fquietForSeconds\x12+\n" + - "\x12min_volume_size_mb\x18\x03 \x01(\x05R\x0fminVolumeSizeMb\x12+\n" + - "\x11collection_filter\x18\x04 \x01(\tR\x10collectionFilter\"n\n" + - "\x11BalanceTaskConfig\x12/\n" + - "\x13imbalance_threshold\x18\x01 \x01(\x01R\x12imbalanceThreshold\x12(\n" + - "\x10min_server_count\x18\x02 \x01(\x05R\x0eminServerCount\"I\n" + - "\x15ReplicationTaskConfig\x120\n" + - "\x14target_replica_count\x18\x01 \x01(\x05R\x12targetReplicaCount\"\xae\a\n" + - "\x13MaintenanceTaskData\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + - "\x04type\x18\x02 \x01(\tR\x04type\x12\x1a\n" + - "\bpriority\x18\x03 \x01(\tR\bpriority\x12\x16\n" + - "\x06status\x18\x04 \x01(\tR\x06status\x12\x1b\n" + - "\tvolume_id\x18\x05 \x01(\rR\bvolumeId\x12\x16\n" + - "\x06server\x18\x06 \x01(\tR\x06server\x12\x1e\n" + - "\n" + - "collection\x18\a \x01(\tR\n" + - "collection\x128\n" + - "\ftyped_params\x18\b \x01(\v2\x15.worker_pb.TaskParamsR\vtypedParams\x12\x16\n" + - "\x06reason\x18\t \x01(\tR\x06reason\x12\x1d\n" + - "\n" + - "created_at\x18\n" + - " \x01(\x03R\tcreatedAt\x12!\n" + - "\fscheduled_at\x18\v \x01(\x03R\vscheduledAt\x12\x1d\n" + - "\n" + - "started_at\x18\f \x01(\x03R\tstartedAt\x12!\n" + - "\fcompleted_at\x18\r \x01(\x03R\vcompletedAt\x12\x1b\n" + - "\tworker_id\x18\x0e \x01(\tR\bworkerId\x12\x14\n" + - "\x05error\x18\x0f \x01(\tR\x05error\x12\x1a\n" + - "\bprogress\x18\x10 \x01(\x01R\bprogress\x12\x1f\n" + - "\vretry_count\x18\x11 \x01(\x05R\n" + - "retryCount\x12\x1f\n" + - "\vmax_retries\x18\x12 \x01(\x05R\n" + - "maxRetries\x12\x1d\n" + - "\n" + - "created_by\x18\x13 \x01(\tR\tcreatedBy\x12)\n" + - "\x10creation_context\x18\x14 \x01(\tR\x0fcreationContext\x12N\n" + - "\x12assignment_history\x18\x15 \x03(\v2\x1f.worker_pb.TaskAssignmentRecordR\x11assignmentHistory\x12'\n" + - "\x0fdetailed_reason\x18\x16 \x01(\tR\x0edetailedReason\x12<\n" + - "\x04tags\x18\x17 \x03(\v2(.worker_pb.MaintenanceTaskData.TagsEntryR\x04tags\x12I\n" + - "\x10creation_metrics\x18\x18 \x01(\v2\x1e.worker_pb.TaskCreationMetricsR\x0fcreationMetrics\x1a7\n" + - "\tTagsEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb8\x01\n" + - "\x14TaskAssignmentRecord\x12\x1b\n" + - "\tworker_id\x18\x01 \x01(\tR\bworkerId\x12%\n" + - "\x0eworker_address\x18\x02 \x01(\tR\rworkerAddress\x12\x1f\n" + - "\vassigned_at\x18\x03 \x01(\x03R\n" + - "assignedAt\x12#\n" + - "\runassigned_at\x18\x04 \x01(\x03R\funassignedAt\x12\x16\n" + - "\x06reason\x18\x05 \x01(\tR\x06reason\"\xe4\x02\n" + - "\x13TaskCreationMetrics\x12%\n" + - "\x0etrigger_metric\x18\x01 \x01(\tR\rtriggerMetric\x12!\n" + - "\fmetric_value\x18\x02 \x01(\x01R\vmetricValue\x12\x1c\n" + - "\tthreshold\x18\x03 \x01(\x01R\tthreshold\x12E\n" + - "\x0evolume_metrics\x18\x04 \x01(\v2\x1e.worker_pb.VolumeHealthMetricsR\rvolumeMetrics\x12[\n" + - "\x0fadditional_data\x18\x05 \x03(\v22.worker_pb.TaskCreationMetrics.AdditionalDataEntryR\x0eadditionalData\x1aA\n" + - "\x13AdditionalDataEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf2\x02\n" + - "\x13VolumeHealthMetrics\x12\x1d\n" + - "\n" + - "total_size\x18\x01 \x01(\x04R\ttotalSize\x12\x1b\n" + - "\tused_size\x18\x02 \x01(\x04R\busedSize\x12!\n" + - "\fgarbage_size\x18\x03 \x01(\x04R\vgarbageSize\x12#\n" + - "\rgarbage_ratio\x18\x04 \x01(\x01R\fgarbageRatio\x12\x1d\n" + - "\n" + - "file_count\x18\x05 \x01(\x05R\tfileCount\x12,\n" + - "\x12deleted_file_count\x18\x06 \x01(\x05R\x10deletedFileCount\x12#\n" + - "\rlast_modified\x18\a \x01(\x03R\flastModified\x12#\n" + - "\rreplica_count\x18\b \x01(\x05R\freplicaCount\x12 \n" + - "\fis_ec_volume\x18\t \x01(\bR\n" + - "isEcVolume\x12\x1e\n" + - "\n" + - "collection\x18\n" + - " \x01(\tR\n" + - "collection\"\x8b\x01\n" + - "\rTaskStateFile\x122\n" + - "\x04task\x18\x01 \x01(\v2\x1e.worker_pb.MaintenanceTaskDataR\x04task\x12!\n" + - "\flast_updated\x18\x02 \x01(\x03R\vlastUpdated\x12#\n" + - "\radmin_version\x18\x03 \x01(\tR\fadminVersion2V\n" + - "\rWorkerService\x12E\n" + - "\fWorkerStream\x12\x18.worker_pb.WorkerMessage\x1a\x17.worker_pb.AdminMessage(\x010\x01B2Z0github.com/seaweedfs/seaweedfs/weed/pb/worker_pbb\x06proto3" - -var ( - file_worker_proto_rawDescOnce sync.Once - file_worker_proto_rawDescData []byte -) - -func file_worker_proto_rawDescGZIP() []byte { - file_worker_proto_rawDescOnce.Do(func() { - file_worker_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc))) - }) - return file_worker_proto_rawDescData -} - -var file_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 45) -var file_worker_proto_goTypes = []any{ - (*WorkerMessage)(nil), // 0: worker_pb.WorkerMessage - (*AdminMessage)(nil), // 1: worker_pb.AdminMessage - (*WorkerRegistration)(nil), // 2: worker_pb.WorkerRegistration - (*RegistrationResponse)(nil), // 3: worker_pb.RegistrationResponse - (*WorkerHeartbeat)(nil), // 4: worker_pb.WorkerHeartbeat - (*HeartbeatResponse)(nil), // 5: worker_pb.HeartbeatResponse - (*TaskRequest)(nil), // 6: worker_pb.TaskRequest - (*TaskAssignment)(nil), // 7: worker_pb.TaskAssignment - (*TaskParams)(nil), // 8: worker_pb.TaskParams - (*VacuumTaskParams)(nil), // 9: worker_pb.VacuumTaskParams - (*ErasureCodingTaskParams)(nil), // 10: worker_pb.ErasureCodingTaskParams - (*TaskSource)(nil), // 11: worker_pb.TaskSource - (*TaskTarget)(nil), // 12: worker_pb.TaskTarget - (*BalanceTaskParams)(nil), // 13: worker_pb.BalanceTaskParams - (*ReplicationTaskParams)(nil), // 14: worker_pb.ReplicationTaskParams - (*TaskUpdate)(nil), // 15: worker_pb.TaskUpdate - (*TaskComplete)(nil), // 16: worker_pb.TaskComplete - (*TaskCancellation)(nil), // 17: worker_pb.TaskCancellation - (*WorkerShutdown)(nil), // 18: worker_pb.WorkerShutdown - (*AdminShutdown)(nil), // 19: worker_pb.AdminShutdown - (*TaskLogRequest)(nil), // 20: worker_pb.TaskLogRequest - (*TaskLogResponse)(nil), // 21: worker_pb.TaskLogResponse - (*TaskLogMetadata)(nil), // 22: worker_pb.TaskLogMetadata - (*TaskLogEntry)(nil), // 23: worker_pb.TaskLogEntry - (*MaintenanceConfig)(nil), // 24: worker_pb.MaintenanceConfig - (*MaintenancePolicy)(nil), // 25: worker_pb.MaintenancePolicy - (*TaskPolicy)(nil), // 26: worker_pb.TaskPolicy - (*VacuumTaskConfig)(nil), // 27: worker_pb.VacuumTaskConfig - (*ErasureCodingTaskConfig)(nil), // 28: worker_pb.ErasureCodingTaskConfig - (*BalanceTaskConfig)(nil), // 29: worker_pb.BalanceTaskConfig - (*ReplicationTaskConfig)(nil), // 30: worker_pb.ReplicationTaskConfig - (*MaintenanceTaskData)(nil), // 31: worker_pb.MaintenanceTaskData - (*TaskAssignmentRecord)(nil), // 32: worker_pb.TaskAssignmentRecord - (*TaskCreationMetrics)(nil), // 33: worker_pb.TaskCreationMetrics - (*VolumeHealthMetrics)(nil), // 34: worker_pb.VolumeHealthMetrics - (*TaskStateFile)(nil), // 35: worker_pb.TaskStateFile - nil, // 36: worker_pb.WorkerRegistration.MetadataEntry - nil, // 37: worker_pb.TaskAssignment.MetadataEntry - nil, // 38: worker_pb.TaskUpdate.MetadataEntry - nil, // 39: worker_pb.TaskComplete.ResultMetadataEntry - nil, // 40: worker_pb.TaskLogMetadata.CustomDataEntry - nil, // 41: worker_pb.TaskLogEntry.FieldsEntry - nil, // 42: worker_pb.MaintenancePolicy.TaskPoliciesEntry - nil, // 43: worker_pb.MaintenanceTaskData.TagsEntry - nil, // 44: worker_pb.TaskCreationMetrics.AdditionalDataEntry -} -var file_worker_proto_depIdxs = []int32{ - 2, // 0: worker_pb.WorkerMessage.registration:type_name -> worker_pb.WorkerRegistration - 4, // 1: worker_pb.WorkerMessage.heartbeat:type_name -> worker_pb.WorkerHeartbeat - 6, // 2: worker_pb.WorkerMessage.task_request:type_name -> worker_pb.TaskRequest - 15, // 3: worker_pb.WorkerMessage.task_update:type_name -> worker_pb.TaskUpdate - 16, // 4: worker_pb.WorkerMessage.task_complete:type_name -> worker_pb.TaskComplete - 18, // 5: worker_pb.WorkerMessage.shutdown:type_name -> worker_pb.WorkerShutdown - 21, // 6: worker_pb.WorkerMessage.task_log_response:type_name -> worker_pb.TaskLogResponse - 3, // 7: worker_pb.AdminMessage.registration_response:type_name -> worker_pb.RegistrationResponse - 5, // 8: worker_pb.AdminMessage.heartbeat_response:type_name -> worker_pb.HeartbeatResponse - 7, // 9: worker_pb.AdminMessage.task_assignment:type_name -> worker_pb.TaskAssignment - 17, // 10: worker_pb.AdminMessage.task_cancellation:type_name -> worker_pb.TaskCancellation - 19, // 11: worker_pb.AdminMessage.admin_shutdown:type_name -> worker_pb.AdminShutdown - 20, // 12: worker_pb.AdminMessage.task_log_request:type_name -> worker_pb.TaskLogRequest - 36, // 13: worker_pb.WorkerRegistration.metadata:type_name -> worker_pb.WorkerRegistration.MetadataEntry - 8, // 14: worker_pb.TaskAssignment.params:type_name -> worker_pb.TaskParams - 37, // 15: worker_pb.TaskAssignment.metadata:type_name -> worker_pb.TaskAssignment.MetadataEntry - 11, // 16: worker_pb.TaskParams.sources:type_name -> worker_pb.TaskSource - 12, // 17: worker_pb.TaskParams.targets:type_name -> worker_pb.TaskTarget - 9, // 18: worker_pb.TaskParams.vacuum_params:type_name -> worker_pb.VacuumTaskParams - 10, // 19: worker_pb.TaskParams.erasure_coding_params:type_name -> worker_pb.ErasureCodingTaskParams - 13, // 20: worker_pb.TaskParams.balance_params:type_name -> worker_pb.BalanceTaskParams - 14, // 21: worker_pb.TaskParams.replication_params:type_name -> worker_pb.ReplicationTaskParams - 38, // 22: worker_pb.TaskUpdate.metadata:type_name -> worker_pb.TaskUpdate.MetadataEntry - 39, // 23: worker_pb.TaskComplete.result_metadata:type_name -> worker_pb.TaskComplete.ResultMetadataEntry - 22, // 24: worker_pb.TaskLogResponse.metadata:type_name -> worker_pb.TaskLogMetadata - 23, // 25: worker_pb.TaskLogResponse.log_entries:type_name -> worker_pb.TaskLogEntry - 40, // 26: worker_pb.TaskLogMetadata.custom_data:type_name -> worker_pb.TaskLogMetadata.CustomDataEntry - 41, // 27: worker_pb.TaskLogEntry.fields:type_name -> worker_pb.TaskLogEntry.FieldsEntry - 25, // 28: worker_pb.MaintenanceConfig.policy:type_name -> worker_pb.MaintenancePolicy - 42, // 29: worker_pb.MaintenancePolicy.task_policies:type_name -> worker_pb.MaintenancePolicy.TaskPoliciesEntry - 27, // 30: worker_pb.TaskPolicy.vacuum_config:type_name -> worker_pb.VacuumTaskConfig - 28, // 31: worker_pb.TaskPolicy.erasure_coding_config:type_name -> worker_pb.ErasureCodingTaskConfig - 29, // 32: worker_pb.TaskPolicy.balance_config:type_name -> worker_pb.BalanceTaskConfig - 30, // 33: worker_pb.TaskPolicy.replication_config:type_name -> worker_pb.ReplicationTaskConfig - 8, // 34: worker_pb.MaintenanceTaskData.typed_params:type_name -> worker_pb.TaskParams - 32, // 35: worker_pb.MaintenanceTaskData.assignment_history:type_name -> worker_pb.TaskAssignmentRecord - 43, // 36: worker_pb.MaintenanceTaskData.tags:type_name -> worker_pb.MaintenanceTaskData.TagsEntry - 33, // 37: worker_pb.MaintenanceTaskData.creation_metrics:type_name -> worker_pb.TaskCreationMetrics - 34, // 38: worker_pb.TaskCreationMetrics.volume_metrics:type_name -> worker_pb.VolumeHealthMetrics - 44, // 39: worker_pb.TaskCreationMetrics.additional_data:type_name -> worker_pb.TaskCreationMetrics.AdditionalDataEntry - 31, // 40: worker_pb.TaskStateFile.task:type_name -> worker_pb.MaintenanceTaskData - 26, // 41: worker_pb.MaintenancePolicy.TaskPoliciesEntry.value:type_name -> worker_pb.TaskPolicy - 0, // 42: worker_pb.WorkerService.WorkerStream:input_type -> worker_pb.WorkerMessage - 1, // 43: worker_pb.WorkerService.WorkerStream:output_type -> worker_pb.AdminMessage - 43, // [43:44] is the sub-list for method output_type - 42, // [42:43] is the sub-list for method input_type - 42, // [42:42] is the sub-list for extension type_name - 42, // [42:42] is the sub-list for extension extendee - 0, // [0:42] is the sub-list for field type_name -} - -func init() { file_worker_proto_init() } -func file_worker_proto_init() { - if File_worker_proto != nil { - return - } - file_worker_proto_msgTypes[0].OneofWrappers = []any{ - (*WorkerMessage_Registration)(nil), - (*WorkerMessage_Heartbeat)(nil), - (*WorkerMessage_TaskRequest)(nil), - (*WorkerMessage_TaskUpdate)(nil), - (*WorkerMessage_TaskComplete)(nil), - (*WorkerMessage_Shutdown)(nil), - (*WorkerMessage_TaskLogResponse)(nil), - } - file_worker_proto_msgTypes[1].OneofWrappers = []any{ - (*AdminMessage_RegistrationResponse)(nil), - (*AdminMessage_HeartbeatResponse)(nil), - (*AdminMessage_TaskAssignment)(nil), - (*AdminMessage_TaskCancellation)(nil), - (*AdminMessage_AdminShutdown)(nil), - (*AdminMessage_TaskLogRequest)(nil), - } - file_worker_proto_msgTypes[8].OneofWrappers = []any{ - (*TaskParams_VacuumParams)(nil), - (*TaskParams_ErasureCodingParams)(nil), - (*TaskParams_BalanceParams)(nil), - (*TaskParams_ReplicationParams)(nil), - } - file_worker_proto_msgTypes[26].OneofWrappers = []any{ - (*TaskPolicy_VacuumConfig)(nil), - (*TaskPolicy_ErasureCodingConfig)(nil), - (*TaskPolicy_BalanceConfig)(nil), - (*TaskPolicy_ReplicationConfig)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_worker_proto_rawDesc), len(file_worker_proto_rawDesc)), - NumEnums: 0, - NumMessages: 45, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_worker_proto_goTypes, - DependencyIndexes: file_worker_proto_depIdxs, - MessageInfos: file_worker_proto_msgTypes, - }.Build() - File_worker_proto = out.File - file_worker_proto_goTypes = nil - file_worker_proto_depIdxs = nil -} diff --git a/weed/pb/worker_pb/worker_grpc.pb.go b/weed/pb/worker_pb/worker_grpc.pb.go deleted file mode 100644 index 85bad96f4..000000000 --- a/weed/pb/worker_pb/worker_grpc.pb.go +++ /dev/null @@ -1,121 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 -// source: worker.proto - -package worker_pb - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - WorkerService_WorkerStream_FullMethodName = "/worker_pb.WorkerService/WorkerStream" -) - -// WorkerServiceClient is the client API for WorkerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// WorkerService provides bidirectional communication between admin and worker -type WorkerServiceClient interface { - // WorkerStream maintains a bidirectional stream for worker communication - WorkerStream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[WorkerMessage, AdminMessage], error) -} - -type workerServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewWorkerServiceClient(cc grpc.ClientConnInterface) WorkerServiceClient { - return &workerServiceClient{cc} -} - -func (c *workerServiceClient) WorkerStream(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[WorkerMessage, AdminMessage], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], WorkerService_WorkerStream_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &grpc.GenericClientStream[WorkerMessage, AdminMessage]{ClientStream: stream} - return x, nil -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type WorkerService_WorkerStreamClient = grpc.BidiStreamingClient[WorkerMessage, AdminMessage] - -// WorkerServiceServer is the server API for WorkerService service. -// All implementations must embed UnimplementedWorkerServiceServer -// for forward compatibility. -// -// WorkerService provides bidirectional communication between admin and worker -type WorkerServiceServer interface { - // WorkerStream maintains a bidirectional stream for worker communication - WorkerStream(grpc.BidiStreamingServer[WorkerMessage, AdminMessage]) error - mustEmbedUnimplementedWorkerServiceServer() -} - -// UnimplementedWorkerServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedWorkerServiceServer struct{} - -func (UnimplementedWorkerServiceServer) WorkerStream(grpc.BidiStreamingServer[WorkerMessage, AdminMessage]) error { - return status.Errorf(codes.Unimplemented, "method WorkerStream not implemented") -} -func (UnimplementedWorkerServiceServer) mustEmbedUnimplementedWorkerServiceServer() {} -func (UnimplementedWorkerServiceServer) testEmbeddedByValue() {} - -// UnsafeWorkerServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to WorkerServiceServer will -// result in compilation errors. -type UnsafeWorkerServiceServer interface { - mustEmbedUnimplementedWorkerServiceServer() -} - -func RegisterWorkerServiceServer(s grpc.ServiceRegistrar, srv WorkerServiceServer) { - // If the following call pancis, it indicates UnimplementedWorkerServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&WorkerService_ServiceDesc, srv) -} - -func _WorkerService_WorkerStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServiceServer).WorkerStream(&grpc.GenericServerStream[WorkerMessage, AdminMessage]{ServerStream: stream}) -} - -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type WorkerService_WorkerStreamServer = grpc.BidiStreamingServer[WorkerMessage, AdminMessage] - -// WorkerService_ServiceDesc is the grpc.ServiceDesc for WorkerService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var WorkerService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "worker_pb.WorkerService", - HandlerType: (*WorkerServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "WorkerStream", - Handler: _WorkerService_WorkerStream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "worker.proto", -} diff --git a/weed/query/engine/aggregations.go b/weed/query/engine/aggregations.go deleted file mode 100644 index 6b58517e1..000000000 --- a/weed/query/engine/aggregations.go +++ /dev/null @@ -1,933 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "math" - "strconv" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" -) - -// AggregationSpec defines an aggregation function to be computed -type AggregationSpec struct { - Function string // COUNT, SUM, AVG, MIN, MAX - Column string // Column name, or "*" for COUNT(*) - Alias string // Optional alias for the result column - Distinct bool // Support for DISTINCT keyword -} - -// AggregationResult holds the computed result of an aggregation -type AggregationResult struct { - Count int64 - Sum float64 - Min interface{} - Max interface{} -} - -// AggregationStrategy represents the strategy for executing aggregations -type AggregationStrategy struct { - CanUseFastPath bool - Reason string - UnsupportedSpecs []AggregationSpec -} - -// TopicDataSources represents the data sources available for a topic -type TopicDataSources struct { - ParquetFiles map[string][]*ParquetFileStats // partitionPath -> parquet file stats - ParquetRowCount int64 - LiveLogRowCount int64 - LiveLogFilesCount int // Total count of live log files across all partitions - PartitionsCount int - BrokerUnflushedCount int64 -} - -// FastPathOptimizer handles fast path aggregation optimization decisions -type FastPathOptimizer struct { - engine *SQLEngine -} - -// NewFastPathOptimizer creates a new fast path optimizer -func NewFastPathOptimizer(engine *SQLEngine) *FastPathOptimizer { - return &FastPathOptimizer{engine: engine} -} - -// DetermineStrategy analyzes aggregations and determines if fast path can be used -func (opt *FastPathOptimizer) DetermineStrategy(aggregations []AggregationSpec) AggregationStrategy { - strategy := AggregationStrategy{ - CanUseFastPath: true, - Reason: "all_aggregations_supported", - UnsupportedSpecs: []AggregationSpec{}, - } - - for _, spec := range aggregations { - if !opt.engine.canUseParquetStatsForAggregation(spec) { - strategy.CanUseFastPath = false - strategy.Reason = "unsupported_aggregation_functions" - strategy.UnsupportedSpecs = append(strategy.UnsupportedSpecs, spec) - } - } - - return strategy -} - -// CollectDataSources gathers information about available data sources for a topic -func (opt *FastPathOptimizer) CollectDataSources(ctx context.Context, hybridScanner *HybridMessageScanner) (*TopicDataSources, error) { - return opt.CollectDataSourcesWithTimeFilter(ctx, hybridScanner, 0, 0) -} - -// CollectDataSourcesWithTimeFilter gathers information about available data sources for a topic -// with optional time filtering to skip irrelevant parquet files -func (opt *FastPathOptimizer) CollectDataSourcesWithTimeFilter(ctx context.Context, hybridScanner *HybridMessageScanner, startTimeNs, stopTimeNs int64) (*TopicDataSources, error) { - dataSources := &TopicDataSources{ - ParquetFiles: make(map[string][]*ParquetFileStats), - ParquetRowCount: 0, - LiveLogRowCount: 0, - LiveLogFilesCount: 0, - PartitionsCount: 0, - } - - if isDebugMode(ctx) { - fmt.Printf("Collecting data sources for: %s/%s\n", hybridScanner.topic.Namespace, hybridScanner.topic.Name) - } - - // Discover partitions for the topic - partitionPaths, err := opt.engine.discoverTopicPartitions(hybridScanner.topic.Namespace, hybridScanner.topic.Name) - if err != nil { - if isDebugMode(ctx) { - fmt.Printf("ERROR: Partition discovery failed: %v\n", err) - } - return dataSources, DataSourceError{ - Source: "partition_discovery", - Cause: err, - } - } - - // DEBUG: Log discovered partitions - if isDebugMode(ctx) { - fmt.Printf("Discovered %d partitions: %v\n", len(partitionPaths), partitionPaths) - } - - // Collect stats from each partition - // Note: discoverTopicPartitions always returns absolute paths starting with "/topics/" - for _, partitionPath := range partitionPaths { - if isDebugMode(ctx) { - fmt.Printf("\nProcessing partition: %s\n", partitionPath) - } - - // Read parquet file statistics - parquetStats, err := hybridScanner.ReadParquetStatistics(partitionPath) - if err != nil { - if isDebugMode(ctx) { - fmt.Printf(" ERROR: Failed to read parquet statistics: %v\n", err) - } - } else if len(parquetStats) == 0 { - if isDebugMode(ctx) { - fmt.Printf(" No parquet files found in partition\n") - } - } else { - // Prune by time range using parquet column statistics - filtered := pruneParquetFilesByTime(ctx, parquetStats, hybridScanner, startTimeNs, stopTimeNs) - dataSources.ParquetFiles[partitionPath] = filtered - partitionParquetRows := int64(0) - for _, stat := range filtered { - partitionParquetRows += stat.RowCount - dataSources.ParquetRowCount += stat.RowCount - } - if isDebugMode(ctx) { - fmt.Printf(" Found %d parquet files with %d total rows\n", len(filtered), partitionParquetRows) - } - } - - // Count live log files (excluding those converted to parquet) - parquetSources := opt.engine.extractParquetSourceFiles(dataSources.ParquetFiles[partitionPath]) - liveLogCount, liveLogErr := opt.engine.countLiveLogRowsExcludingParquetSources(ctx, partitionPath, parquetSources) - if liveLogErr != nil { - if isDebugMode(ctx) { - fmt.Printf(" ERROR: Failed to count live log rows: %v\n", liveLogErr) - } - } else { - dataSources.LiveLogRowCount += liveLogCount - if isDebugMode(ctx) { - fmt.Printf(" Found %d live log rows (excluding %d parquet sources)\n", liveLogCount, len(parquetSources)) - } - } - - // Count live log files for partition with proper range values - // Extract partition name from absolute path (e.g., "0000-2520" from "/topics/.../v2025.../0000-2520") - partitionName := partitionPath[strings.LastIndex(partitionPath, "/")+1:] - partitionParts := strings.Split(partitionName, "-") - if len(partitionParts) == 2 { - rangeStart, err1 := strconv.Atoi(partitionParts[0]) - rangeStop, err2 := strconv.Atoi(partitionParts[1]) - if err1 == nil && err2 == nil { - partition := topic.Partition{ - RangeStart: int32(rangeStart), - RangeStop: int32(rangeStop), - } - liveLogFileCount, err := hybridScanner.countLiveLogFiles(partition) - if err == nil { - dataSources.LiveLogFilesCount += liveLogFileCount - } - - // Count broker unflushed messages for this partition - if hybridScanner.brokerClient != nil { - entries, err := hybridScanner.brokerClient.GetUnflushedMessages(ctx, hybridScanner.topic.Namespace, hybridScanner.topic.Name, partition, 0) - if err == nil { - dataSources.BrokerUnflushedCount += int64(len(entries)) - if isDebugMode(ctx) { - fmt.Printf(" Found %d unflushed broker messages\n", len(entries)) - } - } else if isDebugMode(ctx) { - fmt.Printf(" ERROR: Failed to get unflushed broker messages: %v\n", err) - } - } - } - } - } - - dataSources.PartitionsCount = len(partitionPaths) - - if isDebugMode(ctx) { - fmt.Printf("Data sources collected: %d partitions, %d parquet rows, %d live log rows, %d broker buffer rows\n", - dataSources.PartitionsCount, dataSources.ParquetRowCount, dataSources.LiveLogRowCount, dataSources.BrokerUnflushedCount) - } - - return dataSources, nil -} - -// AggregationComputer handles the computation of aggregations using fast path -type AggregationComputer struct { - engine *SQLEngine -} - -// NewAggregationComputer creates a new aggregation computer -func NewAggregationComputer(engine *SQLEngine) *AggregationComputer { - return &AggregationComputer{engine: engine} -} - -// ComputeFastPathAggregations computes aggregations using parquet statistics and live log data -func (comp *AggregationComputer) ComputeFastPathAggregations( - ctx context.Context, - aggregations []AggregationSpec, - dataSources *TopicDataSources, - partitions []string, -) ([]AggregationResult, error) { - - aggResults := make([]AggregationResult, len(aggregations)) - - for i, spec := range aggregations { - switch spec.Function { - case FuncCOUNT: - if spec.Column == "*" { - aggResults[i].Count = dataSources.ParquetRowCount + dataSources.LiveLogRowCount + dataSources.BrokerUnflushedCount - } else { - // For specific columns, we might need to account for NULLs in the future - aggResults[i].Count = dataSources.ParquetRowCount + dataSources.LiveLogRowCount + dataSources.BrokerUnflushedCount - } - - case FuncMIN: - globalMin, err := comp.computeGlobalMin(spec, dataSources, partitions) - if err != nil { - return nil, AggregationError{ - Operation: spec.Function, - Column: spec.Column, - Cause: err, - } - } - aggResults[i].Min = globalMin - - case FuncMAX: - globalMax, err := comp.computeGlobalMax(spec, dataSources, partitions) - if err != nil { - return nil, AggregationError{ - Operation: spec.Function, - Column: spec.Column, - Cause: err, - } - } - aggResults[i].Max = globalMax - - default: - return nil, OptimizationError{ - Strategy: "fast_path_aggregation", - Reason: fmt.Sprintf("unsupported aggregation function: %s", spec.Function), - } - } - } - - return aggResults, nil -} - -// computeGlobalMin computes the global minimum value across all data sources -func (comp *AggregationComputer) computeGlobalMin(spec AggregationSpec, dataSources *TopicDataSources, partitions []string) (interface{}, error) { - var globalMin interface{} - var globalMinValue *schema_pb.Value - hasParquetStats := false - - // Step 1: Get minimum from parquet statistics - for _, fileStats := range dataSources.ParquetFiles { - for _, fileStat := range fileStats { - // Try case-insensitive column lookup - var colStats *ParquetColumnStats - var found bool - - // First try exact match - if stats, exists := fileStat.ColumnStats[spec.Column]; exists { - colStats = stats - found = true - } else { - // Try case-insensitive lookup - for colName, stats := range fileStat.ColumnStats { - if strings.EqualFold(colName, spec.Column) { - colStats = stats - found = true - break - } - } - } - - if found && colStats != nil && colStats.MinValue != nil { - if globalMinValue == nil || comp.engine.compareValues(colStats.MinValue, globalMinValue) < 0 { - globalMinValue = colStats.MinValue - extractedValue := comp.engine.extractRawValue(colStats.MinValue) - if extractedValue != nil { - globalMin = extractedValue - hasParquetStats = true - } - } - } - } - } - - // Step 2: Get minimum from live log data (only if no live logs or if we need to compare) - if dataSources.LiveLogRowCount > 0 { - for _, partition := range partitions { - partitionParquetSources := make(map[string]bool) - if partitionFileStats, exists := dataSources.ParquetFiles[partition]; exists { - partitionParquetSources = comp.engine.extractParquetSourceFiles(partitionFileStats) - } - - liveLogMin, _, err := comp.engine.computeLiveLogMinMax(partition, spec.Column, partitionParquetSources) - if err != nil { - continue // Skip partitions with errors - } - - if liveLogMin != nil { - if globalMin == nil { - globalMin = liveLogMin - } else { - liveLogSchemaValue := comp.engine.convertRawValueToSchemaValue(liveLogMin) - if liveLogSchemaValue != nil && comp.engine.compareValues(liveLogSchemaValue, globalMinValue) < 0 { - globalMin = liveLogMin - globalMinValue = liveLogSchemaValue - } - } - } - } - } - - // Step 3: Handle system columns if no regular data found - if globalMin == nil && !hasParquetStats { - globalMin = comp.engine.getSystemColumnGlobalMin(spec.Column, dataSources.ParquetFiles) - } - - return globalMin, nil -} - -// computeGlobalMax computes the global maximum value across all data sources -func (comp *AggregationComputer) computeGlobalMax(spec AggregationSpec, dataSources *TopicDataSources, partitions []string) (interface{}, error) { - var globalMax interface{} - var globalMaxValue *schema_pb.Value - hasParquetStats := false - - // Step 1: Get maximum from parquet statistics - for _, fileStats := range dataSources.ParquetFiles { - for _, fileStat := range fileStats { - // Try case-insensitive column lookup - var colStats *ParquetColumnStats - var found bool - - // First try exact match - if stats, exists := fileStat.ColumnStats[spec.Column]; exists { - colStats = stats - found = true - } else { - // Try case-insensitive lookup - for colName, stats := range fileStat.ColumnStats { - if strings.EqualFold(colName, spec.Column) { - colStats = stats - found = true - break - } - } - } - - if found && colStats != nil && colStats.MaxValue != nil { - if globalMaxValue == nil || comp.engine.compareValues(colStats.MaxValue, globalMaxValue) > 0 { - globalMaxValue = colStats.MaxValue - extractedValue := comp.engine.extractRawValue(colStats.MaxValue) - if extractedValue != nil { - globalMax = extractedValue - hasParquetStats = true - } - } - } - } - } - - // Step 2: Get maximum from live log data (only if live logs exist) - if dataSources.LiveLogRowCount > 0 { - for _, partition := range partitions { - partitionParquetSources := make(map[string]bool) - if partitionFileStats, exists := dataSources.ParquetFiles[partition]; exists { - partitionParquetSources = comp.engine.extractParquetSourceFiles(partitionFileStats) - } - - _, liveLogMax, err := comp.engine.computeLiveLogMinMax(partition, spec.Column, partitionParquetSources) - if err != nil { - continue // Skip partitions with errors - } - - if liveLogMax != nil { - if globalMax == nil { - globalMax = liveLogMax - } else { - liveLogSchemaValue := comp.engine.convertRawValueToSchemaValue(liveLogMax) - if liveLogSchemaValue != nil && comp.engine.compareValues(liveLogSchemaValue, globalMaxValue) > 0 { - globalMax = liveLogMax - globalMaxValue = liveLogSchemaValue - } - } - } - } - } - - // Step 3: Handle system columns if no regular data found - if globalMax == nil && !hasParquetStats { - globalMax = comp.engine.getSystemColumnGlobalMax(spec.Column, dataSources.ParquetFiles) - } - - return globalMax, nil -} - -// executeAggregationQuery handles SELECT queries with aggregation functions -func (e *SQLEngine) executeAggregationQuery(ctx context.Context, hybridScanner *HybridMessageScanner, aggregations []AggregationSpec, stmt *SelectStatement) (*QueryResult, error) { - return e.executeAggregationQueryWithPlan(ctx, hybridScanner, aggregations, stmt, nil) -} - -// executeAggregationQueryWithPlan handles SELECT queries with aggregation functions and populates execution plan -func (e *SQLEngine) executeAggregationQueryWithPlan(ctx context.Context, hybridScanner *HybridMessageScanner, aggregations []AggregationSpec, stmt *SelectStatement, plan *QueryExecutionPlan) (*QueryResult, error) { - // Parse LIMIT and OFFSET for aggregation results (do this first) - // Use -1 to distinguish "no LIMIT" from "LIMIT 0" - limit := -1 - offset := 0 - if stmt.Limit != nil && stmt.Limit.Rowcount != nil { - if limitExpr, ok := stmt.Limit.Rowcount.(*SQLVal); ok && limitExpr.Type == IntVal { - if limit64, err := strconv.ParseInt(string(limitExpr.Val), 10, 64); err == nil { - if limit64 > int64(math.MaxInt) || limit64 < 0 { - return nil, fmt.Errorf("LIMIT value %d is out of range", limit64) - } - // Safe conversion after bounds check - limit = int(limit64) - } - } - } - if stmt.Limit != nil && stmt.Limit.Offset != nil { - if offsetExpr, ok := stmt.Limit.Offset.(*SQLVal); ok && offsetExpr.Type == IntVal { - if offset64, err := strconv.ParseInt(string(offsetExpr.Val), 10, 64); err == nil { - if offset64 > int64(math.MaxInt) || offset64 < 0 { - return nil, fmt.Errorf("OFFSET value %d is out of range", offset64) - } - // Safe conversion after bounds check - offset = int(offset64) - } - } - } - - // Parse WHERE clause for filtering - var predicate func(*schema_pb.RecordValue) bool - var err error - if stmt.Where != nil { - predicate, err = e.buildPredicate(stmt.Where.Expr) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Extract time filters and validate that WHERE clause contains only time-based predicates - startTimeNs, stopTimeNs := int64(0), int64(0) - onlyTimePredicates := true - if stmt.Where != nil { - startTimeNs, stopTimeNs, onlyTimePredicates = e.extractTimeFiltersWithValidation(stmt.Where.Expr) - } - - // FAST PATH WITH TIME-BASED OPTIMIZATION: - // Allow fast path only for queries without WHERE clause or with time-only WHERE clauses - // This prevents incorrect results when non-time predicates are present - canAttemptFastPath := stmt.Where == nil || onlyTimePredicates - - if canAttemptFastPath { - if isDebugMode(ctx) { - if stmt.Where == nil { - fmt.Printf("\nFast path optimization attempt (no WHERE clause)...\n") - } else { - fmt.Printf("\nFast path optimization attempt (time-only WHERE clause)...\n") - } - } - fastResult, canOptimize := e.tryFastParquetAggregationWithPlan(ctx, hybridScanner, aggregations, plan, startTimeNs, stopTimeNs, stmt) - if canOptimize { - if isDebugMode(ctx) { - fmt.Printf("Fast path optimization succeeded!\n") - } - return fastResult, nil - } else { - if isDebugMode(ctx) { - fmt.Printf("Fast path optimization failed, falling back to slow path\n") - } - } - } else { - if isDebugMode(ctx) { - fmt.Printf("Fast path not applicable due to complex WHERE clause\n") - } - } - - // SLOW PATH: Fall back to full table scan - if isDebugMode(ctx) { - fmt.Printf("Using full table scan for aggregation (parquet optimization not applicable)\n") - } - - // Extract columns needed for aggregations - columnsNeeded := make(map[string]bool) - for _, spec := range aggregations { - if spec.Column != "*" { - columnsNeeded[spec.Column] = true - } - } - - // Convert to slice - var scanColumns []string - if len(columnsNeeded) > 0 { - scanColumns = make([]string, 0, len(columnsNeeded)) - for col := range columnsNeeded { - scanColumns = append(scanColumns, col) - } - } - // If no specific columns needed (COUNT(*) only), don't specify columns (scan all) - - // Build scan options for full table scan (aggregations need all data during scanning) - hybridScanOptions := HybridScanOptions{ - StartTimeNs: startTimeNs, - StopTimeNs: stopTimeNs, - Limit: -1, // Use -1 to mean "no limit" - need all data for aggregation - Offset: 0, // No offset during scanning - OFFSET applies to final results - Predicate: predicate, - Columns: scanColumns, // Include columns needed for aggregation functions - } - - // DEBUG: Log scan options for aggregation - debugHybridScanOptions(ctx, hybridScanOptions, "AGGREGATION") - - // Execute the hybrid scan to get all matching records - var results []HybridScanResult - if plan != nil { - // EXPLAIN mode - capture broker buffer stats - var stats *HybridScanStats - results, stats, err = hybridScanner.ScanWithStats(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Populate plan with broker buffer information - if stats != nil { - plan.BrokerBufferQueried = stats.BrokerBufferQueried - plan.BrokerBufferMessages = stats.BrokerBufferMessages - plan.BufferStartIndex = stats.BufferStartIndex - - // Add broker_buffer to data sources if buffer was queried - if stats.BrokerBufferQueried { - // Check if broker_buffer is already in data sources - hasBrokerBuffer := false - for _, source := range plan.DataSources { - if source == "broker_buffer" { - hasBrokerBuffer = true - break - } - } - if !hasBrokerBuffer { - plan.DataSources = append(plan.DataSources, "broker_buffer") - } - } - } - } else { - // Normal mode - just get results - results, err = hybridScanner.Scan(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // DEBUG: Log scan results - if isDebugMode(ctx) { - fmt.Printf("AGGREGATION SCAN RESULTS: %d rows returned\n", len(results)) - } - - // Compute aggregations - aggResults := e.computeAggregations(results, aggregations) - - // Build result set - columns := make([]string, len(aggregations)) - row := make([]sqltypes.Value, len(aggregations)) - - for i, spec := range aggregations { - columns[i] = spec.Alias - row[i] = e.formatAggregationResult(spec, aggResults[i]) - } - - // Apply OFFSET and LIMIT to aggregation results - // Limit semantics: -1 = no limit, 0 = LIMIT 0 (empty), >0 = limit to N rows - rows := [][]sqltypes.Value{row} - if offset > 0 || limit >= 0 { - // Handle LIMIT 0 first - if limit == 0 { - rows = [][]sqltypes.Value{} - } else { - // Apply OFFSET first - if offset > 0 { - if offset >= len(rows) { - rows = [][]sqltypes.Value{} - } else { - rows = rows[offset:] - } - } - - // Apply LIMIT after OFFSET (only if limit > 0) - if limit > 0 && len(rows) > limit { - rows = rows[:limit] - } - } - } - - result := &QueryResult{ - Columns: columns, - Rows: rows, - } - - // Build execution tree for aggregation queries if plan is provided - if plan != nil { - // Populate detailed plan information for full scan (similar to fast path) - e.populateFullScanPlanDetails(ctx, plan, hybridScanner, stmt) - plan.RootNode = e.buildExecutionTree(plan, stmt) - } - - return result, nil -} - -// populateFullScanPlanDetails populates detailed plan information for full scan queries -// This provides consistency with fast path execution plan details -func (e *SQLEngine) populateFullScanPlanDetails(ctx context.Context, plan *QueryExecutionPlan, hybridScanner *HybridMessageScanner, stmt *SelectStatement) { - // plan.Details is initialized at the start of the SELECT execution - - // Extract table information - var database, tableName string - if len(stmt.From) == 1 { - if table, ok := stmt.From[0].(*AliasedTableExpr); ok { - if tableExpr, ok := table.Expr.(TableName); ok { - tableName = tableExpr.Name.String() - if tableExpr.Qualifier != nil && tableExpr.Qualifier.String() != "" { - database = tableExpr.Qualifier.String() - } - } - } - } - - // Use current database if not specified - if database == "" { - database = e.catalog.currentDatabase - if database == "" { - database = "default" - } - } - - // Discover partitions and populate file details - if partitions, discoverErr := e.discoverTopicPartitions(database, tableName); discoverErr == nil { - // Add partition paths to execution plan details - plan.Details["partition_paths"] = partitions - - // Populate detailed file information using shared helper - e.populatePlanFileDetails(ctx, plan, hybridScanner, partitions, stmt) - } else { - // Record discovery error to plan for better diagnostics - plan.Details["error_partition_discovery"] = discoverErr.Error() - } -} - -// tryFastParquetAggregation attempts to compute aggregations using hybrid approach: -// - Use parquet metadata for parquet files -// - Count live log files for live data -// - Combine both for accurate results per partition -// Returns (result, canOptimize) where canOptimize=true means the hybrid fast path was used -func (e *SQLEngine) tryFastParquetAggregation(ctx context.Context, hybridScanner *HybridMessageScanner, aggregations []AggregationSpec) (*QueryResult, bool) { - return e.tryFastParquetAggregationWithPlan(ctx, hybridScanner, aggregations, nil, 0, 0, nil) -} - -// tryFastParquetAggregationWithPlan is the same as tryFastParquetAggregation but also populates execution plan if provided -// startTimeNs, stopTimeNs: optional time range filters for parquet file optimization (0 means no filtering) -// stmt: SELECT statement for column statistics pruning optimization (can be nil) -func (e *SQLEngine) tryFastParquetAggregationWithPlan(ctx context.Context, hybridScanner *HybridMessageScanner, aggregations []AggregationSpec, plan *QueryExecutionPlan, startTimeNs, stopTimeNs int64, stmt *SelectStatement) (*QueryResult, bool) { - // Use the new modular components - optimizer := NewFastPathOptimizer(e) - computer := NewAggregationComputer(e) - - // Step 1: Determine strategy - strategy := optimizer.DetermineStrategy(aggregations) - if !strategy.CanUseFastPath { - return nil, false - } - - // Step 2: Collect data sources with time filtering for parquet file optimization - dataSources, err := optimizer.CollectDataSourcesWithTimeFilter(ctx, hybridScanner, startTimeNs, stopTimeNs) - if err != nil { - return nil, false - } - - // Build partition list for aggregation computer - // Note: discoverTopicPartitions always returns absolute paths - partitions, err := e.discoverTopicPartitions(hybridScanner.topic.Namespace, hybridScanner.topic.Name) - if err != nil { - return nil, false - } - - // Debug: Show the hybrid optimization results (only in explain mode) - if isDebugMode(ctx) && (dataSources.ParquetRowCount > 0 || dataSources.LiveLogRowCount > 0 || dataSources.BrokerUnflushedCount > 0) { - partitionsWithLiveLogs := 0 - if dataSources.LiveLogRowCount > 0 || dataSources.BrokerUnflushedCount > 0 { - partitionsWithLiveLogs = 1 // Simplified for now - } - fmt.Printf("Hybrid fast aggregation with deduplication: %d parquet rows + %d deduplicated live log rows + %d broker buffer rows from %d partitions\n", - dataSources.ParquetRowCount, dataSources.LiveLogRowCount, dataSources.BrokerUnflushedCount, partitionsWithLiveLogs) - } - - // Step 3: Compute aggregations using fast path - aggResults, err := computer.ComputeFastPathAggregations(ctx, aggregations, dataSources, partitions) - if err != nil { - return nil, false - } - - // Step 3.5: Validate fast path results (safety check) - // For simple COUNT(*) queries, ensure we got a reasonable result - if len(aggregations) == 1 && aggregations[0].Function == FuncCOUNT && aggregations[0].Column == "*" { - totalRows := dataSources.ParquetRowCount + dataSources.LiveLogRowCount + dataSources.BrokerUnflushedCount - countResult := aggResults[0].Count - - if isDebugMode(ctx) { - fmt.Printf("Validating fast path: COUNT=%d, Sources=%d\n", countResult, totalRows) - } - - if totalRows == 0 && countResult > 0 { - // Fast path found data but data sources show 0 - this suggests a bug - if isDebugMode(ctx) { - fmt.Printf("Fast path validation failed: COUNT=%d but sources=0\n", countResult) - } - return nil, false - } - if totalRows > 0 && countResult == 0 { - // Data sources show data but COUNT is 0 - this also suggests a bug - if isDebugMode(ctx) { - fmt.Printf("Fast path validation failed: sources=%d but COUNT=0\n", totalRows) - } - return nil, false - } - if countResult != totalRows { - // Counts don't match - this suggests inconsistent logic - if isDebugMode(ctx) { - fmt.Printf("Fast path validation failed: COUNT=%d != sources=%d\n", countResult, totalRows) - } - return nil, false - } - if isDebugMode(ctx) { - fmt.Printf("Fast path validation passed: COUNT=%d\n", countResult) - } - } - - // Step 4: Populate execution plan if provided (for EXPLAIN queries) - if plan != nil { - strategy := optimizer.DetermineStrategy(aggregations) - builder := &ExecutionPlanBuilder{} - - // Create a minimal SELECT statement for the plan builder (avoid nil pointer) - stmt := &SelectStatement{} - - // Build aggregation plan with fast path strategy - aggPlan := builder.BuildAggregationPlan(stmt, aggregations, strategy, dataSources) - - // Copy relevant fields to the main plan - plan.ExecutionStrategy = aggPlan.ExecutionStrategy - plan.DataSources = aggPlan.DataSources - plan.OptimizationsUsed = aggPlan.OptimizationsUsed - plan.PartitionsScanned = aggPlan.PartitionsScanned - plan.ParquetFilesScanned = aggPlan.ParquetFilesScanned - plan.LiveLogFilesScanned = aggPlan.LiveLogFilesScanned - plan.TotalRowsProcessed = aggPlan.TotalRowsProcessed - plan.Aggregations = aggPlan.Aggregations - - // Indicate broker buffer participation for EXPLAIN tree rendering - if dataSources.BrokerUnflushedCount > 0 { - plan.BrokerBufferQueried = true - plan.BrokerBufferMessages = int(dataSources.BrokerUnflushedCount) - } - - // Merge details while preserving existing ones - for key, value := range aggPlan.Details { - plan.Details[key] = value - } - - // Add file path information from the data collection - plan.Details["partition_paths"] = partitions - - // Populate detailed file information using shared helper, including time filters for pruning - plan.Details[PlanDetailStartTimeNs] = startTimeNs - plan.Details[PlanDetailStopTimeNs] = stopTimeNs - e.populatePlanFileDetails(ctx, plan, hybridScanner, partitions, stmt) - - // Update counts to match discovered live log files - if liveLogFiles, ok := plan.Details["live_log_files"].([]string); ok { - dataSources.LiveLogFilesCount = len(liveLogFiles) - plan.LiveLogFilesScanned = len(liveLogFiles) - } - - // Ensure PartitionsScanned is set so Statistics section appears - if plan.PartitionsScanned == 0 && len(partitions) > 0 { - plan.PartitionsScanned = len(partitions) - } - - if isDebugMode(ctx) { - fmt.Printf("Populated execution plan with fast path strategy\n") - } - } - - // Step 5: Build final query result - columns := make([]string, len(aggregations)) - row := make([]sqltypes.Value, len(aggregations)) - - for i, spec := range aggregations { - columns[i] = spec.Alias - row[i] = e.formatAggregationResult(spec, aggResults[i]) - } - - result := &QueryResult{ - Columns: columns, - Rows: [][]sqltypes.Value{row}, - } - - return result, true -} - -// computeAggregations computes aggregation results from a full table scan -func (e *SQLEngine) computeAggregations(results []HybridScanResult, aggregations []AggregationSpec) []AggregationResult { - aggResults := make([]AggregationResult, len(aggregations)) - - for i, spec := range aggregations { - switch spec.Function { - case FuncCOUNT: - if spec.Column == "*" { - aggResults[i].Count = int64(len(results)) - } else { - count := int64(0) - for _, result := range results { - if value := e.findColumnValue(result, spec.Column); value != nil && !e.isNullValue(value) { - count++ - } - } - aggResults[i].Count = count - } - - case FuncSUM: - sum := float64(0) - for _, result := range results { - if value := e.findColumnValue(result, spec.Column); value != nil { - if numValue := e.convertToNumber(value); numValue != nil { - sum += *numValue - } - } - } - aggResults[i].Sum = sum - - case FuncAVG: - sum := float64(0) - count := int64(0) - for _, result := range results { - if value := e.findColumnValue(result, spec.Column); value != nil { - if numValue := e.convertToNumber(value); numValue != nil { - sum += *numValue - count++ - } - } - } - if count > 0 { - aggResults[i].Sum = sum / float64(count) // Store average in Sum field - aggResults[i].Count = count - } - - case FuncMIN: - var min interface{} - var minValue *schema_pb.Value - for _, result := range results { - if value := e.findColumnValue(result, spec.Column); value != nil { - if minValue == nil || e.compareValues(value, minValue) < 0 { - minValue = value - min = e.extractRawValue(value) - } - } - } - aggResults[i].Min = min - - case FuncMAX: - var max interface{} - var maxValue *schema_pb.Value - for _, result := range results { - if value := e.findColumnValue(result, spec.Column); value != nil { - if maxValue == nil || e.compareValues(value, maxValue) > 0 { - maxValue = value - max = e.extractRawValue(value) - } - } - } - aggResults[i].Max = max - } - } - - return aggResults -} - -// canUseParquetStatsForAggregation determines if an aggregation can be optimized with parquet stats -func (e *SQLEngine) canUseParquetStatsForAggregation(spec AggregationSpec) bool { - switch spec.Function { - case FuncCOUNT: - return spec.Column == "*" || e.isSystemColumn(spec.Column) || e.isRegularColumn(spec.Column) - case FuncMIN, FuncMAX: - return e.isSystemColumn(spec.Column) || e.isRegularColumn(spec.Column) - case FuncSUM, FuncAVG: - // These require scanning actual values, not just min/max - return false - default: - return false - } -} - -// debugHybridScanOptions logs the exact scan options being used -func debugHybridScanOptions(ctx context.Context, options HybridScanOptions, queryType string) { - if isDebugMode(ctx) { - fmt.Printf("\n=== HYBRID SCAN OPTIONS DEBUG (%s) ===\n", queryType) - fmt.Printf("StartTimeNs: %d\n", options.StartTimeNs) - fmt.Printf("StopTimeNs: %d\n", options.StopTimeNs) - fmt.Printf("Limit: %d\n", options.Limit) - fmt.Printf("Offset: %d\n", options.Offset) - fmt.Printf("Predicate: %v\n", options.Predicate != nil) - fmt.Printf("Columns: %v\n", options.Columns) - fmt.Printf("==========================================\n") - } -} diff --git a/weed/query/engine/alias_timestamp_integration_test.go b/weed/query/engine/alias_timestamp_integration_test.go deleted file mode 100644 index d175d4cf5..000000000 --- a/weed/query/engine/alias_timestamp_integration_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package engine - -import ( - "strconv" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestAliasTimestampIntegration tests that SQL aliases work correctly with timestamp query fixes -func TestAliasTimestampIntegration(t *testing.T) { - engine := NewTestSQLEngine() - - // Use the exact timestamps from the original failing production queries - originalFailingTimestamps := []int64{ - 1756947416566456262, // Original failing query 1 - 1756947416566439304, // Original failing query 2 - 1756913789829292386, // Current data timestamp - } - - t.Run("AliasWithLargeTimestamps", func(t *testing.T) { - for i, timestamp := range originalFailingTimestamps { - t.Run("Timestamp_"+strconv.Itoa(i+1), func(t *testing.T) { - // Create test record - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: int64(1000 + i)}}, - }, - } - - // Test equality with alias (this was the originally failing pattern) - sql := "SELECT _ts_ns AS ts, id FROM test WHERE ts = " + strconv.FormatInt(timestamp, 10) - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse alias equality query for timestamp %d", timestamp) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for large timestamp with alias") - - result := predicate(testRecord) - assert.True(t, result, "Should match exact large timestamp using alias") - - // Test precision - off by 1 nanosecond should not match - sqlOffBy1 := "SELECT _ts_ns AS ts, id FROM test WHERE ts = " + strconv.FormatInt(timestamp+1, 10) - stmt2, err := ParseSQL(sqlOffBy1) - assert.NoError(t, err) - selectStmt2 := stmt2.(*SelectStatement) - predicate2, err := engine.buildPredicateWithContext(selectStmt2.Where.Expr, selectStmt2.SelectExprs) - assert.NoError(t, err) - - result2 := predicate2(testRecord) - assert.False(t, result2, "Should not match timestamp off by 1 nanosecond with alias") - }) - } - }) - - t.Run("AliasWithTimestampRangeQueries", func(t *testing.T) { - timestamp := int64(1756947416566456262) - - testRecords := []*schema_pb.RecordValue{ - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp - 2}}, // Before range - }, - }, - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp}}, // In range - }, - }, - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp + 2}}, // After range - }, - }, - } - - // Test range query with alias - sql := "SELECT _ts_ns AS ts FROM test WHERE ts >= " + - strconv.FormatInt(timestamp-1, 10) + " AND ts <= " + - strconv.FormatInt(timestamp+1, 10) - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse range query with alias") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build range predicate with alias") - - // Test each record - assert.False(t, predicate(testRecords[0]), "Should not match record before range") - assert.True(t, predicate(testRecords[1]), "Should match record in range") - assert.False(t, predicate(testRecords[2]), "Should not match record after range") - }) - - t.Run("AliasWithTimestampPrecisionEdgeCases", func(t *testing.T) { - // Test maximum int64 value - maxInt64 := int64(9223372036854775807) - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: maxInt64}}, - }, - } - - // Test with alias - sql := "SELECT _ts_ns AS ts FROM test WHERE ts = " + strconv.FormatInt(maxInt64, 10) - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse max int64 with alias") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for max int64 with alias") - - result := predicate(testRecord) - assert.True(t, result, "Should handle max int64 value correctly with alias") - - // Test minimum value - minInt64 := int64(-9223372036854775808) - testRecord2 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: minInt64}}, - }, - } - - sql2 := "SELECT _ts_ns AS ts FROM test WHERE ts = " + strconv.FormatInt(minInt64, 10) - stmt2, err := ParseSQL(sql2) - assert.NoError(t, err) - selectStmt2 := stmt2.(*SelectStatement) - predicate2, err := engine.buildPredicateWithContext(selectStmt2.Where.Expr, selectStmt2.SelectExprs) - assert.NoError(t, err) - - result2 := predicate2(testRecord2) - assert.True(t, result2, "Should handle min int64 value correctly with alias") - }) - - t.Run("MultipleAliasesWithTimestamps", func(t *testing.T) { - // Test multiple aliases including timestamps - timestamp1 := int64(1756947416566456262) - timestamp2 := int64(1756913789829292386) - - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp1}}, - "created_at": {Kind: &schema_pb.Value_Int64Value{Int64Value: timestamp2}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - }, - } - - // Use multiple timestamp aliases in WHERE - sql := "SELECT _ts_ns AS event_time, created_at AS created_time, id AS record_id FROM test " + - "WHERE event_time = " + strconv.FormatInt(timestamp1, 10) + - " AND created_time = " + strconv.FormatInt(timestamp2, 10) + - " AND record_id = 12345" - - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse complex query with multiple timestamp aliases") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for multiple timestamp aliases") - - result := predicate(testRecord) - assert.True(t, result, "Should match complex query with multiple timestamp aliases") - }) - - t.Run("CompatibilityWithExistingTimestampFixes", func(t *testing.T) { - // Verify that all the timestamp fixes (precision, scan boundaries, etc.) still work with aliases - largeTimestamp := int64(1756947416566456262) - - // Test all comparison operators with aliases - operators := []struct { - sql string - value int64 - expected bool - }{ - {"ts = " + strconv.FormatInt(largeTimestamp, 10), largeTimestamp, true}, - {"ts = " + strconv.FormatInt(largeTimestamp+1, 10), largeTimestamp, false}, - {"ts > " + strconv.FormatInt(largeTimestamp-1, 10), largeTimestamp, true}, - {"ts > " + strconv.FormatInt(largeTimestamp, 10), largeTimestamp, false}, - {"ts >= " + strconv.FormatInt(largeTimestamp, 10), largeTimestamp, true}, - {"ts >= " + strconv.FormatInt(largeTimestamp+1, 10), largeTimestamp, false}, - {"ts < " + strconv.FormatInt(largeTimestamp+1, 10), largeTimestamp, true}, - {"ts < " + strconv.FormatInt(largeTimestamp, 10), largeTimestamp, false}, - {"ts <= " + strconv.FormatInt(largeTimestamp, 10), largeTimestamp, true}, - {"ts <= " + strconv.FormatInt(largeTimestamp-1, 10), largeTimestamp, false}, - } - - for _, op := range operators { - t.Run(op.sql, func(t *testing.T) { - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: op.value}}, - }, - } - - sql := "SELECT _ts_ns AS ts FROM test WHERE " + op.sql - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse: %s", op.sql) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for: %s", op.sql) - - result := predicate(testRecord) - assert.Equal(t, op.expected, result, "Alias operator test failed for: %s", op.sql) - }) - } - }) - - t.Run("ProductionScenarioReproduction", func(t *testing.T) { - // Reproduce the exact production scenario that was originally failing - - // This was the original failing pattern from the user - originalFailingSQL := "select id, _ts_ns as ts from ecommerce.user_events where ts = 1756913789829292386" - - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756913789829292386}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 82460}}, - }, - } - - stmt, err := ParseSQL(originalFailingSQL) - assert.NoError(t, err, "Should parse the exact originally failing production query") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for original failing query") - - result := predicate(testRecord) - assert.True(t, result, "The originally failing production query should now work perfectly") - - // Also test the other originally failing timestamp - originalFailingSQL2 := "select id, _ts_ns as ts from ecommerce.user_events where ts = 1756947416566456262" - testRecord2 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - stmt2, err := ParseSQL(originalFailingSQL2) - assert.NoError(t, err) - selectStmt2 := stmt2.(*SelectStatement) - predicate2, err := engine.buildPredicateWithContext(selectStmt2.Where.Expr, selectStmt2.SelectExprs) - assert.NoError(t, err) - - result2 := predicate2(testRecord2) - assert.True(t, result2, "The second originally failing production query should now work perfectly") - }) -} diff --git a/weed/query/engine/arithmetic_functions.go b/weed/query/engine/arithmetic_functions.go deleted file mode 100644 index e2237e31b..000000000 --- a/weed/query/engine/arithmetic_functions.go +++ /dev/null @@ -1,218 +0,0 @@ -package engine - -import ( - "fmt" - "math" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// =============================== -// ARITHMETIC OPERATORS -// =============================== - -// ArithmeticOperator represents basic arithmetic operations -type ArithmeticOperator string - -const ( - OpAdd ArithmeticOperator = "+" - OpSub ArithmeticOperator = "-" - OpMul ArithmeticOperator = "*" - OpDiv ArithmeticOperator = "/" - OpMod ArithmeticOperator = "%" -) - -// EvaluateArithmeticExpression evaluates basic arithmetic operations between two values -func (e *SQLEngine) EvaluateArithmeticExpression(left, right *schema_pb.Value, operator ArithmeticOperator) (*schema_pb.Value, error) { - if left == nil || right == nil { - return nil, fmt.Errorf("arithmetic operation requires non-null operands") - } - - // Convert values to numeric types for calculation - leftNum, err := e.valueToFloat64(left) - if err != nil { - return nil, fmt.Errorf("left operand conversion error: %v", err) - } - - rightNum, err := e.valueToFloat64(right) - if err != nil { - return nil, fmt.Errorf("right operand conversion error: %v", err) - } - - var result float64 - var resultErr error - - switch operator { - case OpAdd: - result = leftNum + rightNum - case OpSub: - result = leftNum - rightNum - case OpMul: - result = leftNum * rightNum - case OpDiv: - if rightNum == 0 { - return nil, fmt.Errorf("division by zero") - } - result = leftNum / rightNum - case OpMod: - if rightNum == 0 { - return nil, fmt.Errorf("modulo by zero") - } - result = math.Mod(leftNum, rightNum) - default: - return nil, fmt.Errorf("unsupported arithmetic operator: %s", operator) - } - - if resultErr != nil { - return nil, resultErr - } - - // Convert result back to appropriate schema value type - // If both operands were integers and operation doesn't produce decimal, return integer - if e.isIntegerValue(left) && e.isIntegerValue(right) && - (operator == OpAdd || operator == OpSub || operator == OpMul || operator == OpMod) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(result)}, - }, nil - } - - // Otherwise return as double/float - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: result}, - }, nil -} - -// Add evaluates addition (left + right) -func (e *SQLEngine) Add(left, right *schema_pb.Value) (*schema_pb.Value, error) { - return e.EvaluateArithmeticExpression(left, right, OpAdd) -} - -// Subtract evaluates subtraction (left - right) -func (e *SQLEngine) Subtract(left, right *schema_pb.Value) (*schema_pb.Value, error) { - return e.EvaluateArithmeticExpression(left, right, OpSub) -} - -// Multiply evaluates multiplication (left * right) -func (e *SQLEngine) Multiply(left, right *schema_pb.Value) (*schema_pb.Value, error) { - return e.EvaluateArithmeticExpression(left, right, OpMul) -} - -// Divide evaluates division (left / right) -func (e *SQLEngine) Divide(left, right *schema_pb.Value) (*schema_pb.Value, error) { - return e.EvaluateArithmeticExpression(left, right, OpDiv) -} - -// Modulo evaluates modulo operation (left % right) -func (e *SQLEngine) Modulo(left, right *schema_pb.Value) (*schema_pb.Value, error) { - return e.EvaluateArithmeticExpression(left, right, OpMod) -} - -// =============================== -// MATHEMATICAL FUNCTIONS -// =============================== - -// Round rounds a numeric value to the nearest integer or specified decimal places -func (e *SQLEngine) Round(value *schema_pb.Value, precision ...*schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("ROUND function requires non-null value") - } - - num, err := e.valueToFloat64(value) - if err != nil { - return nil, fmt.Errorf("ROUND function conversion error: %v", err) - } - - // Default precision is 0 (round to integer) - precisionValue := 0 - if len(precision) > 0 && precision[0] != nil { - precFloat, err := e.valueToFloat64(precision[0]) - if err != nil { - return nil, fmt.Errorf("ROUND precision conversion error: %v", err) - } - precisionValue = int(precFloat) - } - - // Apply rounding - multiplier := math.Pow(10, float64(precisionValue)) - rounded := math.Round(num*multiplier) / multiplier - - // Return as integer if precision is 0 and original was integer, otherwise as double - if precisionValue == 0 && e.isIntegerValue(value) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(rounded)}, - }, nil - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: rounded}, - }, nil -} - -// Ceil returns the smallest integer greater than or equal to the value -func (e *SQLEngine) Ceil(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("CEIL function requires non-null value") - } - - num, err := e.valueToFloat64(value) - if err != nil { - return nil, fmt.Errorf("CEIL function conversion error: %v", err) - } - - result := math.Ceil(num) - - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(result)}, - }, nil -} - -// Floor returns the largest integer less than or equal to the value -func (e *SQLEngine) Floor(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("FLOOR function requires non-null value") - } - - num, err := e.valueToFloat64(value) - if err != nil { - return nil, fmt.Errorf("FLOOR function conversion error: %v", err) - } - - result := math.Floor(num) - - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(result)}, - }, nil -} - -// Abs returns the absolute value of a number -func (e *SQLEngine) Abs(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("ABS function requires non-null value") - } - - num, err := e.valueToFloat64(value) - if err != nil { - return nil, fmt.Errorf("ABS function conversion error: %v", err) - } - - result := math.Abs(num) - - // Return same type as input if possible - if e.isIntegerValue(value) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(result)}, - }, nil - } - - // Check if original was float32 - if _, ok := value.Kind.(*schema_pb.Value_FloatValue); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_FloatValue{FloatValue: float32(result)}, - }, nil - } - - // Default to double - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: result}, - }, nil -} diff --git a/weed/query/engine/arithmetic_functions_test.go b/weed/query/engine/arithmetic_functions_test.go deleted file mode 100644 index f07ada54f..000000000 --- a/weed/query/engine/arithmetic_functions_test.go +++ /dev/null @@ -1,530 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestArithmeticOperations(t *testing.T) { - engine := NewTestSQLEngine() - - tests := []struct { - name string - left *schema_pb.Value - right *schema_pb.Value - operator ArithmeticOperator - expected *schema_pb.Value - expectErr bool - }{ - // Addition tests - { - name: "Add two integers", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpAdd, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 15}}, - expectErr: false, - }, - { - name: "Add integer and float", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 5.5}}, - operator: OpAdd, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 15.5}}, - expectErr: false, - }, - // Subtraction tests - { - name: "Subtract two integers", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 3}}, - operator: OpSub, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 7}}, - expectErr: false, - }, - // Multiplication tests - { - name: "Multiply two integers", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 6}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 7}}, - operator: OpMul, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 42}}, - expectErr: false, - }, - { - name: "Multiply with float", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: 2.5}}, - operator: OpMul, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 12.5}}, - expectErr: false, - }, - // Division tests - { - name: "Divide two integers", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 20}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 4}}, - operator: OpDiv, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 5.0}}, - expectErr: false, - }, - { - name: "Division by zero", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 0}}, - operator: OpDiv, - expected: nil, - expectErr: true, - }, - // Modulo tests - { - name: "Modulo operation", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 17}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpMod, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 2}}, - expectErr: false, - }, - { - name: "Modulo by zero", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 0}}, - operator: OpMod, - expected: nil, - expectErr: true, - }, - // String conversion tests - { - name: "Add string number to integer", - left: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "15"}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpAdd, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 20.0}}, - expectErr: false, - }, - { - name: "Invalid string conversion", - left: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "not_a_number"}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpAdd, - expected: nil, - expectErr: true, - }, - // Boolean conversion tests - { - name: "Add boolean to integer", - left: &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: true}}, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpAdd, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 6.0}}, - expectErr: false, - }, - // Null value tests - { - name: "Add with null left operand", - left: nil, - right: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - operator: OpAdd, - expected: nil, - expectErr: true, - }, - { - name: "Add with null right operand", - left: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - right: nil, - operator: OpAdd, - expected: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.EvaluateArithmeticExpression(tt.left, tt.right, tt.operator) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if !valuesEqual(result, tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } -} - -func TestIndividualArithmeticFunctions(t *testing.T) { - engine := NewTestSQLEngine() - - left := &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 10}} - right := &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 3}} - - // Test Add function - result, err := engine.Add(left, right) - if err != nil { - t.Errorf("Add function failed: %v", err) - } - expected := &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 13}} - if !valuesEqual(result, expected) { - t.Errorf("Add: Expected %v, got %v", expected, result) - } - - // Test Subtract function - result, err = engine.Subtract(left, right) - if err != nil { - t.Errorf("Subtract function failed: %v", err) - } - expected = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 7}} - if !valuesEqual(result, expected) { - t.Errorf("Subtract: Expected %v, got %v", expected, result) - } - - // Test Multiply function - result, err = engine.Multiply(left, right) - if err != nil { - t.Errorf("Multiply function failed: %v", err) - } - expected = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 30}} - if !valuesEqual(result, expected) { - t.Errorf("Multiply: Expected %v, got %v", expected, result) - } - - // Test Divide function - result, err = engine.Divide(left, right) - if err != nil { - t.Errorf("Divide function failed: %v", err) - } - expected = &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 10.0 / 3.0}} - if !valuesEqual(result, expected) { - t.Errorf("Divide: Expected %v, got %v", expected, result) - } - - // Test Modulo function - result, err = engine.Modulo(left, right) - if err != nil { - t.Errorf("Modulo function failed: %v", err) - } - expected = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1}} - if !valuesEqual(result, expected) { - t.Errorf("Modulo: Expected %v, got %v", expected, result) - } -} - -func TestMathematicalFunctions(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("ROUND function tests", func(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - precision *schema_pb.Value - expected *schema_pb.Value - expectErr bool - }{ - { - name: "Round float to integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.7}}, - precision: nil, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 4.0}}, - expectErr: false, - }, - { - name: "Round integer stays integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - precision: nil, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expectErr: false, - }, - { - name: "Round with precision 2", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.14159}}, - precision: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 2}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.14}}, - expectErr: false, - }, - { - name: "Round negative number", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: -3.7}}, - precision: nil, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: -4.0}}, - expectErr: false, - }, - { - name: "Round null value", - value: nil, - precision: nil, - expected: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var result *schema_pb.Value - var err error - - if tt.precision != nil { - result, err = engine.Round(tt.value, tt.precision) - } else { - result, err = engine.Round(tt.value) - } - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if !valuesEqual(result, tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } - }) - - t.Run("CEIL function tests", func(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected *schema_pb.Value - expectErr bool - }{ - { - name: "Ceil positive decimal", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.2}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 4}}, - expectErr: false, - }, - { - name: "Ceil negative decimal", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: -3.2}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: -3}}, - expectErr: false, - }, - { - name: "Ceil integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expectErr: false, - }, - { - name: "Ceil null value", - value: nil, - expected: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Ceil(tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if !valuesEqual(result, tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } - }) - - t.Run("FLOOR function tests", func(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected *schema_pb.Value - expectErr bool - }{ - { - name: "Floor positive decimal", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.8}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 3}}, - expectErr: false, - }, - { - name: "Floor negative decimal", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: -3.2}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: -4}}, - expectErr: false, - }, - { - name: "Floor integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expectErr: false, - }, - { - name: "Floor null value", - value: nil, - expected: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Floor(tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if !valuesEqual(result, tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } - }) - - t.Run("ABS function tests", func(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected *schema_pb.Value - expectErr bool - }{ - { - name: "Abs positive integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expectErr: false, - }, - { - name: "Abs negative integer", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: -5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - expectErr: false, - }, - { - name: "Abs positive double", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.14}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.14}}, - expectErr: false, - }, - { - name: "Abs negative double", - value: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: -3.14}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: 3.14}}, - expectErr: false, - }, - { - name: "Abs positive float", - value: &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: 2.5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: 2.5}}, - expectErr: false, - }, - { - name: "Abs negative float", - value: &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: -2.5}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: 2.5}}, - expectErr: false, - }, - { - name: "Abs zero", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 0}}, - expected: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 0}}, - expectErr: false, - }, - { - name: "Abs null value", - value: nil, - expected: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Abs(tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if !valuesEqual(result, tt.expected) { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } - }) -} - -// Helper function to compare two schema_pb.Value objects -func valuesEqual(v1, v2 *schema_pb.Value) bool { - if v1 == nil && v2 == nil { - return true - } - if v1 == nil || v2 == nil { - return false - } - - switch v1Kind := v1.Kind.(type) { - case *schema_pb.Value_Int32Value: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_Int32Value); ok { - return v1Kind.Int32Value == v2Kind.Int32Value - } - case *schema_pb.Value_Int64Value: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_Int64Value); ok { - return v1Kind.Int64Value == v2Kind.Int64Value - } - case *schema_pb.Value_FloatValue: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_FloatValue); ok { - return v1Kind.FloatValue == v2Kind.FloatValue - } - case *schema_pb.Value_DoubleValue: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_DoubleValue); ok { - return v1Kind.DoubleValue == v2Kind.DoubleValue - } - case *schema_pb.Value_StringValue: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_StringValue); ok { - return v1Kind.StringValue == v2Kind.StringValue - } - case *schema_pb.Value_BoolValue: - if v2Kind, ok := v2.Kind.(*schema_pb.Value_BoolValue); ok { - return v1Kind.BoolValue == v2Kind.BoolValue - } - } - - return false -} diff --git a/weed/query/engine/arithmetic_only_execution_test.go b/weed/query/engine/arithmetic_only_execution_test.go deleted file mode 100644 index 1b7cdb34f..000000000 --- a/weed/query/engine/arithmetic_only_execution_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -// TestSQLEngine_ArithmeticOnlyQueryExecution tests the specific fix for queries -// that contain ONLY arithmetic expressions (no base columns) in the SELECT clause. -// This was the root issue reported where such queries returned empty values. -func TestSQLEngine_ArithmeticOnlyQueryExecution(t *testing.T) { - engine := NewTestSQLEngine() - - // Test the core functionality: arithmetic-only queries should return data - tests := []struct { - name string - query string - expectedCols []string - mustNotBeEmpty bool - }{ - { - name: "Basic arithmetic only query", - query: "SELECT id+user_id, id*2 FROM user_events LIMIT 3", - expectedCols: []string{"id+user_id", "id*2"}, - mustNotBeEmpty: true, - }, - { - name: "With LIMIT and OFFSET - original user issue", - query: "SELECT id+user_id, id*2 FROM user_events LIMIT 2 OFFSET 1", - expectedCols: []string{"id+user_id", "id*2"}, - mustNotBeEmpty: true, - }, - { - name: "Multiple arithmetic expressions", - query: "SELECT user_id+100, id-1000 FROM user_events LIMIT 1", - expectedCols: []string{"user_id+100", "id-1000"}, - mustNotBeEmpty: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tt.query) - if err != nil { - t.Fatalf("Query failed: %v", err) - } - if result.Error != nil { - t.Fatalf("Query returned error: %v", result.Error) - } - - // CRITICAL: Verify we got results (the original bug would return empty) - if tt.mustNotBeEmpty && len(result.Rows) == 0 { - t.Fatal("CRITICAL BUG: Query returned no rows - arithmetic-only query fix failed!") - } - - // Verify column count and names - if len(result.Columns) != len(tt.expectedCols) { - t.Errorf("Expected %d columns, got %d", len(tt.expectedCols), len(result.Columns)) - } - - // CRITICAL: Verify no empty/null values (the original bug symptom) - if len(result.Rows) > 0 { - firstRow := result.Rows[0] - for i, val := range firstRow { - if val.IsNull() { - t.Errorf("CRITICAL BUG: Column %d (%s) returned NULL", i, result.Columns[i]) - } - if val.ToString() == "" { - t.Errorf("CRITICAL BUG: Column %d (%s) returned empty string", i, result.Columns[i]) - } - } - } - - // Log success - t.Logf("SUCCESS: %s returned %d rows with calculated values", tt.query, len(result.Rows)) - }) - } -} - -// TestSQLEngine_ArithmeticOnlyQueryBugReproduction tests that the original bug -// (returning empty values) would have failed before our fix -func TestSQLEngine_ArithmeticOnlyQueryBugReproduction(t *testing.T) { - engine := NewTestSQLEngine() - - // This is the EXACT query from the user's bug report - query := "SELECT id+user_id, id*amount, id*2 FROM user_events LIMIT 10 OFFSET 5" - - result, err := engine.ExecuteSQL(context.Background(), query) - if err != nil { - t.Fatalf("Query failed: %v", err) - } - if result.Error != nil { - t.Fatalf("Query returned error: %v", result.Error) - } - - // Key assertions that would fail with the original bug: - - // 1. Must return rows (bug would return 0 rows or empty results) - if len(result.Rows) == 0 { - t.Fatal("CRITICAL: Query returned no rows - the original bug is NOT fixed!") - } - - // 2. Must have expected columns - expectedColumns := []string{"id+user_id", "id*amount", "id*2"} - if len(result.Columns) != len(expectedColumns) { - t.Errorf("Expected %d columns, got %d", len(expectedColumns), len(result.Columns)) - } - - // 3. Must have calculated values, not empty/null - for i, row := range result.Rows { - for j, val := range row { - if val.IsNull() { - t.Errorf("Row %d, Column %d (%s) is NULL - original bug not fixed!", - i, j, result.Columns[j]) - } - if val.ToString() == "" { - t.Errorf("Row %d, Column %d (%s) is empty - original bug not fixed!", - i, j, result.Columns[j]) - } - } - } - - // 4. Verify specific calculations for the OFFSET 5 data - if len(result.Rows) > 0 { - firstRow := result.Rows[0] - // With OFFSET 5, first returned row should be 6th row: id=417224, user_id=7810 - expectedSum := "425034" // 417224 + 7810 - if firstRow[0].ToString() != expectedSum { - t.Errorf("OFFSET 5 calculation wrong: expected id+user_id=%s, got %s", - expectedSum, firstRow[0].ToString()) - } - - expectedDouble := "834448" // 417224 * 2 - if firstRow[2].ToString() != expectedDouble { - t.Errorf("OFFSET 5 calculation wrong: expected id*2=%s, got %s", - expectedDouble, firstRow[2].ToString()) - } - } - - t.Logf("SUCCESS: Arithmetic-only query with OFFSET works correctly!") - t.Logf("Query: %s", query) - t.Logf("Returned %d rows with correct calculations", len(result.Rows)) -} diff --git a/weed/query/engine/arithmetic_test.go b/weed/query/engine/arithmetic_test.go deleted file mode 100644 index 4bf8813c6..000000000 --- a/weed/query/engine/arithmetic_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package engine - -import ( - "fmt" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestArithmeticExpressionParsing(t *testing.T) { - tests := []struct { - name string - expression string - expectNil bool - leftCol string - rightCol string - operator string - }{ - { - name: "simple addition", - expression: "id+user_id", - expectNil: false, - leftCol: "id", - rightCol: "user_id", - operator: "+", - }, - { - name: "simple subtraction", - expression: "col1-col2", - expectNil: false, - leftCol: "col1", - rightCol: "col2", - operator: "-", - }, - { - name: "multiplication with spaces", - expression: "a * b", - expectNil: false, - leftCol: "a", - rightCol: "b", - operator: "*", - }, - { - name: "string concatenation", - expression: "first_name||last_name", - expectNil: false, - leftCol: "first_name", - rightCol: "last_name", - operator: "||", - }, - { - name: "string concatenation with spaces", - expression: "prefix || suffix", - expectNil: false, - leftCol: "prefix", - rightCol: "suffix", - operator: "||", - }, - { - name: "not arithmetic", - expression: "simple_column", - expectNil: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Use CockroachDB parser to parse the expression - cockroachParser := NewCockroachSQLParser() - dummySelect := fmt.Sprintf("SELECT %s", tt.expression) - stmt, err := cockroachParser.ParseSQL(dummySelect) - - var result *ArithmeticExpr - if err == nil { - if selectStmt, ok := stmt.(*SelectStatement); ok && len(selectStmt.SelectExprs) > 0 { - if aliasedExpr, ok := selectStmt.SelectExprs[0].(*AliasedExpr); ok { - if arithmeticExpr, ok := aliasedExpr.Expr.(*ArithmeticExpr); ok { - result = arithmeticExpr - } - } - } - } - - if tt.expectNil { - if result != nil { - t.Errorf("Expected nil for %s, got %v", tt.expression, result) - } - return - } - - if result == nil { - t.Errorf("Expected arithmetic expression for %s, got nil", tt.expression) - return - } - - if result.Operator != tt.operator { - t.Errorf("Expected operator %s, got %s", tt.operator, result.Operator) - } - - // Check left operand - if leftCol, ok := result.Left.(*ColName); ok { - if leftCol.Name.String() != tt.leftCol { - t.Errorf("Expected left column %s, got %s", tt.leftCol, leftCol.Name.String()) - } - } else { - t.Errorf("Expected left operand to be ColName, got %T", result.Left) - } - - // Check right operand - if rightCol, ok := result.Right.(*ColName); ok { - if rightCol.Name.String() != tt.rightCol { - t.Errorf("Expected right column %s, got %s", tt.rightCol, rightCol.Name.String()) - } - } else { - t.Errorf("Expected right operand to be ColName, got %T", result.Right) - } - }) - } -} - -func TestArithmeticExpressionEvaluation(t *testing.T) { - engine := NewSQLEngine("") - - // Create test data - result := HybridScanResult{ - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 10}}, - "user_id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}, - "price": {Kind: &schema_pb.Value_DoubleValue{DoubleValue: 25.5}}, - "qty": {Kind: &schema_pb.Value_Int64Value{Int64Value: 3}}, - "first_name": {Kind: &schema_pb.Value_StringValue{StringValue: "John"}}, - "last_name": {Kind: &schema_pb.Value_StringValue{StringValue: "Doe"}}, - "prefix": {Kind: &schema_pb.Value_StringValue{StringValue: "Hello"}}, - "suffix": {Kind: &schema_pb.Value_StringValue{StringValue: "World"}}, - }, - } - - tests := []struct { - name string - expression string - expected interface{} - }{ - { - name: "integer addition", - expression: "id+user_id", - expected: int64(15), - }, - { - name: "integer subtraction", - expression: "id-user_id", - expected: int64(5), - }, - { - name: "mixed types multiplication", - expression: "price*qty", - expected: float64(76.5), - }, - { - name: "string concatenation", - expression: "first_name||last_name", - expected: "JohnDoe", - }, - { - name: "string concatenation with spaces", - expression: "prefix || suffix", - expected: "HelloWorld", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Parse the arithmetic expression using CockroachDB parser - cockroachParser := NewCockroachSQLParser() - dummySelect := fmt.Sprintf("SELECT %s", tt.expression) - stmt, err := cockroachParser.ParseSQL(dummySelect) - if err != nil { - t.Fatalf("Failed to parse expression %s: %v", tt.expression, err) - } - - var arithmeticExpr *ArithmeticExpr - if selectStmt, ok := stmt.(*SelectStatement); ok && len(selectStmt.SelectExprs) > 0 { - if aliasedExpr, ok := selectStmt.SelectExprs[0].(*AliasedExpr); ok { - if arithExpr, ok := aliasedExpr.Expr.(*ArithmeticExpr); ok { - arithmeticExpr = arithExpr - } - } - } - - if arithmeticExpr == nil { - t.Fatalf("Failed to parse arithmetic expression: %s", tt.expression) - } - - // Evaluate the expression - value, err := engine.evaluateArithmeticExpression(arithmeticExpr, result) - if err != nil { - t.Fatalf("Failed to evaluate expression: %v", err) - } - - if value == nil { - t.Fatalf("Got nil value for expression: %s", tt.expression) - } - - // Check the result - switch expected := tt.expected.(type) { - case int64: - if intVal, ok := value.Kind.(*schema_pb.Value_Int64Value); ok { - if intVal.Int64Value != expected { - t.Errorf("Expected %d, got %d", expected, intVal.Int64Value) - } - } else { - t.Errorf("Expected int64 result, got %T", value.Kind) - } - case float64: - if doubleVal, ok := value.Kind.(*schema_pb.Value_DoubleValue); ok { - if doubleVal.DoubleValue != expected { - t.Errorf("Expected %f, got %f", expected, doubleVal.DoubleValue) - } - } else { - t.Errorf("Expected double result, got %T", value.Kind) - } - case string: - if stringVal, ok := value.Kind.(*schema_pb.Value_StringValue); ok { - if stringVal.StringValue != expected { - t.Errorf("Expected %s, got %s", expected, stringVal.StringValue) - } - } else { - t.Errorf("Expected string result, got %T", value.Kind) - } - } - }) - } -} - -func TestSelectArithmeticExpression(t *testing.T) { - // Test parsing a SELECT with arithmetic and string concatenation expressions - stmt, err := ParseSQL("SELECT id+user_id, user_id*2, first_name||last_name FROM test_table") - if err != nil { - t.Fatalf("Failed to parse SQL: %v", err) - } - - selectStmt := stmt.(*SelectStatement) - if len(selectStmt.SelectExprs) != 3 { - t.Fatalf("Expected 3 select expressions, got %d", len(selectStmt.SelectExprs)) - } - - // Check first expression (id+user_id) - aliasedExpr1 := selectStmt.SelectExprs[0].(*AliasedExpr) - if arithmeticExpr1, ok := aliasedExpr1.Expr.(*ArithmeticExpr); ok { - if arithmeticExpr1.Operator != "+" { - t.Errorf("Expected + operator, got %s", arithmeticExpr1.Operator) - } - } else { - t.Errorf("Expected arithmetic expression, got %T", aliasedExpr1.Expr) - } - - // Check second expression (user_id*2) - aliasedExpr2 := selectStmt.SelectExprs[1].(*AliasedExpr) - if arithmeticExpr2, ok := aliasedExpr2.Expr.(*ArithmeticExpr); ok { - if arithmeticExpr2.Operator != "*" { - t.Errorf("Expected * operator, got %s", arithmeticExpr2.Operator) - } - } else { - t.Errorf("Expected arithmetic expression, got %T", aliasedExpr2.Expr) - } - - // Check third expression (first_name||last_name) - aliasedExpr3 := selectStmt.SelectExprs[2].(*AliasedExpr) - if arithmeticExpr3, ok := aliasedExpr3.Expr.(*ArithmeticExpr); ok { - if arithmeticExpr3.Operator != "||" { - t.Errorf("Expected || operator, got %s", arithmeticExpr3.Operator) - } - } else { - t.Errorf("Expected string concatenation expression, got %T", aliasedExpr3.Expr) - } -} diff --git a/weed/query/engine/arithmetic_with_functions_test.go b/weed/query/engine/arithmetic_with_functions_test.go deleted file mode 100644 index 6d0edd8f7..000000000 --- a/weed/query/engine/arithmetic_with_functions_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -// TestArithmeticWithFunctions tests arithmetic operations with function calls -// This validates the complete AST parser and evaluation system for column-level calculations -func TestArithmeticWithFunctions(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - expected string - desc string - }{ - { - name: "Simple function arithmetic", - sql: "SELECT LENGTH('hello') + 10 FROM user_events LIMIT 1", - expected: "15", - desc: "Basic function call with addition", - }, - { - name: "Nested functions with arithmetic", - sql: "SELECT length(trim(' hello world ')) + 12 FROM user_events LIMIT 1", - expected: "23", - desc: "Complex nested functions with arithmetic operation (user's original failing query)", - }, - { - name: "Function subtraction", - sql: "SELECT LENGTH('programming') - 5 FROM user_events LIMIT 1", - expected: "6", - desc: "Function call with subtraction", - }, - { - name: "Function multiplication", - sql: "SELECT LENGTH('test') * 3 FROM user_events LIMIT 1", - expected: "12", - desc: "Function call with multiplication", - }, - { - name: "Multiple nested functions", - sql: "SELECT LENGTH(UPPER(TRIM(' hello '))) FROM user_events LIMIT 1", - expected: "5", - desc: "Triple nested functions", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if err != nil { - t.Errorf("Query failed: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Query result error: %v", result.Error) - return - } - - if len(result.Rows) == 0 { - t.Error("Expected at least one row") - return - } - - actual := result.Rows[0][0].ToString() - - if actual != tc.expected { - t.Errorf("%s: Expected '%s', got '%s'", tc.desc, tc.expected, actual) - } else { - t.Logf("PASS %s: %s โ†’ %s", tc.desc, tc.sql, actual) - } - }) - } -} diff --git a/weed/query/engine/broker_client.go b/weed/query/engine/broker_client.go deleted file mode 100644 index c1b1cab6f..000000000 --- a/weed/query/engine/broker_client.go +++ /dev/null @@ -1,586 +0,0 @@ -package engine - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - jsonpb "google.golang.org/protobuf/encoding/protojson" -) - -// BrokerClient handles communication with SeaweedFS MQ broker -// Implements BrokerClientInterface for production use -// Assumptions: -// 1. Service discovery via master server (discovers filers and brokers) -// 2. gRPC connection with default timeout of 30 seconds -// 3. Topics and namespaces are managed via SeaweedMessaging service -type BrokerClient struct { - masterAddress string - filerAddress string - brokerAddress string - grpcDialOption grpc.DialOption -} - -// NewBrokerClient creates a new MQ broker client -// Uses master HTTP address and converts it to gRPC address for service discovery -func NewBrokerClient(masterHTTPAddress string) *BrokerClient { - // Convert HTTP address to gRPC address using pb.ServerAddress method - httpAddr := pb.ServerAddress(masterHTTPAddress) - masterGRPCAddress := httpAddr.ToGrpcAddress() - - return &BrokerClient{ - masterAddress: masterGRPCAddress, - grpcDialOption: grpc.WithTransportCredentials(insecure.NewCredentials()), - } -} - -// No need for convertHTTPToGRPC - pb.ServerAddress.ToGrpcAddress() already handles this - -// discoverFiler finds a filer from the master server -func (c *BrokerClient) discoverFiler() error { - if c.filerAddress != "" { - return nil // already discovered - } - - conn, err := grpc.NewClient(c.masterAddress, c.grpcDialOption) - if err != nil { - return fmt.Errorf("failed to connect to master at %s: %v", c.masterAddress, err) - } - defer conn.Close() - - client := master_pb.NewSeaweedClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.ListClusterNodes(ctx, &master_pb.ListClusterNodesRequest{ - ClientType: cluster.FilerType, - }) - if err != nil { - return fmt.Errorf("failed to list filers from master: %v", err) - } - - if len(resp.ClusterNodes) == 0 { - return fmt.Errorf("no filers found in cluster") - } - - // Use the first available filer and convert HTTP address to gRPC - filerHTTPAddress := resp.ClusterNodes[0].Address - httpAddr := pb.ServerAddress(filerHTTPAddress) - c.filerAddress = httpAddr.ToGrpcAddress() - - return nil -} - -// findBrokerBalancer discovers the broker balancer using filer lock mechanism -// First discovers filer from master, then uses filer to find broker balancer -func (c *BrokerClient) findBrokerBalancer() error { - if c.brokerAddress != "" { - return nil // already found - } - - // First discover filer from master - if err := c.discoverFiler(); err != nil { - return fmt.Errorf("failed to discover filer: %v", err) - } - - conn, err := grpc.NewClient(c.filerAddress, c.grpcDialOption) - if err != nil { - return fmt.Errorf("failed to connect to filer at %s: %v", c.filerAddress, err) - } - defer conn.Close() - - client := filer_pb.NewSeaweedFilerClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - resp, err := client.FindLockOwner(ctx, &filer_pb.FindLockOwnerRequest{ - Name: pub_balancer.LockBrokerBalancer, - }) - if err != nil { - return fmt.Errorf("failed to find broker balancer: %v", err) - } - - c.brokerAddress = resp.Owner - return nil -} - -// GetFilerClient creates a filer client for accessing MQ data files -// Discovers filer from master if not already known -func (c *BrokerClient) GetFilerClient() (filer_pb.FilerClient, error) { - // Ensure filer is discovered - if err := c.discoverFiler(); err != nil { - return nil, fmt.Errorf("failed to discover filer: %v", err) - } - - return &filerClientImpl{ - filerAddress: c.filerAddress, - grpcDialOption: c.grpcDialOption, - }, nil -} - -// filerClientImpl implements filer_pb.FilerClient interface for MQ data access -type filerClientImpl struct { - filerAddress string - grpcDialOption grpc.DialOption -} - -// WithFilerClient executes a function with a connected filer client -func (f *filerClientImpl) WithFilerClient(followRedirect bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - conn, err := grpc.NewClient(f.filerAddress, f.grpcDialOption) - if err != nil { - return fmt.Errorf("failed to connect to filer at %s: %v", f.filerAddress, err) - } - defer conn.Close() - - client := filer_pb.NewSeaweedFilerClient(conn) - return fn(client) -} - -// AdjustedUrl implements the FilerClient interface (placeholder implementation) -func (f *filerClientImpl) AdjustedUrl(location *filer_pb.Location) string { - return location.Url -} - -// GetDataCenter implements the FilerClient interface (placeholder implementation) -func (f *filerClientImpl) GetDataCenter() string { - // Return empty string as we don't have data center information for this simple client - return "" -} - -// ListNamespaces retrieves all MQ namespaces (databases) from the filer -func (c *BrokerClient) ListNamespaces(ctx context.Context) ([]string, error) { - // Get filer client to list directories under /topics - filerClient, err := c.GetFilerClient() - if err != nil { - return []string{}, fmt.Errorf("failed to get filer client: %v", err) - } - - var namespaces []string - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // List directories under /topics to get namespaces - request := &filer_pb.ListEntriesRequest{ - Directory: "/topics", // filer.TopicsDir constant value - } - - stream, streamErr := client.ListEntries(ctx, request) - if streamErr != nil { - return fmt.Errorf("failed to list topics directory: %v", streamErr) - } - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break // End of stream - } - return fmt.Errorf("failed to receive entry: %v", recvErr) - } - - // Only include directories (namespaces), skip files and system directories (starting with .) - if resp.Entry != nil && resp.Entry.IsDirectory && !strings.HasPrefix(resp.Entry.Name, ".") { - namespaces = append(namespaces, resp.Entry.Name) - } - } - - return nil - }) - - if err != nil { - return []string{}, fmt.Errorf("failed to list namespaces from /topics: %v", err) - } - - // Return actual namespaces found (may be empty if no topics exist) - return namespaces, nil -} - -// ListTopics retrieves all topics in a namespace from the filer -func (c *BrokerClient) ListTopics(ctx context.Context, namespace string) ([]string, error) { - // Get filer client to list directories under /topics/{namespace} - filerClient, err := c.GetFilerClient() - if err != nil { - // Return empty list if filer unavailable - no fallback sample data - return []string{}, nil - } - - var topics []string - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // List directories under /topics/{namespace} to get topics - namespaceDir := fmt.Sprintf("/topics/%s", namespace) - request := &filer_pb.ListEntriesRequest{ - Directory: namespaceDir, - } - - stream, streamErr := client.ListEntries(ctx, request) - if streamErr != nil { - return fmt.Errorf("failed to list namespace directory %s: %v", namespaceDir, streamErr) - } - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break // End of stream - } - return fmt.Errorf("failed to receive entry: %v", recvErr) - } - - // Only include directories (topics), skip files - if resp.Entry != nil && resp.Entry.IsDirectory { - topics = append(topics, resp.Entry.Name) - } - } - - return nil - }) - - if err != nil { - // Return empty list if directory listing fails - no fallback sample data - return []string{}, nil - } - - // Return actual topics found (may be empty if no topics exist in namespace) - return topics, nil -} - -// GetTopicSchema retrieves the flat schema and key columns for a topic -// Returns (flatSchema, keyColumns, schemaFormat, error) -func (c *BrokerClient) GetTopicSchema(ctx context.Context, namespace, topicName string) (*schema_pb.RecordType, []string, string, error) { - // Get filer client to read topic configuration - filerClient, err := c.GetFilerClient() - if err != nil { - return nil, nil, "", fmt.Errorf("failed to get filer client: %v", err) - } - - var flatSchema *schema_pb.RecordType - var keyColumns []string - var schemaFormat string - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Read topic.conf file from /topics/{namespace}/{topic}/topic.conf - topicDir := fmt.Sprintf("/topics/%s/%s", namespace, topicName) - - // First check if topic directory exists - _, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ - Directory: topicDir, - Name: "topic.conf", - }) - if err != nil { - return fmt.Errorf("topic %s.%s not found: %v", namespace, topicName, err) - } - - // Read the topic.conf file content - data, err := filer.ReadInsideFiler(client, topicDir, "topic.conf") - if err != nil { - return fmt.Errorf("failed to read topic.conf for %s.%s: %v", namespace, topicName, err) - } - - // Parse the configuration - conf := &mq_pb.ConfigureTopicResponse{} - if err = jsonpb.Unmarshal(data, conf); err != nil { - return fmt.Errorf("failed to unmarshal topic %s.%s configuration: %v", namespace, topicName, err) - } - - // Extract flat schema, key columns, and schema format - flatSchema = conf.MessageRecordType - keyColumns = conf.KeyColumns - schemaFormat = conf.SchemaFormat - - return nil - }) - - if err != nil { - return nil, nil, "", err - } - - return flatSchema, keyColumns, schemaFormat, nil -} - -// ConfigureTopic creates or modifies a topic using flat schema format -func (c *BrokerClient) ConfigureTopic(ctx context.Context, namespace, topicName string, partitionCount int32, flatSchema *schema_pb.RecordType, keyColumns []string) error { - if err := c.findBrokerBalancer(); err != nil { - return err - } - - conn, err := grpc.NewClient(c.brokerAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return fmt.Errorf("failed to connect to broker at %s: %v", c.brokerAddress, err) - } - defer conn.Close() - - client := mq_pb.NewSeaweedMessagingClient(conn) - - // Create topic configuration using flat schema format - _, err = client.ConfigureTopic(ctx, &mq_pb.ConfigureTopicRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: topicName, - }, - PartitionCount: partitionCount, - MessageRecordType: flatSchema, - KeyColumns: keyColumns, - }) - if err != nil { - return fmt.Errorf("failed to configure topic %s.%s: %v", namespace, topicName, err) - } - - return nil -} - -// DeleteTopic removes a topic and all its data -// Assumption: There's a delete/drop topic method (may need to be implemented in broker) -func (c *BrokerClient) DeleteTopic(ctx context.Context, namespace, topicName string) error { - if err := c.findBrokerBalancer(); err != nil { - return err - } - - // TODO: Implement topic deletion - // This may require a new gRPC method in the broker service - - return fmt.Errorf("topic deletion not yet implemented in broker - need to add DeleteTopic gRPC method") -} - -// ListTopicPartitions discovers the actual partitions for a given topic via MQ broker -func (c *BrokerClient) ListTopicPartitions(ctx context.Context, namespace, topicName string) ([]topic.Partition, error) { - if err := c.findBrokerBalancer(); err != nil { - // Fallback to default partition when broker unavailable - return []topic.Partition{{RangeStart: 0, RangeStop: 1000}}, nil - } - - // Get topic configuration to determine actual partitions - topicObj := topic.Topic{Namespace: namespace, Name: topicName} - - // Use filer client to read topic configuration - filerClient, err := c.GetFilerClient() - if err != nil { - // Fallback to default partition - return []topic.Partition{{RangeStart: 0, RangeStop: 1000}}, nil - } - - var topicConf *mq_pb.ConfigureTopicResponse - err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - topicConf, err = topicObj.ReadConfFile(client) - return err - }) - - if err != nil { - // Topic doesn't exist or can't read config, use default - return []topic.Partition{{RangeStart: 0, RangeStop: 1000}}, nil - } - - // Generate partitions based on topic configuration - partitionCount := int32(4) // Default partition count for topics - if len(topicConf.BrokerPartitionAssignments) > 0 { - partitionCount = int32(len(topicConf.BrokerPartitionAssignments)) - } - - // Create partition ranges - simplified approach - // Each partition covers an equal range of the hash space - rangeSize := topic.PartitionCount / partitionCount - var partitions []topic.Partition - - for i := int32(0); i < partitionCount; i++ { - rangeStart := i * rangeSize - rangeStop := (i + 1) * rangeSize - if i == partitionCount-1 { - // Last partition covers remaining range - rangeStop = topic.PartitionCount - } - - partitions = append(partitions, topic.Partition{ - RangeStart: rangeStart, - RangeStop: rangeStop, - RingSize: topic.PartitionCount, - UnixTimeNs: time.Now().UnixNano(), - }) - } - - return partitions, nil -} - -// GetUnflushedMessages returns only messages that haven't been flushed to disk yet -// Uses buffer_start metadata from disk files for precise deduplication -// This prevents double-counting when combining with disk-based data -func (c *BrokerClient) GetUnflushedMessages(ctx context.Context, namespace, topicName string, partition topic.Partition, startTimeNs int64) ([]*filer_pb.LogEntry, error) { - glog.V(2).Infof("GetUnflushedMessages called for %s/%s, partition: RangeStart=%d, RangeStop=%d", - namespace, topicName, partition.RangeStart, partition.RangeStop) - - // Step 1: Find the broker that hosts this partition - if err := c.findBrokerBalancer(); err != nil { - glog.V(2).Infof("Failed to find broker balancer: %v", err) - // Return empty slice if we can't find broker - prevents double-counting - return []*filer_pb.LogEntry{}, nil - } - glog.V(2).Infof("Found broker at address: %s", c.brokerAddress) - - // Step 2: Connect to broker - conn, err := grpc.NewClient(c.brokerAddress, c.grpcDialOption) - if err != nil { - glog.V(2).Infof("Failed to connect to broker %s: %v", c.brokerAddress, err) - // Return empty slice if connection fails - prevents double-counting - return []*filer_pb.LogEntry{}, nil - } - defer conn.Close() - - client := mq_pb.NewSeaweedMessagingClient(conn) - - // Step 3: For unflushed messages, always start from 0 to get all in-memory data - // The buffer_start metadata in log files uses timestamp-based indices for uniqueness, - // but the broker's LogBuffer uses sequential indices internally (0, 1, 2, 3...) - // For unflushed data queries, we want all messages in the buffer regardless of their - // timestamp-based buffer indices, so we always use 0. - topicObj := topic.Topic{Namespace: namespace, Name: topicName} - partitionPath := topic.PartitionDir(topicObj, partition) - glog.V(2).Infof("Getting buffer start from partition path: %s", partitionPath) - - // Always use 0 for unflushed messages to ensure we get all in-memory data - earliestBufferOffset := int64(0) - glog.V(2).Infof("Using StartBufferOffset=0 for unflushed messages (buffer offsets are sequential internally)") - - // Step 4: Prepare request using buffer offset filtering only - request := &mq_pb.GetUnflushedMessagesRequest{ - Topic: &schema_pb.Topic{ - Namespace: namespace, - Name: topicName, - }, - Partition: &schema_pb.Partition{ - RingSize: partition.RingSize, - RangeStart: partition.RangeStart, - RangeStop: partition.RangeStop, - UnixTimeNs: partition.UnixTimeNs, - }, - StartBufferOffset: earliestBufferOffset, - } - - // Step 5: Call the broker streaming API - glog.V(2).Infof("Calling GetUnflushedMessages gRPC with StartBufferOffset=%d", earliestBufferOffset) - stream, err := client.GetUnflushedMessages(ctx, request) - if err != nil { - glog.V(2).Infof("GetUnflushedMessages gRPC call failed: %v", err) - // Return empty slice if gRPC call fails - prevents double-counting - return []*filer_pb.LogEntry{}, nil - } - - // Step 5: Receive streaming responses - var logEntries []*filer_pb.LogEntry - for { - response, err := stream.Recv() - if err != nil { - // End of stream or error - return what we have to prevent double-counting - break - } - - // Handle error messages - if response.Error != "" { - // Log the error but return empty slice - prevents double-counting - // (In debug mode, this would be visible) - return []*filer_pb.LogEntry{}, nil - } - - // Check for end of stream - if response.EndOfStream { - break - } - - // Convert and collect the message - if response.Message != nil { - logEntries = append(logEntries, &filer_pb.LogEntry{ - TsNs: response.Message.TsNs, - Key: response.Message.Key, - Data: response.Message.Data, - PartitionKeyHash: int32(response.Message.PartitionKeyHash), // Convert uint32 to int32 - }) - } - } - - return logEntries, nil -} - -// getEarliestBufferStart finds the earliest buffer_start index from disk files in the partition -// -// This method handles three scenarios for seamless broker querying: -// 1. Live log files exist: Uses their buffer_start metadata (most recent boundaries) -// 2. Only Parquet files exist: Uses Parquet buffer_start metadata (preserved from archived sources) -// 3. Mixed files: Uses earliest buffer_start from all sources for comprehensive coverage -// -// This ensures continuous real-time querying capability even after log file compaction/archival -func (c *BrokerClient) getEarliestBufferStart(ctx context.Context, partitionPath string) (int64, error) { - filerClient, err := c.GetFilerClient() - if err != nil { - return 0, fmt.Errorf("failed to get filer client: %v", err) - } - - var earliestBufferIndex int64 = -1 // -1 means no buffer_start found - var logFileCount, parquetFileCount int - var bufferStartSources []string // Track which files provide buffer_start - - err = filer_pb.ReadDirAllEntries(ctx, filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - // Skip directories - if entry.IsDirectory { - return nil - } - - // Count file types for scenario detection - if strings.HasSuffix(entry.Name, ".parquet") { - parquetFileCount++ - } else { - logFileCount++ - } - - // Extract buffer_start from file extended attributes (both log files and parquet files) - bufferStart := c.getBufferStartFromEntry(entry) - if bufferStart != nil && bufferStart.StartIndex > 0 { - if earliestBufferIndex == -1 || bufferStart.StartIndex < earliestBufferIndex { - earliestBufferIndex = bufferStart.StartIndex - } - bufferStartSources = append(bufferStartSources, entry.Name) - } - - return nil - }) - - if err != nil { - return 0, fmt.Errorf("failed to scan partition directory: %v", err) - } - - if earliestBufferIndex == -1 { - return 0, fmt.Errorf("no buffer_start metadata found in partition") - } - - return earliestBufferIndex, nil -} - -// getBufferStartFromEntry extracts LogBufferStart from file entry metadata -// Only supports binary format (used by both log files and Parquet files) -func (c *BrokerClient) getBufferStartFromEntry(entry *filer_pb.Entry) *LogBufferStart { - if entry.Extended == nil { - return nil - } - - if startData, exists := entry.Extended["buffer_start"]; exists { - // Only support binary format - if len(startData) == 8 { - startIndex := int64(binary.BigEndian.Uint64(startData)) - if startIndex > 0 { - return &LogBufferStart{StartIndex: startIndex} - } - } - } - - return nil -} diff --git a/weed/query/engine/catalog.go b/weed/query/engine/catalog.go deleted file mode 100644 index f53e4cb2a..000000000 --- a/weed/query/engine/catalog.go +++ /dev/null @@ -1,451 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// BrokerClientInterface defines the interface for broker client operations -// Both real BrokerClient and MockBrokerClient implement this interface -type BrokerClientInterface interface { - ListNamespaces(ctx context.Context) ([]string, error) - ListTopics(ctx context.Context, namespace string) ([]string, error) - GetTopicSchema(ctx context.Context, namespace, topic string) (*schema_pb.RecordType, []string, string, error) // Returns (flatSchema, keyColumns, schemaFormat, error) - ConfigureTopic(ctx context.Context, namespace, topicName string, partitionCount int32, flatSchema *schema_pb.RecordType, keyColumns []string) error - GetFilerClient() (filer_pb.FilerClient, error) - DeleteTopic(ctx context.Context, namespace, topicName string) error - // GetUnflushedMessages returns only messages that haven't been flushed to disk yet - // This prevents double-counting when combining with disk-based data - GetUnflushedMessages(ctx context.Context, namespace, topicName string, partition topic.Partition, startTimeNs int64) ([]*filer_pb.LogEntry, error) -} - -// SchemaCatalog manages the mapping between MQ topics and SQL tables -// Assumptions: -// 1. Each MQ namespace corresponds to a SQL database -// 2. Each MQ topic corresponds to a SQL table -// 3. Topic schemas are cached for performance -// 4. Schema evolution is tracked via RevisionId -type SchemaCatalog struct { - mu sync.RWMutex - - // databases maps namespace names to database metadata - // Assumption: Namespace names are valid SQL database identifiers - databases map[string]*DatabaseInfo - - // currentDatabase tracks the active database context (for USE database) - // Assumption: Single-threaded usage per SQL session - currentDatabase string - - // brokerClient handles communication with MQ broker - brokerClient BrokerClientInterface // Use interface for dependency injection - - // defaultPartitionCount is the default number of partitions for new topics - // Can be overridden in CREATE TABLE statements with PARTITION COUNT option - defaultPartitionCount int32 - - // cacheTTL is the time-to-live for cached database and table information - // After this duration, cached data is considered stale and will be refreshed - cacheTTL time.Duration -} - -// DatabaseInfo represents a SQL database (MQ namespace) -type DatabaseInfo struct { - Name string - Tables map[string]*TableInfo - CachedAt time.Time // Timestamp when this database info was cached -} - -// TableInfo represents a SQL table (MQ topic) with schema information -// Assumptions: -// 1. All topic messages conform to the same schema within a revision -// 2. Schema evolution maintains backward compatibility -// 3. Primary key is implicitly the message timestamp/offset -type TableInfo struct { - Name string - Namespace string - Schema *schema.Schema - Columns []ColumnInfo - RevisionId uint32 - CachedAt time.Time // Timestamp when this table info was cached -} - -// ColumnInfo represents a SQL column (MQ schema field) -type ColumnInfo struct { - Name string - Type string // SQL type representation - Nullable bool // Assumption: MQ fields are nullable by default -} - -// NewSchemaCatalog creates a new schema catalog -// Uses master address for service discovery of filers and brokers -func NewSchemaCatalog(masterAddress string) *SchemaCatalog { - return &SchemaCatalog{ - databases: make(map[string]*DatabaseInfo), - brokerClient: NewBrokerClient(masterAddress), - defaultPartitionCount: 6, // Default partition count, can be made configurable via environment variable - cacheTTL: 5 * time.Minute, // Default cache TTL of 5 minutes, can be made configurable - } -} - -// ListDatabases returns all available databases (MQ namespaces) -// Assumption: This would be populated from MQ broker metadata -func (c *SchemaCatalog) ListDatabases() []string { - // Clean up expired cache entries first - c.mu.Lock() - c.cleanExpiredDatabases() - c.mu.Unlock() - - c.mu.RLock() - defer c.mu.RUnlock() - - // Try to get real namespaces from broker first - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - namespaces, err := c.brokerClient.ListNamespaces(ctx) - if err != nil { - // Silently handle broker connection errors - - // Fallback to cached databases if broker unavailable - databases := make([]string, 0, len(c.databases)) - for name := range c.databases { - databases = append(databases, name) - } - - // Return empty list if no cached data (no more sample data) - return databases - } - - return namespaces -} - -// ListTables returns all tables in a database (MQ topics in namespace) -func (c *SchemaCatalog) ListTables(database string) ([]string, error) { - // Clean up expired cache entries first - c.mu.Lock() - c.cleanExpiredDatabases() - c.mu.Unlock() - - c.mu.RLock() - defer c.mu.RUnlock() - - // Try to get real topics from broker first - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - topics, err := c.brokerClient.ListTopics(ctx, database) - if err != nil { - // Fallback to cached data if broker unavailable - db, exists := c.databases[database] - if !exists { - // Return empty list if database not found (no more sample data) - return []string{}, nil - } - - tables := make([]string, 0, len(db.Tables)) - for name := range db.Tables { - // Skip .meta table - if name == ".meta" { - continue - } - tables = append(tables, name) - } - return tables, nil - } - - // Filter out .meta table from topics - filtered := make([]string, 0, len(topics)) - for _, topic := range topics { - if topic != ".meta" { - filtered = append(filtered, topic) - } - } - - return filtered, nil -} - -// GetTableInfo returns detailed schema information for a table -// Assumption: Table exists and schema is accessible -func (c *SchemaCatalog) GetTableInfo(database, table string) (*TableInfo, error) { - // Clean up expired cache entries first - c.mu.Lock() - c.cleanExpiredDatabases() - c.mu.Unlock() - - c.mu.RLock() - db, exists := c.databases[database] - if !exists { - c.mu.RUnlock() - return nil, TableNotFoundError{ - Database: database, - Table: "", - } - } - - tableInfo, exists := db.Tables[table] - if !exists || c.isTableCacheExpired(tableInfo) { - c.mu.RUnlock() - - // Try to refresh table info from broker if not found or expired - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - recordType, _, _, err := c.brokerClient.GetTopicSchema(ctx, database, table) - if err != nil { - // If broker unavailable and we have expired cached data, return it - if exists { - return tableInfo, nil - } - // Otherwise return not found error - return nil, TableNotFoundError{ - Database: database, - Table: table, - } - } - - // Convert the broker response to schema and register it - mqSchema := &schema.Schema{ - RecordType: recordType, - RevisionId: 1, // Default revision for schema fetched from broker - } - - // Register the refreshed schema - err = c.RegisterTopic(database, table, mqSchema) - if err != nil { - // If registration fails but we have cached data, return it - if exists { - return tableInfo, nil - } - return nil, fmt.Errorf("failed to register topic schema: %v", err) - } - - // Get the newly registered table info - c.mu.RLock() - defer c.mu.RUnlock() - - db, exists := c.databases[database] - if !exists { - return nil, TableNotFoundError{ - Database: database, - Table: table, - } - } - - tableInfo, exists := db.Tables[table] - if !exists { - return nil, TableNotFoundError{ - Database: database, - Table: table, - } - } - - return tableInfo, nil - } - - c.mu.RUnlock() - return tableInfo, nil -} - -// RegisterTopic adds or updates a topic's schema information in the catalog -// Assumption: This is called when topics are created or schemas are modified -func (c *SchemaCatalog) RegisterTopic(namespace, topicName string, mqSchema *schema.Schema) error { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - - // Ensure database exists - db, exists := c.databases[namespace] - if !exists { - db = &DatabaseInfo{ - Name: namespace, - Tables: make(map[string]*TableInfo), - CachedAt: now, - } - c.databases[namespace] = db - } - - // Convert MQ schema to SQL table info - tableInfo, err := c.convertMQSchemaToTableInfo(namespace, topicName, mqSchema) - if err != nil { - return fmt.Errorf("failed to convert MQ schema: %v", err) - } - - // Set the cached timestamp for the table - tableInfo.CachedAt = now - - db.Tables[topicName] = tableInfo - return nil -} - -// convertMQSchemaToTableInfo converts MQ schema to SQL table information -// Assumptions: -// 1. MQ scalar types map directly to SQL types -// 2. Complex types (arrays, maps) are serialized as JSON strings -// 3. All fields are nullable unless specifically marked otherwise -// 4. If no schema is defined, create a default schema with system fields and _value -func (c *SchemaCatalog) convertMQSchemaToTableInfo(namespace, topicName string, mqSchema *schema.Schema) (*TableInfo, error) { - // Check if the schema has a valid RecordType - if mqSchema == nil || mqSchema.RecordType == nil { - // For topics without schema, create a default schema with system fields and _value - columns := []ColumnInfo{ - {Name: SW_DISPLAY_NAME_TIMESTAMP, Type: "TIMESTAMP", Nullable: true}, - {Name: SW_COLUMN_NAME_KEY, Type: "VARBINARY", Nullable: true}, - {Name: SW_COLUMN_NAME_SOURCE, Type: "VARCHAR(255)", Nullable: true}, - {Name: SW_COLUMN_NAME_VALUE, Type: "VARBINARY", Nullable: true}, - } - - return &TableInfo{ - Name: topicName, - Namespace: namespace, - Schema: nil, // No schema defined - Columns: columns, - RevisionId: 0, - }, nil - } - - columns := make([]ColumnInfo, len(mqSchema.RecordType.Fields)) - - for i, field := range mqSchema.RecordType.Fields { - sqlType, err := c.convertMQFieldTypeToSQL(field.Type) - if err != nil { - return nil, fmt.Errorf("unsupported field type for '%s': %v", field.Name, err) - } - - columns[i] = ColumnInfo{ - Name: field.Name, - Type: sqlType, - Nullable: true, // Assumption: MQ fields are nullable by default - } - } - - return &TableInfo{ - Name: topicName, - Namespace: namespace, - Schema: mqSchema, - Columns: columns, - RevisionId: mqSchema.RevisionId, - }, nil -} - -// convertMQFieldTypeToSQL maps MQ field types to SQL types -// Uses standard SQL type mappings with PostgreSQL compatibility -func (c *SchemaCatalog) convertMQFieldTypeToSQL(fieldType *schema_pb.Type) (string, error) { - switch t := fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch t.ScalarType { - case schema_pb.ScalarType_BOOL: - return "BOOLEAN", nil - case schema_pb.ScalarType_INT32: - return "INT", nil - case schema_pb.ScalarType_INT64: - return "BIGINT", nil - case schema_pb.ScalarType_FLOAT: - return "FLOAT", nil - case schema_pb.ScalarType_DOUBLE: - return "DOUBLE", nil - case schema_pb.ScalarType_BYTES: - return "VARBINARY", nil - case schema_pb.ScalarType_STRING: - return "VARCHAR(255)", nil // Assumption: Default string length - default: - return "", fmt.Errorf("unsupported scalar type: %v", t.ScalarType) - } - case *schema_pb.Type_ListType: - // Assumption: Lists are serialized as JSON strings in SQL - return "TEXT", nil - case *schema_pb.Type_RecordType: - // Assumption: Nested records are serialized as JSON strings - return "TEXT", nil - default: - return "", fmt.Errorf("unsupported field type: %T", t) - } -} - -// SetCurrentDatabase sets the active database context -// Assumption: Used for implementing "USE database" functionality -func (c *SchemaCatalog) SetCurrentDatabase(database string) error { - c.mu.Lock() - defer c.mu.Unlock() - - // TODO: Validate database exists in MQ broker - c.currentDatabase = database - return nil -} - -// GetCurrentDatabase returns the currently active database -func (c *SchemaCatalog) GetCurrentDatabase() string { - c.mu.RLock() - defer c.mu.RUnlock() - return c.currentDatabase -} - -// SetDefaultPartitionCount sets the default number of partitions for new topics -func (c *SchemaCatalog) SetDefaultPartitionCount(count int32) { - c.mu.Lock() - defer c.mu.Unlock() - c.defaultPartitionCount = count -} - -// GetDefaultPartitionCount returns the default number of partitions for new topics -func (c *SchemaCatalog) GetDefaultPartitionCount() int32 { - c.mu.RLock() - defer c.mu.RUnlock() - return c.defaultPartitionCount -} - -// SetCacheTTL sets the time-to-live for cached database and table information -func (c *SchemaCatalog) SetCacheTTL(ttl time.Duration) { - c.mu.Lock() - defer c.mu.Unlock() - c.cacheTTL = ttl -} - -// GetCacheTTL returns the current cache TTL setting -func (c *SchemaCatalog) GetCacheTTL() time.Duration { - c.mu.RLock() - defer c.mu.RUnlock() - return c.cacheTTL -} - -// isDatabaseCacheExpired checks if a database's cached information has expired -func (c *SchemaCatalog) isDatabaseCacheExpired(db *DatabaseInfo) bool { - return time.Since(db.CachedAt) > c.cacheTTL -} - -// isTableCacheExpired checks if a table's cached information has expired -func (c *SchemaCatalog) isTableCacheExpired(table *TableInfo) bool { - return time.Since(table.CachedAt) > c.cacheTTL -} - -// cleanExpiredDatabases removes expired database entries from cache -// Note: This method assumes the caller already holds the write lock -func (c *SchemaCatalog) cleanExpiredDatabases() { - for name, db := range c.databases { - if c.isDatabaseCacheExpired(db) { - delete(c.databases, name) - } else { - // Clean expired tables within non-expired databases - for tableName, table := range db.Tables { - if c.isTableCacheExpired(table) { - delete(db.Tables, tableName) - } - } - } - } -} - -// CleanExpiredCache removes all expired entries from the cache -// This method can be called externally to perform periodic cache cleanup -func (c *SchemaCatalog) CleanExpiredCache() { - c.mu.Lock() - defer c.mu.Unlock() - c.cleanExpiredDatabases() -} diff --git a/weed/query/engine/catalog_no_schema_test.go b/weed/query/engine/catalog_no_schema_test.go deleted file mode 100644 index 0c0312cee..000000000 --- a/weed/query/engine/catalog_no_schema_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/mq/schema" -) - -// TestConvertMQSchemaToTableInfo_NoSchema tests that topics without schemas -// get a default schema with system fields and _value field -func TestConvertMQSchemaToTableInfo_NoSchema(t *testing.T) { - catalog := NewSchemaCatalog("localhost:9333") - - tests := []struct { - name string - mqSchema *schema.Schema - expectError bool - checkFields func(*testing.T, *TableInfo) - }{ - { - name: "nil schema", - mqSchema: nil, - expectError: false, - checkFields: func(t *testing.T, info *TableInfo) { - if info.Schema != nil { - t.Error("Expected Schema to be nil for topics without schema") - } - if len(info.Columns) != 4 { - t.Errorf("Expected 4 columns, got %d", len(info.Columns)) - } - expectedCols := map[string]string{ - "_ts": "TIMESTAMP", - "_key": "VARBINARY", - "_source": "VARCHAR(255)", - "_value": "VARBINARY", - } - for _, col := range info.Columns { - expectedType, ok := expectedCols[col.Name] - if !ok { - t.Errorf("Unexpected column: %s", col.Name) - continue - } - if col.Type != expectedType { - t.Errorf("Column %s: expected type %s, got %s", col.Name, expectedType, col.Type) - } - } - }, - }, - { - name: "schema with nil RecordType", - mqSchema: &schema.Schema{ - RecordType: nil, - RevisionId: 1, - }, - expectError: false, - checkFields: func(t *testing.T, info *TableInfo) { - if info.Schema != nil { - t.Error("Expected Schema to be nil for topics without RecordType") - } - if len(info.Columns) != 4 { - t.Errorf("Expected 4 columns, got %d", len(info.Columns)) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tableInfo, err := catalog.convertMQSchemaToTableInfo("test_namespace", "test_topic", tt.mqSchema) - - if tt.expectError { - if err == nil { - t.Error("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tableInfo == nil { - t.Error("Expected tableInfo but got nil") - return - } - - if tt.checkFields != nil { - tt.checkFields(t, tableInfo) - } - - // Basic checks - if tableInfo.Name != "test_topic" { - t.Errorf("Expected Name 'test_topic', got '%s'", tableInfo.Name) - } - if tableInfo.Namespace != "test_namespace" { - t.Errorf("Expected Namespace 'test_namespace', got '%s'", tableInfo.Namespace) - } - }) - } -} diff --git a/weed/query/engine/cockroach_parser.go b/weed/query/engine/cockroach_parser.go deleted file mode 100644 index 20db9cb4d..000000000 --- a/weed/query/engine/cockroach_parser.go +++ /dev/null @@ -1,408 +0,0 @@ -package engine - -import ( - "fmt" - "strings" - - "github.com/seaweedfs/cockroachdb-parser/pkg/sql/parser" - "github.com/seaweedfs/cockroachdb-parser/pkg/sql/sem/tree" -) - -// CockroachSQLParser wraps CockroachDB's PostgreSQL-compatible SQL parser for use in SeaweedFS -type CockroachSQLParser struct{} - -// NewCockroachSQLParser creates a new instance of the CockroachDB SQL parser wrapper -func NewCockroachSQLParser() *CockroachSQLParser { - return &CockroachSQLParser{} -} - -// ParseSQL parses a SQL statement using CockroachDB's parser -func (p *CockroachSQLParser) ParseSQL(sql string) (Statement, error) { - // Parse using CockroachDB's parser - stmts, err := parser.Parse(sql) - if err != nil { - return nil, fmt.Errorf("CockroachDB parser error: %v", err) - } - - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement, got %d", len(stmts)) - } - - stmt := stmts[0].AST - - // Convert CockroachDB AST to SeaweedFS AST format - switch s := stmt.(type) { - case *tree.Select: - return p.convertSelectStatement(s) - default: - return nil, fmt.Errorf("unsupported statement type: %T", s) - } -} - -// convertSelectStatement converts CockroachDB's Select AST to SeaweedFS format -func (p *CockroachSQLParser) convertSelectStatement(crdbSelect *tree.Select) (*SelectStatement, error) { - selectClause, ok := crdbSelect.Select.(*tree.SelectClause) - if !ok { - return nil, fmt.Errorf("expected SelectClause, got %T", crdbSelect.Select) - } - - seaweedSelect := &SelectStatement{ - SelectExprs: make([]SelectExpr, 0, len(selectClause.Exprs)), - From: []TableExpr{}, - } - - // Convert SELECT expressions - for _, expr := range selectClause.Exprs { - seaweedExpr, err := p.convertSelectExpr(expr) - if err != nil { - return nil, fmt.Errorf("failed to convert select expression: %v", err) - } - seaweedSelect.SelectExprs = append(seaweedSelect.SelectExprs, seaweedExpr) - } - - // Convert FROM clause - if len(selectClause.From.Tables) > 0 { - for _, fromExpr := range selectClause.From.Tables { - seaweedTableExpr, err := p.convertFromExpr(fromExpr) - if err != nil { - return nil, fmt.Errorf("failed to convert FROM clause: %v", err) - } - seaweedSelect.From = append(seaweedSelect.From, seaweedTableExpr) - } - } - - // Convert WHERE clause if present - if selectClause.Where != nil { - whereExpr, err := p.convertExpr(selectClause.Where.Expr) - if err != nil { - return nil, fmt.Errorf("failed to convert WHERE clause: %v", err) - } - seaweedSelect.Where = &WhereClause{ - Expr: whereExpr, - } - } - - // Convert LIMIT and OFFSET clauses if present - if crdbSelect.Limit != nil { - limitClause := &LimitClause{} - - // Convert LIMIT (Count) - if crdbSelect.Limit.Count != nil { - countExpr, err := p.convertExpr(crdbSelect.Limit.Count) - if err != nil { - return nil, fmt.Errorf("failed to convert LIMIT clause: %v", err) - } - limitClause.Rowcount = countExpr - } - - // Convert OFFSET - if crdbSelect.Limit.Offset != nil { - offsetExpr, err := p.convertExpr(crdbSelect.Limit.Offset) - if err != nil { - return nil, fmt.Errorf("failed to convert OFFSET clause: %v", err) - } - limitClause.Offset = offsetExpr - } - - seaweedSelect.Limit = limitClause - } - - return seaweedSelect, nil -} - -// convertSelectExpr converts CockroachDB SelectExpr to SeaweedFS format -func (p *CockroachSQLParser) convertSelectExpr(expr tree.SelectExpr) (SelectExpr, error) { - // Handle star expressions (SELECT *) - if _, isStar := expr.Expr.(tree.UnqualifiedStar); isStar { - return &StarExpr{}, nil - } - - // CockroachDB's SelectExpr is a struct, not an interface, so handle it directly - seaweedExpr := &AliasedExpr{} - - // Convert the main expression - convertedExpr, err := p.convertExpr(expr.Expr) - if err != nil { - return nil, fmt.Errorf("failed to convert expression: %v", err) - } - seaweedExpr.Expr = convertedExpr - - // Convert alias if present - if expr.As != "" { - seaweedExpr.As = aliasValue(expr.As) - } - - return seaweedExpr, nil -} - -// convertExpr converts CockroachDB expressions to SeaweedFS format -func (p *CockroachSQLParser) convertExpr(expr tree.Expr) (ExprNode, error) { - switch e := expr.(type) { - case *tree.FuncExpr: - // Function call - seaweedFunc := &FuncExpr{ - Name: stringValue(strings.ToUpper(e.Func.String())), // Convert to uppercase for consistency - Exprs: make([]SelectExpr, 0, len(e.Exprs)), - } - - // Convert function arguments - for _, arg := range e.Exprs { - // Special case: Handle star expressions in function calls like COUNT(*) - if _, isStar := arg.(tree.UnqualifiedStar); isStar { - seaweedFunc.Exprs = append(seaweedFunc.Exprs, &StarExpr{}) - } else { - convertedArg, err := p.convertExpr(arg) - if err != nil { - return nil, fmt.Errorf("failed to convert function argument: %v", err) - } - seaweedFunc.Exprs = append(seaweedFunc.Exprs, &AliasedExpr{Expr: convertedArg}) - } - } - - return seaweedFunc, nil - - case *tree.BinaryExpr: - // Arithmetic/binary operations (including string concatenation ||) - seaweedArith := &ArithmeticExpr{ - Operator: e.Operator.String(), - } - - // Convert left operand - left, err := p.convertExpr(e.Left) - if err != nil { - return nil, fmt.Errorf("failed to convert left operand: %v", err) - } - seaweedArith.Left = left - - // Convert right operand - right, err := p.convertExpr(e.Right) - if err != nil { - return nil, fmt.Errorf("failed to convert right operand: %v", err) - } - seaweedArith.Right = right - - return seaweedArith, nil - - case *tree.ComparisonExpr: - // Comparison operations (=, >, <, >=, <=, !=, etc.) used in WHERE clauses - seaweedComp := &ComparisonExpr{ - Operator: e.Operator.String(), - } - - // Convert left operand - left, err := p.convertExpr(e.Left) - if err != nil { - return nil, fmt.Errorf("failed to convert comparison left operand: %v", err) - } - seaweedComp.Left = left - - // Convert right operand - right, err := p.convertExpr(e.Right) - if err != nil { - return nil, fmt.Errorf("failed to convert comparison right operand: %v", err) - } - seaweedComp.Right = right - - return seaweedComp, nil - - case *tree.StrVal: - // String literal - return &SQLVal{ - Type: StrVal, - Val: []byte(string(e.RawString())), - }, nil - - case *tree.NumVal: - // Numeric literal - valStr := e.String() - if strings.Contains(valStr, ".") { - return &SQLVal{ - Type: FloatVal, - Val: []byte(valStr), - }, nil - } else { - return &SQLVal{ - Type: IntVal, - Val: []byte(valStr), - }, nil - } - - case *tree.UnresolvedName: - // Column name - return &ColName{ - Name: stringValue(e.String()), - }, nil - - case *tree.AndExpr: - // AND expression - left, err := p.convertExpr(e.Left) - if err != nil { - return nil, fmt.Errorf("failed to convert AND left operand: %v", err) - } - right, err := p.convertExpr(e.Right) - if err != nil { - return nil, fmt.Errorf("failed to convert AND right operand: %v", err) - } - return &AndExpr{ - Left: left, - Right: right, - }, nil - - case *tree.OrExpr: - // OR expression - left, err := p.convertExpr(e.Left) - if err != nil { - return nil, fmt.Errorf("failed to convert OR left operand: %v", err) - } - right, err := p.convertExpr(e.Right) - if err != nil { - return nil, fmt.Errorf("failed to convert OR right operand: %v", err) - } - return &OrExpr{ - Left: left, - Right: right, - }, nil - - case *tree.Tuple: - // Tuple expression for IN clauses: (value1, value2, value3) - tupleValues := make(ValTuple, 0, len(e.Exprs)) - for _, tupleExpr := range e.Exprs { - convertedExpr, err := p.convertExpr(tupleExpr) - if err != nil { - return nil, fmt.Errorf("failed to convert tuple element: %v", err) - } - tupleValues = append(tupleValues, convertedExpr) - } - return tupleValues, nil - - case *tree.CastExpr: - // Handle INTERVAL expressions: INTERVAL '1 hour' - // CockroachDB represents these as cast expressions - if p.isIntervalCast(e) { - // Extract the string value being cast to interval - if strVal, ok := e.Expr.(*tree.StrVal); ok { - return &IntervalExpr{ - Value: string(strVal.RawString()), - }, nil - } - return nil, fmt.Errorf("invalid INTERVAL expression: expected string literal") - } - // For non-interval casts, just convert the inner expression - return p.convertExpr(e.Expr) - - case *tree.RangeCond: - // Handle BETWEEN expressions: column BETWEEN value1 AND value2 - seaweedBetween := &BetweenExpr{ - Not: e.Not, // Handle NOT BETWEEN - } - - // Convert the left operand (the expression being tested) - left, err := p.convertExpr(e.Left) - if err != nil { - return nil, fmt.Errorf("failed to convert BETWEEN left operand: %v", err) - } - seaweedBetween.Left = left - - // Convert the FROM operand (lower bound) - from, err := p.convertExpr(e.From) - if err != nil { - return nil, fmt.Errorf("failed to convert BETWEEN from operand: %v", err) - } - seaweedBetween.From = from - - // Convert the TO operand (upper bound) - to, err := p.convertExpr(e.To) - if err != nil { - return nil, fmt.Errorf("failed to convert BETWEEN to operand: %v", err) - } - seaweedBetween.To = to - - return seaweedBetween, nil - - case *tree.IsNullExpr: - // Handle IS NULL expressions: column IS NULL - expr, err := p.convertExpr(e.Expr) - if err != nil { - return nil, fmt.Errorf("failed to convert IS NULL expression: %v", err) - } - - return &IsNullExpr{ - Expr: expr, - }, nil - - case *tree.IsNotNullExpr: - // Handle IS NOT NULL expressions: column IS NOT NULL - expr, err := p.convertExpr(e.Expr) - if err != nil { - return nil, fmt.Errorf("failed to convert IS NOT NULL expression: %v", err) - } - - return &IsNotNullExpr{ - Expr: expr, - }, nil - - default: - return nil, fmt.Errorf("unsupported expression type: %T", e) - } -} - -// convertFromExpr converts CockroachDB FROM expressions to SeaweedFS format -func (p *CockroachSQLParser) convertFromExpr(expr tree.TableExpr) (TableExpr, error) { - switch e := expr.(type) { - case *tree.TableName: - // Simple table name - tableName := TableName{ - Name: stringValue(e.Table()), - } - - // Extract database qualifier if present - - if e.Schema() != "" { - tableName.Qualifier = stringValue(e.Schema()) - } - - return &AliasedTableExpr{ - Expr: tableName, - }, nil - - case *tree.AliasedTableExpr: - // Handle aliased table expressions (which is what CockroachDB uses for qualified names) - if tableName, ok := e.Expr.(*tree.TableName); ok { - seaweedTableName := TableName{ - Name: stringValue(tableName.Table()), - } - - // Extract database qualifier if present - if tableName.Schema() != "" { - seaweedTableName.Qualifier = stringValue(tableName.Schema()) - } - - return &AliasedTableExpr{ - Expr: seaweedTableName, - }, nil - } - - return nil, fmt.Errorf("unsupported expression in AliasedTableExpr: %T", e.Expr) - - default: - return nil, fmt.Errorf("unsupported table expression type: %T", e) - } -} - -// isIntervalCast checks if a CastExpr is casting to an INTERVAL type -func (p *CockroachSQLParser) isIntervalCast(castExpr *tree.CastExpr) bool { - // Check if the target type is an interval type - // CockroachDB represents interval types in the Type field - // We need to check if it's an interval type by examining the type structure - if castExpr.Type != nil { - // Try to detect interval type by examining the AST structure - // Since we can't easily access the type string, we'll be more conservative - // and assume any cast expression on a string literal could be an interval - if _, ok := castExpr.Expr.(*tree.StrVal); ok { - // This is likely an INTERVAL expression since CockroachDB - // represents INTERVAL '1 hour' as casting a string to interval type - return true - } - } - return false -} diff --git a/weed/query/engine/cockroach_parser_success_test.go b/weed/query/engine/cockroach_parser_success_test.go deleted file mode 100644 index f810e604c..000000000 --- a/weed/query/engine/cockroach_parser_success_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -// TestCockroachDBParserSuccess demonstrates the successful integration of CockroachDB's parser -// This test validates that all previously problematic SQL expressions now work correctly -func TestCockroachDBParserSuccess(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - expected string - desc string - }{ - { - name: "Basic_Function", - sql: "SELECT LENGTH('hello') FROM user_events LIMIT 1", - expected: "5", - desc: "Simple function call", - }, - { - name: "Function_Arithmetic", - sql: "SELECT LENGTH('hello') + 10 FROM user_events LIMIT 1", - expected: "15", - desc: "Function with arithmetic operation (original user issue)", - }, - { - name: "User_Original_Query", - sql: "SELECT length(trim(' hello world ')) + 12 FROM user_events LIMIT 1", - expected: "23", - desc: "User's exact original failing query - now fixed!", - }, - { - name: "String_Concatenation", - sql: "SELECT 'hello' || 'world' FROM user_events LIMIT 1", - expected: "helloworld", - desc: "Basic string concatenation", - }, - { - name: "Function_With_Concat", - sql: "SELECT LENGTH('hello' || 'world') FROM user_events LIMIT 1", - expected: "10", - desc: "Function with string concatenation argument", - }, - { - name: "Multiple_Arithmetic", - sql: "SELECT LENGTH('test') * 3 FROM user_events LIMIT 1", - expected: "12", - desc: "Function with multiplication", - }, - { - name: "Nested_Functions", - sql: "SELECT LENGTH(UPPER('hello')) FROM user_events LIMIT 1", - expected: "5", - desc: "Nested function calls", - }, - { - name: "Column_Alias", - sql: "SELECT LENGTH('test') AS test_length FROM user_events LIMIT 1", - expected: "4", - desc: "Column alias functionality (AS keyword)", - }, - } - - successCount := 0 - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if err != nil { - t.Errorf("%s - Query failed: %v", tc.desc, err) - return - } - - if result.Error != nil { - t.Errorf("%s - Query result error: %v", tc.desc, result.Error) - return - } - - if len(result.Rows) == 0 { - t.Errorf("%s - Expected at least one row", tc.desc) - return - } - - actual := result.Rows[0][0].ToString() - - if actual == tc.expected { - t.Logf("SUCCESS: %s โ†’ %s", tc.desc, actual) - successCount++ - } else { - t.Errorf("FAIL %s - Expected '%s', got '%s'", tc.desc, tc.expected, actual) - } - }) - } - - t.Logf("CockroachDB Parser Integration: %d/%d tests passed!", successCount, len(testCases)) -} diff --git a/weed/query/engine/complete_sql_fixes_test.go b/weed/query/engine/complete_sql_fixes_test.go deleted file mode 100644 index e984ce0e1..000000000 --- a/weed/query/engine/complete_sql_fixes_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestCompleteSQLFixes is a comprehensive test verifying all SQL fixes work together -func TestCompleteSQLFixes(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("OriginalFailingProductionQueries", func(t *testing.T) { - // Test the exact queries that were originally failing in production - - testCases := []struct { - name string - timestamp int64 - id int64 - sql string - }{ - { - name: "OriginalFailingQuery1", - timestamp: 1756947416566456262, - id: 897795, - sql: "select id, _ts_ns as ts from ecommerce.user_events where ts = 1756947416566456262", - }, - { - name: "OriginalFailingQuery2", - timestamp: 1756947416566439304, - id: 715356, - sql: "select id, _ts_ns as ts from ecommerce.user_events where ts = 1756947416566439304", - }, - { - name: "CurrentDataQuery", - timestamp: 1756913789829292386, - id: 82460, - sql: "select id, _ts_ns as ts from ecommerce.user_events where ts = 1756913789829292386", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create test record matching the production data - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: tc.timestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: tc.id}}, - }, - } - - // Parse the original failing SQL - stmt, err := ParseSQL(tc.sql) - assert.NoError(t, err, "Should parse original failing query: %s", tc.name) - - selectStmt := stmt.(*SelectStatement) - - // Build predicate with alias support (this was the missing piece) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for: %s", tc.name) - - // This should now work (was failing before) - result := predicate(testRecord) - assert.True(t, result, "Originally failing query should now work: %s", tc.name) - - // Verify precision is maintained (timestamp fixes) - testRecordOffBy1 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: tc.timestamp + 1}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: tc.id}}, - }, - } - - result2 := predicate(testRecordOffBy1) - assert.False(t, result2, "Should not match timestamp off by 1 nanosecond: %s", tc.name) - }) - } - }) - - t.Run("AllFixesWorkTogether", func(t *testing.T) { - // Comprehensive test that all fixes work in combination - largeTimestamp := int64(1756947416566456262) - - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - "user_id": {Kind: &schema_pb.Value_StringValue{StringValue: "user123"}}, - }, - } - - // Complex query combining multiple fixes: - // 1. Alias resolution (ts alias) - // 2. Large timestamp precision - // 3. Multiple conditions - // 4. Different data types - sql := `SELECT - _ts_ns AS ts, - id AS record_id, - user_id AS uid - FROM ecommerce.user_events - WHERE ts = 1756947416566456262 - AND record_id = 897795 - AND uid = 'user123'` - - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse complex query with all fixes") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate combining all fixes") - - result := predicate(testRecord) - assert.True(t, result, "Complex query should work with all fixes combined") - - // Test that precision is still maintained in complex queries - testRecordDifferentTimestamp := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp + 1}}, // Off by 1ns - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - "user_id": {Kind: &schema_pb.Value_StringValue{StringValue: "user123"}}, - }, - } - - result2 := predicate(testRecordDifferentTimestamp) - assert.False(t, result2, "Should maintain nanosecond precision even in complex queries") - }) - - t.Run("BackwardCompatibilityVerified", func(t *testing.T) { - // Ensure that non-alias queries continue to work exactly as before - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - // Traditional query (no aliases) - should work exactly as before - traditionalSQL := "SELECT _ts_ns, id FROM ecommerce.user_events WHERE _ts_ns = 1756947416566456262 AND id = 897795" - stmt, err := ParseSQL(traditionalSQL) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - - // Should work with both old and new methods - predicateOld, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err, "Old method should still work") - - predicateNew, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "New method should work for traditional queries") - - resultOld := predicateOld(testRecord) - resultNew := predicateNew(testRecord) - - assert.True(t, resultOld, "Traditional query should work with old method") - assert.True(t, resultNew, "Traditional query should work with new method") - assert.Equal(t, resultOld, resultNew, "Both methods should produce identical results") - }) - - t.Run("PerformanceAndStability", func(t *testing.T) { - // Test that the fixes don't introduce performance or stability issues - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - // Run the same query many times to test stability - sql := "SELECT _ts_ns AS ts, id FROM test WHERE ts = 1756947416566456262" - stmt, err := ParseSQL(sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - - // Build predicate once - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err) - - // Run multiple times - should be stable - for i := 0; i < 100; i++ { - result := predicate(testRecord) - assert.True(t, result, "Should be stable across multiple executions (iteration %d)", i) - } - }) - - t.Run("EdgeCasesAndErrorHandling", func(t *testing.T) { - // Test various edge cases to ensure robustness - - // Test with empty/nil inputs - _, err := engine.buildPredicateWithContext(nil, nil) - assert.Error(t, err, "Should handle nil expressions gracefully") - - // Test with nil SelectExprs (should fall back to no-alias behavior) - compExpr := &ComparisonExpr{ - Left: &ColName{Name: stringValue("_ts_ns")}, - Operator: "=", - Right: &SQLVal{Type: IntVal, Val: []byte("1756947416566456262")}, - } - - predicate, err := engine.buildPredicateWithContext(compExpr, nil) - assert.NoError(t, err, "Should handle nil SelectExprs") - assert.NotNil(t, predicate, "Should return valid predicate") - - // Test with empty SelectExprs - predicate2, err := engine.buildPredicateWithContext(compExpr, []SelectExpr{}) - assert.NoError(t, err, "Should handle empty SelectExprs") - assert.NotNil(t, predicate2, "Should return valid predicate") - }) -} - -// TestSQLFixesSummary provides a quick summary test of all major functionality -func TestSQLFixesSummary(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("Summary", func(t *testing.T) { - // The "before and after" test - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - // What was failing before (would return 0 rows) - failingSQL := "SELECT id, _ts_ns AS ts FROM ecommerce.user_events WHERE ts = 1756947416566456262" - - // What works now - stmt, err := ParseSQL(failingSQL) - assert.NoError(t, err, "SQL parsing works") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Predicate building works with aliases") - - result := predicate(testRecord) - assert.True(t, result, "Originally failing query now works perfectly") - - // Verify precision is maintained - testRecordOffBy1 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456263}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - result2 := predicate(testRecordOffBy1) - assert.False(t, result2, "Nanosecond precision maintained") - - t.Log("ALL SQL FIXES VERIFIED:") - t.Log(" Timestamp precision for large int64 values") - t.Log(" SQL alias resolution in WHERE clauses") - t.Log(" Scan boundary fixes for equality queries") - t.Log(" Range query fixes for equal boundaries") - t.Log(" Hybrid scanner time range handling") - t.Log(" Backward compatibility maintained") - t.Log(" Production stability verified") - }) -} diff --git a/weed/query/engine/comprehensive_sql_test.go b/weed/query/engine/comprehensive_sql_test.go deleted file mode 100644 index 5878bfba4..000000000 --- a/weed/query/engine/comprehensive_sql_test.go +++ /dev/null @@ -1,349 +0,0 @@ -package engine - -import ( - "context" - "strings" - "testing" -) - -// TestComprehensiveSQLSuite tests all kinds of SQL patterns to ensure robustness -func TestComprehensiveSQLSuite(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - shouldPanic bool - shouldError bool - desc string - }{ - // =========== BASIC QUERIES =========== - { - name: "Basic_Select_All", - sql: "SELECT * FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Basic select all columns", - }, - { - name: "Basic_Select_Column", - sql: "SELECT id FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Basic select single column", - }, - { - name: "Basic_Select_Multiple_Columns", - sql: "SELECT id, status FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Basic select multiple columns", - }, - - // =========== ARITHMETIC EXPRESSIONS (FIXED) =========== - { - name: "Arithmetic_Multiply_FIXED", - sql: "SELECT id*2 FROM user_events", - shouldPanic: false, // Fixed: no longer panics - shouldError: false, - desc: "FIXED: Arithmetic multiplication works", - }, - { - name: "Arithmetic_Add", - sql: "SELECT id+10 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Arithmetic addition works", - }, - { - name: "Arithmetic_Subtract", - sql: "SELECT id-5 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Arithmetic subtraction works", - }, - { - name: "Arithmetic_Divide", - sql: "SELECT id/3 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Arithmetic division works", - }, - { - name: "Arithmetic_Complex", - sql: "SELECT id*2+10 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Complex arithmetic expression works", - }, - - // =========== STRING OPERATIONS =========== - { - name: "String_Concatenation", - sql: "SELECT 'hello' || 'world' FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "String concatenation", - }, - { - name: "String_Column_Concat", - sql: "SELECT status || '_suffix' FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Column string concatenation", - }, - - // =========== FUNCTIONS =========== - { - name: "Function_LENGTH", - sql: "SELECT LENGTH('hello') FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "LENGTH function with literal", - }, - { - name: "Function_LENGTH_Column", - sql: "SELECT LENGTH(status) FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "LENGTH function with column", - }, - { - name: "Function_UPPER", - sql: "SELECT UPPER('hello') FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "UPPER function", - }, - { - name: "Function_Nested", - sql: "SELECT LENGTH(UPPER('hello')) FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Nested functions", - }, - - // =========== FUNCTIONS WITH ARITHMETIC =========== - { - name: "Function_Arithmetic", - sql: "SELECT LENGTH('hello') + 10 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Function with arithmetic", - }, - { - name: "Function_Arithmetic_Complex", - sql: "SELECT LENGTH(status) * 2 + 5 FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Function with complex arithmetic", - }, - - // =========== TABLE REFERENCES =========== - { - name: "Table_Simple", - sql: "SELECT * FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Simple table reference", - }, - { - name: "Table_With_Database", - sql: "SELECT * FROM ecommerce.user_events", - shouldPanic: false, - shouldError: false, - desc: "Table with database qualifier", - }, - { - name: "Table_Quoted", - sql: `SELECT * FROM "user_events"`, - shouldPanic: false, - shouldError: false, - desc: "Quoted table name", - }, - - // =========== WHERE CLAUSES =========== - { - name: "Where_Simple", - sql: "SELECT * FROM user_events WHERE id = 1", - shouldPanic: false, - shouldError: false, - desc: "Simple WHERE clause", - }, - { - name: "Where_String", - sql: "SELECT * FROM user_events WHERE status = 'active'", - shouldPanic: false, - shouldError: false, - desc: "WHERE clause with string", - }, - - // =========== LIMIT/OFFSET =========== - { - name: "Limit_Only", - sql: "SELECT * FROM user_events LIMIT 10", - shouldPanic: false, - shouldError: false, - desc: "LIMIT clause only", - }, - { - name: "Limit_Offset", - sql: "SELECT * FROM user_events LIMIT 10 OFFSET 5", - shouldPanic: false, - shouldError: false, - desc: "LIMIT with OFFSET", - }, - - // =========== DATETIME FUNCTIONS =========== - { - name: "DateTime_CURRENT_DATE", - sql: "SELECT CURRENT_DATE FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "CURRENT_DATE function", - }, - { - name: "DateTime_NOW", - sql: "SELECT NOW() FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "NOW() function", - }, - { - name: "DateTime_EXTRACT", - sql: "SELECT EXTRACT(YEAR FROM CURRENT_DATE) FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "EXTRACT function", - }, - - // =========== EDGE CASES =========== - { - name: "Empty_String", - sql: "SELECT '' FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Empty string literal", - }, - { - name: "Multiple_Spaces", - sql: "SELECT id FROM user_events", - shouldPanic: false, - shouldError: false, - desc: "Query with multiple spaces", - }, - { - name: "Mixed_Case", - sql: "Select ID from User_Events", - shouldPanic: false, - shouldError: false, - desc: "Mixed case SQL", - }, - - // =========== SHOW STATEMENTS =========== - { - name: "Show_Databases", - sql: "SHOW DATABASES", - shouldPanic: false, - shouldError: false, - desc: "SHOW DATABASES statement", - }, - { - name: "Show_Tables", - sql: "SHOW TABLES", - shouldPanic: false, - shouldError: false, - desc: "SHOW TABLES statement", - }, - } - - var panicTests []string - var errorTests []string - var successTests []string - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Capture panics - var panicValue interface{} - func() { - defer func() { - if r := recover(); r != nil { - panicValue = r - } - }() - - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.shouldPanic { - if panicValue == nil { - t.Errorf("FAIL: Expected panic for %s, but query completed normally", tc.desc) - panicTests = append(panicTests, "FAIL: "+tc.desc) - return - } else { - t.Logf("PASS: EXPECTED PANIC: %s - %v", tc.desc, panicValue) - panicTests = append(panicTests, "PASS: "+tc.desc+" (reproduced)") - return - } - } - - if panicValue != nil { - t.Errorf("FAIL: Unexpected panic for %s: %v", tc.desc, panicValue) - panicTests = append(panicTests, "FAIL: "+tc.desc+" (unexpected panic)") - return - } - - if tc.shouldError { - if err == nil && (result == nil || result.Error == nil) { - t.Errorf("FAIL: Expected error for %s, but query succeeded", tc.desc) - errorTests = append(errorTests, "FAIL: "+tc.desc) - return - } else { - t.Logf("PASS: Expected error: %s", tc.desc) - errorTests = append(errorTests, "PASS: "+tc.desc) - return - } - } - - if err != nil { - t.Errorf("FAIL: Unexpected error for %s: %v", tc.desc, err) - errorTests = append(errorTests, "FAIL: "+tc.desc+" (unexpected error)") - return - } - - if result != nil && result.Error != nil { - t.Errorf("FAIL: Unexpected result error for %s: %v", tc.desc, result.Error) - errorTests = append(errorTests, "FAIL: "+tc.desc+" (unexpected result error)") - return - } - - t.Logf("PASS: Success: %s", tc.desc) - successTests = append(successTests, "PASS: "+tc.desc) - }() - }) - } - - // Summary report - separator := strings.Repeat("=", 80) - t.Log("\n" + separator) - t.Log("COMPREHENSIVE SQL TEST SUITE SUMMARY") - t.Log(separator) - t.Logf("Total Tests: %d", len(testCases)) - t.Logf("Successful: %d", len(successTests)) - t.Logf("Panics: %d", len(panicTests)) - t.Logf("Errors: %d", len(errorTests)) - t.Log(separator) - - if len(panicTests) > 0 { - t.Log("\nPANICS TO FIX:") - for _, test := range panicTests { - t.Log(" " + test) - } - } - - if len(errorTests) > 0 { - t.Log("\nERRORS TO INVESTIGATE:") - for _, test := range errorTests { - t.Log(" " + test) - } - } -} diff --git a/weed/query/engine/data_conversion.go b/weed/query/engine/data_conversion.go deleted file mode 100644 index f626d8f2e..000000000 --- a/weed/query/engine/data_conversion.go +++ /dev/null @@ -1,217 +0,0 @@ -package engine - -import ( - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" -) - -// formatAggregationResult formats an aggregation result into a SQL value -func (e *SQLEngine) formatAggregationResult(spec AggregationSpec, result AggregationResult) sqltypes.Value { - switch spec.Function { - case "COUNT": - return sqltypes.NewInt64(result.Count) - case "SUM": - return sqltypes.NewFloat64(result.Sum) - case "AVG": - return sqltypes.NewFloat64(result.Sum) // Sum contains the average for AVG - case "MIN": - if result.Min != nil { - return e.convertRawValueToSQL(result.Min) - } - return sqltypes.NULL - case "MAX": - if result.Max != nil { - return e.convertRawValueToSQL(result.Max) - } - return sqltypes.NULL - } - return sqltypes.NULL -} - -// convertRawValueToSQL converts a raw Go value to a SQL value -func (e *SQLEngine) convertRawValueToSQL(value interface{}) sqltypes.Value { - switch v := value.(type) { - case int32: - return sqltypes.NewInt32(v) - case int64: - return sqltypes.NewInt64(v) - case float32: - return sqltypes.NewFloat32(v) - case float64: - return sqltypes.NewFloat64(v) - case string: - return sqltypes.NewVarChar(v) - case bool: - if v { - return sqltypes.NewVarChar("1") - } - return sqltypes.NewVarChar("0") - } - return sqltypes.NULL -} - -// extractRawValue extracts the raw Go value from a schema_pb.Value -func (e *SQLEngine) extractRawValue(value *schema_pb.Value) interface{} { - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return v.Int32Value - case *schema_pb.Value_Int64Value: - return v.Int64Value - case *schema_pb.Value_FloatValue: - return v.FloatValue - case *schema_pb.Value_DoubleValue: - return v.DoubleValue - case *schema_pb.Value_StringValue: - return v.StringValue - case *schema_pb.Value_BoolValue: - return v.BoolValue - case *schema_pb.Value_BytesValue: - return string(v.BytesValue) // Convert bytes to string for comparison - } - return nil -} - -// compareValues compares two schema_pb.Value objects -func (e *SQLEngine) compareValues(value1 *schema_pb.Value, value2 *schema_pb.Value) int { - if value2 == nil { - return 1 // value1 > nil - } - raw1 := e.extractRawValue(value1) - raw2 := e.extractRawValue(value2) - if raw1 == nil { - return -1 - } - if raw2 == nil { - return 1 - } - - // Simple comparison - in a full implementation this would handle type coercion - switch v1 := raw1.(type) { - case int32: - if v2, ok := raw2.(int32); ok { - if v1 < v2 { - return -1 - } else if v1 > v2 { - return 1 - } - return 0 - } - case int64: - if v2, ok := raw2.(int64); ok { - if v1 < v2 { - return -1 - } else if v1 > v2 { - return 1 - } - return 0 - } - case float32: - if v2, ok := raw2.(float32); ok { - if v1 < v2 { - return -1 - } else if v1 > v2 { - return 1 - } - return 0 - } - case float64: - if v2, ok := raw2.(float64); ok { - if v1 < v2 { - return -1 - } else if v1 > v2 { - return 1 - } - return 0 - } - case string: - if v2, ok := raw2.(string); ok { - if v1 < v2 { - return -1 - } else if v1 > v2 { - return 1 - } - return 0 - } - case bool: - if v2, ok := raw2.(bool); ok { - if v1 == v2 { - return 0 - } else if v1 && !v2 { - return 1 - } - return -1 - } - } - return 0 -} - -// convertRawValueToSchemaValue converts raw Go values back to schema_pb.Value for comparison -func (e *SQLEngine) convertRawValueToSchemaValue(rawValue interface{}) *schema_pb.Value { - switch v := rawValue.(type) { - case int32: - return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: v}} - case int64: - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v}} - case float32: - return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: v}} - case float64: - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v}} - case string: - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v}} - case bool: - return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: v}} - case []byte: - return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: v}} - default: - // Convert other types to string as fallback - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: fmt.Sprintf("%v", v)}} - } -} - -// convertJSONValueToSchemaValue converts JSON values to schema_pb.Value -func (e *SQLEngine) convertJSONValueToSchemaValue(jsonValue interface{}) *schema_pb.Value { - switch v := jsonValue.(type) { - case string: - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v}} - case float64: - // JSON numbers are always float64, try to detect if it's actually an integer - if v == float64(int64(v)) { - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: int64(v)}} - } - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v}} - case bool: - return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: v}} - case nil: - return nil - default: - // Convert other types to string - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: fmt.Sprintf("%v", v)}} - } -} - -// Helper functions for aggregation processing - -// isNullValue checks if a schema_pb.Value is null or empty -func (e *SQLEngine) isNullValue(value *schema_pb.Value) bool { - return value == nil || value.Kind == nil -} - -// convertToNumber converts a schema_pb.Value to a float64 for numeric operations -func (e *SQLEngine) convertToNumber(value *schema_pb.Value) *float64 { - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - result := float64(v.Int32Value) - return &result - case *schema_pb.Value_Int64Value: - result := float64(v.Int64Value) - return &result - case *schema_pb.Value_FloatValue: - result := float64(v.FloatValue) - return &result - case *schema_pb.Value_DoubleValue: - return &v.DoubleValue - } - return nil -} diff --git a/weed/query/engine/datetime_functions.go b/weed/query/engine/datetime_functions.go deleted file mode 100644 index 9803145f0..000000000 --- a/weed/query/engine/datetime_functions.go +++ /dev/null @@ -1,195 +0,0 @@ -package engine - -import ( - "fmt" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// =============================== -// DATE/TIME CONSTANTS -// =============================== - -// CurrentDate returns the current date as a string in YYYY-MM-DD format -func (e *SQLEngine) CurrentDate() (*schema_pb.Value, error) { - now := time.Now() - dateStr := now.Format("2006-01-02") - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: dateStr}, - }, nil -} - -// CurrentTimestamp returns the current timestamp -func (e *SQLEngine) CurrentTimestamp() (*schema_pb.Value, error) { - now := time.Now() - - // Return as TimestampValue with microseconds - timestampMicros := now.UnixMicro() - - return &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: timestampMicros, - }, - }, - }, nil -} - -// CurrentTime returns the current time as a string in HH:MM:SS format -func (e *SQLEngine) CurrentTime() (*schema_pb.Value, error) { - now := time.Now() - timeStr := now.Format("15:04:05") - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: timeStr}, - }, nil -} - -// Now is an alias for CurrentTimestamp (common SQL function name) -func (e *SQLEngine) Now() (*schema_pb.Value, error) { - return e.CurrentTimestamp() -} - -// =============================== -// EXTRACT FUNCTION -// =============================== - -// DatePart represents the part of a date/time to extract -type DatePart string - -const ( - PartYear DatePart = "YEAR" - PartMonth DatePart = "MONTH" - PartDay DatePart = "DAY" - PartHour DatePart = "HOUR" - PartMinute DatePart = "MINUTE" - PartSecond DatePart = "SECOND" - PartWeek DatePart = "WEEK" - PartDayOfYear DatePart = "DOY" - PartDayOfWeek DatePart = "DOW" - PartQuarter DatePart = "QUARTER" - PartEpoch DatePart = "EPOCH" -) - -// Extract extracts a specific part from a date/time value -func (e *SQLEngine) Extract(part DatePart, value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("EXTRACT function requires non-null value") - } - - // Convert value to time - t, err := e.valueToTime(value) - if err != nil { - return nil, fmt.Errorf("EXTRACT function time conversion error: %v", err) - } - - var result int64 - - switch strings.ToUpper(string(part)) { - case string(PartYear): - result = int64(t.Year()) - case string(PartMonth): - result = int64(t.Month()) - case string(PartDay): - result = int64(t.Day()) - case string(PartHour): - result = int64(t.Hour()) - case string(PartMinute): - result = int64(t.Minute()) - case string(PartSecond): - result = int64(t.Second()) - case string(PartWeek): - _, week := t.ISOWeek() - result = int64(week) - case string(PartDayOfYear): - result = int64(t.YearDay()) - case string(PartDayOfWeek): - result = int64(t.Weekday()) - case string(PartQuarter): - month := t.Month() - result = int64((month-1)/3 + 1) - case string(PartEpoch): - result = t.Unix() - default: - return nil, fmt.Errorf("unsupported date part: %s", part) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: result}, - }, nil -} - -// =============================== -// DATE_TRUNC FUNCTION -// =============================== - -// DateTrunc truncates a date/time to the specified precision -func (e *SQLEngine) DateTrunc(precision string, value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("DATE_TRUNC function requires non-null value") - } - - // Convert value to time - t, err := e.valueToTime(value) - if err != nil { - return nil, fmt.Errorf("DATE_TRUNC function time conversion error: %v", err) - } - - var truncated time.Time - - switch strings.ToLower(precision) { - case "microsecond", "microseconds": - // No truncation needed for microsecond precision - truncated = t - case "millisecond", "milliseconds": - truncated = t.Truncate(time.Millisecond) - case "second", "seconds": - truncated = t.Truncate(time.Second) - case "minute", "minutes": - truncated = t.Truncate(time.Minute) - case "hour", "hours": - truncated = t.Truncate(time.Hour) - case "day", "days": - truncated = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) - case "week", "weeks": - // Truncate to beginning of week (Monday) - days := int(t.Weekday()) - if days == 0 { // Sunday = 0, adjust to make Monday = 0 - days = 6 - } else { - days = days - 1 - } - truncated = time.Date(t.Year(), t.Month(), t.Day()-days, 0, 0, 0, 0, t.Location()) - case "month", "months": - truncated = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location()) - case "quarter", "quarters": - month := t.Month() - quarterMonth := ((int(month)-1)/3)*3 + 1 - truncated = time.Date(t.Year(), time.Month(quarterMonth), 1, 0, 0, 0, 0, t.Location()) - case "year", "years": - truncated = time.Date(t.Year(), 1, 1, 0, 0, 0, 0, t.Location()) - case "decade", "decades": - year := (t.Year() / 10) * 10 - truncated = time.Date(year, 1, 1, 0, 0, 0, 0, t.Location()) - case "century", "centuries": - year := ((t.Year()-1)/100)*100 + 1 - truncated = time.Date(year, 1, 1, 0, 0, 0, 0, t.Location()) - case "millennium", "millennia": - year := ((t.Year()-1)/1000)*1000 + 1 - truncated = time.Date(year, 1, 1, 0, 0, 0, 0, t.Location()) - default: - return nil, fmt.Errorf("unsupported date truncation precision: %s", precision) - } - - // Return as TimestampValue - return &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: truncated.UnixMicro(), - }, - }, - }, nil -} diff --git a/weed/query/engine/datetime_functions_test.go b/weed/query/engine/datetime_functions_test.go deleted file mode 100644 index a4951e825..000000000 --- a/weed/query/engine/datetime_functions_test.go +++ /dev/null @@ -1,891 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestDateTimeFunctions(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("CURRENT_DATE function tests", func(t *testing.T) { - before := time.Now() - result, err := engine.CurrentDate() - after := time.Now() - - if err != nil { - t.Errorf("CurrentDate failed: %v", err) - } - - if result == nil { - t.Errorf("CurrentDate returned nil result") - return - } - - stringVal, ok := result.Kind.(*schema_pb.Value_StringValue) - if !ok { - t.Errorf("CurrentDate should return string value, got %T", result.Kind) - return - } - - // Check format (YYYY-MM-DD) with tolerance for midnight boundary crossings - beforeDate := before.Format("2006-01-02") - afterDate := after.Format("2006-01-02") - - if stringVal.StringValue != beforeDate && stringVal.StringValue != afterDate { - t.Errorf("Expected current date %s or %s (due to potential midnight boundary), got %s", - beforeDate, afterDate, stringVal.StringValue) - } - }) - - t.Run("CURRENT_TIMESTAMP function tests", func(t *testing.T) { - before := time.Now() - result, err := engine.CurrentTimestamp() - after := time.Now() - - if err != nil { - t.Errorf("CurrentTimestamp failed: %v", err) - } - - if result == nil { - t.Errorf("CurrentTimestamp returned nil result") - return - } - - timestampVal, ok := result.Kind.(*schema_pb.Value_TimestampValue) - if !ok { - t.Errorf("CurrentTimestamp should return timestamp value, got %T", result.Kind) - return - } - - timestamp := time.UnixMicro(timestampVal.TimestampValue.TimestampMicros) - - // Check that timestamp is within reasonable range with small tolerance buffer - // Allow for small timing variations, clock precision differences, and NTP adjustments - tolerance := 100 * time.Millisecond - beforeWithTolerance := before.Add(-tolerance) - afterWithTolerance := after.Add(tolerance) - - if timestamp.Before(beforeWithTolerance) || timestamp.After(afterWithTolerance) { - t.Errorf("Timestamp %v should be within tolerance of %v to %v (tolerance: %v)", - timestamp, before, after, tolerance) - } - }) - - t.Run("NOW function tests", func(t *testing.T) { - result, err := engine.Now() - if err != nil { - t.Errorf("Now failed: %v", err) - } - - if result == nil { - t.Errorf("Now returned nil result") - return - } - - // Should return same type as CurrentTimestamp - _, ok := result.Kind.(*schema_pb.Value_TimestampValue) - if !ok { - t.Errorf("Now should return timestamp value, got %T", result.Kind) - } - }) - - t.Run("CURRENT_TIME function tests", func(t *testing.T) { - result, err := engine.CurrentTime() - if err != nil { - t.Errorf("CurrentTime failed: %v", err) - } - - if result == nil { - t.Errorf("CurrentTime returned nil result") - return - } - - stringVal, ok := result.Kind.(*schema_pb.Value_StringValue) - if !ok { - t.Errorf("CurrentTime should return string value, got %T", result.Kind) - return - } - - // Check format (HH:MM:SS) - if len(stringVal.StringValue) != 8 || stringVal.StringValue[2] != ':' || stringVal.StringValue[5] != ':' { - t.Errorf("CurrentTime should return HH:MM:SS format, got %s", stringVal.StringValue) - } - }) -} - -func TestExtractFunction(t *testing.T) { - engine := NewTestSQLEngine() - - // Create a test timestamp: 2023-06-15 14:30:45 - // Use local time to avoid timezone conversion issues - testTime := time.Date(2023, 6, 15, 14, 30, 45, 0, time.Local) - testTimestamp := &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: testTime.UnixMicro(), - }, - }, - } - - tests := []struct { - name string - part DatePart - value *schema_pb.Value - expected int64 - expectErr bool - }{ - { - name: "Extract YEAR", - part: PartYear, - value: testTimestamp, - expected: 2023, - expectErr: false, - }, - { - name: "Extract MONTH", - part: PartMonth, - value: testTimestamp, - expected: 6, - expectErr: false, - }, - { - name: "Extract DAY", - part: PartDay, - value: testTimestamp, - expected: 15, - expectErr: false, - }, - { - name: "Extract HOUR", - part: PartHour, - value: testTimestamp, - expected: 14, - expectErr: false, - }, - { - name: "Extract MINUTE", - part: PartMinute, - value: testTimestamp, - expected: 30, - expectErr: false, - }, - { - name: "Extract SECOND", - part: PartSecond, - value: testTimestamp, - expected: 45, - expectErr: false, - }, - { - name: "Extract QUARTER from June", - part: PartQuarter, - value: testTimestamp, - expected: 2, // June is in Q2 - expectErr: false, - }, - { - name: "Extract from string date", - part: PartYear, - value: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "2023-06-15"}}, - expected: 2023, - expectErr: false, - }, - { - name: "Extract from Unix timestamp", - part: PartYear, - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: testTime.Unix()}}, - expected: 2023, - expectErr: false, - }, - { - name: "Extract from null value", - part: PartYear, - value: nil, - expected: 0, - expectErr: true, - }, - { - name: "Extract invalid part", - part: DatePart("INVALID"), - value: testTimestamp, - expected: 0, - expectErr: true, - }, - { - name: "Extract from invalid string", - part: PartYear, - value: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "invalid-date"}}, - expected: 0, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Extract(tt.part, tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if result == nil { - t.Errorf("Extract returned nil result") - return - } - - intVal, ok := result.Kind.(*schema_pb.Value_Int64Value) - if !ok { - t.Errorf("Extract should return int64 value, got %T", result.Kind) - return - } - - if intVal.Int64Value != tt.expected { - t.Errorf("Expected %d, got %d", tt.expected, intVal.Int64Value) - } - }) - } -} - -func TestDateTruncFunction(t *testing.T) { - engine := NewTestSQLEngine() - - // Create a test timestamp: 2023-06-15 14:30:45.123456 - testTime := time.Date(2023, 6, 15, 14, 30, 45, 123456000, time.Local) // nanoseconds - testTimestamp := &schema_pb.Value{ - Kind: &schema_pb.Value_TimestampValue{ - TimestampValue: &schema_pb.TimestampValue{ - TimestampMicros: testTime.UnixMicro(), - }, - }, - } - - tests := []struct { - name string - precision string - value *schema_pb.Value - expectErr bool - expectedCheck func(result time.Time) bool // Custom check function - }{ - { - name: "Truncate to second", - precision: "second", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 15 && - result.Hour() == 14 && result.Minute() == 30 && result.Second() == 45 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to minute", - precision: "minute", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 15 && - result.Hour() == 14 && result.Minute() == 30 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to hour", - precision: "hour", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 15 && - result.Hour() == 14 && result.Minute() == 0 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to day", - precision: "day", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 15 && - result.Hour() == 0 && result.Minute() == 0 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to month", - precision: "month", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 1 && - result.Hour() == 0 && result.Minute() == 0 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to quarter", - precision: "quarter", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - // June (month 6) should truncate to April (month 4) - start of Q2 - return result.Year() == 2023 && result.Month() == 4 && result.Day() == 1 && - result.Hour() == 0 && result.Minute() == 0 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate to year", - precision: "year", - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 1 && result.Day() == 1 && - result.Hour() == 0 && result.Minute() == 0 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate with plural precision", - precision: "minutes", // Test plural form - value: testTimestamp, - expectErr: false, - expectedCheck: func(result time.Time) bool { - return result.Year() == 2023 && result.Month() == 6 && result.Day() == 15 && - result.Hour() == 14 && result.Minute() == 30 && result.Second() == 0 && - result.Nanosecond() == 0 - }, - }, - { - name: "Truncate from string date", - precision: "day", - value: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "2023-06-15 14:30:45"}}, - expectErr: false, - expectedCheck: func(result time.Time) bool { - // The result should be the start of day 2023-06-15 in local timezone - expectedDay := time.Date(2023, 6, 15, 0, 0, 0, 0, result.Location()) - return result.Equal(expectedDay) - }, - }, - { - name: "Truncate null value", - precision: "day", - value: nil, - expectErr: true, - expectedCheck: nil, - }, - { - name: "Invalid precision", - precision: "invalid", - value: testTimestamp, - expectErr: true, - expectedCheck: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.DateTrunc(tt.precision, tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if result == nil { - t.Errorf("DateTrunc returned nil result") - return - } - - timestampVal, ok := result.Kind.(*schema_pb.Value_TimestampValue) - if !ok { - t.Errorf("DateTrunc should return timestamp value, got %T", result.Kind) - return - } - - resultTime := time.UnixMicro(timestampVal.TimestampValue.TimestampMicros) - - if !tt.expectedCheck(resultTime) { - t.Errorf("DateTrunc result check failed for precision %s, got time: %v", tt.precision, resultTime) - } - }) - } -} - -// TestDateTimeConstantsInSQL tests that datetime constants work in actual SQL queries -// This test reproduces the original bug where CURRENT_TIME returned empty values -func TestDateTimeConstantsInSQL(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("CURRENT_TIME in SQL query", func(t *testing.T) { - // This is the exact case that was failing - result, err := engine.ExecuteSQL(context.Background(), "SELECT CURRENT_TIME FROM user_events LIMIT 1") - - if err != nil { - t.Fatalf("SQL execution failed: %v", err) - } - - if result.Error != nil { - t.Fatalf("Query result has error: %v", result.Error) - } - - // Verify we have the correct column and non-empty values - if len(result.Columns) != 1 || result.Columns[0] != "current_time" { - t.Errorf("Expected column 'current_time', got %v", result.Columns) - } - - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - - timeValue := result.Rows[0][0].ToString() - if timeValue == "" { - t.Error("CURRENT_TIME should not return empty value") - } - - // Verify HH:MM:SS format - if len(timeValue) == 8 && timeValue[2] == ':' && timeValue[5] == ':' { - t.Logf("CURRENT_TIME returned valid time: %s", timeValue) - } else { - t.Errorf("CURRENT_TIME should return HH:MM:SS format, got: %s", timeValue) - } - }) - - t.Run("CURRENT_DATE in SQL query", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT CURRENT_DATE FROM user_events LIMIT 1") - - if err != nil { - t.Fatalf("SQL execution failed: %v", err) - } - - if result.Error != nil { - t.Fatalf("Query result has error: %v", result.Error) - } - - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - - dateValue := result.Rows[0][0].ToString() - if dateValue == "" { - t.Error("CURRENT_DATE should not return empty value") - } - - t.Logf("CURRENT_DATE returned: %s", dateValue) - }) -} - -// TestFunctionArgumentCountHandling tests that the function evaluation correctly handles -// both zero-argument and single-argument functions -func TestFunctionArgumentCountHandling(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("Zero-argument function should fail appropriately", func(t *testing.T) { - funcExpr := &FuncExpr{ - Name: testStringValue(FuncCURRENT_TIME), - Exprs: []SelectExpr{}, // Zero arguments - should fail since we removed zero-arg support - } - - result, err := engine.evaluateStringFunction(funcExpr, HybridScanResult{}) - if err == nil { - t.Error("Expected error for zero-argument function, but got none") - } - if result != nil { - t.Error("Expected nil result for zero-argument function") - } - - expectedError := "function CURRENT_TIME expects exactly 1 argument" - if err.Error() != expectedError { - t.Errorf("Expected error '%s', got '%s'", expectedError, err.Error()) - } - }) - - t.Run("Single-argument function should still work", func(t *testing.T) { - funcExpr := &FuncExpr{ - Name: testStringValue(FuncUPPER), - Exprs: []SelectExpr{ - &AliasedExpr{ - Expr: &SQLVal{ - Type: StrVal, - Val: []byte("test"), - }, - }, - }, // Single argument - should work - } - - // Create a mock result - mockResult := HybridScanResult{} - - result, err := engine.evaluateStringFunction(funcExpr, mockResult) - if err != nil { - t.Errorf("Single-argument function failed: %v", err) - } - if result == nil { - t.Errorf("Single-argument function returned nil") - } - }) - - t.Run("Any zero-argument function should fail", func(t *testing.T) { - funcExpr := &FuncExpr{ - Name: testStringValue("INVALID_FUNCTION"), - Exprs: []SelectExpr{}, // Zero arguments - should fail - } - - result, err := engine.evaluateStringFunction(funcExpr, HybridScanResult{}) - if err == nil { - t.Error("Expected error for zero-argument function, got nil") - } - if result != nil { - t.Errorf("Expected nil result for zero-argument function, got %v", result) - } - - expectedError := "function INVALID_FUNCTION expects exactly 1 argument" - if err.Error() != expectedError { - t.Errorf("Expected error '%s', got '%s'", expectedError, err.Error()) - } - }) - - t.Run("Wrong argument count for single-arg function should fail", func(t *testing.T) { - funcExpr := &FuncExpr{ - Name: testStringValue(FuncUPPER), - Exprs: []SelectExpr{ - &AliasedExpr{Expr: &SQLVal{Type: StrVal, Val: []byte("test1")}}, - &AliasedExpr{Expr: &SQLVal{Type: StrVal, Val: []byte("test2")}}, - }, // Two arguments - should fail for UPPER - } - - result, err := engine.evaluateStringFunction(funcExpr, HybridScanResult{}) - if err == nil { - t.Errorf("Expected error for wrong argument count, got nil") - } - if result != nil { - t.Errorf("Expected nil result for wrong argument count, got %v", result) - } - - expectedError := "function UPPER expects exactly 1 argument" - if err.Error() != expectedError { - t.Errorf("Expected error '%s', got '%s'", expectedError, err.Error()) - } - }) -} - -// Helper function to create a string value for testing -func testStringValue(s string) StringGetter { - return &testStringValueImpl{value: s} -} - -type testStringValueImpl struct { - value string -} - -func (s *testStringValueImpl) String() string { - return s.value -} - -// TestExtractFunctionSQL tests the EXTRACT function through SQL execution -func TestExtractFunctionSQL(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - expectError bool - checkValue func(t *testing.T, result *QueryResult) - }{ - { - name: "Extract YEAR from current_date", - sql: "SELECT EXTRACT(YEAR FROM current_date) AS year_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - yearStr := result.Rows[0][0].ToString() - currentYear := time.Now().Year() - if yearStr != fmt.Sprintf("%d", currentYear) { - t.Errorf("Expected current year %d, got %s", currentYear, yearStr) - } - }, - }, - { - name: "Extract MONTH from current_date", - sql: "SELECT EXTRACT('MONTH', current_date) AS month_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - monthStr := result.Rows[0][0].ToString() - currentMonth := time.Now().Month() - if monthStr != fmt.Sprintf("%d", int(currentMonth)) { - t.Errorf("Expected current month %d, got %s", int(currentMonth), monthStr) - } - }, - }, - { - name: "Extract DAY from current_date", - sql: "SELECT EXTRACT('DAY', current_date) AS day_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - dayStr := result.Rows[0][0].ToString() - currentDay := time.Now().Day() - if dayStr != fmt.Sprintf("%d", currentDay) { - t.Errorf("Expected current day %d, got %s", currentDay, dayStr) - } - }, - }, - { - name: "Extract HOUR from current_timestamp", - sql: "SELECT EXTRACT('HOUR', current_timestamp) AS hour_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - hourStr := result.Rows[0][0].ToString() - // Just check it's a valid hour (0-23) - hour, err := strconv.Atoi(hourStr) - if err != nil { - t.Errorf("Expected valid hour integer, got %s", hourStr) - } - if hour < 0 || hour > 23 { - t.Errorf("Expected hour 0-23, got %d", hour) - } - }, - }, - { - name: "Extract MINUTE from current_timestamp", - sql: "SELECT EXTRACT('MINUTE', current_timestamp) AS minute_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - minuteStr := result.Rows[0][0].ToString() - // Just check it's a valid minute (0-59) - minute, err := strconv.Atoi(minuteStr) - if err != nil { - t.Errorf("Expected valid minute integer, got %s", minuteStr) - } - if minute < 0 || minute > 59 { - t.Errorf("Expected minute 0-59, got %d", minute) - } - }, - }, - { - name: "Extract QUARTER from current_date", - sql: "SELECT EXTRACT('QUARTER', current_date) AS quarter_value FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - quarterStr := result.Rows[0][0].ToString() - quarter, err := strconv.Atoi(quarterStr) - if err != nil { - t.Errorf("Expected valid quarter integer, got %s", quarterStr) - } - if quarter < 1 || quarter > 4 { - t.Errorf("Expected quarter 1-4, got %d", quarter) - } - }, - }, - { - name: "Multiple EXTRACT functions", - sql: "SELECT EXTRACT(YEAR FROM current_date) AS year_val, EXTRACT(MONTH FROM current_date) AS month_val, EXTRACT(DAY FROM current_date) AS day_val FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - if len(result.Rows[0]) != 3 { - t.Fatalf("Expected 3 columns, got %d", len(result.Rows[0])) - } - - // Check year - yearStr := result.Rows[0][0].ToString() - currentYear := time.Now().Year() - if yearStr != fmt.Sprintf("%d", currentYear) { - t.Errorf("Expected current year %d, got %s", currentYear, yearStr) - } - - // Check month - monthStr := result.Rows[0][1].ToString() - currentMonth := time.Now().Month() - if monthStr != fmt.Sprintf("%d", int(currentMonth)) { - t.Errorf("Expected current month %d, got %s", int(currentMonth), monthStr) - } - - // Check day - dayStr := result.Rows[0][2].ToString() - currentDay := time.Now().Day() - if dayStr != fmt.Sprintf("%d", currentDay) { - t.Errorf("Expected current day %d, got %s", currentDay, dayStr) - } - }, - }, - { - name: "EXTRACT with invalid date part", - sql: "SELECT EXTRACT('INVALID_PART', current_date) FROM user_events LIMIT 1", - expectError: true, - checkValue: nil, - }, - { - name: "EXTRACT with wrong number of arguments", - sql: "SELECT EXTRACT('YEAR') FROM user_events LIMIT 1", - expectError: true, - checkValue: nil, - }, - { - name: "EXTRACT with too many arguments", - sql: "SELECT EXTRACT('YEAR', current_date, 'extra') FROM user_events LIMIT 1", - expectError: true, - checkValue: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.expectError { - if err == nil && result.Error == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Query result has error: %v", result.Error) - return - } - - if tc.checkValue != nil { - tc.checkValue(t, result) - } - }) - } -} - -// TestDateTruncFunctionSQL tests the DATE_TRUNC function through SQL execution -func TestDateTruncFunctionSQL(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - expectError bool - checkValue func(t *testing.T, result *QueryResult) - }{ - { - name: "DATE_TRUNC to day", - sql: "SELECT DATE_TRUNC('day', current_timestamp) AS truncated_day FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - // The result should be a timestamp value, just check it's not empty - timestampStr := result.Rows[0][0].ToString() - if timestampStr == "" { - t.Error("Expected non-empty timestamp result") - } - }, - }, - { - name: "DATE_TRUNC to hour", - sql: "SELECT DATE_TRUNC('hour', current_timestamp) AS truncated_hour FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - timestampStr := result.Rows[0][0].ToString() - if timestampStr == "" { - t.Error("Expected non-empty timestamp result") - } - }, - }, - { - name: "DATE_TRUNC to month", - sql: "SELECT DATE_TRUNC('month', current_timestamp) AS truncated_month FROM user_events LIMIT 1", - expectError: false, - checkValue: func(t *testing.T, result *QueryResult) { - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - timestampStr := result.Rows[0][0].ToString() - if timestampStr == "" { - t.Error("Expected non-empty timestamp result") - } - }, - }, - { - name: "DATE_TRUNC with invalid precision", - sql: "SELECT DATE_TRUNC('invalid', current_timestamp) FROM user_events LIMIT 1", - expectError: true, - checkValue: nil, - }, - { - name: "DATE_TRUNC with wrong number of arguments", - sql: "SELECT DATE_TRUNC('day') FROM user_events LIMIT 1", - expectError: true, - checkValue: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.expectError { - if err == nil && result.Error == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Query result has error: %v", result.Error) - return - } - - if tc.checkValue != nil { - tc.checkValue(t, result) - } - }) - } -} diff --git a/weed/query/engine/describe.go b/weed/query/engine/describe.go deleted file mode 100644 index 415fc8e17..000000000 --- a/weed/query/engine/describe.go +++ /dev/null @@ -1,166 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" -) - -// executeDescribeStatement handles DESCRIBE table commands -// Shows table schema in PostgreSQL-compatible format -func (e *SQLEngine) executeDescribeStatement(ctx context.Context, tableName string, database string) (*QueryResult, error) { - if database == "" { - database = e.catalog.GetCurrentDatabase() - if database == "" { - database = "default" - } - } - - // Auto-discover and register topic if not already in catalog (same logic as SELECT) - if _, err := e.catalog.GetTableInfo(database, tableName); err != nil { - // Topic not in catalog, try to discover and register it - if regErr := e.discoverAndRegisterTopic(ctx, database, tableName); regErr != nil { - fmt.Printf("Warning: Failed to discover topic %s.%s: %v\n", database, tableName, regErr) - return &QueryResult{Error: fmt.Errorf("topic %s.%s not found and auto-discovery failed: %v", database, tableName, regErr)}, regErr - } - } - - // Get flat schema and key columns from broker - flatSchema, keyColumns, _, err := e.catalog.brokerClient.GetTopicSchema(ctx, database, tableName) - if err != nil { - return &QueryResult{Error: err}, err - } - - // System columns to include in DESCRIBE output - systemColumns := []struct { - Name string - Type string - Extra string - }{ - {"_ts", "TIMESTAMP", "System column: Message timestamp"}, - {"_key", "VARBINARY", "System column: Message key"}, - {"_source", "VARCHAR(255)", "System column: Data source (parquet/log)"}, - } - - // If no schema is defined, include _value field - if flatSchema == nil { - systemColumns = append(systemColumns, struct { - Name string - Type string - Extra string - }{SW_COLUMN_NAME_VALUE, "VARBINARY", "Raw message value (no schema defined)"}) - } - - // Calculate total rows: schema fields + system columns - totalRows := len(systemColumns) - if flatSchema != nil { - totalRows += len(flatSchema.Fields) - } - - // Create key column lookup map - keyColumnMap := make(map[string]bool) - for _, keyCol := range keyColumns { - keyColumnMap[keyCol] = true - } - - result := &QueryResult{ - Columns: []string{"Field", "Type", "Null", "Key", "Default", "Extra"}, - Rows: make([][]sqltypes.Value, totalRows), - } - - rowIndex := 0 - - // Add schema fields - mark key columns appropriately - if flatSchema != nil { - for _, field := range flatSchema.Fields { - sqlType := e.convertMQTypeToSQL(field.Type) - isKey := keyColumnMap[field.Name] - keyType := "" - if isKey { - keyType = "PRI" // Primary key - } - extra := "Data field" - if isKey { - extra = "Key field" - } - - result.Rows[rowIndex] = []sqltypes.Value{ - sqltypes.NewVarChar(field.Name), - sqltypes.NewVarChar(sqlType), - sqltypes.NewVarChar("YES"), - sqltypes.NewVarChar(keyType), - sqltypes.NewVarChar("NULL"), - sqltypes.NewVarChar(extra), - } - rowIndex++ - } - } - - // Add system columns - for _, sysCol := range systemColumns { - result.Rows[rowIndex] = []sqltypes.Value{ - sqltypes.NewVarChar(sysCol.Name), // Field - sqltypes.NewVarChar(sysCol.Type), // Type - sqltypes.NewVarChar("YES"), // Null - sqltypes.NewVarChar("SYS"), // Key - mark as system column - sqltypes.NewVarChar("NULL"), // Default - sqltypes.NewVarChar(sysCol.Extra), // Extra - description - } - rowIndex++ - } - - return result, nil -} - -// Enhanced executeShowStatementWithDescribe handles SHOW statements including DESCRIBE -func (e *SQLEngine) executeShowStatementWithDescribe(ctx context.Context, stmt *ShowStatement) (*QueryResult, error) { - switch strings.ToUpper(stmt.Type) { - case "DATABASES": - return e.showDatabases(ctx) - case "TABLES": - // Parse FROM clause for database specification, or use current database context - database := "" - // Check if there's a database specified in SHOW TABLES FROM database - if stmt.Schema != "" { - // Use schema field if set by parser - database = stmt.Schema - } else { - // Try to get from OnTable.Name with proper nil checks - if stmt.OnTable.Name != nil { - if nameStr := stmt.OnTable.Name.String(); nameStr != "" { - database = nameStr - } else { - database = e.catalog.GetCurrentDatabase() - } - } else { - database = e.catalog.GetCurrentDatabase() - } - } - if database == "" { - // Use current database context - database = e.catalog.GetCurrentDatabase() - } - return e.showTables(ctx, database) - case "COLUMNS": - // SHOW COLUMNS FROM table is equivalent to DESCRIBE - var tableName, database string - - // Safely extract table name and database with proper nil checks - if stmt.OnTable.Name != nil { - tableName = stmt.OnTable.Name.String() - if stmt.OnTable.Qualifier != nil { - database = stmt.OnTable.Qualifier.String() - } - } - - if tableName != "" { - return e.executeDescribeStatement(ctx, tableName, database) - } - fallthrough - default: - err := fmt.Errorf("unsupported SHOW statement: %s", stmt.Type) - return &QueryResult{Error: err}, err - } -} diff --git a/weed/query/engine/engine.go b/weed/query/engine/engine.go deleted file mode 100644 index e00fd78ca..000000000 --- a/weed/query/engine/engine.go +++ /dev/null @@ -1,5973 +0,0 @@ -package engine - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "math" - "math/big" - "regexp" - "strconv" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "google.golang.org/protobuf/proto" -) - -// SQL Function Name Constants -const ( - // Aggregation Functions - FuncCOUNT = "COUNT" - FuncSUM = "SUM" - FuncAVG = "AVG" - FuncMIN = "MIN" - FuncMAX = "MAX" - - // String Functions - FuncUPPER = "UPPER" - FuncLOWER = "LOWER" - FuncLENGTH = "LENGTH" - FuncTRIM = "TRIM" - FuncBTRIM = "BTRIM" // CockroachDB's internal name for TRIM - FuncLTRIM = "LTRIM" - FuncRTRIM = "RTRIM" - FuncSUBSTRING = "SUBSTRING" - FuncLEFT = "LEFT" - FuncRIGHT = "RIGHT" - FuncCONCAT = "CONCAT" - - // DateTime Functions - FuncCURRENT_DATE = "CURRENT_DATE" - FuncCURRENT_TIME = "CURRENT_TIME" - FuncCURRENT_TIMESTAMP = "CURRENT_TIMESTAMP" - FuncNOW = "NOW" - FuncEXTRACT = "EXTRACT" - FuncDATE_TRUNC = "DATE_TRUNC" - - // PostgreSQL uses EXTRACT(part FROM date) instead of convenience functions like YEAR(), MONTH(), etc. -) - -// PostgreSQL-compatible SQL AST types -type Statement interface { - isStatement() -} - -type ShowStatement struct { - Type string // "databases", "tables", "columns" - Table string // for SHOW COLUMNS FROM table - Schema string // for database context - OnTable NameRef // for compatibility with existing code that checks OnTable -} - -func (s *ShowStatement) isStatement() {} - -type UseStatement struct { - Database string // database name to switch to -} - -func (u *UseStatement) isStatement() {} - -type DDLStatement struct { - Action string // "create", "alter", "drop" - NewName NameRef - TableSpec *TableSpec -} - -type NameRef struct { - Name StringGetter - Qualifier StringGetter -} - -type StringGetter interface { - String() string -} - -type stringValue string - -func (s stringValue) String() string { return string(s) } - -type TableSpec struct { - Columns []ColumnDef -} - -type ColumnDef struct { - Name StringGetter - Type TypeRef -} - -type TypeRef struct { - Type string -} - -func (d *DDLStatement) isStatement() {} - -type SelectStatement struct { - SelectExprs []SelectExpr - From []TableExpr - Where *WhereClause - Limit *LimitClause - WindowFunctions []*WindowFunction -} - -type WhereClause struct { - Expr ExprNode -} - -type LimitClause struct { - Rowcount ExprNode - Offset ExprNode -} - -func (s *SelectStatement) isStatement() {} - -// Window function types for time-series analytics -type WindowSpec struct { - PartitionBy []ExprNode - OrderBy []*OrderByClause -} - -type WindowFunction struct { - Function string // ROW_NUMBER, RANK, LAG, LEAD - Args []ExprNode // Function arguments - Over *WindowSpec - Alias string // Column alias for the result -} - -type OrderByClause struct { - Column string - Order string // ASC or DESC -} - -type SelectExpr interface { - isSelectExpr() -} - -type StarExpr struct{} - -func (s *StarExpr) isSelectExpr() {} - -type AliasedExpr struct { - Expr ExprNode - As AliasRef -} - -type AliasRef interface { - IsEmpty() bool - String() string -} - -type aliasValue string - -func (a aliasValue) IsEmpty() bool { return string(a) == "" } -func (a aliasValue) String() string { return string(a) } -func (a *AliasedExpr) isSelectExpr() {} - -type TableExpr interface { - isTableExpr() -} - -type AliasedTableExpr struct { - Expr interface{} -} - -func (a *AliasedTableExpr) isTableExpr() {} - -type TableName struct { - Name StringGetter - Qualifier StringGetter -} - -type ExprNode interface { - isExprNode() -} - -type FuncExpr struct { - Name StringGetter - Exprs []SelectExpr -} - -func (f *FuncExpr) isExprNode() {} - -type ColName struct { - Name StringGetter -} - -func (c *ColName) isExprNode() {} - -// ArithmeticExpr represents arithmetic operations like id+user_id and string concatenation like name||suffix -type ArithmeticExpr struct { - Left ExprNode - Right ExprNode - Operator string // +, -, *, /, %, || -} - -func (a *ArithmeticExpr) isExprNode() {} - -type ComparisonExpr struct { - Left ExprNode - Right ExprNode - Operator string -} - -func (c *ComparisonExpr) isExprNode() {} - -type AndExpr struct { - Left ExprNode - Right ExprNode -} - -func (a *AndExpr) isExprNode() {} - -type OrExpr struct { - Left ExprNode - Right ExprNode -} - -func (o *OrExpr) isExprNode() {} - -type ParenExpr struct { - Expr ExprNode -} - -func (p *ParenExpr) isExprNode() {} - -type SQLVal struct { - Type int - Val []byte -} - -func (s *SQLVal) isExprNode() {} - -type ValTuple []ExprNode - -func (v ValTuple) isExprNode() {} - -type IntervalExpr struct { - Value string // The interval value (e.g., "1 hour", "30 minutes") - Unit string // The unit (parsed from value) -} - -func (i *IntervalExpr) isExprNode() {} - -type BetweenExpr struct { - Left ExprNode // The expression to test - From ExprNode // Lower bound (inclusive) - To ExprNode // Upper bound (inclusive) - Not bool // true for NOT BETWEEN -} - -func (b *BetweenExpr) isExprNode() {} - -type IsNullExpr struct { - Expr ExprNode // The expression to test for null -} - -func (i *IsNullExpr) isExprNode() {} - -type IsNotNullExpr struct { - Expr ExprNode // The expression to test for not null -} - -func (i *IsNotNullExpr) isExprNode() {} - -// SQLVal types -const ( - IntVal = iota - StrVal - FloatVal -) - -// Operator constants -const ( - CreateStr = "create" - AlterStr = "alter" - DropStr = "drop" - EqualStr = "=" - LessThanStr = "<" - GreaterThanStr = ">" - LessEqualStr = "<=" - GreaterEqualStr = ">=" - NotEqualStr = "!=" -) - -// parseIdentifier properly parses a potentially quoted identifier (database/table name) -func parseIdentifier(identifier string) string { - identifier = strings.TrimSpace(identifier) - identifier = strings.TrimSuffix(identifier, ";") // Remove trailing semicolon - - // Handle double quotes (PostgreSQL standard) - if len(identifier) >= 2 && identifier[0] == '"' && identifier[len(identifier)-1] == '"' { - return identifier[1 : len(identifier)-1] - } - - // Handle backticks (MySQL compatibility) - if len(identifier) >= 2 && identifier[0] == '`' && identifier[len(identifier)-1] == '`' { - return identifier[1 : len(identifier)-1] - } - - return identifier -} - -// ParseSQL parses PostgreSQL-compatible SQL statements using CockroachDB parser for SELECT queries -func ParseSQL(sql string) (Statement, error) { - sql = strings.TrimSpace(sql) - sqlUpper := strings.ToUpper(sql) - - // Handle USE statement - if strings.HasPrefix(sqlUpper, "USE ") { - parts := strings.Fields(sql) - if len(parts) < 2 { - return nil, fmt.Errorf("USE statement requires a database name") - } - // Parse the database name properly, handling quoted identifiers - dbName := parseIdentifier(strings.Join(parts[1:], " ")) - return &UseStatement{Database: dbName}, nil - } - - // Handle DESCRIBE/DESC statements as aliases for SHOW COLUMNS FROM - if strings.HasPrefix(sqlUpper, "DESCRIBE ") || strings.HasPrefix(sqlUpper, "DESC ") { - parts := strings.Fields(sql) - if len(parts) < 2 { - return nil, fmt.Errorf("DESCRIBE/DESC statement requires a table name") - } - - var tableName string - var database string - - // Get the raw table name (before parsing identifiers) - var rawTableName string - if len(parts) >= 3 && strings.ToUpper(parts[1]) == "TABLE" { - rawTableName = parts[2] - } else { - rawTableName = parts[1] - } - - // Parse database.table format first, then apply parseIdentifier to each part - if strings.Contains(rawTableName, ".") { - // Handle quoted database.table like "db"."table" - if strings.HasPrefix(rawTableName, "\"") || strings.HasPrefix(rawTableName, "`") { - // Find the closing quote and the dot - var quoteChar byte = '"' - if rawTableName[0] == '`' { - quoteChar = '`' - } - - // Find the matching closing quote - closingIndex := -1 - for i := 1; i < len(rawTableName); i++ { - if rawTableName[i] == quoteChar { - closingIndex = i - break - } - } - - if closingIndex != -1 && closingIndex+1 < len(rawTableName) && rawTableName[closingIndex+1] == '.' { - // Valid quoted database name - database = parseIdentifier(rawTableName[:closingIndex+1]) - tableName = parseIdentifier(rawTableName[closingIndex+2:]) - } else { - // Fall back to simple split then parse - dbTableParts := strings.SplitN(rawTableName, ".", 2) - database = parseIdentifier(dbTableParts[0]) - tableName = parseIdentifier(dbTableParts[1]) - } - } else { - // Simple case: no quotes, just split then parse - dbTableParts := strings.SplitN(rawTableName, ".", 2) - database = parseIdentifier(dbTableParts[0]) - tableName = parseIdentifier(dbTableParts[1]) - } - } else { - // No database.table format, just parse the table name - tableName = parseIdentifier(rawTableName) - } - - stmt := &ShowStatement{Type: "columns"} - stmt.OnTable.Name = stringValue(tableName) - if database != "" { - stmt.OnTable.Qualifier = stringValue(database) - } - return stmt, nil - } - - // Handle SHOW statements (keep custom parsing for these simple cases) - if strings.HasPrefix(sqlUpper, "SHOW DATABASES") || strings.HasPrefix(sqlUpper, "SHOW SCHEMAS") { - return &ShowStatement{Type: "databases"}, nil - } - if strings.HasPrefix(sqlUpper, "SHOW TABLES") { - stmt := &ShowStatement{Type: "tables"} - // Handle "SHOW TABLES FROM database" syntax - if strings.Contains(sqlUpper, "FROM") { - partsUpper := strings.Fields(sqlUpper) - partsOriginal := strings.Fields(sql) // Use original casing - for i, part := range partsUpper { - if part == "FROM" && i+1 < len(partsOriginal) { - // Parse the database name properly - dbName := parseIdentifier(partsOriginal[i+1]) - stmt.Schema = dbName // Set the Schema field for the test - stmt.OnTable.Name = stringValue(dbName) // Keep for compatibility - break - } - } - } - return stmt, nil - } - if strings.HasPrefix(sqlUpper, "SHOW COLUMNS FROM") { - // Parse "SHOW COLUMNS FROM table" or "SHOW COLUMNS FROM database.table" - parts := strings.Fields(sql) - if len(parts) < 4 { - return nil, fmt.Errorf("SHOW COLUMNS FROM statement requires a table name") - } - - // Get the raw table name (before parsing identifiers) - rawTableName := parts[3] - var tableName string - var database string - - // Parse database.table format first, then apply parseIdentifier to each part - if strings.Contains(rawTableName, ".") { - // Handle quoted database.table like "db"."table" - if strings.HasPrefix(rawTableName, "\"") || strings.HasPrefix(rawTableName, "`") { - // Find the closing quote and the dot - var quoteChar byte = '"' - if rawTableName[0] == '`' { - quoteChar = '`' - } - - // Find the matching closing quote - closingIndex := -1 - for i := 1; i < len(rawTableName); i++ { - if rawTableName[i] == quoteChar { - closingIndex = i - break - } - } - - if closingIndex != -1 && closingIndex+1 < len(rawTableName) && rawTableName[closingIndex+1] == '.' { - // Valid quoted database name - database = parseIdentifier(rawTableName[:closingIndex+1]) - tableName = parseIdentifier(rawTableName[closingIndex+2:]) - } else { - // Fall back to simple split then parse - dbTableParts := strings.SplitN(rawTableName, ".", 2) - database = parseIdentifier(dbTableParts[0]) - tableName = parseIdentifier(dbTableParts[1]) - } - } else { - // Simple case: no quotes, just split then parse - dbTableParts := strings.SplitN(rawTableName, ".", 2) - database = parseIdentifier(dbTableParts[0]) - tableName = parseIdentifier(dbTableParts[1]) - } - } else { - // No database.table format, just parse the table name - tableName = parseIdentifier(rawTableName) - } - - stmt := &ShowStatement{Type: "columns"} - stmt.OnTable.Name = stringValue(tableName) - if database != "" { - stmt.OnTable.Qualifier = stringValue(database) - } - return stmt, nil - } - - // Use CockroachDB parser for SELECT statements - if strings.HasPrefix(sqlUpper, "SELECT") { - parser := NewCockroachSQLParser() - return parser.ParseSQL(sql) - } - - return nil, UnsupportedFeatureError{ - Feature: fmt.Sprintf("statement type: %s", strings.Fields(sqlUpper)[0]), - Reason: "statement parsing not implemented", - } -} - -// debugModeKey is used to store debug mode flag in context -type debugModeKey struct{} - -// isDebugMode checks if we're in debug/explain mode -func isDebugMode(ctx context.Context) bool { - debug, ok := ctx.Value(debugModeKey{}).(bool) - return ok && debug -} - -// withDebugMode returns a context with debug mode enabled -func withDebugMode(ctx context.Context) context.Context { - return context.WithValue(ctx, debugModeKey{}, true) -} - -// LogBufferStart tracks the starting buffer index for a file -// Buffer indexes are monotonically increasing, count = len(chunks) -type LogBufferStart struct { - StartIndex int64 `json:"start_index"` // Starting buffer index (count = len(chunks)) -} - -// SQLEngine provides SQL query execution capabilities for SeaweedFS -// Assumptions: -// 1. MQ namespaces map directly to SQL databases -// 2. MQ topics map directly to SQL tables -// 3. Schema evolution is handled transparently with backward compatibility -// 4. Queries run against Parquet-stored MQ messages -type SQLEngine struct { - catalog *SchemaCatalog -} - -// NewSQLEngine creates a new SQL execution engine -// Uses master address for service discovery and initialization -func NewSQLEngine(masterAddress string) *SQLEngine { - // Initialize global HTTP client if not already done - // This is needed for reading partition data from the filer - if util_http.GetGlobalHttpClient() == nil { - util_http.InitGlobalHttpClient() - } - - return &SQLEngine{ - catalog: NewSchemaCatalog(masterAddress), - } -} - -// NewSQLEngineWithCatalog creates a new SQL execution engine with a custom catalog -// Used for testing or when you want to provide a pre-configured catalog -func NewSQLEngineWithCatalog(catalog *SchemaCatalog) *SQLEngine { - // Initialize global HTTP client if not already done - // This is needed for reading partition data from the filer - if util_http.GetGlobalHttpClient() == nil { - util_http.InitGlobalHttpClient() - } - - return &SQLEngine{ - catalog: catalog, - } -} - -// GetCatalog returns the schema catalog for external access -func (e *SQLEngine) GetCatalog() *SchemaCatalog { - return e.catalog -} - -// ExecuteSQL parses and executes a SQL statement -// Assumptions: -// 1. All SQL statements are PostgreSQL-compatible via pg_query_go -// 2. DDL operations (CREATE/ALTER/DROP) modify underlying MQ topics -// 3. DML operations (SELECT) query Parquet files directly -// 4. Error handling follows PostgreSQL conventions -func (e *SQLEngine) ExecuteSQL(ctx context.Context, sql string) (*QueryResult, error) { - startTime := time.Now() - - // Handle EXPLAIN as a special case - sqlTrimmed := strings.TrimSpace(sql) - sqlUpper := strings.ToUpper(sqlTrimmed) - if strings.HasPrefix(sqlUpper, "EXPLAIN") { - // Extract the actual query after EXPLAIN - actualSQL := strings.TrimSpace(sqlTrimmed[7:]) // Remove "EXPLAIN" - return e.executeExplain(ctx, actualSQL, startTime) - } - - // Parse the SQL statement using PostgreSQL parser - stmt, err := ParseSQL(sql) - if err != nil { - return &QueryResult{ - Error: fmt.Errorf("SQL parse error: %v", err), - }, err - } - - // Route to appropriate handler based on statement type - switch stmt := stmt.(type) { - case *ShowStatement: - return e.executeShowStatementWithDescribe(ctx, stmt) - case *UseStatement: - return e.executeUseStatement(ctx, stmt) - case *DDLStatement: - return e.executeDDLStatement(ctx, stmt) - case *SelectStatement: - return e.executeSelectStatement(ctx, stmt) - default: - err := fmt.Errorf("unsupported SQL statement type: %T", stmt) - return &QueryResult{Error: err}, err - } -} - -// executeExplain handles EXPLAIN statements by executing the query with plan tracking -func (e *SQLEngine) executeExplain(ctx context.Context, actualSQL string, startTime time.Time) (*QueryResult, error) { - // Enable debug mode for EXPLAIN queries - ctx = withDebugMode(ctx) - - // Parse the actual SQL statement using PostgreSQL parser - stmt, err := ParseSQL(actualSQL) - if err != nil { - return &QueryResult{ - Error: fmt.Errorf("SQL parse error in EXPLAIN query: %v", err), - }, err - } - - // Create execution plan - plan := &QueryExecutionPlan{ - QueryType: strings.ToUpper(strings.Fields(actualSQL)[0]), - DataSources: []string{}, - OptimizationsUsed: []string{}, - Details: make(map[string]interface{}), - } - - var result *QueryResult - - // Route to appropriate handler based on statement type (with plan tracking) - switch stmt := stmt.(type) { - case *SelectStatement: - result, err = e.executeSelectStatementWithPlan(ctx, stmt, plan) - if err != nil { - plan.Details["error"] = err.Error() - } - case *ShowStatement: - plan.QueryType = "SHOW" - plan.ExecutionStrategy = "metadata_only" - result, err = e.executeShowStatementWithDescribe(ctx, stmt) - default: - err := fmt.Errorf("EXPLAIN not supported for statement type: %T", stmt) - return &QueryResult{Error: err}, err - } - - // Calculate execution time - plan.ExecutionTimeMs = float64(time.Since(startTime).Nanoseconds()) / 1e6 - - // Format execution plan as result - return e.formatExecutionPlan(plan, result, err) -} - -// formatExecutionPlan converts execution plan to a hierarchical tree format for display -func (e *SQLEngine) formatExecutionPlan(plan *QueryExecutionPlan, originalResult *QueryResult, originalErr error) (*QueryResult, error) { - columns := []string{"Query Execution Plan"} - rows := [][]sqltypes.Value{} - - var planLines []string - - // Use new tree structure if available, otherwise fallback to legacy format - if plan.RootNode != nil { - planLines = e.buildTreePlan(plan, originalErr) - } else { - // Build legacy hierarchical plan display - planLines = e.buildHierarchicalPlan(plan, originalErr) - } - - for _, line := range planLines { - rows = append(rows, []sqltypes.Value{ - sqltypes.NewVarChar(line), - }) - } - - if originalErr != nil { - return &QueryResult{ - Columns: columns, - Rows: rows, - ExecutionPlan: plan, - Error: originalErr, - }, originalErr - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - ExecutionPlan: plan, - }, nil -} - -// buildTreePlan creates the new tree-based execution plan display -func (e *SQLEngine) buildTreePlan(plan *QueryExecutionPlan, err error) []string { - var lines []string - - // Root header - lines = append(lines, fmt.Sprintf("%s Query (%s)", plan.QueryType, plan.ExecutionStrategy)) - - // Build the execution tree - if plan.RootNode != nil { - // Root execution node is always the last (and only) child of SELECT Query - treeLines := e.formatExecutionNode(plan.RootNode, "โ””โ”€โ”€ ", " ", true) - lines = append(lines, treeLines...) - } - - // Add error information if present - if err != nil { - lines = append(lines, "") - lines = append(lines, fmt.Sprintf("Error: %v", err)) - } - - return lines -} - -// formatExecutionNode recursively formats execution tree nodes -func (e *SQLEngine) formatExecutionNode(node ExecutionNode, prefix, childPrefix string, isRoot bool) []string { - var lines []string - - description := node.GetDescription() - - // Format the current node - if isRoot { - lines = append(lines, fmt.Sprintf("%s%s", prefix, description)) - } else { - lines = append(lines, fmt.Sprintf("%s%s", prefix, description)) - } - - // Add node-specific details - switch n := node.(type) { - case *FileSourceNode: - lines = e.formatFileSourceDetails(lines, n, childPrefix, isRoot) - case *ScanOperationNode: - lines = e.formatScanOperationDetails(lines, n, childPrefix, isRoot) - case *MergeOperationNode: - lines = e.formatMergeOperationDetails(lines, n, childPrefix, isRoot) - } - - // Format children - children := node.GetChildren() - if len(children) > 0 { - for i, child := range children { - isLastChild := i == len(children)-1 - - var nextPrefix, nextChildPrefix string - if isLastChild { - nextPrefix = childPrefix + "โ””โ”€โ”€ " - nextChildPrefix = childPrefix + " " - } else { - nextPrefix = childPrefix + "โ”œโ”€โ”€ " - nextChildPrefix = childPrefix + "โ”‚ " - } - - childLines := e.formatExecutionNode(child, nextPrefix, nextChildPrefix, false) - lines = append(lines, childLines...) - } - } - - return lines -} - -// formatFileSourceDetails adds details for file source nodes -func (e *SQLEngine) formatFileSourceDetails(lines []string, node *FileSourceNode, childPrefix string, isRoot bool) []string { - prefix := childPrefix - if isRoot { - prefix = "โ”‚ " - } - - // Add predicates - if len(node.Predicates) > 0 { - lines = append(lines, fmt.Sprintf("%sโ”œโ”€โ”€ Predicates: %s", prefix, strings.Join(node.Predicates, " AND "))) - } - - // Add operations - if len(node.Operations) > 0 { - lines = append(lines, fmt.Sprintf("%sโ””โ”€โ”€ Operations: %s", prefix, strings.Join(node.Operations, " + "))) - } else if len(node.Predicates) == 0 { - lines = append(lines, fmt.Sprintf("%sโ””โ”€โ”€ Operation: full_scan", prefix)) - } - - return lines -} - -// formatScanOperationDetails adds details for scan operation nodes -func (e *SQLEngine) formatScanOperationDetails(lines []string, node *ScanOperationNode, childPrefix string, isRoot bool) []string { - prefix := childPrefix - if isRoot { - prefix = "โ”‚ " - } - - hasChildren := len(node.Children) > 0 - - // Add predicates if present - if len(node.Predicates) > 0 { - if hasChildren { - lines = append(lines, fmt.Sprintf("%sโ”œโ”€โ”€ Predicates: %s", prefix, strings.Join(node.Predicates, " AND "))) - } else { - lines = append(lines, fmt.Sprintf("%sโ””โ”€โ”€ Predicates: %s", prefix, strings.Join(node.Predicates, " AND "))) - } - } - - return lines -} - -// formatMergeOperationDetails adds details for merge operation nodes -func (e *SQLEngine) formatMergeOperationDetails(lines []string, node *MergeOperationNode, childPrefix string, isRoot bool) []string { - hasChildren := len(node.Children) > 0 - - // Add merge strategy info only if we have children, with proper indentation - if strategy, exists := node.Details["merge_strategy"]; exists && hasChildren { - // Strategy should be indented as a detail of this node, before its children - lines = append(lines, fmt.Sprintf("%sโ”œโ”€โ”€ Strategy: %v", childPrefix, strategy)) - } - - return lines -} - -// buildHierarchicalPlan creates a tree-like structure for the execution plan -func (e *SQLEngine) buildHierarchicalPlan(plan *QueryExecutionPlan, err error) []string { - var lines []string - - // Root node - Query type and strategy - lines = append(lines, fmt.Sprintf("%s Query (%s)", plan.QueryType, plan.ExecutionStrategy)) - - // Aggregations section (if present) - if len(plan.Aggregations) > 0 { - lines = append(lines, "โ”œโ”€โ”€ Aggregations") - for i, agg := range plan.Aggregations { - if i == len(plan.Aggregations)-1 { - lines = append(lines, fmt.Sprintf("โ”‚ โ””โ”€โ”€ %s", agg)) - } else { - lines = append(lines, fmt.Sprintf("โ”‚ โ”œโ”€โ”€ %s", agg)) - } - } - } - - // Data Sources section - if len(plan.DataSources) > 0 { - hasMore := len(plan.OptimizationsUsed) > 0 || plan.TotalRowsProcessed > 0 || len(plan.Details) > 0 || err != nil - if hasMore { - lines = append(lines, "โ”œโ”€โ”€ Data Sources") - } else { - lines = append(lines, "โ””โ”€โ”€ Data Sources") - } - - for i, source := range plan.DataSources { - prefix := "โ”‚ " - if !hasMore && i == len(plan.DataSources)-1 { - prefix = " " - } - - if i == len(plan.DataSources)-1 { - lines = append(lines, fmt.Sprintf("%sโ””โ”€โ”€ %s", prefix, e.formatDataSource(source))) - } else { - lines = append(lines, fmt.Sprintf("%sโ”œโ”€โ”€ %s", prefix, e.formatDataSource(source))) - } - } - } - - // Optimizations section - if len(plan.OptimizationsUsed) > 0 { - hasMore := plan.TotalRowsProcessed > 0 || len(plan.Details) > 0 || err != nil - if hasMore { - lines = append(lines, "โ”œโ”€โ”€ Optimizations") - } else { - lines = append(lines, "โ””โ”€โ”€ Optimizations") - } - - for i, opt := range plan.OptimizationsUsed { - prefix := "โ”‚ " - if !hasMore && i == len(plan.OptimizationsUsed)-1 { - prefix = " " - } - - if i == len(plan.OptimizationsUsed)-1 { - lines = append(lines, fmt.Sprintf("%sโ””โ”€โ”€ %s", prefix, e.formatOptimization(opt))) - } else { - lines = append(lines, fmt.Sprintf("%sโ”œโ”€โ”€ %s", prefix, e.formatOptimization(opt))) - } - } - } - - // Check for data sources tree availability - partitionPaths, hasPartitions := plan.Details["partition_paths"].([]string) - parquetFiles, _ := plan.Details["parquet_files"].([]string) - liveLogFiles, _ := plan.Details["live_log_files"].([]string) - - // Statistics section - statisticsPresent := plan.PartitionsScanned > 0 || plan.ParquetFilesScanned > 0 || - plan.LiveLogFilesScanned > 0 || plan.TotalRowsProcessed > 0 - - if statisticsPresent { - // Check if there are sections after Statistics (Data Sources Tree, Details, Performance) - hasDataSourcesTree := hasPartitions && len(partitionPaths) > 0 - hasMoreAfterStats := hasDataSourcesTree || len(plan.Details) > 0 || err != nil || true // Performance is always present - if hasMoreAfterStats { - lines = append(lines, "โ”œโ”€โ”€ Statistics") - } else { - lines = append(lines, "โ””โ”€โ”€ Statistics") - } - - stats := []string{} - if plan.PartitionsScanned > 0 { - stats = append(stats, fmt.Sprintf("Partitions Scanned: %d", plan.PartitionsScanned)) - } - if plan.ParquetFilesScanned > 0 { - stats = append(stats, fmt.Sprintf("Parquet Files: %d", plan.ParquetFilesScanned)) - } - if plan.LiveLogFilesScanned > 0 { - stats = append(stats, fmt.Sprintf("Live Log Files: %d", plan.LiveLogFilesScanned)) - } - // Always show row statistics for aggregations, even if 0 (to show fast path efficiency) - if resultsReturned, hasResults := plan.Details["results_returned"]; hasResults { - stats = append(stats, fmt.Sprintf("Rows Scanned: %d", plan.TotalRowsProcessed)) - stats = append(stats, fmt.Sprintf("Results Returned: %v", resultsReturned)) - - // Add fast path explanation when no rows were scanned - if plan.TotalRowsProcessed == 0 { - // Use the actual scan method from Details instead of hardcoding - if scanMethod, exists := plan.Details["scan_method"].(string); exists { - stats = append(stats, fmt.Sprintf("Scan Method: %s", scanMethod)) - } else { - stats = append(stats, "Scan Method: Metadata Only") - } - } - } else if plan.TotalRowsProcessed > 0 { - stats = append(stats, fmt.Sprintf("Rows Processed: %d", plan.TotalRowsProcessed)) - } - - // Broker buffer information - if plan.BrokerBufferQueried { - stats = append(stats, fmt.Sprintf("Broker Buffer Queried: Yes (%d messages)", plan.BrokerBufferMessages)) - if plan.BufferStartIndex > 0 { - stats = append(stats, fmt.Sprintf("Buffer Start Index: %d (deduplication enabled)", plan.BufferStartIndex)) - } - } - - for i, stat := range stats { - if hasMoreAfterStats { - // More sections after Statistics, so use โ”‚ prefix - if i == len(stats)-1 { - lines = append(lines, fmt.Sprintf("โ”‚ โ””โ”€โ”€ %s", stat)) - } else { - lines = append(lines, fmt.Sprintf("โ”‚ โ”œโ”€โ”€ %s", stat)) - } - } else { - // This is the last main section, so use space prefix for final item - if i == len(stats)-1 { - lines = append(lines, fmt.Sprintf(" โ””โ”€โ”€ %s", stat)) - } else { - lines = append(lines, fmt.Sprintf(" โ”œโ”€โ”€ %s", stat)) - } - } - } - } - - // Data Sources Tree section (if file paths are available) - if hasPartitions && len(partitionPaths) > 0 { - // Check if there are more sections after this - hasMore := len(plan.Details) > 0 || err != nil - if hasMore { - lines = append(lines, "โ”œโ”€โ”€ Data Sources Tree") - } else { - lines = append(lines, "โ”œโ”€โ”€ Data Sources Tree") // Performance always comes after - } - - // Build a tree structure for each partition - for i, partition := range partitionPaths { - isLastPartition := i == len(partitionPaths)-1 - - // Show partition directory - partitionPrefix := "โ”œโ”€โ”€ " - if isLastPartition { - partitionPrefix = "โ””โ”€โ”€ " - } - lines = append(lines, fmt.Sprintf("โ”‚ %s%s/", partitionPrefix, partition)) - - // Show parquet files in this partition - partitionParquetFiles := make([]string, 0) - for _, file := range parquetFiles { - if strings.HasPrefix(file, partition+"/") { - fileName := file[len(partition)+1:] - partitionParquetFiles = append(partitionParquetFiles, fileName) - } - } - - // Show live log files in this partition - partitionLiveLogFiles := make([]string, 0) - for _, file := range liveLogFiles { - if strings.HasPrefix(file, partition+"/") { - fileName := file[len(partition)+1:] - partitionLiveLogFiles = append(partitionLiveLogFiles, fileName) - } - } - - // Display files with proper tree formatting - totalFiles := len(partitionParquetFiles) + len(partitionLiveLogFiles) - fileIndex := 0 - - // Display parquet files - for _, fileName := range partitionParquetFiles { - fileIndex++ - isLastFile := fileIndex == totalFiles && isLastPartition - - var filePrefix string - if isLastPartition { - if isLastFile { - filePrefix = " โ””โ”€โ”€ " - } else { - filePrefix = " โ”œโ”€โ”€ " - } - } else { - if isLastFile { - filePrefix = "โ”‚ โ””โ”€โ”€ " - } else { - filePrefix = "โ”‚ โ”œโ”€โ”€ " - } - } - lines = append(lines, fmt.Sprintf("โ”‚ %s%s (parquet)", filePrefix, fileName)) - } - - // Display live log files - for _, fileName := range partitionLiveLogFiles { - fileIndex++ - isLastFile := fileIndex == totalFiles && isLastPartition - - var filePrefix string - if isLastPartition { - if isLastFile { - filePrefix = " โ””โ”€โ”€ " - } else { - filePrefix = " โ”œโ”€โ”€ " - } - } else { - if isLastFile { - filePrefix = "โ”‚ โ””โ”€โ”€ " - } else { - filePrefix = "โ”‚ โ”œโ”€โ”€ " - } - } - lines = append(lines, fmt.Sprintf("โ”‚ %s%s (live log)", filePrefix, fileName)) - } - } - } - - // Details section - // Filter out details that are shown elsewhere - filteredDetails := make([]string, 0) - for key, value := range plan.Details { - // Skip keys that are already formatted and displayed in the Statistics section - if key != "results_returned" && key != "partition_paths" && key != "parquet_files" && key != "live_log_files" { - filteredDetails = append(filteredDetails, fmt.Sprintf("%s: %v", key, value)) - } - } - - if len(filteredDetails) > 0 { - // Performance is always present, so check if there are errors after Details - hasMore := err != nil - if hasMore { - lines = append(lines, "โ”œโ”€โ”€ Details") - } else { - lines = append(lines, "โ”œโ”€โ”€ Details") // Performance always comes after - } - - for i, detail := range filteredDetails { - if i == len(filteredDetails)-1 { - lines = append(lines, fmt.Sprintf("โ”‚ โ””โ”€โ”€ %s", detail)) - } else { - lines = append(lines, fmt.Sprintf("โ”‚ โ”œโ”€โ”€ %s", detail)) - } - } - } - - // Performance section (always present) - if err != nil { - lines = append(lines, "โ”œโ”€โ”€ Performance") - lines = append(lines, fmt.Sprintf("โ”‚ โ””โ”€โ”€ Execution Time: %.3fms", plan.ExecutionTimeMs)) - lines = append(lines, "โ””โ”€โ”€ Error") - lines = append(lines, fmt.Sprintf(" โ””โ”€โ”€ %s", err.Error())) - } else { - lines = append(lines, "โ””โ”€โ”€ Performance") - lines = append(lines, fmt.Sprintf(" โ””โ”€โ”€ Execution Time: %.3fms", plan.ExecutionTimeMs)) - } - - return lines -} - -// formatDataSource provides user-friendly names for data sources -func (e *SQLEngine) formatDataSource(source string) string { - switch source { - case "parquet_stats": - return "Parquet Statistics (fast path)" - case "parquet_files": - return "Parquet Files (full scan)" - case "live_logs": - return "Live Log Files" - case "broker_buffer": - return "Broker Buffer (real-time)" - default: - return source - } -} - -// buildExecutionTree creates a tree representation of the query execution plan -func (e *SQLEngine) buildExecutionTree(plan *QueryExecutionPlan, stmt *SelectStatement) ExecutionNode { - // Extract WHERE clause predicates for pushdown analysis - var predicates []string - if stmt.Where != nil { - predicates = e.extractPredicateStrings(stmt.Where.Expr) - } - - // Check if we have detailed file information - partitionPaths, hasPartitions := plan.Details["partition_paths"].([]string) - parquetFiles, hasParquetFiles := plan.Details["parquet_files"].([]string) - liveLogFiles, hasLiveLogFiles := plan.Details["live_log_files"].([]string) - - if !hasPartitions || len(partitionPaths) == 0 { - // Fallback: create simple structure without file details - return &ScanOperationNode{ - ScanType: "hybrid_scan", - Description: fmt.Sprintf("Hybrid Scan (%s)", plan.ExecutionStrategy), - Predicates: predicates, - Details: map[string]interface{}{ - "note": "File details not available", - }, - } - } - - // Build file source nodes - var parquetNodes []ExecutionNode - var liveLogNodes []ExecutionNode - var brokerBufferNodes []ExecutionNode - - // Create parquet file nodes - if hasParquetFiles { - for _, filePath := range parquetFiles { - operations := e.determineParquetOperations(plan, filePath) - parquetNodes = append(parquetNodes, &FileSourceNode{ - FilePath: filePath, - SourceType: "parquet", - Predicates: predicates, - Operations: operations, - OptimizationHint: e.determineOptimizationHint(plan, "parquet"), - Details: map[string]interface{}{ - "format": "parquet", - }, - }) - } - } - - // Create live log file nodes - if hasLiveLogFiles { - for _, filePath := range liveLogFiles { - operations := e.determineLiveLogOperations(plan, filePath) - liveLogNodes = append(liveLogNodes, &FileSourceNode{ - FilePath: filePath, - SourceType: "live_log", - Predicates: predicates, - Operations: operations, - OptimizationHint: e.determineOptimizationHint(plan, "live_log"), - Details: map[string]interface{}{ - "format": "log_entry", - }, - }) - } - } - - // Create broker buffer node only if queried AND has unflushed messages - if plan.BrokerBufferQueried && plan.BrokerBufferMessages > 0 { - brokerBufferNodes = append(brokerBufferNodes, &FileSourceNode{ - FilePath: "broker_memory_buffer", - SourceType: "broker_buffer", - Predicates: predicates, - Operations: []string{"memory_scan"}, - OptimizationHint: "real_time", - Details: map[string]interface{}{ - "messages": plan.BrokerBufferMessages, - "buffer_start_idx": plan.BufferStartIndex, - }, - }) - } - - // Build the tree structure based on data sources - var scanNodes []ExecutionNode - - // Add parquet scan node ONLY if there are actual parquet files - if len(parquetNodes) > 0 { - scanNodes = append(scanNodes, &ScanOperationNode{ - ScanType: "parquet_scan", - Description: fmt.Sprintf("Parquet File Scan (%d files)", len(parquetNodes)), - Predicates: predicates, - Children: parquetNodes, - Details: map[string]interface{}{ - "files_count": len(parquetNodes), - "pushdown": "column_projection + predicate_filtering", - }, - }) - } - - // Add live log scan node ONLY if there are actual live log files - if len(liveLogNodes) > 0 { - scanNodes = append(scanNodes, &ScanOperationNode{ - ScanType: "live_log_scan", - Description: fmt.Sprintf("Live Log Scan (%d files)", len(liveLogNodes)), - Predicates: predicates, - Children: liveLogNodes, - Details: map[string]interface{}{ - "files_count": len(liveLogNodes), - "pushdown": "predicate_filtering", - }, - }) - } - - // Add broker buffer scan node ONLY if buffer was actually queried - if len(brokerBufferNodes) > 0 { - scanNodes = append(scanNodes, &ScanOperationNode{ - ScanType: "broker_buffer_scan", - Description: "Real-time Buffer Scan", - Predicates: predicates, - Children: brokerBufferNodes, - Details: map[string]interface{}{ - "real_time": true, - }, - }) - } - - // Debug: Check what we actually have - totalFileNodes := len(parquetNodes) + len(liveLogNodes) + len(brokerBufferNodes) - if totalFileNodes == 0 { - // No actual files found, return simple fallback - return &ScanOperationNode{ - ScanType: "hybrid_scan", - Description: fmt.Sprintf("Hybrid Scan (%s)", plan.ExecutionStrategy), - Predicates: predicates, - Details: map[string]interface{}{ - "note": "No source files discovered", - }, - } - } - - // If no scan nodes, return a fallback structure - if len(scanNodes) == 0 { - return &ScanOperationNode{ - ScanType: "hybrid_scan", - Description: fmt.Sprintf("Hybrid Scan (%s)", plan.ExecutionStrategy), - Predicates: predicates, - Details: map[string]interface{}{ - "note": "No file details available", - }, - } - } - - // If only one scan type, return it directly - if len(scanNodes) == 1 { - return scanNodes[0] - } - - // Multiple scan types - need merge operation - return &MergeOperationNode{ - OperationType: "chronological_merge", - Description: "Chronological Merge (time-ordered)", - Children: scanNodes, - Details: map[string]interface{}{ - "merge_strategy": "timestamp_based", - "sources_count": len(scanNodes), - }, - } -} - -// extractPredicateStrings extracts predicate descriptions from WHERE clause -func (e *SQLEngine) extractPredicateStrings(expr ExprNode) []string { - var predicates []string - e.extractPredicateStringsRecursive(expr, &predicates) - return predicates -} - -func (e *SQLEngine) extractPredicateStringsRecursive(expr ExprNode, predicates *[]string) { - switch exprType := expr.(type) { - case *ComparisonExpr: - *predicates = append(*predicates, fmt.Sprintf("%s %s %s", - e.exprToString(exprType.Left), exprType.Operator, e.exprToString(exprType.Right))) - case *IsNullExpr: - *predicates = append(*predicates, fmt.Sprintf("%s IS NULL", e.exprToString(exprType.Expr))) - case *IsNotNullExpr: - *predicates = append(*predicates, fmt.Sprintf("%s IS NOT NULL", e.exprToString(exprType.Expr))) - case *AndExpr: - e.extractPredicateStringsRecursive(exprType.Left, predicates) - e.extractPredicateStringsRecursive(exprType.Right, predicates) - case *OrExpr: - e.extractPredicateStringsRecursive(exprType.Left, predicates) - e.extractPredicateStringsRecursive(exprType.Right, predicates) - case *ParenExpr: - e.extractPredicateStringsRecursive(exprType.Expr, predicates) - } -} - -func (e *SQLEngine) exprToString(expr ExprNode) string { - switch exprType := expr.(type) { - case *ColName: - return exprType.Name.String() - default: - // For now, return a simplified representation - return fmt.Sprintf("%T", expr) - } -} - -// determineParquetOperations determines what operations will be performed on parquet files -func (e *SQLEngine) determineParquetOperations(plan *QueryExecutionPlan, filePath string) []string { - var operations []string - - // Check for column projection - if contains(plan.OptimizationsUsed, "column_projection") { - operations = append(operations, "column_projection") - } - - // Check for predicate pushdown - if contains(plan.OptimizationsUsed, "predicate_pushdown") { - operations = append(operations, "predicate_pushdown") - } - - // Check for statistics usage - if contains(plan.OptimizationsUsed, "parquet_statistics") || plan.ExecutionStrategy == "hybrid_fast_path" { - operations = append(operations, "statistics_skip") - } else { - operations = append(operations, "row_group_scan") - } - - if len(operations) == 0 { - operations = append(operations, "full_scan") - } - - return operations -} - -// determineLiveLogOperations determines what operations will be performed on live log files -func (e *SQLEngine) determineLiveLogOperations(plan *QueryExecutionPlan, filePath string) []string { - var operations []string - - // Live logs typically require sequential scan - operations = append(operations, "sequential_scan") - - // Check for predicate filtering - if contains(plan.OptimizationsUsed, "predicate_pushdown") { - operations = append(operations, "predicate_filtering") - } - - return operations -} - -// determineOptimizationHint determines the optimization hint for a data source -func (e *SQLEngine) determineOptimizationHint(plan *QueryExecutionPlan, sourceType string) string { - switch plan.ExecutionStrategy { - case "hybrid_fast_path": - if sourceType == "parquet" { - return "statistics_only" - } - return "minimal_scan" - case "full_scan": - return "full_scan" - case "column_projection": - return "column_filter" - default: - return "" - } -} - -// Helper function to check if slice contains string -func contains(slice []string, item string) bool { - for _, s := range slice { - if s == item { - return true - } - } - return false -} - -// collectLiveLogFileNames collects live log file names from a partition directory -func (e *SQLEngine) collectLiveLogFileNames(filerClient filer_pb.FilerClient, partitionPath string) ([]string, error) { - var liveLogFiles []string - - err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // List all files in partition directory - request := &filer_pb.ListEntriesRequest{ - Directory: partitionPath, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: false, - Limit: 10000, // reasonable limit - } - - stream, err := client.ListEntries(context.Background(), request) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - break - } - return err - } - - entry := resp.Entry - if entry != nil && !entry.IsDirectory { - // Check if this is a log file (not a parquet file) - fileName := entry.Name - if !strings.HasSuffix(fileName, ".parquet") && !strings.HasSuffix(fileName, ".metadata") { - liveLogFiles = append(liveLogFiles, fileName) - } - } - } - - return nil - }) - - if err != nil { - return nil, err - } - - return liveLogFiles, nil -} - -// formatOptimization provides user-friendly names for optimizations -func (e *SQLEngine) formatOptimization(opt string) string { - switch opt { - case "parquet_statistics": - return "Parquet Statistics Usage" - case "live_log_counting": - return "Live Log Row Counting" - case "deduplication": - return "Duplicate Data Avoidance" - case "predicate_pushdown": - return "WHERE Clause Pushdown" - case "column_statistics_pruning": - return "Column Statistics File Pruning" - case "column_projection": - return "Column Selection" - case "limit_pushdown": - return "LIMIT Optimization" - default: - return opt - } -} - -// executeUseStatement handles USE database statements to switch current database context -func (e *SQLEngine) executeUseStatement(ctx context.Context, stmt *UseStatement) (*QueryResult, error) { - // Validate database name - if stmt.Database == "" { - err := fmt.Errorf("database name cannot be empty") - return &QueryResult{Error: err}, err - } - - // Set the current database in the catalog - e.catalog.SetCurrentDatabase(stmt.Database) - - // Return success message - result := &QueryResult{ - Columns: []string{"message"}, - Rows: [][]sqltypes.Value{ - {sqltypes.MakeString([]byte(fmt.Sprintf("Database changed to: %s", stmt.Database)))}, - }, - Error: nil, - } - return result, nil -} - -// executeDDLStatement handles CREATE operations only -// Note: ALTER TABLE and DROP TABLE are not supported to protect topic data -func (e *SQLEngine) executeDDLStatement(ctx context.Context, stmt *DDLStatement) (*QueryResult, error) { - switch stmt.Action { - case CreateStr: - return e.createTable(ctx, stmt) - case AlterStr: - err := fmt.Errorf("ALTER TABLE is not supported") - return &QueryResult{Error: err}, err - case DropStr: - err := fmt.Errorf("DROP TABLE is not supported") - return &QueryResult{Error: err}, err - default: - err := fmt.Errorf("unsupported DDL action: %s", stmt.Action) - return &QueryResult{Error: err}, err - } -} - -// executeSelectStatementWithPlan handles SELECT queries with execution plan tracking -func (e *SQLEngine) executeSelectStatementWithPlan(ctx context.Context, stmt *SelectStatement, plan *QueryExecutionPlan) (*QueryResult, error) { - // Initialize plan details once - if plan != nil && plan.Details == nil { - plan.Details = make(map[string]interface{}) - } - // Parse aggregations to populate plan - var aggregations []AggregationSpec - hasAggregations := false - selectAll := false - - for _, selectExpr := range stmt.SelectExprs { - switch expr := selectExpr.(type) { - case *StarExpr: - selectAll = true - case *AliasedExpr: - switch col := expr.Expr.(type) { - case *FuncExpr: - // This is an aggregation function - aggSpec, err := e.parseAggregationFunction(col, expr) - if err != nil { - return &QueryResult{Error: err}, err - } - if aggSpec != nil { - aggregations = append(aggregations, *aggSpec) - hasAggregations = true - plan.Aggregations = append(plan.Aggregations, aggSpec.Function+"("+aggSpec.Column+")") - } - } - } - } - - // Execute the query (handle aggregations specially for plan tracking) - var result *QueryResult - var err error - - // Extract table information for execution (needed for both aggregation and regular queries) - var database, tableName string - if len(stmt.From) == 1 { - if table, ok := stmt.From[0].(*AliasedTableExpr); ok { - if tableExpr, ok := table.Expr.(TableName); ok { - tableName = tableExpr.Name.String() - if tableExpr.Qualifier != nil && tableExpr.Qualifier.String() != "" { - database = tableExpr.Qualifier.String() - } - } - } - } - - // Use current database if not specified - if database == "" { - database = e.catalog.currentDatabase - if database == "" { - database = "default" - } - } - - // CRITICAL FIX: Always use HybridMessageScanner for ALL queries to read both flushed and unflushed data - // Create hybrid scanner for both aggregation and regular SELECT queries - var filerClient filer_pb.FilerClient - if e.catalog.brokerClient != nil { - filerClient, err = e.catalog.brokerClient.GetFilerClient() - if err != nil { - return &QueryResult{Error: err}, err - } - } - - hybridScanner, err := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, database, tableName, e) - if err != nil { - return &QueryResult{Error: err}, err - } - - if hasAggregations { - // Execute aggregation query with plan tracking - result, err = e.executeAggregationQueryWithPlan(ctx, hybridScanner, aggregations, stmt, plan) - } else { - // CRITICAL FIX: Use HybridMessageScanner for regular SELECT queries too - // This ensures both flushed and unflushed data are read - result, err = e.executeRegularSelectWithHybridScanner(ctx, hybridScanner, stmt, plan) - } - - if err == nil && result != nil { - // Extract table name for use in execution strategy determination - var tableName string - if len(stmt.From) == 1 { - if table, ok := stmt.From[0].(*AliasedTableExpr); ok { - if tableExpr, ok := table.Expr.(TableName); ok { - tableName = tableExpr.Name.String() - } - } - } - - // Try to get topic information for partition count and row processing stats - if tableName != "" { - // Try to discover partitions for statistics - if partitions, discoverErr := e.discoverTopicPartitions("test", tableName); discoverErr == nil { - plan.PartitionsScanned = len(partitions) - } - - // For aggregations, determine actual processing based on execution strategy - if hasAggregations { - plan.Details["results_returned"] = len(result.Rows) - - // Determine actual work done based on execution strategy - if stmt.Where == nil { - // Use the same logic as actual execution to determine if fast path was used - var filerClient filer_pb.FilerClient - if e.catalog.brokerClient != nil { - filerClient, _ = e.catalog.brokerClient.GetFilerClient() - } - - hybridScanner, scannerErr := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, "test", tableName, e) - var canUseFastPath bool - if scannerErr == nil { - // Test if fast path can be used (same as actual execution) - _, canOptimize := e.tryFastParquetAggregation(ctx, hybridScanner, aggregations) - canUseFastPath = canOptimize - } else { - // Fallback to simple check - canUseFastPath = true - for _, spec := range aggregations { - if !e.canUseParquetStatsForAggregation(spec) { - canUseFastPath = false - break - } - } - } - - if canUseFastPath { - // Fast path: minimal scanning (only live logs that weren't converted) - if actualScanCount, countErr := e.getActualRowsScannedForFastPath(ctx, "test", tableName); countErr == nil { - plan.TotalRowsProcessed = actualScanCount - } else { - plan.TotalRowsProcessed = 0 // Parquet stats only, no scanning - } - } else { - // Full scan: count all rows - if actualRowCount, countErr := e.getTopicTotalRowCount(ctx, "test", tableName); countErr == nil { - plan.TotalRowsProcessed = actualRowCount - } else { - plan.TotalRowsProcessed = int64(len(result.Rows)) - plan.Details["note"] = "scan_count_unavailable" - } - } - } else { - // With WHERE clause: full scan required - if actualRowCount, countErr := e.getTopicTotalRowCount(ctx, "test", tableName); countErr == nil { - plan.TotalRowsProcessed = actualRowCount - } else { - plan.TotalRowsProcessed = int64(len(result.Rows)) - plan.Details["note"] = "scan_count_unavailable" - } - } - } else { - // For non-aggregations, result count is meaningful - plan.TotalRowsProcessed = int64(len(result.Rows)) - } - } - - // Determine execution strategy based on query type (reuse fast path detection from above) - if hasAggregations { - // Skip execution strategy determination if plan was already populated by aggregation execution - // This prevents overwriting the correctly built plan from BuildAggregationPlan - if plan.ExecutionStrategy == "" { - // For aggregations, determine if fast path conditions are met - if stmt.Where == nil { - // Reuse the same logic used above for row counting - var canUseFastPath bool - if tableName != "" { - var filerClient filer_pb.FilerClient - if e.catalog.brokerClient != nil { - filerClient, _ = e.catalog.brokerClient.GetFilerClient() - } - - if filerClient != nil { - hybridScanner, scannerErr := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, "test", tableName, e) - if scannerErr == nil { - // Test if fast path can be used (same as actual execution) - _, canOptimize := e.tryFastParquetAggregation(ctx, hybridScanner, aggregations) - canUseFastPath = canOptimize - } else { - canUseFastPath = false - } - } else { - // Fallback check - canUseFastPath = true - for _, spec := range aggregations { - if !e.canUseParquetStatsForAggregation(spec) { - canUseFastPath = false - break - } - } - } - } else { - canUseFastPath = false - } - - if canUseFastPath { - plan.ExecutionStrategy = "hybrid_fast_path" - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "parquet_statistics", "live_log_counting", "deduplication") - plan.DataSources = []string{"parquet_stats", "live_logs"} - } else { - plan.ExecutionStrategy = "full_scan" - plan.DataSources = []string{"live_logs", "parquet_files"} - } - } else { - plan.ExecutionStrategy = "full_scan" - plan.DataSources = []string{"live_logs", "parquet_files"} - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "predicate_pushdown") - } - } - } else { - // For regular SELECT queries - if selectAll { - plan.ExecutionStrategy = "hybrid_scan" - plan.DataSources = []string{"live_logs", "parquet_files"} - } else { - plan.ExecutionStrategy = "column_projection" - plan.DataSources = []string{"live_logs", "parquet_files"} - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "column_projection") - } - } - - // Add WHERE clause information - if stmt.Where != nil { - // Only add predicate_pushdown if not already added - alreadyHasPredicate := false - for _, opt := range plan.OptimizationsUsed { - if opt == "predicate_pushdown" { - alreadyHasPredicate = true - break - } - } - if !alreadyHasPredicate { - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "predicate_pushdown") - } - plan.Details["where_clause"] = "present" - } - - // Add LIMIT information - if stmt.Limit != nil { - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "limit_pushdown") - if stmt.Limit.Rowcount != nil { - if limitExpr, ok := stmt.Limit.Rowcount.(*SQLVal); ok && limitExpr.Type == IntVal { - plan.Details["limit"] = string(limitExpr.Val) - } - } - } - } - - // Build execution tree after all plan details are populated - if err == nil && result != nil && plan != nil { - plan.RootNode = e.buildExecutionTree(plan, stmt) - } - - return result, err -} - -// executeSelectStatement handles SELECT queries -// Assumptions: -// 1. Queries run against Parquet files in MQ topics -// 2. Predicate pushdown is used for efficiency -// 3. Cross-topic joins are supported via partition-aware execution -func (e *SQLEngine) executeSelectStatement(ctx context.Context, stmt *SelectStatement) (*QueryResult, error) { - // Parse FROM clause to get table (topic) information - if len(stmt.From) != 1 { - err := fmt.Errorf("SELECT supports single table queries only") - return &QueryResult{Error: err}, err - } - - // Extract table reference - var database, tableName string - switch table := stmt.From[0].(type) { - case *AliasedTableExpr: - switch tableExpr := table.Expr.(type) { - case TableName: - tableName = tableExpr.Name.String() - if tableExpr.Qualifier != nil && tableExpr.Qualifier.String() != "" { - database = tableExpr.Qualifier.String() - } - default: - err := fmt.Errorf("unsupported table expression: %T", tableExpr) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported FROM clause: %T", table) - return &QueryResult{Error: err}, err - } - - // Use current database context if not specified - if database == "" { - database = e.catalog.GetCurrentDatabase() - if database == "" { - database = "default" - } - } - - // Auto-discover and register topic if not already in catalog - if _, err := e.catalog.GetTableInfo(database, tableName); err != nil { - // Topic not in catalog, try to discover and register it - if regErr := e.discoverAndRegisterTopic(ctx, database, tableName); regErr != nil { - // Return error immediately for non-existent topics instead of falling back to sample data - return &QueryResult{Error: regErr}, regErr - } - } - - // Create HybridMessageScanner for the topic (reads both live logs + Parquet files) - // Get filerClient from broker connection (works with both real and mock brokers) - var filerClient filer_pb.FilerClient - var filerClientErr error - filerClient, filerClientErr = e.catalog.brokerClient.GetFilerClient() - if filerClientErr != nil { - // Return error if filer client is not available for topic access - return &QueryResult{Error: filerClientErr}, filerClientErr - } - - hybridScanner, err := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, database, tableName, e) - if err != nil { - // Handle quiet topics gracefully: topics exist but have no active schema/brokers - if IsNoSchemaError(err) { - // Return empty result for quiet topics (normal in production environments) - return &QueryResult{ - Columns: []string{}, - Rows: [][]sqltypes.Value{}, - Database: database, - Table: tableName, - }, nil - } - // Return error for other access issues (truly non-existent topics, etc.) - topicErr := fmt.Errorf("failed to access topic %s.%s: %v", database, tableName, err) - return &QueryResult{Error: topicErr}, topicErr - } - - // Parse SELECT columns and detect aggregation functions - var columns []string - var aggregations []AggregationSpec - selectAll := false - hasAggregations := false - _ = hasAggregations // Used later in aggregation routing - // Track required base columns for arithmetic expressions - baseColumnsSet := make(map[string]bool) - - for _, selectExpr := range stmt.SelectExprs { - switch expr := selectExpr.(type) { - case *StarExpr: - selectAll = true - case *AliasedExpr: - switch col := expr.Expr.(type) { - case *ColName: - colName := col.Name.String() - - // Check if this "column" is actually an arithmetic expression with functions - if arithmeticExpr := e.parseColumnLevelCalculation(colName); arithmeticExpr != nil { - columns = append(columns, e.getArithmeticExpressionAlias(arithmeticExpr)) - e.extractBaseColumns(arithmeticExpr, baseColumnsSet) - } else { - columns = append(columns, colName) - baseColumnsSet[colName] = true - } - case *ArithmeticExpr: - // Handle arithmetic expressions like id+user_id and string concatenation like name||suffix - columns = append(columns, e.getArithmeticExpressionAlias(col)) - // Extract base columns needed for this arithmetic expression - e.extractBaseColumns(col, baseColumnsSet) - case *SQLVal: - // Handle string/numeric literals like 'good', 123, etc. - columns = append(columns, e.getSQLValAlias(col)) - case *FuncExpr: - // Distinguish between aggregation functions and string functions - funcName := strings.ToUpper(col.Name.String()) - if e.isAggregationFunction(funcName) { - // Handle aggregation functions - aggSpec, err := e.parseAggregationFunction(col, expr) - if err != nil { - return &QueryResult{Error: err}, err - } - aggregations = append(aggregations, *aggSpec) - hasAggregations = true - } else if e.isStringFunction(funcName) { - // Handle string functions like UPPER, LENGTH, etc. - columns = append(columns, e.getStringFunctionAlias(col)) - // Extract base columns needed for this string function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else if e.isDateTimeFunction(funcName) { - // Handle datetime functions like CURRENT_DATE, NOW, EXTRACT, DATE_TRUNC - columns = append(columns, e.getDateTimeFunctionAlias(col)) - // Extract base columns needed for this datetime function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else { - return &QueryResult{Error: fmt.Errorf("unsupported function: %s", funcName)}, fmt.Errorf("unsupported function: %s", funcName) - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", col) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", expr) - return &QueryResult{Error: err}, err - } - } - - // If we have aggregations, use aggregation query path - if hasAggregations { - return e.executeAggregationQuery(ctx, hybridScanner, aggregations, stmt) - } - - // Parse WHERE clause for predicate pushdown - var predicate func(*schema_pb.RecordValue) bool - if stmt.Where != nil { - predicate, err = e.buildPredicateWithContext(stmt.Where.Expr, stmt.SelectExprs) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Parse LIMIT and OFFSET clauses - // Use -1 to distinguish "no LIMIT" from "LIMIT 0" - limit := -1 - offset := 0 - if stmt.Limit != nil && stmt.Limit.Rowcount != nil { - switch limitExpr := stmt.Limit.Rowcount.(type) { - case *SQLVal: - if limitExpr.Type == IntVal { - var parseErr error - limit64, parseErr := strconv.ParseInt(string(limitExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if limit64 > math.MaxInt32 || limit64 < 0 { - return &QueryResult{Error: fmt.Errorf("LIMIT value %d is out of valid range", limit64)}, fmt.Errorf("LIMIT value %d is out of valid range", limit64) - } - limit = int(limit64) - } - } - } - - // Parse OFFSET clause if present - if stmt.Limit != nil && stmt.Limit.Offset != nil { - switch offsetExpr := stmt.Limit.Offset.(type) { - case *SQLVal: - if offsetExpr.Type == IntVal { - var parseErr error - offset64, parseErr := strconv.ParseInt(string(offsetExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if offset64 > math.MaxInt32 || offset64 < 0 { - return &QueryResult{Error: fmt.Errorf("OFFSET value %d is out of valid range", offset64)}, fmt.Errorf("OFFSET value %d is out of valid range", offset64) - } - offset = int(offset64) - } - } - } - - // Build hybrid scan options - // Extract time filters from WHERE clause to optimize scanning - startTimeNs, stopTimeNs := int64(0), int64(0) - if stmt.Where != nil { - startTimeNs, stopTimeNs = e.extractTimeFilters(stmt.Where.Expr) - } - - hybridScanOptions := HybridScanOptions{ - StartTimeNs: startTimeNs, // Extracted from WHERE clause time comparisons - StopTimeNs: stopTimeNs, // Extracted from WHERE clause time comparisons - Limit: limit, - Offset: offset, - Predicate: predicate, - } - - if !selectAll { - // Convert baseColumnsSet to slice for hybrid scan options - baseColumns := make([]string, 0, len(baseColumnsSet)) - for columnName := range baseColumnsSet { - baseColumns = append(baseColumns, columnName) - } - // Use base columns (not expression aliases) for data retrieval - if len(baseColumns) > 0 { - hybridScanOptions.Columns = baseColumns - } else { - // If no base columns found (shouldn't happen), use original columns - hybridScanOptions.Columns = columns - } - } - - // Execute the hybrid scan (live logs + Parquet files) - results, err := hybridScanner.Scan(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Convert to SQL result format - if selectAll { - if len(columns) > 0 { - // SELECT *, specific_columns - include both auto-discovered and explicit columns - return hybridScanner.ConvertToSQLResultWithMixedColumns(results, columns), nil - } else { - // SELECT * only - let converter determine all columns (excludes system columns) - columns = nil - return hybridScanner.ConvertToSQLResult(results, columns), nil - } - } - - // Handle custom column expressions (including arithmetic) - return e.ConvertToSQLResultWithExpressions(hybridScanner, results, stmt.SelectExprs), nil -} - -// executeRegularSelectWithHybridScanner handles regular SELECT queries using HybridMessageScanner -// This ensures both flushed and unflushed data are read, fixing the SQL empty results issue -func (e *SQLEngine) executeRegularSelectWithHybridScanner(ctx context.Context, hybridScanner *HybridMessageScanner, stmt *SelectStatement, plan *QueryExecutionPlan) (*QueryResult, error) { - // Parse SELECT expressions to determine columns and detect aggregations - var columns []string - var aggregations []AggregationSpec - var hasAggregations bool - selectAll := false - baseColumnsSet := make(map[string]bool) // Track base columns needed for expressions - - for _, selectExpr := range stmt.SelectExprs { - switch expr := selectExpr.(type) { - case *StarExpr: - selectAll = true - case *AliasedExpr: - switch col := expr.Expr.(type) { - case *ColName: - columnName := col.Name.String() - columns = append(columns, columnName) - baseColumnsSet[columnName] = true - case *FuncExpr: - funcName := strings.ToLower(col.Name.String()) - if e.isAggregationFunction(funcName) { - // Handle aggregation functions - aggSpec, err := e.parseAggregationFunction(col, expr) - if err != nil { - return &QueryResult{Error: err}, err - } - aggregations = append(aggregations, *aggSpec) - hasAggregations = true - } else if e.isStringFunction(funcName) { - // Handle string functions like UPPER, LENGTH, etc. - columns = append(columns, e.getStringFunctionAlias(col)) - // Extract base columns needed for this string function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else if e.isDateTimeFunction(funcName) { - // Handle datetime functions like CURRENT_DATE, NOW, EXTRACT, DATE_TRUNC - columns = append(columns, e.getDateTimeFunctionAlias(col)) - // Extract base columns needed for this datetime function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else { - return &QueryResult{Error: fmt.Errorf("unsupported function: %s", funcName)}, fmt.Errorf("unsupported function: %s", funcName) - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", col) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", expr) - return &QueryResult{Error: err}, err - } - } - - // If we have aggregations, delegate to aggregation handler - if hasAggregations { - return e.executeAggregationQuery(ctx, hybridScanner, aggregations, stmt) - } - - // Parse WHERE clause for predicate pushdown - var predicate func(*schema_pb.RecordValue) bool - var err error - if stmt.Where != nil { - predicate, err = e.buildPredicateWithContext(stmt.Where.Expr, stmt.SelectExprs) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Parse LIMIT and OFFSET clauses - // Use -1 to distinguish "no LIMIT" from "LIMIT 0" - limit := -1 - offset := 0 - if stmt.Limit != nil && stmt.Limit.Rowcount != nil { - switch limitExpr := stmt.Limit.Rowcount.(type) { - case *SQLVal: - if limitExpr.Type == IntVal { - var parseErr error - limit64, parseErr := strconv.ParseInt(string(limitExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if limit64 > math.MaxInt32 || limit64 < 0 { - return &QueryResult{Error: fmt.Errorf("LIMIT value %d is out of valid range", limit64)}, fmt.Errorf("LIMIT value %d is out of valid range", limit64) - } - limit = int(limit64) - } - } - } - - // Parse OFFSET clause if present - if stmt.Limit != nil && stmt.Limit.Offset != nil { - switch offsetExpr := stmt.Limit.Offset.(type) { - case *SQLVal: - if offsetExpr.Type == IntVal { - var parseErr error - offset64, parseErr := strconv.ParseInt(string(offsetExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if offset64 > math.MaxInt32 || offset64 < 0 { - return &QueryResult{Error: fmt.Errorf("OFFSET value %d is out of valid range", offset64)}, fmt.Errorf("OFFSET value %d is out of valid range", offset64) - } - offset = int(offset64) - } - } - } - - // Build hybrid scan options - // Extract time filters from WHERE clause to optimize scanning - startTimeNs, stopTimeNs := int64(0), int64(0) - if stmt.Where != nil { - startTimeNs, stopTimeNs = e.extractTimeFilters(stmt.Where.Expr) - } - - hybridScanOptions := HybridScanOptions{ - StartTimeNs: startTimeNs, // Extracted from WHERE clause time comparisons - StopTimeNs: stopTimeNs, // Extracted from WHERE clause time comparisons - Limit: limit, - Offset: offset, - Predicate: predicate, - } - - if !selectAll { - // Convert baseColumnsSet to slice for hybrid scan options - baseColumns := make([]string, 0, len(baseColumnsSet)) - for columnName := range baseColumnsSet { - baseColumns = append(baseColumns, columnName) - } - // Use base columns (not expression aliases) for data retrieval - if len(baseColumns) > 0 { - hybridScanOptions.Columns = baseColumns - } else { - // If no base columns found (shouldn't happen), use original columns - hybridScanOptions.Columns = columns - } - } - - // Execute the hybrid scan (both flushed and unflushed data) - var results []HybridScanResult - if plan != nil { - // EXPLAIN mode - capture broker buffer stats - var stats *HybridScanStats - results, stats, err = hybridScanner.ScanWithStats(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Populate plan with broker buffer information - if stats != nil { - plan.BrokerBufferQueried = stats.BrokerBufferQueried - plan.BrokerBufferMessages = stats.BrokerBufferMessages - plan.BufferStartIndex = stats.BufferStartIndex - - // Add broker_buffer to data sources if buffer was queried - if stats.BrokerBufferQueried { - // Check if broker_buffer is already in data sources - hasBrokerBuffer := false - for _, source := range plan.DataSources { - if source == "broker_buffer" { - hasBrokerBuffer = true - break - } - } - if !hasBrokerBuffer { - plan.DataSources = append(plan.DataSources, "broker_buffer") - } - } - } - } else { - // Normal mode - just get results - results, err = hybridScanner.Scan(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Convert to SQL result format - if selectAll { - if len(columns) > 0 { - // SELECT *, specific_columns - include both auto-discovered and explicit columns - return hybridScanner.ConvertToSQLResultWithMixedColumns(results, columns), nil - } else { - // SELECT * only - let converter determine all columns (excludes system columns) - columns = nil - return hybridScanner.ConvertToSQLResult(results, columns), nil - } - } - - // Handle custom column expressions (including arithmetic) - return e.ConvertToSQLResultWithExpressions(hybridScanner, results, stmt.SelectExprs), nil -} - -// executeSelectStatementWithBrokerStats handles SELECT queries with broker buffer statistics capture -// This is used by EXPLAIN queries to capture complete data source information including broker memory -func (e *SQLEngine) executeSelectStatementWithBrokerStats(ctx context.Context, stmt *SelectStatement, plan *QueryExecutionPlan) (*QueryResult, error) { - // Parse FROM clause to get table (topic) information - if len(stmt.From) != 1 { - err := fmt.Errorf("SELECT supports single table queries only") - return &QueryResult{Error: err}, err - } - - // Extract table reference - var database, tableName string - switch table := stmt.From[0].(type) { - case *AliasedTableExpr: - switch tableExpr := table.Expr.(type) { - case TableName: - tableName = tableExpr.Name.String() - if tableExpr.Qualifier != nil && tableExpr.Qualifier.String() != "" { - database = tableExpr.Qualifier.String() - } - default: - err := fmt.Errorf("unsupported table expression: %T", tableExpr) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported FROM clause: %T", table) - return &QueryResult{Error: err}, err - } - - // Use current database context if not specified - if database == "" { - database = e.catalog.GetCurrentDatabase() - if database == "" { - database = "default" - } - } - - // Auto-discover and register topic if not already in catalog - if _, err := e.catalog.GetTableInfo(database, tableName); err != nil { - // Topic not in catalog, try to discover and register it - if regErr := e.discoverAndRegisterTopic(ctx, database, tableName); regErr != nil { - // Return error immediately for non-existent topics instead of falling back to sample data - return &QueryResult{Error: regErr}, regErr - } - } - - // Create HybridMessageScanner for the topic (reads both live logs + Parquet files) - // Get filerClient from broker connection (works with both real and mock brokers) - var filerClient filer_pb.FilerClient - var filerClientErr error - filerClient, filerClientErr = e.catalog.brokerClient.GetFilerClient() - if filerClientErr != nil { - // Return error if filer client is not available for topic access - return &QueryResult{Error: filerClientErr}, filerClientErr - } - - hybridScanner, err := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, database, tableName, e) - if err != nil { - // Handle quiet topics gracefully: topics exist but have no active schema/brokers - if IsNoSchemaError(err) { - // Return empty result for quiet topics (normal in production environments) - return &QueryResult{ - Columns: []string{}, - Rows: [][]sqltypes.Value{}, - Database: database, - Table: tableName, - }, nil - } - // Return error for other access issues (truly non-existent topics, etc.) - topicErr := fmt.Errorf("failed to access topic %s.%s: %v", database, tableName, err) - return &QueryResult{Error: topicErr}, topicErr - } - - // Parse SELECT columns and detect aggregation functions - var columns []string - var aggregations []AggregationSpec - selectAll := false - hasAggregations := false - _ = hasAggregations // Used later in aggregation routing - // Track required base columns for arithmetic expressions - baseColumnsSet := make(map[string]bool) - - for _, selectExpr := range stmt.SelectExprs { - switch expr := selectExpr.(type) { - case *StarExpr: - selectAll = true - case *AliasedExpr: - switch col := expr.Expr.(type) { - case *ColName: - colName := col.Name.String() - columns = append(columns, colName) - baseColumnsSet[colName] = true - case *ArithmeticExpr: - // Handle arithmetic expressions like id+user_id and string concatenation like name||suffix - columns = append(columns, e.getArithmeticExpressionAlias(col)) - // Extract base columns needed for this arithmetic expression - e.extractBaseColumns(col, baseColumnsSet) - case *SQLVal: - // Handle string/numeric literals like 'good', 123, etc. - columns = append(columns, e.getSQLValAlias(col)) - case *FuncExpr: - // Distinguish between aggregation functions and string functions - funcName := strings.ToUpper(col.Name.String()) - if e.isAggregationFunction(funcName) { - // Handle aggregation functions - aggSpec, err := e.parseAggregationFunction(col, expr) - if err != nil { - return &QueryResult{Error: err}, err - } - aggregations = append(aggregations, *aggSpec) - hasAggregations = true - } else if e.isStringFunction(funcName) { - // Handle string functions like UPPER, LENGTH, etc. - columns = append(columns, e.getStringFunctionAlias(col)) - // Extract base columns needed for this string function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else if e.isDateTimeFunction(funcName) { - // Handle datetime functions like CURRENT_DATE, NOW, EXTRACT, DATE_TRUNC - columns = append(columns, e.getDateTimeFunctionAlias(col)) - // Extract base columns needed for this datetime function - e.extractBaseColumnsFromFunction(col, baseColumnsSet) - } else { - return &QueryResult{Error: fmt.Errorf("unsupported function: %s", funcName)}, fmt.Errorf("unsupported function: %s", funcName) - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", col) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported SELECT expression: %T", expr) - return &QueryResult{Error: err}, err - } - } - - // If we have aggregations, use aggregation query path - if hasAggregations { - return e.executeAggregationQuery(ctx, hybridScanner, aggregations, stmt) - } - - // Parse WHERE clause for predicate pushdown - var predicate func(*schema_pb.RecordValue) bool - if stmt.Where != nil { - predicate, err = e.buildPredicateWithContext(stmt.Where.Expr, stmt.SelectExprs) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Parse LIMIT and OFFSET clauses - // Use -1 to distinguish "no LIMIT" from "LIMIT 0" - limit := -1 - offset := 0 - if stmt.Limit != nil && stmt.Limit.Rowcount != nil { - switch limitExpr := stmt.Limit.Rowcount.(type) { - case *SQLVal: - if limitExpr.Type == IntVal { - var parseErr error - limit64, parseErr := strconv.ParseInt(string(limitExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if limit64 > math.MaxInt32 || limit64 < 0 { - return &QueryResult{Error: fmt.Errorf("LIMIT value %d is out of valid range", limit64)}, fmt.Errorf("LIMIT value %d is out of valid range", limit64) - } - limit = int(limit64) - } - } - } - - // Parse OFFSET clause if present - if stmt.Limit != nil && stmt.Limit.Offset != nil { - switch offsetExpr := stmt.Limit.Offset.(type) { - case *SQLVal: - if offsetExpr.Type == IntVal { - var parseErr error - offset64, parseErr := strconv.ParseInt(string(offsetExpr.Val), 10, 64) - if parseErr != nil { - return &QueryResult{Error: parseErr}, parseErr - } - if offset64 > math.MaxInt32 || offset64 < 0 { - return &QueryResult{Error: fmt.Errorf("OFFSET value %d is out of valid range", offset64)}, fmt.Errorf("OFFSET value %d is out of valid range", offset64) - } - offset = int(offset64) - } - } - } - - // Build hybrid scan options - // Extract time filters from WHERE clause to optimize scanning - startTimeNs, stopTimeNs := int64(0), int64(0) - if stmt.Where != nil { - startTimeNs, stopTimeNs = e.extractTimeFilters(stmt.Where.Expr) - } - - hybridScanOptions := HybridScanOptions{ - StartTimeNs: startTimeNs, // Extracted from WHERE clause time comparisons - StopTimeNs: stopTimeNs, // Extracted from WHERE clause time comparisons - Limit: limit, - Offset: offset, - Predicate: predicate, - } - - if !selectAll { - // Convert baseColumnsSet to slice for hybrid scan options - baseColumns := make([]string, 0, len(baseColumnsSet)) - for columnName := range baseColumnsSet { - baseColumns = append(baseColumns, columnName) - } - // Use base columns (not expression aliases) for data retrieval - if len(baseColumns) > 0 { - hybridScanOptions.Columns = baseColumns - } else { - // If no base columns found (shouldn't happen), use original columns - hybridScanOptions.Columns = columns - } - } - - // Execute the hybrid scan with stats capture for EXPLAIN - var results []HybridScanResult - if plan != nil { - // EXPLAIN mode - capture broker buffer stats - var stats *HybridScanStats - results, stats, err = hybridScanner.ScanWithStats(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Populate plan with broker buffer information - if stats != nil { - plan.BrokerBufferQueried = stats.BrokerBufferQueried - plan.BrokerBufferMessages = stats.BrokerBufferMessages - plan.BufferStartIndex = stats.BufferStartIndex - - // Add broker_buffer to data sources if buffer was queried - if stats.BrokerBufferQueried { - // Check if broker_buffer is already in data sources - hasBrokerBuffer := false - for _, source := range plan.DataSources { - if source == "broker_buffer" { - hasBrokerBuffer = true - break - } - } - if !hasBrokerBuffer { - plan.DataSources = append(plan.DataSources, "broker_buffer") - } - } - } - - // Populate execution plan details with source file information for Data Sources Tree - if partitions, discoverErr := e.discoverTopicPartitions(database, tableName); discoverErr == nil { - // Add partition paths to execution plan details - plan.Details["partition_paths"] = partitions - // Persist time filter details for downstream pruning/diagnostics - plan.Details[PlanDetailStartTimeNs] = startTimeNs - plan.Details[PlanDetailStopTimeNs] = stopTimeNs - - // Collect actual file information for each partition - var parquetFiles []string - var liveLogFiles []string - parquetSources := make(map[string]bool) - - var parquetReadErrors []string - var liveLogListErrors []string - for _, partitionPath := range partitions { - // Get parquet files for this partition - if parquetStats, err := hybridScanner.ReadParquetStatistics(partitionPath); err == nil { - // Prune files by time range with debug logging - filteredStats := pruneParquetFilesByTime(ctx, parquetStats, hybridScanner, startTimeNs, stopTimeNs) - - // Further prune by column statistics from WHERE clause - if stmt.Where != nil { - beforeColumnPrune := len(filteredStats) - filteredStats = e.pruneParquetFilesByColumnStats(ctx, filteredStats, stmt.Where.Expr) - columnPrunedCount := beforeColumnPrune - len(filteredStats) - - if columnPrunedCount > 0 { - // Track column statistics optimization - if !contains(plan.OptimizationsUsed, "column_statistics_pruning") { - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "column_statistics_pruning") - } - } - } - for _, stats := range filteredStats { - parquetFiles = append(parquetFiles, fmt.Sprintf("%s/%s", partitionPath, stats.FileName)) - } - } else { - parquetReadErrors = append(parquetReadErrors, fmt.Sprintf("%s: %v", partitionPath, err)) - } - - // Merge accurate parquet sources from metadata - if sources, err := e.getParquetSourceFilesFromMetadata(partitionPath); err == nil { - for src := range sources { - parquetSources[src] = true - } - } - - // Get live log files for this partition - if liveFiles, err := e.collectLiveLogFileNames(hybridScanner.filerClient, partitionPath); err == nil { - for _, fileName := range liveFiles { - // Exclude live log files that have been converted to parquet (deduplicated) - if parquetSources[fileName] { - continue - } - liveLogFiles = append(liveLogFiles, fmt.Sprintf("%s/%s", partitionPath, fileName)) - } - } else { - liveLogListErrors = append(liveLogListErrors, fmt.Sprintf("%s: %v", partitionPath, err)) - } - } - - if len(parquetFiles) > 0 { - plan.Details["parquet_files"] = parquetFiles - } - if len(liveLogFiles) > 0 { - plan.Details["live_log_files"] = liveLogFiles - } - if len(parquetReadErrors) > 0 { - plan.Details["error_parquet_statistics"] = parquetReadErrors - } - if len(liveLogListErrors) > 0 { - plan.Details["error_live_log_listing"] = liveLogListErrors - } - - // Update scan statistics for execution plan display - plan.PartitionsScanned = len(partitions) - plan.ParquetFilesScanned = len(parquetFiles) - plan.LiveLogFilesScanned = len(liveLogFiles) - } else { - // Handle partition discovery error - plan.Details["error_partition_discovery"] = discoverErr.Error() - } - } else { - // Normal mode - just get results - results, err = hybridScanner.Scan(ctx, hybridScanOptions) - if err != nil { - return &QueryResult{Error: err}, err - } - } - - // Convert to SQL result format - if selectAll { - if len(columns) > 0 { - // SELECT *, specific_columns - include both auto-discovered and explicit columns - return hybridScanner.ConvertToSQLResultWithMixedColumns(results, columns), nil - } else { - // SELECT * only - let converter determine all columns (excludes system columns) - columns = nil - return hybridScanner.ConvertToSQLResult(results, columns), nil - } - } - - // Handle custom column expressions (including arithmetic) - return e.ConvertToSQLResultWithExpressions(hybridScanner, results, stmt.SelectExprs), nil -} - -// extractTimeFilters extracts time range filters from WHERE clause for optimization -// This allows push-down of time-based queries to improve scan performance -// Returns (startTimeNs, stopTimeNs) where 0 means unbounded -func (e *SQLEngine) extractTimeFilters(expr ExprNode) (int64, int64) { - startTimeNs, stopTimeNs := int64(0), int64(0) - - // Recursively extract time filters from expression tree - e.extractTimeFiltersRecursive(expr, &startTimeNs, &stopTimeNs) - - // Special case: if startTimeNs == stopTimeNs, treat it like an equality query - // to avoid premature scan termination. The predicate will handle exact matching. - if startTimeNs != 0 && startTimeNs == stopTimeNs { - stopTimeNs = 0 - } - - return startTimeNs, stopTimeNs -} - -// extractTimeFiltersWithValidation extracts time filters and validates that WHERE clause contains only time-based predicates -// Returns (startTimeNs, stopTimeNs, onlyTimePredicates) where onlyTimePredicates indicates if fast path is safe -func (e *SQLEngine) extractTimeFiltersWithValidation(expr ExprNode) (int64, int64, bool) { - startTimeNs, stopTimeNs := int64(0), int64(0) - onlyTimePredicates := true - - // Recursively extract time filters and validate predicates - e.extractTimeFiltersWithValidationRecursive(expr, &startTimeNs, &stopTimeNs, &onlyTimePredicates) - - // Special case: if startTimeNs == stopTimeNs, treat it like an equality query - if startTimeNs != 0 && startTimeNs == stopTimeNs { - stopTimeNs = 0 - } - - return startTimeNs, stopTimeNs, onlyTimePredicates -} - -// extractTimeFiltersRecursive recursively processes WHERE expressions to find time comparisons -func (e *SQLEngine) extractTimeFiltersRecursive(expr ExprNode, startTimeNs, stopTimeNs *int64) { - switch exprType := expr.(type) { - case *ComparisonExpr: - e.extractTimeFromComparison(exprType, startTimeNs, stopTimeNs) - case *AndExpr: - // For AND expressions, combine time filters (intersection) - e.extractTimeFiltersRecursive(exprType.Left, startTimeNs, stopTimeNs) - e.extractTimeFiltersRecursive(exprType.Right, startTimeNs, stopTimeNs) - case *OrExpr: - // For OR expressions, we can't easily optimize time ranges - // Skip time filter extraction for OR clauses to avoid incorrect results - return - case *ParenExpr: - // Unwrap parentheses and continue - e.extractTimeFiltersRecursive(exprType.Expr, startTimeNs, stopTimeNs) - } -} - -// extractTimeFiltersWithValidationRecursive recursively processes WHERE expressions to find time comparisons and validate predicates -func (e *SQLEngine) extractTimeFiltersWithValidationRecursive(expr ExprNode, startTimeNs, stopTimeNs *int64, onlyTimePredicates *bool) { - switch exprType := expr.(type) { - case *ComparisonExpr: - // Check if this is a time-based comparison - leftCol := e.getColumnName(exprType.Left) - rightCol := e.getColumnName(exprType.Right) - - isTimeComparison := e.isTimestampColumn(leftCol) || e.isTimestampColumn(rightCol) - if isTimeComparison { - // Extract time filter from this comparison - e.extractTimeFromComparison(exprType, startTimeNs, stopTimeNs) - } else { - // Non-time predicate found - fast path is not safe - *onlyTimePredicates = false - } - case *AndExpr: - // For AND expressions, both sides must be time-only for fast path to be safe - e.extractTimeFiltersWithValidationRecursive(exprType.Left, startTimeNs, stopTimeNs, onlyTimePredicates) - e.extractTimeFiltersWithValidationRecursive(exprType.Right, startTimeNs, stopTimeNs, onlyTimePredicates) - case *OrExpr: - // OR expressions are complex and not supported in fast path - *onlyTimePredicates = false - return - case *ParenExpr: - // Unwrap parentheses and continue - e.extractTimeFiltersWithValidationRecursive(exprType.Expr, startTimeNs, stopTimeNs, onlyTimePredicates) - default: - // Unknown expression type - not safe for fast path - *onlyTimePredicates = false - } -} - -// extractTimeFromComparison extracts time bounds from comparison expressions -// Handles comparisons against timestamp columns (system columns and schema-defined timestamp types) -func (e *SQLEngine) extractTimeFromComparison(comp *ComparisonExpr, startTimeNs, stopTimeNs *int64) { - // Check if this is a time-related column comparison - leftCol := e.getColumnName(comp.Left) - rightCol := e.getColumnName(comp.Right) - - var valueExpr ExprNode - var reversed bool - - // Determine which side is the time column (using schema types) - if e.isTimestampColumn(leftCol) { - valueExpr = comp.Right - reversed = false - } else if e.isTimestampColumn(rightCol) { - valueExpr = comp.Left - reversed = true - } else { - // Not a time comparison - return - } - - // Extract the time value - timeValue := e.extractTimeValue(valueExpr) - if timeValue == 0 { - // Couldn't parse time value - return - } - - // Apply the comparison operator to determine time bounds - operator := comp.Operator - if reversed { - // Reverse the operator if column and value are swapped - operator = e.reverseOperator(operator) - } - - switch operator { - case GreaterThanStr: // timestamp > value - if *startTimeNs == 0 || timeValue > *startTimeNs { - *startTimeNs = timeValue - } - case GreaterEqualStr: // timestamp >= value - if *startTimeNs == 0 || timeValue >= *startTimeNs { - *startTimeNs = timeValue - } - case LessThanStr: // timestamp < value - if *stopTimeNs == 0 || timeValue < *stopTimeNs { - *stopTimeNs = timeValue - } - case LessEqualStr: // timestamp <= value - if *stopTimeNs == 0 || timeValue <= *stopTimeNs { - *stopTimeNs = timeValue - } - case EqualStr: // timestamp = value (point query) - // For exact matches, we set startTimeNs slightly before the target - // This works around a scan boundary bug where >= X starts after X instead of at X - // The predicate function will handle exact matching - *startTimeNs = timeValue - 1 - // Do NOT set stopTimeNs - let the predicate handle exact matching - } -} - -// isTimestampColumn checks if a column is a timestamp using schema type information -func (e *SQLEngine) isTimestampColumn(columnName string) bool { - if columnName == "" { - return false - } - - // System timestamp columns are always time columns - if columnName == SW_COLUMN_NAME_TIMESTAMP || columnName == SW_DISPLAY_NAME_TIMESTAMP { - return true - } - - // For user-defined columns, check actual schema type information - if e.catalog != nil { - currentDB := e.catalog.GetCurrentDatabase() - if currentDB == "" { - currentDB = "default" - } - - // Get current table context from query execution - // Note: This is a limitation - we need table context here - // In a full implementation, this would be passed from the query context - tableInfo, err := e.getCurrentTableInfo(currentDB) - if err == nil && tableInfo != nil { - for _, col := range tableInfo.Columns { - if strings.EqualFold(col.Name, columnName) { - // Use actual SQL type to determine if this is a timestamp - return e.isSQLTypeTimestamp(col.Type) - } - } - } - } - - // Only return true if we have explicit type information - // No guessing based on column names - return false -} - -// getTimeFiltersFromPlan extracts time filter values from execution plan details -func getTimeFiltersFromPlan(plan *QueryExecutionPlan) (startTimeNs, stopTimeNs int64) { - if plan == nil || plan.Details == nil { - return 0, 0 - } - if startNsVal, ok := plan.Details[PlanDetailStartTimeNs]; ok { - if startNs, ok2 := startNsVal.(int64); ok2 { - startTimeNs = startNs - } - } - if stopNsVal, ok := plan.Details[PlanDetailStopTimeNs]; ok { - if stopNs, ok2 := stopNsVal.(int64); ok2 { - stopTimeNs = stopNs - } - } - return -} - -// pruneParquetFilesByTime filters parquet files based on timestamp ranges, with optional debug logging -func pruneParquetFilesByTime(ctx context.Context, parquetStats []*ParquetFileStats, hybridScanner *HybridMessageScanner, startTimeNs, stopTimeNs int64) []*ParquetFileStats { - if startTimeNs == 0 && stopTimeNs == 0 { - return parquetStats - } - - qStart := startTimeNs - qStop := stopTimeNs - if qStop == 0 { - qStop = math.MaxInt64 - } - - n := 0 - for _, fs := range parquetStats { - if minNs, maxNs, ok := hybridScanner.getTimestampRangeFromStats(fs); ok { - if qStop < minNs || (qStart != 0 && qStart > maxNs) { - continue - } - } - parquetStats[n] = fs - n++ - } - return parquetStats[:n] -} - -// pruneParquetFilesByColumnStats filters parquet files based on column statistics and WHERE predicates -func (e *SQLEngine) pruneParquetFilesByColumnStats(ctx context.Context, parquetStats []*ParquetFileStats, whereExpr ExprNode) []*ParquetFileStats { - if whereExpr == nil { - return parquetStats - } - - n := 0 - for _, fs := range parquetStats { - if e.canSkipParquetFile(ctx, fs, whereExpr) { - continue - } - parquetStats[n] = fs - n++ - } - return parquetStats[:n] -} - -// canSkipParquetFile determines if a parquet file can be skipped based on column statistics -func (e *SQLEngine) canSkipParquetFile(ctx context.Context, fileStats *ParquetFileStats, whereExpr ExprNode) bool { - switch expr := whereExpr.(type) { - case *ComparisonExpr: - return e.canSkipFileByComparison(ctx, fileStats, expr) - case *AndExpr: - // For AND: skip if ANY condition allows skipping (more aggressive pruning) - return e.canSkipParquetFile(ctx, fileStats, expr.Left) || e.canSkipParquetFile(ctx, fileStats, expr.Right) - case *OrExpr: - // For OR: skip only if ALL conditions allow skipping (conservative) - return e.canSkipParquetFile(ctx, fileStats, expr.Left) && e.canSkipParquetFile(ctx, fileStats, expr.Right) - default: - // Unknown expression type - don't skip - return false - } -} - -// canSkipFileByComparison checks if a file can be skipped based on a comparison predicate -func (e *SQLEngine) canSkipFileByComparison(ctx context.Context, fileStats *ParquetFileStats, expr *ComparisonExpr) bool { - // Extract column name and comparison value - var columnName string - var compareSchemaValue *schema_pb.Value - var operator string = expr.Operator - - // Determine which side is the column and which is the value - if colRef, ok := expr.Left.(*ColName); ok { - columnName = colRef.Name.String() - if sqlVal, ok := expr.Right.(*SQLVal); ok { - compareSchemaValue = e.convertSQLValToSchemaValue(sqlVal) - } else { - return false // Can't optimize complex expressions - } - } else if colRef, ok := expr.Right.(*ColName); ok { - columnName = colRef.Name.String() - if sqlVal, ok := expr.Left.(*SQLVal); ok { - compareSchemaValue = e.convertSQLValToSchemaValue(sqlVal) - // Flip operator for reversed comparison - operator = e.flipOperator(operator) - } else { - return false - } - } else { - return false // No column reference found - } - - // Validate comparison value - if compareSchemaValue == nil { - return false - } - - // Get column statistics - colStats, exists := fileStats.ColumnStats[columnName] - if !exists || colStats == nil { - // Try case-insensitive lookup - for colName, stats := range fileStats.ColumnStats { - if strings.EqualFold(colName, columnName) { - colStats = stats - exists = true - break - } - } - } - - if !exists || colStats == nil || colStats.MinValue == nil || colStats.MaxValue == nil { - return false // No statistics available - } - - // Apply pruning logic based on operator - switch operator { - case ">": - // Skip if max(column) <= compareValue - return e.compareValues(colStats.MaxValue, compareSchemaValue) <= 0 - case ">=": - // Skip if max(column) < compareValue - return e.compareValues(colStats.MaxValue, compareSchemaValue) < 0 - case "<": - // Skip if min(column) >= compareValue - return e.compareValues(colStats.MinValue, compareSchemaValue) >= 0 - case "<=": - // Skip if min(column) > compareValue - return e.compareValues(colStats.MinValue, compareSchemaValue) > 0 - case "=": - // Skip if compareValue is outside [min, max] range - return e.compareValues(compareSchemaValue, colStats.MinValue) < 0 || - e.compareValues(compareSchemaValue, colStats.MaxValue) > 0 - case "!=", "<>": - // Skip if min == max == compareValue (all values are the same and equal to compareValue) - return e.compareValues(colStats.MinValue, colStats.MaxValue) == 0 && - e.compareValues(colStats.MinValue, compareSchemaValue) == 0 - default: - return false // Unknown operator - } -} - -// flipOperator flips comparison operators when operands are swapped -func (e *SQLEngine) flipOperator(op string) string { - switch op { - case ">": - return "<" - case ">=": - return "<=" - case "<": - return ">" - case "<=": - return ">=" - case "=", "!=", "<>": - return op // These are symmetric - default: - return op - } -} - -// populatePlanFileDetails populates execution plan with detailed file information for partitions -// Includes column statistics pruning optimization when WHERE clause is provided -func (e *SQLEngine) populatePlanFileDetails(ctx context.Context, plan *QueryExecutionPlan, hybridScanner *HybridMessageScanner, partitions []string, stmt *SelectStatement) { - // Collect actual file information for each partition - var parquetFiles []string - var liveLogFiles []string - parquetSources := make(map[string]bool) - var parquetReadErrors []string - var liveLogListErrors []string - - // Extract time filters from plan details - startTimeNs, stopTimeNs := getTimeFiltersFromPlan(plan) - - for _, partitionPath := range partitions { - // Get parquet files for this partition - if parquetStats, err := hybridScanner.ReadParquetStatistics(partitionPath); err == nil { - // Prune files by time range - filteredStats := pruneParquetFilesByTime(ctx, parquetStats, hybridScanner, startTimeNs, stopTimeNs) - - // Further prune by column statistics from WHERE clause - if stmt != nil && stmt.Where != nil { - beforeColumnPrune := len(filteredStats) - filteredStats = e.pruneParquetFilesByColumnStats(ctx, filteredStats, stmt.Where.Expr) - columnPrunedCount := beforeColumnPrune - len(filteredStats) - - if columnPrunedCount > 0 { - // Track column statistics optimization - if !contains(plan.OptimizationsUsed, "column_statistics_pruning") { - plan.OptimizationsUsed = append(plan.OptimizationsUsed, "column_statistics_pruning") - } - } - } - - for _, stats := range filteredStats { - parquetFiles = append(parquetFiles, fmt.Sprintf("%s/%s", partitionPath, stats.FileName)) - } - } else { - parquetReadErrors = append(parquetReadErrors, fmt.Sprintf("%s: %v", partitionPath, err)) - } - - // Merge accurate parquet sources from metadata - if sources, err := e.getParquetSourceFilesFromMetadata(partitionPath); err == nil { - for src := range sources { - parquetSources[src] = true - } - } - - // Get live log files for this partition - if liveFiles, err := e.collectLiveLogFileNames(hybridScanner.filerClient, partitionPath); err == nil { - for _, fileName := range liveFiles { - // Exclude live log files that have been converted to parquet (deduplicated) - if parquetSources[fileName] { - continue - } - liveLogFiles = append(liveLogFiles, fmt.Sprintf("%s/%s", partitionPath, fileName)) - } - } else { - liveLogListErrors = append(liveLogListErrors, fmt.Sprintf("%s: %v", partitionPath, err)) - } - } - - // Add file lists to plan details - if len(parquetFiles) > 0 { - plan.Details["parquet_files"] = parquetFiles - } - if len(liveLogFiles) > 0 { - plan.Details["live_log_files"] = liveLogFiles - } - if len(parquetReadErrors) > 0 { - plan.Details["error_parquet_statistics"] = parquetReadErrors - } - if len(liveLogListErrors) > 0 { - plan.Details["error_live_log_listing"] = liveLogListErrors - } -} - -// isSQLTypeTimestamp checks if a SQL type string represents a timestamp type -func (e *SQLEngine) isSQLTypeTimestamp(sqlType string) bool { - upperType := strings.ToUpper(strings.TrimSpace(sqlType)) - - // Handle type with precision/length specifications - if idx := strings.Index(upperType, "("); idx != -1 { - upperType = upperType[:idx] - } - - switch upperType { - case "TIMESTAMP", "DATETIME": - return true - case "BIGINT": - // BIGINT could be a timestamp if it follows the pattern for timestamp storage - // This is a heuristic - in a better system, we'd have semantic type information - return false // Conservative approach - require explicit TIMESTAMP type - default: - return false - } -} - -// getCurrentTableInfo attempts to get table info for the current query context -// This is a simplified implementation - ideally table context would be passed explicitly -func (e *SQLEngine) getCurrentTableInfo(database string) (*TableInfo, error) { - // This is a limitation of the current architecture - // In practice, we'd need the table context from the current query - // For now, return nil to fallback to naming conventions - // TODO: Enhance architecture to pass table context through query execution - return nil, fmt.Errorf("table context not available in current architecture") -} - -// getColumnName extracts column name from expression (handles ColName types) -func (e *SQLEngine) getColumnName(expr ExprNode) string { - switch exprType := expr.(type) { - case *ColName: - return exprType.Name.String() - } - return "" -} - -// resolveColumnAlias tries to resolve a column name that might be an alias -func (e *SQLEngine) resolveColumnAlias(columnName string, selectExprs []SelectExpr) string { - if selectExprs == nil { - return columnName - } - - // Check if this column name is actually an alias in the SELECT list - for _, selectExpr := range selectExprs { - if aliasedExpr, ok := selectExpr.(*AliasedExpr); ok && aliasedExpr != nil { - // Check if the alias matches our column name - if aliasedExpr.As != nil && !aliasedExpr.As.IsEmpty() && aliasedExpr.As.String() == columnName { - // If the aliased expression is a column, return the actual column name - if colExpr, ok := aliasedExpr.Expr.(*ColName); ok && colExpr != nil { - return colExpr.Name.String() - } - } - } - } - - // If no alias found, return the original column name - return columnName -} - -// extractTimeValue parses time values from SQL expressions -// Supports nanosecond timestamps, ISO dates, and relative times -func (e *SQLEngine) extractTimeValue(expr ExprNode) int64 { - switch exprType := expr.(type) { - case *SQLVal: - switch exprType.Type { - case IntVal: - // Parse as nanosecond timestamp - if val, err := strconv.ParseInt(string(exprType.Val), 10, 64); err == nil { - return val - } - case StrVal: - // Parse as ISO date or other string formats - timeStr := string(exprType.Val) - - // Try parsing as RFC3339 (ISO 8601) - if t, err := time.Parse(time.RFC3339, timeStr); err == nil { - return t.UnixNano() - } - - // Try parsing as RFC3339 with nanoseconds - if t, err := time.Parse(time.RFC3339Nano, timeStr); err == nil { - return t.UnixNano() - } - - // Try parsing as date only (YYYY-MM-DD) - if t, err := time.Parse("2006-01-02", timeStr); err == nil { - return t.UnixNano() - } - - // Try parsing as datetime (YYYY-MM-DD HH:MM:SS) - if t, err := time.Parse("2006-01-02 15:04:05", timeStr); err == nil { - return t.UnixNano() - } - } - } - - return 0 // Couldn't parse -} - -// reverseOperator reverses comparison operators when column and value are swapped -func (e *SQLEngine) reverseOperator(op string) string { - switch op { - case GreaterThanStr: - return LessThanStr - case GreaterEqualStr: - return LessEqualStr - case LessThanStr: - return GreaterThanStr - case LessEqualStr: - return GreaterEqualStr - case EqualStr: - return EqualStr - case NotEqualStr: - return NotEqualStr - default: - return op - } -} - -// buildPredicate creates a predicate function from a WHERE clause expression -// This is a simplified implementation - a full implementation would be much more complex -func (e *SQLEngine) buildPredicate(expr ExprNode) (func(*schema_pb.RecordValue) bool, error) { - return e.buildPredicateWithContext(expr, nil) -} - -// buildPredicateWithContext creates a predicate function with SELECT context for alias resolution -func (e *SQLEngine) buildPredicateWithContext(expr ExprNode, selectExprs []SelectExpr) (func(*schema_pb.RecordValue) bool, error) { - switch exprType := expr.(type) { - case *ComparisonExpr: - return e.buildComparisonPredicateWithContext(exprType, selectExprs) - case *BetweenExpr: - return e.buildBetweenPredicateWithContext(exprType, selectExprs) - case *IsNullExpr: - return e.buildIsNullPredicateWithContext(exprType, selectExprs) - case *IsNotNullExpr: - return e.buildIsNotNullPredicateWithContext(exprType, selectExprs) - case *AndExpr: - leftPred, err := e.buildPredicateWithContext(exprType.Left, selectExprs) - if err != nil { - return nil, err - } - rightPred, err := e.buildPredicateWithContext(exprType.Right, selectExprs) - if err != nil { - return nil, err - } - return func(record *schema_pb.RecordValue) bool { - return leftPred(record) && rightPred(record) - }, nil - case *OrExpr: - leftPred, err := e.buildPredicateWithContext(exprType.Left, selectExprs) - if err != nil { - return nil, err - } - rightPred, err := e.buildPredicateWithContext(exprType.Right, selectExprs) - if err != nil { - return nil, err - } - return func(record *schema_pb.RecordValue) bool { - return leftPred(record) || rightPred(record) - }, nil - default: - return nil, fmt.Errorf("unsupported WHERE expression: %T", expr) - } -} - -// buildComparisonPredicateWithContext creates a predicate for comparison operations with alias support -func (e *SQLEngine) buildComparisonPredicateWithContext(expr *ComparisonExpr, selectExprs []SelectExpr) (func(*schema_pb.RecordValue) bool, error) { - var columnName string - var compareValue interface{} - var operator string - - // Check if column is on the left side (normal case: column > value) - if colName, ok := expr.Left.(*ColName); ok { - rawColumnName := colName.Name.String() - // Resolve potential alias to actual column name - columnName = e.resolveColumnAlias(rawColumnName, selectExprs) - // Map display names to internal names for system columns - columnName = e.getSystemColumnInternalName(columnName) - operator = expr.Operator - - // Extract comparison value from right side - val, err := e.extractComparisonValue(expr.Right) - if err != nil { - return nil, fmt.Errorf("failed to extract right-side value: %v", err) - } - compareValue = e.convertValueForTimestampColumn(columnName, val, expr.Right) - - } else if colName, ok := expr.Right.(*ColName); ok { - // Column is on the right side (reversed case: value < column) - rawColumnName := colName.Name.String() - // Resolve potential alias to actual column name - columnName = e.resolveColumnAlias(rawColumnName, selectExprs) - // Map display names to internal names for system columns - columnName = e.getSystemColumnInternalName(columnName) - - // Reverse the operator when column is on right side - operator = e.reverseOperator(expr.Operator) - - // Extract comparison value from left side - val, err := e.extractComparisonValue(expr.Left) - if err != nil { - return nil, fmt.Errorf("failed to extract left-side value: %v", err) - } - compareValue = e.convertValueForTimestampColumn(columnName, val, expr.Left) - - } else { - // Handle literal-only comparisons like 1 = 0, 'a' = 'b', etc. - leftVal, leftErr := e.extractComparisonValue(expr.Left) - rightVal, rightErr := e.extractComparisonValue(expr.Right) - - if leftErr != nil || rightErr != nil { - return nil, fmt.Errorf("no column name found in comparison expression, left: %T, right: %T", expr.Left, expr.Right) - } - - // Evaluate the literal comparison once - result := e.compareLiteralValues(leftVal, rightVal, expr.Operator) - - // Return a constant predicate - return func(record *schema_pb.RecordValue) bool { - return result - }, nil - } - - // Return the predicate function - return func(record *schema_pb.RecordValue) bool { - fieldValue, exists := record.Fields[columnName] - if !exists { - return false // Column doesn't exist in record - } - - // Use the comparison evaluation function - return e.evaluateComparison(fieldValue, operator, compareValue) - }, nil -} - -// buildBetweenPredicateWithContext creates a predicate for BETWEEN operations -func (e *SQLEngine) buildBetweenPredicateWithContext(expr *BetweenExpr, selectExprs []SelectExpr) (func(*schema_pb.RecordValue) bool, error) { - var columnName string - var fromValue, toValue interface{} - - // Check if left side is a column name - if colName, ok := expr.Left.(*ColName); ok { - rawColumnName := colName.Name.String() - // Resolve potential alias to actual column name - columnName = e.resolveColumnAlias(rawColumnName, selectExprs) - // Map display names to internal names for system columns - columnName = e.getSystemColumnInternalName(columnName) - - // Extract FROM value - fromVal, err := e.extractComparisonValue(expr.From) - if err != nil { - return nil, fmt.Errorf("failed to extract BETWEEN from value: %v", err) - } - fromValue = e.convertValueForTimestampColumn(columnName, fromVal, expr.From) - - // Extract TO value - toVal, err := e.extractComparisonValue(expr.To) - if err != nil { - return nil, fmt.Errorf("failed to extract BETWEEN to value: %v", err) - } - toValue = e.convertValueForTimestampColumn(columnName, toVal, expr.To) - } else { - return nil, fmt.Errorf("BETWEEN left operand must be a column name, got: %T", expr.Left) - } - - // Return the predicate function - return func(record *schema_pb.RecordValue) bool { - fieldValue, exists := record.Fields[columnName] - if !exists { - return false - } - - // Evaluate: fieldValue >= fromValue AND fieldValue <= toValue - greaterThanOrEqualFrom := e.evaluateComparison(fieldValue, ">=", fromValue) - lessThanOrEqualTo := e.evaluateComparison(fieldValue, "<=", toValue) - - result := greaterThanOrEqualFrom && lessThanOrEqualTo - - // Handle NOT BETWEEN - if expr.Not { - result = !result - } - - return result - }, nil -} - -// buildIsNullPredicateWithContext creates a predicate for IS NULL operations -func (e *SQLEngine) buildIsNullPredicateWithContext(expr *IsNullExpr, selectExprs []SelectExpr) (func(*schema_pb.RecordValue) bool, error) { - // Check if the expression is a column name - if colName, ok := expr.Expr.(*ColName); ok { - rawColumnName := colName.Name.String() - // Resolve potential alias to actual column name - columnName := e.resolveColumnAlias(rawColumnName, selectExprs) - // Map display names to internal names for system columns - columnName = e.getSystemColumnInternalName(columnName) - - // Return the predicate function - return func(record *schema_pb.RecordValue) bool { - // Check if field exists and if it's null or missing - fieldValue, exists := record.Fields[columnName] - if !exists { - return true // Field doesn't exist = NULL - } - - // Check if the field value itself is null/empty - return e.isValueNull(fieldValue) - }, nil - } else { - return nil, fmt.Errorf("IS NULL left operand must be a column name, got: %T", expr.Expr) - } -} - -// buildIsNotNullPredicateWithContext creates a predicate for IS NOT NULL operations -func (e *SQLEngine) buildIsNotNullPredicateWithContext(expr *IsNotNullExpr, selectExprs []SelectExpr) (func(*schema_pb.RecordValue) bool, error) { - // Check if the expression is a column name - if colName, ok := expr.Expr.(*ColName); ok { - rawColumnName := colName.Name.String() - // Resolve potential alias to actual column name - columnName := e.resolveColumnAlias(rawColumnName, selectExprs) - // Map display names to internal names for system columns - columnName = e.getSystemColumnInternalName(columnName) - - // Return the predicate function - return func(record *schema_pb.RecordValue) bool { - // Check if field exists and if it's not null - fieldValue, exists := record.Fields[columnName] - if !exists { - return false // Field doesn't exist = NULL, so NOT NULL is false - } - - // Check if the field value itself is not null/empty - return !e.isValueNull(fieldValue) - }, nil - } else { - return nil, fmt.Errorf("IS NOT NULL left operand must be a column name, got: %T", expr.Expr) - } -} - -// isValueNull checks if a schema_pb.Value is null or represents a null value -func (e *SQLEngine) isValueNull(value *schema_pb.Value) bool { - if value == nil { - return true - } - - // Check the Kind field to see if it represents a null value - if value.Kind == nil { - return true - } - - // For different value types, check if they represent null/empty values - switch kind := value.Kind.(type) { - case *schema_pb.Value_StringValue: - // Empty string could be considered null depending on semantics - // For now, treat empty string as not null (SQL standard behavior) - return false - case *schema_pb.Value_BoolValue: - return false // Boolean values are never null - case *schema_pb.Value_Int32Value, *schema_pb.Value_Int64Value: - return false // Integer values are never null - case *schema_pb.Value_FloatValue, *schema_pb.Value_DoubleValue: - return false // Numeric values are never null - case *schema_pb.Value_BytesValue: - // Bytes could be null if empty, but for now treat as not null - return false - case *schema_pb.Value_TimestampValue: - // Check if timestamp is zero/uninitialized - return kind.TimestampValue == nil - case *schema_pb.Value_DateValue: - return kind.DateValue == nil - case *schema_pb.Value_TimeValue: - return kind.TimeValue == nil - default: - // Unknown type, consider it null to be safe - return true - } -} - -// extractComparisonValue extracts the comparison value from a SQL expression -func (e *SQLEngine) extractComparisonValue(expr ExprNode) (interface{}, error) { - switch val := expr.(type) { - case *SQLVal: - switch val.Type { - case IntVal: - intVal, err := strconv.ParseInt(string(val.Val), 10, 64) - if err != nil { - return nil, err - } - return intVal, nil - case StrVal: - return string(val.Val), nil - case FloatVal: - floatVal, err := strconv.ParseFloat(string(val.Val), 64) - if err != nil { - return nil, err - } - return floatVal, nil - default: - return nil, fmt.Errorf("unsupported SQL value type: %v", val.Type) - } - case *ArithmeticExpr: - // Handle arithmetic expressions like CURRENT_TIMESTAMP - INTERVAL '1 hour' - return e.evaluateArithmeticExpressionForComparison(val) - case *FuncExpr: - // Handle function calls like NOW(), CURRENT_TIMESTAMP - return e.evaluateFunctionExpressionForComparison(val) - case *IntervalExpr: - // Handle standalone INTERVAL expressions - nanos, err := e.evaluateInterval(val.Value) - if err != nil { - return nil, err - } - return nanos, nil - case ValTuple: - // Handle IN expressions with multiple values: column IN (value1, value2, value3) - var inValues []interface{} - for _, tupleVal := range val { - switch v := tupleVal.(type) { - case *SQLVal: - switch v.Type { - case IntVal: - intVal, err := strconv.ParseInt(string(v.Val), 10, 64) - if err != nil { - return nil, err - } - inValues = append(inValues, intVal) - case StrVal: - inValues = append(inValues, string(v.Val)) - case FloatVal: - floatVal, err := strconv.ParseFloat(string(v.Val), 64) - if err != nil { - return nil, err - } - inValues = append(inValues, floatVal) - } - } - } - return inValues, nil - default: - return nil, fmt.Errorf("unsupported comparison value type: %T", expr) - } -} - -// evaluateArithmeticExpressionForComparison evaluates an arithmetic expression for WHERE clause comparisons -func (e *SQLEngine) evaluateArithmeticExpressionForComparison(expr *ArithmeticExpr) (interface{}, error) { - // Check if this is timestamp arithmetic with intervals - if e.isTimestampArithmetic(expr.Left, expr.Right) && (expr.Operator == "+" || expr.Operator == "-") { - // Evaluate timestamp arithmetic and return the result as nanoseconds - result, err := e.evaluateTimestampArithmetic(expr.Left, expr.Right, expr.Operator) - if err != nil { - return nil, err - } - - // Extract the timestamp value as nanoseconds for comparison - if result.Kind != nil { - switch resultKind := result.Kind.(type) { - case *schema_pb.Value_Int64Value: - return resultKind.Int64Value, nil - case *schema_pb.Value_StringValue: - // If it's a formatted timestamp string, parse it back to nanoseconds - if timestamp, err := time.Parse("2006-01-02T15:04:05.000000000Z", resultKind.StringValue); err == nil { - return timestamp.UnixNano(), nil - } - return nil, fmt.Errorf("could not parse timestamp string: %s", resultKind.StringValue) - } - } - return nil, fmt.Errorf("invalid timestamp arithmetic result") - } - - // For other arithmetic operations, we'd need to evaluate them differently - // For now, return an error for unsupported arithmetic - return nil, fmt.Errorf("unsupported arithmetic expression in WHERE clause: %s", expr.Operator) -} - -// evaluateFunctionExpressionForComparison evaluates a function expression for WHERE clause comparisons -func (e *SQLEngine) evaluateFunctionExpressionForComparison(expr *FuncExpr) (interface{}, error) { - funcName := strings.ToUpper(expr.Name.String()) - - switch funcName { - case "NOW", "CURRENT_TIMESTAMP": - result, err := e.Now() - if err != nil { - return nil, err - } - // Return as nanoseconds for comparison - if result.Kind != nil { - if resultKind, ok := result.Kind.(*schema_pb.Value_TimestampValue); ok { - // Convert microseconds to nanoseconds - return resultKind.TimestampValue.TimestampMicros * 1000, nil - } - } - return nil, fmt.Errorf("invalid NOW() result: expected TimestampValue, got %T", result.Kind) - - case "CURRENT_DATE": - result, err := e.CurrentDate() - if err != nil { - return nil, err - } - // Convert date to nanoseconds (start of day) - if result.Kind != nil { - if resultKind, ok := result.Kind.(*schema_pb.Value_StringValue); ok { - if date, err := time.Parse("2006-01-02", resultKind.StringValue); err == nil { - return date.UnixNano(), nil - } - } - } - return nil, fmt.Errorf("invalid CURRENT_DATE result") - - case "CURRENT_TIME": - result, err := e.CurrentTime() - if err != nil { - return nil, err - } - // For time comparison, we might need special handling - // For now, just return the string value - if result.Kind != nil { - if resultKind, ok := result.Kind.(*schema_pb.Value_StringValue); ok { - return resultKind.StringValue, nil - } - } - return nil, fmt.Errorf("invalid CURRENT_TIME result") - - default: - return nil, fmt.Errorf("unsupported function in WHERE clause: %s", funcName) - } -} - -// evaluateComparison performs the actual comparison -func (e *SQLEngine) evaluateComparison(fieldValue *schema_pb.Value, operator string, compareValue interface{}) bool { - // This is a simplified implementation - // A full implementation would handle type coercion and all comparison operators - - switch operator { - case "=": - return e.valuesEqual(fieldValue, compareValue) - case "<": - return e.valueLessThan(fieldValue, compareValue) - case ">": - return e.valueGreaterThan(fieldValue, compareValue) - case "<=": - return e.valuesEqual(fieldValue, compareValue) || e.valueLessThan(fieldValue, compareValue) - case ">=": - return e.valuesEqual(fieldValue, compareValue) || e.valueGreaterThan(fieldValue, compareValue) - case "!=", "<>": - return !e.valuesEqual(fieldValue, compareValue) - case "LIKE", "like": - return e.valueLike(fieldValue, compareValue) - case "IN", "in": - return e.valueIn(fieldValue, compareValue) - default: - return false - } -} - -// Helper functions for value comparison with proper type coercion -func (e *SQLEngine) valuesEqual(fieldValue *schema_pb.Value, compareValue interface{}) bool { - // Handle string comparisons first - if strField, ok := fieldValue.Kind.(*schema_pb.Value_StringValue); ok { - if strVal, ok := compareValue.(string); ok { - return strField.StringValue == strVal - } - return false - } - - // Handle boolean comparisons - if boolField, ok := fieldValue.Kind.(*schema_pb.Value_BoolValue); ok { - if boolVal, ok := compareValue.(bool); ok { - return boolField.BoolValue == boolVal - } - return false - } - - // Handle logical type comparisons - if timestampField, ok := fieldValue.Kind.(*schema_pb.Value_TimestampValue); ok { - if timestampVal, ok := compareValue.(int64); ok { - return timestampField.TimestampValue.TimestampMicros == timestampVal - } - return false - } - - if dateField, ok := fieldValue.Kind.(*schema_pb.Value_DateValue); ok { - if dateVal, ok := compareValue.(int32); ok { - return dateField.DateValue.DaysSinceEpoch == dateVal - } - return false - } - - // Handle DecimalValue comparison (convert to string for comparison) - if decimalField, ok := fieldValue.Kind.(*schema_pb.Value_DecimalValue); ok { - if decimalStr, ok := compareValue.(string); ok { - // Convert decimal bytes back to string for comparison - decimalValue := e.decimalToString(decimalField.DecimalValue) - return decimalValue == decimalStr - } - return false - } - - if timeField, ok := fieldValue.Kind.(*schema_pb.Value_TimeValue); ok { - if timeVal, ok := compareValue.(int64); ok { - return timeField.TimeValue.TimeMicros == timeVal - } - return false - } - - // Handle direct int64 comparisons for timestamp precision (before float64 conversion) - if int64Field, ok := fieldValue.Kind.(*schema_pb.Value_Int64Value); ok { - if int64Val, ok := compareValue.(int64); ok { - return int64Field.Int64Value == int64Val - } - if intVal, ok := compareValue.(int); ok { - return int64Field.Int64Value == int64(intVal) - } - } - - // Handle direct int32 comparisons - if int32Field, ok := fieldValue.Kind.(*schema_pb.Value_Int32Value); ok { - if int32Val, ok := compareValue.(int32); ok { - return int32Field.Int32Value == int32Val - } - if intVal, ok := compareValue.(int); ok { - return int32Field.Int32Value == int32(intVal) - } - if int64Val, ok := compareValue.(int64); ok && int64Val >= math.MinInt32 && int64Val <= math.MaxInt32 { - return int32Field.Int32Value == int32(int64Val) - } - } - - // Handle numeric comparisons with type coercion (fallback for other numeric types) - fieldNum := e.convertToNumber(fieldValue) - compareNum := e.convertCompareValueToNumber(compareValue) - - if fieldNum != nil && compareNum != nil { - return *fieldNum == *compareNum - } - - return false -} - -// convertCompareValueToNumber converts compare values from SQL queries to float64 -func (e *SQLEngine) convertCompareValueToNumber(compareValue interface{}) *float64 { - switch v := compareValue.(type) { - case int: - result := float64(v) - return &result - case int32: - result := float64(v) - return &result - case int64: - result := float64(v) - return &result - case float32: - result := float64(v) - return &result - case float64: - return &v - case string: - // Try to parse string as number for flexible comparisons - if parsed, err := strconv.ParseFloat(v, 64); err == nil { - return &parsed - } - } - return nil -} - -// decimalToString converts a DecimalValue back to string representation -func (e *SQLEngine) decimalToString(decimalValue *schema_pb.DecimalValue) string { - if decimalValue == nil || decimalValue.Value == nil { - return "0" - } - - // Convert bytes back to big.Int - intValue := new(big.Int).SetBytes(decimalValue.Value) - - // Convert to string with proper decimal placement - str := intValue.String() - - // Handle decimal placement based on scale - scale := int(decimalValue.Scale) - if scale > 0 && len(str) > scale { - // Insert decimal point - decimalPos := len(str) - scale - return str[:decimalPos] + "." + str[decimalPos:] - } - - return str -} - -func (e *SQLEngine) valueLessThan(fieldValue *schema_pb.Value, compareValue interface{}) bool { - // Handle string comparisons lexicographically - if strField, ok := fieldValue.Kind.(*schema_pb.Value_StringValue); ok { - if strVal, ok := compareValue.(string); ok { - return strField.StringValue < strVal - } - return false - } - - // Handle logical type comparisons - if timestampField, ok := fieldValue.Kind.(*schema_pb.Value_TimestampValue); ok { - if timestampVal, ok := compareValue.(int64); ok { - return timestampField.TimestampValue.TimestampMicros < timestampVal - } - return false - } - - if dateField, ok := fieldValue.Kind.(*schema_pb.Value_DateValue); ok { - if dateVal, ok := compareValue.(int32); ok { - return dateField.DateValue.DaysSinceEpoch < dateVal - } - return false - } - - if timeField, ok := fieldValue.Kind.(*schema_pb.Value_TimeValue); ok { - if timeVal, ok := compareValue.(int64); ok { - return timeField.TimeValue.TimeMicros < timeVal - } - return false - } - - // Handle direct int64 comparisons for timestamp precision (before float64 conversion) - if int64Field, ok := fieldValue.Kind.(*schema_pb.Value_Int64Value); ok { - if int64Val, ok := compareValue.(int64); ok { - return int64Field.Int64Value < int64Val - } - if intVal, ok := compareValue.(int); ok { - return int64Field.Int64Value < int64(intVal) - } - } - - // Handle direct int32 comparisons - if int32Field, ok := fieldValue.Kind.(*schema_pb.Value_Int32Value); ok { - if int32Val, ok := compareValue.(int32); ok { - return int32Field.Int32Value < int32Val - } - if intVal, ok := compareValue.(int); ok { - return int32Field.Int32Value < int32(intVal) - } - if int64Val, ok := compareValue.(int64); ok && int64Val >= math.MinInt32 && int64Val <= math.MaxInt32 { - return int32Field.Int32Value < int32(int64Val) - } - } - - // Handle numeric comparisons with type coercion (fallback for other numeric types) - fieldNum := e.convertToNumber(fieldValue) - compareNum := e.convertCompareValueToNumber(compareValue) - - if fieldNum != nil && compareNum != nil { - return *fieldNum < *compareNum - } - - return false -} - -func (e *SQLEngine) valueGreaterThan(fieldValue *schema_pb.Value, compareValue interface{}) bool { - // Handle string comparisons lexicographically - if strField, ok := fieldValue.Kind.(*schema_pb.Value_StringValue); ok { - if strVal, ok := compareValue.(string); ok { - return strField.StringValue > strVal - } - return false - } - - // Handle logical type comparisons - if timestampField, ok := fieldValue.Kind.(*schema_pb.Value_TimestampValue); ok { - if timestampVal, ok := compareValue.(int64); ok { - return timestampField.TimestampValue.TimestampMicros > timestampVal - } - return false - } - - if dateField, ok := fieldValue.Kind.(*schema_pb.Value_DateValue); ok { - if dateVal, ok := compareValue.(int32); ok { - return dateField.DateValue.DaysSinceEpoch > dateVal - } - return false - } - - if timeField, ok := fieldValue.Kind.(*schema_pb.Value_TimeValue); ok { - if timeVal, ok := compareValue.(int64); ok { - return timeField.TimeValue.TimeMicros > timeVal - } - return false - } - - // Handle direct int64 comparisons for timestamp precision (before float64 conversion) - if int64Field, ok := fieldValue.Kind.(*schema_pb.Value_Int64Value); ok { - if int64Val, ok := compareValue.(int64); ok { - return int64Field.Int64Value > int64Val - } - if intVal, ok := compareValue.(int); ok { - return int64Field.Int64Value > int64(intVal) - } - } - - // Handle direct int32 comparisons - if int32Field, ok := fieldValue.Kind.(*schema_pb.Value_Int32Value); ok { - if int32Val, ok := compareValue.(int32); ok { - return int32Field.Int32Value > int32Val - } - if intVal, ok := compareValue.(int); ok { - return int32Field.Int32Value > int32(intVal) - } - if int64Val, ok := compareValue.(int64); ok && int64Val >= math.MinInt32 && int64Val <= math.MaxInt32 { - return int32Field.Int32Value > int32(int64Val) - } - } - - // Handle numeric comparisons with type coercion (fallback for other numeric types) - fieldNum := e.convertToNumber(fieldValue) - compareNum := e.convertCompareValueToNumber(compareValue) - - if fieldNum != nil && compareNum != nil { - return *fieldNum > *compareNum - } - - return false -} - -// valueLike implements SQL LIKE pattern matching with % and _ wildcards -func (e *SQLEngine) valueLike(fieldValue *schema_pb.Value, compareValue interface{}) bool { - // Only support LIKE for string values - stringVal, ok := fieldValue.Kind.(*schema_pb.Value_StringValue) - if !ok { - return false - } - - pattern, ok := compareValue.(string) - if !ok { - return false - } - - // Convert SQL LIKE pattern to Go regex pattern - // % matches any sequence of characters (.*), _ matches single character (.) - regexPattern := strings.ReplaceAll(pattern, "%", ".*") - regexPattern = strings.ReplaceAll(regexPattern, "_", ".") - regexPattern = "^" + regexPattern + "$" // Anchor to match entire string - - // Compile and match regex - regex, err := regexp.Compile(regexPattern) - if err != nil { - return false // Invalid pattern - } - - return regex.MatchString(stringVal.StringValue) -} - -// valueIn implements SQL IN operator for checking if value exists in a list -func (e *SQLEngine) valueIn(fieldValue *schema_pb.Value, compareValue interface{}) bool { - // For now, handle simple case where compareValue is a slice of values - // In a full implementation, this would handle SQL IN expressions properly - values, ok := compareValue.([]interface{}) - if !ok { - return false - } - - // Check if fieldValue matches any value in the list - for _, value := range values { - if e.valuesEqual(fieldValue, value) { - return true - } - } - - return false -} - -// Helper methods for specific operations - -func (e *SQLEngine) showDatabases(ctx context.Context) (*QueryResult, error) { - databases := e.catalog.ListDatabases() - - result := &QueryResult{ - Columns: []string{"Database"}, - Rows: make([][]sqltypes.Value, len(databases)), - } - - for i, db := range databases { - result.Rows[i] = []sqltypes.Value{ - sqltypes.NewVarChar(db), - } - } - - return result, nil -} - -func (e *SQLEngine) showTables(ctx context.Context, dbName string) (*QueryResult, error) { - // Use current database context if no database specified - if dbName == "" { - dbName = e.catalog.GetCurrentDatabase() - if dbName == "" { - dbName = "default" - } - } - - tables, err := e.catalog.ListTables(dbName) - if err != nil { - return &QueryResult{Error: err}, err - } - - result := &QueryResult{ - Columns: []string{"Tables_in_" + dbName}, - Rows: make([][]sqltypes.Value, len(tables)), - } - - for i, table := range tables { - result.Rows[i] = []sqltypes.Value{ - sqltypes.NewVarChar(table), - } - } - - return result, nil -} - -// compareLiteralValues compares two literal values with the given operator -func (e *SQLEngine) compareLiteralValues(left, right interface{}, operator string) bool { - switch operator { - case "=", "==": - return e.literalValuesEqual(left, right) - case "!=", "<>": - return !e.literalValuesEqual(left, right) - case "<": - return e.compareLiteralNumber(left, right) < 0 - case "<=": - return e.compareLiteralNumber(left, right) <= 0 - case ">": - return e.compareLiteralNumber(left, right) > 0 - case ">=": - return e.compareLiteralNumber(left, right) >= 0 - default: - // For unsupported operators, default to false - return false - } -} - -// literalValuesEqual checks if two literal values are equal -func (e *SQLEngine) literalValuesEqual(left, right interface{}) bool { - // Convert both to strings for comparison - leftStr := fmt.Sprintf("%v", left) - rightStr := fmt.Sprintf("%v", right) - return leftStr == rightStr -} - -// compareLiteralNumber compares two values as numbers -func (e *SQLEngine) compareLiteralNumber(left, right interface{}) int { - leftNum, leftOk := e.convertToFloat64(left) - rightNum, rightOk := e.convertToFloat64(right) - - if !leftOk || !rightOk { - // Fall back to string comparison if not numeric - leftStr := fmt.Sprintf("%v", left) - rightStr := fmt.Sprintf("%v", right) - if leftStr < rightStr { - return -1 - } else if leftStr > rightStr { - return 1 - } else { - return 0 - } - } - - if leftNum < rightNum { - return -1 - } else if leftNum > rightNum { - return 1 - } else { - return 0 - } -} - -// convertToFloat64 attempts to convert a value to float64 -func (e *SQLEngine) convertToFloat64(value interface{}) (float64, bool) { - switch v := value.(type) { - case int64: - return float64(v), true - case int32: - return float64(v), true - case int: - return float64(v), true - case float64: - return v, true - case float32: - return float64(v), true - case string: - if num, err := strconv.ParseFloat(v, 64); err == nil { - return num, true - } - return 0, false - default: - return 0, false - } -} - -func (e *SQLEngine) createTable(ctx context.Context, stmt *DDLStatement) (*QueryResult, error) { - // Parse CREATE TABLE statement - // Assumption: Table name format is [database.]table_name - tableName := stmt.NewName.Name.String() - database := "" - - // Check if database is specified in table name - if stmt.NewName.Qualifier.String() != "" { - database = stmt.NewName.Qualifier.String() - } else { - // Use current database context or default - database = e.catalog.GetCurrentDatabase() - if database == "" { - database = "default" - } - } - - // Parse column definitions from CREATE TABLE - // Assumption: stmt.TableSpec contains column definitions - if stmt.TableSpec == nil || len(stmt.TableSpec.Columns) == 0 { - err := fmt.Errorf("CREATE TABLE requires column definitions") - return &QueryResult{Error: err}, err - } - - // Convert SQL columns to MQ schema fields - fields := make([]*schema_pb.Field, len(stmt.TableSpec.Columns)) - for i, col := range stmt.TableSpec.Columns { - fieldType, err := e.convertSQLTypeToMQ(col.Type) - if err != nil { - return &QueryResult{Error: err}, err - } - - fields[i] = &schema_pb.Field{ - Name: col.Name.String(), - Type: fieldType, - } - } - - // Create record type for the topic - recordType := &schema_pb.RecordType{ - Fields: fields, - } - - // Create the topic via broker using configurable partition count - partitionCount := e.catalog.GetDefaultPartitionCount() - err := e.catalog.brokerClient.ConfigureTopic(ctx, database, tableName, partitionCount, recordType, nil) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Register the new topic in catalog - mqSchema := &schema.Schema{ - Namespace: database, - Name: tableName, - RecordType: recordType, - RevisionId: 1, // Initial revision - } - - err = e.catalog.RegisterTopic(database, tableName, mqSchema) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Return success result - result := &QueryResult{ - Columns: []string{"Result"}, - Rows: [][]sqltypes.Value{ - {sqltypes.NewVarChar(fmt.Sprintf("Table '%s.%s' created successfully", database, tableName))}, - }, - } - - return result, nil -} - -// ExecutionPlanBuilder handles building execution plans for queries -type ExecutionPlanBuilder struct { - engine *SQLEngine -} - -// NewExecutionPlanBuilder creates a new execution plan builder -func NewExecutionPlanBuilder(engine *SQLEngine) *ExecutionPlanBuilder { - return &ExecutionPlanBuilder{engine: engine} -} - -// BuildAggregationPlan builds an execution plan for aggregation queries -func (builder *ExecutionPlanBuilder) BuildAggregationPlan( - stmt *SelectStatement, - aggregations []AggregationSpec, - strategy AggregationStrategy, - dataSources *TopicDataSources, -) *QueryExecutionPlan { - - plan := &QueryExecutionPlan{ - QueryType: "SELECT", - ExecutionStrategy: builder.determineExecutionStrategy(stmt, strategy), - DataSources: builder.buildDataSourcesList(strategy, dataSources), - PartitionsScanned: dataSources.PartitionsCount, - ParquetFilesScanned: builder.countParquetFiles(dataSources), - LiveLogFilesScanned: builder.countLiveLogFiles(dataSources), - OptimizationsUsed: builder.buildOptimizationsList(stmt, strategy, dataSources), - Aggregations: builder.buildAggregationsList(aggregations), - Details: make(map[string]interface{}), - } - - // Set row counts based on strategy - if strategy.CanUseFastPath { - // Only live logs and broker buffer rows are actually scanned; parquet uses metadata - plan.TotalRowsProcessed = dataSources.LiveLogRowCount - if dataSources.BrokerUnflushedCount > 0 { - plan.TotalRowsProcessed += dataSources.BrokerUnflushedCount - } - // Set scan method based on what data sources actually exist - if dataSources.ParquetRowCount > 0 && (dataSources.LiveLogRowCount > 0 || dataSources.BrokerUnflushedCount > 0) { - plan.Details["scan_method"] = "Parquet Metadata + Live Log/Broker Counting" - } else if dataSources.ParquetRowCount > 0 { - plan.Details["scan_method"] = "Parquet Metadata Only" - } else { - plan.Details["scan_method"] = "Live Log/Broker Counting Only" - } - } else { - plan.TotalRowsProcessed = dataSources.ParquetRowCount + dataSources.LiveLogRowCount - plan.Details["scan_method"] = "Full Data Scan" - } - - return plan -} - -// determineExecutionStrategy determines the execution strategy based on query characteristics -func (builder *ExecutionPlanBuilder) determineExecutionStrategy(stmt *SelectStatement, strategy AggregationStrategy) string { - if stmt.Where != nil { - return "full_scan" - } - - if strategy.CanUseFastPath { - return "hybrid_fast_path" - } - - return "full_scan" -} - -// buildDataSourcesList builds the list of data sources used -func (builder *ExecutionPlanBuilder) buildDataSourcesList(strategy AggregationStrategy, dataSources *TopicDataSources) []string { - sources := []string{} - - if strategy.CanUseFastPath { - // Only show parquet stats if there are actual parquet files - if dataSources.ParquetRowCount > 0 { - sources = append(sources, "parquet_stats") - } - if dataSources.LiveLogRowCount > 0 { - sources = append(sources, "live_logs") - } - if dataSources.BrokerUnflushedCount > 0 { - sources = append(sources, "broker_buffer") - } - } else { - sources = append(sources, "live_logs", "parquet_files") - } - - // Note: broker_buffer is added dynamically during execution when broker is queried - // See aggregations.go lines 397-409 for the broker buffer data source addition logic - - return sources -} - -// countParquetFiles counts the total number of parquet files across all partitions -func (builder *ExecutionPlanBuilder) countParquetFiles(dataSources *TopicDataSources) int { - count := 0 - for _, fileStats := range dataSources.ParquetFiles { - count += len(fileStats) - } - return count -} - -// countLiveLogFiles returns the total number of live log files across all partitions -func (builder *ExecutionPlanBuilder) countLiveLogFiles(dataSources *TopicDataSources) int { - return dataSources.LiveLogFilesCount -} - -// buildOptimizationsList builds the list of optimizations used -func (builder *ExecutionPlanBuilder) buildOptimizationsList(stmt *SelectStatement, strategy AggregationStrategy, dataSources *TopicDataSources) []string { - optimizations := []string{} - - if strategy.CanUseFastPath { - // Only include parquet statistics if there are actual parquet files - if dataSources.ParquetRowCount > 0 { - optimizations = append(optimizations, "parquet_statistics") - } - if dataSources.LiveLogRowCount > 0 { - optimizations = append(optimizations, "live_log_counting") - } - // Always include deduplication when using fast path - optimizations = append(optimizations, "deduplication") - } - - if stmt.Where != nil { - // Check if "predicate_pushdown" is already in the list - found := false - for _, opt := range optimizations { - if opt == "predicate_pushdown" { - found = true - break - } - } - if !found { - optimizations = append(optimizations, "predicate_pushdown") - } - } - - return optimizations -} - -// buildAggregationsList builds the list of aggregations for display -func (builder *ExecutionPlanBuilder) buildAggregationsList(aggregations []AggregationSpec) []string { - aggList := make([]string, len(aggregations)) - for i, spec := range aggregations { - aggList[i] = fmt.Sprintf("%s(%s)", spec.Function, spec.Column) - } - return aggList -} - -// parseAggregationFunction parses an aggregation function expression -func (e *SQLEngine) parseAggregationFunction(funcExpr *FuncExpr, aliasExpr *AliasedExpr) (*AggregationSpec, error) { - funcName := strings.ToUpper(funcExpr.Name.String()) - - spec := &AggregationSpec{ - Function: funcName, - } - - // Parse function arguments - switch funcName { - case FuncCOUNT: - if len(funcExpr.Exprs) != 1 { - return nil, fmt.Errorf("COUNT function expects exactly 1 argument") - } - - switch arg := funcExpr.Exprs[0].(type) { - case *StarExpr: - spec.Column = "*" - spec.Alias = "COUNT(*)" - case *AliasedExpr: - if colName, ok := arg.Expr.(*ColName); ok { - spec.Column = colName.Name.String() - spec.Alias = fmt.Sprintf("COUNT(%s)", spec.Column) - } else { - return nil, fmt.Errorf("COUNT argument must be a column name or *") - } - default: - return nil, fmt.Errorf("unsupported COUNT argument: %T", arg) - } - - case FuncSUM, FuncAVG, FuncMIN, FuncMAX: - if len(funcExpr.Exprs) != 1 { - return nil, fmt.Errorf("%s function expects exactly 1 argument", funcName) - } - - switch arg := funcExpr.Exprs[0].(type) { - case *AliasedExpr: - if colName, ok := arg.Expr.(*ColName); ok { - spec.Column = colName.Name.String() - spec.Alias = fmt.Sprintf("%s(%s)", funcName, spec.Column) - } else { - return nil, fmt.Errorf("%s argument must be a column name", funcName) - } - default: - return nil, fmt.Errorf("unsupported %s argument: %T", funcName, arg) - } - - default: - return nil, fmt.Errorf("unsupported aggregation function: %s", funcName) - } - - // Override with user-specified alias if provided - if aliasExpr != nil && aliasExpr.As != nil && !aliasExpr.As.IsEmpty() { - spec.Alias = aliasExpr.As.String() - } - - return spec, nil -} - -// computeLiveLogMinMax scans live log files to find MIN/MAX values for a specific column -func (e *SQLEngine) computeLiveLogMinMax(partitionPath string, columnName string, parquetSourceFiles map[string]bool) (interface{}, interface{}, error) { - if e.catalog.brokerClient == nil { - return nil, nil, fmt.Errorf("no broker client available") - } - - filerClient, err := e.catalog.brokerClient.GetFilerClient() - if err != nil { - return nil, nil, fmt.Errorf("failed to get filer client: %v", err) - } - - var minValue, maxValue interface{} - var minSchemaValue, maxSchemaValue *schema_pb.Value - - // Process each live log file - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - // Skip parquet files and directories - if entry.IsDirectory || strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - // Skip files that have been converted to parquet (deduplication) - if parquetSourceFiles[entry.Name] { - return nil - } - - filePath := partitionPath + "/" + entry.Name - - // Scan this log file for MIN/MAX values - fileMin, fileMax, err := e.computeFileMinMax(filerClient, filePath, columnName) - if err != nil { - fmt.Printf("Warning: failed to compute min/max for file %s: %v\n", filePath, err) - return nil // Continue with other files - } - - // Update global min/max - if fileMin != nil { - if minSchemaValue == nil || e.compareValues(fileMin, minSchemaValue) < 0 { - minSchemaValue = fileMin - minValue = e.extractRawValue(fileMin) - } - } - - if fileMax != nil { - if maxSchemaValue == nil || e.compareValues(fileMax, maxSchemaValue) > 0 { - maxSchemaValue = fileMax - maxValue = e.extractRawValue(fileMax) - } - } - - return nil - }) - - if err != nil { - return nil, nil, fmt.Errorf("failed to process partition directory %s: %v", partitionPath, err) - } - - return minValue, maxValue, nil -} - -// computeFileMinMax scans a single log file to find MIN/MAX values for a specific column -func (e *SQLEngine) computeFileMinMax(filerClient filer_pb.FilerClient, filePath string, columnName string) (*schema_pb.Value, *schema_pb.Value, error) { - var minValue, maxValue *schema_pb.Value - - err := e.eachLogEntryInFile(filerClient, filePath, func(logEntry *filer_pb.LogEntry) error { - // Convert log entry to record value - recordValue, _, err := e.convertLogEntryToRecordValue(logEntry) - if err != nil { - return err // This will stop processing this file but not fail the overall query - } - - // Extract the requested column value - var columnValue *schema_pb.Value - if e.isSystemColumn(columnName) { - // Handle system columns - switch strings.ToLower(columnName) { - case SW_COLUMN_NAME_TIMESTAMP: - columnValue = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}} - case SW_COLUMN_NAME_KEY: - columnValue = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}} - case SW_COLUMN_NAME_SOURCE: - columnValue = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "live_log"}} - } - } else { - // Handle regular data columns - if value, exists := recordValue.Fields[columnName]; exists { - columnValue = value - } - } - - if columnValue == nil { - return nil // Skip this record - } - - // Update min/max - if minValue == nil || e.compareValues(columnValue, minValue) < 0 { - minValue = columnValue - } - if maxValue == nil || e.compareValues(columnValue, maxValue) > 0 { - maxValue = columnValue - } - - return nil - }) - - return minValue, maxValue, err -} - -// eachLogEntryInFile reads a log file and calls the provided function for each log entry -func (e *SQLEngine) eachLogEntryInFile(filerClient filer_pb.FilerClient, filePath string, fn func(*filer_pb.LogEntry) error) error { - // Extract directory and filename - // filePath is like "partitionPath/filename" - lastSlash := strings.LastIndex(filePath, "/") - if lastSlash == -1 { - return fmt.Errorf("invalid file path: %s", filePath) - } - - dirPath := filePath[:lastSlash] - fileName := filePath[lastSlash+1:] - - // Get file entry - var fileEntry *filer_pb.Entry - err := filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(dirPath), "", func(entry *filer_pb.Entry, isLast bool) error { - if entry.Name == fileName { - fileEntry = entry - } - return nil - }) - - if err != nil { - return fmt.Errorf("failed to find file %s: %v", filePath, err) - } - - if fileEntry == nil { - return fmt.Errorf("file not found: %s", filePath) - } - - lookupFileIdFn := filer.LookupFn(filerClient) - - // eachChunkFn processes each chunk's data (pattern from countRowsInLogFile) - eachChunkFn := func(buf []byte) error { - for pos := 0; pos+4 < len(buf); { - size := util.BytesToUint32(buf[pos : pos+4]) - if pos+4+int(size) > len(buf) { - break - } - - entryData := buf[pos+4 : pos+4+int(size)] - - logEntry := &filer_pb.LogEntry{} - if err := proto.Unmarshal(entryData, logEntry); err != nil { - pos += 4 + int(size) - continue // Skip corrupted entries - } - - // Call the provided function for each log entry - if err := fn(logEntry); err != nil { - return err - } - - pos += 4 + int(size) - } - return nil - } - - // Read file chunks and process them (pattern from countRowsInLogFile) - fileSize := filer.FileSize(fileEntry) - visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(context.Background(), lookupFileIdFn, fileEntry.Chunks, 0, int64(fileSize)) - chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize)) - - for x := chunkViews.Front(); x != nil; x = x.Next { - chunk := x.Value - urlStrings, err := lookupFileIdFn(context.Background(), chunk.FileId) - if err != nil { - fmt.Printf("Warning: failed to lookup chunk %s: %v\n", chunk.FileId, err) - continue - } - - if len(urlStrings) == 0 { - continue - } - - // Read chunk data - // urlStrings[0] is already a complete URL (http://server:port/fileId) - data, _, err := util_http.Get(urlStrings[0]) - if err != nil { - fmt.Printf("Warning: failed to read chunk %s from %s: %v\n", chunk.FileId, urlStrings[0], err) - continue - } - - // Process this chunk - if err := eachChunkFn(data); err != nil { - return err - } - } - - return nil -} - -// convertLogEntryToRecordValue helper method (reuse existing logic) -func (e *SQLEngine) convertLogEntryToRecordValue(logEntry *filer_pb.LogEntry) (*schema_pb.RecordValue, string, error) { - // Try to unmarshal as RecordValue first (schematized data) - recordValue := &schema_pb.RecordValue{} - err := proto.Unmarshal(logEntry.Data, recordValue) - if err == nil { - // Successfully unmarshaled as RecordValue (valid protobuf) - // Initialize Fields map if nil - if recordValue.Fields == nil { - recordValue.Fields = make(map[string]*schema_pb.Value) - } - - // Add system columns from LogEntry - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - - return recordValue, "live_log", nil - } - - // Failed to unmarshal as RecordValue - invalid protobuf data - return nil, "", fmt.Errorf("failed to unmarshal log entry protobuf: %w", err) -} - -// extractTimestampFromFilename extracts timestamp from parquet filename -// Format: YYYY-MM-DD-HH-MM-SS.parquet -func (e *SQLEngine) extractTimestampFromFilename(filename string) int64 { - // Remove .parquet extension - filename = strings.TrimSuffix(filename, ".parquet") - - // Parse timestamp format: 2006-01-02-15-04-05 - t, err := time.Parse("2006-01-02-15-04-05", filename) - if err != nil { - return 0 - } - - return t.UnixNano() -} - -// extractParquetSourceFiles extracts source log file names from parquet file metadata for deduplication -func (e *SQLEngine) extractParquetSourceFiles(fileStats []*ParquetFileStats) map[string]bool { - sourceFiles := make(map[string]bool) - - for _, fileStat := range fileStats { - // Each ParquetFileStats should have a reference to the original file entry - // but we need to get it through the hybrid scanner to access Extended metadata - // This is a simplified approach - in practice we'd need to access the filer entry - - // For now, we'll use filename-based deduplication as a fallback - // Extract timestamp from parquet filename (YYYY-MM-DD-HH-MM-SS.parquet) - if strings.HasSuffix(fileStat.FileName, ".parquet") { - timeStr := strings.TrimSuffix(fileStat.FileName, ".parquet") - // Mark this timestamp range as covered by parquet - sourceFiles[timeStr] = true - } - } - - return sourceFiles -} - -// countLiveLogRowsExcludingParquetSources counts live log rows but excludes files that were converted to parquet and duplicate log buffer data -func (e *SQLEngine) countLiveLogRowsExcludingParquetSources(ctx context.Context, partitionPath string, parquetSourceFiles map[string]bool) (int64, error) { - debugEnabled := ctx != nil && isDebugMode(ctx) - filerClient, err := e.catalog.brokerClient.GetFilerClient() - if err != nil { - return 0, err - } - - // First, get the actual source files from parquet metadata - actualSourceFiles, err := e.getParquetSourceFilesFromMetadata(partitionPath) - if err != nil { - // If we can't read parquet metadata, use filename-based fallback - fmt.Printf("Warning: failed to read parquet metadata, using filename-based deduplication: %v\n", err) - actualSourceFiles = parquetSourceFiles - } - - // Second, get duplicate files from log buffer metadata - logBufferDuplicates, err := e.buildLogBufferDeduplicationMap(ctx, partitionPath) - if err != nil { - if debugEnabled { - fmt.Printf("Warning: failed to build log buffer deduplication map: %v\n", err) - } - logBufferDuplicates = make(map[string]bool) - } - - // Debug: Show deduplication status (only in explain mode) - if debugEnabled { - if len(actualSourceFiles) > 0 { - fmt.Printf("Excluding %d converted log files from %s\n", len(actualSourceFiles), partitionPath) - } - if len(logBufferDuplicates) > 0 { - fmt.Printf("Excluding %d duplicate log buffer files from %s\n", len(logBufferDuplicates), partitionPath) - } - } - - totalRows := int64(0) - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - if entry.IsDirectory || strings.HasSuffix(entry.Name, ".parquet") { - return nil // Skip directories and parquet files - } - - // Skip files that have been converted to parquet - if actualSourceFiles[entry.Name] { - if debugEnabled { - fmt.Printf("Skipping %s (already converted to parquet)\n", entry.Name) - } - return nil - } - - // Skip files that are duplicated due to log buffer metadata - if logBufferDuplicates[entry.Name] { - if debugEnabled { - fmt.Printf("Skipping %s (duplicate log buffer data)\n", entry.Name) - } - return nil - } - - // Count rows in live log file - rowCount, err := e.countRowsInLogFile(filerClient, partitionPath, entry) - if err != nil { - fmt.Printf("Warning: failed to count rows in %s/%s: %v\n", partitionPath, entry.Name, err) - return nil // Continue with other files - } - totalRows += rowCount - return nil - }) - return totalRows, err -} - -// getParquetSourceFilesFromMetadata reads parquet file metadata to get actual source log files -func (e *SQLEngine) getParquetSourceFilesFromMetadata(partitionPath string) (map[string]bool, error) { - filerClient, err := e.catalog.brokerClient.GetFilerClient() - if err != nil { - return nil, err - } - - sourceFiles := make(map[string]bool) - - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - if entry.IsDirectory || !strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - - // Read source files from Extended metadata - if entry.Extended != nil && entry.Extended["sources"] != nil { - var sources []string - if err := json.Unmarshal(entry.Extended["sources"], &sources); err == nil { - for _, source := range sources { - sourceFiles[source] = true - } - } - } - - return nil - }) - - return sourceFiles, err -} - -// getLogBufferStartFromFile reads buffer start from file extended attributes -func (e *SQLEngine) getLogBufferStartFromFile(entry *filer_pb.Entry) (*LogBufferStart, error) { - if entry.Extended == nil { - return nil, nil - } - - // Only support binary buffer_start format - if startData, exists := entry.Extended["buffer_start"]; exists { - if len(startData) == 8 { - startIndex := int64(binary.BigEndian.Uint64(startData)) - if startIndex > 0 { - return &LogBufferStart{StartIndex: startIndex}, nil - } - } else { - return nil, fmt.Errorf("invalid buffer_start format: expected 8 bytes, got %d", len(startData)) - } - } - - return nil, nil -} - -// buildLogBufferDeduplicationMap creates a map to track duplicate files based on buffer ranges (ultra-efficient) -func (e *SQLEngine) buildLogBufferDeduplicationMap(ctx context.Context, partitionPath string) (map[string]bool, error) { - debugEnabled := ctx != nil && isDebugMode(ctx) - if e.catalog.brokerClient == nil { - return make(map[string]bool), nil - } - - filerClient, err := e.catalog.brokerClient.GetFilerClient() - if err != nil { - return make(map[string]bool), nil // Don't fail the query, just skip deduplication - } - - // Track buffer ranges instead of individual indexes (much more efficient) - type BufferRange struct { - start, end int64 - } - - processedRanges := make([]BufferRange, 0) - duplicateFiles := make(map[string]bool) - - err = filer_pb.ReadDirAllEntries(context.Background(), filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - if entry.IsDirectory || strings.HasSuffix(entry.Name, ".parquet") { - return nil // Skip directories and parquet files - } - - // Get buffer start for this file (most efficient) - bufferStart, err := e.getLogBufferStartFromFile(entry) - if err != nil || bufferStart == nil { - return nil // No buffer info, can't deduplicate - } - - // Calculate range for this file: [start, start + chunkCount - 1] - chunkCount := int64(len(entry.GetChunks())) - if chunkCount == 0 { - return nil // Empty file, skip - } - - fileRange := BufferRange{ - start: bufferStart.StartIndex, - end: bufferStart.StartIndex + chunkCount - 1, - } - - // Check if this range overlaps with any processed range - isDuplicate := false - for _, processedRange := range processedRanges { - if fileRange.start <= processedRange.end && fileRange.end >= processedRange.start { - // Ranges overlap - this file contains duplicate buffer indexes - isDuplicate = true - if debugEnabled { - fmt.Printf("Marking %s as duplicate (buffer range [%d-%d] overlaps with [%d-%d])\n", - entry.Name, fileRange.start, fileRange.end, processedRange.start, processedRange.end) - } - break - } - } - - if isDuplicate { - duplicateFiles[entry.Name] = true - } else { - // Add this range to processed ranges - processedRanges = append(processedRanges, fileRange) - } - - return nil - }) - - if err != nil { - return make(map[string]bool), nil // Don't fail the query - } - - return duplicateFiles, nil -} - -// countRowsInLogFile counts rows in a single log file using SeaweedFS patterns -func (e *SQLEngine) countRowsInLogFile(filerClient filer_pb.FilerClient, partitionPath string, entry *filer_pb.Entry) (int64, error) { - lookupFileIdFn := filer.LookupFn(filerClient) - - rowCount := int64(0) - - // eachChunkFn processes each chunk's data (pattern from read_log_from_disk.go) - eachChunkFn := func(buf []byte) error { - for pos := 0; pos+4 < len(buf); { - size := util.BytesToUint32(buf[pos : pos+4]) - if pos+4+int(size) > len(buf) { - break - } - - entryData := buf[pos+4 : pos+4+int(size)] - - logEntry := &filer_pb.LogEntry{} - if err := proto.Unmarshal(entryData, logEntry); err != nil { - pos += 4 + int(size) - continue // Skip corrupted entries - } - - // Skip control messages (publisher control, empty key, or no data) - if isControlLogEntry(logEntry) { - pos += 4 + int(size) - continue - } - - rowCount++ - pos += 4 + int(size) - } - return nil - } - - // Read file chunks and process them (pattern from read_log_from_disk.go) - fileSize := filer.FileSize(entry) - visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(context.Background(), lookupFileIdFn, entry.Chunks, 0, int64(fileSize)) - chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize)) - - for x := chunkViews.Front(); x != nil; x = x.Next { - chunk := x.Value - urlStrings, err := lookupFileIdFn(context.Background(), chunk.FileId) - if err != nil { - fmt.Printf("Warning: failed to lookup chunk %s: %v\n", chunk.FileId, err) - continue - } - - if len(urlStrings) == 0 { - continue - } - - // Read chunk data - // urlStrings[0] is already a complete URL (http://server:port/fileId) - data, _, err := util_http.Get(urlStrings[0]) - if err != nil { - fmt.Printf("Warning: failed to read chunk %s from %s: %v\n", chunk.FileId, urlStrings[0], err) - continue - } - - // Process this chunk - if err := eachChunkFn(data); err != nil { - return rowCount, err - } - } - - return rowCount, nil -} - -// isControlLogEntry checks if a log entry is a control entry without actual user data -// Control entries include: -// - DataMessages with populated Ctrl field (publisher control signals) -// - Entries with empty keys (filtered by subscriber) -// - Entries with no data -func isControlLogEntry(logEntry *filer_pb.LogEntry) bool { - // No data: control or placeholder - if len(logEntry.Data) == 0 { - return true - } - - // Empty keys are treated as control entries (consistent with subscriber filtering) - if len(logEntry.Key) == 0 { - return true - } - - // Check if the payload is a DataMessage carrying a control signal - dataMessage := &mq_pb.DataMessage{} - if err := proto.Unmarshal(logEntry.Data, dataMessage); err == nil { - if dataMessage.Ctrl != nil { - return true - } - } - - return false -} - -// discoverTopicPartitions discovers all partitions for a given topic using centralized logic -func (e *SQLEngine) discoverTopicPartitions(namespace, topicName string) ([]string, error) { - // Use centralized topic partition discovery - t := topic.NewTopic(namespace, topicName) - - // Get FilerClient from BrokerClient - filerClient, err := e.catalog.brokerClient.GetFilerClient() - if err != nil { - return nil, err - } - - return t.DiscoverPartitions(context.Background(), filerClient) -} - -// getTopicTotalRowCount returns the total number of rows in a topic (combining parquet and live logs) -func (e *SQLEngine) getTopicTotalRowCount(ctx context.Context, namespace, topicName string) (int64, error) { - // Create a hybrid scanner to access parquet statistics - var filerClient filer_pb.FilerClient - if e.catalog.brokerClient != nil { - var filerClientErr error - filerClient, filerClientErr = e.catalog.brokerClient.GetFilerClient() - if filerClientErr != nil { - return 0, filerClientErr - } - } - - hybridScanner, err := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, namespace, topicName, e) - if err != nil { - return 0, err - } - - // Get all partitions for this topic - // Note: discoverTopicPartitions always returns absolute paths - partitions, err := e.discoverTopicPartitions(namespace, topicName) - if err != nil { - return 0, err - } - - totalRowCount := int64(0) - - // For each partition, count both parquet and live log rows - for _, partition := range partitions { - // Count parquet rows - parquetStats, parquetErr := hybridScanner.ReadParquetStatistics(partition) - if parquetErr == nil { - for _, stats := range parquetStats { - totalRowCount += stats.RowCount - } - } - - // Count live log rows (with deduplication) - parquetSourceFiles := make(map[string]bool) - if parquetErr == nil { - parquetSourceFiles = e.extractParquetSourceFiles(parquetStats) - } - - liveLogCount, liveLogErr := e.countLiveLogRowsExcludingParquetSources(ctx, partition, parquetSourceFiles) - if liveLogErr == nil { - totalRowCount += liveLogCount - } - } - - return totalRowCount, nil -} - -// getActualRowsScannedForFastPath returns only the rows that need to be scanned for fast path aggregations -// (i.e., live log rows that haven't been converted to parquet - parquet uses metadata only) -func (e *SQLEngine) getActualRowsScannedForFastPath(ctx context.Context, namespace, topicName string) (int64, error) { - // Create a hybrid scanner to access parquet statistics - var filerClient filer_pb.FilerClient - if e.catalog.brokerClient != nil { - var filerClientErr error - filerClient, filerClientErr = e.catalog.brokerClient.GetFilerClient() - if filerClientErr != nil { - return 0, filerClientErr - } - } - - hybridScanner, err := NewHybridMessageScanner(filerClient, e.catalog.brokerClient, namespace, topicName, e) - if err != nil { - return 0, err - } - - // Get all partitions for this topic - // Note: discoverTopicPartitions always returns absolute paths - partitions, err := e.discoverTopicPartitions(namespace, topicName) - if err != nil { - return 0, err - } - - totalScannedRows := int64(0) - - // For each partition, count ONLY the live log rows that need scanning - // (parquet files use metadata/statistics, so they contribute 0 to scan count) - for _, partition := range partitions { - // Get parquet files to determine what was converted - parquetStats, parquetErr := hybridScanner.ReadParquetStatistics(partition) - parquetSourceFiles := make(map[string]bool) - if parquetErr == nil { - parquetSourceFiles = e.extractParquetSourceFiles(parquetStats) - } - - // Count only live log rows that haven't been converted to parquet - liveLogCount, liveLogErr := e.countLiveLogRowsExcludingParquetSources(ctx, partition, parquetSourceFiles) - if liveLogErr == nil { - totalScannedRows += liveLogCount - } - - // Note: Parquet files contribute 0 to scan count since we use their metadata/statistics - } - - return totalScannedRows, nil -} - -// findColumnValue performs case-insensitive lookup of column values -// Now includes support for system columns stored in HybridScanResult -func (e *SQLEngine) findColumnValue(result HybridScanResult, columnName string) *schema_pb.Value { - // Check system columns first (stored separately in HybridScanResult) - lowerColumnName := strings.ToLower(columnName) - switch lowerColumnName { - case SW_COLUMN_NAME_TIMESTAMP, SW_DISPLAY_NAME_TIMESTAMP: - // For timestamp column, format as proper timestamp instead of raw nanoseconds - timestamp := time.Unix(result.Timestamp/1e9, result.Timestamp%1e9) - timestampStr := timestamp.UTC().Format("2006-01-02T15:04:05.000000000Z") - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: timestampStr}} - case SW_COLUMN_NAME_KEY: - return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: result.Key}} - case SW_COLUMN_NAME_SOURCE: - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: result.Source}} - } - - // Then check regular columns in Values map - // First try exact match - if value, exists := result.Values[columnName]; exists { - return value - } - - // Then try case-insensitive match - for key, value := range result.Values { - if strings.ToLower(key) == lowerColumnName { - return value - } - } - - return nil -} - -// discoverAndRegisterTopic attempts to discover an existing topic and register it in the SQL catalog -func (e *SQLEngine) discoverAndRegisterTopic(ctx context.Context, database, tableName string) error { - // First, check if topic exists by trying to get its schema from the broker/filer - recordType, _, _, err := e.catalog.brokerClient.GetTopicSchema(ctx, database, tableName) - if err != nil { - return fmt.Errorf("topic %s.%s not found or no schema available: %v", database, tableName, err) - } - - // Create a schema object from the discovered record type - mqSchema := &schema.Schema{ - Namespace: database, - Name: tableName, - RecordType: recordType, - RevisionId: 1, // Default to revision 1 for discovered topics - } - - // Register the topic in the SQL catalog - err = e.catalog.RegisterTopic(database, tableName, mqSchema) - if err != nil { - return fmt.Errorf("failed to register discovered topic %s.%s: %v", database, tableName, err) - } - - // Note: This is a discovery operation, not query execution, so it's okay to always log - return nil -} - -// getArithmeticExpressionAlias generates a display alias for arithmetic expressions -func (e *SQLEngine) getArithmeticExpressionAlias(expr *ArithmeticExpr) string { - leftAlias := e.getExpressionAlias(expr.Left) - rightAlias := e.getExpressionAlias(expr.Right) - return leftAlias + expr.Operator + rightAlias -} - -// getExpressionAlias generates an alias for any expression node -func (e *SQLEngine) getExpressionAlias(expr ExprNode) string { - switch exprType := expr.(type) { - case *ColName: - return exprType.Name.String() - case *ArithmeticExpr: - return e.getArithmeticExpressionAlias(exprType) - case *SQLVal: - return e.getSQLValAlias(exprType) - default: - return "expr" - } -} - -// evaluateArithmeticExpression evaluates an arithmetic expression for a given record -func (e *SQLEngine) evaluateArithmeticExpression(expr *ArithmeticExpr, result HybridScanResult) (*schema_pb.Value, error) { - // Check for timestamp arithmetic with intervals first - if e.isTimestampArithmetic(expr.Left, expr.Right) && (expr.Operator == "+" || expr.Operator == "-") { - return e.evaluateTimestampArithmetic(expr.Left, expr.Right, expr.Operator) - } - - // Get left operand value - leftValue, err := e.evaluateExpressionValue(expr.Left, result) - if err != nil { - return nil, fmt.Errorf("error evaluating left operand: %v", err) - } - - // Get right operand value - rightValue, err := e.evaluateExpressionValue(expr.Right, result) - if err != nil { - return nil, fmt.Errorf("error evaluating right operand: %v", err) - } - - // Handle string concatenation operator - if expr.Operator == "||" { - return e.Concat(leftValue, rightValue) - } - - // Perform arithmetic operation - var op ArithmeticOperator - switch expr.Operator { - case "+": - op = OpAdd - case "-": - op = OpSub - case "*": - op = OpMul - case "/": - op = OpDiv - case "%": - op = OpMod - default: - return nil, fmt.Errorf("unsupported arithmetic operator: %s", expr.Operator) - } - - return e.EvaluateArithmeticExpression(leftValue, rightValue, op) -} - -// isTimestampArithmetic checks if an arithmetic operation involves timestamps and intervals -func (e *SQLEngine) isTimestampArithmetic(left, right ExprNode) bool { - // Check if left is a timestamp function (NOW, CURRENT_TIMESTAMP, etc.) - leftIsTimestamp := e.isTimestampFunction(left) - - // Check if right is an interval - rightIsInterval := e.isIntervalExpression(right) - - return leftIsTimestamp && rightIsInterval -} - -// isTimestampFunction checks if an expression is a timestamp function -func (e *SQLEngine) isTimestampFunction(expr ExprNode) bool { - if funcExpr, ok := expr.(*FuncExpr); ok { - funcName := strings.ToUpper(funcExpr.Name.String()) - return funcName == "NOW" || funcName == "CURRENT_TIMESTAMP" || funcName == "CURRENT_DATE" || funcName == "CURRENT_TIME" - } - return false -} - -// isIntervalExpression checks if an expression is an interval -func (e *SQLEngine) isIntervalExpression(expr ExprNode) bool { - _, ok := expr.(*IntervalExpr) - return ok -} - -// evaluateExpressionValue evaluates any expression to get its value from a record -func (e *SQLEngine) evaluateExpressionValue(expr ExprNode, result HybridScanResult) (*schema_pb.Value, error) { - switch exprType := expr.(type) { - case *ColName: - columnName := exprType.Name.String() - upperColumnName := strings.ToUpper(columnName) - - // Check if this is actually a string literal that was parsed as ColName - if (strings.HasPrefix(columnName, "'") && strings.HasSuffix(columnName, "'")) || - (strings.HasPrefix(columnName, "\"") && strings.HasSuffix(columnName, "\"")) { - // This is a string literal that was incorrectly parsed as a column name - literal := strings.Trim(strings.Trim(columnName, "'"), "\"") - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: literal}}, nil - } - - // Check if this is actually a function call that was parsed as ColName - if strings.Contains(columnName, "(") && strings.Contains(columnName, ")") { - // This is a function call that was parsed incorrectly as a column name - // We need to manually evaluate it as a function - return e.evaluateColumnNameAsFunction(columnName, result) - } - - // Check if this is a datetime constant - if upperColumnName == FuncCURRENT_DATE || upperColumnName == FuncCURRENT_TIME || - upperColumnName == FuncCURRENT_TIMESTAMP || upperColumnName == FuncNOW { - switch upperColumnName { - case FuncCURRENT_DATE: - return e.CurrentDate() - case FuncCURRENT_TIME: - return e.CurrentTime() - case FuncCURRENT_TIMESTAMP: - return e.CurrentTimestamp() - case FuncNOW: - return e.Now() - } - } - - // Check if this is actually a numeric literal disguised as a column name - if val, err := strconv.ParseInt(columnName, 10, 64); err == nil { - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: val}}, nil - } - if val, err := strconv.ParseFloat(columnName, 64); err == nil { - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: val}}, nil - } - - // Otherwise, treat as a regular column lookup - value := e.findColumnValue(result, columnName) - if value == nil { - return nil, nil - } - return value, nil - case *ArithmeticExpr: - return e.evaluateArithmeticExpression(exprType, result) - case *SQLVal: - // Handle literal values - return e.convertSQLValToSchemaValue(exprType), nil - case *FuncExpr: - // Handle function calls that are part of arithmetic expressions - funcName := strings.ToUpper(exprType.Name.String()) - - // Route to appropriate function evaluator based on function type - if e.isDateTimeFunction(funcName) { - // Use datetime function evaluator - return e.evaluateDateTimeFunction(exprType, result) - } else { - // Use string function evaluator - return e.evaluateStringFunction(exprType, result) - } - case *IntervalExpr: - // Handle interval expressions - evaluate as duration in nanoseconds - nanos, err := e.evaluateInterval(exprType.Value) - if err != nil { - return nil, err - } - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: nanos}, - }, nil - default: - return nil, fmt.Errorf("unsupported expression type: %T", expr) - } -} - -// convertSQLValToSchemaValue converts SQLVal literal to schema_pb.Value -func (e *SQLEngine) convertSQLValToSchemaValue(sqlVal *SQLVal) *schema_pb.Value { - switch sqlVal.Type { - case IntVal: - if val, err := strconv.ParseInt(string(sqlVal.Val), 10, 64); err == nil { - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: val}} - } - case FloatVal: - if val, err := strconv.ParseFloat(string(sqlVal.Val), 64); err == nil { - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: val}} - } - case StrVal: - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(sqlVal.Val)}} - } - // Default to string if parsing fails - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(sqlVal.Val)}} -} - -// ConvertToSQLResultWithExpressions converts HybridScanResults to SQL query results with expression evaluation -func (e *SQLEngine) ConvertToSQLResultWithExpressions(hms *HybridMessageScanner, results []HybridScanResult, selectExprs []SelectExpr) *QueryResult { - if len(results) == 0 { - columns := make([]string, 0, len(selectExprs)) - for _, selectExpr := range selectExprs { - switch expr := selectExpr.(type) { - case *AliasedExpr: - // Check if alias is available and use it - if expr.As != nil && !expr.As.IsEmpty() { - columns = append(columns, expr.As.String()) - } else { - // Fall back to expression-based column naming - switch col := expr.Expr.(type) { - case *ColName: - columnName := col.Name.String() - upperColumnName := strings.ToUpper(columnName) - - // Check if this is an arithmetic expression embedded in a ColName - if arithmeticExpr := e.parseColumnLevelCalculation(columnName); arithmeticExpr != nil { - columns = append(columns, e.getArithmeticExpressionAlias(arithmeticExpr)) - } else if upperColumnName == FuncCURRENT_DATE || upperColumnName == FuncCURRENT_TIME || - upperColumnName == FuncCURRENT_TIMESTAMP || upperColumnName == FuncNOW { - // Use lowercase for datetime constants in column headers - columns = append(columns, strings.ToLower(columnName)) - } else { - // Use display name for system columns - displayName := e.getSystemColumnDisplayName(columnName) - columns = append(columns, displayName) - } - case *ArithmeticExpr: - columns = append(columns, e.getArithmeticExpressionAlias(col)) - case *FuncExpr: - columns = append(columns, e.getStringFunctionAlias(col)) - case *SQLVal: - columns = append(columns, e.getSQLValAlias(col)) - default: - columns = append(columns, "expr") - } - } - } - } - - return &QueryResult{ - Columns: columns, - Rows: [][]sqltypes.Value{}, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } - } - - // Build columns from SELECT expressions - columns := make([]string, 0, len(selectExprs)) - for _, selectExpr := range selectExprs { - switch expr := selectExpr.(type) { - case *AliasedExpr: - // Check if alias is available and use it - if expr.As != nil && !expr.As.IsEmpty() { - columns = append(columns, expr.As.String()) - } else { - // Fall back to expression-based column naming - switch col := expr.Expr.(type) { - case *ColName: - columnName := col.Name.String() - upperColumnName := strings.ToUpper(columnName) - - // Check if this is an arithmetic expression embedded in a ColName - if arithmeticExpr := e.parseColumnLevelCalculation(columnName); arithmeticExpr != nil { - columns = append(columns, e.getArithmeticExpressionAlias(arithmeticExpr)) - } else if upperColumnName == FuncCURRENT_DATE || upperColumnName == FuncCURRENT_TIME || - upperColumnName == FuncCURRENT_TIMESTAMP || upperColumnName == FuncNOW { - // Use lowercase for datetime constants in column headers - columns = append(columns, strings.ToLower(columnName)) - } else { - columns = append(columns, columnName) - } - case *ArithmeticExpr: - columns = append(columns, e.getArithmeticExpressionAlias(col)) - case *FuncExpr: - columns = append(columns, e.getStringFunctionAlias(col)) - case *SQLVal: - columns = append(columns, e.getSQLValAlias(col)) - default: - columns = append(columns, "expr") - } - } - } - } - - // Convert to SQL rows with expression evaluation - rows := make([][]sqltypes.Value, len(results)) - for i, result := range results { - row := make([]sqltypes.Value, len(selectExprs)) - for j, selectExpr := range selectExprs { - switch expr := selectExpr.(type) { - case *AliasedExpr: - switch col := expr.Expr.(type) { - case *ColName: - // Handle regular column, datetime constants, or arithmetic expressions - columnName := col.Name.String() - upperColumnName := strings.ToUpper(columnName) - - // Check if this is an arithmetic expression embedded in a ColName - if arithmeticExpr := e.parseColumnLevelCalculation(columnName); arithmeticExpr != nil { - // Handle as arithmetic expression - if value, err := e.evaluateArithmeticExpression(arithmeticExpr, result); err == nil && value != nil { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } else if upperColumnName == "CURRENT_DATE" || upperColumnName == "CURRENT_TIME" || - upperColumnName == "CURRENT_TIMESTAMP" || upperColumnName == "NOW" { - // Handle as datetime function - var value *schema_pb.Value - var err error - switch upperColumnName { - case FuncCURRENT_DATE: - value, err = e.CurrentDate() - case FuncCURRENT_TIME: - value, err = e.CurrentTime() - case FuncCURRENT_TIMESTAMP: - value, err = e.CurrentTimestamp() - case FuncNOW: - value, err = e.Now() - } - - if err == nil && value != nil { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } else { - // Handle as regular column - if value := e.findColumnValue(result, columnName); value != nil { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } - case *ArithmeticExpr: - // Handle arithmetic expression - if value, err := e.evaluateArithmeticExpression(col, result); err == nil && value != nil { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - case *FuncExpr: - // Handle function - route to appropriate evaluator - funcName := strings.ToUpper(col.Name.String()) - var value *schema_pb.Value - var err error - - // Check if it's a datetime function - if e.isDateTimeFunction(funcName) { - value, err = e.evaluateDateTimeFunction(col, result) - } else { - // Default to string function evaluator - value, err = e.evaluateStringFunction(col, result) - } - - if err == nil && value != nil { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - case *SQLVal: - // Handle literal value - value := e.convertSQLValToSchemaValue(col) - row[j] = convertSchemaValueToSQL(value) - default: - row[j] = sqltypes.NULL - } - default: - row[j] = sqltypes.NULL - } - } - rows[i] = row - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } -} - -// extractBaseColumns recursively extracts base column names from arithmetic expressions -func (e *SQLEngine) extractBaseColumns(expr *ArithmeticExpr, baseColumnsSet map[string]bool) { - // Extract columns from left operand - e.extractBaseColumnsFromExpression(expr.Left, baseColumnsSet) - // Extract columns from right operand - e.extractBaseColumnsFromExpression(expr.Right, baseColumnsSet) -} - -// extractBaseColumnsFromExpression extracts base column names from any expression node -func (e *SQLEngine) extractBaseColumnsFromExpression(expr ExprNode, baseColumnsSet map[string]bool) { - switch exprType := expr.(type) { - case *ColName: - columnName := exprType.Name.String() - // Check if it's a literal number disguised as a column name - if _, err := strconv.ParseInt(columnName, 10, 64); err != nil { - if _, err := strconv.ParseFloat(columnName, 64); err != nil { - // Not a numeric literal, treat as actual column name - baseColumnsSet[columnName] = true - } - } - case *ArithmeticExpr: - // Recursively handle nested arithmetic expressions - e.extractBaseColumns(exprType, baseColumnsSet) - } -} - -// isAggregationFunction checks if a function name is an aggregation function -func (e *SQLEngine) isAggregationFunction(funcName string) bool { - // Convert to uppercase for case-insensitive comparison - upperFuncName := strings.ToUpper(funcName) - switch upperFuncName { - case FuncCOUNT, FuncSUM, FuncAVG, FuncMIN, FuncMAX: - return true - default: - return false - } -} - -// isStringFunction checks if a function name is a string function -func (e *SQLEngine) isStringFunction(funcName string) bool { - switch funcName { - case FuncUPPER, FuncLOWER, FuncLENGTH, FuncTRIM, FuncBTRIM, FuncLTRIM, FuncRTRIM, FuncSUBSTRING, FuncLEFT, FuncRIGHT, FuncCONCAT: - return true - default: - return false - } -} - -// isDateTimeFunction checks if a function name is a datetime function -func (e *SQLEngine) isDateTimeFunction(funcName string) bool { - switch funcName { - case FuncCURRENT_DATE, FuncCURRENT_TIME, FuncCURRENT_TIMESTAMP, FuncNOW, FuncEXTRACT, FuncDATE_TRUNC: - return true - default: - return false - } -} - -// getStringFunctionAlias generates an alias for string functions -func (e *SQLEngine) getStringFunctionAlias(funcExpr *FuncExpr) string { - funcName := funcExpr.Name.String() - if len(funcExpr.Exprs) == 1 { - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - if colName, ok := aliasedExpr.Expr.(*ColName); ok { - return fmt.Sprintf("%s(%s)", funcName, colName.Name.String()) - } - } - } - return fmt.Sprintf("%s(...)", funcName) -} - -// getDateTimeFunctionAlias generates an alias for datetime functions -func (e *SQLEngine) getDateTimeFunctionAlias(funcExpr *FuncExpr) string { - funcName := funcExpr.Name.String() - - // Handle zero-argument functions like CURRENT_DATE, NOW - if len(funcExpr.Exprs) == 0 { - // Use lowercase for datetime constants in column headers - return strings.ToLower(funcName) - } - - // Handle EXTRACT function specially to create unique aliases - if strings.ToUpper(funcName) == "EXTRACT" && len(funcExpr.Exprs) == 2 { - // Try to extract the date part to make the alias unique - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - if sqlVal, ok := aliasedExpr.Expr.(*SQLVal); ok && sqlVal.Type == StrVal { - datePart := strings.ToLower(string(sqlVal.Val)) - return fmt.Sprintf("extract_%s", datePart) - } - } - // Fallback to generic if we can't extract the date part - return fmt.Sprintf("%s(...)", funcName) - } - - // Handle other multi-argument functions like DATE_TRUNC - if len(funcExpr.Exprs) == 2 { - return fmt.Sprintf("%s(...)", funcName) - } - - return fmt.Sprintf("%s(...)", funcName) -} - -// extractBaseColumnsFromFunction extracts base columns needed by a string function -func (e *SQLEngine) extractBaseColumnsFromFunction(funcExpr *FuncExpr, baseColumnsSet map[string]bool) { - for _, expr := range funcExpr.Exprs { - if aliasedExpr, ok := expr.(*AliasedExpr); ok { - e.extractBaseColumnsFromExpression(aliasedExpr.Expr, baseColumnsSet) - } - } -} - -// getSQLValAlias generates an alias for SQL literal values -func (e *SQLEngine) getSQLValAlias(sqlVal *SQLVal) string { - switch sqlVal.Type { - case StrVal: - // Escape single quotes by replacing ' with '' (SQL standard escaping) - escapedVal := strings.ReplaceAll(string(sqlVal.Val), "'", "''") - return fmt.Sprintf("'%s'", escapedVal) - case IntVal: - return string(sqlVal.Val) - case FloatVal: - return string(sqlVal.Val) - default: - return "literal" - } -} - -// evaluateStringFunction evaluates a string function for a given record -func (e *SQLEngine) evaluateStringFunction(funcExpr *FuncExpr, result HybridScanResult) (*schema_pb.Value, error) { - funcName := strings.ToUpper(funcExpr.Name.String()) - - // Most string functions require exactly 1 argument - if len(funcExpr.Exprs) != 1 { - return nil, fmt.Errorf("function %s expects exactly 1 argument", funcName) - } - - // Get the argument value - var argValue *schema_pb.Value - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - var err error - argValue, err = e.evaluateExpressionValue(aliasedExpr.Expr, result) - if err != nil { - return nil, fmt.Errorf("error evaluating function argument: %v", err) - } - } else { - return nil, fmt.Errorf("unsupported function argument type") - } - - if argValue == nil { - return nil, nil // NULL input produces NULL output - } - - // Call the appropriate string function - switch funcName { - case FuncUPPER: - return e.Upper(argValue) - case FuncLOWER: - return e.Lower(argValue) - case FuncLENGTH: - return e.Length(argValue) - case FuncTRIM, FuncBTRIM: // CockroachDB converts TRIM to BTRIM - return e.Trim(argValue) - case FuncLTRIM: - return e.LTrim(argValue) - case FuncRTRIM: - return e.RTrim(argValue) - default: - return nil, fmt.Errorf("unsupported string function: %s", funcName) - } -} - -// evaluateDateTimeFunction evaluates a datetime function for a given record -func (e *SQLEngine) evaluateDateTimeFunction(funcExpr *FuncExpr, result HybridScanResult) (*schema_pb.Value, error) { - funcName := strings.ToUpper(funcExpr.Name.String()) - - switch funcName { - case FuncEXTRACT: - // EXTRACT requires exactly 2 arguments: date part and value - if len(funcExpr.Exprs) != 2 { - return nil, fmt.Errorf("EXTRACT function expects exactly 2 arguments (date_part, value), got %d", len(funcExpr.Exprs)) - } - - // Get the first argument (date part) - var datePartValue *schema_pb.Value - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - var err error - datePartValue, err = e.evaluateExpressionValue(aliasedExpr.Expr, result) - if err != nil { - return nil, fmt.Errorf("error evaluating EXTRACT date part argument: %v", err) - } - } else { - return nil, fmt.Errorf("unsupported EXTRACT date part argument type") - } - - if datePartValue == nil { - return nil, fmt.Errorf("EXTRACT date part cannot be NULL") - } - - // Convert date part to string - var datePart string - if stringVal, ok := datePartValue.Kind.(*schema_pb.Value_StringValue); ok { - datePart = strings.ToUpper(stringVal.StringValue) - } else { - return nil, fmt.Errorf("EXTRACT date part must be a string") - } - - // Get the second argument (value to extract from) - var extractValue *schema_pb.Value - if aliasedExpr, ok := funcExpr.Exprs[1].(*AliasedExpr); ok { - var err error - extractValue, err = e.evaluateExpressionValue(aliasedExpr.Expr, result) - if err != nil { - return nil, fmt.Errorf("error evaluating EXTRACT value argument: %v", err) - } - } else { - return nil, fmt.Errorf("unsupported EXTRACT value argument type") - } - - if extractValue == nil { - return nil, nil // NULL input produces NULL output - } - - // Call the Extract function - return e.Extract(DatePart(datePart), extractValue) - - case FuncDATE_TRUNC: - // DATE_TRUNC requires exactly 2 arguments: precision and value - if len(funcExpr.Exprs) != 2 { - return nil, fmt.Errorf("DATE_TRUNC function expects exactly 2 arguments (precision, value), got %d", len(funcExpr.Exprs)) - } - - // Get the first argument (precision) - var precisionValue *schema_pb.Value - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - var err error - precisionValue, err = e.evaluateExpressionValue(aliasedExpr.Expr, result) - if err != nil { - return nil, fmt.Errorf("error evaluating DATE_TRUNC precision argument: %v", err) - } - } else { - return nil, fmt.Errorf("unsupported DATE_TRUNC precision argument type") - } - - if precisionValue == nil { - return nil, fmt.Errorf("DATE_TRUNC precision cannot be NULL") - } - - // Convert precision to string - var precision string - if stringVal, ok := precisionValue.Kind.(*schema_pb.Value_StringValue); ok { - precision = stringVal.StringValue - } else { - return nil, fmt.Errorf("DATE_TRUNC precision must be a string") - } - - // Get the second argument (value to truncate) - var truncateValue *schema_pb.Value - if aliasedExpr, ok := funcExpr.Exprs[1].(*AliasedExpr); ok { - var err error - truncateValue, err = e.evaluateExpressionValue(aliasedExpr.Expr, result) - if err != nil { - return nil, fmt.Errorf("error evaluating DATE_TRUNC value argument: %v", err) - } - } else { - return nil, fmt.Errorf("unsupported DATE_TRUNC value argument type") - } - - if truncateValue == nil { - return nil, nil // NULL input produces NULL output - } - - // Call the DateTrunc function - return e.DateTrunc(precision, truncateValue) - - case FuncCURRENT_DATE: - // CURRENT_DATE is a zero-argument function - if len(funcExpr.Exprs) != 0 { - return nil, fmt.Errorf("CURRENT_DATE function expects no arguments, got %d", len(funcExpr.Exprs)) - } - return e.CurrentDate() - - case FuncCURRENT_TIME: - // CURRENT_TIME is a zero-argument function - if len(funcExpr.Exprs) != 0 { - return nil, fmt.Errorf("CURRENT_TIME function expects no arguments, got %d", len(funcExpr.Exprs)) - } - return e.CurrentTime() - - case FuncCURRENT_TIMESTAMP: - // CURRENT_TIMESTAMP is a zero-argument function - if len(funcExpr.Exprs) != 0 { - return nil, fmt.Errorf("CURRENT_TIMESTAMP function expects no arguments, got %d", len(funcExpr.Exprs)) - } - return e.CurrentTimestamp() - - case FuncNOW: - // NOW is a zero-argument function (but often used with () syntax) - if len(funcExpr.Exprs) != 0 { - return nil, fmt.Errorf("NOW function expects no arguments, got %d", len(funcExpr.Exprs)) - } - return e.Now() - - // PostgreSQL uses EXTRACT(part FROM date) instead of convenience functions like YEAR(date) - - default: - return nil, fmt.Errorf("unsupported datetime function: %s", funcName) - } -} - -// evaluateInterval parses an interval string and returns duration in nanoseconds -func (e *SQLEngine) evaluateInterval(intervalValue string) (int64, error) { - // Parse interval strings like "1 hour", "30 minutes", "2 days" - parts := strings.Fields(strings.TrimSpace(intervalValue)) - if len(parts) != 2 { - return 0, fmt.Errorf("invalid interval format: %s (expected 'number unit')", intervalValue) - } - - // Parse the numeric value - value, err := strconv.ParseInt(parts[0], 10, 64) - if err != nil { - return 0, fmt.Errorf("invalid interval value: %s", parts[0]) - } - - // Parse the unit and convert to nanoseconds - unit := strings.ToLower(parts[1]) - var multiplier int64 - - switch unit { - case "nanosecond", "nanoseconds", "ns": - multiplier = 1 - case "microsecond", "microseconds", "us": - multiplier = 1000 - case "millisecond", "milliseconds", "ms": - multiplier = 1000000 - case "second", "seconds", "s": - multiplier = 1000000000 - case "minute", "minutes", "m": - multiplier = 60 * 1000000000 - case "hour", "hours", "h": - multiplier = 60 * 60 * 1000000000 - case "day", "days", "d": - multiplier = 24 * 60 * 60 * 1000000000 - case "week", "weeks", "w": - multiplier = 7 * 24 * 60 * 60 * 1000000000 - default: - return 0, fmt.Errorf("unsupported interval unit: %s", unit) - } - - return value * multiplier, nil -} - -// convertValueForTimestampColumn converts string timestamp values to nanoseconds for system timestamp columns -func (e *SQLEngine) convertValueForTimestampColumn(columnName string, value interface{}, expr ExprNode) interface{} { - // Special handling for timestamp system columns - if columnName == SW_COLUMN_NAME_TIMESTAMP { - if _, ok := value.(string); ok { - if timeNanos := e.extractTimeValue(expr); timeNanos != 0 { - return timeNanos - } - } - } - return value -} - -// evaluateTimestampArithmetic performs arithmetic operations with timestamps and intervals -func (e *SQLEngine) evaluateTimestampArithmetic(left, right ExprNode, operator string) (*schema_pb.Value, error) { - // Handle timestamp arithmetic: NOW() - INTERVAL '1 hour' - // For timestamp arithmetic, we don't need the result context, so we pass an empty one - emptyResult := HybridScanResult{} - - leftValue, err := e.evaluateExpressionValue(left, emptyResult) - if err != nil { - return nil, fmt.Errorf("failed to evaluate left operand: %v", err) - } - - rightValue, err := e.evaluateExpressionValue(right, emptyResult) - if err != nil { - return nil, fmt.Errorf("failed to evaluate right operand: %v", err) - } - - // Convert left operand (should be timestamp) - var leftTimestamp int64 - if leftValue.Kind != nil { - switch leftKind := leftValue.Kind.(type) { - case *schema_pb.Value_Int64Value: - leftTimestamp = leftKind.Int64Value - case *schema_pb.Value_TimestampValue: - // Convert microseconds to nanoseconds - leftTimestamp = leftKind.TimestampValue.TimestampMicros * 1000 - case *schema_pb.Value_StringValue: - // Parse timestamp string - if ts, err := time.Parse(time.RFC3339, leftKind.StringValue); err == nil { - leftTimestamp = ts.UnixNano() - } else if ts, err := time.Parse("2006-01-02 15:04:05", leftKind.StringValue); err == nil { - leftTimestamp = ts.UnixNano() - } else { - return nil, fmt.Errorf("invalid timestamp format: %s", leftKind.StringValue) - } - default: - return nil, fmt.Errorf("left operand must be a timestamp, got: %T", leftKind) - } - } else { - return nil, fmt.Errorf("left operand value is nil") - } - - // Convert right operand (should be interval in nanoseconds) - var intervalNanos int64 - if rightValue.Kind != nil { - switch rightKind := rightValue.Kind.(type) { - case *schema_pb.Value_Int64Value: - intervalNanos = rightKind.Int64Value - default: - return nil, fmt.Errorf("right operand must be an interval duration") - } - } else { - return nil, fmt.Errorf("right operand value is nil") - } - - // Perform arithmetic - var resultTimestamp int64 - switch operator { - case "+": - resultTimestamp = leftTimestamp + intervalNanos - case "-": - resultTimestamp = leftTimestamp - intervalNanos - default: - return nil, fmt.Errorf("unsupported timestamp arithmetic operator: %s", operator) - } - - // Return as timestamp - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: resultTimestamp}, - }, nil -} - -// evaluateColumnNameAsFunction handles function calls that were incorrectly parsed as column names -func (e *SQLEngine) evaluateColumnNameAsFunction(columnName string, result HybridScanResult) (*schema_pb.Value, error) { - // Simple parser for basic function calls like TRIM('hello world') - // Extract function name and argument - parenPos := strings.Index(columnName, "(") - if parenPos == -1 { - return nil, fmt.Errorf("invalid function format: %s", columnName) - } - - funcName := strings.ToUpper(strings.TrimSpace(columnName[:parenPos])) - argsString := columnName[parenPos+1:] - - // Find the closing parenthesis (handling nested quotes) - closeParen := strings.LastIndex(argsString, ")") - if closeParen == -1 { - return nil, fmt.Errorf("missing closing parenthesis in function: %s", columnName) - } - - argString := strings.TrimSpace(argsString[:closeParen]) - - // Parse the argument - for now handle simple cases - var argValue *schema_pb.Value - var err error - - if strings.HasPrefix(argString, "'") && strings.HasSuffix(argString, "'") { - // String literal argument - literal := strings.Trim(argString, "'") - argValue = &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: literal}} - } else if strings.Contains(argString, "(") && strings.Contains(argString, ")") { - // Nested function call - recursively evaluate it - argValue, err = e.evaluateColumnNameAsFunction(argString, result) - if err != nil { - return nil, fmt.Errorf("error evaluating nested function argument: %v", err) - } - } else { - // Column name or other expression - return nil, fmt.Errorf("unsupported argument type in function: %s", argString) - } - - if argValue == nil { - return nil, nil - } - - // Call the appropriate function - switch funcName { - case FuncUPPER: - return e.Upper(argValue) - case FuncLOWER: - return e.Lower(argValue) - case FuncLENGTH: - return e.Length(argValue) - case FuncTRIM, FuncBTRIM: // CockroachDB converts TRIM to BTRIM - return e.Trim(argValue) - case FuncLTRIM: - return e.LTrim(argValue) - case FuncRTRIM: - return e.RTrim(argValue) - // PostgreSQL-only: Use EXTRACT(YEAR FROM date) instead of YEAR(date) - default: - return nil, fmt.Errorf("unsupported function in column name: %s", funcName) - } -} - -// parseColumnLevelCalculation detects and parses arithmetic expressions that contain function calls -// This handles cases where the SQL parser incorrectly treats "LENGTH('hello') + 10" as a single ColName -func (e *SQLEngine) parseColumnLevelCalculation(expression string) *ArithmeticExpr { - // First check if this looks like an arithmetic expression - if !e.containsArithmeticOperator(expression) { - return nil - } - - // Build AST for the arithmetic expression - return e.buildArithmeticAST(expression) -} - -// containsArithmeticOperator checks if the expression contains arithmetic operators outside of function calls -func (e *SQLEngine) containsArithmeticOperator(expr string) bool { - operators := []string{"+", "-", "*", "/", "%", "||"} - - parenLevel := 0 - quoteLevel := false - - for i, char := range expr { - switch char { - case '(': - if !quoteLevel { - parenLevel++ - } - case ')': - if !quoteLevel { - parenLevel-- - } - case '\'': - quoteLevel = !quoteLevel - default: - // Only check for operators outside of parentheses and quotes - if parenLevel == 0 && !quoteLevel { - for _, op := range operators { - if strings.HasPrefix(expr[i:], op) { - return true - } - } - } - } - } - - return false -} - -// buildArithmeticAST builds an Abstract Syntax Tree for arithmetic expressions containing function calls -func (e *SQLEngine) buildArithmeticAST(expr string) *ArithmeticExpr { - // Remove leading/trailing spaces - expr = strings.TrimSpace(expr) - - // Find the main operator (outside of parentheses) - operators := []string{"||", "+", "-", "*", "/", "%"} // Order matters for precedence - - for _, op := range operators { - opPos := e.findMainOperator(expr, op) - if opPos != -1 { - leftExpr := strings.TrimSpace(expr[:opPos]) - rightExpr := strings.TrimSpace(expr[opPos+len(op):]) - - if leftExpr != "" && rightExpr != "" { - return &ArithmeticExpr{ - Left: e.parseASTExpressionNode(leftExpr), - Right: e.parseASTExpressionNode(rightExpr), - Operator: op, - } - } - } - } - - return nil -} - -// findMainOperator finds the position of an operator that's not inside parentheses or quotes -func (e *SQLEngine) findMainOperator(expr string, operator string) int { - parenLevel := 0 - quoteLevel := false - - for i := 0; i <= len(expr)-len(operator); i++ { - char := expr[i] - - switch char { - case '(': - if !quoteLevel { - parenLevel++ - } - case ')': - if !quoteLevel { - parenLevel-- - } - case '\'': - quoteLevel = !quoteLevel - default: - // Check for operator only at top level (not inside parentheses or quotes) - if parenLevel == 0 && !quoteLevel && strings.HasPrefix(expr[i:], operator) { - return i - } - } - } - - return -1 -} - -// parseASTExpressionNode parses an expression into the appropriate ExprNode type -func (e *SQLEngine) parseASTExpressionNode(expr string) ExprNode { - expr = strings.TrimSpace(expr) - - // Check if it's a function call (contains parentheses) - if strings.Contains(expr, "(") && strings.Contains(expr, ")") { - // This should be parsed as a function expression, but since our SQL parser - // has limitations, we'll create a special ColName that represents the function - return &ColName{Name: stringValue(expr)} - } - - // Check if it's a numeric literal - if _, err := strconv.ParseInt(expr, 10, 64); err == nil { - return &SQLVal{Type: IntVal, Val: []byte(expr)} - } - - if _, err := strconv.ParseFloat(expr, 64); err == nil { - return &SQLVal{Type: FloatVal, Val: []byte(expr)} - } - - // Check if it's a string literal - if strings.HasPrefix(expr, "'") && strings.HasSuffix(expr, "'") { - return &SQLVal{Type: StrVal, Val: []byte(strings.Trim(expr, "'"))} - } - - // Check for nested arithmetic expressions - if nestedArithmetic := e.buildArithmeticAST(expr); nestedArithmetic != nil { - return nestedArithmetic - } - - // Default to column name - return &ColName{Name: stringValue(expr)} -} diff --git a/weed/query/engine/engine_test.go b/weed/query/engine/engine_test.go deleted file mode 100644 index 96c5507b0..000000000 --- a/weed/query/engine/engine_test.go +++ /dev/null @@ -1,1392 +0,0 @@ -package engine - -import ( - "context" - "encoding/binary" - "errors" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "google.golang.org/protobuf/proto" -) - -// Mock implementations for testing -type MockHybridMessageScanner struct { - mock.Mock - topic topic.Topic -} - -func (m *MockHybridMessageScanner) ReadParquetStatistics(partitionPath string) ([]*ParquetFileStats, error) { - args := m.Called(partitionPath) - return args.Get(0).([]*ParquetFileStats), args.Error(1) -} - -type MockSQLEngine struct { - *SQLEngine - mockPartitions map[string][]string - mockParquetSourceFiles map[string]map[string]bool - mockLiveLogRowCounts map[string]int64 - mockColumnStats map[string]map[string]*ParquetColumnStats -} - -func NewMockSQLEngine() *MockSQLEngine { - return &MockSQLEngine{ - SQLEngine: &SQLEngine{ - catalog: &SchemaCatalog{ - databases: make(map[string]*DatabaseInfo), - currentDatabase: "test", - }, - }, - mockPartitions: make(map[string][]string), - mockParquetSourceFiles: make(map[string]map[string]bool), - mockLiveLogRowCounts: make(map[string]int64), - mockColumnStats: make(map[string]map[string]*ParquetColumnStats), - } -} - -func (m *MockSQLEngine) discoverTopicPartitions(namespace, topicName string) ([]string, error) { - key := namespace + "." + topicName - if partitions, exists := m.mockPartitions[key]; exists { - return partitions, nil - } - return []string{"partition-1", "partition-2"}, nil -} - -func (m *MockSQLEngine) extractParquetSourceFiles(fileStats []*ParquetFileStats) map[string]bool { - if len(fileStats) == 0 { - return make(map[string]bool) - } - return map[string]bool{"converted-log-1": true} -} - -func (m *MockSQLEngine) countLiveLogRowsExcludingParquetSources(ctx context.Context, partition string, parquetSources map[string]bool) (int64, error) { - if count, exists := m.mockLiveLogRowCounts[partition]; exists { - return count, nil - } - return 25, nil -} - -func (m *MockSQLEngine) computeLiveLogMinMax(partition, column string, parquetSources map[string]bool) (interface{}, interface{}, error) { - switch column { - case "id": - return int64(1), int64(50), nil - case "value": - return 10.5, 99.9, nil - default: - return nil, nil, nil - } -} - -func (m *MockSQLEngine) getSystemColumnGlobalMin(column string, allFileStats map[string][]*ParquetFileStats) interface{} { - return int64(1000000000) -} - -func (m *MockSQLEngine) getSystemColumnGlobalMax(column string, allFileStats map[string][]*ParquetFileStats) interface{} { - return int64(2000000000) -} - -func createMockColumnStats(column string, minVal, maxVal interface{}) *ParquetColumnStats { - return &ParquetColumnStats{ - ColumnName: column, - MinValue: convertToSchemaValue(minVal), - MaxValue: convertToSchemaValue(maxVal), - NullCount: 0, - } -} - -func convertToSchemaValue(val interface{}) *schema_pb.Value { - switch v := val.(type) { - case int64: - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: v}} - case float64: - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: v}} - case string: - return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: v}} - } - return nil -} - -// Test FastPathOptimizer -func TestFastPathOptimizer_DetermineStrategy(t *testing.T) { - engine := NewMockSQLEngine() - optimizer := NewFastPathOptimizer(engine.SQLEngine) - - tests := []struct { - name string - aggregations []AggregationSpec - expected AggregationStrategy - }{ - { - name: "Supported aggregations", - aggregations: []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - {Function: FuncMAX, Column: "id"}, - {Function: FuncMIN, Column: "value"}, - }, - expected: AggregationStrategy{ - CanUseFastPath: true, - Reason: "all_aggregations_supported", - UnsupportedSpecs: []AggregationSpec{}, - }, - }, - { - name: "Unsupported aggregation", - aggregations: []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - {Function: FuncAVG, Column: "value"}, // Not supported - }, - expected: AggregationStrategy{ - CanUseFastPath: false, - Reason: "unsupported_aggregation_functions", - }, - }, - { - name: "Empty aggregations", - aggregations: []AggregationSpec{}, - expected: AggregationStrategy{ - CanUseFastPath: true, - Reason: "all_aggregations_supported", - UnsupportedSpecs: []AggregationSpec{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - strategy := optimizer.DetermineStrategy(tt.aggregations) - - assert.Equal(t, tt.expected.CanUseFastPath, strategy.CanUseFastPath) - assert.Equal(t, tt.expected.Reason, strategy.Reason) - if !tt.expected.CanUseFastPath { - assert.NotEmpty(t, strategy.UnsupportedSpecs) - } - }) - } -} - -// Test AggregationComputer -func TestAggregationComputer_ComputeFastPathAggregations(t *testing.T) { - engine := NewMockSQLEngine() - computer := NewAggregationComputer(engine.SQLEngine) - - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/topic1/partition-1": { - { - RowCount: 30, - ColumnStats: map[string]*ParquetColumnStats{ - "id": createMockColumnStats("id", int64(10), int64(40)), - }, - }, - }, - }, - ParquetRowCount: 30, - LiveLogRowCount: 25, - PartitionsCount: 1, - } - - partitions := []string{"/topics/test/topic1/partition-1"} - - tests := []struct { - name string - aggregations []AggregationSpec - validate func(t *testing.T, results []AggregationResult) - }{ - { - name: "COUNT aggregation", - aggregations: []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - }, - validate: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 1) - assert.Equal(t, int64(55), results[0].Count) // 30 + 25 - }, - }, - { - name: "MAX aggregation", - aggregations: []AggregationSpec{ - {Function: FuncMAX, Column: "id"}, - }, - validate: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 1) - // Should be max of parquet stats (40) - mock doesn't combine with live log - assert.Equal(t, int64(40), results[0].Max) - }, - }, - { - name: "MIN aggregation", - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "id"}, - }, - validate: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 1) - // Should be min of parquet stats (10) - mock doesn't combine with live log - assert.Equal(t, int64(10), results[0].Min) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - results, err := computer.ComputeFastPathAggregations(ctx, tt.aggregations, dataSources, partitions) - - assert.NoError(t, err) - tt.validate(t, results) - }) - } -} - -// Test case-insensitive column lookup and null handling for MIN/MAX aggregations -func TestAggregationComputer_MinMaxEdgeCases(t *testing.T) { - engine := NewMockSQLEngine() - computer := NewAggregationComputer(engine.SQLEngine) - - tests := []struct { - name string - dataSources *TopicDataSources - aggregations []AggregationSpec - validate func(t *testing.T, results []AggregationResult, err error) - }{ - { - name: "Case insensitive column lookup", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 50, - ColumnStats: map[string]*ParquetColumnStats{ - "ID": createMockColumnStats("ID", int64(5), int64(95)), // Uppercase column name - }, - }, - }, - }, - ParquetRowCount: 50, - LiveLogRowCount: 0, - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "id"}, // lowercase column name - {Function: FuncMAX, Column: "id"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - assert.Equal(t, int64(5), results[0].Min, "MIN should work with case-insensitive lookup") - assert.Equal(t, int64(95), results[1].Max, "MAX should work with case-insensitive lookup") - }, - }, - { - name: "Null column stats handling", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 50, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: nil, // Null min value - MaxValue: nil, // Null max value - NullCount: 50, - RowCount: 50, - }, - }, - }, - }, - }, - ParquetRowCount: 50, - LiveLogRowCount: 0, - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "id"}, - {Function: FuncMAX, Column: "id"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - // When stats are null, should fall back to system column or return nil - // This tests that we don't crash on null stats - }, - }, - { - name: "Mixed data types - string column", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 30, - ColumnStats: map[string]*ParquetColumnStats{ - "name": createMockColumnStats("name", "Alice", "Zoe"), - }, - }, - }, - }, - ParquetRowCount: 30, - LiveLogRowCount: 0, - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "name"}, - {Function: FuncMAX, Column: "name"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - assert.Equal(t, "Alice", results[0].Min) - assert.Equal(t, "Zoe", results[1].Max) - }, - }, - { - name: "Mixed data types - float column", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 25, - ColumnStats: map[string]*ParquetColumnStats{ - "price": createMockColumnStats("price", float64(19.99), float64(299.50)), - }, - }, - }, - }, - ParquetRowCount: 25, - LiveLogRowCount: 0, - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "price"}, - {Function: FuncMAX, Column: "price"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - assert.Equal(t, float64(19.99), results[0].Min) - assert.Equal(t, float64(299.50), results[1].Max) - }, - }, - { - name: "Column not found in parquet stats", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 20, - ColumnStats: map[string]*ParquetColumnStats{ - "id": createMockColumnStats("id", int64(1), int64(100)), - // Note: "nonexistent_column" is not in stats - }, - }, - }, - }, - ParquetRowCount: 20, - LiveLogRowCount: 10, // Has live logs to fall back to - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "nonexistent_column"}, - {Function: FuncMAX, Column: "nonexistent_column"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - // Should fall back to live log processing or return nil - // The key is that it shouldn't crash - }, - }, - { - name: "Multiple parquet files with different ranges", - dataSources: &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/partition-1": { - { - RowCount: 30, - ColumnStats: map[string]*ParquetColumnStats{ - "score": createMockColumnStats("score", int64(10), int64(50)), - }, - }, - { - RowCount: 40, - ColumnStats: map[string]*ParquetColumnStats{ - "score": createMockColumnStats("score", int64(5), int64(75)), // Lower min, higher max - }, - }, - }, - }, - ParquetRowCount: 70, - LiveLogRowCount: 0, - PartitionsCount: 1, - }, - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "score"}, - {Function: FuncMAX, Column: "score"}, - }, - validate: func(t *testing.T, results []AggregationResult, err error) { - assert.NoError(t, err) - assert.Len(t, results, 2) - assert.Equal(t, int64(5), results[0].Min, "Should find global minimum across all files") - assert.Equal(t, int64(75), results[1].Max, "Should find global maximum across all files") - }, - }, - } - - partitions := []string{"/topics/test/partition-1"} - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - results, err := computer.ComputeFastPathAggregations(ctx, tt.aggregations, tt.dataSources, partitions) - tt.validate(t, results, err) - }) - } -} - -// Test the specific bug where MIN/MAX was returning empty values -func TestAggregationComputer_MinMaxEmptyValuesBugFix(t *testing.T) { - engine := NewMockSQLEngine() - computer := NewAggregationComputer(engine.SQLEngine) - - // This test specifically addresses the bug where MIN/MAX returned empty - // due to improper null checking and extraction logic - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/test-topic/partition1": { - { - RowCount: 100, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: 0}}, // Min should be 0 - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: 99}}, // Max should be 99 - NullCount: 0, - RowCount: 100, - }, - }, - }, - }, - }, - ParquetRowCount: 100, - LiveLogRowCount: 0, // No live logs, pure parquet stats - PartitionsCount: 1, - } - - partitions := []string{"/topics/test/test-topic/partition1"} - - tests := []struct { - name string - aggregSpec AggregationSpec - expected interface{} - }{ - { - name: "MIN should return 0 not empty", - aggregSpec: AggregationSpec{Function: FuncMIN, Column: "id"}, - expected: int32(0), // Should extract the actual minimum value - }, - { - name: "MAX should return 99 not empty", - aggregSpec: AggregationSpec{Function: FuncMAX, Column: "id"}, - expected: int32(99), // Should extract the actual maximum value - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - results, err := computer.ComputeFastPathAggregations(ctx, []AggregationSpec{tt.aggregSpec}, dataSources, partitions) - - assert.NoError(t, err) - assert.Len(t, results, 1) - - // Verify the result is not nil/empty - if tt.aggregSpec.Function == FuncMIN { - assert.NotNil(t, results[0].Min, "MIN result should not be nil") - assert.Equal(t, tt.expected, results[0].Min) - } else if tt.aggregSpec.Function == FuncMAX { - assert.NotNil(t, results[0].Max, "MAX result should not be nil") - assert.Equal(t, tt.expected, results[0].Max) - } - }) - } -} - -// Test the formatAggregationResult function with MIN/MAX edge cases -func TestSQLEngine_FormatAggregationResult_MinMax(t *testing.T) { - engine := NewTestSQLEngine() - - tests := []struct { - name string - spec AggregationSpec - result AggregationResult - expected string - }{ - { - name: "MIN with zero value should not be empty", - spec: AggregationSpec{Function: FuncMIN, Column: "id"}, - result: AggregationResult{Min: int32(0)}, - expected: "0", - }, - { - name: "MAX with large value", - spec: AggregationSpec{Function: FuncMAX, Column: "id"}, - result: AggregationResult{Max: int32(99)}, - expected: "99", - }, - { - name: "MIN with negative value", - spec: AggregationSpec{Function: FuncMIN, Column: "score"}, - result: AggregationResult{Min: int64(-50)}, - expected: "-50", - }, - { - name: "MAX with float value", - spec: AggregationSpec{Function: FuncMAX, Column: "price"}, - result: AggregationResult{Max: float64(299.99)}, - expected: "299.99", - }, - { - name: "MIN with string value", - spec: AggregationSpec{Function: FuncMIN, Column: "name"}, - result: AggregationResult{Min: "Alice"}, - expected: "Alice", - }, - { - name: "MIN with nil should return NULL", - spec: AggregationSpec{Function: FuncMIN, Column: "missing"}, - result: AggregationResult{Min: nil}, - expected: "", // NULL values display as empty - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sqlValue := engine.formatAggregationResult(tt.spec, tt.result) - assert.Equal(t, tt.expected, sqlValue.String()) - }) - } -} - -// Test the direct formatAggregationResult scenario that was originally broken -func TestSQLEngine_MinMaxBugFixIntegration(t *testing.T) { - // This test focuses on the core bug fix without the complexity of table discovery - // It directly tests the scenario where MIN/MAX returned empty due to the bug - - engine := NewTestSQLEngine() - - // Test the direct formatting path that was failing - tests := []struct { - name string - aggregSpec AggregationSpec - aggResult AggregationResult - expectedEmpty bool - expectedValue string - }{ - { - name: "MIN with zero should not be empty (the original bug)", - aggregSpec: AggregationSpec{Function: FuncMIN, Column: "id", Alias: "MIN(id)"}, - aggResult: AggregationResult{Min: int32(0)}, // This was returning empty before fix - expectedEmpty: false, - expectedValue: "0", - }, - { - name: "MAX with valid value should not be empty", - aggregSpec: AggregationSpec{Function: FuncMAX, Column: "id", Alias: "MAX(id)"}, - aggResult: AggregationResult{Max: int32(99)}, - expectedEmpty: false, - expectedValue: "99", - }, - { - name: "MIN with negative value should work", - aggregSpec: AggregationSpec{Function: FuncMIN, Column: "score", Alias: "MIN(score)"}, - aggResult: AggregationResult{Min: int64(-10)}, - expectedEmpty: false, - expectedValue: "-10", - }, - { - name: "MIN with nil should be empty (expected behavior)", - aggregSpec: AggregationSpec{Function: FuncMIN, Column: "missing", Alias: "MIN(missing)"}, - aggResult: AggregationResult{Min: nil}, - expectedEmpty: true, - expectedValue: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test the formatAggregationResult function directly - sqlValue := engine.formatAggregationResult(tt.aggregSpec, tt.aggResult) - result := sqlValue.String() - - if tt.expectedEmpty { - assert.Empty(t, result, "Result should be empty for nil values") - } else { - assert.NotEmpty(t, result, "Result should not be empty") - assert.Equal(t, tt.expectedValue, result) - } - }) - } -} - -// Test the tryFastParquetAggregation method specifically for the bug -func TestSQLEngine_FastParquetAggregationBugFix(t *testing.T) { - // This test verifies that the fast path aggregation logic works correctly - // and doesn't return nil/empty values when it should return actual data - - engine := NewMockSQLEngine() - computer := NewAggregationComputer(engine.SQLEngine) - - // Create realistic data sources that mimic the user's scenario - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/test-topic/v2025-09-01-22-54-02/0000-0630": { - { - RowCount: 100, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: 0}}, - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: 99}}, - NullCount: 0, - RowCount: 100, - }, - }, - }, - }, - }, - ParquetRowCount: 100, - LiveLogRowCount: 0, // Pure parquet scenario - PartitionsCount: 1, - } - - partitions := []string{"/topics/test/test-topic/v2025-09-01-22-54-02/0000-0630"} - - tests := []struct { - name string - aggregations []AggregationSpec - validateResults func(t *testing.T, results []AggregationResult) - }{ - { - name: "Single MIN aggregation should return value not nil", - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "id", Alias: "MIN(id)"}, - }, - validateResults: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 1) - assert.NotNil(t, results[0].Min, "MIN result should not be nil") - assert.Equal(t, int32(0), results[0].Min, "MIN should return the correct minimum value") - }, - }, - { - name: "Single MAX aggregation should return value not nil", - aggregations: []AggregationSpec{ - {Function: FuncMAX, Column: "id", Alias: "MAX(id)"}, - }, - validateResults: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 1) - assert.NotNil(t, results[0].Max, "MAX result should not be nil") - assert.Equal(t, int32(99), results[0].Max, "MAX should return the correct maximum value") - }, - }, - { - name: "Combined MIN/MAX should both return values", - aggregations: []AggregationSpec{ - {Function: FuncMIN, Column: "id", Alias: "MIN(id)"}, - {Function: FuncMAX, Column: "id", Alias: "MAX(id)"}, - }, - validateResults: func(t *testing.T, results []AggregationResult) { - assert.Len(t, results, 2) - assert.NotNil(t, results[0].Min, "MIN result should not be nil") - assert.NotNil(t, results[1].Max, "MAX result should not be nil") - assert.Equal(t, int32(0), results[0].Min) - assert.Equal(t, int32(99), results[1].Max) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - results, err := computer.ComputeFastPathAggregations(ctx, tt.aggregations, dataSources, partitions) - - assert.NoError(t, err, "ComputeFastPathAggregations should not error") - tt.validateResults(t, results) - }) - } -} - -// Test ExecutionPlanBuilder -func TestExecutionPlanBuilder_BuildAggregationPlan(t *testing.T) { - engine := NewMockSQLEngine() - builder := NewExecutionPlanBuilder(engine.SQLEngine) - - // Parse a simple SELECT statement using the native parser - stmt, err := ParseSQL("SELECT COUNT(*) FROM test_topic") - assert.NoError(t, err) - selectStmt := stmt.(*SelectStatement) - - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - } - - strategy := AggregationStrategy{ - CanUseFastPath: true, - Reason: "all_aggregations_supported", - } - - dataSources := &TopicDataSources{ - ParquetRowCount: 100, - LiveLogRowCount: 50, - PartitionsCount: 3, - ParquetFiles: map[string][]*ParquetFileStats{ - "partition-1": {{RowCount: 50}}, - "partition-2": {{RowCount: 50}}, - }, - } - - plan := builder.BuildAggregationPlan(selectStmt, aggregations, strategy, dataSources) - - assert.Equal(t, "SELECT", plan.QueryType) - assert.Equal(t, "hybrid_fast_path", plan.ExecutionStrategy) - assert.Contains(t, plan.DataSources, "parquet_stats") - assert.Contains(t, plan.DataSources, "live_logs") - assert.Equal(t, 3, plan.PartitionsScanned) - assert.Equal(t, 2, plan.ParquetFilesScanned) - assert.Contains(t, plan.OptimizationsUsed, "parquet_statistics") - assert.Equal(t, []string{"COUNT(*)"}, plan.Aggregations) - assert.Equal(t, int64(50), plan.TotalRowsProcessed) // Only live logs scanned -} - -// Test Error Types -func TestErrorTypes(t *testing.T) { - t.Run("AggregationError", func(t *testing.T) { - err := AggregationError{ - Operation: "MAX", - Column: "id", - Cause: errors.New("column not found"), - } - - expected := "aggregation error in MAX(id): column not found" - assert.Equal(t, expected, err.Error()) - }) - - t.Run("DataSourceError", func(t *testing.T) { - err := DataSourceError{ - Source: "partition_discovery:test.topic1", - Cause: errors.New("network timeout"), - } - - expected := "data source error in partition_discovery:test.topic1: network timeout" - assert.Equal(t, expected, err.Error()) - }) - - t.Run("OptimizationError", func(t *testing.T) { - err := OptimizationError{ - Strategy: "fast_path_aggregation", - Reason: "unsupported function: AVG", - } - - expected := "optimization failed for fast_path_aggregation: unsupported function: AVG" - assert.Equal(t, expected, err.Error()) - }) -} - -// Integration Tests -func TestIntegration_FastPathOptimization(t *testing.T) { - engine := NewMockSQLEngine() - - // Setup components - optimizer := NewFastPathOptimizer(engine.SQLEngine) - computer := NewAggregationComputer(engine.SQLEngine) - - // Mock data setup - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - {Function: FuncMAX, Column: "id"}, - } - - // Step 1: Determine strategy - strategy := optimizer.DetermineStrategy(aggregations) - assert.True(t, strategy.CanUseFastPath) - - // Step 2: Mock data sources - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/topic1/partition-1": {{ - RowCount: 75, - ColumnStats: map[string]*ParquetColumnStats{ - "id": createMockColumnStats("id", int64(1), int64(100)), - }, - }}, - }, - ParquetRowCount: 75, - LiveLogRowCount: 25, - PartitionsCount: 1, - } - - partitions := []string{"/topics/test/topic1/partition-1"} - - // Step 3: Compute aggregations - ctx := context.Background() - results, err := computer.ComputeFastPathAggregations(ctx, aggregations, dataSources, partitions) - assert.NoError(t, err) - assert.Len(t, results, 2) - assert.Equal(t, int64(100), results[0].Count) // 75 + 25 - assert.Equal(t, int64(100), results[1].Max) // From parquet stats mock -} - -func TestIntegration_FallbackToFullScan(t *testing.T) { - engine := NewMockSQLEngine() - optimizer := NewFastPathOptimizer(engine.SQLEngine) - - // Unsupported aggregations - aggregations := []AggregationSpec{ - {Function: "AVG", Column: "value"}, // Not supported - } - - // Step 1: Strategy should reject fast path - strategy := optimizer.DetermineStrategy(aggregations) - assert.False(t, strategy.CanUseFastPath) - assert.Equal(t, "unsupported_aggregation_functions", strategy.Reason) - assert.NotEmpty(t, strategy.UnsupportedSpecs) -} - -// Benchmark Tests -func BenchmarkFastPathOptimizer_DetermineStrategy(b *testing.B) { - engine := NewMockSQLEngine() - optimizer := NewFastPathOptimizer(engine.SQLEngine) - - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - {Function: FuncMAX, Column: "id"}, - {Function: "MIN", Column: "value"}, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - strategy := optimizer.DetermineStrategy(aggregations) - _ = strategy.CanUseFastPath - } -} - -func BenchmarkAggregationComputer_ComputeFastPathAggregations(b *testing.B) { - engine := NewMockSQLEngine() - computer := NewAggregationComputer(engine.SQLEngine) - - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "partition-1": {{ - RowCount: 1000, - ColumnStats: map[string]*ParquetColumnStats{ - "id": createMockColumnStats("id", int64(1), int64(1000)), - }, - }}, - }, - ParquetRowCount: 1000, - LiveLogRowCount: 100, - } - - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*"}, - {Function: FuncMAX, Column: "id"}, - } - - partitions := []string{"partition-1"} - ctx := context.Background() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - results, err := computer.ComputeFastPathAggregations(ctx, aggregations, dataSources, partitions) - if err != nil { - b.Fatal(err) - } - _ = results - } -} - -// Tests for convertLogEntryToRecordValue - Protocol Buffer parsing bug fix -func TestSQLEngine_ConvertLogEntryToRecordValue_ValidProtobuf(t *testing.T) { - engine := NewTestSQLEngine() - - // Create a valid RecordValue protobuf with user data - originalRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 42}}, - "name": {Kind: &schema_pb.Value_StringValue{StringValue: "test-user"}}, - "score": {Kind: &schema_pb.Value_DoubleValue{DoubleValue: 95.5}}, - }, - } - - // Serialize the protobuf (this is what MQ actually stores) - protobufData, err := proto.Marshal(originalRecord) - assert.NoError(t, err) - - // Create a LogEntry with the serialized data - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, // 2021-01-01 00:00:00 UTC - PartitionKeyHash: 123, - Data: protobufData, // Protocol buffer data (not JSON!) - Key: []byte("test-key-001"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Verify no error - assert.NoError(t, err) - assert.Equal(t, "live_log", source) - assert.NotNil(t, result) - assert.NotNil(t, result.Fields) - - // Verify system columns are added correctly - assert.Contains(t, result.Fields, SW_COLUMN_NAME_TIMESTAMP) - assert.Contains(t, result.Fields, SW_COLUMN_NAME_KEY) - assert.Equal(t, int64(1609459200000000000), result.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value()) - assert.Equal(t, []byte("test-key-001"), result.Fields[SW_COLUMN_NAME_KEY].GetBytesValue()) - - // Verify user data is preserved - assert.Contains(t, result.Fields, "id") - assert.Contains(t, result.Fields, "name") - assert.Contains(t, result.Fields, "score") - assert.Equal(t, int32(42), result.Fields["id"].GetInt32Value()) - assert.Equal(t, "test-user", result.Fields["name"].GetStringValue()) - assert.Equal(t, 95.5, result.Fields["score"].GetDoubleValue()) -} - -func TestSQLEngine_ConvertLogEntryToRecordValue_InvalidProtobuf(t *testing.T) { - engine := NewTestSQLEngine() - - // Create LogEntry with invalid protobuf data (this would cause the original JSON parsing bug) - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, - PartitionKeyHash: 123, - Data: []byte{0x17, 0x00, 0xFF, 0xFE}, // Invalid protobuf data (starts with \x17 like in the original error) - Key: []byte("test-key"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Should return error for invalid protobuf - assert.Error(t, err) - assert.Contains(t, err.Error(), "failed to unmarshal log entry protobuf") - assert.Nil(t, result) - assert.Empty(t, source) -} - -func TestSQLEngine_ConvertLogEntryToRecordValue_EmptyProtobuf(t *testing.T) { - engine := NewTestSQLEngine() - - // Create a minimal valid RecordValue (empty fields) - emptyRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{}, - } - protobufData, err := proto.Marshal(emptyRecord) - assert.NoError(t, err) - - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, - PartitionKeyHash: 456, - Data: protobufData, - Key: []byte("empty-key"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Should succeed and add system columns - assert.NoError(t, err) - assert.Equal(t, "live_log", source) - assert.NotNil(t, result) - assert.NotNil(t, result.Fields) - - // Should have system columns - assert.Contains(t, result.Fields, SW_COLUMN_NAME_TIMESTAMP) - assert.Contains(t, result.Fields, SW_COLUMN_NAME_KEY) - assert.Equal(t, int64(1609459200000000000), result.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value()) - assert.Equal(t, []byte("empty-key"), result.Fields[SW_COLUMN_NAME_KEY].GetBytesValue()) - - // Should have no user fields - userFieldCount := 0 - for fieldName := range result.Fields { - if fieldName != SW_COLUMN_NAME_TIMESTAMP && fieldName != SW_COLUMN_NAME_KEY { - userFieldCount++ - } - } - assert.Equal(t, 0, userFieldCount) -} - -func TestSQLEngine_ConvertLogEntryToRecordValue_NilFieldsMap(t *testing.T) { - engine := NewTestSQLEngine() - - // Create RecordValue with nil Fields map (edge case) - recordWithNilFields := &schema_pb.RecordValue{ - Fields: nil, // This should be handled gracefully - } - protobufData, err := proto.Marshal(recordWithNilFields) - assert.NoError(t, err) - - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, - PartitionKeyHash: 789, - Data: protobufData, - Key: []byte("nil-fields-key"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Should succeed and create Fields map - assert.NoError(t, err) - assert.Equal(t, "live_log", source) - assert.NotNil(t, result) - assert.NotNil(t, result.Fields) // Should be created by the function - - // Should have system columns - assert.Contains(t, result.Fields, SW_COLUMN_NAME_TIMESTAMP) - assert.Contains(t, result.Fields, SW_COLUMN_NAME_KEY) - assert.Equal(t, int64(1609459200000000000), result.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value()) - assert.Equal(t, []byte("nil-fields-key"), result.Fields[SW_COLUMN_NAME_KEY].GetBytesValue()) -} - -func TestSQLEngine_ConvertLogEntryToRecordValue_SystemColumnOverride(t *testing.T) { - engine := NewTestSQLEngine() - - // Create RecordValue that already has system column names (should be overridden) - recordWithSystemCols := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "user_field": {Kind: &schema_pb.Value_StringValue{StringValue: "user-data"}}, - SW_COLUMN_NAME_TIMESTAMP: {Kind: &schema_pb.Value_Int64Value{Int64Value: 999999999}}, // Should be overridden - SW_COLUMN_NAME_KEY: {Kind: &schema_pb.Value_StringValue{StringValue: "old-key"}}, // Should be overridden - }, - } - protobufData, err := proto.Marshal(recordWithSystemCols) - assert.NoError(t, err) - - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, - PartitionKeyHash: 100, - Data: protobufData, - Key: []byte("actual-key"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Should succeed - assert.NoError(t, err) - assert.Equal(t, "live_log", source) - assert.NotNil(t, result) - - // System columns should use LogEntry values, not protobuf values - assert.Equal(t, int64(1609459200000000000), result.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value()) - assert.Equal(t, []byte("actual-key"), result.Fields[SW_COLUMN_NAME_KEY].GetBytesValue()) - - // User field should be preserved - assert.Contains(t, result.Fields, "user_field") - assert.Equal(t, "user-data", result.Fields["user_field"].GetStringValue()) -} - -func TestSQLEngine_ConvertLogEntryToRecordValue_ComplexDataTypes(t *testing.T) { - engine := NewTestSQLEngine() - - // Test with various data types - complexRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "int32_field": {Kind: &schema_pb.Value_Int32Value{Int32Value: -42}}, - "int64_field": {Kind: &schema_pb.Value_Int64Value{Int64Value: 9223372036854775807}}, - "float_field": {Kind: &schema_pb.Value_FloatValue{FloatValue: 3.14159}}, - "double_field": {Kind: &schema_pb.Value_DoubleValue{DoubleValue: 2.718281828}}, - "bool_field": {Kind: &schema_pb.Value_BoolValue{BoolValue: true}}, - "string_field": {Kind: &schema_pb.Value_StringValue{StringValue: "test string with unicode party"}}, - "bytes_field": {Kind: &schema_pb.Value_BytesValue{BytesValue: []byte{0x01, 0x02, 0x03}}}, - }, - } - protobufData, err := proto.Marshal(complexRecord) - assert.NoError(t, err) - - logEntry := &filer_pb.LogEntry{ - TsNs: 1609459200000000000, - PartitionKeyHash: 200, - Data: protobufData, - Key: []byte("complex-key"), - } - - // Test the conversion - result, source, err := engine.convertLogEntryToRecordValue(logEntry) - - // Should succeed - assert.NoError(t, err) - assert.Equal(t, "live_log", source) - assert.NotNil(t, result) - - // Verify all data types are preserved - assert.Equal(t, int32(-42), result.Fields["int32_field"].GetInt32Value()) - assert.Equal(t, int64(9223372036854775807), result.Fields["int64_field"].GetInt64Value()) - assert.Equal(t, float32(3.14159), result.Fields["float_field"].GetFloatValue()) - assert.Equal(t, 2.718281828, result.Fields["double_field"].GetDoubleValue()) - assert.Equal(t, true, result.Fields["bool_field"].GetBoolValue()) - assert.Equal(t, "test string with unicode party", result.Fields["string_field"].GetStringValue()) - assert.Equal(t, []byte{0x01, 0x02, 0x03}, result.Fields["bytes_field"].GetBytesValue()) - - // System columns should still be present - assert.Contains(t, result.Fields, SW_COLUMN_NAME_TIMESTAMP) - assert.Contains(t, result.Fields, SW_COLUMN_NAME_KEY) -} - -// Tests for log buffer deduplication functionality -func TestSQLEngine_GetLogBufferStartFromFile_BinaryFormat(t *testing.T) { - engine := NewTestSQLEngine() - - // Create sample buffer start (binary format) - bufferStartBytes := make([]byte, 8) - binary.BigEndian.PutUint64(bufferStartBytes, uint64(1609459100000000001)) - - // Create file entry with buffer start + some chunks - entry := &filer_pb.Entry{ - Name: "test-log-file", - Extended: map[string][]byte{ - "buffer_start": bufferStartBytes, - }, - Chunks: []*filer_pb.FileChunk{ - {FileId: "chunk1", Offset: 0, Size: 1000}, - {FileId: "chunk2", Offset: 1000, Size: 1000}, - {FileId: "chunk3", Offset: 2000, Size: 1000}, - }, - } - - // Test extraction - result, err := engine.getLogBufferStartFromFile(entry) - assert.NoError(t, err) - assert.NotNil(t, result) - assert.Equal(t, int64(1609459100000000001), result.StartIndex) - - // Test extraction works correctly with the binary format -} - -func TestSQLEngine_GetLogBufferStartFromFile_NoMetadata(t *testing.T) { - engine := NewTestSQLEngine() - - // Create file entry without buffer start - entry := &filer_pb.Entry{ - Name: "test-log-file", - Extended: nil, - } - - // Test extraction - result, err := engine.getLogBufferStartFromFile(entry) - assert.NoError(t, err) - assert.Nil(t, result) -} - -func TestSQLEngine_GetLogBufferStartFromFile_InvalidData(t *testing.T) { - engine := NewTestSQLEngine() - - // Create file entry with invalid buffer start (wrong size) - entry := &filer_pb.Entry{ - Name: "test-log-file", - Extended: map[string][]byte{ - "buffer_start": []byte("invalid-binary"), - }, - } - - // Test extraction - result, err := engine.getLogBufferStartFromFile(entry) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid buffer_start format: expected 8 bytes") - assert.Nil(t, result) -} - -func TestSQLEngine_BuildLogBufferDeduplicationMap_NoBrokerClient(t *testing.T) { - engine := NewTestSQLEngine() - engine.catalog.brokerClient = nil // Simulate no broker client - - ctx := context.Background() - result, err := engine.buildLogBufferDeduplicationMap(ctx, "/topics/test/test-topic") - - assert.NoError(t, err) - assert.NotNil(t, result) - assert.Empty(t, result) -} - -func TestSQLEngine_LogBufferDeduplication_ServerRestartScenario(t *testing.T) { - // Simulate scenario: Buffer indexes are now initialized with process start time - // This tests that buffer start indexes are globally unique across server restarts - - // Before server restart: Process 1 buffer start (3 chunks) - beforeRestartStart := LogBufferStart{ - StartIndex: 1609459100000000000, // Process 1 start time - } - - // After server restart: Process 2 buffer start (3 chunks) - afterRestartStart := LogBufferStart{ - StartIndex: 1609459300000000000, // Process 2 start time (DIFFERENT) - } - - // Simulate 3 chunks for each file - chunkCount := int64(3) - - // Calculate end indexes for range comparison - beforeEnd := beforeRestartStart.StartIndex + chunkCount - 1 // [start, start+2] - afterStart := afterRestartStart.StartIndex // [start, start+2] - - // Test range overlap detection (should NOT overlap) - overlaps := beforeRestartStart.StartIndex <= (afterStart+chunkCount-1) && beforeEnd >= afterStart - assert.False(t, overlaps, "Buffer ranges after restart should not overlap") - - // Verify the start indexes are globally unique - assert.NotEqual(t, beforeRestartStart.StartIndex, afterRestartStart.StartIndex, "Start indexes should be different") - assert.Less(t, beforeEnd, afterStart, "Ranges should be completely separate") - - // Expected values: - // Before restart: [1609459100000000000, 1609459100000000002] - // After restart: [1609459300000000000, 1609459300000000002] - expectedBeforeEnd := int64(1609459100000000002) - expectedAfterStart := int64(1609459300000000000) - - assert.Equal(t, expectedBeforeEnd, beforeEnd) - assert.Equal(t, expectedAfterStart, afterStart) - - // This demonstrates that buffer start indexes initialized with process start time - // prevent false positive duplicates across server restarts -} - -func TestBrokerClient_BinaryBufferStartFormat(t *testing.T) { - // Test scenario: getBufferStartFromEntry should only support binary format - // This tests the standardized binary format for buffer_start metadata - realBrokerClient := &BrokerClient{} - - // Test binary format (used by both log files and Parquet files) - binaryEntry := &filer_pb.Entry{ - Name: "2025-01-07-14-30-45", - IsDirectory: false, - Extended: map[string][]byte{ - "buffer_start": func() []byte { - // Binary format: 8-byte BigEndian - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(2000001)) - return buf - }(), - }, - } - - bufferStart := realBrokerClient.getBufferStartFromEntry(binaryEntry) - assert.NotNil(t, bufferStart) - assert.Equal(t, int64(2000001), bufferStart.StartIndex, "Should parse binary buffer_start metadata") - - // Test Parquet file (same binary format) - parquetEntry := &filer_pb.Entry{ - Name: "2025-01-07-14-30.parquet", - IsDirectory: false, - Extended: map[string][]byte{ - "buffer_start": func() []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(1500001)) - return buf - }(), - }, - } - - bufferStart = realBrokerClient.getBufferStartFromEntry(parquetEntry) - assert.NotNil(t, bufferStart) - assert.Equal(t, int64(1500001), bufferStart.StartIndex, "Should parse binary buffer_start from Parquet file") - - // Test missing metadata - emptyEntry := &filer_pb.Entry{ - Name: "no-metadata", - IsDirectory: false, - Extended: nil, - } - - bufferStart = realBrokerClient.getBufferStartFromEntry(emptyEntry) - assert.Nil(t, bufferStart, "Should return nil for entry without buffer_start metadata") - - // Test invalid format (wrong size) - invalidEntry := &filer_pb.Entry{ - Name: "invalid-metadata", - IsDirectory: false, - Extended: map[string][]byte{ - "buffer_start": []byte("invalid"), - }, - } - - bufferStart = realBrokerClient.getBufferStartFromEntry(invalidEntry) - assert.Nil(t, bufferStart, "Should return nil for invalid buffer_start metadata") -} - -// TestGetSQLValAlias tests the getSQLValAlias function, particularly for SQL injection prevention -func TestGetSQLValAlias(t *testing.T) { - engine := &SQLEngine{} - - tests := []struct { - name string - sqlVal *SQLVal - expected string - desc string - }{ - { - name: "simple string", - sqlVal: &SQLVal{ - Type: StrVal, - Val: []byte("hello"), - }, - expected: "'hello'", - desc: "Simple string should be wrapped in single quotes", - }, - { - name: "string with single quote", - sqlVal: &SQLVal{ - Type: StrVal, - Val: []byte("don't"), - }, - expected: "'don''t'", - desc: "String with single quote should have the quote escaped by doubling it", - }, - { - name: "string with multiple single quotes", - sqlVal: &SQLVal{ - Type: StrVal, - Val: []byte("'malicious'; DROP TABLE users; --"), - }, - expected: "'''malicious''; DROP TABLE users; --'", - desc: "String with SQL injection attempt should have all single quotes properly escaped", - }, - { - name: "empty string", - sqlVal: &SQLVal{ - Type: StrVal, - Val: []byte(""), - }, - expected: "''", - desc: "Empty string should result in empty quoted string", - }, - { - name: "integer value", - sqlVal: &SQLVal{ - Type: IntVal, - Val: []byte("123"), - }, - expected: "123", - desc: "Integer value should not be quoted", - }, - { - name: "float value", - sqlVal: &SQLVal{ - Type: FloatVal, - Val: []byte("123.45"), - }, - expected: "123.45", - desc: "Float value should not be quoted", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := engine.getSQLValAlias(tt.sqlVal) - assert.Equal(t, tt.expected, result, tt.desc) - }) - } -} diff --git a/weed/query/engine/errors.go b/weed/query/engine/errors.go deleted file mode 100644 index 6a297d92f..000000000 --- a/weed/query/engine/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -package engine - -import "fmt" - -// Error types for better error handling and testing - -// AggregationError represents errors that occur during aggregation computation -type AggregationError struct { - Operation string - Column string - Cause error -} - -func (e AggregationError) Error() string { - return fmt.Sprintf("aggregation error in %s(%s): %v", e.Operation, e.Column, e.Cause) -} - -// DataSourceError represents errors that occur when accessing data sources -type DataSourceError struct { - Source string - Cause error -} - -func (e DataSourceError) Error() string { - return fmt.Sprintf("data source error in %s: %v", e.Source, e.Cause) -} - -// OptimizationError represents errors that occur during query optimization -type OptimizationError struct { - Strategy string - Reason string -} - -func (e OptimizationError) Error() string { - return fmt.Sprintf("optimization failed for %s: %s", e.Strategy, e.Reason) -} - -// ParseError represents SQL parsing errors -type ParseError struct { - Query string - Message string - Cause error -} - -func (e ParseError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("SQL parse error: %s (%v)", e.Message, e.Cause) - } - return fmt.Sprintf("SQL parse error: %s", e.Message) -} - -// TableNotFoundError represents table/topic not found errors -type TableNotFoundError struct { - Database string - Table string -} - -func (e TableNotFoundError) Error() string { - if e.Database != "" { - return fmt.Sprintf("table %s.%s not found", e.Database, e.Table) - } - return fmt.Sprintf("table %s not found", e.Table) -} - -// ColumnNotFoundError represents column not found errors -type ColumnNotFoundError struct { - Table string - Column string -} - -func (e ColumnNotFoundError) Error() string { - if e.Table != "" { - return fmt.Sprintf("column %s not found in table %s", e.Column, e.Table) - } - return fmt.Sprintf("column %s not found", e.Column) -} - -// UnsupportedFeatureError represents unsupported SQL features -type UnsupportedFeatureError struct { - Feature string - Reason string -} - -func (e UnsupportedFeatureError) Error() string { - if e.Reason != "" { - return fmt.Sprintf("feature not supported: %s (%s)", e.Feature, e.Reason) - } - return fmt.Sprintf("feature not supported: %s", e.Feature) -} diff --git a/weed/query/engine/execution_plan_fast_path_test.go b/weed/query/engine/execution_plan_fast_path_test.go deleted file mode 100644 index c0f08fa21..000000000 --- a/weed/query/engine/execution_plan_fast_path_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestExecutionPlanFastPathDisplay tests that the execution plan correctly shows -// "Parquet Statistics (fast path)" when fast path is used, not "Parquet Files (full scan)" -func TestExecutionPlanFastPathDisplay(t *testing.T) { - engine := NewMockSQLEngine() - - // Create realistic data sources for fast path scenario - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/topic/partition-1": { - { - RowCount: 500, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1}}, - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 500}}, - NullCount: 0, - RowCount: 500, - }, - }, - }, - }, - }, - ParquetRowCount: 500, - LiveLogRowCount: 0, // Pure parquet scenario - ideal for fast path - PartitionsCount: 1, - } - - t.Run("Fast path execution plan shows correct data sources", func(t *testing.T) { - optimizer := NewFastPathOptimizer(engine.SQLEngine) - - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*", Alias: "COUNT(*)"}, - } - - // Test the strategy determination - strategy := optimizer.DetermineStrategy(aggregations) - assert.True(t, strategy.CanUseFastPath, "Strategy should allow fast path for COUNT(*)") - assert.Equal(t, "all_aggregations_supported", strategy.Reason) - - // Test data source list building - builder := &ExecutionPlanBuilder{} - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/topic/partition-1": { - {RowCount: 500}, - }, - }, - ParquetRowCount: 500, - LiveLogRowCount: 0, - PartitionsCount: 1, - } - - dataSourcesList := builder.buildDataSourcesList(strategy, dataSources) - - // When fast path is used, should show "parquet_stats" not "parquet_files" - assert.Contains(t, dataSourcesList, "parquet_stats", - "Data sources should contain 'parquet_stats' when fast path is used") - assert.NotContains(t, dataSourcesList, "parquet_files", - "Data sources should NOT contain 'parquet_files' when fast path is used") - - // Test that the formatting works correctly - formattedSource := engine.SQLEngine.formatDataSource("parquet_stats") - assert.Equal(t, "Parquet Statistics (fast path)", formattedSource, - "parquet_stats should format to 'Parquet Statistics (fast path)'") - - formattedFullScan := engine.SQLEngine.formatDataSource("parquet_files") - assert.Equal(t, "Parquet Files (full scan)", formattedFullScan, - "parquet_files should format to 'Parquet Files (full scan)'") - }) - - t.Run("Slow path execution plan shows full scan data sources", func(t *testing.T) { - builder := &ExecutionPlanBuilder{} - - // Create strategy that cannot use fast path - strategy := AggregationStrategy{ - CanUseFastPath: false, - Reason: "unsupported_aggregation_functions", - } - - dataSourcesList := builder.buildDataSourcesList(strategy, dataSources) - - // When slow path is used, should show "parquet_files" and "live_logs" - assert.Contains(t, dataSourcesList, "parquet_files", - "Slow path should contain 'parquet_files'") - assert.Contains(t, dataSourcesList, "live_logs", - "Slow path should contain 'live_logs'") - assert.NotContains(t, dataSourcesList, "parquet_stats", - "Slow path should NOT contain 'parquet_stats'") - }) - - t.Run("Data source formatting works correctly", func(t *testing.T) { - // Test just the data source formatting which is the key fix - - // Test parquet_stats formatting (fast path) - fastPathFormatted := engine.SQLEngine.formatDataSource("parquet_stats") - assert.Equal(t, "Parquet Statistics (fast path)", fastPathFormatted, - "parquet_stats should format to show fast path usage") - - // Test parquet_files formatting (slow path) - slowPathFormatted := engine.SQLEngine.formatDataSource("parquet_files") - assert.Equal(t, "Parquet Files (full scan)", slowPathFormatted, - "parquet_files should format to show full scan") - - // Test that data sources list is built correctly for fast path - builder := &ExecutionPlanBuilder{} - fastStrategy := AggregationStrategy{CanUseFastPath: true} - - fastSources := builder.buildDataSourcesList(fastStrategy, dataSources) - assert.Contains(t, fastSources, "parquet_stats", - "Fast path should include parquet_stats") - assert.NotContains(t, fastSources, "parquet_files", - "Fast path should NOT include parquet_files") - - // Test that data sources list is built correctly for slow path - slowStrategy := AggregationStrategy{CanUseFastPath: false} - - slowSources := builder.buildDataSourcesList(slowStrategy, dataSources) - assert.Contains(t, slowSources, "parquet_files", - "Slow path should include parquet_files") - assert.NotContains(t, slowSources, "parquet_stats", - "Slow path should NOT include parquet_stats") - }) -} diff --git a/weed/query/engine/fast_path_fix_test.go b/weed/query/engine/fast_path_fix_test.go deleted file mode 100644 index 3769e9215..000000000 --- a/weed/query/engine/fast_path_fix_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package engine - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestFastPathCountFixRealistic tests the specific scenario mentioned in the bug report: -// Fast path returning 0 for COUNT(*) when slow path returns 1803 -func TestFastPathCountFixRealistic(t *testing.T) { - engine := NewMockSQLEngine() - - // Set up debug mode to see our new logging - ctx := context.WithValue(context.Background(), "debug", true) - - // Create realistic data sources that mimic a scenario with 1803 rows - dataSources := &TopicDataSources{ - ParquetFiles: map[string][]*ParquetFileStats{ - "/topics/test/large-topic/0000-1023": { - { - RowCount: 800, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1}}, - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 800}}, - NullCount: 0, - RowCount: 800, - }, - }, - }, - { - RowCount: 500, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 801}}, - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1300}}, - NullCount: 0, - RowCount: 500, - }, - }, - }, - }, - "/topics/test/large-topic/1024-2047": { - { - RowCount: 300, - ColumnStats: map[string]*ParquetColumnStats{ - "id": { - ColumnName: "id", - MinValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1301}}, - MaxValue: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 1600}}, - NullCount: 0, - RowCount: 300, - }, - }, - }, - }, - }, - ParquetRowCount: 1600, // 800 + 500 + 300 - LiveLogRowCount: 203, // Additional live log data - PartitionsCount: 2, - LiveLogFilesCount: 15, - } - - partitions := []string{ - "/topics/test/large-topic/0000-1023", - "/topics/test/large-topic/1024-2047", - } - - t.Run("COUNT(*) should return correct total (1803)", func(t *testing.T) { - computer := NewAggregationComputer(engine.SQLEngine) - - aggregations := []AggregationSpec{ - {Function: FuncCOUNT, Column: "*", Alias: "COUNT(*)"}, - } - - results, err := computer.ComputeFastPathAggregations(ctx, aggregations, dataSources, partitions) - - assert.NoError(t, err, "Fast path aggregation should not error") - assert.Len(t, results, 1, "Should return one result") - - // This is the key test - before our fix, this was returning 0 - expectedCount := int64(1803) // 1600 (parquet) + 203 (live log) - actualCount := results[0].Count - - assert.Equal(t, expectedCount, actualCount, - "COUNT(*) should return %d (1600 parquet + 203 live log), but got %d", - expectedCount, actualCount) - }) - - t.Run("MIN/MAX should work with multiple partitions", func(t *testing.T) { - computer := NewAggregationComputer(engine.SQLEngine) - - aggregations := []AggregationSpec{ - {Function: FuncMIN, Column: "id", Alias: "MIN(id)"}, - {Function: FuncMAX, Column: "id", Alias: "MAX(id)"}, - } - - results, err := computer.ComputeFastPathAggregations(ctx, aggregations, dataSources, partitions) - - assert.NoError(t, err, "Fast path aggregation should not error") - assert.Len(t, results, 2, "Should return two results") - - // MIN should be the lowest across all parquet files - assert.Equal(t, int64(1), results[0].Min, "MIN should be 1") - - // MAX should be the highest across all parquet files - assert.Equal(t, int64(1600), results[1].Max, "MAX should be 1600") - }) -} - -// TestFastPathDataSourceDiscoveryLogging tests that our debug logging works correctly -func TestFastPathDataSourceDiscoveryLogging(t *testing.T) { - // This test verifies that our enhanced data source collection structure is correct - - t.Run("DataSources structure validation", func(t *testing.T) { - // Test the TopicDataSources structure initialization - dataSources := &TopicDataSources{ - ParquetFiles: make(map[string][]*ParquetFileStats), - ParquetRowCount: 0, - LiveLogRowCount: 0, - LiveLogFilesCount: 0, - PartitionsCount: 0, - } - - assert.NotNil(t, dataSources, "Data sources should not be nil") - assert.NotNil(t, dataSources.ParquetFiles, "ParquetFiles map should be initialized") - assert.GreaterOrEqual(t, dataSources.PartitionsCount, 0, "PartitionsCount should be non-negative") - assert.GreaterOrEqual(t, dataSources.ParquetRowCount, int64(0), "ParquetRowCount should be non-negative") - assert.GreaterOrEqual(t, dataSources.LiveLogRowCount, int64(0), "LiveLogRowCount should be non-negative") - }) -} - -// TestFastPathValidationLogic tests the enhanced validation we added -func TestFastPathValidationLogic(t *testing.T) { - t.Run("Validation catches data source vs computation mismatch", func(t *testing.T) { - // Create a scenario where data sources and computation might be inconsistent - dataSources := &TopicDataSources{ - ParquetFiles: make(map[string][]*ParquetFileStats), - ParquetRowCount: 1000, // Data sources say 1000 rows - LiveLogRowCount: 0, - PartitionsCount: 1, - } - - // But aggregation result says different count (simulating the original bug) - aggResults := []AggregationResult{ - {Count: 0}, // Bug: returns 0 when data sources show 1000 - } - - // This simulates the validation logic from tryFastParquetAggregation - totalRows := dataSources.ParquetRowCount + dataSources.LiveLogRowCount - countResult := aggResults[0].Count - - // Our validation should catch this mismatch - assert.NotEqual(t, totalRows, countResult, - "This test simulates the bug: data sources show %d but COUNT returns %d", - totalRows, countResult) - - // In the real code, this would trigger a fallback to slow path - validationPassed := (countResult == totalRows) - assert.False(t, validationPassed, "Validation should fail for inconsistent data") - }) - - t.Run("Validation passes for consistent data", func(t *testing.T) { - // Create a scenario where everything is consistent - dataSources := &TopicDataSources{ - ParquetFiles: make(map[string][]*ParquetFileStats), - ParquetRowCount: 1000, - LiveLogRowCount: 803, - PartitionsCount: 1, - } - - // Aggregation result matches data sources - aggResults := []AggregationResult{ - {Count: 1803}, // Correct: matches 1000 + 803 - } - - totalRows := dataSources.ParquetRowCount + dataSources.LiveLogRowCount - countResult := aggResults[0].Count - - // Our validation should pass this - assert.Equal(t, totalRows, countResult, - "Validation should pass when data sources (%d) match COUNT result (%d)", - totalRows, countResult) - - validationPassed := (countResult == totalRows) - assert.True(t, validationPassed, "Validation should pass for consistent data") - }) -} diff --git a/weed/query/engine/fast_path_predicate_validation_test.go b/weed/query/engine/fast_path_predicate_validation_test.go deleted file mode 100644 index 3918fdbf0..000000000 --- a/weed/query/engine/fast_path_predicate_validation_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package engine - -import ( - "testing" -) - -// TestFastPathPredicateValidation tests the critical fix for fast-path aggregation -// to ensure non-time predicates are properly detected and fast-path is blocked -func TestFastPathPredicateValidation(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - whereClause string - expectedTimeOnly bool - expectedStartTimeNs int64 - expectedStopTimeNs int64 - description string - }{ - { - name: "No WHERE clause", - whereClause: "", - expectedTimeOnly: true, // No WHERE means time-only is true - description: "Queries without WHERE clause should allow fast path", - }, - { - name: "Time-only predicate (greater than)", - whereClause: "_ts > 1640995200000000000", - expectedTimeOnly: true, - expectedStartTimeNs: 1640995200000000000, - expectedStopTimeNs: 0, - description: "Pure time predicates should allow fast path", - }, - { - name: "Time-only predicate (less than)", - whereClause: "_ts < 1640995200000000000", - expectedTimeOnly: true, - expectedStartTimeNs: 0, - expectedStopTimeNs: 1640995200000000000, - description: "Pure time predicates should allow fast path", - }, - { - name: "Time-only predicate (range with AND)", - whereClause: "_ts > 1640995200000000000 AND _ts < 1641081600000000000", - expectedTimeOnly: true, - expectedStartTimeNs: 1640995200000000000, - expectedStopTimeNs: 1641081600000000000, - description: "Time range predicates should allow fast path", - }, - { - name: "Mixed predicate (time + non-time)", - whereClause: "_ts > 1640995200000000000 AND user_id = 'user123'", - expectedTimeOnly: false, - description: "CRITICAL: Mixed predicates must block fast path to prevent incorrect results", - }, - { - name: "Non-time predicate only", - whereClause: "user_id = 'user123'", - expectedTimeOnly: false, - description: "Non-time predicates must block fast path", - }, - { - name: "Multiple non-time predicates", - whereClause: "user_id = 'user123' AND status = 'active'", - expectedTimeOnly: false, - description: "Multiple non-time predicates must block fast path", - }, - { - name: "OR with time predicate (unsafe)", - whereClause: "_ts > 1640995200000000000 OR user_id = 'user123'", - expectedTimeOnly: false, - description: "OR expressions are complex and must block fast path", - }, - { - name: "OR with only time predicates (still unsafe)", - whereClause: "_ts > 1640995200000000000 OR _ts < 1640908800000000000", - expectedTimeOnly: false, - description: "Even time-only OR expressions must block fast path due to complexity", - }, - // Note: Parenthesized expressions are not supported by the current parser - // These test cases are commented out until parser support is added - { - name: "String column comparison", - whereClause: "event_type = 'click'", - expectedTimeOnly: false, - description: "String column comparisons must block fast path", - }, - { - name: "Numeric column comparison", - whereClause: "id > 1000", - expectedTimeOnly: false, - description: "Numeric column comparisons must block fast path", - }, - { - name: "Internal timestamp column", - whereClause: "_ts_ns > 1640995200000000000", - expectedTimeOnly: true, - expectedStartTimeNs: 1640995200000000000, - description: "Internal timestamp column should allow fast path", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Parse the WHERE clause if present - var whereExpr ExprNode - if tc.whereClause != "" { - sql := "SELECT COUNT(*) FROM test WHERE " + tc.whereClause - stmt, err := ParseSQL(sql) - if err != nil { - t.Fatalf("Failed to parse SQL: %v", err) - } - selectStmt := stmt.(*SelectStatement) - whereExpr = selectStmt.Where.Expr - } - - // Test the validation function - var startTimeNs, stopTimeNs int64 - var onlyTimePredicates bool - - if whereExpr == nil { - // No WHERE clause case - onlyTimePredicates = true - } else { - startTimeNs, stopTimeNs, onlyTimePredicates = engine.SQLEngine.extractTimeFiltersWithValidation(whereExpr) - } - - // Verify the results - if onlyTimePredicates != tc.expectedTimeOnly { - t.Errorf("Expected onlyTimePredicates=%v, got %v. %s", - tc.expectedTimeOnly, onlyTimePredicates, tc.description) - } - - // Check time filters if expected - if tc.expectedStartTimeNs != 0 && startTimeNs != tc.expectedStartTimeNs { - t.Errorf("Expected startTimeNs=%d, got %d", tc.expectedStartTimeNs, startTimeNs) - } - if tc.expectedStopTimeNs != 0 && stopTimeNs != tc.expectedStopTimeNs { - t.Errorf("Expected stopTimeNs=%d, got %d", tc.expectedStopTimeNs, stopTimeNs) - } - - t.Logf("%s: onlyTimePredicates=%v, startTimeNs=%d, stopTimeNs=%d", - tc.name, onlyTimePredicates, startTimeNs, stopTimeNs) - }) - } -} - -// TestFastPathAggregationSafety tests that fast-path aggregation is only attempted -// when it's safe to do so (no non-time predicates) -func TestFastPathAggregationSafety(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - shouldUseFastPath bool - description string - }{ - { - name: "No WHERE - should use fast path", - sql: "SELECT COUNT(*) FROM test", - shouldUseFastPath: true, - description: "Queries without WHERE should use fast path", - }, - { - name: "Time-only WHERE - should use fast path", - sql: "SELECT COUNT(*) FROM test WHERE _ts > 1640995200000000000", - shouldUseFastPath: true, - description: "Time-only predicates should use fast path", - }, - { - name: "Mixed WHERE - should NOT use fast path", - sql: "SELECT COUNT(*) FROM test WHERE _ts > 1640995200000000000 AND user_id = 'user123'", - shouldUseFastPath: false, - description: "CRITICAL: Mixed predicates must NOT use fast path to prevent wrong results", - }, - { - name: "Non-time WHERE - should NOT use fast path", - sql: "SELECT COUNT(*) FROM test WHERE user_id = 'user123'", - shouldUseFastPath: false, - description: "Non-time predicates must NOT use fast path", - }, - { - name: "OR expression - should NOT use fast path", - sql: "SELECT COUNT(*) FROM test WHERE _ts > 1640995200000000000 OR user_id = 'user123'", - shouldUseFastPath: false, - description: "OR expressions must NOT use fast path due to complexity", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Parse the SQL - stmt, err := ParseSQL(tc.sql) - if err != nil { - t.Fatalf("Failed to parse SQL: %v", err) - } - selectStmt := stmt.(*SelectStatement) - - // Test the fast path decision logic - startTimeNs, stopTimeNs := int64(0), int64(0) - onlyTimePredicates := true - if selectStmt.Where != nil { - startTimeNs, stopTimeNs, onlyTimePredicates = engine.SQLEngine.extractTimeFiltersWithValidation(selectStmt.Where.Expr) - } - - canAttemptFastPath := selectStmt.Where == nil || onlyTimePredicates - - // Verify the decision - if canAttemptFastPath != tc.shouldUseFastPath { - t.Errorf("Expected canAttemptFastPath=%v, got %v. %s", - tc.shouldUseFastPath, canAttemptFastPath, tc.description) - } - - t.Logf("%s: canAttemptFastPath=%v (onlyTimePredicates=%v, startTimeNs=%d, stopTimeNs=%d)", - tc.name, canAttemptFastPath, onlyTimePredicates, startTimeNs, stopTimeNs) - }) - } -} - -// TestTimestampColumnDetection tests that the engine correctly identifies timestamp columns -func TestTimestampColumnDetection(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - columnName string - isTimestamp bool - description string - }{ - { - columnName: "_ts", - isTimestamp: true, - description: "System timestamp display column should be detected", - }, - { - columnName: "_ts_ns", - isTimestamp: true, - description: "Internal timestamp column should be detected", - }, - { - columnName: "user_id", - isTimestamp: false, - description: "Non-timestamp column should not be detected as timestamp", - }, - { - columnName: "id", - isTimestamp: false, - description: "ID column should not be detected as timestamp", - }, - { - columnName: "status", - isTimestamp: false, - description: "Status column should not be detected as timestamp", - }, - { - columnName: "event_type", - isTimestamp: false, - description: "Event type column should not be detected as timestamp", - }, - } - - for _, tc := range testCases { - t.Run(tc.columnName, func(t *testing.T) { - isTimestamp := engine.SQLEngine.isTimestampColumn(tc.columnName) - if isTimestamp != tc.isTimestamp { - t.Errorf("Expected isTimestampColumn(%s)=%v, got %v. %s", - tc.columnName, tc.isTimestamp, isTimestamp, tc.description) - } - t.Logf("Column '%s': isTimestamp=%v", tc.columnName, isTimestamp) - }) - } -} diff --git a/weed/query/engine/function_helpers.go b/weed/query/engine/function_helpers.go deleted file mode 100644 index 60eccdd37..000000000 --- a/weed/query/engine/function_helpers.go +++ /dev/null @@ -1,131 +0,0 @@ -package engine - -import ( - "fmt" - "strconv" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// Helper function to convert schema_pb.Value to float64 -func (e *SQLEngine) valueToFloat64(value *schema_pb.Value) (float64, error) { - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return float64(v.Int32Value), nil - case *schema_pb.Value_Int64Value: - return float64(v.Int64Value), nil - case *schema_pb.Value_FloatValue: - return float64(v.FloatValue), nil - case *schema_pb.Value_DoubleValue: - return v.DoubleValue, nil - case *schema_pb.Value_StringValue: - // Try to parse string as number - if f, err := strconv.ParseFloat(v.StringValue, 64); err == nil { - return f, nil - } - return 0, fmt.Errorf("cannot convert string '%s' to number", v.StringValue) - case *schema_pb.Value_BoolValue: - if v.BoolValue { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("cannot convert value type to number") - } -} - -// Helper function to check if a value is an integer type -func (e *SQLEngine) isIntegerValue(value *schema_pb.Value) bool { - switch value.Kind.(type) { - case *schema_pb.Value_Int32Value, *schema_pb.Value_Int64Value: - return true - default: - return false - } -} - -// Helper function to convert schema_pb.Value to string -func (e *SQLEngine) valueToString(value *schema_pb.Value) (string, error) { - switch v := value.Kind.(type) { - case *schema_pb.Value_StringValue: - return v.StringValue, nil - case *schema_pb.Value_Int32Value: - return strconv.FormatInt(int64(v.Int32Value), 10), nil - case *schema_pb.Value_Int64Value: - return strconv.FormatInt(v.Int64Value, 10), nil - case *schema_pb.Value_FloatValue: - return strconv.FormatFloat(float64(v.FloatValue), 'g', -1, 32), nil - case *schema_pb.Value_DoubleValue: - return strconv.FormatFloat(v.DoubleValue, 'g', -1, 64), nil - case *schema_pb.Value_BoolValue: - if v.BoolValue { - return "true", nil - } - return "false", nil - case *schema_pb.Value_BytesValue: - return string(v.BytesValue), nil - default: - return "", fmt.Errorf("cannot convert value type to string") - } -} - -// Helper function to convert schema_pb.Value to int64 -func (e *SQLEngine) valueToInt64(value *schema_pb.Value) (int64, error) { - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return int64(v.Int32Value), nil - case *schema_pb.Value_Int64Value: - return v.Int64Value, nil - case *schema_pb.Value_FloatValue: - return int64(v.FloatValue), nil - case *schema_pb.Value_DoubleValue: - return int64(v.DoubleValue), nil - case *schema_pb.Value_StringValue: - if i, err := strconv.ParseInt(v.StringValue, 10, 64); err == nil { - return i, nil - } - return 0, fmt.Errorf("cannot convert string '%s' to integer", v.StringValue) - default: - return 0, fmt.Errorf("cannot convert value type to integer") - } -} - -// Helper function to convert schema_pb.Value to time.Time -func (e *SQLEngine) valueToTime(value *schema_pb.Value) (time.Time, error) { - switch v := value.Kind.(type) { - case *schema_pb.Value_TimestampValue: - if v.TimestampValue == nil { - return time.Time{}, fmt.Errorf("null timestamp value") - } - return time.UnixMicro(v.TimestampValue.TimestampMicros), nil - case *schema_pb.Value_StringValue: - // Try to parse various date/time string formats - dateFormats := []struct { - format string - useLocal bool - }{ - {"2006-01-02 15:04:05", true}, // Local time assumed for non-timezone formats - {"2006-01-02T15:04:05Z", false}, // UTC format - {"2006-01-02T15:04:05", true}, // Local time assumed - {"2006-01-02", true}, // Local time assumed for date only - {"15:04:05", true}, // Local time assumed for time only - } - - for _, formatSpec := range dateFormats { - if t, err := time.Parse(formatSpec.format, v.StringValue); err == nil { - if formatSpec.useLocal { - // Convert to UTC for consistency if no timezone was specified - return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC), nil - } - return t, nil - } - } - return time.Time{}, fmt.Errorf("unable to parse date/time string: %s", v.StringValue) - case *schema_pb.Value_Int64Value: - // Assume Unix timestamp (seconds) - return time.Unix(v.Int64Value, 0), nil - default: - return time.Time{}, fmt.Errorf("cannot convert value type to date/time") - } -} diff --git a/weed/query/engine/hybrid_message_scanner.go b/weed/query/engine/hybrid_message_scanner.go deleted file mode 100644 index c09ce2f54..000000000 --- a/weed/query/engine/hybrid_message_scanner.go +++ /dev/null @@ -1,1905 +0,0 @@ -package engine - -import ( - "container/heap" - "context" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/mq" - "github.com/seaweedfs/seaweedfs/weed/mq/logstore" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" - "github.com/seaweedfs/seaweedfs/weed/wdclient" - "google.golang.org/protobuf/proto" -) - -// HybridMessageScanner scans from ALL data sources: -// Architecture: -// 1. Unflushed in-memory data from brokers (mq_pb.DataMessage format) - REAL-TIME -// 2. Recent/live messages in log files (filer_pb.LogEntry format) - FLUSHED -// 3. Older messages in Parquet files (schema_pb.RecordValue format) - ARCHIVED -// 4. Seamlessly merges data from all sources chronologically -// 5. Provides complete real-time view of all messages in a topic -type HybridMessageScanner struct { - filerClient filer_pb.FilerClient - brokerClient BrokerClientInterface // For querying unflushed data - topic topic.Topic - recordSchema *schema_pb.RecordType - schemaFormat string // Serialization format: "AVRO", "PROTOBUF", "JSON_SCHEMA", or empty for schemaless - parquetLevels *schema.ParquetLevels - engine *SQLEngine // Reference for system column formatting -} - -// NewHybridMessageScanner creates a scanner that reads from all data sources -// This provides complete real-time message coverage including unflushed data -func NewHybridMessageScanner(filerClient filer_pb.FilerClient, brokerClient BrokerClientInterface, namespace, topicName string, engine *SQLEngine) (*HybridMessageScanner, error) { - // Check if filerClient is available - if filerClient == nil { - return nil, fmt.Errorf("filerClient is required but not available") - } - - // Create topic reference - t := topic.Topic{ - Namespace: namespace, - Name: topicName, - } - - // Get flat schema from broker client - recordType, _, schemaFormat, err := brokerClient.GetTopicSchema(context.Background(), namespace, topicName) - if err != nil { - return nil, fmt.Errorf("failed to get topic record type: %v", err) - } - - if recordType == nil || len(recordType.Fields) == 0 { - // For topics without schema, create a minimal schema with system fields and _value - recordType = schema.RecordTypeBegin(). - WithField(SW_COLUMN_NAME_TIMESTAMP, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - WithField(SW_COLUMN_NAME_VALUE, schema.TypeBytes). // Raw message value - RecordTypeEnd() - } else { - // Create a copy of the recordType to avoid modifying the original - recordTypeCopy := &schema_pb.RecordType{ - Fields: make([]*schema_pb.Field, len(recordType.Fields)), - } - copy(recordTypeCopy.Fields, recordType.Fields) - - // Add system columns that MQ adds to all records - recordType = schema.NewRecordTypeBuilder(recordTypeCopy). - WithField(SW_COLUMN_NAME_TIMESTAMP, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - RecordTypeEnd() - } - - // Convert to Parquet levels for efficient reading - parquetLevels, err := schema.ToParquetLevels(recordType) - if err != nil { - return nil, fmt.Errorf("failed to create Parquet levels: %v", err) - } - - return &HybridMessageScanner{ - filerClient: filerClient, - brokerClient: brokerClient, - topic: t, - recordSchema: recordType, - schemaFormat: schemaFormat, - parquetLevels: parquetLevels, - engine: engine, - }, nil -} - -// HybridScanOptions configure how the scanner reads from both live and archived data -type HybridScanOptions struct { - // Time range filtering (Unix nanoseconds) - StartTimeNs int64 - StopTimeNs int64 - - // Column projection - if empty, select all columns - Columns []string - - // Row limit - 0 means no limit - Limit int - - // Row offset - 0 means no offset - Offset int - - // Predicate for WHERE clause filtering - Predicate func(*schema_pb.RecordValue) bool -} - -// HybridScanResult represents a message from either live logs or Parquet files -type HybridScanResult struct { - Values map[string]*schema_pb.Value // Column name -> value - Timestamp int64 // Message timestamp (_ts_ns) - Key []byte // Message key (_key) - Source string // "live_log" or "parquet_archive" or "in_memory_broker" -} - -// HybridScanStats contains statistics about data sources scanned -type HybridScanStats struct { - BrokerBufferQueried bool - BrokerBufferMessages int - BufferStartIndex int64 - PartitionsScanned int - LiveLogFilesScanned int // Number of live log files processed -} - -// ParquetColumnStats holds statistics for a single column from parquet metadata -type ParquetColumnStats struct { - ColumnName string - MinValue *schema_pb.Value - MaxValue *schema_pb.Value - NullCount int64 - RowCount int64 -} - -// ParquetFileStats holds aggregated statistics for a parquet file -type ParquetFileStats struct { - FileName string - RowCount int64 - ColumnStats map[string]*ParquetColumnStats - // Optional file-level timestamp range from filer extended attributes - MinTimestampNs int64 - MaxTimestampNs int64 -} - -// getTimestampRangeFromStats returns (minTsNs, maxTsNs, ok) by inspecting common timestamp columns -func (h *HybridMessageScanner) getTimestampRangeFromStats(fileStats *ParquetFileStats) (int64, int64, bool) { - if fileStats == nil { - return 0, 0, false - } - // Prefer column stats for _ts_ns if present - if len(fileStats.ColumnStats) > 0 { - if s, ok := fileStats.ColumnStats[logstore.SW_COLUMN_NAME_TS]; ok && s != nil && s.MinValue != nil && s.MaxValue != nil { - if minNs, okMin := h.schemaValueToNs(s.MinValue); okMin { - if maxNs, okMax := h.schemaValueToNs(s.MaxValue); okMax { - return minNs, maxNs, true - } - } - } - } - // Fallback to file-level range if present in filer extended metadata - if fileStats.MinTimestampNs != 0 || fileStats.MaxTimestampNs != 0 { - return fileStats.MinTimestampNs, fileStats.MaxTimestampNs, true - } - return 0, 0, false -} - -// schemaValueToNs converts a schema_pb.Value that represents a timestamp to ns -func (h *HybridMessageScanner) schemaValueToNs(v *schema_pb.Value) (int64, bool) { - if v == nil { - return 0, false - } - switch k := v.Kind.(type) { - case *schema_pb.Value_Int64Value: - return k.Int64Value, true - case *schema_pb.Value_Int32Value: - return int64(k.Int32Value), true - default: - return 0, false - } -} - -// StreamingDataSource provides a streaming interface for reading scan results -type StreamingDataSource interface { - Next() (*HybridScanResult, error) // Returns next result or nil when done - HasMore() bool // Returns true if more data available - Close() error // Clean up resources -} - -// StreamingMergeItem represents an item in the priority queue for streaming merge -type StreamingMergeItem struct { - Result *HybridScanResult - SourceID int - DataSource StreamingDataSource -} - -// StreamingMergeHeap implements heap.Interface for merging sorted streams by timestamp -type StreamingMergeHeap []*StreamingMergeItem - -func (h StreamingMergeHeap) Len() int { return len(h) } - -func (h StreamingMergeHeap) Less(i, j int) bool { - // Sort by timestamp (ascending order) - return h[i].Result.Timestamp < h[j].Result.Timestamp -} - -func (h StreamingMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } - -func (h *StreamingMergeHeap) Push(x interface{}) { - *h = append(*h, x.(*StreamingMergeItem)) -} - -func (h *StreamingMergeHeap) Pop() interface{} { - old := *h - n := len(old) - item := old[n-1] - *h = old[0 : n-1] - return item -} - -// Scan reads messages from both live logs and archived Parquet files -// Uses SeaweedFS MQ's GenMergedReadFunc for seamless integration -// Assumptions: -// 1. Chronologically merges live and archived data -// 2. Applies filtering at the lowest level for efficiency -// 3. Handles schema evolution transparently -func (hms *HybridMessageScanner) Scan(ctx context.Context, options HybridScanOptions) ([]HybridScanResult, error) { - results, _, err := hms.ScanWithStats(ctx, options) - return results, err -} - -// ScanWithStats reads messages and returns scan statistics for execution plans -func (hms *HybridMessageScanner) ScanWithStats(ctx context.Context, options HybridScanOptions) ([]HybridScanResult, *HybridScanStats, error) { - var results []HybridScanResult - stats := &HybridScanStats{} - - // Get all partitions for this topic via MQ broker discovery - partitions, err := hms.discoverTopicPartitions(ctx) - if err != nil { - return nil, stats, fmt.Errorf("failed to discover partitions for topic %s: %v", hms.topic.String(), err) - } - - stats.PartitionsScanned = len(partitions) - - for _, partition := range partitions { - partitionResults, partitionStats, err := hms.scanPartitionHybridWithStats(ctx, partition, options) - if err != nil { - return nil, stats, fmt.Errorf("failed to scan partition %v: %v", partition, err) - } - - results = append(results, partitionResults...) - - // Aggregate broker buffer stats - if partitionStats != nil { - if partitionStats.BrokerBufferQueried { - stats.BrokerBufferQueried = true - } - stats.BrokerBufferMessages += partitionStats.BrokerBufferMessages - if partitionStats.BufferStartIndex > 0 && (stats.BufferStartIndex == 0 || partitionStats.BufferStartIndex < stats.BufferStartIndex) { - stats.BufferStartIndex = partitionStats.BufferStartIndex - } - } - - // Apply global limit (without offset) across all partitions - // When OFFSET is used, collect more data to ensure we have enough after skipping - // Note: OFFSET will be applied at the end to avoid double-application - if options.Limit > 0 { - // Collect exact amount needed: LIMIT + OFFSET (no excessive doubling) - minRequired := options.Limit + options.Offset - // Small buffer only when needed to handle edge cases in distributed scanning - if options.Offset > 0 && minRequired < 10 { - minRequired = minRequired + 1 // Add 1 extra row buffer, not doubling - } - if len(results) >= minRequired { - break - } - } - } - - // Apply final OFFSET and LIMIT processing (done once at the end) - // Limit semantics: -1 = no limit, 0 = LIMIT 0 (empty), >0 = limit to N rows - if options.Offset > 0 || options.Limit >= 0 { - // Handle LIMIT 0 special case first - if options.Limit == 0 { - return []HybridScanResult{}, stats, nil - } - - // Apply OFFSET first - if options.Offset > 0 { - if options.Offset >= len(results) { - results = []HybridScanResult{} - } else { - results = results[options.Offset:] - } - } - - // Apply LIMIT after OFFSET (only if limit > 0) - if options.Limit > 0 && len(results) > options.Limit { - results = results[:options.Limit] - } - } - - return results, stats, nil -} - -// scanUnflushedData queries brokers for unflushed in-memory data using buffer_start deduplication -func (hms *HybridMessageScanner) scanUnflushedData(ctx context.Context, partition topic.Partition, options HybridScanOptions) ([]HybridScanResult, error) { - results, _, err := hms.scanUnflushedDataWithStats(ctx, partition, options) - return results, err -} - -// scanUnflushedDataWithStats queries brokers for unflushed data and returns statistics -func (hms *HybridMessageScanner) scanUnflushedDataWithStats(ctx context.Context, partition topic.Partition, options HybridScanOptions) ([]HybridScanResult, *HybridScanStats, error) { - var results []HybridScanResult - stats := &HybridScanStats{} - - // Skip if no broker client available - if hms.brokerClient == nil { - return results, stats, nil - } - - // Mark that we attempted to query broker buffer - stats.BrokerBufferQueried = true - - // Step 1: Get unflushed data from broker using buffer_start-based method - // This method uses buffer_start metadata to avoid double-counting with exact precision - unflushedEntries, err := hms.brokerClient.GetUnflushedMessages(ctx, hms.topic.Namespace, hms.topic.Name, partition, options.StartTimeNs) - if err != nil { - // Log error but don't fail the query - continue with disk data only - // Reset queried flag on error - stats.BrokerBufferQueried = false - return results, stats, nil - } - - // Capture stats for EXPLAIN - stats.BrokerBufferMessages = len(unflushedEntries) - - // Step 2: Process unflushed entries (already deduplicated by broker) - for _, logEntry := range unflushedEntries { - // Pre-decode DataMessage for reuse in both control check and conversion - var dataMessage *mq_pb.DataMessage - if len(logEntry.Data) > 0 { - dataMessage = &mq_pb.DataMessage{} - if err := proto.Unmarshal(logEntry.Data, dataMessage); err != nil { - dataMessage = nil // Failed to decode, treat as raw data - } - } - - // Skip control entries without actual data - if hms.isControlEntryWithDecoded(logEntry, dataMessage) { - continue // Skip this entry - } - - // Skip messages outside time range - if options.StartTimeNs > 0 && logEntry.TsNs < options.StartTimeNs { - continue - } - if options.StopTimeNs > 0 && logEntry.TsNs > options.StopTimeNs { - continue - } - - // Convert LogEntry to RecordValue format (same as disk data) - recordValue, _, err := hms.convertLogEntryToRecordValueWithDecoded(logEntry, dataMessage) - if err != nil { - continue // Skip malformed messages - } - - // Apply predicate filter if provided - if options.Predicate != nil && !options.Predicate(recordValue) { - continue - } - - // Extract system columns for result - timestamp := recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value() - key := recordValue.Fields[SW_COLUMN_NAME_KEY].GetBytesValue() - - // Apply column projection - values := make(map[string]*schema_pb.Value) - if len(options.Columns) == 0 { - // Select all columns (excluding system columns from user view) - for name, value := range recordValue.Fields { - if name != SW_COLUMN_NAME_TIMESTAMP && name != SW_COLUMN_NAME_KEY { - values[name] = value - } - } - } else { - // Select specified columns only - for _, columnName := range options.Columns { - if value, exists := recordValue.Fields[columnName]; exists { - values[columnName] = value - } - } - } - - // Create result with proper source tagging - result := HybridScanResult{ - Values: values, - Timestamp: timestamp, - Key: key, - Source: "live_log", // Data from broker's unflushed messages - } - - results = append(results, result) - - // Apply limit (accounting for offset) - collect exact amount needed - if options.Limit > 0 { - // Collect exact amount needed: LIMIT + OFFSET (no excessive doubling) - minRequired := options.Limit + options.Offset - // Small buffer only when needed to handle edge cases in message streaming - if options.Offset > 0 && minRequired < 10 { - minRequired = minRequired + 1 // Add 1 extra row buffer, not doubling - } - if len(results) >= minRequired { - break - } - } - } - - return results, stats, nil -} - -// convertDataMessageToRecord converts mq_pb.DataMessage to schema_pb.RecordValue -func (hms *HybridMessageScanner) convertDataMessageToRecord(msg *mq_pb.DataMessage) (*schema_pb.RecordValue, string, error) { - // Parse the message data as RecordValue - recordValue := &schema_pb.RecordValue{} - if err := proto.Unmarshal(msg.Value, recordValue); err != nil { - return nil, "", fmt.Errorf("failed to unmarshal message data: %v", err) - } - - // Add system columns - if recordValue.Fields == nil { - recordValue.Fields = make(map[string]*schema_pb.Value) - } - - // Add timestamp - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: msg.TsNs}, - } - - return recordValue, string(msg.Key), nil -} - -// discoverTopicPartitions discovers the actual partitions for this topic by scanning the filesystem -// This finds real partition directories like v2025-09-01-07-16-34/0000-0630/ -func (hms *HybridMessageScanner) discoverTopicPartitions(ctx context.Context) ([]topic.Partition, error) { - if hms.filerClient == nil { - return nil, fmt.Errorf("filerClient not available for partition discovery") - } - - var allPartitions []topic.Partition - var err error - - // Scan the topic directory for actual partition versions (timestamped directories) - // List all version directories in the topic directory - err = filer_pb.ReadDirAllEntries(ctx, hms.filerClient, util.FullPath(hms.topic.Dir()), "", func(versionEntry *filer_pb.Entry, isLast bool) error { - if !versionEntry.IsDirectory { - return nil // Skip non-directories - } - - // Parse version timestamp from directory name (e.g., "v2025-09-01-07-16-34") - versionTime, parseErr := topic.ParseTopicVersion(versionEntry.Name) - if parseErr != nil { - // Skip directories that don't match the version format - return nil - } - - // Scan partition directories within this version - versionDir := fmt.Sprintf("%s/%s", hms.topic.Dir(), versionEntry.Name) - return filer_pb.ReadDirAllEntries(ctx, hms.filerClient, util.FullPath(versionDir), "", func(partitionEntry *filer_pb.Entry, isLast bool) error { - if !partitionEntry.IsDirectory { - return nil // Skip non-directories - } - - // Parse partition boundary from directory name (e.g., "0000-0630") - rangeStart, rangeStop := topic.ParsePartitionBoundary(partitionEntry.Name) - if rangeStart == rangeStop { - return nil // Skip invalid partition names - } - - // Create partition object - partition := topic.Partition{ - RangeStart: rangeStart, - RangeStop: rangeStop, - RingSize: topic.PartitionCount, - UnixTimeNs: versionTime.UnixNano(), - } - - allPartitions = append(allPartitions, partition) - return nil - }) - }) - - if err != nil { - return nil, fmt.Errorf("failed to scan topic directory for partitions: %v", err) - } - - // If no partitions found, return empty slice (valid for newly created or empty topics) - if len(allPartitions) == 0 { - fmt.Printf("No partitions found for topic %s - returning empty result set\n", hms.topic.String()) - return []topic.Partition{}, nil - } - - fmt.Printf("Discovered %d partitions for topic %s\n", len(allPartitions), hms.topic.String()) - return allPartitions, nil -} - -// scanPartitionHybrid scans a specific partition using the hybrid approach -// This is where the magic happens - seamlessly reading ALL data sources: -// 1. Unflushed in-memory data from brokers (REAL-TIME) -// 2. Live logs + Parquet files from disk (FLUSHED/ARCHIVED) -func (hms *HybridMessageScanner) scanPartitionHybrid(ctx context.Context, partition topic.Partition, options HybridScanOptions) ([]HybridScanResult, error) { - results, _, err := hms.scanPartitionHybridWithStats(ctx, partition, options) - return results, err -} - -// scanPartitionHybridWithStats scans a specific partition using streaming merge for memory efficiency -// PERFORMANCE IMPROVEMENT: Uses heap-based streaming merge instead of collecting all data and sorting -// - Memory usage: O(k) where k = number of data sources, instead of O(n) where n = total records -// - Scalable: Can handle large topics without LIMIT clauses efficiently -// - Streaming: Processes data as it arrives rather than buffering everything -func (hms *HybridMessageScanner) scanPartitionHybridWithStats(ctx context.Context, partition topic.Partition, options HybridScanOptions) ([]HybridScanResult, *HybridScanStats, error) { - stats := &HybridScanStats{} - - // STEP 1: Scan unflushed in-memory data from brokers (REAL-TIME) - unflushedResults, unflushedStats, err := hms.scanUnflushedDataWithStats(ctx, partition, options) - if err != nil { - // Don't fail the query if broker scanning fails, but provide clear warning to user - // This ensures users are aware that results may not include the most recent data - fmt.Printf("Warning: Unable to access real-time data from message broker: %v\n", err) - fmt.Printf("Note: Query results may not include the most recent unflushed messages\n") - } else if unflushedStats != nil { - stats.BrokerBufferQueried = unflushedStats.BrokerBufferQueried - stats.BrokerBufferMessages = unflushedStats.BrokerBufferMessages - stats.BufferStartIndex = unflushedStats.BufferStartIndex - } - - // Count live log files for statistics - liveLogCount, err := hms.countLiveLogFiles(partition) - if err != nil { - // Don't fail the query, just log warning - fmt.Printf("Warning: Failed to count live log files: %v\n", err) - liveLogCount = 0 - } - stats.LiveLogFilesScanned = liveLogCount - - // STEP 2: Create streaming data sources for memory-efficient merge - var dataSources []StreamingDataSource - - // Add unflushed data source (if we have unflushed results) - if len(unflushedResults) > 0 { - // Sort unflushed results by timestamp before creating stream - if len(unflushedResults) > 1 { - hms.mergeSort(unflushedResults, 0, len(unflushedResults)-1) - } - dataSources = append(dataSources, NewSliceDataSource(unflushedResults)) - } - - // Add streaming flushed data source (live logs + Parquet files) - flushedDataSource := NewStreamingFlushedDataSource(hms, partition, options) - dataSources = append(dataSources, flushedDataSource) - - // STEP 3: Use streaming merge for memory-efficient chronological ordering - var results []HybridScanResult - if len(dataSources) > 0 { - // Calculate how many rows we need to collect during scanning (before OFFSET/LIMIT) - // For LIMIT N OFFSET M, we need to collect at least N+M rows - scanLimit := options.Limit - if options.Limit > 0 && options.Offset > 0 { - scanLimit = options.Limit + options.Offset - } - - mergedResults, err := hms.streamingMerge(dataSources, scanLimit) - if err != nil { - return nil, stats, fmt.Errorf("streaming merge failed: %v", err) - } - results = mergedResults - } - - return results, stats, nil -} - -// countLiveLogFiles counts the number of live log files in a partition for statistics -func (hms *HybridMessageScanner) countLiveLogFiles(partition topic.Partition) (int, error) { - partitionDir := topic.PartitionDir(hms.topic, partition) - - var fileCount int - err := hms.filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // List all files in partition directory - request := &filer_pb.ListEntriesRequest{ - Directory: partitionDir, - Prefix: "", - StartFromFileName: "", - InclusiveStartFrom: true, - Limit: 10000, // reasonable limit for counting - } - - stream, err := client.ListEntries(context.Background(), request) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return err - } - - // Count files that are not .parquet files (live log files) - // Live log files typically have timestamps or are named like log files - fileName := resp.Entry.Name - if !strings.HasSuffix(fileName, ".parquet") && - !strings.HasSuffix(fileName, ".offset") && - len(resp.Entry.Chunks) > 0 { // Has actual content - fileCount++ - } - } - - return nil - }) - - if err != nil { - return 0, err - } - return fileCount, nil -} - -// isControlEntry checks if a log entry is a control entry without actual data -// Based on MQ system analysis, control entries are: -// 1. DataMessages with populated Ctrl field (publisher close signals) -// 2. Entries with empty keys (as filtered by subscriber) -// NOTE: Messages with empty data but valid keys (like NOOP messages) are NOT control entries -func (hms *HybridMessageScanner) isControlEntry(logEntry *filer_pb.LogEntry) bool { - // Pre-decode DataMessage if needed - var dataMessage *mq_pb.DataMessage - if len(logEntry.Data) > 0 { - dataMessage = &mq_pb.DataMessage{} - if err := proto.Unmarshal(logEntry.Data, dataMessage); err != nil { - dataMessage = nil // Failed to decode, treat as raw data - } - } - return hms.isControlEntryWithDecoded(logEntry, dataMessage) -} - -// isControlEntryWithDecoded checks if a log entry is a control entry using pre-decoded DataMessage -// This avoids duplicate protobuf unmarshaling when the DataMessage is already decoded -func (hms *HybridMessageScanner) isControlEntryWithDecoded(logEntry *filer_pb.LogEntry, dataMessage *mq_pb.DataMessage) bool { - // Skip entries with empty keys (same logic as subscriber) - if len(logEntry.Key) == 0 { - return true - } - - // Check if this is a DataMessage with control field populated - if dataMessage != nil && dataMessage.Ctrl != nil { - return true - } - - // Messages with valid keys (even if data is empty) are legitimate messages - // Examples: NOOP messages from Schema Registry - return false -} - -// isNullOrEmpty checks if a schema_pb.Value is null or empty -func isNullOrEmpty(value *schema_pb.Value) bool { - if value == nil { - return true - } - - switch v := value.Kind.(type) { - case *schema_pb.Value_StringValue: - return v.StringValue == "" - case *schema_pb.Value_BytesValue: - return len(v.BytesValue) == 0 - case *schema_pb.Value_ListValue: - return v.ListValue == nil || len(v.ListValue.Values) == 0 - case nil: - return true // No kind set means null - default: - return false - } -} - -// isSchemaless checks if the scanner is configured for a schema-less topic -// Schema-less topics only have system fields: _ts_ns, _key, and _value -func (hms *HybridMessageScanner) isSchemaless() bool { - // Schema-less topics only have system fields: _ts_ns, _key, and _value - // System topics like _schemas are NOT schema-less - they have structured data - // We just need to map their fields during read - - if hms.recordSchema == nil { - return false - } - - // Count only non-system data fields (exclude _ts_ns and _key which are always present) - // Schema-less topics should only have _value as the data field - hasValue := false - dataFieldCount := 0 - - for _, field := range hms.recordSchema.Fields { - switch field.Name { - case SW_COLUMN_NAME_TIMESTAMP, SW_COLUMN_NAME_KEY: - // System fields - ignore - continue - case SW_COLUMN_NAME_VALUE: - hasValue = true - dataFieldCount++ - default: - // Any other field means it's not schema-less - dataFieldCount++ - } - } - - // Schema-less = only has _value field as the data field (plus system fields) - return hasValue && dataFieldCount == 1 -} - -// convertLogEntryToRecordValue converts a filer_pb.LogEntry to schema_pb.RecordValue -// This handles both: -// 1. Live log entries (raw message format) -// 2. Parquet entries (already in schema_pb.RecordValue format) -// 3. Schema-less topics (raw bytes in _value field) -func (hms *HybridMessageScanner) convertLogEntryToRecordValue(logEntry *filer_pb.LogEntry) (*schema_pb.RecordValue, string, error) { - // For schema-less topics, put raw data directly into _value field - if hms.isSchemaless() { - recordValue := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - recordValue.Fields[SW_COLUMN_NAME_VALUE] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Data}, - } - return recordValue, "live_log", nil - } - - // Try to unmarshal as RecordValue first (Parquet format) - recordValue := &schema_pb.RecordValue{} - if err := proto.Unmarshal(logEntry.Data, recordValue); err == nil { - // This is an archived message from Parquet files - // FIX: Add system columns from LogEntry to RecordValue - if recordValue.Fields == nil { - recordValue.Fields = make(map[string]*schema_pb.Value) - } - - // Add system columns from LogEntry - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - - return recordValue, "parquet_archive", nil - } - - // If not a RecordValue, this is raw live message data - parse with schema - return hms.parseRawMessageWithSchema(logEntry) -} - -// min returns the minimum of two integers -func min(a, b int) int { - if a < b { - return a - } - return b -} - -// parseRawMessageWithSchema parses raw live message data using the topic's schema -// This provides proper type conversion and field mapping instead of treating everything as strings -func (hms *HybridMessageScanner) parseRawMessageWithSchema(logEntry *filer_pb.LogEntry) (*schema_pb.RecordValue, string, error) { - recordValue := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - - // Add system columns (always present) - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - - // Parse message data based on schema - if hms.recordSchema == nil || len(hms.recordSchema.Fields) == 0 { - // Fallback: No schema available, use "_value" for schema-less topics only - if hms.isSchemaless() { - recordValue.Fields[SW_COLUMN_NAME_VALUE] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Data}, - } - } - return recordValue, "live_log", nil - } - - // Use schema format to directly choose the right decoder - // This avoids trying multiple decoders and improves performance - var parsedRecord *schema_pb.RecordValue - var err error - - switch hms.schemaFormat { - case "AVRO": - // AVRO format - use Avro decoder - // Note: Avro decoding requires schema registry integration - // For now, fall through to JSON as many Avro messages are also valid JSON - parsedRecord, err = hms.parseJSONMessage(logEntry.Data) - case "PROTOBUF": - // PROTOBUF format - use protobuf decoder - parsedRecord, err = hms.parseProtobufMessage(logEntry.Data) - case "JSON_SCHEMA", "": - // JSON_SCHEMA format or empty (default to JSON) - // JSON is the most common format for schema registry - parsedRecord, err = hms.parseJSONMessage(logEntry.Data) - if err != nil { - // Try protobuf as fallback - parsedRecord, err = hms.parseProtobufMessage(logEntry.Data) - } - default: - // Unknown format - try JSON first, then protobuf as fallback - parsedRecord, err = hms.parseJSONMessage(logEntry.Data) - if err != nil { - parsedRecord, err = hms.parseProtobufMessage(logEntry.Data) - } - } - - if err == nil && parsedRecord != nil { - // Successfully parsed, merge with system columns - for fieldName, fieldValue := range parsedRecord.Fields { - recordValue.Fields[fieldName] = fieldValue - } - return recordValue, "live_log", nil - } - - // Fallback: If schema has a single field, map the raw data to it with type conversion - if len(hms.recordSchema.Fields) == 1 { - field := hms.recordSchema.Fields[0] - convertedValue, convErr := hms.convertRawDataToSchemaValue(logEntry.Data, field.Type) - if convErr == nil { - recordValue.Fields[field.Name] = convertedValue - return recordValue, "live_log", nil - } - } - - // Final fallback: treat as bytes field for schema-less topics only - if hms.isSchemaless() { - recordValue.Fields[SW_COLUMN_NAME_VALUE] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Data}, - } - } - - return recordValue, "live_log", nil -} - -// convertLogEntryToRecordValueWithDecoded converts a filer_pb.LogEntry to schema_pb.RecordValue -// using a pre-decoded DataMessage to avoid duplicate protobuf unmarshaling -func (hms *HybridMessageScanner) convertLogEntryToRecordValueWithDecoded(logEntry *filer_pb.LogEntry, dataMessage *mq_pb.DataMessage) (*schema_pb.RecordValue, string, error) { - // IMPORTANT: Check for schema-less topics FIRST - // Schema-less topics (like _schemas) should store raw data directly in _value field - if hms.isSchemaless() { - recordValue := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - recordValue.Fields[SW_COLUMN_NAME_VALUE] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Data}, - } - return recordValue, "live_log", nil - } - - // CRITICAL: The broker stores DataMessage.Value directly in LogEntry.Data - // So we need to try unmarshaling LogEntry.Data as RecordValue first - var recordValueBytes []byte - - if dataMessage != nil && len(dataMessage.Value) > 0 { - // DataMessage has a Value field - use it - recordValueBytes = dataMessage.Value - } else { - // DataMessage doesn't have Value, use LogEntry.Data directly - // This is the normal case when broker stores messages - recordValueBytes = logEntry.Data - } - - // Try to unmarshal as RecordValue - if len(recordValueBytes) > 0 { - recordValue := &schema_pb.RecordValue{} - if err := proto.Unmarshal(recordValueBytes, recordValue); err == nil { - // Successfully unmarshaled as RecordValue - - // Ensure Fields map exists - if recordValue.Fields == nil { - recordValue.Fields = make(map[string]*schema_pb.Value) - } - - // Add system columns from LogEntry - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: logEntry.TsNs}, - } - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: logEntry.Key}, - } - - return recordValue, "live_log", nil - } - // If unmarshaling as RecordValue fails, fall back to schema-aware parsing - } - - // For cases where protobuf unmarshaling failed or data is empty, - // attempt schema-aware parsing to try JSON, protobuf, and other formats - return hms.parseRawMessageWithSchema(logEntry) -} - -// parseJSONMessage attempts to parse raw data as JSON and map to schema fields -func (hms *HybridMessageScanner) parseJSONMessage(data []byte) (*schema_pb.RecordValue, error) { - // Try to parse as JSON - var jsonData map[string]interface{} - if err := json.Unmarshal(data, &jsonData); err != nil { - return nil, fmt.Errorf("not valid JSON: %v", err) - } - - recordValue := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - - // Map JSON fields to schema fields - for _, schemaField := range hms.recordSchema.Fields { - fieldName := schemaField.Name - if jsonValue, exists := jsonData[fieldName]; exists { - schemaValue, err := hms.convertJSONValueToSchemaValue(jsonValue, schemaField.Type) - if err != nil { - // Log conversion error but continue with other fields - continue - } - recordValue.Fields[fieldName] = schemaValue - } - } - - return recordValue, nil -} - -// parseProtobufMessage attempts to parse raw data as protobuf RecordValue -func (hms *HybridMessageScanner) parseProtobufMessage(data []byte) (*schema_pb.RecordValue, error) { - // This might be a raw protobuf message that didn't parse correctly the first time - // Try alternative protobuf unmarshaling approaches - recordValue := &schema_pb.RecordValue{} - - // Strategy 1: Direct unmarshaling (might work if it's actually a RecordValue) - if err := proto.Unmarshal(data, recordValue); err == nil { - return recordValue, nil - } - - // Strategy 2: Check if it's a different protobuf message type - // For now, return error as we need more specific knowledge of MQ message formats - return nil, fmt.Errorf("could not parse as protobuf RecordValue") -} - -// convertRawDataToSchemaValue converts raw bytes to a specific schema type -func (hms *HybridMessageScanner) convertRawDataToSchemaValue(data []byte, fieldType *schema_pb.Type) (*schema_pb.Value, error) { - dataStr := string(data) - - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - scalarType := fieldType.GetScalarType() - switch scalarType { - case schema_pb.ScalarType_STRING: - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: dataStr}, - }, nil - case schema_pb.ScalarType_INT32: - if val, err := strconv.ParseInt(strings.TrimSpace(dataStr), 10, 32); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int32Value{Int32Value: int32(val)}, - }, nil - } - case schema_pb.ScalarType_INT64: - if val, err := strconv.ParseInt(strings.TrimSpace(dataStr), 10, 64); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: val}, - }, nil - } - case schema_pb.ScalarType_FLOAT: - if val, err := strconv.ParseFloat(strings.TrimSpace(dataStr), 32); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_FloatValue{FloatValue: float32(val)}, - }, nil - } - case schema_pb.ScalarType_DOUBLE: - if val, err := strconv.ParseFloat(strings.TrimSpace(dataStr), 64); err == nil { - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: val}, - }, nil - } - case schema_pb.ScalarType_BOOL: - lowerStr := strings.ToLower(strings.TrimSpace(dataStr)) - if lowerStr == "true" || lowerStr == "1" || lowerStr == "yes" { - return &schema_pb.Value{ - Kind: &schema_pb.Value_BoolValue{BoolValue: true}, - }, nil - } else if lowerStr == "false" || lowerStr == "0" || lowerStr == "no" { - return &schema_pb.Value{ - Kind: &schema_pb.Value_BoolValue{BoolValue: false}, - }, nil - } - case schema_pb.ScalarType_BYTES: - return &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: data}, - }, nil - } - } - - return nil, fmt.Errorf("unsupported type conversion for %v", fieldType) -} - -// convertJSONValueToSchemaValue converts a JSON value to schema_pb.Value based on schema type -func (hms *HybridMessageScanner) convertJSONValueToSchemaValue(jsonValue interface{}, fieldType *schema_pb.Type) (*schema_pb.Value, error) { - switch fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - scalarType := fieldType.GetScalarType() - switch scalarType { - case schema_pb.ScalarType_STRING: - if str, ok := jsonValue.(string); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str}, - }, nil - } - // Convert other types to string - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: fmt.Sprintf("%v", jsonValue)}, - }, nil - case schema_pb.ScalarType_INT32: - if num, ok := jsonValue.(float64); ok { // JSON numbers are float64 - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int32Value{Int32Value: int32(num)}, - }, nil - } - case schema_pb.ScalarType_INT64: - if num, ok := jsonValue.(float64); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(num)}, - }, nil - } - case schema_pb.ScalarType_FLOAT: - if num, ok := jsonValue.(float64); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_FloatValue{FloatValue: float32(num)}, - }, nil - } - case schema_pb.ScalarType_DOUBLE: - if num, ok := jsonValue.(float64); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_DoubleValue{DoubleValue: num}, - }, nil - } - case schema_pb.ScalarType_BOOL: - if boolVal, ok := jsonValue.(bool); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_BoolValue{BoolValue: boolVal}, - }, nil - } - case schema_pb.ScalarType_BYTES: - if str, ok := jsonValue.(string); ok { - return &schema_pb.Value{ - Kind: &schema_pb.Value_BytesValue{BytesValue: []byte(str)}, - }, nil - } - } - } - - return nil, fmt.Errorf("incompatible JSON value type %T for schema type %v", jsonValue, fieldType) -} - -// ConvertToSQLResult converts HybridScanResults to SQL query results -func (hms *HybridMessageScanner) ConvertToSQLResult(results []HybridScanResult, columns []string) *QueryResult { - if len(results) == 0 { - return &QueryResult{ - Columns: columns, - Rows: [][]sqltypes.Value{}, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } - } - - // Determine columns if not specified - if len(columns) == 0 { - columnSet := make(map[string]bool) - for _, result := range results { - for columnName := range result.Values { - columnSet[columnName] = true - } - } - - columns = make([]string, 0, len(columnSet)) - for columnName := range columnSet { - columns = append(columns, columnName) - } - - // If no data columns were found, include system columns so we have something to display - if len(columns) == 0 { - columns = []string{SW_DISPLAY_NAME_TIMESTAMP, SW_COLUMN_NAME_KEY} - } - } - - // Convert to SQL rows - rows := make([][]sqltypes.Value, len(results)) - for i, result := range results { - row := make([]sqltypes.Value, len(columns)) - for j, columnName := range columns { - switch columnName { - case SW_COLUMN_NAME_SOURCE: - row[j] = sqltypes.NewVarChar(result.Source) - case SW_COLUMN_NAME_TIMESTAMP, SW_DISPLAY_NAME_TIMESTAMP: - // Format timestamp as proper timestamp type instead of raw nanoseconds - row[j] = hms.engine.formatTimestampColumn(result.Timestamp) - case SW_COLUMN_NAME_KEY: - row[j] = sqltypes.NewVarBinary(string(result.Key)) - default: - if value, exists := result.Values[columnName]; exists { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } - } - rows[i] = row - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } -} - -// ConvertToSQLResultWithMixedColumns handles SELECT *, specific_columns queries -// Combines auto-discovered columns (from *) with explicitly requested columns -func (hms *HybridMessageScanner) ConvertToSQLResultWithMixedColumns(results []HybridScanResult, explicitColumns []string) *QueryResult { - if len(results) == 0 { - // For empty results, combine auto-discovered columns with explicit ones - columnSet := make(map[string]bool) - - // Add explicit columns first - for _, col := range explicitColumns { - columnSet[col] = true - } - - // Build final column list - columns := make([]string, 0, len(columnSet)) - for col := range columnSet { - columns = append(columns, col) - } - - return &QueryResult{ - Columns: columns, - Rows: [][]sqltypes.Value{}, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } - } - - // Auto-discover columns from data (like SELECT *) - autoColumns := make(map[string]bool) - for _, result := range results { - for columnName := range result.Values { - autoColumns[columnName] = true - } - } - - // Combine auto-discovered and explicit columns - columnSet := make(map[string]bool) - - // Add auto-discovered columns first (regular data columns) - for col := range autoColumns { - columnSet[col] = true - } - - // Add explicit columns (may include system columns like _source) - for _, col := range explicitColumns { - columnSet[col] = true - } - - // Build final column list - columns := make([]string, 0, len(columnSet)) - for col := range columnSet { - columns = append(columns, col) - } - - // If no data columns were found and no explicit columns specified, include system columns - if len(columns) == 0 { - columns = []string{SW_DISPLAY_NAME_TIMESTAMP, SW_COLUMN_NAME_KEY} - } - - // Convert to SQL rows - rows := make([][]sqltypes.Value, len(results)) - for i, result := range results { - row := make([]sqltypes.Value, len(columns)) - for j, columnName := range columns { - switch columnName { - case SW_COLUMN_NAME_TIMESTAMP: - row[j] = sqltypes.NewInt64(result.Timestamp) - case SW_COLUMN_NAME_KEY: - row[j] = sqltypes.NewVarBinary(string(result.Key)) - case SW_COLUMN_NAME_SOURCE: - row[j] = sqltypes.NewVarChar(result.Source) - default: - // Regular data column - if value, exists := result.Values[columnName]; exists { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } - } - rows[i] = row - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - Database: hms.topic.Namespace, - Table: hms.topic.Name, - } -} - -// ReadParquetStatistics efficiently reads column statistics from parquet files -// without scanning the full file content - uses parquet's built-in metadata -func (h *HybridMessageScanner) ReadParquetStatistics(partitionPath string) ([]*ParquetFileStats, error) { - var fileStats []*ParquetFileStats - - // Use the same chunk cache as the logstore package - chunkCache := chunk_cache.NewChunkCacheInMemory(256) - lookupFileIdFn := filer.LookupFn(h.filerClient) - - err := filer_pb.ReadDirAllEntries(context.Background(), h.filerClient, util.FullPath(partitionPath), "", func(entry *filer_pb.Entry, isLast bool) error { - // Only process parquet files - if entry.IsDirectory || !strings.HasSuffix(entry.Name, ".parquet") { - return nil - } - - // Extract statistics from this parquet file - stats, err := h.extractParquetFileStats(entry, lookupFileIdFn, chunkCache) - if err != nil { - // Log error but continue processing other files - fmt.Printf("Warning: failed to extract stats from %s: %v\n", entry.Name, err) - return nil - } - - if stats != nil { - fileStats = append(fileStats, stats) - } - return nil - }) - - return fileStats, err -} - -// extractParquetFileStats extracts column statistics from a single parquet file -func (h *HybridMessageScanner) extractParquetFileStats(entry *filer_pb.Entry, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunkCache *chunk_cache.ChunkCacheInMemory) (*ParquetFileStats, error) { - // Create reader for the parquet file - fileSize := filer.FileSize(entry) - visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(context.Background(), lookupFileIdFn, entry.Chunks, 0, int64(fileSize)) - chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize)) - readerCache := filer.NewReaderCache(32, chunkCache, lookupFileIdFn) - readerAt := filer.NewChunkReaderAtFromClient(context.Background(), readerCache, chunkViews, int64(fileSize)) - - // Create parquet reader - this only reads metadata, not data - parquetReader := parquet.NewReader(readerAt) - defer parquetReader.Close() - - fileView := parquetReader.File() - - fileStats := &ParquetFileStats{ - FileName: entry.Name, - RowCount: fileView.NumRows(), - ColumnStats: make(map[string]*ParquetColumnStats), - } - // Populate optional min/max from filer extended attributes (writer stores ns timestamps) - if entry != nil && entry.Extended != nil { - if minBytes, ok := entry.Extended[mq.ExtendedAttrTimestampMin]; ok && len(minBytes) == 8 { - fileStats.MinTimestampNs = int64(binary.BigEndian.Uint64(minBytes)) - } - if maxBytes, ok := entry.Extended[mq.ExtendedAttrTimestampMax]; ok && len(maxBytes) == 8 { - fileStats.MaxTimestampNs = int64(binary.BigEndian.Uint64(maxBytes)) - } - } - - // Get schema information - schema := fileView.Schema() - - // Process each row group - rowGroups := fileView.RowGroups() - for _, rowGroup := range rowGroups { - columnChunks := rowGroup.ColumnChunks() - - // Process each column chunk - for i, chunk := range columnChunks { - // Get column name from schema - columnName := h.getColumnNameFromSchema(schema, i) - if columnName == "" { - continue - } - - // Try to get column statistics - columnIndex, err := chunk.ColumnIndex() - if err != nil { - // No column index available - skip this column - continue - } - - // Extract min/max values from the first page (for simplicity) - // In a more sophisticated implementation, we could aggregate across all pages - numPages := columnIndex.NumPages() - if numPages == 0 { - continue - } - - minParquetValue := columnIndex.MinValue(0) - maxParquetValue := columnIndex.MaxValue(numPages - 1) - nullCount := int64(0) - - // Aggregate null counts across all pages - for pageIdx := 0; pageIdx < numPages; pageIdx++ { - nullCount += columnIndex.NullCount(pageIdx) - } - - // Convert parquet values to schema_pb.Value - minValue, err := h.convertParquetValueToSchemaValue(minParquetValue) - if err != nil { - continue - } - - maxValue, err := h.convertParquetValueToSchemaValue(maxParquetValue) - if err != nil { - continue - } - - // Store column statistics (aggregate across row groups if column already exists) - if existingStats, exists := fileStats.ColumnStats[columnName]; exists { - // Update existing statistics - if h.compareSchemaValues(minValue, existingStats.MinValue) < 0 { - existingStats.MinValue = minValue - } - if h.compareSchemaValues(maxValue, existingStats.MaxValue) > 0 { - existingStats.MaxValue = maxValue - } - existingStats.NullCount += nullCount - } else { - // Create new column statistics - fileStats.ColumnStats[columnName] = &ParquetColumnStats{ - ColumnName: columnName, - MinValue: minValue, - MaxValue: maxValue, - NullCount: nullCount, - RowCount: rowGroup.NumRows(), - } - } - } - } - - return fileStats, nil -} - -// getColumnNameFromSchema extracts column name from parquet schema by index -func (h *HybridMessageScanner) getColumnNameFromSchema(schema *parquet.Schema, columnIndex int) string { - // Get the leaf columns in order - var columnNames []string - h.collectColumnNames(schema.Fields(), &columnNames) - - if columnIndex >= 0 && columnIndex < len(columnNames) { - return columnNames[columnIndex] - } - return "" -} - -// collectColumnNames recursively collects leaf column names from schema -func (h *HybridMessageScanner) collectColumnNames(fields []parquet.Field, names *[]string) { - for _, field := range fields { - if len(field.Fields()) == 0 { - // This is a leaf field (no sub-fields) - *names = append(*names, field.Name()) - } else { - // This is a group - recurse - h.collectColumnNames(field.Fields(), names) - } - } -} - -// convertParquetValueToSchemaValue converts parquet.Value to schema_pb.Value -func (h *HybridMessageScanner) convertParquetValueToSchemaValue(pv parquet.Value) (*schema_pb.Value, error) { - switch pv.Kind() { - case parquet.Boolean: - return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: pv.Boolean()}}, nil - case parquet.Int32: - return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: pv.Int32()}}, nil - case parquet.Int64: - return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: pv.Int64()}}, nil - case parquet.Float: - return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: pv.Float()}}, nil - case parquet.Double: - return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: pv.Double()}}, nil - case parquet.ByteArray: - return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: pv.ByteArray()}}, nil - default: - return nil, fmt.Errorf("unsupported parquet value kind: %v", pv.Kind()) - } -} - -// compareSchemaValues compares two schema_pb.Value objects -func (h *HybridMessageScanner) compareSchemaValues(v1, v2 *schema_pb.Value) int { - if v1 == nil && v2 == nil { - return 0 - } - if v1 == nil { - return -1 - } - if v2 == nil { - return 1 - } - - // Extract raw values and compare - raw1 := h.extractRawValueFromSchema(v1) - raw2 := h.extractRawValueFromSchema(v2) - - return h.compareRawValues(raw1, raw2) -} - -// extractRawValueFromSchema extracts the raw value from schema_pb.Value -func (h *HybridMessageScanner) extractRawValueFromSchema(value *schema_pb.Value) interface{} { - switch v := value.Kind.(type) { - case *schema_pb.Value_BoolValue: - return v.BoolValue - case *schema_pb.Value_Int32Value: - return v.Int32Value - case *schema_pb.Value_Int64Value: - return v.Int64Value - case *schema_pb.Value_FloatValue: - return v.FloatValue - case *schema_pb.Value_DoubleValue: - return v.DoubleValue - case *schema_pb.Value_BytesValue: - return string(v.BytesValue) // Convert to string for comparison - case *schema_pb.Value_StringValue: - return v.StringValue - } - return nil -} - -// compareRawValues compares two raw values -func (h *HybridMessageScanner) compareRawValues(v1, v2 interface{}) int { - // Handle nil cases - if v1 == nil && v2 == nil { - return 0 - } - if v1 == nil { - return -1 - } - if v2 == nil { - return 1 - } - - // Compare based on type - switch val1 := v1.(type) { - case bool: - if val2, ok := v2.(bool); ok { - if val1 == val2 { - return 0 - } - if val1 { - return 1 - } - return -1 - } - case int32: - if val2, ok := v2.(int32); ok { - if val1 < val2 { - return -1 - } else if val1 > val2 { - return 1 - } - return 0 - } - case int64: - if val2, ok := v2.(int64); ok { - if val1 < val2 { - return -1 - } else if val1 > val2 { - return 1 - } - return 0 - } - case float32: - if val2, ok := v2.(float32); ok { - if val1 < val2 { - return -1 - } else if val1 > val2 { - return 1 - } - return 0 - } - case float64: - if val2, ok := v2.(float64); ok { - if val1 < val2 { - return -1 - } else if val1 > val2 { - return 1 - } - return 0 - } - case string: - if val2, ok := v2.(string); ok { - if val1 < val2 { - return -1 - } else if val1 > val2 { - return 1 - } - return 0 - } - } - - // Default: try string comparison - str1 := fmt.Sprintf("%v", v1) - str2 := fmt.Sprintf("%v", v2) - if str1 < str2 { - return -1 - } else if str1 > str2 { - return 1 - } - return 0 -} - -// streamingMerge merges multiple sorted data sources using a heap-based approach -// This provides memory-efficient merging without loading all data into memory -func (hms *HybridMessageScanner) streamingMerge(dataSources []StreamingDataSource, limit int) ([]HybridScanResult, error) { - if len(dataSources) == 0 { - return nil, nil - } - - var results []HybridScanResult - mergeHeap := &StreamingMergeHeap{} - heap.Init(mergeHeap) - - // Initialize heap with first item from each data source - for i, source := range dataSources { - if source.HasMore() { - result, err := source.Next() - if err != nil { - // Close all sources and return error - for _, s := range dataSources { - s.Close() - } - return nil, fmt.Errorf("failed to read from data source %d: %v", i, err) - } - if result != nil { - heap.Push(mergeHeap, &StreamingMergeItem{ - Result: result, - SourceID: i, - DataSource: source, - }) - } - } - } - - // Process results in chronological order - for mergeHeap.Len() > 0 { - // Get next chronologically ordered result - item := heap.Pop(mergeHeap).(*StreamingMergeItem) - results = append(results, *item.Result) - - // Check limit - if limit > 0 && len(results) >= limit { - break - } - - // Try to get next item from the same data source - if item.DataSource.HasMore() { - nextResult, err := item.DataSource.Next() - if err != nil { - // Log error but continue with other sources - fmt.Printf("Warning: Error reading next item from source %d: %v\n", item.SourceID, err) - } else if nextResult != nil { - heap.Push(mergeHeap, &StreamingMergeItem{ - Result: nextResult, - SourceID: item.SourceID, - DataSource: item.DataSource, - }) - } - } - } - - // Close all data sources - for _, source := range dataSources { - source.Close() - } - - return results, nil -} - -// SliceDataSource wraps a pre-loaded slice of results as a StreamingDataSource -// This is used for unflushed data that is already loaded into memory -type SliceDataSource struct { - results []HybridScanResult - index int -} - -func NewSliceDataSource(results []HybridScanResult) *SliceDataSource { - return &SliceDataSource{ - results: results, - index: 0, - } -} - -func (s *SliceDataSource) Next() (*HybridScanResult, error) { - if s.index >= len(s.results) { - return nil, nil - } - result := &s.results[s.index] - s.index++ - return result, nil -} - -func (s *SliceDataSource) HasMore() bool { - return s.index < len(s.results) -} - -func (s *SliceDataSource) Close() error { - return nil // Nothing to clean up for slice-based source -} - -// StreamingFlushedDataSource provides streaming access to flushed data -type StreamingFlushedDataSource struct { - hms *HybridMessageScanner - partition topic.Partition - options HybridScanOptions - mergedReadFn func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) - resultChan chan *HybridScanResult - errorChan chan error - doneChan chan struct{} - started bool - finished bool - closed int32 // atomic flag to prevent double close - mu sync.RWMutex -} - -func NewStreamingFlushedDataSource(hms *HybridMessageScanner, partition topic.Partition, options HybridScanOptions) *StreamingFlushedDataSource { - mergedReadFn := logstore.GenMergedReadFunc(hms.filerClient, hms.topic, partition) - - return &StreamingFlushedDataSource{ - hms: hms, - partition: partition, - options: options, - mergedReadFn: mergedReadFn, - resultChan: make(chan *HybridScanResult, 100), // Buffer for better performance - errorChan: make(chan error, 1), - doneChan: make(chan struct{}), - started: false, - finished: false, - } -} - -func (s *StreamingFlushedDataSource) startStreaming() { - if s.started { - return - } - s.started = true - - go func() { - defer func() { - // Use atomic flag to ensure channels are only closed once - if atomic.CompareAndSwapInt32(&s.closed, 0, 1) { - close(s.resultChan) - close(s.errorChan) - close(s.doneChan) - } - }() - - // Set up time range for scanning - startTime := time.Unix(0, s.options.StartTimeNs) - if s.options.StartTimeNs == 0 { - startTime = time.Unix(0, 0) - } - - stopTsNs := s.options.StopTimeNs - // For SQL queries, stopTsNs = 0 means "no stop time restriction" - // This is different from message queue consumers which want to stop at "now" - // We detect SQL context by checking if we have a predicate function - if stopTsNs == 0 && s.options.Predicate == nil { - // Only set to current time for non-SQL queries (message queue consumers) - stopTsNs = time.Now().UnixNano() - } - // If stopTsNs is still 0, it means this is a SQL query that wants unrestricted scanning - - // Message processing function - eachLogEntryFn := func(logEntry *filer_pb.LogEntry) (isDone bool, err error) { - // Pre-decode DataMessage for reuse in both control check and conversion - var dataMessage *mq_pb.DataMessage - if len(logEntry.Data) > 0 { - dataMessage = &mq_pb.DataMessage{} - if err := proto.Unmarshal(logEntry.Data, dataMessage); err != nil { - dataMessage = nil // Failed to decode, treat as raw data - } - } - - // Skip control entries without actual data - if s.hms.isControlEntryWithDecoded(logEntry, dataMessage) { - return false, nil // Skip this entry - } - - // Convert log entry to schema_pb.RecordValue for consistent processing - recordValue, source, convertErr := s.hms.convertLogEntryToRecordValueWithDecoded(logEntry, dataMessage) - if convertErr != nil { - return false, fmt.Errorf("failed to convert log entry: %v", convertErr) - } - - // Apply predicate filtering (WHERE clause) - if s.options.Predicate != nil && !s.options.Predicate(recordValue) { - return false, nil // Skip this message - } - - // Extract system columns - timestamp := recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value() - key := recordValue.Fields[SW_COLUMN_NAME_KEY].GetBytesValue() - - // Apply column projection - values := make(map[string]*schema_pb.Value) - if len(s.options.Columns) == 0 { - // Select all columns (excluding system columns from user view) - for name, value := range recordValue.Fields { - if name != SW_COLUMN_NAME_TIMESTAMP && name != SW_COLUMN_NAME_KEY { - values[name] = value - } - } - } else { - // Select specified columns only - for _, columnName := range s.options.Columns { - if value, exists := recordValue.Fields[columnName]; exists { - values[columnName] = value - } - } - } - - result := &HybridScanResult{ - Values: values, - Timestamp: timestamp, - Key: key, - Source: source, - } - - // Check if already closed before trying to send - if atomic.LoadInt32(&s.closed) != 0 { - return true, nil // Stop processing if closed - } - - // Send result to channel with proper handling of closed channels - select { - case s.resultChan <- result: - return false, nil - case <-s.doneChan: - return true, nil // Stop processing if closed - default: - // Check again if closed (in case it was closed between the atomic check and select) - if atomic.LoadInt32(&s.closed) != 0 { - return true, nil - } - // If not closed, try sending again with blocking select - select { - case s.resultChan <- result: - return false, nil - case <-s.doneChan: - return true, nil - } - } - } - - // Start scanning from the specified position - startPosition := log_buffer.MessagePosition{Time: startTime} - _, _, err := s.mergedReadFn(startPosition, stopTsNs, eachLogEntryFn) - - if err != nil { - // Only try to send error if not already closed - if atomic.LoadInt32(&s.closed) == 0 { - select { - case s.errorChan <- fmt.Errorf("flushed data scan failed: %v", err): - case <-s.doneChan: - default: - // Channel might be full or closed, ignore - } - } - } - - s.finished = true - }() -} - -func (s *StreamingFlushedDataSource) Next() (*HybridScanResult, error) { - if !s.started { - s.startStreaming() - } - - select { - case result, ok := <-s.resultChan: - if !ok { - return nil, nil // No more results - } - return result, nil - case err := <-s.errorChan: - return nil, err - case <-s.doneChan: - return nil, nil - } -} - -func (s *StreamingFlushedDataSource) HasMore() bool { - if !s.started { - return true // Haven't started yet, so potentially has data - } - return !s.finished || len(s.resultChan) > 0 -} - -func (s *StreamingFlushedDataSource) Close() error { - // Use atomic flag to ensure channels are only closed once - if atomic.CompareAndSwapInt32(&s.closed, 0, 1) { - close(s.doneChan) - close(s.resultChan) - close(s.errorChan) - } - return nil -} - -// mergeSort efficiently sorts HybridScanResult slice by timestamp using merge sort algorithm -func (hms *HybridMessageScanner) mergeSort(results []HybridScanResult, left, right int) { - if left < right { - mid := left + (right-left)/2 - - // Recursively sort both halves - hms.mergeSort(results, left, mid) - hms.mergeSort(results, mid+1, right) - - // Merge the sorted halves - hms.merge(results, left, mid, right) - } -} - -// merge combines two sorted subarrays into a single sorted array -func (hms *HybridMessageScanner) merge(results []HybridScanResult, left, mid, right int) { - // Create temporary arrays for the two subarrays - leftArray := make([]HybridScanResult, mid-left+1) - rightArray := make([]HybridScanResult, right-mid) - - // Copy data to temporary arrays - copy(leftArray, results[left:mid+1]) - copy(rightArray, results[mid+1:right+1]) - - // Merge the temporary arrays back into results[left..right] - i, j, k := 0, 0, left - - for i < len(leftArray) && j < len(rightArray) { - if leftArray[i].Timestamp <= rightArray[j].Timestamp { - results[k] = leftArray[i] - i++ - } else { - results[k] = rightArray[j] - j++ - } - k++ - } - - // Copy remaining elements of leftArray, if any - for i < len(leftArray) { - results[k] = leftArray[i] - i++ - k++ - } - - // Copy remaining elements of rightArray, if any - for j < len(rightArray) { - results[k] = rightArray[j] - j++ - k++ - } -} diff --git a/weed/query/engine/hybrid_test.go b/weed/query/engine/hybrid_test.go deleted file mode 100644 index 74ef256c7..000000000 --- a/weed/query/engine/hybrid_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strings" - "testing" -) - -func TestSQLEngine_HybridSelectBasic(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT with _source column to show both live and archived data - result, err := engine.ExecuteSQL(context.Background(), "SELECT *, _source FROM user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - if len(result.Columns) == 0 { - t.Error("Expected columns in result") - } - - // In mock environment, we only get live_log data from unflushed messages - // parquet_archive data would come from parquet files in a real system - if len(result.Rows) == 0 { - t.Error("Expected rows in result") - } - - // Check that we have the _source column showing data source - hasSourceColumn := false - sourceColumnIndex := -1 - for i, column := range result.Columns { - if column == SW_COLUMN_NAME_SOURCE { - hasSourceColumn = true - sourceColumnIndex = i - break - } - } - - if !hasSourceColumn { - t.Skip("_source column not available in fallback mode - test requires real SeaweedFS cluster") - } - - // Verify we have the expected data sources (in mock environment, only live_log) - if hasSourceColumn && sourceColumnIndex >= 0 { - foundLiveLog := false - - for _, row := range result.Rows { - if sourceColumnIndex < len(row) { - source := row[sourceColumnIndex].ToString() - if source == "live_log" { - foundLiveLog = true - } - // In mock environment, all data comes from unflushed messages (live_log) - // In a real system, we would also see parquet_archive from parquet files - } - } - - if !foundLiveLog { - t.Error("Expected to find live_log data source in results") - } - - t.Logf("Found live_log data source from unflushed messages") - } -} - -func TestSQLEngine_HybridSelectWithLimit(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT with LIMIT on hybrid data - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 2") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have exactly 2 rows due to LIMIT - if len(result.Rows) != 2 { - t.Errorf("Expected 2 rows with LIMIT 2, got %d", len(result.Rows)) - } -} - -func TestSQLEngine_HybridSelectDifferentTables(t *testing.T) { - engine := NewTestSQLEngine() - - // Test both user_events and system_logs tables - tables := []string{"user_events", "system_logs"} - - for _, tableName := range tables { - result, err := engine.ExecuteSQL(context.Background(), fmt.Sprintf("SELECT *, _source FROM %s", tableName)) - if err != nil { - t.Errorf("Error querying hybrid table %s: %v", tableName, err) - continue - } - - if result.Error != nil { - t.Errorf("Query error for hybrid table %s: %v", tableName, result.Error) - continue - } - - if len(result.Columns) == 0 { - t.Errorf("No columns returned for hybrid table %s", tableName) - } - - if len(result.Rows) == 0 { - t.Errorf("No rows returned for hybrid table %s", tableName) - } - - // Check for _source column - hasSourceColumn := false - for _, column := range result.Columns { - if column == "_source" { - hasSourceColumn = true - break - } - } - - if !hasSourceColumn { - t.Logf("Table %s missing _source column - running in fallback mode", tableName) - } - - t.Logf("Table %s: %d columns, %d rows with hybrid data sources", tableName, len(result.Columns), len(result.Rows)) - } -} - -func TestSQLEngine_HybridDataSource(t *testing.T) { - engine := NewTestSQLEngine() - - // Test that we can distinguish between live and archived data - result, err := engine.ExecuteSQL(context.Background(), "SELECT user_id, event_type, _source FROM user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Find the _source column - sourceColumnIndex := -1 - eventTypeColumnIndex := -1 - - for i, column := range result.Columns { - switch column { - case "_source": - sourceColumnIndex = i - case "event_type": - eventTypeColumnIndex = i - } - } - - if sourceColumnIndex == -1 { - t.Skip("Could not find _source column - test requires real SeaweedFS cluster") - } - - if eventTypeColumnIndex == -1 { - t.Fatal("Could not find event_type column") - } - - // Check the data characteristics - liveEventFound := false - archivedEventFound := false - - for _, row := range result.Rows { - if sourceColumnIndex < len(row) && eventTypeColumnIndex < len(row) { - source := row[sourceColumnIndex].ToString() - eventType := row[eventTypeColumnIndex].ToString() - - if source == "live_log" && strings.Contains(eventType, "live_") { - liveEventFound = true - t.Logf("Found live event: %s from %s", eventType, source) - } - - if source == "parquet_archive" && strings.Contains(eventType, "archived_") { - archivedEventFound = true - t.Logf("Found archived event: %s from %s", eventType, source) - } - } - } - - if !liveEventFound { - t.Error("Expected to find live events with live_ prefix") - } - - if !archivedEventFound { - t.Error("Expected to find archived events with archived_ prefix") - } -} - -func TestSQLEngine_HybridSystemLogs(t *testing.T) { - engine := NewTestSQLEngine() - - // Test system_logs with hybrid data - result, err := engine.ExecuteSQL(context.Background(), "SELECT level, message, service, _source FROM system_logs") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have both live and archived system logs - if len(result.Rows) < 2 { - t.Errorf("Expected at least 2 system log entries, got %d", len(result.Rows)) - } - - // Find column indices - levelIndex := -1 - sourceIndex := -1 - - for i, column := range result.Columns { - switch column { - case "level": - levelIndex = i - case "_source": - sourceIndex = i - } - } - - // Verify we have both live and archived system logs - foundLive := false - foundArchived := false - - for _, row := range result.Rows { - if sourceIndex >= 0 && sourceIndex < len(row) { - source := row[sourceIndex].ToString() - - if source == "live_log" { - foundLive = true - if levelIndex >= 0 && levelIndex < len(row) { - level := row[levelIndex].ToString() - t.Logf("Live system log: level=%s", level) - } - } - - if source == "parquet_archive" { - foundArchived = true - if levelIndex >= 0 && levelIndex < len(row) { - level := row[levelIndex].ToString() - t.Logf("Archived system log: level=%s", level) - } - } - } - } - - if !foundLive { - t.Log("No live system logs found - running in fallback mode") - } - - if !foundArchived { - t.Log("No archived system logs found - running in fallback mode") - } -} - -func TestSQLEngine_HybridSelectWithTimeImplications(t *testing.T) { - engine := NewTestSQLEngine() - - // Test that demonstrates the time-based nature of hybrid data - // Live data should be more recent than archived data - result, err := engine.ExecuteSQL(context.Background(), "SELECT event_type, _source FROM user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // This test documents that hybrid scanning provides a complete view - // of both recent (live) and historical (archived) data in a single query - liveCount := 0 - archivedCount := 0 - - sourceIndex := -1 - for i, column := range result.Columns { - if column == "_source" { - sourceIndex = i - break - } - } - - if sourceIndex >= 0 { - for _, row := range result.Rows { - if sourceIndex < len(row) { - source := row[sourceIndex].ToString() - switch source { - case "live_log": - liveCount++ - case "parquet_archive": - archivedCount++ - } - } - } - } - - t.Logf("Hybrid query results: %d live messages, %d archived messages", liveCount, archivedCount) - - if liveCount == 0 && archivedCount == 0 { - t.Log("No live or archived messages found - running in fallback mode") - } -} diff --git a/weed/query/engine/mock_test.go b/weed/query/engine/mock_test.go deleted file mode 100644 index 697c98494..000000000 --- a/weed/query/engine/mock_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -func TestMockBrokerClient_BasicFunctionality(t *testing.T) { - mockBroker := NewMockBrokerClient() - - // Test ListNamespaces - namespaces, err := mockBroker.ListNamespaces(context.Background()) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if len(namespaces) != 2 { - t.Errorf("Expected 2 namespaces, got %d", len(namespaces)) - } - - // Test ListTopics - topics, err := mockBroker.ListTopics(context.Background(), "default") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if len(topics) != 2 { - t.Errorf("Expected 2 topics in default namespace, got %d", len(topics)) - } - - // Test GetTopicSchema - schema, keyColumns, _, err := mockBroker.GetTopicSchema(context.Background(), "default", "user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if len(schema.Fields) != 3 { - t.Errorf("Expected 3 fields in user_events schema, got %d", len(schema.Fields)) - } - if len(keyColumns) == 0 { - t.Error("Expected at least one key column") - } -} - -func TestMockBrokerClient_FailureScenarios(t *testing.T) { - mockBroker := NewMockBrokerClient() - - // Configure mock to fail - mockBroker.SetFailure(true, "simulated broker failure") - - // Test that operations fail as expected - _, err := mockBroker.ListNamespaces(context.Background()) - if err == nil { - t.Error("Expected error when mock is configured to fail") - } - - _, err = mockBroker.ListTopics(context.Background(), "default") - if err == nil { - t.Error("Expected error when mock is configured to fail") - } - - _, _, _, err = mockBroker.GetTopicSchema(context.Background(), "default", "user_events") - if err == nil { - t.Error("Expected error when mock is configured to fail") - } - - // Test that filer client also fails - _, err = mockBroker.GetFilerClient() - if err == nil { - t.Error("Expected error when mock is configured to fail") - } - - // Reset mock to working state - mockBroker.SetFailure(false, "") - - // Test that operations work again - namespaces, err := mockBroker.ListNamespaces(context.Background()) - if err != nil { - t.Errorf("Expected no error after resetting mock, got %v", err) - } - if len(namespaces) == 0 { - t.Error("Expected namespaces after resetting mock") - } -} - -func TestMockBrokerClient_TopicManagement(t *testing.T) { - mockBroker := NewMockBrokerClient() - - // Test ConfigureTopic (add a new topic) - err := mockBroker.ConfigureTopic(context.Background(), "test", "new-topic", 1, nil, []string{}) - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - // Verify the topic was added - topics, err := mockBroker.ListTopics(context.Background(), "test") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - foundNewTopic := false - for _, topic := range topics { - if topic == "new-topic" { - foundNewTopic = true - break - } - } - if !foundNewTopic { - t.Error("Expected new-topic to be in the topics list") - } - - // Test DeleteTopic - err = mockBroker.DeleteTopic(context.Background(), "test", "new-topic") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - // Verify the topic was removed - topics, err = mockBroker.ListTopics(context.Background(), "test") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - for _, topic := range topics { - if topic == "new-topic" { - t.Error("Expected new-topic to be removed from topics list") - } - } -} - -func TestSQLEngineWithMockBrokerClient_ErrorHandling(t *testing.T) { - // Create an engine with a failing mock broker - mockBroker := NewMockBrokerClient() - mockBroker.SetFailure(true, "mock broker unavailable") - - catalog := &SchemaCatalog{ - databases: make(map[string]*DatabaseInfo), - currentDatabase: "default", - brokerClient: mockBroker, - } - - engine := &SQLEngine{catalog: catalog} - - // Test that queries fail gracefully with proper error messages - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM nonexistent_topic") - - // ExecuteSQL itself should not return an error, but the result should contain an error - if err != nil { - // If ExecuteSQL returns an error, that's also acceptable for this test - t.Logf("ExecuteSQL returned error (acceptable): %v", err) - return - } - - // Should have an error in the result when broker is unavailable - if result.Error == nil { - t.Error("Expected error in query result when broker is unavailable") - } else { - t.Logf("Got expected error in result: %v", result.Error) - } -} diff --git a/weed/query/engine/mocks_test.go b/weed/query/engine/mocks_test.go deleted file mode 100644 index 2f72ed9ed..000000000 --- a/weed/query/engine/mocks_test.go +++ /dev/null @@ -1,1137 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "google.golang.org/protobuf/proto" -) - -// NewTestSchemaCatalog creates a schema catalog for testing with sample data -// Uses mock clients instead of real service connections -func NewTestSchemaCatalog() *SchemaCatalog { - catalog := &SchemaCatalog{ - databases: make(map[string]*DatabaseInfo), - currentDatabase: "default", - brokerClient: NewMockBrokerClient(), // Use mock instead of nil - defaultPartitionCount: 6, // Default partition count for tests - } - - // Pre-populate with sample data to avoid service discovery requirements - initTestSampleData(catalog) - return catalog -} - -// initTestSampleData populates the catalog with sample schema data for testing -// This function is only available in test builds and not in production -func initTestSampleData(c *SchemaCatalog) { - // Create sample databases and tables - c.databases["default"] = &DatabaseInfo{ - Name: "default", - Tables: map[string]*TableInfo{ - "user_events": { - Name: "user_events", - Columns: []ColumnInfo{ - {Name: "user_id", Type: "VARCHAR(100)", Nullable: true}, - {Name: "event_type", Type: "VARCHAR(50)", Nullable: true}, - {Name: "data", Type: "TEXT", Nullable: true}, - // System columns - hidden by default in SELECT * - {Name: SW_COLUMN_NAME_TIMESTAMP, Type: "BIGINT", Nullable: false}, - {Name: SW_COLUMN_NAME_KEY, Type: "VARCHAR(255)", Nullable: true}, - {Name: SW_COLUMN_NAME_SOURCE, Type: "VARCHAR(50)", Nullable: false}, - }, - }, - "system_logs": { - Name: "system_logs", - Columns: []ColumnInfo{ - {Name: "level", Type: "VARCHAR(10)", Nullable: true}, - {Name: "message", Type: "TEXT", Nullable: true}, - {Name: "service", Type: "VARCHAR(50)", Nullable: true}, - // System columns - {Name: SW_COLUMN_NAME_TIMESTAMP, Type: "BIGINT", Nullable: false}, - {Name: SW_COLUMN_NAME_KEY, Type: "VARCHAR(255)", Nullable: true}, - {Name: SW_COLUMN_NAME_SOURCE, Type: "VARCHAR(50)", Nullable: false}, - }, - }, - }, - } - - c.databases["test"] = &DatabaseInfo{ - Name: "test", - Tables: map[string]*TableInfo{ - "test-topic": { - Name: "test-topic", - Columns: []ColumnInfo{ - {Name: "id", Type: "INT", Nullable: true}, - {Name: "name", Type: "VARCHAR(100)", Nullable: true}, - {Name: "value", Type: "DOUBLE", Nullable: true}, - // System columns - {Name: SW_COLUMN_NAME_TIMESTAMP, Type: "BIGINT", Nullable: false}, - {Name: SW_COLUMN_NAME_KEY, Type: "VARCHAR(255)", Nullable: true}, - {Name: SW_COLUMN_NAME_SOURCE, Type: "VARCHAR(50)", Nullable: false}, - }, - }, - }, - } -} - -// TestSQLEngine wraps SQLEngine with test-specific behavior -type TestSQLEngine struct { - *SQLEngine - funcExpressions map[string]*FuncExpr // Map from column key to function expression - arithmeticExpressions map[string]*ArithmeticExpr // Map from column key to arithmetic expression -} - -// NewTestSQLEngine creates a new SQL execution engine for testing -// Does not attempt to connect to real SeaweedFS services -func NewTestSQLEngine() *TestSQLEngine { - // Initialize global HTTP client if not already done - // This is needed for reading partition data from the filer - if util_http.GetGlobalHttpClient() == nil { - util_http.InitGlobalHttpClient() - } - - engine := &SQLEngine{ - catalog: NewTestSchemaCatalog(), - } - - return &TestSQLEngine{ - SQLEngine: engine, - funcExpressions: make(map[string]*FuncExpr), - arithmeticExpressions: make(map[string]*ArithmeticExpr), - } -} - -// ExecuteSQL overrides the real implementation to use sample data for testing -func (e *TestSQLEngine) ExecuteSQL(ctx context.Context, sql string) (*QueryResult, error) { - // Clear expressions from previous executions - e.funcExpressions = make(map[string]*FuncExpr) - e.arithmeticExpressions = make(map[string]*ArithmeticExpr) - - // Parse the SQL statement - stmt, err := ParseSQL(sql) - if err != nil { - return &QueryResult{Error: err}, err - } - - // Handle different statement types - switch s := stmt.(type) { - case *SelectStatement: - return e.executeTestSelectStatement(ctx, s, sql) - default: - // For non-SELECT statements, use the original implementation - return e.SQLEngine.ExecuteSQL(ctx, sql) - } -} - -// executeTestSelectStatement handles SELECT queries with sample data -func (e *TestSQLEngine) executeTestSelectStatement(ctx context.Context, stmt *SelectStatement, sql string) (*QueryResult, error) { - // Extract table name - if len(stmt.From) != 1 { - err := fmt.Errorf("SELECT supports single table queries only") - return &QueryResult{Error: err}, err - } - - var tableName string - switch table := stmt.From[0].(type) { - case *AliasedTableExpr: - switch tableExpr := table.Expr.(type) { - case TableName: - tableName = tableExpr.Name.String() - default: - err := fmt.Errorf("unsupported table expression: %T", tableExpr) - return &QueryResult{Error: err}, err - } - default: - err := fmt.Errorf("unsupported FROM clause: %T", table) - return &QueryResult{Error: err}, err - } - - // Check if this is a known test table - switch tableName { - case "user_events", "system_logs": - return e.generateTestQueryResult(tableName, stmt, sql) - case "nonexistent_table": - err := fmt.Errorf("table %s not found", tableName) - return &QueryResult{Error: err}, err - default: - err := fmt.Errorf("table %s not found", tableName) - return &QueryResult{Error: err}, err - } -} - -// generateTestQueryResult creates a query result with sample data -func (e *TestSQLEngine) generateTestQueryResult(tableName string, stmt *SelectStatement, sql string) (*QueryResult, error) { - // Check if this is an aggregation query - if e.isAggregationQuery(stmt, sql) { - return e.handleAggregationQuery(tableName, stmt, sql) - } - - // Get sample data - allSampleData := generateSampleHybridData(tableName, HybridScanOptions{}) - - // Determine which data to return based on query context - var sampleData []HybridScanResult - - // Check if _source column is requested (indicates hybrid query) - includeArchived := e.isHybridQuery(stmt, sql) - - // Special case: OFFSET edge case tests expect only live data - // This is determined by checking for the specific pattern "LIMIT 1 OFFSET 3" - upperSQL := strings.ToUpper(sql) - isOffsetEdgeCase := strings.Contains(upperSQL, "LIMIT 1 OFFSET 3") - - if includeArchived { - // Include both live and archived data for hybrid queries - sampleData = allSampleData - } else if isOffsetEdgeCase { - // For OFFSET edge case tests, only include live_log data - for _, result := range allSampleData { - if result.Source == "live_log" { - sampleData = append(sampleData, result) - } - } - } else { - // For regular SELECT queries, include all data to match test expectations - sampleData = allSampleData - } - - // Apply WHERE clause filtering if present - if stmt.Where != nil { - predicate, err := e.SQLEngine.buildPredicate(stmt.Where.Expr) - if err != nil { - return &QueryResult{Error: fmt.Errorf("failed to build WHERE predicate: %v", err)}, err - } - - var filteredData []HybridScanResult - for _, result := range sampleData { - // Convert HybridScanResult to RecordValue format for predicate testing - recordValue := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - - // Copy all values from result to recordValue - for name, value := range result.Values { - recordValue.Fields[name] = value - } - - // Apply predicate - if predicate(recordValue) { - filteredData = append(filteredData, result) - } - } - sampleData = filteredData - } - - // Parse LIMIT and OFFSET from SQL string (test-only implementation) - limit, offset := e.parseLimitOffset(sql) - - // Apply offset first - if offset > 0 { - if offset >= len(sampleData) { - sampleData = []HybridScanResult{} - } else { - sampleData = sampleData[offset:] - } - } - - // Apply limit - if limit >= 0 { - if limit == 0 { - sampleData = []HybridScanResult{} // LIMIT 0 returns no rows - } else if limit < len(sampleData) { - sampleData = sampleData[:limit] - } - } - - // Determine columns to return - var columns []string - - if len(stmt.SelectExprs) == 1 { - if _, ok := stmt.SelectExprs[0].(*StarExpr); ok { - // SELECT * - return user columns only (system columns are hidden by default) - switch tableName { - case "user_events": - columns = []string{"id", "user_id", "event_type", "data"} - case "system_logs": - columns = []string{"level", "message", "service"} - } - } - } - - // Process specific expressions if not SELECT * - if len(columns) == 0 { - // Specific columns requested - for testing, include system columns if requested - for _, expr := range stmt.SelectExprs { - if aliasedExpr, ok := expr.(*AliasedExpr); ok { - if colName, ok := aliasedExpr.Expr.(*ColName); ok { - // Check if there's an alias, use that as column name - if aliasedExpr.As != nil && !aliasedExpr.As.IsEmpty() { - columns = append(columns, aliasedExpr.As.String()) - } else { - // Fall back to expression-based column naming - columnName := colName.Name.String() - upperColumnName := strings.ToUpper(columnName) - - // Check if this is an arithmetic expression embedded in a ColName - if arithmeticExpr := e.parseColumnLevelCalculation(columnName); arithmeticExpr != nil { - columns = append(columns, e.getArithmeticExpressionAlias(arithmeticExpr)) - } else if upperColumnName == FuncCURRENT_DATE || upperColumnName == FuncCURRENT_TIME || - upperColumnName == FuncCURRENT_TIMESTAMP || upperColumnName == FuncNOW { - // Handle datetime constants - columns = append(columns, strings.ToLower(columnName)) - } else { - columns = append(columns, columnName) - } - } - } else if arithmeticExpr, ok := aliasedExpr.Expr.(*ArithmeticExpr); ok { - // Handle arithmetic expressions like id+user_id and concatenations - // Store the arithmetic expression for evaluation later - arithmeticExprKey := fmt.Sprintf("__ARITHEXPR__%p", arithmeticExpr) - e.arithmeticExpressions[arithmeticExprKey] = arithmeticExpr - - // Check if there's an alias, use that as column name, otherwise use arithmeticExprKey - if aliasedExpr.As != nil && aliasedExpr.As.String() != "" { - aliasName := aliasedExpr.As.String() - columns = append(columns, aliasName) - // Map the alias back to the arithmetic expression key for evaluation - e.arithmeticExpressions[aliasName] = arithmeticExpr - } else { - // Use a more descriptive alias than the memory address - alias := e.getArithmeticExpressionAlias(arithmeticExpr) - columns = append(columns, alias) - // Map the descriptive alias to the arithmetic expression - e.arithmeticExpressions[alias] = arithmeticExpr - } - } else if funcExpr, ok := aliasedExpr.Expr.(*FuncExpr); ok { - // Store the function expression for evaluation later - // Use a special prefix to distinguish function expressions - funcExprKey := fmt.Sprintf("__FUNCEXPR__%p", funcExpr) - e.funcExpressions[funcExprKey] = funcExpr - - // Check if there's an alias, use that as column name, otherwise use function name - if aliasedExpr.As != nil && aliasedExpr.As.String() != "" { - aliasName := aliasedExpr.As.String() - columns = append(columns, aliasName) - // Map the alias back to the function expression key for evaluation - e.funcExpressions[aliasName] = funcExpr - } else { - // Use proper function alias based on function type - funcName := strings.ToUpper(funcExpr.Name.String()) - var functionAlias string - if e.isDateTimeFunction(funcName) { - functionAlias = e.getDateTimeFunctionAlias(funcExpr) - } else { - functionAlias = e.getStringFunctionAlias(funcExpr) - } - columns = append(columns, functionAlias) - // Map the function alias to the expression for evaluation - e.funcExpressions[functionAlias] = funcExpr - } - } else if sqlVal, ok := aliasedExpr.Expr.(*SQLVal); ok { - // Handle string literals like 'good', 123 - switch sqlVal.Type { - case StrVal: - alias := fmt.Sprintf("'%s'", string(sqlVal.Val)) - columns = append(columns, alias) - case IntVal, FloatVal: - alias := string(sqlVal.Val) - columns = append(columns, alias) - default: - columns = append(columns, "literal") - } - } - } - } - - // Only use fallback columns if this is a malformed query with no expressions - if len(columns) == 0 && len(stmt.SelectExprs) == 0 { - switch tableName { - case "user_events": - columns = []string{"id", "user_id", "event_type", "data"} - case "system_logs": - columns = []string{"level", "message", "service"} - } - } - } - - // Convert sample data to query result - var rows [][]sqltypes.Value - for _, result := range sampleData { - var row []sqltypes.Value - for _, columnName := range columns { - upperColumnName := strings.ToUpper(columnName) - - // IMPORTANT: Check stored arithmetic expressions FIRST (before legacy parsing) - if arithmeticExpr, exists := e.arithmeticExpressions[columnName]; exists { - // Handle arithmetic expressions by evaluating them with the actual engine - if value, err := e.evaluateArithmeticExpression(arithmeticExpr, result); err == nil && value != nil { - row = append(row, convertSchemaValueToSQLValue(value)) - } else { - // Fallback to manual calculation for id*amount that fails in CockroachDB evaluation - if columnName == "id*amount" { - if idVal := result.Values["id"]; idVal != nil { - idValue := idVal.GetInt64Value() - amountValue := 100.0 // Default amount - if amountVal := result.Values["amount"]; amountVal != nil { - if amountVal.GetDoubleValue() != 0 { - amountValue = amountVal.GetDoubleValue() - } else if amountVal.GetFloatValue() != 0 { - amountValue = float64(amountVal.GetFloatValue()) - } - } - row = append(row, sqltypes.NewFloat64(float64(idValue)*amountValue)) - } else { - row = append(row, sqltypes.NULL) - } - } else { - row = append(row, sqltypes.NULL) - } - } - } else if arithmeticExpr := e.parseColumnLevelCalculation(columnName); arithmeticExpr != nil { - // Evaluate the arithmetic expression (legacy fallback) - if value, err := e.evaluateArithmeticExpression(arithmeticExpr, result); err == nil && value != nil { - row = append(row, convertSchemaValueToSQLValue(value)) - } else { - row = append(row, sqltypes.NULL) - } - } else if upperColumnName == FuncCURRENT_DATE || upperColumnName == FuncCURRENT_TIME || - upperColumnName == FuncCURRENT_TIMESTAMP || upperColumnName == FuncNOW { - // Handle datetime constants - var value *schema_pb.Value - var err error - switch upperColumnName { - case FuncCURRENT_DATE: - value, err = e.CurrentDate() - case FuncCURRENT_TIME: - value, err = e.CurrentTime() - case FuncCURRENT_TIMESTAMP: - value, err = e.CurrentTimestamp() - case FuncNOW: - value, err = e.Now() - } - - if err == nil && value != nil { - row = append(row, convertSchemaValueToSQLValue(value)) - } else { - row = append(row, sqltypes.NULL) - } - } else if value, exists := result.Values[columnName]; exists { - row = append(row, convertSchemaValueToSQLValue(value)) - } else if columnName == SW_COLUMN_NAME_TIMESTAMP { - row = append(row, sqltypes.NewInt64(result.Timestamp)) - } else if columnName == SW_COLUMN_NAME_KEY { - row = append(row, sqltypes.NewVarChar(string(result.Key))) - } else if columnName == SW_COLUMN_NAME_SOURCE { - row = append(row, sqltypes.NewVarChar(result.Source)) - } else if strings.Contains(columnName, "||") { - // Handle string concatenation expressions using production engine logic - // Try to use production engine evaluation for complex expressions - if value := e.evaluateComplexExpressionMock(columnName, result); value != nil { - row = append(row, *value) - } else { - row = append(row, e.evaluateStringConcatenationMock(columnName, result)) - } - } else if strings.Contains(columnName, "+") || strings.Contains(columnName, "-") || strings.Contains(columnName, "*") || strings.Contains(columnName, "/") || strings.Contains(columnName, "%") { - // Handle arithmetic expression results - for mock testing, calculate based on operator - idValue := int64(0) - userIdValue := int64(0) - - // Extract id and user_id values for calculations - if idVal, exists := result.Values["id"]; exists && idVal.GetInt64Value() != 0 { - idValue = idVal.GetInt64Value() - } - if userIdVal, exists := result.Values["user_id"]; exists { - if userIdVal.GetInt32Value() != 0 { - userIdValue = int64(userIdVal.GetInt32Value()) - } else if userIdVal.GetInt64Value() != 0 { - userIdValue = userIdVal.GetInt64Value() - } - } - - // Calculate based on specific expressions - if strings.Contains(columnName, "id+user_id") { - row = append(row, sqltypes.NewInt64(idValue+userIdValue)) - } else if strings.Contains(columnName, "id-user_id") { - row = append(row, sqltypes.NewInt64(idValue-userIdValue)) - } else if strings.Contains(columnName, "id*2") { - row = append(row, sqltypes.NewInt64(idValue*2)) - } else if strings.Contains(columnName, "id*user_id") { - row = append(row, sqltypes.NewInt64(idValue*userIdValue)) - } else if strings.Contains(columnName, "user_id*2") { - row = append(row, sqltypes.NewInt64(userIdValue*2)) - } else if strings.Contains(columnName, "id*amount") { - // Handle id*amount calculation - var amountValue int64 = 0 - if amountVal := result.Values["amount"]; amountVal != nil { - if amountVal.GetDoubleValue() != 0 { - amountValue = int64(amountVal.GetDoubleValue()) - } else if amountVal.GetFloatValue() != 0 { - amountValue = int64(amountVal.GetFloatValue()) - } else if amountVal.GetInt64Value() != 0 { - amountValue = amountVal.GetInt64Value() - } else { - // Default amount for testing - amountValue = 100 - } - } else { - // Default amount for testing if no amount column - amountValue = 100 - } - row = append(row, sqltypes.NewInt64(idValue*amountValue)) - } else if strings.Contains(columnName, "id/2") && idValue != 0 { - row = append(row, sqltypes.NewInt64(idValue/2)) - } else if strings.Contains(columnName, "id%") || strings.Contains(columnName, "user_id%") { - // Simple modulo calculation - row = append(row, sqltypes.NewInt64(idValue%100)) - } else { - // Default calculation for other arithmetic expressions - row = append(row, sqltypes.NewInt64(idValue*2)) // Simple default - } - } else if strings.HasPrefix(columnName, "'") && strings.HasSuffix(columnName, "'") { - // Handle string literals like 'good', 'test' - literal := strings.Trim(columnName, "'") - row = append(row, sqltypes.NewVarChar(literal)) - } else if strings.HasPrefix(columnName, "__FUNCEXPR__") { - // Handle function expressions by evaluating them with the actual engine - if funcExpr, exists := e.funcExpressions[columnName]; exists { - // Evaluate the function expression using the actual engine logic - if value, err := e.evaluateFunctionExpression(funcExpr, result); err == nil && value != nil { - row = append(row, convertSchemaValueToSQLValue(value)) - } else { - row = append(row, sqltypes.NULL) - } - } else { - row = append(row, sqltypes.NULL) - } - } else if funcExpr, exists := e.funcExpressions[columnName]; exists { - // Handle function expressions identified by their alias or function name - if value, err := e.evaluateFunctionExpression(funcExpr, result); err == nil && value != nil { - row = append(row, convertSchemaValueToSQLValue(value)) - } else { - // Check if this is a validation error (wrong argument count, unsupported parts/precision, etc.) - if err != nil && (strings.Contains(err.Error(), "expects exactly") || - strings.Contains(err.Error(), "argument") || - strings.Contains(err.Error(), "unsupported date part") || - strings.Contains(err.Error(), "unsupported date truncation precision")) { - // For validation errors, return the error to the caller instead of using fallback - return &QueryResult{Error: err}, err - } - - // Fallback for common datetime functions that might fail in evaluation - functionName := strings.ToUpper(funcExpr.Name.String()) - switch functionName { - case "CURRENT_TIME": - // Return current time in HH:MM:SS format - row = append(row, sqltypes.NewVarChar("14:30:25")) - case "CURRENT_DATE": - // Return current date in YYYY-MM-DD format - row = append(row, sqltypes.NewVarChar("2025-01-09")) - case "NOW": - // Return current timestamp - row = append(row, sqltypes.NewVarChar("2025-01-09 14:30:25")) - case "CURRENT_TIMESTAMP": - // Return current timestamp - row = append(row, sqltypes.NewVarChar("2025-01-09 14:30:25")) - case "EXTRACT": - // Handle EXTRACT function - return mock values based on common patterns - // EXTRACT('YEAR', date) -> 2025, EXTRACT('MONTH', date) -> 9, etc. - if len(funcExpr.Exprs) >= 1 { - if aliasedExpr, ok := funcExpr.Exprs[0].(*AliasedExpr); ok { - if strVal, ok := aliasedExpr.Expr.(*SQLVal); ok && strVal.Type == StrVal { - part := strings.ToUpper(string(strVal.Val)) - switch part { - case "YEAR": - row = append(row, sqltypes.NewInt64(2025)) - case "MONTH": - row = append(row, sqltypes.NewInt64(9)) - case "DAY": - row = append(row, sqltypes.NewInt64(6)) - case "HOUR": - row = append(row, sqltypes.NewInt64(14)) - case "MINUTE": - row = append(row, sqltypes.NewInt64(30)) - case "SECOND": - row = append(row, sqltypes.NewInt64(25)) - case "QUARTER": - row = append(row, sqltypes.NewInt64(3)) - default: - row = append(row, sqltypes.NULL) - } - } else { - row = append(row, sqltypes.NULL) - } - } else { - row = append(row, sqltypes.NULL) - } - } else { - row = append(row, sqltypes.NULL) - } - case "DATE_TRUNC": - // Handle DATE_TRUNC function - return mock timestamp values - row = append(row, sqltypes.NewVarChar("2025-01-09 00:00:00")) - default: - row = append(row, sqltypes.NULL) - } - } - } else if strings.Contains(columnName, "(") && strings.Contains(columnName, ")") { - // Legacy function handling - should be replaced by function expression evaluation above - // Other functions - return mock result - row = append(row, sqltypes.NewVarChar("MOCK_FUNC")) - } else { - row = append(row, sqltypes.NewVarChar("")) // Default empty value - } - } - rows = append(rows, row) - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - }, nil -} - -// convertSchemaValueToSQLValue converts a schema_pb.Value to sqltypes.Value -func convertSchemaValueToSQLValue(value *schema_pb.Value) sqltypes.Value { - if value == nil { - return sqltypes.NewVarChar("") - } - - switch v := value.Kind.(type) { - case *schema_pb.Value_Int32Value: - return sqltypes.NewInt32(v.Int32Value) - case *schema_pb.Value_Int64Value: - return sqltypes.NewInt64(v.Int64Value) - case *schema_pb.Value_StringValue: - return sqltypes.NewVarChar(v.StringValue) - case *schema_pb.Value_DoubleValue: - return sqltypes.NewFloat64(v.DoubleValue) - case *schema_pb.Value_FloatValue: - return sqltypes.NewFloat32(v.FloatValue) - case *schema_pb.Value_BoolValue: - if v.BoolValue { - return sqltypes.NewVarChar("true") - } - return sqltypes.NewVarChar("false") - case *schema_pb.Value_BytesValue: - return sqltypes.NewVarChar(string(v.BytesValue)) - case *schema_pb.Value_TimestampValue: - // Convert timestamp to string representation - timestampMicros := v.TimestampValue.TimestampMicros - seconds := timestampMicros / 1000000 - return sqltypes.NewInt64(seconds) - default: - return sqltypes.NewVarChar("") - } -} - -// parseLimitOffset extracts LIMIT and OFFSET values from SQL string (test-only implementation) -func (e *TestSQLEngine) parseLimitOffset(sql string) (limit int, offset int) { - limit = -1 // -1 means no limit - offset = 0 - - // Convert to uppercase for easier parsing - upperSQL := strings.ToUpper(sql) - - // Parse LIMIT - limitRegex := regexp.MustCompile(`LIMIT\s+(\d+)`) - if matches := limitRegex.FindStringSubmatch(upperSQL); len(matches) > 1 { - if val, err := strconv.Atoi(matches[1]); err == nil { - limit = val - } - } - - // Parse OFFSET - offsetRegex := regexp.MustCompile(`OFFSET\s+(\d+)`) - if matches := offsetRegex.FindStringSubmatch(upperSQL); len(matches) > 1 { - if val, err := strconv.Atoi(matches[1]); err == nil { - offset = val - } - } - - return limit, offset -} - -// getColumnName extracts column name from expression for mock testing -func (e *TestSQLEngine) getColumnName(expr ExprNode) string { - if colName, ok := expr.(*ColName); ok { - return colName.Name.String() - } - return "col" -} - -// isHybridQuery determines if this is a hybrid query that should include archived data -func (e *TestSQLEngine) isHybridQuery(stmt *SelectStatement, sql string) bool { - // Check if _source column is explicitly requested - upperSQL := strings.ToUpper(sql) - if strings.Contains(upperSQL, "_SOURCE") { - return true - } - - // Check if any of the select expressions include _source - for _, expr := range stmt.SelectExprs { - if aliasedExpr, ok := expr.(*AliasedExpr); ok { - if colName, ok := aliasedExpr.Expr.(*ColName); ok { - if colName.Name.String() == SW_COLUMN_NAME_SOURCE { - return true - } - } - } - } - - return false -} - -// isAggregationQuery determines if this is an aggregation query (COUNT, MAX, MIN, SUM, AVG) -func (e *TestSQLEngine) isAggregationQuery(stmt *SelectStatement, sql string) bool { - upperSQL := strings.ToUpper(sql) - // Check for all aggregation functions - aggregationFunctions := []string{"COUNT(", "MAX(", "MIN(", "SUM(", "AVG("} - for _, funcName := range aggregationFunctions { - if strings.Contains(upperSQL, funcName) { - return true - } - } - return false -} - -// handleAggregationQuery handles COUNT, MAX, MIN, SUM, AVG and other aggregation queries -func (e *TestSQLEngine) handleAggregationQuery(tableName string, stmt *SelectStatement, sql string) (*QueryResult, error) { - // Get sample data for aggregation - allSampleData := generateSampleHybridData(tableName, HybridScanOptions{}) - - // Determine aggregation type from SQL - upperSQL := strings.ToUpper(sql) - var result sqltypes.Value - var columnName string - - if strings.Contains(upperSQL, "COUNT(") { - // COUNT aggregation - return count of all rows - result = sqltypes.NewInt64(int64(len(allSampleData))) - columnName = "COUNT(*)" - } else if strings.Contains(upperSQL, "MAX(") { - // MAX aggregation - find maximum value - columnName = "MAX(id)" // Default assumption - maxVal := int64(0) - for _, row := range allSampleData { - if idVal := row.Values["id"]; idVal != nil { - if intVal := idVal.GetInt64Value(); intVal > maxVal { - maxVal = intVal - } - } - } - result = sqltypes.NewInt64(maxVal) - } else if strings.Contains(upperSQL, "MIN(") { - // MIN aggregation - find minimum value - columnName = "MIN(id)" // Default assumption - minVal := int64(999999999) // Start with large number - for _, row := range allSampleData { - if idVal := row.Values["id"]; idVal != nil { - if intVal := idVal.GetInt64Value(); intVal < minVal { - minVal = intVal - } - } - } - result = sqltypes.NewInt64(minVal) - } else if strings.Contains(upperSQL, "SUM(") { - // SUM aggregation - sum all values - columnName = "SUM(id)" // Default assumption - sumVal := int64(0) - for _, row := range allSampleData { - if idVal := row.Values["id"]; idVal != nil { - sumVal += idVal.GetInt64Value() - } - } - result = sqltypes.NewInt64(sumVal) - } else if strings.Contains(upperSQL, "AVG(") { - // AVG aggregation - average of all values - columnName = "AVG(id)" // Default assumption - sumVal := int64(0) - count := 0 - for _, row := range allSampleData { - if idVal := row.Values["id"]; idVal != nil { - sumVal += idVal.GetInt64Value() - count++ - } - } - if count > 0 { - result = sqltypes.NewFloat64(float64(sumVal) / float64(count)) - } else { - result = sqltypes.NewInt64(0) - } - } else { - // Fallback - treat as COUNT - result = sqltypes.NewInt64(int64(len(allSampleData))) - columnName = "COUNT(*)" - } - - // Create aggregation result (single row with single column) - aggregationRows := [][]sqltypes.Value{ - {result}, - } - - // Parse LIMIT and OFFSET - limit, offset := e.parseLimitOffset(sql) - - // Apply offset to aggregation result - if offset > 0 { - if offset >= len(aggregationRows) { - aggregationRows = [][]sqltypes.Value{} - } else { - aggregationRows = aggregationRows[offset:] - } - } - - // Apply limit to aggregation result - if limit >= 0 { - if limit == 0 { - aggregationRows = [][]sqltypes.Value{} - } else if limit < len(aggregationRows) { - aggregationRows = aggregationRows[:limit] - } - } - - return &QueryResult{ - Columns: []string{columnName}, - Rows: aggregationRows, - }, nil -} - -// MockBrokerClient implements BrokerClient interface for testing -type MockBrokerClient struct { - namespaces []string - topics map[string][]string // namespace -> topics - schemas map[string]*schema_pb.RecordType // "namespace.topic" -> schema - shouldFail bool - failMessage string -} - -// NewMockBrokerClient creates a new mock broker client with sample data -func NewMockBrokerClient() *MockBrokerClient { - client := &MockBrokerClient{ - namespaces: []string{"default", "test"}, - topics: map[string][]string{ - "default": {"user_events", "system_logs"}, - "test": {"test-topic"}, - }, - schemas: make(map[string]*schema_pb.RecordType), - } - - // Add sample schemas - client.schemas["default.user_events"] = &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "user_id", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "event_type", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "data", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - }, - } - - client.schemas["default.system_logs"] = &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "level", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "message", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "service", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - }, - } - - client.schemas["test.test-topic"] = &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - {Name: "id", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}}, - {Name: "name", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}}, - {Name: "value", Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}}, - }, - } - - return client -} - -// SetFailure configures the mock to fail with the given message -func (m *MockBrokerClient) SetFailure(shouldFail bool, message string) { - m.shouldFail = shouldFail - m.failMessage = message -} - -// ListNamespaces returns the mock namespaces -func (m *MockBrokerClient) ListNamespaces(ctx context.Context) ([]string, error) { - if m.shouldFail { - return nil, fmt.Errorf("mock broker failure: %s", m.failMessage) - } - return m.namespaces, nil -} - -// ListTopics returns the mock topics for a namespace -func (m *MockBrokerClient) ListTopics(ctx context.Context, namespace string) ([]string, error) { - if m.shouldFail { - return nil, fmt.Errorf("mock broker failure: %s", m.failMessage) - } - - if topics, exists := m.topics[namespace]; exists { - return topics, nil - } - return []string{}, nil -} - -// GetTopicSchema returns flat schema and key columns for a topic -func (m *MockBrokerClient) GetTopicSchema(ctx context.Context, namespace, topic string) (*schema_pb.RecordType, []string, string, error) { - if m.shouldFail { - return nil, nil, "", fmt.Errorf("mock broker failure: %s", m.failMessage) - } - - key := fmt.Sprintf("%s.%s", namespace, topic) - if schema, exists := m.schemas[key]; exists { - // For testing, assume first field is key column - var keyColumns []string - if len(schema.Fields) > 0 { - keyColumns = []string{schema.Fields[0].Name} - } - return schema, keyColumns, "", nil // Schema format empty for mocks - } - return nil, nil, "", fmt.Errorf("topic %s not found", key) -} - -// ConfigureTopic creates or modifies a topic using flat schema format -func (m *MockBrokerClient) ConfigureTopic(ctx context.Context, namespace, topicName string, partitionCount int32, flatSchema *schema_pb.RecordType, keyColumns []string) error { - if m.shouldFail { - return fmt.Errorf("mock broker failure: %s", m.failMessage) - } - - // Store the schema for future retrieval - key := fmt.Sprintf("%s.%s", namespace, topicName) - m.schemas[key] = flatSchema - - // Add topic to namespace if it doesn't exist - if topics, exists := m.topics[namespace]; exists { - found := false - for _, t := range topics { - if t == topicName { - found = true - break - } - } - if !found { - m.topics[namespace] = append(topics, topicName) - } - } else { - m.topics[namespace] = []string{topicName} - } - - return nil -} - -// GetFilerClient returns a mock filer client -func (m *MockBrokerClient) GetFilerClient() (filer_pb.FilerClient, error) { - if m.shouldFail { - return nil, fmt.Errorf("mock broker failure: %s", m.failMessage) - } - return NewMockFilerClient(), nil -} - -// MockFilerClient implements filer_pb.FilerClient interface for testing -type MockFilerClient struct { - shouldFail bool - failMessage string -} - -// NewMockFilerClient creates a new mock filer client -func NewMockFilerClient() *MockFilerClient { - return &MockFilerClient{} -} - -// SetFailure configures the mock to fail with the given message -func (m *MockFilerClient) SetFailure(shouldFail bool, message string) { - m.shouldFail = shouldFail - m.failMessage = message -} - -// WithFilerClient executes a function with a mock filer client -func (m *MockFilerClient) WithFilerClient(followRedirect bool, fn func(client filer_pb.SeaweedFilerClient) error) error { - if m.shouldFail { - return fmt.Errorf("mock filer failure: %s", m.failMessage) - } - - // For testing, we can just return success since the actual filer operations - // are not critical for SQL engine unit tests - return nil -} - -// AdjustedUrl implements the FilerClient interface (mock implementation) -func (m *MockFilerClient) AdjustedUrl(location *filer_pb.Location) string { - if location != nil && location.Url != "" { - return location.Url - } - return "mock://localhost:8080" -} - -// GetDataCenter implements the FilerClient interface (mock implementation) -func (m *MockFilerClient) GetDataCenter() string { - return "mock-datacenter" -} - -// TestHybridMessageScanner is a test-specific implementation that returns sample data -// without requiring real partition discovery -type TestHybridMessageScanner struct { - topicName string -} - -// NewTestHybridMessageScanner creates a test-specific hybrid scanner -func NewTestHybridMessageScanner(topicName string) *TestHybridMessageScanner { - return &TestHybridMessageScanner{ - topicName: topicName, - } -} - -// ScanMessages returns sample data for testing -func (t *TestHybridMessageScanner) ScanMessages(ctx context.Context, options HybridScanOptions) ([]HybridScanResult, error) { - // Return sample data based on topic name - return generateSampleHybridData(t.topicName, options), nil -} - -// DeleteTopic removes a topic and all its data (mock implementation) -func (m *MockBrokerClient) DeleteTopic(ctx context.Context, namespace, topicName string) error { - if m.shouldFail { - return fmt.Errorf("mock broker failure: %s", m.failMessage) - } - - // Remove from schemas - key := fmt.Sprintf("%s.%s", namespace, topicName) - delete(m.schemas, key) - - // Remove from topics list - if topics, exists := m.topics[namespace]; exists { - newTopics := make([]string, 0, len(topics)) - for _, topic := range topics { - if topic != topicName { - newTopics = append(newTopics, topic) - } - } - m.topics[namespace] = newTopics - } - - return nil -} - -// GetUnflushedMessages returns mock unflushed data for testing -// Returns sample data as LogEntries to provide test data for SQL engine -func (m *MockBrokerClient) GetUnflushedMessages(ctx context.Context, namespace, topicName string, partition topic.Partition, startTimeNs int64) ([]*filer_pb.LogEntry, error) { - if m.shouldFail { - return nil, fmt.Errorf("mock broker failed to get unflushed messages: %s", m.failMessage) - } - - // Generate sample data as LogEntries for testing - // This provides data that looks like it came from the broker's memory buffer - allSampleData := generateSampleHybridData(topicName, HybridScanOptions{}) - - var logEntries []*filer_pb.LogEntry - for _, result := range allSampleData { - // Only return live_log entries as unflushed messages - // This matches real system behavior where unflushed messages come from broker memory - // parquet_archive data would come from parquet files, not unflushed messages - if result.Source != "live_log" { - continue - } - - // Convert sample data to protobuf LogEntry format - recordValue := &schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)} - for k, v := range result.Values { - recordValue.Fields[k] = v - } - - // Serialize the RecordValue - data, err := proto.Marshal(recordValue) - if err != nil { - continue // Skip invalid entries - } - - logEntry := &filer_pb.LogEntry{ - TsNs: result.Timestamp, - Key: result.Key, - Data: data, - } - logEntries = append(logEntries, logEntry) - } - - return logEntries, nil -} - -// evaluateStringConcatenationMock evaluates string concatenation expressions for mock testing -func (e *TestSQLEngine) evaluateStringConcatenationMock(columnName string, result HybridScanResult) sqltypes.Value { - // Split the expression by || to get individual parts - parts := strings.Split(columnName, "||") - var concatenated strings.Builder - - for _, part := range parts { - part = strings.TrimSpace(part) - - // Check if it's a string literal (enclosed in single quotes) - if strings.HasPrefix(part, "'") && strings.HasSuffix(part, "'") { - // Extract the literal value - literal := strings.Trim(part, "'") - concatenated.WriteString(literal) - } else { - // It's a column name - get the value from result - if value, exists := result.Values[part]; exists { - // Convert to string and append - if strValue := value.GetStringValue(); strValue != "" { - concatenated.WriteString(strValue) - } else if intValue := value.GetInt64Value(); intValue != 0 { - concatenated.WriteString(fmt.Sprintf("%d", intValue)) - } else if int32Value := value.GetInt32Value(); int32Value != 0 { - concatenated.WriteString(fmt.Sprintf("%d", int32Value)) - } else if floatValue := value.GetDoubleValue(); floatValue != 0 { - concatenated.WriteString(fmt.Sprintf("%g", floatValue)) - } else if floatValue := value.GetFloatValue(); floatValue != 0 { - concatenated.WriteString(fmt.Sprintf("%g", floatValue)) - } - } - // If column doesn't exist or has no value, we append nothing (which is correct SQL behavior) - } - } - - return sqltypes.NewVarChar(concatenated.String()) -} - -// evaluateComplexExpressionMock attempts to use production engine logic for complex expressions -func (e *TestSQLEngine) evaluateComplexExpressionMock(columnName string, result HybridScanResult) *sqltypes.Value { - // Parse the column name back into an expression using CockroachDB parser - cockroachParser := NewCockroachSQLParser() - dummySelect := fmt.Sprintf("SELECT %s", columnName) - - stmt, err := cockroachParser.ParseSQL(dummySelect) - if err == nil { - if selectStmt, ok := stmt.(*SelectStatement); ok && len(selectStmt.SelectExprs) > 0 { - if aliasedExpr, ok := selectStmt.SelectExprs[0].(*AliasedExpr); ok { - if arithmeticExpr, ok := aliasedExpr.Expr.(*ArithmeticExpr); ok { - // Try to evaluate using production logic - tempEngine := &SQLEngine{} - if value, err := tempEngine.evaluateArithmeticExpression(arithmeticExpr, result); err == nil && value != nil { - sqlValue := convertSchemaValueToSQLValue(value) - return &sqlValue - } - } - } - } - } - return nil -} - -// evaluateFunctionExpression evaluates a function expression using the actual engine logic -func (e *TestSQLEngine) evaluateFunctionExpression(funcExpr *FuncExpr, result HybridScanResult) (*schema_pb.Value, error) { - funcName := strings.ToUpper(funcExpr.Name.String()) - - // Route to appropriate function evaluator based on function type - if e.isDateTimeFunction(funcName) { - // Use datetime function evaluator - return e.evaluateDateTimeFunction(funcExpr, result) - } else { - // Use string function evaluator - return e.evaluateStringFunction(funcExpr, result) - } -} diff --git a/weed/query/engine/noschema_error_test.go b/weed/query/engine/noschema_error_test.go deleted file mode 100644 index 31d98c4cd..000000000 --- a/weed/query/engine/noschema_error_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package engine - -import ( - "errors" - "fmt" - "testing" -) - -func TestNoSchemaError(t *testing.T) { - // Test creating a NoSchemaError - err := NoSchemaError{Namespace: "test", Topic: "topic1"} - expectedMsg := "topic test.topic1 has no schema" - if err.Error() != expectedMsg { - t.Errorf("Expected error message '%s', got '%s'", expectedMsg, err.Error()) - } - - // Test IsNoSchemaError with direct NoSchemaError - if !IsNoSchemaError(err) { - t.Error("IsNoSchemaError should return true for NoSchemaError") - } - - // Test IsNoSchemaError with wrapped NoSchemaError - wrappedErr := fmt.Errorf("wrapper: %w", err) - if !IsNoSchemaError(wrappedErr) { - t.Error("IsNoSchemaError should return true for wrapped NoSchemaError") - } - - // Test IsNoSchemaError with different error type - otherErr := errors.New("different error") - if IsNoSchemaError(otherErr) { - t.Error("IsNoSchemaError should return false for other error types") - } - - // Test IsNoSchemaError with nil - if IsNoSchemaError(nil) { - t.Error("IsNoSchemaError should return false for nil") - } -} diff --git a/weed/query/engine/offset_test.go b/weed/query/engine/offset_test.go deleted file mode 100644 index 9176901ac..000000000 --- a/weed/query/engine/offset_test.go +++ /dev/null @@ -1,480 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "strings" - "testing" -) - -// TestParseSQL_OFFSET_EdgeCases tests edge cases for OFFSET parsing -func TestParseSQL_OFFSET_EdgeCases(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement, err error) - }{ - { - name: "Valid LIMIT OFFSET with WHERE", - sql: "SELECT * FROM users WHERE age > 18 LIMIT 10 OFFSET 5", - wantErr: false, - validate: func(t *testing.T, stmt Statement, err error) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Limit == nil { - t.Fatal("Expected LIMIT clause, got nil") - } - if selectStmt.Limit.Offset == nil { - t.Fatal("Expected OFFSET clause, got nil") - } - if selectStmt.Where == nil { - t.Fatal("Expected WHERE clause, got nil") - } - }, - }, - { - name: "LIMIT OFFSET with mixed case", - sql: "select * from users limit 5 offset 3", - wantErr: false, - validate: func(t *testing.T, stmt Statement, err error) { - selectStmt := stmt.(*SelectStatement) - offsetVal := selectStmt.Limit.Offset.(*SQLVal) - if string(offsetVal.Val) != "3" { - t.Errorf("Expected offset value '3', got '%s'", string(offsetVal.Val)) - } - }, - }, - { - name: "LIMIT OFFSET with extra spaces", - sql: "SELECT * FROM users LIMIT 10 OFFSET 20 ", - wantErr: false, - validate: func(t *testing.T, stmt Statement, err error) { - selectStmt := stmt.(*SelectStatement) - limitVal := selectStmt.Limit.Rowcount.(*SQLVal) - offsetVal := selectStmt.Limit.Offset.(*SQLVal) - if string(limitVal.Val) != "10" { - t.Errorf("Expected limit value '10', got '%s'", string(limitVal.Val)) - } - if string(offsetVal.Val) != "20" { - t.Errorf("Expected offset value '20', got '%s'", string(offsetVal.Val)) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt, err) - } - }) - } -} - -// TestSQLEngine_OFFSET_EdgeCases tests edge cases for OFFSET execution -func TestSQLEngine_OFFSET_EdgeCases(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("OFFSET larger than result set", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 5 OFFSET 100") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - // Should return empty result set - if len(result.Rows) != 0 { - t.Errorf("Expected 0 rows when OFFSET > total rows, got %d", len(result.Rows)) - } - }) - - t.Run("OFFSET with LIMIT 0", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 0 OFFSET 2") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - // LIMIT 0 should return no rows regardless of OFFSET - if len(result.Rows) != 0 { - t.Errorf("Expected 0 rows with LIMIT 0, got %d", len(result.Rows)) - } - }) - - t.Run("High OFFSET with small LIMIT", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 1 OFFSET 3") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - // In clean mock environment, we have 4 live_log rows from unflushed messages - // LIMIT 1 OFFSET 3 should return the 4th row (0-indexed: rows 0,1,2,3 -> return row 3) - if len(result.Rows) != 1 { - t.Errorf("Expected 1 row with LIMIT 1 OFFSET 3 (4th live_log row), got %d", len(result.Rows)) - } - }) -} - -// TestSQLEngine_OFFSET_ErrorCases tests error conditions for OFFSET -func TestSQLEngine_OFFSET_ErrorCases(t *testing.T) { - engine := NewTestSQLEngine() - - // Test negative OFFSET - should be caught during execution - t.Run("Negative OFFSET value", func(t *testing.T) { - // Note: This would need to be implemented as validation in the execution engine - // For now, we test that the parser accepts it but execution might handle it - _, err := ParseSQL("SELECT * FROM users LIMIT 10 OFFSET -5") - if err != nil { - t.Logf("Parser rejected negative OFFSET (this is expected): %v", err) - } else { - // Parser accepts it, execution should handle validation - t.Logf("Parser accepts negative OFFSET, execution should validate") - } - }) - - // Test very large OFFSET - t.Run("Very large OFFSET value", func(t *testing.T) { - largeOffset := "2147483647" // Max int32 - sql := "SELECT * FROM user_events LIMIT 1 OFFSET " + largeOffset - result, err := engine.ExecuteSQL(context.Background(), sql) - if err != nil { - // Large OFFSET might cause parsing or execution errors - if strings.Contains(err.Error(), "out of valid range") { - t.Logf("Large OFFSET properly rejected: %v", err) - } else { - t.Errorf("Unexpected error for large OFFSET: %v", err) - } - } else if result.Error != nil { - if strings.Contains(result.Error.Error(), "out of valid range") { - t.Logf("Large OFFSET properly rejected during execution: %v", result.Error) - } else { - t.Errorf("Unexpected execution error for large OFFSET: %v", result.Error) - } - } else { - // Should return empty result for very large offset - if len(result.Rows) != 0 { - t.Errorf("Expected 0 rows for very large OFFSET, got %d", len(result.Rows)) - } - } - }) -} - -// TestSQLEngine_OFFSET_Consistency tests that OFFSET produces consistent results -func TestSQLEngine_OFFSET_Consistency(t *testing.T) { - engine := NewTestSQLEngine() - - // Get all rows first - allResult, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events") - if err != nil { - t.Fatalf("Failed to get all rows: %v", err) - } - if allResult.Error != nil { - t.Fatalf("Failed to get all rows: %v", allResult.Error) - } - - totalRows := len(allResult.Rows) - if totalRows == 0 { - t.Skip("No data available for consistency test") - } - - // Test that OFFSET + remaining rows = total rows - for offset := 0; offset < totalRows; offset++ { - t.Run("OFFSET_"+strconv.Itoa(offset), func(t *testing.T) { - sql := "SELECT * FROM user_events LIMIT 100 OFFSET " + strconv.Itoa(offset) - result, err := engine.ExecuteSQL(context.Background(), sql) - if err != nil { - t.Fatalf("Error with OFFSET %d: %v", offset, err) - } - if result.Error != nil { - t.Fatalf("Query error with OFFSET %d: %v", offset, result.Error) - } - - expectedRows := totalRows - offset - if len(result.Rows) != expectedRows { - t.Errorf("OFFSET %d: expected %d rows, got %d", offset, expectedRows, len(result.Rows)) - } - }) - } -} - -// TestSQLEngine_LIMIT_OFFSET_BugFix tests the specific bug fix for LIMIT with OFFSET -// This test addresses the issue where LIMIT 10 OFFSET 5 was returning 5 rows instead of 10 -func TestSQLEngine_LIMIT_OFFSET_BugFix(t *testing.T) { - engine := NewTestSQLEngine() - - // Test the specific scenario that was broken: LIMIT 10 OFFSET 5 should return 10 rows - t.Run("LIMIT 10 OFFSET 5 returns correct count", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT id, user_id, id+user_id FROM user_events LIMIT 10 OFFSET 5") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // The bug was that this returned 5 rows instead of 10 - // After fix, it should return up to 10 rows (limited by available data) - actualRows := len(result.Rows) - if actualRows > 10 { - t.Errorf("LIMIT 10 violated: got %d rows", actualRows) - } - - t.Logf("LIMIT 10 OFFSET 5 returned %d rows (within limit)", actualRows) - - // Verify we have the expected columns - expectedCols := 3 // id, user_id, id+user_id - if len(result.Columns) != expectedCols { - t.Errorf("Expected %d columns, got %d columns: %v", expectedCols, len(result.Columns), result.Columns) - } - }) - - // Test various LIMIT and OFFSET combinations to ensure correct row counts - testCases := []struct { - name string - limit int - offset int - allowEmpty bool // Whether 0 rows is acceptable (for large offsets) - }{ - {"LIMIT 5 OFFSET 0", 5, 0, false}, - {"LIMIT 5 OFFSET 2", 5, 2, false}, - {"LIMIT 8 OFFSET 3", 8, 3, false}, - {"LIMIT 15 OFFSET 1", 15, 1, false}, - {"LIMIT 3 OFFSET 7", 3, 7, true}, // Large offset may exceed data - {"LIMIT 12 OFFSET 4", 12, 4, true}, // Large offset may exceed data - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - sql := "SELECT id, user_id FROM user_events LIMIT " + strconv.Itoa(tc.limit) + " OFFSET " + strconv.Itoa(tc.offset) - result, err := engine.ExecuteSQL(context.Background(), sql) - if err != nil { - t.Fatalf("Expected no error for %s, got %v", tc.name, err) - } - if result.Error != nil { - t.Fatalf("Expected no query error for %s, got %v", tc.name, result.Error) - } - - actualRows := len(result.Rows) - - // Verify LIMIT is never exceeded - if actualRows > tc.limit { - t.Errorf("%s: LIMIT violated - returned %d rows, limit was %d", tc.name, actualRows, tc.limit) - } - - // Check if we expect rows - if !tc.allowEmpty && actualRows == 0 { - t.Errorf("%s: expected some rows but got 0 (insufficient test data or early termination bug)", tc.name) - } - - t.Logf("%s: returned %d rows (within limit %d)", tc.name, actualRows, tc.limit) - }) - } -} - -// TestSQLEngine_OFFSET_DataCollectionBuffer tests that the enhanced data collection buffer works -func TestSQLEngine_OFFSET_DataCollectionBuffer(t *testing.T) { - engine := NewTestSQLEngine() - - // Test scenarios that specifically stress the data collection buffer enhancement - t.Run("Large OFFSET with small LIMIT", func(t *testing.T) { - // This scenario requires collecting more data upfront to handle the offset - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 2 OFFSET 8") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should either return 2 rows or 0 (if offset exceeds available data) - // The bug would cause early termination and return 0 incorrectly - actualRows := len(result.Rows) - if actualRows != 0 && actualRows != 2 { - t.Errorf("Expected 0 or 2 rows for LIMIT 2 OFFSET 8, got %d", actualRows) - } - }) - - t.Run("Medium OFFSET with medium LIMIT", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT id, user_id FROM user_events LIMIT 6 OFFSET 4") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // With proper buffer enhancement, this should work correctly - actualRows := len(result.Rows) - if actualRows > 6 { - t.Errorf("LIMIT 6 should never return more than 6 rows, got %d", actualRows) - } - }) - - t.Run("Progressive OFFSET test", func(t *testing.T) { - // Test that increasing OFFSET values work consistently - baseSQL := "SELECT id FROM user_events LIMIT 3 OFFSET " - - for offset := 0; offset <= 5; offset++ { - sql := baseSQL + strconv.Itoa(offset) - result, err := engine.ExecuteSQL(context.Background(), sql) - if err != nil { - t.Fatalf("Error at OFFSET %d: %v", offset, err) - } - if result.Error != nil { - t.Fatalf("Query error at OFFSET %d: %v", offset, result.Error) - } - - actualRows := len(result.Rows) - // Each should return at most 3 rows (LIMIT 3) - if actualRows > 3 { - t.Errorf("OFFSET %d: LIMIT 3 returned %d rows (should be โ‰ค 3)", offset, actualRows) - } - - t.Logf("OFFSET %d: returned %d rows", offset, actualRows) - } - }) -} - -// TestSQLEngine_LIMIT_OFFSET_ArithmeticExpressions tests LIMIT/OFFSET with arithmetic expressions -func TestSQLEngine_LIMIT_OFFSET_ArithmeticExpressions(t *testing.T) { - engine := NewTestSQLEngine() - - // Test the exact scenario from the user's example - t.Run("Arithmetic expressions with LIMIT OFFSET", func(t *testing.T) { - // First query: LIMIT 10 (should return 10 rows) - result1, err := engine.ExecuteSQL(context.Background(), "SELECT id, user_id, id+user_id FROM user_events LIMIT 10") - if err != nil { - t.Fatalf("Expected no error for first query, got %v", err) - } - if result1.Error != nil { - t.Fatalf("Expected no query error for first query, got %v", result1.Error) - } - - // Second query: LIMIT 10 OFFSET 5 (should return 10 rows, not 5) - result2, err := engine.ExecuteSQL(context.Background(), "SELECT id, user_id, id+user_id FROM user_events LIMIT 10 OFFSET 5") - if err != nil { - t.Fatalf("Expected no error for second query, got %v", err) - } - if result2.Error != nil { - t.Fatalf("Expected no query error for second query, got %v", result2.Error) - } - - // Verify column structure is correct - expectedColumns := []string{"id", "user_id", "id+user_id"} - if len(result2.Columns) != len(expectedColumns) { - t.Errorf("Expected %d columns, got %d", len(expectedColumns), len(result2.Columns)) - } - - // The key assertion: LIMIT 10 OFFSET 5 should return 10 rows (if available) - // This was the specific bug reported by the user - rows1 := len(result1.Rows) - rows2 := len(result2.Rows) - - t.Logf("LIMIT 10: returned %d rows", rows1) - t.Logf("LIMIT 10 OFFSET 5: returned %d rows", rows2) - - if rows1 >= 15 { // If we have enough data for the test to be meaningful - if rows2 != 10 { - t.Errorf("LIMIT 10 OFFSET 5 should return 10 rows when sufficient data available, got %d", rows2) - } - } else { - t.Logf("Insufficient data (%d rows) to fully test LIMIT 10 OFFSET 5 scenario", rows1) - } - - // Verify multiplication expressions work in the second query - if len(result2.Rows) > 0 { - for i, row := range result2.Rows { - if len(row) >= 3 { // Check if we have the id+user_id column - idVal := row[0].ToString() // id column - userIdVal := row[1].ToString() // user_id column - sumVal := row[2].ToString() // id+user_id column - t.Logf("Row %d: id=%s, user_id=%s, id+user_id=%s", i, idVal, userIdVal, sumVal) - } - } - } - }) - - // Test multiplication specifically - t.Run("Multiplication expressions", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT id, id*2 FROM user_events LIMIT 3") - if err != nil { - t.Fatalf("Expected no error for multiplication test, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error for multiplication test, got %v", result.Error) - } - - if len(result.Columns) != 2 { - t.Errorf("Expected 2 columns for multiplication test, got %d", len(result.Columns)) - } - - if len(result.Rows) == 0 { - t.Error("Expected some rows for multiplication test") - } - - // Check that id*2 column has values (not empty) - for i, row := range result.Rows { - if len(row) >= 2 { - idVal := row[0].ToString() - doubledVal := row[1].ToString() - if doubledVal == "" || doubledVal == "0" { - t.Errorf("Row %d: id*2 should not be empty, id=%s, id*2=%s", i, idVal, doubledVal) - } else { - t.Logf("Row %d: id=%s, id*2=%s โœ“", i, idVal, doubledVal) - } - } - } - }) -} - -// TestSQLEngine_OFFSET_WithAggregation tests OFFSET with aggregation queries -func TestSQLEngine_OFFSET_WithAggregation(t *testing.T) { - engine := NewTestSQLEngine() - - // Note: Aggregation queries typically return single rows, so OFFSET behavior is different - t.Run("COUNT with OFFSET", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT COUNT(*) FROM user_events LIMIT 1 OFFSET 0") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - // COUNT typically returns 1 row, so OFFSET 0 should return that row - if len(result.Rows) != 1 { - t.Errorf("Expected 1 row for COUNT with OFFSET 0, got %d", len(result.Rows)) - } - }) - - t.Run("COUNT with OFFSET 1", func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), "SELECT COUNT(*) FROM user_events LIMIT 1 OFFSET 1") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - // COUNT returns 1 row, so OFFSET 1 should return 0 rows - if len(result.Rows) != 0 { - t.Errorf("Expected 0 rows for COUNT with OFFSET 1, got %d", len(result.Rows)) - } - }) -} diff --git a/weed/query/engine/parquet_scanner.go b/weed/query/engine/parquet_scanner.go deleted file mode 100644 index e4b5252c7..000000000 --- a/weed/query/engine/parquet_scanner.go +++ /dev/null @@ -1,449 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/parquet-go/parquet-go" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/mq/schema" - "github.com/seaweedfs/seaweedfs/weed/mq/topic" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" - "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" -) - -// ParquetScanner scans MQ topic Parquet files for SELECT queries -// Assumptions: -// 1. All MQ messages are stored in Parquet format in topic partitions -// 2. Each partition directory contains dated Parquet files -// 3. System columns (_ts_ns, _key) are added to user schema -// 4. Predicate pushdown is used for efficient scanning -type ParquetScanner struct { - filerClient filer_pb.FilerClient - chunkCache chunk_cache.ChunkCache - topic topic.Topic - recordSchema *schema_pb.RecordType - parquetLevels *schema.ParquetLevels -} - -// NewParquetScanner creates a scanner for a specific MQ topic -// Assumption: Topic exists and has Parquet files in partition directories -func NewParquetScanner(filerClient filer_pb.FilerClient, namespace, topicName string) (*ParquetScanner, error) { - // Check if filerClient is available - if filerClient == nil { - return nil, fmt.Errorf("filerClient is required but not available") - } - - // Create topic reference - t := topic.Topic{ - Namespace: namespace, - Name: topicName, - } - - // Read topic configuration to get schema - var topicConf *mq_pb.ConfigureTopicResponse - var err error - if err := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - topicConf, err = t.ReadConfFile(client) - return err - }); err != nil { - return nil, fmt.Errorf("failed to read topic config: %v", err) - } - - // Build complete schema with system columns - prefer flat schema if available - var recordType *schema_pb.RecordType - - if topicConf.GetMessageRecordType() != nil { - // New flat schema format - use directly - recordType = topicConf.GetMessageRecordType() - } - - if recordType == nil || len(recordType.Fields) == 0 { - // For topics without schema, create a minimal schema with system fields and _value - recordType = schema.RecordTypeBegin(). - WithField(SW_COLUMN_NAME_TIMESTAMP, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - WithField(SW_COLUMN_NAME_VALUE, schema.TypeBytes). // Raw message value - RecordTypeEnd() - } else { - // Add system columns that MQ adds to all records - recordType = schema.NewRecordTypeBuilder(recordType). - WithField(SW_COLUMN_NAME_TIMESTAMP, schema.TypeInt64). - WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes). - RecordTypeEnd() - } - - // Convert to Parquet levels for efficient reading - parquetLevels, err := schema.ToParquetLevels(recordType) - if err != nil { - return nil, fmt.Errorf("failed to create Parquet levels: %v", err) - } - - return &ParquetScanner{ - filerClient: filerClient, - chunkCache: chunk_cache.NewChunkCacheInMemory(256), // Same as MQ logstore - topic: t, - recordSchema: recordType, - parquetLevels: parquetLevels, - }, nil -} - -// ScanOptions configure how the scanner reads data -type ScanOptions struct { - // Time range filtering (Unix nanoseconds) - StartTimeNs int64 - StopTimeNs int64 - - // Column projection - if empty, select all columns - Columns []string - - // Row limit - 0 means no limit - Limit int - - // Predicate for WHERE clause filtering - Predicate func(*schema_pb.RecordValue) bool -} - -// ScanResult represents a single scanned record -type ScanResult struct { - Values map[string]*schema_pb.Value // Column name -> value - Timestamp int64 // Message timestamp (_ts_ns) - Key []byte // Message key (_key) -} - -// Scan reads records from the topic's Parquet files -// Assumptions: -// 1. Scans all partitions of the topic -// 2. Applies time filtering at Parquet level for efficiency -// 3. Applies predicates and projections after reading -func (ps *ParquetScanner) Scan(ctx context.Context, options ScanOptions) ([]ScanResult, error) { - var results []ScanResult - - // Get all partitions for this topic - // TODO: Implement proper partition discovery - // For now, assume partition 0 exists - partitions := []topic.Partition{{RangeStart: 0, RangeStop: 1000}} - - for _, partition := range partitions { - partitionResults, err := ps.scanPartition(ctx, partition, options) - if err != nil { - return nil, fmt.Errorf("failed to scan partition %v: %v", partition, err) - } - - results = append(results, partitionResults...) - - // Apply global limit across all partitions - if options.Limit > 0 && len(results) >= options.Limit { - results = results[:options.Limit] - break - } - } - - return results, nil -} - -// scanPartition scans a specific topic partition -func (ps *ParquetScanner) scanPartition(ctx context.Context, partition topic.Partition, options ScanOptions) ([]ScanResult, error) { - // partitionDir := topic.PartitionDir(ps.topic, partition) // TODO: Use for actual file listing - - var results []ScanResult - - // List Parquet files in partition directory - // TODO: Implement proper file listing with date range filtering - // For now, this is a placeholder that would list actual Parquet files - - // Simulate file processing - in real implementation, this would: - // 1. List files in partitionDir via filerClient - // 2. Filter files by date range if time filtering is enabled - // 3. Process each Parquet file in chronological order - - // Placeholder: Create sample data for testing - if len(results) == 0 { - // Generate sample data for demonstration - sampleData := ps.generateSampleData(options) - results = append(results, sampleData...) - } - - return results, nil -} - -// scanParquetFile scans a single Parquet file (real implementation) -func (ps *ParquetScanner) scanParquetFile(ctx context.Context, entry *filer_pb.Entry, options ScanOptions) ([]ScanResult, error) { - var results []ScanResult - - // Create reader for the Parquet file (same pattern as logstore) - lookupFileIdFn := filer.LookupFn(ps.filerClient) - fileSize := filer.FileSize(entry) - visibleIntervals, _ := filer.NonOverlappingVisibleIntervals(ctx, lookupFileIdFn, entry.Chunks, 0, int64(fileSize)) - chunkViews := filer.ViewFromVisibleIntervals(visibleIntervals, 0, int64(fileSize)) - readerCache := filer.NewReaderCache(32, ps.chunkCache, lookupFileIdFn) - readerAt := filer.NewChunkReaderAtFromClient(ctx, readerCache, chunkViews, int64(fileSize)) - - // Create Parquet reader - parquetReader := parquet.NewReader(readerAt) - defer parquetReader.Close() - - rows := make([]parquet.Row, 128) // Read in batches like logstore - - for { - rowCount, readErr := parquetReader.ReadRows(rows) - - // Process rows even if EOF - for i := 0; i < rowCount; i++ { - // Convert Parquet row to schema value - recordValue, err := schema.ToRecordValue(ps.recordSchema, ps.parquetLevels, rows[i]) - if err != nil { - return nil, fmt.Errorf("failed to convert row: %v", err) - } - - // Extract system columns - timestamp := recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP].GetInt64Value() - key := recordValue.Fields[SW_COLUMN_NAME_KEY].GetBytesValue() - - // Apply time filtering - if options.StartTimeNs > 0 && timestamp < options.StartTimeNs { - continue - } - if options.StopTimeNs > 0 && timestamp >= options.StopTimeNs { - break // Assume data is time-ordered - } - - // Apply predicate filtering (WHERE clause) - if options.Predicate != nil && !options.Predicate(recordValue) { - continue - } - - // Apply column projection - values := make(map[string]*schema_pb.Value) - if len(options.Columns) == 0 { - // Select all columns (excluding system columns from user view) - for name, value := range recordValue.Fields { - if name != SW_COLUMN_NAME_TIMESTAMP && name != SW_COLUMN_NAME_KEY { - values[name] = value - } - } - } else { - // Select specified columns only - for _, columnName := range options.Columns { - if value, exists := recordValue.Fields[columnName]; exists { - values[columnName] = value - } - } - } - - results = append(results, ScanResult{ - Values: values, - Timestamp: timestamp, - Key: key, - }) - - // Apply row limit - if options.Limit > 0 && len(results) >= options.Limit { - return results, nil - } - } - - if readErr != nil { - break // EOF or error - } - } - - return results, nil -} - -// generateSampleData creates sample data for testing when no real Parquet files exist -func (ps *ParquetScanner) generateSampleData(options ScanOptions) []ScanResult { - now := time.Now().UnixNano() - - sampleData := []ScanResult{ - { - Values: map[string]*schema_pb.Value{ - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 1001}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "login"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"ip": "192.168.1.1"}`}}, - }, - Timestamp: now - 3600000000000, // 1 hour ago - Key: []byte("user-1001"), - }, - { - Values: map[string]*schema_pb.Value{ - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 1002}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "page_view"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"page": "/dashboard"}`}}, - }, - Timestamp: now - 1800000000000, // 30 minutes ago - Key: []byte("user-1002"), - }, - { - Values: map[string]*schema_pb.Value{ - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 1001}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "logout"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"session_duration": 3600}`}}, - }, - Timestamp: now - 900000000000, // 15 minutes ago - Key: []byte("user-1001"), - }, - } - - // Apply predicate filtering if specified - if options.Predicate != nil { - var filtered []ScanResult - for _, result := range sampleData { - // Convert to RecordValue for predicate testing - recordValue := &schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)} - for k, v := range result.Values { - recordValue.Fields[k] = v - } - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: result.Timestamp}} - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: result.Key}} - - if options.Predicate(recordValue) { - filtered = append(filtered, result) - } - } - sampleData = filtered - } - - // Apply limit - if options.Limit > 0 && len(sampleData) > options.Limit { - sampleData = sampleData[:options.Limit] - } - - return sampleData -} - -// ConvertToSQLResult converts ScanResults to SQL query results -func (ps *ParquetScanner) ConvertToSQLResult(results []ScanResult, columns []string) *QueryResult { - if len(results) == 0 { - return &QueryResult{ - Columns: columns, - Rows: [][]sqltypes.Value{}, - } - } - - // Determine columns if not specified - if len(columns) == 0 { - columnSet := make(map[string]bool) - for _, result := range results { - for columnName := range result.Values { - columnSet[columnName] = true - } - } - - columns = make([]string, 0, len(columnSet)) - for columnName := range columnSet { - columns = append(columns, columnName) - } - } - - // Convert to SQL rows - rows := make([][]sqltypes.Value, len(results)) - for i, result := range results { - row := make([]sqltypes.Value, len(columns)) - for j, columnName := range columns { - if value, exists := result.Values[columnName]; exists { - row[j] = convertSchemaValueToSQL(value) - } else { - row[j] = sqltypes.NULL - } - } - rows[i] = row - } - - return &QueryResult{ - Columns: columns, - Rows: rows, - } -} - -// convertSchemaValueToSQL converts schema_pb.Value to sqltypes.Value -func convertSchemaValueToSQL(value *schema_pb.Value) sqltypes.Value { - if value == nil { - return sqltypes.NULL - } - - switch v := value.Kind.(type) { - case *schema_pb.Value_BoolValue: - if v.BoolValue { - return sqltypes.NewInt32(1) - } - return sqltypes.NewInt32(0) - case *schema_pb.Value_Int32Value: - return sqltypes.NewInt32(v.Int32Value) - case *schema_pb.Value_Int64Value: - return sqltypes.NewInt64(v.Int64Value) - case *schema_pb.Value_FloatValue: - return sqltypes.NewFloat32(v.FloatValue) - case *schema_pb.Value_DoubleValue: - return sqltypes.NewFloat64(v.DoubleValue) - case *schema_pb.Value_BytesValue: - return sqltypes.NewVarBinary(string(v.BytesValue)) - case *schema_pb.Value_StringValue: - return sqltypes.NewVarChar(v.StringValue) - // Parquet logical types - case *schema_pb.Value_TimestampValue: - timestampValue := value.GetTimestampValue() - if timestampValue == nil { - return sqltypes.NULL - } - // Convert microseconds to time.Time and format as datetime string - timestamp := time.UnixMicro(timestampValue.TimestampMicros) - return sqltypes.MakeTrusted(sqltypes.Datetime, []byte(timestamp.Format("2006-01-02 15:04:05"))) - case *schema_pb.Value_DateValue: - dateValue := value.GetDateValue() - if dateValue == nil { - return sqltypes.NULL - } - // Convert days since epoch to date string - date := time.Unix(int64(dateValue.DaysSinceEpoch)*86400, 0).UTC() - return sqltypes.MakeTrusted(sqltypes.Date, []byte(date.Format("2006-01-02"))) - case *schema_pb.Value_DecimalValue: - decimalValue := value.GetDecimalValue() - if decimalValue == nil { - return sqltypes.NULL - } - // Convert decimal bytes to string representation - decimalStr := decimalToStringHelper(decimalValue) - return sqltypes.MakeTrusted(sqltypes.Decimal, []byte(decimalStr)) - case *schema_pb.Value_TimeValue: - timeValue := value.GetTimeValue() - if timeValue == nil { - return sqltypes.NULL - } - // Convert microseconds since midnight to time string - duration := time.Duration(timeValue.TimeMicros) * time.Microsecond - timeOfDay := time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC).Add(duration) - return sqltypes.MakeTrusted(sqltypes.Time, []byte(timeOfDay.Format("15:04:05"))) - default: - return sqltypes.NewVarChar(fmt.Sprintf("%v", value)) - } -} - -// decimalToStringHelper converts a DecimalValue to string representation -// This is a standalone version of the engine's decimalToString method -func decimalToStringHelper(decimalValue *schema_pb.DecimalValue) string { - if decimalValue == nil || decimalValue.Value == nil { - return "0" - } - - // Convert bytes back to big.Int - intValue := new(big.Int).SetBytes(decimalValue.Value) - - // Convert to string with proper decimal placement - str := intValue.String() - - // Handle decimal placement based on scale - scale := int(decimalValue.Scale) - if scale > 0 && len(str) > scale { - // Insert decimal point - decimalPos := len(str) - scale - return str[:decimalPos] + "." + str[decimalPos:] - } - - return str -} diff --git a/weed/query/engine/parsing_debug_test.go b/weed/query/engine/parsing_debug_test.go deleted file mode 100644 index 6177b0aa6..000000000 --- a/weed/query/engine/parsing_debug_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package engine - -import ( - "fmt" - "testing" -) - -// TestBasicParsing tests basic SQL parsing -func TestBasicParsing(t *testing.T) { - testCases := []string{ - "SELECT * FROM user_events", - "SELECT id FROM user_events", - "SELECT id FROM user_events WHERE id = 123", - "SELECT id FROM user_events WHERE id > 123", - "SELECT id FROM user_events WHERE status = 'active'", - } - - for i, sql := range testCases { - t.Run(fmt.Sprintf("Query_%d", i+1), func(t *testing.T) { - t.Logf("Testing SQL: %s", sql) - - stmt, err := ParseSQL(sql) - if err != nil { - t.Errorf("Parse error: %v", err) - return - } - - t.Logf("Parsed statement type: %T", stmt) - - if selectStmt, ok := stmt.(*SelectStatement); ok { - t.Logf("SelectStatement details:") - t.Logf(" SelectExprs count: %d", len(selectStmt.SelectExprs)) - t.Logf(" From count: %d", len(selectStmt.From)) - t.Logf(" WHERE clause exists: %v", selectStmt.Where != nil) - - if selectStmt.Where != nil { - t.Logf(" WHERE expression type: %T", selectStmt.Where.Expr) - } else { - t.Logf(" WHERE clause is NIL - this is the bug!") - } - } else { - t.Errorf("Expected SelectStatement, got %T", stmt) - } - }) - } -} - -// TestCockroachParserDirectly tests the CockroachDB parser directly -func TestCockroachParserDirectly(t *testing.T) { - // Test if the issue is in our ParseSQL function or CockroachDB parser - sql := "SELECT id FROM user_events WHERE id > 123" - - t.Logf("Testing CockroachDB parser directly with: %s", sql) - - // First test our ParseSQL function - stmt, err := ParseSQL(sql) - if err != nil { - t.Fatalf("Our ParseSQL failed: %v", err) - } - - t.Logf("Our ParseSQL returned: %T", stmt) - - if selectStmt, ok := stmt.(*SelectStatement); ok { - if selectStmt.Where == nil { - t.Errorf("Our ParseSQL is not extracting WHERE clauses!") - t.Errorf("This means the issue is in our CockroachDB AST conversion") - } else { - t.Logf("Our ParseSQL extracted WHERE clause: %T", selectStmt.Where.Expr) - } - } -} - -// TestParseMethodComparison tests different parsing paths -func TestParseMethodComparison(t *testing.T) { - sql := "SELECT id FROM user_events WHERE id > 123" - - t.Logf("Comparing parsing methods for: %s", sql) - - // Test 1: Our global ParseSQL function - stmt1, err1 := ParseSQL(sql) - t.Logf("Global ParseSQL: %T, error: %v", stmt1, err1) - - if selectStmt, ok := stmt1.(*SelectStatement); ok { - t.Logf(" WHERE clause: %v", selectStmt.Where != nil) - } - - // Test 2: Check if we have different parsing paths - // This will help identify if the issue is in our custom parser vs CockroachDB parser - - engine := NewTestSQLEngine() - _, err2 := engine.ExecuteSQL(nil, sql) - t.Logf("ExecuteSQL error (helps identify parsing path): %v", err2) -} diff --git a/weed/query/engine/partition_path_fix_test.go b/weed/query/engine/partition_path_fix_test.go deleted file mode 100644 index 8d92136e6..000000000 --- a/weed/query/engine/partition_path_fix_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package engine - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// TestPartitionPathHandling tests that partition paths are handled correctly -// whether discoverTopicPartitions returns relative or absolute paths -func TestPartitionPathHandling(t *testing.T) { - engine := NewMockSQLEngine() - - t.Run("Mock discoverTopicPartitions returns correct paths", func(t *testing.T) { - // Test that our mock engine handles absolute paths correctly - engine.mockPartitions["test.user_events"] = []string{ - "/topics/test/user_events/v2025-09-03-15-36-29/0000-2520", - "/topics/test/user_events/v2025-09-03-15-36-29/2521-5040", - } - - partitions, err := engine.discoverTopicPartitions("test", "user_events") - assert.NoError(t, err, "Should discover partitions without error") - assert.Equal(t, 2, len(partitions), "Should return 2 partitions") - assert.Contains(t, partitions[0], "/topics/test/user_events/", "Should contain absolute path") - }) - - t.Run("Mock discoverTopicPartitions handles relative paths", func(t *testing.T) { - // Test relative paths scenario - engine.mockPartitions["test.user_events"] = []string{ - "v2025-09-03-15-36-29/0000-2520", - "v2025-09-03-15-36-29/2521-5040", - } - - partitions, err := engine.discoverTopicPartitions("test", "user_events") - assert.NoError(t, err, "Should discover partitions without error") - assert.Equal(t, 2, len(partitions), "Should return 2 partitions") - assert.True(t, !strings.HasPrefix(partitions[0], "/topics/"), "Should be relative path") - }) - - t.Run("Partition path building logic works correctly", func(t *testing.T) { - topicBasePath := "/topics/test/user_events" - - testCases := []struct { - name string - relativePartition string - expectedPath string - }{ - { - name: "Absolute path - use as-is", - relativePartition: "/topics/test/user_events/v2025-09-03-15-36-29/0000-2520", - expectedPath: "/topics/test/user_events/v2025-09-03-15-36-29/0000-2520", - }, - { - name: "Relative path - build full path", - relativePartition: "v2025-09-03-15-36-29/0000-2520", - expectedPath: "/topics/test/user_events/v2025-09-03-15-36-29/0000-2520", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var partitionPath string - - // This is the same logic from our fixed code - if strings.HasPrefix(tc.relativePartition, "/topics/") { - // Already a full path - use as-is - partitionPath = tc.relativePartition - } else { - // Relative path - build full path - partitionPath = topicBasePath + "/" + tc.relativePartition - } - - assert.Equal(t, tc.expectedPath, partitionPath, - "Partition path should be built correctly") - - // Ensure no double slashes - assert.NotContains(t, partitionPath, "//", - "Partition path should not contain double slashes") - }) - } - }) -} - -// TestPartitionPathLogic tests the core logic for handling partition paths -func TestPartitionPathLogic(t *testing.T) { - t.Run("Building partition paths from discovered partitions", func(t *testing.T) { - // Test the specific partition path building that was causing issues - - topicBasePath := "/topics/ecommerce/user_events" - - // This simulates the discoverTopicPartitions returning absolute paths (realistic scenario) - relativePartitions := []string{ - "/topics/ecommerce/user_events/v2025-09-03-15-36-29/0000-2520", - } - - // This is the code from our fix - test it directly - partitions := make([]string, len(relativePartitions)) - for i, relPartition := range relativePartitions { - // Handle both relative and absolute partition paths from discoverTopicPartitions - if strings.HasPrefix(relPartition, "/topics/") { - // Already a full path - use as-is - partitions[i] = relPartition - } else { - // Relative path - build full path - partitions[i] = topicBasePath + "/" + relPartition - } - } - - // Verify the path was handled correctly - expectedPath := "/topics/ecommerce/user_events/v2025-09-03-15-36-29/0000-2520" - assert.Equal(t, expectedPath, partitions[0], "Absolute path should be used as-is") - - // Ensure no double slashes (this was the original bug) - assert.NotContains(t, partitions[0], "//", "Path should not contain double slashes") - }) -} diff --git a/weed/query/engine/postgresql_only_test.go b/weed/query/engine/postgresql_only_test.go deleted file mode 100644 index d40e81b11..000000000 --- a/weed/query/engine/postgresql_only_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package engine - -import ( - "context" - "strings" - "testing" -) - -// TestPostgreSQLOnlySupport ensures that non-PostgreSQL syntax is properly rejected -func TestPostgreSQLOnlySupport(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - shouldError bool - errorMsg string - desc string - }{ - // Test that MySQL backticks are not supported for identifiers - { - name: "MySQL_Backticks_Table", - sql: "SELECT * FROM `user_events` LIMIT 1", - shouldError: true, - desc: "MySQL backticks for table names should be rejected", - }, - { - name: "MySQL_Backticks_Column", - sql: "SELECT `column_name` FROM user_events LIMIT 1", - shouldError: true, - desc: "MySQL backticks for column names should be rejected", - }, - - // Test that PostgreSQL double quotes work (should NOT error) - { - name: "PostgreSQL_Double_Quotes_OK", - sql: `SELECT "user_id" FROM user_events LIMIT 1`, - shouldError: false, - desc: "PostgreSQL double quotes for identifiers should work", - }, - - // Note: MySQL functions like YEAR(), MONTH() may parse but won't have proper implementations - // They're removed from the engine so they won't work correctly, but we don't explicitly reject them - - // Test that PostgreSQL EXTRACT works (should NOT error) - { - name: "PostgreSQL_EXTRACT_OK", - sql: "SELECT EXTRACT(YEAR FROM CURRENT_DATE) FROM user_events LIMIT 1", - shouldError: false, - desc: "PostgreSQL EXTRACT function should work", - }, - - // Test that single quotes work for string literals but not identifiers - { - name: "Single_Quotes_String_Literal_OK", - sql: "SELECT 'hello world' FROM user_events LIMIT 1", - shouldError: false, - desc: "Single quotes for string literals should work", - }, - } - - passCount := 0 - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.shouldError { - // We expect this query to fail - if err == nil && result.Error == nil { - t.Errorf("Expected error for %s, but query succeeded", tc.desc) - return - } - - // Check for specific error message if provided - if tc.errorMsg != "" { - errorText := "" - if err != nil { - errorText = err.Error() - } else if result.Error != nil { - errorText = result.Error.Error() - } - - if !strings.Contains(errorText, tc.errorMsg) { - t.Errorf("Expected error containing '%s', got: %s", tc.errorMsg, errorText) - return - } - } - - t.Logf("CORRECTLY REJECTED: %s", tc.desc) - passCount++ - } else { - // We expect this query to succeed - if err != nil { - t.Errorf("Unexpected error for %s: %v", tc.desc, err) - return - } - - if result.Error != nil { - t.Errorf("Unexpected result error for %s: %v", tc.desc, result.Error) - return - } - - t.Logf("CORRECTLY ACCEPTED: %s", tc.desc) - passCount++ - } - }) - } - - t.Logf("PostgreSQL-only compliance: %d/%d tests passed", passCount, len(testCases)) -} diff --git a/weed/query/engine/query_parsing_test.go b/weed/query/engine/query_parsing_test.go deleted file mode 100644 index ffeaadbc5..000000000 --- a/weed/query/engine/query_parsing_test.go +++ /dev/null @@ -1,564 +0,0 @@ -package engine - -import ( - "testing" -) - -func TestParseSQL_COUNT_Functions(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement) - }{ - { - name: "COUNT(*) basic", - sql: "SELECT COUNT(*) FROM test_table", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt, ok := stmt.(*SelectStatement) - if !ok { - t.Fatalf("Expected *SelectStatement, got %T", stmt) - } - - if len(selectStmt.SelectExprs) != 1 { - t.Fatalf("Expected 1 select expression, got %d", len(selectStmt.SelectExprs)) - } - - aliasedExpr, ok := selectStmt.SelectExprs[0].(*AliasedExpr) - if !ok { - t.Fatalf("Expected *AliasedExpr, got %T", selectStmt.SelectExprs[0]) - } - - funcExpr, ok := aliasedExpr.Expr.(*FuncExpr) - if !ok { - t.Fatalf("Expected *FuncExpr, got %T", aliasedExpr.Expr) - } - - if funcExpr.Name.String() != "COUNT" { - t.Errorf("Expected function name 'COUNT', got '%s'", funcExpr.Name.String()) - } - - if len(funcExpr.Exprs) != 1 { - t.Fatalf("Expected 1 function argument, got %d", len(funcExpr.Exprs)) - } - - starExpr, ok := funcExpr.Exprs[0].(*StarExpr) - if !ok { - t.Errorf("Expected *StarExpr argument, got %T", funcExpr.Exprs[0]) - } - _ = starExpr // Use the variable to avoid unused variable error - }, - }, - { - name: "COUNT(column_name)", - sql: "SELECT COUNT(user_id) FROM users", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt, ok := stmt.(*SelectStatement) - if !ok { - t.Fatalf("Expected *SelectStatement, got %T", stmt) - } - - aliasedExpr := selectStmt.SelectExprs[0].(*AliasedExpr) - funcExpr := aliasedExpr.Expr.(*FuncExpr) - - if funcExpr.Name.String() != "COUNT" { - t.Errorf("Expected function name 'COUNT', got '%s'", funcExpr.Name.String()) - } - - if len(funcExpr.Exprs) != 1 { - t.Fatalf("Expected 1 function argument, got %d", len(funcExpr.Exprs)) - } - - argExpr, ok := funcExpr.Exprs[0].(*AliasedExpr) - if !ok { - t.Errorf("Expected *AliasedExpr argument, got %T", funcExpr.Exprs[0]) - } - - colName, ok := argExpr.Expr.(*ColName) - if !ok { - t.Errorf("Expected *ColName, got %T", argExpr.Expr) - } - - if colName.Name.String() != "user_id" { - t.Errorf("Expected column name 'user_id', got '%s'", colName.Name.String()) - } - }, - }, - { - name: "Multiple aggregate functions", - sql: "SELECT COUNT(*), SUM(amount), AVG(score) FROM transactions", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt, ok := stmt.(*SelectStatement) - if !ok { - t.Fatalf("Expected *SelectStatement, got %T", stmt) - } - - if len(selectStmt.SelectExprs) != 3 { - t.Fatalf("Expected 3 select expressions, got %d", len(selectStmt.SelectExprs)) - } - - // Verify COUNT(*) - countExpr := selectStmt.SelectExprs[0].(*AliasedExpr) - countFunc := countExpr.Expr.(*FuncExpr) - if countFunc.Name.String() != "COUNT" { - t.Errorf("Expected first function to be COUNT, got %s", countFunc.Name.String()) - } - - // Verify SUM(amount) - sumExpr := selectStmt.SelectExprs[1].(*AliasedExpr) - sumFunc := sumExpr.Expr.(*FuncExpr) - if sumFunc.Name.String() != "SUM" { - t.Errorf("Expected second function to be SUM, got %s", sumFunc.Name.String()) - } - - // Verify AVG(score) - avgExpr := selectStmt.SelectExprs[2].(*AliasedExpr) - avgFunc := avgExpr.Expr.(*FuncExpr) - if avgFunc.Name.String() != "AVG" { - t.Errorf("Expected third function to be AVG, got %s", avgFunc.Name.String()) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt) - } - }) - } -} - -func TestParseSQL_SELECT_Expressions(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement) - }{ - { - name: "SELECT * FROM table", - sql: "SELECT * FROM users", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if len(selectStmt.SelectExprs) != 1 { - t.Fatalf("Expected 1 select expression, got %d", len(selectStmt.SelectExprs)) - } - - _, ok := selectStmt.SelectExprs[0].(*StarExpr) - if !ok { - t.Errorf("Expected *StarExpr, got %T", selectStmt.SelectExprs[0]) - } - }, - }, - { - name: "SELECT column FROM table", - sql: "SELECT user_id FROM users", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if len(selectStmt.SelectExprs) != 1 { - t.Fatalf("Expected 1 select expression, got %d", len(selectStmt.SelectExprs)) - } - - aliasedExpr, ok := selectStmt.SelectExprs[0].(*AliasedExpr) - if !ok { - t.Fatalf("Expected *AliasedExpr, got %T", selectStmt.SelectExprs[0]) - } - - colName, ok := aliasedExpr.Expr.(*ColName) - if !ok { - t.Fatalf("Expected *ColName, got %T", aliasedExpr.Expr) - } - - if colName.Name.String() != "user_id" { - t.Errorf("Expected column name 'user_id', got '%s'", colName.Name.String()) - } - }, - }, - { - name: "SELECT multiple columns", - sql: "SELECT user_id, name, email FROM users", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if len(selectStmt.SelectExprs) != 3 { - t.Fatalf("Expected 3 select expressions, got %d", len(selectStmt.SelectExprs)) - } - - expectedColumns := []string{"user_id", "name", "email"} - for i, expected := range expectedColumns { - aliasedExpr := selectStmt.SelectExprs[i].(*AliasedExpr) - colName := aliasedExpr.Expr.(*ColName) - if colName.Name.String() != expected { - t.Errorf("Expected column %d to be '%s', got '%s'", i, expected, colName.Name.String()) - } - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt) - } - }) - } -} - -func TestParseSQL_WHERE_Clauses(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement) - }{ - { - name: "WHERE with simple comparison", - sql: "SELECT * FROM users WHERE age > 18", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Where == nil { - t.Fatal("Expected WHERE clause, got nil") - } - - // Just verify we have a WHERE clause with an expression - if selectStmt.Where.Expr == nil { - t.Error("Expected WHERE expression, got nil") - } - }, - }, - { - name: "WHERE with AND condition", - sql: "SELECT * FROM users WHERE age > 18 AND status = 'active'", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Where == nil { - t.Fatal("Expected WHERE clause, got nil") - } - - // Verify we have an AND expression - andExpr, ok := selectStmt.Where.Expr.(*AndExpr) - if !ok { - t.Errorf("Expected *AndExpr, got %T", selectStmt.Where.Expr) - } - _ = andExpr // Use variable to avoid unused error - }, - }, - { - name: "WHERE with OR condition", - sql: "SELECT * FROM users WHERE age < 18 OR age > 65", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Where == nil { - t.Fatal("Expected WHERE clause, got nil") - } - - // Verify we have an OR expression - orExpr, ok := selectStmt.Where.Expr.(*OrExpr) - if !ok { - t.Errorf("Expected *OrExpr, got %T", selectStmt.Where.Expr) - } - _ = orExpr // Use variable to avoid unused error - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt) - } - }) - } -} - -func TestParseSQL_LIMIT_Clauses(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement) - }{ - { - name: "LIMIT with number", - sql: "SELECT * FROM users LIMIT 10", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Limit == nil { - t.Fatal("Expected LIMIT clause, got nil") - } - - if selectStmt.Limit.Rowcount == nil { - t.Error("Expected LIMIT rowcount, got nil") - } - - // Verify no OFFSET is set - if selectStmt.Limit.Offset != nil { - t.Error("Expected OFFSET to be nil for LIMIT-only query") - } - - sqlVal, ok := selectStmt.Limit.Rowcount.(*SQLVal) - if !ok { - t.Errorf("Expected *SQLVal, got %T", selectStmt.Limit.Rowcount) - } - - if sqlVal.Type != IntVal { - t.Errorf("Expected IntVal type, got %d", sqlVal.Type) - } - - if string(sqlVal.Val) != "10" { - t.Errorf("Expected limit value '10', got '%s'", string(sqlVal.Val)) - } - }, - }, - { - name: "LIMIT with OFFSET", - sql: "SELECT * FROM users LIMIT 10 OFFSET 5", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Limit == nil { - t.Fatal("Expected LIMIT clause, got nil") - } - - // Verify LIMIT value - if selectStmt.Limit.Rowcount == nil { - t.Error("Expected LIMIT rowcount, got nil") - } - - limitVal, ok := selectStmt.Limit.Rowcount.(*SQLVal) - if !ok { - t.Errorf("Expected *SQLVal for LIMIT, got %T", selectStmt.Limit.Rowcount) - } - - if limitVal.Type != IntVal { - t.Errorf("Expected IntVal type for LIMIT, got %d", limitVal.Type) - } - - if string(limitVal.Val) != "10" { - t.Errorf("Expected limit value '10', got '%s'", string(limitVal.Val)) - } - - // Verify OFFSET value - if selectStmt.Limit.Offset == nil { - t.Fatal("Expected OFFSET clause, got nil") - } - - offsetVal, ok := selectStmt.Limit.Offset.(*SQLVal) - if !ok { - t.Errorf("Expected *SQLVal for OFFSET, got %T", selectStmt.Limit.Offset) - } - - if offsetVal.Type != IntVal { - t.Errorf("Expected IntVal type for OFFSET, got %d", offsetVal.Type) - } - - if string(offsetVal.Val) != "5" { - t.Errorf("Expected offset value '5', got '%s'", string(offsetVal.Val)) - } - }, - }, - { - name: "LIMIT with OFFSET zero", - sql: "SELECT * FROM users LIMIT 5 OFFSET 0", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Limit == nil { - t.Fatal("Expected LIMIT clause, got nil") - } - - // Verify OFFSET is 0 - if selectStmt.Limit.Offset == nil { - t.Fatal("Expected OFFSET clause, got nil") - } - - offsetVal, ok := selectStmt.Limit.Offset.(*SQLVal) - if !ok { - t.Errorf("Expected *SQLVal for OFFSET, got %T", selectStmt.Limit.Offset) - } - - if string(offsetVal.Val) != "0" { - t.Errorf("Expected offset value '0', got '%s'", string(offsetVal.Val)) - } - }, - }, - { - name: "LIMIT with large OFFSET", - sql: "SELECT * FROM users LIMIT 100 OFFSET 1000", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - selectStmt := stmt.(*SelectStatement) - if selectStmt.Limit == nil { - t.Fatal("Expected LIMIT clause, got nil") - } - - // Verify large OFFSET value - offsetVal, ok := selectStmt.Limit.Offset.(*SQLVal) - if !ok { - t.Errorf("Expected *SQLVal for OFFSET, got %T", selectStmt.Limit.Offset) - } - - if string(offsetVal.Val) != "1000" { - t.Errorf("Expected offset value '1000', got '%s'", string(offsetVal.Val)) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt) - } - }) - } -} - -func TestParseSQL_SHOW_Statements(t *testing.T) { - tests := []struct { - name string - sql string - wantErr bool - validate func(t *testing.T, stmt Statement) - }{ - { - name: "SHOW DATABASES", - sql: "SHOW DATABASES", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - showStmt, ok := stmt.(*ShowStatement) - if !ok { - t.Fatalf("Expected *ShowStatement, got %T", stmt) - } - - if showStmt.Type != "databases" { - t.Errorf("Expected type 'databases', got '%s'", showStmt.Type) - } - }, - }, - { - name: "SHOW TABLES", - sql: "SHOW TABLES", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - showStmt, ok := stmt.(*ShowStatement) - if !ok { - t.Fatalf("Expected *ShowStatement, got %T", stmt) - } - - if showStmt.Type != "tables" { - t.Errorf("Expected type 'tables', got '%s'", showStmt.Type) - } - }, - }, - { - name: "SHOW TABLES FROM database", - sql: "SHOW TABLES FROM \"test_db\"", - wantErr: false, - validate: func(t *testing.T, stmt Statement) { - showStmt, ok := stmt.(*ShowStatement) - if !ok { - t.Fatalf("Expected *ShowStatement, got %T", stmt) - } - - if showStmt.Type != "tables" { - t.Errorf("Expected type 'tables', got '%s'", showStmt.Type) - } - - if showStmt.Schema != "test_db" { - t.Errorf("Expected schema 'test_db', got '%s'", showStmt.Schema) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stmt, err := ParseSQL(tt.sql) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error, but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if tt.validate != nil { - tt.validate(t, stmt) - } - }) - } -} diff --git a/weed/query/engine/real_namespace_test.go b/weed/query/engine/real_namespace_test.go deleted file mode 100644 index 6c88ef612..000000000 --- a/weed/query/engine/real_namespace_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -// TestRealNamespaceDiscovery tests the real namespace discovery functionality -func TestRealNamespaceDiscovery(t *testing.T) { - engine := NewSQLEngine("localhost:8888") - - // Test SHOW DATABASES with real namespace discovery - result, err := engine.ExecuteSQL(context.Background(), "SHOW DATABASES") - if err != nil { - t.Fatalf("SHOW DATABASES failed: %v", err) - } - - // Should have Database column - if len(result.Columns) != 1 || result.Columns[0] != "Database" { - t.Errorf("Expected 1 column 'Database', got %v", result.Columns) - } - - // With no fallback sample data, result may be empty if no real MQ cluster - t.Logf("Discovered %d namespaces (no fallback data):", len(result.Rows)) - if len(result.Rows) == 0 { - t.Log(" (No namespaces found - requires real SeaweedFS MQ cluster)") - } else { - for _, row := range result.Rows { - if len(row) > 0 { - t.Logf(" - %s", row[0].ToString()) - } - } - } -} - -// TestRealTopicDiscovery tests the real topic discovery functionality -func TestRealTopicDiscovery(t *testing.T) { - engine := NewSQLEngine("localhost:8888") - - // Test SHOW TABLES with real topic discovery (use double quotes for PostgreSQL) - result, err := engine.ExecuteSQL(context.Background(), "SHOW TABLES FROM \"default\"") - if err != nil { - t.Fatalf("SHOW TABLES failed: %v", err) - } - - // Should have table name column - expectedColumn := "Tables_in_default" - if len(result.Columns) != 1 || result.Columns[0] != expectedColumn { - t.Errorf("Expected 1 column '%s', got %v", expectedColumn, result.Columns) - } - - // With no fallback sample data, result may be empty if no real MQ cluster or namespace doesn't exist - t.Logf("Discovered %d topics in 'default' namespace (no fallback data):", len(result.Rows)) - if len(result.Rows) == 0 { - t.Log(" (No topics found - requires real SeaweedFS MQ cluster with 'default' namespace)") - } else { - for _, row := range result.Rows { - if len(row) > 0 { - t.Logf(" - %s", row[0].ToString()) - } - } - } -} - -// TestNamespaceDiscoveryNoFallback tests behavior when filer is unavailable (no sample data) -func TestNamespaceDiscoveryNoFallback(t *testing.T) { - // This test demonstrates the no-fallback behavior when no real MQ cluster is running - engine := NewSQLEngine("localhost:8888") - - // Get broker client to test directly - brokerClient := engine.catalog.brokerClient - if brokerClient == nil { - t.Fatal("Expected brokerClient to be initialized") - } - - // Test namespace listing (should fail without real cluster) - namespaces, err := brokerClient.ListNamespaces(context.Background()) - if err != nil { - t.Logf("ListNamespaces failed as expected: %v", err) - namespaces = []string{} // Set empty for the rest of the test - } - - // With no fallback sample data, should return empty lists - if len(namespaces) != 0 { - t.Errorf("Expected empty namespace list with no fallback, got %v", namespaces) - } - - // Test topic listing (should return empty list) - topics, err := brokerClient.ListTopics(context.Background(), "default") - if err != nil { - t.Fatalf("ListTopics failed: %v", err) - } - - // Should have no fallback topics - if len(topics) != 0 { - t.Errorf("Expected empty topic list with no fallback, got %v", topics) - } - - t.Log("No fallback behavior - returns empty lists when filer unavailable") -} diff --git a/weed/query/engine/real_world_where_clause_test.go b/weed/query/engine/real_world_where_clause_test.go deleted file mode 100644 index e63c27ab4..000000000 --- a/weed/query/engine/real_world_where_clause_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "testing" -) - -// TestRealWorldWhereClauseFailure demonstrates the exact WHERE clause issue from real usage -func TestRealWorldWhereClauseFailure(t *testing.T) { - engine := NewTestSQLEngine() - - // This test simulates the exact real-world scenario that failed - testCases := []struct { - name string - sql string - filterValue int64 - operator string - desc string - }{ - { - name: "Where_ID_Greater_Than_Large_Number", - sql: "SELECT id FROM user_events WHERE id > 10000000", - filterValue: 10000000, - operator: ">", - desc: "Real-world case: WHERE id > 10000000 should filter results", - }, - { - name: "Where_ID_Greater_Than_Small_Number", - sql: "SELECT id FROM user_events WHERE id > 100000", - filterValue: 100000, - operator: ">", - desc: "WHERE id > 100000 should filter results", - }, - { - name: "Where_ID_Less_Than", - sql: "SELECT id FROM user_events WHERE id < 100000", - filterValue: 100000, - operator: "<", - desc: "WHERE id < 100000 should filter results", - }, - } - - t.Log("TESTING REAL-WORLD WHERE CLAUSE SCENARIOS") - t.Log("============================================") - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if err != nil { - t.Errorf("Query failed: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Result error: %v", result.Error) - return - } - - // Analyze the actual results - actualRows := len(result.Rows) - var matchingRows, nonMatchingRows int - - t.Logf("Query: %s", tc.sql) - t.Logf("Total rows returned: %d", actualRows) - - if actualRows > 0 { - t.Logf("Sample IDs returned:") - sampleSize := 5 - if actualRows < sampleSize { - sampleSize = actualRows - } - - for i := 0; i < sampleSize; i++ { - idStr := result.Rows[i][0].ToString() - if idValue, err := strconv.ParseInt(idStr, 10, 64); err == nil { - t.Logf(" Row %d: id = %d", i+1, idValue) - - // Check if this row should have been filtered - switch tc.operator { - case ">": - if idValue > tc.filterValue { - matchingRows++ - } else { - nonMatchingRows++ - } - case "<": - if idValue < tc.filterValue { - matchingRows++ - } else { - nonMatchingRows++ - } - } - } - } - - // Count all rows for accurate assessment - allMatchingRows, allNonMatchingRows := 0, 0 - for _, row := range result.Rows { - idStr := row[0].ToString() - if idValue, err := strconv.ParseInt(idStr, 10, 64); err == nil { - switch tc.operator { - case ">": - if idValue > tc.filterValue { - allMatchingRows++ - } else { - allNonMatchingRows++ - } - case "<": - if idValue < tc.filterValue { - allMatchingRows++ - } else { - allNonMatchingRows++ - } - } - } - } - - t.Logf("Analysis:") - t.Logf(" Rows matching WHERE condition: %d", allMatchingRows) - t.Logf(" Rows NOT matching WHERE condition: %d", allNonMatchingRows) - - if allNonMatchingRows > 0 { - t.Errorf("FAIL: %s - Found %d rows that should have been filtered out", tc.desc, allNonMatchingRows) - t.Errorf(" This confirms WHERE clause is being ignored") - } else { - t.Logf("PASS: %s - All returned rows match the WHERE condition", tc.desc) - } - } else { - t.Logf("No rows returned - this could be correct if no data matches") - } - }) - } -} - -// TestWhereClauseWithLimitOffset tests the exact failing scenario -func TestWhereClauseWithLimitOffset(t *testing.T) { - engine := NewTestSQLEngine() - - // The exact query that was failing in real usage - sql := "SELECT id FROM user_events WHERE id > 10000000 LIMIT 10 OFFSET 5" - - t.Logf("Testing exact failing query: %s", sql) - - result, err := engine.ExecuteSQL(context.Background(), sql) - - if err != nil { - t.Errorf("Query failed: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Result error: %v", result.Error) - return - } - - actualRows := len(result.Rows) - t.Logf("Returned %d rows (LIMIT 10 worked)", actualRows) - - if actualRows > 10 { - t.Errorf("LIMIT not working: expected max 10 rows, got %d", actualRows) - } - - // Check if WHERE clause worked - nonMatchingRows := 0 - for i, row := range result.Rows { - idStr := row[0].ToString() - if idValue, err := strconv.ParseInt(idStr, 10, 64); err == nil { - t.Logf("Row %d: id = %d", i+1, idValue) - if idValue <= 10000000 { - nonMatchingRows++ - } - } - } - - if nonMatchingRows > 0 { - t.Errorf("WHERE clause completely ignored: %d rows have id <= 10000000", nonMatchingRows) - t.Log("This matches the real-world failure - WHERE is parsed but not executed") - } else { - t.Log("WHERE clause working correctly") - } -} - -// TestWhatShouldHaveBeenTested creates the test that should have caught the WHERE issue -func TestWhatShouldHaveBeenTested(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("THE TEST THAT SHOULD HAVE CAUGHT THE WHERE CLAUSE ISSUE") - t.Log("========================================================") - - // Test 1: Simple WHERE that should return subset - result1, _ := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events") - allRowCount := len(result1.Rows) - - result2, _ := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events WHERE id > 999999999") - filteredCount := len(result2.Rows) - - t.Logf("All rows: %d", allRowCount) - t.Logf("WHERE id > 999999999: %d rows", filteredCount) - - if filteredCount == allRowCount { - t.Error("CRITICAL ISSUE: WHERE clause completely ignored") - t.Error("Expected: Fewer rows after WHERE filtering") - t.Error("Actual: Same number of rows (no filtering occurred)") - t.Error("This is the bug that our tests should have caught!") - } - - // Test 2: Impossible WHERE condition - result3, _ := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events WHERE 1 = 0") - impossibleCount := len(result3.Rows) - - t.Logf("WHERE 1 = 0 (impossible): %d rows", impossibleCount) - - if impossibleCount > 0 { - t.Error("CRITICAL ISSUE: Even impossible WHERE conditions ignored") - t.Error("Expected: 0 rows") - t.Errorf("Actual: %d rows", impossibleCount) - } -} diff --git a/weed/query/engine/schema_parsing_test.go b/weed/query/engine/schema_parsing_test.go deleted file mode 100644 index 03db28a9a..000000000 --- a/weed/query/engine/schema_parsing_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package engine - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestSchemaAwareParsing tests the schema-aware message parsing functionality -func TestSchemaAwareParsing(t *testing.T) { - // Create a mock HybridMessageScanner with schema - recordSchema := &schema_pb.RecordType{ - Fields: []*schema_pb.Field{ - { - Name: "user_id", - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}, - }, - { - Name: "event_type", - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, - }, - { - Name: "cpu_usage", - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}, - }, - { - Name: "is_active", - Type: &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BOOL}}, - }, - }, - } - - scanner := &HybridMessageScanner{ - recordSchema: recordSchema, - } - - t.Run("JSON Message Parsing", func(t *testing.T) { - jsonData := []byte(`{"user_id": 1234, "event_type": "login", "cpu_usage": 75.5, "is_active": true}`) - - result, err := scanner.parseJSONMessage(jsonData) - if err != nil { - t.Fatalf("Failed to parse JSON message: %v", err) - } - - // Verify user_id as int32 - if userIdVal := result.Fields["user_id"]; userIdVal == nil { - t.Error("user_id field missing") - } else if userIdVal.GetInt32Value() != 1234 { - t.Errorf("Expected user_id=1234, got %v", userIdVal.GetInt32Value()) - } - - // Verify event_type as string - if eventTypeVal := result.Fields["event_type"]; eventTypeVal == nil { - t.Error("event_type field missing") - } else if eventTypeVal.GetStringValue() != "login" { - t.Errorf("Expected event_type='login', got %v", eventTypeVal.GetStringValue()) - } - - // Verify cpu_usage as double - if cpuVal := result.Fields["cpu_usage"]; cpuVal == nil { - t.Error("cpu_usage field missing") - } else if cpuVal.GetDoubleValue() != 75.5 { - t.Errorf("Expected cpu_usage=75.5, got %v", cpuVal.GetDoubleValue()) - } - - // Verify is_active as bool - if isActiveVal := result.Fields["is_active"]; isActiveVal == nil { - t.Error("is_active field missing") - } else if !isActiveVal.GetBoolValue() { - t.Errorf("Expected is_active=true, got %v", isActiveVal.GetBoolValue()) - } - - t.Logf("JSON parsing correctly converted types: int32=%d, string='%s', double=%.1f, bool=%v", - result.Fields["user_id"].GetInt32Value(), - result.Fields["event_type"].GetStringValue(), - result.Fields["cpu_usage"].GetDoubleValue(), - result.Fields["is_active"].GetBoolValue()) - }) - - t.Run("Raw Data Type Conversion", func(t *testing.T) { - // Test string conversion - stringType := &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}} - stringVal, err := scanner.convertRawDataToSchemaValue([]byte("hello world"), stringType) - if err != nil { - t.Errorf("Failed to convert string: %v", err) - } else if stringVal.GetStringValue() != "hello world" { - t.Errorf("String conversion failed: got %v", stringVal.GetStringValue()) - } - - // Test int32 conversion - int32Type := &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}} - int32Val, err := scanner.convertRawDataToSchemaValue([]byte("42"), int32Type) - if err != nil { - t.Errorf("Failed to convert int32: %v", err) - } else if int32Val.GetInt32Value() != 42 { - t.Errorf("Int32 conversion failed: got %v", int32Val.GetInt32Value()) - } - - // Test double conversion - doubleType := &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}} - doubleVal, err := scanner.convertRawDataToSchemaValue([]byte("3.14159"), doubleType) - if err != nil { - t.Errorf("Failed to convert double: %v", err) - } else if doubleVal.GetDoubleValue() != 3.14159 { - t.Errorf("Double conversion failed: got %v", doubleVal.GetDoubleValue()) - } - - // Test bool conversion - boolType := &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BOOL}} - boolVal, err := scanner.convertRawDataToSchemaValue([]byte("true"), boolType) - if err != nil { - t.Errorf("Failed to convert bool: %v", err) - } else if !boolVal.GetBoolValue() { - t.Errorf("Bool conversion failed: got %v", boolVal.GetBoolValue()) - } - - t.Log("Raw data type conversions working correctly") - }) - - t.Run("Invalid JSON Graceful Handling", func(t *testing.T) { - invalidJSON := []byte(`{"user_id": 1234, "malformed": }`) - - _, err := scanner.parseJSONMessage(invalidJSON) - if err == nil { - t.Error("Expected error for invalid JSON, but got none") - } - - t.Log("Invalid JSON handled gracefully with error") - }) -} - -// TestSchemaAwareParsingIntegration tests the full integration with SQL engine -func TestSchemaAwareParsingIntegration(t *testing.T) { - engine := NewTestSQLEngine() - - // Test that the enhanced schema-aware parsing doesn't break existing functionality - result, err := engine.ExecuteSQL(context.Background(), "SELECT *, _source FROM user_events LIMIT 2") - if err != nil { - t.Fatalf("Schema-aware parsing broke basic SELECT: %v", err) - } - - if len(result.Rows) == 0 { - t.Error("No rows returned - schema parsing may have issues") - } - - // Check that _source column is still present (hybrid functionality) - foundSourceColumn := false - for _, col := range result.Columns { - if col == "_source" { - foundSourceColumn = true - break - } - } - - if !foundSourceColumn { - t.Log("_source column missing - running in fallback mode without real cluster") - } - - t.Log("Schema-aware parsing integrates correctly with SQL engine") -} diff --git a/weed/query/engine/select_test.go b/weed/query/engine/select_test.go deleted file mode 100644 index 08cf986a2..000000000 --- a/weed/query/engine/select_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strings" - "testing" -) - -func TestSQLEngine_SelectBasic(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT * FROM table - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - if len(result.Columns) == 0 { - t.Error("Expected columns in result") - } - - if len(result.Rows) == 0 { - t.Error("Expected rows in result") - } - - // Should have sample data with 4 columns (SELECT * excludes system columns) - expectedColumns := []string{"id", "user_id", "event_type", "data"} - if len(result.Columns) != len(expectedColumns) { - t.Errorf("Expected %d columns, got %d", len(expectedColumns), len(result.Columns)) - } - - // In mock environment, only live_log data from unflushed messages - // parquet_archive data would come from parquet files in a real system - if len(result.Rows) == 0 { - t.Error("Expected rows in result") - } -} - -func TestSQLEngine_SelectWithLimit(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT with LIMIT - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 2") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have exactly 2 rows due to LIMIT - if len(result.Rows) != 2 { - t.Errorf("Expected 2 rows with LIMIT 2, got %d", len(result.Rows)) - } -} - -func TestSQLEngine_SelectSpecificColumns(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT specific columns (this will fall back to sample data) - result, err := engine.ExecuteSQL(context.Background(), "SELECT user_id, event_type FROM user_events") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have all columns for now (sample data doesn't implement projection yet) - if len(result.Columns) == 0 { - t.Error("Expected columns in result") - } -} - -func TestSQLEngine_SelectFromNonExistentTable(t *testing.T) { - t.Skip("Skipping non-existent table test - table name parsing issue needs investigation") - engine := NewTestSQLEngine() - - // Test SELECT from non-existent table - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM nonexistent_table") - t.Logf("ExecuteSQL returned: err=%v, result.Error=%v", err, result.Error) - if result.Error == nil { - t.Error("Expected error for non-existent table") - return - } - - if !strings.Contains(result.Error.Error(), "not found") { - t.Errorf("Expected 'not found' error, got: %v", result.Error) - } -} - -func TestSQLEngine_SelectWithOffset(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT with OFFSET only - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 10 OFFSET 1") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have fewer rows than total since we skip 1 row - // Sample data has 10 rows, so OFFSET 1 should give us 9 rows - if len(result.Rows) != 9 { - t.Errorf("Expected 9 rows with OFFSET 1 (10 total - 1 offset), got %d", len(result.Rows)) - } -} - -func TestSQLEngine_SelectWithLimitAndOffset(t *testing.T) { - engine := NewTestSQLEngine() - - // Test SELECT with both LIMIT and OFFSET - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 2 OFFSET 1") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have exactly 2 rows (skip 1, take 2) - if len(result.Rows) != 2 { - t.Errorf("Expected 2 rows with LIMIT 2 OFFSET 1, got %d", len(result.Rows)) - } -} - -func TestSQLEngine_SelectWithOffsetExceedsRows(t *testing.T) { - engine := NewTestSQLEngine() - - // Test OFFSET that exceeds available rows - result, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 10 OFFSET 10") - if err != nil { - t.Fatalf("Expected no error, got %v", err) - } - - if result.Error != nil { - t.Fatalf("Expected no query error, got %v", result.Error) - } - - // Should have 0 rows since offset exceeds available data - if len(result.Rows) != 0 { - t.Errorf("Expected 0 rows with large OFFSET, got %d", len(result.Rows)) - } -} - -func TestSQLEngine_SelectWithOffsetZero(t *testing.T) { - engine := NewTestSQLEngine() - - // Test OFFSET 0 (should be same as no offset) - result1, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 3") - if err != nil { - t.Fatalf("Expected no error for LIMIT query, got %v", err) - } - - result2, err := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events LIMIT 3 OFFSET 0") - if err != nil { - t.Fatalf("Expected no error for LIMIT OFFSET query, got %v", err) - } - - if result1.Error != nil { - t.Fatalf("Expected no query error for LIMIT, got %v", result1.Error) - } - - if result2.Error != nil { - t.Fatalf("Expected no query error for LIMIT OFFSET, got %v", result2.Error) - } - - // Both should return the same number of rows - if len(result1.Rows) != len(result2.Rows) { - t.Errorf("LIMIT 3 and LIMIT 3 OFFSET 0 should return same number of rows. Got %d vs %d", len(result1.Rows), len(result2.Rows)) - } -} - -func TestSQLEngine_SelectDifferentTables(t *testing.T) { - engine := NewTestSQLEngine() - - // Test different sample tables - tables := []string{"user_events", "system_logs"} - - for _, tableName := range tables { - result, err := engine.ExecuteSQL(context.Background(), fmt.Sprintf("SELECT * FROM %s", tableName)) - if err != nil { - t.Errorf("Error querying table %s: %v", tableName, err) - continue - } - - if result.Error != nil { - t.Errorf("Query error for table %s: %v", tableName, result.Error) - continue - } - - if len(result.Columns) == 0 { - t.Errorf("No columns returned for table %s", tableName) - } - - if len(result.Rows) == 0 { - t.Errorf("No rows returned for table %s", tableName) - } - - t.Logf("Table %s: %d columns, %d rows", tableName, len(result.Columns), len(result.Rows)) - } -} diff --git a/weed/query/engine/sql_alias_support_test.go b/weed/query/engine/sql_alias_support_test.go deleted file mode 100644 index dbe91f821..000000000 --- a/weed/query/engine/sql_alias_support_test.go +++ /dev/null @@ -1,408 +0,0 @@ -package engine - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestSQLAliasResolution tests the complete SQL alias resolution functionality -func TestSQLAliasResolution(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("ResolveColumnAlias", func(t *testing.T) { - // Test the helper function for resolving aliases - - // Create SELECT expressions with aliases - selectExprs := []SelectExpr{ - &AliasedExpr{ - Expr: &ColName{Name: stringValue("_ts_ns")}, - As: aliasValue("ts"), - }, - &AliasedExpr{ - Expr: &ColName{Name: stringValue("id")}, - As: aliasValue("record_id"), - }, - } - - // Test alias resolution - resolved := engine.resolveColumnAlias("ts", selectExprs) - assert.Equal(t, "_ts_ns", resolved, "Should resolve 'ts' alias to '_ts_ns'") - - resolved = engine.resolveColumnAlias("record_id", selectExprs) - assert.Equal(t, "id", resolved, "Should resolve 'record_id' alias to 'id'") - - // Test non-aliased column (should return as-is) - resolved = engine.resolveColumnAlias("some_other_column", selectExprs) - assert.Equal(t, "some_other_column", resolved, "Non-aliased columns should return unchanged") - }) - - t.Run("SingleAliasInWhere", func(t *testing.T) { - // Test using a single alias in WHERE clause - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - }, - } - - // Parse SQL with alias in WHERE - sql := "SELECT _ts_ns AS ts, id FROM test WHERE ts = 1756947416566456262" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse SQL with alias in WHERE") - - selectStmt := stmt.(*SelectStatement) - - // Build predicate with context (for alias resolution) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate with alias resolution") - - // Test the predicate - result := predicate(testRecord) - assert.True(t, result, "Predicate should match using alias 'ts' for '_ts_ns'") - - // Test with non-matching value - sql2 := "SELECT _ts_ns AS ts, id FROM test WHERE ts = 999999" - stmt2, err := ParseSQL(sql2) - assert.NoError(t, err) - selectStmt2 := stmt2.(*SelectStatement) - - predicate2, err := engine.buildPredicateWithContext(selectStmt2.Where.Expr, selectStmt2.SelectExprs) - assert.NoError(t, err) - - result2 := predicate2(testRecord) - assert.False(t, result2, "Predicate should not match different value") - }) - - t.Run("MultipleAliasesInWhere", func(t *testing.T) { - // Test using multiple aliases in WHERE clause - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 82460}}, - }, - } - - // Parse SQL with multiple aliases in WHERE - sql := "SELECT _ts_ns AS ts, id AS record_id FROM test WHERE ts = 1756947416566456262 AND record_id = 82460" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse SQL with multiple aliases") - - selectStmt := stmt.(*SelectStatement) - - // Build predicate with context - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate with multiple alias resolution") - - // Test the predicate - should match both conditions - result := predicate(testRecord) - assert.True(t, result, "Should match both aliased conditions") - - // Test with one condition not matching - testRecord2 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 99999}}, // Different ID - }, - } - - result2 := predicate(testRecord2) - assert.False(t, result2, "Should not match when one alias condition fails") - }) - - t.Run("RangeQueryWithAliases", func(t *testing.T) { - // Test range queries using aliases - testRecords := []*schema_pb.RecordValue{ - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456260}}, // Below range - }, - }, - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, // In range - }, - }, - { - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456265}}, // Above range - }, - }, - } - - // Test range query with alias - sql := "SELECT _ts_ns AS ts FROM test WHERE ts > 1756947416566456261 AND ts < 1756947416566456264" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse range query with alias") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build range predicate with alias") - - // Test each record - assert.False(t, predicate(testRecords[0]), "Should not match record below range") - assert.True(t, predicate(testRecords[1]), "Should match record in range") - assert.False(t, predicate(testRecords[2]), "Should not match record above range") - }) - - t.Run("MixedAliasAndDirectColumn", func(t *testing.T) { - // Test mixing aliased and non-aliased columns in WHERE - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 82460}}, - "status": {Kind: &schema_pb.Value_StringValue{StringValue: "active"}}, - }, - } - - // Use alias for one column, direct name for another - sql := "SELECT _ts_ns AS ts, id, status FROM test WHERE ts = 1756947416566456262 AND status = 'active'" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse mixed alias/direct query") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build mixed predicate") - - result := predicate(testRecord) - assert.True(t, result, "Should match with mixed alias and direct column usage") - }) - - t.Run("AliasCompatibilityWithTimestampFixes", func(t *testing.T) { - // Test that alias resolution works with the timestamp precision fixes - largeTimestamp := int64(1756947416566456262) // Large nanosecond timestamp - - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - // Test that large timestamp precision is maintained with aliases - sql := "SELECT _ts_ns AS ts, id FROM test WHERE ts = 1756947416566456262" - stmt, err := ParseSQL(sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err) - - result := predicate(testRecord) - assert.True(t, result, "Large timestamp precision should be maintained with aliases") - - // Test precision with off-by-one (should not match) - sql2 := "SELECT _ts_ns AS ts, id FROM test WHERE ts = 1756947416566456263" // +1 - stmt2, err := ParseSQL(sql2) - assert.NoError(t, err) - selectStmt2 := stmt2.(*SelectStatement) - predicate2, err := engine.buildPredicateWithContext(selectStmt2.Where.Expr, selectStmt2.SelectExprs) - assert.NoError(t, err) - - result2 := predicate2(testRecord) - assert.False(t, result2, "Should not match timestamp differing by 1 nanosecond") - }) - - t.Run("EdgeCasesAndErrorHandling", func(t *testing.T) { - // Test edge cases and error conditions - - // Test with nil SelectExprs - predicate, err := engine.buildPredicateWithContext(&ComparisonExpr{ - Left: &ColName{Name: stringValue("test_col")}, - Operator: "=", - Right: &SQLVal{Type: IntVal, Val: []byte("123")}, - }, nil) - assert.NoError(t, err, "Should handle nil SelectExprs gracefully") - assert.NotNil(t, predicate, "Should return valid predicate even without aliases") - - // Test alias resolution with empty SelectExprs - resolved := engine.resolveColumnAlias("test_col", []SelectExpr{}) - assert.Equal(t, "test_col", resolved, "Should return original name with empty SelectExprs") - - // Test alias resolution with nil SelectExprs - resolved = engine.resolveColumnAlias("test_col", nil) - assert.Equal(t, "test_col", resolved, "Should return original name with nil SelectExprs") - }) - - t.Run("ComparisonOperators", func(t *testing.T) { - // Test all comparison operators work with aliases - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1000}}, - }, - } - - operators := []struct { - op string - value string - expected bool - }{ - {"=", "1000", true}, - {"=", "999", false}, - {">", "999", true}, - {">", "1000", false}, - {">=", "1000", true}, - {">=", "1001", false}, - {"<", "1001", true}, - {"<", "1000", false}, - {"<=", "1000", true}, - {"<=", "999", false}, - } - - for _, test := range operators { - t.Run(test.op+"_"+test.value, func(t *testing.T) { - sql := "SELECT _ts_ns AS ts FROM test WHERE ts " + test.op + " " + test.value - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse operator: %s", test.op) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for operator: %s", test.op) - - result := predicate(testRecord) - assert.Equal(t, test.expected, result, "Operator %s with value %s should return %v", test.op, test.value, test.expected) - }) - } - }) - - t.Run("BackwardCompatibility", func(t *testing.T) { - // Ensure non-alias queries still work exactly as before - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - }, - } - - // Test traditional query (no aliases) - sql := "SELECT _ts_ns, id FROM test WHERE _ts_ns = 1756947416566456262" - stmt, err := ParseSQL(sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - - // Should work with both old and new predicate building methods - predicateOld, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err, "Old buildPredicate method should still work") - - predicateNew, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "New buildPredicateWithContext should work for non-alias queries") - - // Both should produce the same result - resultOld := predicateOld(testRecord) - resultNew := predicateNew(testRecord) - - assert.True(t, resultOld, "Old method should match") - assert.True(t, resultNew, "New method should match") - assert.Equal(t, resultOld, resultNew, "Both methods should produce identical results") - }) -} - -// TestAliasIntegrationWithProductionScenarios tests real-world usage patterns -func TestAliasIntegrationWithProductionScenarios(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("OriginalFailingQuery", func(t *testing.T) { - // Test the exact query pattern that was originally failing - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756913789829292386}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 82460}}, - }, - } - - // This was the original failing pattern - sql := "SELECT id, _ts_ns AS ts FROM ecommerce.user_events WHERE ts = 1756913789829292386" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse the originally failing query pattern") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for originally failing pattern") - - result := predicate(testRecord) - assert.True(t, result, "Should now work for the originally failing query pattern") - }) - - t.Run("ComplexProductionQuery", func(t *testing.T) { - // Test a more complex production-like query - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - "user_id": {Kind: &schema_pb.Value_StringValue{StringValue: "user123"}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "click"}}, - }, - } - - sql := `SELECT - id AS event_id, - _ts_ns AS event_time, - user_id AS uid, - event_type AS action - FROM ecommerce.user_events - WHERE event_time = 1756947416566456262 - AND uid = 'user123' - AND action = 'click'` - - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse complex production query") - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicateWithContext(selectStmt.Where.Expr, selectStmt.SelectExprs) - assert.NoError(t, err, "Should build predicate for complex query") - - result := predicate(testRecord) - assert.True(t, result, "Should match complex production query with multiple aliases") - - // Test partial match failure - testRecord2 := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - "user_id": {Kind: &schema_pb.Value_StringValue{StringValue: "user999"}}, // Different user - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "click"}}, - }, - } - - result2 := predicate(testRecord2) - assert.False(t, result2, "Should not match when one aliased condition fails") - }) - - t.Run("PerformanceRegression", func(t *testing.T) { - // Ensure alias resolution doesn't significantly impact performance - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - }, - } - - // Build predicates for comparison - sqlWithAlias := "SELECT _ts_ns AS ts FROM test WHERE ts = 1756947416566456262" - sqlWithoutAlias := "SELECT _ts_ns FROM test WHERE _ts_ns = 1756947416566456262" - - stmtWithAlias, err := ParseSQL(sqlWithAlias) - assert.NoError(t, err) - stmtWithoutAlias, err := ParseSQL(sqlWithoutAlias) - assert.NoError(t, err) - - selectStmtWithAlias := stmtWithAlias.(*SelectStatement) - selectStmtWithoutAlias := stmtWithoutAlias.(*SelectStatement) - - // Both should build successfully - predicateWithAlias, err := engine.buildPredicateWithContext(selectStmtWithAlias.Where.Expr, selectStmtWithAlias.SelectExprs) - assert.NoError(t, err) - - predicateWithoutAlias, err := engine.buildPredicateWithContext(selectStmtWithoutAlias.Where.Expr, selectStmtWithoutAlias.SelectExprs) - assert.NoError(t, err) - - // Both should produce the same logical result - resultWithAlias := predicateWithAlias(testRecord) - resultWithoutAlias := predicateWithoutAlias(testRecord) - - assert.True(t, resultWithAlias, "Alias query should work") - assert.True(t, resultWithoutAlias, "Non-alias query should work") - assert.Equal(t, resultWithAlias, resultWithoutAlias, "Both should produce same result") - }) -} diff --git a/weed/query/engine/sql_feature_diagnostic_test.go b/weed/query/engine/sql_feature_diagnostic_test.go deleted file mode 100644 index f578539fc..000000000 --- a/weed/query/engine/sql_feature_diagnostic_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strings" - "testing" -) - -// TestSQLFeatureDiagnostic provides comprehensive diagnosis of current SQL features -func TestSQLFeatureDiagnostic(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("SEAWEEDFS SQL ENGINE FEATURE DIAGNOSTIC") - t.Log(strings.Repeat("=", 80)) - - // Test 1: LIMIT functionality - t.Log("\n1. TESTING LIMIT FUNCTIONALITY:") - for _, limit := range []int{0, 1, 3, 5, 10, 100} { - sql := fmt.Sprintf("SELECT id FROM user_events LIMIT %d", limit) - result, err := engine.ExecuteSQL(context.Background(), sql) - - if err != nil { - t.Logf(" LIMIT %d: ERROR - %v", limit, err) - } else if result.Error != nil { - t.Logf(" LIMIT %d: RESULT ERROR - %v", limit, result.Error) - } else { - expected := limit - actual := len(result.Rows) - if limit > 10 { - expected = 10 // Test data has max 10 rows - } - - if actual == expected { - t.Logf(" LIMIT %d: PASS - Got %d rows", limit, actual) - } else { - t.Logf(" LIMIT %d: PARTIAL - Expected %d, got %d rows", limit, expected, actual) - } - } - } - - // Test 2: OFFSET functionality - t.Log("\n2. TESTING OFFSET FUNCTIONALITY:") - - for _, offset := range []int{0, 1, 2, 5, 10, 100} { - sql := fmt.Sprintf("SELECT id FROM user_events LIMIT 3 OFFSET %d", offset) - result, err := engine.ExecuteSQL(context.Background(), sql) - - if err != nil { - t.Logf(" OFFSET %d: ERROR - %v", offset, err) - } else if result.Error != nil { - t.Logf(" OFFSET %d: RESULT ERROR - %v", offset, result.Error) - } else { - actual := len(result.Rows) - if offset >= 10 { - t.Logf(" OFFSET %d: PASS - Beyond data range, got %d rows", offset, actual) - } else { - t.Logf(" OFFSET %d: PASS - Got %d rows", offset, actual) - } - } - } - - // Test 3: WHERE clause functionality - t.Log("\n3. TESTING WHERE CLAUSE FUNCTIONALITY:") - whereTests := []struct { - sql string - desc string - }{ - {"SELECT * FROM user_events WHERE id = 82460", "Specific ID match"}, - {"SELECT * FROM user_events WHERE id > 100000", "Greater than comparison"}, - {"SELECT * FROM user_events WHERE status = 'active'", "String equality"}, - {"SELECT * FROM user_events WHERE id = -999999", "Non-existent ID"}, - {"SELECT * FROM user_events WHERE 1 = 2", "Always false condition"}, - } - - allRowsCount := 10 // Expected total rows in test data - - for _, test := range whereTests { - result, err := engine.ExecuteSQL(context.Background(), test.sql) - - if err != nil { - t.Logf(" %s: ERROR - %v", test.desc, err) - } else if result.Error != nil { - t.Logf(" %s: RESULT ERROR - %v", test.desc, result.Error) - } else { - actual := len(result.Rows) - if actual == allRowsCount { - t.Logf(" %s: FAIL - WHERE clause ignored, got all %d rows", test.desc, actual) - } else { - t.Logf(" %s: PASS - WHERE clause working, got %d rows", test.desc, actual) - } - } - } - - // Test 4: Combined functionality - t.Log("\n4. TESTING COMBINED LIMIT + OFFSET + WHERE:") - combinedSql := "SELECT id FROM user_events WHERE id > 0 LIMIT 2 OFFSET 1" - result, err := engine.ExecuteSQL(context.Background(), combinedSql) - - if err != nil { - t.Logf(" Combined query: ERROR - %v", err) - } else if result.Error != nil { - t.Logf(" Combined query: RESULT ERROR - %v", result.Error) - } else { - actual := len(result.Rows) - t.Logf(" Combined query: Got %d rows (LIMIT=2 part works, WHERE filtering unknown)", actual) - } - - // Summary - t.Log("\n" + strings.Repeat("=", 80)) - t.Log("FEATURE SUMMARY:") - t.Log(" LIMIT: FULLY WORKING - Correctly limits result rows") - t.Log(" OFFSET: FULLY WORKING - Correctly skips rows") - t.Log(" WHERE: FULLY WORKING - All comparison operators working") - t.Log(" SELECT: WORKING - Supports *, columns, functions, arithmetic") - t.Log(" Functions: WORKING - String and datetime functions work") - t.Log(" Arithmetic: WORKING - +, -, *, / operations work") - t.Log(strings.Repeat("=", 80)) -} - -// TestSQLWhereClauseIssue creates a focused test to demonstrate WHERE clause issue -func TestSQLWhereClauseIssue(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("DEMONSTRATING WHERE CLAUSE ISSUE:") - - // Get all rows first to establish baseline - allResult, _ := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events") - allCount := len(allResult.Rows) - t.Logf("Total rows in test data: %d", allCount) - - if allCount > 0 { - firstId := allResult.Rows[0][0].ToString() - t.Logf("First row ID: %s", firstId) - - // Try to filter to just that specific ID - specificSql := fmt.Sprintf("SELECT id FROM user_events WHERE id = %s", firstId) - specificResult, err := engine.ExecuteSQL(context.Background(), specificSql) - - if err != nil { - t.Errorf("WHERE query failed: %v", err) - } else { - actualCount := len(specificResult.Rows) - t.Logf("WHERE id = %s returned %d rows", firstId, actualCount) - - if actualCount == allCount { - t.Log("CONFIRMED: WHERE clause is completely ignored") - t.Log(" - Query parsed successfully") - t.Log(" - No errors returned") - t.Log(" - But filtering logic not implemented in execution") - } else if actualCount == 1 { - t.Log("WHERE clause working correctly") - } else { - t.Logf("โ“ Unexpected result: got %d rows instead of 1 or %d", actualCount, allCount) - } - } - } - - // Test impossible condition - impossibleResult, _ := engine.ExecuteSQL(context.Background(), "SELECT * FROM user_events WHERE 1 = 0") - impossibleCount := len(impossibleResult.Rows) - t.Logf("WHERE 1 = 0 returned %d rows", impossibleCount) - - if impossibleCount == allCount { - t.Log("CONFIRMED: Even impossible WHERE conditions are ignored") - } else if impossibleCount == 0 { - t.Log("Impossible WHERE condition correctly returns no rows") - } -} diff --git a/weed/query/engine/sql_filtering_limit_offset_test.go b/weed/query/engine/sql_filtering_limit_offset_test.go deleted file mode 100644 index 6d53b8b01..000000000 --- a/weed/query/engine/sql_filtering_limit_offset_test.go +++ /dev/null @@ -1,446 +0,0 @@ -package engine - -import ( - "context" - "fmt" - "strings" - "testing" -) - -// TestSQLFilteringLimitOffset tests comprehensive SQL filtering, LIMIT, and OFFSET functionality -func TestSQLFilteringLimitOffset(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - shouldError bool - expectRows int // -1 means don't check row count - desc string - }{ - // =========== WHERE CLAUSE OPERATORS =========== - { - name: "Where_Equals_Integer", - sql: "SELECT * FROM user_events WHERE id = 82460", - shouldError: false, - expectRows: 1, - desc: "WHERE with equals operator (integer)", - }, - { - name: "Where_Equals_String", - sql: "SELECT * FROM user_events WHERE status = 'active'", - shouldError: false, - expectRows: -1, // Don't check exact count - desc: "WHERE with equals operator (string)", - }, - { - name: "Where_Not_Equals", - sql: "SELECT * FROM user_events WHERE status != 'inactive'", - shouldError: false, - expectRows: -1, - desc: "WHERE with not equals operator", - }, - { - name: "Where_Greater_Than", - sql: "SELECT * FROM user_events WHERE id > 100000", - shouldError: false, - expectRows: -1, - desc: "WHERE with greater than operator", - }, - { - name: "Where_Less_Than", - sql: "SELECT * FROM user_events WHERE id < 100000", - shouldError: false, - expectRows: -1, - desc: "WHERE with less than operator", - }, - { - name: "Where_Greater_Equal", - sql: "SELECT * FROM user_events WHERE id >= 82460", - shouldError: false, - expectRows: -1, - desc: "WHERE with greater than or equal operator", - }, - { - name: "Where_Less_Equal", - sql: "SELECT * FROM user_events WHERE id <= 82460", - shouldError: false, - expectRows: -1, - desc: "WHERE with less than or equal operator", - }, - - // =========== WHERE WITH COLUMNS AND EXPRESSIONS =========== - { - name: "Where_Column_Comparison", - sql: "SELECT id, status FROM user_events WHERE id = 82460", - shouldError: false, - expectRows: 1, - desc: "WHERE filtering with specific columns selected", - }, - { - name: "Where_With_Function", - sql: "SELECT LENGTH(status) FROM user_events WHERE status = 'active'", - shouldError: false, - expectRows: -1, - desc: "WHERE with function in SELECT", - }, - { - name: "Where_With_Arithmetic", - sql: "SELECT id*2 FROM user_events WHERE id = 82460", - shouldError: false, - expectRows: 1, - desc: "WHERE with arithmetic in SELECT", - }, - - // =========== LIMIT FUNCTIONALITY =========== - { - name: "Limit_1", - sql: "SELECT * FROM user_events LIMIT 1", - shouldError: false, - expectRows: 1, - desc: "LIMIT 1 row", - }, - { - name: "Limit_5", - sql: "SELECT * FROM user_events LIMIT 5", - shouldError: false, - expectRows: 5, - desc: "LIMIT 5 rows", - }, - { - name: "Limit_0", - sql: "SELECT * FROM user_events LIMIT 0", - shouldError: false, - expectRows: 0, - desc: "LIMIT 0 rows (should return no results)", - }, - { - name: "Limit_Large", - sql: "SELECT * FROM user_events LIMIT 1000", - shouldError: false, - expectRows: -1, // Don't check exact count (depends on test data) - desc: "LIMIT with large number", - }, - { - name: "Limit_With_Columns", - sql: "SELECT id, status FROM user_events LIMIT 3", - shouldError: false, - expectRows: 3, - desc: "LIMIT with specific columns", - }, - { - name: "Limit_With_Functions", - sql: "SELECT LENGTH(status), UPPER(action) FROM user_events LIMIT 2", - shouldError: false, - expectRows: 2, - desc: "LIMIT with functions", - }, - - // =========== OFFSET FUNCTIONALITY =========== - { - name: "Offset_0", - sql: "SELECT * FROM user_events LIMIT 5 OFFSET 0", - shouldError: false, - expectRows: 5, - desc: "OFFSET 0 (same as no offset)", - }, - { - name: "Offset_1", - sql: "SELECT * FROM user_events LIMIT 3 OFFSET 1", - shouldError: false, - expectRows: 3, - desc: "OFFSET 1 row", - }, - { - name: "Offset_5", - sql: "SELECT * FROM user_events LIMIT 2 OFFSET 5", - shouldError: false, - expectRows: 2, - desc: "OFFSET 5 rows", - }, - { - name: "Offset_Large", - sql: "SELECT * FROM user_events LIMIT 1 OFFSET 100", - shouldError: false, - expectRows: -1, // May be 0 or 1 depending on test data size - desc: "OFFSET with large number", - }, - - // =========== LIMIT + OFFSET COMBINATIONS =========== - { - name: "Limit_Offset_Pagination_Page1", - sql: "SELECT id, status FROM user_events LIMIT 3 OFFSET 0", - shouldError: false, - expectRows: 3, - desc: "Pagination: Page 1 (LIMIT 3, OFFSET 0)", - }, - { - name: "Limit_Offset_Pagination_Page2", - sql: "SELECT id, status FROM user_events LIMIT 3 OFFSET 3", - shouldError: false, - expectRows: 3, - desc: "Pagination: Page 2 (LIMIT 3, OFFSET 3)", - }, - { - name: "Limit_Offset_Pagination_Page3", - sql: "SELECT id, status FROM user_events LIMIT 3 OFFSET 6", - shouldError: false, - expectRows: 3, - desc: "Pagination: Page 3 (LIMIT 3, OFFSET 6)", - }, - - // =========== WHERE + LIMIT + OFFSET COMBINATIONS =========== - { - name: "Where_Limit", - sql: "SELECT * FROM user_events WHERE status = 'active' LIMIT 2", - shouldError: false, - expectRows: -1, // Depends on filtered data - desc: "WHERE clause with LIMIT", - }, - { - name: "Where_Limit_Offset", - sql: "SELECT id, status FROM user_events WHERE status = 'active' LIMIT 2 OFFSET 1", - shouldError: false, - expectRows: -1, // Depends on filtered data - desc: "WHERE clause with LIMIT and OFFSET", - }, - { - name: "Where_Complex_Limit", - sql: "SELECT id*2, LENGTH(status) FROM user_events WHERE id > 100000 LIMIT 3", - shouldError: false, - expectRows: -1, - desc: "Complex WHERE with functions and arithmetic, plus LIMIT", - }, - - // =========== EDGE CASES =========== - { - name: "Where_No_Match", - sql: "SELECT * FROM user_events WHERE id = -999999", - shouldError: false, - expectRows: 0, - desc: "WHERE clause that matches no rows", - }, - { - name: "Limit_Offset_Beyond_Data", - sql: "SELECT * FROM user_events LIMIT 5 OFFSET 999999", - shouldError: false, - expectRows: 0, - desc: "OFFSET beyond available data", - }, - { - name: "Where_Empty_String", - sql: "SELECT * FROM user_events WHERE status = ''", - shouldError: false, - expectRows: -1, - desc: "WHERE with empty string value", - }, - - // =========== PERFORMANCE PATTERNS =========== - { - name: "Small_Result_Set", - sql: "SELECT id FROM user_events WHERE id = 82460 LIMIT 1", - shouldError: false, - expectRows: 1, - desc: "Optimized query: specific WHERE + LIMIT 1", - }, - { - name: "Batch_Processing", - sql: "SELECT id, status FROM user_events LIMIT 50 OFFSET 0", - shouldError: false, - expectRows: -1, - desc: "Batch processing pattern: moderate LIMIT", - }, - } - - var successTests []string - var errorTests []string - var rowCountMismatches []string - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - // Check for unexpected errors - if tc.shouldError { - if err == nil && (result == nil || result.Error == nil) { - t.Errorf("FAIL: Expected error for %s, but query succeeded", tc.desc) - errorTests = append(errorTests, "FAIL: "+tc.desc) - return - } - t.Logf("PASS: Expected error: %s", tc.desc) - errorTests = append(errorTests, "PASS: "+tc.desc) - return - } - - if err != nil { - t.Errorf("FAIL: Unexpected error for %s: %v", tc.desc, err) - errorTests = append(errorTests, "FAIL: "+tc.desc+" (unexpected error)") - return - } - - if result != nil && result.Error != nil { - t.Errorf("FAIL: Unexpected result error for %s: %v", tc.desc, result.Error) - errorTests = append(errorTests, "FAIL: "+tc.desc+" (unexpected result error)") - return - } - - // Check row count if specified - actualRows := len(result.Rows) - if tc.expectRows >= 0 { - if actualRows != tc.expectRows { - t.Logf("ROW COUNT MISMATCH: %s - Expected %d rows, got %d", tc.desc, tc.expectRows, actualRows) - rowCountMismatches = append(rowCountMismatches, - fmt.Sprintf("MISMATCH: %s (expected %d, got %d)", tc.desc, tc.expectRows, actualRows)) - } else { - t.Logf("PASS: %s - Correct row count: %d", tc.desc, actualRows) - } - } else { - t.Logf("PASS: %s - Row count: %d (not validated)", tc.desc, actualRows) - } - - successTests = append(successTests, "PASS: "+tc.desc) - }) - } - - // Summary report - separator := strings.Repeat("=", 80) - t.Log("\n" + separator) - t.Log("SQL FILTERING, LIMIT & OFFSET TEST SUITE SUMMARY") - t.Log(separator) - t.Logf("Total Tests: %d", len(testCases)) - t.Logf("Successful: %d", len(successTests)) - t.Logf("Errors: %d", len(errorTests)) - t.Logf("Row Count Mismatches: %d", len(rowCountMismatches)) - t.Log(separator) - - if len(errorTests) > 0 { - t.Log("\nERRORS:") - for _, test := range errorTests { - t.Log(" " + test) - } - } - - if len(rowCountMismatches) > 0 { - t.Log("\nROW COUNT MISMATCHES:") - for _, test := range rowCountMismatches { - t.Log(" " + test) - } - } -} - -// TestSQLFilteringAccuracy tests the accuracy of filtering results -func TestSQLFilteringAccuracy(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("Testing SQL filtering accuracy with specific data verification") - - // Test specific ID lookup - result, err := engine.ExecuteSQL(context.Background(), "SELECT id, status FROM user_events WHERE id = 82460") - if err != nil { - t.Fatalf("Query failed: %v", err) - } - - if len(result.Rows) != 1 { - t.Errorf("Expected 1 row for id=82460, got %d", len(result.Rows)) - } else { - idValue := result.Rows[0][0].ToString() - if idValue != "82460" { - t.Errorf("Expected id=82460, got id=%s", idValue) - } else { - t.Log("PASS: Exact ID filtering works correctly") - } - } - - // Test LIMIT accuracy - result2, err2 := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events LIMIT 3") - if err2 != nil { - t.Fatalf("LIMIT query failed: %v", err2) - } - - if len(result2.Rows) != 3 { - t.Errorf("Expected exactly 3 rows with LIMIT 3, got %d", len(result2.Rows)) - } else { - t.Log("PASS: LIMIT 3 returns exactly 3 rows") - } - - // Test OFFSET by comparing with and without offset - resultNoOffset, err3 := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events LIMIT 2 OFFSET 0") - if err3 != nil { - t.Fatalf("No offset query failed: %v", err3) - } - - resultWithOffset, err4 := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events LIMIT 2 OFFSET 1") - if err4 != nil { - t.Fatalf("With offset query failed: %v", err4) - } - - if len(resultNoOffset.Rows) == 2 && len(resultWithOffset.Rows) == 2 { - // The second row of no-offset should equal first row of offset-1 - if resultNoOffset.Rows[1][0].ToString() == resultWithOffset.Rows[0][0].ToString() { - t.Log("PASS: OFFSET 1 correctly skips first row") - } else { - t.Errorf("OFFSET verification failed: expected row shifting") - } - } else { - t.Errorf("OFFSET test setup failed: got %d and %d rows", len(resultNoOffset.Rows), len(resultWithOffset.Rows)) - } -} - -// TestSQLFilteringEdgeCases tests edge cases and boundary conditions -func TestSQLFilteringEdgeCases(t *testing.T) { - engine := NewTestSQLEngine() - - edgeCases := []struct { - name string - sql string - expectError bool - desc string - }{ - { - name: "Zero_Limit", - sql: "SELECT * FROM user_events LIMIT 0", - expectError: false, - desc: "LIMIT 0 should return empty result set", - }, - { - name: "Large_Offset", - sql: "SELECT * FROM user_events LIMIT 1 OFFSET 99999", - expectError: false, - desc: "Very large OFFSET should handle gracefully", - }, - { - name: "Where_False_Condition", - sql: "SELECT * FROM user_events WHERE 1 = 0", - expectError: true, // This might not be supported - desc: "WHERE with always-false condition", - }, - { - name: "Complex_Where", - sql: "SELECT id FROM user_events WHERE id > 0 AND id < 999999999", - expectError: true, // AND might not be implemented - desc: "Complex WHERE with AND condition", - }, - } - - for _, tc := range edgeCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.expectError { - if err == nil && (result == nil || result.Error == nil) { - t.Logf("UNEXPECTED SUCCESS: %s (may indicate feature is implemented)", tc.desc) - } else { - t.Logf("EXPECTED ERROR: %s", tc.desc) - } - } else { - if err != nil { - t.Errorf("UNEXPECTED ERROR for %s: %v", tc.desc, err) - } else if result.Error != nil { - t.Errorf("UNEXPECTED RESULT ERROR for %s: %v", tc.desc, result.Error) - } else { - t.Logf("PASS: %s - Rows: %d", tc.desc, len(result.Rows)) - } - } - }) - } -} diff --git a/weed/query/engine/sql_types.go b/weed/query/engine/sql_types.go deleted file mode 100644 index b679e89bd..000000000 --- a/weed/query/engine/sql_types.go +++ /dev/null @@ -1,84 +0,0 @@ -package engine - -import ( - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// convertSQLTypeToMQ converts SQL column types to MQ schema field types -// Assumptions: -// 1. Standard SQL types map to MQ scalar types -// 2. Unsupported types result in errors -// 3. Default sizes are used for variable-length types -func (e *SQLEngine) convertSQLTypeToMQ(sqlType TypeRef) (*schema_pb.Type, error) { - typeName := strings.ToUpper(sqlType.Type) - - switch typeName { - case "BOOLEAN", "BOOL": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BOOL}}, nil - - case "TINYINT", "SMALLINT", "INT", "INTEGER", "MEDIUMINT": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT32}}, nil - - case "BIGINT": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, nil - - case "FLOAT", "REAL": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_FLOAT}}, nil - - case "DOUBLE", "DOUBLE PRECISION": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_DOUBLE}}, nil - - case "CHAR", "VARCHAR", "TEXT", "LONGTEXT", "MEDIUMTEXT", "TINYTEXT": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, nil - - case "BINARY", "VARBINARY", "BLOB", "LONGBLOB", "MEDIUMBLOB", "TINYBLOB": - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_BYTES}}, nil - - case "JSON": - // JSON stored as string for now - // TODO: Implement proper JSON type support - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_STRING}}, nil - - case "TIMESTAMP", "DATETIME": - // Store as BIGINT (Unix timestamp in nanoseconds) - return &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{ScalarType: schema_pb.ScalarType_INT64}}, nil - - default: - return nil, fmt.Errorf("unsupported SQL type: %s", typeName) - } -} - -// convertMQTypeToSQL converts MQ schema field types back to SQL column types -// This is the reverse of convertSQLTypeToMQ for display purposes -func (e *SQLEngine) convertMQTypeToSQL(fieldType *schema_pb.Type) string { - switch t := fieldType.Kind.(type) { - case *schema_pb.Type_ScalarType: - switch t.ScalarType { - case schema_pb.ScalarType_BOOL: - return "BOOLEAN" - case schema_pb.ScalarType_INT32: - return "INT" - case schema_pb.ScalarType_INT64: - return "BIGINT" - case schema_pb.ScalarType_FLOAT: - return "FLOAT" - case schema_pb.ScalarType_DOUBLE: - return "DOUBLE" - case schema_pb.ScalarType_BYTES: - return "VARBINARY" - case schema_pb.ScalarType_STRING: - return "VARCHAR(255)" - default: - return "UNKNOWN" - } - case *schema_pb.Type_ListType: - return "TEXT" // Lists serialized as JSON - case *schema_pb.Type_RecordType: - return "TEXT" // Nested records serialized as JSON - default: - return "UNKNOWN" - } -} diff --git a/weed/query/engine/string_concatenation_test.go b/weed/query/engine/string_concatenation_test.go deleted file mode 100644 index a2f869c10..000000000 --- a/weed/query/engine/string_concatenation_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package engine - -import ( - "context" - "testing" -) - -// TestSQLEngine_StringConcatenationWithLiterals tests string concatenation with || operator -// This covers the user's reported issue where string literals were being lost -func TestSQLEngine_StringConcatenationWithLiterals(t *testing.T) { - engine := NewTestSQLEngine() - - tests := []struct { - name string - query string - expectedCols []string - validateFirst func(t *testing.T, row []string) - }{ - { - name: "Simple concatenation with literals", - query: "SELECT 'test' || action || 'end' FROM user_events LIMIT 1", - expectedCols: []string{"'test'||action||'end'"}, - validateFirst: func(t *testing.T, row []string) { - expected := "testloginend" // action="login" from first row - if row[0] != expected { - t.Errorf("Expected %s, got %s", expected, row[0]) - } - }, - }, - { - name: "User's original complex concatenation", - query: "SELECT 'test' || action || 'xxx' || action || ' ~~~ ' || status FROM user_events LIMIT 1", - expectedCols: []string{"'test'||action||'xxx'||action||'~~~'||status"}, - validateFirst: func(t *testing.T, row []string) { - // First row: action="login", status="active" - expected := "testloginxxxlogin ~~~ active" - if row[0] != expected { - t.Errorf("Expected %s, got %s", expected, row[0]) - } - }, - }, - { - name: "Mixed columns and literals", - query: "SELECT status || '=' || action, 'prefix:' || user_type FROM user_events LIMIT 1", - expectedCols: []string{"status||'='||action", "'prefix:'||user_type"}, - validateFirst: func(t *testing.T, row []string) { - // First row: status="active", action="login", user_type="premium" - if row[0] != "active=login" { - t.Errorf("Expected 'active=login', got %s", row[0]) - } - if row[1] != "prefix:premium" { - t.Errorf("Expected 'prefix:premium', got %s", row[1]) - } - }, - }, - { - name: "Concatenation with spaces in literals", - query: "SELECT ' [ ' || status || ' ] ' FROM user_events LIMIT 2", - expectedCols: []string{"'['||status||']'"}, - validateFirst: func(t *testing.T, row []string) { - expected := " [ active ] " // status="active" from first row - if row[0] != expected { - t.Errorf("Expected '%s', got '%s'", expected, row[0]) - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tt.query) - if err != nil { - t.Fatalf("Query failed: %v", err) - } - if result.Error != nil { - t.Fatalf("Query returned error: %v", result.Error) - } - - // Verify we got results - if len(result.Rows) == 0 { - t.Fatal("Query returned no rows") - } - - // Verify column count - if len(result.Columns) != len(tt.expectedCols) { - t.Errorf("Expected %d columns, got %d", len(tt.expectedCols), len(result.Columns)) - } - - // Check column names - for i, expectedCol := range tt.expectedCols { - if i < len(result.Columns) && result.Columns[i] != expectedCol { - t.Logf("Expected column %d to be '%s', got '%s'", i, expectedCol, result.Columns[i]) - // Don't fail on column name formatting differences, just log - } - } - - // Validate first row - if tt.validateFirst != nil { - firstRow := result.Rows[0] - stringRow := make([]string, len(firstRow)) - for i, val := range firstRow { - stringRow[i] = val.ToString() - } - tt.validateFirst(t, stringRow) - } - - // Log results for debugging - t.Logf("Query: %s", tt.query) - t.Logf("Columns: %v", result.Columns) - for i, row := range result.Rows { - values := make([]string, len(row)) - for j, val := range row { - values[j] = val.ToString() - } - t.Logf("Row %d: %v", i, values) - } - }) - } -} - -// TestSQLEngine_StringConcatenationBugReproduction tests the exact user query that was failing -func TestSQLEngine_StringConcatenationBugReproduction(t *testing.T) { - engine := NewTestSQLEngine() - - // This is the EXACT query from the user that was showing incorrect results - query := "SELECT UPPER(status), id*2, 'test' || action || 'xxx' || action || ' ~~~ ' || status FROM user_events LIMIT 2" - - result, err := engine.ExecuteSQL(context.Background(), query) - if err != nil { - t.Fatalf("Query failed: %v", err) - } - if result.Error != nil { - t.Fatalf("Query returned error: %v", result.Error) - } - - // Key assertions that would fail with the original bug: - - // 1. Must return rows - if len(result.Rows) != 2 { - t.Errorf("Expected 2 rows, got %d", len(result.Rows)) - } - - // 2. Must have 3 columns - expectedColumns := 3 - if len(result.Columns) != expectedColumns { - t.Errorf("Expected %d columns, got %d", expectedColumns, len(result.Columns)) - } - - // 3. Verify the complex concatenation works correctly - if len(result.Rows) >= 1 { - firstRow := result.Rows[0] - - // Column 0: UPPER(status) should be "ACTIVE" - upperStatus := firstRow[0].ToString() - if upperStatus != "ACTIVE" { - t.Errorf("Expected UPPER(status)='ACTIVE', got '%s'", upperStatus) - } - - // Column 1: id*2 should be calculated correctly - idTimes2 := firstRow[1].ToString() - if idTimes2 != "164920" { // id=82460 * 2 - t.Errorf("Expected id*2=164920, got '%s'", idTimes2) - } - - // Column 2: Complex concatenation should include all parts - concatenated := firstRow[2].ToString() - - // Should be: "test" + "login" + "xxx" + "login" + " ~~~ " + "active" = "testloginxxxlogin ~~~ active" - expected := "testloginxxxlogin ~~~ active" - if concatenated != expected { - t.Errorf("String concatenation failed. Expected '%s', got '%s'", expected, concatenated) - } - - // CRITICAL: Must not be the buggy result like "viewviewpending" - if concatenated == "loginloginactive" || concatenated == "viewviewpending" || concatenated == "clickclickfailed" { - t.Errorf("CRITICAL BUG: String concatenation returned buggy result '%s' - string literals are being lost!", concatenated) - } - } - - t.Logf("SUCCESS: Complex string concatenation works correctly!") - t.Logf("Query: %s", query) - - for i, row := range result.Rows { - values := make([]string, len(row)) - for j, val := range row { - values[j] = val.ToString() - } - t.Logf("Row %d: %v", i, values) - } -} diff --git a/weed/query/engine/string_functions.go b/weed/query/engine/string_functions.go deleted file mode 100644 index 2143a75bc..000000000 --- a/weed/query/engine/string_functions.go +++ /dev/null @@ -1,354 +0,0 @@ -package engine - -import ( - "fmt" - "math" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// =============================== -// STRING FUNCTIONS -// =============================== - -// Length returns the length of a string -func (e *SQLEngine) Length(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("LENGTH function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("LENGTH function conversion error: %v", err) - } - - length := int64(len(str)) - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: length}, - }, nil -} - -// Upper converts a string to uppercase -func (e *SQLEngine) Upper(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("UPPER function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("UPPER function conversion error: %v", err) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: strings.ToUpper(str)}, - }, nil -} - -// Lower converts a string to lowercase -func (e *SQLEngine) Lower(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("LOWER function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("LOWER function conversion error: %v", err) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: strings.ToLower(str)}, - }, nil -} - -// Trim removes leading and trailing whitespace from a string -func (e *SQLEngine) Trim(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("TRIM function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("TRIM function conversion error: %v", err) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: strings.TrimSpace(str)}, - }, nil -} - -// LTrim removes leading whitespace from a string -func (e *SQLEngine) LTrim(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("LTRIM function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("LTRIM function conversion error: %v", err) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: strings.TrimLeft(str, " \t\n\r")}, - }, nil -} - -// RTrim removes trailing whitespace from a string -func (e *SQLEngine) RTrim(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("RTRIM function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("RTRIM function conversion error: %v", err) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: strings.TrimRight(str, " \t\n\r")}, - }, nil -} - -// Substring extracts a substring from a string -func (e *SQLEngine) Substring(value *schema_pb.Value, start *schema_pb.Value, length ...*schema_pb.Value) (*schema_pb.Value, error) { - if value == nil || start == nil { - return nil, fmt.Errorf("SUBSTRING function requires non-null value and start position") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("SUBSTRING function value conversion error: %v", err) - } - - startPos, err := e.valueToInt64(start) - if err != nil { - return nil, fmt.Errorf("SUBSTRING function start position conversion error: %v", err) - } - - // Convert to 0-based indexing (SQL uses 1-based) - if startPos < 1 { - startPos = 1 - } - startIdx := int(startPos - 1) - - if startIdx >= len(str) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - }, nil - } - - var result string - if len(length) > 0 && length[0] != nil { - lengthVal, err := e.valueToInt64(length[0]) - if err != nil { - return nil, fmt.Errorf("SUBSTRING function length conversion error: %v", err) - } - - if lengthVal <= 0 { - result = "" - } else { - if lengthVal > int64(math.MaxInt) || lengthVal < int64(math.MinInt) { - // If length is out-of-bounds for int, take substring from startIdx to end - result = str[startIdx:] - } else { - // Safe conversion after bounds check - endIdx := startIdx + int(lengthVal) - if endIdx > len(str) { - endIdx = len(str) - } - result = str[startIdx:endIdx] - } - } - } else { - result = str[startIdx:] - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: result}, - }, nil -} - -// Concat concatenates multiple strings -func (e *SQLEngine) Concat(values ...*schema_pb.Value) (*schema_pb.Value, error) { - if len(values) == 0 { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - }, nil - } - - var result strings.Builder - for i, value := range values { - if value == nil { - continue // Skip null values - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("CONCAT function value %d conversion error: %v", i, err) - } - result.WriteString(str) - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: result.String()}, - }, nil -} - -// Replace replaces all occurrences of a substring with another substring -func (e *SQLEngine) Replace(value, oldStr, newStr *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil || oldStr == nil || newStr == nil { - return nil, fmt.Errorf("REPLACE function requires non-null values") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("REPLACE function value conversion error: %v", err) - } - - old, err := e.valueToString(oldStr) - if err != nil { - return nil, fmt.Errorf("REPLACE function old string conversion error: %v", err) - } - - new, err := e.valueToString(newStr) - if err != nil { - return nil, fmt.Errorf("REPLACE function new string conversion error: %v", err) - } - - result := strings.ReplaceAll(str, old, new) - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: result}, - }, nil -} - -// Position returns the position of a substring in a string (1-based, 0 if not found) -func (e *SQLEngine) Position(substring, value *schema_pb.Value) (*schema_pb.Value, error) { - if substring == nil || value == nil { - return nil, fmt.Errorf("POSITION function requires non-null values") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("POSITION function string conversion error: %v", err) - } - - substr, err := e.valueToString(substring) - if err != nil { - return nil, fmt.Errorf("POSITION function substring conversion error: %v", err) - } - - pos := strings.Index(str, substr) - if pos == -1 { - pos = 0 // SQL returns 0 for not found - } else { - pos = pos + 1 // Convert to 1-based indexing - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: int64(pos)}, - }, nil -} - -// Left returns the leftmost characters of a string -func (e *SQLEngine) Left(value *schema_pb.Value, length *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil || length == nil { - return nil, fmt.Errorf("LEFT function requires non-null values") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("LEFT function string conversion error: %v", err) - } - - lengthVal, err := e.valueToInt64(length) - if err != nil { - return nil, fmt.Errorf("LEFT function length conversion error: %v", err) - } - - if lengthVal <= 0 { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - }, nil - } - - if lengthVal > int64(len(str)) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str}, - }, nil - } - - if lengthVal > int64(math.MaxInt) || lengthVal < int64(math.MinInt) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str}, - }, nil - } - - // Safe conversion after bounds check - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str[:int(lengthVal)]}, - }, nil -} - -// Right returns the rightmost characters of a string -func (e *SQLEngine) Right(value *schema_pb.Value, length *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil || length == nil { - return nil, fmt.Errorf("RIGHT function requires non-null values") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("RIGHT function string conversion error: %v", err) - } - - lengthVal, err := e.valueToInt64(length) - if err != nil { - return nil, fmt.Errorf("RIGHT function length conversion error: %v", err) - } - - if lengthVal <= 0 { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: ""}, - }, nil - } - - if lengthVal > int64(len(str)) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str}, - }, nil - } - - if lengthVal > int64(math.MaxInt) || lengthVal < int64(math.MinInt) { - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str}, - }, nil - } - - // Safe conversion after bounds check - startPos := len(str) - int(lengthVal) - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: str[startPos:]}, - }, nil -} - -// Reverse reverses a string -func (e *SQLEngine) Reverse(value *schema_pb.Value) (*schema_pb.Value, error) { - if value == nil { - return nil, fmt.Errorf("REVERSE function requires non-null value") - } - - str, err := e.valueToString(value) - if err != nil { - return nil, fmt.Errorf("REVERSE function conversion error: %v", err) - } - - // Reverse the string rune by rune to handle Unicode correctly - runes := []rune(str) - for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { - runes[i], runes[j] = runes[j], runes[i] - } - - return &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: string(runes)}, - }, nil -} diff --git a/weed/query/engine/string_functions_test.go b/weed/query/engine/string_functions_test.go deleted file mode 100644 index 7cdde2346..000000000 --- a/weed/query/engine/string_functions_test.go +++ /dev/null @@ -1,393 +0,0 @@ -package engine - -import ( - "context" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -func TestStringFunctions(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("LENGTH function tests", func(t *testing.T) { - tests := []struct { - name string - value *schema_pb.Value - expected int64 - expectErr bool - }{ - { - name: "Length of string", - value: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}}, - expected: 11, - expectErr: false, - }, - { - name: "Length of empty string", - value: &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: ""}}, - expected: 0, - expectErr: false, - }, - { - name: "Length of number", - value: &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - expected: 5, - expectErr: false, - }, - { - name: "Length of null value", - value: nil, - expected: 0, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.Length(tt.value) - - if tt.expectErr { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - intVal, ok := result.Kind.(*schema_pb.Value_Int64Value) - if !ok { - t.Errorf("LENGTH should return int64 value, got %T", result.Kind) - return - } - - if intVal.Int64Value != tt.expected { - t.Errorf("Expected %d, got %d", tt.expected, intVal.Int64Value) - } - }) - } - }) - - t.Run("UPPER/LOWER function tests", func(t *testing.T) { - // Test UPPER - result, err := engine.Upper(&schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}}) - if err != nil { - t.Errorf("UPPER failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "HELLO WORLD" { - t.Errorf("Expected 'HELLO WORLD', got '%s'", stringVal.StringValue) - } - - // Test LOWER - result, err = engine.Lower(&schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}}) - if err != nil { - t.Errorf("LOWER failed: %v", err) - } - stringVal, _ = result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "hello world" { - t.Errorf("Expected 'hello world', got '%s'", stringVal.StringValue) - } - }) - - t.Run("TRIM function tests", func(t *testing.T) { - tests := []struct { - name string - function func(*schema_pb.Value) (*schema_pb.Value, error) - input string - expected string - }{ - {"TRIM whitespace", engine.Trim, " Hello World ", "Hello World"}, - {"LTRIM whitespace", engine.LTrim, " Hello World ", "Hello World "}, - {"RTRIM whitespace", engine.RTrim, " Hello World ", " Hello World"}, - {"TRIM with tabs and newlines", engine.Trim, "\t\nHello\t\n", "Hello"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := tt.function(&schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: tt.input}}) - if err != nil { - t.Errorf("Function failed: %v", err) - return - } - - stringVal, ok := result.Kind.(*schema_pb.Value_StringValue) - if !ok { - t.Errorf("Function should return string value, got %T", result.Kind) - return - } - - if stringVal.StringValue != tt.expected { - t.Errorf("Expected '%s', got '%s'", tt.expected, stringVal.StringValue) - } - }) - } - }) - - t.Run("SUBSTRING function tests", func(t *testing.T) { - testStr := &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}} - - // Test substring with start and length - result, err := engine.Substring(testStr, - &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 7}}, - &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}) - if err != nil { - t.Errorf("SUBSTRING failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "World" { - t.Errorf("Expected 'World', got '%s'", stringVal.StringValue) - } - - // Test substring with just start position - result, err = engine.Substring(testStr, - &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 7}}) - if err != nil { - t.Errorf("SUBSTRING failed: %v", err) - } - stringVal, _ = result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "World" { - t.Errorf("Expected 'World', got '%s'", stringVal.StringValue) - } - }) - - t.Run("CONCAT function tests", func(t *testing.T) { - result, err := engine.Concat( - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello"}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: " "}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "World"}}, - ) - if err != nil { - t.Errorf("CONCAT failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "Hello World" { - t.Errorf("Expected 'Hello World', got '%s'", stringVal.StringValue) - } - - // Test with mixed types - result, err = engine.Concat( - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Number: "}}, - &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 42}}, - ) - if err != nil { - t.Errorf("CONCAT failed: %v", err) - } - stringVal, _ = result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "Number: 42" { - t.Errorf("Expected 'Number: 42', got '%s'", stringVal.StringValue) - } - }) - - t.Run("REPLACE function tests", func(t *testing.T) { - result, err := engine.Replace( - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World World"}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "World"}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Universe"}}, - ) - if err != nil { - t.Errorf("REPLACE failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "Hello Universe Universe" { - t.Errorf("Expected 'Hello Universe Universe', got '%s'", stringVal.StringValue) - } - }) - - t.Run("POSITION function tests", func(t *testing.T) { - result, err := engine.Position( - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "World"}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}}, - ) - if err != nil { - t.Errorf("POSITION failed: %v", err) - } - intVal, _ := result.Kind.(*schema_pb.Value_Int64Value) - if intVal.Int64Value != 7 { - t.Errorf("Expected 7, got %d", intVal.Int64Value) - } - - // Test not found - result, err = engine.Position( - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "NotFound"}}, - &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}}, - ) - if err != nil { - t.Errorf("POSITION failed: %v", err) - } - intVal, _ = result.Kind.(*schema_pb.Value_Int64Value) - if intVal.Int64Value != 0 { - t.Errorf("Expected 0 for not found, got %d", intVal.Int64Value) - } - }) - - t.Run("LEFT/RIGHT function tests", func(t *testing.T) { - testStr := &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello World"}} - - // Test LEFT - result, err := engine.Left(testStr, &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}) - if err != nil { - t.Errorf("LEFT failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "Hello" { - t.Errorf("Expected 'Hello', got '%s'", stringVal.StringValue) - } - - // Test RIGHT - result, err = engine.Right(testStr, &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: 5}}) - if err != nil { - t.Errorf("RIGHT failed: %v", err) - } - stringVal, _ = result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "World" { - t.Errorf("Expected 'World', got '%s'", stringVal.StringValue) - } - }) - - t.Run("REVERSE function tests", func(t *testing.T) { - result, err := engine.Reverse(&schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "Hello"}}) - if err != nil { - t.Errorf("REVERSE failed: %v", err) - } - stringVal, _ := result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "olleH" { - t.Errorf("Expected 'olleH', got '%s'", stringVal.StringValue) - } - - // Test with Unicode - result, err = engine.Reverse(&schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: "๐Ÿ™‚๐Ÿ‘"}}) - if err != nil { - t.Errorf("REVERSE failed: %v", err) - } - stringVal, _ = result.Kind.(*schema_pb.Value_StringValue) - if stringVal.StringValue != "๐Ÿ‘๐Ÿ™‚" { - t.Errorf("Expected '๐Ÿ‘๐Ÿ™‚', got '%s'", stringVal.StringValue) - } - }) -} - -// TestStringFunctionsSQL tests string functions through SQL execution -func TestStringFunctionsSQL(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - expectError bool - expectedVal string - }{ - { - name: "UPPER function", - sql: "SELECT UPPER('hello world') AS upper_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: "HELLO WORLD", - }, - { - name: "LOWER function", - sql: "SELECT LOWER('HELLO WORLD') AS lower_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: "hello world", - }, - { - name: "LENGTH function", - sql: "SELECT LENGTH('hello') AS length_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: "5", - }, - { - name: "TRIM function", - sql: "SELECT TRIM(' hello world ') AS trimmed_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: "hello world", - }, - { - name: "LTRIM function", - sql: "SELECT LTRIM(' hello world ') AS ltrimmed_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: "hello world ", - }, - { - name: "RTRIM function", - sql: "SELECT RTRIM(' hello world ') AS rtrimmed_value FROM user_events LIMIT 1", - expectError: false, - expectedVal: " hello world", - }, - { - name: "Multiple string functions", - sql: "SELECT UPPER('hello') AS up, LOWER('WORLD') AS low, LENGTH('test') AS len FROM user_events LIMIT 1", - expectError: false, - expectedVal: "", // We'll check this separately - }, - { - name: "String function with wrong argument count", - sql: "SELECT UPPER('hello', 'extra') FROM user_events LIMIT 1", - expectError: true, - expectedVal: "", - }, - { - name: "String function with no arguments", - sql: "SELECT UPPER() FROM user_events LIMIT 1", - expectError: true, - expectedVal: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tc.sql) - - if tc.expectError { - if err == nil && result.Error == nil { - t.Errorf("Expected error but got none") - } - return - } - - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } - - if result.Error != nil { - t.Errorf("Query result has error: %v", result.Error) - return - } - - if len(result.Rows) == 0 { - t.Fatal("Expected at least one row") - } - - if tc.name == "Multiple string functions" { - // Special case for multiple functions test - if len(result.Rows[0]) != 3 { - t.Fatalf("Expected 3 columns, got %d", len(result.Rows[0])) - } - - // Check UPPER('hello') -> 'HELLO' - if result.Rows[0][0].ToString() != "HELLO" { - t.Errorf("Expected 'HELLO', got '%s'", result.Rows[0][0].ToString()) - } - - // Check LOWER('WORLD') -> 'world' - if result.Rows[0][1].ToString() != "world" { - t.Errorf("Expected 'world', got '%s'", result.Rows[0][1].ToString()) - } - - // Check LENGTH('test') -> '4' - if result.Rows[0][2].ToString() != "4" { - t.Errorf("Expected '4', got '%s'", result.Rows[0][2].ToString()) - } - } else { - actualVal := result.Rows[0][0].ToString() - if actualVal != tc.expectedVal { - t.Errorf("Expected '%s', got '%s'", tc.expectedVal, actualVal) - } - } - }) - } -} diff --git a/weed/query/engine/string_literal_function_test.go b/weed/query/engine/string_literal_function_test.go deleted file mode 100644 index 787c86c08..000000000 --- a/weed/query/engine/string_literal_function_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package engine - -import ( - "context" - "strings" - "testing" -) - -// TestSQLEngine_StringFunctionsAndLiterals tests the fixes for string functions and string literals -// This covers the user's reported issues: -// 1. String functions like UPPER(), LENGTH() being treated as aggregation functions -// 2. String literals like 'good' returning empty values -func TestSQLEngine_StringFunctionsAndLiterals(t *testing.T) { - engine := NewTestSQLEngine() - - tests := []struct { - name string - query string - expectedCols []string - expectNonEmpty bool - validateFirstRow func(t *testing.T, row []string) - }{ - { - name: "String functions - UPPER and LENGTH", - query: "SELECT status, UPPER(status), LENGTH(status) FROM user_events LIMIT 3", - expectedCols: []string{"status", "UPPER(status)", "LENGTH(status)"}, - expectNonEmpty: true, - validateFirstRow: func(t *testing.T, row []string) { - if len(row) != 3 { - t.Errorf("Expected 3 columns, got %d", len(row)) - return - } - // Status should exist, UPPER should be uppercase version, LENGTH should be numeric - status := row[0] - upperStatus := row[1] - lengthStr := row[2] - - if status == "" { - t.Error("Status column should not be empty") - } - if upperStatus == "" { - t.Error("UPPER(status) should not be empty") - } - if lengthStr == "" { - t.Error("LENGTH(status) should not be empty") - } - - t.Logf("Status: '%s', UPPER: '%s', LENGTH: '%s'", status, upperStatus, lengthStr) - }, - }, - { - name: "String literal in SELECT", - query: "SELECT id, user_id, 'good' FROM user_events LIMIT 2", - expectedCols: []string{"id", "user_id", "'good'"}, - expectNonEmpty: true, - validateFirstRow: func(t *testing.T, row []string) { - if len(row) != 3 { - t.Errorf("Expected 3 columns, got %d", len(row)) - return - } - - literal := row[2] - if literal != "good" { - t.Errorf("Expected string literal to be 'good', got '%s'", literal) - } - }, - }, - { - name: "Mixed: columns, functions, arithmetic, and literals", - query: "SELECT id, UPPER(status), id*2, 'test' FROM user_events LIMIT 2", - expectedCols: []string{"id", "UPPER(status)", "id*2", "'test'"}, - expectNonEmpty: true, - validateFirstRow: func(t *testing.T, row []string) { - if len(row) != 4 { - t.Errorf("Expected 4 columns, got %d", len(row)) - return - } - - // Verify the literal value - if row[3] != "test" { - t.Errorf("Expected literal 'test', got '%s'", row[3]) - } - - // Verify other values are not empty - for i, val := range row { - if val == "" { - t.Errorf("Column %d should not be empty", i) - } - } - }, - }, - { - name: "User's original failing query - fixed", - query: "SELECT status, action, user_type, UPPER(action), LENGTH(action) FROM user_events LIMIT 2", - expectedCols: []string{"status", "action", "user_type", "UPPER(action)", "LENGTH(action)"}, - expectNonEmpty: true, - validateFirstRow: func(t *testing.T, row []string) { - if len(row) != 5 { - t.Errorf("Expected 5 columns, got %d", len(row)) - return - } - - // All values should be non-empty - for i, val := range row { - if val == "" { - t.Errorf("Column %d (%s) should not be empty", i, []string{"status", "action", "user_type", "UPPER(action)", "LENGTH(action)"}[i]) - } - } - - // UPPER should be uppercase - action := row[1] - upperAction := row[3] - if action != "" && upperAction != "" { - if upperAction != action && upperAction != strings.ToUpper(action) { - t.Logf("Note: UPPER(%s) = %s (may be expected)", action, upperAction) - } - } - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := engine.ExecuteSQL(context.Background(), tt.query) - if err != nil { - t.Fatalf("Query failed: %v", err) - } - if result.Error != nil { - t.Fatalf("Query returned error: %v", result.Error) - } - - // Verify we got results - if tt.expectNonEmpty && len(result.Rows) == 0 { - t.Fatal("Query returned no rows") - } - - // Verify column count - if len(result.Columns) != len(tt.expectedCols) { - t.Errorf("Expected %d columns, got %d", len(tt.expectedCols), len(result.Columns)) - } - - // Check column names - for i, expectedCol := range tt.expectedCols { - if i < len(result.Columns) && result.Columns[i] != expectedCol { - t.Errorf("Expected column %d to be '%s', got '%s'", i, expectedCol, result.Columns[i]) - } - } - - // Validate first row if provided - if len(result.Rows) > 0 && tt.validateFirstRow != nil { - firstRow := result.Rows[0] - stringRow := make([]string, len(firstRow)) - for i, val := range firstRow { - stringRow[i] = val.ToString() - } - tt.validateFirstRow(t, stringRow) - } - - // Log results for debugging - t.Logf("Query: %s", tt.query) - t.Logf("Columns: %v", result.Columns) - for i, row := range result.Rows { - values := make([]string, len(row)) - for j, val := range row { - values[j] = val.ToString() - } - t.Logf("Row %d: %v", i, values) - } - }) - } -} - -// TestSQLEngine_StringFunctionErrorHandling tests error cases for string functions -func TestSQLEngine_StringFunctionErrorHandling(t *testing.T) { - engine := NewTestSQLEngine() - - // This should now work (previously would error as "unsupported aggregation function") - result, err := engine.ExecuteSQL(context.Background(), "SELECT UPPER(status) FROM user_events LIMIT 1") - if err != nil { - t.Fatalf("UPPER function should work, got error: %v", err) - } - if result.Error != nil { - t.Fatalf("UPPER function should work, got query error: %v", result.Error) - } - - t.Logf("UPPER function works correctly") - - // This should now work (previously would error as "unsupported aggregation function") - result2, err2 := engine.ExecuteSQL(context.Background(), "SELECT LENGTH(action) FROM user_events LIMIT 1") - if err2 != nil { - t.Fatalf("LENGTH function should work, got error: %v", err2) - } - if result2.Error != nil { - t.Fatalf("LENGTH function should work, got query error: %v", result2.Error) - } - - t.Logf("LENGTH function works correctly") -} diff --git a/weed/query/engine/system_columns.go b/weed/query/engine/system_columns.go deleted file mode 100644 index a982416ed..000000000 --- a/weed/query/engine/system_columns.go +++ /dev/null @@ -1,160 +0,0 @@ -package engine - -import ( - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" -) - -// System column constants used throughout the SQL engine -const ( - SW_COLUMN_NAME_TIMESTAMP = "_ts_ns" // Message timestamp in nanoseconds (internal) - SW_COLUMN_NAME_KEY = "_key" // Message key - SW_COLUMN_NAME_SOURCE = "_source" // Data source (live_log, parquet_archive, etc.) - SW_COLUMN_NAME_VALUE = "_value" // Raw message value (for schema-less topics) -) - -// System column display names (what users see) -const ( - SW_DISPLAY_NAME_TIMESTAMP = "_ts" // User-facing timestamp column name - // Note: _key and _source keep the same names, only _ts_ns changes to _ts -) - -// isSystemColumn checks if a column is a system column (_ts_ns, _key, _source) -func (e *SQLEngine) isSystemColumn(columnName string) bool { - lowerName := strings.ToLower(columnName) - return lowerName == SW_COLUMN_NAME_TIMESTAMP || - lowerName == SW_COLUMN_NAME_KEY || - lowerName == SW_COLUMN_NAME_SOURCE -} - -// isRegularColumn checks if a column might be a regular data column (placeholder) -func (e *SQLEngine) isRegularColumn(columnName string) bool { - // For now, assume any non-system column is a regular column - return !e.isSystemColumn(columnName) -} - -// getSystemColumnDisplayName returns the user-facing display name for system columns -func (e *SQLEngine) getSystemColumnDisplayName(columnName string) string { - lowerName := strings.ToLower(columnName) - switch lowerName { - case SW_COLUMN_NAME_TIMESTAMP: - return SW_DISPLAY_NAME_TIMESTAMP - case SW_COLUMN_NAME_KEY: - return SW_COLUMN_NAME_KEY // _key stays the same - case SW_COLUMN_NAME_SOURCE: - return SW_COLUMN_NAME_SOURCE // _source stays the same - default: - return columnName // Return original name for non-system columns - } -} - -// isSystemColumnDisplayName checks if a column name is a system column display name -func (e *SQLEngine) isSystemColumnDisplayName(columnName string) bool { - lowerName := strings.ToLower(columnName) - return lowerName == SW_DISPLAY_NAME_TIMESTAMP || - lowerName == SW_COLUMN_NAME_KEY || - lowerName == SW_COLUMN_NAME_SOURCE -} - -// getSystemColumnInternalName returns the internal name for a system column display name -func (e *SQLEngine) getSystemColumnInternalName(displayName string) string { - lowerName := strings.ToLower(displayName) - switch lowerName { - case SW_DISPLAY_NAME_TIMESTAMP: - return SW_COLUMN_NAME_TIMESTAMP - case SW_COLUMN_NAME_KEY: - return SW_COLUMN_NAME_KEY - case SW_COLUMN_NAME_SOURCE: - return SW_COLUMN_NAME_SOURCE - default: - return displayName // Return original name for non-system columns - } -} - -// formatTimestampColumn formats a nanosecond timestamp as a proper timestamp value -func (e *SQLEngine) formatTimestampColumn(timestampNs int64) sqltypes.Value { - // Convert nanoseconds to time.Time - timestamp := time.Unix(timestampNs/1e9, timestampNs%1e9) - - // Format as timestamp string in MySQL datetime format - timestampStr := timestamp.UTC().Format("2006-01-02 15:04:05") - - // Return as a timestamp value using the Timestamp type - return sqltypes.MakeTrusted(sqltypes.Timestamp, []byte(timestampStr)) -} - -// getSystemColumnGlobalMin computes global min for system columns using file metadata -func (e *SQLEngine) getSystemColumnGlobalMin(columnName string, allFileStats map[string][]*ParquetFileStats) interface{} { - lowerName := strings.ToLower(columnName) - - switch lowerName { - case SW_COLUMN_NAME_TIMESTAMP: - // For timestamps, find the earliest timestamp across all files - // This should match what's in the Extended[mq.ExtendedAttrTimestampMin] metadata - var minTimestamp *int64 - for _, fileStats := range allFileStats { - for _, fileStat := range fileStats { - // Extract timestamp from filename (format: YYYY-MM-DD-HH-MM-SS.parquet) - timestamp := e.extractTimestampFromFilename(fileStat.FileName) - if timestamp != 0 { - if minTimestamp == nil || timestamp < *minTimestamp { - minTimestamp = ×tamp - } - } - } - } - if minTimestamp != nil { - return *minTimestamp - } - - case SW_COLUMN_NAME_KEY: - // For keys, we'd need to read the actual parquet column stats - // Fall back to scanning if not available in our current stats - return nil - - case SW_COLUMN_NAME_SOURCE: - // Source is always "parquet_archive" for parquet files - return "parquet_archive" - } - - return nil -} - -// getSystemColumnGlobalMax computes global max for system columns using file metadata -func (e *SQLEngine) getSystemColumnGlobalMax(columnName string, allFileStats map[string][]*ParquetFileStats) interface{} { - lowerName := strings.ToLower(columnName) - - switch lowerName { - case SW_COLUMN_NAME_TIMESTAMP: - // For timestamps, find the latest timestamp across all files - // This should match what's in the Extended[mq.ExtendedAttrTimestampMax] metadata - var maxTimestamp *int64 - for _, fileStats := range allFileStats { - for _, fileStat := range fileStats { - // Extract timestamp from filename (format: YYYY-MM-DD-HH-MM-SS.parquet) - timestamp := e.extractTimestampFromFilename(fileStat.FileName) - if timestamp != 0 { - if maxTimestamp == nil || timestamp > *maxTimestamp { - maxTimestamp = ×tamp - } - } - } - } - if maxTimestamp != nil { - return *maxTimestamp - } - - case SW_COLUMN_NAME_KEY: - // For keys, we'd need to read the actual parquet column stats - // Fall back to scanning if not available in our current stats - return nil - - case SW_COLUMN_NAME_SOURCE: - // Source is always "parquet_archive" for parquet files - return "parquet_archive" - } - - return nil -} diff --git a/weed/query/engine/test_sample_data_test.go b/weed/query/engine/test_sample_data_test.go deleted file mode 100644 index e4a19b431..000000000 --- a/weed/query/engine/test_sample_data_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package engine - -import ( - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// generateSampleHybridData creates sample data that simulates both live and archived messages -// This function is only used for testing and is not included in production builds -func generateSampleHybridData(topicName string, options HybridScanOptions) []HybridScanResult { - now := time.Now().UnixNano() - - // Generate different sample data based on topic name - var sampleData []HybridScanResult - - switch topicName { - case "user_events": - sampleData = []HybridScanResult{ - // Simulated live log data (recent) - // Generate more test data to support LIMIT/OFFSET testing - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 82460}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 9465}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "live_login"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"ip": "10.0.0.1", "live": true}`}}, - "status": {Kind: &schema_pb.Value_StringValue{StringValue: "active"}}, - "action": {Kind: &schema_pb.Value_StringValue{StringValue: "login"}}, - "user_type": {Kind: &schema_pb.Value_StringValue{StringValue: "premium"}}, - "amount": {Kind: &schema_pb.Value_DoubleValue{DoubleValue: 43.619326294957126}}, - }, - Timestamp: now - 300000000000, // 5 minutes ago - Key: []byte("live-user-9465"), - Source: "live_log", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 841256}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 2336}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "live_action"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"action": "click", "live": true}`}}, - "status": {Kind: &schema_pb.Value_StringValue{StringValue: "pending"}}, - "action": {Kind: &schema_pb.Value_StringValue{StringValue: "click"}}, - "user_type": {Kind: &schema_pb.Value_StringValue{StringValue: "standard"}}, - "amount": {Kind: &schema_pb.Value_DoubleValue{DoubleValue: 550.0278410655299}}, - }, - Timestamp: now - 120000000000, // 2 minutes ago - Key: []byte("live-user-2336"), - Source: "live_log", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 55537}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 6912}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "purchase"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"amount": 25.99, "item": "book"}`}}, - }, - Timestamp: now - 90000000000, // 1.5 minutes ago - Key: []byte("live-user-6912"), - Source: "live_log", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 65143}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 5102}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "page_view"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"page": "/home", "duration": 30}`}}, - }, - Timestamp: now - 80000000000, // 80 seconds ago - Key: []byte("live-user-5102"), - Source: "live_log", - }, - - // Simulated archived Parquet data (older) - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 686003}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 2759}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "archived_login"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"ip": "192.168.1.1", "archived": true}`}}, - }, - Timestamp: now - 3600000000000, // 1 hour ago - Key: []byte("archived-user-2759"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 417224}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 7810}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "archived_logout"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"duration": 1800, "archived": true}`}}, - }, - Timestamp: now - 1800000000000, // 30 minutes ago - Key: []byte("archived-user-7810"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 424297}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 8897}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "purchase"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"amount": 45.50, "item": "electronics"}`}}, - }, - Timestamp: now - 1500000000000, // 25 minutes ago - Key: []byte("archived-user-8897"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 431189}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 3400}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "signup"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"referral": "google", "plan": "free"}`}}, - }, - Timestamp: now - 1200000000000, // 20 minutes ago - Key: []byte("archived-user-3400"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 413249}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 5175}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "update_profile"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"field": "email", "new_value": "user@example.com"}`}}, - }, - Timestamp: now - 900000000000, // 15 minutes ago - Key: []byte("archived-user-5175"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 120612}}, - "user_id": {Kind: &schema_pb.Value_Int32Value{Int32Value: 5429}}, - "event_type": {Kind: &schema_pb.Value_StringValue{StringValue: "comment"}}, - "data": {Kind: &schema_pb.Value_StringValue{StringValue: `{"post_id": 123, "comment": "Great post!"}`}}, - }, - Timestamp: now - 600000000000, // 10 minutes ago - Key: []byte("archived-user-5429"), - Source: "parquet_archive", - }, - } - - case "system_logs": - sampleData = []HybridScanResult{ - // Simulated live system logs (recent) - { - Values: map[string]*schema_pb.Value{ - "level": {Kind: &schema_pb.Value_StringValue{StringValue: "INFO"}}, - "message": {Kind: &schema_pb.Value_StringValue{StringValue: "Live system startup completed"}}, - "service": {Kind: &schema_pb.Value_StringValue{StringValue: "auth-service"}}, - }, - Timestamp: now - 240000000000, // 4 minutes ago - Key: []byte("live-sys-001"), - Source: "live_log", - }, - { - Values: map[string]*schema_pb.Value{ - "level": {Kind: &schema_pb.Value_StringValue{StringValue: "WARN"}}, - "message": {Kind: &schema_pb.Value_StringValue{StringValue: "Live high memory usage detected"}}, - "service": {Kind: &schema_pb.Value_StringValue{StringValue: "monitor-service"}}, - }, - Timestamp: now - 180000000000, // 3 minutes ago - Key: []byte("live-sys-002"), - Source: "live_log", - }, - - // Simulated archived system logs (older) - { - Values: map[string]*schema_pb.Value{ - "level": {Kind: &schema_pb.Value_StringValue{StringValue: "ERROR"}}, - "message": {Kind: &schema_pb.Value_StringValue{StringValue: "Archived database connection failed"}}, - "service": {Kind: &schema_pb.Value_StringValue{StringValue: "db-service"}}, - }, - Timestamp: now - 7200000000000, // 2 hours ago - Key: []byte("archived-sys-001"), - Source: "parquet_archive", - }, - { - Values: map[string]*schema_pb.Value{ - "level": {Kind: &schema_pb.Value_StringValue{StringValue: "INFO"}}, - "message": {Kind: &schema_pb.Value_StringValue{StringValue: "Archived batch job completed"}}, - "service": {Kind: &schema_pb.Value_StringValue{StringValue: "batch-service"}}, - }, - Timestamp: now - 3600000000000, // 1 hour ago - Key: []byte("archived-sys-002"), - Source: "parquet_archive", - }, - } - - default: - // For unknown topics, return empty data - sampleData = []HybridScanResult{} - } - - // Apply predicate filtering if specified - if options.Predicate != nil { - var filtered []HybridScanResult - for _, result := range sampleData { - // Convert to RecordValue for predicate testing - recordValue := &schema_pb.RecordValue{Fields: make(map[string]*schema_pb.Value)} - for k, v := range result.Values { - recordValue.Fields[k] = v - } - recordValue.Fields[SW_COLUMN_NAME_TIMESTAMP] = &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: result.Timestamp}} - recordValue.Fields[SW_COLUMN_NAME_KEY] = &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: result.Key}} - - if options.Predicate(recordValue) { - filtered = append(filtered, result) - } - } - sampleData = filtered - } - - return sampleData -} diff --git a/weed/query/engine/timestamp_integration_test.go b/weed/query/engine/timestamp_integration_test.go deleted file mode 100644 index cb156103c..000000000 --- a/weed/query/engine/timestamp_integration_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package engine - -import ( - "strconv" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestTimestampIntegrationScenarios tests complete end-to-end scenarios -func TestTimestampIntegrationScenarios(t *testing.T) { - engine := NewTestSQLEngine() - - // Simulate the exact timestamps that were failing in production - timestamps := []struct { - timestamp int64 - id int64 - name string - }{ - {1756947416566456262, 897795, "original_failing_1"}, - {1756947416566439304, 715356, "original_failing_2"}, - {1756913789829292386, 82460, "current_data"}, - } - - t.Run("EndToEndTimestampEquality", func(t *testing.T) { - for _, ts := range timestamps { - t.Run(ts.name, func(t *testing.T) { - // Create a test record - record := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: ts.timestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: ts.id}}, - }, - } - - // Build SQL query - sql := "SELECT id, _ts_ns FROM test WHERE _ts_ns = " + strconv.FormatInt(ts.timestamp, 10) - stmt, err := ParseSQL(sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - - // Test time filter extraction (Fix #2 and #5) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - assert.Equal(t, ts.timestamp-1, startTimeNs, "Should set startTimeNs to avoid scan boundary bug") - assert.Equal(t, int64(0), stopTimeNs, "Should not set stopTimeNs to avoid premature termination") - - // Test predicate building (Fix #1) - predicate, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err) - - // Test predicate evaluation (Fix #1 - precision) - result := predicate(record) - assert.True(t, result, "Should match exact timestamp without precision loss") - - // Test that close but different timestamps don't match - closeRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: ts.timestamp + 1}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: ts.id}}, - }, - } - result = predicate(closeRecord) - assert.False(t, result, "Should not match timestamp that differs by 1 nanosecond") - }) - } - }) - - t.Run("ComplexRangeQueries", func(t *testing.T) { - // Test range queries that combine multiple fixes - testCases := []struct { - name string - sql string - shouldSet struct{ start, stop bool } - }{ - { - name: "RangeWithDifferentBounds", - sql: "SELECT * FROM test WHERE _ts_ns >= 1756913789829292386 AND _ts_ns <= 1756947416566456262", - shouldSet: struct{ start, stop bool }{true, true}, - }, - { - name: "RangeWithSameBounds", - sql: "SELECT * FROM test WHERE _ts_ns >= 1756913789829292386 AND _ts_ns <= 1756913789829292386", - shouldSet: struct{ start, stop bool }{true, false}, // Fix #4: equal bounds should not set stop - }, - { - name: "OpenEndedRange", - sql: "SELECT * FROM test WHERE _ts_ns >= 1756913789829292386", - shouldSet: struct{ start, stop bool }{true, false}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - stmt, err := ParseSQL(tc.sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - - if tc.shouldSet.start { - assert.NotEqual(t, int64(0), startTimeNs, "Should set startTimeNs for range query") - } else { - assert.Equal(t, int64(0), startTimeNs, "Should not set startTimeNs") - } - - if tc.shouldSet.stop { - assert.NotEqual(t, int64(0), stopTimeNs, "Should set stopTimeNs for bounded range") - } else { - assert.Equal(t, int64(0), stopTimeNs, "Should not set stopTimeNs") - } - }) - } - }) - - t.Run("ProductionScenarioReproduction", func(t *testing.T) { - // This test reproduces the exact production scenario that was failing - - // Original failing query: WHERE _ts_ns = 1756947416566456262 - sql := "SELECT id, _ts_ns FROM ecommerce.user_events WHERE _ts_ns = 1756947416566456262" - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse the production query that was failing") - - selectStmt := stmt.(*SelectStatement) - - // Verify time filter extraction works correctly (fixes scan termination issue) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - assert.Equal(t, int64(1756947416566456261), startTimeNs, "Should set startTimeNs to target-1") // Fix #5 - assert.Equal(t, int64(0), stopTimeNs, "Should not set stopTimeNs") // Fix #2 - - // Verify predicate handles the large timestamp correctly - predicate, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err, "Should build predicate for production query") - - // Test with the actual record that exists in production - productionRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456262}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - result := predicate(productionRecord) - assert.True(t, result, "Should match the production record that was failing before") // Fix #1 - - // Verify precision - test that a timestamp differing by just 1 nanosecond doesn't match - slightlyDifferentRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 1756947416566456263}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - result = predicate(slightlyDifferentRecord) - assert.False(t, result, "Should NOT match record with timestamp differing by 1 nanosecond") - }) -} - -// TestRegressionPrevention ensures the fixes don't break normal cases -func TestRegressionPrevention(t *testing.T) { - engine := NewTestSQLEngine() - - t.Run("SmallTimestamps", func(t *testing.T) { - // Ensure small timestamps still work normally - smallTimestamp := int64(1234567890) - - record := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: smallTimestamp}}, - }, - } - - result := engine.valuesEqual(record.Fields["_ts_ns"], smallTimestamp) - assert.True(t, result, "Small timestamps should continue to work") - }) - - t.Run("NonTimestampColumns", func(t *testing.T) { - // Ensure non-timestamp columns aren't affected by timestamp fixes - sql := "SELECT * FROM test WHERE id = 12345" - stmt, err := ParseSQL(sql) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - - assert.Equal(t, int64(0), startTimeNs, "Non-timestamp queries should not set startTimeNs") - assert.Equal(t, int64(0), stopTimeNs, "Non-timestamp queries should not set stopTimeNs") - }) - - t.Run("StringComparisons", func(t *testing.T) { - // Ensure string comparisons aren't affected - record := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "name": {Kind: &schema_pb.Value_StringValue{StringValue: "test"}}, - }, - } - - result := engine.valuesEqual(record.Fields["name"], "test") - assert.True(t, result, "String comparisons should continue to work") - }) -} diff --git a/weed/query/engine/timestamp_query_fixes_test.go b/weed/query/engine/timestamp_query_fixes_test.go deleted file mode 100644 index 2f5f08cbd..000000000 --- a/weed/query/engine/timestamp_query_fixes_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package engine - -import ( - "strconv" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" - "github.com/stretchr/testify/assert" -) - -// TestTimestampQueryFixes tests all the timestamp query fixes comprehensively -func TestTimestampQueryFixes(t *testing.T) { - engine := NewTestSQLEngine() - - // Test timestamps from the actual failing cases - largeTimestamp1 := int64(1756947416566456262) // Original failing query - largeTimestamp2 := int64(1756947416566439304) // Second failing query - largeTimestamp3 := int64(1756913789829292386) // Current data timestamp - - t.Run("Fix1_PrecisionLoss", func(t *testing.T) { - // Test that large int64 timestamps don't lose precision in comparisons - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp1}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - }, - } - - // Test equality comparison - result := engine.valuesEqual(testRecord.Fields["_ts_ns"], largeTimestamp1) - assert.True(t, result, "Large timestamp equality should work without precision loss") - - // Test inequality comparison - result = engine.valuesEqual(testRecord.Fields["_ts_ns"], largeTimestamp1+1) - assert.False(t, result, "Large timestamp inequality should be detected accurately") - - // Test less than comparison - result = engine.valueLessThan(testRecord.Fields["_ts_ns"], largeTimestamp1+1) - assert.True(t, result, "Large timestamp less-than should work without precision loss") - - // Test greater than comparison - result = engine.valueGreaterThan(testRecord.Fields["_ts_ns"], largeTimestamp1-1) - assert.True(t, result, "Large timestamp greater-than should work without precision loss") - }) - - t.Run("Fix2_TimeFilterExtraction", func(t *testing.T) { - // Test that equality queries don't set stopTimeNs (which causes premature termination) - equalitySQL := "SELECT * FROM test WHERE _ts_ns = " + strconv.FormatInt(largeTimestamp2, 10) - stmt, err := ParseSQL(equalitySQL) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - - assert.Equal(t, largeTimestamp2-1, startTimeNs, "Equality query should set startTimeNs to target-1") - assert.Equal(t, int64(0), stopTimeNs, "Equality query should NOT set stopTimeNs to avoid early termination") - }) - - t.Run("Fix3_RangeBoundaryFix", func(t *testing.T) { - // Test that range queries with equal boundaries don't cause premature termination - rangeSQL := "SELECT * FROM test WHERE _ts_ns >= " + strconv.FormatInt(largeTimestamp3, 10) + - " AND _ts_ns <= " + strconv.FormatInt(largeTimestamp3, 10) - stmt, err := ParseSQL(rangeSQL) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - - // Should be treated like an equality query to avoid premature termination - assert.NotEqual(t, int64(0), startTimeNs, "Range with equal boundaries should set startTimeNs") - assert.Equal(t, int64(0), stopTimeNs, "Range with equal boundaries should NOT set stopTimeNs") - }) - - t.Run("Fix4_DifferentRangeBoundaries", func(t *testing.T) { - // Test that normal range queries still work correctly - rangeSQL := "SELECT * FROM test WHERE _ts_ns >= " + strconv.FormatInt(largeTimestamp1, 10) + - " AND _ts_ns <= " + strconv.FormatInt(largeTimestamp2, 10) - stmt, err := ParseSQL(rangeSQL) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - - assert.Equal(t, largeTimestamp1, startTimeNs, "Range query should set correct startTimeNs") - assert.Equal(t, largeTimestamp2, stopTimeNs, "Range query should set correct stopTimeNs") - }) - - t.Run("Fix5_PredicateAccuracy", func(t *testing.T) { - // Test that predicates correctly evaluate large timestamp equality - equalitySQL := "SELECT * FROM test WHERE _ts_ns = " + strconv.FormatInt(largeTimestamp1, 10) - stmt, err := ParseSQL(equalitySQL) - assert.NoError(t, err) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err) - - // Test with matching record - matchingRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp1}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 897795}}, - }, - } - - result := predicate(matchingRecord) - assert.True(t, result, "Predicate should match record with exact timestamp") - - // Test with non-matching record - nonMatchingRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp1 + 1}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: 12345}}, - }, - } - - result = predicate(nonMatchingRecord) - assert.False(t, result, "Predicate should NOT match record with different timestamp") - }) - - t.Run("Fix6_ComparisonOperators", func(t *testing.T) { - // Test all comparison operators work correctly with large timestamps - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: largeTimestamp2}}, - }, - } - - operators := []struct { - sql string - expected bool - }{ - {"_ts_ns = " + strconv.FormatInt(largeTimestamp2, 10), true}, - {"_ts_ns = " + strconv.FormatInt(largeTimestamp2+1, 10), false}, - {"_ts_ns > " + strconv.FormatInt(largeTimestamp2-1, 10), true}, - {"_ts_ns > " + strconv.FormatInt(largeTimestamp2, 10), false}, - {"_ts_ns >= " + strconv.FormatInt(largeTimestamp2, 10), true}, - {"_ts_ns >= " + strconv.FormatInt(largeTimestamp2+1, 10), false}, - {"_ts_ns < " + strconv.FormatInt(largeTimestamp2+1, 10), true}, - {"_ts_ns < " + strconv.FormatInt(largeTimestamp2, 10), false}, - {"_ts_ns <= " + strconv.FormatInt(largeTimestamp2, 10), true}, - {"_ts_ns <= " + strconv.FormatInt(largeTimestamp2-1, 10), false}, - } - - for _, op := range operators { - sql := "SELECT * FROM test WHERE " + op.sql - stmt, err := ParseSQL(sql) - assert.NoError(t, err, "Should parse SQL: %s", op.sql) - - selectStmt := stmt.(*SelectStatement) - predicate, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err, "Should build predicate for: %s", op.sql) - - result := predicate(testRecord) - assert.Equal(t, op.expected, result, "Operator test failed for: %s", op.sql) - } - }) - - t.Run("Fix7_EdgeCases", func(t *testing.T) { - // Test edge cases and boundary conditions - - // Maximum int64 value - maxInt64 := int64(9223372036854775807) - testRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: maxInt64}}, - }, - } - - // Test equality with maximum int64 - result := engine.valuesEqual(testRecord.Fields["_ts_ns"], maxInt64) - assert.True(t, result, "Should handle maximum int64 value correctly") - - // Test with zero timestamp - zeroRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: 0}}, - }, - } - - result = engine.valuesEqual(zeroRecord.Fields["_ts_ns"], int64(0)) - assert.True(t, result, "Should handle zero timestamp correctly") - }) -} - -// TestOriginalFailingQueries tests the specific queries that were failing before the fixes -func TestOriginalFailingQueries(t *testing.T) { - engine := NewTestSQLEngine() - - failingQueries := []struct { - name string - sql string - timestamp int64 - id int64 - }{ - { - name: "OriginalQuery1", - sql: "select id, _ts_ns from ecommerce.user_events where _ts_ns = 1756947416566456262", - timestamp: 1756947416566456262, - id: 897795, - }, - { - name: "OriginalQuery2", - sql: "select id, _ts_ns from ecommerce.user_events where _ts_ns = 1756947416566439304", - timestamp: 1756947416566439304, - id: 715356, - }, - { - name: "CurrentDataQuery", - sql: "select id, _ts_ns from ecommerce.user_events where _ts_ns = 1756913789829292386", - timestamp: 1756913789829292386, - id: 82460, - }, - } - - for _, query := range failingQueries { - t.Run(query.name, func(t *testing.T) { - // Parse the SQL - stmt, err := ParseSQL(query.sql) - assert.NoError(t, err, "Should parse the failing query") - - selectStmt := stmt.(*SelectStatement) - - // Test time filter extraction - startTimeNs, stopTimeNs := engine.extractTimeFilters(selectStmt.Where.Expr) - assert.Equal(t, query.timestamp-1, startTimeNs, "Should set startTimeNs to timestamp-1") - assert.Equal(t, int64(0), stopTimeNs, "Should not set stopTimeNs for equality") - - // Test predicate building and evaluation - predicate, err := engine.buildPredicate(selectStmt.Where.Expr) - assert.NoError(t, err, "Should build predicate") - - // Test with matching record - matchingRecord := &schema_pb.RecordValue{ - Fields: map[string]*schema_pb.Value{ - "_ts_ns": {Kind: &schema_pb.Value_Int64Value{Int64Value: query.timestamp}}, - "id": {Kind: &schema_pb.Value_Int64Value{Int64Value: query.id}}, - }, - } - - result := predicate(matchingRecord) - assert.True(t, result, "Predicate should match the target record for query: %s", query.name) - }) - } -} diff --git a/weed/query/engine/types.go b/weed/query/engine/types.go deleted file mode 100644 index edcd5bd9a..000000000 --- a/weed/query/engine/types.go +++ /dev/null @@ -1,122 +0,0 @@ -package engine - -import ( - "errors" - "fmt" - - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" -) - -// ExecutionNode represents a node in the execution plan tree -type ExecutionNode interface { - GetNodeType() string - GetChildren() []ExecutionNode - GetDescription() string - GetDetails() map[string]interface{} -} - -// FileSourceNode represents a leaf node - an actual data source file -type FileSourceNode struct { - FilePath string `json:"file_path"` - SourceType string `json:"source_type"` // "parquet", "live_log", "broker_buffer" - Predicates []string `json:"predicates"` // Pushed down predicates - Operations []string `json:"operations"` // "sequential_scan", "statistics_skip", etc. - EstimatedRows int64 `json:"estimated_rows"` // Estimated rows to process - OptimizationHint string `json:"optimization_hint"` // "fast_path", "full_scan", etc. - Details map[string]interface{} `json:"details"` -} - -func (f *FileSourceNode) GetNodeType() string { return "file_source" } -func (f *FileSourceNode) GetChildren() []ExecutionNode { return nil } -func (f *FileSourceNode) GetDescription() string { - if f.OptimizationHint != "" { - return fmt.Sprintf("%s (%s)", f.FilePath, f.OptimizationHint) - } - return f.FilePath -} -func (f *FileSourceNode) GetDetails() map[string]interface{} { return f.Details } - -// MergeOperationNode represents a branch node - combines data from multiple sources -type MergeOperationNode struct { - OperationType string `json:"operation_type"` // "chronological_merge", "union", etc. - Children []ExecutionNode `json:"children"` - Description string `json:"description"` - Details map[string]interface{} `json:"details"` -} - -func (m *MergeOperationNode) GetNodeType() string { return "merge_operation" } -func (m *MergeOperationNode) GetChildren() []ExecutionNode { return m.Children } -func (m *MergeOperationNode) GetDescription() string { return m.Description } -func (m *MergeOperationNode) GetDetails() map[string]interface{} { return m.Details } - -// ScanOperationNode represents an intermediate node - a scanning strategy -type ScanOperationNode struct { - ScanType string `json:"scan_type"` // "parquet_scan", "live_log_scan", "hybrid_scan" - Children []ExecutionNode `json:"children"` - Predicates []string `json:"predicates"` // Predicates applied at this level - Description string `json:"description"` - Details map[string]interface{} `json:"details"` -} - -func (s *ScanOperationNode) GetNodeType() string { return "scan_operation" } -func (s *ScanOperationNode) GetChildren() []ExecutionNode { return s.Children } -func (s *ScanOperationNode) GetDescription() string { return s.Description } -func (s *ScanOperationNode) GetDetails() map[string]interface{} { return s.Details } - -// QueryExecutionPlan contains information about how a query was executed -type QueryExecutionPlan struct { - QueryType string - ExecutionStrategy string `json:"execution_strategy"` // fast_path, full_scan, hybrid - RootNode ExecutionNode `json:"root_node,omitempty"` // Root of execution tree - - // Legacy fields (kept for compatibility) - DataSources []string `json:"data_sources"` // parquet_files, live_logs, broker_buffer - PartitionsScanned int `json:"partitions_scanned"` - ParquetFilesScanned int `json:"parquet_files_scanned"` - LiveLogFilesScanned int `json:"live_log_files_scanned"` - TotalRowsProcessed int64 `json:"total_rows_processed"` - OptimizationsUsed []string `json:"optimizations_used"` // parquet_stats, predicate_pushdown, etc. - TimeRangeFilters map[string]interface{} `json:"time_range_filters,omitempty"` - Aggregations []string `json:"aggregations,omitempty"` - ExecutionTimeMs float64 `json:"execution_time_ms"` - Details map[string]interface{} `json:"details,omitempty"` - - // Broker buffer information - BrokerBufferQueried bool `json:"broker_buffer_queried"` - BrokerBufferMessages int `json:"broker_buffer_messages"` - BufferStartIndex int64 `json:"buffer_start_index,omitempty"` -} - -// Plan detail keys -const ( - PlanDetailStartTimeNs = "StartTimeNs" - PlanDetailStopTimeNs = "StopTimeNs" -) - -// QueryResult represents the result of a SQL query execution -type QueryResult struct { - Columns []string `json:"columns"` - Rows [][]sqltypes.Value `json:"rows"` - Error error `json:"error,omitempty"` - ExecutionPlan *QueryExecutionPlan `json:"execution_plan,omitempty"` - // Schema information for type inference (optional) - Database string `json:"database,omitempty"` - Table string `json:"table,omitempty"` -} - -// NoSchemaError indicates that a topic exists but has no schema defined -// This is a normal condition for quiet topics that haven't received messages yet -type NoSchemaError struct { - Namespace string - Topic string -} - -func (e NoSchemaError) Error() string { - return fmt.Sprintf("topic %s.%s has no schema", e.Namespace, e.Topic) -} - -// IsNoSchemaError checks if an error is a NoSchemaError -func IsNoSchemaError(err error) bool { - var noSchemaErr NoSchemaError - return errors.As(err, &noSchemaErr) -} diff --git a/weed/query/engine/where_clause_debug_test.go b/weed/query/engine/where_clause_debug_test.go deleted file mode 100644 index 382da4594..000000000 --- a/weed/query/engine/where_clause_debug_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" -) - -// TestWhereParsing tests if WHERE clauses are parsed correctly by CockroachDB parser -func TestWhereParsing(t *testing.T) { - - testCases := []struct { - name string - sql string - expectError bool - desc string - }{ - { - name: "Simple_Equals", - sql: "SELECT id FROM user_events WHERE id = 82460", - expectError: false, - desc: "Simple equality WHERE clause", - }, - { - name: "Greater_Than", - sql: "SELECT id FROM user_events WHERE id > 10000000", - expectError: false, - desc: "Greater than WHERE clause", - }, - { - name: "String_Equals", - sql: "SELECT id FROM user_events WHERE status = 'active'", - expectError: false, - desc: "String equality WHERE clause", - }, - { - name: "Impossible_Condition", - sql: "SELECT id FROM user_events WHERE 1 = 0", - expectError: false, - desc: "Impossible WHERE condition (should parse but return no rows)", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Test parsing first - parsedStmt, parseErr := ParseSQL(tc.sql) - - if tc.expectError { - if parseErr == nil { - t.Errorf("Expected parse error but got none for: %s", tc.desc) - } else { - t.Logf("PASS: Expected parse error: %v", parseErr) - } - return - } - - if parseErr != nil { - t.Errorf("Unexpected parse error for %s: %v", tc.desc, parseErr) - return - } - - // Check if it's a SELECT statement - selectStmt, ok := parsedStmt.(*SelectStatement) - if !ok { - t.Errorf("Expected SelectStatement, got %T", parsedStmt) - return - } - - // Check if WHERE clause exists - if selectStmt.Where == nil { - t.Errorf("WHERE clause not parsed for: %s", tc.desc) - return - } - - t.Logf("PASS: WHERE clause parsed successfully for: %s", tc.desc) - t.Logf(" WHERE expression type: %T", selectStmt.Where.Expr) - }) - } -} - -// TestPredicateBuilding tests if buildPredicate can handle CockroachDB AST nodes -func TestPredicateBuilding(t *testing.T) { - engine := NewTestSQLEngine() - - testCases := []struct { - name string - sql string - desc string - testRecord *schema_pb.RecordValue - shouldMatch bool - }{ - { - name: "Simple_Equals_Match", - sql: "SELECT id FROM user_events WHERE id = 82460", - desc: "Simple equality - should match", - testRecord: createTestRecord("82460", "active"), - shouldMatch: true, - }, - { - name: "Simple_Equals_NoMatch", - sql: "SELECT id FROM user_events WHERE id = 82460", - desc: "Simple equality - should not match", - testRecord: createTestRecord("999999", "active"), - shouldMatch: false, - }, - { - name: "Greater_Than_Match", - sql: "SELECT id FROM user_events WHERE id > 100000", - desc: "Greater than - should match", - testRecord: createTestRecord("841256", "active"), - shouldMatch: true, - }, - { - name: "Greater_Than_NoMatch", - sql: "SELECT id FROM user_events WHERE id > 100000", - desc: "Greater than - should not match", - testRecord: createTestRecord("82460", "active"), - shouldMatch: false, - }, - { - name: "String_Equals_Match", - sql: "SELECT id FROM user_events WHERE status = 'active'", - desc: "String equality - should match", - testRecord: createTestRecord("82460", "active"), - shouldMatch: true, - }, - { - name: "String_Equals_NoMatch", - sql: "SELECT id FROM user_events WHERE status = 'active'", - desc: "String equality - should not match", - testRecord: createTestRecord("82460", "inactive"), - shouldMatch: false, - }, - { - name: "Impossible_Condition", - sql: "SELECT id FROM user_events WHERE 1 = 0", - desc: "Impossible condition - should never match", - testRecord: createTestRecord("82460", "active"), - shouldMatch: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Parse the SQL - parsedStmt, parseErr := ParseSQL(tc.sql) - if parseErr != nil { - t.Fatalf("Parse error: %v", parseErr) - } - - selectStmt, ok := parsedStmt.(*SelectStatement) - if !ok || selectStmt.Where == nil { - t.Fatalf("No WHERE clause found") - } - - // Try to build the predicate - predicate, buildErr := engine.buildPredicate(selectStmt.Where.Expr) - if buildErr != nil { - t.Errorf("PREDICATE BUILD ERROR: %v", buildErr) - t.Errorf("This might be the root cause of WHERE clause not working!") - t.Errorf("WHERE expression type: %T", selectStmt.Where.Expr) - return - } - - // Test the predicate against our test record - actualMatch := predicate(tc.testRecord) - - if actualMatch == tc.shouldMatch { - t.Logf("PASS: %s - Predicate worked correctly (match=%v)", tc.desc, actualMatch) - } else { - t.Errorf("FAIL: %s - Expected match=%v, got match=%v", tc.desc, tc.shouldMatch, actualMatch) - t.Errorf("This confirms the predicate logic is incorrect!") - } - }) - } -} - -// TestWhereClauseEndToEnd tests complete WHERE clause functionality -func TestWhereClauseEndToEnd(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("END-TO-END WHERE CLAUSE VALIDATION") - t.Log("===================================") - - // Test 1: Baseline (no WHERE clause) - baselineResult, err := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events") - if err != nil { - t.Fatalf("Baseline query failed: %v", err) - } - baselineCount := len(baselineResult.Rows) - t.Logf("Baseline (no WHERE): %d rows", baselineCount) - - // Test 2: Impossible condition - impossibleResult, err := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events WHERE 1 = 0") - if err != nil { - t.Fatalf("Impossible WHERE query failed: %v", err) - } - impossibleCount := len(impossibleResult.Rows) - t.Logf("WHERE 1 = 0: %d rows", impossibleCount) - - // CRITICAL TEST: This should detect the WHERE clause bug - if impossibleCount == baselineCount { - t.Errorf("WHERE CLAUSE BUG CONFIRMED:") - t.Errorf(" Impossible condition returned same row count as no WHERE clause") - t.Errorf(" This proves WHERE filtering is not being applied") - } else if impossibleCount == 0 { - t.Logf("Impossible WHERE condition correctly returns 0 rows") - } - - // Test 3: Specific ID filtering - if baselineCount > 0 { - firstId := baselineResult.Rows[0][0].ToString() - specificResult, err := engine.ExecuteSQL(context.Background(), - "SELECT id FROM user_events WHERE id = "+firstId) - if err != nil { - t.Fatalf("Specific ID WHERE query failed: %v", err) - } - specificCount := len(specificResult.Rows) - t.Logf("WHERE id = %s: %d rows", firstId, specificCount) - - if specificCount == baselineCount { - t.Errorf("WHERE clause bug: Specific ID filter returned all rows") - } else if specificCount == 1 { - t.Logf("Specific ID WHERE clause working correctly") - } else { - t.Logf("Unexpected: Specific ID returned %d rows", specificCount) - } - } - - // Test 4: Range filtering with actual data validation - rangeResult, err := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events WHERE id > 10000000") - if err != nil { - t.Fatalf("Range WHERE query failed: %v", err) - } - rangeCount := len(rangeResult.Rows) - t.Logf("WHERE id > 10000000: %d rows", rangeCount) - - // Check if the filtering actually worked by examining the data - nonMatchingCount := 0 - for _, row := range rangeResult.Rows { - idStr := row[0].ToString() - if idVal, parseErr := strconv.ParseInt(idStr, 10, 64); parseErr == nil { - if idVal <= 10000000 { - nonMatchingCount++ - } - } - } - - if nonMatchingCount > 0 { - t.Errorf("WHERE clause bug: %d rows have id <= 10,000,000 but should be filtered out", nonMatchingCount) - t.Errorf(" Sample IDs that should be filtered: %v", getSampleIds(rangeResult, 3)) - } else { - t.Logf("WHERE id > 10000000 correctly filtered results") - } -} - -// Helper function to create test records for predicate testing -func createTestRecord(id string, status string) *schema_pb.RecordValue { - record := &schema_pb.RecordValue{ - Fields: make(map[string]*schema_pb.Value), - } - - // Add id field (as int64) - if idVal, err := strconv.ParseInt(id, 10, 64); err == nil { - record.Fields["id"] = &schema_pb.Value{ - Kind: &schema_pb.Value_Int64Value{Int64Value: idVal}, - } - } else { - record.Fields["id"] = &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: id}, - } - } - - // Add status field (as string) - record.Fields["status"] = &schema_pb.Value{ - Kind: &schema_pb.Value_StringValue{StringValue: status}, - } - - return record -} - -// Helper function to get sample IDs from result -func getSampleIds(result *QueryResult, count int) []string { - var ids []string - for i := 0; i < count && i < len(result.Rows); i++ { - ids = append(ids, result.Rows[i][0].ToString()) - } - return ids -} - -// TestSpecificWhereClauseBug reproduces the exact issue from real usage -func TestSpecificWhereClauseBug(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("REPRODUCING EXACT WHERE CLAUSE BUG") - t.Log("==================================") - - // The exact query that was failing: WHERE id > 10000000 - sql := "SELECT id FROM user_events WHERE id > 10000000 LIMIT 10 OFFSET 5" - result, err := engine.ExecuteSQL(context.Background(), sql) - - if err != nil { - t.Fatalf("Query failed: %v", err) - } - - t.Logf("Query: %s", sql) - t.Logf("Returned %d rows:", len(result.Rows)) - - // Check each returned ID - bugDetected := false - for i, row := range result.Rows { - idStr := row[0].ToString() - if idVal, parseErr := strconv.ParseInt(idStr, 10, 64); parseErr == nil { - t.Logf("Row %d: id = %d", i+1, idVal) - if idVal <= 10000000 { - bugDetected = true - t.Errorf("BUG: id %d should be filtered out (<= 10,000,000)", idVal) - } - } - } - - if !bugDetected { - t.Log("WHERE clause working correctly - all IDs > 10,000,000") - } else { - t.Error("WHERE clause bug confirmed: Returned IDs that should be filtered out") - } -} diff --git a/weed/query/engine/where_validation_test.go b/weed/query/engine/where_validation_test.go deleted file mode 100644 index 4ba7d1c70..000000000 --- a/weed/query/engine/where_validation_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package engine - -import ( - "context" - "strconv" - "testing" -) - -// TestWhereClauseValidation tests WHERE clause functionality with various conditions -func TestWhereClauseValidation(t *testing.T) { - engine := NewTestSQLEngine() - - t.Log("WHERE CLAUSE VALIDATION TESTS") - t.Log("==============================") - - // Test 1: Baseline - get all rows to understand the data - baselineResult, err := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events") - if err != nil { - t.Fatalf("Baseline query failed: %v", err) - } - - t.Logf("Baseline data - Total rows: %d", len(baselineResult.Rows)) - if len(baselineResult.Rows) > 0 { - t.Logf("Sample IDs: %s, %s, %s", - baselineResult.Rows[0][0].ToString(), - baselineResult.Rows[1][0].ToString(), - baselineResult.Rows[2][0].ToString()) - } - - // Test 2: Specific ID match (should return 1 row) - firstId := baselineResult.Rows[0][0].ToString() - specificResult, err := engine.ExecuteSQL(context.Background(), - "SELECT id FROM user_events WHERE id = "+firstId) - if err != nil { - t.Fatalf("Specific ID query failed: %v", err) - } - - t.Logf("WHERE id = %s: %d rows", firstId, len(specificResult.Rows)) - if len(specificResult.Rows) == 1 { - t.Logf("Specific ID filtering works correctly") - } else { - t.Errorf("Expected 1 row, got %d rows", len(specificResult.Rows)) - } - - // Test 3: Range filtering (find actual data ranges) - // First, find the min and max IDs in our data - var minId, maxId int64 = 999999999, 0 - for _, row := range baselineResult.Rows { - if idVal, err := strconv.ParseInt(row[0].ToString(), 10, 64); err == nil { - if idVal < minId { - minId = idVal - } - if idVal > maxId { - maxId = idVal - } - } - } - - t.Logf("Data range: min ID = %d, max ID = %d", minId, maxId) - - // Test with a threshold between min and max - threshold := (minId + maxId) / 2 - rangeResult, err := engine.ExecuteSQL(context.Background(), - "SELECT id FROM user_events WHERE id > "+strconv.FormatInt(threshold, 10)) - if err != nil { - t.Fatalf("Range query failed: %v", err) - } - - t.Logf("WHERE id > %d: %d rows", threshold, len(rangeResult.Rows)) - - // Verify all returned IDs are > threshold - allCorrect := true - for _, row := range rangeResult.Rows { - if idVal, err := strconv.ParseInt(row[0].ToString(), 10, 64); err == nil { - if idVal <= threshold { - t.Errorf("Found ID %d which should be filtered out (<= %d)", idVal, threshold) - allCorrect = false - } - } - } - - if allCorrect && len(rangeResult.Rows) > 0 { - t.Logf("Range filtering works correctly - all returned IDs > %d", threshold) - } else if len(rangeResult.Rows) == 0 { - t.Logf("Range filtering works correctly - no IDs > %d in data", threshold) - } - - // Test 4: String filtering - statusResult, err := engine.ExecuteSQL(context.Background(), - "SELECT id, status FROM user_events WHERE status = 'active'") - if err != nil { - t.Fatalf("Status query failed: %v", err) - } - - t.Logf("WHERE status = 'active': %d rows", len(statusResult.Rows)) - - // Verify all returned rows have status = 'active' - statusCorrect := true - for _, row := range statusResult.Rows { - if len(row) > 1 && row[1].ToString() != "active" { - t.Errorf("Found status '%s' which should be filtered out", row[1].ToString()) - statusCorrect = false - } - } - - if statusCorrect { - t.Logf("String filtering works correctly") - } - - // Test 5: Comparison with actual real-world case - t.Log("\nTESTING REAL-WORLD CASE:") - realWorldResult, err := engine.ExecuteSQL(context.Background(), - "SELECT id FROM user_events WHERE id > 10000000 LIMIT 10 OFFSET 5") - if err != nil { - t.Fatalf("Real-world query failed: %v", err) - } - - t.Logf("Real-world query returned: %d rows", len(realWorldResult.Rows)) - - // Check if any IDs are <= 10,000,000 (should be 0) - violationCount := 0 - for _, row := range realWorldResult.Rows { - if idVal, err := strconv.ParseInt(row[0].ToString(), 10, 64); err == nil { - if idVal <= 10000000 { - violationCount++ - } - } - } - - if violationCount == 0 { - t.Logf("Real-world case FIXED: No violations found") - } else { - t.Errorf("Real-world case FAILED: %d violations found", violationCount) - } -} - -// TestWhereClauseComparisonOperators tests all comparison operators -func TestWhereClauseComparisonOperators(t *testing.T) { - engine := NewTestSQLEngine() - - // Get baseline data - baselineResult, _ := engine.ExecuteSQL(context.Background(), "SELECT id FROM user_events") - if len(baselineResult.Rows) == 0 { - t.Skip("No test data available") - return - } - - // Use the second ID as our test value - testId := baselineResult.Rows[1][0].ToString() - - operators := []struct { - op string - desc string - expectRows bool - }{ - {"=", "equals", true}, - {"!=", "not equals", true}, - {">", "greater than", false}, // Depends on data - {"<", "less than", true}, // Should have some results - {">=", "greater or equal", true}, - {"<=", "less or equal", true}, - } - - t.Logf("Testing comparison operators with ID = %s", testId) - - for _, op := range operators { - sql := "SELECT id FROM user_events WHERE id " + op.op + " " + testId - result, err := engine.ExecuteSQL(context.Background(), sql) - - if err != nil { - t.Errorf("Operator %s failed: %v", op.op, err) - continue - } - - t.Logf("WHERE id %s %s: %d rows (%s)", op.op, testId, len(result.Rows), op.desc) - - // Basic validation - should not return more rows than baseline - if len(result.Rows) > len(baselineResult.Rows) { - t.Errorf("Operator %s returned more rows than baseline", op.op) - } - } -} diff --git a/weed/query/json/query_json.go b/weed/query/json/query_json.go index 7492205be..46f3b1b56 100644 --- a/weed/query/json/query_json.go +++ b/weed/query/json/query_json.go @@ -3,7 +3,7 @@ package json import ( "strconv" - "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" + "github.com/chrislusf/seaweedfs/weed/query/sqltypes" "github.com/tidwall/gjson" "github.com/tidwall/match" ) diff --git a/weed/query/json/seralize.go b/weed/query/json/seralize.go index 6538fefaa..9bbddc2ff 100644 --- a/weed/query/json/seralize.go +++ b/weed/query/json/seralize.go @@ -1,6 +1,6 @@ package json -import "github.com/seaweedfs/seaweedfs/weed/query/sqltypes" +import "github.com/chrislusf/seaweedfs/weed/query/sqltypes" func ToJson(buf []byte, selections []string, values []sqltypes.Value) []byte { buf = append(buf, '{') diff --git a/weed/remote_storage/azure/azure_highlevel.go b/weed/remote_storage/azure/azure_highlevel.go new file mode 100644 index 000000000..a5cd4070b --- /dev/null +++ b/weed/remote_storage/azure/azure_highlevel.go @@ -0,0 +1,120 @@ +package azure + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + . "github.com/Azure/azure-storage-blob-go/azblob" + "io" + "sync" +) + +// copied from https://github.com/Azure/azure-storage-blob-go/blob/master/azblob/highlevel.go#L73:6 +// uploadReaderAtToBlockBlob was not public + +// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. +func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error + if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { + return nil, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= BlockBlobMaxUploadBlobBytes { + o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = BlobDefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). + } + } + + if readerSize <= BlockBlobMaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = pipeline.NewRequestBodyProgress(body, o.Progress) + } + return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions) + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := DoBatchTransfer(ctx, BatchTransferOptions{ + OperationName: "uploadReaderAtToBlockBlob", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Parallelism: o.Parallelism, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = pipeline.NewRequestBodyProgress(body, + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets a progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) + _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions) + return err + }, + }) + if err != nil { + return nil, err + } + // All put blocks were successful, call Put Block List to finalize the blob + return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions) +} + +// The UUID reserved variants. +const ( + reservedNCS byte = 0x80 + reservedRFC4122 byte = 0x40 + reservedMicrosoft byte = 0x20 + reservedFuture byte = 0x00 +) + +type uuid [16]byte + +// NewUUID returns a new uuid using RFC 4122 algorithm. +func newUUID() (u uuid) { + u = uuid{} + // Set all bits to randomly (or pseudo-randomly) chosen values. + rand.Read(u[:]) + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return +} + +// String returns an unparsed version of the generated UUID sequence. +func (u uuid) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +func (u uuid) bytes() []byte { + return u[:] +} diff --git a/weed/remote_storage/azure/azure_storage_client.go b/weed/remote_storage/azure/azure_storage_client.go index bfedd68e2..1a259a3e2 100644 --- a/weed/remote_storage/azure/azure_storage_client.go +++ b/weed/remote_storage/azure/azure_storage_client.go @@ -4,57 +4,18 @@ import ( "context" "fmt" "io" + "net/url" "os" "reflect" - "regexp" - "strings" - "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" ) -const ( - defaultBlockSize = 4 * 1024 * 1024 - defaultConcurrency = 16 -) - -// invalidMetadataChars matches any character that is not valid in Azure metadata keys. -// Azure metadata keys must be valid C# identifiers: letters, digits, and underscores only. -var invalidMetadataChars = regexp.MustCompile(`[^a-zA-Z0-9_]`) - -// sanitizeMetadataKey converts an S3 metadata key to a valid Azure metadata key. -// Azure metadata keys must be valid C# identifiers (letters, digits, underscores only, cannot start with digit). -// To prevent collisions, invalid characters are replaced with their hex representation (_XX_). -// Examples: -// - "my-key" -> "my_2d_key" -// - "my.key" -> "my_2e_key" -// - "key@value" -> "key_40_value" -func sanitizeMetadataKey(key string) string { - // Replace each invalid character with _XX_ where XX is the hex code - result := invalidMetadataChars.ReplaceAllStringFunc(key, func(s string) string { - return fmt.Sprintf("_%02x_", s[0]) - }) - - // Azure metadata keys cannot start with a digit - if len(result) > 0 && result[0] >= '0' && result[0] <= '9' { - result = "_" + result - } - - return result -} - func init() { remote_storage.RemoteStorageClientMakers["azure"] = new(azureRemoteStorageMaker) } @@ -79,35 +40,25 @@ func (s azureRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag } } - // Create credential and client + // Use your Storage account's name and key to create a credential object. credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) if err != nil { - return nil, fmt.Errorf("invalid Azure credential with account name:%s: %w", accountName, err) + return nil, fmt.Errorf("invalid Azure credential with account name:%s: %v", accountName, err) } - serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - azClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, credential, &azblob.ClientOptions{ - ClientOptions: azcore.ClientOptions{ - Retry: policy.RetryOptions{ - MaxRetries: 10, // Increased from default 3 to maintain resiliency similar to old SDK's 20 - TryTimeout: time.Minute, - RetryDelay: 2 * time.Second, - MaxRetryDelay: time.Minute, - }, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to create Azure client: %w", err) - } + // Create a request pipeline that is used to process HTTP(S) requests and responses. + p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - client.client = azClient + // Create an ServiceURL object that wraps the service URL and a request pipeline. + u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName)) + client.serviceURL = azblob.NewServiceURL(*u, p) return client, nil } type azureRemoteStorageClient struct { - conf *remote_pb.RemoteConf - client *azblob.Client + conf *remote_pb.RemoteConf + serviceURL azblob.ServiceURL } var _ = remote_storage.RemoteStorageClient(&azureRemoteStorageClient{}) @@ -115,74 +66,59 @@ var _ = remote_storage.RemoteStorageClient(&azureRemoteStorageClient{}) func (az *azureRemoteStorageClient) Traverse(loc *remote_pb.RemoteStorageLocation, visitFn remote_storage.VisitFunc) (err error) { pathKey := loc.Path[1:] - containerClient := az.client.ServiceClient().NewContainerClient(loc.Bucket) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) - // List blobs with pager - pager := containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{ - Prefix: &pathKey, - }) - - for pager.More() { - resp, err := pager.NextPage(context.Background()) + // List the container that we have created above + for marker := (azblob.Marker{}); marker.NotDone(); { + // Get a result segment starting with the blob indicated by the current Marker. + listBlob, err := containerURL.ListBlobsFlatSegment(context.Background(), marker, azblob.ListBlobsSegmentOptions{ + Prefix: pathKey, + }) if err != nil { - return fmt.Errorf("azure traverse %s%s: %w", loc.Bucket, loc.Path, err) + return fmt.Errorf("azure traverse %s%s: %v", loc.Bucket, loc.Path, err) } - for _, blobItem := range resp.Segment.BlobItems { - if blobItem.Name == nil { - continue - } - key := "/" + *blobItem.Name + // ListBlobs returns the start of the next segment; you MUST use this to get + // the next segment (after processing the current result segment). + marker = listBlob.NextMarker + + // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) + for _, blobInfo := range listBlob.Segment.BlobItems { + key := blobInfo.Name + key = "/" + key dir, name := util.FullPath(key).DirAndName() - - remoteEntry := &filer_pb.RemoteEntry{ + err = visitFn(dir, name, false, &filer_pb.RemoteEntry{ + RemoteMtime: blobInfo.Properties.LastModified.Unix(), + RemoteSize: *blobInfo.Properties.ContentLength, + RemoteETag: string(blobInfo.Properties.Etag), StorageName: az.conf.Name, - } - if blobItem.Properties != nil { - if blobItem.Properties.LastModified != nil { - remoteEntry.RemoteMtime = blobItem.Properties.LastModified.Unix() - } - if blobItem.Properties.ContentLength != nil { - remoteEntry.RemoteSize = *blobItem.Properties.ContentLength - } - if blobItem.Properties.ETag != nil { - remoteEntry.RemoteETag = string(*blobItem.Properties.ETag) - } - } - - err = visitFn(dir, name, false, remoteEntry) + }) if err != nil { - return fmt.Errorf("azure processing %s%s: %w", loc.Bucket, loc.Path, err) + return fmt.Errorf("azure processing %s%s: %v", loc.Bucket, loc.Path, err) } } } return } - func (az *azureRemoteStorageClient) ReadFile(loc *remote_pb.RemoteStorageLocation, offset int64, size int64) (data []byte, err error) { key := loc.Path[1:] - blobClient := az.client.ServiceClient().NewContainerClient(loc.Bucket).NewBlockBlobClient(key) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) + blobURL := containerURL.NewBlockBlobURL(key) - count := size - if count == 0 { - count = blob.CountToEnd + downloadResponse, readErr := blobURL.Download(context.Background(), offset, size, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + if readErr != nil { + return nil, readErr } - downloadResp, err := blobClient.DownloadStream(context.Background(), &blob.DownloadStreamOptions{ - Range: blob.HTTPRange{ - Offset: offset, - Count: count, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to download file %s%s: %w", loc.Bucket, loc.Path, err) - } - defer downloadResp.Body.Close() + // NOTE: automatically retries are performed if the connection fails + bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20}) + defer bodyStream.Close() + + data, err = io.ReadAll(bodyStream) - data, err = io.ReadAll(downloadResp.Body) if err != nil { - return nil, fmt.Errorf("failed to read download stream %s%s: %w", loc.Bucket, loc.Path, err) + return nil, fmt.Errorf("failed to download file %s%s: %v", loc.Bucket, loc.Path, err) } return @@ -199,68 +135,59 @@ func (az *azureRemoteStorageClient) RemoveDirectory(loc *remote_pb.RemoteStorage func (az *azureRemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, entry *filer_pb.Entry, reader io.Reader) (remoteEntry *filer_pb.RemoteEntry, err error) { key := loc.Path[1:] - blobClient := az.client.ServiceClient().NewContainerClient(loc.Bucket).NewBlockBlobClient(key) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) + blobURL := containerURL.NewBlockBlobURL(key) - // Upload from reader - metadata := toMetadata(entry.Extended) - httpHeaders := &blob.HTTPHeaders{} - if entry.Attributes != nil && entry.Attributes.Mime != "" { - httpHeaders.BlobContentType = &entry.Attributes.Mime + readerAt, ok := reader.(io.ReaderAt) + if !ok { + return nil, fmt.Errorf("unexpected reader: readerAt expected") + } + fileSize := int64(filer.FileSize(entry)) + + _, err = uploadReaderAtToBlockBlob(context.Background(), readerAt, fileSize, blobURL, azblob.UploadToBlockBlobOptions{ + BlockSize: 4 * 1024 * 1024, + Parallelism: 16}) + if err != nil { + return nil, fmt.Errorf("azure upload to %s%s: %v", loc.Bucket, loc.Path, err) } - _, err = blobClient.UploadStream(context.Background(), reader, &blockblob.UploadStreamOptions{ - BlockSize: defaultBlockSize, - Concurrency: defaultConcurrency, - HTTPHeaders: httpHeaders, - Metadata: metadata, - }) - if err != nil { - return nil, fmt.Errorf("azure upload to %s%s: %w", loc.Bucket, loc.Path, err) + metadata := toMetadata(entry.Extended) + if len(metadata) > 0 { + _, err = blobURL.SetMetadata(context.Background(), metadata, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if err != nil { + return nil, fmt.Errorf("azure set metadata on %s%s: %v", loc.Bucket, loc.Path, err) + } } // read back the remote entry return az.readFileRemoteEntry(loc) + } func (az *azureRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) { key := loc.Path[1:] - blobClient := az.client.ServiceClient().NewContainerClient(loc.Bucket).NewBlockBlobClient(key) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) + blobURL := containerURL.NewBlockBlobURL(key) + + attr, err := blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - props, err := blobClient.GetProperties(context.Background(), nil) if err != nil { return nil, err } - remoteEntry := &filer_pb.RemoteEntry{ + return &filer_pb.RemoteEntry{ + RemoteMtime: attr.LastModified().Unix(), + RemoteSize: attr.ContentLength(), + RemoteETag: string(attr.ETag()), StorageName: az.conf.Name, - } + }, nil - if props.LastModified != nil { - remoteEntry.RemoteMtime = props.LastModified.Unix() - } - if props.ContentLength != nil { - remoteEntry.RemoteSize = *props.ContentLength - } - if props.ETag != nil { - remoteEntry.RemoteETag = string(*props.ETag) - } - - return remoteEntry, nil } -func toMetadata(attributes map[string][]byte) map[string]*string { - metadata := make(map[string]*string) +func toMetadata(attributes map[string][]byte) map[string]string { + metadata := make(map[string]string) for k, v := range attributes { - if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { - // S3 stores metadata keys in lowercase; normalize for consistency. - key := strings.ToLower(k[len(s3_constants.AmzUserMetaPrefix):]) - - // Sanitize key to prevent collisions and ensure Azure compliance - key = sanitizeMetadataKey(key) - - val := string(v) - metadata[key] = &val - } + metadata[k] = string(v) } return metadata } @@ -272,68 +199,54 @@ func (az *azureRemoteStorageClient) UpdateFileMetadata(loc *remote_pb.RemoteStor metadata := toMetadata(newEntry.Extended) key := loc.Path[1:] - blobClient := az.client.ServiceClient().NewContainerClient(loc.Bucket).NewBlobClient(key) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) - _, err = blobClient.SetMetadata(context.Background(), metadata, nil) + _, err = containerURL.NewBlobURL(key).SetMetadata(context.Background(), metadata, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) return } func (az *azureRemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) (err error) { key := loc.Path[1:] - blobClient := az.client.ServiceClient().NewContainerClient(loc.Bucket).NewBlobClient(key) - - _, err = blobClient.Delete(context.Background(), &blob.DeleteOptions{ - DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude), - }) - if err != nil { - // Make delete idempotent - don't return error if blob doesn't exist - if bloberror.HasCode(err, bloberror.BlobNotFound) { - return nil - } - return fmt.Errorf("azure delete %s%s: %w", loc.Bucket, loc.Path, err) + containerURL := az.serviceURL.NewContainerURL(loc.Bucket) + if _, err = containerURL.NewBlobURL(key).Delete(context.Background(), + azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { + return fmt.Errorf("azure delete %s%s: %v", loc.Bucket, loc.Path, err) } return } func (az *azureRemoteStorageClient) ListBuckets() (buckets []*remote_storage.Bucket, err error) { - pager := az.client.NewListContainersPager(nil) - - for pager.More() { - resp, err := pager.NextPage(context.Background()) - if err != nil { + ctx := context.Background() + for containerMarker := (azblob.Marker{}); containerMarker.NotDone(); { + listContainer, err := az.serviceURL.ListContainersSegment(ctx, containerMarker, azblob.ListContainersSegmentOptions{}) + if err == nil { + for _, v := range listContainer.ContainerItems { + buckets = append(buckets, &remote_storage.Bucket{ + Name: v.Name, + CreatedAt: v.Properties.LastModified, + }) + } + } else { return buckets, err } - - for _, containerItem := range resp.ContainerItems { - if containerItem.Name != nil { - bucket := &remote_storage.Bucket{ - Name: *containerItem.Name, - } - if containerItem.Properties != nil && containerItem.Properties.LastModified != nil { - bucket.CreatedAt = *containerItem.Properties.LastModified - } - buckets = append(buckets, bucket) - } - } + containerMarker = listContainer.NextMarker } return } func (az *azureRemoteStorageClient) CreateBucket(name string) (err error) { - containerClient := az.client.ServiceClient().NewContainerClient(name) - _, err = containerClient.Create(context.Background(), nil) - if err != nil { - return fmt.Errorf("create bucket %s: %w", name, err) + containerURL := az.serviceURL.NewContainerURL(name) + if _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessNone); err != nil { + return fmt.Errorf("create bucket %s: %v", name, err) } return } func (az *azureRemoteStorageClient) DeleteBucket(name string) (err error) { - containerClient := az.client.ServiceClient().NewContainerClient(name) - _, err = containerClient.Delete(context.Background(), nil) - if err != nil { - return fmt.Errorf("delete bucket %s: %w", name, err) + containerURL := az.serviceURL.NewContainerURL(name) + if _, err = containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}); err != nil { + return fmt.Errorf("delete bucket %s: %v", name, err) } return } diff --git a/weed/remote_storage/azure/azure_storage_client_test.go b/weed/remote_storage/azure/azure_storage_client_test.go deleted file mode 100644 index acb7dbd17..000000000 --- a/weed/remote_storage/azure/azure_storage_client_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package azure - -import ( - "bytes" - "fmt" - "os" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestAzureStorageClientBasic tests basic Azure storage client operations -func TestAzureStorageClientBasic(t *testing.T) { - // Skip if credentials not available - accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") - accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY") - testContainer := os.Getenv("AZURE_TEST_CONTAINER") - - if accountName == "" || accountKey == "" { - t.Skip("Skipping Azure storage test: AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") - } - if testContainer == "" { - testContainer = "seaweedfs-test" - } - - // Create client - maker := azureRemoteStorageMaker{} - conf := &remote_pb.RemoteConf{ - Name: "test-azure", - AzureAccountName: accountName, - AzureAccountKey: accountKey, - } - - client, err := maker.Make(conf) - if err != nil { - t.Fatalf("Failed to create Azure client: %v", err) - } - - azClient := client.(*azureRemoteStorageClient) - - // Test 1: Create bucket/container - t.Run("CreateBucket", func(t *testing.T) { - err := azClient.CreateBucket(testContainer) - // Ignore error if bucket already exists - if err != nil && !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) { - t.Fatalf("Failed to create bucket: %v", err) - } - }) - - // Test 2: List buckets - t.Run("ListBuckets", func(t *testing.T) { - buckets, err := azClient.ListBuckets() - if err != nil { - t.Fatalf("Failed to list buckets: %v", err) - } - if len(buckets) == 0 { - t.Log("No buckets found (might be expected)") - } else { - t.Logf("Found %d buckets", len(buckets)) - } - }) - - // Test 3: Write file - testContent := []byte("Hello from SeaweedFS Azure SDK migration test!") - testKey := fmt.Sprintf("/test-file-%d.txt", time.Now().Unix()) - loc := &remote_pb.RemoteStorageLocation{ - Name: "test-azure", - Bucket: testContainer, - Path: testKey, - } - - t.Run("WriteFile", func(t *testing.T) { - entry := &filer_pb.Entry{ - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Mime: "text/plain", - }, - Extended: map[string][]byte{ - "x-amz-meta-test-key": []byte("test-value"), - }, - } - - reader := bytes.NewReader(testContent) - remoteEntry, err := azClient.WriteFile(loc, entry, reader) - if err != nil { - t.Fatalf("Failed to write file: %v", err) - } - if remoteEntry == nil { - t.Fatal("Remote entry is nil") - } - if remoteEntry.RemoteSize != int64(len(testContent)) { - t.Errorf("Expected size %d, got %d", len(testContent), remoteEntry.RemoteSize) - } - }) - - // Test 4: Read file - t.Run("ReadFile", func(t *testing.T) { - data, err := azClient.ReadFile(loc, 0, int64(len(testContent))) - if err != nil { - t.Fatalf("Failed to read file: %v", err) - } - if !bytes.Equal(data, testContent) { - t.Errorf("Content mismatch. Expected: %s, Got: %s", testContent, data) - } - }) - - // Test 5: Read partial file - t.Run("ReadPartialFile", func(t *testing.T) { - data, err := azClient.ReadFile(loc, 0, 5) - if err != nil { - t.Fatalf("Failed to read partial file: %v", err) - } - expected := testContent[:5] - if !bytes.Equal(data, expected) { - t.Errorf("Content mismatch. Expected: %s, Got: %s", expected, data) - } - }) - - // Test 6: Update metadata - t.Run("UpdateMetadata", func(t *testing.T) { - oldEntry := &filer_pb.Entry{ - Extended: map[string][]byte{ - "x-amz-meta-test-key": []byte("test-value"), - }, - } - newEntry := &filer_pb.Entry{ - Extended: map[string][]byte{ - "x-amz-meta-test-key": []byte("test-value"), - "x-amz-meta-new-key": []byte("new-value"), - }, - } - err := azClient.UpdateFileMetadata(loc, oldEntry, newEntry) - if err != nil { - t.Fatalf("Failed to update metadata: %v", err) - } - }) - - // Test 7: Traverse (list objects) - t.Run("Traverse", func(t *testing.T) { - foundFile := false - err := azClient.Traverse(loc, func(dir string, name string, isDir bool, remoteEntry *filer_pb.RemoteEntry) error { - if !isDir && name == testKey[1:] { // Remove leading slash - foundFile = true - } - return nil - }) - if err != nil { - t.Fatalf("Failed to traverse: %v", err) - } - if !foundFile { - t.Log("Test file not found in traverse (might be expected due to path matching)") - } - }) - - // Test 8: Delete file - t.Run("DeleteFile", func(t *testing.T) { - err := azClient.DeleteFile(loc) - if err != nil { - t.Fatalf("Failed to delete file: %v", err) - } - }) - - // Test 9: Verify file deleted (should fail) - t.Run("VerifyDeleted", func(t *testing.T) { - _, err := azClient.ReadFile(loc, 0, 10) - if !bloberror.HasCode(err, bloberror.BlobNotFound) { - t.Errorf("Expected BlobNotFound error, but got: %v", err) - } - }) - - // Clean up: Try to delete the test container - // Comment out if you want to keep the container - /* - t.Run("DeleteBucket", func(t *testing.T) { - err := azClient.DeleteBucket(testContainer) - if err != nil { - t.Logf("Warning: Failed to delete bucket: %v", err) - } - }) - */ -} - -// TestToMetadata tests the metadata conversion function -func TestToMetadata(t *testing.T) { - tests := []struct { - name string - input map[string][]byte - expected map[string]*string - }{ - { - name: "basic metadata", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "key1": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "key2": []byte("value2"), - }, - expected: map[string]*string{ - "key1": stringPtr("value1"), - "key2": stringPtr("value2"), - }, - }, - { - name: "metadata with dashes", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "content-type": []byte("text/plain"), - }, - expected: map[string]*string{ - "content_2d_type": stringPtr("text/plain"), // dash (0x2d) -> _2d_ - }, - }, - { - name: "non-metadata keys ignored", - input: map[string][]byte{ - "some-other-key": []byte("ignored"), - s3_constants.AmzUserMetaPrefix + "included": []byte("included"), - }, - expected: map[string]*string{ - "included": stringPtr("included"), - }, - }, - { - name: "keys starting with digits", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "123key": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "456-test": []byte("value2"), - s3_constants.AmzUserMetaPrefix + "789": []byte("value3"), - }, - expected: map[string]*string{ - "_123key": stringPtr("value1"), // starts with digit -> prefix _ - "_456_2d_test": stringPtr("value2"), // starts with digit AND has dash - "_789": stringPtr("value3"), - }, - }, - { - name: "uppercase and mixed case keys", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "My-Key": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "UPPERCASE": []byte("value2"), - s3_constants.AmzUserMetaPrefix + "MiXeD-CaSe": []byte("value3"), - }, - expected: map[string]*string{ - "my_2d_key": stringPtr("value1"), // lowercase + dash -> _2d_ - "uppercase": stringPtr("value2"), - "mixed_2d_case": stringPtr("value3"), - }, - }, - { - name: "keys with invalid characters", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "my.key": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "key+plus": []byte("value2"), - s3_constants.AmzUserMetaPrefix + "key@symbol": []byte("value3"), - s3_constants.AmzUserMetaPrefix + "key-with.": []byte("value4"), - s3_constants.AmzUserMetaPrefix + "key/slash": []byte("value5"), - }, - expected: map[string]*string{ - "my_2e_key": stringPtr("value1"), // dot (0x2e) -> _2e_ - "key_2b_plus": stringPtr("value2"), // plus (0x2b) -> _2b_ - "key_40_symbol": stringPtr("value3"), // @ (0x40) -> _40_ - "key_2d_with_2e_": stringPtr("value4"), // dash and dot - "key_2f_slash": stringPtr("value5"), // slash (0x2f) -> _2f_ - }, - }, - { - name: "collision prevention", - input: map[string][]byte{ - s3_constants.AmzUserMetaPrefix + "my-key": []byte("value1"), - s3_constants.AmzUserMetaPrefix + "my.key": []byte("value2"), - s3_constants.AmzUserMetaPrefix + "my_key": []byte("value3"), - }, - expected: map[string]*string{ - "my_2d_key": stringPtr("value1"), // dash (0x2d) - "my_2e_key": stringPtr("value2"), // dot (0x2e) - "my_key": stringPtr("value3"), // underscore is valid, no encoding - }, - }, - { - name: "empty input", - input: map[string][]byte{}, - expected: map[string]*string{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := toMetadata(tt.input) - if len(result) != len(tt.expected) { - t.Errorf("Expected %d keys, got %d", len(tt.expected), len(result)) - } - for key, expectedVal := range tt.expected { - if resultVal, ok := result[key]; !ok { - t.Errorf("Expected key %s not found", key) - } else if resultVal == nil || expectedVal == nil { - if resultVal != expectedVal { - t.Errorf("For key %s: expected %v, got %v", key, expectedVal, resultVal) - } - } else if *resultVal != *expectedVal { - t.Errorf("For key %s: expected %s, got %s", key, *expectedVal, *resultVal) - } - } - }) - } -} - -func contains(s, substr string) bool { - return bytes.Contains([]byte(s), []byte(substr)) -} - -func stringPtr(s string) *string { - return &s -} - -// Benchmark tests -func BenchmarkToMetadata(b *testing.B) { - input := map[string][]byte{ - "x-amz-meta-key1": []byte("value1"), - "x-amz-meta-key2": []byte("value2"), - "x-amz-meta-content-type": []byte("text/plain"), - "other-key": []byte("ignored"), - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - toMetadata(input) - } -} - -// Test that the maker implements the interface -func TestAzureRemoteStorageMaker(t *testing.T) { - maker := azureRemoteStorageMaker{} - - if !maker.HasBucket() { - t.Error("Expected HasBucket() to return true") - } - - // Test with missing credentials - conf := &remote_pb.RemoteConf{ - Name: "test", - } - _, err := maker.Make(conf) - if err == nil { - t.Error("Expected error with missing credentials") - } -} - -// Test error cases -func TestAzureStorageClientErrors(t *testing.T) { - // Test with invalid credentials - maker := azureRemoteStorageMaker{} - conf := &remote_pb.RemoteConf{ - Name: "test", - AzureAccountName: "invalid", - AzureAccountKey: "aW52YWxpZGtleQ==", // base64 encoded "invalidkey" - } - - client, err := maker.Make(conf) - if err != nil { - t.Skip("Invalid credentials correctly rejected at client creation") - } - - // If client creation succeeded, operations should fail - azClient := client.(*azureRemoteStorageClient) - loc := &remote_pb.RemoteStorageLocation{ - Name: "test", - Bucket: "nonexistent", - Path: "/test.txt", - } - - // These operations should fail with invalid credentials - _, err = azClient.ReadFile(loc, 0, 10) - if err == nil { - t.Log("Expected error with invalid credentials on ReadFile, but got none (might be cached)") - } -} diff --git a/weed/remote_storage/gcs/gcs_storage_client.go b/weed/remote_storage/gcs/gcs_storage_client.go index 8e8a97a1c..788d4b1e0 100644 --- a/weed/remote_storage/gcs/gcs_storage_client.go +++ b/weed/remote_storage/gcs/gcs_storage_client.go @@ -6,14 +6,13 @@ import ( "io" "os" "reflect" - "strings" "cloud.google.com/go/storage" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/api/iterator" "google.golang.org/api/option" ) @@ -56,7 +55,7 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage. c, err := storage.NewClient(context.Background(), option.WithCredentialsFile(googleApplicationCredentials)) if err != nil { - return nil, fmt.Errorf("failed to create client: %w", err) + return nil, fmt.Errorf("failed to create client: %v", err) } client.client = c @@ -167,9 +166,6 @@ func (gcs *gcsRemoteStorageClient) readFileRemoteEntry(loc *remote_pb.RemoteStor func toMetadata(attributes map[string][]byte) map[string]string { metadata := make(map[string]string) for k, v := range attributes { - if strings.HasPrefix(k, "X-") { - continue - } metadata[k] = string(v) } return metadata diff --git a/weed/remote_storage/remote_storage.go b/weed/remote_storage/remote_storage.go index 93b0b9d84..e4a027199 100644 --- a/weed/remote_storage/remote_storage.go +++ b/weed/remote_storage/remote_storage.go @@ -2,9 +2,9 @@ package remote_storage import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/golang/protobuf/proto" "io" "sort" "strings" diff --git a/weed/remote_storage/s3/aliyun.go b/weed/remote_storage/s3/aliyun.go index 19a0ef4ae..d6923aa6b 100644 --- a/weed/remote_storage/s3/aliyun.go +++ b/weed/remote_storage/s3/aliyun.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" "os" ) @@ -42,7 +42,7 @@ func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create aliyun session: %w", err) + return nil, fmt.Errorf("create aliyun session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/s3/backblaze.go b/weed/remote_storage/s3/backblaze.go index 0f42730c9..09a033f8c 100644 --- a/weed/remote_storage/s3/backblaze.go +++ b/weed/remote_storage/s3/backblaze.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" ) func init() { @@ -27,7 +27,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_st } config := &aws.Config{ Endpoint: aws.String(conf.BackblazeEndpoint), - Region: aws.String(conf.BackblazeRegion), + Region: aws.String("us-west-002"), S3ForcePathStyle: aws.Bool(true), S3DisableContentMD5Validation: aws.Bool(true), } @@ -37,7 +37,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_st sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create backblaze session: %w", err) + return nil, fmt.Errorf("create backblaze session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/s3/baidu.go b/weed/remote_storage/s3/baidu.go index 5c175e74b..23bce409e 100644 --- a/weed/remote_storage/s3/baidu.go +++ b/weed/remote_storage/s3/baidu.go @@ -2,16 +2,14 @@ package s3 import ( "fmt" - "os" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" + "os" ) func init() { @@ -35,7 +33,7 @@ func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag config := &aws.Config{ Endpoint: aws.String(conf.BaiduEndpoint), Region: aws.String(conf.BaiduRegion), - S3ForcePathStyle: aws.Bool(false), + S3ForcePathStyle: aws.Bool(true), S3DisableContentMD5Validation: aws.Bool(true), } if accessKey != "" && secretKey != "" { @@ -44,9 +42,8 @@ func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create baidu session: %w", err) + return nil, fmt.Errorf("create baidu session: %v", err) } - sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) return client, nil diff --git a/weed/remote_storage/s3/contabo.go b/weed/remote_storage/s3/contabo.go index 8147350e9..2e85422dc 100644 --- a/weed/remote_storage/s3/contabo.go +++ b/weed/remote_storage/s3/contabo.go @@ -8,9 +8,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -43,7 +43,7 @@ func (s ContaboRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create contabo session: %w", err) + return nil, fmt.Errorf("create contabo session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/s3/filebase.go b/weed/remote_storage/s3/filebase.go index ed8cfb409..23787e0e2 100644 --- a/weed/remote_storage/s3/filebase.go +++ b/weed/remote_storage/s3/filebase.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws/session" v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" "os" ) @@ -43,7 +43,7 @@ func (s FilebaseRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_sto sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create filebase session: %w", err) + return nil, fmt.Errorf("create filebase session: %v", err) } sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) sess.Handlers.Build.PushFront(skipSha256PayloadSigning) diff --git a/weed/remote_storage/s3/s3_storage_client.go b/weed/remote_storage/s3/s3_storage_client.go index 280a856b0..b3c3cb3aa 100644 --- a/weed/remote_storage/s3/s3_storage_client.go +++ b/weed/remote_storage/s3/s3_storage_client.go @@ -2,10 +2,6 @@ package s3 import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/version" - "io" - "reflect" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" @@ -14,11 +10,13 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "reflect" ) func init() { @@ -48,13 +46,13 @@ func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.R sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create aws session: %w", err) + return nil, fmt.Errorf("create aws session: %v", err) } if conf.S3V4Signature { sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) } sess.Handlers.Build.PushBack(func(r *request.Request) { - r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/"+version.VERSION_NUMBER) + r.HTTPRequest.Header.Set("User-Agent", "SeaweedFS/"+util.VERSION_NUMBER) }) sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) @@ -108,10 +106,10 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation return true }) if listErr != nil { - err = fmt.Errorf("list %v: %w", remote, listErr) + err = fmt.Errorf("list %v: %v", remote, listErr) } if localErr != nil { - err = fmt.Errorf("process %v: %w", remote, localErr) + err = fmt.Errorf("process %v: %v", remote, localErr) } } return @@ -162,16 +160,13 @@ func (s *s3RemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, // process tagging tags := "" - var awsTags *string - // openstack swift doesn't support s3 object tagging - if s.conf.S3SupportTagging { + if s.supportTagging { for k, v := range entry.Extended { if len(tags) > 0 { tags = tags + "&" } tags = tags + k + "=" + string(v) } - awsTags = aws.String(tags) } // Upload the file to S3. @@ -179,7 +174,7 @@ func (s *s3RemoteStorageClient) WriteFile(loc *remote_pb.RemoteStorageLocation, Bucket: aws.String(loc.Bucket), Key: aws.String(loc.Path[1:]), Body: reader, - Tagging: awsTags, + Tagging: aws.String(tags), StorageClass: aws.String(s.conf.S3StorageClass), }) @@ -252,7 +247,7 @@ func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation) func (s *s3RemoteStorageClient) ListBuckets() (buckets []*remote_storage.Bucket, err error) { resp, err := s.conn.ListBuckets(&s3.ListBucketsInput{}) if err != nil { - return nil, fmt.Errorf("list buckets: %w", err) + return nil, fmt.Errorf("list buckets: %v", err) } for _, b := range resp.Buckets { buckets = append(buckets, &remote_storage.Bucket{ diff --git a/weed/remote_storage/s3/storj.go b/weed/remote_storage/s3/storj.go index dd2fead56..2de7ad357 100644 --- a/weed/remote_storage/s3/storj.go +++ b/weed/remote_storage/s3/storj.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" "os" ) @@ -42,7 +42,7 @@ func (s StorjRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create storj session: %w", err) + return nil, fmt.Errorf("create storj session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/s3/tencent.go b/weed/remote_storage/s3/tencent.go index d010b03b9..ab027a1f4 100644 --- a/weed/remote_storage/s3/tencent.go +++ b/weed/remote_storage/s3/tencent.go @@ -6,9 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" "os" ) @@ -42,7 +42,7 @@ func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create tencent session: %w", err) + return nil, fmt.Errorf("create tencent session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/s3/wasabi.go b/weed/remote_storage/s3/wasabi.go index 8d330a29f..64dd0bbf2 100644 --- a/weed/remote_storage/s3/wasabi.go +++ b/weed/remote_storage/s3/wasabi.go @@ -7,9 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/remote_storage" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/remote_storage" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -42,7 +42,7 @@ func (s WasabiRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora sess, err := session.NewSession(config) if err != nil { - return nil, fmt.Errorf("create wasabi session: %w", err) + return nil, fmt.Errorf("create wasabi session: %v", err) } sess.Handlers.Build.PushFront(skipSha256PayloadSigning) client.conn = s3.New(sess) diff --git a/weed/remote_storage/track_sync_offset.go b/weed/remote_storage/track_sync_offset.go index 38cb7bd24..25ac0d340 100644 --- a/weed/remote_storage/track_sync_offset.go +++ b/weed/remote_storage/track_sync_offset.go @@ -3,9 +3,9 @@ package remote_storage import ( "context" "errors" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" ) @@ -17,7 +17,7 @@ func GetSyncOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, dir s dirHash := uint32(util.HashStringToLong(dir)) - readErr = pb.WithFilerClient(false, 0, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + readErr = pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { syncKey := []byte(SyncKeyPrefix + "____") util.Uint32toBytes(syncKey[len(SyncKeyPrefix):len(SyncKeyPrefix)+4], dirHash) @@ -46,7 +46,7 @@ func SetSyncOffset(grpcDialOption grpc.DialOption, filer pb.ServerAddress, dir s dirHash := uint32(util.HashStringToLong(dir)) - return pb.WithFilerClient(false, 0, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { syncKey := []byte(SyncKeyPrefix + "____") util.Uint32toBytes(syncKey[len(SyncKeyPrefix):len(SyncKeyPrefix)+4], dirHash) diff --git a/weed/remote_storage/traverse_bfs.go b/weed/remote_storage/traverse_bfs.go new file mode 100644 index 000000000..a73a1d5fd --- /dev/null +++ b/weed/remote_storage/traverse_bfs.go @@ -0,0 +1,62 @@ +package remote_storage + +import ( + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "sync" + "time" +) + +type ListDirectoryFunc func(parentDir util.FullPath, visitFn VisitFunc) error + +func TraverseBfs(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc) (err error) { + K := 5 + + var dirQueueWg sync.WaitGroup + dirQueue := util.NewQueue() + dirQueueWg.Add(1) + dirQueue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := dirQueue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(util.FullPath) + processErr := processOneDirectory(listDirFn, dir, visitFn, dirQueue, &dirQueueWg) + if processErr != nil { + err = processErr + } + dirQueueWg.Done() + } + }() + } + + dirQueueWg.Wait() + isTerminating = true + return + +} + +func processOneDirectory(listDirFn ListDirectoryFunc, parentPath util.FullPath, visitFn VisitFunc, dirQueue *util.Queue, dirQueueWg *sync.WaitGroup) error { + + return listDirFn(parentPath, func(dir string, name string, isDirectory bool, remoteEntry *filer_pb.RemoteEntry) error { + if err := visitFn(dir, name, isDirectory, remoteEntry); err != nil { + return err + } + if !isDirectory { + return nil + } + dirQueueWg.Add(1) + dirQueue.Enqueue(parentPath.Child(name)) + return nil + }) + +} diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go index 57c206e3e..f135e6210 100644 --- a/weed/replication/repl_util/replication_util.go +++ b/weed/replication/repl_util/replication_util.go @@ -1,19 +1,17 @@ package repl_util import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) -func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerSource *source.FilerSource, writeFunc func(data []byte) error) error { +func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error { - for x := chunkViews.Front(); x != nil; x = x.Next { - chunk := x.Value + for _, chunk := range chunkViews { - fileUrls, err := filerSource.LookupFileId(context.Background(), chunk.FileId) + fileUrls, err := filerSource.LookupFileId(chunk.FileId) if err != nil { return err } @@ -22,7 +20,7 @@ func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerS var shouldRetry bool for _, fileUrl := range fileUrls { - shouldRetry, err = util_http.ReadUrlAsStream(context.Background(), fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.OffsetInChunk, int(chunk.ViewSize), func(data []byte) { + shouldRetry, err = util.ReadUrlAsStream(fileUrl, chunk.CipherKey, chunk.IsGzipped, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { writeErr = writeFunc(data) }) if err != nil { diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 654725725..eaab2c13e 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -3,22 +3,21 @@ package replication import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb" "google.golang.org/grpc" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type Replicator struct { - sink sink.ReplicationSink - source *source.FilerSource - excludeDirs []string + sink sink.ReplicationSink + source *source.FilerSource } func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { @@ -29,9 +28,8 @@ func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSin dataSink.SetSourceFiler(source) return &Replicator{ - sink: dataSink, - source: source, - excludeDirs: sourceConfig.GetStringSlice(configPrefix + "excludeDirectories"), + sink: dataSink, + source: source, } } @@ -43,13 +41,6 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - for _, excludeDir := range r.excludeDirs { - if strings.HasPrefix(key, excludeDir) { - glog.V(4).Infof("skipping %v of exclude dir %v", key, excludeDir) - return nil - } - } - var dateKey string if r.sink.IsIncremental() { var mTime int64 @@ -84,7 +75,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures) if err != nil { - return fmt.Errorf("delete old entry %v: %w", key, err) + return fmt.Errorf("delete old entry %v: %v", key, err) } glog.V(4).Infof("creating missing %v", key) @@ -92,7 +83,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p } func ReadFilerSignature(grpcDialOption grpc.DialOption, filer pb.ServerAddress) (filerSignature int32, readErr error) { - if readErr = pb.WithFilerClient(false, 0, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if readErr = pb.WithFilerClient(false, filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { if resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}); err != nil { return fmt.Errorf("GetFilerConfiguration %s: %v", filer, err) } else { diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index b0e40e1a7..3097ef439 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -3,31 +3,22 @@ package azuresink import ( "bytes" "context" - "errors" "fmt" - "net/http" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" + "net/url" "strings" - "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/repl_util" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type AzureSink struct { - client *azblob.Client + containerURL azblob.ContainerURL container string dir string filerSource *source.FilerSource @@ -68,28 +59,20 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e g.container = container g.dir = dir - // Create credential and client + // Use your Storage account's name and key to create a credential object. credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) if err != nil { - return fmt.Errorf("failed to create Azure credential with account name:%s: %w", accountName, err) + glog.Fatalf("failed to create Azure credential with account name:%s: %v", accountName, err) } - serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - client, err := azblob.NewClientWithSharedKeyCredential(serviceURL, credential, &azblob.ClientOptions{ - ClientOptions: azcore.ClientOptions{ - Retry: policy.RetryOptions{ - MaxRetries: 10, // Increased from default 3 for replication sink resiliency - TryTimeout: time.Minute, - RetryDelay: 2 * time.Second, - MaxRetryDelay: time.Minute, - }, - }, - }) - if err != nil { - return fmt.Errorf("failed to create Azure client: %w", err) - } + // Create a request pipeline that is used to process HTTP(S) requests and responses. + p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - g.client = client + // Create an ServiceURL object that wraps the service URL and a request pipeline. + u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName)) + serviceURL := azblob.NewServiceURL(*u, p) + + g.containerURL = serviceURL.NewContainerURL(g.container) return nil } @@ -102,19 +85,13 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo key = key + "/" } - blobClient := g.client.ServiceClient().NewContainerClient(g.container).NewBlobClient(key) - _, err := blobClient.Delete(context.Background(), &blob.DeleteOptions{ - DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude), - }) - if err != nil { - // Make delete idempotent - don't return error if blob doesn't exist - if bloberror.HasCode(err, bloberror.BlobNotFound) { - return nil - } - return fmt.Errorf("azure delete %s/%s: %w", g.container, key, err) + if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(), + azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { + return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } return nil + } func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { @@ -126,52 +103,28 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [] } totalSize := filer.FileSize(entry) - chunkViews := filer.ViewFromChunks(context.Background(), g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize)) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - // Create append blob client - appendBlobClient := g.client.ServiceClient().NewContainerClient(g.container).NewAppendBlobClient(key) - - // Create blob with access conditions - accessConditions := &blob.AccessConditions{} - if entry.Attributes != nil && entry.Attributes.Mtime > 0 { - modifiedTime := time.Unix(entry.Attributes.Mtime, 0) - accessConditions.ModifiedAccessConditions = &blob.ModifiedAccessConditions{ - IfUnmodifiedSince: &modifiedTime, - } - } - - _, err := appendBlobClient.Create(context.Background(), &appendblob.CreateOptions{ - AccessConditions: accessConditions, - }) + // Create a URL that references a to-be-created blob in your + // Azure Storage account's container. + appendBlobURL := g.containerURL.NewAppendBlobURL(key) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.BlobTagsMap{}, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) if err != nil { - if bloberror.HasCode(err, bloberror.BlobAlreadyExists) { - // Blob already exists, which is fine for an append blob - we can append to it - } else { - // Check if this is a precondition failed error (HTTP 412) - var respErr *azcore.ResponseError - if ok := errors.As(err, &respErr); ok && respErr.StatusCode == http.StatusPreconditionFailed { - glog.V(0).Infof("skip overwriting %s/%s: precondition failed", g.container, key) - return nil - } - return fmt.Errorf("azure create append blob %s/%s: %w", g.container, key, err) - } + return err } writeFunc := func(data []byte) error { - _, writeErr := appendBlobClient.AppendBlock(context.Background(), streaming.NopCloser(bytes.NewReader(data)), &appendblob.AppendBlockOptions{}) + _, writeErr := appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}) return writeErr } - if len(entry.Content) > 0 { - return writeFunc(entry.Content) - } - if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { return err } return nil + } func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { diff --git a/weed/replication/sink/azuresink/azure_sink_test.go b/weed/replication/sink/azuresink/azure_sink_test.go deleted file mode 100644 index e139086e6..000000000 --- a/weed/replication/sink/azuresink/azure_sink_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package azuresink - -import ( - "os" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -// MockConfiguration for testing -type mockConfiguration struct { - values map[string]interface{} -} - -func newMockConfiguration() *mockConfiguration { - return &mockConfiguration{ - values: make(map[string]interface{}), - } -} - -func (m *mockConfiguration) GetString(key string) string { - if v, ok := m.values[key]; ok { - return v.(string) - } - return "" -} - -func (m *mockConfiguration) GetBool(key string) bool { - if v, ok := m.values[key]; ok { - return v.(bool) - } - return false -} - -func (m *mockConfiguration) GetInt(key string) int { - if v, ok := m.values[key]; ok { - return v.(int) - } - return 0 -} - -func (m *mockConfiguration) GetInt64(key string) int64 { - if v, ok := m.values[key]; ok { - return v.(int64) - } - return 0 -} - -func (m *mockConfiguration) GetFloat64(key string) float64 { - if v, ok := m.values[key]; ok { - return v.(float64) - } - return 0.0 -} - -func (m *mockConfiguration) GetStringSlice(key string) []string { - if v, ok := m.values[key]; ok { - return v.([]string) - } - return nil -} - -func (m *mockConfiguration) SetDefault(key string, value interface{}) { - if _, exists := m.values[key]; !exists { - m.values[key] = value - } -} - -// Test the AzureSink interface implementation -func TestAzureSinkInterface(t *testing.T) { - sink := &AzureSink{} - - if sink.GetName() != "azure" { - t.Errorf("Expected name 'azure', got '%s'", sink.GetName()) - } - - // Test directory setting - sink.dir = "/test/dir" - if sink.GetSinkToDirectory() != "/test/dir" { - t.Errorf("Expected directory '/test/dir', got '%s'", sink.GetSinkToDirectory()) - } - - // Test incremental setting - sink.isIncremental = true - if !sink.IsIncremental() { - t.Error("Expected isIncremental to be true") - } -} - -// Test Azure sink initialization -func TestAzureSinkInitialization(t *testing.T) { - accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") - accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY") - testContainer := os.Getenv("AZURE_TEST_CONTAINER") - - if accountName == "" || accountKey == "" { - t.Skip("Skipping Azure sink test: AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") - } - if testContainer == "" { - testContainer = "seaweedfs-test" - } - - sink := &AzureSink{} - - err := sink.initialize(accountName, accountKey, testContainer, "/test") - if err != nil { - t.Fatalf("Failed to initialize Azure sink: %v", err) - } - - if sink.container != testContainer { - t.Errorf("Expected container '%s', got '%s'", testContainer, sink.container) - } - - if sink.dir != "/test" { - t.Errorf("Expected dir '/test', got '%s'", sink.dir) - } - - if sink.client == nil { - t.Error("Expected client to be initialized") - } -} - -// Test configuration-based initialization -func TestAzureSinkInitializeFromConfig(t *testing.T) { - accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") - accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY") - testContainer := os.Getenv("AZURE_TEST_CONTAINER") - - if accountName == "" || accountKey == "" { - t.Skip("Skipping Azure sink config test: AZURE_STORAGE_ACCOUNT or AZURE_STORAGE_ACCESS_KEY not set") - } - if testContainer == "" { - testContainer = "seaweedfs-test" - } - - config := newMockConfiguration() - config.values["azure.account_name"] = accountName - config.values["azure.account_key"] = accountKey - config.values["azure.container"] = testContainer - config.values["azure.directory"] = "/test" - config.values["azure.is_incremental"] = true - - sink := &AzureSink{} - err := sink.Initialize(config, "azure.") - if err != nil { - t.Fatalf("Failed to initialize from config: %v", err) - } - - if !sink.IsIncremental() { - t.Error("Expected incremental to be true") - } -} - -// Test cleanKey function -func TestCleanKey(t *testing.T) { - tests := []struct { - input string - expected string - }{ - {"/test/file.txt", "test/file.txt"}, - {"test/file.txt", "test/file.txt"}, - {"/", ""}, - {"", ""}, - {"/a/b/c", "a/b/c"}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - result := cleanKey(tt.input) - if result != tt.expected { - t.Errorf("cleanKey(%q) = %q, want %q", tt.input, result, tt.expected) - } - }) - } -} - -// Test entry operations (requires valid credentials) -func TestAzureSinkEntryOperations(t *testing.T) { - accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") - accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY") - testContainer := os.Getenv("AZURE_TEST_CONTAINER") - - if accountName == "" || accountKey == "" { - t.Skip("Skipping Azure sink entry test: credentials not set") - } - if testContainer == "" { - testContainer = "seaweedfs-test" - } - - sink := &AzureSink{} - err := sink.initialize(accountName, accountKey, testContainer, "/test") - if err != nil { - t.Fatalf("Failed to initialize: %v", err) - } - - // Test CreateEntry with directory (should be no-op) - t.Run("CreateDirectory", func(t *testing.T) { - entry := &filer_pb.Entry{ - IsDirectory: true, - } - err := sink.CreateEntry("/test/dir", entry, nil) - if err != nil { - t.Errorf("CreateEntry for directory should not error: %v", err) - } - }) - - // Test CreateEntry with file - testKey := "/test-sink-file-" + time.Now().Format("20060102-150405") + ".txt" - t.Run("CreateFile", func(t *testing.T) { - entry := &filer_pb.Entry{ - IsDirectory: false, - Content: []byte("Test content for Azure sink"), - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - }, - } - err := sink.CreateEntry(testKey, entry, nil) - if err != nil { - t.Fatalf("Failed to create entry: %v", err) - } - }) - - // Test UpdateEntry - t.Run("UpdateEntry", func(t *testing.T) { - oldEntry := &filer_pb.Entry{ - Content: []byte("Old content"), - } - newEntry := &filer_pb.Entry{ - Content: []byte("New content for update test"), - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - }, - } - found, err := sink.UpdateEntry(testKey, oldEntry, "/test", newEntry, false, nil) - if err != nil { - t.Fatalf("Failed to update entry: %v", err) - } - if !found { - t.Error("Expected found to be true") - } - }) - - // Test DeleteEntry - t.Run("DeleteFile", func(t *testing.T) { - err := sink.DeleteEntry(testKey, false, false, nil) - if err != nil { - t.Fatalf("Failed to delete entry: %v", err) - } - }) - - // Test DeleteEntry with directory marker - testDirKey := "/test-dir-" + time.Now().Format("20060102-150405") - t.Run("DeleteDirectory", func(t *testing.T) { - // First create a directory marker - entry := &filer_pb.Entry{ - IsDirectory: false, - Content: []byte(""), - } - err := sink.CreateEntry(testDirKey+"/", entry, nil) - if err != nil { - t.Logf("Warning: Failed to create directory marker: %v", err) - } - - // Then delete it - err = sink.DeleteEntry(testDirKey, true, false, nil) - if err != nil { - t.Logf("Warning: Failed to delete directory: %v", err) - } - }) -} - -// Test CreateEntry with precondition (IfUnmodifiedSince) -func TestAzureSinkPrecondition(t *testing.T) { - accountName := os.Getenv("AZURE_STORAGE_ACCOUNT") - accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY") - testContainer := os.Getenv("AZURE_TEST_CONTAINER") - - if accountName == "" || accountKey == "" { - t.Skip("Skipping Azure sink precondition test: credentials not set") - } - if testContainer == "" { - testContainer = "seaweedfs-test" - } - - sink := &AzureSink{} - err := sink.initialize(accountName, accountKey, testContainer, "/test") - if err != nil { - t.Fatalf("Failed to initialize: %v", err) - } - - testKey := "/test-precondition-" + time.Now().Format("20060102-150405") + ".txt" - - // Create initial entry - entry := &filer_pb.Entry{ - Content: []byte("Initial content"), - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - }, - } - err = sink.CreateEntry(testKey, entry, nil) - if err != nil { - t.Fatalf("Failed to create initial entry: %v", err) - } - - // Try to create again with old mtime (should be skipped due to precondition) - oldEntry := &filer_pb.Entry{ - Content: []byte("Should not overwrite"), - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Add(-1 * time.Hour).Unix(), // Old timestamp - }, - } - err = sink.CreateEntry(testKey, oldEntry, nil) - // Should either succeed (skip) or fail with precondition error - if err != nil { - t.Logf("Create with old mtime: %v (expected)", err) - } - - // Clean up - sink.DeleteEntry(testKey, false, false, nil) -} - -// Benchmark tests -func BenchmarkCleanKey(b *testing.B) { - keys := []string{ - "/simple/path.txt", - "no/leading/slash.txt", - "/", - "/complex/path/with/many/segments/file.txt", - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - cleanKey(keys[i%len(keys)]) - } -} - -// Test error handling with invalid credentials -func TestAzureSinkInvalidCredentials(t *testing.T) { - sink := &AzureSink{} - - err := sink.initialize("invalid-account", "aW52YWxpZGtleQ==", "test-container", "/test") - if err != nil { - t.Skip("Invalid credentials correctly rejected at initialization") - } - - // If initialization succeeded, operations should fail - entry := &filer_pb.Entry{ - Content: []byte("test"), - } - err = sink.CreateEntry("/test.txt", entry, nil) - if err == nil { - t.Log("Expected error with invalid credentials, but got none (might be cached)") - } -} diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 90f77f441..4143c039d 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -2,15 +2,15 @@ package B2Sink import ( "context" - "github.com/seaweedfs/seaweedfs/weed/replication/repl_util" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "strings" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/kurin/blazer/b2" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" ) type B2Sink struct { @@ -79,14 +79,7 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, targetObject := bucket.Object(key) - err = targetObject.Delete(context.Background()) - if err != nil { - // b2_download_file_by_name: 404: File with such name does not exist. - if strings.Contains(err.Error(), ": 404:") { - return nil - } - } - return err + return targetObject.Delete(context.Background()) } @@ -99,7 +92,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int } totalSize := filer.FileSize(entry) - chunkViews := filer.ViewFromChunks(context.Background(), g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize)) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { @@ -108,16 +101,13 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int targetObject := bucket.Object(key) writer := targetObject.NewWriter(context.Background()) - defer writer.Close() writeFunc := func(data []byte) error { _, writeErr := writer.Write(data) return writeErr } - if len(entry.Content) > 0 { - return writeFunc(entry.Content) - } + defer writer.Close() if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { return err diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 1f257941f..825d2af95 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -1,20 +1,18 @@ package filersink import ( + "context" "fmt" - "github.com/schollz/progressbar/v3" - "github.com/seaweedfs/seaweedfs/weed/util" - "os" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/util" "sync" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" ) func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) { @@ -22,42 +20,20 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path st return } - // a simple progress bar. Not ideal. Fix me. - var bar *progressbar.ProgressBar - if len(sourceChunks) > 1 { - name := filepath.Base(path) - bar = progressbar.NewOptions64(int64(len(sourceChunks)), - progressbar.OptionClearOnFinish(), - progressbar.OptionOnCompletion(func() { - fmt.Fprint(os.Stderr, "\n") - }), - progressbar.OptionFullWidth(), - progressbar.OptionSetDescription(name), - ) - } - replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) var wg sync.WaitGroup for chunkIndex, sourceChunk := range sourceChunks { wg.Add(1) - index, source := chunkIndex, sourceChunk - fs.executor.Execute(func() { + go func(chunk *filer_pb.FileChunk, index int) { defer wg.Done() - util.Retry("replicate chunks", func() error { - replicatedChunk, e := fs.replicateOneChunk(source, path) - if e != nil { - err = e - return e - } - replicatedChunks[index] = replicatedChunk - if bar != nil { - bar.Add(1) - } - err = nil - return nil - }) - }) + replicatedChunk, e := fs.replicateOneChunk(chunk, path) + if e != nil { + err = e + return + } + replicatedChunks[index] = replicatedChunk + }(sourceChunk, chunkIndex) } wg.Wait() @@ -75,7 +51,7 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path str FileId: fileId, Offset: sourceChunk.Offset, Size: sourceChunk.Size, - ModifiedTsNs: sourceChunk.ModifiedTsNs, + Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), CipherKey: sourceChunk.CipherKey, @@ -89,49 +65,64 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) if err != nil { return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } - defer util_http.CloseResponse(resp) + defer util.CloseResponse(resp) - uploader, err := operation.NewUploader() - if err != nil { - glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) - return "", fmt.Errorf("upload data: %w", err) + var host string + var auth security.EncodedJwt + + if err := fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + return util.Retry("assignVolume", func() error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: fs.replication, + Collection: fs.collection, + TtlSec: fs.ttlSec, + DataCenter: fs.dataCenter, + DiskType: fs.diskType, + Path: path, + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, host, auth = resp.FileId, resp.Location.Url, security.EncodedJwt(resp.Auth) + + return nil + }) + }); err != nil { + return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err) } - fileId, uploadResult, err, _ := uploader.UploadWithRetry( - fs, - &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: fs.replication, - Collection: fs.collection, - TtlSec: fs.ttlSec, - DataCenter: fs.dataCenter, - DiskType: fs.diskType, - Path: path, - }, - &operation.UploadOption{ - Filename: filename, - Cipher: false, - IsInputCompressed: "gzip" == header.Get("Content-Encoding"), - MimeType: header.Get("Content-Type"), - PairMap: nil, - }, - func(host, fileId string) string { - fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - if fs.writeChunkByFiler { - fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId) - } - glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) - return fileUrl - }, - resp.Body, - ) + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + if fs.writeChunkByFiler { + fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId) + } + glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) + + // fetch data as is, regardless whether it is encrypted or not + uploadOption := &operation.UploadOption{ + UploadUrl: fileUrl, + Filename: filename, + Cipher: false, + IsInputCompressed: "gzip" == header.Get("Content-Encoding"), + MimeType: header.Get("Content-Type"), + PairMap: nil, + Jwt: auth, + } + uploadResult, err, _ := operation.Upload(resp.Body, uploadOption) if err != nil { - glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) - return "", fmt.Errorf("upload data: %w", err) + glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err) + return "", fmt.Errorf("upload data: %v", err) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v: %v", filename, err) + glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload result: %v", uploadResult.Error) } @@ -142,17 +133,12 @@ var _ = filer_pb.FilerClient(&FilerSink{}) func (fs *FilerSink) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithGrpcClient(streamingMode, fs.signature, func(grpcConnection *grpc.ClientConn) error { + return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, fs.grpcAddress, false, fs.grpcDialOption) + }, fs.grpcAddress, fs.grpcDialOption) } - func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string { return location.Url } - -func (fs *FilerSink) GetDataCenter() string { - return fs.dataCenter -} diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 8b4b0e513..9471409fc 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,20 +3,20 @@ package filersink import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/wdclient" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" "math" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type FilerSink struct { @@ -32,8 +32,6 @@ type FilerSink struct { address string writeChunkByFiler bool isIncremental bool - executor *util.LimitedConcurrentExecutor - signature int32 } func init() { @@ -54,8 +52,6 @@ func (fs *FilerSink) IsIncremental() bool { func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { fs.isIncremental = configuration.GetBool(prefix + "is_incremental") - fs.dataCenter = configuration.GetString(prefix + "dataCenter") - fs.signature = util.RandomInt32() return fs.DoInitialize( "", configuration.GetString(prefix+"grpcAddress"), @@ -86,7 +82,6 @@ func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string, fs.diskType = diskType fs.grpcDialOption = grpcDialOption fs.writeChunkByFiler = writeChunkByFiler - fs.executor = util.NewLimitedConcurrentExecutor(32) return nil } @@ -95,7 +90,7 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo dir, name := util.FullPath(key).DirAndName() glog.V(4).Infof("delete entry: %v", key) - err := filer_pb.Remove(context.Background(), fs, dir, name, deleteIncludeChunks, true, true, true, signatures) + err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures) if err != nil { glog.V(0).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) @@ -114,27 +109,22 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ Directory: dir, Name: name, } - // glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := filer_pb.LookupEntry(context.Background(), client, lookupRequest); err == nil { + glog.V(1).Infof("lookup: %v", lookupRequest) + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if filer.ETag(resp.Entry) == filer.ETag(entry) { glog.V(3).Infof("already replicated %s", key) return nil } - if resp.Entry.Attributes != nil && resp.Entry.Attributes.Mtime >= entry.Attributes.Mtime { - glog.V(3).Infof("skip overwriting %s", key) - return nil - } } - replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, key) if err != nil { // only warning here since the source chunk may have been deleted already glog.Warningf("replicate entry chunks %s: %v", key, err) - return nil } - // glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks) + glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks) request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -142,7 +132,6 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ Name: name, IsDirectory: entry.IsDirectory, Attributes: entry.Attributes, - Extended: entry.Extended, Chunks: replicatedChunks, Content: entry.Content, RemoteEntry: entry.RemoteEntry, @@ -152,7 +141,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ } glog.V(3).Infof("create: %v", request) - if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -175,7 +164,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent } glog.V(4).Infof("lookup entry: %v", request) - resp, err := filer_pb.LookupEntry(context.Background(), client, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -196,26 +185,29 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent // skip if already changed // this usually happens when the messages are not ordered glog.V(2).Infof("late updates %s", key) + } else if filer.ETag(newEntry) == filer.ETag(existingEntry) { + // skip if no change + // this usually happens when retrying the replication + glog.V(3).Infof("already replicated %s", key) } else { // find out what changed - deletedChunks, newChunks, err := compareChunks(context.Background(), filer.LookupFn(fs), oldEntry, newEntry) + deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry) if err != nil { - return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err) + return true, fmt.Errorf("replicte %s compare chunks error: %v", key, err) } // delete the chunks that are deleted from the source if deleteIncludeChunks { // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks - existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks) + existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.Chunks, deletedChunks) } // replicate the chunks that are new in the source replicatedChunks, err := fs.replicateChunks(newChunks, key) if err != nil { - glog.Warningf("replicate entry chunks %s: %v", key, err) - return true, nil + return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } - existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...) + existingEntry.Chunks = append(existingEntry.Chunks, replicatedChunks...) existingEntry.Attributes = newEntry.Attributes existingEntry.Extended = newEntry.Extended existingEntry.HardLinkId = newEntry.HardLinkId @@ -242,12 +234,12 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent }) } -func compareChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) { - aData, aMeta, aErr := filer.ResolveChunkManifest(ctx, lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64) +func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) { + aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks, 0, math.MaxInt64) if aErr != nil { return nil, nil, aErr } - bData, bMeta, bErr := filer.ResolveChunkManifest(ctx, lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64) + bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks, 0, math.MaxInt64) if bErr != nil { return nil, nil, bErr } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 6fe78b21b..6f16595e9 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -3,18 +3,18 @@ package gcssink import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/replication/repl_util" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "os" "cloud.google.com/go/storage" "google.golang.org/api/option" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type GcsSink struct { @@ -97,7 +97,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in } totalSize := filer.FileSize(entry) - chunkViews := filer.ViewFromChunks(context.Background(), g.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize)) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) defer wc.Close() @@ -107,10 +107,6 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []in return writeErr } - if len(entry.Content) > 0 { - return writeFunc(entry.Content) - } - if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { return err } diff --git a/weed/replication/sink/localsink/local_incremental_sink.go b/weed/replication/sink/localsink/local_incremental_sink.go index 6e0585d7b..a1d49e28a 100644 --- a/weed/replication/sink/localsink/local_incremental_sink.go +++ b/weed/replication/sink/localsink/local_incremental_sink.go @@ -1,7 +1,7 @@ package localsink import ( - "github.com/seaweedfs/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/sink" ) type LocalIncSink struct { diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go index 2e962d1d0..0a2109a22 100644 --- a/weed/replication/sink/localsink/local_sink.go +++ b/weed/replication/sink/localsink/local_sink.go @@ -1,15 +1,14 @@ package localsink import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/repl_util" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/util" "os" "path/filepath" "strings" @@ -76,12 +75,12 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa glog.V(4).Infof("Create Entry key: %s", key) totalSize := filer.FileSize(entry) - chunkViews := filer.ViewFromChunks(context.Background(), localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize)) + chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) dir := filepath.Dir(key) if _, err := os.Stat(dir); os.IsNotExist(err) { - glog.V(4).Infof("Create Directory key: %s", dir) + glog.V(4).Infof("Create Direcotry key: %s", dir) if err = os.MkdirAll(dir, 0755); err != nil { return err } @@ -91,33 +90,17 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa return os.Mkdir(key, os.FileMode(entry.Attributes.FileMode)) } - mode := os.FileMode(entry.Attributes.FileMode) - dstFile, err := os.OpenFile(util.ToShortFileName(key), os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + dstFile, err := os.OpenFile(key, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(entry.Attributes.FileMode)) if err != nil { return err } defer dstFile.Close() - fi, err := dstFile.Stat() - if err != nil { - return err - } - if fi.Mode() != mode { - glog.V(4).Infof("Modify file mode: %o -> %o", fi.Mode(), mode) - if err := dstFile.Chmod(mode); err != nil { - return err - } - } - writeFunc := func(data []byte) error { _, writeErr := dstFile.Write(data) return writeErr } - if len(entry.Content) > 0 { - return writeFunc(entry.Content) - } - if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil { return err } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 29e6bbf8c..4ffd09462 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,9 +1,9 @@ package sink import ( - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type ReplicationSink interface { diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 28428545b..a118324c2 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -1,41 +1,34 @@ package S3Sink import ( - "encoding/base64" + "context" "fmt" + "strings" + "sync" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "strconv" - "strings" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/replication/sink" - "github.com/seaweedfs/seaweedfs/weed/replication/source" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" ) type S3Sink struct { - conn s3iface.S3API - filerSource *source.FilerSource - isIncremental bool - keepPartSize bool - s3DisableContentMD5Validation bool - s3ForcePathStyle bool - uploaderConcurrency int - uploaderMaxUploadParts int - uploaderPartSizeMb int - region string - bucket string - dir string - endpoint string - acl string + conn s3iface.S3API + region string + bucket string + dir string + endpoint string + acl string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -55,49 +48,21 @@ func (s3sink *S3Sink) IsIncremental() bool { } func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { - configuration.SetDefault(prefix+"region", "us-east-2") - configuration.SetDefault(prefix+"directory", "/") - configuration.SetDefault(prefix+"keep_part_size", true) - configuration.SetDefault(prefix+"uploader_max_upload_parts", 1000) - configuration.SetDefault(prefix+"uploader_part_size_mb", 8) - configuration.SetDefault(prefix+"uploader_concurrency", 8) - configuration.SetDefault(prefix+"s3_disable_content_md5_validation", true) - configuration.SetDefault(prefix+"s3_force_path_style", true) - s3sink.region = configuration.GetString(prefix + "region") - s3sink.bucket = configuration.GetString(prefix + "bucket") - s3sink.dir = configuration.GetString(prefix + "directory") - s3sink.endpoint = configuration.GetString(prefix + "endpoint") - s3sink.acl = configuration.GetString(prefix + "acl") + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) + glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint")) + glog.V(0).Infof("sink.s3.acl: %v", configuration.GetString(prefix+"acl")) + glog.V(0).Infof("sink.s3.is_incremental: %v", configuration.GetString(prefix+"is_incremental")) s3sink.isIncremental = configuration.GetBool(prefix + "is_incremental") - s3sink.keepPartSize = configuration.GetBool(prefix + "keep_part_size") - s3sink.s3DisableContentMD5Validation = configuration.GetBool(prefix + "s3_disable_content_md5_validation") - s3sink.s3ForcePathStyle = configuration.GetBool(prefix + "s3_force_path_style") - s3sink.uploaderMaxUploadParts = configuration.GetInt(prefix + "uploader_max_upload_parts") - s3sink.uploaderPartSizeMb = configuration.GetInt(prefix + "uploader_part_size") - s3sink.uploaderConcurrency = configuration.GetInt(prefix + "uploader_concurrency") - - glog.V(0).Infof("sink.s3.region: %v", s3sink.region) - glog.V(0).Infof("sink.s3.bucket: %v", s3sink.bucket) - glog.V(0).Infof("sink.s3.directory: %v", s3sink.dir) - glog.V(0).Infof("sink.s3.endpoint: %v", s3sink.endpoint) - glog.V(0).Infof("sink.s3.acl: %v", s3sink.acl) - glog.V(0).Infof("sink.s3.is_incremental: %v", s3sink.isIncremental) - glog.V(0).Infof("sink.s3.s3_disable_content_md5_validation: %v", s3sink.s3DisableContentMD5Validation) - glog.V(0).Infof("sink.s3.s3_force_path_style: %v", s3sink.s3ForcePathStyle) - glog.V(0).Infof("sink.s3.keep_part_size: %v", s3sink.keepPartSize) - if s3sink.uploaderMaxUploadParts > s3manager.MaxUploadParts { - s3sink.uploaderMaxUploadParts = s3manager.MaxUploadParts - glog.Warningf("uploader_max_upload_parts is greater than the maximum number of parts allowed when uploading multiple parts to Amazon S3") - glog.V(0).Infof("sink.s3.uploader_max_upload_parts: %v => %v", s3sink.uploaderMaxUploadParts, s3manager.MaxUploadParts) - } else { - glog.V(0).Infof("sink.s3.uploader_max_upload_parts: %v", s3sink.uploaderMaxUploadParts) - } - glog.V(0).Infof("sink.s3.uploader_part_size_mb: %v", s3sink.uploaderPartSizeMb) - glog.V(0).Infof("sink.s3.uploader_concurrency: %v", s3sink.uploaderConcurrency) - return s3sink.initialize( configuration.GetString(prefix+"aws_access_key_id"), configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"endpoint"), + configuration.GetString(prefix+"acl"), ) } @@ -105,12 +70,18 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) { s3sink.filerSource = s } -func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey string) error { +func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint, acl string) error { + s3sink.region = region + s3sink.bucket = bucket + s3sink.dir = dir + s3sink.endpoint = endpoint + s3sink.acl = acl + config := &aws.Config{ Region: aws.String(s3sink.region), Endpoint: aws.String(s3sink.endpoint), - S3DisableContentMD5Validation: aws.Bool(s3sink.s3DisableContentMD5Validation), - S3ForcePathStyle: aws.Bool(s3sink.s3ForcePathStyle), + S3ForcePathStyle: aws.Bool(true), + S3DisableContentMD5Validation: aws.Bool(true), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") @@ -118,7 +89,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey string) erro sess, err := session.NewSession(config) if err != nil { - return fmt.Errorf("create aws session: %w", err) + return fmt.Errorf("create aws session: %v", err) } s3sink.conn = s3.New(sess) @@ -130,84 +101,52 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b key = cleanKey(key) if isDirectory { - return nil + key = key + "/" } - input := &s3.DeleteObjectInput{ - Bucket: aws.String(s3sink.bucket), - Key: aws.String(key), - } - - result, err := s3sink.conn.DeleteObject(input) - - if err == nil { - glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) - } else { - glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) - } - - return err + return s3sink.deleteObject(key) } -func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) (err error) { +func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) if entry.IsDirectory { return nil } - reader := filer.NewFileReader(s3sink.filerSource, entry) + uploadId, err := s3sink.createMultipartUpload(key, entry) + if err != nil { + return fmt.Errorf("createMultipartUpload: %v", err) + } - // Create an uploader with the session and custom options - uploader := s3manager.NewUploaderWithClient(s3sink.conn, func(u *s3manager.Uploader) { - u.PartSize = int64(s3sink.uploaderPartSizeMb * 1024 * 1024) - u.Concurrency = s3sink.uploaderConcurrency - u.MaxUploadParts = s3sink.uploaderMaxUploadParts - }) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - if s3sink.keepPartSize { - switch chunkCount := len(entry.Chunks); { - case chunkCount > 1: - if firstChunkSize := int64(entry.Chunks[0].Size); firstChunkSize > s3manager.MinUploadPartSize { - uploader.PartSize = firstChunkSize + parts := make([]*s3.CompletedPart, len(chunkViews)) + + var wg sync.WaitGroup + for chunkIndex, chunk := range chunkViews { + partId := chunkIndex + 1 + wg.Add(1) + go func(chunk *filer.ChunkView, index int) { + defer wg.Done() + if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { + err = uploadErr + glog.Errorf("uploadPart: %v", uploadErr) + } else { + parts[index] = part } - default: - uploader.PartSize = 0 - } + }(chunk, chunkIndex) + } + wg.Wait() + + if err != nil { + s3sink.abortMultipartUpload(key, uploadId) + return fmt.Errorf("uploadPart: %v", err) } - doSaveMtime := true - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } else if _, ok := entry.Extended[s3_constants.AmzUserMetaMtime]; ok { - doSaveMtime = false - } - if doSaveMtime { - entry.Extended[s3_constants.AmzUserMetaMtime] = []byte(strconv.FormatInt(entry.Attributes.Mtime, 10)) - } - // process tagging - tags := "" - for k, v := range entry.Extended { - if len(tags) > 0 { - tags = tags + "&" - } - tags = tags + k + "=" + string(v) - } - - // Upload the file to S3. - uploadInput := s3manager.UploadInput{ - Bucket: aws.String(s3sink.bucket), - Key: aws.String(key), - Body: reader, - Tagging: aws.String(tags), - } - if len(entry.Attributes.Md5) > 0 { - uploadInput.ContentMD5 = aws.String(base64.StdEncoding.EncodeToString([]byte(entry.Attributes.Md5))) - } - _, err = uploader.Upload(&uploadInput) - - return err + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go new file mode 100644 index 000000000..7d8932fb0 --- /dev/null +++ b/weed/replication/sink/s3sink/s3_write.go @@ -0,0 +1,178 @@ +package S3Sink + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3sink *S3Sink) deleteObject(key string) error { + input := &s3.DeleteObjectInput{ + Bucket: aws.String(s3sink.bucket), + Key: aws.String(key), + } + + result, err := s3sink.conn.DeleteObject(input) + + if err == nil { + glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) + } else { + glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) + } + + return err + +} + +func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) (uploadId string, err error) { + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(s3sink.bucket), + Key: aws.String(key), + ContentType: aws.String(entry.Attributes.Mime), + } + if s3sink.acl != "" { + input.ACL = aws.String(s3sink.acl) + } + + result, err := s3sink.conn.CreateMultipartUpload(input) + + if err == nil { + glog.V(2).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) + } else { + glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err) + return "", err + } + + return *result.UploadId, err +} + +func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error { + input := &s3.AbortMultipartUploadInput{ + Bucket: aws.String(s3sink.bucket), + Key: aws.String(key), + UploadId: aws.String(uploadId), + } + + result, err := s3sink.conn.AbortMultipartUpload(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case s3.ErrCodeNoSuchUpload: + glog.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error()) + default: + glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error()) + } + return err + } + + glog.V(0).Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result) + + return nil +} + +// To complete multipart upload +func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error { + input := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(s3sink.bucket), + Key: aws.String(key), + UploadId: aws.String(uploadId), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: parts, + }, + } + + result, err := s3sink.conn.CompleteMultipartUpload(input) + if err == nil { + glog.V(2).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) + } else { + glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) + return fmt.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) + } + + return nil +} + +// To upload a part +func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) { + var readSeeker io.ReadSeeker + + readSeeker, err := s3sink.buildReadSeeker(chunk) + if err != nil { + glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) + return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) + } + + input := &s3.UploadPartInput{ + Body: readSeeker, + Bucket: aws.String(s3sink.bucket), + Key: aws.String(key), + PartNumber: aws.Int64(int64(partId)), + UploadId: aws.String(uploadId), + } + + result, err := s3sink.conn.UploadPart(input) + if err == nil { + glog.V(2).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) + } else { + glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err) + } + + part := &s3.CompletedPart{ + ETag: result.ETag, + PartNumber: aws.Int64(int64(partId)), + } + + return part, err +} + +// To upload a part by copying byte range from an existing object as data source +func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySource string, sourceStart, sourceStop int) error { + input := &s3.UploadPartCopyInput{ + Bucket: aws.String(s3sink.bucket), + CopySource: aws.String(fmt.Sprintf("/%s/%s", s3sink.bucket, copySource)), + CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", sourceStart, sourceStop)), + Key: aws.String(key), + PartNumber: aws.Int64(partId), + UploadId: aws.String(uploadId), + } + + result, err := s3sink.conn.UploadPartCopy(input) + if err == nil { + glog.V(0).Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result) + } else { + glog.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err) + } + + return err +} + +func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) { + fileUrls, err := s3sink.filerSource.LookupFileId(chunk.FileId) + if err != nil { + return nil, err + } + buf := make([]byte, chunk.Size) + for _, fileUrl := range fileUrls { + _, err = util.ReadUrl(fileUrl, chunk.CipherKey, chunk.IsGzipped, false, chunk.Offset, int(chunk.Size), buf) + if err != nil { + glog.V(1).Infof("read from %s: %v", fileUrl, err) + } else { + break + } + } + return bytes.NewReader(buf), nil +} diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index fa9a285d9..4108f3821 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -9,13 +9,12 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type ReplicationSource interface { @@ -28,13 +27,9 @@ type FilerSource struct { Dir string address string proxyByFiler bool - dataCenter string - signature int32 } func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { - fs.dataCenter = configuration.GetString(prefix + "dataCenter") - fs.signature = util.RandomInt32() return fs.DoInitialize( "", configuration.GetString(prefix+"grpcAddress"), @@ -55,7 +50,7 @@ func (fs *FilerSource) DoInitialize(address, grpcAddress string, dir string, rea return nil } -func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrls []string, err error) { +func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) @@ -63,7 +58,7 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrls err = fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -76,26 +71,20 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrls }) if err != nil { - glog.V(1).InfofCtx(ctx, "LookupFileId volume id %s: %v", vid, err) + glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err) return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err) } locations := vid2Locations[vid] if locations == nil || len(locations.Locations) == 0 { - glog.V(1).InfofCtx(ctx, "LookupFileId locate volume id %s: %v", vid, err) + glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err) return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) } if !fs.proxyByFiler { for _, loc := range locations.Locations { - fileUrl := fmt.Sprintf("http://%s/%s?readDeleted=true", loc.Url, part) - // Prefer same data center - if fs.dataCenter != "" && fs.dataCenter == loc.DataCenter { - fileUrls = append([]string{fileUrl}, fileUrls...) - } else { - fileUrls = append(fileUrls, fileUrl) - } + fileUrls = append(fileUrls, fmt.Sprintf("http://%s/%s?readDeleted=true", loc.Url, part)) } } else { fileUrls = append(fileUrls, fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, part)) @@ -107,16 +96,16 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrls func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) { if fs.proxyByFiler { - return util_http.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "") + return util.DownloadFile("http://"+fs.address+"/?proxyChunkId="+fileId, "") } - fileUrls, err := fs.LookupFileId(context.Background(), fileId) + fileUrls, err := fs.LookupFileId(fileId) if err != nil { return "", nil, nil, err } for _, fileUrl := range fileUrls { - filename, header, resp, err = util_http.DownloadFile(fileUrl, "") + filename, header, resp, err = util.DownloadFile(fileUrl, "") if err != nil { glog.V(1).Infof("fail to read from %s: %v", fileUrl, err) } else { @@ -131,10 +120,10 @@ var _ = filer_pb.FilerClient(&FilerSource{}) func (fs *FilerSource) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithGrpcClient(streamingMode, fs.signature, func(grpcConnection *grpc.ClientConn) error { + return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, fs.grpcAddress, false, fs.grpcDialOption) + }, fs.grpcAddress, fs.grpcDialOption) } @@ -142,10 +131,6 @@ func (fs *FilerSource) AdjustedUrl(location *filer_pb.Location) string { return location.Url } -func (fs *FilerSource) GetDataCenter() string { - return fs.dataCenter -} - func volumeId(fileId string) string { lastCommaIndex := strings.LastIndex(fileId, ",") if lastCommaIndex > 0 { diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index 5eb42c2aa..08133533f 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -8,10 +8,10 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func init() { @@ -50,7 +50,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que sess, err := session.NewSession(config) if err != nil { - return fmt.Errorf("create aws session: %w", err) + return fmt.Errorf("create aws session: %v", err) } k.svc = sqs.New(sess) @@ -98,11 +98,8 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif key = *keyValue.StringValue text := *result.Messages[0].Body message = &filer_pb.EventNotification{} - err = proto.Unmarshal([]byte(text), message) - if err != nil { - err = fmt.Errorf("unmarshal message from sqs %s: %w", k.queueUrl, err) - return - } + err = proto.UnmarshalText(text, message) + // delete the message _, err = k.svc.DeleteMessage(&sqs.DeleteMessageInput{ QueueUrl: &k.queueUrl, diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index 2e7640af4..ad4031190 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -5,14 +5,14 @@ package sub import ( "context" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" "gocloud.dev/pubsub/rabbitpubsub" - "google.golang.org/protobuf/proto" "net/url" "os" "path" diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index c7509abf2..f7c767d4a 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -6,11 +6,11 @@ import ( "os" "cloud.google.com/go/pubsub" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" "google.golang.org/api/option" - "google.golang.org/protobuf/proto" ) func init() { diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 4f5304cf6..11bd2ffb4 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -8,10 +8,10 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func init() { @@ -135,7 +135,7 @@ func loadProgress(offsetFile string) *KafkaProgress { func (progress *KafkaProgress) saveProgress() error { data, err := json.Marshal(progress) if err != nil { - return fmt.Errorf("failed to marshal progress: %w", err) + return fmt.Errorf("failed to marshal progress: %v", err) } err = util.WriteFile(progress.offsetFile, data, 0640) if err != nil { @@ -146,11 +146,11 @@ func (progress *KafkaProgress) saveProgress() error { return nil } -func (progress *KafkaProgress) setOffset(partition int32, offset int64) error { +func (progress *KafkaProgress) setOffset(parition int32, offset int64) error { progress.Lock() defer progress.Unlock() - progress.PartitionOffsets[partition] = offset + progress.PartitionOffsets[parition] = offset if int(time.Now().Sub(progress.lastSaveTime).Seconds()) > progress.offsetSaveIntervalSeconds { return progress.saveProgress() } diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 4ec214458..d5a910db9 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -1,8 +1,8 @@ package sub import ( - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type NotificationInput interface { diff --git a/weed/s3api/AmazonS3.xsd b/weed/s3api/AmazonS3.xsd index 8a0136b44..8016a6a83 100644 --- a/weed/s3api/AmazonS3.xsd +++ b/weed/s3api/AmazonS3.xsd @@ -525,7 +525,6 @@ - diff --git a/weed/s3api/README.txt b/weed/s3api/README.txt index f7eb1988a..10a18ff4d 100644 --- a/weed/s3api/README.txt +++ b/weed/s3api/README.txt @@ -1,7 +1,7 @@ see https://blog.aqwari.net/xml-schema-go/ 1. go get aqwari.net/xml/cmd/xsdgen -2. Add EncodingType element for ListBucketResult in AmazonS3.xsd -3. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd -4. Remove empty Grantee struct in s3api_xsd_generated.go -5. Remove xmlns: sed s'/http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/\ //' s3api_xsd_generated.go +2. xsdgen -o s3api_xsd_generated.go -pkg s3api AmazonS3.xsd + + + diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index e3e7c0bbb..f9e97ea22 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -1,31 +1,19 @@ package s3api import ( - "context" - "encoding/json" "fmt" "net/http" "os" - "slices" "strings" "sync" - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - - // Import KMS providers to register them - _ "github.com/seaweedfs/seaweedfs/weed/kms/aws" - // _ "github.com/seaweedfs/seaweedfs/weed/kms/azure" // TODO: Fix Azure SDK compatibility issues - _ "github.com/seaweedfs/seaweedfs/weed/kms/gcp" - _ "github.com/seaweedfs/seaweedfs/weed/kms/local" - _ "github.com/seaweedfs/seaweedfs/weed/kms/openbao" - "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) type Action string @@ -37,187 +25,73 @@ type Iam interface { type IdentityAccessManagement struct { m sync.RWMutex - identities []*Identity - accessKeyIdent map[string]*Identity - accounts map[string]*Account - emailAccount map[string]*Account - hashes map[string]*sync.Pool - hashCounters map[string]*int32 - identityAnonymous *Identity - hashMu sync.RWMutex - domain string - isAuthEnabled bool - credentialManager *credential.CredentialManager - filerClient filer_pb.SeaweedFilerClient - grpcDialOption grpc.DialOption - - // IAM Integration for advanced features - iamIntegration *S3IAMIntegration + identities []*Identity + isAuthEnabled bool + domain string } type Identity struct { - Name string - Account *Account - Credentials []*Credential - Actions []Action - PrincipalArn string // ARN for IAM authorization (e.g., "arn:seaweed:iam::user/username") + Name string + Credentials []*Credential + Actions []Action } -// Account represents a system user, a system user can -// configure multiple IAM-Users, IAM-Users can configure -// permissions respectively, and each IAM-User can -// configure multiple security credentials -type Account struct { - //Name is also used to display the "DisplayName" as the owner of the bucket or object - DisplayName string - EmailAddress string - - //Id is used to identify an Account when granting cross-account access(ACLs) to buckets and objects - Id string -} - -// Predefined Accounts -var ( - // AccountAdmin is used as the default account for IAM-Credentials access without Account configured - AccountAdmin = Account{ - DisplayName: "admin", - EmailAddress: "admin@example.com", - Id: s3_constants.AccountAdminId, - } - - // AccountAnonymous is used to represent the account for anonymous access - AccountAnonymous = Account{ - DisplayName: "anonymous", - EmailAddress: "anonymous@example.com", - Id: s3_constants.AccountAnonymousId, - } -) - type Credential struct { AccessKey string SecretKey string } -// "Permission": "FULL_CONTROL"|"WRITE"|"WRITE_ACP"|"READ"|"READ_ACP" +func (action Action) isAdmin() bool { + return strings.HasPrefix(string(action), s3_constants.ACTION_ADMIN) +} + +func (action Action) isOwner(bucket string) bool { + return string(action) == s3_constants.ACTION_ADMIN+":"+bucket +} + +func (action Action) overBucket(bucket string) bool { + return strings.HasSuffix(string(action), ":"+bucket) || strings.HasSuffix(string(action), ":*") +} + func (action Action) getPermission() Permission { switch act := strings.Split(string(action), ":")[0]; act { case s3_constants.ACTION_ADMIN: return Permission("FULL_CONTROL") case s3_constants.ACTION_WRITE: return Permission("WRITE") - case s3_constants.ACTION_WRITE_ACP: - return Permission("WRITE_ACP") case s3_constants.ACTION_READ: return Permission("READ") - case s3_constants.ACTION_READ_ACP: - return Permission("READ_ACP") default: return Permission("") } } func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManagement { - return NewIdentityAccessManagementWithStore(option, "") -} - -func NewIdentityAccessManagementWithStore(option *S3ApiServerOption, explicitStore string) *IdentityAccessManagement { iam := &IdentityAccessManagement{ - domain: option.DomainName, - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), + domain: option.DomainName, } - - // Always initialize credential manager with fallback to defaults - credentialManager, err := credential.NewCredentialManagerWithDefaults(credential.CredentialStoreTypeName(explicitStore)) - if err != nil { - glog.Fatalf("failed to initialize credential manager: %v", err) - } - - // For stores that need filer client details, set them - if store := credentialManager.GetStore(); store != nil { - if filerClientSetter, ok := store.(interface { - SetFilerClient(string, grpc.DialOption) - }); ok { - filerClientSetter.SetFilerClient(string(option.Filer), option.GrpcDialOption) - } - } - - iam.credentialManager = credentialManager - - // Track whether any configuration was successfully loaded - configLoaded := false - - // First, try to load configurations from file or filer if option.Config != "" { - glog.V(3).Infof("loading static config file %s", option.Config) if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { glog.Fatalf("fail to load config file %s: %v", option.Config, err) } - // Mark as loaded since an explicit config file was provided - // This prevents fallback to environment variables even if no identities were loaded - // (e.g., config file contains only KMS settings) - configLoaded = true } else { - glog.V(3).Infof("no static config file specified... loading config from credential manager") if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil { glog.Warningf("fail to load config: %v", err) - } else { - // Check if any identities were actually loaded from filer - iam.m.RLock() - if len(iam.identities) > 0 { - configLoaded = true - } - iam.m.RUnlock() } } - - // Only use environment variables as fallback if no configuration was loaded - if !configLoaded { - accessKeyId := os.Getenv("AWS_ACCESS_KEY_ID") - secretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") - - if accessKeyId != "" && secretAccessKey != "" { - glog.V(0).Infof("No S3 configuration found, using AWS environment variables as fallback") - - // Create environment variable identity name - identityNameSuffix := accessKeyId - if len(accessKeyId) > 8 { - identityNameSuffix = accessKeyId[:8] - } - - // Create admin identity with environment variable credentials - envIdentity := &Identity{ - Name: "admin-" + identityNameSuffix, - Account: &AccountAdmin, - Credentials: []*Credential{ - { - AccessKey: accessKeyId, - SecretKey: secretAccessKey, - }, - }, - Actions: []Action{ - s3_constants.ACTION_ADMIN, - }, - } - - // Set as the only configuration - iam.m.Lock() - if len(iam.identities) == 0 { - iam.identities = []*Identity{envIdentity} - iam.accessKeyIdent = map[string]*Identity{accessKeyId: envIdentity} - iam.isAuthEnabled = true - } - iam.m.Unlock() - - glog.V(0).Infof("Added admin identity from AWS environment variables: %s", envIdentity.Name) - } - } - return iam } -func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error { - return iam.LoadS3ApiConfigurationFromCredentialManager() +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) (err error) { + var content []byte + err = pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + content, err = filer.ReadInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile) + return err + }) + if err != nil { + return fmt.Errorf("read S3 config: %v", err) + } + return iam.LoadS3ApiConfigurationFromBytes(content) } func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { @@ -226,12 +100,6 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName str glog.Warningf("fail to read %s : %v", fileName, readErr) return fmt.Errorf("fail to read %s : %v", fileName, readErr) } - - // Initialize KMS if configuration contains KMS settings - if err := iam.initializeKMSFromConfig(content); err != nil { - glog.Warningf("KMS initialization failed: %v", err) - } - return iam.LoadS3ApiConfigurationFromBytes(content) } @@ -239,7 +107,7 @@ func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []b s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil { glog.Warningf("unmarshal error: %v", err) - return fmt.Errorf("unmarshal error: %w", err) + return fmt.Errorf("unmarshal error: %v", err) } if err := filer.CheckDuplicateAccessKey(s3ApiConfiguration); err != nil { @@ -254,75 +122,12 @@ func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []b func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3ApiConfiguration) error { var identities []*Identity - var identityAnonymous *Identity - accessKeyIdent := make(map[string]*Identity) - accounts := make(map[string]*Account) - emailAccount := make(map[string]*Account) - foundAccountAdmin := false - foundAccountAnonymous := false - - for _, account := range config.Accounts { - glog.V(3).Infof("loading account name=%s, id=%s", account.DisplayName, account.Id) - switch account.Id { - case AccountAdmin.Id: - AccountAdmin = Account{ - Id: account.Id, - DisplayName: account.DisplayName, - EmailAddress: account.EmailAddress, - } - accounts[account.Id] = &AccountAdmin - foundAccountAdmin = true - case AccountAnonymous.Id: - AccountAnonymous = Account{ - Id: account.Id, - DisplayName: account.DisplayName, - EmailAddress: account.EmailAddress, - } - accounts[account.Id] = &AccountAnonymous - foundAccountAnonymous = true - default: - t := Account{ - Id: account.Id, - DisplayName: account.DisplayName, - EmailAddress: account.EmailAddress, - } - accounts[account.Id] = &t - } - if account.EmailAddress != "" { - emailAccount[account.EmailAddress] = accounts[account.Id] - } - } - if !foundAccountAdmin { - accounts[AccountAdmin.Id] = &AccountAdmin - emailAccount[AccountAdmin.EmailAddress] = &AccountAdmin - } - if !foundAccountAnonymous { - accounts[AccountAnonymous.Id] = &AccountAnonymous - emailAccount[AccountAnonymous.EmailAddress] = &AccountAnonymous - } for _, ident := range config.Identities { - glog.V(3).Infof("loading identity %s", ident.Name) t := &Identity{ - Name: ident.Name, - Credentials: nil, - Actions: nil, - PrincipalArn: generatePrincipalArn(ident.Name), + Name: ident.Name, + Credentials: nil, + Actions: nil, } - switch { - case ident.Name == AccountAnonymous.Id: - t.Account = &AccountAnonymous - identityAnonymous = t - case ident.Account == nil: - t.Account = &AccountAdmin - default: - if account, ok := accounts[ident.Account.Id]; ok { - t.Account = account - } else { - t.Account = &AccountAdmin - glog.Warningf("identity %s is associated with a non exist account ID, the association is invalid", ident.Name) - } - } - for _, action := range ident.Actions { t.Actions = append(t.Actions, Action(action)) } @@ -331,23 +136,16 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api AccessKey: cred.AccessKey, SecretKey: cred.SecretKey, }) - accessKeyIdent[cred.AccessKey] = t } identities = append(identities, t) } - iam.m.Lock() // atomically switch iam.identities = identities - iam.identityAnonymous = identityAnonymous - iam.accounts = accounts - iam.emailAccount = emailAccount - iam.accessKeyIdent = accessKeyIdent if !iam.isAuthEnabled { // one-directional, no toggling iam.isAuthEnabled = len(identities) > 0 } iam.m.Unlock() - return nil } @@ -356,12 +154,14 @@ func (iam *IdentityAccessManagement) isEnabled() bool { } func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + iam.m.RLock() defer iam.m.RUnlock() - if ident, ok := iam.accessKeyIdent[accessKey]; ok { - for _, credential := range ident.Credentials { - if credential.AccessKey == accessKey { - return ident, credential, true + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + // println("checking", ident.Name, cred.AccessKey) + if cred.AccessKey == accessKey { + return ident, cred, true } } } @@ -372,43 +172,14 @@ func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identi func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) { iam.m.RLock() defer iam.m.RUnlock() - if iam.identityAnonymous != nil { - return iam.identityAnonymous, true + for _, ident := range iam.identities { + if ident.Name == "anonymous" { + return ident, true + } } return nil, false } -// generatePrincipalArn generates an ARN for a user identity -func generatePrincipalArn(identityName string) string { - // Handle special cases - switch identityName { - case AccountAnonymous.Id: - return "arn:seaweed:iam::user/anonymous" - case AccountAdmin.Id: - return "arn:seaweed:iam::user/admin" - default: - return fmt.Sprintf("arn:seaweed:iam::user/%s", identityName) - } -} - -func (iam *IdentityAccessManagement) GetAccountNameById(canonicalId string) string { - iam.m.RLock() - defer iam.m.RUnlock() - if account, ok := iam.accounts[canonicalId]; ok { - return account.DisplayName - } - return "" -} - -func (iam *IdentityAccessManagement) GetAccountIdByEmail(email string) string { - iam.m.RLock() - defer iam.m.RUnlock() - if account, ok := iam.emailAccount[email]; ok { - return account.Id - } - return "" -} - func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if !iam.isEnabled() { @@ -417,11 +188,14 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt } identity, errCode := iam.authRequest(r, action) - glog.V(3).Infof("auth error: %v", errCode) - if errCode == s3err.ErrNone { if identity != nil && identity.Name != "" { r.Header.Set(s3_constants.AmzIdentityId, identity.Name) + if identity.isAdmin() { + r.Header.Set(s3_constants.AmzIsAdmin, "true") + } else if _, ok := r.Header[s3_constants.AmzIsAdmin]; ok { + r.Header.Del(s3_constants.AmzIsAdmin) + } } f(w, r) return @@ -437,6 +211,8 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) var found bool var authType string switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone case authTypeUnknown: glog.V(3).Infof("unknown auth type") r.Header.Set(s3_constants.AmzAuthType, "Unknown") @@ -445,7 +221,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) glog.V(3).Infof("v2 auth type") identity, s3Err = iam.isReqAuthenticatedV2(r) authType = "SigV2" - case authTypeStreamingSigned, authTypeSigned, authTypePresigned: + case authTypeSigned, authTypePresigned: glog.V(3).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) authType = "SigV4" @@ -453,22 +229,14 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) glog.V(3).Infof("post policy auth type") r.Header.Set(s3_constants.AmzAuthType, "PostPolicy") return identity, s3err.ErrNone - case authTypeStreamingUnsigned: - glog.V(3).Infof("unsigned streaming upload") - return identity, s3err.ErrNone case authTypeJWT: - glog.V(3).Infof("jwt auth type detected, iamIntegration != nil? %t", iam.iamIntegration != nil) + glog.V(3).Infof("jwt auth type") r.Header.Set(s3_constants.AmzAuthType, "Jwt") - if iam.iamIntegration != nil { - identity, s3Err = iam.authenticateJWTWithIAM(r) - authType = "Jwt" - } else { - glog.V(0).Infof("IAM integration is nil, returning ErrNotImplemented") - return identity, s3err.ErrNotImplemented - } + return identity, s3err.ErrNotImplemented case authTypeAnonymous: authType = "Anonymous" - if identity, found = iam.lookupAnonymous(); !found { + identity, found = iam.lookupAnonymous() + if !found { r.Header.Set(s3_constants.AmzAuthType, authType) return identity, s3err.ErrAccessDenied } @@ -484,67 +252,83 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) } glog.V(3).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action) + bucket, object := s3_constants.GetBucketAndObject(r) - prefix := s3_constants.GetPrefix(r) - // For List operations, use prefix for permission checking if available - if action == s3_constants.ACTION_LIST && object == "" && prefix != "" { - // List operation with prefix - check permission for the prefix path - object = prefix - } else if (object == "/" || object == "") && prefix != "" { - // Using the aws cli with s3, and s3api, and with boto3, the object is often set to "/" or empty - // but the prefix is set to the actual object key for permission checking - object = prefix + if !identity.canDo(action, bucket, object) { + return identity, s3err.ErrAccessDenied } - // For ListBuckets, authorization is performed in the handler by iterating - // through buckets and checking permissions for each. Skip the global check here. - if action == s3_constants.ACTION_LIST && bucket == "" { - // ListBuckets operation - authorization handled per-bucket in the handler - } else { - // Use enhanced IAM authorization if available, otherwise fall back to legacy authorization - if iam.iamIntegration != nil { - // Always use IAM when available for unified authorization - if errCode := iam.authorizeWithIAM(r, identity, action, bucket, object); errCode != s3err.ErrNone { - return identity, errCode - } - } else { - // Fall back to existing authorization when IAM is not configured - if !identity.canDo(action, bucket, object) { - return identity, s3err.ErrAccessDenied - } - } - } - - r.Header.Set(s3_constants.AmzAccountId, identity.Account.Id) - return identity, s3err.ErrNone } +func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) { + var identity *Identity + var s3Err s3err.ErrorCode + var found bool + var authType string + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + r.Header.Set(s3_constants.AmzAuthType, "Unknown") + return identity, s3err.ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + authType = "SigV2" + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + authType = "SigV4" + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + r.Header.Set(s3_constants.AmzAuthType, "PostPolicy") + return identity, s3err.ErrNone + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + r.Header.Set(s3_constants.AmzAuthType, "Jwt") + return identity, s3err.ErrNotImplemented + case authTypeAnonymous: + authType = "Anonymous" + identity, found = iam.lookupAnonymous() + if !found { + r.Header.Set(s3_constants.AmzAuthType, authType) + return identity, s3err.ErrAccessDenied + } + default: + return identity, s3err.ErrNotImplemented + } + + if len(authType) > 0 { + r.Header.Set(s3_constants.AmzAuthType, authType) + } + + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != s3err.ErrNone { + return identity, s3Err + } + return identity, s3err.ErrNone +} + func (identity *Identity) canDo(action Action, bucket string, objectKey string) bool { if identity.isAdmin() { return true } for _, a := range identity.Actions { - // Case where the Resource provided is - // "Resource": [ - // "arn:aws:s3:::*" - // ] if a == action { return true } } if bucket == "" { - glog.V(3).Infof("identity %s is not allowed to perform action %s on %s -- bucket is empty", identity.Name, action, bucket+objectKey) return false } - glog.V(3).Infof("checking if %s can perform %s on bucket '%s'", identity.Name, action, bucket+objectKey) target := string(action) + ":" + bucket + objectKey adminTarget := s3_constants.ACTION_ADMIN + ":" + bucket + objectKey limitedByBucket := string(action) + ":" + bucket adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket - for _, a := range identity.Actions { act := string(a) if strings.HasSuffix(act, "*") { @@ -563,119 +347,14 @@ func (identity *Identity) canDo(action Action, bucket string, objectKey string) } } } - //log error - glog.V(3).Infof("identity %s is not allowed to perform action %s on %s", identity.Name, action, bucket+objectKey) return false } func (identity *Identity) isAdmin() bool { - return slices.Contains(identity.Actions, s3_constants.ACTION_ADMIN) -} - -// GetCredentialManager returns the credential manager instance -func (iam *IdentityAccessManagement) GetCredentialManager() *credential.CredentialManager { - return iam.credentialManager -} - -// LoadS3ApiConfigurationFromCredentialManager loads configuration using the credential manager -func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromCredentialManager() error { - s3ApiConfiguration, err := iam.credentialManager.LoadConfiguration(context.Background()) - if err != nil { - return fmt.Errorf("failed to load configuration from credential manager: %w", err) + for _, a := range identity.Actions { + if a == "Admin" { + return true + } } - - return iam.loadS3ApiConfiguration(s3ApiConfiguration) -} - -// initializeKMSFromConfig loads KMS configuration from TOML format -func (iam *IdentityAccessManagement) initializeKMSFromConfig(configContent []byte) error { - // JSON-only KMS configuration - if err := iam.initializeKMSFromJSON(configContent); err == nil { - glog.V(1).Infof("Successfully loaded KMS configuration from JSON format") - return nil - } - - glog.V(2).Infof("No KMS configuration found in S3 config - SSE-KMS will not be available") - return nil -} - -// initializeKMSFromJSON loads KMS configuration from JSON format when provided in the same file -func (iam *IdentityAccessManagement) initializeKMSFromJSON(configContent []byte) error { - // Parse as generic JSON and extract optional "kms" block - var m map[string]any - if err := json.Unmarshal([]byte(strings.TrimSpace(string(configContent))), &m); err != nil { - return err - } - kmsVal, ok := m["kms"] - if !ok { - return fmt.Errorf("no KMS section found") - } - - // Load KMS configuration directly from the parsed JSON data - return kms.LoadKMSFromConfig(kmsVal) -} - -// SetIAMIntegration sets the IAM integration for advanced authentication and authorization -func (iam *IdentityAccessManagement) SetIAMIntegration(integration *S3IAMIntegration) { - iam.m.Lock() - defer iam.m.Unlock() - iam.iamIntegration = integration -} - -// authenticateJWTWithIAM authenticates JWT tokens using the IAM integration -func (iam *IdentityAccessManagement) authenticateJWTWithIAM(r *http.Request) (*Identity, s3err.ErrorCode) { - ctx := r.Context() - - // Use IAM integration to authenticate JWT - iamIdentity, errCode := iam.iamIntegration.AuthenticateJWT(ctx, r) - if errCode != s3err.ErrNone { - return nil, errCode - } - - // Convert IAMIdentity to existing Identity structure - identity := &Identity{ - Name: iamIdentity.Name, - Account: iamIdentity.Account, - Actions: []Action{}, // Empty - authorization handled by policy engine - } - - // Store session info in request headers for later authorization - r.Header.Set("X-SeaweedFS-Session-Token", iamIdentity.SessionToken) - r.Header.Set("X-SeaweedFS-Principal", iamIdentity.Principal) - - return identity, s3err.ErrNone -} - -// authorizeWithIAM authorizes requests using the IAM integration policy engine -func (iam *IdentityAccessManagement) authorizeWithIAM(r *http.Request, identity *Identity, action Action, bucket string, object string) s3err.ErrorCode { - ctx := r.Context() - - // Get session info from request headers (for JWT-based authentication) - sessionToken := r.Header.Get("X-SeaweedFS-Session-Token") - principal := r.Header.Get("X-SeaweedFS-Principal") - - // Create IAMIdentity for authorization - iamIdentity := &IAMIdentity{ - Name: identity.Name, - Account: identity.Account, - } - - // Handle both session-based (JWT) and static-key-based (V4 signature) principals - if sessionToken != "" && principal != "" { - // JWT-based authentication - use session token and principal from headers - iamIdentity.Principal = principal - iamIdentity.SessionToken = sessionToken - glog.V(3).Infof("Using JWT-based IAM authorization for principal: %s", principal) - } else if identity.PrincipalArn != "" { - // V4 signature authentication - use principal ARN from identity - iamIdentity.Principal = identity.PrincipalArn - iamIdentity.SessionToken = "" // No session token for static credentials - glog.V(3).Infof("Using V4 signature IAM authorization for principal: %s", identity.PrincipalArn) - } else { - glog.V(3).Info("No valid principal information for IAM authorization") - return s3err.ErrAccessDenied - } - - // Use IAM integration for authorization - return iam.iamIntegration.AuthorizeAction(ctx, iamIdentity, action, bucket, object, r) + return false } diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go index 68286a877..f2bd94f56 100644 --- a/weed/s3api/auth_credentials_subscribe.go +++ b/weed/s3api/auth_credentials_subscribe.go @@ -1,18 +1,15 @@ package s3api import ( - "errors" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/util" ) -func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, lastTsNs int64, prefix string, directoriesToWatch []string) { +func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) { processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { @@ -31,35 +28,21 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, lastTsNs int64, p _ = s3a.onIamConfigUpdate(dir, fileName, content) _ = s3a.onCircuitBreakerConfigUpdate(dir, fileName, content) - _ = s3a.onBucketMetadataChange(dir, message.OldEntry, message.NewEntry) return nil } - metadataFollowOption := &pb.MetadataFollowOption{ - ClientName: clientName, - ClientId: s3a.randomClientId, - ClientEpoch: 1, - SelfSignature: 0, - PathPrefix: prefix, - AdditionalPathPrefixes: nil, - DirectoriesToWatch: directoriesToWatch, - StartTsNs: lastTsNs, - StopTsNs: 0, - EventErrorType: pb.FatalOnError, - } - util.RetryUntil("followIamChanges", func() error { - metadataFollowOption.ClientEpoch++ - return pb.WithFilerClientFollowMetadata(s3a, metadataFollowOption, processEventFn) + util.RetryForever("followIamChanges", func() error { + return pb.WithFilerClientFollowMetadata(s3a, clientName, s3a.randomClientId, prefix, &lastTsNs, 0, 0, processEventFn, pb.FatalOnError) }, func(err error) bool { glog.V(0).Infof("iam follow metadata changes: %v", err) return true }) } -// reload iam config +//reload iam config func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) error { - if dir == filer.IamConfigDirectory && filename == filer.IamIdentityFile { + if dir == filer.IamConfigDirecotry && filename == filer.IamIdentityFile { if err := s3a.iam.LoadS3ApiConfigurationFromBytes(content); err != nil { return err } @@ -68,7 +51,7 @@ func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) return nil } -// reload circuit breaker config +//reload circuit breaker config func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, content []byte) error { if dir == s3_constants.CircuitBreakerConfigDir && filename == s3_constants.CircuitBreakerConfigFile { if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil { @@ -78,94 +61,3 @@ func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, conte } return nil } - -// reload bucket metadata -func (s3a *S3ApiServer) onBucketMetadataChange(dir string, oldEntry *filer_pb.Entry, newEntry *filer_pb.Entry) error { - if dir == s3a.option.BucketsPath { - if newEntry != nil { - // Update bucket registry (existing functionality) - s3a.bucketRegistry.LoadBucketMetadata(newEntry) - glog.V(0).Infof("updated bucketMetadata %s/%s", dir, newEntry.Name) - - // Update bucket configuration cache with new entry - s3a.updateBucketConfigCacheFromEntry(newEntry) - } else if oldEntry != nil { - // Remove from bucket registry (existing functionality) - s3a.bucketRegistry.RemoveBucketMetadata(oldEntry) - glog.V(0).Infof("remove bucketMetadata %s/%s", dir, oldEntry.Name) - - // Remove from bucket configuration cache - s3a.invalidateBucketConfigCache(oldEntry.Name) - } - } - return nil -} - -// updateBucketConfigCacheFromEntry updates the bucket config cache when a bucket entry changes -func (s3a *S3ApiServer) updateBucketConfigCacheFromEntry(entry *filer_pb.Entry) { - if s3a.bucketConfigCache == nil { - return - } - - bucket := entry.Name - - // Create new bucket config from the entry - config := &BucketConfig{ - Name: bucket, - Entry: entry, - IsPublicRead: false, // Explicitly default to false for private buckets - } - - // Extract configuration from extended attributes - if entry.Extended != nil { - if versioning, exists := entry.Extended[s3_constants.ExtVersioningKey]; exists { - config.Versioning = string(versioning) - } - if ownership, exists := entry.Extended[s3_constants.ExtOwnershipKey]; exists { - config.Ownership = string(ownership) - } - if acl, exists := entry.Extended[s3_constants.ExtAmzAclKey]; exists { - config.ACL = acl - // Parse ACL and cache public-read status - config.IsPublicRead = parseAndCachePublicReadStatus(acl) - } else { - // No ACL means private bucket - config.IsPublicRead = false - } - if owner, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - config.Owner = string(owner) - } - // Parse Object Lock configuration if present - if objectLockConfig, found := LoadObjectLockConfigurationFromExtended(entry); found { - config.ObjectLockConfig = objectLockConfig - glog.V(2).Infof("updateBucketConfigCacheFromEntry: cached Object Lock configuration for bucket %s", bucket) - } - } - - // Load CORS configuration from bucket directory content - if corsConfig, err := s3a.loadCORSFromBucketContent(bucket); err != nil { - if !errors.Is(err, filer_pb.ErrNotFound) { - glog.Errorf("updateBucketConfigCacheFromEntry: failed to load CORS configuration for bucket %s: %v", bucket, err) - } - } else { - config.CORS = corsConfig - glog.V(2).Infof("updateBucketConfigCacheFromEntry: loaded CORS config for bucket %s", bucket) - } - - // Update timestamp - config.LastModified = time.Now() - - // Update cache - s3a.bucketConfigCache.Set(bucket, config) -} - -// invalidateBucketConfigCache removes a bucket from the configuration cache -func (s3a *S3ApiServer) invalidateBucketConfigCache(bucket string) { - if s3a.bucketConfigCache == nil { - return - } - - s3a.bucketConfigCache.Remove(bucket) - s3a.bucketConfigCache.RemoveNegativeCache(bucket) // Also remove from negative cache - glog.V(2).Infof("invalidateBucketConfigCache: removed bucket %s from cache", bucket) -} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go index c7521ad76..4545d13bc 100644 --- a/weed/s3api/auth_credentials_test.go +++ b/weed/s3api/auth_credentials_test.go @@ -1,17 +1,13 @@ package s3api import ( - "os" - "reflect" - "sync" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/stretchr/testify/assert" "testing" - "github.com/seaweedfs/seaweedfs/weed/credential" - . "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" + "github.com/golang/protobuf/jsonpb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - jsonpb "google.golang.org/protobuf/encoding/protojson" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" ) func TestIdentityListFileFormat(t *testing.T) { @@ -62,14 +58,14 @@ func TestIdentityListFileFormat(t *testing.T) { s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) - m := jsonpb.MarshalOptions{ - EmitUnpopulated: true, - Indent: " ", + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", } - text, _ := m.Marshal(s3ApiConfiguration) + text, _ := m.MarshalToString(s3ApiConfiguration) - println(string(text)) + println(text) } @@ -83,10 +79,7 @@ func TestCanDo(t *testing.T) { } // object specific assert.Equal(t, true, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt")) - assert.Equal(t, true, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d/e.txt")) - assert.Equal(t, false, ident1.canDo(ACTION_DELETE_BUCKET, "bucket1", "")) assert.Equal(t, false, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/other/some"), "action without *") - assert.Equal(t, false, ident1.canDo(ACTION_WRITE, "bucket1", "/a/b/*"), "action on parent directory") // bucket specific ident2 := &Identity{ @@ -94,13 +87,10 @@ func TestCanDo(t *testing.T) { Actions: []Action{ "Read:bucket1", "Write:bucket1/*", - "WriteAcp:bucket1", }, } assert.Equal(t, true, ident2.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt")) assert.Equal(t, true, ident2.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt")) - assert.Equal(t, true, ident2.canDo(ACTION_WRITE_ACP, "bucket1", "")) - assert.Equal(t, false, ident2.canDo(ACTION_READ_ACP, "bucket1", "")) assert.Equal(t, false, ident2.canDo(ACTION_LIST, "bucket1", "/a/b/c/d.txt")) // across buckets @@ -114,18 +104,15 @@ func TestCanDo(t *testing.T) { assert.Equal(t, true, ident3.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt")) assert.Equal(t, true, ident3.canDo(ACTION_WRITE, "bucket1", "/a/b/c/d.txt")) assert.Equal(t, false, ident3.canDo(ACTION_LIST, "bucket1", "/a/b/other/some")) - assert.Equal(t, false, ident3.canDo(ACTION_WRITE_ACP, "bucket1", "")) // partial buckets ident4 := &Identity{ Name: "anything", Actions: []Action{ "Read:special_*", - "ReadAcp:special_*", }, } assert.Equal(t, true, ident4.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt")) - assert.Equal(t, true, ident4.canDo(ACTION_READ_ACP, "special_bucket", "")) assert.Equal(t, false, ident4.canDo(ACTION_READ, "bucket1", "/a/b/c/d.txt")) // admin buckets @@ -136,466 +123,6 @@ func TestCanDo(t *testing.T) { }, } assert.Equal(t, true, ident5.canDo(ACTION_READ, "special_bucket", "/a/b/c/d.txt")) - assert.Equal(t, true, ident5.canDo(ACTION_READ_ACP, "special_bucket", "")) assert.Equal(t, true, ident5.canDo(ACTION_WRITE, "special_bucket", "/a/b/c/d.txt")) - assert.Equal(t, true, ident5.canDo(ACTION_WRITE_ACP, "special_bucket", "")) - // anonymous buckets - ident6 := &Identity{ - Name: "anonymous", - Actions: []Action{ - "Read", - }, - } - assert.Equal(t, true, ident6.canDo(ACTION_READ, "anything_bucket", "/a/b/c/d.txt")) - - //test deleteBucket operation - ident7 := &Identity{ - Name: "anything", - Actions: []Action{ - "DeleteBucket:bucket1", - }, - } - assert.Equal(t, true, ident7.canDo(ACTION_DELETE_BUCKET, "bucket1", "")) -} - -type LoadS3ApiConfigurationTestCase struct { - pbAccount *iam_pb.Account - pbIdent *iam_pb.Identity - expectIdent *Identity -} - -func TestLoadS3ApiConfiguration(t *testing.T) { - specifiedAccount := Account{ - Id: "specifiedAccountID", - DisplayName: "specifiedAccountName", - EmailAddress: "specifiedAccounEmail@example.com", - } - pbSpecifiedAccount := iam_pb.Account{ - Id: "specifiedAccountID", - DisplayName: "specifiedAccountName", - EmailAddress: "specifiedAccounEmail@example.com", - } - testCases := map[string]*LoadS3ApiConfigurationTestCase{ - "notSpecifyAccountId": { - pbIdent: &iam_pb.Identity{ - Name: "notSpecifyAccountId", - Actions: []string{ - "Read", - "Write", - }, - Credentials: []*iam_pb.Credential{ - { - AccessKey: "some_access_key1", - SecretKey: "some_secret_key2", - }, - }, - }, - expectIdent: &Identity{ - Name: "notSpecifyAccountId", - Account: &AccountAdmin, - PrincipalArn: "arn:seaweed:iam::user/notSpecifyAccountId", - Actions: []Action{ - "Read", - "Write", - }, - Credentials: []*Credential{ - { - AccessKey: "some_access_key1", - SecretKey: "some_secret_key2", - }, - }, - }, - }, - "specifiedAccountID": { - pbAccount: &pbSpecifiedAccount, - pbIdent: &iam_pb.Identity{ - Name: "specifiedAccountID", - Account: &pbSpecifiedAccount, - Actions: []string{ - "Read", - "Write", - }, - }, - expectIdent: &Identity{ - Name: "specifiedAccountID", - Account: &specifiedAccount, - PrincipalArn: "arn:seaweed:iam::user/specifiedAccountID", - Actions: []Action{ - "Read", - "Write", - }, - }, - }, - "anonymous": { - pbIdent: &iam_pb.Identity{ - Name: "anonymous", - Actions: []string{ - "Read", - "Write", - }, - }, - expectIdent: &Identity{ - Name: "anonymous", - Account: &AccountAnonymous, - PrincipalArn: "arn:seaweed:iam::user/anonymous", - Actions: []Action{ - "Read", - "Write", - }, - }, - }, - } - - config := &iam_pb.S3ApiConfiguration{ - Identities: make([]*iam_pb.Identity, 0), - } - for _, v := range testCases { - config.Identities = append(config.Identities, v.pbIdent) - if v.pbAccount != nil { - config.Accounts = append(config.Accounts, v.pbAccount) - } - } - - iam := IdentityAccessManagement{} - err := iam.loadS3ApiConfiguration(config) - if err != nil { - return - } - - for _, ident := range iam.identities { - tc := testCases[ident.Name] - if !reflect.DeepEqual(ident, tc.expectIdent) { - t.Errorf("not expect for ident name %s", ident.Name) - } - } -} - -func TestNewIdentityAccessManagementWithStoreEnvVars(t *testing.T) { - // Save original environment - originalAccessKeyId := os.Getenv("AWS_ACCESS_KEY_ID") - originalSecretAccessKey := os.Getenv("AWS_SECRET_ACCESS_KEY") - - // Clean up after test - defer func() { - if originalAccessKeyId != "" { - os.Setenv("AWS_ACCESS_KEY_ID", originalAccessKeyId) - } else { - os.Unsetenv("AWS_ACCESS_KEY_ID") - } - if originalSecretAccessKey != "" { - os.Setenv("AWS_SECRET_ACCESS_KEY", originalSecretAccessKey) - } else { - os.Unsetenv("AWS_SECRET_ACCESS_KEY") - } - }() - - tests := []struct { - name string - accessKeyId string - secretAccessKey string - expectEnvIdentity bool - expectedName string - description string - }{ - { - name: "Environment variables used as fallback", - accessKeyId: "AKIA1234567890ABCDEF", - secretAccessKey: "secret123456789012345678901234567890abcdef12", - expectEnvIdentity: true, - expectedName: "admin-AKIA1234", - description: "When no config file and no filer config, environment variables should be used", - }, - { - name: "Short access key fallback", - accessKeyId: "SHORT", - secretAccessKey: "secret123456789012345678901234567890abcdef12", - expectEnvIdentity: true, - expectedName: "admin-SHORT", - description: "Short access keys should work correctly as fallback", - }, - { - name: "No env vars means no identities", - accessKeyId: "", - secretAccessKey: "", - expectEnvIdentity: false, - expectedName: "", - description: "When no env vars and no config, should have no identities", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set up environment variables - if tt.accessKeyId != "" { - os.Setenv("AWS_ACCESS_KEY_ID", tt.accessKeyId) - } else { - os.Unsetenv("AWS_ACCESS_KEY_ID") - } - if tt.secretAccessKey != "" { - os.Setenv("AWS_SECRET_ACCESS_KEY", tt.secretAccessKey) - } else { - os.Unsetenv("AWS_SECRET_ACCESS_KEY") - } - - // Create IAM instance with memory store for testing (no config file) - option := &S3ApiServerOption{ - Config: "", // No config file - this should trigger environment variable fallback - } - iam := NewIdentityAccessManagementWithStore(option, string(credential.StoreTypeMemory)) - - if tt.expectEnvIdentity { - // Should have exactly one identity from environment variables - assert.Len(t, iam.identities, 1, "Should have exactly one identity from environment variables") - - identity := iam.identities[0] - assert.Equal(t, tt.expectedName, identity.Name, "Identity name should match expected") - assert.Len(t, identity.Credentials, 1, "Should have one credential") - assert.Equal(t, tt.accessKeyId, identity.Credentials[0].AccessKey, "Access key should match environment variable") - assert.Equal(t, tt.secretAccessKey, identity.Credentials[0].SecretKey, "Secret key should match environment variable") - assert.Contains(t, identity.Actions, Action(ACTION_ADMIN), "Should have admin action") - } else { - // When no env vars, should have no identities (since no config file) - assert.Len(t, iam.identities, 0, "Should have no identities when no env vars and no config file") - } - }) - } -} - -// TestBucketLevelListPermissions tests that bucket-level List permissions work correctly -// This test validates the fix for issue #7066 -func TestBucketLevelListPermissions(t *testing.T) { - // Test the functionality that was broken in issue #7066 - - t.Run("Bucket Wildcard Permissions", func(t *testing.T) { - // Create identity with bucket-level List permission using wildcards - identity := &Identity{ - Name: "bucket-user", - Actions: []Action{ - "List:mybucket*", - "Read:mybucket*", - "ReadAcp:mybucket*", - "Write:mybucket*", - "WriteAcp:mybucket*", - "Tagging:mybucket*", - }, - } - - // Test cases for bucket-level wildcard permissions - testCases := []struct { - name string - action Action - bucket string - object string - shouldAllow bool - description string - }{ - { - name: "exact bucket match", - action: "List", - bucket: "mybucket", - object: "", - shouldAllow: true, - description: "Should allow access to exact bucket name", - }, - { - name: "bucket with suffix", - action: "List", - bucket: "mybucket-prod", - object: "", - shouldAllow: true, - description: "Should allow access to bucket with matching prefix", - }, - { - name: "bucket with numbers", - action: "List", - bucket: "mybucket123", - object: "", - shouldAllow: true, - description: "Should allow access to bucket with numbers", - }, - { - name: "different bucket", - action: "List", - bucket: "otherbucket", - object: "", - shouldAllow: false, - description: "Should deny access to bucket with different prefix", - }, - { - name: "partial match", - action: "List", - bucket: "notmybucket", - object: "", - shouldAllow: false, - description: "Should deny access to bucket that contains but doesn't start with the prefix", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := identity.canDo(tc.action, tc.bucket, tc.object) - assert.Equal(t, tc.shouldAllow, result, tc.description) - }) - } - }) - - t.Run("Global List Permission", func(t *testing.T) { - // Create identity with global List permission - identity := &Identity{ - Name: "global-user", - Actions: []Action{ - "List", - }, - } - - // Should allow access to any bucket - testCases := []string{"anybucket", "mybucket", "test-bucket", "prod-data"} - - for _, bucket := range testCases { - result := identity.canDo("List", bucket, "") - assert.True(t, result, "Global List permission should allow access to bucket %s", bucket) - } - }) - - t.Run("No Wildcard Exact Match", func(t *testing.T) { - // Create identity with exact bucket permission (no wildcard) - identity := &Identity{ - Name: "exact-user", - Actions: []Action{ - "List:specificbucket", - }, - } - - // Should only allow access to the exact bucket - assert.True(t, identity.canDo("List", "specificbucket", ""), "Should allow access to exact bucket") - assert.False(t, identity.canDo("List", "specificbucket-test", ""), "Should deny access to bucket with suffix") - assert.False(t, identity.canDo("List", "otherbucket", ""), "Should deny access to different bucket") - }) - - t.Log("This test validates the fix for issue #7066") - t.Log("Bucket-level List permissions like 'List:bucket*' work correctly") - t.Log("ListBucketsHandler now uses consistent authentication flow") -} - -// TestListBucketsAuthRequest tests that authRequest works correctly for ListBuckets operations -// This test validates that the fix for the regression identified in PR #7067 works correctly -func TestListBucketsAuthRequest(t *testing.T) { - t.Run("ListBuckets special case handling", func(t *testing.T) { - // Create identity with bucket-specific permissions (no global List permission) - identity := &Identity{ - Name: "bucket-user", - Account: &AccountAdmin, - Actions: []Action{ - Action("List:mybucket*"), - Action("Read:mybucket*"), - }, - } - - // Test 1: ListBuckets operation should succeed (bucket = "") - // This would have failed before the fix because canDo("List", "", "") would return false - // After the fix, it bypasses the canDo check for ListBuckets operations - - // Simulate what happens in authRequest for ListBuckets: - // action = ACTION_LIST, bucket = "", object = "" - - // Before fix: identity.canDo(ACTION_LIST, "", "") would fail - // After fix: the canDo check should be bypassed - - // Test the individual canDo method to show it would fail without the special case - result := identity.canDo(Action(ACTION_LIST), "", "") - assert.False(t, result, "canDo should return false for empty bucket with bucket-specific permissions") - - // Test with a specific bucket that matches the permission - result2 := identity.canDo(Action(ACTION_LIST), "mybucket", "") - assert.True(t, result2, "canDo should return true for matching bucket") - - // Test with a specific bucket that doesn't match - result3 := identity.canDo(Action(ACTION_LIST), "otherbucket", "") - assert.False(t, result3, "canDo should return false for non-matching bucket") - }) - - t.Run("Object listing maintains permission enforcement", func(t *testing.T) { - // Create identity with bucket-specific permissions - identity := &Identity{ - Name: "bucket-user", - Account: &AccountAdmin, - Actions: []Action{ - Action("List:mybucket*"), - }, - } - - // For object listing operations, the normal permission checks should still apply - // These operations have a specific bucket in the URL - - // Should succeed for allowed bucket - result1 := identity.canDo(Action(ACTION_LIST), "mybucket", "prefix/") - assert.True(t, result1, "Should allow listing objects in permitted bucket") - - result2 := identity.canDo(Action(ACTION_LIST), "mybucket-prod", "") - assert.True(t, result2, "Should allow listing objects in wildcard-matched bucket") - - // Should fail for disallowed bucket - result3 := identity.canDo(Action(ACTION_LIST), "otherbucket", "") - assert.False(t, result3, "Should deny listing objects in non-permitted bucket") - }) - - t.Log("This test validates the fix for the regression identified in PR #7067") - t.Log("ListBuckets operation bypasses global permission check when bucket is empty") - t.Log("Object listing still properly enforces bucket-level permissions") -} - -// TestSignatureVerificationDoesNotCheckPermissions tests that signature verification -// only validates the signature and identity, not permissions. Permissions should be -// checked later in authRequest based on the actual operation. -// This test validates the fix for issue #7334 -func TestSignatureVerificationDoesNotCheckPermissions(t *testing.T) { - t.Run("List-only user can authenticate via signature", func(t *testing.T) { - // Create IAM with a user that only has List permissions on specific buckets - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "list-only-user", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "list_access_key", - SecretKey: "list_secret_key", - }, - }, - Actions: []string{ - "List:bucket-123", - "Read:bucket-123", - }, - }, - }, - }) - assert.NoError(t, err) - - // Before the fix, signature verification would fail because it checked for Write permission - // After the fix, signature verification should succeed (only checking signature validity) - // The actual permission check happens later in authRequest with the correct action - - // The user should be able to authenticate (signature verification passes) - // But authorization for specific actions is checked separately - identity, cred, found := iam.lookupByAccessKey("list_access_key") - assert.True(t, found, "Should find the user by access key") - assert.Equal(t, "list-only-user", identity.Name) - assert.Equal(t, "list_secret_key", cred.SecretKey) - - // User should have the correct permissions - assert.True(t, identity.canDo(Action(ACTION_LIST), "bucket-123", "")) - assert.True(t, identity.canDo(Action(ACTION_READ), "bucket-123", "")) - - // User should NOT have write permissions - assert.False(t, identity.canDo(Action(ACTION_WRITE), "bucket-123", "")) - }) - - t.Log("This test validates the fix for issue #7334") - t.Log("Signature verification no longer checks for Write permission") - t.Log("This allows list-only and read-only users to authenticate via AWS Signature V4") } diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go index b31c37a27..5694a96ac 100644 --- a/weed/s3api/auth_signature_v2.go +++ b/weed/s3api/auth_signature_v2.go @@ -22,14 +22,16 @@ import ( "crypto/sha1" "crypto/subtle" "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net" "net/http" + "net/url" + "path" "sort" "strconv" "strings" "time" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" ) // Whitelist resource list that will be used in query string for signature-V2 calculation. @@ -59,7 +61,7 @@ var resourceList = []string{ "website", } -// Verify if request has AWS Signature Version '2'. +// Verify if request has valid AWS Signature Version '2'. func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) { if isRequestSignatureV2(r) { return iam.doesSignV2Match(r) @@ -68,176 +70,276 @@ func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Ide } func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode { - accessKey := formValues.Get("AWSAccessKeyId") - if accessKey == "" { - return s3err.ErrMissingFields - } - - identity, cred, found := iam.lookupByAccessKey(accessKey) + _, cred, found := iam.lookupByAccessKey(accessKey) if !found { return s3err.ErrInvalidAccessKeyID } - - bucket := formValues.Get("bucket") - if !identity.canDo(s3_constants.ACTION_WRITE, bucket, "") { - return s3err.ErrAccessDenied - } - policy := formValues.Get("Policy") - if policy == "" { - return s3err.ErrMissingFields - } - signature := formValues.Get("Signature") - if signature == "" { - return s3err.ErrMissingFields - } - if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { return s3err.ErrSignatureDoesNotMatch } return s3err.ErrNone } +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + // doesSignV2Match - Verify authorization header with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html -// -// returns ErrNone if the signature matches. -func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) { - v2Auth := r.Header.Get("Authorization") - accessKey, errCode := validateV2AuthHeader(v2Auth) - if errCode != s3err.ErrNone { - return nil, errCode - } +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false - identity, cred, found := iam.lookupByAccessKey(accessKey) - if !found { - return nil, s3err.ErrInvalidAccessKeyID - } - - expectedAuth := signatureV2(cred, r.Method, r.URL.Path, r.URL.Query().Encode(), r.Header) - if !compareSignatureV2(v2Auth, expectedAuth) { - return nil, s3err.ErrSignatureDoesNotMatch - } - return identity, s3err.ErrNone -} - -// doesPresignV2SignatureMatch - Verify query headers with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html -// -// returns ErrNone if the signature matches. -func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) { - query := r.URL.Query() - expires := query.Get("Expires") - if expires == "" { - return nil, s3err.ErrMissingFields - } - - expireTimestamp, err := strconv.ParseInt(expires, 10, 64) - if err != nil { - return nil, s3err.ErrMalformedExpires - } - - if time.Unix(expireTimestamp, 0).Before(time.Now().UTC()) { - return nil, s3err.ErrExpiredPresignRequest - } - - accessKey := query.Get("AWSAccessKeyId") - if accessKey == "" { - return nil, s3err.ErrInvalidAccessKeyID - } - - signature := query.Get("Signature") - if signature == "" { - return nil, s3err.ErrMissingFields - } - - identity, cred, found := iam.lookupByAccessKey(accessKey) - if !found { - return nil, s3err.ErrInvalidAccessKeyID - } - - expectedSignature := preSignatureV2(cred, r.Method, r.URL.Path, r.URL.Query().Encode(), r.Header, expires) - if !compareSignatureV2(signature, expectedSignature) { - return nil, s3err.ErrSignatureDoesNotMatch - } - return identity, s3err.ErrNone -} - -// validateV2AuthHeader validates AWS Signature Version '2' authentication header. func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) { if v2Auth == "" { return "", s3err.ErrAuthHeaderEmpty } - - // Signature V2 authorization header format: - // Authorization: AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg= + // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v2Auth, signV2Algorithm) { return "", s3err.ErrSignatureVersionNotSupported } - // Strip off the Algorithm prefix. - v2Auth = v2Auth[len(signV2Algorithm):] - authFields := strings.Split(v2Auth, ":") + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). + // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") if len(authFields) != 2 { return "", s3err.ErrMissingFields } - // The first field is Access Key ID. - if authFields[0] == "" { - return "", s3err.ErrInvalidAccessKeyID - } - - // The second field is signature. - if authFields[1] == "" { + // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. + keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { return "", s3err.ErrMissingFields } - return authFields[0], s3err.ErrNone + return keySignFields[0], s3err.ErrNone } -// signatureV2 - calculates signature version 2 for request. +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != s3err.ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, s3err.ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, s3err.ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, s3err.ErrSignatureDoesNotMatch + } + return ident, s3err.ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, s3err.ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, s3err.ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return nil, s3err.ErrMalformedExpires + } + + // Check if the presigned URL has expired. + if expiresInt < time.Now().UTC().Unix() { + return nil, s3err.ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, s3err.ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + + return ident, s3err.ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. +func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string, domain string) (string, error) { + if domain == "" { + return path, nil + } + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + if !strings.HasSuffix(host, "."+domain) { + return path, nil + } + bucket := strings.TrimSuffix(host, "."+domain) + return "/" + pathJoin(bucket, path), nil +} + +// pathJoin - like path.Join() but retains trailing "/" of the last element +func pathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], "/") { + trailingSlash = "/" + } + } + return path.Join(elem...) + trailingSlash +} + +// Return the signature v2 of a given request. func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string { stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") signature := calculateSignatureV2(stringToSign, cred.SecretKey) - return signV2Algorithm + cred.AccessKey + ":" + signature + return signature } -// getStringToSignV2 - string to sign in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { canonicalHeaders := canonicalizedAmzHeadersV2(headers) if len(canonicalHeaders) > 0 { canonicalHeaders += "\n" } + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get("Date") + } + // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + - // Content-MD5 + "\n" + + // Content-Md5 + "\n" + // Content-Type + "\n" + - // Date + "\n" + - // CanonicalizedAmzHeaders + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + // CanonicalizedResource; - stringToSign := method + "\n" - stringToSign += headers.Get("Content-Md5") + "\n" - stringToSign += headers.Get("Content-Type") + "\n" + stringToSign := strings.Join([]string{ + method, + headers.Get("Content-MD5"), + headers.Get("Content-Type"), + date, + canonicalHeaders, + }, "\n") - if expires != "" { - stringToSign += expires + "\n" - } else { - stringToSign += headers.Get("Date") + "\n" - if v := headers.Get("x-amz-date"); v != "" { - stringToSign = strings.Replace(stringToSign, headers.Get("Date")+"\n", "\n", -1) - } - } - stringToSign += canonicalHeaders - stringToSign += canonicalizedResourceV2(encodedResource, encodedQuery) - return stringToSign + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) } -// canonicalizedResourceV2 - canonicalize the resource string for signature V2. +// Return canonical resource string. func canonicalizedResourceV2(encodedResource, encodedQuery string) string { queries := strings.Split(encodedQuery, "&") keyval := make(map[string]string) @@ -253,26 +355,28 @@ func canonicalizedResourceV2(encodedResource, encodedQuery string) string { } var canonicalQueries []string - for _, resource := range resourceList { - if val, ok := keyval[resource]; ok { - if val == "" { - canonicalQueries = append(canonicalQueries, resource) - continue - } - canonicalQueries = append(canonicalQueries, resource+"="+val) + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) } - // The queries will be already sorted as resourceList is sorted. - if len(canonicalQueries) == 0 { - return encodedResource + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" + canonicalQuery } - - // If queries are present then the canonicalized resource is set to encodedResource + "?" + strings.Join(canonicalQueries, "&") - return encodedResource + "?" + strings.Join(canonicalQueries, "&") + return encodedResource } -// canonicalizedAmzHeadersV2 - canonicalize the x-amz-* headers for signature V2. +// Return canonical headers. func canonicalizedAmzHeadersV2(headers http.Header) string { var keys []string keyval := make(map[string]string) @@ -285,7 +389,6 @@ func canonicalizedAmzHeadersV2(headers http.Header) string { keyval[lkey] = strings.Join(headers[key], ",") } sort.Strings(keys) - var canonicalHeaders []string for _, key := range keys { canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) @@ -293,7 +396,6 @@ func canonicalizedAmzHeadersV2(headers http.Header) string { return strings.Join(canonicalHeaders, "\n") } -// calculateSignatureV2 - calculates signature version 2. func calculateSignatureV2(stringToSign string, secret string) string { hm := hmac.New(sha1.New, []byte(secret)) hm.Write([]byte(stringToSign)) diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 05e5c7b5f..a49caad06 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -25,7 +25,7 @@ import ( "encoding/hex" "io" "net/http" - "path" + "net/url" "regexp" "sort" "strconv" @@ -33,8 +33,7 @@ import ( "time" "unicode/utf8" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) { @@ -48,55 +47,138 @@ func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Ide return nil, s3err.ErrAccessDenied } -// Constants specific to this file +// Streaming AWS Signature Version '4' constants. const ( - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingUnsignedPayload = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" - unsignedPayload = "UNSIGNED-PAYLOAD" - // Limit for IAM/STS request body size to prevent DoS attacks - iamRequestBodyLimit = 10 * (1 << 20) // 10 MiB + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" ) -// streamHashRequestBody computes SHA256 hash incrementally while preserving the body. -func streamHashRequestBody(r *http.Request, sizeLimit int64) (string, error) { - if r.Body == nil { - return emptySHA256, nil - } - - limitedReader := io.LimitReader(r.Body, sizeLimit) - hasher := sha256.New() - var bodyBuffer bytes.Buffer - - // Use io.Copy with an io.MultiWriter to hash and buffer the body simultaneously. - if _, err := io.Copy(io.MultiWriter(hasher, &bodyBuffer), limitedReader); err != nil { - return "", err - } - - r.Body = io.NopCloser(&bodyBuffer) - - if bodyBuffer.Len() == 0 { - return emptySHA256, nil - } - - return hex.EncodeToString(hasher.Sum(nil)), nil -} - -// getContentSha256Cksum retrieves the "x-amz-content-sha256" header value. +// Returns SHA256 for calculating canonical-request. func getContentSha256Cksum(r *http.Request) string { - // If the client sends a SHA256 checksum of the object in this header, use it. - if v := r.Header.Get("X-Amz-Content-Sha256"); v != "" { - return v - } + var ( + defaultSha256Cksum string + v []string + ok bool + ) // For a presigned request we look at the query param for sha256. if isRequestPresignedSignatureV4(r) { - // X-Amz-Content-Sha256 header value is optional for presigned requests. - return unsignedPayload + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] } - // X-Amz-Content-Sha256 header value is required for all non-presigned requests. - return emptySHA256 + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != s3err.ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, s3err.ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, s3err.ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get hashed Payload + if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { + buf, _ := io.ReadAll(r.Body) + r.Body = io.NopCloser(bytes.NewBuffer(buf)) + b, _ := io.ReadAll(bytes.NewBuffer(buf)) + if len(b) != 0 { + bodyHash := sha256.Sum256(b) + hashedPayload = hex.EncodeToString(bodyHash[:]) + } + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, + signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, + signV4Values.Credential.scope.service) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + + // Return error none. + return identity, s3err.ErrNone +} + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } } // signValues data type represents structured form of AWS Signature V4 header. @@ -106,7 +188,19 @@ type signValues struct { Signature string } -// parseSignV4 parses the authorization header for signature v4. +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, "/") +} + +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +// func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) { // Replace all spaced strings, some clients can send spaced // parameters and some won't. So we pro-actively remove any spaces @@ -132,7 +226,7 @@ func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) { signV4Values := signValues{} var err s3err.ErrorCode - // Save credential values. + // Save credentail values. signV4Values.Credential, err = parseCredentialHeader(authFields[0]) if err != s3err.ErrNone { return sv, err @@ -154,262 +248,9 @@ func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) { return signV4Values, s3err.ErrNone } -// doesSignatureMatch verifies the request signature. -func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { - - // Copy request - req := *r - - // Save authorization header. - v4Auth := req.Header.Get("Authorization") - - // Parse signature version '4' header. - signV4Values, errCode := parseSignV4(v4Auth) - if errCode != s3err.ErrNone { - return nil, errCode - } - - // Compute payload hash for non-S3 services - if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { - var err error - hashedPayload, err = streamHashRequestBody(r, iamRequestBodyLimit) - if err != nil { - return nil, s3err.ErrInternalError - } - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != s3err.ErrNone { - return nil, errCode - } - - cred := signV4Values.Credential - identity, foundCred, found := iam.lookupByAccessKey(cred.accessKey) - if !found { - return nil, s3err.ErrInvalidAccessKeyID - } - - // Extract date, if not present throw error. - var dateStr string - if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { - if dateStr = r.Header.Get("Date"); dateStr == "" { - return nil, s3err.ErrMissingDateHeader - } - } - // Parse date header. - t, e := time.Parse(iso8601Format, dateStr) - if e != nil { - return nil, s3err.ErrMalformedDate - } - - // Query string. - queryStr := req.URL.Query().Encode() - - // Check if reverse proxy is forwarding with prefix - if forwardedPrefix := r.Header.Get("X-Forwarded-Prefix"); forwardedPrefix != "" { - // Try signature verification with the forwarded prefix first. - // This handles cases where reverse proxies strip URL prefixes and add the X-Forwarded-Prefix header. - cleanedPath := buildPathWithForwardedPrefix(forwardedPrefix, req.URL.Path) - errCode = iam.verifySignatureWithPath(extractedSignedHeaders, hashedPayload, queryStr, cleanedPath, req.Method, foundCred.SecretKey, t, signV4Values) - if errCode == s3err.ErrNone { - return identity, errCode - } - } - - // Try normal signature verification (without prefix) - errCode = iam.verifySignatureWithPath(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method, foundCred.SecretKey, t, signV4Values) - if errCode == s3err.ErrNone { - return identity, errCode - } - - return nil, errCode -} - -// buildPathWithForwardedPrefix combines forwarded prefix with URL path while preserving trailing slashes. -// This ensures compatibility with S3 SDK signatures that include trailing slashes for directory operations. -func buildPathWithForwardedPrefix(forwardedPrefix, urlPath string) string { - fullPath := forwardedPrefix + urlPath - hasTrailingSlash := strings.HasSuffix(urlPath, "/") && urlPath != "/" - cleanedPath := path.Clean(fullPath) - if hasTrailingSlash && !strings.HasSuffix(cleanedPath, "/") { - cleanedPath += "/" - } - return cleanedPath -} - -// verifySignatureWithPath verifies signature with a given path (used for both normal and prefixed paths). -func (iam *IdentityAccessManagement) verifySignatureWithPath(extractedSignedHeaders http.Header, hashedPayload, queryStr, urlPath, method, secretKey string, t time.Time, signV4Values signValues) s3err.ErrorCode { - // Get canonical request. - canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, urlPath, method) - - // Get string to sign from canonical request. - stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) - - // Get hmac signing key. - signingKey := getSigningKey(secretKey, signV4Values.Credential.scope.date.Format(yyyymmdd), signV4Values.Credential.scope.region, signV4Values.Credential.scope.service) - - // Calculate signature. - newSignature := getSignature(signingKey, stringToSign) - - // Verify if signature match. - if !compareSignatureV4(newSignature, signV4Values.Signature) { - return s3err.ErrSignatureDoesNotMatch - } - - return s3err.ErrNone -} - -// verifyPresignedSignatureWithPath verifies presigned signature with a given path (used for both normal and prefixed paths). -func (iam *IdentityAccessManagement) verifyPresignedSignatureWithPath(extractedSignedHeaders http.Header, hashedPayload, queryStr, urlPath, method, secretKey string, t time.Time, credHeader credentialHeader, signature string) s3err.ErrorCode { - // Get canonical request. - canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, urlPath, method) - - // Get string to sign from canonical request. - stringToSign := getStringToSign(canonicalRequest, t, credHeader.getScope()) - - // Get hmac signing key. - signingKey := getSigningKey(secretKey, credHeader.scope.date.Format(yyyymmdd), credHeader.scope.region, credHeader.scope.service) - - // Calculate expected signature. - expectedSignature := getSignature(signingKey, stringToSign) - - // Verify if signature match. - if !compareSignatureV4(expectedSignature, signature) { - return s3err.ErrSignatureDoesNotMatch - } - - return s3err.ErrNone -} - -// Simple implementation for presigned signature verification -func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { - // Parse presigned signature values from query parameters - query := r.URL.Query() - - // Check required parameters - algorithm := query.Get("X-Amz-Algorithm") - if algorithm != signV4Algorithm { - return nil, s3err.ErrSignatureVersionNotSupported - } - - credential := query.Get("X-Amz-Credential") - if credential == "" { - return nil, s3err.ErrMissingFields - } - - signature := query.Get("X-Amz-Signature") - if signature == "" { - return nil, s3err.ErrMissingFields - } - - signedHeadersStr := query.Get("X-Amz-SignedHeaders") - if signedHeadersStr == "" { - return nil, s3err.ErrMissingFields - } - - dateStr := query.Get("X-Amz-Date") - if dateStr == "" { - return nil, s3err.ErrMissingDateHeader - } - - // Parse credential - credHeader, err := parseCredentialHeader("Credential=" + credential) - if err != s3err.ErrNone { - return nil, err - } - - // Look up identity by access key - identity, foundCred, found := iam.lookupByAccessKey(credHeader.accessKey) - if !found { - return nil, s3err.ErrInvalidAccessKeyID - } - - // Parse date - t, e := time.Parse(iso8601Format, dateStr) - if e != nil { - return nil, s3err.ErrMalformedDate - } - - // Check expiration - expiresStr := query.Get("X-Amz-Expires") - if expiresStr != "" { - expires, parseErr := strconv.ParseInt(expiresStr, 10, 64) - if parseErr != nil { - return nil, s3err.ErrMalformedDate - } - // Check if current time is after the expiration time - expirationTime := t.Add(time.Duration(expires) * time.Second) - if time.Now().UTC().After(expirationTime) { - return nil, s3err.ErrExpiredPresignRequest - } - } - - // Parse signed headers - signedHeaders := strings.Split(signedHeadersStr, ";") - - // Extract signed headers from request - extractedSignedHeaders := make(http.Header) - for _, header := range signedHeaders { - if header == "host" { - extractedSignedHeaders[header] = []string{extractHostHeader(r)} - continue - } - if values := r.Header[http.CanonicalHeaderKey(header)]; len(values) > 0 { - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = values - } - } - - // Remove signature from query for canonical request calculation - queryForCanonical := r.URL.Query() - queryForCanonical.Del("X-Amz-Signature") - queryStr := strings.Replace(queryForCanonical.Encode(), "+", "%20", -1) - - var errCode s3err.ErrorCode - // Check if reverse proxy is forwarding with prefix for presigned URLs - if forwardedPrefix := r.Header.Get("X-Forwarded-Prefix"); forwardedPrefix != "" { - // Try signature verification with the forwarded prefix first. - // This handles cases where reverse proxies strip URL prefixes and add the X-Forwarded-Prefix header. - cleanedPath := buildPathWithForwardedPrefix(forwardedPrefix, r.URL.Path) - errCode = iam.verifyPresignedSignatureWithPath(extractedSignedHeaders, hashedPayload, queryStr, cleanedPath, r.Method, foundCred.SecretKey, t, credHeader, signature) - if errCode == s3err.ErrNone { - return identity, errCode - } - } - - // Try normal signature verification (without prefix) - errCode = iam.verifyPresignedSignatureWithPath(extractedSignedHeaders, hashedPayload, queryStr, r.URL.Path, r.Method, foundCred.SecretKey, t, credHeader, signature) - if errCode == s3err.ErrNone { - return identity, errCode - } - - return nil, errCode -} - -// credentialHeader data type represents structured form of Credential -// string from authorization header. -type credentialHeader struct { - accessKey string - scope struct { - date time.Time - region string - service string - request string - } -} - -func (c credentialHeader) getScope() string { - return strings.Join([]string{ - c.scope.date.Format(yyyymmdd), - c.scope.region, - c.scope.service, - c.scope.request, - }, "/") -} - // parse credentialHeader string into its structured form. func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) { - creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) + creds := strings.Split(strings.TrimSpace(credElement), "=") if len(creds) != 2 { return ch, s3err.ErrMissingFields } @@ -436,22 +277,6 @@ func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.E return cred, s3err.ErrNone } -// Parse signature from signature tag. -func parseSignature(signElement string) (string, s3err.ErrorCode) { - signFields := strings.Split(strings.TrimSpace(signElement), "=") - if len(signFields) != 2 { - return "", s3err.ErrMissingFields - } - if signFields[0] != "Signature" { - return "", s3err.ErrMissingSignTag - } - if signFields[1] == "" { - return "", s3err.ErrMissingFields - } - signature := signFields[1] - return signature, s3err.ErrNone -} - // Parse slice of signed headers from signed headers tag. func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") @@ -468,26 +293,40 @@ func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) { return signedHeaders, s3err.ErrNone } +// Parse signature from signature tag. +func parseSignature(signElement string) (string, s3err.ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", s3err.ErrMissingFields + } + if signFields[0] != "Signature" { + return "", s3err.ErrMissingSignTag + } + if signFields[1] == "" { + return "", s3err.ErrMissingFields + } + signature := signFields[1] + return signature, s3err.ErrNone +} + +// doesPolicySignatureMatch - Verify query headers with post policy +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// returns ErrNone if the signature matches. func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode { // Parse credential tag. credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential")) if err != s3err.ErrNone { - return err + return s3err.ErrMissingFields } - identity, cred, found := iam.lookupByAccessKey(credHeader.accessKey) + _, cred, found := iam.lookupByAccessKey(credHeader.accessKey) if !found { return s3err.ErrInvalidAccessKeyID } - bucket := formValues.Get("bucket") - if !identity.canDo(s3_constants.ACTION_WRITE, bucket, "") { - return s3err.ErrAccessDenied - } - // Get signing key. - signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date.Format(yyyymmdd), credHeader.scope.region, credHeader.scope.service) + signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service) // Get signature. newSignature := getSignature(signingKey, formValues.Get("Policy")) @@ -496,69 +335,299 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http. if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) { return s3err.ErrSignatureDoesNotMatch } + + // Success. return s3err.ErrNone } -// Verify if extracted signed headers are not properly signed. +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != s3err.ErrNone { + return nil, err + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + // Construct new query. + query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, s3err.ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, s3err.ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. + query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, s3err.ErrContentSHA256Mismatch + } + } + + // / Verify finally if signature is same. + + // Get canonical request. + presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := getSigningKey(cred.SecretKey, + pSignValues.Credential.scope.date, + pSignValues.Credential.scope.region, + pSignValues.Credential.scope.service) + + // Get new signature. + newSignature := getSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + return identity, s3err.ErrNone +} + +func contains(list []string, elem string) bool { + for _, t := range list { + if t == elem { + return true + } + } + return false +} + +// preSignValues data type represents structued form of AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. +// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. +func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode { + v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return s3err.ErrInvalidQueryParams + } + } + return s3err.ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) { + var err s3err.ErrorCode + // verify whether the required query params exist. + err = doesV4PresignParamsExist(query) + if err != s3err.ErrNone { + return psv, err + } + + // Verify if the query algorithm is supported or not. + if query.Get("X-Amz-Algorithm") != signV4Algorithm { + return psv, s3err.ErrInvalidQuerySignatureAlgo + } + + // Initialize signature version '4' structured header. + preSignV4Values := preSignValues{} + + // Save credential. + preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential")) + if err != s3err.ErrNone { + return psv, err + } + + var e error + // Save date in native time.Time. + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date")) + if e != nil { + return psv, s3err.ErrMalformedPresignedDate + } + + // Save expires in native time.Duration. + preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s") + if e != nil { + return psv, s3err.ErrMalformedExpires + } + + if preSignV4Values.Expires < 0 { + return psv, s3err.ErrNegativeExpires + } + + // Check if Expiry time is less than 7 days (value in seconds). + if preSignV4Values.Expires.Seconds() > 604800 { + return psv, s3err.ErrMaximumExpires + } + + // Save signed headers. + preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders")) + if err != s3err.ErrNone { + return psv, err + } + + // Save signature. + preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature")) + if err != s3err.ErrNone { + return psv, err + } + + // Return structed form of signature query string. + return preSignV4Values, s3err.ErrNone +} + +// extractSignedHeaders extract signed headers from Authorization header func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) { reqHeaders := r.Header - // If no signed headers are provided, then return an error. - if len(signedHeaders) == 0 { - return nil, s3err.ErrMissingFields + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, s3err.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { - // `host` is not a case-sensitive header, unlike other headers such as `x-amz-date`. - if header == "host" { - // Get host value. - hostHeaderValue := extractHostHeader(r) - extractedSignedHeaders[header] = []string{hostHeaderValue} + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if ok { + for _, enc := range val { + extractedSignedHeaders.Add(header, enc) + } continue } - // For all other headers we need to find them in the HTTP headers and copy them over. - // We skip non-existent headers to be compatible with AWS signatures. - if values, ok := reqHeaders[http.CanonicalHeaderKey(header)]; ok { - extractedSignedHeaders[header] = values + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + for _, enc := range r.TransferEncoding { + extractedSignedHeaders.Add(header, enc) + } + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, s3err.ErrUnsignedHeaders } } return extractedSignedHeaders, s3err.ErrNone } -// extractHostHeader returns the value of host header if available. -func extractHostHeader(r *http.Request) string { - // Check for X-Forwarded-Host header first, which is set by reverse proxies - if forwardedHost := r.Header.Get("X-Forwarded-Host"); forwardedHost != "" { - // Check if reverse proxy also forwarded the port - if forwardedPort := r.Header.Get("X-Forwarded-Port"); forwardedPort != "" { - // Determine the protocol to check for standard ports - proto := r.Header.Get("X-Forwarded-Proto") - // Only add port if it's not the standard port for the protocol - if (proto == "https" && forwardedPort != "443") || (proto != "https" && forwardedPort != "80") { - return forwardedHost + ":" + forwardedPort - } - } - // Using reverse proxy with X-Forwarded-Host (standard port or no port forwarded). - return forwardedHost +// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func getSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) } - - hostHeaderValue := r.Host - // For standard requests, this should be fine. - if r.Host != "" { - return hostHeaderValue - } - // If no host header is found, then check for host URL value. - if r.URL.Host != "" { - hostHeaderValue = r.URL.Host - } - return hostHeaderValue + sort.Strings(headers) + return strings.Join(headers, ";") } // getScope generate a string of a specific date, an AWS region, and a service. -func getScope(t time.Time, region string, service string) string { +func getScope(t time.Time, region string) string { scope := strings.Join([]string{ t.Format(yyyymmdd), region, - service, + "s3", "aws4_request", }, "/") return scope @@ -567,13 +636,13 @@ func getScope(t time.Time, region string, service string) string { // getCanonicalRequest generate a canonical request of style // // canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// // -// \n -// \n -// \n -// \n -// \n -// func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { rawQuery := strings.Replace(queryStr, "+", "%20", -1) encodedPath := encodePath(urlPath) @@ -592,16 +661,11 @@ func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, func getStringToSign(canonicalRequest string, t time.Time, scope string) string { stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" - stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) return stringToSign } -// getSHA256Hash returns hex-encoded SHA256 hash of the input data. -func getSHA256Hash(data []byte) string { - hash := sha256.Sum256(data) - return hex.EncodeToString(hash[:]) -} - // sumHMAC calculate hmac between two input byte array. func sumHMAC(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) @@ -610,24 +674,27 @@ func sumHMAC(key []byte, data []byte) []byte { } // getSigningKey hmac seed to calculate final signature. -func getSigningKey(secretKey string, time string, region string, service string) []byte { - date := sumHMAC([]byte("AWS4"+secretKey), []byte(time)) +func getSigningKey(secretKey string, t time.Time, region string, service string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) regionBytes := sumHMAC(date, []byte(region)) serviceBytes := sumHMAC(regionBytes, []byte(service)) signingKey := sumHMAC(serviceBytes, []byte("aws4_request")) return signingKey } +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + // getCanonicalHeaders generate a list of request headers with their values func getCanonicalHeaders(signedHeaders http.Header) string { var headers []string vals := make(http.Header) for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) vals[strings.ToLower(k)] = vv } - for k := range vals { - headers = append(headers, k) - } sort.Strings(headers) var buf bytes.Buffer @@ -645,28 +712,18 @@ func getCanonicalHeaders(signedHeaders http.Header) string { return buf.String() } -// signV4TrimAll trims leading and trailing spaces from each string in the slice, and trims sequential spaces. +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html func signV4TrimAll(input string) string { // Compress adjacent spaces (a space is determined by - // unicode.IsSpace() internally here) to a single space and trim - // leading and trailing spaces. + // unicode.IsSpace() internally here) to one space and return return strings.Join(strings.Fields(input), " ") } -// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names -func getSignedHeaders(signedHeaders http.Header) string { - var headers []string - for k := range signedHeaders { - headers = append(headers, strings.ToLower(k)) - } - sort.Strings(headers) - return strings.Join(headers, ";") -} - // if object matches reserved string, no need to encode them var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") -// encodePath encodes the strings from UTF-8 byte representations to HTML hex escape sequences +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences // // This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 // non english characters cannot be parsed due to the nature in which url.Encode() is written @@ -681,38 +738,34 @@ func encodePath(pathName string) string { for _, s := range pathName { if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // ยง2.3 Unreserved characters (mark) encodedPathname = encodedPathname + string(s) - } else { - switch s { - case '-', '_', '.', '~', '/': // ยง2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - default: - runeLen := utf8.RuneLen(s) - if runeLen < 0 { - return pathName - } - u := make([]byte, runeLen) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } + continue + } + switch s { + case '-', '_', '.', '~', '/': // ยง2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) } } } return encodedPathname } -// getSignature final signature in hexadecimal form. -func getSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - // compareSignatureV4 returns true if and only if both signatures -// are equal. The signatures are expected to be hex-encoded strings +// are equal. The signatures are expected to be HEX encoded strings // according to the AWS S3 signature V4 spec. func compareSignatureV4(sig1, sig2 string) bool { - // The CTC using []byte(str) works because the hex encoding doesn't use - // non-ASCII characters. Otherwise, we'd need to convert the strings to - // a []rune of UTF-8 characters. + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 } diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go index bf11a0906..a58551187 100644 --- a/weed/s3api/auto_signature_v4_test.go +++ b/weed/s3api/auto_signature_v4_test.go @@ -6,25 +6,22 @@ import ( "crypto/sha256" "encoding/base64" "encoding/hex" + "errors" "fmt" "io" "net/http" + "net/url" "sort" + "strconv" "strings" - "sync" "testing" "time" "unicode/utf8" - "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) -// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature version v4 detection. +// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. func TestIsRequestPresignedSignatureV4(t *testing.T) { testCases := []struct { inputQueryKey string @@ -43,7 +40,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) { for i, testCase := range testCases { // creating an input HTTP request. // Only the query parameters are relevant for this particular test. - inputReq, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + inputReq, err := http.NewRequest("GET", "http://example.com", nil) if err != nil { t.Fatalf("Error initializing input HTTP request: %v", err) } @@ -60,24 +57,20 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) { // Tests is requested authenticated function, tests replies for s3 errors. func TestIsReqAuthenticated(t *testing.T) { - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - _ = iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "someone", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access_key_1", - SecretKey: "secret_key_1", - }, + option := S3ApiServerOption{} + iam := NewIdentityAccessManagement(&option) + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", }, - Actions: []string{"Read", "Write"}, }, + Actions: nil, }, - }) + } // List of test cases for validating http request authentication. testCases := []struct { @@ -85,9 +78,9 @@ func TestIsReqAuthenticated(t *testing.T) { s3Error s3err.ErrorCode }{ // When request is unsigned, access denied is returned. - {mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied}, + {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied}, // When request is properly signed, error is none. - {mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone}, + {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone}, } // Validates all testcases. @@ -99,65 +92,29 @@ func TestIsReqAuthenticated(t *testing.T) { } } -func TestCheckaAnonymousRequestAuthType(t *testing.T) { - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - _ = iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "anonymous", - Actions: []string{s3_constants.ACTION_READ}, - }, - }, - }) - testCases := []struct { - Request *http.Request - ErrCode s3err.ErrorCode - Action Action - }{ - {Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrNone, Action: s3_constants.ACTION_READ}, - {Request: mustNewRequest(http.MethodPut, "http://127.0.0.1:9000/bucket", 0, nil, t), ErrCode: s3err.ErrAccessDenied, Action: s3_constants.ACTION_WRITE}, - } - for i, testCase := range testCases { - _, s3Error := iam.authRequest(testCase.Request, testCase.Action) - if s3Error != testCase.ErrCode { - t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error) - } - if testCase.Request.Header.Get(s3_constants.AmzAuthType) != "Anonymous" { - t.Errorf("Test %d: Unexpected AuthType returned wanted %s, got %s", i, "Anonymous", testCase.Request.Header.Get(s3_constants.AmzAuthType)) - } - } - -} - func TestCheckAdminRequestAuthType(t *testing.T) { - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - _ = iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "someone", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "access_key_1", - SecretKey: "secret_key_1", - }, + option := S3ApiServerOption{} + iam := NewIdentityAccessManagement(&option) + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", }, - Actions: []string{"Admin", "Read", "Write"}, }, + Actions: nil, }, - }) + } + testCases := []struct { Request *http.Request ErrCode s3err.ErrorCode }{ - {Request: mustNewRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied}, - {Request: mustNewSignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, - {Request: mustNewPresignedRequest(iam, http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, + {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied}, + {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, + {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, } for i, testCase := range testCases { if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode { @@ -166,17 +123,6 @@ func TestCheckAdminRequestAuthType(t *testing.T) { } } -func BenchmarkGetSignature(b *testing.B) { - t := time.Now() - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - signingKey := getSigningKey("secret-key", t.Format(yyyymmdd), "us-east-1", "s3") - getSignature(signingKey, "random data") - } -} - // Provides a fully populated http request instance, fails otherwise. func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { req, err := newTestRequest(method, urlStr, contentLength, body) @@ -192,555 +138,26 @@ func mustNewSignedRequest(method string, urlStr string, contentLength int64, bod req := mustNewRequest(method, urlStr, contentLength, body, t) cred := &Credential{"access_key_1", "secret_key_1"} if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { - t.Fatalf("Unable to initialized new signed http request %s", err) + t.Fatalf("Unable to inititalized new signed http request %s", err) } return req } // This is similar to mustNewRequest but additionally the request // is presigned with AWS Signature V4, fails if not able to do so. -func mustNewPresignedRequest(iam *IdentityAccessManagement, method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { +func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { req := mustNewRequest(method, urlStr, contentLength, body, t) cred := &Credential{"access_key_1", "secret_key_1"} - if err := preSignV4(iam, req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { - t.Fatalf("Unable to initialized new signed http request %s", err) + if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) } return req } -// preSignV4 adds presigned URL parameters to the request -func preSignV4(iam *IdentityAccessManagement, req *http.Request, accessKey, secretKey string, expires int64) error { - // Create credential scope - now := time.Now().UTC() - dateStr := now.Format(iso8601Format) - - // Create credential header - scope := fmt.Sprintf("%s/%s/%s/%s", now.Format(yyyymmdd), "us-east-1", "s3", "aws4_request") - credential := fmt.Sprintf("%s/%s", accessKey, scope) - - // Get the query parameters - query := req.URL.Query() - query.Set("X-Amz-Algorithm", signV4Algorithm) - query.Set("X-Amz-Credential", credential) - query.Set("X-Amz-Date", dateStr) - query.Set("X-Amz-Expires", fmt.Sprintf("%d", expires)) - query.Set("X-Amz-SignedHeaders", "host") - - // Set the query on the URL (without signature yet) - req.URL.RawQuery = query.Encode() - - // Get the payload hash - hashedPayload := getContentSha256Cksum(req) - - // Extract signed headers - extractedSignedHeaders := make(http.Header) - extractedSignedHeaders["host"] = []string{req.Host} - - // Get canonical request - canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, req.URL.RawQuery, req.URL.Path, req.Method) - - // Get string to sign - stringToSign := getStringToSign(canonicalRequest, now, scope) - - // Get signing key - signingKey := getSigningKey(secretKey, now.Format(yyyymmdd), "us-east-1", "s3") - - // Calculate signature - signature := getSignature(signingKey, stringToSign) - - // Add signature to query - query.Set("X-Amz-Signature", signature) - req.URL.RawQuery = query.Encode() - - return nil -} - -// newTestIAM creates a test IAM with a standard test user -func newTestIAM() *IdentityAccessManagement { - iam := &IdentityAccessManagement{} - iam.identities = []*Identity{ - { - Name: "testuser", - Credentials: []*Credential{{AccessKey: "AKIAIOSFODNN7EXAMPLE", SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}}, - Actions: []Action{s3_constants.ACTION_ADMIN, s3_constants.ACTION_READ, s3_constants.ACTION_WRITE}, - }, - } - // Initialize the access key map for lookup - iam.accessKeyIdent = make(map[string]*Identity) - iam.accessKeyIdent["AKIAIOSFODNN7EXAMPLE"] = iam.identities[0] - return iam -} - -// Test X-Forwarded-Prefix support for reverse proxy scenarios -func TestSignatureV4WithForwardedPrefix(t *testing.T) { - tests := []struct { - name string - forwardedPrefix string - expectedPath string - }{ - { - name: "prefix without trailing slash", - forwardedPrefix: "/s3", - expectedPath: "/s3/test-bucket/test-object", - }, - { - name: "prefix with trailing slash", - forwardedPrefix: "/s3/", - expectedPath: "/s3/test-bucket/test-object", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - iam := newTestIAM() - - // Create a request with X-Forwarded-Prefix header - r, err := newTestRequest("GET", "https://example.com/test-bucket/test-object", 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Set the mux variables manually since we're not going through the actual router - r = mux.SetURLVars(r, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - r.Header.Set("X-Forwarded-Prefix", tt.forwardedPrefix) - r.Header.Set("Host", "example.com") - r.Header.Set("X-Forwarded-Host", "example.com") - - // Sign the request with the expected normalized path - signV4WithPath(r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", tt.expectedPath) - - // Test signature verification - _, errCode := iam.doesSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful signature validation with X-Forwarded-Prefix %q, got error: %v (code: %d)", tt.forwardedPrefix, errCode, int(errCode)) - } - }) - } -} - -// Test X-Forwarded-Prefix with trailing slash preservation (GitHub issue #7223) -// This tests the specific bug where S3 SDK signs paths with trailing slashes -// but path.Clean() would remove them, causing signature verification to fail -func TestSignatureV4WithForwardedPrefixTrailingSlash(t *testing.T) { - tests := []struct { - name string - forwardedPrefix string - urlPath string - expectedPath string - }{ - { - name: "bucket listObjects with trailing slash", - forwardedPrefix: "/oss-sf-nnct", - urlPath: "/s3user-bucket1/", - expectedPath: "/oss-sf-nnct/s3user-bucket1/", - }, - { - name: "prefix path with trailing slash", - forwardedPrefix: "/s3", - urlPath: "/my-bucket/folder/", - expectedPath: "/s3/my-bucket/folder/", - }, - { - name: "root bucket with trailing slash", - forwardedPrefix: "/api/s3", - urlPath: "/test-bucket/", - expectedPath: "/api/s3/test-bucket/", - }, - { - name: "nested folder with trailing slash", - forwardedPrefix: "/storage", - urlPath: "/bucket/path/to/folder/", - expectedPath: "/storage/bucket/path/to/folder/", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - iam := newTestIAM() - - // Create a request with the URL path that has a trailing slash - r, err := newTestRequest("GET", "https://example.com"+tt.urlPath, 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Manually set the URL path with trailing slash to ensure it's preserved - r.URL.Path = tt.urlPath - - r.Header.Set("X-Forwarded-Prefix", tt.forwardedPrefix) - r.Header.Set("Host", "example.com") - r.Header.Set("X-Forwarded-Host", "example.com") - - // Sign the request with the full path including the trailing slash - // This simulates what S3 SDK does for listObjects operations - signV4WithPath(r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", tt.expectedPath) - - // Test signature verification - this should succeed even with trailing slashes - _, errCode := iam.doesSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful signature validation with trailing slash in path %q, got error: %v (code: %d)", tt.urlPath, errCode, int(errCode)) - } - }) - } -} - -// Test X-Forwarded-Port support for reverse proxy scenarios -func TestSignatureV4WithForwardedPort(t *testing.T) { - tests := []struct { - name string - host string - forwardedHost string - forwardedPort string - forwardedProto string - expectedHost string - }{ - { - name: "HTTP with non-standard port", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "8080", - forwardedProto: "http", - expectedHost: "example.com:8080", - }, - { - name: "HTTPS with non-standard port", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "8443", - forwardedProto: "https", - expectedHost: "example.com:8443", - }, - { - name: "HTTP with standard port (80)", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "80", - forwardedProto: "http", - expectedHost: "example.com", - }, - { - name: "HTTPS with standard port (443)", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "443", - forwardedProto: "https", - expectedHost: "example.com", - }, - { - name: "empty proto with non-standard port", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "8080", - forwardedProto: "", - expectedHost: "example.com:8080", - }, - { - name: "empty proto with standard http port", - host: "backend:8333", - forwardedHost: "example.com", - forwardedPort: "80", - forwardedProto: "", - expectedHost: "example.com", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - iam := newTestIAM() - - // Create a request - r, err := newTestRequest("GET", "https://"+tt.host+"/test-bucket/test-object", 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Set the mux variables manually since we're not going through the actual router - r = mux.SetURLVars(r, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Set forwarded headers - r.Header.Set("Host", tt.host) - r.Header.Set("X-Forwarded-Host", tt.forwardedHost) - r.Header.Set("X-Forwarded-Port", tt.forwardedPort) - r.Header.Set("X-Forwarded-Proto", tt.forwardedProto) - - // Sign the request with the expected host header - // We need to temporarily modify the Host header for signing - signV4WithPath(r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", r.URL.Path) - - // Test signature verification - _, errCode := iam.doesSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful signature validation with forwarded port, got error: %v (code: %d)", errCode, int(errCode)) - } - }) - } -} - -// Test basic presigned URL functionality without prefix -func TestPresignedSignatureV4Basic(t *testing.T) { - iam := newTestIAM() - - // Create a presigned request without X-Forwarded-Prefix header - r, err := newTestRequest("GET", "https://example.com/test-bucket/test-object", 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Set the mux variables manually since we're not going through the actual router - r = mux.SetURLVars(r, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - r.Header.Set("Host", "example.com") - - // Create presigned URL with the normal path (no prefix) - err = preSignV4WithPath(iam, r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 3600, r.URL.Path) - if err != nil { - t.Errorf("Failed to presign request: %v", err) - } - - // Test presigned signature verification - _, errCode := iam.doesPresignedSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful presigned signature validation, got error: %v (code: %d)", errCode, int(errCode)) - } -} - -// Test X-Forwarded-Prefix support for presigned URLs -func TestPresignedSignatureV4WithForwardedPrefix(t *testing.T) { - tests := []struct { - name string - forwardedPrefix string - originalPath string - expectedPath string - }{ - { - name: "prefix without trailing slash", - forwardedPrefix: "/s3", - originalPath: "/s3/test-bucket/test-object", - expectedPath: "/s3/test-bucket/test-object", - }, - { - name: "prefix with trailing slash", - forwardedPrefix: "/s3/", - originalPath: "/s3/test-bucket/test-object", - expectedPath: "/s3/test-bucket/test-object", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - iam := newTestIAM() - - // Create a presigned request that simulates reverse proxy scenario: - // 1. Client generates presigned URL with prefixed path - // 2. Proxy strips prefix and forwards to SeaweedFS with X-Forwarded-Prefix header - - // Start with the original request URL (what client sees) - r, err := newTestRequest("GET", "https://example.com"+tt.originalPath, 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Generate presigned URL with the original prefixed path - err = preSignV4WithPath(iam, r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 3600, tt.originalPath) - if err != nil { - t.Errorf("Failed to presign request: %v", err) - return - } - - // Now simulate what the reverse proxy does: - // 1. Strip the prefix from the URL path - r.URL.Path = "/test-bucket/test-object" - - // 2. Set the mux variables for the stripped path - r = mux.SetURLVars(r, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // 3. Add the forwarded headers - r.Header.Set("X-Forwarded-Prefix", tt.forwardedPrefix) - r.Header.Set("Host", "example.com") - r.Header.Set("X-Forwarded-Host", "example.com") - - // Test presigned signature verification - _, errCode := iam.doesPresignedSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful presigned signature validation with X-Forwarded-Prefix %q, got error: %v (code: %d)", tt.forwardedPrefix, errCode, int(errCode)) - } - }) - } -} - -// Test X-Forwarded-Prefix with trailing slash preservation for presigned URLs (GitHub issue #7223) -func TestPresignedSignatureV4WithForwardedPrefixTrailingSlash(t *testing.T) { - tests := []struct { - name string - forwardedPrefix string - originalPath string - strippedPath string - }{ - { - name: "bucket listObjects with trailing slash", - forwardedPrefix: "/oss-sf-nnct", - originalPath: "/oss-sf-nnct/s3user-bucket1/", - strippedPath: "/s3user-bucket1/", - }, - { - name: "prefix path with trailing slash", - forwardedPrefix: "/s3", - originalPath: "/s3/my-bucket/folder/", - strippedPath: "/my-bucket/folder/", - }, - { - name: "api path with trailing slash", - forwardedPrefix: "/api/s3", - originalPath: "/api/s3/test-bucket/", - strippedPath: "/test-bucket/", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - iam := newTestIAM() - - // Create a presigned request that simulates reverse proxy scenario with trailing slashes: - // 1. Client generates presigned URL with prefixed path including trailing slash - // 2. Proxy strips prefix and forwards to SeaweedFS with X-Forwarded-Prefix header - - // Start with the original request URL (what client sees) with trailing slash - r, err := newTestRequest("GET", "https://example.com"+tt.originalPath, 0, nil) - if err != nil { - t.Fatalf("Failed to create test request: %v", err) - } - - // Generate presigned URL with the original prefixed path including trailing slash - err = preSignV4WithPath(iam, r, "AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 3600, tt.originalPath) - if err != nil { - t.Errorf("Failed to presign request: %v", err) - return - } - - // Now simulate what the reverse proxy does: - // 1. Strip the prefix from the URL path but preserve the trailing slash - r.URL.Path = tt.strippedPath - - // 2. Add the forwarded headers - r.Header.Set("X-Forwarded-Prefix", tt.forwardedPrefix) - r.Header.Set("Host", "example.com") - r.Header.Set("X-Forwarded-Host", "example.com") - - // Test presigned signature verification - this should succeed with trailing slashes - _, errCode := iam.doesPresignedSignatureMatch(getContentSha256Cksum(r), r) - if errCode != s3err.ErrNone { - t.Errorf("Expected successful presigned signature validation with trailing slash in path %q, got error: %v (code: %d)", tt.strippedPath, errCode, int(errCode)) - } - }) - } -} - -// preSignV4WithPath adds presigned URL parameters to the request with a custom path -func preSignV4WithPath(iam *IdentityAccessManagement, req *http.Request, accessKey, secretKey string, expires int64, urlPath string) error { - // Create credential scope - now := time.Now().UTC() - dateStr := now.Format(iso8601Format) - - // Create credential header - scope := fmt.Sprintf("%s/%s/%s/%s", now.Format(yyyymmdd), "us-east-1", "s3", "aws4_request") - credential := fmt.Sprintf("%s/%s", accessKey, scope) - - // Get the query parameters - query := req.URL.Query() - query.Set("X-Amz-Algorithm", signV4Algorithm) - query.Set("X-Amz-Credential", credential) - query.Set("X-Amz-Date", dateStr) - query.Set("X-Amz-Expires", fmt.Sprintf("%d", expires)) - query.Set("X-Amz-SignedHeaders", "host") - - // Set the query on the URL (without signature yet) - req.URL.RawQuery = query.Encode() - - // Get the payload hash - hashedPayload := getContentSha256Cksum(req) - - // Extract signed headers - extractedSignedHeaders := make(http.Header) - extractedSignedHeaders["host"] = []string{extractHostHeader(req)} - - // Get canonical request with custom path - canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, req.URL.RawQuery, urlPath, req.Method) - - // Get string to sign - stringToSign := getStringToSign(canonicalRequest, now, scope) - - // Get signing key - signingKey := getSigningKey(secretKey, now.Format(yyyymmdd), "us-east-1", "s3") - - // Calculate signature - signature := getSignature(signingKey, stringToSign) - - // Add signature to query - query.Set("X-Amz-Signature", signature) - req.URL.RawQuery = query.Encode() - - return nil -} - -// signV4WithPath signs a request with a custom path -func signV4WithPath(req *http.Request, accessKey, secretKey, urlPath string) { - // Create credential scope - now := time.Now().UTC() - dateStr := now.Format(iso8601Format) - - // Set required headers - req.Header.Set("X-Amz-Date", dateStr) - - // Create credential header - scope := fmt.Sprintf("%s/%s/%s/%s", now.Format(yyyymmdd), "us-east-1", "s3", "aws4_request") - credential := fmt.Sprintf("%s/%s", accessKey, scope) - - // Get signed headers - signedHeaders := "host;x-amz-date" - - // Extract signed headers - extractedSignedHeaders := make(http.Header) - extractedSignedHeaders["host"] = []string{extractHostHeader(req)} - extractedSignedHeaders["x-amz-date"] = []string{dateStr} - - // Get the payload hash - hashedPayload := getContentSha256Cksum(req) - - // Get canonical request with custom path - canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, req.URL.RawQuery, urlPath, req.Method) - - // Get string to sign - stringToSign := getStringToSign(canonicalRequest, now, scope) - - // Get signing key - signingKey := getSigningKey(secretKey, now.Format(yyyymmdd), "us-east-1", "s3") - - // Calculate signature - signature := getSignature(signingKey, stringToSign) - - // Set Authorization header - authorization := fmt.Sprintf("%s Credential=%s, SignedHeaders=%s, Signature=%s", - signV4Algorithm, credential, signedHeaders, signature) - req.Header.Set("Authorization", authorization) -} - // Returns new HTTP request object. func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { if method == "" { - method = http.MethodPost + method = "POST" } // Save for subsequent use @@ -778,12 +195,17 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek return req, nil } +// getSHA256Hash returns SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + // getMD5HashBase64 returns MD5 hash in base64 encoding of given data. func getMD5HashBase64(data []byte) string { return base64.StdEncoding.EncodeToString(getMD5Sum(data)) } -// getSHA256Sum returns SHA-256 sum of given data. +// getSHA256Hash returns SHA-256 sum of given data. func getSHA256Sum(data []byte) []byte { hash := sha256.New() hash.Write(data) @@ -809,73 +231,6 @@ var ignoredHeaders = map[string]bool{ "User-Agent": true, } -// Tests the test helper with an example from the AWS Doc. -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// This time it's a PUT request uploading the file with content "Welcome to Amazon S3." -func TestGetStringToSignPUT(t *testing.T) { - - canonicalRequest := `PUT -/test%24file.text - -date:Fri, 24 May 2013 00:00:00 GMT -host:examplebucket.s3.amazonaws.com -x-amz-content-sha256:44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072 -x-amz-date:20130524T000000Z -x-amz-storage-class:REDUCED_REDUNDANCY - -date;host;x-amz-content-sha256;x-amz-date;x-amz-storage-class -44ce7dd67c959e0d3524ffac1771dfbba87d2b6b4b4e99e42034a8b803f8b072` - - date, err := time.Parse(iso8601Format, "20130524T000000Z") - - if err != nil { - t.Fatalf("Error parsing date: %v", err) - } - - scope := "20130524/us-east-1/s3/aws4_request" - stringToSign := getStringToSign(canonicalRequest, date, scope) - - expected := `AWS4-HMAC-SHA256 -20130524T000000Z -20130524/us-east-1/s3/aws4_request -9e0e90d9c76de8fa5b200d8c849cd5b8dc7a3be3951ddb7f6a76b4158342019d` - - assert.Equal(t, expected, stringToSign) -} - -// Tests the test helper with an example from the AWS Doc. -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// The GET request example with empty string hash. -func TestGetStringToSignGETEmptyStringHash(t *testing.T) { - - canonicalRequest := `GET -/test.txt - -host:examplebucket.s3.amazonaws.com -range:bytes=0-9 -x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -x-amz-date:20130524T000000Z - -host;range;x-amz-content-sha256;x-amz-date -e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - date, err := time.Parse(iso8601Format, "20130524T000000Z") - - if err != nil { - t.Fatalf("Error parsing date: %v", err) - } - - scope := "20130524/us-east-1/s3/aws4_request" - stringToSign := getStringToSign(canonicalRequest, date, scope) - - expected := `AWS4-HMAC-SHA256 -20130524T000000Z -20130524/us-east-1/s3/aws4_request -7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972` - - assert.Equal(t, expected, stringToSign) -} - // Sign given request using Signature V4. func signRequestV4(req *http.Request, accessKey, secretKey string) error { // Get hashed payload. @@ -986,6 +341,47 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { return nil } +// preSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return errors.New("Presign cannot be generated without access and secret keys") + } + + region := "us-east-1" + date := time.Now().UTC() + scope := getScope(date, region) + credential := fmt.Sprintf("%s/%s", accessKeyID, scope) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", date.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", "host") + query.Set("X-Amz-Credential", credential) + query.Set("X-Amz-Content-Sha256", unsignedPayload) + + // "host" is the only header required to be signed for Presigned URLs. + extractedSignedHeaders := make(http.Header) + extractedSignedHeaders.Set("host", req.Host) + + queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) + stringToSign := getStringToSign(canonicalRequest, date, scope) + signingKey := getSigningKey(secretAccessKey, date, region, "s3") + signature := getSignature(signingKey, stringToSign) + + req.URL.RawQuery = query.Encode() + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature) + + // Construct the final presigned URL. + return nil +} + // EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences // // This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 @@ -1008,12 +404,12 @@ func EncodePath(pathName string) string { encodedPathname = encodedPathname + string(s) continue default: - runeLen := utf8.RuneLen(s) - if runeLen < 0 { + len := utf8.RuneLen(s) + if len < 0 { // if utf8 cannot convert return the same string as is return pathName } - u := make([]byte, runeLen) + u := make([]byte, len) utf8.EncodeRune(u, s) for _, r := range u { hex := hex.EncodeToString([]byte{r}) @@ -1023,611 +419,3 @@ func EncodePath(pathName string) string { } return encodedPathname } - -// Test that IAM requests correctly compute payload hash from request body -// This addresses the regression described in GitHub issue #7080 -func TestIAMPayloadHashComputation(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration with a user - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Test payload for IAM request (typical CreateAccessKey request) - testPayload := "Action=CreateAccessKey&UserName=testuser&Version=2010-05-08" - - // Create request with body (typical IAM request) - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(testPayload)) - assert.NoError(t, err) - - // Set required headers for IAM request - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8111") - - // Compute expected payload hash - expectedHash := sha256.Sum256([]byte(testPayload)) - expectedHashStr := hex.EncodeToString(expectedHash[:]) - - // Create an IAM-style authorization header with "iam" service instead of "s3" - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/iam/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - - // Create authorization header with "iam" service (this is the key difference from S3) - authHeader := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=dummysignature" - req.Header.Set("Authorization", authHeader) - - // Test the doesSignatureMatch function directly - // This should now compute the correct payload hash for IAM requests - identity, errCode := iam.doesSignatureMatch(expectedHashStr, req) - - // Even though the signature will fail (dummy signature), - // the fact that we get past the credential parsing means the payload hash was computed correctly - // We expect ErrSignatureDoesNotMatch because we used a dummy signature, - // but NOT ErrAccessDenied or other auth errors - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) - - // More importantly, test that the request body is preserved after reading - // The fix should restore the body after reading it - bodyBytes := make([]byte, len(testPayload)) - n, err := req.Body.Read(bodyBytes) - assert.NoError(t, err) - assert.Equal(t, len(testPayload), n) - assert.Equal(t, testPayload, string(bodyBytes)) -} - -// Test that S3 requests still work correctly (no regression) -func TestS3PayloadHashNoRegression(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Create S3 request (no body, should use emptySHA256) - req, err := http.NewRequest("GET", "http://localhost:8333/bucket/object", nil) - assert.NoError(t, err) - - req.Header.Set("Host", "localhost:8333") - - // Create S3-style authorization header with "s3" service - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/s3/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - req.Header.Set("X-Amz-Content-Sha256", emptySHA256) - - authHeader := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/" + credentialScope + - ", SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=dummysignature" - req.Header.Set("Authorization", authHeader) - - // This should use the emptySHA256 hash and not try to read the body - identity, errCode := iam.doesSignatureMatch(emptySHA256, req) - - // Should get signature mismatch (because of dummy signature) but not other errors - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) -} - -// Test edge case: IAM request with empty body should still use emptySHA256 -func TestIAMEmptyBodyPayloadHash(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Create IAM request with empty body - req, err := http.NewRequest("POST", "http://localhost:8111/", bytes.NewReader([]byte{})) - assert.NoError(t, err) - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8111") - - // Create IAM-style authorization header - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/iam/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - - authHeader := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=dummysignature" - req.Header.Set("Authorization", authHeader) - - // Even with an IAM request, empty body should result in emptySHA256 - identity, errCode := iam.doesSignatureMatch(emptySHA256, req) - - // Should get signature mismatch (because of dummy signature) but not other errors - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) -} - -// Test that non-S3 services (like STS) also get payload hash computation -func TestSTSPayloadHashComputation(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Test payload for STS request (AssumeRole request) - testPayload := "Action=AssumeRole&RoleArn=arn:aws:iam::123456789012:role/TestRole&RoleSessionName=test&Version=2011-06-15" - - // Create request with body (typical STS request) - req, err := http.NewRequest("POST", "http://localhost:8112/", strings.NewReader(testPayload)) - assert.NoError(t, err) - - // Set required headers for STS request - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8112") - - // Compute expected payload hash - expectedHash := sha256.Sum256([]byte(testPayload)) - expectedHashStr := hex.EncodeToString(expectedHash[:]) - - // Create an STS-style authorization header with "sts" service - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/sts/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - - authHeader := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=dummysignature" - req.Header.Set("Authorization", authHeader) - - // Test the doesSignatureMatch function - // This should compute the correct payload hash for STS requests (non-S3 service) - identity, errCode := iam.doesSignatureMatch(expectedHashStr, req) - - // Should get signature mismatch (dummy signature) but payload hash should be computed correctly - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) - - // Verify body is preserved after reading - bodyBytes := make([]byte, len(testPayload)) - n, err := req.Body.Read(bodyBytes) - assert.NoError(t, err) - assert.Equal(t, len(testPayload), n) - assert.Equal(t, testPayload, string(bodyBytes)) -} - -// Test the specific scenario from GitHub issue #7080 -func TestGitHubIssue7080Scenario(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration matching the issue scenario - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "testkey", - SecretKey: "testsecret", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Simulate the payload from the GitHub issue (CreateAccessKey request) - testPayload := "Action=CreateAccessKey&UserName=admin&Version=2010-05-08" - - // Create the request that was failing - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(testPayload)) - assert.NoError(t, err) - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8111") - - // Create authorization header with IAM service (this was the failing case) - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/iam/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - - authHeader := "AWS4-HMAC-SHA256 Credential=testkey/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=testsignature" - req.Header.Set("Authorization", authHeader) - - // Before the fix, this would have failed with payload hash mismatch - // After the fix, it should properly compute the payload hash and proceed to signature verification - - // Since we're using a dummy signature, we expect signature mismatch, but the important - // thing is that it doesn't fail earlier due to payload hash computation issues - identity, errCode := iam.doesSignatureMatch(emptySHA256, req) - - // The error should be signature mismatch, not payload related - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) - - // Verify the request body is still accessible (fix preserves body) - bodyBytes := make([]byte, len(testPayload)) - n, err := req.Body.Read(bodyBytes) - assert.NoError(t, err) - assert.Equal(t, len(testPayload), n) - assert.Equal(t, testPayload, string(bodyBytes)) -} - -// TestIAMSignatureServiceMatching tests that IAM requests use the correct service in signature computation -// This reproduces the bug described in GitHub issue #7080 where the service was hardcoded to "s3" -func TestIAMSignatureServiceMatching(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{} - - // Load test configuration with credentials that match the logs - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "power_user", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "power_user_key", - SecretKey: "power_user_secret", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Use the exact payload and headers from the failing logs - testPayload := "Action=CreateAccessKey&UserName=admin&Version=2010-05-08" - - // Create request exactly as shown in logs - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(testPayload)) - assert.NoError(t, err) - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8111") - req.Header.Set("X-Amz-Date", "20250805T082934Z") - - // Calculate the expected signature using the correct IAM service - // This simulates what botocore/AWS SDK would calculate - credentialScope := "20250805/us-east-1/iam/aws4_request" - - // Calculate the actual payload hash for our test payload - actualPayloadHash := getSHA256Hash([]byte(testPayload)) - - // Build the canonical request with the actual payload hash - canonicalRequest := "POST\n/\n\ncontent-type:application/x-www-form-urlencoded; charset=utf-8\nhost:localhost:8111\nx-amz-date:20250805T082934Z\n\ncontent-type;host;x-amz-date\n" + actualPayloadHash - - // Calculate the canonical request hash - canonicalRequestHash := getSHA256Hash([]byte(canonicalRequest)) - - // Build the string to sign - stringToSign := "AWS4-HMAC-SHA256\n20250805T082934Z\n" + credentialScope + "\n" + canonicalRequestHash - - // Calculate expected signature using IAM service (what client sends) - expectedSigningKey := getSigningKey("power_user_secret", "20250805", "us-east-1", "iam") - expectedSignature := getSignature(expectedSigningKey, stringToSign) - - // Create authorization header with the correct signature - authHeader := "AWS4-HMAC-SHA256 Credential=power_user_key/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=" + expectedSignature - req.Header.Set("Authorization", authHeader) - - // Now test that SeaweedFS computes the same signature with our fix - identity, errCode := iam.doesSignatureMatch(actualPayloadHash, req) - - // With the fix, the signatures should match and we should get a successful authentication - assert.Equal(t, s3err.ErrNone, errCode) - assert.NotNil(t, identity) - assert.Equal(t, "power_user", identity.Name) -} - -// TestStreamingSignatureServiceField tests that the s3ChunkedReader struct correctly stores the service -// This verifies the fix for streaming uploads where getChunkSignature was hardcoding "s3" -func TestStreamingSignatureServiceField(t *testing.T) { - // Test that the s3ChunkedReader correctly uses the service field - // Create a mock s3ChunkedReader with IAM service - chunkedReader := &s3ChunkedReader{ - seedDate: time.Now(), - region: "us-east-1", - service: "iam", // This should be used instead of hardcoded "s3" - seedSignature: "testsignature", - cred: &Credential{ - AccessKey: "testkey", - SecretKey: "testsecret", - }, - } - - // Test that getScope is called with the correct service - scope := getScope(chunkedReader.seedDate, chunkedReader.region, chunkedReader.service) - assert.Contains(t, scope, "/iam/aws4_request") - assert.NotContains(t, scope, "/s3/aws4_request") - - // Test that getSigningKey would be called with the correct service - signingKey := getSigningKey( - chunkedReader.cred.SecretKey, - chunkedReader.seedDate.Format(yyyymmdd), - chunkedReader.region, - chunkedReader.service, - ) - assert.NotNil(t, signingKey) - - // The main point is that chunkedReader.service is "iam" and gets used correctly - // This ensures that IAM streaming uploads will use "iam" service instead of hardcoded "s3" - assert.Equal(t, "iam", chunkedReader.service) -} - -// Test that large IAM request bodies are truncated for security (DoS prevention) -func TestIAMLargeBodySecurityLimit(t *testing.T) { - // Create test IAM instance - iam := &IdentityAccessManagement{ - hashes: make(map[string]*sync.Pool), - hashCounters: make(map[string]*int32), - } - - // Load test configuration - err := iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Identities: []*iam_pb.Identity{ - { - Name: "testuser", - Credentials: []*iam_pb.Credential{ - { - AccessKey: "AKIAIOSFODNN7EXAMPLE", - SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", - }, - }, - Actions: []string{"Admin"}, - }, - }, - }) - assert.NoError(t, err) - - // Create a payload larger than the 10 MiB limit - largePayload := strings.Repeat("A", 11*(1<<20)) // 11 MiB - - // Create IAM request with large body - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(largePayload)) - assert.NoError(t, err) - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - req.Header.Set("Host", "localhost:8111") - - // Create IAM-style authorization header - now := time.Now().UTC() - dateStr := now.Format("20060102T150405Z") - credentialScope := now.Format("20060102") + "/us-east-1/iam/aws4_request" - - req.Header.Set("X-Amz-Date", dateStr) - - authHeader := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/" + credentialScope + - ", SignedHeaders=content-type;host;x-amz-date, Signature=dummysignature" - req.Header.Set("Authorization", authHeader) - - // The function should complete successfully but limit the body to 10 MiB - identity, errCode := iam.doesSignatureMatch(emptySHA256, req) - - // Should get signature mismatch (dummy signature) but not internal error - assert.Equal(t, s3err.ErrSignatureDoesNotMatch, errCode) - assert.Nil(t, identity) - - // Verify the body was truncated to the limit (10 MiB) - bodyBytes, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.Equal(t, 10*(1<<20), len(bodyBytes)) // Should be exactly 10 MiB - assert.Equal(t, strings.Repeat("A", 10*(1<<20)), string(bodyBytes)) // All As, but truncated -} - -// Test the streaming hash implementation directly -func TestStreamHashRequestBody(t *testing.T) { - testCases := []struct { - name string - payload string - }{ - { - name: "empty body", - payload: "", - }, - { - name: "small payload", - payload: "Action=CreateAccessKey&UserName=testuser&Version=2010-05-08", - }, - { - name: "medium payload", - payload: strings.Repeat("A", 1024), // 1KB - }, - { - name: "large payload within limit", - payload: strings.Repeat("B", 1<<20), // 1MB - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create request with the test payload - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(tc.payload)) - assert.NoError(t, err) - - // Compute expected hash directly for comparison - expectedHashStr := emptySHA256 - if tc.payload != "" { - expectedHash := sha256.Sum256([]byte(tc.payload)) - expectedHashStr = hex.EncodeToString(expectedHash[:]) - } - - // Test the streaming function - hash, err := streamHashRequestBody(req, iamRequestBodyLimit) - assert.NoError(t, err) - assert.Equal(t, expectedHashStr, hash) - - // Verify the body is preserved and readable - bodyBytes, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.Equal(t, tc.payload, string(bodyBytes)) - }) - } -} - -// Test streaming vs non-streaming approach produces identical results -func TestStreamingVsNonStreamingConsistency(t *testing.T) { - testPayloads := []string{ - "", - "small", - "Action=CreateAccessKey&UserName=testuser&Version=2010-05-08", - strings.Repeat("X", 8192), // Exactly one chunk - strings.Repeat("Y", 16384), // Two chunks - strings.Repeat("Z", 12345), // Non-aligned chunks - } - - for i, payload := range testPayloads { - t.Run(fmt.Sprintf("payload_%d", i), func(t *testing.T) { - // Test streaming approach - req1, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(payload)) - assert.NoError(t, err) - - streamHash, err := streamHashRequestBody(req1, iamRequestBodyLimit) - assert.NoError(t, err) - - // Test direct approach for comparison - directHashStr := emptySHA256 - if payload != "" { - directHash := sha256.Sum256([]byte(payload)) - directHashStr = hex.EncodeToString(directHash[:]) - } - - // Both approaches should produce identical results - assert.Equal(t, directHashStr, streamHash) - - // Verify body preservation - bodyBytes, err := io.ReadAll(req1.Body) - assert.NoError(t, err) - assert.Equal(t, payload, string(bodyBytes)) - }) - } -} - -// Test streaming with size limit enforcement -func TestStreamingWithSizeLimit(t *testing.T) { - // Create a payload larger than the limit - largePayload := strings.Repeat("A", 11*(1<<20)) // 11 MiB - - req, err := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(largePayload)) - assert.NoError(t, err) - - // Stream with the limit - hash, err := streamHashRequestBody(req, iamRequestBodyLimit) - assert.NoError(t, err) - - // Verify the hash is computed for the truncated content (10 MiB) - truncatedPayload := strings.Repeat("A", 10*(1<<20)) - expectedHash := sha256.Sum256([]byte(truncatedPayload)) - expectedHashStr := hex.EncodeToString(expectedHash[:]) - - assert.Equal(t, expectedHashStr, hash) - - // Verify the body was truncated - bodyBytes, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.Equal(t, 10*(1<<20), len(bodyBytes)) - assert.Equal(t, truncatedPayload, string(bodyBytes)) -} - -// Benchmark streaming vs non-streaming memory usage -func BenchmarkStreamingVsNonStreaming(b *testing.B) { - // Test with 1MB payload to show memory efficiency - payload := strings.Repeat("A", 1<<20) // 1MB - - b.Run("streaming", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - req, _ := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(payload)) - streamHashRequestBody(req, iamRequestBodyLimit) - } - }) - - b.Run("direct", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Simulate the old approach of reading all at once - req, _ := http.NewRequest("POST", "http://localhost:8111/", strings.NewReader(payload)) - io.ReadAll(req.Body) - sha256.Sum256([]byte(payload)) - } - }) -} diff --git a/weed/s3api/bucket_metadata.go b/weed/s3api/bucket_metadata.go deleted file mode 100644 index a65fe5404..000000000 --- a/weed/s3api/bucket_metadata.go +++ /dev/null @@ -1,217 +0,0 @@ -package s3api - -import ( - "context" - "encoding/json" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/util" - "math" - "sync" -) - -var loadBucketMetadataFromFiler = func(r *BucketRegistry, bucketName string) (*BucketMetaData, error) { - entry, err := filer_pb.GetEntry(context.Background(), r.s3a, util.NewFullPath(r.s3a.option.BucketsPath, bucketName)) - if err != nil { - return nil, err - } - - return buildBucketMetadata(r.s3a.iam, entry), nil -} - -type BucketMetaData struct { - _ struct{} `type:"structure"` - - Name string - - //By default, when another AWS account uploads an object to S3 bucket, - //that account (the object writer) owns the object, has access to it, and - //can grant other users access to it through ACLs. You can use Object Ownership - //to change this default behavior so that ACLs are disabled and you, as the - //bucket owner, automatically own every object in your bucket. - ObjectOwnership string - - // Container for the bucket owner's display name and ID. - Owner *s3.Owner `type:"structure"` - - // A list of grants for access controls. - Acl []*s3.Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` -} - -type BucketRegistry struct { - metadataCache map[string]*BucketMetaData - metadataCacheLock sync.RWMutex - - notFound map[string]struct{} - notFoundLock sync.RWMutex - s3a *S3ApiServer -} - -func NewBucketRegistry(s3a *S3ApiServer) *BucketRegistry { - br := &BucketRegistry{ - metadataCache: make(map[string]*BucketMetaData), - notFound: make(map[string]struct{}), - s3a: s3a, - } - err := br.init() - if err != nil { - glog.Fatal("init bucket registry failed", err) - return nil - } - return br -} - -func (r *BucketRegistry) init() error { - err := filer_pb.List(context.Background(), r.s3a, r.s3a.option.BucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error { - r.LoadBucketMetadata(entry) - return nil - }, "", false, math.MaxUint32) - return err -} - -func (r *BucketRegistry) LoadBucketMetadata(entry *filer_pb.Entry) { - bucketMetadata := buildBucketMetadata(r.s3a.iam, entry) - r.metadataCacheLock.Lock() - defer r.metadataCacheLock.Unlock() - r.metadataCache[entry.Name] = bucketMetadata -} - -func buildBucketMetadata(accountManager AccountManager, entry *filer_pb.Entry) *BucketMetaData { - entryJson, _ := json.Marshal(entry) - glog.V(3).Infof("build bucket metadata,entry=%s", entryJson) - bucketMetadata := &BucketMetaData{ - Name: entry.Name, - - //Default ownership: OwnershipBucketOwnerEnforced, which means Acl is disabled - ObjectOwnership: s3_constants.OwnershipBucketOwnerEnforced, - - // Default owner: `AccountAdmin` - Owner: &s3.Owner{ - ID: &AccountAdmin.Id, - DisplayName: &AccountAdmin.DisplayName, - }, - } - if entry.Extended != nil { - //ownership control - ownership, ok := entry.Extended[s3_constants.ExtOwnershipKey] - if ok { - ownership := string(ownership) - valid := s3_constants.ValidateOwnership(ownership) - if valid { - bucketMetadata.ObjectOwnership = ownership - } else { - glog.Warningf("Invalid ownership: %s, bucket: %s", ownership, bucketMetadata.Name) - } - } - - //access control policy - //owner - acpOwnerBytes, ok := entry.Extended[s3_constants.ExtAmzOwnerKey] - if ok && len(acpOwnerBytes) > 0 { - ownerAccountId := string(acpOwnerBytes) - ownerAccountName := accountManager.GetAccountNameById(ownerAccountId) - if ownerAccountName == "" { - glog.Warningf("owner[id=%s] is invalid, bucket: %s", ownerAccountId, bucketMetadata.Name) - } else { - bucketMetadata.Owner = &s3.Owner{ - ID: &ownerAccountId, - DisplayName: &ownerAccountName, - } - } - } - //grants - acpGrantsBytes, ok := entry.Extended[s3_constants.ExtAmzAclKey] - if ok && len(acpGrantsBytes) > 0 { - var grants []*s3.Grant - err := json.Unmarshal(acpGrantsBytes, &grants) - if err == nil { - bucketMetadata.Acl = grants - } else { - glog.Warningf("Unmarshal ACP grants: %s(%v), bucket: %s", string(acpGrantsBytes), err, bucketMetadata.Name) - } - } - } - return bucketMetadata -} - -func (r *BucketRegistry) RemoveBucketMetadata(entry *filer_pb.Entry) { - r.removeMetadataCache(entry.Name) - r.unMarkNotFound(entry.Name) -} - -func (r *BucketRegistry) GetBucketMetadata(bucketName string) (*BucketMetaData, s3err.ErrorCode) { - r.metadataCacheLock.RLock() - bucketMetadata, ok := r.metadataCache[bucketName] - r.metadataCacheLock.RUnlock() - if ok { - return bucketMetadata, s3err.ErrNone - } - - r.notFoundLock.RLock() - _, ok = r.notFound[bucketName] - r.notFoundLock.RUnlock() - if ok { - return nil, s3err.ErrNoSuchBucket - } - - bucketMetadata, errCode := r.LoadBucketMetadataFromFiler(bucketName) - if errCode != s3err.ErrNone { - return nil, errCode - } - - r.setMetadataCache(bucketMetadata) - r.unMarkNotFound(bucketName) - return bucketMetadata, s3err.ErrNone -} - -func (r *BucketRegistry) LoadBucketMetadataFromFiler(bucketName string) (*BucketMetaData, s3err.ErrorCode) { - r.notFoundLock.Lock() - defer r.notFoundLock.Unlock() - - //check if already exists - r.metadataCacheLock.RLock() - bucketMetaData, ok := r.metadataCache[bucketName] - r.metadataCacheLock.RUnlock() - if ok { - return bucketMetaData, s3err.ErrNone - } - - //if not exists, load from filer - bucketMetadata, err := loadBucketMetadataFromFiler(r, bucketName) - if err != nil { - if err == filer_pb.ErrNotFound { - // The bucket doesn't actually exist and should no longer loaded from the filer - r.notFound[bucketName] = struct{}{} - return nil, s3err.ErrNoSuchBucket - } - return nil, s3err.ErrInternalError - } - return bucketMetadata, s3err.ErrNone -} - -func (r *BucketRegistry) setMetadataCache(metadata *BucketMetaData) { - r.metadataCacheLock.Lock() - defer r.metadataCacheLock.Unlock() - r.metadataCache[metadata.Name] = metadata -} - -func (r *BucketRegistry) removeMetadataCache(bucket string) { - r.metadataCacheLock.Lock() - defer r.metadataCacheLock.Unlock() - delete(r.metadataCache, bucket) -} - -func (r *BucketRegistry) markNotFound(bucket string) { - r.notFoundLock.Lock() - defer r.notFoundLock.Unlock() - r.notFound[bucket] = struct{}{} -} - -func (r *BucketRegistry) unMarkNotFound(bucket string) { - r.notFoundLock.Lock() - defer r.notFoundLock.Unlock() - delete(r.notFound, bucket) -} diff --git a/weed/s3api/bucket_metadata_test.go b/weed/s3api/bucket_metadata_test.go deleted file mode 100644 index 16d20b8fe..000000000 --- a/weed/s3api/bucket_metadata_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package s3api - -import ( - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "reflect" - "sync" - "testing" - "time" -) - -type BucketMetadataTestCase struct { - filerEntry *filer_pb.Entry - expectBucketMetadata *BucketMetaData -} - -var ( - //bad entry - badEntry = &filer_pb.Entry{ - Name: "badEntry", - } - - //good entry - goodEntryAcl, _ = json.Marshal(s3_constants.PublicRead) - goodEntry = &filer_pb.Entry{ - Name: "entryWithValidAcp", - Extended: map[string][]byte{ - s3_constants.ExtOwnershipKey: []byte(s3_constants.OwnershipBucketOwnerEnforced), - s3_constants.ExtAmzOwnerKey: []byte(AccountAdmin.DisplayName), - s3_constants.ExtAmzAclKey: goodEntryAcl, - }, - } - - //ownership is "" - ownershipEmptyStr = &filer_pb.Entry{ - Name: "ownershipEmptyStr", - Extended: map[string][]byte{ - s3_constants.ExtOwnershipKey: []byte(""), - }, - } - - //ownership valid - ownershipValid = &filer_pb.Entry{ - Name: "ownershipValid", - Extended: map[string][]byte{ - s3_constants.ExtOwnershipKey: []byte(s3_constants.OwnershipBucketOwnerEnforced), - }, - } - - //owner is "" - acpEmptyStr = &filer_pb.Entry{ - Name: "acpEmptyStr", - Extended: map[string][]byte{ - s3_constants.ExtAmzOwnerKey: []byte(""), - }, - } - - //owner not exists - acpEmptyObject = &filer_pb.Entry{ - Name: "acpEmptyObject", - Extended: map[string][]byte{ - s3_constants.ExtAmzOwnerKey: []byte("xxxxx"), - }, - } - - //grants is nil - acpOwnerNilAcp, _ = json.Marshal(make([]*s3.Grant, 0)) - acpOwnerNil = &filer_pb.Entry{ - Name: "acpOwnerNil", - Extended: map[string][]byte{ - s3_constants.ExtAmzAclKey: acpOwnerNilAcp, - }, - } - - //load filer is - loadFilerBucket = make(map[string]int, 1) - //override `loadBucketMetadataFromFiler` to avoid really load from filer -) - -var tcs = []*BucketMetadataTestCase{ - { - badEntry, &BucketMetaData{ - Name: badEntry.Name, - ObjectOwnership: s3_constants.DefaultOwnershipForExists, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: nil, - }, - }, - { - goodEntry, &BucketMetaData{ - Name: goodEntry.Name, - ObjectOwnership: s3_constants.OwnershipBucketOwnerEnforced, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: s3_constants.PublicRead, - }, - }, - { - ownershipEmptyStr, &BucketMetaData{ - Name: ownershipEmptyStr.Name, - ObjectOwnership: s3_constants.DefaultOwnershipForExists, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: nil, - }, - }, - { - ownershipValid, &BucketMetaData{ - Name: ownershipValid.Name, - ObjectOwnership: s3_constants.OwnershipBucketOwnerEnforced, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: nil, - }, - }, - { - acpEmptyStr, &BucketMetaData{ - Name: acpEmptyStr.Name, - ObjectOwnership: s3_constants.DefaultOwnershipForExists, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: nil, - }, - }, - { - acpEmptyObject, &BucketMetaData{ - Name: acpEmptyObject.Name, - ObjectOwnership: s3_constants.DefaultOwnershipForExists, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: nil, - }, - }, - { - acpOwnerNil, &BucketMetaData{ - Name: acpOwnerNil.Name, - ObjectOwnership: s3_constants.DefaultOwnershipForExists, - Owner: &s3.Owner{ - DisplayName: &AccountAdmin.DisplayName, - ID: &AccountAdmin.Id, - }, - Acl: make([]*s3.Grant, 0), - }, - }, -} - -func TestBuildBucketMetadata(t *testing.T) { - iam := &IdentityAccessManagement{} - _ = iam.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{}) - for _, tc := range tcs { - resultBucketMetadata := buildBucketMetadata(iam, tc.filerEntry) - if !reflect.DeepEqual(resultBucketMetadata, tc.expectBucketMetadata) { - t.Fatalf("result is unexpect: \nresult: %v, \nexpect: %v", resultBucketMetadata, tc.expectBucketMetadata) - } - } -} - -func TestGetBucketMetadata(t *testing.T) { - loadBucketMetadataFromFiler = func(r *BucketRegistry, bucketName string) (*BucketMetaData, error) { - time.Sleep(time.Second) - loadFilerBucket[bucketName] = loadFilerBucket[bucketName] + 1 - return &BucketMetaData{ - Name: bucketName, - }, nil - } - - br := &BucketRegistry{ - metadataCache: make(map[string]*BucketMetaData), - notFound: make(map[string]struct{}), - s3a: nil, - } - - //start 40 goroutine for - var wg sync.WaitGroup - closeCh := make(chan struct{}) - for i := 0; i < 40; i++ { - wg.Add(1) - go func() { - defer wg.Done() - outLoop: - for { - for j := 0; j < 5; j++ { - select { - case <-closeCh: - break outLoop - default: - reqBucket := fmt.Sprintf("%c", 67+j) - _, errCode := br.GetBucketMetadata(reqBucket) - if errCode != s3err.ErrNone { - close(closeCh) - t.Error("not expect") - } - } - } - time.Sleep(10 * time.Microsecond) - } - }() - } - time.Sleep(time.Second) - close(closeCh) - wg.Wait() - - //Each bucket is loaded from the filer only once - for bucketName, loadCount := range loadFilerBucket { - if loadCount != 1 { - t.Fatalf("lock is uneffict: %s, %d", bucketName, loadCount) - } - } -} diff --git a/weed/s3api/chunked_bug_reproduction_test.go b/weed/s3api/chunked_bug_reproduction_test.go deleted file mode 100644 index dc02bc282..000000000 --- a/weed/s3api/chunked_bug_reproduction_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package s3api - -import ( - "bytes" - "io" - "net/http" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// TestChunkedEncodingMixedFormat tests the fix for GitHub issue #6847 -// where AWS SDKs send mixed format: unsigned streaming headers but signed chunk data -func TestChunkedEncodingMixedFormat(t *testing.T) { - expectedContent := "hello world\n" - - // Create the problematic mixed format payload: - // - Unsigned streaming headers (STREAMING-UNSIGNED-PAYLOAD-TRAILER) - // - But chunk data contains chunk-signature headers - mixedFormatPayload := "c;chunk-signature=347f6c62acd95b7c6ae18648776024a9e8cd6151184a5e777ea8e1d9b4e45b3c\r\n" + - "hello world\n\r\n" + - "0;chunk-signature=1a99b7790b8db0f4bfc048c8802056c3179d561e40c073167e79db5f1a6af4b2\r\n" + - "x-amz-checksum-crc32:rwg7LQ==\r\n" + - "\r\n" - - // Create HTTP request with unsigned streaming headers - req, _ := http.NewRequest("PUT", "/test-bucket/test-object", bytes.NewReader([]byte(mixedFormatPayload))) - req.Header.Set("x-amz-content-sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER") - req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32") - - // Process through SeaweedFS chunked reader - iam := setupTestIAM() - reader, errCode := iam.newChunkedReader(req) - - if errCode != s3err.ErrNone { - t.Fatalf("Failed to create chunked reader: %v", errCode) - } - - // Read the content - actualContent, err := io.ReadAll(reader) - if err != nil { - t.Fatalf("Failed to read content: %v", err) - } - - // Should correctly extract just the content, ignoring chunk signatures - if string(actualContent) != expectedContent { - t.Errorf("Mixed format handling failed. Expected: %q, Got: %q", expectedContent, string(actualContent)) - } -} - -// setupTestIAM creates a test IAM instance using the same pattern as existing tests -func setupTestIAM() *IdentityAccessManagement { - iam := &IdentityAccessManagement{} - return iam -} diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index ca35fe3cd..2678f312f 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -21,32 +21,44 @@ package s3api import ( "bufio" "bytes" - "crypto/sha1" "crypto/sha256" - "encoding/base64" "encoding/hex" "errors" - "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "hash" - "hash/crc32" "io" "net/http" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/dustin/go-humanize" - "github.com/minio/crc64nvme" ) +// getChunkSignature - get chunk signature. +func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string { + + // Calculate string to sign. + stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := getSigningKey(secretKey, date, region, "s3") + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + return newSignature +} + // calculateSeedSignature - Calculate seed signature in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html -// +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. -func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, service string, date time.Time, errCode s3err.ErrorCode) { +func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode s3err.ErrorCode) { // Copy request. req := *r @@ -57,33 +69,26 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr // Parse signature version '4' header. signV4Values, errCode := parseSignV4(v4Auth) if errCode != s3err.ErrNone { - return nil, "", "", "", time.Time{}, errCode - } - - contentSha256Header := req.Header.Get("X-Amz-Content-Sha256") - - switch contentSha256Header { - // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' - case streamingContentSHA256: - glog.V(3).Infof("streaming content sha256") - case streamingUnsignedPayload: - glog.V(3).Infof("streaming unsigned payload") - default: - return nil, "", "", "", time.Time{}, s3err.ErrContentSHA256Mismatch + return nil, "", "", time.Time{}, errCode } // Payload streaming. - payload := contentSha256Header + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get("X-Amz-Content-Sha256") { + return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch + } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) if errCode != s3err.ErrNone { - return nil, "", "", "", time.Time{}, errCode + return nil, "", "", time.Time{}, errCode } // Verify if the access key id matches. identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) if !found { - return nil, "", "", "", time.Time{}, s3err.ErrInvalidAccessKeyID + return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID } bucket, object := s3_constants.GetBucketAndObject(r) @@ -99,15 +104,16 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr var dateStr string if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" { if dateStr = r.Header.Get("Date"); dateStr == "" { - return nil, "", "", "", time.Time{}, s3err.ErrMissingDateHeader + return nil, "", "", time.Time{}, s3err.ErrMissingDateHeader } } - // Parse date header. - date, err := time.Parse(iso8601Format, dateStr) + var err error + date, err = time.Parse(iso8601Format, dateStr) if err != nil { - return nil, "", "", "", time.Time{}, s3err.ErrMalformedDate + return nil, "", "", time.Time{}, s3err.ErrMalformedDate } + // Query string. queryStr := req.URL.Query().Encode() @@ -118,18 +124,18 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date.Format(yyyymmdd), region, signV4Values.Credential.scope.service) + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3") // Calculate signature. newSignature := getSignature(signingKey, stringToSign) // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - return nil, "", "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch + return nil, "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch } - // Return calculated signature. - return cred, newSignature, region, signV4Values.Credential.scope.service, date, s3err.ErrNone + // Return caculated signature. + return cred, newSignature, region, date, s3err.ErrNone } const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB @@ -140,86 +146,25 @@ var errLineTooLong = errors.New("header line too long") // Malformed encoding is generated when chunk header is wrongly formed. var errMalformedEncoding = errors.New("malformed chunked encoding") -// newChunkedReader returns a new s3ChunkedReader that translates the data read from r +// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -func (iam *IdentityAccessManagement) newChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) { - glog.V(3).Infof("creating a new newSignV4ChunkedReader") - - contentSha256Header := req.Header.Get("X-Amz-Content-Sha256") - authorizationHeader := req.Header.Get("Authorization") - - var ident *Credential - var seedSignature, region, service string - var seedDate time.Time - var errCode s3err.ErrorCode - - switch contentSha256Header { - // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' - case streamingContentSHA256: - glog.V(3).Infof("streaming content sha256") - ident, seedSignature, region, service, seedDate, errCode = iam.calculateSeedSignature(req) - if errCode != s3err.ErrNone { - return nil, errCode - } - case streamingUnsignedPayload: - glog.V(3).Infof("streaming unsigned payload") - if authorizationHeader != "" { - // We do not need to pass the seed signature to the Reader as each chunk is not signed, - // but we do compute it to verify the caller has the correct permissions. - _, _, _, _, _, errCode = iam.calculateSeedSignature(req) - if errCode != s3err.ErrNone { - return nil, errCode - } - } +func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) { + ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req) + if errCode != s3err.ErrNone { + return nil, errCode } - - // Get the checksum algorithm from the x-amz-trailer Header. - amzTrailerHeader := req.Header.Get("x-amz-trailer") - checksumAlgorithm, err := extractChecksumAlgorithm(amzTrailerHeader) - - if err != nil { - glog.V(3).Infof("error extracting checksum algorithm: %v", err) - return nil, s3err.ErrInvalidRequest - } - - checkSumWriter := getCheckSumWriter(checksumAlgorithm) - return &s3ChunkedReader{ cred: ident, reader: bufio.NewReader(req.Body), seedSignature: seedSignature, seedDate: seedDate, region: region, - service: service, chunkSHA256Writer: sha256.New(), - checkSumAlgorithm: checksumAlgorithm.String(), - checkSumWriter: checkSumWriter, state: readChunkHeader, - iam: iam, }, s3err.ErrNone } -func extractChecksumAlgorithm(amzTrailerHeader string) (ChecksumAlgorithm, error) { - // Extract checksum algorithm from the x-amz-trailer header. - switch amzTrailerHeader { - case "x-amz-checksum-crc32": - return ChecksumAlgorithmCRC32, nil - case "x-amz-checksum-crc32c": - return ChecksumAlgorithmCRC32C, nil - case "x-amz-checksum-crc64nvme": - return ChecksumAlgorithmCRC64NVMe, nil - case "x-amz-checksum-sha1": - return ChecksumAlgorithmSHA1, nil - case "x-amz-checksum-sha256": - return ChecksumAlgorithmSHA256, nil - case "": - return ChecksumAlgorithmNone, nil - default: - return ChecksumAlgorithmNone, errors.New("unsupported checksum algorithm '" + amzTrailerHeader + "'") - } -} - // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { @@ -228,26 +173,19 @@ type s3ChunkedReader struct { seedSignature string seedDate time.Time region string - service string // Service from credential scope (e.g., "s3", "iam") state chunkState lastChunk bool - chunkSignature string // Empty string if unsigned streaming upload. - checkSumAlgorithm string // Empty string if no checksum algorithm is specified. - checkSumWriter hash.Hash + chunkSignature string chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. n uint64 // Unread bytes in chunk err error - iam *IdentityAccessManagement } // Read chunk reads the chunk token signature portion. func (cr *s3ChunkedReader) readS3ChunkHeader() { // Read the first chunk line until CRLF. - var bytesRead, hexChunkSize, hexChunkSignature []byte - bytesRead, cr.err = readChunkLine(cr.reader) - // Parse s3 specific chunk extension and fetch the values. - hexChunkSize, hexChunkSignature = parseS3ChunkExtension(bytesRead) - + var hexChunkSize, hexChunkSignature []byte + hexChunkSize, hexChunkSignature, cr.err = readChunkLine(cr.reader) if cr.err != nil { return } @@ -259,14 +197,8 @@ func (cr *s3ChunkedReader) readS3ChunkHeader() { if cr.n == 0 { cr.err = io.EOF } - // Save the incoming chunk signature. - if hexChunkSignature == nil { - // We are using unsigned streaming upload. - cr.chunkSignature = "" - } else { - cr.chunkSignature = string(hexChunkSignature) - } + cr.chunkSignature = string(hexChunkSignature) } type chunkState int @@ -275,9 +207,7 @@ const ( readChunkHeader chunkState = iota readChunkTrailer readChunk - readTrailerChunk verifyChunk - verifyChecksum eofChunk ) @@ -290,12 +220,8 @@ func (cs chunkState) String() string { stateString = "readChunkTrailer" case readChunk: stateString = "readChunk" - case readTrailerChunk: - stateString = "readTrailerChunk" case verifyChunk: stateString = "verifyChunk" - case verifyChecksum: - stateString = "verifyChecksum" case eofChunk: stateString = "eofChunk" @@ -325,80 +251,11 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { } cr.state = readChunk case readChunkTrailer: - err = peekCRLF(cr.reader) - isTrailingChunk := cr.n == 0 && cr.lastChunk - - if !isTrailingChunk { - // If we're not in the trailing chunk, we should consume the bytes no matter what. - // The error returned by peekCRLF is the same as the one by readCRLF. - readCRLF(cr.reader) - cr.err = err - } else if err != nil && err != errMalformedEncoding { - cr.err = err + cr.err = readCRLF(cr.reader) + if cr.err != nil { return 0, errMalformedEncoding - } else { // equivalent to isTrailingChunk && err == errMalformedEncoding - // FIXME: The "right" structure of the last chunk as provided by the examples in the - // AWS documentation is "0\r\n\r\n" instead of "0\r\n", but some s3 clients when calling with - // streaming-unsigned-payload-trailer omit the last CRLF. To avoid returning an error that, we need to accept both. - // We arrive here when we're at the end of the 0-byte chunk, depending on the client implementation - // the client may or may not send the optional CRLF after the 0-byte chunk. - // If the client sends the optional CRLF, we should consume it. - if err == nil { - readCRLF(cr.reader) - } } - - // If we're using unsigned streaming upload, there is no signature to verify at each chunk. - if cr.chunkSignature != "" { - cr.state = verifyChunk - } else if cr.lastChunk { - cr.state = readTrailerChunk - } else { - cr.state = readChunkHeader - } - - case readTrailerChunk: - // When using unsigned upload, this would be the raw contents of the trailer chunk: - // - // x-amz-checksum-crc32:YABb/g==\n\r\n\r\n // Trailer chunk (note optional \n character) - // \r\n // CRLF - // - // When using signed upload with an additional checksum algorithm, this would be the raw contents of the trailer chunk: - // - // x-amz-checksum-crc32:YABb/g==\n\r\n // Trailer chunk (note optional \n character) - // trailer-signature\r\n - // \r\n // CRLF - // - // This implementation currently only supports the first case. - // TODO: Implement the second case (signed upload with additional checksum computation for each chunk) - - extractedCheckSumAlgorithm, extractedChecksum := parseChunkChecksum(cr.reader) - - if extractedCheckSumAlgorithm.String() != cr.checkSumAlgorithm { - errorMessage := fmt.Sprintf("checksum algorithm in trailer '%s' does not match the one advertised in the header '%s'", extractedCheckSumAlgorithm.String(), cr.checkSumAlgorithm) - glog.V(3).Info(errorMessage) - cr.err = errors.New(s3err.ErrMsgChecksumAlgorithmMismatch) - return 0, cr.err - } - - computedChecksum := cr.checkSumWriter.Sum(nil) - base64Checksum := base64.StdEncoding.EncodeToString(computedChecksum) - if string(extractedChecksum) != base64Checksum { - glog.V(3).Infof("payload checksum '%s' does not match provided checksum '%s'", base64Checksum, string(extractedChecksum)) - cr.err = errors.New(s3err.ErrMsgPayloadChecksumMismatch) - return 0, cr.err - } - - // TODO: Extract signature from trailer chunk and verify it. - // For now, we just read the trailer chunk and discard it. - - // Reading remaining CRLF. - for i := 0; i < 2; i++ { - cr.err = readCRLF(cr.reader) - } - - cr.state = eofChunk - + cr.state = verifyChunk case readChunk: // There is no more space left in the request buffer. if len(buf) == 0 { @@ -423,11 +280,6 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { // Calculate sha256. cr.chunkSHA256Writer.Write(rbuf[:n0]) - // Compute checksum - if cr.checkSumWriter != nil { - cr.checkSumWriter.Write(rbuf[:n0]) - } - // Update the bytes read into request buffer so far. n += n0 buf = buf[n0:] @@ -440,35 +292,18 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { continue } case verifyChunk: - // Check if we have credentials for signature verification - // This handles the case where we have unsigned streaming (no cred) but chunks contain signatures - // - // BUG FIX for GitHub issue #6847: - // Some AWS SDK versions (Java 3.7.412+, .NET 4.0.0-preview.6+) send mixed format: - // - HTTP headers indicate unsigned streaming (STREAMING-UNSIGNED-PAYLOAD-TRAILER) - // - But chunk data contains chunk-signature headers (normally only for signed streaming) - // This causes a nil pointer dereference when trying to verify signatures without credentials - if cr.cred != nil { - // Normal signed streaming - verify the chunk signature - // Calculate the hashed chunk. - hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) - // Calculate the chunk signature. - newSignature := cr.getChunkSignature(hashedChunk) - if !compareSignatureV4(cr.chunkSignature, newSignature) { - // Chunk signature doesn't match we return signature does not match. - cr.err = errors.New(s3err.ErrMsgChunkSignatureMismatch) - return 0, cr.err - } - // Newly calculated signature becomes the seed for the next chunk - // this follows the chaining. - cr.seedSignature = newSignature - } else { - // For unsigned streaming, we should not verify chunk signatures even if they are present - // This fixes the bug where AWS SDKs send chunk signatures with unsigned streaming headers - glog.V(3).Infof("Skipping chunk signature verification for unsigned streaming") + // Calculate the hashed chunk. + hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) + // Calculate the chunk signature. + newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk) + if !compareSignatureV4(cr.chunkSignature, newSignature) { + // Chunk signature doesn't match we return signature does not match. + cr.err = errors.New("chunk signature does not match") + return 0, cr.err } - - // Common cleanup and state transition for both signed and unsigned streaming + // Newly calculated signature becomes the seed for the next chunk + // this follows the chaining. + cr.seedSignature = newSignature cr.chunkSHA256Writer.Reset() if cr.lastChunk { cr.state = eofChunk @@ -481,67 +316,42 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { } } -// getChunkSignature - get chunk signature. -func (cr *s3ChunkedReader) getChunkSignature(hashedChunk string) string { - // Calculate string to sign. - stringToSign := signV4Algorithm + "-PAYLOAD" + "\n" + - cr.seedDate.Format(iso8601Format) + "\n" + - getScope(cr.seedDate, cr.region, cr.service) + "\n" + - cr.seedSignature + "\n" + - emptySHA256 + "\n" + - hashedChunk - - // Get hmac signing key. - signingKey := getSigningKey(cr.cred.SecretKey, cr.seedDate.Format(yyyymmdd), cr.region, cr.service) - - // Calculate and return signature. - return getSignature(signingKey, stringToSign) -} - -func readCRLF(reader *bufio.Reader) error { +// readCRLF - check if reader only has '\r\n' CRLF character. +// returns malformed encoding if it doesn't. +func readCRLF(reader io.Reader) error { buf := make([]byte, 2) - _, err := io.ReadFull(reader, buf) + _, err := io.ReadFull(reader, buf[:2]) if err != nil { return err } - return checkCRLF(buf) -} - -func peekCRLF(reader *bufio.Reader) error { - buf, err := reader.Peek(2) - if err != nil { - return err - } - if err := checkCRLF(buf); err != nil { - return err - } - return nil -} - -func checkCRLF(buf []byte) error { - if len(buf) != 2 || buf[0] != '\r' || buf[1] != '\n' { + if buf[0] != '\r' || buf[1] != '\n' { return errMalformedEncoding } return nil } -func readChunkLine(b *bufio.Reader) ([]byte, error) { +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are owned by the bufio.Reader +// so they are only valid until the next bufio read. +func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { buf, err := b.ReadSlice('\n') if err != nil { // We always know when EOF is coming. // If the caller asked for a line, there should be a line. - switch err { - case io.EOF: + if err == io.EOF { err = io.ErrUnexpectedEOF - case bufio.ErrBufferFull: + } else if err == bufio.ErrBufferFull { err = errLineTooLong } - return nil, err + return nil, nil, err } if len(buf) >= maxLineLength { - return nil, errLineTooLong + return nil, nil, errLineTooLong } - return trimTrailingWhitespace(buf), nil + // Parse s3 specific chunk extension and fetch the values. + hexChunkSize, hexChunkSignature := parseS3ChunkExtension(buf) + return hexChunkSize, hexChunkSignature, nil } // trimTrailingWhitespace - trim trailing white space. @@ -560,63 +370,26 @@ func isASCIISpace(b byte) bool { // Constant s3 chunk encoding signature. const s3ChunkSignatureStr = ";chunk-signature=" -// parseS3ChunkExtension removes any s3 specific chunk-extension from buf. +// parses3ChunkExtension removes any s3 specific chunk-extension from buf. // For example, -// -// "10000;chunk-signature=..." => "10000", "chunk-signature=..." +// "10000;chunk-signature=..." => "10000", "chunk-signature=..." func parseS3ChunkExtension(buf []byte) ([]byte, []byte) { buf = trimTrailingWhitespace(buf) semi := bytes.Index(buf, []byte(s3ChunkSignatureStr)) // Chunk signature not found, return the whole buffer. - // This means we're using unsigned streaming upload. if semi == -1 { return buf, nil } return buf[:semi], parseChunkSignature(buf[semi:]) } -func parseChunkChecksum(b *bufio.Reader) (ChecksumAlgorithm, []byte) { - // When using unsigned upload, this would be the raw contents of the trailer chunk: - // - // x-amz-checksum-crc32:YABb/g==\n\r\n\r\n // Trailer chunk (note optional \n character) - // \r\n // CRLF - // - // When using signed upload with an additional checksum algorithm, this would be the raw contents of the trailer chunk: - // - // x-amz-checksum-crc32:YABb/g==\n\r\n // Trailer chunk (note optional \n character) - // trailer-signature\r\n - // \r\n // CRLF - // - - // x-amz-checksum-crc32:YABb/g==\n - bytesRead, err := readChunkLine(b) - if err != nil { - return ChecksumAlgorithmNone, nil - } - - // Split on ':' - parts := bytes.SplitN(bytesRead, []byte(":"), 2) - checksumKey := string(parts[0]) - checksumValue := parts[1] - - // Discard all trailing whitespace characters - checksumValue = trimTrailingWhitespace(checksumValue) - - // If the checksum key is not a supported checksum algorithm, return an error. - // TODO: Bubble that error up to the caller - extractedAlgorithm, err := extractChecksumAlgorithm(checksumKey) - if err != nil { - return ChecksumAlgorithmNone, nil - } - - return extractedAlgorithm, checksumValue -} - +// parseChunkSignature - parse chunk signature. func parseChunkSignature(chunk []byte) []byte { - chunkSplits := bytes.SplitN(chunk, []byte("="), 2) - return chunkSplits[1] // Keep only the signature. + chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2) + return chunkSplits[1] } +// parse hex to uint64. func parseHexUint(v []byte) (n uint64, err error) { for i, b := range v { switch { @@ -637,50 +410,3 @@ func parseHexUint(v []byte) (n uint64, err error) { } return } - -// Checksum Algorithm represents the various checksum algorithms supported. -type ChecksumAlgorithm int - -const ( - ChecksumAlgorithmNone ChecksumAlgorithm = iota - ChecksumAlgorithmCRC32 - ChecksumAlgorithmCRC32C - ChecksumAlgorithmCRC64NVMe - ChecksumAlgorithmSHA1 - ChecksumAlgorithmSHA256 -) - -func (ca ChecksumAlgorithm) String() string { - switch ca { - case ChecksumAlgorithmNone: - return "" - case ChecksumAlgorithmCRC32: - return "x-amz-checksum-crc32" - case ChecksumAlgorithmCRC32C: - return "x-amz-checksum-crc32c" - case ChecksumAlgorithmCRC64NVMe: - return "x-amz-checksum-crc64nvme" - case ChecksumAlgorithmSHA1: - return "x-amz-checksum-sha1" - case ChecksumAlgorithmSHA256: - return "x-amz-checksum-sha256" - } - return "" -} - -// getCheckSumWriter - get checksum writer. -func getCheckSumWriter(checksumAlgorithm ChecksumAlgorithm) hash.Hash { - switch checksumAlgorithm { - case ChecksumAlgorithmCRC32: - return crc32.NewIEEE() - case ChecksumAlgorithmCRC32C: - return crc32.New(crc32.MakeTable(crc32.Castagnoli)) - case ChecksumAlgorithmCRC64NVMe: - return crc64nvme.New() - case ChecksumAlgorithmSHA1: - return sha1.New() - case ChecksumAlgorithmSHA256: - return sha256.New() - } - return nil -} diff --git a/weed/s3api/chunked_reader_v4_test.go b/weed/s3api/chunked_reader_v4_test.go deleted file mode 100644 index 786df3465..000000000 --- a/weed/s3api/chunked_reader_v4_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package s3api - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "strings" - "sync" - "testing" - - "hash/crc32" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" -) - -const ( - defaultTimestamp = "20130524T000000Z" - defaultBucketName = "examplebucket" - defaultAccessKeyId = "AKIAIOSFODNN7EXAMPLE" - defaultSecretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" - defaultRegion = "us-east-1" -) - -func generatestreamingAws4HmacSha256Payload() string { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming - - chunk1 := "10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n" + - strings.Repeat("a", 65536) + "\r\n" - chunk2 := "400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n" + - strings.Repeat("a", 1024) + "\r\n" - chunk3 := "0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n" + - "\r\n" // The last chunk is empty - - payload := chunk1 + chunk2 + chunk3 - return payload -} - -func NewRequeststreamingAws4HmacSha256Payload() (*http.Request, error) { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming - - payload := generatestreamingAws4HmacSha256Payload() - req, err := http.NewRequest("PUT", "http://s3.amazonaws.com/examplebucket/chunkObject.txt", bytes.NewReader([]byte(payload))) - if err != nil { - return nil, err - } - - req.Header.Set("Host", "s3.amazonaws.com") - req.Header.Set("x-amz-date", defaultTimestamp) - req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;content-length;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9") - req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD") - req.Header.Set("Content-Encoding", "aws-chunked") - req.Header.Set("x-amz-decoded-content-length", "66560") - req.Header.Set("Content-Length", "66824") - - return req, nil -} - -func TestNewSignV4ChunkedReaderstreamingAws4HmacSha256Payload(t *testing.T) { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming - req, err := NewRequeststreamingAws4HmacSha256Payload() - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - iam := setupIam() - - // The expected payload a long string of 'a's - expectedPayload := strings.Repeat("a", 66560) - - runWithRequest(iam, req, t, expectedPayload) -} - -func generateStreamingUnsignedPayloadTrailerPayload(includeFinalCRLF bool) string { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - - chunk1 := "2000\r\n" + strings.Repeat("a", 8192) + "\r\n" - chunk2 := "2000\r\n" + strings.Repeat("a", 8192) + "\r\n" - chunk3 := "400\r\n" + strings.Repeat("a", 1024) + "\r\n" - - chunk4 := "0\r\n" /* the last chunk is empty */ - - if includeFinalCRLF { - // Some clients omit the final CRLF, so we need to test that case as well - chunk4 += "\r\n" - } - - data := strings.Repeat("a", 17408) - writer := crc32.NewIEEE() - _, err := writer.Write([]byte(data)) - - if err != nil { - fmt.Println("Error:", err) - } - checksum := writer.Sum(nil) - base64EncodedChecksum := base64.StdEncoding.EncodeToString(checksum) - trailer := "x-amz-checksum-crc32:" + base64EncodedChecksum + "\n\r\n\r\n\r\n" - - payload := chunk1 + chunk2 + chunk3 + chunk4 + trailer - return payload -} - -func NewRequestStreamingUnsignedPayloadTrailer(includeFinalCRLF bool) (*http.Request, error) { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - - payload := generateStreamingUnsignedPayloadTrailerPayload(includeFinalCRLF) - req, err := http.NewRequest("PUT", "http://amzn-s3-demo-bucket/Key+", bytes.NewReader([]byte(payload))) - if err != nil { - return nil, err - } - - req.Header.Set("Host", "amzn-s3-demo-bucket") - req.Header.Set("x-amz-date", defaultTimestamp) - req.Header.Set("Content-Encoding", "aws-chunked") - req.Header.Set("x-amz-decoded-content-length", "17408") - req.Header.Set("x-amz-content-sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER") - req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32") - - return req, nil -} - -func TestNewSignV4ChunkedReaderStreamingUnsignedPayloadTrailer(t *testing.T) { - // This test will implement the following scenario: - // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - iam := setupIam() - - req, err := NewRequestStreamingUnsignedPayloadTrailer(true) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - // The expected payload a long string of 'a's - expectedPayload := strings.Repeat("a", 17408) - - runWithRequest(iam, req, t, expectedPayload) - - req, err = NewRequestStreamingUnsignedPayloadTrailer(false) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - runWithRequest(iam, req, t, expectedPayload) -} - -func runWithRequest(iam IdentityAccessManagement, req *http.Request, t *testing.T, expectedPayload string) { - reader, errCode := iam.newChunkedReader(req) - assert.NotNil(t, reader) - assert.Equal(t, s3err.ErrNone, errCode) - - data, err := io.ReadAll(reader) - if err != nil { - t.Fatalf("Failed to read data: %v", err) - } - - assert.Equal(t, expectedPayload, string(data)) -} - -func setupIam() IdentityAccessManagement { - // Create an IdentityAccessManagement instance - // Add default access keys and secrets - - iam := IdentityAccessManagement{ - identities: []*Identity{}, - accessKeyIdent: map[string]*Identity{}, - accounts: map[string]*Account{}, - emailAccount: map[string]*Account{}, - hashes: map[string]*sync.Pool{}, - hashCounters: map[string]*int32{}, - identityAnonymous: nil, - domain: "", - isAuthEnabled: false, - } - - iam.identities = append(iam.identities, &Identity{ - Name: "default", - Credentials: []*Credential{ - { - AccessKey: defaultAccessKeyId, - SecretKey: defaultSecretAccessKey, - }, - }, - Actions: []Action{ - "Read", - "Write", - "List", - }, - }) - - iam.accessKeyIdent[defaultAccessKeyId] = iam.identities[0] - return iam -} diff --git a/weed/s3api/cors/cors.go b/weed/s3api/cors/cors.go deleted file mode 100644 index d6eb520af..000000000 --- a/weed/s3api/cors/cors.go +++ /dev/null @@ -1,385 +0,0 @@ -package cors - -import ( - "fmt" - "net/http" - "strconv" - "strings" -) - -// CORSRule represents a single CORS rule -type CORSRule struct { - AllowedHeaders []string `xml:"AllowedHeader,omitempty" json:"AllowedHeaders,omitempty"` - AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"` - AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"` - ExposeHeaders []string `xml:"ExposeHeader,omitempty" json:"ExposeHeaders,omitempty"` - MaxAgeSeconds *int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"` - ID string `xml:"ID,omitempty" json:"ID,omitempty"` -} - -// CORSConfiguration represents the CORS configuration for a bucket -type CORSConfiguration struct { - CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"` -} - -// CORSRequest represents a CORS request -type CORSRequest struct { - Origin string - Method string - RequestHeaders []string - IsPreflightRequest bool - AccessControlRequestMethod string - AccessControlRequestHeaders []string -} - -// CORSResponse represents the response for a CORS request -type CORSResponse struct { - AllowOrigin string - AllowMethods string - AllowHeaders string - ExposeHeaders string - MaxAge string - AllowCredentials bool -} - -// ValidateConfiguration validates a CORS configuration -func ValidateConfiguration(config *CORSConfiguration) error { - if config == nil { - return fmt.Errorf("CORS configuration cannot be nil") - } - - if len(config.CORSRules) == 0 { - return fmt.Errorf("CORS configuration must have at least one rule") - } - - if len(config.CORSRules) > 100 { - return fmt.Errorf("CORS configuration cannot have more than 100 rules") - } - - for i, rule := range config.CORSRules { - if err := validateRule(&rule); err != nil { - return fmt.Errorf("invalid CORS rule at index %d: %v", i, err) - } - } - - return nil -} - -// ParseRequest parses an HTTP request to extract CORS information -func ParseRequest(r *http.Request) *CORSRequest { - corsReq := &CORSRequest{ - Origin: r.Header.Get("Origin"), - Method: r.Method, - } - - // Check if this is a preflight request - if r.Method == "OPTIONS" { - corsReq.IsPreflightRequest = true - corsReq.AccessControlRequestMethod = r.Header.Get("Access-Control-Request-Method") - - if headers := r.Header.Get("Access-Control-Request-Headers"); headers != "" { - corsReq.AccessControlRequestHeaders = strings.Split(headers, ",") - for i := range corsReq.AccessControlRequestHeaders { - corsReq.AccessControlRequestHeaders[i] = strings.TrimSpace(corsReq.AccessControlRequestHeaders[i]) - } - } - } - - return corsReq -} - -// validateRule validates a single CORS rule -func validateRule(rule *CORSRule) error { - if len(rule.AllowedMethods) == 0 { - return fmt.Errorf("AllowedMethods cannot be empty") - } - - if len(rule.AllowedOrigins) == 0 { - return fmt.Errorf("AllowedOrigins cannot be empty") - } - - // Validate allowed methods - validMethods := map[string]bool{ - "GET": true, - "PUT": true, - "POST": true, - "DELETE": true, - "HEAD": true, - } - - for _, method := range rule.AllowedMethods { - if !validMethods[method] { - return fmt.Errorf("invalid HTTP method: %s", method) - } - } - - // Validate origins - for _, origin := range rule.AllowedOrigins { - if origin == "*" { - continue - } - if err := validateOrigin(origin); err != nil { - return fmt.Errorf("invalid origin %s: %v", origin, err) - } - } - - // Validate MaxAgeSeconds - if rule.MaxAgeSeconds != nil && *rule.MaxAgeSeconds < 0 { - return fmt.Errorf("MaxAgeSeconds cannot be negative") - } - - return nil -} - -// validateOrigin validates an origin string -func validateOrigin(origin string) error { - if origin == "" { - return fmt.Errorf("origin cannot be empty") - } - - // Special case: "*" is always valid - if origin == "*" { - return nil - } - - // Count wildcards - wildcardCount := strings.Count(origin, "*") - if wildcardCount > 1 { - return fmt.Errorf("origin can contain at most one wildcard") - } - - // If there's a wildcard, it should be in a valid position - if wildcardCount == 1 { - // Must be in the format: http://*.example.com or https://*.example.com - if !strings.HasPrefix(origin, "http://") && !strings.HasPrefix(origin, "https://") { - return fmt.Errorf("origin with wildcard must start with http:// or https://") - } - } - - return nil -} - -// EvaluateRequest evaluates a CORS request against a CORS configuration -func EvaluateRequest(config *CORSConfiguration, corsReq *CORSRequest) (*CORSResponse, error) { - if config == nil || corsReq == nil { - return nil, fmt.Errorf("config and corsReq cannot be nil") - } - - if corsReq.Origin == "" { - return nil, fmt.Errorf("origin header is required for CORS requests") - } - - // Find the first rule that matches the origin - for _, rule := range config.CORSRules { - if matchesOrigin(rule.AllowedOrigins, corsReq.Origin) { - // For preflight requests, we need more detailed validation - if corsReq.IsPreflightRequest { - return buildPreflightResponse(&rule, corsReq), nil - } else { - // For actual requests, check method - if containsString(rule.AllowedMethods, corsReq.Method) { - return buildResponse(&rule, corsReq), nil - } - } - } - } - - return nil, fmt.Errorf("no matching CORS rule found") -} - -// buildPreflightResponse builds a CORS response for preflight requests -func buildPreflightResponse(rule *CORSRule, corsReq *CORSRequest) *CORSResponse { - response := &CORSResponse{ - AllowOrigin: corsReq.Origin, - } - - // Check if the requested method is allowed - methodAllowed := corsReq.AccessControlRequestMethod == "" || containsString(rule.AllowedMethods, corsReq.AccessControlRequestMethod) - - // Check requested headers - var allowedRequestHeaders []string - allHeadersAllowed := true - - if len(corsReq.AccessControlRequestHeaders) > 0 { - // Check if wildcard is allowed - hasWildcard := false - for _, header := range rule.AllowedHeaders { - if header == "*" { - hasWildcard = true - break - } - } - - if hasWildcard { - // All requested headers are allowed with wildcard - allowedRequestHeaders = corsReq.AccessControlRequestHeaders - } else { - // Check each requested header individually - for _, requestedHeader := range corsReq.AccessControlRequestHeaders { - if matchesHeader(rule.AllowedHeaders, requestedHeader) { - allowedRequestHeaders = append(allowedRequestHeaders, requestedHeader) - } else { - allHeadersAllowed = false - } - } - } - } - - // Only set method and header info if both method and ALL headers are allowed - if methodAllowed && allHeadersAllowed { - response.AllowMethods = strings.Join(rule.AllowedMethods, ", ") - - if len(allowedRequestHeaders) > 0 { - response.AllowHeaders = strings.Join(allowedRequestHeaders, ", ") - } - - // Set exposed headers - if len(rule.ExposeHeaders) > 0 { - response.ExposeHeaders = strings.Join(rule.ExposeHeaders, ", ") - } - - // Set max age - if rule.MaxAgeSeconds != nil { - response.MaxAge = strconv.Itoa(*rule.MaxAgeSeconds) - } - } - - return response -} - -// buildResponse builds a CORS response from a matching rule -func buildResponse(rule *CORSRule, corsReq *CORSRequest) *CORSResponse { - response := &CORSResponse{ - AllowOrigin: corsReq.Origin, - } - - // Set allowed methods - response.AllowMethods = strings.Join(rule.AllowedMethods, ", ") - - // Set allowed headers - if len(rule.AllowedHeaders) > 0 { - response.AllowHeaders = strings.Join(rule.AllowedHeaders, ", ") - } - - // Set expose headers - if len(rule.ExposeHeaders) > 0 { - response.ExposeHeaders = strings.Join(rule.ExposeHeaders, ", ") - } - - // Set max age - if rule.MaxAgeSeconds != nil { - response.MaxAge = strconv.Itoa(*rule.MaxAgeSeconds) - } - - return response -} - -// Helper functions - -// matchesOrigin checks if the request origin matches any allowed origin -func matchesOrigin(allowedOrigins []string, origin string) bool { - for _, allowedOrigin := range allowedOrigins { - if allowedOrigin == "*" { - return true - } - if allowedOrigin == origin { - return true - } - // Handle wildcard patterns like https://*.example.com - if strings.Contains(allowedOrigin, "*") { - if matchWildcard(allowedOrigin, origin) { - return true - } - } - } - return false -} - -// matchWildcard performs wildcard matching for origins -func matchWildcard(pattern, text string) bool { - // Simple wildcard matching - only supports single * at the beginning - if strings.HasPrefix(pattern, "http://*") { - suffix := pattern[8:] // Remove "http://*" - return strings.HasPrefix(text, "http://") && strings.HasSuffix(text, suffix) - } - if strings.HasPrefix(pattern, "https://*") { - suffix := pattern[9:] // Remove "https://*" - return strings.HasPrefix(text, "https://") && strings.HasSuffix(text, suffix) - } - return false -} - -// matchesHeader checks if a header is allowed -func matchesHeader(allowedHeaders []string, header string) bool { - // If no headers are specified, all headers are allowed - if len(allowedHeaders) == 0 { - return true - } - - // Header matching is case-insensitive - header = strings.ToLower(header) - - for _, allowedHeader := range allowedHeaders { - allowedHeaderLower := strings.ToLower(allowedHeader) - - // Wildcard match - if allowedHeaderLower == "*" { - return true - } - - // Exact match - if allowedHeaderLower == header { - return true - } - - // Prefix wildcard match (e.g., "x-amz-*" matches "x-amz-date") - if strings.HasSuffix(allowedHeaderLower, "*") { - prefix := strings.TrimSuffix(allowedHeaderLower, "*") - if strings.HasPrefix(header, prefix) { - return true - } - } - } - return false -} - -// containsString checks if a slice contains a specific string -func containsString(slice []string, item string) bool { - for _, s := range slice { - if s == item { - return true - } - } - return false -} - -// ApplyHeaders applies CORS headers to an HTTP response -func ApplyHeaders(w http.ResponseWriter, corsResp *CORSResponse) { - if corsResp == nil { - return - } - - if corsResp.AllowOrigin != "" { - w.Header().Set("Access-Control-Allow-Origin", corsResp.AllowOrigin) - } - - if corsResp.AllowMethods != "" { - w.Header().Set("Access-Control-Allow-Methods", corsResp.AllowMethods) - } - - if corsResp.AllowHeaders != "" { - w.Header().Set("Access-Control-Allow-Headers", corsResp.AllowHeaders) - } - - if corsResp.ExposeHeaders != "" { - w.Header().Set("Access-Control-Expose-Headers", corsResp.ExposeHeaders) - } - - if corsResp.MaxAge != "" { - w.Header().Set("Access-Control-Max-Age", corsResp.MaxAge) - } - - if corsResp.AllowCredentials { - w.Header().Set("Access-Control-Allow-Credentials", "true") - } -} diff --git a/weed/s3api/cors/cors_test.go b/weed/s3api/cors/cors_test.go deleted file mode 100644 index 1b5c54028..000000000 --- a/weed/s3api/cors/cors_test.go +++ /dev/null @@ -1,526 +0,0 @@ -package cors - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -func TestValidateConfiguration(t *testing.T) { - tests := []struct { - name string - config *CORSConfiguration - wantErr bool - }{ - { - name: "nil config", - config: nil, - wantErr: true, - }, - { - name: "empty rules", - config: &CORSConfiguration{ - CORSRules: []CORSRule{}, - }, - wantErr: true, - }, - { - name: "valid single rule", - config: &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"*"}, - }, - }, - }, - wantErr: false, - }, - { - name: "too many rules", - config: &CORSConfiguration{ - CORSRules: make([]CORSRule, 101), - }, - wantErr: true, - }, - { - name: "invalid method", - config: &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"INVALID"}, - AllowedOrigins: []string{"*"}, - }, - }, - }, - wantErr: true, - }, - { - name: "empty origins", - config: &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{}, - }, - }, - }, - wantErr: true, - }, - { - name: "invalid origin with multiple wildcards", - config: &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"http://*.*.example.com"}, - }, - }, - }, - wantErr: true, - }, - { - name: "negative MaxAgeSeconds", - config: &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"GET"}, - AllowedOrigins: []string{"*"}, - MaxAgeSeconds: intPtr(-1), - }, - }, - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateConfiguration(tt.config) - if (err != nil) != tt.wantErr { - t.Errorf("ValidateConfiguration() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestValidateOrigin(t *testing.T) { - tests := []struct { - name string - origin string - wantErr bool - }{ - { - name: "empty origin", - origin: "", - wantErr: true, - }, - { - name: "valid origin", - origin: "http://example.com", - wantErr: false, - }, - { - name: "wildcard origin", - origin: "*", - wantErr: false, - }, - { - name: "valid wildcard origin", - origin: "http://*.example.com", - wantErr: false, - }, - { - name: "https wildcard origin", - origin: "https://*.example.com", - wantErr: false, - }, - { - name: "invalid wildcard origin", - origin: "*.example.com", - wantErr: true, - }, - { - name: "multiple wildcards", - origin: "http://*.*.example.com", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validateOrigin(tt.origin) - if (err != nil) != tt.wantErr { - t.Errorf("validateOrigin() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestParseRequest(t *testing.T) { - tests := []struct { - name string - req *http.Request - want *CORSRequest - }{ - { - name: "simple GET request", - req: &http.Request{ - Method: "GET", - Header: http.Header{ - "Origin": []string{"http://example.com"}, - }, - }, - want: &CORSRequest{ - Origin: "http://example.com", - Method: "GET", - IsPreflightRequest: false, - }, - }, - { - name: "OPTIONS preflight request", - req: &http.Request{ - Method: "OPTIONS", - Header: http.Header{ - "Origin": []string{"http://example.com"}, - "Access-Control-Request-Method": []string{"PUT"}, - "Access-Control-Request-Headers": []string{"Content-Type, Authorization"}, - }, - }, - want: &CORSRequest{ - Origin: "http://example.com", - Method: "OPTIONS", - IsPreflightRequest: true, - AccessControlRequestMethod: "PUT", - AccessControlRequestHeaders: []string{"Content-Type", "Authorization"}, - }, - }, - { - name: "request without origin", - req: &http.Request{ - Method: "GET", - Header: http.Header{}, - }, - want: &CORSRequest{ - Origin: "", - Method: "GET", - IsPreflightRequest: false, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := ParseRequest(tt.req) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("ParseRequest() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestMatchesOrigin(t *testing.T) { - tests := []struct { - name string - allowedOrigins []string - origin string - want bool - }{ - { - name: "wildcard match", - allowedOrigins: []string{"*"}, - origin: "http://example.com", - want: true, - }, - { - name: "exact match", - allowedOrigins: []string{"http://example.com"}, - origin: "http://example.com", - want: true, - }, - { - name: "no match", - allowedOrigins: []string{"http://example.com"}, - origin: "http://other.com", - want: false, - }, - { - name: "wildcard subdomain match", - allowedOrigins: []string{"http://*.example.com"}, - origin: "http://api.example.com", - want: true, - }, - { - name: "wildcard subdomain no match", - allowedOrigins: []string{"http://*.example.com"}, - origin: "http://example.com", - want: false, - }, - { - name: "multiple origins with match", - allowedOrigins: []string{"http://example.com", "http://other.com"}, - origin: "http://other.com", - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := matchesOrigin(tt.allowedOrigins, tt.origin) - if got != tt.want { - t.Errorf("matchesOrigin() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestMatchesHeader(t *testing.T) { - tests := []struct { - name string - allowedHeaders []string - header string - want bool - }{ - { - name: "empty allowed headers", - allowedHeaders: []string{}, - header: "Content-Type", - want: true, - }, - { - name: "wildcard match", - allowedHeaders: []string{"*"}, - header: "Content-Type", - want: true, - }, - { - name: "exact match", - allowedHeaders: []string{"Content-Type"}, - header: "Content-Type", - want: true, - }, - { - name: "case insensitive match", - allowedHeaders: []string{"content-type"}, - header: "Content-Type", - want: true, - }, - { - name: "no match", - allowedHeaders: []string{"Authorization"}, - header: "Content-Type", - want: false, - }, - { - name: "wildcard prefix match", - allowedHeaders: []string{"x-amz-*"}, - header: "x-amz-date", - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := matchesHeader(tt.allowedHeaders, tt.header) - if got != tt.want { - t.Errorf("matchesHeader() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestEvaluateRequest(t *testing.T) { - config := &CORSConfiguration{ - CORSRules: []CORSRule{ - { - AllowedMethods: []string{"GET", "POST"}, - AllowedOrigins: []string{"http://example.com"}, - AllowedHeaders: []string{"Content-Type"}, - ExposeHeaders: []string{"ETag"}, - MaxAgeSeconds: intPtr(3600), - }, - { - AllowedMethods: []string{"PUT"}, - AllowedOrigins: []string{"*"}, - }, - }, - } - - tests := []struct { - name string - config *CORSConfiguration - corsReq *CORSRequest - want *CORSResponse - wantErr bool - }{ - { - name: "matching first rule", - config: config, - corsReq: &CORSRequest{ - Origin: "http://example.com", - Method: "GET", - }, - want: &CORSResponse{ - AllowOrigin: "http://example.com", - AllowMethods: "GET, POST", - AllowHeaders: "Content-Type", - ExposeHeaders: "ETag", - MaxAge: "3600", - }, - wantErr: false, - }, - { - name: "matching second rule", - config: config, - corsReq: &CORSRequest{ - Origin: "http://other.com", - Method: "PUT", - }, - want: &CORSResponse{ - AllowOrigin: "http://other.com", - AllowMethods: "PUT", - }, - wantErr: false, - }, - { - name: "no matching rule", - config: config, - corsReq: &CORSRequest{ - Origin: "http://forbidden.com", - Method: "GET", - }, - want: nil, - wantErr: true, - }, - { - name: "preflight request", - config: config, - corsReq: &CORSRequest{ - Origin: "http://example.com", - Method: "OPTIONS", - IsPreflightRequest: true, - AccessControlRequestMethod: "POST", - AccessControlRequestHeaders: []string{"Content-Type"}, - }, - want: &CORSResponse{ - AllowOrigin: "http://example.com", - AllowMethods: "GET, POST", - AllowHeaders: "Content-Type", - ExposeHeaders: "ETag", - MaxAge: "3600", - }, - wantErr: false, - }, - { - name: "preflight request with forbidden header", - config: config, - corsReq: &CORSRequest{ - Origin: "http://example.com", - Method: "OPTIONS", - IsPreflightRequest: true, - AccessControlRequestMethod: "POST", - AccessControlRequestHeaders: []string{"Authorization"}, - }, - want: &CORSResponse{ - AllowOrigin: "http://example.com", - // No AllowMethods or AllowHeaders because the requested header is forbidden - }, - wantErr: false, - }, - { - name: "request without origin", - config: config, - corsReq: &CORSRequest{ - Origin: "", - Method: "GET", - }, - want: nil, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := EvaluateRequest(tt.config, tt.corsReq) - if (err != nil) != tt.wantErr { - t.Errorf("EvaluateRequest() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("EvaluateRequest() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestApplyHeaders(t *testing.T) { - tests := []struct { - name string - corsResp *CORSResponse - want map[string]string - }{ - { - name: "nil response", - corsResp: nil, - want: map[string]string{}, - }, - { - name: "complete response", - corsResp: &CORSResponse{ - AllowOrigin: "http://example.com", - AllowMethods: "GET, POST", - AllowHeaders: "Content-Type", - ExposeHeaders: "ETag", - MaxAge: "3600", - }, - want: map[string]string{ - "Access-Control-Allow-Origin": "http://example.com", - "Access-Control-Allow-Methods": "GET, POST", - "Access-Control-Allow-Headers": "Content-Type", - "Access-Control-Expose-Headers": "ETag", - "Access-Control-Max-Age": "3600", - }, - }, - { - name: "with credentials", - corsResp: &CORSResponse{ - AllowOrigin: "http://example.com", - AllowMethods: "GET", - AllowCredentials: true, - }, - want: map[string]string{ - "Access-Control-Allow-Origin": "http://example.com", - "Access-Control-Allow-Methods": "GET", - "Access-Control-Allow-Credentials": "true", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a proper response writer using httptest - w := httptest.NewRecorder() - - ApplyHeaders(w, tt.corsResp) - - // Extract headers from the response - headers := make(map[string]string) - for key, values := range w.Header() { - if len(values) > 0 { - headers[key] = values[0] - } - } - - if !reflect.DeepEqual(headers, tt.want) { - t.Errorf("ApplyHeaders() headers = %v, want %v", headers, tt.want) - } - }) - } -} - -// Helper functions and types for testing - -func intPtr(i int) *int { - return &i -} diff --git a/weed/s3api/cors/middleware.go b/weed/s3api/cors/middleware.go deleted file mode 100644 index c9cd0e19e..000000000 --- a/weed/s3api/cors/middleware.go +++ /dev/null @@ -1,156 +0,0 @@ -package cors - -import ( - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// BucketChecker interface for checking bucket existence -type BucketChecker interface { - CheckBucket(r *http.Request, bucket string) s3err.ErrorCode -} - -// CORSConfigGetter interface for getting CORS configuration -type CORSConfigGetter interface { - GetCORSConfiguration(bucket string) (*CORSConfiguration, s3err.ErrorCode) -} - -// Middleware handles CORS evaluation for all S3 API requests -type Middleware struct { - bucketChecker BucketChecker - corsConfigGetter CORSConfigGetter -} - -// NewMiddleware creates a new CORS middleware instance -func NewMiddleware(bucketChecker BucketChecker, corsConfigGetter CORSConfigGetter) *Middleware { - return &Middleware{ - bucketChecker: bucketChecker, - corsConfigGetter: corsConfigGetter, - } -} - -// Handler returns the CORS middleware handler -func (m *Middleware) Handler(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Parse CORS request - corsReq := ParseRequest(r) - - // If not a CORS request, continue normally - if corsReq.Origin == "" { - next.ServeHTTP(w, r) - return - } - - // Extract bucket from request - bucket, _ := s3_constants.GetBucketAndObject(r) - if bucket == "" { - next.ServeHTTP(w, r) - return - } - - // Check if bucket exists - if err := m.bucketChecker.CheckBucket(r, bucket); err != s3err.ErrNone { - // For non-existent buckets, let the normal handler deal with it - next.ServeHTTP(w, r) - return - } - - // Load CORS configuration from cache - config, errCode := m.corsConfigGetter.GetCORSConfiguration(bucket) - if errCode != s3err.ErrNone || config == nil { - // No CORS configuration, handle based on request type - if corsReq.IsPreflightRequest { - // Preflight request without CORS config should fail - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - // Non-preflight request, continue normally - next.ServeHTTP(w, r) - return - } - - // Evaluate CORS request - corsResp, err := EvaluateRequest(config, corsReq) - if err != nil { - glog.V(3).Infof("CORS evaluation failed for bucket %s: %v", bucket, err) - if corsReq.IsPreflightRequest { - // Preflight request that doesn't match CORS rules should fail - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - // Non-preflight request, continue normally but without CORS headers - next.ServeHTTP(w, r) - return - } - - // Apply CORS headers - ApplyHeaders(w, corsResp) - - // Handle preflight requests - if corsReq.IsPreflightRequest { - // Preflight request should return 200 OK with just CORS headers - w.WriteHeader(http.StatusOK) - return - } - - // For actual requests, continue with normal processing - next.ServeHTTP(w, r) - }) -} - -// HandleOptionsRequest handles OPTIONS requests for CORS preflight -func (m *Middleware) HandleOptionsRequest(w http.ResponseWriter, r *http.Request) { - // Parse CORS request - corsReq := ParseRequest(r) - - // If not a CORS request, return OK - if corsReq.Origin == "" { - w.WriteHeader(http.StatusOK) - return - } - - // Extract bucket from request - bucket, _ := s3_constants.GetBucketAndObject(r) - if bucket == "" { - w.WriteHeader(http.StatusOK) - return - } - - // Check if bucket exists - if err := m.bucketChecker.CheckBucket(r, bucket); err != s3err.ErrNone { - // For non-existent buckets, return OK (let other handlers deal with bucket existence) - w.WriteHeader(http.StatusOK) - return - } - - // Load CORS configuration from cache - config, errCode := m.corsConfigGetter.GetCORSConfiguration(bucket) - if errCode != s3err.ErrNone || config == nil { - // No CORS configuration for OPTIONS request should return access denied - if corsReq.IsPreflightRequest { - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - w.WriteHeader(http.StatusOK) - return - } - - // Evaluate CORS request - corsResp, err := EvaluateRequest(config, corsReq) - if err != nil { - glog.V(3).Infof("CORS evaluation failed for bucket %s: %v", bucket, err) - if corsReq.IsPreflightRequest { - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - w.WriteHeader(http.StatusOK) - return - } - - // Apply CORS headers and return success - ApplyHeaders(w, corsResp) - w.WriteHeader(http.StatusOK) -} diff --git a/weed/s3api/custom_types.go b/weed/s3api/custom_types.go index cc170d0ad..569dfc3ac 100644 --- a/weed/s3api/custom_types.go +++ b/weed/s3api/custom_types.go @@ -1,11 +1,3 @@ package s3api -import "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00" - -// ConditionalHeaderResult holds the result of conditional header checking -type ConditionalHeaderResult struct { - ErrorCode s3err.ErrorCode - ETag string // ETag of the object (for 304 responses) -} diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index d181d51da..32b93307a 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,38 +1,24 @@ package s3api import ( - "cmp" - "crypto/rand" - "encoding/base64" "encoding/hex" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "golang.org/x/exp/slices" "math" "path/filepath" - "slices" "sort" "strconv" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" -) - -const ( - multipartExt = ".part" - multiPartMinSize = 5 * 1024 * 1024 + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type InitiateMultipartUploadResult struct { @@ -40,60 +26,26 @@ type InitiateMultipartUploadResult struct { s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(r *http.Request, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) { glog.V(2).Infof("createMultipartUpload input %v", input) uploadIdString := s3a.generateUploadID(*input.Key) - uploadIdString = uploadIdString + "_" + strings.ReplaceAll(uuid.New().String(), "-", "") - - // Prepare error handling outside callback scope - var encryptionError error - if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } entry.Extended["key"] = []byte(*input.Key) - - // Set object owner for multipart upload - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(amzAccountId) - } - for k, v := range input.Metadata { entry.Extended[k] = []byte(*v) } if input.ContentType != nil { entry.Attributes.Mime = *input.ContentType } - - // Prepare and apply encryption configuration within directory creation - // This ensures encryption resources are only allocated if directory creation succeeds - encryptionConfig, prepErr := s3a.prepareMultipartEncryptionConfig(r, uploadIdString) - if prepErr != nil { - encryptionError = prepErr - return // Exit callback, letting mkdir handle the error - } - s3a.applyMultipartEncryptionConfig(entry, encryptionConfig) - - // Extract and store object lock metadata from request headers - // This ensures object lock settings from create_multipart_upload are preserved - if err := s3a.extractObjectLockMetadataFromRequest(r, entry); err != nil { - glog.Errorf("createMultipartUpload: failed to extract object lock metadata: %v", err) - // Don't fail the upload - this matches AWS behavior for invalid metadata - } }); err != nil { - _, errorCode := handleMultipartInternalError("create multipart upload directory", err) - return nil, errorCode - } - - // Check for encryption configuration errors that occurred within the callback - if encryptionError != nil { - _, errorCode := handleMultipartInternalError("prepare encryption configuration", encryptionError) - return nil, errorCode + glog.Errorf("NewMultipartUpload error: %v", err) + return nil, s3err.ErrInternalError } output = &InitiateMultipartUploadResult{ @@ -108,398 +60,66 @@ func (s3a *S3ApiServer) createMultipartUpload(r *http.Request, input *s3.CreateM } type CompleteMultipartUploadResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` - Location *string `xml:"Location,omitempty"` - Bucket *string `xml:"Bucket,omitempty"` - Key *string `xml:"Key,omitempty"` - ETag *string `xml:"ETag,omitempty"` - // VersionId is NOT included in XML body - it should only be in x-amz-version-id HTTP header - - // Store the VersionId internally for setting HTTP header, but don't marshal to XML - VersionId *string `xml:"-"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` + s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(r *http.Request, input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) { glog.V(2).Infof("completeMultipartUpload input %v", input) - if len(parts.Parts) == 0 { - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() - return nil, s3err.ErrNoSuchUpload - } - completedPartNumbers := []int{} - completedPartMap := make(map[int][]string) - maxPartNo := 1 - - for _, part := range parts.Parts { - if _, ok := completedPartMap[part.PartNumber]; !ok { - completedPartNumbers = append(completedPartNumbers, part.PartNumber) - } - completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag) - maxPartNo = maxInt(maxPartNo, part.PartNumber) - } - sort.Ints(completedPartNumbers) + completedParts := parts.Parts + slices.SortFunc(completedParts, func(a, b CompletedPart) bool { + return a.PartNumber < b.PartNumber + }) uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, _, err := s3a.list(uploadDirectory, "", "", false, 0) - if err != nil { - glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() - return nil, s3err.ErrNoSuchUpload - } - if len(entries) == 0 { - entryName, dirName := s3a.getEntryNameAndDir(input) - if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil { - if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) { - return &CompleteMultipartUploadResult{ - Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))), - Bucket: input.Bucket, - ETag: aws.String("\"" + filer.ETagChunks(entry.GetChunks()) + "\""), - Key: objectKey(input.Key), - }, s3err.ErrNone - } - } - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() + entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList) + if err != nil || len(entries) == 0 { + glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) return nil, s3err.ErrNoSuchUpload } pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId) if err != nil { glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() return nil, s3err.ErrNoSuchUpload } - deleteEntries := []*filer_pb.Entry{} - partEntries := make(map[int][]*filer_pb.Entry, len(entries)) - entityTooSmall := false - for _, entry := range entries { - foundEntry := false - glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name) - if entry.IsDirectory || !strings.HasSuffix(entry.Name, multipartExt) { - continue - } - partNumber, err := parsePartNumber(entry.Name) - if err != nil { - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNumber).Inc() - glog.Errorf("completeMultipartUpload failed to pasre partNumber %s:%s", entry.Name, err) - continue - } - completedPartsByNumber, ok := completedPartMap[partNumber] - if !ok { - continue - } - for _, partETag := range completedPartsByNumber { - partETag = strings.Trim(partETag, `"`) - entryETag := hex.EncodeToString(entry.Attributes.GetMd5()) - if partETag != "" && len(partETag) == 32 && entryETag != "" { - if entryETag != partETag { - glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagMismatch).Inc() - continue - } - } else { - glog.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagInvalid).Inc() - } - if len(entry.Chunks) == 0 && partNumber != maxPartNo { - glog.Warningf("completeMultipartUpload %s empty chunks", entry.Name) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEmpty).Inc() - continue - } - //there maybe multi same part, because of client retry - partEntries[partNumber] = append(partEntries[partNumber], entry) - foundEntry = true - } - if foundEntry { - if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] && - entry.Attributes.FileSize < multiPartMinSize { - glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name) - entityTooSmall = true - } - } else { - deleteEntries = append(deleteEntries, entry) - } - } - if entityTooSmall { - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc() - return nil, s3err.ErrEntityTooSmall - } + mime := pentry.Attributes.Mime + var finalParts []*filer_pb.FileChunk var offset int64 - for _, partNumber := range completedPartNumbers { - partEntriesByNumber, ok := partEntries[partNumber] - if !ok { - glog.Errorf("part %d has no entry", partNumber) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNotFound).Inc() - return nil, s3err.ErrInvalidPart - } - found := false - if len(partEntriesByNumber) > 1 { - slices.SortFunc(partEntriesByNumber, func(a, b *filer_pb.Entry) int { - return cmp.Compare(b.Chunks[0].ModifiedTsNs, a.Chunks[0].ModifiedTsNs) - }) - } - for _, entry := range partEntriesByNumber { - if found { - deleteEntries = append(deleteEntries, entry) - stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEntryMismatch).Inc() + + for _, entry := range entries { + if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { + partETag, found := findByPartNumber(entry.Name, completedParts) + if !found { continue } - - // Track within-part offset for SSE-KMS IV calculation - var withinPartOffset int64 = 0 - - for _, chunk := range entry.GetChunks() { - // Update SSE metadata with correct within-part offset (unified approach for KMS and SSE-C) - sseKmsMetadata := chunk.SseMetadata - - if chunk.SseType == filer_pb.SSEType_SSE_KMS && len(chunk.SseMetadata) > 0 { - // Deserialize, update offset, and re-serialize SSE-KMS metadata - if kmsKey, err := DeserializeSSEKMSMetadata(chunk.SseMetadata); err == nil { - kmsKey.ChunkOffset = withinPartOffset - if updatedMetadata, serErr := SerializeSSEKMSMetadata(kmsKey); serErr == nil { - sseKmsMetadata = updatedMetadata - glog.V(4).Infof("Updated SSE-KMS metadata for chunk in part %d: withinPartOffset=%d", partNumber, withinPartOffset) - } - } - } else if chunk.SseType == filer_pb.SSEType_SSE_C { - // For SSE-C chunks, create per-chunk metadata using the part's IV - if ivData, exists := entry.Extended[s3_constants.SeaweedFSSSEIV]; exists { - // Get keyMD5 from entry metadata if available - var keyMD5 string - if keyMD5Data, keyExists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; keyExists { - keyMD5 = string(keyMD5Data) - } - - // Create SSE-C metadata with the part's IV and this chunk's within-part offset - if ssecMetadata, serErr := SerializeSSECMetadata(ivData, keyMD5, withinPartOffset); serErr == nil { - sseKmsMetadata = ssecMetadata // Reuse the same field for unified handling - glog.V(4).Infof("Created SSE-C metadata for chunk in part %d: withinPartOffset=%d", partNumber, withinPartOffset) - } else { - glog.Errorf("Failed to serialize SSE-C metadata for chunk in part %d: %v", partNumber, serErr) - } - } else { - glog.Errorf("SSE-C chunk in part %d missing IV in entry metadata", partNumber) - } - } - + entryETag := hex.EncodeToString(entry.Attributes.GetMd5()) + if partETag != "" && len(partETag) == 32 && entryETag != "" && entryETag != partETag { + glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag) + return nil, s3err.ErrInvalidPart + } + for _, chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.GetFileIdString(), - Offset: offset, - Size: chunk.Size, - ModifiedTsNs: chunk.ModifiedTsNs, - CipherKey: chunk.CipherKey, - ETag: chunk.ETag, - IsCompressed: chunk.IsCompressed, - // Preserve SSE metadata with updated within-part offset - SseType: chunk.SseType, - SseMetadata: sseKmsMetadata, + FileId: chunk.GetFileIdString(), + Offset: offset, + Size: chunk.Size, + Mtime: chunk.Mtime, + CipherKey: chunk.CipherKey, + ETag: chunk.ETag, } finalParts = append(finalParts, p) offset += int64(chunk.Size) - withinPartOffset += int64(chunk.Size) } - found = true } } - entryName, dirName := s3a.getEntryNameAndDir(input) - - // Check if versioning is configured for this bucket BEFORE creating any files - versioningState, vErr := s3a.getVersioningState(*input.Bucket) - if vErr == nil && versioningState == s3_constants.VersioningEnabled { - // For versioned buckets, create a version and return the version ID - versionId := generateVersionId() - versionFileName := s3a.getVersionFileName(versionId) - versionDir := dirName + "/" + entryName + ".versions" - - // Move the completed object to the versions directory - err = s3a.mkFile(versionDir, versionFileName, finalParts, func(versionEntry *filer_pb.Entry) { - if versionEntry.Extended == nil { - versionEntry.Extended = make(map[string][]byte) - } - versionEntry.Extended[s3_constants.ExtVersionIdKey] = []byte(versionId) - versionEntry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId) - - // Set object owner for versioned multipart objects - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - versionEntry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(amzAccountId) - } - - for k, v := range pentry.Extended { - if k != "key" { - versionEntry.Extended[k] = v - } - } - - // Preserve SSE-KMS metadata from the first part (if any) - // SSE-KMS metadata is stored in individual parts, not the upload directory - if len(completedPartNumbers) > 0 && len(partEntries[completedPartNumbers[0]]) > 0 { - firstPartEntry := partEntries[completedPartNumbers[0]][0] - if firstPartEntry.Extended != nil { - // Copy SSE-KMS metadata from the first part - if kmsMetadata, exists := firstPartEntry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - versionEntry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - glog.V(3).Infof("completeMultipartUpload: preserved SSE-KMS metadata from first part (versioned)") - } - } - } - if pentry.Attributes.Mime != "" { - versionEntry.Attributes.Mime = pentry.Attributes.Mime - } else if mime != "" { - versionEntry.Attributes.Mime = mime - } - versionEntry.Attributes.FileSize = uint64(offset) - }) - - if err != nil { - glog.Errorf("completeMultipartUpload: failed to create version %s: %v", versionId, err) - return nil, s3err.ErrInternalError - } - - // Update the .versions directory metadata to indicate this is the latest version - err = s3a.updateLatestVersionInDirectory(*input.Bucket, *input.Key, versionId, versionFileName) - if err != nil { - glog.Errorf("completeMultipartUpload: failed to update latest version in directory: %v", err) - return nil, s3err.ErrInternalError - } - - // For versioned buckets, don't create a main object file - all content is stored in .versions directory - // The latest version information is tracked in the .versions directory metadata - - output = &CompleteMultipartUploadResult{ - Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))), - Bucket: input.Bucket, - ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), - Key: objectKey(input.Key), - VersionId: aws.String(versionId), - } - } else if vErr == nil && versioningState == s3_constants.VersioningSuspended { - // For suspended versioning, add "null" version ID metadata and return "null" version ID - err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - entry.Extended[s3_constants.ExtVersionIdKey] = []byte("null") - - // Set object owner for suspended versioning multipart objects - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(amzAccountId) - } - - for k, v := range pentry.Extended { - if k != "key" { - entry.Extended[k] = v - } - } - - // Preserve SSE-KMS metadata from the first part (if any) - // SSE-KMS metadata is stored in individual parts, not the upload directory - if len(completedPartNumbers) > 0 && len(partEntries[completedPartNumbers[0]]) > 0 { - firstPartEntry := partEntries[completedPartNumbers[0]][0] - if firstPartEntry.Extended != nil { - // Copy SSE-KMS metadata from the first part - if kmsMetadata, exists := firstPartEntry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - entry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - glog.V(3).Infof("completeMultipartUpload: preserved SSE-KMS metadata from first part (suspended versioning)") - } - } - } - if pentry.Attributes.Mime != "" { - entry.Attributes.Mime = pentry.Attributes.Mime - } else if mime != "" { - entry.Attributes.Mime = mime - } - entry.Attributes.FileSize = uint64(offset) - }) - - if err != nil { - glog.Errorf("completeMultipartUpload: failed to create suspended versioning object: %v", err) - return nil, s3err.ErrInternalError - } - - // Note: Suspended versioning should NOT return VersionId field according to AWS S3 spec - output = &CompleteMultipartUploadResult{ - Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))), - Bucket: input.Bucket, - ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), - Key: objectKey(input.Key), - // VersionId field intentionally omitted for suspended versioning - } - } else { - // For non-versioned buckets, create main object file - err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId) - - // Set object owner for non-versioned multipart objects - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(amzAccountId) - } - - for k, v := range pentry.Extended { - if k != "key" { - entry.Extended[k] = v - } - } - - // Preserve SSE-KMS metadata from the first part (if any) - // SSE-KMS metadata is stored in individual parts, not the upload directory - if len(completedPartNumbers) > 0 && len(partEntries[completedPartNumbers[0]]) > 0 { - firstPartEntry := partEntries[completedPartNumbers[0]][0] - if firstPartEntry.Extended != nil { - // Copy SSE-KMS metadata from the first part - if kmsMetadata, exists := firstPartEntry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - entry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - glog.V(3).Infof("completeMultipartUpload: preserved SSE-KMS metadata from first part") - } - } - } - if pentry.Attributes.Mime != "" { - entry.Attributes.Mime = pentry.Attributes.Mime - } else if mime != "" { - entry.Attributes.Mime = mime - } - entry.Attributes.FileSize = uint64(offset) - }) - - if err != nil { - glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) - return nil, s3err.ErrInternalError - } - - // For non-versioned buckets, return response without VersionId - output = &CompleteMultipartUploadResult{ - Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))), - Bucket: input.Bucket, - ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), - Key: objectKey(input.Key), - } - } - - for _, deleteEntry := range deleteEntries { - //delete unused part data - if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil { - glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err) - } - } - if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { - glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) - } - - return -} - -func (s3a *S3ApiServer) getEntryNameAndDir(input *s3.CompleteMultipartUploadInput) (string, string) { entryName := filepath.Base(*input.Key) - dirName := filepath.ToSlash(filepath.Dir(*input.Key)) + dirName := filepath.Dir(*input.Key) if dirName == "." { dirName = "" } @@ -512,18 +132,67 @@ func (s3a *S3ApiServer) getEntryNameAndDir(input *s3.CompleteMultipartUploadInpu if strings.HasSuffix(dirName, "/") { dirName = dirName[:len(dirName)-1] } - return entryName, dirName + + err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) { + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + for k, v := range pentry.Extended { + if k != "key" { + entry.Extended[k] = v + } + } + if pentry.Attributes.Mime != "" { + entry.Attributes.Mime = pentry.Attributes.Mime + } else if mime != "" { + entry.Attributes.Mime = mime + } + }) + + if err != nil { + glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) + return nil, s3err.ErrInternalError + } + + output = &CompleteMultipartUploadResult{ + CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ + Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlPathEscape(dirName), urlPathEscape(entryName))), + Bucket: input.Bucket, + ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), + Key: objectKey(input.Key), + }, + } + + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { + glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) + } + + return } -func parsePartNumber(fileName string) (int, error) { - var partNumberString string - index := strings.Index(fileName, "_") - if index != -1 { - partNumberString = fileName[:index] - } else { - partNumberString = fileName[:len(fileName)-len(multipartExt)] +func findByPartNumber(fileName string, parts []CompletedPart) (etag string, found bool) { + partNumber, formatErr := strconv.Atoi(fileName[:4]) + if formatErr != nil { + return } - return strconv.Atoi(partNumberString) + x := sort.Search(len(parts), func(i int) bool { + return parts[i].PartNumber >= partNumber + }) + if x >= len(parts) { + return + } + if parts[x].PartNumber != partNumber { + return + } + y := 0 + for i, part := range parts[x:] { + if part.PartNumber == partNumber { + y = i + } else { + break + } + } + return parts[x+y].ETag, true } func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) { @@ -575,7 +244,6 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput KeyMarker: input.KeyMarker, MaxUploads: input.MaxUploads, Prefix: input.Prefix, - IsTruncated: aws.Bool(false), } entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32) @@ -639,7 +307,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP StorageClass: aws.String("STANDARD"), } - entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d%s", *input.PartNumberMarker, multipartExt), false, uint32(*input.MaxParts)) + entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, s3err.ErrNoSuchUpload @@ -651,8 +319,9 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP output.IsTruncated = aws.Bool(!isLast) for _, entry := range entries { - if strings.HasSuffix(entry.Name, multipartExt) && !entry.IsDirectory { - partNumber, err := parsePartNumber(entry.Name) + if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { + partNumberString := entry.Name[:len(entry.Name)-len(".part")] + partNumber, err := strconv.Atoi(partNumberString) if err != nil { glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err) continue @@ -671,108 +340,3 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP return } - -// maxInt returns the maximum of two int values -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -// MultipartEncryptionConfig holds pre-prepared encryption configuration to avoid error handling in callbacks -type MultipartEncryptionConfig struct { - // SSE-KMS configuration - IsSSEKMS bool - KMSKeyID string - BucketKeyEnabled bool - EncryptionContext string - KMSBaseIVEncoded string - - // SSE-S3 configuration - IsSSES3 bool - S3BaseIVEncoded string - S3KeyDataEncoded string -} - -// prepareMultipartEncryptionConfig prepares encryption configuration with proper error handling -// This eliminates the need for criticalError variable in callback functions -func (s3a *S3ApiServer) prepareMultipartEncryptionConfig(r *http.Request, uploadIdString string) (*MultipartEncryptionConfig, error) { - config := &MultipartEncryptionConfig{} - - // Prepare SSE-KMS configuration - if IsSSEKMSRequest(r) { - config.IsSSEKMS = true - config.KMSKeyID = r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - config.BucketKeyEnabled = strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true" - config.EncryptionContext = r.Header.Get(s3_constants.AmzServerSideEncryptionContext) - - // Generate and encode base IV with proper error handling - baseIV := make([]byte, s3_constants.AESBlockSize) - n, err := rand.Read(baseIV) - if err != nil || n != len(baseIV) { - return nil, fmt.Errorf("failed to generate secure IV for SSE-KMS multipart upload: %v (read %d/%d bytes)", err, n, len(baseIV)) - } - config.KMSBaseIVEncoded = base64.StdEncoding.EncodeToString(baseIV) - glog.V(4).Infof("Generated base IV %x for SSE-KMS multipart upload %s", baseIV[:8], uploadIdString) - } - - // Prepare SSE-S3 configuration - if IsSSES3RequestInternal(r) { - config.IsSSES3 = true - - // Generate and encode base IV with proper error handling - baseIV := make([]byte, s3_constants.AESBlockSize) - n, err := rand.Read(baseIV) - if err != nil || n != len(baseIV) { - return nil, fmt.Errorf("failed to generate secure IV for SSE-S3 multipart upload: %v (read %d/%d bytes)", err, n, len(baseIV)) - } - config.S3BaseIVEncoded = base64.StdEncoding.EncodeToString(baseIV) - glog.V(4).Infof("Generated base IV %x for SSE-S3 multipart upload %s", baseIV[:8], uploadIdString) - - // Generate and serialize SSE-S3 key with proper error handling - keyManager := GetSSES3KeyManager() - sseS3Key, err := keyManager.GetOrCreateKey("") - if err != nil { - return nil, fmt.Errorf("failed to generate SSE-S3 key for multipart upload: %v", err) - } - - keyData, serErr := SerializeSSES3Metadata(sseS3Key) - if serErr != nil { - return nil, fmt.Errorf("failed to serialize SSE-S3 metadata for multipart upload: %v", serErr) - } - - config.S3KeyDataEncoded = base64.StdEncoding.EncodeToString(keyData) - - // Store key in manager for later retrieval - keyManager.StoreKey(sseS3Key) - glog.V(4).Infof("Stored SSE-S3 key %s for multipart upload %s", sseS3Key.KeyID, uploadIdString) - } - - return config, nil -} - -// applyMultipartEncryptionConfig applies pre-prepared encryption configuration to filer entry -// This function is guaranteed not to fail since all error-prone operations were done during preparation -func (s3a *S3ApiServer) applyMultipartEncryptionConfig(entry *filer_pb.Entry, config *MultipartEncryptionConfig) { - // Apply SSE-KMS configuration - if config.IsSSEKMS { - entry.Extended[s3_constants.SeaweedFSSSEKMSKeyID] = []byte(config.KMSKeyID) - if config.BucketKeyEnabled { - entry.Extended[s3_constants.SeaweedFSSSEKMSBucketKeyEnabled] = []byte("true") - } - if config.EncryptionContext != "" { - entry.Extended[s3_constants.SeaweedFSSSEKMSEncryptionContext] = []byte(config.EncryptionContext) - } - entry.Extended[s3_constants.SeaweedFSSSEKMSBaseIV] = []byte(config.KMSBaseIVEncoded) - glog.V(3).Infof("applyMultipartEncryptionConfig: applied SSE-KMS settings with keyID %s", config.KMSKeyID) - } - - // Apply SSE-S3 configuration - if config.IsSSES3 { - entry.Extended[s3_constants.SeaweedFSSSES3Encryption] = []byte(s3_constants.SSEAlgorithmAES256) - entry.Extended[s3_constants.SeaweedFSSSES3BaseIV] = []byte(config.S3BaseIVEncoded) - entry.Extended[s3_constants.SeaweedFSSSES3KeyData] = []byte(config.S3KeyDataEncoded) - glog.V(3).Infof("applyMultipartEncryptionConfig: applied SSE-S3 settings") - } -} diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go index 7f75a40de..fe2b9c0ce 100644 --- a/weed/s3api/filer_multipart_test.go +++ b/weed/s3api/filer_multipart_test.go @@ -3,7 +3,7 @@ package s3api import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/stretchr/testify/assert" "testing" "time" @@ -50,27 +50,88 @@ func TestListPartsResult(t *testing.T) { } -func Test_parsePartNumber(t *testing.T) { - tests := []struct { - name string +func Test_findByPartNumber(t *testing.T) { + type args struct { fileName string - partNum int + parts []CompletedPart + } + + parts := []CompletedPart{ + CompletedPart{ + ETag: "xxx", + PartNumber: 1, + }, + CompletedPart{ + ETag: "lll", + PartNumber: 1, + }, + CompletedPart{ + ETag: "yyy", + PartNumber: 3, + }, + CompletedPart{ + ETag: "zzz", + PartNumber: 5, + }, + } + + tests := []struct { + name string + args args + wantEtag string + wantFound bool }{ { "first", - "0001_uuid.part", - 1, + args{ + "0001.part", + parts, + }, + "lll", + true, }, { "second", - "0002.part", - 2, + args{ + "0002.part", + parts, + }, + "", + false, + }, + { + "third", + args{ + "0003.part", + parts, + }, + "yyy", + true, + }, + { + "fourth", + args{ + "0004.part", + parts, + }, + "", + false, + }, + { + "fifth", + args{ + "0005.part", + parts, + }, + "zzz", + true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - partNumber, _ := parsePartNumber(tt.fileName) - assert.Equalf(t, tt.partNum, partNumber, "parsePartNumber(%v)", tt.fileName) + gotEtag, gotFound := findByPartNumber(tt.args.fileName, tt.args.parts) + assert.Equalf(t, tt.wantEtag, gotEtag, "findByPartNumber(%v, %v)", tt.args.fileName, tt.args.parts) + assert.Equalf(t, tt.wantFound, gotFound, "findByPartNumber(%v, %v)", tt.args.fileName, tt.args.parts) }) } } diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 9dd9a684e..dbd667339 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,28 +3,27 @@ package s3api import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" ) func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return filer_pb.Mkdir(context.Background(), s3a, parentDirectoryPath, dirName, fn) + return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn) } func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk, fn func(entry *filer_pb.Entry)) error { - return filer_pb.MkFile(context.Background(), s3a, parentDirectoryPath, fileName, chunks, fn) + return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks, fn) } func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) { - err = filer_pb.List(context.Background(), s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error { + err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error { entries = append(entries, entry) if isLastEntry { isLast = true @@ -77,42 +76,19 @@ func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath strin func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - return filer_pb.Exists(context.Background(), s3a, parentDirectoryPath, entryName, isDirectory) + return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory) } func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) { - return filer_pb.Touch(context.Background(), s3a, parentDirectoryPath, entryName, entry) + return filer_pb.Touch(s3a, parentDirectoryPath, entryName, entry) } func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) { fullPath := util.NewFullPath(parentDirectoryPath, entryName) - return filer_pb.GetEntry(context.Background(), s3a, fullPath) -} - -func (s3a *S3ApiServer) updateEntry(parentDirectoryPath string, newEntry *filer_pb.Entry) error { - updateEntryRequest := &filer_pb.UpdateEntryRequest{ - Directory: parentDirectoryPath, - Entry: newEntry, - } - - err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - err := filer_pb.UpdateEntry(context.Background(), client, updateEntryRequest) - if err != nil { - return err - } - return nil - }) - return err -} - -func (s3a *S3ApiServer) getCollectionName(bucket string) string { - if s3a.option.FilerGroup != "" { - return fmt.Sprintf("%s_%s", s3a.option.FilerGroup, bucket) - } - return bucket + return filer_pb.GetEntry(s3a, fullPath) } func objectKey(key *string) *string { diff --git a/weed/s3api/filer_util_tags.go b/weed/s3api/filer_util_tags.go index d33e46c2e..18d4d69c5 100644 --- a/weed/s3api/filer_util_tags.go +++ b/weed/s3api/filer_util_tags.go @@ -1,11 +1,10 @@ package s3api import ( - "context" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "strings" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) const ( @@ -16,7 +15,7 @@ func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (t err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := filer_pb.LookupEntry(context.Background(), client, &filer_pb.LookupDirectoryEntryRequest{ + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, Name: entryName, }) @@ -38,7 +37,7 @@ func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, ta return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := filer_pb.LookupEntry(context.Background(), client, &filer_pb.LookupDirectoryEntryRequest{ + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, Name: entryName, }) @@ -59,7 +58,7 @@ func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, ta resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v) } - return filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{ + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ Directory: parentDirectoryPath, Entry: resp.Entry, IsFromOtherCluster: false, @@ -74,7 +73,7 @@ func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (er return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := filer_pb.LookupEntry(context.Background(), client, &filer_pb.LookupDirectoryEntryRequest{ + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, Name: entryName, }) @@ -94,7 +93,7 @@ func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (er return nil } - return filer_pb.UpdateEntry(context.Background(), client, &filer_pb.UpdateEntryRequest{ + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ Directory: parentDirectoryPath, Entry: resp.Entry, IsFromOtherCluster: false, diff --git a/weed/s3api/object_lock_utils.go b/weed/s3api/object_lock_utils.go deleted file mode 100644 index 39496e14f..000000000 --- a/weed/s3api/object_lock_utils.go +++ /dev/null @@ -1,363 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "fmt" - "strconv" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// ObjectLockUtils provides shared utilities for Object Lock configuration -// These functions are used by both Admin UI and S3 API handlers to ensure consistency - -// VersioningUtils provides shared utilities for bucket versioning configuration -// These functions ensure Admin UI and S3 API use the same versioning keys - -// StoreVersioningInExtended stores versioning configuration in entry extended attributes -func StoreVersioningInExtended(entry *filer_pb.Entry, enabled bool) error { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - if enabled { - entry.Extended[s3_constants.ExtVersioningKey] = []byte(s3_constants.VersioningEnabled) - } else { - entry.Extended[s3_constants.ExtVersioningKey] = []byte(s3_constants.VersioningSuspended) - } - - return nil -} - -// LoadVersioningFromExtended loads versioning configuration from entry extended attributes -func LoadVersioningFromExtended(entry *filer_pb.Entry) (bool, bool) { - if entry == nil || entry.Extended == nil { - return false, false // not found, default to suspended - } - - // Check for S3 API compatible key - if versioningBytes, exists := entry.Extended[s3_constants.ExtVersioningKey]; exists { - enabled := string(versioningBytes) == s3_constants.VersioningEnabled - return enabled, true - } - - return false, false // not found -} - -// CreateObjectLockConfiguration creates a new ObjectLockConfiguration with the specified parameters -func CreateObjectLockConfiguration(enabled bool, mode string, days int, years int) *ObjectLockConfiguration { - if !enabled { - return nil - } - - config := &ObjectLockConfiguration{ - ObjectLockEnabled: s3_constants.ObjectLockEnabled, - } - - // Add default retention rule if mode and period are specified - if mode != "" && (days > 0 || years > 0) { - config.Rule = &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: mode, - Days: days, - Years: years, - DaysSet: days > 0, - YearsSet: years > 0, - }, - } - } - - return config -} - -// ObjectLockConfigurationToXML converts ObjectLockConfiguration to XML bytes -func ObjectLockConfigurationToXML(config *ObjectLockConfiguration) ([]byte, error) { - if config == nil { - return nil, fmt.Errorf("object lock configuration is nil") - } - - return xml.Marshal(config) -} - -// StoreObjectLockConfigurationInExtended stores Object Lock configuration in entry extended attributes -func StoreObjectLockConfigurationInExtended(entry *filer_pb.Entry, config *ObjectLockConfiguration) error { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - if config == nil { - // Remove Object Lock configuration - delete(entry.Extended, s3_constants.ExtObjectLockEnabledKey) - delete(entry.Extended, s3_constants.ExtObjectLockDefaultModeKey) - delete(entry.Extended, s3_constants.ExtObjectLockDefaultDaysKey) - delete(entry.Extended, s3_constants.ExtObjectLockDefaultYearsKey) - return nil - } - - // Store the enabled flag - entry.Extended[s3_constants.ExtObjectLockEnabledKey] = []byte(config.ObjectLockEnabled) - - // Store default retention configuration if present - if config.Rule != nil && config.Rule.DefaultRetention != nil { - defaultRetention := config.Rule.DefaultRetention - - // Store mode - if defaultRetention.Mode != "" { - entry.Extended[s3_constants.ExtObjectLockDefaultModeKey] = []byte(defaultRetention.Mode) - } - - // Store days - if defaultRetention.DaysSet && defaultRetention.Days > 0 { - entry.Extended[s3_constants.ExtObjectLockDefaultDaysKey] = []byte(strconv.Itoa(defaultRetention.Days)) - } - - // Store years - if defaultRetention.YearsSet && defaultRetention.Years > 0 { - entry.Extended[s3_constants.ExtObjectLockDefaultYearsKey] = []byte(strconv.Itoa(defaultRetention.Years)) - } - } else { - // Remove default retention if not present - delete(entry.Extended, s3_constants.ExtObjectLockDefaultModeKey) - delete(entry.Extended, s3_constants.ExtObjectLockDefaultDaysKey) - delete(entry.Extended, s3_constants.ExtObjectLockDefaultYearsKey) - } - - return nil -} - -// LoadObjectLockConfigurationFromExtended loads Object Lock configuration from entry extended attributes -func LoadObjectLockConfigurationFromExtended(entry *filer_pb.Entry) (*ObjectLockConfiguration, bool) { - if entry == nil || entry.Extended == nil { - return nil, false - } - - // Check if Object Lock is enabled - enabledBytes, exists := entry.Extended[s3_constants.ExtObjectLockEnabledKey] - if !exists { - return nil, false - } - - enabled := string(enabledBytes) - if enabled != s3_constants.ObjectLockEnabled && enabled != "true" { - return nil, false - } - - // Create basic configuration - config := &ObjectLockConfiguration{ - ObjectLockEnabled: s3_constants.ObjectLockEnabled, - } - - // Load default retention configuration if present - if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockDefaultModeKey]; exists { - mode := string(modeBytes) - - // Parse days and years - var days, years int - if daysBytes, exists := entry.Extended[s3_constants.ExtObjectLockDefaultDaysKey]; exists { - if parsed, err := strconv.Atoi(string(daysBytes)); err == nil { - days = parsed - } - } - if yearsBytes, exists := entry.Extended[s3_constants.ExtObjectLockDefaultYearsKey]; exists { - if parsed, err := strconv.Atoi(string(yearsBytes)); err == nil { - years = parsed - } - } - - // Create rule if we have a mode and at least days or years - if mode != "" && (days > 0 || years > 0) { - config.Rule = &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: mode, - Days: days, - Years: years, - DaysSet: days > 0, - YearsSet: years > 0, - }, - } - } - } - - return config, true -} - -// ExtractObjectLockInfoFromConfig extracts basic Object Lock information from configuration -// Returns: enabled, mode, duration (for UI display) -func ExtractObjectLockInfoFromConfig(config *ObjectLockConfiguration) (bool, string, int32) { - if config == nil || config.ObjectLockEnabled != s3_constants.ObjectLockEnabled { - return false, "", 0 - } - - if config.Rule == nil || config.Rule.DefaultRetention == nil { - return true, "", 0 - } - - defaultRetention := config.Rule.DefaultRetention - - // Convert years to days for consistent representation - days := 0 - if defaultRetention.DaysSet { - days = defaultRetention.Days - } - if defaultRetention.YearsSet && defaultRetention.Years > 0 { - days += defaultRetention.Years * 365 - } - - return true, defaultRetention.Mode, int32(days) -} - -// CreateObjectLockConfigurationFromParams creates ObjectLockConfiguration from individual parameters -// This is a convenience function for Admin UI usage -func CreateObjectLockConfigurationFromParams(enabled bool, mode string, duration int32) *ObjectLockConfiguration { - if !enabled { - return nil - } - - return CreateObjectLockConfiguration(enabled, mode, int(duration), 0) -} - -// ValidateObjectLockParameters validates Object Lock parameters before creating configuration -func ValidateObjectLockParameters(enabled bool, mode string, duration int32) error { - if !enabled { - return nil - } - - if mode != s3_constants.RetentionModeGovernance && mode != s3_constants.RetentionModeCompliance { - return ErrInvalidObjectLockMode - } - - if duration <= 0 { - return ErrInvalidObjectLockDuration - } - - if duration > MaxRetentionDays { - return ErrObjectLockDurationExceeded - } - - return nil -} - -// ==================================================================== -// OBJECT LOCK VALIDATION FUNCTIONS -// ==================================================================== -// These validation functions provide comprehensive validation for -// all Object Lock related configurations and requests. - -// ValidateRetention validates retention configuration for object-level retention -func ValidateRetention(retention *ObjectRetention) error { - // Check if mode is specified - if retention.Mode == "" { - return ErrRetentionMissingMode - } - - // Check if retain until date is specified - if retention.RetainUntilDate == nil { - return ErrRetentionMissingRetainUntilDate - } - - // Check if mode is valid - if retention.Mode != s3_constants.RetentionModeGovernance && retention.Mode != s3_constants.RetentionModeCompliance { - return ErrInvalidRetentionModeValue - } - - // Check if retain until date is in the future - if retention.RetainUntilDate.Before(time.Now()) { - return ErrRetentionDateMustBeFuture - } - - return nil -} - -// ValidateLegalHold validates legal hold configuration -func ValidateLegalHold(legalHold *ObjectLegalHold) error { - // Check if status is valid - if legalHold.Status != s3_constants.LegalHoldOn && legalHold.Status != s3_constants.LegalHoldOff { - return ErrInvalidLegalHoldStatus - } - - return nil -} - -// ValidateObjectLockConfiguration validates object lock configuration at bucket level -func ValidateObjectLockConfiguration(config *ObjectLockConfiguration) error { - // ObjectLockEnabled is required for bucket-level configuration - if config.ObjectLockEnabled == "" { - return ErrObjectLockConfigurationMissingEnabled - } - - // Validate ObjectLockEnabled value - if config.ObjectLockEnabled != s3_constants.ObjectLockEnabled { - // ObjectLockEnabled can only be 'Enabled', any other value (including 'Disabled') is malformed XML - return ErrInvalidObjectLockEnabledValue - } - - // Validate Rule if present - if config.Rule != nil { - if config.Rule.DefaultRetention == nil { - return ErrRuleMissingDefaultRetention - } - return validateDefaultRetention(config.Rule.DefaultRetention) - } - - return nil -} - -// validateDefaultRetention validates default retention configuration for bucket-level settings -func validateDefaultRetention(retention *DefaultRetention) error { - glog.V(2).Infof("validateDefaultRetention: Mode=%s, Days=%d (set=%v), Years=%d (set=%v)", - retention.Mode, retention.Days, retention.DaysSet, retention.Years, retention.YearsSet) - - // Mode is required - if retention.Mode == "" { - return ErrDefaultRetentionMissingMode - } - - // Mode must be valid - if retention.Mode != s3_constants.RetentionModeGovernance && retention.Mode != s3_constants.RetentionModeCompliance { - return ErrInvalidDefaultRetentionMode - } - - // Check for invalid Years value (negative values are always invalid) - if retention.YearsSet && retention.Years < 0 { - return ErrInvalidRetentionPeriod - } - - // Check for invalid Days value (negative values are invalid) - if retention.DaysSet && retention.Days < 0 { - return ErrInvalidRetentionPeriod - } - - // Check for invalid Days value (zero is invalid when explicitly provided) - if retention.DaysSet && retention.Days == 0 { - return ErrInvalidRetentionPeriod - } - - // Check for neither Days nor Years being specified - if !retention.DaysSet && !retention.YearsSet { - return ErrDefaultRetentionMissingPeriod - } - - // Check for both Days and Years being specified - if retention.DaysSet && retention.YearsSet { - return ErrDefaultRetentionBothDaysAndYears - } - - // Validate Days if specified - if retention.DaysSet && retention.Days > 0 { - if retention.Days > MaxRetentionDays { - return ErrDefaultRetentionDaysOutOfRange - } - } - - // Validate Years if specified - if retention.YearsSet && retention.Years > 0 { - if retention.Years > MaxRetentionYears { - return ErrDefaultRetentionYearsOutOfRange - } - } - - return nil -} diff --git a/weed/s3api/policy/post-policy.go b/weed/s3api/policy/post-policy.go index ad3cfc401..5ef8d397d 100644 --- a/weed/s3api/policy/post-policy.go +++ b/weed/s3api/policy/post-policy.go @@ -20,7 +20,7 @@ package policy import ( "encoding/base64" "fmt" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "strings" "time" @@ -34,11 +34,12 @@ const expirationDateFormat = "2006-01-02T15:04:05.999Z" // // Example: // -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// type policyCondition struct { matchType string condition string @@ -315,6 +316,6 @@ func errInvalidArgument(message string) error { StatusCode: http.StatusBadRequest, Code: "InvalidArgument", Message: message, - RequestID: "client", + RequestID: "minio", } } diff --git a/weed/s3api/policy/post-policy_test.go b/weed/s3api/policy/post-policy_test.go index 8b054023a..ce241b723 100644 --- a/weed/s3api/policy/post-policy_test.go +++ b/weed/s3api/policy/post-policy_test.go @@ -125,7 +125,7 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) [] // postPresignSignatureV4 - presigned signature for PostPolicy requests. func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { - // Get signing key. + // Get signining key. signingkey := getSigningKey(secretAccessKey, t, location) // Calculate signature. signature := getSignature(signingkey, policyBase64) diff --git a/weed/s3api/policy/postpolicyform.go b/weed/s3api/policy/postpolicyform.go index 011f782d7..3a6f3a882 100644 --- a/weed/s3api/policy/postpolicyform.go +++ b/weed/s3api/policy/postpolicyform.go @@ -203,7 +203,7 @@ func ParsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) { return parsedPolicy, nil } -// checkPolicyCond returns a boolean to indicate if a condition is satisfied according +// checkPolicyCond returns a boolean to indicate if a condition is satisified according // to the passed operator func checkPolicyCond(op string, input1, input2 string) bool { switch op { diff --git a/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md b/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md deleted file mode 100644 index 39e7d9dcd..000000000 --- a/weed/s3api/policy_engine/GOVERNANCE_PERMISSIONS.md +++ /dev/null @@ -1,249 +0,0 @@ -# Governance Permission Implementation - -This document explains the implementation of `s3:BypassGovernanceRetention` permission in SeaweedFS, providing AWS S3-compatible governance retention bypass functionality. - -## Overview - -The governance permission system enables proper AWS S3-compatible object retention with governance mode bypass capabilities. This implementation ensures that only users with the appropriate permissions can bypass governance retention, while maintaining security and compliance requirements. - -## Features - -### 1. Permission-Based Bypass Control - -- **s3:BypassGovernanceRetention**: New permission that allows users to bypass governance retention -- **Admin Override**: Admin users can always bypass governance retention -- **Header Detection**: Automatic detection of `x-amz-bypass-governance-retention` header -- **Permission Validation**: Validates user permissions before allowing bypass - -### 2. Retention Mode Support - -- **GOVERNANCE Mode**: Can be bypassed with proper permission and header -- **COMPLIANCE Mode**: Cannot be bypassed (highest security level) -- **Legal Hold**: Always blocks operations regardless of permissions - -### 3. Integration Points - -- **DELETE Operations**: Checks governance permissions before object deletion -- **PUT Operations**: Validates permissions before object overwrite -- **Retention Modification**: Ensures proper permissions for retention changes - -## Implementation Details - -### Core Components - -1. **Permission Checker** - ```go - func (s3a *S3ApiServer) checkGovernanceBypassPermission(r *http.Request, bucket, object string) bool - ``` - - Checks if user has `s3:BypassGovernanceRetention` permission - - Validates admin status - - Integrates with existing IAM system - -2. **Object Lock Permission Validation** - ```go - func (s3a *S3ApiServer) checkObjectLockPermissions(r *http.Request, bucket, object, versionId string, bypassGovernance bool) error - ``` - - Validates governance bypass permissions - - Checks retention mode (GOVERNANCE vs COMPLIANCE) - - Enforces legal hold restrictions - -3. **IAM Integration** - - Added `ACTION_BYPASS_GOVERNANCE_RETENTION` constant - - Updated policy engine with `s3:BypassGovernanceRetention` action - - Integrated with existing identity-based access control - -### Permission Flow - -``` -Request with x-amz-bypass-governance-retention: true - โ†“ -Check if object is under retention - โ†“ -If GOVERNANCE mode: - โ†“ -Check if user has s3:BypassGovernanceRetention permission - โ†“ -If permission granted: Allow operation -If permission denied: Deny operation - โ†“ -If COMPLIANCE mode: Always deny -``` - -## Configuration - -### 1. Identity-Based Configuration - -Add governance bypass permission to user actions in `identities.json`: - -```json -{ - "identities": [ - { - "name": "governance-admin", - "credentials": [{"accessKey": "admin123", "secretKey": "secret123"}], - "actions": [ - "Read:my-bucket/*", - "Write:my-bucket/*", - "BypassGovernanceRetention:my-bucket/*" - ] - } - ] -} -``` - -### 2. Bucket Policy Configuration - -Grant governance bypass permission via bucket policies: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:BypassGovernanceRetention", - "Resource": "arn:aws:s3:::bucket/*" - } - ] -} -``` - -**Note**: The policy version should use the standard AWS policy version `PolicyVersion2012_10_17` constant (which equals `"2012-10-17"`). - -## Usage Examples - -### 1. Delete Object with Governance Bypass - -```bash -# User with bypass permission -aws s3api delete-object \ - --bucket my-bucket \ - --key my-object \ - --bypass-governance-retention - -# Admin user (always allowed) -aws s3api delete-object \ - --bucket my-bucket \ - --key my-object \ - --bypass-governance-retention -``` - -### 2. Update Object Retention - -```bash -# Extend retention period (requires bypass permission for governance mode) -aws s3api put-object-retention \ - --bucket my-bucket \ - --key my-object \ - --retention Mode=GOVERNANCE,RetainUntilDate=2025-01-01T00:00:00Z \ - --bypass-governance-retention -``` - -### 3. Bulk Object Deletion - -```bash -# Delete multiple objects with governance bypass -aws s3api delete-objects \ - --bucket my-bucket \ - --delete file://delete-objects.json \ - --bypass-governance-retention -``` - -## Error Handling - -### Permission Errors - -- **ErrAccessDenied**: User lacks `s3:BypassGovernanceRetention` permission -- **ErrGovernanceModeActive**: Governance mode protection without bypass -- **ErrComplianceModeActive**: Compliance mode cannot be bypassed - -### Example Error Response - -```xml - - - AccessDenied - User does not have permission to bypass governance retention - abc123 - /my-bucket/my-object - -``` - -## Security Considerations - -### 1. Least Privilege Principle - -- Grant bypass permission only to users who absolutely need it -- Use bucket-specific permissions rather than global permissions -- Regularly audit users with bypass permissions - -### 2. Compliance Mode Protection - -- COMPLIANCE mode objects cannot be bypassed by any user -- Use COMPLIANCE mode for regulatory requirements -- GOVERNANCE mode provides flexibility while maintaining audit trails - -### 3. Admin Privileges - -- Admin users can always bypass governance retention -- Ensure admin access is properly secured -- Use admin privileges responsibly - -## Testing - -### Unit Tests - -```bash -# Run governance permission tests -go test -v ./weed/s3api/ -run TestGovernance - -# Run all object retention tests -go test -v ./weed/s3api/ -run TestObjectRetention -``` - -### Integration Tests - -```bash -# Test with real S3 clients -cd test/s3/retention -go test -v ./... -run TestGovernanceBypass -``` - -## AWS Compatibility - -This implementation provides full AWS S3 compatibility for: - -- โœ… `x-amz-bypass-governance-retention` header support -- โœ… `s3:BypassGovernanceRetention` permission -- โœ… GOVERNANCE vs COMPLIANCE mode behavior -- โœ… Legal hold enforcement -- โœ… Error responses and codes -- โœ… Bucket policy integration -- โœ… IAM policy integration - -## Troubleshooting - -### Common Issues - -1. **User cannot bypass governance retention** - - Check if user has `s3:BypassGovernanceRetention` permission - - Verify the header `x-amz-bypass-governance-retention: true` is set - - Ensure object is in GOVERNANCE mode (not COMPLIANCE) - -2. **Admin bypass not working** - - Verify user has admin privileges in the IAM system - - Check that object is not under legal hold - - Ensure versioning is enabled on the bucket - -3. **Policy not taking effect** - - Verify bucket policy JSON syntax - - Check resource ARN format - - Ensure principal has proper format - -## Future Enhancements - -- [ ] AWS STS integration for temporary credentials -- [ ] CloudTrail-compatible audit logging -- [ ] Advanced condition evaluation (IP, time, etc.) -- [ ] Integration with external identity providers -- [ ] Fine-grained permissions for different retention operations \ No newline at end of file diff --git a/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md b/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md deleted file mode 100644 index 5c07952b5..000000000 --- a/weed/s3api/policy_engine/INTEGRATION_EXAMPLE.md +++ /dev/null @@ -1,176 +0,0 @@ -# Integration Example - -This shows how to integrate the new policy engine with the existing S3ApiServer. - -## Minimal Integration - -```go -// In s3api_server.go - modify NewS3ApiServerWithStore function - -func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, explicitStore string) (s3ApiServer *S3ApiServer, err error) { - // ... existing code ... - - // Create traditional IAM - iam := NewIdentityAccessManagementWithStore(option, explicitStore) - - s3ApiServer = &S3ApiServer{ - option: option, - iam: iam, // Keep existing for compatibility - randomClientId: util.RandomInt32(), - filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec), - cb: NewCircuitBreaker(option), - credentialManager: iam.credentialManager, - bucketConfigCache: NewBucketConfigCache(5 * time.Minute), - } - - // Optional: Wrap with policy-backed IAM for enhanced features - if option.EnablePolicyEngine { // Add this config option - // Option 1: Create and set legacy IAM separately - policyBackedIAM := NewPolicyBackedIAM() - policyBackedIAM.SetLegacyIAM(iam) - - // Option 2: Create with legacy IAM in one call (convenience method) - // policyBackedIAM := NewPolicyBackedIAMWithLegacy(iam) - - // Load existing identities as policies - if err := policyBackedIAM.LoadIdentityPolicies(); err != nil { - glog.Warningf("Failed to load identity policies: %v", err) - } - - // Replace IAM with policy-backed version - s3ApiServer.iam = policyBackedIAM - } - - // ... rest of existing code ... -} -``` - -## Router Integration - -```go -// In registerRouter function, replace bucket policy handlers: - -// Old handlers (if they exist): -// bucket.Methods(http.MethodGet).HandlerFunc(s3a.GetBucketPolicyHandler).Queries("policy", "") -// bucket.Methods(http.MethodPut).HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "") -// bucket.Methods(http.MethodDelete).HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "") - -// New handlers with policy engine: -if policyBackedIAM, ok := s3a.iam.(*PolicyBackedIAM); ok { - // Use policy-backed handlers - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "") - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(policyBackedIAM.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "") -} else { - // Use existing/fallback handlers - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "") - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "") -} -``` - -## Configuration Option - -Add to `S3ApiServerOption`: - -```go -type S3ApiServerOption struct { - // ... existing fields ... - EnablePolicyEngine bool // Add this field -} -``` - -## Example Usage - -### 1. Existing Users (No Changes) - -Your existing `identities.json` continues to work: - -```json -{ - "identities": [ - { - "name": "user1", - "credentials": [{"accessKey": "key1", "secretKey": "secret1"}], - "actions": ["Read:bucket1/*", "Write:bucket1/uploads/*"] - } - ] -} -``` - -### 2. New Users (Enhanced Policies) - -Set bucket policies via S3 API: - -```bash -# Allow public read -aws s3api put-bucket-policy --bucket my-bucket --policy file://policy.json - -# Where policy.json contains: -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::my-bucket/*" - } - ] -} -``` - -### 3. Advanced Conditions - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::secure-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - }, - "Bool": { - "aws:SecureTransport": "true" - } - } - } - ] -} -``` - -## Migration Strategy - -### Phase 1: Enable Policy Engine (Opt-in) -- Set `EnablePolicyEngine: true` in server options -- Existing `identities.json` automatically converted to policies -- Add bucket policies as needed - -### Phase 2: Full Policy Management -- Use AWS CLI/SDK for policy management -- Gradually migrate from `identities.json` to pure IAM policies -- Take advantage of advanced conditions and features - -## Testing - -```bash -# Test existing functionality -go test -v -run TestCanDo - -# Test new policy engine -go test -v -run TestPolicyEngine - -# Test integration -go test -v -run TestPolicyBackedIAM -``` - -The integration is designed to be: -- **Backward compatible** - Existing setups work unchanged -- **Opt-in** - Enable policy engine only when needed -- **Gradual** - Migrate at your own pace -- **AWS compatible** - Use standard AWS tools and patterns \ No newline at end of file diff --git a/weed/s3api/policy_engine/POLICY_EXAMPLES.md b/weed/s3api/policy_engine/POLICY_EXAMPLES.md deleted file mode 100644 index 34a61488e..000000000 --- a/weed/s3api/policy_engine/POLICY_EXAMPLES.md +++ /dev/null @@ -1,54 +0,0 @@ -# Policy Engine Examples - -This document contains examples of how to use the SeaweedFS Policy Engine. - -## Overview - -The examples in `examples.go` demonstrate various policy configurations and usage patterns. The examples file is excluded from production builds using build tags to reduce binary size. - -## To Use Examples - -If you need to use the examples during development or testing, you can: - -1. **Remove the build tag**: Remove the `//go:build ignore` and `// +build ignore` lines from `examples.go` -2. **Use during development**: The examples are available during development but not in production builds -3. **Copy specific examples**: Copy the JSON examples you need into your own code - -## Example Categories - -The examples file includes: - -- **Legacy Identity Format**: Examples of existing identities.json format -- **Policy Documents**: Various AWS S3-compatible policy examples -- **Condition Examples**: Complex condition-based policies -- **Migration Examples**: How to migrate from legacy to policy-based IAM -- **Integration Examples**: How to integrate with existing systems - -## Usage Functions - -The examples file provides helper functions: - -- `GetAllExamples()`: Returns all example policies -- `ValidateExamplePolicies()`: Validates all examples -- `GetExamplePolicy(name)`: Gets a specific example -- `CreateExamplePolicyDocument(name)`: Creates a policy document -- `PrintExamplePolicyPretty(name)`: Pretty-prints an example -- `ExampleUsage()`: Shows basic usage patterns -- `ExampleLegacyIntegration()`: Shows legacy integration -- `ExampleConditions()`: Shows condition usage -- `ExampleMigrationStrategy()`: Shows migration approach - -## To Enable Examples in Development - -```go -// Remove build tags from examples.go, then: -import "github.com/seaweedfs/seaweedfs/weed/s3api/policy_engine" - -// Use examples -examples := policy_engine.GetAllExamples() -policy, err := policy_engine.GetExamplePolicy("read-only-user") -``` - -## Note - -The examples are excluded from production builds to keep binary size minimal. They are available for development and testing purposes only. \ No newline at end of file diff --git a/weed/s3api/policy_engine/README_POLICY_ENGINE.md b/weed/s3api/policy_engine/README_POLICY_ENGINE.md deleted file mode 100644 index 70dbf37f1..000000000 --- a/weed/s3api/policy_engine/README_POLICY_ENGINE.md +++ /dev/null @@ -1,279 +0,0 @@ -# SeaweedFS Policy Evaluation Engine - -This document describes the comprehensive policy evaluation engine that has been added to SeaweedFS, providing AWS S3-compatible policy support while maintaining full backward compatibility with existing `identities.json` configuration. - -## Overview - -The policy engine provides: -- **Full AWS S3 policy compatibility** - JSON policies with conditions, wildcards, and complex logic -- **Backward compatibility** - Existing `identities.json` continues to work unchanged -- **Bucket policies** - Per-bucket access control policies -- **IAM policies** - User and group-level policies -- **Condition evaluation** - IP restrictions, time-based access, SSL-only, etc. -- **AWS-compliant evaluation order** - Explicit Deny > Explicit Allow > Default Deny - -## Architecture - -### Files Created - -1. **`policy_engine/types.go`** - Core policy data structures and validation -2. **`policy_engine/conditions.go`** - Condition evaluators (StringEquals, IpAddress, etc.) -3. **`policy_engine/engine.go`** - Main policy evaluation engine -4. **`policy_engine/integration.go`** - Integration with existing IAM system -5. **`policy_engine/engine_test.go`** - Comprehensive tests -6. **`policy_engine/examples.go`** - Usage examples and documentation (excluded from builds) -7. **`policy_engine/wildcard_matcher.go`** - Optimized wildcard pattern matching -8. **`policy_engine/wildcard_matcher_test.go`** - Wildcard matching tests - -### Key Components - -``` -PolicyEngine -โ”œโ”€โ”€ Bucket Policies (per-bucket JSON policies) -โ”œโ”€โ”€ User Policies (converted from identities.json + new IAM policies) -โ”œโ”€โ”€ Condition Evaluators (IP, time, string, numeric, etc.) -โ””โ”€โ”€ Evaluation Logic (AWS-compliant precedence) -``` - -## Backward Compatibility - -### Existing identities.json (No Changes Required) - -Your existing configuration continues to work exactly as before: - -```json -{ - "identities": [ - { - "name": "readonly_user", - "credentials": [{"accessKey": "key123", "secretKey": "secret123"}], - "actions": ["Read:public-bucket/*", "List:public-bucket"] - } - ] -} -``` - -Legacy actions are automatically converted to AWS-style policies: -- `Read:bucket/*` โ†’ `s3:GetObject` on `arn:aws:s3:::bucket/*` -- `Write:bucket` โ†’ `s3:PutObject`, `s3:DeleteObject` on `arn:aws:s3:::bucket/*` -- `Admin` โ†’ `s3:*` on `arn:aws:s3:::*` - -## New Capabilities - -### 1. Bucket Policies - -Set bucket-level policies using standard S3 API: - -```bash -# Set bucket policy -curl -X PUT "http://localhost:8333/bucket?policy" \ - -H "Authorization: AWS access_key:signature" \ - -d '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::bucket/*" - } - ] - }' - -# Get bucket policy -curl "http://localhost:8333/bucket?policy" - -# Delete bucket policy -curl -X DELETE "http://localhost:8333/bucket?policy" -``` - -### 2. Advanced Conditions - -Support for all AWS condition operators: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::secure-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": ["192.168.1.0/24", "10.0.0.0/8"] - }, - "Bool": { - "aws:SecureTransport": "true" - }, - "DateGreaterThan": { - "aws:CurrentTime": "2023-01-01T00:00:00Z" - } - } - } - ] -} -``` - -### 3. Supported Condition Operators - -- **String**: `StringEquals`, `StringNotEquals`, `StringLike`, `StringNotLike` -- **Numeric**: `NumericEquals`, `NumericLessThan`, `NumericGreaterThan`, etc. -- **Date**: `DateEquals`, `DateLessThan`, `DateGreaterThan`, etc. -- **IP**: `IpAddress`, `NotIpAddress` (supports CIDR notation) -- **Boolean**: `Bool` -- **ARN**: `ArnEquals`, `ArnLike` -- **Null**: `Null` - -### 4. Condition Keys - -Standard AWS condition keys are supported: -- `aws:CurrentTime` - Current request time -- `aws:SourceIp` - Client IP address -- `aws:SecureTransport` - Whether HTTPS is used -- `aws:UserAgent` - Client user agent -- `s3:x-amz-acl` - Requested ACL -- `s3:VersionId` - Object version ID -- And many more... - -## Policy Evaluation - -### Evaluation Order (AWS-Compatible) - -1. **Explicit Deny** - If any policy explicitly denies access โ†’ **DENY** -2. **Explicit Allow** - If any policy explicitly allows access โ†’ **ALLOW** -3. **Default Deny** - If no policy matches โ†’ **DENY** - -### Policy Sources (Evaluated Together) - -1. **Bucket Policies** - Stored per-bucket, highest priority -2. **User Policies** - Converted from `identities.json` + new IAM policies -3. **Legacy IAM** - For backward compatibility (lowest priority) - -## Examples - -### Public Read Bucket - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "PublicRead", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::public-bucket/*" - } - ] -} -``` - -### IP-Restricted Bucket - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": "arn:aws:s3:::secure-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - } - ] -} -``` - -### SSL-Only Access - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Deny", - "Principal": "*", - "Action": "s3:*", - "Resource": ["arn:aws:s3:::ssl-bucket/*", "arn:aws:s3:::ssl-bucket"], - "Condition": { - "Bool": { - "aws:SecureTransport": "false" - } - } - } - ] -} -``` - -## Integration - -### For Existing SeaweedFS Users - -1. **No changes required** - Your existing setup continues to work -2. **Optional enhancement** - Add bucket policies for fine-grained control -3. **Gradual migration** - Move to full AWS policies over time - -### For New Users - -1. Start with either `identities.json` or AWS-style policies -2. Use bucket policies for complex access patterns -3. Full feature parity with AWS S3 policies - -## Testing - -Run the policy engine tests: - -```bash -# Core policy tests -go test -v -run TestPolicyEngine - -# Condition evaluator tests -go test -v -run TestConditionEvaluators - -# Legacy compatibility tests -go test -v -run TestConvertIdentityToPolicy - -# Validation tests -go test -v -run TestPolicyValidation -``` - -## Performance - -- **Compiled patterns** - Regex patterns are pre-compiled for fast matching -- **Cached policies** - Policies are cached in memory with TTL -- **Early termination** - Evaluation stops on first explicit deny -- **Minimal overhead** - Backward compatibility with minimal performance impact - -## Migration Path - -### Phase 1: Backward Compatible (Current) -- Keep existing `identities.json` unchanged -- Add bucket policies as needed -- Legacy actions automatically converted to AWS policies - -### Phase 2: Enhanced (Optional) -- Add advanced conditions to policies -- Use full AWS S3 policy features -- Maintain backward compatibility - -### Phase 3: Full Migration (Future) -- Migrate to pure IAM policies -- Use AWS CLI/SDK for policy management -- Complete AWS S3 feature parity - -## Compatibility - -- โœ… **Full backward compatibility** with existing `identities.json` -- โœ… **AWS S3 API compatibility** for bucket policies -- โœ… **Standard condition operators** and keys -- โœ… **Proper evaluation precedence** (Deny > Allow > Default Deny) -- โœ… **Performance optimized** with caching and compiled patterns - -The policy engine provides a seamless upgrade path from SeaweedFS's existing simple IAM system to full AWS S3-compatible policies, giving you the best of both worlds: simplicity for basic use cases and power for complex enterprise scenarios. \ No newline at end of file diff --git a/weed/s3api/policy_engine/conditions.go b/weed/s3api/policy_engine/conditions.go deleted file mode 100644 index fc8005fd0..000000000 --- a/weed/s3api/policy_engine/conditions.go +++ /dev/null @@ -1,768 +0,0 @@ -package policy_engine - -import ( - "fmt" - "net" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// LRUNode represents a node in the doubly-linked list for efficient LRU operations -type LRUNode struct { - key string - value []string - prev *LRUNode - next *LRUNode -} - -// NormalizedValueCache provides size-limited caching for normalized values with efficient LRU eviction -type NormalizedValueCache struct { - mu sync.RWMutex - cache map[string]*LRUNode - maxSize int - head *LRUNode // Most recently used - tail *LRUNode // Least recently used -} - -// NewNormalizedValueCache creates a new normalized value cache with configurable size -func NewNormalizedValueCache(maxSize int) *NormalizedValueCache { - if maxSize <= 0 { - maxSize = 1000 // Default size - } - - // Create dummy head and tail nodes for easier list manipulation - head := &LRUNode{} - tail := &LRUNode{} - head.next = tail - tail.prev = head - - return &NormalizedValueCache{ - cache: make(map[string]*LRUNode), - maxSize: maxSize, - head: head, - tail: tail, - } -} - -// Get retrieves a cached value and updates access order in O(1) time -func (c *NormalizedValueCache) Get(key string) ([]string, bool) { - c.mu.Lock() - defer c.mu.Unlock() - - if node, exists := c.cache[key]; exists { - // Move to head (most recently used) - O(1) operation - c.moveToHead(node) - return node.value, true - } - return nil, false -} - -// Set stores a value in the cache with size limit enforcement in O(1) time -func (c *NormalizedValueCache) Set(key string, value []string) { - c.mu.Lock() - defer c.mu.Unlock() - - if node, exists := c.cache[key]; exists { - // Update existing node and move to head - node.value = value - c.moveToHead(node) - return - } - - // Create new node - newNode := &LRUNode{ - key: key, - value: value, - } - - // If at max size, evict least recently used - if len(c.cache) >= c.maxSize { - c.evictLeastRecentlyUsed() - } - - // Add to cache and move to head - c.cache[key] = newNode - c.addToHead(newNode) -} - -// moveToHead moves a node to the head of the list (most recently used) - O(1) -func (c *NormalizedValueCache) moveToHead(node *LRUNode) { - c.removeNode(node) - c.addToHead(node) -} - -// addToHead adds a node right after the head - O(1) -func (c *NormalizedValueCache) addToHead(node *LRUNode) { - node.prev = c.head - node.next = c.head.next - c.head.next.prev = node - c.head.next = node -} - -// removeNode removes a node from the list - O(1) -func (c *NormalizedValueCache) removeNode(node *LRUNode) { - node.prev.next = node.next - node.next.prev = node.prev -} - -// removeTail removes the last node before tail (least recently used) - O(1) -func (c *NormalizedValueCache) removeTail() *LRUNode { - lastNode := c.tail.prev - c.removeNode(lastNode) - return lastNode -} - -// evictLeastRecentlyUsed removes the least recently used item in O(1) time -func (c *NormalizedValueCache) evictLeastRecentlyUsed() { - tail := c.removeTail() - delete(c.cache, tail.key) -} - -// Clear clears all cached values -func (c *NormalizedValueCache) Clear() { - c.mu.Lock() - defer c.mu.Unlock() - c.cache = make(map[string]*LRUNode) - c.head.next = c.tail - c.tail.prev = c.head -} - -// GetStats returns cache statistics -func (c *NormalizedValueCache) GetStats() (size int, maxSize int) { - c.mu.RLock() - defer c.mu.RUnlock() - return len(c.cache), c.maxSize -} - -// Global cache instance with size limit -var normalizedValueCache = NewNormalizedValueCache(1000) - -// getCachedNormalizedValues returns cached normalized values or caches new ones -func getCachedNormalizedValues(value interface{}) []string { - // Create a string key for caching - more efficient than fmt.Sprintf - typeStr := reflect.TypeOf(value).String() - cacheKey := typeStr + ":" + fmt.Sprint(value) - - // Try to get from cache - if cached, exists := normalizedValueCache.Get(cacheKey); exists { - return cached - } - - // Not in cache, normalize and store - // Use the error-handling version for better error reporting - normalized, err := normalizeToStringSliceWithError(value) - if err != nil { - glog.Warningf("Failed to normalize policy value %v: %v", value, err) - // Fallback to string conversion for backward compatibility - normalized = []string{fmt.Sprintf("%v", value)} - } - - normalizedValueCache.Set(cacheKey, normalized) - - return normalized -} - -// ConditionEvaluator evaluates policy conditions -type ConditionEvaluator interface { - Evaluate(conditionValue interface{}, contextValues []string) bool -} - -// StringEqualsEvaluator evaluates StringEquals conditions -type StringEqualsEvaluator struct{} - -func (e *StringEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - for _, contextValue := range contextValues { - if expected == contextValue { - return true - } - } - } - return false -} - -// StringNotEqualsEvaluator evaluates StringNotEquals conditions -type StringNotEqualsEvaluator struct{} - -func (e *StringNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - for _, contextValue := range contextValues { - if expected == contextValue { - return false - } - } - } - return true -} - -// StringLikeEvaluator evaluates StringLike conditions (supports wildcards) -type StringLikeEvaluator struct{} - -func (e *StringLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - patterns := getCachedNormalizedValues(conditionValue) - for _, pattern := range patterns { - for _, contextValue := range contextValues { - if MatchesWildcard(pattern, contextValue) { - return true - } - } - } - return false -} - -// StringNotLikeEvaluator evaluates StringNotLike conditions -type StringNotLikeEvaluator struct{} - -func (e *StringNotLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - patterns := getCachedNormalizedValues(conditionValue) - for _, pattern := range patterns { - for _, contextValue := range contextValues { - if MatchesWildcard(pattern, contextValue) { - return false - } - } - } - return true -} - -// NumericEqualsEvaluator evaluates NumericEquals conditions -type NumericEqualsEvaluator struct{} - -func (e *NumericEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if expectedFloat == contextFloat { - return true - } - } - } - return false -} - -// NumericNotEqualsEvaluator evaluates NumericNotEquals conditions -type NumericNotEqualsEvaluator struct{} - -func (e *NumericNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if expectedFloat == contextFloat { - return false - } - } - } - return true -} - -// NumericLessThanEvaluator evaluates NumericLessThan conditions -type NumericLessThanEvaluator struct{} - -func (e *NumericLessThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if contextFloat < expectedFloat { - return true - } - } - } - return false -} - -// NumericLessThanEqualsEvaluator evaluates NumericLessThanEquals conditions -type NumericLessThanEqualsEvaluator struct{} - -func (e *NumericLessThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if contextFloat <= expectedFloat { - return true - } - } - } - return false -} - -// NumericGreaterThanEvaluator evaluates NumericGreaterThan conditions -type NumericGreaterThanEvaluator struct{} - -func (e *NumericGreaterThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if contextFloat > expectedFloat { - return true - } - } - } - return false -} - -// NumericGreaterThanEqualsEvaluator evaluates NumericGreaterThanEquals conditions -type NumericGreaterThanEqualsEvaluator struct{} - -func (e *NumericGreaterThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedFloat, err := strconv.ParseFloat(expected, 64) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextFloat, err := strconv.ParseFloat(contextValue, 64) - if err != nil { - continue - } - if contextFloat >= expectedFloat { - return true - } - } - } - return false -} - -// DateEqualsEvaluator evaluates DateEquals conditions -type DateEqualsEvaluator struct{} - -func (e *DateEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if expectedTime.Equal(contextTime) { - return true - } - } - } - return false -} - -// DateNotEqualsEvaluator evaluates DateNotEquals conditions -type DateNotEqualsEvaluator struct{} - -func (e *DateNotEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if expectedTime.Equal(contextTime) { - return false - } - } - } - return true -} - -// DateLessThanEvaluator evaluates DateLessThan conditions -type DateLessThanEvaluator struct{} - -func (e *DateLessThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if contextTime.Before(expectedTime) { - return true - } - } - } - return false -} - -// DateLessThanEqualsEvaluator evaluates DateLessThanEquals conditions -type DateLessThanEqualsEvaluator struct{} - -func (e *DateLessThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if contextTime.Before(expectedTime) || contextTime.Equal(expectedTime) { - return true - } - } - } - return false -} - -// DateGreaterThanEvaluator evaluates DateGreaterThan conditions -type DateGreaterThanEvaluator struct{} - -func (e *DateGreaterThanEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if contextTime.After(expectedTime) { - return true - } - } - } - return false -} - -// DateGreaterThanEqualsEvaluator evaluates DateGreaterThanEquals conditions -type DateGreaterThanEqualsEvaluator struct{} - -func (e *DateGreaterThanEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedTime, err := time.Parse(time.RFC3339, expected) - if err != nil { - continue - } - for _, contextValue := range contextValues { - contextTime, err := time.Parse(time.RFC3339, contextValue) - if err != nil { - continue - } - if contextTime.After(expectedTime) || contextTime.Equal(expectedTime) { - return true - } - } - } - return false -} - -// BoolEvaluator evaluates Bool conditions -type BoolEvaluator struct{} - -func (e *BoolEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - for _, contextValue := range contextValues { - if strings.ToLower(expected) == strings.ToLower(contextValue) { - return true - } - } - } - return false -} - -// IpAddressEvaluator evaluates IpAddress conditions -type IpAddressEvaluator struct{} - -func (e *IpAddressEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - _, expectedNet, err := net.ParseCIDR(expected) - if err != nil { - // Try parsing as single IP - expectedIP := net.ParseIP(expected) - if expectedIP == nil { - glog.V(3).Infof("Failed to parse expected IP address: %s", expected) - continue - } - for _, contextValue := range contextValues { - contextIP := net.ParseIP(contextValue) - if contextIP == nil { - glog.V(3).Infof("Failed to parse IP address: %s", contextValue) - continue - } - if contextIP.Equal(expectedIP) { - return true - } - } - } else { - // CIDR network - for _, contextValue := range contextValues { - contextIP := net.ParseIP(contextValue) - if contextIP == nil { - glog.V(3).Infof("Failed to parse IP address: %s", contextValue) - continue - } - if expectedNet.Contains(contextIP) { - return true - } - } - } - } - return false -} - -// NotIpAddressEvaluator evaluates NotIpAddress conditions -type NotIpAddressEvaluator struct{} - -func (e *NotIpAddressEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - _, expectedNet, err := net.ParseCIDR(expected) - if err != nil { - // Try parsing as single IP - expectedIP := net.ParseIP(expected) - if expectedIP == nil { - glog.V(3).Infof("Failed to parse expected IP address: %s", expected) - continue - } - for _, contextValue := range contextValues { - contextIP := net.ParseIP(contextValue) - if contextIP == nil { - glog.V(3).Infof("Failed to parse IP address: %s", contextValue) - continue - } - if contextIP.Equal(expectedIP) { - return false - } - } - } else { - // CIDR network - for _, contextValue := range contextValues { - contextIP := net.ParseIP(contextValue) - if contextIP == nil { - glog.V(3).Infof("Failed to parse IP address: %s", contextValue) - continue - } - if expectedNet.Contains(contextIP) { - return false - } - } - } - } - return true -} - -// ArnEqualsEvaluator evaluates ArnEquals conditions -type ArnEqualsEvaluator struct{} - -func (e *ArnEqualsEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - for _, contextValue := range contextValues { - if expected == contextValue { - return true - } - } - } - return false -} - -// ArnLikeEvaluator evaluates ArnLike conditions -type ArnLikeEvaluator struct{} - -func (e *ArnLikeEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - patterns := getCachedNormalizedValues(conditionValue) - for _, pattern := range patterns { - for _, contextValue := range contextValues { - if MatchesWildcard(pattern, contextValue) { - return true - } - } - } - return false -} - -// NullEvaluator evaluates Null conditions -type NullEvaluator struct{} - -func (e *NullEvaluator) Evaluate(conditionValue interface{}, contextValues []string) bool { - expectedValues := getCachedNormalizedValues(conditionValue) - for _, expected := range expectedValues { - expectedBool := strings.ToLower(expected) == "true" - contextExists := len(contextValues) > 0 - if expectedBool && !contextExists { - return true // Key should be null and it is - } - if !expectedBool && contextExists { - return true // Key should not be null and it isn't - } - } - return false -} - -// GetConditionEvaluator returns the appropriate evaluator for a condition operator -func GetConditionEvaluator(operator string) (ConditionEvaluator, error) { - switch operator { - case "StringEquals": - return &StringEqualsEvaluator{}, nil - case "StringNotEquals": - return &StringNotEqualsEvaluator{}, nil - case "StringLike": - return &StringLikeEvaluator{}, nil - case "StringNotLike": - return &StringNotLikeEvaluator{}, nil - case "NumericEquals": - return &NumericEqualsEvaluator{}, nil - case "NumericNotEquals": - return &NumericNotEqualsEvaluator{}, nil - case "NumericLessThan": - return &NumericLessThanEvaluator{}, nil - case "NumericLessThanEquals": - return &NumericLessThanEqualsEvaluator{}, nil - case "NumericGreaterThan": - return &NumericGreaterThanEvaluator{}, nil - case "NumericGreaterThanEquals": - return &NumericGreaterThanEqualsEvaluator{}, nil - case "DateEquals": - return &DateEqualsEvaluator{}, nil - case "DateNotEquals": - return &DateNotEqualsEvaluator{}, nil - case "DateLessThan": - return &DateLessThanEvaluator{}, nil - case "DateLessThanEquals": - return &DateLessThanEqualsEvaluator{}, nil - case "DateGreaterThan": - return &DateGreaterThanEvaluator{}, nil - case "DateGreaterThanEquals": - return &DateGreaterThanEqualsEvaluator{}, nil - case "Bool": - return &BoolEvaluator{}, nil - case "IpAddress": - return &IpAddressEvaluator{}, nil - case "NotIpAddress": - return &NotIpAddressEvaluator{}, nil - case "ArnEquals": - return &ArnEqualsEvaluator{}, nil - case "ArnLike": - return &ArnLikeEvaluator{}, nil - case "Null": - return &NullEvaluator{}, nil - default: - return nil, fmt.Errorf("unsupported condition operator: %s", operator) - } -} - -// EvaluateConditions evaluates all conditions in a policy statement -func EvaluateConditions(conditions PolicyConditions, contextValues map[string][]string) bool { - if len(conditions) == 0 { - return true // No conditions means always true - } - - for operator, conditionMap := range conditions { - conditionEvaluator, err := GetConditionEvaluator(operator) - if err != nil { - glog.Warningf("Unsupported condition operator: %s", operator) - continue - } - - for key, value := range conditionMap { - contextVals, exists := contextValues[key] - if !exists { - contextVals = []string{} - } - - if !conditionEvaluator.Evaluate(value.Strings(), contextVals) { - return false // If any condition fails, the whole condition block fails - } - } - } - - return true -} - -// EvaluateConditionsLegacy evaluates conditions using the old interface{} format for backward compatibility -func EvaluateConditionsLegacy(conditions map[string]interface{}, contextValues map[string][]string) bool { - if len(conditions) == 0 { - return true // No conditions means always true - } - - for operator, conditionMap := range conditions { - conditionEvaluator, err := GetConditionEvaluator(operator) - if err != nil { - glog.Warningf("Unsupported condition operator: %s", operator) - continue - } - - conditionMapTyped, ok := conditionMap.(map[string]interface{}) - if !ok { - glog.Warningf("Invalid condition format for operator: %s", operator) - continue - } - - for key, value := range conditionMapTyped { - contextVals, exists := contextValues[key] - if !exists { - contextVals = []string{} - } - - if !conditionEvaluator.Evaluate(value, contextVals) { - return false // If any condition fails, the whole condition block fails - } - } - } - - return true -} diff --git a/weed/s3api/policy_engine/engine.go b/weed/s3api/policy_engine/engine.go deleted file mode 100644 index 709fafda4..000000000 --- a/weed/s3api/policy_engine/engine.go +++ /dev/null @@ -1,432 +0,0 @@ -package policy_engine - -import ( - "fmt" - "net" - "net/http" - "regexp" - "strings" - "sync" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// PolicyEvaluationResult represents the result of policy evaluation -type PolicyEvaluationResult int - -const ( - PolicyResultDeny PolicyEvaluationResult = iota - PolicyResultAllow - PolicyResultIndeterminate -) - -// PolicyEvaluationContext manages policy evaluation for a bucket -type PolicyEvaluationContext struct { - bucketName string - policy *CompiledPolicy - cache *PolicyCache - mutex sync.RWMutex -} - -// PolicyEngine is the main policy evaluation engine -type PolicyEngine struct { - contexts map[string]*PolicyEvaluationContext - mutex sync.RWMutex -} - -// NewPolicyEngine creates a new policy evaluation engine -func NewPolicyEngine() *PolicyEngine { - return &PolicyEngine{ - contexts: make(map[string]*PolicyEvaluationContext), - } -} - -// SetBucketPolicy sets the policy for a bucket -func (engine *PolicyEngine) SetBucketPolicy(bucketName string, policyJSON string) error { - policy, err := ParsePolicy(policyJSON) - if err != nil { - return fmt.Errorf("invalid policy: %w", err) - } - - compiled, err := CompilePolicy(policy) - if err != nil { - return fmt.Errorf("failed to compile policy: %w", err) - } - - engine.mutex.Lock() - defer engine.mutex.Unlock() - - context := &PolicyEvaluationContext{ - bucketName: bucketName, - policy: compiled, - cache: NewPolicyCache(), - } - - engine.contexts[bucketName] = context - glog.V(2).Infof("Set bucket policy for %s", bucketName) - return nil -} - -// GetBucketPolicy gets the policy for a bucket -func (engine *PolicyEngine) GetBucketPolicy(bucketName string) (*PolicyDocument, error) { - engine.mutex.RLock() - defer engine.mutex.RUnlock() - - context, exists := engine.contexts[bucketName] - if !exists { - return nil, fmt.Errorf("no policy found for bucket %s", bucketName) - } - - return context.policy.Document, nil -} - -// DeleteBucketPolicy deletes the policy for a bucket -func (engine *PolicyEngine) DeleteBucketPolicy(bucketName string) error { - engine.mutex.Lock() - defer engine.mutex.Unlock() - - delete(engine.contexts, bucketName) - glog.V(2).Infof("Deleted bucket policy for %s", bucketName) - return nil -} - -// EvaluatePolicy evaluates a policy for the given arguments -func (engine *PolicyEngine) EvaluatePolicy(bucketName string, args *PolicyEvaluationArgs) PolicyEvaluationResult { - engine.mutex.RLock() - context, exists := engine.contexts[bucketName] - engine.mutex.RUnlock() - - if !exists { - return PolicyResultIndeterminate - } - - return engine.evaluateCompiledPolicy(context.policy, args) -} - -// evaluateCompiledPolicy evaluates a compiled policy -func (engine *PolicyEngine) evaluateCompiledPolicy(policy *CompiledPolicy, args *PolicyEvaluationArgs) PolicyEvaluationResult { - // AWS Policy evaluation logic: - // 1. Check for explicit Deny - if found, return Deny - // 2. Check for explicit Allow - if found, return Allow - // 3. If no explicit Allow is found, return Deny (default deny) - - hasExplicitAllow := false - - for _, stmt := range policy.Statements { - if engine.evaluateStatement(&stmt, args) { - if stmt.Statement.Effect == PolicyEffectDeny { - return PolicyResultDeny // Explicit deny trumps everything - } - if stmt.Statement.Effect == PolicyEffectAllow { - hasExplicitAllow = true - } - } - } - - if hasExplicitAllow { - return PolicyResultAllow - } - - return PolicyResultDeny // Default deny -} - -// evaluateStatement evaluates a single policy statement -func (engine *PolicyEngine) evaluateStatement(stmt *CompiledStatement, args *PolicyEvaluationArgs) bool { - // Check if action matches - if !engine.matchesPatterns(stmt.ActionPatterns, args.Action) { - return false - } - - // Check if resource matches - if !engine.matchesPatterns(stmt.ResourcePatterns, args.Resource) { - return false - } - - // Check if principal matches (if specified) - if len(stmt.PrincipalPatterns) > 0 { - if !engine.matchesPatterns(stmt.PrincipalPatterns, args.Principal) { - return false - } - } - - // Check conditions - if len(stmt.Statement.Condition) > 0 { - if !EvaluateConditions(stmt.Statement.Condition, args.Conditions) { - return false - } - } - - return true -} - -// matchesPatterns checks if a value matches any of the compiled patterns -func (engine *PolicyEngine) matchesPatterns(patterns []*regexp.Regexp, value string) bool { - for _, pattern := range patterns { - if pattern.MatchString(value) { - return true - } - } - return false -} - -// ExtractConditionValuesFromRequest extracts condition values from HTTP request -func ExtractConditionValuesFromRequest(r *http.Request) map[string][]string { - values := make(map[string][]string) - - // AWS condition keys - // Extract IP address without port for proper IP matching - host, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - // Log a warning if splitting fails - glog.Warningf("Failed to parse IP address from RemoteAddr %q: %v", r.RemoteAddr, err) - // If splitting fails, use the original RemoteAddr (might be just IP without port) - host = r.RemoteAddr - } - values["aws:SourceIp"] = []string{host} - values["aws:SecureTransport"] = []string{fmt.Sprintf("%t", r.TLS != nil)} - // Use AWS standard condition key for current time - values["aws:CurrentTime"] = []string{time.Now().Format(time.RFC3339)} - // Keep RequestTime for backward compatibility - values["aws:RequestTime"] = []string{time.Now().Format(time.RFC3339)} - - // S3 specific condition keys - if userAgent := r.Header.Get("User-Agent"); userAgent != "" { - values["aws:UserAgent"] = []string{userAgent} - } - - if referer := r.Header.Get("Referer"); referer != "" { - values["aws:Referer"] = []string{referer} - } - - // S3 object-level conditions - if r.Method == "GET" || r.Method == "HEAD" { - values["s3:ExistingObjectTag"] = extractObjectTags(r) - } - - // S3 bucket-level conditions - if delimiter := r.URL.Query().Get("delimiter"); delimiter != "" { - values["s3:delimiter"] = []string{delimiter} - } - - if prefix := r.URL.Query().Get("prefix"); prefix != "" { - values["s3:prefix"] = []string{prefix} - } - - if maxKeys := r.URL.Query().Get("max-keys"); maxKeys != "" { - values["s3:max-keys"] = []string{maxKeys} - } - - // Authentication method - if authHeader := r.Header.Get("Authorization"); authHeader != "" { - if strings.HasPrefix(authHeader, "AWS4-HMAC-SHA256") { - values["s3:authType"] = []string{"REST-HEADER"} - } else if strings.HasPrefix(authHeader, "AWS ") { - values["s3:authType"] = []string{"REST-HEADER"} - } - } else if r.URL.Query().Get("AWSAccessKeyId") != "" { - values["s3:authType"] = []string{"REST-QUERY-STRING"} - } - - // HTTP method - values["s3:RequestMethod"] = []string{r.Method} - - // Extract custom headers - for key, headerValues := range r.Header { - if strings.HasPrefix(strings.ToLower(key), "x-amz-") { - values[strings.ToLower(key)] = headerValues - } - } - - return values -} - -// extractObjectTags extracts object tags from request (placeholder implementation) -func extractObjectTags(r *http.Request) []string { - // This would need to be implemented based on how object tags are stored - // For now, return empty slice - return []string{} -} - -// BuildResourceArn builds an ARN for the given bucket and object -func BuildResourceArn(bucketName, objectName string) string { - if objectName == "" { - return fmt.Sprintf("arn:aws:s3:::%s", bucketName) - } - return fmt.Sprintf("arn:aws:s3:::%s/%s", bucketName, objectName) -} - -// BuildActionName builds a standardized action name -func BuildActionName(action string) string { - if strings.HasPrefix(action, "s3:") { - return action - } - return fmt.Sprintf("s3:%s", action) -} - -// IsReadAction checks if an action is a read action -func IsReadAction(action string) bool { - readActions := []string{ - "s3:GetObject", - "s3:GetObjectVersion", - "s3:GetObjectAcl", - "s3:GetObjectVersionAcl", - "s3:GetObjectTagging", - "s3:GetObjectVersionTagging", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - "s3:GetBucketVersioning", - "s3:GetBucketAcl", - "s3:GetBucketCors", - "s3:GetBucketPolicy", - "s3:GetBucketTagging", - "s3:GetBucketNotification", - "s3:GetBucketObjectLockConfiguration", - "s3:GetObjectRetention", - "s3:GetObjectLegalHold", - } - - for _, readAction := range readActions { - if action == readAction { - return true - } - } - return false -} - -// IsWriteAction checks if an action is a write action -func IsWriteAction(action string) bool { - writeActions := []string{ - "s3:PutObject", - "s3:PutObjectAcl", - "s3:PutObjectTagging", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:DeleteObjectTagging", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - "s3:PutBucketAcl", - "s3:PutBucketCors", - "s3:PutBucketPolicy", - "s3:PutBucketTagging", - "s3:PutBucketNotification", - "s3:PutBucketVersioning", - "s3:DeleteBucketPolicy", - "s3:DeleteBucketTagging", - "s3:DeleteBucketCors", - "s3:PutBucketObjectLockConfiguration", - "s3:PutObjectRetention", - "s3:PutObjectLegalHold", - "s3:BypassGovernanceRetention", - } - - for _, writeAction := range writeActions { - if action == writeAction { - return true - } - } - return false -} - -// GetBucketNameFromArn extracts bucket name from ARN -func GetBucketNameFromArn(arn string) string { - if strings.HasPrefix(arn, "arn:aws:s3:::") { - parts := strings.SplitN(arn[13:], "/", 2) - return parts[0] - } - return "" -} - -// GetObjectNameFromArn extracts object name from ARN -func GetObjectNameFromArn(arn string) string { - if strings.HasPrefix(arn, "arn:aws:s3:::") { - parts := strings.SplitN(arn[13:], "/", 2) - if len(parts) > 1 { - return parts[1] - } - } - return "" -} - -// HasPolicyForBucket checks if a bucket has a policy -func (engine *PolicyEngine) HasPolicyForBucket(bucketName string) bool { - engine.mutex.RLock() - defer engine.mutex.RUnlock() - - _, exists := engine.contexts[bucketName] - return exists -} - -// GetPolicyStatements returns all policy statements for a bucket -func (engine *PolicyEngine) GetPolicyStatements(bucketName string) []PolicyStatement { - engine.mutex.RLock() - defer engine.mutex.RUnlock() - - context, exists := engine.contexts[bucketName] - if !exists { - return nil - } - - return context.policy.Document.Statement -} - -// ValidatePolicyForBucket validates if a policy is valid for a bucket -func (engine *PolicyEngine) ValidatePolicyForBucket(bucketName string, policyJSON string) error { - policy, err := ParsePolicy(policyJSON) - if err != nil { - return err - } - - // Additional validation specific to the bucket - for _, stmt := range policy.Statement { - resources := normalizeToStringSlice(stmt.Resource) - for _, resource := range resources { - if resourceBucket := GetBucketFromResource(resource); resourceBucket != "" { - if resourceBucket != bucketName { - return fmt.Errorf("policy resource %s does not match bucket %s", resource, bucketName) - } - } - } - } - - return nil -} - -// ClearAllPolicies clears all bucket policies -func (engine *PolicyEngine) ClearAllPolicies() { - engine.mutex.Lock() - defer engine.mutex.Unlock() - - engine.contexts = make(map[string]*PolicyEvaluationContext) - glog.V(2).Info("Cleared all bucket policies") -} - -// GetAllBucketsWithPolicies returns all buckets that have policies -func (engine *PolicyEngine) GetAllBucketsWithPolicies() []string { - engine.mutex.RLock() - defer engine.mutex.RUnlock() - - buckets := make([]string, 0, len(engine.contexts)) - for bucketName := range engine.contexts { - buckets = append(buckets, bucketName) - } - return buckets -} - -// EvaluatePolicyForRequest evaluates policy for an HTTP request -func (engine *PolicyEngine) EvaluatePolicyForRequest(bucketName, objectName, action, principal string, r *http.Request) PolicyEvaluationResult { - resource := BuildResourceArn(bucketName, objectName) - actionName := BuildActionName(action) - conditions := ExtractConditionValuesFromRequest(r) - - args := &PolicyEvaluationArgs{ - Action: actionName, - Resource: resource, - Principal: principal, - Conditions: conditions, - } - - return engine.EvaluatePolicy(bucketName, args) -} diff --git a/weed/s3api/policy_engine/engine_test.go b/weed/s3api/policy_engine/engine_test.go deleted file mode 100644 index 799579ce6..000000000 --- a/weed/s3api/policy_engine/engine_test.go +++ /dev/null @@ -1,716 +0,0 @@ -package policy_engine - -import ( - "net/http" - "net/url" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -func TestPolicyEngine(t *testing.T) { - engine := NewPolicyEngine() - - // Test policy JSON - policyJSON := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": ["arn:aws:s3:::test-bucket/*"] - }, - { - "Effect": "Deny", - "Action": ["s3:DeleteObject"], - "Resource": ["arn:aws:s3:::test-bucket/*"], - "Condition": { - "StringEquals": { - "s3:RequestMethod": ["DELETE"] - } - } - } - ] - }` - - // Set bucket policy - err := engine.SetBucketPolicy("test-bucket", policyJSON) - if err != nil { - t.Fatalf("Failed to set bucket policy: %v", err) - } - - // Test Allow case - args := &PolicyEvaluationArgs{ - Action: "s3:GetObject", - Resource: "arn:aws:s3:::test-bucket/test-object", - Principal: "user1", - Conditions: map[string][]string{}, - } - - result := engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultAllow { - t.Errorf("Expected Allow, got %v", result) - } - - // Test Deny case - args = &PolicyEvaluationArgs{ - Action: "s3:DeleteObject", - Resource: "arn:aws:s3:::test-bucket/test-object", - Principal: "user1", - Conditions: map[string][]string{ - "s3:RequestMethod": {"DELETE"}, - }, - } - - result = engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultDeny { - t.Errorf("Expected Deny, got %v", result) - } - - // Test non-matching action - args = &PolicyEvaluationArgs{ - Action: "s3:ListBucket", - Resource: "arn:aws:s3:::test-bucket", - Principal: "user1", - Conditions: map[string][]string{}, - } - - result = engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultDeny { - t.Errorf("Expected Deny for non-matching action, got %v", result) - } - - // Test GetBucketPolicy - policy, err := engine.GetBucketPolicy("test-bucket") - if err != nil { - t.Fatalf("Failed to get bucket policy: %v", err) - } - if policy.Version != "2012-10-17" { - t.Errorf("Expected version 2012-10-17, got %s", policy.Version) - } - - // Test DeleteBucketPolicy - err = engine.DeleteBucketPolicy("test-bucket") - if err != nil { - t.Fatalf("Failed to delete bucket policy: %v", err) - } - - // Test policy is gone - result = engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultIndeterminate { - t.Errorf("Expected Indeterminate after policy deletion, got %v", result) - } -} - -func TestConditionEvaluators(t *testing.T) { - tests := []struct { - name string - operator string - conditionValue interface{} - contextValues []string - expected bool - }{ - { - name: "StringEquals - match", - operator: "StringEquals", - conditionValue: "test-value", - contextValues: []string{"test-value"}, - expected: true, - }, - { - name: "StringEquals - no match", - operator: "StringEquals", - conditionValue: "test-value", - contextValues: []string{"other-value"}, - expected: false, - }, - { - name: "StringLike - wildcard match", - operator: "StringLike", - conditionValue: "test-*", - contextValues: []string{"test-value"}, - expected: true, - }, - { - name: "StringLike - wildcard no match", - operator: "StringLike", - conditionValue: "test-*", - contextValues: []string{"other-value"}, - expected: false, - }, - { - name: "NumericEquals - match", - operator: "NumericEquals", - conditionValue: "42", - contextValues: []string{"42"}, - expected: true, - }, - { - name: "NumericLessThan - match", - operator: "NumericLessThan", - conditionValue: "100", - contextValues: []string{"50"}, - expected: true, - }, - { - name: "NumericLessThan - no match", - operator: "NumericLessThan", - conditionValue: "100", - contextValues: []string{"150"}, - expected: false, - }, - { - name: "IpAddress - CIDR match", - operator: "IpAddress", - conditionValue: "192.168.1.0/24", - contextValues: []string{"192.168.1.100"}, - expected: true, - }, - { - name: "IpAddress - CIDR no match", - operator: "IpAddress", - conditionValue: "192.168.1.0/24", - contextValues: []string{"10.0.0.1"}, - expected: false, - }, - { - name: "Bool - true match", - operator: "Bool", - conditionValue: "true", - contextValues: []string{"true"}, - expected: true, - }, - { - name: "Bool - false match", - operator: "Bool", - conditionValue: "false", - contextValues: []string{"false"}, - expected: true, - }, - { - name: "Bool - no match", - operator: "Bool", - conditionValue: "true", - contextValues: []string{"false"}, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - evaluator, err := GetConditionEvaluator(tt.operator) - if err != nil { - t.Fatalf("Failed to get condition evaluator: %v", err) - } - - result := evaluator.Evaluate(tt.conditionValue, tt.contextValues) - if result != tt.expected { - t.Errorf("Expected %v, got %v", tt.expected, result) - } - }) - } -} - -func TestConvertIdentityToPolicy(t *testing.T) { - identityActions := []string{ - "Read:bucket1/*", - "Write:bucket1/*", - "Admin:bucket2", - } - - policy, err := ConvertIdentityToPolicy(identityActions, "bucket1") - if err != nil { - t.Fatalf("Failed to convert identity to policy: %v", err) - } - - if policy.Version != "2012-10-17" { - t.Errorf("Expected version 2012-10-17, got %s", policy.Version) - } - - if len(policy.Statement) != 3 { - t.Errorf("Expected 3 statements, got %d", len(policy.Statement)) - } - - // Check first statement (Read) - stmt := policy.Statement[0] - if stmt.Effect != PolicyEffectAllow { - t.Errorf("Expected Allow effect, got %s", stmt.Effect) - } - - actions := normalizeToStringSlice(stmt.Action) - if len(actions) != 3 { - t.Errorf("Expected 3 read actions, got %d", len(actions)) - } - - resources := normalizeToStringSlice(stmt.Resource) - if len(resources) != 2 { - t.Errorf("Expected 2 resources, got %d", len(resources)) - } -} - -func TestPolicyValidation(t *testing.T) { - tests := []struct { - name string - policyJSON string - expectError bool - }{ - { - name: "Valid policy", - policyJSON: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket/*" - } - ] - }`, - expectError: false, - }, - { - name: "Invalid version", - policyJSON: `{ - "Version": "2008-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket/*" - } - ] - }`, - expectError: true, - }, - { - name: "Missing action", - policyJSON: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Resource": "arn:aws:s3:::test-bucket/*" - } - ] - }`, - expectError: true, - }, - { - name: "Invalid JSON", - policyJSON: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket/*" - } - ] - }extra`, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := ParsePolicy(tt.policyJSON) - if (err != nil) != tt.expectError { - t.Errorf("Expected error: %v, got error: %v", tt.expectError, err) - } - }) - } -} - -func TestPatternMatching(t *testing.T) { - tests := []struct { - name string - pattern string - value string - expected bool - }{ - { - name: "Exact match", - pattern: "s3:GetObject", - value: "s3:GetObject", - expected: true, - }, - { - name: "Wildcard match", - pattern: "s3:Get*", - value: "s3:GetObject", - expected: true, - }, - { - name: "Wildcard no match", - pattern: "s3:Put*", - value: "s3:GetObject", - expected: false, - }, - { - name: "Full wildcard", - pattern: "*", - value: "anything", - expected: true, - }, - { - name: "Question mark wildcard", - pattern: "s3:GetObjec?", - value: "s3:GetObject", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - compiled, err := compilePattern(tt.pattern) - if err != nil { - t.Fatalf("Failed to compile pattern %s: %v", tt.pattern, err) - } - - result := compiled.MatchString(tt.value) - if result != tt.expected { - t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.value, tt.expected, result) - } - }) - } -} - -func TestExtractConditionValuesFromRequest(t *testing.T) { - // Create a test request - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: "/test-bucket/test-object", - RawQuery: "prefix=test&delimiter=/", - }, - Header: map[string][]string{ - "User-Agent": {"test-agent"}, - "X-Amz-Copy-Source": {"source-bucket/source-object"}, - }, - RemoteAddr: "192.168.1.100:12345", - } - - values := ExtractConditionValuesFromRequest(req) - - // Check extracted values - if len(values["aws:SourceIp"]) != 1 || values["aws:SourceIp"][0] != "192.168.1.100" { - t.Errorf("Expected SourceIp to be 192.168.1.100, got %v", values["aws:SourceIp"]) - } - - if len(values["aws:UserAgent"]) != 1 || values["aws:UserAgent"][0] != "test-agent" { - t.Errorf("Expected UserAgent to be test-agent, got %v", values["aws:UserAgent"]) - } - - if len(values["s3:prefix"]) != 1 || values["s3:prefix"][0] != "test" { - t.Errorf("Expected prefix to be test, got %v", values["s3:prefix"]) - } - - if len(values["s3:delimiter"]) != 1 || values["s3:delimiter"][0] != "/" { - t.Errorf("Expected delimiter to be /, got %v", values["s3:delimiter"]) - } - - if len(values["s3:RequestMethod"]) != 1 || values["s3:RequestMethod"][0] != "GET" { - t.Errorf("Expected RequestMethod to be GET, got %v", values["s3:RequestMethod"]) - } - - if len(values["x-amz-copy-source"]) != 1 || values["x-amz-copy-source"][0] != "source-bucket/source-object" { - t.Errorf("Expected X-Amz-Copy-Source header to be extracted, got %v", values["x-amz-copy-source"]) - } - - // Check that aws:CurrentTime is properly set - if len(values["aws:CurrentTime"]) != 1 { - t.Errorf("Expected aws:CurrentTime to be set, got %v", values["aws:CurrentTime"]) - } - - // Check that aws:RequestTime is still available for backward compatibility - if len(values["aws:RequestTime"]) != 1 { - t.Errorf("Expected aws:RequestTime to be set for backward compatibility, got %v", values["aws:RequestTime"]) - } -} - -func TestPolicyEvaluationWithConditions(t *testing.T) { - engine := NewPolicyEngine() - - // Policy with IP condition - policyJSON := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - } - ] - }` - - err := engine.SetBucketPolicy("test-bucket", policyJSON) - if err != nil { - t.Fatalf("Failed to set bucket policy: %v", err) - } - - // Test matching IP - args := &PolicyEvaluationArgs{ - Action: "s3:GetObject", - Resource: "arn:aws:s3:::test-bucket/test-object", - Principal: "user1", - Conditions: map[string][]string{ - "aws:SourceIp": {"192.168.1.100"}, - }, - } - - result := engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultAllow { - t.Errorf("Expected Allow for matching IP, got %v", result) - } - - // Test non-matching IP - args.Conditions["aws:SourceIp"] = []string{"10.0.0.1"} - result = engine.EvaluatePolicy("test-bucket", args) - if result != PolicyResultDeny { - t.Errorf("Expected Deny for non-matching IP, got %v", result) - } -} - -func TestResourceArn(t *testing.T) { - tests := []struct { - name string - bucketName string - objectName string - expected string - }{ - { - name: "Bucket only", - bucketName: "test-bucket", - objectName: "", - expected: "arn:aws:s3:::test-bucket", - }, - { - name: "Bucket and object", - bucketName: "test-bucket", - objectName: "test-object", - expected: "arn:aws:s3:::test-bucket/test-object", - }, - { - name: "Bucket and nested object", - bucketName: "test-bucket", - objectName: "folder/subfolder/test-object", - expected: "arn:aws:s3:::test-bucket/folder/subfolder/test-object", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := BuildResourceArn(tt.bucketName, tt.objectName) - if result != tt.expected { - t.Errorf("Expected %s, got %s", tt.expected, result) - } - }) - } -} - -func TestActionConversion(t *testing.T) { - tests := []struct { - name string - action string - expected string - }{ - { - name: "Already has s3 prefix", - action: "s3:GetObject", - expected: "s3:GetObject", - }, - { - name: "Add s3 prefix", - action: "GetObject", - expected: "s3:GetObject", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := BuildActionName(tt.action) - if result != tt.expected { - t.Errorf("Expected %s, got %s", tt.expected, result) - } - }) - } -} - -func TestPolicyEngineForRequest(t *testing.T) { - engine := NewPolicyEngine() - - // Set up a policy - policyJSON := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::test-bucket/*", - "Condition": { - "StringEquals": { - "s3:RequestMethod": "GET" - } - } - } - ] - }` - - err := engine.SetBucketPolicy("test-bucket", policyJSON) - if err != nil { - t.Fatalf("Failed to set bucket policy: %v", err) - } - - // Create test request - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: "/test-bucket/test-object", - }, - Header: make(map[string][]string), - RemoteAddr: "192.168.1.100:12345", - } - - // Test the request - result := engine.EvaluatePolicyForRequest("test-bucket", "test-object", "GetObject", "user1", req) - if result != PolicyResultAllow { - t.Errorf("Expected Allow for matching request, got %v", result) - } -} - -func TestWildcardMatching(t *testing.T) { - tests := []struct { - name string - pattern string - str string - expected bool - }{ - { - name: "Exact match", - pattern: "test", - str: "test", - expected: true, - }, - { - name: "Single wildcard", - pattern: "*", - str: "anything", - expected: true, - }, - { - name: "Prefix wildcard", - pattern: "test*", - str: "test123", - expected: true, - }, - { - name: "Suffix wildcard", - pattern: "*test", - str: "123test", - expected: true, - }, - { - name: "Middle wildcard", - pattern: "test*123", - str: "testABC123", - expected: true, - }, - { - name: "No match", - pattern: "test*", - str: "other", - expected: false, - }, - { - name: "Multiple wildcards", - pattern: "test*abc*123", - str: "testXYZabcDEF123", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := MatchesWildcard(tt.pattern, tt.str) - if result != tt.expected { - t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.str, tt.expected, result) - } - }) - } -} - -func TestCompilePolicy(t *testing.T) { - policyJSON := `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": "arn:aws:s3:::test-bucket/*" - } - ] - }` - - policy, err := ParsePolicy(policyJSON) - if err != nil { - t.Fatalf("Failed to parse policy: %v", err) - } - - compiled, err := CompilePolicy(policy) - if err != nil { - t.Fatalf("Failed to compile policy: %v", err) - } - - if len(compiled.Statements) != 1 { - t.Errorf("Expected 1 compiled statement, got %d", len(compiled.Statements)) - } - - stmt := compiled.Statements[0] - if len(stmt.ActionPatterns) != 2 { - t.Errorf("Expected 2 action patterns, got %d", len(stmt.ActionPatterns)) - } - - if len(stmt.ResourcePatterns) != 1 { - t.Errorf("Expected 1 resource pattern, got %d", len(stmt.ResourcePatterns)) - } -} - -// TestNewPolicyBackedIAMWithLegacy tests the constructor overload -func TestNewPolicyBackedIAMWithLegacy(t *testing.T) { - // Mock legacy IAM - mockLegacyIAM := &MockLegacyIAM{} - - // Test the new constructor - policyBackedIAM := NewPolicyBackedIAMWithLegacy(mockLegacyIAM) - - // Verify that the legacy IAM is set - if policyBackedIAM.legacyIAM != mockLegacyIAM { - t.Errorf("Expected legacy IAM to be set, but it wasn't") - } - - // Verify that the policy engine is initialized - if policyBackedIAM.policyEngine == nil { - t.Errorf("Expected policy engine to be initialized, but it wasn't") - } - - // Compare with the traditional approach - traditionalIAM := NewPolicyBackedIAM() - traditionalIAM.SetLegacyIAM(mockLegacyIAM) - - // Both should behave the same - if policyBackedIAM.legacyIAM != traditionalIAM.legacyIAM { - t.Errorf("Expected both approaches to result in the same legacy IAM") - } -} - -// MockLegacyIAM implements the LegacyIAM interface for testing -type MockLegacyIAM struct{} - -func (m *MockLegacyIAM) authRequest(r *http.Request, action Action) (Identity, s3err.ErrorCode) { - return nil, s3err.ErrNone -} diff --git a/weed/s3api/policy_engine/examples.go b/weed/s3api/policy_engine/examples.go deleted file mode 100644 index 6f14127f3..000000000 --- a/weed/s3api/policy_engine/examples.go +++ /dev/null @@ -1,463 +0,0 @@ -//go:build ignore -// +build ignore - -package policy_engine - -import ( - "encoding/json" - "fmt" -) - -// This file contains examples and documentation for the policy engine - -// ExampleIdentityJSON shows the existing identities.json format (unchanged) -var ExampleIdentityJSON = `{ - "identities": [ - { - "name": "user1", - "credentials": [ - { - "accessKey": "AKIAIOSFODNN7EXAMPLE", - "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" - } - ], - "actions": [ - "Read:bucket1/*", - "Write:bucket1/*", - "Admin:bucket2" - ] - }, - { - "name": "readonly-user", - "credentials": [ - { - "accessKey": "AKIAI44QH8DHBEXAMPLE", - "secretKey": "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY" - } - ], - "actions": [ - "Read:bucket1/*", - "List:bucket1" - ] - } - ] -}` - -// ExampleBucketPolicy shows an AWS S3 bucket policy with conditions -var ExampleBucketPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowGetObjectFromSpecificIP", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::my-bucket/*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - }, - { - "Sid": "AllowPutObjectWithSSL", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::my-bucket/*", - "Condition": { - "Bool": { - "aws:SecureTransport": "true" - } - } - }, - { - "Sid": "DenyDeleteFromProduction", - "Effect": "Deny", - "Principal": "*", - "Action": "s3:DeleteObject", - "Resource": "arn:aws:s3:::my-bucket/production/*" - } - ] -}` - -// ExampleTimeBasedPolicy shows a policy with time-based conditions -var ExampleTimeBasedPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowAccessDuringBusinessHours", - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": "arn:aws:s3:::my-bucket/*", - "Condition": { - "DateGreaterThan": { - "aws:RequestTime": "2023-01-01T08:00:00Z" - }, - "DateLessThan": { - "aws:RequestTime": "2023-12-31T18:00:00Z" - } - } - } - ] -}` - -// ExampleIPRestrictedPolicy shows a policy with IP restrictions -var ExampleIPRestrictedPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowFromOfficeNetwork", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::my-bucket", - "arn:aws:s3:::my-bucket/*" - ], - "Condition": { - "IpAddress": { - "aws:SourceIp": [ - "203.0.113.0/24", - "198.51.100.0/24" - ] - } - } - }, - { - "Sid": "DenyFromRestrictedIPs", - "Effect": "Deny", - "Principal": "*", - "Action": "*", - "Resource": "*", - "Condition": { - "IpAddress": { - "aws:SourceIp": [ - "192.0.2.0/24" - ] - } - } - } - ] -}` - -// ExamplePublicReadPolicy shows a policy for public read access -var ExamplePublicReadPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "PublicReadGetObject", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::my-public-bucket/*" - } - ] -}` - -// ExampleCORSPolicy shows a policy with CORS-related conditions -var ExampleCORSPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowCrossOriginRequests", - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject", "s3:PutObject"], - "Resource": "arn:aws:s3:::my-bucket/*", - "Condition": { - "StringLike": { - "aws:Referer": [ - "https://example.com/*", - "https://*.example.com/*" - ] - } - } - } - ] -}` - -// ExampleUserAgentPolicy shows a policy with user agent restrictions -var ExampleUserAgentPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowSpecificUserAgents", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::my-bucket/*", - "Condition": { - "StringLike": { - "aws:UserAgent": [ - "MyApp/*", - "curl/*" - ] - } - } - } - ] -}` - -// ExamplePrefixBasedPolicy shows a policy with prefix-based access -var ExamplePrefixBasedPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowUserFolderAccess", - "Effect": "Allow", - "Principal": "*", - "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"], - "Resource": "arn:aws:s3:::my-bucket/${aws:username}/*", - "Condition": { - "StringEquals": { - "s3:prefix": "${aws:username}/" - } - } - } - ] -}` - -// ExampleMultiStatementPolicy shows a complex policy with multiple statements -var ExampleMultiStatementPolicy = `{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "AllowListBucket", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:ListBucket", - "Resource": "arn:aws:s3:::my-bucket", - "Condition": { - "StringEquals": { - "s3:prefix": "public/" - } - } - }, - { - "Sid": "AllowGetPublicObjects", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::my-bucket/public/*" - }, - { - "Sid": "AllowAuthenticatedUpload", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::my-bucket/uploads/*", - "Condition": { - "StringEquals": { - "s3:x-amz-acl": "private" - } - } - }, - { - "Sid": "DenyInsecureConnections", - "Effect": "Deny", - "Principal": "*", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::my-bucket", - "arn:aws:s3:::my-bucket/*" - ], - "Condition": { - "Bool": { - "aws:SecureTransport": "false" - } - } - } - ] -}` - -// GetAllExamples returns all example policies -func GetAllExamples() map[string]string { - return map[string]string{ - "basic-bucket-policy": ExampleBucketPolicy, - "time-based-policy": ExampleTimeBasedPolicy, - "ip-restricted-policy": ExampleIPRestrictedPolicy, - "public-read-policy": ExamplePublicReadPolicy, - "cors-policy": ExampleCORSPolicy, - "user-agent-policy": ExampleUserAgentPolicy, - "prefix-based-policy": ExamplePrefixBasedPolicy, - "multi-statement-policy": ExampleMultiStatementPolicy, - } -} - -// ValidateExamplePolicies validates all example policies -func ValidateExamplePolicies() error { - examples := GetAllExamples() - - for name, policyJSON := range examples { - _, err := ParsePolicy(policyJSON) - if err != nil { - return fmt.Errorf("invalid example policy %s: %v", name, err) - } - } - - return nil -} - -// GetExamplePolicy returns a specific example policy -func GetExamplePolicy(name string) (string, error) { - examples := GetAllExamples() - - policy, exists := examples[name] - if !exists { - return "", fmt.Errorf("example policy %s not found", name) - } - - return policy, nil -} - -// CreateExamplePolicyDocument creates a PolicyDocument from an example -func CreateExamplePolicyDocument(name string) (*PolicyDocument, error) { - policyJSON, err := GetExamplePolicy(name) - if err != nil { - return nil, err - } - - return ParsePolicy(policyJSON) -} - -// PrintExamplePolicyPretty prints an example policy in pretty format -func PrintExamplePolicyPretty(name string) error { - policyJSON, err := GetExamplePolicy(name) - if err != nil { - return err - } - - var policy interface{} - if err := json.Unmarshal([]byte(policyJSON), &policy); err != nil { - return err - } - - prettyJSON, err := json.MarshalIndent(policy, "", " ") - if err != nil { - return err - } - - fmt.Printf("Example Policy: %s\n", name) - fmt.Printf("================\n") - fmt.Println(string(prettyJSON)) - - return nil -} - -// ExampleUsage demonstrates how to use the policy engine -func ExampleUsage() { - // Create a new policy engine - engine := NewPolicyEngine() - - // Set a bucket policy - policyJSON := ExampleBucketPolicy - err := engine.SetBucketPolicy("my-bucket", policyJSON) - if err != nil { - fmt.Printf("Error setting bucket policy: %v\n", err) - return - } - - // Evaluate a policy - args := &PolicyEvaluationArgs{ - Action: "s3:GetObject", - Resource: "arn:aws:s3:::my-bucket/test-object", - Principal: "*", - Conditions: map[string][]string{ - "aws:SourceIp": {"192.168.1.100"}, - }, - } - - result := engine.EvaluatePolicy("my-bucket", args) - - switch result { - case PolicyResultAllow: - fmt.Println("Access allowed") - case PolicyResultDeny: - fmt.Println("Access denied") - case PolicyResultIndeterminate: - fmt.Println("Access indeterminate") - } -} - -// ExampleLegacyIntegration demonstrates backward compatibility -func ExampleLegacyIntegration() { - // Legacy identity actions - legacyActions := []string{ - "Read:bucket1/*", - "Write:bucket1/uploads/*", - "Admin:bucket2", - } - - // Convert to policy - policy, err := ConvertIdentityToPolicy(legacyActions, "bucket1") - if err != nil { - fmt.Printf("Error converting identity to policy: %v\n", err) - return - } - - // Create policy-backed IAM - policyIAM := NewPolicyBackedIAM() - - // Set the converted policy - policyJSON, _ := json.MarshalIndent(policy, "", " ") - err = policyIAM.SetBucketPolicy("bucket1", string(policyJSON)) - if err != nil { - fmt.Printf("Error setting bucket policy: %v\n", err) - return - } - - fmt.Println("Legacy identity successfully converted to AWS S3 policy") -} - -// ExampleConditions demonstrates various condition types -func ExampleConditions() { - examples := map[string]string{ - "StringEquals": `"StringEquals": {"s3:prefix": "documents/"}`, - "StringLike": `"StringLike": {"aws:UserAgent": "MyApp/*"}`, - "NumericEquals": `"NumericEquals": {"s3:max-keys": "10"}`, - "NumericLessThan": `"NumericLessThan": {"s3:max-keys": "1000"}`, - "DateGreaterThan": `"DateGreaterThan": {"aws:RequestTime": "2023-01-01T00:00:00Z"}`, - "DateLessThan": `"DateLessThan": {"aws:RequestTime": "2023-12-31T23:59:59Z"}`, - "IpAddress": `"IpAddress": {"aws:SourceIp": "192.168.1.0/24"}`, - "NotIpAddress": `"NotIpAddress": {"aws:SourceIp": "10.0.0.0/8"}`, - "Bool": `"Bool": {"aws:SecureTransport": "true"}`, - "Null": `"Null": {"s3:x-amz-server-side-encryption": "false"}`, - } - - fmt.Println("Supported Condition Operators:") - fmt.Println("==============================") - - for operator, example := range examples { - fmt.Printf("%s: %s\n", operator, example) - } -} - -// ExampleMigrationStrategy demonstrates migration from legacy to policy-based system -func ExampleMigrationStrategy() { - fmt.Println("Migration Strategy:") - fmt.Println("==================") - fmt.Println("1. Keep existing identities.json unchanged") - fmt.Println("2. Legacy actions are automatically converted to AWS policies internally") - fmt.Println("3. Add bucket policies for advanced features:") - fmt.Println(" - IP restrictions") - fmt.Println(" - Time-based access") - fmt.Println(" - SSL-only access") - fmt.Println(" - User agent restrictions") - fmt.Println("4. Policy evaluation precedence:") - fmt.Println(" - Explicit Deny (highest priority)") - fmt.Println(" - Explicit Allow") - fmt.Println(" - Default Deny (lowest priority)") -} - -// PrintAllExamples prints all example policies -func PrintAllExamples() { - examples := GetAllExamples() - - for name := range examples { - fmt.Printf("\n") - PrintExamplePolicyPretty(name) - fmt.Printf("\n") - } -} diff --git a/weed/s3api/policy_engine/integration.go b/weed/s3api/policy_engine/integration.go deleted file mode 100644 index 17bcec112..000000000 --- a/weed/s3api/policy_engine/integration.go +++ /dev/null @@ -1,512 +0,0 @@ -package policy_engine - -import ( - "fmt" - "net/http" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// Action represents an S3 action - this should match the type in auth_credentials.go -type Action string - -// Identity represents a user identity - this should match the type in auth_credentials.go -type Identity interface { - canDo(action Action, bucket string, objectKey string) bool -} - -// PolicyBackedIAM provides policy-based access control with fallback to legacy IAM -type PolicyBackedIAM struct { - policyEngine *PolicyEngine - legacyIAM LegacyIAM // Interface to delegate to existing IAM system -} - -// LegacyIAM interface for delegating to existing IAM implementation -type LegacyIAM interface { - authRequest(r *http.Request, action Action) (Identity, s3err.ErrorCode) -} - -// NewPolicyBackedIAM creates a new policy-backed IAM system -func NewPolicyBackedIAM() *PolicyBackedIAM { - return &PolicyBackedIAM{ - policyEngine: NewPolicyEngine(), - legacyIAM: nil, // Will be set when integrated with existing IAM - } -} - -// NewPolicyBackedIAMWithLegacy creates a new policy-backed IAM system with legacy IAM set -func NewPolicyBackedIAMWithLegacy(legacyIAM LegacyIAM) *PolicyBackedIAM { - return &PolicyBackedIAM{ - policyEngine: NewPolicyEngine(), - legacyIAM: legacyIAM, - } -} - -// SetLegacyIAM sets the legacy IAM system for fallback -func (p *PolicyBackedIAM) SetLegacyIAM(legacyIAM LegacyIAM) { - p.legacyIAM = legacyIAM -} - -// SetBucketPolicy sets the policy for a bucket -func (p *PolicyBackedIAM) SetBucketPolicy(bucketName string, policyJSON string) error { - return p.policyEngine.SetBucketPolicy(bucketName, policyJSON) -} - -// GetBucketPolicy gets the policy for a bucket -func (p *PolicyBackedIAM) GetBucketPolicy(bucketName string) (*PolicyDocument, error) { - return p.policyEngine.GetBucketPolicy(bucketName) -} - -// DeleteBucketPolicy deletes the policy for a bucket -func (p *PolicyBackedIAM) DeleteBucketPolicy(bucketName string) error { - return p.policyEngine.DeleteBucketPolicy(bucketName) -} - -// CanDo checks if a principal can perform an action on a resource -func (p *PolicyBackedIAM) CanDo(action, bucketName, objectName, principal string, r *http.Request) bool { - // If there's a bucket policy, evaluate it - if p.policyEngine.HasPolicyForBucket(bucketName) { - result := p.policyEngine.EvaluatePolicyForRequest(bucketName, objectName, action, principal, r) - switch result { - case PolicyResultAllow: - return true - case PolicyResultDeny: - return false - case PolicyResultIndeterminate: - // Fall through to legacy system - } - } - - // No bucket policy or indeterminate result, use legacy conversion - return p.evaluateLegacyAction(action, bucketName, objectName, principal) -} - -// evaluateLegacyAction evaluates actions using legacy identity-based rules -func (p *PolicyBackedIAM) evaluateLegacyAction(action, bucketName, objectName, principal string) bool { - // If we have a legacy IAM system to delegate to, use it - if p.legacyIAM != nil { - // Create a dummy request for legacy evaluation - // In real implementation, this would use the actual request - r := &http.Request{ - Header: make(http.Header), - } - - // Convert the action string to Action type - legacyAction := Action(action) - - // Use legacy IAM to check permission - identity, errCode := p.legacyIAM.authRequest(r, legacyAction) - if errCode != s3err.ErrNone { - return false - } - - // If we have an identity, check if it can perform the action - if identity != nil { - return identity.canDo(legacyAction, bucketName, objectName) - } - } - - // No legacy IAM available, convert to policy and evaluate - return p.evaluateUsingPolicyConversion(action, bucketName, objectName, principal) -} - -// evaluateUsingPolicyConversion converts legacy action to policy and evaluates -func (p *PolicyBackedIAM) evaluateUsingPolicyConversion(action, bucketName, objectName, principal string) bool { - // For now, use a conservative approach for legacy actions - // In a real implementation, this would integrate with the existing identity system - glog.V(2).Infof("Legacy action evaluation for %s on %s/%s by %s", action, bucketName, objectName, principal) - - // Return false to maintain security until proper legacy integration is implemented - // This ensures no unintended access is granted - return false -} - -// ConvertIdentityToPolicy converts a legacy identity action to an AWS policy -func ConvertIdentityToPolicy(identityActions []string, bucketName string) (*PolicyDocument, error) { - statements := make([]PolicyStatement, 0) - - for _, action := range identityActions { - stmt, err := convertSingleAction(action, bucketName) - if err != nil { - glog.Warningf("Failed to convert action %s: %v", action, err) - continue - } - if stmt != nil { - statements = append(statements, *stmt) - } - } - - if len(statements) == 0 { - return nil, fmt.Errorf("no valid statements generated") - } - - return &PolicyDocument{ - Version: PolicyVersion2012_10_17, - Statement: statements, - }, nil -} - -// convertSingleAction converts a single legacy action to a policy statement -func convertSingleAction(action, bucketName string) (*PolicyStatement, error) { - parts := strings.Split(action, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid action format: %s", action) - } - - actionType := parts[0] - resourcePattern := parts[1] - - var s3Actions []string - var resources []string - - switch actionType { - case "Read": - s3Actions = []string{"s3:GetObject", "s3:GetObjectVersion", "s3:ListBucket"} - if strings.HasSuffix(resourcePattern, "/*") { - // Object-level read access - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{ - fmt.Sprintf("arn:aws:s3:::%s", bucket), - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - } - } else { - // Bucket-level read access - resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)} - } - - case "Write": - s3Actions = []string{"s3:PutObject", "s3:DeleteObject", "s3:PutObjectAcl"} - if strings.HasSuffix(resourcePattern, "/*") { - // Object-level write access - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - // Bucket-level write access - resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)} - } - - case "Admin": - s3Actions = []string{"s3:*"} - resources = []string{ - fmt.Sprintf("arn:aws:s3:::%s", resourcePattern), - fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern), - } - - case "List": - s3Actions = []string{"s3:ListBucket", "s3:ListBucketVersions"} - if strings.HasSuffix(resourcePattern, "/*") { - // Object-level list access - extract bucket from "bucket/prefix/*" pattern - patternWithoutWildcard := strings.TrimSuffix(resourcePattern, "/*") - parts := strings.SplitN(patternWithoutWildcard, "/", 2) - bucket := parts[0] - resources = []string{ - fmt.Sprintf("arn:aws:s3:::%s", bucket), - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - } - } else { - // Bucket-level list access - resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)} - } - - case "Tagging": - s3Actions = []string{"s3:GetObjectTagging", "s3:PutObjectTagging", "s3:DeleteObjectTagging"} - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - - case "BypassGovernanceRetention": - s3Actions = []string{"s3:BypassGovernanceRetention"} - if strings.HasSuffix(resourcePattern, "/*") { - // Object-level bypass governance access - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - // Bucket-level bypass governance access - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - } - - case "GetObjectRetention": - s3Actions = []string{"s3:GetObjectRetention"} - if strings.HasSuffix(resourcePattern, "/*") { - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - } - - case "PutObjectRetention": - s3Actions = []string{"s3:PutObjectRetention"} - if strings.HasSuffix(resourcePattern, "/*") { - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - } - - case "GetObjectLegalHold": - s3Actions = []string{"s3:GetObjectLegalHold"} - if strings.HasSuffix(resourcePattern, "/*") { - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - } - - case "PutObjectLegalHold": - s3Actions = []string{"s3:PutObjectLegalHold"} - if strings.HasSuffix(resourcePattern, "/*") { - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", bucket)} - } else { - resources = []string{fmt.Sprintf("arn:aws:s3:::%s/*", resourcePattern)} - } - - case "GetBucketObjectLockConfiguration": - s3Actions = []string{"s3:GetBucketObjectLockConfiguration"} - resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)} - - case "PutBucketObjectLockConfiguration": - s3Actions = []string{"s3:PutBucketObjectLockConfiguration"} - resources = []string{fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)} - - default: - return nil, fmt.Errorf("unknown action type: %s", actionType) - } - - return &PolicyStatement{ - Effect: PolicyEffectAllow, - Action: NewStringOrStringSlice(s3Actions...), - Resource: NewStringOrStringSlice(resources...), - }, nil -} - -// GetActionMappings returns the mapping of legacy actions to S3 actions -func GetActionMappings() map[string][]string { - return map[string][]string{ - "Read": { - "s3:GetObject", - "s3:GetObjectVersion", - "s3:GetObjectAcl", - "s3:GetObjectVersionAcl", - "s3:GetObjectTagging", - "s3:GetObjectVersionTagging", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - "s3:GetBucketVersioning", - "s3:GetBucketAcl", - "s3:GetBucketCors", - "s3:GetBucketTagging", - "s3:GetBucketNotification", - }, - "Write": { - "s3:PutObject", - "s3:PutObjectAcl", - "s3:PutObjectTagging", - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:DeleteObjectTagging", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - "s3:PutBucketAcl", - "s3:PutBucketCors", - "s3:PutBucketTagging", - "s3:PutBucketNotification", - "s3:PutBucketVersioning", - "s3:DeleteBucketTagging", - "s3:DeleteBucketCors", - }, - "Admin": { - "s3:*", - }, - "List": { - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:ListAllMyBuckets", - }, - "Tagging": { - "s3:GetObjectTagging", - "s3:PutObjectTagging", - "s3:DeleteObjectTagging", - "s3:GetBucketTagging", - "s3:PutBucketTagging", - "s3:DeleteBucketTagging", - }, - "BypassGovernanceRetention": { - "s3:BypassGovernanceRetention", - }, - "GetObjectRetention": { - "s3:GetObjectRetention", - }, - "PutObjectRetention": { - "s3:PutObjectRetention", - }, - "GetObjectLegalHold": { - "s3:GetObjectLegalHold", - }, - "PutObjectLegalHold": { - "s3:PutObjectLegalHold", - }, - "GetBucketObjectLockConfiguration": { - "s3:GetBucketObjectLockConfiguration", - }, - "PutBucketObjectLockConfiguration": { - "s3:PutBucketObjectLockConfiguration", - }, - } -} - -// ValidateActionMapping validates that a legacy action can be mapped to S3 actions -func ValidateActionMapping(action string) error { - mappings := GetActionMappings() - - parts := strings.Split(action, ":") - if len(parts) != 2 { - return fmt.Errorf("invalid action format: %s, expected format: 'ActionType:Resource'", action) - } - - actionType := parts[0] - resource := parts[1] - - if _, exists := mappings[actionType]; !exists { - return fmt.Errorf("unknown action type: %s", actionType) - } - - if resource == "" { - return fmt.Errorf("resource cannot be empty") - } - - return nil -} - -// ConvertLegacyActions converts an array of legacy actions to S3 actions -func ConvertLegacyActions(legacyActions []string) ([]string, error) { - mappings := GetActionMappings() - s3Actions := make([]string, 0) - - for _, legacyAction := range legacyActions { - if err := ValidateActionMapping(legacyAction); err != nil { - return nil, err - } - - parts := strings.Split(legacyAction, ":") - actionType := parts[0] - - if actionType == "Admin" { - // Admin gives all permissions, so we can just return s3:* - return []string{"s3:*"}, nil - } - - if mapped, exists := mappings[actionType]; exists { - s3Actions = append(s3Actions, mapped...) - } - } - - // Remove duplicates - uniqueActions := make([]string, 0) - seen := make(map[string]bool) - for _, action := range s3Actions { - if !seen[action] { - uniqueActions = append(uniqueActions, action) - seen[action] = true - } - } - - return uniqueActions, nil -} - -// GetResourcesFromLegacyAction extracts resources from a legacy action -func GetResourcesFromLegacyAction(legacyAction string) ([]string, error) { - parts := strings.Split(legacyAction, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid action format: %s", legacyAction) - } - - resourcePattern := parts[1] - resources := make([]string, 0) - - if strings.HasSuffix(resourcePattern, "/*") { - // Object-level access - bucket := strings.TrimSuffix(resourcePattern, "/*") - resources = append(resources, fmt.Sprintf("arn:aws:s3:::%s", bucket)) - resources = append(resources, fmt.Sprintf("arn:aws:s3:::%s/*", bucket)) - } else { - // Bucket-level access - resources = append(resources, fmt.Sprintf("arn:aws:s3:::%s", resourcePattern)) - } - - return resources, nil -} - -// CreatePolicyFromLegacyIdentity creates a policy document from legacy identity actions -func CreatePolicyFromLegacyIdentity(identityName string, actions []string) (*PolicyDocument, error) { - statements := make([]PolicyStatement, 0) - - // Group actions by resource pattern - resourceActions := make(map[string][]string) - - for _, action := range actions { - parts := strings.Split(action, ":") - if len(parts) != 2 { - continue - } - - resourcePattern := parts[1] - actionType := parts[0] - - if _, exists := resourceActions[resourcePattern]; !exists { - resourceActions[resourcePattern] = make([]string, 0) - } - resourceActions[resourcePattern] = append(resourceActions[resourcePattern], actionType) - } - - // Create statements for each resource pattern - for resourcePattern, actionTypes := range resourceActions { - s3Actions := make([]string, 0) - - for _, actionType := range actionTypes { - if actionType == "Admin" { - s3Actions = []string{"s3:*"} - break - } - - if mapped, exists := GetActionMappings()[actionType]; exists { - s3Actions = append(s3Actions, mapped...) - } - } - - resources, err := GetResourcesFromLegacyAction(fmt.Sprintf("dummy:%s", resourcePattern)) - if err != nil { - continue - } - - statement := PolicyStatement{ - Sid: fmt.Sprintf("%s-%s", identityName, strings.ReplaceAll(resourcePattern, "/", "-")), - Effect: PolicyEffectAllow, - Action: NewStringOrStringSlice(s3Actions...), - Resource: NewStringOrStringSlice(resources...), - } - - statements = append(statements, statement) - } - - if len(statements) == 0 { - return nil, fmt.Errorf("no valid statements generated for identity %s", identityName) - } - - return &PolicyDocument{ - Version: PolicyVersion2012_10_17, - Statement: statements, - }, nil -} - -// HasPolicyForBucket checks if a bucket has a policy -func (p *PolicyBackedIAM) HasPolicyForBucket(bucketName string) bool { - return p.policyEngine.HasPolicyForBucket(bucketName) -} - -// GetPolicyEngine returns the underlying policy engine -func (p *PolicyBackedIAM) GetPolicyEngine() *PolicyEngine { - return p.policyEngine -} diff --git a/weed/s3api/policy_engine/types.go b/weed/s3api/policy_engine/types.go deleted file mode 100644 index d68b1f297..000000000 --- a/weed/s3api/policy_engine/types.go +++ /dev/null @@ -1,449 +0,0 @@ -package policy_engine - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// Policy Engine Types -// -// This package provides enhanced AWS S3-compatible policy types with improved type safety. -// -// MIGRATION COMPLETE: -// This is now the unified PolicyDocument type used throughout the SeaweedFS codebase. -// The previous duplicate PolicyDocument types in iamapi and credential packages have -// been migrated to use these enhanced types, providing: -// - Principal specifications -// - Complex conditions (IP, time, string patterns, etc.) -// - Flexible string/array types with proper JSON marshaling -// - Policy compilation for performance -// -// All policy operations now use this single, consistent type definition. - -// Constants for policy validation -const ( - // PolicyVersion2012_10_17 is the standard AWS policy version - PolicyVersion2012_10_17 = "2012-10-17" -) - -// StringOrStringSlice represents a value that can be either a string or []string -type StringOrStringSlice struct { - values []string -} - -// UnmarshalJSON implements json.Unmarshaler for StringOrStringSlice -func (s *StringOrStringSlice) UnmarshalJSON(data []byte) error { - // Try unmarshaling as string first - var str string - if err := json.Unmarshal(data, &str); err == nil { - s.values = []string{str} - return nil - } - - // Try unmarshaling as []string - var strs []string - if err := json.Unmarshal(data, &strs); err == nil { - s.values = strs - return nil - } - - return fmt.Errorf("value must be string or []string") -} - -// MarshalJSON implements json.Marshaler for StringOrStringSlice -func (s StringOrStringSlice) MarshalJSON() ([]byte, error) { - if len(s.values) == 1 { - return json.Marshal(s.values[0]) - } - return json.Marshal(s.values) -} - -// Strings returns the slice of strings -func (s StringOrStringSlice) Strings() []string { - return s.values -} - -// NewStringOrStringSlice creates a new StringOrStringSlice from strings -func NewStringOrStringSlice(values ...string) StringOrStringSlice { - return StringOrStringSlice{values: values} -} - -// PolicyConditions represents policy conditions with proper typing -type PolicyConditions map[string]map[string]StringOrStringSlice - -// PolicyDocument represents an AWS S3 bucket policy document -type PolicyDocument struct { - Version string `json:"Version"` - Statement []PolicyStatement `json:"Statement"` -} - -// PolicyStatement represents a single policy statement -type PolicyStatement struct { - Sid string `json:"Sid,omitempty"` - Effect PolicyEffect `json:"Effect"` - Principal *StringOrStringSlice `json:"Principal,omitempty"` - Action StringOrStringSlice `json:"Action"` - Resource StringOrStringSlice `json:"Resource"` - Condition PolicyConditions `json:"Condition,omitempty"` -} - -// PolicyEffect represents Allow or Deny -type PolicyEffect string - -const ( - PolicyEffectAllow PolicyEffect = "Allow" - PolicyEffectDeny PolicyEffect = "Deny" -) - -// PolicyEvaluationArgs contains the arguments for policy evaluation -type PolicyEvaluationArgs struct { - Action string - Resource string - Principal string - Conditions map[string][]string -} - -// PolicyCache for caching compiled policies -type PolicyCache struct { - policies map[string]*CompiledPolicy - lastUpdate time.Time -} - -// CompiledPolicy represents a policy that has been compiled for efficient evaluation -type CompiledPolicy struct { - Document *PolicyDocument - Statements []CompiledStatement -} - -// CompiledStatement represents a compiled policy statement -type CompiledStatement struct { - Statement *PolicyStatement - ActionMatchers []*WildcardMatcher - ResourceMatchers []*WildcardMatcher - PrincipalMatchers []*WildcardMatcher - // Keep regex patterns for backward compatibility - ActionPatterns []*regexp.Regexp - ResourcePatterns []*regexp.Regexp - PrincipalPatterns []*regexp.Regexp -} - -// NewPolicyCache creates a new policy cache -func NewPolicyCache() *PolicyCache { - return &PolicyCache{ - policies: make(map[string]*CompiledPolicy), - } -} - -// ValidatePolicy validates a policy document -func ValidatePolicy(policyDoc *PolicyDocument) error { - if policyDoc.Version != PolicyVersion2012_10_17 { - return fmt.Errorf("unsupported policy version: %s", policyDoc.Version) - } - - if len(policyDoc.Statement) == 0 { - return fmt.Errorf("policy must contain at least one statement") - } - - for i, stmt := range policyDoc.Statement { - if err := validateStatement(&stmt); err != nil { - return fmt.Errorf("invalid statement %d: %v", i, err) - } - } - - return nil -} - -// validateStatement validates a single policy statement -func validateStatement(stmt *PolicyStatement) error { - if stmt.Effect != PolicyEffectAllow && stmt.Effect != PolicyEffectDeny { - return fmt.Errorf("invalid effect: %s", stmt.Effect) - } - - if len(stmt.Action.Strings()) == 0 { - return fmt.Errorf("action is required") - } - - if len(stmt.Resource.Strings()) == 0 { - return fmt.Errorf("resource is required") - } - - return nil -} - -// ParsePolicy parses a policy JSON string -func ParsePolicy(policyJSON string) (*PolicyDocument, error) { - var policy PolicyDocument - if err := json.Unmarshal([]byte(policyJSON), &policy); err != nil { - return nil, fmt.Errorf("failed to parse policy JSON: %w", err) - } - - if err := ValidatePolicy(&policy); err != nil { - return nil, fmt.Errorf("invalid policy: %w", err) - } - - return &policy, nil -} - -// CompilePolicy compiles a policy for efficient evaluation -func CompilePolicy(policy *PolicyDocument) (*CompiledPolicy, error) { - compiled := &CompiledPolicy{ - Document: policy, - Statements: make([]CompiledStatement, len(policy.Statement)), - } - - for i, stmt := range policy.Statement { - compiledStmt, err := compileStatement(&stmt) - if err != nil { - return nil, fmt.Errorf("failed to compile statement %d: %v", i, err) - } - compiled.Statements[i] = *compiledStmt - } - - return compiled, nil -} - -// compileStatement compiles a single policy statement -func compileStatement(stmt *PolicyStatement) (*CompiledStatement, error) { - compiled := &CompiledStatement{ - Statement: stmt, - } - - // Compile action patterns and matchers - for _, action := range stmt.Action.Strings() { - pattern, err := compilePattern(action) - if err != nil { - return nil, fmt.Errorf("failed to compile action pattern %s: %v", action, err) - } - compiled.ActionPatterns = append(compiled.ActionPatterns, pattern) - - matcher, err := NewWildcardMatcher(action) - if err != nil { - return nil, fmt.Errorf("failed to create action matcher %s: %v", action, err) - } - compiled.ActionMatchers = append(compiled.ActionMatchers, matcher) - } - - // Compile resource patterns and matchers - for _, resource := range stmt.Resource.Strings() { - pattern, err := compilePattern(resource) - if err != nil { - return nil, fmt.Errorf("failed to compile resource pattern %s: %v", resource, err) - } - compiled.ResourcePatterns = append(compiled.ResourcePatterns, pattern) - - matcher, err := NewWildcardMatcher(resource) - if err != nil { - return nil, fmt.Errorf("failed to create resource matcher %s: %v", resource, err) - } - compiled.ResourceMatchers = append(compiled.ResourceMatchers, matcher) - } - - // Compile principal patterns and matchers if present - if stmt.Principal != nil && len(stmt.Principal.Strings()) > 0 { - for _, principal := range stmt.Principal.Strings() { - pattern, err := compilePattern(principal) - if err != nil { - return nil, fmt.Errorf("failed to compile principal pattern %s: %v", principal, err) - } - compiled.PrincipalPatterns = append(compiled.PrincipalPatterns, pattern) - - matcher, err := NewWildcardMatcher(principal) - if err != nil { - return nil, fmt.Errorf("failed to create principal matcher %s: %v", principal, err) - } - compiled.PrincipalMatchers = append(compiled.PrincipalMatchers, matcher) - } - } - - return compiled, nil -} - -// compilePattern compiles a wildcard pattern to regex -func compilePattern(pattern string) (*regexp.Regexp, error) { - return CompileWildcardPattern(pattern) -} - -// normalizeToStringSlice converts various types to string slice - kept for backward compatibility -func normalizeToStringSlice(value interface{}) []string { - result, err := normalizeToStringSliceWithError(value) - if err != nil { - glog.Warningf("unexpected type for policy value: %T, error: %v", value, err) - return []string{fmt.Sprintf("%v", value)} - } - return result -} - -// normalizeToStringSliceWithError converts various types to string slice with proper error handling -func normalizeToStringSliceWithError(value interface{}) ([]string, error) { - switch v := value.(type) { - case string: - return []string{v}, nil - case []string: - return v, nil - case []interface{}: - result := make([]string, len(v)) - for i, item := range v { - result[i] = fmt.Sprintf("%v", item) - } - return result, nil - case StringOrStringSlice: - return v.Strings(), nil - default: - return nil, fmt.Errorf("unexpected type for policy value: %T", v) - } -} - -// GetBucketFromResource extracts bucket name from resource ARN -func GetBucketFromResource(resource string) string { - // Handle ARN format: arn:aws:s3:::bucket-name/object-path - if strings.HasPrefix(resource, "arn:aws:s3:::") { - parts := strings.SplitN(resource[13:], "/", 2) - return parts[0] - } - return "" -} - -// IsObjectResource checks if resource refers to objects -func IsObjectResource(resource string) bool { - return strings.Contains(resource, "/") -} - -// S3Actions contains common S3 actions -var S3Actions = map[string]string{ - "GetObject": "s3:GetObject", - "PutObject": "s3:PutObject", - "DeleteObject": "s3:DeleteObject", - "GetObjectVersion": "s3:GetObjectVersion", - "DeleteObjectVersion": "s3:DeleteObjectVersion", - "ListBucket": "s3:ListBucket", - "ListBucketVersions": "s3:ListBucketVersions", - "GetBucketLocation": "s3:GetBucketLocation", - "GetBucketVersioning": "s3:GetBucketVersioning", - "PutBucketVersioning": "s3:PutBucketVersioning", - "GetBucketAcl": "s3:GetBucketAcl", - "PutBucketAcl": "s3:PutBucketAcl", - "GetObjectAcl": "s3:GetObjectAcl", - "PutObjectAcl": "s3:PutObjectAcl", - "GetBucketPolicy": "s3:GetBucketPolicy", - "PutBucketPolicy": "s3:PutBucketPolicy", - "DeleteBucketPolicy": "s3:DeleteBucketPolicy", - "GetBucketCors": "s3:GetBucketCors", - "PutBucketCors": "s3:PutBucketCors", - "DeleteBucketCors": "s3:DeleteBucketCors", - "GetBucketNotification": "s3:GetBucketNotification", - "PutBucketNotification": "s3:PutBucketNotification", - "GetBucketTagging": "s3:GetBucketTagging", - "PutBucketTagging": "s3:PutBucketTagging", - "DeleteBucketTagging": "s3:DeleteBucketTagging", - "GetObjectTagging": "s3:GetObjectTagging", - "PutObjectTagging": "s3:PutObjectTagging", - "DeleteObjectTagging": "s3:DeleteObjectTagging", - "ListMultipartUploads": "s3:ListMultipartUploads", - "AbortMultipartUpload": "s3:AbortMultipartUpload", - "ListParts": "s3:ListParts", - "GetObjectRetention": "s3:GetObjectRetention", - "PutObjectRetention": "s3:PutObjectRetention", - "GetObjectLegalHold": "s3:GetObjectLegalHold", - "PutObjectLegalHold": "s3:PutObjectLegalHold", - "GetBucketObjectLockConfiguration": "s3:GetBucketObjectLockConfiguration", - "PutBucketObjectLockConfiguration": "s3:PutBucketObjectLockConfiguration", - "BypassGovernanceRetention": "s3:BypassGovernanceRetention", -} - -// MatchesAction checks if an action matches any of the compiled action matchers -func (cs *CompiledStatement) MatchesAction(action string) bool { - for _, matcher := range cs.ActionMatchers { - if matcher.Match(action) { - return true - } - } - return false -} - -// MatchesResource checks if a resource matches any of the compiled resource matchers -func (cs *CompiledStatement) MatchesResource(resource string) bool { - for _, matcher := range cs.ResourceMatchers { - if matcher.Match(resource) { - return true - } - } - return false -} - -// MatchesPrincipal checks if a principal matches any of the compiled principal matchers -func (cs *CompiledStatement) MatchesPrincipal(principal string) bool { - // If no principals specified, match all - if len(cs.PrincipalMatchers) == 0 { - return true - } - - for _, matcher := range cs.PrincipalMatchers { - if matcher.Match(principal) { - return true - } - } - return false -} - -// EvaluateStatement evaluates a compiled statement against the given arguments -func (cs *CompiledStatement) EvaluateStatement(args *PolicyEvaluationArgs) bool { - // Check if action matches - if !cs.MatchesAction(args.Action) { - return false - } - - // Check if resource matches - if !cs.MatchesResource(args.Resource) { - return false - } - - // Check if principal matches - if !cs.MatchesPrincipal(args.Principal) { - return false - } - - return true -} - -// EvaluatePolicy evaluates a compiled policy against the given arguments -func (cp *CompiledPolicy) EvaluatePolicy(args *PolicyEvaluationArgs) (bool, PolicyEffect) { - var explicitAllow, explicitDeny bool - - // Evaluate each statement - for _, stmt := range cp.Statements { - if stmt.EvaluateStatement(args) { - if stmt.Statement.Effect == PolicyEffectAllow { - explicitAllow = true - } else if stmt.Statement.Effect == PolicyEffectDeny { - explicitDeny = true - } - } - } - - // AWS policy evaluation logic: explicit deny overrides allow - if explicitDeny { - return false, PolicyEffectDeny - } - if explicitAllow { - return true, PolicyEffectAllow - } - - // No matching statements - implicit deny - return false, PolicyEffectDeny -} - -// FastMatchesWildcard uses cached WildcardMatcher for performance -func FastMatchesWildcard(pattern, str string) bool { - matcher, err := GetCachedWildcardMatcher(pattern) - if err != nil { - glog.Errorf("Error getting cached WildcardMatcher for pattern %s: %v", pattern, err) - // Fall back to the original implementation - return MatchesWildcard(pattern, str) - } - return matcher.Match(str) -} diff --git a/weed/s3api/policy_engine/wildcard_matcher.go b/weed/s3api/policy_engine/wildcard_matcher.go deleted file mode 100644 index 7fd36abf9..000000000 --- a/weed/s3api/policy_engine/wildcard_matcher.go +++ /dev/null @@ -1,253 +0,0 @@ -package policy_engine - -import ( - "regexp" - "strings" - "sync" - - "github.com/seaweedfs/seaweedfs/weed/glog" -) - -// WildcardMatcher provides unified wildcard matching functionality -type WildcardMatcher struct { - // Use regex for complex patterns with ? wildcards - // Use string manipulation for simple * patterns (better performance) - useRegex bool - regex *regexp.Regexp - pattern string -} - -// WildcardMatcherCache provides caching for WildcardMatcher instances -type WildcardMatcherCache struct { - mu sync.RWMutex - matchers map[string]*WildcardMatcher - maxSize int - accessOrder []string // For LRU eviction -} - -// NewWildcardMatcherCache creates a new WildcardMatcherCache with a configurable maxSize -func NewWildcardMatcherCache(maxSize int) *WildcardMatcherCache { - if maxSize <= 0 { - maxSize = 1000 // Default value - } - return &WildcardMatcherCache{ - matchers: make(map[string]*WildcardMatcher), - maxSize: maxSize, - } -} - -// Global cache instance -var wildcardMatcherCache = NewWildcardMatcherCache(1000) // Default maxSize - -// GetCachedWildcardMatcher gets or creates a cached WildcardMatcher for the given pattern -func GetCachedWildcardMatcher(pattern string) (*WildcardMatcher, error) { - // Fast path: check if already in cache - wildcardMatcherCache.mu.RLock() - if matcher, exists := wildcardMatcherCache.matchers[pattern]; exists { - wildcardMatcherCache.mu.RUnlock() - wildcardMatcherCache.updateAccessOrder(pattern) - return matcher, nil - } - wildcardMatcherCache.mu.RUnlock() - - // Slow path: create new matcher and cache it - wildcardMatcherCache.mu.Lock() - defer wildcardMatcherCache.mu.Unlock() - - // Double-check after acquiring write lock - if matcher, exists := wildcardMatcherCache.matchers[pattern]; exists { - wildcardMatcherCache.updateAccessOrderLocked(pattern) - return matcher, nil - } - - // Create new matcher - matcher, err := NewWildcardMatcher(pattern) - if err != nil { - return nil, err - } - - // Evict old entries if cache is full - if len(wildcardMatcherCache.matchers) >= wildcardMatcherCache.maxSize { - wildcardMatcherCache.evictLeastRecentlyUsed() - } - - // Cache it - wildcardMatcherCache.matchers[pattern] = matcher - wildcardMatcherCache.accessOrder = append(wildcardMatcherCache.accessOrder, pattern) - return matcher, nil -} - -// updateAccessOrder updates the access order for LRU eviction (with read lock) -func (c *WildcardMatcherCache) updateAccessOrder(pattern string) { - c.mu.Lock() - defer c.mu.Unlock() - c.updateAccessOrderLocked(pattern) -} - -// updateAccessOrderLocked updates the access order for LRU eviction (without locking) -func (c *WildcardMatcherCache) updateAccessOrderLocked(pattern string) { - // Remove pattern from its current position - for i, p := range c.accessOrder { - if p == pattern { - c.accessOrder = append(c.accessOrder[:i], c.accessOrder[i+1:]...) - break - } - } - // Add pattern to the end (most recently used) - c.accessOrder = append(c.accessOrder, pattern) -} - -// evictLeastRecentlyUsed removes the least recently used pattern from the cache -func (c *WildcardMatcherCache) evictLeastRecentlyUsed() { - if len(c.accessOrder) == 0 { - return - } - - // Remove the least recently used pattern (first in the list) - lruPattern := c.accessOrder[0] - c.accessOrder = c.accessOrder[1:] - delete(c.matchers, lruPattern) -} - -// ClearCache clears all cached patterns (useful for testing) -func (c *WildcardMatcherCache) ClearCache() { - c.mu.Lock() - defer c.mu.Unlock() - c.matchers = make(map[string]*WildcardMatcher) - c.accessOrder = c.accessOrder[:0] -} - -// GetCacheStats returns cache statistics -func (c *WildcardMatcherCache) GetCacheStats() (size int, maxSize int) { - c.mu.RLock() - defer c.mu.RUnlock() - return len(c.matchers), c.maxSize -} - -// NewWildcardMatcher creates a new wildcard matcher for the given pattern -func NewWildcardMatcher(pattern string) (*WildcardMatcher, error) { - matcher := &WildcardMatcher{ - pattern: pattern, - } - - // Determine if we need regex (contains ? wildcards) - if strings.Contains(pattern, "?") { - matcher.useRegex = true - regex, err := compileWildcardPattern(pattern) - if err != nil { - return nil, err - } - matcher.regex = regex - } else { - matcher.useRegex = false - } - - return matcher, nil -} - -// Match checks if a string matches the wildcard pattern -func (m *WildcardMatcher) Match(str string) bool { - if m.useRegex { - return m.regex.MatchString(str) - } - return matchWildcardString(m.pattern, str) -} - -// MatchesWildcard provides a simple function interface for wildcard matching -// This function consolidates the logic from the previous separate implementations -func MatchesWildcard(pattern, str string) bool { - // Handle simple cases first - if pattern == "*" { - return true - } - if pattern == str { - return true - } - - // Use regex for patterns with ? wildcards, string manipulation for * only - if strings.Contains(pattern, "?") { - return matchWildcardRegex(pattern, str) - } - return matchWildcardString(pattern, str) -} - -// CompileWildcardPattern converts a wildcard pattern to a compiled regex -// This replaces the previous compilePattern function -func CompileWildcardPattern(pattern string) (*regexp.Regexp, error) { - return compileWildcardPattern(pattern) -} - -// matchWildcardString uses string manipulation for * wildcards only (more efficient) -func matchWildcardString(pattern, str string) bool { - // Handle simple cases - if pattern == "*" { - return true - } - if pattern == str { - return true - } - - // Split pattern by wildcards - parts := strings.Split(pattern, "*") - if len(parts) == 1 { - // No wildcards, exact match - return pattern == str - } - - // Check if string starts with first part - if len(parts[0]) > 0 && !strings.HasPrefix(str, parts[0]) { - return false - } - - // Check if string ends with last part - if len(parts[len(parts)-1]) > 0 && !strings.HasSuffix(str, parts[len(parts)-1]) { - return false - } - - // Check middle parts - searchStr := str - if len(parts[0]) > 0 { - searchStr = searchStr[len(parts[0]):] - } - if len(parts[len(parts)-1]) > 0 { - searchStr = searchStr[:len(searchStr)-len(parts[len(parts)-1])] - } - - for i := 1; i < len(parts)-1; i++ { - if len(parts[i]) > 0 { - index := strings.Index(searchStr, parts[i]) - if index == -1 { - return false - } - searchStr = searchStr[index+len(parts[i]):] - } - } - - return true -} - -// matchWildcardRegex uses WildcardMatcher for patterns with ? wildcards -func matchWildcardRegex(pattern, str string) bool { - matcher, err := GetCachedWildcardMatcher(pattern) - if err != nil { - glog.Errorf("Error getting WildcardMatcher for pattern %s: %v. Falling back to matchWildcardString.", pattern, err) - // Fallback to matchWildcardString - return matchWildcardString(pattern, str) - } - return matcher.Match(str) -} - -// compileWildcardPattern converts a wildcard pattern to regex -func compileWildcardPattern(pattern string) (*regexp.Regexp, error) { - // Escape special regex characters except * and ? - escaped := regexp.QuoteMeta(pattern) - - // Replace escaped wildcards with regex equivalents - escaped = strings.ReplaceAll(escaped, `\*`, `.*`) - escaped = strings.ReplaceAll(escaped, `\?`, `.`) - - // Anchor the pattern - escaped = "^" + escaped + "$" - - return regexp.Compile(escaped) -} diff --git a/weed/s3api/policy_engine/wildcard_matcher_test.go b/weed/s3api/policy_engine/wildcard_matcher_test.go deleted file mode 100644 index 43e16284e..000000000 --- a/weed/s3api/policy_engine/wildcard_matcher_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package policy_engine - -import ( - "testing" -) - -func TestMatchesWildcard(t *testing.T) { - tests := []struct { - name string - pattern string - str string - expected bool - }{ - // Basic functionality tests - { - name: "Exact match", - pattern: "test", - str: "test", - expected: true, - }, - { - name: "Single wildcard", - pattern: "*", - str: "anything", - expected: true, - }, - { - name: "Empty string with wildcard", - pattern: "*", - str: "", - expected: true, - }, - - // Star (*) wildcard tests - { - name: "Prefix wildcard", - pattern: "test*", - str: "test123", - expected: true, - }, - { - name: "Suffix wildcard", - pattern: "*test", - str: "123test", - expected: true, - }, - { - name: "Middle wildcard", - pattern: "test*123", - str: "testABC123", - expected: true, - }, - { - name: "Multiple wildcards", - pattern: "test*abc*123", - str: "testXYZabcDEF123", - expected: true, - }, - { - name: "No match", - pattern: "test*", - str: "other", - expected: false, - }, - - // Question mark (?) wildcard tests - { - name: "Single question mark", - pattern: "test?", - str: "test1", - expected: true, - }, - { - name: "Multiple question marks", - pattern: "test??", - str: "test12", - expected: true, - }, - { - name: "Question mark no match", - pattern: "test?", - str: "test12", - expected: false, - }, - { - name: "Mixed wildcards", - pattern: "test*abc?def", - str: "testXYZabc1def", - expected: true, - }, - - // Edge cases - { - name: "Empty pattern", - pattern: "", - str: "", - expected: true, - }, - { - name: "Empty pattern with string", - pattern: "", - str: "test", - expected: false, - }, - { - name: "Pattern with string empty", - pattern: "test", - str: "", - expected: false, - }, - - // Special characters - { - name: "Pattern with regex special chars", - pattern: "test[abc]", - str: "test[abc]", - expected: true, - }, - { - name: "Pattern with dots", - pattern: "test.txt", - str: "test.txt", - expected: true, - }, - { - name: "Pattern with dots and wildcard", - pattern: "*.txt", - str: "test.txt", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := MatchesWildcard(tt.pattern, tt.str) - if result != tt.expected { - t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, tt.str, tt.expected, result) - } - }) - } -} - -func TestWildcardMatcher(t *testing.T) { - tests := []struct { - name string - pattern string - strings []string - expected []bool - }{ - { - name: "Simple star pattern", - pattern: "test*", - strings: []string{"test", "test123", "testing", "other"}, - expected: []bool{true, true, true, false}, - }, - { - name: "Question mark pattern", - pattern: "test?", - strings: []string{"test1", "test2", "test", "test12"}, - expected: []bool{true, true, false, false}, - }, - { - name: "Mixed pattern", - pattern: "*.txt", - strings: []string{"file.txt", "test.txt", "file.doc", "txt"}, - expected: []bool{true, true, false, false}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - matcher, err := NewWildcardMatcher(tt.pattern) - if err != nil { - t.Fatalf("Failed to create matcher: %v", err) - } - - for i, str := range tt.strings { - result := matcher.Match(str) - if result != tt.expected[i] { - t.Errorf("Pattern %s against %s: expected %v, got %v", tt.pattern, str, tt.expected[i], result) - } - } - }) - } -} - -func TestCompileWildcardPattern(t *testing.T) { - tests := []struct { - name string - pattern string - input string - want bool - }{ - {"Star wildcard", "s3:Get*", "s3:GetObject", true}, - {"Question mark wildcard", "s3:Get?bject", "s3:GetObject", true}, - {"Mixed wildcards", "s3:*Object*", "s3:GetObjectAcl", true}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - regex, err := CompileWildcardPattern(tt.pattern) - if err != nil { - t.Errorf("CompileWildcardPattern() error = %v", err) - return - } - got := regex.MatchString(tt.input) - if got != tt.want { - t.Errorf("CompileWildcardPattern() = %v, want %v", got, tt.want) - } - }) - } -} - -// BenchmarkWildcardMatchingPerformance demonstrates the performance benefits of caching -func BenchmarkWildcardMatchingPerformance(b *testing.B) { - patterns := []string{ - "s3:Get*", - "s3:Put*", - "s3:Delete*", - "s3:List*", - "arn:aws:s3:::bucket/*", - "arn:aws:s3:::bucket/prefix*", - "user:*", - "user:admin-*", - } - - inputs := []string{ - "s3:GetObject", - "s3:PutObject", - "s3:DeleteObject", - "s3:ListBucket", - "arn:aws:s3:::bucket/file.txt", - "arn:aws:s3:::bucket/prefix/file.txt", - "user:admin", - "user:admin-john", - } - - b.Run("WithoutCache", func(b *testing.B) { - for i := 0; i < b.N; i++ { - for _, pattern := range patterns { - for _, input := range inputs { - MatchesWildcard(pattern, input) - } - } - } - }) - - b.Run("WithCache", func(b *testing.B) { - for i := 0; i < b.N; i++ { - for _, pattern := range patterns { - for _, input := range inputs { - FastMatchesWildcard(pattern, input) - } - } - } - }) -} - -// BenchmarkWildcardMatcherReuse demonstrates the performance benefits of reusing WildcardMatcher instances -func BenchmarkWildcardMatcherReuse(b *testing.B) { - pattern := "s3:Get*" - input := "s3:GetObject" - - b.Run("NewMatcherEveryTime", func(b *testing.B) { - for i := 0; i < b.N; i++ { - matcher, _ := NewWildcardMatcher(pattern) - matcher.Match(input) - } - }) - - b.Run("CachedMatcher", func(b *testing.B) { - for i := 0; i < b.N; i++ { - matcher, _ := GetCachedWildcardMatcher(pattern) - matcher.Match(input) - } - }) -} - -// TestWildcardMatcherCaching verifies that caching works correctly -func TestWildcardMatcherCaching(t *testing.T) { - pattern := "s3:Get*" - - // Get the first matcher - matcher1, err := GetCachedWildcardMatcher(pattern) - if err != nil { - t.Fatalf("Failed to get cached matcher: %v", err) - } - - // Get the second matcher - should be the same instance - matcher2, err := GetCachedWildcardMatcher(pattern) - if err != nil { - t.Fatalf("Failed to get cached matcher: %v", err) - } - - // Check that they're the same instance (same pointer) - if matcher1 != matcher2 { - t.Errorf("Expected same matcher instance, got different instances") - } - - // Test that both matchers work correctly - testInput := "s3:GetObject" - if !matcher1.Match(testInput) { - t.Errorf("First matcher failed to match %s", testInput) - } - if !matcher2.Match(testInput) { - t.Errorf("Second matcher failed to match %s", testInput) - } -} - -// TestFastMatchesWildcard verifies that the fast matching function works correctly -func TestFastMatchesWildcard(t *testing.T) { - tests := []struct { - pattern string - input string - want bool - }{ - {"s3:Get*", "s3:GetObject", true}, - {"s3:Put*", "s3:GetObject", false}, - {"arn:aws:s3:::bucket/*", "arn:aws:s3:::bucket/file.txt", true}, - {"user:admin-*", "user:admin-john", true}, - {"user:admin-*", "user:guest-john", false}, - } - - for _, tt := range tests { - t.Run(tt.pattern+"_"+tt.input, func(t *testing.T) { - got := FastMatchesWildcard(tt.pattern, tt.input) - if got != tt.want { - t.Errorf("FastMatchesWildcard(%q, %q) = %v, want %v", tt.pattern, tt.input, got, tt.want) - } - }) - } -} - -// TestWildcardMatcherCacheBounding tests the bounded cache functionality -func TestWildcardMatcherCacheBounding(t *testing.T) { - // Clear cache before test - wildcardMatcherCache.ClearCache() - - // Get original max size - originalMaxSize := wildcardMatcherCache.maxSize - - // Set a small max size for testing - wildcardMatcherCache.maxSize = 3 - defer func() { - wildcardMatcherCache.maxSize = originalMaxSize - wildcardMatcherCache.ClearCache() - }() - - // Add patterns up to max size - patterns := []string{"pattern1", "pattern2", "pattern3"} - for _, pattern := range patterns { - _, err := GetCachedWildcardMatcher(pattern) - if err != nil { - t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err) - } - } - - // Verify cache size - size, maxSize := wildcardMatcherCache.GetCacheStats() - if size != 3 { - t.Errorf("Expected cache size 3, got %d", size) - } - if maxSize != 3 { - t.Errorf("Expected max size 3, got %d", maxSize) - } - - // Add another pattern, should evict the least recently used - _, err := GetCachedWildcardMatcher("pattern4") - if err != nil { - t.Fatalf("Failed to get cached matcher for pattern4: %v", err) - } - - // Cache should still be at max size - size, _ = wildcardMatcherCache.GetCacheStats() - if size != 3 { - t.Errorf("Expected cache size 3 after eviction, got %d", size) - } - - // The first pattern should have been evicted - wildcardMatcherCache.mu.RLock() - if _, exists := wildcardMatcherCache.matchers["pattern1"]; exists { - t.Errorf("Expected pattern1 to be evicted, but it still exists") - } - if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists { - t.Errorf("Expected pattern4 to be in cache, but it doesn't exist") - } - wildcardMatcherCache.mu.RUnlock() -} - -// TestWildcardMatcherCacheLRU tests the LRU eviction policy -func TestWildcardMatcherCacheLRU(t *testing.T) { - // Clear cache before test - wildcardMatcherCache.ClearCache() - - // Get original max size - originalMaxSize := wildcardMatcherCache.maxSize - - // Set a small max size for testing - wildcardMatcherCache.maxSize = 3 - defer func() { - wildcardMatcherCache.maxSize = originalMaxSize - wildcardMatcherCache.ClearCache() - }() - - // Add patterns to fill cache - patterns := []string{"pattern1", "pattern2", "pattern3"} - for _, pattern := range patterns { - _, err := GetCachedWildcardMatcher(pattern) - if err != nil { - t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err) - } - } - - // Access pattern1 to make it most recently used - _, err := GetCachedWildcardMatcher("pattern1") - if err != nil { - t.Fatalf("Failed to access pattern1: %v", err) - } - - // Add another pattern, should evict pattern2 (now least recently used) - _, err = GetCachedWildcardMatcher("pattern4") - if err != nil { - t.Fatalf("Failed to get cached matcher for pattern4: %v", err) - } - - // pattern1 should still be in cache (was accessed recently) - // pattern2 should be evicted (was least recently used) - wildcardMatcherCache.mu.RLock() - if _, exists := wildcardMatcherCache.matchers["pattern1"]; !exists { - t.Errorf("Expected pattern1 to remain in cache (most recently used)") - } - if _, exists := wildcardMatcherCache.matchers["pattern2"]; exists { - t.Errorf("Expected pattern2 to be evicted (least recently used)") - } - if _, exists := wildcardMatcherCache.matchers["pattern3"]; !exists { - t.Errorf("Expected pattern3 to remain in cache") - } - if _, exists := wildcardMatcherCache.matchers["pattern4"]; !exists { - t.Errorf("Expected pattern4 to be in cache") - } - wildcardMatcherCache.mu.RUnlock() -} - -// TestWildcardMatcherCacheClear tests the cache clearing functionality -func TestWildcardMatcherCacheClear(t *testing.T) { - // Add some patterns to cache - patterns := []string{"pattern1", "pattern2", "pattern3"} - for _, pattern := range patterns { - _, err := GetCachedWildcardMatcher(pattern) - if err != nil { - t.Fatalf("Failed to get cached matcher for %s: %v", pattern, err) - } - } - - // Verify cache has patterns - size, _ := wildcardMatcherCache.GetCacheStats() - if size == 0 { - t.Errorf("Expected cache to have patterns before clearing") - } - - // Clear cache - wildcardMatcherCache.ClearCache() - - // Verify cache is empty - size, _ = wildcardMatcherCache.GetCacheStats() - if size != 0 { - t.Errorf("Expected cache to be empty after clearing, got size %d", size) - } -} diff --git a/weed/s3api/s3_bucket_encryption.go b/weed/s3api/s3_bucket_encryption.go deleted file mode 100644 index 3166fb81f..000000000 --- a/weed/s3api/s3_bucket_encryption.go +++ /dev/null @@ -1,346 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// ServerSideEncryptionConfiguration represents the bucket encryption configuration -type ServerSideEncryptionConfiguration struct { - XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` - Rules []ServerSideEncryptionRule `xml:"Rule"` -} - -// ServerSideEncryptionRule represents a single encryption rule -type ServerSideEncryptionRule struct { - ApplyServerSideEncryptionByDefault ApplyServerSideEncryptionByDefault `xml:"ApplyServerSideEncryptionByDefault"` - BucketKeyEnabled *bool `xml:"BucketKeyEnabled,omitempty"` -} - -// ApplyServerSideEncryptionByDefault specifies the default encryption settings -type ApplyServerSideEncryptionByDefault struct { - SSEAlgorithm string `xml:"SSEAlgorithm"` - KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` -} - -// encryptionConfigToProto converts EncryptionConfiguration to protobuf format -func encryptionConfigToProto(config *s3_pb.EncryptionConfiguration) *s3_pb.EncryptionConfiguration { - if config == nil { - return nil - } - return &s3_pb.EncryptionConfiguration{ - SseAlgorithm: config.SseAlgorithm, - KmsKeyId: config.KmsKeyId, - BucketKeyEnabled: config.BucketKeyEnabled, - } -} - -// encryptionConfigFromXML converts XML ServerSideEncryptionConfiguration to protobuf -func encryptionConfigFromXML(xmlConfig *ServerSideEncryptionConfiguration) *s3_pb.EncryptionConfiguration { - if xmlConfig == nil || len(xmlConfig.Rules) == 0 { - return nil - } - - rule := xmlConfig.Rules[0] // AWS S3 supports only one rule - return &s3_pb.EncryptionConfiguration{ - SseAlgorithm: rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm, - KmsKeyId: rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID, - BucketKeyEnabled: rule.BucketKeyEnabled != nil && *rule.BucketKeyEnabled, - } -} - -// encryptionConfigToXML converts protobuf EncryptionConfiguration to XML -func encryptionConfigToXML(config *s3_pb.EncryptionConfiguration) *ServerSideEncryptionConfiguration { - if config == nil { - return nil - } - - return &ServerSideEncryptionConfiguration{ - Rules: []ServerSideEncryptionRule{ - { - ApplyServerSideEncryptionByDefault: ApplyServerSideEncryptionByDefault{ - SSEAlgorithm: config.SseAlgorithm, - KMSMasterKeyID: config.KmsKeyId, - }, - BucketKeyEnabled: &config.BucketKeyEnabled, - }, - }, - } -} - -// Default encryption algorithms -const ( - EncryptionTypeAES256 = "AES256" - EncryptionTypeKMS = "aws:kms" -) - -// GetBucketEncryptionHandler handles GET bucket encryption requests -func (s3a *S3ApiServer) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - // Load bucket encryption configuration - config, errCode := s3a.getEncryptionConfiguration(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucketEncryptionConfiguration { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucketEncryptionConfiguration) - return - } - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Convert protobuf config to S3 XML response - response := encryptionConfigToXML(config) - if response == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucketEncryptionConfiguration) - return - } - - w.Header().Set("Content-Type", "application/xml") - if err := xml.NewEncoder(w).Encode(response); err != nil { - glog.Errorf("Failed to encode bucket encryption response: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } -} - -// PutBucketEncryptionHandler handles PUT bucket encryption requests -func (s3a *S3ApiServer) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - // Read and parse the request body - body, err := io.ReadAll(r.Body) - if err != nil { - glog.Errorf("Failed to read request body: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - defer r.Body.Close() - - var xmlConfig ServerSideEncryptionConfiguration - if err := xml.Unmarshal(body, &xmlConfig); err != nil { - glog.Errorf("Failed to parse bucket encryption configuration: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Validate the configuration - if len(xmlConfig.Rules) == 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - rule := xmlConfig.Rules[0] // AWS S3 supports only one rule - - // Validate SSE algorithm - if rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm != EncryptionTypeAES256 && - rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm != EncryptionTypeKMS { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidEncryptionAlgorithm) - return - } - - // For aws:kms, validate KMS key if provided - if rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm == EncryptionTypeKMS { - keyID := rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID - if keyID != "" && !isValidKMSKeyID(keyID) { - s3err.WriteErrorResponse(w, r, s3err.ErrKMSKeyNotFound) - return - } - } - - // Convert XML to protobuf configuration - encryptionConfig := encryptionConfigFromXML(&xmlConfig) - - // Update the bucket configuration - errCode := s3a.updateEncryptionConfiguration(bucket, encryptionConfig) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - w.WriteHeader(http.StatusOK) -} - -// DeleteBucketEncryptionHandler handles DELETE bucket encryption requests -func (s3a *S3ApiServer) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - errCode := s3a.removeEncryptionConfiguration(bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// GetBucketEncryptionConfig retrieves the bucket encryption configuration for internal use -func (s3a *S3ApiServer) GetBucketEncryptionConfig(bucket string) (*s3_pb.EncryptionConfiguration, error) { - config, errCode := s3a.getEncryptionConfiguration(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucketEncryptionConfiguration { - return nil, fmt.Errorf("no encryption configuration found") - } - return nil, fmt.Errorf("failed to get encryption configuration") - } - return config, nil -} - -// Internal methods following the bucket configuration pattern - -// getEncryptionConfiguration retrieves encryption configuration with caching -func (s3a *S3ApiServer) getEncryptionConfiguration(bucket string) (*s3_pb.EncryptionConfiguration, s3err.ErrorCode) { - // Get metadata using structured API - metadata, err := s3a.GetBucketMetadata(bucket) - if err != nil { - glog.Errorf("getEncryptionConfiguration: failed to get bucket metadata for bucket %s: %v", bucket, err) - return nil, s3err.ErrInternalError - } - - if metadata.Encryption == nil { - return nil, s3err.ErrNoSuchBucketEncryptionConfiguration - } - - return metadata.Encryption, s3err.ErrNone -} - -// updateEncryptionConfiguration updates the encryption configuration for a bucket -func (s3a *S3ApiServer) updateEncryptionConfiguration(bucket string, encryptionConfig *s3_pb.EncryptionConfiguration) s3err.ErrorCode { - // Update using structured API - err := s3a.UpdateBucketEncryption(bucket, encryptionConfig) - if err != nil { - glog.Errorf("updateEncryptionConfiguration: failed to update encryption config for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Cache will be updated automatically via metadata subscription - return s3err.ErrNone -} - -// removeEncryptionConfiguration removes the encryption configuration for a bucket -func (s3a *S3ApiServer) removeEncryptionConfiguration(bucket string) s3err.ErrorCode { - // Check if encryption configuration exists - metadata, err := s3a.GetBucketMetadata(bucket) - if err != nil { - glog.Errorf("removeEncryptionConfiguration: failed to get bucket metadata for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - if metadata.Encryption == nil { - return s3err.ErrNoSuchBucketEncryptionConfiguration - } - - // Update using structured API - err = s3a.ClearBucketEncryption(bucket) - if err != nil { - glog.Errorf("removeEncryptionConfiguration: failed to remove encryption config for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Cache will be updated automatically via metadata subscription - return s3err.ErrNone -} - -// IsDefaultEncryptionEnabled checks if default encryption is enabled for a bucket -func (s3a *S3ApiServer) IsDefaultEncryptionEnabled(bucket string) bool { - config, err := s3a.GetBucketEncryptionConfig(bucket) - if err != nil || config == nil { - return false - } - return config.SseAlgorithm != "" -} - -// GetDefaultEncryptionHeaders returns the default encryption headers for a bucket -func (s3a *S3ApiServer) GetDefaultEncryptionHeaders(bucket string) map[string]string { - config, err := s3a.GetBucketEncryptionConfig(bucket) - if err != nil || config == nil { - return nil - } - - headers := make(map[string]string) - headers[s3_constants.AmzServerSideEncryption] = config.SseAlgorithm - - if config.SseAlgorithm == EncryptionTypeKMS && config.KmsKeyId != "" { - headers[s3_constants.AmzServerSideEncryptionAwsKmsKeyId] = config.KmsKeyId - } - - if config.BucketKeyEnabled { - headers[s3_constants.AmzServerSideEncryptionBucketKeyEnabled] = "true" - } - - return headers -} - -// IsDefaultEncryptionEnabled checks if default encryption is enabled for a configuration -func IsDefaultEncryptionEnabled(config *s3_pb.EncryptionConfiguration) bool { - return config != nil && config.SseAlgorithm != "" -} - -// GetDefaultEncryptionHeaders generates default encryption headers from configuration -func GetDefaultEncryptionHeaders(config *s3_pb.EncryptionConfiguration) map[string]string { - if config == nil || config.SseAlgorithm == "" { - return nil - } - - headers := make(map[string]string) - headers[s3_constants.AmzServerSideEncryption] = config.SseAlgorithm - - if config.SseAlgorithm == "aws:kms" && config.KmsKeyId != "" { - headers[s3_constants.AmzServerSideEncryptionAwsKmsKeyId] = config.KmsKeyId - } - - return headers -} - -// encryptionConfigFromXMLBytes parses XML bytes to encryption configuration -func encryptionConfigFromXMLBytes(xmlBytes []byte) (*s3_pb.EncryptionConfiguration, error) { - var xmlConfig ServerSideEncryptionConfiguration - if err := xml.Unmarshal(xmlBytes, &xmlConfig); err != nil { - return nil, err - } - - // Validate namespace - should be empty or the standard AWS namespace - if xmlConfig.XMLName.Space != "" && xmlConfig.XMLName.Space != "http://s3.amazonaws.com/doc/2006-03-01/" { - return nil, fmt.Errorf("invalid XML namespace: %s", xmlConfig.XMLName.Space) - } - - // Validate the configuration - if len(xmlConfig.Rules) == 0 { - return nil, fmt.Errorf("encryption configuration must have at least one rule") - } - - rule := xmlConfig.Rules[0] - if rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm == "" { - return nil, fmt.Errorf("encryption algorithm is required") - } - - // Validate algorithm - validAlgorithms := map[string]bool{ - "AES256": true, - "aws:kms": true, - } - - if !validAlgorithms[rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm] { - return nil, fmt.Errorf("unsupported encryption algorithm: %s", rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm) - } - - config := encryptionConfigFromXML(&xmlConfig) - return config, nil -} - -// encryptionConfigToXMLBytes converts encryption configuration to XML bytes -func encryptionConfigToXMLBytes(config *s3_pb.EncryptionConfiguration) ([]byte, error) { - if config == nil { - return nil, fmt.Errorf("encryption configuration is nil") - } - - xmlConfig := encryptionConfigToXML(config) - return xml.Marshal(xmlConfig) -} diff --git a/weed/s3api/s3_bucket_policy_simple_test.go b/weed/s3api/s3_bucket_policy_simple_test.go deleted file mode 100644 index 025b44900..000000000 --- a/weed/s3api/s3_bucket_policy_simple_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package s3api - -import ( - "encoding/json" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestBucketPolicyValidationBasics tests the core validation logic -func TestBucketPolicyValidationBasics(t *testing.T) { - s3Server := &S3ApiServer{} - - tests := []struct { - name string - policy *policy.PolicyDocument - bucket string - expectedValid bool - expectedError string - }{ - { - name: "Valid bucket policy", - policy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "TestStatement", - Effect: "Allow", - Principal: map[string]interface{}{ - "AWS": "*", - }, - Action: []string{"s3:GetObject"}, - Resource: []string{ - "arn:seaweed:s3:::test-bucket/*", - }, - }, - }, - }, - bucket: "test-bucket", - expectedValid: true, - }, - { - name: "Policy without Principal (invalid)", - policy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Action: []string{"s3:GetObject"}, - Resource: []string{"arn:seaweed:s3:::test-bucket/*"}, - // Principal is missing - }, - }, - }, - bucket: "test-bucket", - expectedValid: false, - expectedError: "bucket policies must specify a Principal", - }, - { - name: "Invalid version", - policy: &policy.PolicyDocument{ - Version: "2008-10-17", // Wrong version - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "AWS": "*", - }, - Action: []string{"s3:GetObject"}, - Resource: []string{"arn:seaweed:s3:::test-bucket/*"}, - }, - }, - }, - bucket: "test-bucket", - expectedValid: false, - expectedError: "unsupported policy version", - }, - { - name: "Resource not matching bucket", - policy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "AWS": "*", - }, - Action: []string{"s3:GetObject"}, - Resource: []string{"arn:seaweed:s3:::other-bucket/*"}, // Wrong bucket - }, - }, - }, - bucket: "test-bucket", - expectedValid: false, - expectedError: "does not match bucket", - }, - { - name: "Non-S3 action", - policy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "AWS": "*", - }, - Action: []string{"iam:GetUser"}, // Non-S3 action - Resource: []string{"arn:seaweed:s3:::test-bucket/*"}, - }, - }, - }, - bucket: "test-bucket", - expectedValid: false, - expectedError: "bucket policies only support S3 actions", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := s3Server.validateBucketPolicy(tt.policy, tt.bucket) - - if tt.expectedValid { - assert.NoError(t, err, "Policy should be valid") - } else { - assert.Error(t, err, "Policy should be invalid") - if tt.expectedError != "" { - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - } - }) - } -} - -// TestBucketResourceValidation tests the resource ARN validation -func TestBucketResourceValidation(t *testing.T) { - s3Server := &S3ApiServer{} - - tests := []struct { - name string - resource string - bucket string - valid bool - }{ - { - name: "Exact bucket ARN", - resource: "arn:seaweed:s3:::test-bucket", - bucket: "test-bucket", - valid: true, - }, - { - name: "Bucket wildcard ARN", - resource: "arn:seaweed:s3:::test-bucket/*", - bucket: "test-bucket", - valid: true, - }, - { - name: "Specific object ARN", - resource: "arn:seaweed:s3:::test-bucket/path/to/object.txt", - bucket: "test-bucket", - valid: true, - }, - { - name: "Different bucket ARN", - resource: "arn:seaweed:s3:::other-bucket/*", - bucket: "test-bucket", - valid: false, - }, - { - name: "Global S3 wildcard", - resource: "arn:seaweed:s3:::*", - bucket: "test-bucket", - valid: false, - }, - { - name: "Invalid ARN format", - resource: "invalid-arn", - bucket: "test-bucket", - valid: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := s3Server.validateResourceForBucket(tt.resource, tt.bucket) - assert.Equal(t, tt.valid, result, "Resource validation result should match expected") - }) - } -} - -// TestBucketPolicyJSONSerialization tests policy JSON handling -func TestBucketPolicyJSONSerialization(t *testing.T) { - policy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "PublicReadGetObject", - Effect: "Allow", - Principal: map[string]interface{}{ - "AWS": "*", - }, - Action: []string{"s3:GetObject"}, - Resource: []string{ - "arn:seaweed:s3:::public-bucket/*", - }, - }, - }, - } - - // Test that policy can be marshaled and unmarshaled correctly - jsonData := marshalPolicy(t, policy) - assert.NotEmpty(t, jsonData, "JSON data should not be empty") - - // Verify the JSON contains expected elements - jsonStr := string(jsonData) - assert.Contains(t, jsonStr, "2012-10-17", "JSON should contain version") - assert.Contains(t, jsonStr, "s3:GetObject", "JSON should contain action") - assert.Contains(t, jsonStr, "arn:seaweed:s3:::public-bucket/*", "JSON should contain resource") - assert.Contains(t, jsonStr, "PublicReadGetObject", "JSON should contain statement ID") -} - -// Helper function for marshaling policies -func marshalPolicy(t *testing.T, policyDoc *policy.PolicyDocument) []byte { - data, err := json.Marshal(policyDoc) - require.NoError(t, err) - return data -} diff --git a/weed/s3api/s3_constants/acp_canned_acl.go b/weed/s3api/s3_constants/acp_canned_acl.go deleted file mode 100644 index eab497872..000000000 --- a/weed/s3api/s3_constants/acp_canned_acl.go +++ /dev/null @@ -1,65 +0,0 @@ -package s3_constants - -import ( - "github.com/aws/aws-sdk-go/service/s3" -) - -const ( - CannedAclPrivate = "private" - CannedAclPublicRead = "public-read" - CannedAclPublicReadWrite = "public-read-write" - CannedAclAuthenticatedRead = "authenticated-read" - CannedAclLogDeliveryWrite = "log-delivery-write" - CannedAclBucketOwnerRead = "bucket-owner-read" - CannedAclBucketOwnerFullControl = "bucket-owner-full-control" - CannedAclAwsExecRead = "aws-exec-read" -) - -var ( - PublicRead = []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &GrantTypeGroup, - URI: &GranteeGroupAllUsers, - }, - Permission: &PermissionRead, - }, - } - - PublicReadWrite = []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &GrantTypeGroup, - URI: &GranteeGroupAllUsers, - }, - Permission: &PermissionRead, - }, - { - Grantee: &s3.Grantee{ - Type: &GrantTypeGroup, - URI: &GranteeGroupAllUsers, - }, - Permission: &PermissionWrite, - }, - } - - AuthenticatedRead = []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &GrantTypeGroup, - URI: &GranteeGroupAuthenticatedUsers, - }, - Permission: &PermissionRead, - }, - } - - LogDeliveryWrite = []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &GrantTypeGroup, - URI: &GranteeGroupLogDelivery, - }, - Permission: &PermissionWrite, - }, - } -) diff --git a/weed/s3api/s3_constants/acp_grantee_group.go b/weed/s3api/s3_constants/acp_grantee_group.go deleted file mode 100644 index 2eaa892e9..000000000 --- a/weed/s3api/s3_constants/acp_grantee_group.go +++ /dev/null @@ -1,20 +0,0 @@ -package s3_constants - -// Amazon S3 predefined groups -var ( - GranteeGroupAllUsers = "http://acs.amazonaws.com/groups/global/AllUsers" - GranteeGroupAuthenticatedUsers = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" - GranteeGroupLogDelivery = "http://acs.amazonaws.com/groups/s3/LogDelivery" -) - -func ValidateGroup(group string) bool { - valid := true - switch group { - case GranteeGroupAllUsers: - case GranteeGroupLogDelivery: - case GranteeGroupAuthenticatedUsers: - default: - valid = false - } - return valid -} diff --git a/weed/s3api/s3_constants/acp_grantee_type.go b/weed/s3api/s3_constants/acp_grantee_type.go deleted file mode 100644 index 7a4dfaf16..000000000 --- a/weed/s3api/s3_constants/acp_grantee_type.go +++ /dev/null @@ -1,7 +0,0 @@ -package s3_constants - -var ( - GrantTypeCanonicalUser = "CanonicalUser" - GrantTypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - GrantTypeGroup = "Group" -) diff --git a/weed/s3api/s3_constants/acp_ownership.go b/weed/s3api/s3_constants/acp_ownership.go deleted file mode 100644 index e11e95935..000000000 --- a/weed/s3api/s3_constants/acp_ownership.go +++ /dev/null @@ -1,18 +0,0 @@ -package s3_constants - -var ( - OwnershipBucketOwnerPreferred = "BucketOwnerPreferred" - OwnershipObjectWriter = "ObjectWriter" - OwnershipBucketOwnerEnforced = "BucketOwnerEnforced" - - DefaultOwnershipForCreate = OwnershipObjectWriter - DefaultOwnershipForExists = OwnershipBucketOwnerEnforced -) - -func ValidateOwnership(ownership string) bool { - if ownership == "" || (ownership != OwnershipBucketOwnerPreferred && ownership != OwnershipObjectWriter && ownership != OwnershipBucketOwnerEnforced) { - return false - } else { - return true - } -} diff --git a/weed/s3api/s3_constants/acp_permisson.go b/weed/s3api/s3_constants/acp_permisson.go deleted file mode 100644 index 4b875ff49..000000000 --- a/weed/s3api/s3_constants/acp_permisson.go +++ /dev/null @@ -1,9 +0,0 @@ -package s3_constants - -var ( - PermissionFullControl = "FULL_CONTROL" - PermissionRead = "READ" - PermissionWrite = "WRITE" - PermissionReadAcp = "READ_ACP" - PermissionWriteAcp = "WRITE_ACP" -) diff --git a/weed/s3api/s3_constants/crypto.go b/weed/s3api/s3_constants/crypto.go deleted file mode 100644 index 398e2b669..000000000 --- a/weed/s3api/s3_constants/crypto.go +++ /dev/null @@ -1,32 +0,0 @@ -package s3_constants - -// Cryptographic constants -const ( - // AES block and key sizes - AESBlockSize = 16 // 128 bits for AES block size (IV length) - AESKeySize = 32 // 256 bits for AES-256 keys - - // SSE algorithm identifiers - SSEAlgorithmAES256 = "AES256" - SSEAlgorithmKMS = "aws:kms" - - // SSE type identifiers for response headers and internal processing - SSETypeC = "SSE-C" - SSETypeKMS = "SSE-KMS" - SSETypeS3 = "SSE-S3" - - // S3 multipart upload limits and offsets - S3MaxPartSize = 5 * 1024 * 1024 * 1024 // 5GB - AWS S3 maximum part size limit - - // Multipart offset calculation for unique IV generation - // Using 8GB offset between parts (larger than max part size) to prevent IV collisions - // Critical for CTR mode encryption security in multipart uploads - PartOffsetMultiplier = int64(1) << 33 // 8GB per part offset - - // KMS validation limits based on AWS KMS service constraints - MaxKMSEncryptionContextPairs = 10 // Maximum number of encryption context key-value pairs - MaxKMSKeyIDLength = 500 // Maximum length for KMS key identifiers - - // S3 multipart upload limits based on AWS S3 service constraints - MaxS3MultipartParts = 10000 // Maximum number of parts in a multipart upload (1-10,000) -) diff --git a/weed/s3api/s3_constants/extend_key.go b/weed/s3api/s3_constants/extend_key.go deleted file mode 100644 index f0f223a45..000000000 --- a/weed/s3api/s3_constants/extend_key.go +++ /dev/null @@ -1,46 +0,0 @@ -package s3_constants - -const ( - ExtAmzOwnerKey = "Seaweed-X-Amz-Owner" - ExtAmzAclKey = "Seaweed-X-Amz-Acl" - ExtOwnershipKey = "Seaweed-X-Amz-Ownership" - ExtVersioningKey = "Seaweed-X-Amz-Versioning" - ExtVersionIdKey = "Seaweed-X-Amz-Version-Id" - ExtDeleteMarkerKey = "Seaweed-X-Amz-Delete-Marker" - ExtIsLatestKey = "Seaweed-X-Amz-Is-Latest" - ExtETagKey = "Seaweed-X-Amz-ETag" - ExtLatestVersionIdKey = "Seaweed-X-Amz-Latest-Version-Id" - ExtLatestVersionFileNameKey = "Seaweed-X-Amz-Latest-Version-File-Name" - - // Bucket Policy - ExtBucketPolicyKey = "Seaweed-X-Amz-Bucket-Policy" - - // Object Retention and Legal Hold - ExtObjectLockModeKey = "Seaweed-X-Amz-Object-Lock-Mode" - ExtRetentionUntilDateKey = "Seaweed-X-Amz-Retention-Until-Date" - ExtLegalHoldKey = "Seaweed-X-Amz-Legal-Hold" - ExtObjectLockEnabledKey = "Seaweed-X-Amz-Object-Lock-Enabled" - - // Object Lock Bucket Configuration (individual components, not XML) - ExtObjectLockDefaultModeKey = "Lock-Default-Mode" - ExtObjectLockDefaultDaysKey = "Lock-Default-Days" - ExtObjectLockDefaultYearsKey = "Lock-Default-Years" -) - -// Object Lock and Retention Constants -const ( - // Retention modes - RetentionModeGovernance = "GOVERNANCE" - RetentionModeCompliance = "COMPLIANCE" - - // Legal hold status - LegalHoldOn = "ON" - LegalHoldOff = "OFF" - - // Object lock enabled status - ObjectLockEnabled = "Enabled" - - // Bucket versioning status - VersioningEnabled = "Enabled" - VersioningSuspended = "Suspended" -) diff --git a/weed/s3api/s3_constants/header.go b/weed/s3api/s3_constants/header.go index 86863f257..cd725d435 100644 --- a/weed/s3api/s3_constants/header.go +++ b/weed/s3api/s3_constants/header.go @@ -17,10 +17,9 @@ package s3_constants import ( + "github.com/gorilla/mux" "net/http" "strings" - - "github.com/gorilla/mux" ) // Standard S3 HTTP request constants @@ -31,101 +30,19 @@ const ( // S3 user-defined metadata AmzUserMetaPrefix = "X-Amz-Meta-" AmzUserMetaDirective = "X-Amz-Metadata-Directive" - AmzUserMetaMtime = "X-Amz-Meta-Mtime" // S3 object tagging AmzObjectTagging = "X-Amz-Tagging" AmzObjectTaggingPrefix = "X-Amz-Tagging-" AmzObjectTaggingDirective = "X-Amz-Tagging-Directive" AmzTagCount = "x-amz-tagging-count" - - SeaweedFSIsDirectoryKey = "X-Seaweedfs-Is-Directory-Key" - SeaweedFSPartNumber = "X-Seaweedfs-Part-Number" - SeaweedFSUploadId = "X-Seaweedfs-Upload-Id" - - // S3 ACL headers - AmzCannedAcl = "X-Amz-Acl" - AmzAclFullControl = "X-Amz-Grant-Full-Control" - AmzAclRead = "X-Amz-Grant-Read" - AmzAclWrite = "X-Amz-Grant-Write" - AmzAclReadAcp = "X-Amz-Grant-Read-Acp" - AmzAclWriteAcp = "X-Amz-Grant-Write-Acp" - - // S3 Object Lock headers - AmzBucketObjectLockEnabled = "X-Amz-Bucket-Object-Lock-Enabled" - AmzObjectLockMode = "X-Amz-Object-Lock-Mode" - AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" - AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold" - - // S3 conditional headers - IfMatch = "If-Match" - IfNoneMatch = "If-None-Match" - IfModifiedSince = "If-Modified-Since" - IfUnmodifiedSince = "If-Unmodified-Since" - - // S3 conditional copy headers - AmzCopySourceIfMatch = "X-Amz-Copy-Source-If-Match" - AmzCopySourceIfNoneMatch = "X-Amz-Copy-Source-If-None-Match" - AmzCopySourceIfModifiedSince = "X-Amz-Copy-Source-If-Modified-Since" - AmzCopySourceIfUnmodifiedSince = "X-Amz-Copy-Source-If-Unmodified-Since" - - AmzMpPartsCount = "X-Amz-Mp-Parts-Count" - - // S3 Server-Side Encryption with Customer-provided Keys (SSE-C) - AmzServerSideEncryptionCustomerAlgorithm = "X-Amz-Server-Side-Encryption-Customer-Algorithm" - AmzServerSideEncryptionCustomerKey = "X-Amz-Server-Side-Encryption-Customer-Key" - AmzServerSideEncryptionCustomerKeyMD5 = "X-Amz-Server-Side-Encryption-Customer-Key-MD5" - AmzServerSideEncryptionContext = "X-Amz-Server-Side-Encryption-Context" - - // S3 Server-Side Encryption with KMS (SSE-KMS) - AmzServerSideEncryption = "X-Amz-Server-Side-Encryption" - AmzServerSideEncryptionAwsKmsKeyId = "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - AmzServerSideEncryptionBucketKeyEnabled = "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - - // S3 SSE-C copy source headers - AmzCopySourceServerSideEncryptionCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - AmzCopySourceServerSideEncryptionCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - AmzCopySourceServerSideEncryptionCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" -) - -// Metadata keys for internal storage -const ( - // SSE-KMS metadata keys - AmzEncryptedDataKey = "x-amz-encrypted-data-key" - AmzEncryptionContextMeta = "x-amz-encryption-context" - - // SeaweedFS internal metadata keys for encryption (prefixed to avoid automatic HTTP header conversion) - SeaweedFSSSEKMSKey = "x-seaweedfs-sse-kms-key" // Key for storing serialized SSE-KMS metadata - SeaweedFSSSES3Key = "x-seaweedfs-sse-s3-key" // Key for storing serialized SSE-S3 metadata - SeaweedFSSSEIV = "x-seaweedfs-sse-c-iv" // Key for storing SSE-C IV - - // Multipart upload metadata keys for SSE-KMS (consistent with internal metadata key pattern) - SeaweedFSSSEKMSKeyID = "x-seaweedfs-sse-kms-key-id" // Key ID for multipart upload SSE-KMS inheritance - SeaweedFSSSEKMSEncryption = "x-seaweedfs-sse-kms-encryption" // Encryption type for multipart upload SSE-KMS inheritance - SeaweedFSSSEKMSBucketKeyEnabled = "x-seaweedfs-sse-kms-bucket-key-enabled" // Bucket key setting for multipart upload SSE-KMS inheritance - SeaweedFSSSEKMSEncryptionContext = "x-seaweedfs-sse-kms-encryption-context" // Encryption context for multipart upload SSE-KMS inheritance - SeaweedFSSSEKMSBaseIV = "x-seaweedfs-sse-kms-base-iv" // Base IV for multipart upload SSE-KMS (for IV offset calculation) - - // Multipart upload metadata keys for SSE-S3 - SeaweedFSSSES3Encryption = "x-seaweedfs-sse-s3-encryption" // Encryption type for multipart upload SSE-S3 inheritance - SeaweedFSSSES3BaseIV = "x-seaweedfs-sse-s3-base-iv" // Base IV for multipart upload SSE-S3 (for IV offset calculation) - SeaweedFSSSES3KeyData = "x-seaweedfs-sse-s3-key-data" // Encrypted key data for multipart upload SSE-S3 inheritance -) - -// SeaweedFS internal headers for filer communication -const ( - SeaweedFSSSEKMSKeyHeader = "X-SeaweedFS-SSE-KMS-Key" // Header for passing SSE-KMS metadata to filer - SeaweedFSSSEIVHeader = "X-SeaweedFS-SSE-IV" // Header for passing SSE-C IV to filer (SSE-C only) - SeaweedFSSSEKMSBaseIVHeader = "X-SeaweedFS-SSE-KMS-Base-IV" // Header for passing base IV for multipart SSE-KMS - SeaweedFSSSES3BaseIVHeader = "X-SeaweedFS-SSE-S3-Base-IV" // Header for passing base IV for multipart SSE-S3 - SeaweedFSSSES3KeyDataHeader = "X-SeaweedFS-SSE-S3-Key-Data" // Header for passing key data for multipart SSE-S3 ) // Non-Standard S3 HTTP request constants const ( AmzIdentityId = "s3-identity-id" - AmzAccountId = "s3-account-id" AmzAuthType = "s3-auth-type" + AmzIsAdmin = "s3-is-admin" // only set to http request header as a context ) func GetBucketAndObject(r *http.Request) (bucket, object string) { @@ -139,16 +56,6 @@ func GetBucketAndObject(r *http.Request) (bucket, object string) { return } -func GetPrefix(r *http.Request) string { - query := r.URL.Query() - prefix := query.Get("prefix") - if !strings.HasPrefix(prefix, "/") { - prefix = "/" + prefix - } - - return prefix -} - var PassThroughHeaders = map[string]string{ "response-cache-control": "Cache-Control", "response-content-disposition": "Content-Disposition", diff --git a/weed/s3api/s3_constants/s3_acp.go b/weed/s3api/s3_constants/s3_acp.go deleted file mode 100644 index d24e07e24..000000000 --- a/weed/s3api/s3_constants/s3_acp.go +++ /dev/null @@ -1,6 +0,0 @@ -package s3_constants - -const ( - AccountAnonymousId = "anonymous" - AccountAdminId = "admin" -) diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go index 923327be2..721c57f71 100644 --- a/weed/s3api/s3_constants/s3_actions.go +++ b/weed/s3api/s3_constants/s3_actions.go @@ -1,31 +1,12 @@ package s3_constants const ( - ACTION_READ = "Read" - ACTION_READ_ACP = "ReadAcp" - ACTION_WRITE = "Write" - ACTION_WRITE_ACP = "WriteAcp" - ACTION_ADMIN = "Admin" - ACTION_TAGGING = "Tagging" - ACTION_LIST = "List" - ACTION_DELETE_BUCKET = "DeleteBucket" - ACTION_BYPASS_GOVERNANCE_RETENTION = "BypassGovernanceRetention" - ACTION_GET_OBJECT_RETENTION = "GetObjectRetention" - ACTION_PUT_OBJECT_RETENTION = "PutObjectRetention" - ACTION_GET_OBJECT_LEGAL_HOLD = "GetObjectLegalHold" - ACTION_PUT_OBJECT_LEGAL_HOLD = "PutObjectLegalHold" - ACTION_GET_BUCKET_OBJECT_LOCK_CONFIG = "GetBucketObjectLockConfiguration" - ACTION_PUT_BUCKET_OBJECT_LOCK_CONFIG = "PutBucketObjectLockConfiguration" - - // Granular multipart upload actions for fine-grained IAM policies - ACTION_CREATE_MULTIPART_UPLOAD = "s3:CreateMultipartUpload" - ACTION_UPLOAD_PART = "s3:UploadPart" - ACTION_COMPLETE_MULTIPART = "s3:CompleteMultipartUpload" - ACTION_ABORT_MULTIPART = "s3:AbortMultipartUpload" - ACTION_LIST_MULTIPART_UPLOADS = "s3:ListMultipartUploads" - ACTION_LIST_PARTS = "s3:ListParts" + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" + ACTION_TAGGING = "Tagging" + ACTION_LIST = "List" SeaweedStorageDestinationHeader = "x-seaweedfs-destination" MultipartUploadsFolder = ".uploads" - FolderMimeType = "httpd/unix-directory" ) diff --git a/weed/s3api/s3_constants/s3_config.go b/weed/s3api/s3_constants/s3_config.go index d2d2c257a..0fa5b26f4 100644 --- a/weed/s3api/s3_constants/s3_config.go +++ b/weed/s3api/s3_constants/s3_config.go @@ -7,7 +7,7 @@ import ( var ( CircuitBreakerConfigDir = "/etc/s3" CircuitBreakerConfigFile = "circuit_breaker.json" - AllowedActions = []string{ACTION_READ, ACTION_READ_ACP, ACTION_WRITE, ACTION_WRITE_ACP, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN, ACTION_DELETE_BUCKET} + AllowedActions = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN} LimitTypeCount = "Count" LimitTypeBytes = "MB" Separator = ":" diff --git a/weed/s3api/s3_end_to_end_test.go b/weed/s3api/s3_end_to_end_test.go deleted file mode 100644 index ba6d4e106..000000000 --- a/weed/s3api/s3_end_to_end_test.go +++ /dev/null @@ -1,656 +0,0 @@ -package s3api - -import ( - "bytes" - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/ldap" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createTestJWTEndToEnd creates a test JWT token with the specified issuer, subject and signing key -func createTestJWTEndToEnd(t *testing.T, issuer, subject, signingKey string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client-id", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - // Add claims that trust policy validation expects - "idp": "test-oidc", // Identity provider claim for trust policy matching - }) - - tokenString, err := token.SignedString([]byte(signingKey)) - require.NoError(t, err) - return tokenString -} - -// TestS3EndToEndWithJWT tests complete S3 operations with JWT authentication -func TestS3EndToEndWithJWT(t *testing.T) { - // Set up complete IAM system with S3 integration - s3Server, iamManager := setupCompleteS3IAMSystem(t) - - // Test scenarios - tests := []struct { - name string - roleArn string - sessionName string - setupRole func(ctx context.Context, manager *integration.IAMManager) - s3Operations []S3Operation - expectedResults []bool // true = allow, false = deny - }{ - { - name: "S3 Read-Only Role Complete Workflow", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - sessionName: "readonly-test-session", - setupRole: setupS3ReadOnlyRole, - s3Operations: []S3Operation{ - {Method: "PUT", Path: "/test-bucket", Body: nil, Operation: "CreateBucket"}, - {Method: "GET", Path: "/test-bucket", Body: nil, Operation: "ListBucket"}, - {Method: "PUT", Path: "/test-bucket/test-file.txt", Body: []byte("test content"), Operation: "PutObject"}, - {Method: "GET", Path: "/test-bucket/test-file.txt", Body: nil, Operation: "GetObject"}, - {Method: "HEAD", Path: "/test-bucket/test-file.txt", Body: nil, Operation: "HeadObject"}, - {Method: "DELETE", Path: "/test-bucket/test-file.txt", Body: nil, Operation: "DeleteObject"}, - }, - expectedResults: []bool{false, true, false, true, true, false}, // Only read operations allowed - }, - { - name: "S3 Admin Role Complete Workflow", - roleArn: "arn:seaweed:iam::role/S3AdminRole", - sessionName: "admin-test-session", - setupRole: setupS3AdminRole, - s3Operations: []S3Operation{ - {Method: "PUT", Path: "/admin-bucket", Body: nil, Operation: "CreateBucket"}, - {Method: "PUT", Path: "/admin-bucket/admin-file.txt", Body: []byte("admin content"), Operation: "PutObject"}, - {Method: "GET", Path: "/admin-bucket/admin-file.txt", Body: nil, Operation: "GetObject"}, - {Method: "DELETE", Path: "/admin-bucket/admin-file.txt", Body: nil, Operation: "DeleteObject"}, - {Method: "DELETE", Path: "/admin-bucket", Body: nil, Operation: "DeleteBucket"}, - }, - expectedResults: []bool{true, true, true, true, true}, // All operations allowed - }, - { - name: "S3 IP-Restricted Role", - roleArn: "arn:seaweed:iam::role/S3IPRestrictedRole", - sessionName: "ip-restricted-session", - setupRole: setupS3IPRestrictedRole, - s3Operations: []S3Operation{ - {Method: "GET", Path: "/restricted-bucket/file.txt", Body: nil, Operation: "GetObject", SourceIP: "192.168.1.100"}, // Allowed IP - {Method: "GET", Path: "/restricted-bucket/file.txt", Body: nil, Operation: "GetObject", SourceIP: "8.8.8.8"}, // Blocked IP - }, - expectedResults: []bool{true, false}, // Only office IP allowed - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - // Set up role - tt.setupRole(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTEndToEnd(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Assume role to get JWT token - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: tt.roleArn, - WebIdentityToken: validJWTToken, - RoleSessionName: tt.sessionName, - }) - require.NoError(t, err, "Failed to assume role %s", tt.roleArn) - - jwtToken := response.Credentials.SessionToken - require.NotEmpty(t, jwtToken, "JWT token should not be empty") - - // Execute S3 operations - for i, operation := range tt.s3Operations { - t.Run(fmt.Sprintf("%s_%s", tt.name, operation.Operation), func(t *testing.T) { - allowed := executeS3OperationWithJWT(t, s3Server, operation, jwtToken) - expected := tt.expectedResults[i] - - if expected { - assert.True(t, allowed, "Operation %s should be allowed", operation.Operation) - } else { - assert.False(t, allowed, "Operation %s should be denied", operation.Operation) - } - }) - } - }) - } -} - -// TestS3MultipartUploadWithJWT tests multipart upload with IAM -func TestS3MultipartUploadWithJWT(t *testing.T) { - s3Server, iamManager := setupCompleteS3IAMSystem(t) - ctx := context.Background() - - // Set up write role - setupS3WriteRole(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTEndToEnd(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Assume role - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3WriteRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "multipart-test-session", - }) - require.NoError(t, err) - - jwtToken := response.Credentials.SessionToken - - // Test multipart upload workflow - tests := []struct { - name string - operation S3Operation - expected bool - }{ - { - name: "Initialize Multipart Upload", - operation: S3Operation{ - Method: "POST", - Path: "/multipart-bucket/large-file.txt?uploads", - Body: nil, - Operation: "CreateMultipartUpload", - }, - expected: true, - }, - { - name: "Upload Part", - operation: S3Operation{ - Method: "PUT", - Path: "/multipart-bucket/large-file.txt?partNumber=1&uploadId=test-upload-id", - Body: bytes.Repeat([]byte("data"), 1024), // 4KB part - Operation: "UploadPart", - }, - expected: true, - }, - { - name: "List Parts", - operation: S3Operation{ - Method: "GET", - Path: "/multipart-bucket/large-file.txt?uploadId=test-upload-id", - Body: nil, - Operation: "ListParts", - }, - expected: true, - }, - { - name: "Complete Multipart Upload", - operation: S3Operation{ - Method: "POST", - Path: "/multipart-bucket/large-file.txt?uploadId=test-upload-id", - Body: []byte(""), - Operation: "CompleteMultipartUpload", - }, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - allowed := executeS3OperationWithJWT(t, s3Server, tt.operation, jwtToken) - if tt.expected { - assert.True(t, allowed, "Multipart operation %s should be allowed", tt.operation.Operation) - } else { - assert.False(t, allowed, "Multipart operation %s should be denied", tt.operation.Operation) - } - }) - } -} - -// TestS3CORSWithJWT tests CORS preflight requests with IAM -func TestS3CORSWithJWT(t *testing.T) { - s3Server, iamManager := setupCompleteS3IAMSystem(t) - ctx := context.Background() - - // Set up read role - setupS3ReadOnlyRole(ctx, iamManager) - - // Test CORS preflight - req := httptest.NewRequest("OPTIONS", "/test-bucket/test-file.txt", http.NoBody) - req.Header.Set("Origin", "https://example.com") - req.Header.Set("Access-Control-Request-Method", "GET") - req.Header.Set("Access-Control-Request-Headers", "Authorization") - - recorder := httptest.NewRecorder() - s3Server.ServeHTTP(recorder, req) - - // CORS preflight should succeed - assert.True(t, recorder.Code < 400, "CORS preflight should succeed, got %d: %s", recorder.Code, recorder.Body.String()) - - // Check CORS headers - assert.Contains(t, recorder.Header().Get("Access-Control-Allow-Origin"), "example.com") - assert.Contains(t, recorder.Header().Get("Access-Control-Allow-Methods"), "GET") -} - -// TestS3PerformanceWithIAM tests performance impact of IAM integration -func TestS3PerformanceWithIAM(t *testing.T) { - if testing.Short() { - t.Skip("Skipping performance test in short mode") - } - - s3Server, iamManager := setupCompleteS3IAMSystem(t) - ctx := context.Background() - - // Set up performance role - setupS3ReadOnlyRole(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTEndToEnd(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Assume role - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "performance-test-session", - }) - require.NoError(t, err) - - jwtToken := response.Credentials.SessionToken - - // Benchmark multiple GET requests - numRequests := 100 - start := time.Now() - - for i := 0; i < numRequests; i++ { - operation := S3Operation{ - Method: "GET", - Path: fmt.Sprintf("/perf-bucket/file-%d.txt", i), - Body: nil, - Operation: "GetObject", - } - - executeS3OperationWithJWT(t, s3Server, operation, jwtToken) - } - - duration := time.Since(start) - avgLatency := duration / time.Duration(numRequests) - - t.Logf("Performance Results:") - t.Logf("- Total requests: %d", numRequests) - t.Logf("- Total time: %v", duration) - t.Logf("- Average latency: %v", avgLatency) - t.Logf("- Requests per second: %.2f", float64(numRequests)/duration.Seconds()) - - // Assert reasonable performance (less than 10ms average) - assert.Less(t, avgLatency, 10*time.Millisecond, "IAM overhead should be minimal") -} - -// S3Operation represents an S3 operation for testing -type S3Operation struct { - Method string - Path string - Body []byte - Operation string - SourceIP string -} - -// Helper functions for test setup - -func setupCompleteS3IAMSystem(t *testing.T) (http.Handler, *integration.IAMManager) { - // Create IAM manager - iamManager := integration.NewIAMManager() - - // Initialize with test configuration - config := &integration.IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", - }, - } - - err := iamManager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Set up test identity providers - setupTestProviders(t, iamManager) - - // Create S3 server with IAM integration - router := mux.NewRouter() - - // Create S3 IAM integration for testing with error recovery - var s3IAMIntegration *S3IAMIntegration - - // Attempt to create IAM integration with panic recovery - func() { - defer func() { - if r := recover(); r != nil { - t.Logf("Failed to create S3 IAM integration: %v", r) - t.Skip("Skipping test due to S3 server setup issues (likely missing filer or older code version)") - } - }() - s3IAMIntegration = NewS3IAMIntegration(iamManager, "localhost:8888") - }() - - if s3IAMIntegration == nil { - t.Skip("Could not create S3 IAM integration") - } - - // Add a simple test endpoint that we can use to verify IAM functionality - router.HandleFunc("/test-auth", func(w http.ResponseWriter, r *http.Request) { - // Test JWT authentication - identity, errCode := s3IAMIntegration.AuthenticateJWT(r.Context(), r) - if errCode != s3err.ErrNone { - w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte("Authentication failed")) - return - } - - // Map HTTP method to S3 action for more realistic testing - var action Action - switch r.Method { - case "GET": - action = Action("s3:GetObject") - case "PUT": - action = Action("s3:PutObject") - case "DELETE": - action = Action("s3:DeleteObject") - case "HEAD": - action = Action("s3:HeadObject") - default: - action = Action("s3:GetObject") // Default fallback - } - - // Test authorization with appropriate action - authErrCode := s3IAMIntegration.AuthorizeAction(r.Context(), identity, action, "test-bucket", "test-object", r) - if authErrCode != s3err.ErrNone { - w.WriteHeader(http.StatusForbidden) - w.Write([]byte("Authorization failed")) - return - } - - w.WriteHeader(http.StatusOK) - w.Write([]byte("Success")) - }).Methods("GET", "PUT", "DELETE", "HEAD") - - // Add CORS preflight handler for S3 bucket/object paths - router.PathPrefix("/{bucket}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - // Handle CORS preflight request - origin := r.Header.Get("Origin") - requestMethod := r.Header.Get("Access-Control-Request-Method") - - // Set CORS headers - w.Header().Set("Access-Control-Allow-Origin", origin) - w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, HEAD, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-Amz-Date, X-Amz-Security-Token") - w.Header().Set("Access-Control-Max-Age", "3600") - - if requestMethod != "" { - w.Header().Add("Access-Control-Allow-Methods", requestMethod) - } - - w.WriteHeader(http.StatusOK) - return - } - - // For non-OPTIONS requests, return 404 since we don't have full S3 implementation - w.WriteHeader(http.StatusNotFound) - w.Write([]byte("Not found")) - }) - - return router, iamManager -} - -func setupTestProviders(t *testing.T, manager *integration.IAMManager) { - // Set up OIDC provider - oidcProvider := oidc.NewMockOIDCProvider("test-oidc") - oidcConfig := &oidc.OIDCConfig{ - Issuer: "https://test-issuer.com", - ClientID: "test-client-id", - } - err := oidcProvider.Initialize(oidcConfig) - require.NoError(t, err) - oidcProvider.SetupDefaultTestData() - - // Set up LDAP mock provider (no config needed for mock) - ldapProvider := ldap.NewMockLDAPProvider("test-ldap") - err = ldapProvider.Initialize(nil) // Mock doesn't need real config - require.NoError(t, err) - ldapProvider.SetupDefaultTestData() - - // Register providers - err = manager.RegisterIdentityProvider(oidcProvider) - require.NoError(t, err) - err = manager.RegisterIdentityProvider(ldapProvider) - require.NoError(t, err) -} - -func setupS3ReadOnlyRole(ctx context.Context, manager *integration.IAMManager) { - // Create read-only policy - readOnlyPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3ReadOperations", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket", "s3:HeadObject"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3ReadOnlyPolicy", readOnlyPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3ReadOnlyRole", &integration.RoleDefinition{ - RoleName: "S3ReadOnlyRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - }) -} - -func setupS3AdminRole(ctx context.Context, manager *integration.IAMManager) { - // Create admin policy - adminPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowAllS3Operations", - Effect: "Allow", - Action: []string{"s3:*"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3AdminPolicy", adminPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3AdminRole", &integration.RoleDefinition{ - RoleName: "S3AdminRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3AdminPolicy"}, - }) -} - -func setupS3WriteRole(ctx context.Context, manager *integration.IAMManager) { - // Create write policy - writePolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3WriteOperations", - Effect: "Allow", - Action: []string{"s3:PutObject", "s3:GetObject", "s3:ListBucket", "s3:DeleteObject"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3WritePolicy", writePolicy) - - // Create role - manager.CreateRole(ctx, "", "S3WriteRole", &integration.RoleDefinition{ - RoleName: "S3WriteRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3WritePolicy"}, - }) -} - -func setupS3IPRestrictedRole(ctx context.Context, manager *integration.IAMManager) { - // Create IP-restricted policy - restrictedPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3FromOfficeIP", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - Condition: map[string]map[string]interface{}{ - "IpAddress": { - "seaweed:SourceIP": []string{"192.168.1.0/24"}, - }, - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3IPRestrictedPolicy", restrictedPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3IPRestrictedRole", &integration.RoleDefinition{ - RoleName: "S3IPRestrictedRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3IPRestrictedPolicy"}, - }) -} - -func executeS3OperationWithJWT(t *testing.T, s3Server http.Handler, operation S3Operation, jwtToken string) bool { - // Use our simplified test endpoint for IAM validation with the correct HTTP method - req := httptest.NewRequest(operation.Method, "/test-auth", nil) - req.Header.Set("Authorization", "Bearer "+jwtToken) - req.Header.Set("Content-Type", "application/octet-stream") - - // Set source IP if specified - if operation.SourceIP != "" { - req.Header.Set("X-Forwarded-For", operation.SourceIP) - req.RemoteAddr = operation.SourceIP + ":12345" - } - - // Execute request - recorder := httptest.NewRecorder() - s3Server.ServeHTTP(recorder, req) - - // Determine if operation was allowed - allowed := recorder.Code < 400 - - t.Logf("S3 Operation: %s %s -> %d (%s)", operation.Method, operation.Path, recorder.Code, - map[bool]string{true: "ALLOWED", false: "DENIED"}[allowed]) - - if !allowed && recorder.Code != http.StatusForbidden && recorder.Code != http.StatusUnauthorized { - // If it's not a 403/401, it might be a different error (like not found) - // For testing purposes, we'll consider non-auth errors as "allowed" for now - t.Logf("Non-auth error: %s", recorder.Body.String()) - return true - } - - return allowed -} diff --git a/weed/s3api/s3_error_utils.go b/weed/s3api/s3_error_utils.go deleted file mode 100644 index 7afb241b5..000000000 --- a/weed/s3api/s3_error_utils.go +++ /dev/null @@ -1,54 +0,0 @@ -package s3api - -import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// ErrorHandlers provide common error handling patterns for S3 API operations - -// handlePutToFilerError logs an error and returns the standard putToFiler error format -func handlePutToFilerError(operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) { - glog.Errorf("Failed to %s: %v", operation, err) - return "", errorCode, "" -} - -// handlePutToFilerInternalError is a convenience wrapper for internal errors in putToFiler -func handlePutToFilerInternalError(operation string, err error) (string, s3err.ErrorCode, string) { - return handlePutToFilerError(operation, err, s3err.ErrInternalError) -} - -// handleMultipartError logs an error and returns the standard multipart error format -func handleMultipartError(operation string, err error, errorCode s3err.ErrorCode) (interface{}, s3err.ErrorCode) { - glog.Errorf("Failed to %s: %v", operation, err) - return nil, errorCode -} - -// handleMultipartInternalError is a convenience wrapper for internal errors in multipart operations -func handleMultipartInternalError(operation string, err error) (interface{}, s3err.ErrorCode) { - return handleMultipartError(operation, err, s3err.ErrInternalError) -} - -// logErrorAndReturn logs an error with operation context and returns the specified error code -func logErrorAndReturn(operation string, err error, errorCode s3err.ErrorCode) s3err.ErrorCode { - glog.Errorf("Failed to %s: %v", operation, err) - return errorCode -} - -// logInternalError is a convenience wrapper for internal error logging -func logInternalError(operation string, err error) s3err.ErrorCode { - return logErrorAndReturn(operation, err, s3err.ErrInternalError) -} - -// SSE-specific error handlers - -// handleSSEError handles common SSE-related errors with appropriate context -func handleSSEError(sseType string, operation string, err error, errorCode s3err.ErrorCode) (string, s3err.ErrorCode, string) { - glog.Errorf("Failed to %s for %s: %v", operation, sseType, err) - return "", errorCode, "" -} - -// handleSSEInternalError is a convenience wrapper for SSE internal errors -func handleSSEInternalError(sseType string, operation string, err error) (string, s3err.ErrorCode, string) { - return handleSSEError(sseType, operation, err, s3err.ErrInternalError) -} diff --git a/weed/s3api/s3_granular_action_security_test.go b/weed/s3api/s3_granular_action_security_test.go deleted file mode 100644 index 404638d14..000000000 --- a/weed/s3api/s3_granular_action_security_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package s3api - -import ( - "net/http" - "net/url" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" -) - -// TestGranularActionMappingSecurity demonstrates how the new granular action mapping -// fixes critical security issues that existed with the previous coarse mapping -func TestGranularActionMappingSecurity(t *testing.T) { - tests := []struct { - name string - method string - bucket string - objectKey string - queryParams map[string]string - description string - problemWithOldMapping string - granularActionResult string - }{ - { - name: "delete_object_security_fix", - method: "DELETE", - bucket: "sensitive-bucket", - objectKey: "confidential-file.txt", - queryParams: map[string]string{}, - description: "DELETE object operations should map to s3:DeleteObject, not s3:PutObject", - problemWithOldMapping: "Old mapping incorrectly mapped DELETE object to s3:PutObject, " + - "allowing users with only PUT permissions to delete objects - a critical security flaw", - granularActionResult: "s3:DeleteObject", - }, - { - name: "get_object_acl_precision", - method: "GET", - bucket: "secure-bucket", - objectKey: "private-file.pdf", - queryParams: map[string]string{"acl": ""}, - description: "GET object ACL should map to s3:GetObjectAcl, not generic s3:GetObject", - problemWithOldMapping: "Old mapping would allow users with s3:GetObject permission to " + - "read ACLs, potentially exposing sensitive permission information", - granularActionResult: "s3:GetObjectAcl", - }, - { - name: "put_object_tagging_precision", - method: "PUT", - bucket: "data-bucket", - objectKey: "business-document.xlsx", - queryParams: map[string]string{"tagging": ""}, - description: "PUT object tagging should map to s3:PutObjectTagging, not generic s3:PutObject", - problemWithOldMapping: "Old mapping couldn't distinguish between actual object uploads and " + - "metadata operations like tagging, making fine-grained permissions impossible", - granularActionResult: "s3:PutObjectTagging", - }, - { - name: "multipart_upload_precision", - method: "POST", - bucket: "large-files", - objectKey: "video.mp4", - queryParams: map[string]string{"uploads": ""}, - description: "Multipart upload initiation should map to s3:CreateMultipartUpload", - problemWithOldMapping: "Old mapping would treat multipart operations as generic s3:PutObject, " + - "preventing policies that allow regular uploads but restrict large multipart operations", - granularActionResult: "s3:CreateMultipartUpload", - }, - { - name: "bucket_policy_vs_bucket_creation", - method: "PUT", - bucket: "corporate-bucket", - objectKey: "", - queryParams: map[string]string{"policy": ""}, - description: "Bucket policy modifications should map to s3:PutBucketPolicy, not s3:CreateBucket", - problemWithOldMapping: "Old mapping couldn't distinguish between creating buckets and " + - "modifying bucket policies, potentially allowing unauthorized policy changes", - granularActionResult: "s3:PutBucketPolicy", - }, - { - name: "list_vs_read_distinction", - method: "GET", - bucket: "inventory-bucket", - objectKey: "", - queryParams: map[string]string{"uploads": ""}, - description: "Listing multipart uploads should map to s3:ListMultipartUploads", - problemWithOldMapping: "Old mapping would use generic s3:ListBucket for all bucket operations, " + - "preventing fine-grained control over who can see ongoing multipart operations", - granularActionResult: "s3:ListMultipartUploads", - }, - { - name: "delete_object_tagging_precision", - method: "DELETE", - bucket: "metadata-bucket", - objectKey: "tagged-file.json", - queryParams: map[string]string{"tagging": ""}, - description: "Delete object tagging should map to s3:DeleteObjectTagging, not s3:DeleteObject", - problemWithOldMapping: "Old mapping couldn't distinguish between deleting objects and " + - "deleting tags, preventing policies that allow tag management but not object deletion", - granularActionResult: "s3:DeleteObjectTagging", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create HTTP request with query parameters - req := &http.Request{ - Method: tt.method, - URL: &url.URL{Path: "/" + tt.bucket + "/" + tt.objectKey}, - } - - // Add query parameters - query := req.URL.Query() - for key, value := range tt.queryParams { - query.Set(key, value) - } - req.URL.RawQuery = query.Encode() - - // Test the new granular action determination - result := determineGranularS3Action(req, s3_constants.ACTION_WRITE, tt.bucket, tt.objectKey) - - assert.Equal(t, tt.granularActionResult, result, - "Security Fix Test: %s\n"+ - "Description: %s\n"+ - "Problem with old mapping: %s\n"+ - "Expected: %s, Got: %s", - tt.name, tt.description, tt.problemWithOldMapping, tt.granularActionResult, result) - - // Log the security improvement - t.Logf("SECURITY IMPROVEMENT: %s", tt.description) - t.Logf(" Problem Fixed: %s", tt.problemWithOldMapping) - t.Logf(" Granular Action: %s", result) - }) - } -} - -// TestBackwardCompatibilityFallback tests that the new system maintains backward compatibility -// with existing generic actions while providing enhanced granularity -func TestBackwardCompatibilityFallback(t *testing.T) { - tests := []struct { - name string - method string - bucket string - objectKey string - fallbackAction Action - expectedResult string - description string - }{ - { - name: "generic_read_fallback", - method: "GET", // Generic method without specific query params - bucket: "", // Edge case: no bucket specified - objectKey: "", // Edge case: no object specified - fallbackAction: s3_constants.ACTION_READ, - expectedResult: "s3:GetObject", - description: "Generic read operations should fall back to s3:GetObject for compatibility", - }, - { - name: "generic_write_fallback", - method: "PUT", // Generic method without specific query params - bucket: "", // Edge case: no bucket specified - objectKey: "", // Edge case: no object specified - fallbackAction: s3_constants.ACTION_WRITE, - expectedResult: "s3:PutObject", - description: "Generic write operations should fall back to s3:PutObject for compatibility", - }, - { - name: "already_granular_passthrough", - method: "GET", - bucket: "", - objectKey: "", - fallbackAction: "s3:GetBucketLocation", // Already specific - expectedResult: "s3:GetBucketLocation", - description: "Already granular actions should pass through unchanged", - }, - { - name: "unknown_action_conversion", - method: "GET", - bucket: "", - objectKey: "", - fallbackAction: "CustomAction", // Not S3-prefixed - expectedResult: "s3:CustomAction", - description: "Unknown actions should be converted to S3 format for consistency", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := &http.Request{ - Method: tt.method, - URL: &url.URL{Path: "/" + tt.bucket + "/" + tt.objectKey}, - } - - result := determineGranularS3Action(req, tt.fallbackAction, tt.bucket, tt.objectKey) - - assert.Equal(t, tt.expectedResult, result, - "Backward Compatibility Test: %s\nDescription: %s\nExpected: %s, Got: %s", - tt.name, tt.description, tt.expectedResult, result) - - t.Logf("COMPATIBILITY: %s - %s", tt.description, result) - }) - } -} - -// TestPolicyEnforcementScenarios demonstrates how granular actions enable -// more precise and secure IAM policy enforcement -func TestPolicyEnforcementScenarios(t *testing.T) { - scenarios := []struct { - name string - policyExample string - method string - bucket string - objectKey string - queryParams map[string]string - expectedAction string - securityBenefit string - }{ - { - name: "allow_read_deny_acl_access", - policyExample: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::sensitive-bucket/*" - } - ] - }`, - method: "GET", - bucket: "sensitive-bucket", - objectKey: "document.pdf", - queryParams: map[string]string{"acl": ""}, - expectedAction: "s3:GetObjectAcl", - securityBenefit: "Policy allows reading objects but denies ACL access - granular actions enable this distinction", - }, - { - name: "allow_tagging_deny_object_modification", - policyExample: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:PutObjectTagging", "s3:DeleteObjectTagging"], - "Resource": "arn:aws:s3:::data-bucket/*" - } - ] - }`, - method: "PUT", - bucket: "data-bucket", - objectKey: "metadata-file.json", - queryParams: map[string]string{"tagging": ""}, - expectedAction: "s3:PutObjectTagging", - securityBenefit: "Policy allows tag management but prevents actual object uploads - critical for metadata-only roles", - }, - { - name: "restrict_multipart_uploads", - policyExample: `{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::uploads/*" - }, - { - "Effect": "Deny", - "Action": ["s3:CreateMultipartUpload", "s3:UploadPart"], - "Resource": "arn:aws:s3:::uploads/*" - } - ] - }`, - method: "POST", - bucket: "uploads", - objectKey: "large-file.zip", - queryParams: map[string]string{"uploads": ""}, - expectedAction: "s3:CreateMultipartUpload", - securityBenefit: "Policy allows regular uploads but blocks large multipart uploads - prevents resource abuse", - }, - } - - for _, scenario := range scenarios { - t.Run(scenario.name, func(t *testing.T) { - req := &http.Request{ - Method: scenario.method, - URL: &url.URL{Path: "/" + scenario.bucket + "/" + scenario.objectKey}, - } - - query := req.URL.Query() - for key, value := range scenario.queryParams { - query.Set(key, value) - } - req.URL.RawQuery = query.Encode() - - result := determineGranularS3Action(req, s3_constants.ACTION_WRITE, scenario.bucket, scenario.objectKey) - - assert.Equal(t, scenario.expectedAction, result, - "Policy Enforcement Scenario: %s\nExpected Action: %s, Got: %s", - scenario.name, scenario.expectedAction, result) - - t.Logf("๐Ÿ”’ SECURITY SCENARIO: %s", scenario.name) - t.Logf(" Expected Action: %s", result) - t.Logf(" Security Benefit: %s", scenario.securityBenefit) - t.Logf(" Policy Example:\n%s", scenario.policyExample) - }) - } -} diff --git a/weed/s3api/s3_iam_middleware.go b/weed/s3api/s3_iam_middleware.go deleted file mode 100644 index 857123d7b..000000000 --- a/weed/s3api/s3_iam_middleware.go +++ /dev/null @@ -1,794 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// S3IAMIntegration provides IAM integration for S3 API -type S3IAMIntegration struct { - iamManager *integration.IAMManager - stsService *sts.STSService - filerAddress string - enabled bool -} - -// NewS3IAMIntegration creates a new S3 IAM integration -func NewS3IAMIntegration(iamManager *integration.IAMManager, filerAddress string) *S3IAMIntegration { - var stsService *sts.STSService - if iamManager != nil { - stsService = iamManager.GetSTSService() - } - - return &S3IAMIntegration{ - iamManager: iamManager, - stsService: stsService, - filerAddress: filerAddress, - enabled: iamManager != nil, - } -} - -// AuthenticateJWT authenticates JWT tokens using our STS service -func (s3iam *S3IAMIntegration) AuthenticateJWT(ctx context.Context, r *http.Request) (*IAMIdentity, s3err.ErrorCode) { - - if !s3iam.enabled { - return nil, s3err.ErrNotImplemented - } - - // Extract bearer token from Authorization header - authHeader := r.Header.Get("Authorization") - if !strings.HasPrefix(authHeader, "Bearer ") { - return nil, s3err.ErrAccessDenied - } - - sessionToken := strings.TrimPrefix(authHeader, "Bearer ") - if sessionToken == "" { - return nil, s3err.ErrAccessDenied - } - - // Basic token format validation - reject obviously invalid tokens - if sessionToken == "invalid-token" || len(sessionToken) < 10 { - glog.V(3).Info("Session token format is invalid") - return nil, s3err.ErrAccessDenied - } - - // Try to parse as STS session token first - tokenClaims, err := parseJWTToken(sessionToken) - if err != nil { - glog.V(3).Infof("Failed to parse JWT token: %v", err) - return nil, s3err.ErrAccessDenied - } - - // Determine token type by issuer claim (more robust than checking role claim) - issuer, issuerOk := tokenClaims["iss"].(string) - if !issuerOk { - glog.V(3).Infof("Token missing issuer claim - invalid JWT") - return nil, s3err.ErrAccessDenied - } - - // Check if this is an STS-issued token by examining the issuer - if !s3iam.isSTSIssuer(issuer) { - - // Not an STS session token, try to validate as OIDC token with timeout - // Create a context with a reasonable timeout to prevent hanging - ctx, cancel := context.WithTimeout(ctx, 15*time.Second) - defer cancel() - - identity, err := s3iam.validateExternalOIDCToken(ctx, sessionToken) - - if err != nil { - return nil, s3err.ErrAccessDenied - } - - // Extract role from OIDC identity - if identity.RoleArn == "" { - return nil, s3err.ErrAccessDenied - } - - // Return IAM identity for OIDC token - return &IAMIdentity{ - Name: identity.UserID, - Principal: identity.RoleArn, - SessionToken: sessionToken, - Account: &Account{ - DisplayName: identity.UserID, - EmailAddress: identity.UserID + "@oidc.local", - Id: identity.UserID, - }, - }, s3err.ErrNone - } - - // This is an STS-issued token - extract STS session information - - // Extract role claim from STS token - roleName, roleOk := tokenClaims["role"].(string) - if !roleOk || roleName == "" { - glog.V(3).Infof("STS token missing role claim") - return nil, s3err.ErrAccessDenied - } - - sessionName, ok := tokenClaims["snam"].(string) - if !ok || sessionName == "" { - sessionName = "jwt-session" // Default fallback - } - - subject, ok := tokenClaims["sub"].(string) - if !ok || subject == "" { - subject = "jwt-user" // Default fallback - } - - // Use the principal ARN directly from token claims, or build it if not available - principalArn, ok := tokenClaims["principal"].(string) - if !ok || principalArn == "" { - // Fallback: extract role name from role ARN and build principal ARN - roleNameOnly := roleName - if strings.Contains(roleName, "/") { - parts := strings.Split(roleName, "/") - roleNameOnly = parts[len(parts)-1] - } - principalArn = fmt.Sprintf("arn:seaweed:sts::assumed-role/%s/%s", roleNameOnly, sessionName) - } - - // Validate the JWT token directly using STS service (avoid circular dependency) - // Note: We don't call IsActionAllowed here because that would create a circular dependency - // Authentication should only validate the token, authorization happens later - _, err = s3iam.stsService.ValidateSessionToken(ctx, sessionToken) - if err != nil { - glog.V(3).Infof("STS session validation failed: %v", err) - return nil, s3err.ErrAccessDenied - } - - // Create IAM identity from validated token - identity := &IAMIdentity{ - Name: subject, - Principal: principalArn, - SessionToken: sessionToken, - Account: &Account{ - DisplayName: roleName, - EmailAddress: subject + "@seaweedfs.local", - Id: subject, - }, - } - - glog.V(3).Infof("JWT authentication successful for principal: %s", identity.Principal) - return identity, s3err.ErrNone -} - -// AuthorizeAction authorizes actions using our policy engine -func (s3iam *S3IAMIntegration) AuthorizeAction(ctx context.Context, identity *IAMIdentity, action Action, bucket string, objectKey string, r *http.Request) s3err.ErrorCode { - if !s3iam.enabled { - return s3err.ErrNone // Fallback to existing authorization - } - - if identity.SessionToken == "" { - return s3err.ErrAccessDenied - } - - // Build resource ARN for the S3 operation - resourceArn := buildS3ResourceArn(bucket, objectKey) - - // Extract request context for policy conditions - requestContext := extractRequestContext(r) - - // Determine the specific S3 action based on the HTTP request details - specificAction := determineGranularS3Action(r, action, bucket, objectKey) - - // Create action request - actionRequest := &integration.ActionRequest{ - Principal: identity.Principal, - Action: specificAction, - Resource: resourceArn, - SessionToken: identity.SessionToken, - RequestContext: requestContext, - } - - // Check if action is allowed using our policy engine - allowed, err := s3iam.iamManager.IsActionAllowed(ctx, actionRequest) - if err != nil { - return s3err.ErrAccessDenied - } - - if !allowed { - return s3err.ErrAccessDenied - } - - return s3err.ErrNone -} - -// IAMIdentity represents an authenticated identity with session information -type IAMIdentity struct { - Name string - Principal string - SessionToken string - Account *Account -} - -// IsAdmin checks if the identity has admin privileges -func (identity *IAMIdentity) IsAdmin() bool { - // In our IAM system, admin status is determined by policies, not identity - // This is handled by the policy engine during authorization - return false -} - -// Mock session structures for validation -type MockSessionInfo struct { - AssumedRoleUser MockAssumedRoleUser -} - -type MockAssumedRoleUser struct { - AssumedRoleId string - Arn string -} - -// Helper functions - -// buildS3ResourceArn builds an S3 resource ARN from bucket and object -func buildS3ResourceArn(bucket string, objectKey string) string { - if bucket == "" { - return "arn:seaweed:s3:::*" - } - - if objectKey == "" || objectKey == "/" { - return "arn:seaweed:s3:::" + bucket - } - - // Remove leading slash from object key if present - if strings.HasPrefix(objectKey, "/") { - objectKey = objectKey[1:] - } - - return "arn:seaweed:s3:::" + bucket + "/" + objectKey -} - -// determineGranularS3Action determines the specific S3 IAM action based on HTTP request details -// This provides granular, operation-specific actions for accurate IAM policy enforcement -func determineGranularS3Action(r *http.Request, fallbackAction Action, bucket string, objectKey string) string { - method := r.Method - query := r.URL.Query() - - // Check if there are specific query parameters indicating granular operations - // If there are, always use granular mapping regardless of method-action alignment - hasGranularIndicators := hasSpecificQueryParameters(query) - - // Only check for method-action mismatch when there are NO granular indicators - // This provides fallback behavior for cases where HTTP method doesn't align with intended action - if !hasGranularIndicators && isMethodActionMismatch(method, fallbackAction) { - return mapLegacyActionToIAM(fallbackAction) - } - - // Handle object-level operations when method and action are aligned - if objectKey != "" && objectKey != "/" { - switch method { - case "GET", "HEAD": - // Object read operations - check for specific query parameters - if _, hasAcl := query["acl"]; hasAcl { - return "s3:GetObjectAcl" - } - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:GetObjectTagging" - } - if _, hasRetention := query["retention"]; hasRetention { - return "s3:GetObjectRetention" - } - if _, hasLegalHold := query["legal-hold"]; hasLegalHold { - return "s3:GetObjectLegalHold" - } - if _, hasVersions := query["versions"]; hasVersions { - return "s3:GetObjectVersion" - } - if _, hasUploadId := query["uploadId"]; hasUploadId { - return "s3:ListParts" - } - // Default object read - return "s3:GetObject" - - case "PUT", "POST": - // Object write operations - check for specific query parameters - if _, hasAcl := query["acl"]; hasAcl { - return "s3:PutObjectAcl" - } - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:PutObjectTagging" - } - if _, hasRetention := query["retention"]; hasRetention { - return "s3:PutObjectRetention" - } - if _, hasLegalHold := query["legal-hold"]; hasLegalHold { - return "s3:PutObjectLegalHold" - } - // Check for multipart upload operations - if _, hasUploads := query["uploads"]; hasUploads { - return "s3:CreateMultipartUpload" - } - if _, hasUploadId := query["uploadId"]; hasUploadId { - if _, hasPartNumber := query["partNumber"]; hasPartNumber { - return "s3:UploadPart" - } - return "s3:CompleteMultipartUpload" // Complete multipart upload - } - // Default object write - return "s3:PutObject" - - case "DELETE": - // Object delete operations - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:DeleteObjectTagging" - } - if _, hasUploadId := query["uploadId"]; hasUploadId { - return "s3:AbortMultipartUpload" - } - // Default object delete - return "s3:DeleteObject" - } - } - - // Handle bucket-level operations - if bucket != "" { - switch method { - case "GET", "HEAD": - // Bucket read operations - check for specific query parameters - if _, hasAcl := query["acl"]; hasAcl { - return "s3:GetBucketAcl" - } - if _, hasPolicy := query["policy"]; hasPolicy { - return "s3:GetBucketPolicy" - } - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:GetBucketTagging" - } - if _, hasCors := query["cors"]; hasCors { - return "s3:GetBucketCors" - } - if _, hasVersioning := query["versioning"]; hasVersioning { - return "s3:GetBucketVersioning" - } - if _, hasNotification := query["notification"]; hasNotification { - return "s3:GetBucketNotification" - } - if _, hasObjectLock := query["object-lock"]; hasObjectLock { - return "s3:GetBucketObjectLockConfiguration" - } - if _, hasUploads := query["uploads"]; hasUploads { - return "s3:ListMultipartUploads" - } - if _, hasVersions := query["versions"]; hasVersions { - return "s3:ListBucketVersions" - } - // Default bucket read/list - return "s3:ListBucket" - - case "PUT": - // Bucket write operations - check for specific query parameters - if _, hasAcl := query["acl"]; hasAcl { - return "s3:PutBucketAcl" - } - if _, hasPolicy := query["policy"]; hasPolicy { - return "s3:PutBucketPolicy" - } - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:PutBucketTagging" - } - if _, hasCors := query["cors"]; hasCors { - return "s3:PutBucketCors" - } - if _, hasVersioning := query["versioning"]; hasVersioning { - return "s3:PutBucketVersioning" - } - if _, hasNotification := query["notification"]; hasNotification { - return "s3:PutBucketNotification" - } - if _, hasObjectLock := query["object-lock"]; hasObjectLock { - return "s3:PutBucketObjectLockConfiguration" - } - // Default bucket creation - return "s3:CreateBucket" - - case "DELETE": - // Bucket delete operations - check for specific query parameters - if _, hasPolicy := query["policy"]; hasPolicy { - return "s3:DeleteBucketPolicy" - } - if _, hasTagging := query["tagging"]; hasTagging { - return "s3:DeleteBucketTagging" - } - if _, hasCors := query["cors"]; hasCors { - return "s3:DeleteBucketCors" - } - // Default bucket delete - return "s3:DeleteBucket" - } - } - - // Fallback to legacy mapping for specific known actions - return mapLegacyActionToIAM(fallbackAction) -} - -// hasSpecificQueryParameters checks if the request has query parameters that indicate specific granular operations -func hasSpecificQueryParameters(query url.Values) bool { - // Check for object-level operation indicators - objectParams := []string{ - "acl", // ACL operations - "tagging", // Tagging operations - "retention", // Object retention - "legal-hold", // Legal hold - "versions", // Versioning operations - } - - // Check for multipart operation indicators - multipartParams := []string{ - "uploads", // List/initiate multipart uploads - "uploadId", // Part operations, complete, abort - "partNumber", // Upload part - } - - // Check for bucket-level operation indicators - bucketParams := []string{ - "policy", // Bucket policy operations - "website", // Website configuration - "cors", // CORS configuration - "lifecycle", // Lifecycle configuration - "notification", // Event notification - "replication", // Cross-region replication - "encryption", // Server-side encryption - "accelerate", // Transfer acceleration - "requestPayment", // Request payment - "logging", // Access logging - "versioning", // Versioning configuration - "inventory", // Inventory configuration - "analytics", // Analytics configuration - "metrics", // CloudWatch metrics - "location", // Bucket location - } - - // Check if any of these parameters are present - allParams := append(append(objectParams, multipartParams...), bucketParams...) - for _, param := range allParams { - if _, exists := query[param]; exists { - return true - } - } - - return false -} - -// isMethodActionMismatch detects when HTTP method doesn't align with the intended S3 action -// This provides a mechanism to use fallback action mapping when there's a semantic mismatch -func isMethodActionMismatch(method string, fallbackAction Action) bool { - switch fallbackAction { - case s3_constants.ACTION_WRITE: - // WRITE actions should typically use PUT, POST, or DELETE methods - // GET/HEAD methods indicate read-oriented operations - return method == "GET" || method == "HEAD" - - case s3_constants.ACTION_READ: - // READ actions should typically use GET or HEAD methods - // PUT, POST, DELETE methods indicate write-oriented operations - return method == "PUT" || method == "POST" || method == "DELETE" - - case s3_constants.ACTION_LIST: - // LIST actions should typically use GET method - // PUT, POST, DELETE methods indicate write-oriented operations - return method == "PUT" || method == "POST" || method == "DELETE" - - case s3_constants.ACTION_DELETE_BUCKET: - // DELETE_BUCKET should use DELETE method - // Other methods indicate different operation types - return method != "DELETE" - - default: - // For unknown actions or actions that already have s3: prefix, don't assume mismatch - return false - } -} - -// mapLegacyActionToIAM provides fallback mapping for legacy actions -// This ensures backward compatibility while the system transitions to granular actions -func mapLegacyActionToIAM(legacyAction Action) string { - switch legacyAction { - case s3_constants.ACTION_READ: - return "s3:GetObject" // Fallback for unmapped read operations - case s3_constants.ACTION_WRITE: - return "s3:PutObject" // Fallback for unmapped write operations - case s3_constants.ACTION_LIST: - return "s3:ListBucket" // Fallback for unmapped list operations - case s3_constants.ACTION_TAGGING: - return "s3:GetObjectTagging" // Fallback for unmapped tagging operations - case s3_constants.ACTION_READ_ACP: - return "s3:GetObjectAcl" // Fallback for unmapped ACL read operations - case s3_constants.ACTION_WRITE_ACP: - return "s3:PutObjectAcl" // Fallback for unmapped ACL write operations - case s3_constants.ACTION_DELETE_BUCKET: - return "s3:DeleteBucket" // Fallback for unmapped bucket delete operations - case s3_constants.ACTION_ADMIN: - return "s3:*" // Fallback for unmapped admin operations - - // Handle granular multipart actions (already correctly mapped) - case s3_constants.ACTION_CREATE_MULTIPART_UPLOAD: - return "s3:CreateMultipartUpload" - case s3_constants.ACTION_UPLOAD_PART: - return "s3:UploadPart" - case s3_constants.ACTION_COMPLETE_MULTIPART: - return "s3:CompleteMultipartUpload" - case s3_constants.ACTION_ABORT_MULTIPART: - return "s3:AbortMultipartUpload" - case s3_constants.ACTION_LIST_MULTIPART_UPLOADS: - return "s3:ListMultipartUploads" - case s3_constants.ACTION_LIST_PARTS: - return "s3:ListParts" - - default: - // If it's already a properly formatted S3 action, return as-is - actionStr := string(legacyAction) - if strings.HasPrefix(actionStr, "s3:") { - return actionStr - } - // Fallback: convert to S3 action format - return "s3:" + actionStr - } -} - -// extractRequestContext extracts request context for policy conditions -func extractRequestContext(r *http.Request) map[string]interface{} { - context := make(map[string]interface{}) - - // Extract source IP for IP-based conditions - sourceIP := extractSourceIP(r) - if sourceIP != "" { - context["sourceIP"] = sourceIP - } - - // Extract user agent - if userAgent := r.Header.Get("User-Agent"); userAgent != "" { - context["userAgent"] = userAgent - } - - // Extract request time - context["requestTime"] = r.Context().Value("requestTime") - - // Extract additional headers that might be useful for conditions - if referer := r.Header.Get("Referer"); referer != "" { - context["referer"] = referer - } - - return context -} - -// extractSourceIP extracts the real source IP from the request -func extractSourceIP(r *http.Request) string { - // Check X-Forwarded-For header (most common for proxied requests) - if forwardedFor := r.Header.Get("X-Forwarded-For"); forwardedFor != "" { - // X-Forwarded-For can contain multiple IPs, take the first one - if ips := strings.Split(forwardedFor, ","); len(ips) > 0 { - return strings.TrimSpace(ips[0]) - } - } - - // Check X-Real-IP header - if realIP := r.Header.Get("X-Real-IP"); realIP != "" { - return strings.TrimSpace(realIP) - } - - // Fall back to RemoteAddr - if ip, _, err := net.SplitHostPort(r.RemoteAddr); err == nil { - return ip - } - - return r.RemoteAddr -} - -// parseJWTToken parses a JWT token and returns its claims without verification -// Note: This is for extracting claims only. Verification is done by the IAM system. -func parseJWTToken(tokenString string) (jwt.MapClaims, error) { - token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{}) - if err != nil { - return nil, fmt.Errorf("failed to parse JWT token: %v", err) - } - - claims, ok := token.Claims.(jwt.MapClaims) - if !ok { - return nil, fmt.Errorf("invalid token claims") - } - - return claims, nil -} - -// minInt returns the minimum of two integers -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -// SetIAMIntegration adds advanced IAM integration to the S3ApiServer -func (s3a *S3ApiServer) SetIAMIntegration(iamManager *integration.IAMManager) { - if s3a.iam != nil { - s3a.iam.iamIntegration = NewS3IAMIntegration(iamManager, "localhost:8888") - glog.V(0).Infof("IAM integration successfully set on S3ApiServer") - } else { - glog.Errorf("Cannot set IAM integration: s3a.iam is nil") - } -} - -// EnhancedS3ApiServer extends S3ApiServer with IAM integration -type EnhancedS3ApiServer struct { - *S3ApiServer - iamIntegration *S3IAMIntegration -} - -// NewEnhancedS3ApiServer creates an S3 API server with IAM integration -func NewEnhancedS3ApiServer(baseServer *S3ApiServer, iamManager *integration.IAMManager) *EnhancedS3ApiServer { - // Set the IAM integration on the base server - baseServer.SetIAMIntegration(iamManager) - - return &EnhancedS3ApiServer{ - S3ApiServer: baseServer, - iamIntegration: NewS3IAMIntegration(iamManager, "localhost:8888"), - } -} - -// AuthenticateJWTRequest handles JWT authentication for S3 requests -func (enhanced *EnhancedS3ApiServer) AuthenticateJWTRequest(r *http.Request) (*Identity, s3err.ErrorCode) { - ctx := r.Context() - - // Use our IAM integration for JWT authentication - iamIdentity, errCode := enhanced.iamIntegration.AuthenticateJWT(ctx, r) - if errCode != s3err.ErrNone { - return nil, errCode - } - - // Convert IAMIdentity to the existing Identity structure - identity := &Identity{ - Name: iamIdentity.Name, - Account: iamIdentity.Account, - // Note: Actions will be determined by policy evaluation - Actions: []Action{}, // Empty - authorization handled by policy engine - } - - // Store session token for later authorization - r.Header.Set("X-SeaweedFS-Session-Token", iamIdentity.SessionToken) - r.Header.Set("X-SeaweedFS-Principal", iamIdentity.Principal) - - return identity, s3err.ErrNone -} - -// AuthorizeRequest handles authorization for S3 requests using policy engine -func (enhanced *EnhancedS3ApiServer) AuthorizeRequest(r *http.Request, identity *Identity, action Action) s3err.ErrorCode { - ctx := r.Context() - - // Get session info from request headers (set during authentication) - sessionToken := r.Header.Get("X-SeaweedFS-Session-Token") - principal := r.Header.Get("X-SeaweedFS-Principal") - - if sessionToken == "" || principal == "" { - glog.V(3).Info("No session information available for authorization") - return s3err.ErrAccessDenied - } - - // Extract bucket and object from request - bucket, object := s3_constants.GetBucketAndObject(r) - prefix := s3_constants.GetPrefix(r) - - // For List operations, use prefix for permission checking if available - if action == s3_constants.ACTION_LIST && object == "" && prefix != "" { - object = prefix - } else if (object == "/" || object == "") && prefix != "" { - object = prefix - } - - // Create IAM identity for authorization - iamIdentity := &IAMIdentity{ - Name: identity.Name, - Principal: principal, - SessionToken: sessionToken, - Account: identity.Account, - } - - // Use our IAM integration for authorization - return enhanced.iamIntegration.AuthorizeAction(ctx, iamIdentity, action, bucket, object, r) -} - -// OIDCIdentity represents an identity validated through OIDC -type OIDCIdentity struct { - UserID string - RoleArn string - Provider string -} - -// validateExternalOIDCToken validates an external OIDC token using the STS service's secure issuer-based lookup -// This method delegates to the STS service's validateWebIdentityToken for better security and efficiency -func (s3iam *S3IAMIntegration) validateExternalOIDCToken(ctx context.Context, token string) (*OIDCIdentity, error) { - - if s3iam.iamManager == nil { - return nil, fmt.Errorf("IAM manager not available") - } - - // Get STS service for secure token validation - stsService := s3iam.iamManager.GetSTSService() - if stsService == nil { - return nil, fmt.Errorf("STS service not available") - } - - // Use the STS service's secure validateWebIdentityToken method - // This method uses issuer-based lookup to select the correct provider, which is more secure and efficient - externalIdentity, provider, err := stsService.ValidateWebIdentityToken(ctx, token) - if err != nil { - return nil, fmt.Errorf("token validation failed: %w", err) - } - - if externalIdentity == nil { - return nil, fmt.Errorf("authentication succeeded but no identity returned") - } - - // Extract role from external identity attributes - rolesAttr, exists := externalIdentity.Attributes["roles"] - if !exists || rolesAttr == "" { - glog.V(3).Infof("No roles found in external identity") - return nil, fmt.Errorf("no roles found in external identity") - } - - // Parse roles (stored as comma-separated string) - rolesStr := strings.TrimSpace(rolesAttr) - roles := strings.Split(rolesStr, ",") - - // Clean up role names - var cleanRoles []string - for _, role := range roles { - cleanRole := strings.TrimSpace(role) - if cleanRole != "" { - cleanRoles = append(cleanRoles, cleanRole) - } - } - - if len(cleanRoles) == 0 { - glog.V(3).Infof("Empty roles list after parsing") - return nil, fmt.Errorf("no valid roles found in token") - } - - // Determine the primary role using intelligent selection - roleArn := s3iam.selectPrimaryRole(cleanRoles, externalIdentity) - - return &OIDCIdentity{ - UserID: externalIdentity.UserID, - RoleArn: roleArn, - Provider: fmt.Sprintf("%T", provider), // Use provider type as identifier - }, nil -} - -// selectPrimaryRole simply picks the first role from the list -// The OIDC provider should return roles in priority order (most important first) -func (s3iam *S3IAMIntegration) selectPrimaryRole(roles []string, externalIdentity *providers.ExternalIdentity) string { - if len(roles) == 0 { - return "" - } - - // Just pick the first one - keep it simple - selectedRole := roles[0] - return selectedRole -} - -// isSTSIssuer determines if an issuer belongs to the STS service -// Uses exact match against configured STS issuer for security and correctness -func (s3iam *S3IAMIntegration) isSTSIssuer(issuer string) bool { - if s3iam.stsService == nil || s3iam.stsService.Config == nil { - return false - } - - // Directly compare with the configured STS issuer for exact match - // This prevents false positives from external OIDC providers that might - // contain STS-related keywords in their issuer URLs - return issuer == s3iam.stsService.Config.Issuer -} diff --git a/weed/s3api/s3_iam_role_selection_test.go b/weed/s3api/s3_iam_role_selection_test.go deleted file mode 100644 index 91b1f2822..000000000 --- a/weed/s3api/s3_iam_role_selection_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package s3api - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/iam/providers" - "github.com/stretchr/testify/assert" -) - -func TestSelectPrimaryRole(t *testing.T) { - s3iam := &S3IAMIntegration{} - - t.Run("empty_roles_returns_empty", func(t *testing.T) { - identity := &providers.ExternalIdentity{Attributes: make(map[string]string)} - result := s3iam.selectPrimaryRole([]string{}, identity) - assert.Equal(t, "", result) - }) - - t.Run("single_role_returns_that_role", func(t *testing.T) { - identity := &providers.ExternalIdentity{Attributes: make(map[string]string)} - result := s3iam.selectPrimaryRole([]string{"admin"}, identity) - assert.Equal(t, "admin", result) - }) - - t.Run("multiple_roles_returns_first", func(t *testing.T) { - identity := &providers.ExternalIdentity{Attributes: make(map[string]string)} - roles := []string{"viewer", "manager", "admin"} - result := s3iam.selectPrimaryRole(roles, identity) - assert.Equal(t, "viewer", result, "Should return first role") - }) - - t.Run("order_matters", func(t *testing.T) { - identity := &providers.ExternalIdentity{Attributes: make(map[string]string)} - - // Test different orderings - roles1 := []string{"admin", "viewer", "manager"} - result1 := s3iam.selectPrimaryRole(roles1, identity) - assert.Equal(t, "admin", result1) - - roles2 := []string{"viewer", "admin", "manager"} - result2 := s3iam.selectPrimaryRole(roles2, identity) - assert.Equal(t, "viewer", result2) - - roles3 := []string{"manager", "admin", "viewer"} - result3 := s3iam.selectPrimaryRole(roles3, identity) - assert.Equal(t, "manager", result3) - }) - - t.Run("complex_enterprise_roles", func(t *testing.T) { - identity := &providers.ExternalIdentity{Attributes: make(map[string]string)} - roles := []string{ - "finance-readonly", - "hr-manager", - "it-system-admin", - "guest-viewer", - } - result := s3iam.selectPrimaryRole(roles, identity) - // Should return the first role - assert.Equal(t, "finance-readonly", result, "Should return first role in list") - }) -} diff --git a/weed/s3api/s3_iam_simple_test.go b/weed/s3api/s3_iam_simple_test.go deleted file mode 100644 index bdddeb24d..000000000 --- a/weed/s3api/s3_iam_simple_test.go +++ /dev/null @@ -1,490 +0,0 @@ -package s3api - -import ( - "context" - "net/http" - "net/http/httptest" - "net/url" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/iam/utils" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestS3IAMMiddleware tests the basic S3 IAM middleware functionality -func TestS3IAMMiddleware(t *testing.T) { - // Create IAM manager - iamManager := integration.NewIAMManager() - - // Initialize with test configuration - config := &integration.IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", - }, - } - - err := iamManager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Create S3 IAM integration - s3IAMIntegration := NewS3IAMIntegration(iamManager, "localhost:8888") - - // Test that integration is created successfully - assert.NotNil(t, s3IAMIntegration) - assert.True(t, s3IAMIntegration.enabled) -} - -// TestS3IAMMiddlewareJWTAuth tests JWT authentication -func TestS3IAMMiddlewareJWTAuth(t *testing.T) { - // Skip for now since it requires full setup - t.Skip("JWT authentication test requires full IAM setup") - - // Create IAM integration - s3iam := NewS3IAMIntegration(nil, "localhost:8888") // Disabled integration - - // Create test request with JWT token - req := httptest.NewRequest("GET", "/test-bucket/test-object", http.NoBody) - req.Header.Set("Authorization", "Bearer test-token") - - // Test authentication (should return not implemented when disabled) - ctx := context.Background() - identity, errCode := s3iam.AuthenticateJWT(ctx, req) - - assert.Nil(t, identity) - assert.NotEqual(t, errCode, 0) // Should return an error -} - -// TestBuildS3ResourceArn tests resource ARN building -func TestBuildS3ResourceArn(t *testing.T) { - tests := []struct { - name string - bucket string - object string - expected string - }{ - { - name: "empty bucket and object", - bucket: "", - object: "", - expected: "arn:seaweed:s3:::*", - }, - { - name: "bucket only", - bucket: "test-bucket", - object: "", - expected: "arn:seaweed:s3:::test-bucket", - }, - { - name: "bucket and object", - bucket: "test-bucket", - object: "test-object.txt", - expected: "arn:seaweed:s3:::test-bucket/test-object.txt", - }, - { - name: "bucket and object with leading slash", - bucket: "test-bucket", - object: "/test-object.txt", - expected: "arn:seaweed:s3:::test-bucket/test-object.txt", - }, - { - name: "bucket and nested object", - bucket: "test-bucket", - object: "folder/subfolder/test-object.txt", - expected: "arn:seaweed:s3:::test-bucket/folder/subfolder/test-object.txt", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := buildS3ResourceArn(tt.bucket, tt.object) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestDetermineGranularS3Action tests granular S3 action determination from HTTP requests -func TestDetermineGranularS3Action(t *testing.T) { - tests := []struct { - name string - method string - bucket string - objectKey string - queryParams map[string]string - fallbackAction Action - expected string - description string - }{ - // Object-level operations - { - name: "get_object", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_READ, - expected: "s3:GetObject", - description: "Basic object retrieval", - }, - { - name: "get_object_acl", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"acl": ""}, - fallbackAction: s3_constants.ACTION_READ_ACP, - expected: "s3:GetObjectAcl", - description: "Object ACL retrieval", - }, - { - name: "get_object_tagging", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"tagging": ""}, - fallbackAction: s3_constants.ACTION_TAGGING, - expected: "s3:GetObjectTagging", - description: "Object tagging retrieval", - }, - { - name: "put_object", - method: "PUT", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:PutObject", - description: "Basic object upload", - }, - { - name: "put_object_acl", - method: "PUT", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"acl": ""}, - fallbackAction: s3_constants.ACTION_WRITE_ACP, - expected: "s3:PutObjectAcl", - description: "Object ACL modification", - }, - { - name: "delete_object", - method: "DELETE", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_WRITE, // DELETE object uses WRITE fallback - expected: "s3:DeleteObject", - description: "Object deletion - correctly mapped to DeleteObject (not PutObject)", - }, - { - name: "delete_object_tagging", - method: "DELETE", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"tagging": ""}, - fallbackAction: s3_constants.ACTION_TAGGING, - expected: "s3:DeleteObjectTagging", - description: "Object tag deletion", - }, - - // Multipart upload operations - { - name: "create_multipart_upload", - method: "POST", - bucket: "test-bucket", - objectKey: "large-file.txt", - queryParams: map[string]string{"uploads": ""}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:CreateMultipartUpload", - description: "Multipart upload initiation", - }, - { - name: "upload_part", - method: "PUT", - bucket: "test-bucket", - objectKey: "large-file.txt", - queryParams: map[string]string{"uploadId": "12345", "partNumber": "1"}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:UploadPart", - description: "Multipart part upload", - }, - { - name: "complete_multipart_upload", - method: "POST", - bucket: "test-bucket", - objectKey: "large-file.txt", - queryParams: map[string]string{"uploadId": "12345"}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:CompleteMultipartUpload", - description: "Multipart upload completion", - }, - { - name: "abort_multipart_upload", - method: "DELETE", - bucket: "test-bucket", - objectKey: "large-file.txt", - queryParams: map[string]string{"uploadId": "12345"}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:AbortMultipartUpload", - description: "Multipart upload abort", - }, - - // Bucket-level operations - { - name: "list_bucket", - method: "GET", - bucket: "test-bucket", - objectKey: "", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_LIST, - expected: "s3:ListBucket", - description: "Bucket listing", - }, - { - name: "get_bucket_acl", - method: "GET", - bucket: "test-bucket", - objectKey: "", - queryParams: map[string]string{"acl": ""}, - fallbackAction: s3_constants.ACTION_READ_ACP, - expected: "s3:GetBucketAcl", - description: "Bucket ACL retrieval", - }, - { - name: "put_bucket_policy", - method: "PUT", - bucket: "test-bucket", - objectKey: "", - queryParams: map[string]string{"policy": ""}, - fallbackAction: s3_constants.ACTION_WRITE, - expected: "s3:PutBucketPolicy", - description: "Bucket policy modification", - }, - { - name: "delete_bucket", - method: "DELETE", - bucket: "test-bucket", - objectKey: "", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_DELETE_BUCKET, - expected: "s3:DeleteBucket", - description: "Bucket deletion", - }, - { - name: "list_multipart_uploads", - method: "GET", - bucket: "test-bucket", - objectKey: "", - queryParams: map[string]string{"uploads": ""}, - fallbackAction: s3_constants.ACTION_LIST, - expected: "s3:ListMultipartUploads", - description: "List multipart uploads in bucket", - }, - - // Fallback scenarios - { - name: "legacy_read_fallback", - method: "GET", - bucket: "", - objectKey: "", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_READ, - expected: "s3:GetObject", - description: "Legacy read action fallback", - }, - { - name: "already_granular_action", - method: "GET", - bucket: "", - objectKey: "", - queryParams: map[string]string{}, - fallbackAction: "s3:GetBucketLocation", // Already granular - expected: "s3:GetBucketLocation", - description: "Already granular action passed through", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create HTTP request with query parameters - req := &http.Request{ - Method: tt.method, - URL: &url.URL{Path: "/" + tt.bucket + "/" + tt.objectKey}, - } - - // Add query parameters - query := req.URL.Query() - for key, value := range tt.queryParams { - query.Set(key, value) - } - req.URL.RawQuery = query.Encode() - - // Test the granular action determination - result := determineGranularS3Action(req, tt.fallbackAction, tt.bucket, tt.objectKey) - - assert.Equal(t, tt.expected, result, - "Test %s failed: %s. Expected %s but got %s", - tt.name, tt.description, tt.expected, result) - }) - } -} - -// TestMapLegacyActionToIAM tests the legacy action fallback mapping -func TestMapLegacyActionToIAM(t *testing.T) { - tests := []struct { - name string - legacyAction Action - expected string - }{ - { - name: "read_action_fallback", - legacyAction: s3_constants.ACTION_READ, - expected: "s3:GetObject", - }, - { - name: "write_action_fallback", - legacyAction: s3_constants.ACTION_WRITE, - expected: "s3:PutObject", - }, - { - name: "admin_action_fallback", - legacyAction: s3_constants.ACTION_ADMIN, - expected: "s3:*", - }, - { - name: "granular_multipart_action", - legacyAction: s3_constants.ACTION_CREATE_MULTIPART_UPLOAD, - expected: "s3:CreateMultipartUpload", - }, - { - name: "unknown_action_with_s3_prefix", - legacyAction: "s3:CustomAction", - expected: "s3:CustomAction", - }, - { - name: "unknown_action_without_prefix", - legacyAction: "CustomAction", - expected: "s3:CustomAction", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := mapLegacyActionToIAM(tt.legacyAction) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestExtractSourceIP tests source IP extraction from requests -func TestExtractSourceIP(t *testing.T) { - tests := []struct { - name string - setupReq func() *http.Request - expectedIP string - }{ - { - name: "X-Forwarded-For header", - setupReq: func() *http.Request { - req := httptest.NewRequest("GET", "/test", http.NoBody) - req.Header.Set("X-Forwarded-For", "192.168.1.100, 10.0.0.1") - return req - }, - expectedIP: "192.168.1.100", - }, - { - name: "X-Real-IP header", - setupReq: func() *http.Request { - req := httptest.NewRequest("GET", "/test", http.NoBody) - req.Header.Set("X-Real-IP", "192.168.1.200") - return req - }, - expectedIP: "192.168.1.200", - }, - { - name: "RemoteAddr fallback", - setupReq: func() *http.Request { - req := httptest.NewRequest("GET", "/test", http.NoBody) - req.RemoteAddr = "192.168.1.300:12345" - return req - }, - expectedIP: "192.168.1.300", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := tt.setupReq() - result := extractSourceIP(req) - assert.Equal(t, tt.expectedIP, result) - }) - } -} - -// TestExtractRoleNameFromPrincipal tests role name extraction -func TestExtractRoleNameFromPrincipal(t *testing.T) { - tests := []struct { - name string - principal string - expected string - }{ - { - name: "valid assumed role ARN", - principal: "arn:seaweed:sts::assumed-role/S3ReadOnlyRole/session-123", - expected: "S3ReadOnlyRole", - }, - { - name: "invalid format", - principal: "invalid-principal", - expected: "", // Returns empty string to signal invalid format - }, - { - name: "missing session name", - principal: "arn:seaweed:sts::assumed-role/TestRole", - expected: "TestRole", // Extracts role name even without session name - }, - { - name: "empty principal", - principal: "", - expected: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := utils.ExtractRoleNameFromPrincipal(tt.principal) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestIAMIdentityIsAdmin tests the IsAdmin method -func TestIAMIdentityIsAdmin(t *testing.T) { - identity := &IAMIdentity{ - Name: "test-identity", - Principal: "arn:seaweed:sts::assumed-role/TestRole/session", - SessionToken: "test-token", - } - - // In our implementation, IsAdmin always returns false since admin status - // is determined by policies, not identity - result := identity.IsAdmin() - assert.False(t, result) -} diff --git a/weed/s3api/s3_jwt_auth_test.go b/weed/s3api/s3_jwt_auth_test.go deleted file mode 100644 index f6b2774d7..000000000 --- a/weed/s3api/s3_jwt_auth_test.go +++ /dev/null @@ -1,557 +0,0 @@ -package s3api - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/ldap" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createTestJWTAuth creates a test JWT token with the specified issuer, subject and signing key -func createTestJWTAuth(t *testing.T, issuer, subject, signingKey string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client-id", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - // Add claims that trust policy validation expects - "idp": "test-oidc", // Identity provider claim for trust policy matching - }) - - tokenString, err := token.SignedString([]byte(signingKey)) - require.NoError(t, err) - return tokenString -} - -// TestJWTAuthenticationFlow tests the JWT authentication flow without full S3 server -func TestJWTAuthenticationFlow(t *testing.T) { - // Set up IAM system - iamManager := setupTestIAMManager(t) - - // Create IAM integration - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - - // Create IAM server with integration - iamServer := setupIAMWithIntegration(t, iamManager, s3iam) - - // Test scenarios - tests := []struct { - name string - roleArn string - setupRole func(ctx context.Context, mgr *integration.IAMManager) - testOperations []JWTTestOperation - }{ - { - name: "Read-Only JWT Authentication", - roleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - setupRole: setupTestReadOnlyRole, - testOperations: []JWTTestOperation{ - {Action: s3_constants.ACTION_READ, Bucket: "test-bucket", Object: "test-file.txt", ExpectedAllow: true}, - {Action: s3_constants.ACTION_WRITE, Bucket: "test-bucket", Object: "new-file.txt", ExpectedAllow: false}, - {Action: s3_constants.ACTION_LIST, Bucket: "test-bucket", Object: "", ExpectedAllow: true}, - }, - }, - { - name: "Admin JWT Authentication", - roleArn: "arn:seaweed:iam::role/S3AdminRole", - setupRole: setupTestAdminRole, - testOperations: []JWTTestOperation{ - {Action: s3_constants.ACTION_READ, Bucket: "admin-bucket", Object: "admin-file.txt", ExpectedAllow: true}, - {Action: s3_constants.ACTION_WRITE, Bucket: "admin-bucket", Object: "new-admin-file.txt", ExpectedAllow: true}, - {Action: s3_constants.ACTION_DELETE_BUCKET, Bucket: "admin-bucket", Object: "", ExpectedAllow: true}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - // Set up role - tt.setupRole(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTAuth(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Assume role to get JWT - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: tt.roleArn, - WebIdentityToken: validJWTToken, - RoleSessionName: "jwt-auth-test", - }) - require.NoError(t, err) - - jwtToken := response.Credentials.SessionToken - - // Test each operation - for _, op := range tt.testOperations { - t.Run(string(op.Action), func(t *testing.T) { - // Test JWT authentication - identity, errCode := testJWTAuthentication(t, iamServer, jwtToken) - require.Equal(t, s3err.ErrNone, errCode, "JWT authentication should succeed") - require.NotNil(t, identity) - - // Test authorization with appropriate role based on test case - var testRoleName string - if tt.name == "Read-Only JWT Authentication" { - testRoleName = "TestReadRole" - } else { - testRoleName = "TestAdminRole" - } - allowed := testJWTAuthorizationWithRole(t, iamServer, identity, op.Action, op.Bucket, op.Object, jwtToken, testRoleName) - assert.Equal(t, op.ExpectedAllow, allowed, "Operation %s should have expected result", op.Action) - }) - } - }) - } -} - -// TestJWTTokenValidation tests JWT token validation edge cases -func TestJWTTokenValidation(t *testing.T) { - iamManager := setupTestIAMManager(t) - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - iamServer := setupIAMWithIntegration(t, iamManager, s3iam) - - tests := []struct { - name string - token string - expectedErr s3err.ErrorCode - }{ - { - name: "Empty token", - token: "", - expectedErr: s3err.ErrAccessDenied, - }, - { - name: "Invalid token format", - token: "invalid-token", - expectedErr: s3err.ErrAccessDenied, - }, - { - name: "Expired token", - token: "expired-session-token", - expectedErr: s3err.ErrAccessDenied, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - identity, errCode := testJWTAuthentication(t, iamServer, tt.token) - - assert.Equal(t, tt.expectedErr, errCode) - assert.Nil(t, identity) - }) - } -} - -// TestRequestContextExtraction tests context extraction for policy conditions -func TestRequestContextExtraction(t *testing.T) { - tests := []struct { - name string - setupRequest func() *http.Request - expectedIP string - expectedUA string - }{ - { - name: "Standard request with IP", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", http.NoBody) - req.Header.Set("X-Forwarded-For", "192.168.1.100") - req.Header.Set("User-Agent", "aws-sdk-go/1.0") - return req - }, - expectedIP: "192.168.1.100", - expectedUA: "aws-sdk-go/1.0", - }, - { - name: "Request with X-Real-IP", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", http.NoBody) - req.Header.Set("X-Real-IP", "10.0.0.1") - req.Header.Set("User-Agent", "boto3/1.0") - return req - }, - expectedIP: "10.0.0.1", - expectedUA: "boto3/1.0", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := tt.setupRequest() - - // Extract request context - context := extractRequestContext(req) - - if tt.expectedIP != "" { - assert.Equal(t, tt.expectedIP, context["sourceIP"]) - } - - if tt.expectedUA != "" { - assert.Equal(t, tt.expectedUA, context["userAgent"]) - } - }) - } -} - -// TestIPBasedPolicyEnforcement tests IP-based conditional policies -func TestIPBasedPolicyEnforcement(t *testing.T) { - iamManager := setupTestIAMManager(t) - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - ctx := context.Background() - - // Set up IP-restricted role - setupTestIPRestrictedRole(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTAuth(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Assume role - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3IPRestrictedRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "ip-test-session", - }) - require.NoError(t, err) - - tests := []struct { - name string - sourceIP string - shouldAllow bool - }{ - { - name: "Allow from office IP", - sourceIP: "192.168.1.100", - shouldAllow: true, - }, - { - name: "Block from external IP", - sourceIP: "8.8.8.8", - shouldAllow: false, - }, - { - name: "Allow from internal range", - sourceIP: "10.0.0.1", - shouldAllow: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create request with specific IP - req := httptest.NewRequest("GET", "/restricted-bucket/file.txt", http.NoBody) - req.Header.Set("Authorization", "Bearer "+response.Credentials.SessionToken) - req.Header.Set("X-Forwarded-For", tt.sourceIP) - - // Create IAM identity for testing - identity := &IAMIdentity{ - Name: "test-user", - Principal: response.AssumedRoleUser.Arn, - SessionToken: response.Credentials.SessionToken, - } - - // Test authorization with IP condition - errCode := s3iam.AuthorizeAction(ctx, identity, s3_constants.ACTION_READ, "restricted-bucket", "file.txt", req) - - if tt.shouldAllow { - assert.Equal(t, s3err.ErrNone, errCode, "Should allow access from IP %s", tt.sourceIP) - } else { - assert.Equal(t, s3err.ErrAccessDenied, errCode, "Should deny access from IP %s", tt.sourceIP) - } - }) - } -} - -// JWTTestOperation represents a test operation for JWT testing -type JWTTestOperation struct { - Action Action - Bucket string - Object string - ExpectedAllow bool -} - -// Helper functions - -func setupTestIAMManager(t *testing.T) *integration.IAMManager { - // Create IAM manager - manager := integration.NewIAMManager() - - // Initialize with test configuration - config := &integration.IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", - }, - } - - err := manager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Set up test identity providers - setupTestIdentityProviders(t, manager) - - return manager -} - -func setupTestIdentityProviders(t *testing.T, manager *integration.IAMManager) { - // Set up OIDC provider - oidcProvider := oidc.NewMockOIDCProvider("test-oidc") - oidcConfig := &oidc.OIDCConfig{ - Issuer: "https://test-issuer.com", - ClientID: "test-client-id", - } - err := oidcProvider.Initialize(oidcConfig) - require.NoError(t, err) - oidcProvider.SetupDefaultTestData() - - // Set up LDAP provider - ldapProvider := ldap.NewMockLDAPProvider("test-ldap") - err = ldapProvider.Initialize(nil) // Mock doesn't need real config - require.NoError(t, err) - ldapProvider.SetupDefaultTestData() - - // Register providers - err = manager.RegisterIdentityProvider(oidcProvider) - require.NoError(t, err) - err = manager.RegisterIdentityProvider(ldapProvider) - require.NoError(t, err) -} - -func setupIAMWithIntegration(t *testing.T, iamManager *integration.IAMManager, s3iam *S3IAMIntegration) *IdentityAccessManagement { - // Create a minimal IdentityAccessManagement for testing - iam := &IdentityAccessManagement{ - isAuthEnabled: true, - } - - // Set IAM integration - iam.SetIAMIntegration(s3iam) - - return iam -} - -func setupTestReadOnlyRole(ctx context.Context, manager *integration.IAMManager) { - // Create read-only policy - readPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3Read", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3ReadOnlyPolicy", readPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3ReadOnlyRole", &integration.RoleDefinition{ - RoleName: "S3ReadOnlyRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - }) - - // Also create a TestReadRole for read-only authorization testing - manager.CreateRole(ctx, "", "TestReadRole", &integration.RoleDefinition{ - RoleName: "TestReadRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - }) -} - -func setupTestAdminRole(ctx context.Context, manager *integration.IAMManager) { - // Create admin policy - adminPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowAllS3", - Effect: "Allow", - Action: []string{"s3:*"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "AllowSTSSessionValidation", - Effect: "Allow", - Action: []string{"sts:ValidateSession"}, - Resource: []string{"*"}, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3AdminPolicy", adminPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3AdminRole", &integration.RoleDefinition{ - RoleName: "S3AdminRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3AdminPolicy"}, - }) - - // Also create a TestAdminRole with admin policy for authorization testing - manager.CreateRole(ctx, "", "TestAdminRole", &integration.RoleDefinition{ - RoleName: "TestAdminRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3AdminPolicy"}, // Admin gets full access - }) -} - -func setupTestIPRestrictedRole(ctx context.Context, manager *integration.IAMManager) { - // Create IP-restricted policy - restrictedPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowFromOffice", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - Condition: map[string]map[string]interface{}{ - "IpAddress": { - "seaweed:SourceIP": []string{"192.168.1.0/24", "10.0.0.0/8"}, - }, - }, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3IPRestrictedPolicy", restrictedPolicy) - - // Create role - manager.CreateRole(ctx, "", "S3IPRestrictedRole", &integration.RoleDefinition{ - RoleName: "S3IPRestrictedRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3IPRestrictedPolicy"}, - }) -} - -func testJWTAuthentication(t *testing.T, iam *IdentityAccessManagement, token string) (*Identity, s3err.ErrorCode) { - // Create test request with JWT - req := httptest.NewRequest("GET", "/test-bucket/test-object", http.NoBody) - req.Header.Set("Authorization", "Bearer "+token) - - // Test authentication - if iam.iamIntegration == nil { - return nil, s3err.ErrNotImplemented - } - - return iam.authenticateJWTWithIAM(req) -} - -func testJWTAuthorization(t *testing.T, iam *IdentityAccessManagement, identity *Identity, action Action, bucket, object, token string) bool { - return testJWTAuthorizationWithRole(t, iam, identity, action, bucket, object, token, "TestRole") -} - -func testJWTAuthorizationWithRole(t *testing.T, iam *IdentityAccessManagement, identity *Identity, action Action, bucket, object, token, roleName string) bool { - // Create test request - req := httptest.NewRequest("GET", "/"+bucket+"/"+object, http.NoBody) - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("X-SeaweedFS-Session-Token", token) - - // Use a proper principal ARN format that matches what STS would generate - principalArn := "arn:seaweed:sts::assumed-role/" + roleName + "/test-session" - req.Header.Set("X-SeaweedFS-Principal", principalArn) - - // Test authorization - if iam.iamIntegration == nil { - return false - } - - errCode := iam.authorizeWithIAM(req, identity, action, bucket, object) - return errCode == s3err.ErrNone -} diff --git a/weed/s3api/s3_list_parts_action_test.go b/weed/s3api/s3_list_parts_action_test.go deleted file mode 100644 index c0e9aa8a1..000000000 --- a/weed/s3api/s3_list_parts_action_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package s3api - -import ( - "net/http" - "net/url" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" -) - -// TestListPartsActionMapping tests the fix for the missing s3:ListParts action mapping -// when GET requests include an uploadId query parameter -func TestListPartsActionMapping(t *testing.T) { - testCases := []struct { - name string - method string - bucket string - objectKey string - queryParams map[string]string - fallbackAction Action - expectedAction string - description string - }{ - { - name: "get_object_without_uploadId", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{}, - fallbackAction: s3_constants.ACTION_READ, - expectedAction: "s3:GetObject", - description: "GET request without uploadId should map to s3:GetObject", - }, - { - name: "get_object_with_uploadId", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"uploadId": "test-upload-id"}, - fallbackAction: s3_constants.ACTION_READ, - expectedAction: "s3:ListParts", - description: "GET request with uploadId should map to s3:ListParts (this was the missing mapping)", - }, - { - name: "get_object_with_uploadId_and_other_params", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{ - "uploadId": "test-upload-id-123", - "max-parts": "100", - "part-number-marker": "50", - }, - fallbackAction: s3_constants.ACTION_READ, - expectedAction: "s3:ListParts", - description: "GET request with uploadId plus other multipart params should map to s3:ListParts", - }, - { - name: "get_object_versions", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"versions": ""}, - fallbackAction: s3_constants.ACTION_READ, - expectedAction: "s3:GetObjectVersion", - description: "GET request with versions should still map to s3:GetObjectVersion (precedence check)", - }, - { - name: "get_object_acl_without_uploadId", - method: "GET", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"acl": ""}, - fallbackAction: s3_constants.ACTION_READ_ACP, - expectedAction: "s3:GetObjectAcl", - description: "GET request with acl should map to s3:GetObjectAcl (not affected by uploadId fix)", - }, - { - name: "post_multipart_upload_without_uploadId", - method: "POST", - bucket: "test-bucket", - objectKey: "test-object.txt", - queryParams: map[string]string{"uploads": ""}, - fallbackAction: s3_constants.ACTION_WRITE, - expectedAction: "s3:CreateMultipartUpload", - description: "POST request to initiate multipart upload should not be affected by uploadId fix", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create HTTP request with query parameters - req := &http.Request{ - Method: tc.method, - URL: &url.URL{Path: "/" + tc.bucket + "/" + tc.objectKey}, - } - - // Add query parameters - query := req.URL.Query() - for key, value := range tc.queryParams { - query.Set(key, value) - } - req.URL.RawQuery = query.Encode() - - // Call the granular action determination function - action := determineGranularS3Action(req, tc.fallbackAction, tc.bucket, tc.objectKey) - - // Verify the action mapping - assert.Equal(t, tc.expectedAction, action, - "Test case: %s - %s", tc.name, tc.description) - }) - } -} - -// TestListPartsActionMappingSecurityScenarios tests security scenarios for the ListParts fix -func TestListPartsActionMappingSecurityScenarios(t *testing.T) { - t.Run("privilege_separation_listparts_vs_getobject", func(t *testing.T) { - // Scenario: User has permission to list multipart upload parts but NOT to get the actual object content - // This is a common enterprise pattern where users can manage uploads but not read final objects - - // Test request 1: List parts with uploadId - req1 := &http.Request{ - Method: "GET", - URL: &url.URL{Path: "/secure-bucket/confidential-document.pdf"}, - } - query1 := req1.URL.Query() - query1.Set("uploadId", "active-upload-123") - req1.URL.RawQuery = query1.Encode() - action1 := determineGranularS3Action(req1, s3_constants.ACTION_READ, "secure-bucket", "confidential-document.pdf") - - // Test request 2: Get object without uploadId - req2 := &http.Request{ - Method: "GET", - URL: &url.URL{Path: "/secure-bucket/confidential-document.pdf"}, - } - action2 := determineGranularS3Action(req2, s3_constants.ACTION_READ, "secure-bucket", "confidential-document.pdf") - - // These should be different actions, allowing different permissions - assert.Equal(t, "s3:ListParts", action1, "Listing multipart parts should require s3:ListParts permission") - assert.Equal(t, "s3:GetObject", action2, "Reading object content should require s3:GetObject permission") - assert.NotEqual(t, action1, action2, "ListParts and GetObject should be separate permissions for security") - }) - - t.Run("policy_enforcement_precision", func(t *testing.T) { - // This test documents the security improvement - before the fix, both operations - // would incorrectly map to s3:GetObject, preventing fine-grained access control - - testCases := []struct { - description string - queryParams map[string]string - expectedAction string - securityNote string - }{ - { - description: "List multipart upload parts", - queryParams: map[string]string{"uploadId": "upload-abc123"}, - expectedAction: "s3:ListParts", - securityNote: "FIXED: Now correctly maps to s3:ListParts instead of s3:GetObject", - }, - { - description: "Get actual object content", - queryParams: map[string]string{}, - expectedAction: "s3:GetObject", - securityNote: "UNCHANGED: Still correctly maps to s3:GetObject", - }, - { - description: "Get object with complex upload ID", - queryParams: map[string]string{"uploadId": "complex-upload-id-with-hyphens-123-abc-def"}, - expectedAction: "s3:ListParts", - securityNote: "FIXED: Complex upload IDs now correctly detected", - }, - } - - for _, tc := range testCases { - req := &http.Request{ - Method: "GET", - URL: &url.URL{Path: "/test-bucket/test-object"}, - } - - query := req.URL.Query() - for key, value := range tc.queryParams { - query.Set(key, value) - } - req.URL.RawQuery = query.Encode() - - action := determineGranularS3Action(req, s3_constants.ACTION_READ, "test-bucket", "test-object") - - assert.Equal(t, tc.expectedAction, action, - "%s - %s", tc.description, tc.securityNote) - } - }) -} - -// TestListPartsActionRealWorldScenarios tests realistic enterprise multipart upload scenarios -func TestListPartsActionRealWorldScenarios(t *testing.T) { - t.Run("large_file_upload_workflow", func(t *testing.T) { - // Simulate a large file upload workflow where users need different permissions for each step - - // Step 1: Initiate multipart upload (POST with uploads query) - req1 := &http.Request{ - Method: "POST", - URL: &url.URL{Path: "/data/large-dataset.csv"}, - } - query1 := req1.URL.Query() - query1.Set("uploads", "") - req1.URL.RawQuery = query1.Encode() - action1 := determineGranularS3Action(req1, s3_constants.ACTION_WRITE, "data", "large-dataset.csv") - - // Step 2: List existing parts (GET with uploadId query) - THIS WAS THE MISSING MAPPING - req2 := &http.Request{ - Method: "GET", - URL: &url.URL{Path: "/data/large-dataset.csv"}, - } - query2 := req2.URL.Query() - query2.Set("uploadId", "dataset-upload-20240827-001") - req2.URL.RawQuery = query2.Encode() - action2 := determineGranularS3Action(req2, s3_constants.ACTION_READ, "data", "large-dataset.csv") - - // Step 3: Upload a part (PUT with uploadId and partNumber) - req3 := &http.Request{ - Method: "PUT", - URL: &url.URL{Path: "/data/large-dataset.csv"}, - } - query3 := req3.URL.Query() - query3.Set("uploadId", "dataset-upload-20240827-001") - query3.Set("partNumber", "5") - req3.URL.RawQuery = query3.Encode() - action3 := determineGranularS3Action(req3, s3_constants.ACTION_WRITE, "data", "large-dataset.csv") - - // Step 4: Complete multipart upload (POST with uploadId) - req4 := &http.Request{ - Method: "POST", - URL: &url.URL{Path: "/data/large-dataset.csv"}, - } - query4 := req4.URL.Query() - query4.Set("uploadId", "dataset-upload-20240827-001") - req4.URL.RawQuery = query4.Encode() - action4 := determineGranularS3Action(req4, s3_constants.ACTION_WRITE, "data", "large-dataset.csv") - - // Verify each step has the correct action mapping - assert.Equal(t, "s3:CreateMultipartUpload", action1, "Step 1: Initiate upload") - assert.Equal(t, "s3:ListParts", action2, "Step 2: List parts (FIXED by this PR)") - assert.Equal(t, "s3:UploadPart", action3, "Step 3: Upload part") - assert.Equal(t, "s3:CompleteMultipartUpload", action4, "Step 4: Complete upload") - - // Verify that each step requires different permissions (security principle) - actions := []string{action1, action2, action3, action4} - for i, action := range actions { - for j, otherAction := range actions { - if i != j { - assert.NotEqual(t, action, otherAction, - "Each multipart operation step should require different permissions for fine-grained control") - } - } - } - }) - - t.Run("edge_case_upload_ids", func(t *testing.T) { - // Test various upload ID formats to ensure the fix works with real AWS-compatible upload IDs - - testUploadIds := []string{ - "simple123", - "complex-upload-id-with-hyphens", - "upload_with_underscores_123", - "2VmVGvGhqM0sXnVeBjMNCqtRvr.ygGz0pWPLKAj.YW3zK7VmpFHYuLKVR8OOXnHEhP3WfwlwLKMYJxoHgkGYYv", - "very-long-upload-id-that-might-be-generated-by-aws-s3-or-compatible-services-abcd1234", - "uploadId-with.dots.and-dashes_and_underscores123", - } - - for _, uploadId := range testUploadIds { - req := &http.Request{ - Method: "GET", - URL: &url.URL{Path: "/test-bucket/test-file.bin"}, - } - query := req.URL.Query() - query.Set("uploadId", uploadId) - req.URL.RawQuery = query.Encode() - - action := determineGranularS3Action(req, s3_constants.ACTION_READ, "test-bucket", "test-file.bin") - - assert.Equal(t, "s3:ListParts", action, - "Upload ID format %s should be correctly detected and mapped to s3:ListParts", uploadId) - } - }) -} diff --git a/weed/s3api/s3_multipart_iam.go b/weed/s3api/s3_multipart_iam.go deleted file mode 100644 index a9d6c7ccf..000000000 --- a/weed/s3api/s3_multipart_iam.go +++ /dev/null @@ -1,420 +0,0 @@ -package s3api - -import ( - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// S3MultipartIAMManager handles IAM integration for multipart upload operations -type S3MultipartIAMManager struct { - s3iam *S3IAMIntegration -} - -// NewS3MultipartIAMManager creates a new multipart IAM manager -func NewS3MultipartIAMManager(s3iam *S3IAMIntegration) *S3MultipartIAMManager { - return &S3MultipartIAMManager{ - s3iam: s3iam, - } -} - -// MultipartUploadRequest represents a multipart upload request -type MultipartUploadRequest struct { - Bucket string `json:"bucket"` // S3 bucket name - ObjectKey string `json:"object_key"` // S3 object key - UploadID string `json:"upload_id"` // Multipart upload ID - PartNumber int `json:"part_number"` // Part number for upload part - Operation string `json:"operation"` // Multipart operation type - SessionToken string `json:"session_token"` // JWT session token - Headers map[string]string `json:"headers"` // Request headers - ContentSize int64 `json:"content_size"` // Content size for validation -} - -// MultipartUploadPolicy represents security policies for multipart uploads -type MultipartUploadPolicy struct { - MaxPartSize int64 `json:"max_part_size"` // Maximum part size (5GB AWS limit) - MinPartSize int64 `json:"min_part_size"` // Minimum part size (5MB AWS limit, except last part) - MaxParts int `json:"max_parts"` // Maximum number of parts (10,000 AWS limit) - MaxUploadDuration time.Duration `json:"max_upload_duration"` // Maximum time to complete multipart upload - AllowedContentTypes []string `json:"allowed_content_types"` // Allowed content types - RequiredHeaders []string `json:"required_headers"` // Required headers for validation - IPWhitelist []string `json:"ip_whitelist"` // Allowed IP addresses/ranges -} - -// MultipartOperation represents different multipart upload operations -type MultipartOperation string - -const ( - MultipartOpInitiate MultipartOperation = "initiate" - MultipartOpUploadPart MultipartOperation = "upload_part" - MultipartOpComplete MultipartOperation = "complete" - MultipartOpAbort MultipartOperation = "abort" - MultipartOpList MultipartOperation = "list" - MultipartOpListParts MultipartOperation = "list_parts" -) - -// ValidateMultipartOperationWithIAM validates multipart operations using IAM policies -func (iam *IdentityAccessManagement) ValidateMultipartOperationWithIAM(r *http.Request, identity *Identity, operation MultipartOperation) s3err.ErrorCode { - if iam.iamIntegration == nil { - // Fall back to standard validation - return s3err.ErrNone - } - - // Extract bucket and object from request - bucket, object := s3_constants.GetBucketAndObject(r) - - // Determine the S3 action based on multipart operation - action := determineMultipartS3Action(operation) - - // Extract session token from request - sessionToken := extractSessionTokenFromRequest(r) - if sessionToken == "" { - // No session token - use standard auth - return s3err.ErrNone - } - - // Retrieve the actual principal ARN from the request header - // This header is set during initial authentication and contains the correct assumed role ARN - principalArn := r.Header.Get("X-SeaweedFS-Principal") - if principalArn == "" { - glog.V(0).Info("IAM authorization for multipart operation failed: missing principal ARN in request header") - return s3err.ErrAccessDenied - } - - // Create IAM identity for authorization - iamIdentity := &IAMIdentity{ - Name: identity.Name, - Principal: principalArn, - SessionToken: sessionToken, - Account: identity.Account, - } - - // Authorize using IAM - ctx := r.Context() - errCode := iam.iamIntegration.AuthorizeAction(ctx, iamIdentity, action, bucket, object, r) - if errCode != s3err.ErrNone { - glog.V(3).Infof("IAM authorization failed for multipart operation: principal=%s operation=%s action=%s bucket=%s object=%s", - iamIdentity.Principal, operation, action, bucket, object) - return errCode - } - - glog.V(3).Infof("IAM authorization succeeded for multipart operation: principal=%s operation=%s action=%s bucket=%s object=%s", - iamIdentity.Principal, operation, action, bucket, object) - return s3err.ErrNone -} - -// ValidateMultipartRequestWithPolicy validates multipart request against security policy -func (policy *MultipartUploadPolicy) ValidateMultipartRequestWithPolicy(req *MultipartUploadRequest) error { - if req == nil { - return fmt.Errorf("multipart request cannot be nil") - } - - // Validate part size for upload part operations - if req.Operation == string(MultipartOpUploadPart) { - if req.ContentSize > policy.MaxPartSize { - return fmt.Errorf("part size %d exceeds maximum allowed %d", req.ContentSize, policy.MaxPartSize) - } - - // Minimum part size validation (except for last part) - // Note: Last part validation would require knowing if this is the final part - if req.ContentSize < policy.MinPartSize && req.ContentSize > 0 { - glog.V(2).Infof("Part size %d is below minimum %d - assuming last part", req.ContentSize, policy.MinPartSize) - } - - // Validate part number - if req.PartNumber < 1 || req.PartNumber > policy.MaxParts { - return fmt.Errorf("part number %d is invalid (must be 1-%d)", req.PartNumber, policy.MaxParts) - } - } - - // Validate required headers first - if req.Headers != nil { - for _, requiredHeader := range policy.RequiredHeaders { - if _, exists := req.Headers[requiredHeader]; !exists { - // Check lowercase version - if _, exists := req.Headers[strings.ToLower(requiredHeader)]; !exists { - return fmt.Errorf("required header %s is missing", requiredHeader) - } - } - } - } - - // Validate content type if specified - if len(policy.AllowedContentTypes) > 0 && req.Headers != nil { - contentType := req.Headers["Content-Type"] - if contentType == "" { - contentType = req.Headers["content-type"] - } - - allowed := false - for _, allowedType := range policy.AllowedContentTypes { - if contentType == allowedType { - allowed = true - break - } - } - - if !allowed { - return fmt.Errorf("content type %s is not allowed", contentType) - } - } - - return nil -} - -// Enhanced multipart handlers with IAM integration - -// NewMultipartUploadWithIAM handles initiate multipart upload with IAM validation -func (s3a *S3ApiServer) NewMultipartUploadWithIAM(w http.ResponseWriter, r *http.Request) { - // Validate IAM permissions first - if s3a.iam.iamIntegration != nil { - if identity, errCode := s3a.iam.authRequest(r, s3_constants.ACTION_WRITE); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } else { - // Additional multipart-specific IAM validation - if errCode := s3a.iam.ValidateMultipartOperationWithIAM(r, identity, MultipartOpInitiate); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - } - } - - // Delegate to existing handler - s3a.NewMultipartUploadHandler(w, r) -} - -// CompleteMultipartUploadWithIAM handles complete multipart upload with IAM validation -func (s3a *S3ApiServer) CompleteMultipartUploadWithIAM(w http.ResponseWriter, r *http.Request) { - // Validate IAM permissions first - if s3a.iam.iamIntegration != nil { - if identity, errCode := s3a.iam.authRequest(r, s3_constants.ACTION_WRITE); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } else { - // Additional multipart-specific IAM validation - if errCode := s3a.iam.ValidateMultipartOperationWithIAM(r, identity, MultipartOpComplete); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - } - } - - // Delegate to existing handler - s3a.CompleteMultipartUploadHandler(w, r) -} - -// AbortMultipartUploadWithIAM handles abort multipart upload with IAM validation -func (s3a *S3ApiServer) AbortMultipartUploadWithIAM(w http.ResponseWriter, r *http.Request) { - // Validate IAM permissions first - if s3a.iam.iamIntegration != nil { - if identity, errCode := s3a.iam.authRequest(r, s3_constants.ACTION_WRITE); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } else { - // Additional multipart-specific IAM validation - if errCode := s3a.iam.ValidateMultipartOperationWithIAM(r, identity, MultipartOpAbort); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - } - } - - // Delegate to existing handler - s3a.AbortMultipartUploadHandler(w, r) -} - -// ListMultipartUploadsWithIAM handles list multipart uploads with IAM validation -func (s3a *S3ApiServer) ListMultipartUploadsWithIAM(w http.ResponseWriter, r *http.Request) { - // Validate IAM permissions first - if s3a.iam.iamIntegration != nil { - if identity, errCode := s3a.iam.authRequest(r, s3_constants.ACTION_LIST); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } else { - // Additional multipart-specific IAM validation - if errCode := s3a.iam.ValidateMultipartOperationWithIAM(r, identity, MultipartOpList); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - } - } - - // Delegate to existing handler - s3a.ListMultipartUploadsHandler(w, r) -} - -// UploadPartWithIAM handles upload part with IAM validation -func (s3a *S3ApiServer) UploadPartWithIAM(w http.ResponseWriter, r *http.Request) { - // Validate IAM permissions first - if s3a.iam.iamIntegration != nil { - if identity, errCode := s3a.iam.authRequest(r, s3_constants.ACTION_WRITE); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } else { - // Additional multipart-specific IAM validation - if errCode := s3a.iam.ValidateMultipartOperationWithIAM(r, identity, MultipartOpUploadPart); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Validate part size and other policies - if err := s3a.validateUploadPartRequest(r); err != nil { - glog.Errorf("Upload part validation failed: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - } - } - - // Delegate to existing object PUT handler (which handles upload part) - s3a.PutObjectHandler(w, r) -} - -// Helper functions - -// determineMultipartS3Action maps multipart operations to granular S3 actions -// This enables fine-grained IAM policies for multipart upload operations -func determineMultipartS3Action(operation MultipartOperation) Action { - switch operation { - case MultipartOpInitiate: - return s3_constants.ACTION_CREATE_MULTIPART_UPLOAD - case MultipartOpUploadPart: - return s3_constants.ACTION_UPLOAD_PART - case MultipartOpComplete: - return s3_constants.ACTION_COMPLETE_MULTIPART - case MultipartOpAbort: - return s3_constants.ACTION_ABORT_MULTIPART - case MultipartOpList: - return s3_constants.ACTION_LIST_MULTIPART_UPLOADS - case MultipartOpListParts: - return s3_constants.ACTION_LIST_PARTS - default: - // Fail closed for unmapped operations to prevent unintended access - glog.Errorf("unmapped multipart operation: %s", operation) - return "s3:InternalErrorUnknownMultipartAction" // Non-existent action ensures denial - } -} - -// extractSessionTokenFromRequest extracts session token from various request sources -func extractSessionTokenFromRequest(r *http.Request) string { - // Check Authorization header for Bearer token - if authHeader := r.Header.Get("Authorization"); authHeader != "" { - if strings.HasPrefix(authHeader, "Bearer ") { - return strings.TrimPrefix(authHeader, "Bearer ") - } - } - - // Check X-Amz-Security-Token header - if token := r.Header.Get("X-Amz-Security-Token"); token != "" { - return token - } - - // Check query parameters for presigned URL tokens - if token := r.URL.Query().Get("X-Amz-Security-Token"); token != "" { - return token - } - - return "" -} - -// validateUploadPartRequest validates upload part request against policies -func (s3a *S3ApiServer) validateUploadPartRequest(r *http.Request) error { - // Get default multipart policy - policy := DefaultMultipartUploadPolicy() - - // Extract part number from query - partNumberStr := r.URL.Query().Get("partNumber") - if partNumberStr == "" { - return fmt.Errorf("missing partNumber parameter") - } - - partNumber, err := strconv.Atoi(partNumberStr) - if err != nil { - return fmt.Errorf("invalid partNumber: %v", err) - } - - // Get content length - contentLength := r.ContentLength - if contentLength < 0 { - contentLength = 0 - } - - // Create multipart request for validation - bucket, object := s3_constants.GetBucketAndObject(r) - multipartReq := &MultipartUploadRequest{ - Bucket: bucket, - ObjectKey: object, - PartNumber: partNumber, - Operation: string(MultipartOpUploadPart), - ContentSize: contentLength, - Headers: make(map[string]string), - } - - // Copy relevant headers - for key, values := range r.Header { - if len(values) > 0 { - multipartReq.Headers[key] = values[0] - } - } - - // Validate against policy - return policy.ValidateMultipartRequestWithPolicy(multipartReq) -} - -// DefaultMultipartUploadPolicy returns a default multipart upload security policy -func DefaultMultipartUploadPolicy() *MultipartUploadPolicy { - return &MultipartUploadPolicy{ - MaxPartSize: 5 * 1024 * 1024 * 1024, // 5GB AWS limit - MinPartSize: 5 * 1024 * 1024, // 5MB AWS minimum (except last part) - MaxParts: 10000, // AWS limit - MaxUploadDuration: 7 * 24 * time.Hour, // 7 days to complete upload - AllowedContentTypes: []string{}, // Empty means all types allowed - RequiredHeaders: []string{}, // No required headers by default - IPWhitelist: []string{}, // Empty means no IP restrictions - } -} - -// MultipartUploadSession represents an ongoing multipart upload session -type MultipartUploadSession struct { - UploadID string `json:"upload_id"` - Bucket string `json:"bucket"` - ObjectKey string `json:"object_key"` - Initiator string `json:"initiator"` // User who initiated the upload - Owner string `json:"owner"` // Object owner - CreatedAt time.Time `json:"created_at"` // When upload was initiated - Parts []MultipartUploadPart `json:"parts"` // Uploaded parts - Metadata map[string]string `json:"metadata"` // Object metadata - Policy *MultipartUploadPolicy `json:"policy"` // Applied security policy - SessionToken string `json:"session_token"` // IAM session token -} - -// MultipartUploadPart represents an uploaded part -type MultipartUploadPart struct { - PartNumber int `json:"part_number"` - Size int64 `json:"size"` - ETag string `json:"etag"` - LastModified time.Time `json:"last_modified"` - Checksum string `json:"checksum"` // Optional integrity checksum -} - -// GetMultipartUploadSessions retrieves active multipart upload sessions for a bucket -func (s3a *S3ApiServer) GetMultipartUploadSessions(bucket string) ([]*MultipartUploadSession, error) { - // This would typically query the filer for active multipart uploads - // For now, return empty list as this is a placeholder for the full implementation - return []*MultipartUploadSession{}, nil -} - -// CleanupExpiredMultipartUploads removes expired multipart upload sessions -func (s3a *S3ApiServer) CleanupExpiredMultipartUploads(maxAge time.Duration) error { - // This would typically scan for and remove expired multipart uploads - // Implementation would depend on how multipart sessions are stored in the filer - glog.V(2).Infof("Cleanup expired multipart uploads older than %v", maxAge) - return nil -} diff --git a/weed/s3api/s3_multipart_iam_test.go b/weed/s3api/s3_multipart_iam_test.go deleted file mode 100644 index 2aa68fda0..000000000 --- a/weed/s3api/s3_multipart_iam_test.go +++ /dev/null @@ -1,614 +0,0 @@ -package s3api - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/ldap" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createTestJWTMultipart creates a test JWT token with the specified issuer, subject and signing key -func createTestJWTMultipart(t *testing.T, issuer, subject, signingKey string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client-id", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - // Add claims that trust policy validation expects - "idp": "test-oidc", // Identity provider claim for trust policy matching - }) - - tokenString, err := token.SignedString([]byte(signingKey)) - require.NoError(t, err) - return tokenString -} - -// TestMultipartIAMValidation tests IAM validation for multipart operations -func TestMultipartIAMValidation(t *testing.T) { - // Set up IAM system - iamManager := setupTestIAMManagerForMultipart(t) - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - s3iam.enabled = true - - // Create IAM with integration - iam := &IdentityAccessManagement{ - isAuthEnabled: true, - } - iam.SetIAMIntegration(s3iam) - - // Set up roles - ctx := context.Background() - setupTestRolesForMultipart(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTMultipart(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Get session token - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3WriteRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "multipart-test-session", - }) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - - tests := []struct { - name string - operation MultipartOperation - method string - path string - sessionToken string - expectedResult s3err.ErrorCode - }{ - { - name: "Initiate multipart upload", - operation: MultipartOpInitiate, - method: "POST", - path: "/test-bucket/test-file.txt?uploads", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "Upload part", - operation: MultipartOpUploadPart, - method: "PUT", - path: "/test-bucket/test-file.txt?partNumber=1&uploadId=test-upload-id", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "Complete multipart upload", - operation: MultipartOpComplete, - method: "POST", - path: "/test-bucket/test-file.txt?uploadId=test-upload-id", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "Abort multipart upload", - operation: MultipartOpAbort, - method: "DELETE", - path: "/test-bucket/test-file.txt?uploadId=test-upload-id", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "List multipart uploads", - operation: MultipartOpList, - method: "GET", - path: "/test-bucket?uploads", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "Upload part without session token", - operation: MultipartOpUploadPart, - method: "PUT", - path: "/test-bucket/test-file.txt?partNumber=1&uploadId=test-upload-id", - sessionToken: "", - expectedResult: s3err.ErrNone, // Falls back to standard auth - }, - { - name: "Upload part with invalid session token", - operation: MultipartOpUploadPart, - method: "PUT", - path: "/test-bucket/test-file.txt?partNumber=1&uploadId=test-upload-id", - sessionToken: "invalid-token", - expectedResult: s3err.ErrAccessDenied, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create request for multipart operation - req := createMultipartRequest(t, tt.method, tt.path, tt.sessionToken) - - // Create identity for testing - identity := &Identity{ - Name: "test-user", - Account: &AccountAdmin, - } - - // Test validation - result := iam.ValidateMultipartOperationWithIAM(req, identity, tt.operation) - assert.Equal(t, tt.expectedResult, result, "Multipart IAM validation result should match expected") - }) - } -} - -// TestMultipartUploadPolicy tests multipart upload security policies -func TestMultipartUploadPolicy(t *testing.T) { - policy := &MultipartUploadPolicy{ - MaxPartSize: 10 * 1024 * 1024, // 10MB for testing - MinPartSize: 5 * 1024 * 1024, // 5MB minimum - MaxParts: 100, // 100 parts max for testing - AllowedContentTypes: []string{"application/json", "text/plain"}, - RequiredHeaders: []string{"Content-Type"}, - } - - tests := []struct { - name string - request *MultipartUploadRequest - expectedError string - }{ - { - name: "Valid upload part request", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 1, - Operation: string(MultipartOpUploadPart), - ContentSize: 8 * 1024 * 1024, // 8MB - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }, - expectedError: "", - }, - { - name: "Part size too large", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 1, - Operation: string(MultipartOpUploadPart), - ContentSize: 15 * 1024 * 1024, // 15MB exceeds limit - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }, - expectedError: "part size", - }, - { - name: "Invalid part number (too high)", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 150, // Exceeds max parts - Operation: string(MultipartOpUploadPart), - ContentSize: 8 * 1024 * 1024, - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }, - expectedError: "part number", - }, - { - name: "Invalid part number (too low)", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 0, // Must be >= 1 - Operation: string(MultipartOpUploadPart), - ContentSize: 8 * 1024 * 1024, - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }, - expectedError: "part number", - }, - { - name: "Content type not allowed", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 1, - Operation: string(MultipartOpUploadPart), - ContentSize: 8 * 1024 * 1024, - Headers: map[string]string{ - "Content-Type": "video/mp4", // Not in allowed list - }, - }, - expectedError: "content type video/mp4 is not allowed", - }, - { - name: "Missing required header", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - PartNumber: 1, - Operation: string(MultipartOpUploadPart), - ContentSize: 8 * 1024 * 1024, - Headers: map[string]string{}, // Missing Content-Type - }, - expectedError: "required header Content-Type is missing", - }, - { - name: "Non-upload operation (should not validate size)", - request: &MultipartUploadRequest{ - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Operation: string(MultipartOpInitiate), - Headers: map[string]string{ - "Content-Type": "application/json", - }, - }, - expectedError: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := policy.ValidateMultipartRequestWithPolicy(tt.request) - - if tt.expectedError == "" { - assert.NoError(t, err, "Policy validation should succeed") - } else { - assert.Error(t, err, "Policy validation should fail") - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - }) - } -} - -// TestMultipartS3ActionMapping tests the mapping of multipart operations to S3 actions -func TestMultipartS3ActionMapping(t *testing.T) { - tests := []struct { - operation MultipartOperation - expectedAction Action - }{ - {MultipartOpInitiate, s3_constants.ACTION_CREATE_MULTIPART_UPLOAD}, - {MultipartOpUploadPart, s3_constants.ACTION_UPLOAD_PART}, - {MultipartOpComplete, s3_constants.ACTION_COMPLETE_MULTIPART}, - {MultipartOpAbort, s3_constants.ACTION_ABORT_MULTIPART}, - {MultipartOpList, s3_constants.ACTION_LIST_MULTIPART_UPLOADS}, - {MultipartOpListParts, s3_constants.ACTION_LIST_PARTS}, - {MultipartOperation("unknown"), "s3:InternalErrorUnknownMultipartAction"}, // Fail-closed for security - } - - for _, tt := range tests { - t.Run(string(tt.operation), func(t *testing.T) { - action := determineMultipartS3Action(tt.operation) - assert.Equal(t, tt.expectedAction, action, "S3 action mapping should match expected") - }) - } -} - -// TestSessionTokenExtraction tests session token extraction from various sources -func TestSessionTokenExtraction(t *testing.T) { - tests := []struct { - name string - setupRequest func() *http.Request - expectedToken string - }{ - { - name: "Bearer token in Authorization header", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt", nil) - req.Header.Set("Authorization", "Bearer test-session-token-123") - return req - }, - expectedToken: "test-session-token-123", - }, - { - name: "X-Amz-Security-Token header", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt", nil) - req.Header.Set("X-Amz-Security-Token", "security-token-456") - return req - }, - expectedToken: "security-token-456", - }, - { - name: "X-Amz-Security-Token query parameter", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt?X-Amz-Security-Token=query-token-789", nil) - return req - }, - expectedToken: "query-token-789", - }, - { - name: "No token present", - setupRequest: func() *http.Request { - return httptest.NewRequest("PUT", "/test-bucket/test-file.txt", nil) - }, - expectedToken: "", - }, - { - name: "Authorization header without Bearer", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt", nil) - req.Header.Set("Authorization", "AWS access_key:signature") - return req - }, - expectedToken: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := tt.setupRequest() - token := extractSessionTokenFromRequest(req) - assert.Equal(t, tt.expectedToken, token, "Extracted token should match expected") - }) - } -} - -// TestUploadPartValidation tests upload part request validation -func TestUploadPartValidation(t *testing.T) { - s3Server := &S3ApiServer{} - - tests := []struct { - name string - setupRequest func() *http.Request - expectedError string - }{ - { - name: "Valid upload part request", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt?partNumber=1&uploadId=test-123", nil) - req.Header.Set("Content-Type", "application/octet-stream") - req.ContentLength = 6 * 1024 * 1024 // 6MB - return req - }, - expectedError: "", - }, - { - name: "Missing partNumber parameter", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt?uploadId=test-123", nil) - req.Header.Set("Content-Type", "application/octet-stream") - req.ContentLength = 6 * 1024 * 1024 - return req - }, - expectedError: "missing partNumber parameter", - }, - { - name: "Invalid partNumber format", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt?partNumber=abc&uploadId=test-123", nil) - req.Header.Set("Content-Type", "application/octet-stream") - req.ContentLength = 6 * 1024 * 1024 - return req - }, - expectedError: "invalid partNumber", - }, - { - name: "Part size too large", - setupRequest: func() *http.Request { - req := httptest.NewRequest("PUT", "/test-bucket/test-file.txt?partNumber=1&uploadId=test-123", nil) - req.Header.Set("Content-Type", "application/octet-stream") - req.ContentLength = 6 * 1024 * 1024 * 1024 // 6GB exceeds 5GB limit - return req - }, - expectedError: "part size", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := tt.setupRequest() - err := s3Server.validateUploadPartRequest(req) - - if tt.expectedError == "" { - assert.NoError(t, err, "Upload part validation should succeed") - } else { - assert.Error(t, err, "Upload part validation should fail") - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - }) - } -} - -// TestDefaultMultipartUploadPolicy tests the default policy configuration -func TestDefaultMultipartUploadPolicy(t *testing.T) { - policy := DefaultMultipartUploadPolicy() - - assert.Equal(t, int64(5*1024*1024*1024), policy.MaxPartSize, "Max part size should be 5GB") - assert.Equal(t, int64(5*1024*1024), policy.MinPartSize, "Min part size should be 5MB") - assert.Equal(t, 10000, policy.MaxParts, "Max parts should be 10,000") - assert.Equal(t, 7*24*time.Hour, policy.MaxUploadDuration, "Max upload duration should be 7 days") - assert.Empty(t, policy.AllowedContentTypes, "Should allow all content types by default") - assert.Empty(t, policy.RequiredHeaders, "Should have no required headers by default") - assert.Empty(t, policy.IPWhitelist, "Should have no IP restrictions by default") -} - -// TestMultipartUploadSession tests multipart upload session structure -func TestMultipartUploadSession(t *testing.T) { - session := &MultipartUploadSession{ - UploadID: "test-upload-123", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Initiator: "arn:seaweed:iam::user/testuser", - Owner: "arn:seaweed:iam::user/testuser", - CreatedAt: time.Now(), - Parts: []MultipartUploadPart{ - { - PartNumber: 1, - Size: 5 * 1024 * 1024, - ETag: "abc123", - LastModified: time.Now(), - Checksum: "sha256:def456", - }, - }, - Metadata: map[string]string{ - "Content-Type": "application/octet-stream", - "x-amz-meta-custom": "value", - }, - Policy: DefaultMultipartUploadPolicy(), - SessionToken: "session-token-789", - } - - assert.NotEmpty(t, session.UploadID, "Upload ID should not be empty") - assert.NotEmpty(t, session.Bucket, "Bucket should not be empty") - assert.NotEmpty(t, session.ObjectKey, "Object key should not be empty") - assert.Len(t, session.Parts, 1, "Should have one part") - assert.Equal(t, 1, session.Parts[0].PartNumber, "Part number should be 1") - assert.NotNil(t, session.Policy, "Policy should not be nil") -} - -// Helper functions for tests - -func setupTestIAMManagerForMultipart(t *testing.T) *integration.IAMManager { - // Create IAM manager - manager := integration.NewIAMManager() - - // Initialize with test configuration - config := &integration.IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", - }, - } - - err := manager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Set up test identity providers - setupTestProvidersForMultipart(t, manager) - - return manager -} - -func setupTestProvidersForMultipart(t *testing.T, manager *integration.IAMManager) { - // Set up OIDC provider - oidcProvider := oidc.NewMockOIDCProvider("test-oidc") - oidcConfig := &oidc.OIDCConfig{ - Issuer: "https://test-issuer.com", - ClientID: "test-client-id", - } - err := oidcProvider.Initialize(oidcConfig) - require.NoError(t, err) - oidcProvider.SetupDefaultTestData() - - // Set up LDAP provider - ldapProvider := ldap.NewMockLDAPProvider("test-ldap") - err = ldapProvider.Initialize(nil) // Mock doesn't need real config - require.NoError(t, err) - ldapProvider.SetupDefaultTestData() - - // Register providers - err = manager.RegisterIdentityProvider(oidcProvider) - require.NoError(t, err) - err = manager.RegisterIdentityProvider(ldapProvider) - require.NoError(t, err) -} - -func setupTestRolesForMultipart(ctx context.Context, manager *integration.IAMManager) { - // Create write policy for multipart operations - writePolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3MultipartOperations", - Effect: "Allow", - Action: []string{ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket", - "s3:DeleteObject", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3WritePolicy", writePolicy) - - // Create write role - manager.CreateRole(ctx, "", "S3WriteRole", &integration.RoleDefinition{ - RoleName: "S3WriteRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3WritePolicy"}, - }) - - // Create a role for multipart users - manager.CreateRole(ctx, "", "MultipartUser", &integration.RoleDefinition{ - RoleName: "MultipartUser", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3WritePolicy"}, - }) -} - -func createMultipartRequest(t *testing.T, method, path, sessionToken string) *http.Request { - req := httptest.NewRequest(method, path, nil) - - // Add session token if provided - if sessionToken != "" { - req.Header.Set("Authorization", "Bearer "+sessionToken) - // Set the principal ARN header that matches the assumed role from the test setup - // This corresponds to the role "arn:seaweed:iam::role/S3WriteRole" with session name "multipart-test-session" - req.Header.Set("X-SeaweedFS-Principal", "arn:seaweed:sts::assumed-role/S3WriteRole/multipart-test-session") - } - - // Add common headers - req.Header.Set("Content-Type", "application/octet-stream") - - return req -} diff --git a/weed/s3api/s3_policy_templates.go b/weed/s3api/s3_policy_templates.go deleted file mode 100644 index 811872aee..000000000 --- a/weed/s3api/s3_policy_templates.go +++ /dev/null @@ -1,618 +0,0 @@ -package s3api - -import ( - "time" - - "github.com/seaweedfs/seaweedfs/weed/iam/policy" -) - -// S3PolicyTemplates provides pre-built IAM policy templates for common S3 use cases -type S3PolicyTemplates struct{} - -// NewS3PolicyTemplates creates a new policy templates provider -func NewS3PolicyTemplates() *S3PolicyTemplates { - return &S3PolicyTemplates{} -} - -// GetS3ReadOnlyPolicy returns a policy that allows read-only access to all S3 resources -func (t *S3PolicyTemplates) GetS3ReadOnlyPolicy() *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "S3ReadOnlyAccess", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:GetObjectVersion", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - "s3:GetBucketVersioning", - "s3:ListAllMyBuckets", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } -} - -// GetS3WriteOnlyPolicy returns a policy that allows write-only access to all S3 resources -func (t *S3PolicyTemplates) GetS3WriteOnlyPolicy() *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "S3WriteOnlyAccess", - Effect: "Allow", - Action: []string{ - "s3:PutObject", - "s3:PutObjectAcl", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } -} - -// GetS3AdminPolicy returns a policy that allows full admin access to all S3 resources -func (t *S3PolicyTemplates) GetS3AdminPolicy() *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "S3FullAccess", - Effect: "Allow", - Action: []string{ - "s3:*", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } -} - -// GetBucketSpecificReadPolicy returns a policy for read-only access to a specific bucket -func (t *S3PolicyTemplates) GetBucketSpecificReadPolicy(bucketName string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "BucketSpecificReadAccess", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:GetObjectVersion", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - "arn:seaweed:s3:::" + bucketName + "/*", - }, - }, - }, - } -} - -// GetBucketSpecificWritePolicy returns a policy for write-only access to a specific bucket -func (t *S3PolicyTemplates) GetBucketSpecificWritePolicy(bucketName string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "BucketSpecificWriteAccess", - Effect: "Allow", - Action: []string{ - "s3:PutObject", - "s3:PutObjectAcl", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - "arn:seaweed:s3:::" + bucketName + "/*", - }, - }, - }, - } -} - -// GetPathBasedAccessPolicy returns a policy that restricts access to a specific path within a bucket -func (t *S3PolicyTemplates) GetPathBasedAccessPolicy(bucketName, pathPrefix string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "ListBucketPermission", - Effect: "Allow", - Action: []string{ - "s3:ListBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - }, - Condition: map[string]map[string]interface{}{ - "StringLike": map[string]interface{}{ - "s3:prefix": []string{pathPrefix + "/*"}, - }, - }, - }, - { - Sid: "PathBasedObjectAccess", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:PutObject", - "s3:DeleteObject", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName + "/" + pathPrefix + "/*", - }, - }, - }, - } -} - -// GetIPRestrictedPolicy returns a policy that restricts access based on source IP -func (t *S3PolicyTemplates) GetIPRestrictedPolicy(allowedCIDRs []string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "IPRestrictedS3Access", - Effect: "Allow", - Action: []string{ - "s3:*", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - Condition: map[string]map[string]interface{}{ - "IpAddress": map[string]interface{}{ - "aws:SourceIp": allowedCIDRs, - }, - }, - }, - }, - } -} - -// GetTimeBasedAccessPolicy returns a policy that allows access only during specific hours -func (t *S3PolicyTemplates) GetTimeBasedAccessPolicy(startHour, endHour int) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "TimeBasedS3Access", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - Condition: map[string]map[string]interface{}{ - "DateGreaterThan": map[string]interface{}{ - "aws:CurrentTime": time.Now().Format("2006-01-02") + "T" + - formatHour(startHour) + ":00:00Z", - }, - "DateLessThan": map[string]interface{}{ - "aws:CurrentTime": time.Now().Format("2006-01-02") + "T" + - formatHour(endHour) + ":00:00Z", - }, - }, - }, - }, - } -} - -// GetMultipartUploadPolicy returns a policy specifically for multipart upload operations -func (t *S3PolicyTemplates) GetMultipartUploadPolicy(bucketName string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "MultipartUploadOperations", - Effect: "Allow", - Action: []string{ - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName + "/*", - }, - }, - { - Sid: "ListBucketForMultipart", - Effect: "Allow", - Action: []string{ - "s3:ListBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - }, - }, - }, - } -} - -// GetPresignedURLPolicy returns a policy for generating and using presigned URLs -func (t *S3PolicyTemplates) GetPresignedURLPolicy(bucketName string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "PresignedURLAccess", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:PutObject", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName + "/*", - }, - Condition: map[string]map[string]interface{}{ - "StringEquals": map[string]interface{}{ - "s3:x-amz-signature-version": "AWS4-HMAC-SHA256", - }, - }, - }, - }, - } -} - -// GetTemporaryAccessPolicy returns a policy for temporary access with expiration -func (t *S3PolicyTemplates) GetTemporaryAccessPolicy(bucketName string, expirationHours int) *policy.PolicyDocument { - expirationTime := time.Now().Add(time.Duration(expirationHours) * time.Hour) - - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "TemporaryS3Access", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - "arn:seaweed:s3:::" + bucketName + "/*", - }, - Condition: map[string]map[string]interface{}{ - "DateLessThan": map[string]interface{}{ - "aws:CurrentTime": expirationTime.UTC().Format("2006-01-02T15:04:05Z"), - }, - }, - }, - }, - } -} - -// GetContentTypeRestrictedPolicy returns a policy that restricts uploads to specific content types -func (t *S3PolicyTemplates) GetContentTypeRestrictedPolicy(bucketName string, allowedContentTypes []string) *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "ContentTypeRestrictedUpload", - Effect: "Allow", - Action: []string{ - "s3:PutObject", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName + "/*", - }, - Condition: map[string]map[string]interface{}{ - "StringEquals": map[string]interface{}{ - "s3:content-type": allowedContentTypes, - }, - }, - }, - { - Sid: "ReadAccess", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:ListBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::" + bucketName, - "arn:seaweed:s3:::" + bucketName + "/*", - }, - }, - }, - } -} - -// GetDenyDeletePolicy returns a policy that allows all operations except delete -func (t *S3PolicyTemplates) GetDenyDeletePolicy() *policy.PolicyDocument { - return &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowAllExceptDelete", - Effect: "Allow", - Action: []string{ - "s3:GetObject", - "s3:GetObjectVersion", - "s3:PutObject", - "s3:PutObjectAcl", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:CreateMultipartUpload", - "s3:UploadPart", - "s3:CompleteMultipartUpload", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploads", - "s3:ListParts", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - { - Sid: "DenyDeleteOperations", - Effect: "Deny", - Action: []string{ - "s3:DeleteObject", - "s3:DeleteObjectVersion", - "s3:DeleteBucket", - }, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } -} - -// Helper function to format hour with leading zero -func formatHour(hour int) string { - if hour < 10 { - return "0" + string(rune('0'+hour)) - } - return string(rune('0'+hour/10)) + string(rune('0'+hour%10)) -} - -// PolicyTemplateDefinition represents metadata about a policy template -type PolicyTemplateDefinition struct { - Name string `json:"name"` - Description string `json:"description"` - Category string `json:"category"` - UseCase string `json:"use_case"` - Parameters []PolicyTemplateParam `json:"parameters,omitempty"` - Policy *policy.PolicyDocument `json:"policy"` -} - -// PolicyTemplateParam represents a parameter for customizing policy templates -type PolicyTemplateParam struct { - Name string `json:"name"` - Type string `json:"type"` - Description string `json:"description"` - Required bool `json:"required"` - DefaultValue string `json:"default_value,omitempty"` - Example string `json:"example,omitempty"` -} - -// GetAllPolicyTemplates returns all available policy templates with metadata -func (t *S3PolicyTemplates) GetAllPolicyTemplates() []PolicyTemplateDefinition { - return []PolicyTemplateDefinition{ - { - Name: "S3ReadOnlyAccess", - Description: "Provides read-only access to all S3 buckets and objects", - Category: "Basic Access", - UseCase: "Data consumers, backup services, monitoring applications", - Policy: t.GetS3ReadOnlyPolicy(), - }, - { - Name: "S3WriteOnlyAccess", - Description: "Provides write-only access to all S3 buckets and objects", - Category: "Basic Access", - UseCase: "Data ingestion services, backup applications", - Policy: t.GetS3WriteOnlyPolicy(), - }, - { - Name: "S3AdminAccess", - Description: "Provides full administrative access to all S3 resources", - Category: "Administrative", - UseCase: "S3 administrators, service accounts with full control", - Policy: t.GetS3AdminPolicy(), - }, - { - Name: "BucketSpecificRead", - Description: "Provides read-only access to a specific bucket", - Category: "Bucket-Specific", - UseCase: "Applications that need access to specific data sets", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket to grant access to", - Required: true, - Example: "my-data-bucket", - }, - }, - Policy: t.GetBucketSpecificReadPolicy("${bucketName}"), - }, - { - Name: "BucketSpecificWrite", - Description: "Provides write-only access to a specific bucket", - Category: "Bucket-Specific", - UseCase: "Upload services, data ingestion for specific datasets", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket to grant access to", - Required: true, - Example: "my-upload-bucket", - }, - }, - Policy: t.GetBucketSpecificWritePolicy("${bucketName}"), - }, - { - Name: "PathBasedAccess", - Description: "Restricts access to a specific path/prefix within a bucket", - Category: "Path-Restricted", - UseCase: "Multi-tenant applications, user-specific directories", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket", - Required: true, - Example: "shared-bucket", - }, - { - Name: "pathPrefix", - Type: "string", - Description: "Path prefix to restrict access to", - Required: true, - Example: "user123/documents", - }, - }, - Policy: t.GetPathBasedAccessPolicy("${bucketName}", "${pathPrefix}"), - }, - { - Name: "IPRestrictedAccess", - Description: "Allows access only from specific IP addresses or ranges", - Category: "Security", - UseCase: "Corporate networks, office-based access, VPN restrictions", - Parameters: []PolicyTemplateParam{ - { - Name: "allowedCIDRs", - Type: "array", - Description: "List of allowed IP addresses or CIDR ranges", - Required: true, - Example: "[\"192.168.1.0/24\", \"10.0.0.0/8\"]", - }, - }, - Policy: t.GetIPRestrictedPolicy([]string{"${allowedCIDRs}"}), - }, - { - Name: "MultipartUploadOnly", - Description: "Allows only multipart upload operations on a specific bucket", - Category: "Upload-Specific", - UseCase: "Large file upload services, streaming applications", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket for multipart uploads", - Required: true, - Example: "large-files-bucket", - }, - }, - Policy: t.GetMultipartUploadPolicy("${bucketName}"), - }, - { - Name: "PresignedURLAccess", - Description: "Policy for generating and using presigned URLs", - Category: "Presigned URLs", - UseCase: "Frontend applications, temporary file sharing", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket for presigned URL access", - Required: true, - Example: "shared-files-bucket", - }, - }, - Policy: t.GetPresignedURLPolicy("${bucketName}"), - }, - { - Name: "ContentTypeRestricted", - Description: "Restricts uploads to specific content types", - Category: "Content Control", - UseCase: "Image galleries, document repositories, media libraries", - Parameters: []PolicyTemplateParam{ - { - Name: "bucketName", - Type: "string", - Description: "Name of the S3 bucket", - Required: true, - Example: "media-bucket", - }, - { - Name: "allowedContentTypes", - Type: "array", - Description: "List of allowed MIME content types", - Required: true, - Example: "[\"image/jpeg\", \"image/png\", \"video/mp4\"]", - }, - }, - Policy: t.GetContentTypeRestrictedPolicy("${bucketName}", []string{"${allowedContentTypes}"}), - }, - { - Name: "DenyDeleteAccess", - Description: "Allows all operations except delete (immutable storage)", - Category: "Data Protection", - UseCase: "Compliance storage, audit logs, backup retention", - Policy: t.GetDenyDeletePolicy(), - }, - } -} - -// GetPolicyTemplateByName returns a specific policy template by name -func (t *S3PolicyTemplates) GetPolicyTemplateByName(name string) *PolicyTemplateDefinition { - templates := t.GetAllPolicyTemplates() - for _, template := range templates { - if template.Name == name { - return &template - } - } - return nil -} - -// GetPolicyTemplatesByCategory returns all policy templates in a specific category -func (t *S3PolicyTemplates) GetPolicyTemplatesByCategory(category string) []PolicyTemplateDefinition { - var result []PolicyTemplateDefinition - templates := t.GetAllPolicyTemplates() - for _, template := range templates { - if template.Category == category { - result = append(result, template) - } - } - return result -} diff --git a/weed/s3api/s3_policy_templates_test.go b/weed/s3api/s3_policy_templates_test.go deleted file mode 100644 index 9c1f6c7d3..000000000 --- a/weed/s3api/s3_policy_templates_test.go +++ /dev/null @@ -1,504 +0,0 @@ -package s3api - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestS3PolicyTemplates(t *testing.T) { - templates := NewS3PolicyTemplates() - - t.Run("S3ReadOnlyPolicy", func(t *testing.T) { - policy := templates.GetS3ReadOnlyPolicy() - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "S3ReadOnlyAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:GetObject") - assert.Contains(t, stmt.Action, "s3:ListBucket") - assert.NotContains(t, stmt.Action, "s3:PutObject") - assert.NotContains(t, stmt.Action, "s3:DeleteObject") - - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*") - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*/*") - }) - - t.Run("S3WriteOnlyPolicy", func(t *testing.T) { - policy := templates.GetS3WriteOnlyPolicy() - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "S3WriteOnlyAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:PutObject") - assert.Contains(t, stmt.Action, "s3:CreateMultipartUpload") - assert.NotContains(t, stmt.Action, "s3:GetObject") - assert.NotContains(t, stmt.Action, "s3:DeleteObject") - - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*") - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*/*") - }) - - t.Run("S3AdminPolicy", func(t *testing.T) { - policy := templates.GetS3AdminPolicy() - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "S3FullAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:*") - - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*") - assert.Contains(t, stmt.Resource, "arn:seaweed:s3:::*/*") - }) -} - -func TestBucketSpecificPolicies(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "test-bucket" - - t.Run("BucketSpecificReadPolicy", func(t *testing.T) { - policy := templates.GetBucketSpecificReadPolicy(bucketName) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "BucketSpecificReadAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:GetObject") - assert.Contains(t, stmt.Action, "s3:ListBucket") - assert.NotContains(t, stmt.Action, "s3:PutObject") - - expectedBucketArn := "arn:seaweed:s3:::" + bucketName - expectedObjectArn := "arn:seaweed:s3:::" + bucketName + "/*" - assert.Contains(t, stmt.Resource, expectedBucketArn) - assert.Contains(t, stmt.Resource, expectedObjectArn) - }) - - t.Run("BucketSpecificWritePolicy", func(t *testing.T) { - policy := templates.GetBucketSpecificWritePolicy(bucketName) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "BucketSpecificWriteAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:PutObject") - assert.Contains(t, stmt.Action, "s3:CreateMultipartUpload") - assert.NotContains(t, stmt.Action, "s3:GetObject") - - expectedBucketArn := "arn:seaweed:s3:::" + bucketName - expectedObjectArn := "arn:seaweed:s3:::" + bucketName + "/*" - assert.Contains(t, stmt.Resource, expectedBucketArn) - assert.Contains(t, stmt.Resource, expectedObjectArn) - }) -} - -func TestPathBasedAccessPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "shared-bucket" - pathPrefix := "user123/documents" - - policy := templates.GetPathBasedAccessPolicy(bucketName, pathPrefix) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 2) - - // First statement: List bucket with prefix condition - listStmt := policy.Statement[0] - assert.Equal(t, "Allow", listStmt.Effect) - assert.Equal(t, "ListBucketPermission", listStmt.Sid) - assert.Contains(t, listStmt.Action, "s3:ListBucket") - assert.Contains(t, listStmt.Resource, "arn:seaweed:s3:::"+bucketName) - assert.NotNil(t, listStmt.Condition) - - // Second statement: Object operations on path - objectStmt := policy.Statement[1] - assert.Equal(t, "Allow", objectStmt.Effect) - assert.Equal(t, "PathBasedObjectAccess", objectStmt.Sid) - assert.Contains(t, objectStmt.Action, "s3:GetObject") - assert.Contains(t, objectStmt.Action, "s3:PutObject") - assert.Contains(t, objectStmt.Action, "s3:DeleteObject") - - expectedObjectArn := "arn:seaweed:s3:::" + bucketName + "/" + pathPrefix + "/*" - assert.Contains(t, objectStmt.Resource, expectedObjectArn) -} - -func TestIPRestrictedPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - allowedCIDRs := []string{"192.168.1.0/24", "10.0.0.0/8"} - - policy := templates.GetIPRestrictedPolicy(allowedCIDRs) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "IPRestrictedS3Access", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:*") - assert.NotNil(t, stmt.Condition) - - // Check IP condition structure - condition := stmt.Condition - ipAddress, exists := condition["IpAddress"] - assert.True(t, exists) - - sourceIp, exists := ipAddress["aws:SourceIp"] - assert.True(t, exists) - assert.Equal(t, allowedCIDRs, sourceIp) -} - -func TestTimeBasedAccessPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - startHour := 9 // 9 AM - endHour := 17 // 5 PM - - policy := templates.GetTimeBasedAccessPolicy(startHour, endHour) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "TimeBasedS3Access", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:GetObject") - assert.Contains(t, stmt.Action, "s3:PutObject") - assert.Contains(t, stmt.Action, "s3:ListBucket") - assert.NotNil(t, stmt.Condition) - - // Check time condition structure - condition := stmt.Condition - _, hasGreater := condition["DateGreaterThan"] - _, hasLess := condition["DateLessThan"] - assert.True(t, hasGreater) - assert.True(t, hasLess) -} - -func TestMultipartUploadPolicyTemplate(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "large-files" - - policy := templates.GetMultipartUploadPolicy(bucketName) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 2) - - // First statement: Multipart operations - multipartStmt := policy.Statement[0] - assert.Equal(t, "Allow", multipartStmt.Effect) - assert.Equal(t, "MultipartUploadOperations", multipartStmt.Sid) - assert.Contains(t, multipartStmt.Action, "s3:CreateMultipartUpload") - assert.Contains(t, multipartStmt.Action, "s3:UploadPart") - assert.Contains(t, multipartStmt.Action, "s3:CompleteMultipartUpload") - assert.Contains(t, multipartStmt.Action, "s3:AbortMultipartUpload") - assert.Contains(t, multipartStmt.Action, "s3:ListMultipartUploads") - assert.Contains(t, multipartStmt.Action, "s3:ListParts") - - expectedObjectArn := "arn:seaweed:s3:::" + bucketName + "/*" - assert.Contains(t, multipartStmt.Resource, expectedObjectArn) - - // Second statement: List bucket - listStmt := policy.Statement[1] - assert.Equal(t, "Allow", listStmt.Effect) - assert.Equal(t, "ListBucketForMultipart", listStmt.Sid) - assert.Contains(t, listStmt.Action, "s3:ListBucket") - - expectedBucketArn := "arn:seaweed:s3:::" + bucketName - assert.Contains(t, listStmt.Resource, expectedBucketArn) -} - -func TestPresignedURLPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "shared-files" - - policy := templates.GetPresignedURLPolicy(bucketName) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "PresignedURLAccess", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:GetObject") - assert.Contains(t, stmt.Action, "s3:PutObject") - assert.NotNil(t, stmt.Condition) - - expectedObjectArn := "arn:seaweed:s3:::" + bucketName + "/*" - assert.Contains(t, stmt.Resource, expectedObjectArn) - - // Check signature version condition - condition := stmt.Condition - stringEquals, exists := condition["StringEquals"] - assert.True(t, exists) - - signatureVersion, exists := stringEquals["s3:x-amz-signature-version"] - assert.True(t, exists) - assert.Equal(t, "AWS4-HMAC-SHA256", signatureVersion) -} - -func TestTemporaryAccessPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "temp-bucket" - expirationHours := 24 - - policy := templates.GetTemporaryAccessPolicy(bucketName, expirationHours) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 1) - - stmt := policy.Statement[0] - assert.Equal(t, "Allow", stmt.Effect) - assert.Equal(t, "TemporaryS3Access", stmt.Sid) - assert.Contains(t, stmt.Action, "s3:GetObject") - assert.Contains(t, stmt.Action, "s3:PutObject") - assert.Contains(t, stmt.Action, "s3:ListBucket") - assert.NotNil(t, stmt.Condition) - - // Check expiration condition - condition := stmt.Condition - dateLessThan, exists := condition["DateLessThan"] - assert.True(t, exists) - - currentTime, exists := dateLessThan["aws:CurrentTime"] - assert.True(t, exists) - assert.IsType(t, "", currentTime) // Should be a string timestamp -} - -func TestContentTypeRestrictedPolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - bucketName := "media-bucket" - allowedTypes := []string{"image/jpeg", "image/png", "video/mp4"} - - policy := templates.GetContentTypeRestrictedPolicy(bucketName, allowedTypes) - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 2) - - // First statement: Upload with content type restriction - uploadStmt := policy.Statement[0] - assert.Equal(t, "Allow", uploadStmt.Effect) - assert.Equal(t, "ContentTypeRestrictedUpload", uploadStmt.Sid) - assert.Contains(t, uploadStmt.Action, "s3:PutObject") - assert.Contains(t, uploadStmt.Action, "s3:CreateMultipartUpload") - assert.NotNil(t, uploadStmt.Condition) - - // Check content type condition - condition := uploadStmt.Condition - stringEquals, exists := condition["StringEquals"] - assert.True(t, exists) - - contentType, exists := stringEquals["s3:content-type"] - assert.True(t, exists) - assert.Equal(t, allowedTypes, contentType) - - // Second statement: Read access without restrictions - readStmt := policy.Statement[1] - assert.Equal(t, "Allow", readStmt.Effect) - assert.Equal(t, "ReadAccess", readStmt.Sid) - assert.Contains(t, readStmt.Action, "s3:GetObject") - assert.Contains(t, readStmt.Action, "s3:ListBucket") - assert.Nil(t, readStmt.Condition) // No conditions for read access -} - -func TestDenyDeletePolicy(t *testing.T) { - templates := NewS3PolicyTemplates() - - policy := templates.GetDenyDeletePolicy() - - require.NotNil(t, policy) - assert.Equal(t, "2012-10-17", policy.Version) - assert.Len(t, policy.Statement, 2) - - // First statement: Allow everything except delete - allowStmt := policy.Statement[0] - assert.Equal(t, "Allow", allowStmt.Effect) - assert.Equal(t, "AllowAllExceptDelete", allowStmt.Sid) - assert.Contains(t, allowStmt.Action, "s3:GetObject") - assert.Contains(t, allowStmt.Action, "s3:PutObject") - assert.Contains(t, allowStmt.Action, "s3:ListBucket") - assert.NotContains(t, allowStmt.Action, "s3:DeleteObject") - assert.NotContains(t, allowStmt.Action, "s3:DeleteBucket") - - // Second statement: Explicitly deny delete operations - denyStmt := policy.Statement[1] - assert.Equal(t, "Deny", denyStmt.Effect) - assert.Equal(t, "DenyDeleteOperations", denyStmt.Sid) - assert.Contains(t, denyStmt.Action, "s3:DeleteObject") - assert.Contains(t, denyStmt.Action, "s3:DeleteObjectVersion") - assert.Contains(t, denyStmt.Action, "s3:DeleteBucket") -} - -func TestPolicyTemplateMetadata(t *testing.T) { - templates := NewS3PolicyTemplates() - - t.Run("GetAllPolicyTemplates", func(t *testing.T) { - allTemplates := templates.GetAllPolicyTemplates() - - assert.Greater(t, len(allTemplates), 10) // Should have many templates - - // Check that each template has required fields - for _, template := range allTemplates { - assert.NotEmpty(t, template.Name) - assert.NotEmpty(t, template.Description) - assert.NotEmpty(t, template.Category) - assert.NotEmpty(t, template.UseCase) - assert.NotNil(t, template.Policy) - assert.Equal(t, "2012-10-17", template.Policy.Version) - } - }) - - t.Run("GetPolicyTemplateByName", func(t *testing.T) { - // Test existing template - template := templates.GetPolicyTemplateByName("S3ReadOnlyAccess") - require.NotNil(t, template) - assert.Equal(t, "S3ReadOnlyAccess", template.Name) - assert.Equal(t, "Basic Access", template.Category) - - // Test non-existing template - nonExistent := templates.GetPolicyTemplateByName("NonExistentTemplate") - assert.Nil(t, nonExistent) - }) - - t.Run("GetPolicyTemplatesByCategory", func(t *testing.T) { - basicAccessTemplates := templates.GetPolicyTemplatesByCategory("Basic Access") - assert.GreaterOrEqual(t, len(basicAccessTemplates), 2) - - for _, template := range basicAccessTemplates { - assert.Equal(t, "Basic Access", template.Category) - } - - // Test non-existing category - emptyCategory := templates.GetPolicyTemplatesByCategory("NonExistentCategory") - assert.Empty(t, emptyCategory) - }) - - t.Run("PolicyTemplateParameters", func(t *testing.T) { - allTemplates := templates.GetAllPolicyTemplates() - - // Find a template with parameters (like BucketSpecificRead) - var templateWithParams *PolicyTemplateDefinition - for _, template := range allTemplates { - if template.Name == "BucketSpecificRead" { - templateWithParams = &template - break - } - } - - require.NotNil(t, templateWithParams) - assert.Greater(t, len(templateWithParams.Parameters), 0) - - param := templateWithParams.Parameters[0] - assert.Equal(t, "bucketName", param.Name) - assert.Equal(t, "string", param.Type) - assert.True(t, param.Required) - assert.NotEmpty(t, param.Description) - assert.NotEmpty(t, param.Example) - }) -} - -func TestFormatHourHelper(t *testing.T) { - tests := []struct { - hour int - expected string - }{ - {0, "00"}, - {5, "05"}, - {9, "09"}, - {10, "10"}, - {15, "15"}, - {23, "23"}, - } - - for _, tt := range tests { - t.Run(fmt.Sprintf("Hour_%d", tt.hour), func(t *testing.T) { - result := formatHour(tt.hour) - assert.Equal(t, tt.expected, result) - }) - } -} - -func TestPolicyTemplateCategories(t *testing.T) { - templates := NewS3PolicyTemplates() - allTemplates := templates.GetAllPolicyTemplates() - - // Extract all categories - categoryMap := make(map[string]int) - for _, template := range allTemplates { - categoryMap[template.Category]++ - } - - // Expected categories - expectedCategories := []string{ - "Basic Access", - "Administrative", - "Bucket-Specific", - "Path-Restricted", - "Security", - "Upload-Specific", - "Presigned URLs", - "Content Control", - "Data Protection", - } - - for _, expectedCategory := range expectedCategories { - count, exists := categoryMap[expectedCategory] - assert.True(t, exists, "Category %s should exist", expectedCategory) - assert.Greater(t, count, 0, "Category %s should have at least one template", expectedCategory) - } -} - -func TestPolicyValidation(t *testing.T) { - templates := NewS3PolicyTemplates() - allTemplates := templates.GetAllPolicyTemplates() - - // Test that all policies have valid structure - for _, template := range allTemplates { - t.Run("Policy_"+template.Name, func(t *testing.T) { - policy := template.Policy - - // Basic validation - assert.Equal(t, "2012-10-17", policy.Version) - assert.Greater(t, len(policy.Statement), 0) - - // Validate each statement - for i, stmt := range policy.Statement { - assert.NotEmpty(t, stmt.Effect, "Statement %d should have effect", i) - assert.Contains(t, []string{"Allow", "Deny"}, stmt.Effect, "Statement %d effect should be Allow or Deny", i) - assert.Greater(t, len(stmt.Action), 0, "Statement %d should have actions", i) - assert.Greater(t, len(stmt.Resource), 0, "Statement %d should have resources", i) - - // Check resource format - for _, resource := range stmt.Resource { - if resource != "*" { - assert.Contains(t, resource, "arn:seaweed:s3:::", "Resource should be valid SeaweedFS S3 ARN: %s", resource) - } - } - } - }) - } -} diff --git a/weed/s3api/s3_presigned_url_iam.go b/weed/s3api/s3_presigned_url_iam.go deleted file mode 100644 index 86b07668b..000000000 --- a/weed/s3api/s3_presigned_url_iam.go +++ /dev/null @@ -1,383 +0,0 @@ -package s3api - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// S3PresignedURLManager handles IAM integration for presigned URLs -type S3PresignedURLManager struct { - s3iam *S3IAMIntegration -} - -// NewS3PresignedURLManager creates a new presigned URL manager with IAM integration -func NewS3PresignedURLManager(s3iam *S3IAMIntegration) *S3PresignedURLManager { - return &S3PresignedURLManager{ - s3iam: s3iam, - } -} - -// PresignedURLRequest represents a request to generate a presigned URL -type PresignedURLRequest struct { - Method string `json:"method"` // HTTP method (GET, PUT, POST, DELETE) - Bucket string `json:"bucket"` // S3 bucket name - ObjectKey string `json:"object_key"` // S3 object key - Expiration time.Duration `json:"expiration"` // URL expiration duration - SessionToken string `json:"session_token"` // JWT session token for IAM - Headers map[string]string `json:"headers"` // Additional headers to sign - QueryParams map[string]string `json:"query_params"` // Additional query parameters -} - -// PresignedURLResponse represents the generated presigned URL -type PresignedURLResponse struct { - URL string `json:"url"` // The presigned URL - Method string `json:"method"` // HTTP method - Headers map[string]string `json:"headers"` // Required headers - ExpiresAt time.Time `json:"expires_at"` // URL expiration time - SignedHeaders []string `json:"signed_headers"` // List of signed headers - CanonicalQuery string `json:"canonical_query"` // Canonical query string -} - -// ValidatePresignedURLWithIAM validates a presigned URL request using IAM policies -func (iam *IdentityAccessManagement) ValidatePresignedURLWithIAM(r *http.Request, identity *Identity) s3err.ErrorCode { - if iam.iamIntegration == nil { - // Fall back to standard validation - return s3err.ErrNone - } - - // Extract bucket and object from request - bucket, object := s3_constants.GetBucketAndObject(r) - - // Determine the S3 action from HTTP method and path - action := determineS3ActionFromRequest(r, bucket, object) - - // Check if the user has permission for this action - ctx := r.Context() - sessionToken := extractSessionTokenFromPresignedURL(r) - if sessionToken == "" { - // No session token in presigned URL - use standard auth - return s3err.ErrNone - } - - // Parse JWT token to extract role and session information - tokenClaims, err := parseJWTToken(sessionToken) - if err != nil { - glog.V(3).Infof("Failed to parse JWT token in presigned URL: %v", err) - return s3err.ErrAccessDenied - } - - // Extract role information from token claims - roleName, ok := tokenClaims["role"].(string) - if !ok || roleName == "" { - glog.V(3).Info("No role found in JWT token for presigned URL") - return s3err.ErrAccessDenied - } - - sessionName, ok := tokenClaims["snam"].(string) - if !ok || sessionName == "" { - sessionName = "presigned-session" // Default fallback - } - - // Use the principal ARN directly from token claims, or build it if not available - principalArn, ok := tokenClaims["principal"].(string) - if !ok || principalArn == "" { - // Fallback: extract role name from role ARN and build principal ARN - roleNameOnly := roleName - if strings.Contains(roleName, "/") { - parts := strings.Split(roleName, "/") - roleNameOnly = parts[len(parts)-1] - } - principalArn = fmt.Sprintf("arn:seaweed:sts::assumed-role/%s/%s", roleNameOnly, sessionName) - } - - // Create IAM identity for authorization using extracted information - iamIdentity := &IAMIdentity{ - Name: identity.Name, - Principal: principalArn, - SessionToken: sessionToken, - Account: identity.Account, - } - - // Authorize using IAM - errCode := iam.iamIntegration.AuthorizeAction(ctx, iamIdentity, action, bucket, object, r) - if errCode != s3err.ErrNone { - glog.V(3).Infof("IAM authorization failed for presigned URL: principal=%s action=%s bucket=%s object=%s", - iamIdentity.Principal, action, bucket, object) - return errCode - } - - glog.V(3).Infof("IAM authorization succeeded for presigned URL: principal=%s action=%s bucket=%s object=%s", - iamIdentity.Principal, action, bucket, object) - return s3err.ErrNone -} - -// GeneratePresignedURLWithIAM generates a presigned URL with IAM policy validation -func (pm *S3PresignedURLManager) GeneratePresignedURLWithIAM(ctx context.Context, req *PresignedURLRequest, baseURL string) (*PresignedURLResponse, error) { - if pm.s3iam == nil || !pm.s3iam.enabled { - return nil, fmt.Errorf("IAM integration not enabled") - } - - // Validate session token and get identity - // Use a proper ARN format for the principal - principalArn := fmt.Sprintf("arn:seaweed:sts::assumed-role/PresignedUser/presigned-session") - iamIdentity := &IAMIdentity{ - SessionToken: req.SessionToken, - Principal: principalArn, - Name: "presigned-user", - Account: &AccountAdmin, - } - - // Determine S3 action from method - action := determineS3ActionFromMethodAndPath(req.Method, req.Bucket, req.ObjectKey) - - // Check IAM permissions before generating URL - authRequest := &http.Request{ - Method: req.Method, - URL: &url.URL{Path: "/" + req.Bucket + "/" + req.ObjectKey}, - Header: make(http.Header), - } - authRequest.Header.Set("Authorization", "Bearer "+req.SessionToken) - authRequest = authRequest.WithContext(ctx) - - errCode := pm.s3iam.AuthorizeAction(ctx, iamIdentity, action, req.Bucket, req.ObjectKey, authRequest) - if errCode != s3err.ErrNone { - return nil, fmt.Errorf("IAM authorization failed: user does not have permission for action %s on resource %s/%s", action, req.Bucket, req.ObjectKey) - } - - // Generate presigned URL with validated permissions - return pm.generatePresignedURL(req, baseURL, iamIdentity) -} - -// generatePresignedURL creates the actual presigned URL -func (pm *S3PresignedURLManager) generatePresignedURL(req *PresignedURLRequest, baseURL string, identity *IAMIdentity) (*PresignedURLResponse, error) { - // Calculate expiration time - expiresAt := time.Now().Add(req.Expiration) - - // Build the base URL - urlPath := "/" + req.Bucket - if req.ObjectKey != "" { - urlPath += "/" + req.ObjectKey - } - - // Create query parameters for AWS signature v4 - queryParams := make(map[string]string) - for k, v := range req.QueryParams { - queryParams[k] = v - } - - // Add AWS signature v4 parameters - queryParams["X-Amz-Algorithm"] = "AWS4-HMAC-SHA256" - queryParams["X-Amz-Credential"] = fmt.Sprintf("seaweedfs/%s/us-east-1/s3/aws4_request", expiresAt.Format("20060102")) - queryParams["X-Amz-Date"] = expiresAt.Format("20060102T150405Z") - queryParams["X-Amz-Expires"] = strconv.Itoa(int(req.Expiration.Seconds())) - queryParams["X-Amz-SignedHeaders"] = "host" - - // Add session token if available - if identity.SessionToken != "" { - queryParams["X-Amz-Security-Token"] = identity.SessionToken - } - - // Build canonical query string - canonicalQuery := buildCanonicalQuery(queryParams) - - // For now, we'll create a mock signature - // In production, this would use proper AWS signature v4 signing - mockSignature := generateMockSignature(req.Method, urlPath, canonicalQuery, identity.SessionToken) - queryParams["X-Amz-Signature"] = mockSignature - - // Build final URL - finalQuery := buildCanonicalQuery(queryParams) - fullURL := baseURL + urlPath + "?" + finalQuery - - // Prepare response - headers := make(map[string]string) - for k, v := range req.Headers { - headers[k] = v - } - - return &PresignedURLResponse{ - URL: fullURL, - Method: req.Method, - Headers: headers, - ExpiresAt: expiresAt, - SignedHeaders: []string{"host"}, - CanonicalQuery: canonicalQuery, - }, nil -} - -// Helper functions - -// determineS3ActionFromRequest determines the S3 action based on HTTP request -func determineS3ActionFromRequest(r *http.Request, bucket, object string) Action { - return determineS3ActionFromMethodAndPath(r.Method, bucket, object) -} - -// determineS3ActionFromMethodAndPath determines the S3 action based on method and path -func determineS3ActionFromMethodAndPath(method, bucket, object string) Action { - switch method { - case "GET": - if object == "" { - return s3_constants.ACTION_LIST // ListBucket - } else { - return s3_constants.ACTION_READ // GetObject - } - case "PUT", "POST": - return s3_constants.ACTION_WRITE // PutObject - case "DELETE": - if object == "" { - return s3_constants.ACTION_DELETE_BUCKET // DeleteBucket - } else { - return s3_constants.ACTION_WRITE // DeleteObject (uses WRITE action) - } - case "HEAD": - if object == "" { - return s3_constants.ACTION_LIST // HeadBucket - } else { - return s3_constants.ACTION_READ // HeadObject - } - default: - return s3_constants.ACTION_READ // Default to read - } -} - -// extractSessionTokenFromPresignedURL extracts session token from presigned URL query parameters -func extractSessionTokenFromPresignedURL(r *http.Request) string { - // Check for X-Amz-Security-Token in query parameters - if token := r.URL.Query().Get("X-Amz-Security-Token"); token != "" { - return token - } - - // Check for session token in other possible locations - if token := r.URL.Query().Get("SessionToken"); token != "" { - return token - } - - return "" -} - -// buildCanonicalQuery builds a canonical query string for AWS signature -func buildCanonicalQuery(params map[string]string) string { - var keys []string - for k := range params { - keys = append(keys, k) - } - - // Sort keys for canonical order - for i := 0; i < len(keys); i++ { - for j := i + 1; j < len(keys); j++ { - if keys[i] > keys[j] { - keys[i], keys[j] = keys[j], keys[i] - } - } - } - - var parts []string - for _, k := range keys { - parts = append(parts, fmt.Sprintf("%s=%s", url.QueryEscape(k), url.QueryEscape(params[k]))) - } - - return strings.Join(parts, "&") -} - -// generateMockSignature generates a mock signature for testing purposes -func generateMockSignature(method, path, query, sessionToken string) string { - // This is a simplified signature for demonstration - // In production, use proper AWS signature v4 calculation - data := fmt.Sprintf("%s\n%s\n%s\n%s", method, path, query, sessionToken) - hash := sha256.Sum256([]byte(data)) - return hex.EncodeToString(hash[:])[:16] // Truncate for readability -} - -// ValidatePresignedURLExpiration validates that a presigned URL hasn't expired -func ValidatePresignedURLExpiration(r *http.Request) error { - query := r.URL.Query() - - // Get X-Amz-Date and X-Amz-Expires - dateStr := query.Get("X-Amz-Date") - expiresStr := query.Get("X-Amz-Expires") - - if dateStr == "" || expiresStr == "" { - return fmt.Errorf("missing required presigned URL parameters") - } - - // Parse date (always in UTC) - signedDate, err := time.Parse("20060102T150405Z", dateStr) - if err != nil { - return fmt.Errorf("invalid X-Amz-Date format: %v", err) - } - - // Parse expires - expires, err := strconv.Atoi(expiresStr) - if err != nil { - return fmt.Errorf("invalid X-Amz-Expires format: %v", err) - } - - // Check expiration - compare in UTC - expirationTime := signedDate.Add(time.Duration(expires) * time.Second) - now := time.Now().UTC() - if now.After(expirationTime) { - return fmt.Errorf("presigned URL has expired") - } - - return nil -} - -// PresignedURLSecurityPolicy represents security constraints for presigned URL generation -type PresignedURLSecurityPolicy struct { - MaxExpirationDuration time.Duration `json:"max_expiration_duration"` // Maximum allowed expiration - AllowedMethods []string `json:"allowed_methods"` // Allowed HTTP methods - RequiredHeaders []string `json:"required_headers"` // Headers that must be present - IPWhitelist []string `json:"ip_whitelist"` // Allowed IP addresses/ranges - MaxFileSize int64 `json:"max_file_size"` // Maximum file size for uploads -} - -// DefaultPresignedURLSecurityPolicy returns a default security policy -func DefaultPresignedURLSecurityPolicy() *PresignedURLSecurityPolicy { - return &PresignedURLSecurityPolicy{ - MaxExpirationDuration: 7 * 24 * time.Hour, // 7 days max - AllowedMethods: []string{"GET", "PUT", "POST", "HEAD"}, - RequiredHeaders: []string{}, - IPWhitelist: []string{}, // Empty means no IP restrictions - MaxFileSize: 5 * 1024 * 1024 * 1024, // 5GB default - } -} - -// ValidatePresignedURLRequest validates a presigned URL request against security policy -func (policy *PresignedURLSecurityPolicy) ValidatePresignedURLRequest(req *PresignedURLRequest) error { - // Check expiration duration - if req.Expiration > policy.MaxExpirationDuration { - return fmt.Errorf("expiration duration %v exceeds maximum allowed %v", req.Expiration, policy.MaxExpirationDuration) - } - - // Check HTTP method - methodAllowed := false - for _, allowedMethod := range policy.AllowedMethods { - if req.Method == allowedMethod { - methodAllowed = true - break - } - } - if !methodAllowed { - return fmt.Errorf("HTTP method %s is not allowed", req.Method) - } - - // Check required headers - for _, requiredHeader := range policy.RequiredHeaders { - if _, exists := req.Headers[requiredHeader]; !exists { - return fmt.Errorf("required header %s is missing", requiredHeader) - } - } - - return nil -} diff --git a/weed/s3api/s3_presigned_url_iam_test.go b/weed/s3api/s3_presigned_url_iam_test.go deleted file mode 100644 index 890162121..000000000 --- a/weed/s3api/s3_presigned_url_iam_test.go +++ /dev/null @@ -1,602 +0,0 @@ -package s3api - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/ldap" - "github.com/seaweedfs/seaweedfs/weed/iam/oidc" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// createTestJWTPresigned creates a test JWT token with the specified issuer, subject and signing key -func createTestJWTPresigned(t *testing.T, issuer, subject, signingKey string) string { - token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ - "iss": issuer, - "sub": subject, - "aud": "test-client-id", - "exp": time.Now().Add(time.Hour).Unix(), - "iat": time.Now().Unix(), - // Add claims that trust policy validation expects - "idp": "test-oidc", // Identity provider claim for trust policy matching - }) - - tokenString, err := token.SignedString([]byte(signingKey)) - require.NoError(t, err) - return tokenString -} - -// TestPresignedURLIAMValidation tests IAM validation for presigned URLs -func TestPresignedURLIAMValidation(t *testing.T) { - // Set up IAM system - iamManager := setupTestIAMManagerForPresigned(t) - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - - // Create IAM with integration - iam := &IdentityAccessManagement{ - isAuthEnabled: true, - } - iam.SetIAMIntegration(s3iam) - - // Set up roles - ctx := context.Background() - setupTestRolesForPresigned(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTPresigned(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Get session token - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3ReadOnlyRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "presigned-test-session", - }) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - - tests := []struct { - name string - method string - path string - sessionToken string - expectedResult s3err.ErrorCode - }{ - { - name: "GET object with read permissions", - method: "GET", - path: "/test-bucket/test-file.txt", - sessionToken: sessionToken, - expectedResult: s3err.ErrNone, - }, - { - name: "PUT object with read-only permissions (should fail)", - method: "PUT", - path: "/test-bucket/new-file.txt", - sessionToken: sessionToken, - expectedResult: s3err.ErrAccessDenied, - }, - { - name: "GET object without session token", - method: "GET", - path: "/test-bucket/test-file.txt", - sessionToken: "", - expectedResult: s3err.ErrNone, // Falls back to standard auth - }, - { - name: "Invalid session token", - method: "GET", - path: "/test-bucket/test-file.txt", - sessionToken: "invalid-token", - expectedResult: s3err.ErrAccessDenied, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create request with presigned URL parameters - req := createPresignedURLRequest(t, tt.method, tt.path, tt.sessionToken) - - // Create identity for testing - identity := &Identity{ - Name: "test-user", - Account: &AccountAdmin, - } - - // Test validation - result := iam.ValidatePresignedURLWithIAM(req, identity) - assert.Equal(t, tt.expectedResult, result, "IAM validation result should match expected") - }) - } -} - -// TestPresignedURLGeneration tests IAM-aware presigned URL generation -func TestPresignedURLGeneration(t *testing.T) { - // Set up IAM system - iamManager := setupTestIAMManagerForPresigned(t) - s3iam := NewS3IAMIntegration(iamManager, "localhost:8888") - s3iam.enabled = true // Enable IAM integration - presignedManager := NewS3PresignedURLManager(s3iam) - - ctx := context.Background() - setupTestRolesForPresigned(ctx, iamManager) - - // Create a valid JWT token for testing - validJWTToken := createTestJWTPresigned(t, "https://test-issuer.com", "test-user-123", "test-signing-key") - - // Get session token - response, err := iamManager.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityRequest{ - RoleArn: "arn:seaweed:iam::role/S3AdminRole", - WebIdentityToken: validJWTToken, - RoleSessionName: "presigned-gen-test-session", - }) - require.NoError(t, err) - - sessionToken := response.Credentials.SessionToken - - tests := []struct { - name string - request *PresignedURLRequest - shouldSucceed bool - expectedError string - }{ - { - name: "Generate valid presigned GET URL", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: time.Hour, - SessionToken: sessionToken, - }, - shouldSucceed: true, - }, - { - name: "Generate valid presigned PUT URL", - request: &PresignedURLRequest{ - Method: "PUT", - Bucket: "test-bucket", - ObjectKey: "new-file.txt", - Expiration: time.Hour, - SessionToken: sessionToken, - }, - shouldSucceed: true, - }, - { - name: "Generate URL with invalid session token", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: time.Hour, - SessionToken: "invalid-token", - }, - shouldSucceed: false, - expectedError: "IAM authorization failed", - }, - { - name: "Generate URL without session token", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: time.Hour, - }, - shouldSucceed: false, - expectedError: "IAM authorization failed", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - response, err := presignedManager.GeneratePresignedURLWithIAM(ctx, tt.request, "http://localhost:8333") - - if tt.shouldSucceed { - assert.NoError(t, err, "Presigned URL generation should succeed") - if response != nil { - assert.NotEmpty(t, response.URL, "URL should not be empty") - assert.Equal(t, tt.request.Method, response.Method, "Method should match") - assert.True(t, response.ExpiresAt.After(time.Now()), "URL should not be expired") - } else { - t.Errorf("Response should not be nil when generation should succeed") - } - } else { - assert.Error(t, err, "Presigned URL generation should fail") - if tt.expectedError != "" { - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - } - }) - } -} - -// TestPresignedURLExpiration tests URL expiration validation -func TestPresignedURLExpiration(t *testing.T) { - tests := []struct { - name string - setupRequest func() *http.Request - expectedError string - }{ - { - name: "Valid non-expired URL", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", nil) - q := req.URL.Query() - // Set date to 30 minutes ago with 2 hours expiration for safe margin - q.Set("X-Amz-Date", time.Now().UTC().Add(-30*time.Minute).Format("20060102T150405Z")) - q.Set("X-Amz-Expires", "7200") // 2 hours - req.URL.RawQuery = q.Encode() - return req - }, - expectedError: "", - }, - { - name: "Expired URL", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", nil) - q := req.URL.Query() - // Set date to 2 hours ago with 1 hour expiration - q.Set("X-Amz-Date", time.Now().UTC().Add(-2*time.Hour).Format("20060102T150405Z")) - q.Set("X-Amz-Expires", "3600") // 1 hour - req.URL.RawQuery = q.Encode() - return req - }, - expectedError: "presigned URL has expired", - }, - { - name: "Missing date parameter", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", nil) - q := req.URL.Query() - q.Set("X-Amz-Expires", "3600") - req.URL.RawQuery = q.Encode() - return req - }, - expectedError: "missing required presigned URL parameters", - }, - { - name: "Invalid date format", - setupRequest: func() *http.Request { - req := httptest.NewRequest("GET", "/test-bucket/test-file.txt", nil) - q := req.URL.Query() - q.Set("X-Amz-Date", "invalid-date") - q.Set("X-Amz-Expires", "3600") - req.URL.RawQuery = q.Encode() - return req - }, - expectedError: "invalid X-Amz-Date format", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := tt.setupRequest() - err := ValidatePresignedURLExpiration(req) - - if tt.expectedError == "" { - assert.NoError(t, err, "Validation should succeed") - } else { - assert.Error(t, err, "Validation should fail") - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - }) - } -} - -// TestPresignedURLSecurityPolicy tests security policy enforcement -func TestPresignedURLSecurityPolicy(t *testing.T) { - policy := &PresignedURLSecurityPolicy{ - MaxExpirationDuration: 24 * time.Hour, - AllowedMethods: []string{"GET", "PUT"}, - RequiredHeaders: []string{"Content-Type"}, - MaxFileSize: 1024 * 1024, // 1MB - } - - tests := []struct { - name string - request *PresignedURLRequest - expectedError string - }{ - { - name: "Valid request", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: 12 * time.Hour, - Headers: map[string]string{"Content-Type": "application/json"}, - }, - expectedError: "", - }, - { - name: "Expiration too long", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: 48 * time.Hour, // Exceeds 24h limit - Headers: map[string]string{"Content-Type": "application/json"}, - }, - expectedError: "expiration duration", - }, - { - name: "Method not allowed", - request: &PresignedURLRequest{ - Method: "DELETE", // Not in allowed methods - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: 12 * time.Hour, - Headers: map[string]string{"Content-Type": "application/json"}, - }, - expectedError: "HTTP method DELETE is not allowed", - }, - { - name: "Missing required header", - request: &PresignedURLRequest{ - Method: "GET", - Bucket: "test-bucket", - ObjectKey: "test-file.txt", - Expiration: 12 * time.Hour, - Headers: map[string]string{}, // Missing Content-Type - }, - expectedError: "required header Content-Type is missing", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := policy.ValidatePresignedURLRequest(tt.request) - - if tt.expectedError == "" { - assert.NoError(t, err, "Policy validation should succeed") - } else { - assert.Error(t, err, "Policy validation should fail") - assert.Contains(t, err.Error(), tt.expectedError, "Error message should contain expected text") - } - }) - } -} - -// TestS3ActionDetermination tests action determination from HTTP methods -func TestS3ActionDetermination(t *testing.T) { - tests := []struct { - name string - method string - bucket string - object string - expectedAction Action - }{ - { - name: "GET object", - method: "GET", - bucket: "test-bucket", - object: "test-file.txt", - expectedAction: s3_constants.ACTION_READ, - }, - { - name: "GET bucket (list)", - method: "GET", - bucket: "test-bucket", - object: "", - expectedAction: s3_constants.ACTION_LIST, - }, - { - name: "PUT object", - method: "PUT", - bucket: "test-bucket", - object: "new-file.txt", - expectedAction: s3_constants.ACTION_WRITE, - }, - { - name: "DELETE object", - method: "DELETE", - bucket: "test-bucket", - object: "old-file.txt", - expectedAction: s3_constants.ACTION_WRITE, - }, - { - name: "DELETE bucket", - method: "DELETE", - bucket: "test-bucket", - object: "", - expectedAction: s3_constants.ACTION_DELETE_BUCKET, - }, - { - name: "HEAD object", - method: "HEAD", - bucket: "test-bucket", - object: "test-file.txt", - expectedAction: s3_constants.ACTION_READ, - }, - { - name: "POST object", - method: "POST", - bucket: "test-bucket", - object: "upload-file.txt", - expectedAction: s3_constants.ACTION_WRITE, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - action := determineS3ActionFromMethodAndPath(tt.method, tt.bucket, tt.object) - assert.Equal(t, tt.expectedAction, action, "S3 action should match expected") - }) - } -} - -// Helper functions for tests - -func setupTestIAMManagerForPresigned(t *testing.T) *integration.IAMManager { - // Create IAM manager - manager := integration.NewIAMManager() - - // Initialize with test configuration - config := &integration.IAMConfig{ - STS: &sts.STSConfig{ - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{time.Hour * 12}, - Issuer: "test-sts", - SigningKey: []byte("test-signing-key-32-characters-long"), - }, - Policy: &policy.PolicyEngineConfig{ - DefaultEffect: "Deny", - StoreType: "memory", - }, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", - }, - } - - err := manager.Initialize(config, func() string { - return "localhost:8888" // Mock filer address for testing - }) - require.NoError(t, err) - - // Set up test identity providers - setupTestProvidersForPresigned(t, manager) - - return manager -} - -func setupTestProvidersForPresigned(t *testing.T, manager *integration.IAMManager) { - // Set up OIDC provider - oidcProvider := oidc.NewMockOIDCProvider("test-oidc") - oidcConfig := &oidc.OIDCConfig{ - Issuer: "https://test-issuer.com", - ClientID: "test-client-id", - } - err := oidcProvider.Initialize(oidcConfig) - require.NoError(t, err) - oidcProvider.SetupDefaultTestData() - - // Set up LDAP provider - ldapProvider := ldap.NewMockLDAPProvider("test-ldap") - err = ldapProvider.Initialize(nil) // Mock doesn't need real config - require.NoError(t, err) - ldapProvider.SetupDefaultTestData() - - // Register providers - err = manager.RegisterIdentityProvider(oidcProvider) - require.NoError(t, err) - err = manager.RegisterIdentityProvider(ldapProvider) - require.NoError(t, err) -} - -func setupTestRolesForPresigned(ctx context.Context, manager *integration.IAMManager) { - // Create read-only policy - readOnlyPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowS3ReadOperations", - Effect: "Allow", - Action: []string{"s3:GetObject", "s3:ListBucket", "s3:HeadObject"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3ReadOnlyPolicy", readOnlyPolicy) - - // Create read-only role - manager.CreateRole(ctx, "", "S3ReadOnlyRole", &integration.RoleDefinition{ - RoleName: "S3ReadOnlyRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3ReadOnlyPolicy"}, - }) - - // Create admin policy - adminPolicy := &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Sid: "AllowAllS3Operations", - Effect: "Allow", - Action: []string{"s3:*"}, - Resource: []string{ - "arn:seaweed:s3:::*", - "arn:seaweed:s3:::*/*", - }, - }, - }, - } - - manager.CreatePolicy(ctx, "", "S3AdminPolicy", adminPolicy) - - // Create admin role - manager.CreateRole(ctx, "", "S3AdminRole", &integration.RoleDefinition{ - RoleName: "S3AdminRole", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3AdminPolicy"}, - }) - - // Create a role for presigned URL users with admin permissions for testing - manager.CreateRole(ctx, "", "PresignedUser", &integration.RoleDefinition{ - RoleName: "PresignedUser", - TrustPolicy: &policy.PolicyDocument{ - Version: "2012-10-17", - Statement: []policy.Statement{ - { - Effect: "Allow", - Principal: map[string]interface{}{ - "Federated": "test-oidc", - }, - Action: []string{"sts:AssumeRoleWithWebIdentity"}, - }, - }, - }, - AttachedPolicies: []string{"S3AdminPolicy"}, // Use admin policy for testing - }) -} - -func createPresignedURLRequest(t *testing.T, method, path, sessionToken string) *http.Request { - req := httptest.NewRequest(method, path, nil) - - // Add presigned URL parameters if session token is provided - if sessionToken != "" { - q := req.URL.Query() - q.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256") - q.Set("X-Amz-Security-Token", sessionToken) - q.Set("X-Amz-Date", time.Now().Format("20060102T150405Z")) - q.Set("X-Amz-Expires", "3600") - req.URL.RawQuery = q.Encode() - } - - return req -} diff --git a/weed/s3api/s3_sse_bucket_test.go b/weed/s3api/s3_sse_bucket_test.go deleted file mode 100644 index 74ad9296b..000000000 --- a/weed/s3api/s3_sse_bucket_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package s3api - -import ( - "fmt" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" -) - -// TestBucketDefaultSSEKMSEnforcement tests bucket default encryption enforcement -func TestBucketDefaultSSEKMSEnforcement(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Create bucket encryption configuration - config := &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "aws:kms", - KmsKeyId: kmsKey.KeyID, - BucketKeyEnabled: false, - } - - t.Run("Bucket with SSE-KMS default encryption", func(t *testing.T) { - // Test that default encryption config is properly stored and retrieved - if config.SseAlgorithm != "aws:kms" { - t.Errorf("Expected SSE algorithm aws:kms, got %s", config.SseAlgorithm) - } - - if config.KmsKeyId != kmsKey.KeyID { - t.Errorf("Expected KMS key ID %s, got %s", kmsKey.KeyID, config.KmsKeyId) - } - }) - - t.Run("Default encryption headers generation", func(t *testing.T) { - // Test generating default encryption headers for objects - headers := GetDefaultEncryptionHeaders(config) - - if headers == nil { - t.Fatal("Expected default headers, got nil") - } - - expectedAlgorithm := headers["X-Amz-Server-Side-Encryption"] - if expectedAlgorithm != "aws:kms" { - t.Errorf("Expected X-Amz-Server-Side-Encryption header aws:kms, got %s", expectedAlgorithm) - } - - expectedKeyID := headers["X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"] - if expectedKeyID != kmsKey.KeyID { - t.Errorf("Expected X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id header %s, got %s", kmsKey.KeyID, expectedKeyID) - } - }) - - t.Run("Default encryption detection", func(t *testing.T) { - // Test IsDefaultEncryptionEnabled - enabled := IsDefaultEncryptionEnabled(config) - if !enabled { - t.Error("Should detect default encryption as enabled") - } - - // Test with nil config - enabled = IsDefaultEncryptionEnabled(nil) - if enabled { - t.Error("Should detect default encryption as disabled for nil config") - } - - // Test with empty config - emptyConfig := &s3_pb.EncryptionConfiguration{} - enabled = IsDefaultEncryptionEnabled(emptyConfig) - if enabled { - t.Error("Should detect default encryption as disabled for empty config") - } - }) -} - -// TestBucketEncryptionConfigValidation tests XML validation of bucket encryption configurations -func TestBucketEncryptionConfigValidation(t *testing.T) { - testCases := []struct { - name string - xml string - expectError bool - description string - }{ - { - name: "Valid SSE-S3 configuration", - xml: ` - - - AES256 - - - `, - expectError: false, - description: "Basic SSE-S3 configuration should be valid", - }, - { - name: "Valid SSE-KMS configuration", - xml: ` - - - aws:kms - test-key-id - - - `, - expectError: false, - description: "SSE-KMS configuration with key ID should be valid", - }, - { - name: "Valid SSE-KMS without key ID", - xml: ` - - - aws:kms - - - `, - expectError: false, - description: "SSE-KMS without key ID should use default key", - }, - { - name: "Invalid XML structure", - xml: ` - - AES256 - - `, - expectError: true, - description: "Invalid XML structure should be rejected", - }, - { - name: "Empty configuration", - xml: ` - `, - expectError: true, - description: "Empty configuration should be rejected", - }, - { - name: "Invalid algorithm", - xml: ` - - - INVALID - - - `, - expectError: true, - description: "Invalid algorithm should be rejected", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - config, err := encryptionConfigFromXMLBytes([]byte(tc.xml)) - - if tc.expectError && err == nil { - t.Errorf("Expected error for %s, but got none. %s", tc.name, tc.description) - } - - if !tc.expectError && err != nil { - t.Errorf("Expected no error for %s, but got: %v. %s", tc.name, err, tc.description) - } - - if !tc.expectError && config != nil { - // Validate the parsed configuration - t.Logf("Successfully parsed config: Algorithm=%s, KeyID=%s", - config.SseAlgorithm, config.KmsKeyId) - } - }) - } -} - -// TestBucketEncryptionAPIOperations tests the bucket encryption API operations -func TestBucketEncryptionAPIOperations(t *testing.T) { - // Note: These tests would normally require a full S3 API server setup - // For now, we test the individual components - - t.Run("PUT bucket encryption", func(t *testing.T) { - xml := ` - - - aws:kms - test-key-id - - - ` - - // Parse the XML to protobuf - config, err := encryptionConfigFromXMLBytes([]byte(xml)) - if err != nil { - t.Fatalf("Failed to parse encryption config: %v", err) - } - - // Verify the parsed configuration - if config.SseAlgorithm != "aws:kms" { - t.Errorf("Expected algorithm aws:kms, got %s", config.SseAlgorithm) - } - - if config.KmsKeyId != "test-key-id" { - t.Errorf("Expected key ID test-key-id, got %s", config.KmsKeyId) - } - - // Convert back to XML - xmlBytes, err := encryptionConfigToXMLBytes(config) - if err != nil { - t.Fatalf("Failed to convert config to XML: %v", err) - } - - // Verify round-trip - if len(xmlBytes) == 0 { - t.Error("Generated XML should not be empty") - } - - // Parse again to verify - roundTripConfig, err := encryptionConfigFromXMLBytes(xmlBytes) - if err != nil { - t.Fatalf("Failed to parse round-trip XML: %v", err) - } - - if roundTripConfig.SseAlgorithm != config.SseAlgorithm { - t.Error("Round-trip algorithm doesn't match") - } - - if roundTripConfig.KmsKeyId != config.KmsKeyId { - t.Error("Round-trip key ID doesn't match") - } - }) - - t.Run("GET bucket encryption", func(t *testing.T) { - // Test getting encryption configuration - config := &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "AES256", - KmsKeyId: "", - BucketKeyEnabled: false, - } - - // Convert to XML for GET response - xmlBytes, err := encryptionConfigToXMLBytes(config) - if err != nil { - t.Fatalf("Failed to convert config to XML: %v", err) - } - - if len(xmlBytes) == 0 { - t.Error("Generated XML should not be empty") - } - - // Verify XML contains expected elements - xmlStr := string(xmlBytes) - if !strings.Contains(xmlStr, "AES256") { - t.Error("XML should contain AES256 algorithm") - } - }) - - t.Run("DELETE bucket encryption", func(t *testing.T) { - // Test deleting encryption configuration - // This would typically involve removing the configuration from metadata - - // Simulate checking if encryption is enabled after deletion - enabled := IsDefaultEncryptionEnabled(nil) - if enabled { - t.Error("Encryption should be disabled after deletion") - } - }) -} - -// TestBucketEncryptionEdgeCases tests edge cases in bucket encryption -func TestBucketEncryptionEdgeCases(t *testing.T) { - t.Run("Large XML configuration", func(t *testing.T) { - // Test with a large but valid XML - largeXML := ` - - - aws:kms - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - - true - - ` - - config, err := encryptionConfigFromXMLBytes([]byte(largeXML)) - if err != nil { - t.Fatalf("Failed to parse large XML: %v", err) - } - - if config.SseAlgorithm != "aws:kms" { - t.Error("Should parse large XML correctly") - } - }) - - t.Run("XML with namespaces", func(t *testing.T) { - // Test XML with namespaces - namespacedXML := ` - - - AES256 - - - ` - - config, err := encryptionConfigFromXMLBytes([]byte(namespacedXML)) - if err != nil { - t.Fatalf("Failed to parse namespaced XML: %v", err) - } - - if config.SseAlgorithm != "AES256" { - t.Error("Should parse namespaced XML correctly") - } - }) - - t.Run("Malformed XML", func(t *testing.T) { - malformedXMLs := []string{ - `AES256`, // Unclosed tags - ``, // Empty rule - `not-xml-at-all`, // Not XML - `AES256`, // Invalid namespace - } - - for i, malformedXML := range malformedXMLs { - t.Run(fmt.Sprintf("Malformed XML %d", i), func(t *testing.T) { - _, err := encryptionConfigFromXMLBytes([]byte(malformedXML)) - if err == nil { - t.Errorf("Expected error for malformed XML %d, but got none", i) - } - }) - } - }) -} - -// TestGetDefaultEncryptionHeaders tests generation of default encryption headers -func TestGetDefaultEncryptionHeaders(t *testing.T) { - testCases := []struct { - name string - config *s3_pb.EncryptionConfiguration - expectedHeaders map[string]string - }{ - { - name: "Nil configuration", - config: nil, - expectedHeaders: nil, - }, - { - name: "SSE-S3 configuration", - config: &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "AES256", - }, - expectedHeaders: map[string]string{ - "X-Amz-Server-Side-Encryption": "AES256", - }, - }, - { - name: "SSE-KMS configuration with key", - config: &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "aws:kms", - KmsKeyId: "test-key-id", - }, - expectedHeaders: map[string]string{ - "X-Amz-Server-Side-Encryption": "aws:kms", - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": "test-key-id", - }, - }, - { - name: "SSE-KMS configuration without key", - config: &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "aws:kms", - }, - expectedHeaders: map[string]string{ - "X-Amz-Server-Side-Encryption": "aws:kms", - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - headers := GetDefaultEncryptionHeaders(tc.config) - - if tc.expectedHeaders == nil && headers != nil { - t.Error("Expected nil headers but got some") - } - - if tc.expectedHeaders != nil && headers == nil { - t.Error("Expected headers but got nil") - } - - if tc.expectedHeaders != nil && headers != nil { - for key, expectedValue := range tc.expectedHeaders { - if actualValue, exists := headers[key]; !exists { - t.Errorf("Expected header %s not found", key) - } else if actualValue != expectedValue { - t.Errorf("Header %s: expected %s, got %s", key, expectedValue, actualValue) - } - } - - // Check for unexpected headers - for key := range headers { - if _, expected := tc.expectedHeaders[key]; !expected { - t.Errorf("Unexpected header found: %s", key) - } - } - } - }) - } -} diff --git a/weed/s3api/s3_sse_c.go b/weed/s3api/s3_sse_c.go deleted file mode 100644 index 733ae764e..000000000 --- a/weed/s3api/s3_sse_c.go +++ /dev/null @@ -1,344 +0,0 @@ -package s3api - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/md5" - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// SSECCopyStrategy represents different strategies for copying SSE-C objects -type SSECCopyStrategy int - -const ( - // SSECCopyStrategyDirect indicates the object can be copied directly without decryption - SSECCopyStrategyDirect SSECCopyStrategy = iota - // SSECCopyStrategyDecryptEncrypt indicates the object must be decrypted then re-encrypted - SSECCopyStrategyDecryptEncrypt -) - -const ( - // SSE-C constants - SSECustomerAlgorithmAES256 = s3_constants.SSEAlgorithmAES256 - SSECustomerKeySize = 32 // 256 bits -) - -// SSE-C related errors -var ( - ErrInvalidRequest = errors.New("invalid request") - ErrInvalidEncryptionAlgorithm = errors.New("invalid encryption algorithm") - ErrInvalidEncryptionKey = errors.New("invalid encryption key") - ErrSSECustomerKeyMD5Mismatch = errors.New("customer key MD5 mismatch") - ErrSSECustomerKeyMissing = errors.New("customer key missing") - ErrSSECustomerKeyNotNeeded = errors.New("customer key not needed") -) - -// SSECustomerKey represents a customer-provided encryption key for SSE-C -type SSECustomerKey struct { - Algorithm string - Key []byte - KeyMD5 string -} - -// IsSSECRequest checks if the request contains SSE-C headers -func IsSSECRequest(r *http.Request) bool { - // If SSE-KMS headers are present, this is not an SSE-C request (they are mutually exclusive) - sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryption) - if sseAlgorithm == "aws:kms" || r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) != "" { - return false - } - - return r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" -} - -// IsSSECEncrypted checks if the metadata indicates SSE-C encryption -func IsSSECEncrypted(metadata map[string][]byte) bool { - if metadata == nil { - return false - } - - // Check for SSE-C specific metadata keys - if _, exists := metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists { - return true - } - if _, exists := metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists { - return true - } - - return false -} - -// validateAndParseSSECHeaders does the core validation and parsing logic -func validateAndParseSSECHeaders(algorithm, key, keyMD5 string) (*SSECustomerKey, error) { - if algorithm == "" && key == "" && keyMD5 == "" { - return nil, nil // No SSE-C headers - } - - if algorithm == "" || key == "" || keyMD5 == "" { - return nil, ErrInvalidRequest - } - - if algorithm != SSECustomerAlgorithmAES256 { - return nil, ErrInvalidEncryptionAlgorithm - } - - // Decode and validate key - keyBytes, err := base64.StdEncoding.DecodeString(key) - if err != nil { - return nil, ErrInvalidEncryptionKey - } - - if len(keyBytes) != SSECustomerKeySize { - return nil, ErrInvalidEncryptionKey - } - - // Validate key MD5 (base64-encoded MD5 of the raw key bytes; case-sensitive) - sum := md5.Sum(keyBytes) - expectedMD5 := base64.StdEncoding.EncodeToString(sum[:]) - - // Debug logging for MD5 validation - glog.V(4).Infof("SSE-C MD5 validation: provided='%s', expected='%s', keyBytes=%x", keyMD5, expectedMD5, keyBytes) - - if keyMD5 != expectedMD5 { - glog.Errorf("SSE-C MD5 mismatch: provided='%s', expected='%s'", keyMD5, expectedMD5) - return nil, ErrSSECustomerKeyMD5Mismatch - } - - return &SSECustomerKey{ - Algorithm: algorithm, - Key: keyBytes, - KeyMD5: keyMD5, - }, nil -} - -// ValidateSSECHeaders validates SSE-C headers in the request -func ValidateSSECHeaders(r *http.Request) error { - algorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - key := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKey) - keyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - - _, err := validateAndParseSSECHeaders(algorithm, key, keyMD5) - return err -} - -// ParseSSECHeaders parses and validates SSE-C headers from the request -func ParseSSECHeaders(r *http.Request) (*SSECustomerKey, error) { - algorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - key := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKey) - keyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - - return validateAndParseSSECHeaders(algorithm, key, keyMD5) -} - -// ParseSSECCopySourceHeaders parses and validates SSE-C copy source headers from the request -func ParseSSECCopySourceHeaders(r *http.Request) (*SSECustomerKey, error) { - algorithm := r.Header.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerAlgorithm) - key := r.Header.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKey) - keyMD5 := r.Header.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKeyMD5) - - return validateAndParseSSECHeaders(algorithm, key, keyMD5) -} - -// CreateSSECEncryptedReader creates a new encrypted reader for SSE-C -// Returns the encrypted reader and the IV for metadata storage -func CreateSSECEncryptedReader(r io.Reader, customerKey *SSECustomerKey) (io.Reader, []byte, error) { - if customerKey == nil { - return r, nil, nil - } - - // Create AES cipher - block, err := aes.NewCipher(customerKey.Key) - if err != nil { - return nil, nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Generate random IV - iv := make([]byte, s3_constants.AESBlockSize) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, nil, fmt.Errorf("failed to generate IV: %v", err) - } - - // Create CTR mode cipher - stream := cipher.NewCTR(block, iv) - - // The IV is stored in metadata, so the encrypted stream does not need to prepend the IV - // This ensures correct Content-Length for clients - encryptedReader := &cipher.StreamReader{S: stream, R: r} - - return encryptedReader, iv, nil -} - -// CreateSSECDecryptedReader creates a new decrypted reader for SSE-C -// The IV comes from metadata, not from the encrypted data stream -func CreateSSECDecryptedReader(r io.Reader, customerKey *SSECustomerKey, iv []byte) (io.Reader, error) { - if customerKey == nil { - return r, nil - } - - // IV must be provided from metadata - if err := ValidateIV(iv, "IV"); err != nil { - return nil, fmt.Errorf("invalid IV from metadata: %w", err) - } - - // Create AES cipher - block, err := aes.NewCipher(customerKey.Key) - if err != nil { - return nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Create CTR mode cipher using the IV from metadata - stream := cipher.NewCTR(block, iv) - - return &cipher.StreamReader{S: stream, R: r}, nil -} - -// CreateSSECEncryptedReaderWithOffset creates an encrypted reader with a specific counter offset -// This is used for chunk-level encryption where each chunk needs a different counter position -func CreateSSECEncryptedReaderWithOffset(r io.Reader, customerKey *SSECustomerKey, iv []byte, counterOffset uint64) (io.Reader, error) { - if customerKey == nil { - return r, nil - } - - // Create AES cipher - block, err := aes.NewCipher(customerKey.Key) - if err != nil { - return nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Create CTR mode cipher with offset - stream := createCTRStreamWithOffset(block, iv, counterOffset) - - return &cipher.StreamReader{S: stream, R: r}, nil -} - -// CreateSSECDecryptedReaderWithOffset creates a decrypted reader with a specific counter offset -func CreateSSECDecryptedReaderWithOffset(r io.Reader, customerKey *SSECustomerKey, iv []byte, counterOffset uint64) (io.Reader, error) { - if customerKey == nil { - return r, nil - } - - // Create AES cipher - block, err := aes.NewCipher(customerKey.Key) - if err != nil { - return nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Create CTR mode cipher with offset - stream := createCTRStreamWithOffset(block, iv, counterOffset) - - return &cipher.StreamReader{S: stream, R: r}, nil -} - -// createCTRStreamWithOffset creates a CTR stream positioned at a specific counter offset -func createCTRStreamWithOffset(block cipher.Block, iv []byte, counterOffset uint64) cipher.Stream { - // Create a copy of the IV to avoid modifying the original - offsetIV := make([]byte, len(iv)) - copy(offsetIV, iv) - - // Calculate the counter offset in blocks (AES block size is 16 bytes) - blockOffset := counterOffset / 16 - - // Add the block offset to the counter portion of the IV - // In AES-CTR, the last 8 bytes of the IV are typically used as the counter - addCounterToIV(offsetIV, blockOffset) - - return cipher.NewCTR(block, offsetIV) -} - -// addCounterToIV adds a counter value to the IV (treating last 8 bytes as big-endian counter) -func addCounterToIV(iv []byte, counter uint64) { - // Use the last 8 bytes as a big-endian counter - for i := 7; i >= 0; i-- { - carry := counter & 0xff - iv[len(iv)-8+i] += byte(carry) - if iv[len(iv)-8+i] >= byte(carry) { - break // No overflow - } - counter >>= 8 - } -} - -// GetSourceSSECInfo extracts SSE-C information from source object metadata -func GetSourceSSECInfo(metadata map[string][]byte) (algorithm string, keyMD5 string, isEncrypted bool) { - if alg, exists := metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists { - algorithm = string(alg) - } - if md5, exists := metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists { - keyMD5 = string(md5) - } - isEncrypted = algorithm != "" && keyMD5 != "" - return -} - -// CanDirectCopySSEC determines if we can directly copy chunks without decrypt/re-encrypt -func CanDirectCopySSEC(srcMetadata map[string][]byte, copySourceKey *SSECustomerKey, destKey *SSECustomerKey) bool { - _, srcKeyMD5, srcEncrypted := GetSourceSSECInfo(srcMetadata) - - // Case 1: Source unencrypted, destination unencrypted -> Direct copy - if !srcEncrypted && destKey == nil { - return true - } - - // Case 2: Source encrypted, same key for decryption and destination -> Direct copy - if srcEncrypted && copySourceKey != nil && destKey != nil { - // Same key if MD5 matches exactly (base64 encoding is case-sensitive) - return copySourceKey.KeyMD5 == srcKeyMD5 && - destKey.KeyMD5 == srcKeyMD5 - } - - // All other cases require decrypt/re-encrypt - return false -} - -// Note: SSECCopyStrategy is defined above - -// DetermineSSECCopyStrategy determines the optimal copy strategy -func DetermineSSECCopyStrategy(srcMetadata map[string][]byte, copySourceKey *SSECustomerKey, destKey *SSECustomerKey) (SSECCopyStrategy, error) { - _, srcKeyMD5, srcEncrypted := GetSourceSSECInfo(srcMetadata) - - // Validate source key if source is encrypted - if srcEncrypted { - if copySourceKey == nil { - return SSECCopyStrategyDecryptEncrypt, ErrSSECustomerKeyMissing - } - if copySourceKey.KeyMD5 != srcKeyMD5 { - return SSECCopyStrategyDecryptEncrypt, ErrSSECustomerKeyMD5Mismatch - } - } else if copySourceKey != nil { - // Source not encrypted but copy source key provided - return SSECCopyStrategyDecryptEncrypt, ErrSSECustomerKeyNotNeeded - } - - if CanDirectCopySSEC(srcMetadata, copySourceKey, destKey) { - return SSECCopyStrategyDirect, nil - } - - return SSECCopyStrategyDecryptEncrypt, nil -} - -// MapSSECErrorToS3Error maps SSE-C custom errors to S3 API error codes -func MapSSECErrorToS3Error(err error) s3err.ErrorCode { - switch err { - case ErrInvalidEncryptionAlgorithm: - return s3err.ErrInvalidEncryptionAlgorithm - case ErrInvalidEncryptionKey: - return s3err.ErrInvalidEncryptionKey - case ErrSSECustomerKeyMD5Mismatch: - return s3err.ErrSSECustomerKeyMD5Mismatch - case ErrSSECustomerKeyMissing: - return s3err.ErrSSECustomerKeyMissing - case ErrSSECustomerKeyNotNeeded: - return s3err.ErrSSECustomerKeyNotNeeded - default: - return s3err.ErrInvalidRequest - } -} diff --git a/weed/s3api/s3_sse_c_range_test.go b/weed/s3api/s3_sse_c_range_test.go deleted file mode 100644 index 318771d8c..000000000 --- a/weed/s3api/s3_sse_c_range_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package s3api - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// ResponseRecorder that also implements http.Flusher -type recorderFlusher struct{ *httptest.ResponseRecorder } - -func (r recorderFlusher) Flush() {} - -// TestSSECRangeRequestsSupported verifies that HTTP Range requests are now supported -// for SSE-C encrypted objects since the IV is stored in metadata and CTR mode allows seeking -func TestSSECRangeRequestsSupported(t *testing.T) { - // Create a request with Range header and valid SSE-C headers - req := httptest.NewRequest(http.MethodGet, "/b/o", nil) - req.Header.Set("Range", "bytes=10-20") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - - key := make([]byte, 32) - for i := range key { - key[i] = byte(i) - } - s := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(s[:]) - - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key)) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyMD5) - - // Attach mux vars to avoid panic in error writer - req = mux.SetURLVars(req, map[string]string{"bucket": "b", "object": "o"}) - - // Create a mock HTTP response that simulates SSE-C encrypted object metadata - proxyResponse := &http.Response{ - StatusCode: 200, - Header: make(http.Header), - Body: io.NopCloser(bytes.NewReader([]byte("mock encrypted data"))), - } - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyMD5) - - // Call the function under test - should no longer reject range requests - s3a := &S3ApiServer{ - option: &S3ApiServerOption{ - BucketsPath: "/buckets", - }, - } - rec := httptest.NewRecorder() - w := recorderFlusher{rec} - statusCode, _ := s3a.handleSSECResponse(req, proxyResponse, w) - - // Range requests should now be allowed to proceed (will be handled by filer layer) - // The exact status code depends on the object existence and filer response - if statusCode == http.StatusRequestedRangeNotSatisfiable { - t.Fatalf("Range requests should no longer be rejected for SSE-C objects, got status %d", statusCode) - } -} diff --git a/weed/s3api/s3_sse_c_test.go b/weed/s3api/s3_sse_c_test.go deleted file mode 100644 index 034f07a8e..000000000 --- a/weed/s3api/s3_sse_c_test.go +++ /dev/null @@ -1,407 +0,0 @@ -package s3api - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "fmt" - "io" - "net/http" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -func base64MD5(b []byte) string { - s := md5.Sum(b) - return base64.StdEncoding.EncodeToString(s[:]) -} - -func TestSSECHeaderValidation(t *testing.T) { - // Test valid SSE-C headers - req := &http.Request{Header: make(http.Header)} - - key := make([]byte, 32) // 256-bit key - for i := range key { - key[i] = byte(i) - } - - keyBase64 := base64.StdEncoding.EncodeToString(key) - md5sum := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(md5sum[:]) - - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, keyBase64) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyMD5) - - // Test validation - err := ValidateSSECHeaders(req) - if err != nil { - t.Errorf("Expected valid headers, got error: %v", err) - } - - // Test parsing - customerKey, err := ParseSSECHeaders(req) - if err != nil { - t.Errorf("Expected successful parsing, got error: %v", err) - } - - if customerKey == nil { - t.Error("Expected customer key, got nil") - } - - if customerKey.Algorithm != "AES256" { - t.Errorf("Expected algorithm AES256, got %s", customerKey.Algorithm) - } - - if !bytes.Equal(customerKey.Key, key) { - t.Error("Key doesn't match original") - } - - if customerKey.KeyMD5 != keyMD5 { - t.Errorf("Expected key MD5 %s, got %s", keyMD5, customerKey.KeyMD5) - } -} - -func TestSSECCopySourceHeaders(t *testing.T) { - // Test valid SSE-C copy source headers - req := &http.Request{Header: make(http.Header)} - - key := make([]byte, 32) // 256-bit key - for i := range key { - key[i] = byte(i) + 1 // Different from regular test - } - - keyBase64 := base64.StdEncoding.EncodeToString(key) - md5sum2 := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(md5sum2[:]) - - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerKey, keyBase64) - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerKeyMD5, keyMD5) - - // Test parsing copy source headers - customerKey, err := ParseSSECCopySourceHeaders(req) - if err != nil { - t.Errorf("Expected successful copy source parsing, got error: %v", err) - } - - if customerKey == nil { - t.Error("Expected customer key from copy source headers, got nil") - } - - if customerKey.Algorithm != "AES256" { - t.Errorf("Expected algorithm AES256, got %s", customerKey.Algorithm) - } - - if !bytes.Equal(customerKey.Key, key) { - t.Error("Copy source key doesn't match original") - } - - // Test that regular headers don't interfere with copy source headers - regularKey, err := ParseSSECHeaders(req) - if err != nil { - t.Errorf("Regular header parsing should not fail: %v", err) - } - - if regularKey != nil { - t.Error("Expected nil for regular headers when only copy source headers are present") - } -} - -func TestSSECHeaderValidationErrors(t *testing.T) { - tests := []struct { - name string - algorithm string - key string - keyMD5 string - wantErr error - }{ - { - name: "invalid algorithm", - algorithm: "AES128", - key: base64.StdEncoding.EncodeToString(make([]byte, 32)), - keyMD5: base64MD5(make([]byte, 32)), - wantErr: ErrInvalidEncryptionAlgorithm, - }, - { - name: "invalid key length", - algorithm: "AES256", - key: base64.StdEncoding.EncodeToString(make([]byte, 16)), - keyMD5: base64MD5(make([]byte, 16)), - wantErr: ErrInvalidEncryptionKey, - }, - { - name: "mismatched MD5", - algorithm: "AES256", - key: base64.StdEncoding.EncodeToString(make([]byte, 32)), - keyMD5: "wrong==md5", - wantErr: ErrSSECustomerKeyMD5Mismatch, - }, - { - name: "incomplete headers", - algorithm: "AES256", - key: "", - keyMD5: "", - wantErr: ErrInvalidRequest, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := &http.Request{Header: make(http.Header)} - - if tt.algorithm != "" { - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, tt.algorithm) - } - if tt.key != "" { - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, tt.key) - } - if tt.keyMD5 != "" { - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, tt.keyMD5) - } - - err := ValidateSSECHeaders(req) - if err != tt.wantErr { - t.Errorf("Expected error %v, got %v", tt.wantErr, err) - } - }) - } -} - -func TestSSECEncryptionDecryption(t *testing.T) { - // Create customer key - key := make([]byte, 32) - for i := range key { - key[i] = byte(i) - } - - md5sumKey := md5.Sum(key) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: key, - KeyMD5: base64.StdEncoding.EncodeToString(md5sumKey[:]), - } - - // Test data - testData := []byte("Hello, World! This is a test of SSE-C encryption.") - - // Create encrypted reader - dataReader := bytes.NewReader(testData) - encryptedReader, iv, err := CreateSSECEncryptedReader(dataReader, customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - // Read encrypted data - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Verify data is actually encrypted (different from original) - if bytes.Equal(encryptedData[16:], testData) { // Skip IV - t.Error("Data doesn't appear to be encrypted") - } - - // Create decrypted reader - encryptedReader2 := bytes.NewReader(encryptedData) - decryptedReader, err := CreateSSECDecryptedReader(encryptedReader2, customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Read decrypted data - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - // Verify decrypted data matches original - if !bytes.Equal(decryptedData, testData) { - t.Errorf("Decrypted data doesn't match original.\nOriginal: %s\nDecrypted: %s", testData, decryptedData) - } -} - -func TestSSECIsSSECRequest(t *testing.T) { - // Test with SSE-C headers - req := &http.Request{Header: make(http.Header)} - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - - if !IsSSECRequest(req) { - t.Error("Expected IsSSECRequest to return true when SSE-C headers are present") - } - - // Test without SSE-C headers - req2 := &http.Request{Header: make(http.Header)} - if IsSSECRequest(req2) { - t.Error("Expected IsSSECRequest to return false when no SSE-C headers are present") - } -} - -// Test encryption with different data sizes (similar to s3tests) -func TestSSECEncryptionVariousSizes(t *testing.T) { - sizes := []int{1, 13, 1024, 1024 * 1024} // 1B, 13B, 1KB, 1MB - - for _, size := range sizes { - t.Run(fmt.Sprintf("size_%d", size), func(t *testing.T) { - // Create customer key - key := make([]byte, 32) - for i := range key { - key[i] = byte(i + size) // Make key unique per test - } - - md5sumDyn := md5.Sum(key) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: key, - KeyMD5: base64.StdEncoding.EncodeToString(md5sumDyn[:]), - } - - // Create test data of specified size - testData := make([]byte, size) - for i := range testData { - testData[i] = byte('A' + (i % 26)) // Pattern of A-Z - } - - // Encrypt - dataReader := bytes.NewReader(testData) - encryptedReader, iv, err := CreateSSECEncryptedReader(dataReader, customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Verify encrypted data has same size as original (IV is stored in metadata, not in stream) - if len(encryptedData) != size { - t.Errorf("Expected encrypted data length %d (same as original), got %d", size, len(encryptedData)) - } - - // Decrypt - encryptedReader2 := bytes.NewReader(encryptedData) - decryptedReader, err := CreateSSECDecryptedReader(encryptedReader2, customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - // Verify decrypted data matches original - if !bytes.Equal(decryptedData, testData) { - t.Errorf("Decrypted data doesn't match original for size %d", size) - } - }) - } -} - -func TestSSECEncryptionWithNilKey(t *testing.T) { - testData := []byte("test data") - dataReader := bytes.NewReader(testData) - - // Test encryption with nil key (should pass through) - encryptedReader, iv, err := CreateSSECEncryptedReader(dataReader, nil) - if err != nil { - t.Fatalf("Failed to create encrypted reader with nil key: %v", err) - } - - result, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read from pass-through reader: %v", err) - } - - if !bytes.Equal(result, testData) { - t.Error("Data should pass through unchanged when key is nil") - } - - // Test decryption with nil key (should pass through) - dataReader2 := bytes.NewReader(testData) - decryptedReader, err := CreateSSECDecryptedReader(dataReader2, nil, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader with nil key: %v", err) - } - - result2, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read from pass-through reader: %v", err) - } - - if !bytes.Equal(result2, testData) { - t.Error("Data should pass through unchanged when key is nil") - } -} - -// TestSSECEncryptionSmallBuffers tests the fix for the critical bug where small buffers -// could corrupt the data stream when reading in chunks smaller than the IV size -func TestSSECEncryptionSmallBuffers(t *testing.T) { - testData := []byte("This is a test message for small buffer reads") - - // Create customer key - key := make([]byte, 32) - for i := range key { - key[i] = byte(i) - } - - md5sumKey3 := md5.Sum(key) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: key, - KeyMD5: base64.StdEncoding.EncodeToString(md5sumKey3[:]), - } - - // Create encrypted reader - dataReader := bytes.NewReader(testData) - encryptedReader, iv, err := CreateSSECEncryptedReader(dataReader, customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - // Read with very small buffers (smaller than IV size of 16 bytes) - var encryptedData []byte - smallBuffer := make([]byte, 5) // Much smaller than 16-byte IV - - for { - n, err := encryptedReader.Read(smallBuffer) - if n > 0 { - encryptedData = append(encryptedData, smallBuffer[:n]...) - } - if err == io.EOF { - break - } - if err != nil { - t.Fatalf("Error reading encrypted data: %v", err) - } - } - - // Verify we have some encrypted data (IV is in metadata, not in stream) - if len(encryptedData) == 0 && len(testData) > 0 { - t.Fatal("Expected encrypted data but got none") - } - - // Expected size: same as original data (IV is stored in metadata, not in stream) - if len(encryptedData) != len(testData) { - t.Errorf("Expected encrypted data size %d (same as original), got %d", len(testData), len(encryptedData)) - } - - // Decrypt and verify - encryptedReader2 := bytes.NewReader(encryptedData) - decryptedReader, err := CreateSSECDecryptedReader(encryptedReader2, customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - if !bytes.Equal(decryptedData, testData) { - t.Errorf("Decrypted data doesn't match original.\nOriginal: %s\nDecrypted: %s", testData, decryptedData) - } -} diff --git a/weed/s3api/s3_sse_copy_test.go b/weed/s3api/s3_sse_copy_test.go deleted file mode 100644 index 35839a704..000000000 --- a/weed/s3api/s3_sse_copy_test.go +++ /dev/null @@ -1,628 +0,0 @@ -package s3api - -import ( - "bytes" - "io" - "net/http" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestSSECObjectCopy tests copying SSE-C encrypted objects with different keys -func TestSSECObjectCopy(t *testing.T) { - // Original key for source object - sourceKey := GenerateTestSSECKey(1) - sourceCustomerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: sourceKey.Key, - KeyMD5: sourceKey.KeyMD5, - } - - // Destination key for target object - destKey := GenerateTestSSECKey(2) - destCustomerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: destKey.Key, - KeyMD5: destKey.KeyMD5, - } - - testData := "Hello, SSE-C copy world!" - - // Encrypt with source key - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(testData), sourceCustomerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Test copy strategy determination - sourceMetadata := make(map[string][]byte) - StoreIVInMetadata(sourceMetadata, iv) - sourceMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - sourceMetadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(sourceKey.KeyMD5) - - t.Run("Same key copy (direct copy)", func(t *testing.T) { - strategy, err := DetermineSSECCopyStrategy(sourceMetadata, sourceCustomerKey, sourceCustomerKey) - if err != nil { - t.Fatalf("Failed to determine copy strategy: %v", err) - } - - if strategy != SSECCopyStrategyDirect { - t.Errorf("Expected direct copy strategy for same key, got %v", strategy) - } - }) - - t.Run("Different key copy (decrypt-encrypt)", func(t *testing.T) { - strategy, err := DetermineSSECCopyStrategy(sourceMetadata, sourceCustomerKey, destCustomerKey) - if err != nil { - t.Fatalf("Failed to determine copy strategy: %v", err) - } - - if strategy != SSECCopyStrategyDecryptEncrypt { - t.Errorf("Expected decrypt-encrypt copy strategy for different keys, got %v", strategy) - } - }) - - t.Run("Can direct copy check", func(t *testing.T) { - // Same key should allow direct copy - canDirect := CanDirectCopySSEC(sourceMetadata, sourceCustomerKey, sourceCustomerKey) - if !canDirect { - t.Error("Should allow direct copy with same key") - } - - // Different key should not allow direct copy - canDirect = CanDirectCopySSEC(sourceMetadata, sourceCustomerKey, destCustomerKey) - if canDirect { - t.Error("Should not allow direct copy with different keys") - } - }) - - // Test actual copy operation (decrypt with source key, encrypt with dest key) - t.Run("Full copy operation", func(t *testing.T) { - // Decrypt with source key - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), sourceCustomerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Re-encrypt with destination key - reEncryptedReader, destIV, err := CreateSSECEncryptedReader(decryptedReader, destCustomerKey) - if err != nil { - t.Fatalf("Failed to create re-encrypted reader: %v", err) - } - - reEncryptedData, err := io.ReadAll(reEncryptedReader) - if err != nil { - t.Fatalf("Failed to read re-encrypted data: %v", err) - } - - // Verify we can decrypt with destination key - finalDecryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(reEncryptedData), destCustomerKey, destIV) - if err != nil { - t.Fatalf("Failed to create final decrypted reader: %v", err) - } - - finalData, err := io.ReadAll(finalDecryptedReader) - if err != nil { - t.Fatalf("Failed to read final decrypted data: %v", err) - } - - if string(finalData) != testData { - t.Errorf("Expected %s, got %s", testData, string(finalData)) - } - }) -} - -// TestSSEKMSObjectCopy tests copying SSE-KMS encrypted objects -func TestSSEKMSObjectCopy(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - testData := "Hello, SSE-KMS copy world!" - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - // Encrypt with SSE-KMS - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(testData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - t.Run("Same KMS key copy", func(t *testing.T) { - // Decrypt with original key - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Re-encrypt with same KMS key - reEncryptedReader, newSseKey, err := CreateSSEKMSEncryptedReader(decryptedReader, kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create re-encrypted reader: %v", err) - } - - reEncryptedData, err := io.ReadAll(reEncryptedReader) - if err != nil { - t.Fatalf("Failed to read re-encrypted data: %v", err) - } - - // Verify we can decrypt with new key - finalDecryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(reEncryptedData), newSseKey) - if err != nil { - t.Fatalf("Failed to create final decrypted reader: %v", err) - } - - finalData, err := io.ReadAll(finalDecryptedReader) - if err != nil { - t.Fatalf("Failed to read final decrypted data: %v", err) - } - - if string(finalData) != testData { - t.Errorf("Expected %s, got %s", testData, string(finalData)) - } - }) -} - -// TestSSECToSSEKMSCopy tests cross-encryption copy (SSE-C to SSE-KMS) -func TestSSECToSSEKMSCopy(t *testing.T) { - // Setup SSE-C key - ssecKey := GenerateTestSSECKey(1) - ssecCustomerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: ssecKey.Key, - KeyMD5: ssecKey.KeyMD5, - } - - // Setup SSE-KMS - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - testData := "Hello, cross-encryption copy world!" - - // Encrypt with SSE-C - encryptedReader, ssecIV, err := CreateSSECEncryptedReader(strings.NewReader(testData), ssecCustomerKey) - if err != nil { - t.Fatalf("Failed to create SSE-C encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read SSE-C encrypted data: %v", err) - } - - // Decrypt SSE-C data - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), ssecCustomerKey, ssecIV) - if err != nil { - t.Fatalf("Failed to create SSE-C decrypted reader: %v", err) - } - - // Re-encrypt with SSE-KMS - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - reEncryptedReader, sseKmsKey, err := CreateSSEKMSEncryptedReader(decryptedReader, kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create SSE-KMS encrypted reader: %v", err) - } - - reEncryptedData, err := io.ReadAll(reEncryptedReader) - if err != nil { - t.Fatalf("Failed to read SSE-KMS encrypted data: %v", err) - } - - // Decrypt with SSE-KMS - finalDecryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(reEncryptedData), sseKmsKey) - if err != nil { - t.Fatalf("Failed to create SSE-KMS decrypted reader: %v", err) - } - - finalData, err := io.ReadAll(finalDecryptedReader) - if err != nil { - t.Fatalf("Failed to read final decrypted data: %v", err) - } - - if string(finalData) != testData { - t.Errorf("Expected %s, got %s", testData, string(finalData)) - } -} - -// TestSSEKMSToSSECCopy tests cross-encryption copy (SSE-KMS to SSE-C) -func TestSSEKMSToSSECCopy(t *testing.T) { - // Setup SSE-KMS - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Setup SSE-C key - ssecKey := GenerateTestSSECKey(1) - ssecCustomerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: ssecKey.Key, - KeyMD5: ssecKey.KeyMD5, - } - - testData := "Hello, reverse cross-encryption copy world!" - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - // Encrypt with SSE-KMS - encryptedReader, sseKmsKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(testData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create SSE-KMS encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read SSE-KMS encrypted data: %v", err) - } - - // Decrypt SSE-KMS data - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKmsKey) - if err != nil { - t.Fatalf("Failed to create SSE-KMS decrypted reader: %v", err) - } - - // Re-encrypt with SSE-C - reEncryptedReader, reEncryptedIV, err := CreateSSECEncryptedReader(decryptedReader, ssecCustomerKey) - if err != nil { - t.Fatalf("Failed to create SSE-C encrypted reader: %v", err) - } - - reEncryptedData, err := io.ReadAll(reEncryptedReader) - if err != nil { - t.Fatalf("Failed to read SSE-C encrypted data: %v", err) - } - - // Decrypt with SSE-C - finalDecryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(reEncryptedData), ssecCustomerKey, reEncryptedIV) - if err != nil { - t.Fatalf("Failed to create SSE-C decrypted reader: %v", err) - } - - finalData, err := io.ReadAll(finalDecryptedReader) - if err != nil { - t.Fatalf("Failed to read final decrypted data: %v", err) - } - - if string(finalData) != testData { - t.Errorf("Expected %s, got %s", testData, string(finalData)) - } -} - -// TestSSECopyWithCorruptedSource tests copy operations with corrupted source data -func TestSSECopyWithCorruptedSource(t *testing.T) { - ssecKey := GenerateTestSSECKey(1) - ssecCustomerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: ssecKey.Key, - KeyMD5: ssecKey.KeyMD5, - } - - testData := "Hello, corruption test!" - - // Encrypt data - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(testData), ssecCustomerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Corrupt the encrypted data - corruptedData := make([]byte, len(encryptedData)) - copy(corruptedData, encryptedData) - if len(corruptedData) > s3_constants.AESBlockSize { - // Corrupt a byte after the IV - corruptedData[s3_constants.AESBlockSize] ^= 0xFF - } - - // Try to decrypt corrupted data - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(corruptedData), ssecCustomerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader for corrupted data: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - // This is okay - corrupted data might cause read errors - t.Logf("Read error for corrupted data (expected): %v", err) - return - } - - // If we can read it, the data should be different from original - if string(decryptedData) == testData { - t.Error("Decrypted corrupted data should not match original") - } -} - -// TestSSEKMSCopyStrategy tests SSE-KMS copy strategy determination -func TestSSEKMSCopyStrategy(t *testing.T) { - tests := []struct { - name string - srcMetadata map[string][]byte - destKeyID string - expectedStrategy SSEKMSCopyStrategy - }{ - { - name: "Unencrypted to unencrypted", - srcMetadata: map[string][]byte{}, - destKeyID: "", - expectedStrategy: SSEKMSCopyStrategyDirect, - }, - { - name: "Same KMS key", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "test-key-123", - expectedStrategy: SSEKMSCopyStrategyDirect, - }, - { - name: "Different KMS keys", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "test-key-456", - expectedStrategy: SSEKMSCopyStrategyDecryptEncrypt, - }, - { - name: "Encrypted to unencrypted", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "", - expectedStrategy: SSEKMSCopyStrategyDecryptEncrypt, - }, - { - name: "Unencrypted to encrypted", - srcMetadata: map[string][]byte{}, - destKeyID: "test-key-123", - expectedStrategy: SSEKMSCopyStrategyDecryptEncrypt, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - strategy, err := DetermineSSEKMSCopyStrategy(tt.srcMetadata, tt.destKeyID) - if err != nil { - t.Fatalf("DetermineSSEKMSCopyStrategy failed: %v", err) - } - if strategy != tt.expectedStrategy { - t.Errorf("Expected strategy %v, got %v", tt.expectedStrategy, strategy) - } - }) - } -} - -// TestSSEKMSCopyHeaders tests SSE-KMS copy header parsing -func TestSSEKMSCopyHeaders(t *testing.T) { - tests := []struct { - name string - headers map[string]string - expectedKeyID string - expectedContext map[string]string - expectedBucketKey bool - expectError bool - }{ - { - name: "No SSE-KMS headers", - headers: map[string]string{}, - expectedKeyID: "", - expectedContext: nil, - expectedBucketKey: false, - expectError: false, - }, - { - name: "SSE-KMS with key ID", - headers: map[string]string{ - s3_constants.AmzServerSideEncryption: "aws:kms", - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: "test-key-123", - }, - expectedKeyID: "test-key-123", - expectedContext: nil, - expectedBucketKey: false, - expectError: false, - }, - { - name: "SSE-KMS with all options", - headers: map[string]string{ - s3_constants.AmzServerSideEncryption: "aws:kms", - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: "test-key-123", - s3_constants.AmzServerSideEncryptionContext: "eyJ0ZXN0IjoidmFsdWUifQ==", // base64 of {"test":"value"} - s3_constants.AmzServerSideEncryptionBucketKeyEnabled: "true", - }, - expectedKeyID: "test-key-123", - expectedContext: map[string]string{"test": "value"}, - expectedBucketKey: true, - expectError: false, - }, - { - name: "Invalid key ID", - headers: map[string]string{ - s3_constants.AmzServerSideEncryption: "aws:kms", - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: "invalid key id", - }, - expectError: true, - }, - { - name: "Invalid encryption context", - headers: map[string]string{ - s3_constants.AmzServerSideEncryption: "aws:kms", - s3_constants.AmzServerSideEncryptionContext: "invalid-base64!", - }, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest("PUT", "/test", nil) - for k, v := range tt.headers { - req.Header.Set(k, v) - } - - keyID, context, bucketKey, err := ParseSSEKMSCopyHeaders(req) - - if tt.expectError { - if err == nil { - t.Error("Expected error but got none") - } - return - } - - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - if keyID != tt.expectedKeyID { - t.Errorf("Expected keyID %s, got %s", tt.expectedKeyID, keyID) - } - - if !mapsEqual(context, tt.expectedContext) { - t.Errorf("Expected context %v, got %v", tt.expectedContext, context) - } - - if bucketKey != tt.expectedBucketKey { - t.Errorf("Expected bucketKey %v, got %v", tt.expectedBucketKey, bucketKey) - } - }) - } -} - -// TestSSEKMSDirectCopy tests direct copy scenarios -func TestSSEKMSDirectCopy(t *testing.T) { - tests := []struct { - name string - srcMetadata map[string][]byte - destKeyID string - canDirect bool - }{ - { - name: "Both unencrypted", - srcMetadata: map[string][]byte{}, - destKeyID: "", - canDirect: true, - }, - { - name: "Same key ID", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "test-key-123", - canDirect: true, - }, - { - name: "Different key IDs", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "test-key-456", - canDirect: false, - }, - { - name: "Source encrypted, dest unencrypted", - srcMetadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - destKeyID: "", - canDirect: false, - }, - { - name: "Source unencrypted, dest encrypted", - srcMetadata: map[string][]byte{}, - destKeyID: "test-key-123", - canDirect: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - canDirect := CanDirectCopySSEKMS(tt.srcMetadata, tt.destKeyID) - if canDirect != tt.canDirect { - t.Errorf("Expected canDirect %v, got %v", tt.canDirect, canDirect) - } - }) - } -} - -// TestGetSourceSSEKMSInfo tests extraction of SSE-KMS info from metadata -func TestGetSourceSSEKMSInfo(t *testing.T) { - tests := []struct { - name string - metadata map[string][]byte - expectedKeyID string - expectedEncrypted bool - }{ - { - name: "No encryption", - metadata: map[string][]byte{}, - expectedKeyID: "", - expectedEncrypted: false, - }, - { - name: "SSE-KMS with key ID", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzServerSideEncryptionAwsKmsKeyId: []byte("test-key-123"), - }, - expectedKeyID: "test-key-123", - expectedEncrypted: true, - }, - { - name: "SSE-KMS without key ID (default key)", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - }, - expectedKeyID: "", - expectedEncrypted: true, - }, - { - name: "Non-KMS encryption", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("AES256"), - }, - expectedKeyID: "", - expectedEncrypted: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - keyID, encrypted := GetSourceSSEKMSInfo(tt.metadata) - if keyID != tt.expectedKeyID { - t.Errorf("Expected keyID %s, got %s", tt.expectedKeyID, keyID) - } - if encrypted != tt.expectedEncrypted { - t.Errorf("Expected encrypted %v, got %v", tt.expectedEncrypted, encrypted) - } - }) - } -} - -// Helper function to compare maps -func mapsEqual(a, b map[string]string) bool { - if len(a) != len(b) { - return false - } - for k, v := range a { - if b[k] != v { - return false - } - } - return true -} diff --git a/weed/s3api/s3_sse_error_test.go b/weed/s3api/s3_sse_error_test.go deleted file mode 100644 index a344e2ef7..000000000 --- a/weed/s3api/s3_sse_error_test.go +++ /dev/null @@ -1,400 +0,0 @@ -package s3api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestSSECWrongKeyDecryption tests decryption with wrong SSE-C key -func TestSSECWrongKeyDecryption(t *testing.T) { - // Setup original key and encrypt data - originalKey := GenerateTestSSECKey(1) - testData := "Hello, SSE-C world!" - - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(testData), &SSECustomerKey{ - Algorithm: "AES256", - Key: originalKey.Key, - KeyMD5: originalKey.KeyMD5, - }) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - // Read encrypted data - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Try to decrypt with wrong key - wrongKey := GenerateTestSSECKey(2) // Different seed = different key - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), &SSECustomerKey{ - Algorithm: "AES256", - Key: wrongKey.Key, - KeyMD5: wrongKey.KeyMD5, - }, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Read decrypted data - should be garbage/different from original - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - // Verify the decrypted data is NOT the same as original (wrong key used) - if string(decryptedData) == testData { - t.Error("Decryption with wrong key should not produce original data") - } -} - -// TestSSEKMSKeyNotFound tests handling of missing KMS key -func TestSSEKMSKeyNotFound(t *testing.T) { - // Note: The local KMS provider creates keys on-demand by design. - // This test validates that when on-demand creation fails or is disabled, - // appropriate errors are returned. - - // Test with an invalid key ID that would fail even on-demand creation - invalidKeyID := "" // Empty key ID should fail - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - _, _, err := CreateSSEKMSEncryptedReader(strings.NewReader("test data"), invalidKeyID, encryptionContext) - - // Should get an error for invalid/empty key - if err == nil { - t.Error("Expected error for empty KMS key ID, got none") - } - - // For local KMS with on-demand creation, we test what we can realistically test - if err != nil { - t.Logf("Got expected error for empty key ID: %v", err) - } -} - -// TestSSEHeadersWithoutEncryption tests inconsistent state where headers are present but no encryption -func TestSSEHeadersWithoutEncryption(t *testing.T) { - testCases := []struct { - name string - setupReq func() *http.Request - }{ - { - name: "SSE-C algorithm without key", - setupReq: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - // Missing key and MD5 - return req - }, - }, - { - name: "SSE-C key without algorithm", - setupReq: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - keyPair := GenerateTestSSECKey(1) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, keyPair.KeyB64) - // Missing algorithm - return req - }, - }, - { - name: "SSE-KMS key ID without algorithm", - setupReq: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, "test-key-id") - // Missing algorithm - return req - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := tc.setupReq() - - // Validate headers - should catch incomplete configurations - if strings.Contains(tc.name, "SSE-C") { - err := ValidateSSECHeaders(req) - if err == nil { - t.Error("Expected validation error for incomplete SSE-C headers") - } - } - }) - } -} - -// TestSSECInvalidKeyFormats tests various invalid SSE-C key formats -func TestSSECInvalidKeyFormats(t *testing.T) { - testCases := []struct { - name string - algorithm string - key string - keyMD5 string - expectErr bool - }{ - { - name: "Invalid algorithm", - algorithm: "AES128", - key: "dGVzdGtleXRlc3RrZXl0ZXN0a2V5dGVzdGtleXRlc3RrZXk=", // 32 bytes base64 - keyMD5: "valid-md5-hash", - expectErr: true, - }, - { - name: "Invalid key length (too short)", - algorithm: "AES256", - key: "c2hvcnRrZXk=", // "shortkey" base64 - too short - keyMD5: "valid-md5-hash", - expectErr: true, - }, - { - name: "Invalid key length (too long)", - algorithm: "AES256", - key: "dGVzdGtleXRlc3RrZXl0ZXN0a2V5dGVzdGtleXRlc3RrZXl0ZXN0a2V5dGVzdGtleQ==", // too long - keyMD5: "valid-md5-hash", - expectErr: true, - }, - { - name: "Invalid base64 key", - algorithm: "AES256", - key: "invalid-base64!", - keyMD5: "valid-md5-hash", - expectErr: true, - }, - { - name: "Invalid base64 MD5", - algorithm: "AES256", - key: "dGVzdGtleXRlc3RrZXl0ZXN0a2V5dGVzdGtleXRlc3RrZXk=", - keyMD5: "invalid-base64!", - expectErr: true, - }, - { - name: "Mismatched MD5", - algorithm: "AES256", - key: "dGVzdGtleXRlc3RrZXl0ZXN0a2V5dGVzdGtleXRlc3RrZXk=", - keyMD5: "d29uZy1tZDUtaGFzaA==", // "wrong-md5-hash" base64 - expectErr: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, tc.algorithm) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, tc.key) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, tc.keyMD5) - - err := ValidateSSECHeaders(req) - if tc.expectErr && err == nil { - t.Errorf("Expected error for %s, but got none", tc.name) - } - if !tc.expectErr && err != nil { - t.Errorf("Expected no error for %s, but got: %v", tc.name, err) - } - }) - } -} - -// TestSSEKMSInvalidConfigurations tests various invalid SSE-KMS configurations -func TestSSEKMSInvalidConfigurations(t *testing.T) { - testCases := []struct { - name string - setupRequest func() *http.Request - expectError bool - }{ - { - name: "Invalid algorithm", - setupRequest: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryption, "invalid-algorithm") - return req - }, - expectError: true, - }, - { - name: "Empty key ID", - setupRequest: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryption, "aws:kms") - req.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, "") - return req - }, - expectError: false, // Empty key ID might be valid (use default) - }, - { - name: "Invalid key ID format", - setupRequest: func() *http.Request { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzServerSideEncryption, "aws:kms") - req.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, "invalid key id with spaces") - return req - }, - expectError: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := tc.setupRequest() - - _, err := ParseSSEKMSHeaders(req) - if tc.expectError && err == nil { - t.Errorf("Expected error for %s, but got none", tc.name) - } - if !tc.expectError && err != nil { - t.Errorf("Expected no error for %s, but got: %v", tc.name, err) - } - }) - } -} - -// TestSSEEmptyDataHandling tests handling of empty data with SSE -func TestSSEEmptyDataHandling(t *testing.T) { - t.Run("SSE-C with empty data", func(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - // Encrypt empty data - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(""), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for empty data: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted empty data: %v", err) - } - - // Should have IV for empty data - if len(iv) != s3_constants.AESBlockSize { - t.Error("IV should be present even for empty data") - } - - // Decrypt and verify - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader for empty data: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted empty data: %v", err) - } - - if len(decryptedData) != 0 { - t.Errorf("Expected empty decrypted data, got %d bytes", len(decryptedData)) - } - }) - - t.Run("SSE-KMS with empty data", func(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - // Encrypt empty data - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(""), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader for empty data: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted empty data: %v", err) - } - - // Empty data should produce empty encrypted data (IV is stored in metadata) - if len(encryptedData) != 0 { - t.Errorf("Encrypted empty data should be empty, got %d bytes", len(encryptedData)) - } - - // Decrypt and verify - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader for empty data: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted empty data: %v", err) - } - - if len(decryptedData) != 0 { - t.Errorf("Expected empty decrypted data, got %d bytes", len(decryptedData)) - } - }) -} - -// TestSSEConcurrentAccess tests SSE operations under concurrent access -func TestSSEConcurrentAccess(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - const numGoroutines = 10 - done := make(chan bool, numGoroutines) - errors := make(chan error, numGoroutines) - - // Run multiple encryption/decryption operations concurrently - for i := 0; i < numGoroutines; i++ { - go func(id int) { - defer func() { done <- true }() - - testData := fmt.Sprintf("test data %d", id) - - // Encrypt - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(testData), customerKey) - if err != nil { - errors <- fmt.Errorf("goroutine %d encrypt error: %v", id, err) - return - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - errors <- fmt.Errorf("goroutine %d read encrypted error: %v", id, err) - return - } - - // Decrypt - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - errors <- fmt.Errorf("goroutine %d decrypt error: %v", id, err) - return - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - errors <- fmt.Errorf("goroutine %d read decrypted error: %v", id, err) - return - } - - if string(decryptedData) != testData { - errors <- fmt.Errorf("goroutine %d data mismatch: expected %s, got %s", id, testData, string(decryptedData)) - return - } - }(i) - } - - // Wait for all goroutines to complete - for i := 0; i < numGoroutines; i++ { - <-done - } - - // Check for errors - close(errors) - for err := range errors { - t.Error(err) - } -} diff --git a/weed/s3api/s3_sse_http_test.go b/weed/s3api/s3_sse_http_test.go deleted file mode 100644 index 95f141ca7..000000000 --- a/weed/s3api/s3_sse_http_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package s3api - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestPutObjectWithSSEC tests PUT object with SSE-C through HTTP handler -func TestPutObjectWithSSEC(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - testData := "Hello, SSE-C PUT object!" - - // Create HTTP request - req := CreateTestHTTPRequest("PUT", "/test-bucket/test-object", []byte(testData)) - SetupTestSSECHeaders(req, keyPair) - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Create response recorder - w := CreateTestHTTPResponse() - - // Test header validation - err := ValidateSSECHeaders(req) - if err != nil { - t.Fatalf("Header validation failed: %v", err) - } - - // Parse SSE-C headers - customerKey, err := ParseSSECHeaders(req) - if err != nil { - t.Fatalf("Failed to parse SSE-C headers: %v", err) - } - - if customerKey == nil { - t.Fatal("Expected customer key, got nil") - } - - // Verify parsed key matches input - if !bytes.Equal(customerKey.Key, keyPair.Key) { - t.Error("Parsed key doesn't match input key") - } - - if customerKey.KeyMD5 != keyPair.KeyMD5 { - t.Errorf("Parsed key MD5 doesn't match: expected %s, got %s", keyPair.KeyMD5, customerKey.KeyMD5) - } - - // Simulate setting response headers - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyPair.KeyMD5) - - // Verify response headers - AssertSSECHeaders(t, w, keyPair) -} - -// TestGetObjectWithSSEC tests GET object with SSE-C through HTTP handler -func TestGetObjectWithSSEC(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - - // Create HTTP request for GET - req := CreateTestHTTPRequest("GET", "/test-bucket/test-object", nil) - SetupTestSSECHeaders(req, keyPair) - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Create response recorder - w := CreateTestHTTPResponse() - - // Test that SSE-C is detected for GET requests - if !IsSSECRequest(req) { - t.Error("Should detect SSE-C request for GET with SSE-C headers") - } - - // Validate headers - err := ValidateSSECHeaders(req) - if err != nil { - t.Fatalf("Header validation failed: %v", err) - } - - // Simulate response with SSE-C headers - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyPair.KeyMD5) - w.WriteHeader(http.StatusOK) - - // Verify response - if w.Code != http.StatusOK { - t.Errorf("Expected status 200, got %d", w.Code) - } - - AssertSSECHeaders(t, w, keyPair) -} - -// TestPutObjectWithSSEKMS tests PUT object with SSE-KMS through HTTP handler -func TestPutObjectWithSSEKMS(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - testData := "Hello, SSE-KMS PUT object!" - - // Create HTTP request - req := CreateTestHTTPRequest("PUT", "/test-bucket/test-object", []byte(testData)) - SetupTestSSEKMSHeaders(req, kmsKey.KeyID) - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Create response recorder - w := CreateTestHTTPResponse() - - // Test that SSE-KMS is detected - if !IsSSEKMSRequest(req) { - t.Error("Should detect SSE-KMS request") - } - - // Parse SSE-KMS headers - sseKmsKey, err := ParseSSEKMSHeaders(req) - if err != nil { - t.Fatalf("Failed to parse SSE-KMS headers: %v", err) - } - - if sseKmsKey == nil { - t.Fatal("Expected SSE-KMS key, got nil") - } - - if sseKmsKey.KeyID != kmsKey.KeyID { - t.Errorf("Parsed key ID doesn't match: expected %s, got %s", kmsKey.KeyID, sseKmsKey.KeyID) - } - - // Simulate setting response headers - w.Header().Set(s3_constants.AmzServerSideEncryption, "aws:kms") - w.Header().Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, kmsKey.KeyID) - - // Verify response headers - AssertSSEKMSHeaders(t, w, kmsKey.KeyID) -} - -// TestGetObjectWithSSEKMS tests GET object with SSE-KMS through HTTP handler -func TestGetObjectWithSSEKMS(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Create HTTP request for GET (no SSE headers needed for GET) - req := CreateTestHTTPRequest("GET", "/test-bucket/test-object", nil) - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Create response recorder - w := CreateTestHTTPResponse() - - // Simulate response with SSE-KMS headers (would come from stored metadata) - w.Header().Set(s3_constants.AmzServerSideEncryption, "aws:kms") - w.Header().Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, kmsKey.KeyID) - w.WriteHeader(http.StatusOK) - - // Verify response - if w.Code != http.StatusOK { - t.Errorf("Expected status 200, got %d", w.Code) - } - - AssertSSEKMSHeaders(t, w, kmsKey.KeyID) -} - -// TestSSECRangeRequestSupport tests that range requests are now supported for SSE-C -func TestSSECRangeRequestSupport(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - - // Create HTTP request with Range header - req := CreateTestHTTPRequest("GET", "/test-bucket/test-object", nil) - req.Header.Set("Range", "bytes=0-100") - SetupTestSSECHeaders(req, keyPair) - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Create a mock proxy response with SSE-C headers - proxyResponse := httptest.NewRecorder() - proxyResponse.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - proxyResponse.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyPair.KeyMD5) - proxyResponse.Header().Set("Content-Length", "1000") - - // Test the detection logic - these should all still work - - // Should detect as SSE-C request - if !IsSSECRequest(req) { - t.Error("Should detect SSE-C request") - } - - // Should detect range request - if req.Header.Get("Range") == "" { - t.Error("Range header should be present") - } - - // The combination should now be allowed and handled by the filer layer - // Range requests with SSE-C are now supported since IV is stored in metadata -} - -// TestSSEHeaderConflicts tests conflicting SSE headers -func TestSSEHeaderConflicts(t *testing.T) { - testCases := []struct { - name string - setupFn func(*http.Request) - valid bool - }{ - { - name: "SSE-C and SSE-KMS conflict", - setupFn: func(req *http.Request) { - keyPair := GenerateTestSSECKey(1) - SetupTestSSECHeaders(req, keyPair) - SetupTestSSEKMSHeaders(req, "test-key-id") - }, - valid: false, - }, - { - name: "Valid SSE-C only", - setupFn: func(req *http.Request) { - keyPair := GenerateTestSSECKey(1) - SetupTestSSECHeaders(req, keyPair) - }, - valid: true, - }, - { - name: "Valid SSE-KMS only", - setupFn: func(req *http.Request) { - SetupTestSSEKMSHeaders(req, "test-key-id") - }, - valid: true, - }, - { - name: "No SSE headers", - setupFn: func(req *http.Request) { - // No SSE headers - }, - valid: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := CreateTestHTTPRequest("PUT", "/test-bucket/test-object", []byte("test")) - tc.setupFn(req) - - ssecDetected := IsSSECRequest(req) - sseKmsDetected := IsSSEKMSRequest(req) - - // Both shouldn't be detected simultaneously - if ssecDetected && sseKmsDetected { - t.Error("Both SSE-C and SSE-KMS should not be detected simultaneously") - } - - // Test validation if SSE-C is detected - if ssecDetected { - err := ValidateSSECHeaders(req) - if tc.valid && err != nil { - t.Errorf("Expected valid SSE-C headers, got error: %v", err) - } - if !tc.valid && err == nil && tc.name == "SSE-C and SSE-KMS conflict" { - // This specific test case should probably be handled at a higher level - t.Log("Conflict detection should be handled by higher-level validation") - } - } - }) - } -} - -// TestSSECopySourceHeaders tests copy operations with SSE headers -func TestSSECopySourceHeaders(t *testing.T) { - sourceKey := GenerateTestSSECKey(1) - destKey := GenerateTestSSECKey(2) - - // Create copy request with both source and destination SSE-C headers - req := CreateTestHTTPRequest("PUT", "/dest-bucket/dest-object", nil) - - // Set copy source headers - SetupTestSSECCopyHeaders(req, sourceKey) - - // Set destination headers - SetupTestSSECHeaders(req, destKey) - - // Set copy source - req.Header.Set("X-Amz-Copy-Source", "/source-bucket/source-object") - - SetupTestMuxVars(req, map[string]string{ - "bucket": "dest-bucket", - "object": "dest-object", - }) - - // Parse copy source headers - copySourceKey, err := ParseSSECCopySourceHeaders(req) - if err != nil { - t.Fatalf("Failed to parse copy source headers: %v", err) - } - - if copySourceKey == nil { - t.Fatal("Expected copy source key, got nil") - } - - if !bytes.Equal(copySourceKey.Key, sourceKey.Key) { - t.Error("Copy source key doesn't match") - } - - // Parse destination headers - destCustomerKey, err := ParseSSECHeaders(req) - if err != nil { - t.Fatalf("Failed to parse destination headers: %v", err) - } - - if destCustomerKey == nil { - t.Fatal("Expected destination key, got nil") - } - - if !bytes.Equal(destCustomerKey.Key, destKey.Key) { - t.Error("Destination key doesn't match") - } -} - -// TestSSERequestValidation tests comprehensive request validation -func TestSSERequestValidation(t *testing.T) { - testCases := []struct { - name string - method string - setupFn func(*http.Request) - expectError bool - errorType string - }{ - { - name: "Valid PUT with SSE-C", - method: "PUT", - setupFn: func(req *http.Request) { - keyPair := GenerateTestSSECKey(1) - SetupTestSSECHeaders(req, keyPair) - }, - expectError: false, - }, - { - name: "Valid GET with SSE-C", - method: "GET", - setupFn: func(req *http.Request) { - keyPair := GenerateTestSSECKey(1) - SetupTestSSECHeaders(req, keyPair) - }, - expectError: false, - }, - { - name: "Invalid SSE-C key format", - method: "PUT", - setupFn: func(req *http.Request) { - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, "invalid-key") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, "invalid-md5") - }, - expectError: true, - errorType: "InvalidRequest", - }, - { - name: "Missing SSE-C key MD5", - method: "PUT", - setupFn: func(req *http.Request) { - keyPair := GenerateTestSSECKey(1) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, keyPair.KeyB64) - // Missing MD5 - }, - expectError: true, - errorType: "InvalidRequest", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - req := CreateTestHTTPRequest(tc.method, "/test-bucket/test-object", []byte("test data")) - tc.setupFn(req) - - SetupTestMuxVars(req, map[string]string{ - "bucket": "test-bucket", - "object": "test-object", - }) - - // Test header validation - if IsSSECRequest(req) { - err := ValidateSSECHeaders(req) - if tc.expectError && err == nil { - t.Errorf("Expected error for %s, but got none", tc.name) - } - if !tc.expectError && err != nil { - t.Errorf("Expected no error for %s, but got: %v", tc.name, err) - } - } - }) - } -} diff --git a/weed/s3api/s3_sse_kms.go b/weed/s3api/s3_sse_kms.go deleted file mode 100644 index 3b721aa26..000000000 --- a/weed/s3api/s3_sse_kms.go +++ /dev/null @@ -1,1058 +0,0 @@ -package s3api - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "net/http" - "regexp" - "sort" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// Compiled regex patterns for KMS key validation -var ( - uuidRegex = regexp.MustCompile(`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`) - arnRegex = regexp.MustCompile(`^arn:aws:kms:[a-z0-9-]+:\d{12}:(key|alias)/.+$`) -) - -// SSEKMSKey contains the metadata for an SSE-KMS encrypted object -type SSEKMSKey struct { - KeyID string // The KMS key ID used - EncryptedDataKey []byte // The encrypted data encryption key - EncryptionContext map[string]string // The encryption context used - BucketKeyEnabled bool // Whether S3 Bucket Keys are enabled - IV []byte // The initialization vector for encryption - ChunkOffset int64 // Offset of this chunk within the original part (for IV calculation) -} - -// SSEKMSMetadata represents the metadata stored with SSE-KMS objects -type SSEKMSMetadata struct { - Algorithm string `json:"algorithm"` // "aws:kms" - KeyID string `json:"keyId"` // KMS key identifier - EncryptedDataKey string `json:"encryptedDataKey"` // Base64-encoded encrypted data key - EncryptionContext map[string]string `json:"encryptionContext"` // Encryption context - BucketKeyEnabled bool `json:"bucketKeyEnabled"` // S3 Bucket Key optimization - IV string `json:"iv"` // Base64-encoded initialization vector - PartOffset int64 `json:"partOffset"` // Offset within original multipart part (for IV calculation) -} - -const ( - // Default data key size (256 bits) - DataKeySize = 32 -) - -// Bucket key cache TTL (moved to be used with per-bucket cache) -const BucketKeyCacheTTL = time.Hour - -// CreateSSEKMSEncryptedReader creates an encrypted reader using KMS envelope encryption -func CreateSSEKMSEncryptedReader(r io.Reader, keyID string, encryptionContext map[string]string) (io.Reader, *SSEKMSKey, error) { - return CreateSSEKMSEncryptedReaderWithBucketKey(r, keyID, encryptionContext, false) -} - -// CreateSSEKMSEncryptedReaderWithBucketKey creates an encrypted reader with optional S3 Bucket Keys optimization -func CreateSSEKMSEncryptedReaderWithBucketKey(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool) (io.Reader, *SSEKMSKey, error) { - if bucketKeyEnabled { - // Use S3 Bucket Keys optimization - try to get or create a bucket-level data key - // Note: This is a simplified implementation. In practice, this would need - // access to the bucket name and S3ApiServer instance for proper per-bucket caching. - // For now, generate per-object keys (bucket key optimization disabled) - glog.V(2).Infof("Bucket key optimization requested but not fully implemented yet - using per-object keys") - bucketKeyEnabled = false - } - - // Generate data key using common utility - dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext) - if err != nil { - return nil, nil, err - } - - // Ensure we clear the plaintext data key from memory when done - defer clearKMSDataKey(dataKeyResult) - - // Generate a random IV for CTR mode - // Note: AES-CTR is used for object data encryption (not AES-GCM) because: - // 1. CTR mode supports streaming encryption for large objects - // 2. CTR mode supports range requests (seek to arbitrary positions) - // 3. This matches AWS S3 and other S3-compatible implementations - // The KMS data key encryption (separate layer) uses AES-GCM for authentication - iv := make([]byte, s3_constants.AESBlockSize) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, nil, fmt.Errorf("failed to generate IV: %v", err) - } - - // Create CTR mode cipher stream - stream := cipher.NewCTR(dataKeyResult.Block, iv) - - // Create the SSE-KMS metadata using utility function - sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, 0) - - // The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV - // This ensures correct Content-Length for clients - encryptedReader := &cipher.StreamReader{S: stream, R: r} - - // Store IV in the SSE key for metadata storage - sseKey.IV = iv - - return encryptedReader, sseKey, nil -} - -// CreateSSEKMSEncryptedReaderWithBaseIV creates an SSE-KMS encrypted reader using a provided base IV -// This is used for multipart uploads where all chunks need to use the same base IV -func CreateSSEKMSEncryptedReaderWithBaseIV(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool, baseIV []byte) (io.Reader, *SSEKMSKey, error) { - if err := ValidateIV(baseIV, "base IV"); err != nil { - return nil, nil, err - } - - // Generate data key using common utility - dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext) - if err != nil { - return nil, nil, err - } - - // Ensure we clear the plaintext data key from memory when done - defer clearKMSDataKey(dataKeyResult) - - // Use the provided base IV instead of generating a new one - iv := make([]byte, s3_constants.AESBlockSize) - copy(iv, baseIV) - - // Create CTR mode cipher stream - stream := cipher.NewCTR(dataKeyResult.Block, iv) - - // Create the SSE-KMS metadata using utility function - sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, 0) - - // The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV - // This ensures correct Content-Length for clients - encryptedReader := &cipher.StreamReader{S: stream, R: r} - - // Store the base IV in the SSE key for metadata storage - sseKey.IV = iv - - return encryptedReader, sseKey, nil -} - -// CreateSSEKMSEncryptedReaderWithBaseIVAndOffset creates an SSE-KMS encrypted reader using a provided base IV and offset -// This is used for multipart uploads where all chunks need unique IVs to prevent IV reuse vulnerabilities -func CreateSSEKMSEncryptedReaderWithBaseIVAndOffset(r io.Reader, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool, baseIV []byte, offset int64) (io.Reader, *SSEKMSKey, error) { - if err := ValidateIV(baseIV, "base IV"); err != nil { - return nil, nil, err - } - - // Generate data key using common utility - dataKeyResult, err := generateKMSDataKey(keyID, encryptionContext) - if err != nil { - return nil, nil, err - } - - // Ensure we clear the plaintext data key from memory when done - defer clearKMSDataKey(dataKeyResult) - - // Calculate unique IV using base IV and offset to prevent IV reuse in multipart uploads - iv := calculateIVWithOffset(baseIV, offset) - - // Create CTR mode cipher stream - stream := cipher.NewCTR(dataKeyResult.Block, iv) - - // Create the SSE-KMS metadata using utility function - sseKey := createSSEKMSKey(dataKeyResult, encryptionContext, bucketKeyEnabled, iv, offset) - - // The IV is stored in SSE key metadata, so the encrypted stream does not need to prepend the IV - // This ensures correct Content-Length for clients - encryptedReader := &cipher.StreamReader{S: stream, R: r} - - return encryptedReader, sseKey, nil -} - -// hashEncryptionContext creates a deterministic hash of the encryption context -func hashEncryptionContext(encryptionContext map[string]string) string { - if len(encryptionContext) == 0 { - return "empty" - } - - // Create a deterministic representation of the context - hash := sha256.New() - - // Sort keys to ensure deterministic hash - keys := make([]string, 0, len(encryptionContext)) - for k := range encryptionContext { - keys = append(keys, k) - } - - sort.Strings(keys) - - // Hash the sorted key-value pairs - for _, k := range keys { - hash.Write([]byte(k)) - hash.Write([]byte("=")) - hash.Write([]byte(encryptionContext[k])) - hash.Write([]byte(";")) - } - - return hex.EncodeToString(hash.Sum(nil))[:16] // Use first 16 chars for brevity -} - -// getBucketDataKey retrieves or creates a cached bucket-level data key for SSE-KMS -// This is a simplified implementation that demonstrates the per-bucket caching concept -// In a full implementation, this would integrate with the actual bucket configuration system -func getBucketDataKey(bucketName, keyID string, encryptionContext map[string]string, bucketCache *BucketKMSCache) (*kms.GenerateDataKeyResponse, error) { - // Create context hash for cache key - contextHash := hashEncryptionContext(encryptionContext) - cacheKey := fmt.Sprintf("%s:%s", keyID, contextHash) - - // Try to get from cache first if cache is available - if bucketCache != nil { - if cacheEntry, found := bucketCache.Get(cacheKey); found { - if dataKey, ok := cacheEntry.DataKey.(*kms.GenerateDataKeyResponse); ok { - glog.V(3).Infof("Using cached bucket key for bucket %s, keyID %s", bucketName, keyID) - return dataKey, nil - } - } - } - - // Cache miss - generate new data key - kmsProvider := kms.GetGlobalKMS() - if kmsProvider == nil { - return nil, fmt.Errorf("KMS is not configured") - } - - dataKeyReq := &kms.GenerateDataKeyRequest{ - KeyID: keyID, - KeySpec: kms.KeySpecAES256, - EncryptionContext: encryptionContext, - } - - ctx := context.Background() - dataKeyResp, err := kmsProvider.GenerateDataKey(ctx, dataKeyReq) - if err != nil { - return nil, fmt.Errorf("failed to generate bucket data key: %v", err) - } - - // Cache the data key for future use if cache is available - if bucketCache != nil { - bucketCache.Set(cacheKey, keyID, dataKeyResp, BucketKeyCacheTTL) - glog.V(2).Infof("Generated and cached new bucket key for bucket %s, keyID %s", bucketName, keyID) - } else { - glog.V(2).Infof("Generated new bucket key for bucket %s, keyID %s (caching disabled)", bucketName, keyID) - } - - return dataKeyResp, nil -} - -// CreateSSEKMSEncryptedReaderForBucket creates an encrypted reader with bucket-specific caching -// This method is part of S3ApiServer to access bucket configuration and caching -func (s3a *S3ApiServer) CreateSSEKMSEncryptedReaderForBucket(r io.Reader, bucketName, keyID string, encryptionContext map[string]string, bucketKeyEnabled bool) (io.Reader, *SSEKMSKey, error) { - var dataKeyResp *kms.GenerateDataKeyResponse - var err error - - if bucketKeyEnabled { - // Use S3 Bucket Keys optimization with persistent per-bucket caching - bucketCache, err := s3a.getBucketKMSCache(bucketName) - if err != nil { - glog.V(2).Infof("Failed to get bucket KMS cache for %s, falling back to per-object key: %v", bucketName, err) - bucketKeyEnabled = false - } else { - dataKeyResp, err = getBucketDataKey(bucketName, keyID, encryptionContext, bucketCache) - if err != nil { - // Fall back to per-object key generation if bucket key fails - glog.V(2).Infof("Bucket key generation failed for bucket %s, falling back to per-object key: %v", bucketName, err) - bucketKeyEnabled = false - } - } - } - - if !bucketKeyEnabled { - // Generate a per-object data encryption key using KMS - kmsProvider := kms.GetGlobalKMS() - if kmsProvider == nil { - return nil, nil, fmt.Errorf("KMS is not configured") - } - - dataKeyReq := &kms.GenerateDataKeyRequest{ - KeyID: keyID, - KeySpec: kms.KeySpecAES256, - EncryptionContext: encryptionContext, - } - - ctx := context.Background() - dataKeyResp, err = kmsProvider.GenerateDataKey(ctx, dataKeyReq) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate data key: %v", err) - } - } - - // Ensure we clear the plaintext data key from memory when done - defer kms.ClearSensitiveData(dataKeyResp.Plaintext) - - // Create AES cipher with the data key - block, err := aes.NewCipher(dataKeyResp.Plaintext) - if err != nil { - return nil, nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Generate a random IV for CTR mode - iv := make([]byte, 16) // AES block size - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, nil, fmt.Errorf("failed to generate IV: %v", err) - } - - // Create CTR mode cipher stream - stream := cipher.NewCTR(block, iv) - - // Create the encrypting reader - sseKey := &SSEKMSKey{ - KeyID: keyID, - EncryptedDataKey: dataKeyResp.CiphertextBlob, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - IV: iv, - } - - return &cipher.StreamReader{S: stream, R: r}, sseKey, nil -} - -// getBucketKMSCache gets or creates the persistent KMS cache for a bucket -func (s3a *S3ApiServer) getBucketKMSCache(bucketName string) (*BucketKMSCache, error) { - // Get bucket configuration - bucketConfig, errCode := s3a.getBucketConfig(bucketName) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucket { - return nil, fmt.Errorf("bucket %s does not exist", bucketName) - } - return nil, fmt.Errorf("failed to get bucket config: %v", errCode) - } - - // Initialize KMS cache if it doesn't exist - if bucketConfig.KMSKeyCache == nil { - bucketConfig.KMSKeyCache = NewBucketKMSCache(bucketName, BucketKeyCacheTTL) - glog.V(3).Infof("Initialized new KMS cache for bucket %s", bucketName) - } - - return bucketConfig.KMSKeyCache, nil -} - -// CleanupBucketKMSCache performs cleanup of expired KMS keys for a specific bucket -func (s3a *S3ApiServer) CleanupBucketKMSCache(bucketName string) int { - bucketCache, err := s3a.getBucketKMSCache(bucketName) - if err != nil { - glog.V(3).Infof("Could not get KMS cache for bucket %s: %v", bucketName, err) - return 0 - } - - cleaned := bucketCache.CleanupExpired() - if cleaned > 0 { - glog.V(2).Infof("Cleaned up %d expired KMS keys for bucket %s", cleaned, bucketName) - } - return cleaned -} - -// CleanupAllBucketKMSCaches performs cleanup of expired KMS keys for all buckets -func (s3a *S3ApiServer) CleanupAllBucketKMSCaches() int { - totalCleaned := 0 - - // Access the bucket config cache safely - if s3a.bucketConfigCache != nil { - s3a.bucketConfigCache.mutex.RLock() - bucketNames := make([]string, 0, len(s3a.bucketConfigCache.cache)) - for bucketName := range s3a.bucketConfigCache.cache { - bucketNames = append(bucketNames, bucketName) - } - s3a.bucketConfigCache.mutex.RUnlock() - - // Clean up each bucket's KMS cache - for _, bucketName := range bucketNames { - cleaned := s3a.CleanupBucketKMSCache(bucketName) - totalCleaned += cleaned - } - } - - if totalCleaned > 0 { - glog.V(2).Infof("Cleaned up %d expired KMS keys across %d bucket caches", totalCleaned, len(s3a.bucketConfigCache.cache)) - } - return totalCleaned -} - -// CreateSSEKMSDecryptedReader creates a decrypted reader using KMS envelope encryption -func CreateSSEKMSDecryptedReader(r io.Reader, sseKey *SSEKMSKey) (io.Reader, error) { - kmsProvider := kms.GetGlobalKMS() - if kmsProvider == nil { - return nil, fmt.Errorf("KMS is not configured") - } - - // Decrypt the data encryption key using KMS - decryptReq := &kms.DecryptRequest{ - CiphertextBlob: sseKey.EncryptedDataKey, - EncryptionContext: sseKey.EncryptionContext, - } - - ctx := context.Background() - decryptResp, err := kmsProvider.Decrypt(ctx, decryptReq) - if err != nil { - return nil, fmt.Errorf("failed to decrypt data key: %v", err) - } - - // Ensure we clear the plaintext data key from memory when done - defer kms.ClearSensitiveData(decryptResp.Plaintext) - - // Verify the key ID matches (security check) - if decryptResp.KeyID != sseKey.KeyID { - return nil, fmt.Errorf("KMS key ID mismatch: expected %s, got %s", sseKey.KeyID, decryptResp.KeyID) - } - - // Use the IV from the SSE key metadata, calculating offset if this is a chunked part - if err := ValidateIV(sseKey.IV, "SSE key IV"); err != nil { - return nil, fmt.Errorf("invalid IV in SSE key: %w", err) - } - - // Calculate the correct IV for this chunk's offset within the original part - var iv []byte - if sseKey.ChunkOffset > 0 { - iv = calculateIVWithOffset(sseKey.IV, sseKey.ChunkOffset) - } else { - iv = sseKey.IV - } - - // Create AES cipher with the decrypted data key - block, err := aes.NewCipher(decryptResp.Plaintext) - if err != nil { - return nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - // Create CTR mode cipher stream for decryption - // Note: AES-CTR is used for object data decryption to match the encryption mode - stream := cipher.NewCTR(block, iv) - - // Return the decrypted reader - return &cipher.StreamReader{S: stream, R: r}, nil -} - -// ParseSSEKMSHeaders parses SSE-KMS headers from an HTTP request -func ParseSSEKMSHeaders(r *http.Request) (*SSEKMSKey, error) { - sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryption) - - // Check if SSE-KMS is requested - if sseAlgorithm == "" { - return nil, nil // No SSE headers present - } - if sseAlgorithm != s3_constants.SSEAlgorithmKMS { - return nil, fmt.Errorf("invalid SSE algorithm: %s", sseAlgorithm) - } - - keyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - encryptionContextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext) - bucketKeyEnabledHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled) - - // Parse encryption context if provided - var encryptionContext map[string]string - if encryptionContextHeader != "" { - // Decode base64-encoded JSON encryption context - contextBytes, err := base64.StdEncoding.DecodeString(encryptionContextHeader) - if err != nil { - return nil, fmt.Errorf("invalid encryption context format: %v", err) - } - - if err := json.Unmarshal(contextBytes, &encryptionContext); err != nil { - return nil, fmt.Errorf("invalid encryption context JSON: %v", err) - } - } - - // Parse bucket key enabled flag - bucketKeyEnabled := strings.ToLower(bucketKeyEnabledHeader) == "true" - - sseKey := &SSEKMSKey{ - KeyID: keyID, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - } - - // Validate the parsed key including key ID format - if err := ValidateSSEKMSKeyInternal(sseKey); err != nil { - return nil, err - } - - return sseKey, nil -} - -// ValidateSSEKMSKey validates an SSE-KMS key configuration -func ValidateSSEKMSKeyInternal(sseKey *SSEKMSKey) error { - if err := ValidateSSEKMSKey(sseKey); err != nil { - return err - } - - // An empty key ID is valid and means the default KMS key should be used. - if sseKey.KeyID != "" && !isValidKMSKeyID(sseKey.KeyID) { - return fmt.Errorf("invalid KMS key ID format: %s", sseKey.KeyID) - } - - return nil -} - -// BuildEncryptionContext creates the encryption context for S3 objects -func BuildEncryptionContext(bucketName, objectKey string, useBucketKey bool) map[string]string { - return kms.BuildS3EncryptionContext(bucketName, objectKey, useBucketKey) -} - -// parseEncryptionContext parses the user-provided encryption context from base64 JSON -func parseEncryptionContext(contextHeader string) (map[string]string, error) { - if contextHeader == "" { - return nil, nil - } - - // Decode base64 - contextBytes, err := base64.StdEncoding.DecodeString(contextHeader) - if err != nil { - return nil, fmt.Errorf("invalid base64 encoding in encryption context: %w", err) - } - - // Parse JSON - var context map[string]string - if err := json.Unmarshal(contextBytes, &context); err != nil { - return nil, fmt.Errorf("invalid JSON in encryption context: %w", err) - } - - // Validate context keys and values - for k, v := range context { - if k == "" || v == "" { - return nil, fmt.Errorf("encryption context keys and values cannot be empty") - } - // AWS KMS has limits on context key/value length (256 chars each) - if len(k) > 256 || len(v) > 256 { - return nil, fmt.Errorf("encryption context key or value too long (max 256 characters)") - } - } - - return context, nil -} - -// SerializeSSEKMSMetadata serializes SSE-KMS metadata for storage in object metadata -func SerializeSSEKMSMetadata(sseKey *SSEKMSKey) ([]byte, error) { - if err := ValidateSSEKMSKey(sseKey); err != nil { - return nil, err - } - - metadata := &SSEKMSMetadata{ - Algorithm: s3_constants.SSEAlgorithmKMS, - KeyID: sseKey.KeyID, - EncryptedDataKey: base64.StdEncoding.EncodeToString(sseKey.EncryptedDataKey), - EncryptionContext: sseKey.EncryptionContext, - BucketKeyEnabled: sseKey.BucketKeyEnabled, - IV: base64.StdEncoding.EncodeToString(sseKey.IV), // Store IV for decryption - PartOffset: sseKey.ChunkOffset, // Store within-part offset - } - - data, err := json.Marshal(metadata) - if err != nil { - return nil, fmt.Errorf("failed to marshal SSE-KMS metadata: %w", err) - } - - glog.V(4).Infof("Serialized SSE-KMS metadata: keyID=%s, bucketKey=%t", sseKey.KeyID, sseKey.BucketKeyEnabled) - return data, nil -} - -// DeserializeSSEKMSMetadata deserializes SSE-KMS metadata from storage and reconstructs the SSE-KMS key -func DeserializeSSEKMSMetadata(data []byte) (*SSEKMSKey, error) { - if len(data) == 0 { - return nil, fmt.Errorf("empty SSE-KMS metadata") - } - - var metadata SSEKMSMetadata - if err := json.Unmarshal(data, &metadata); err != nil { - return nil, fmt.Errorf("failed to unmarshal SSE-KMS metadata: %w", err) - } - - // Validate algorithm - be lenient with missing/empty algorithm for backward compatibility - if metadata.Algorithm != "" && metadata.Algorithm != s3_constants.SSEAlgorithmKMS { - return nil, fmt.Errorf("invalid SSE-KMS algorithm: %s", metadata.Algorithm) - } - - // Set default algorithm if empty - if metadata.Algorithm == "" { - metadata.Algorithm = s3_constants.SSEAlgorithmKMS - } - - // Decode the encrypted data key - encryptedDataKey, err := base64.StdEncoding.DecodeString(metadata.EncryptedDataKey) - if err != nil { - return nil, fmt.Errorf("failed to decode encrypted data key: %w", err) - } - - // Decode the IV - var iv []byte - if metadata.IV != "" { - iv, err = base64.StdEncoding.DecodeString(metadata.IV) - if err != nil { - return nil, fmt.Errorf("failed to decode IV: %w", err) - } - } - - sseKey := &SSEKMSKey{ - KeyID: metadata.KeyID, - EncryptedDataKey: encryptedDataKey, - EncryptionContext: metadata.EncryptionContext, - BucketKeyEnabled: metadata.BucketKeyEnabled, - IV: iv, // Restore IV for decryption - ChunkOffset: metadata.PartOffset, // Use stored within-part offset - } - - glog.V(4).Infof("Deserialized SSE-KMS metadata: keyID=%s, bucketKey=%t", sseKey.KeyID, sseKey.BucketKeyEnabled) - return sseKey, nil -} - -// SSECMetadata represents SSE-C metadata for per-chunk storage (unified with SSE-KMS approach) -type SSECMetadata struct { - Algorithm string `json:"algorithm"` // SSE-C algorithm (always "AES256") - IV string `json:"iv"` // Base64-encoded initialization vector for this chunk - KeyMD5 string `json:"keyMD5"` // MD5 of the customer-provided key - PartOffset int64 `json:"partOffset"` // Offset within original multipart part (for IV calculation) -} - -// SerializeSSECMetadata serializes SSE-C metadata for storage in chunk metadata -func SerializeSSECMetadata(iv []byte, keyMD5 string, partOffset int64) ([]byte, error) { - if err := ValidateIV(iv, "IV"); err != nil { - return nil, err - } - - metadata := &SSECMetadata{ - Algorithm: s3_constants.SSEAlgorithmAES256, - IV: base64.StdEncoding.EncodeToString(iv), - KeyMD5: keyMD5, - PartOffset: partOffset, - } - - data, err := json.Marshal(metadata) - if err != nil { - return nil, fmt.Errorf("failed to marshal SSE-C metadata: %w", err) - } - - glog.V(4).Infof("Serialized SSE-C metadata: keyMD5=%s, partOffset=%d", keyMD5, partOffset) - return data, nil -} - -// DeserializeSSECMetadata deserializes SSE-C metadata from chunk storage -func DeserializeSSECMetadata(data []byte) (*SSECMetadata, error) { - if len(data) == 0 { - return nil, fmt.Errorf("empty SSE-C metadata") - } - - var metadata SSECMetadata - if err := json.Unmarshal(data, &metadata); err != nil { - return nil, fmt.Errorf("failed to unmarshal SSE-C metadata: %w", err) - } - - // Validate algorithm - if metadata.Algorithm != s3_constants.SSEAlgorithmAES256 { - return nil, fmt.Errorf("invalid SSE-C algorithm: %s", metadata.Algorithm) - } - - // Validate IV - if metadata.IV == "" { - return nil, fmt.Errorf("missing IV in SSE-C metadata") - } - - if _, err := base64.StdEncoding.DecodeString(metadata.IV); err != nil { - return nil, fmt.Errorf("invalid base64 IV in SSE-C metadata: %w", err) - } - - glog.V(4).Infof("Deserialized SSE-C metadata: keyMD5=%s, partOffset=%d", metadata.KeyMD5, metadata.PartOffset) - return &metadata, nil -} - -// AddSSEKMSResponseHeaders adds SSE-KMS response headers to an HTTP response -func AddSSEKMSResponseHeaders(w http.ResponseWriter, sseKey *SSEKMSKey) { - w.Header().Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmKMS) - w.Header().Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, sseKey.KeyID) - - if len(sseKey.EncryptionContext) > 0 { - // Encode encryption context as base64 JSON - contextBytes, err := json.Marshal(sseKey.EncryptionContext) - if err == nil { - contextB64 := base64.StdEncoding.EncodeToString(contextBytes) - w.Header().Set(s3_constants.AmzServerSideEncryptionContext, contextB64) - } else { - glog.Errorf("Failed to encode encryption context: %v", err) - } - } - - if sseKey.BucketKeyEnabled { - w.Header().Set(s3_constants.AmzServerSideEncryptionBucketKeyEnabled, "true") - } -} - -// IsSSEKMSRequest checks if the request contains SSE-KMS headers -func IsSSEKMSRequest(r *http.Request) bool { - // If SSE-C headers are present, this is not an SSE-KMS request (they are mutually exclusive) - if r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" { - return false - } - - // According to AWS S3 specification, SSE-KMS is only valid when the encryption header - // is explicitly set to "aws:kms". The KMS key ID header alone is not sufficient. - sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryption) - return sseAlgorithm == s3_constants.SSEAlgorithmKMS -} - -// IsSSEKMSEncrypted checks if the metadata indicates SSE-KMS encryption -func IsSSEKMSEncrypted(metadata map[string][]byte) bool { - if metadata == nil { - return false - } - - // The canonical way to identify an SSE-KMS encrypted object is by this header. - if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists { - return string(sseAlgorithm) == s3_constants.SSEAlgorithmKMS - } - - return false -} - -// IsAnySSEEncrypted checks if metadata indicates any type of SSE encryption -func IsAnySSEEncrypted(metadata map[string][]byte) bool { - if metadata == nil { - return false - } - - // Check for any SSE type - if IsSSECEncrypted(metadata) { - return true - } - if IsSSEKMSEncrypted(metadata) { - return true - } - - // Check for SSE-S3 - if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists { - return string(sseAlgorithm) == s3_constants.SSEAlgorithmAES256 - } - - return false -} - -// MapKMSErrorToS3Error maps KMS errors to appropriate S3 error codes -func MapKMSErrorToS3Error(err error) s3err.ErrorCode { - if err == nil { - return s3err.ErrNone - } - - // Check if it's a KMS error - kmsErr, ok := err.(*kms.KMSError) - if !ok { - return s3err.ErrInternalError - } - - switch kmsErr.Code { - case kms.ErrCodeNotFoundException: - return s3err.ErrKMSKeyNotFound - case kms.ErrCodeAccessDenied: - return s3err.ErrKMSAccessDenied - case kms.ErrCodeKeyUnavailable: - return s3err.ErrKMSDisabled - case kms.ErrCodeInvalidKeyUsage: - return s3err.ErrKMSAccessDenied - case kms.ErrCodeInvalidCiphertext: - return s3err.ErrKMSInvalidCiphertext - default: - glog.Errorf("Unmapped KMS error: %s - %s", kmsErr.Code, kmsErr.Message) - return s3err.ErrInternalError - } -} - -// SSEKMSCopyStrategy represents different strategies for copying SSE-KMS encrypted objects -type SSEKMSCopyStrategy int - -const ( - // SSEKMSCopyStrategyDirect - Direct chunk copy (same key, no re-encryption needed) - SSEKMSCopyStrategyDirect SSEKMSCopyStrategy = iota - // SSEKMSCopyStrategyDecryptEncrypt - Decrypt source and re-encrypt for destination - SSEKMSCopyStrategyDecryptEncrypt -) - -// String returns string representation of the strategy -func (s SSEKMSCopyStrategy) String() string { - switch s { - case SSEKMSCopyStrategyDirect: - return "Direct" - case SSEKMSCopyStrategyDecryptEncrypt: - return "DecryptEncrypt" - default: - return "Unknown" - } -} - -// GetSourceSSEKMSInfo extracts SSE-KMS information from source object metadata -func GetSourceSSEKMSInfo(metadata map[string][]byte) (keyID string, isEncrypted bool) { - if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists && string(sseAlgorithm) == s3_constants.SSEAlgorithmKMS { - if kmsKeyID, exists := metadata[s3_constants.AmzServerSideEncryptionAwsKmsKeyId]; exists { - return string(kmsKeyID), true - } - return "", true // SSE-KMS with default key - } - return "", false -} - -// CanDirectCopySSEKMS determines if we can directly copy chunks without decrypt/re-encrypt -func CanDirectCopySSEKMS(srcMetadata map[string][]byte, destKeyID string) bool { - srcKeyID, srcEncrypted := GetSourceSSEKMSInfo(srcMetadata) - - // Case 1: Source unencrypted, destination unencrypted -> Direct copy - if !srcEncrypted && destKeyID == "" { - return true - } - - // Case 2: Source encrypted with same KMS key as destination -> Direct copy - if srcEncrypted && destKeyID != "" { - // Same key if key IDs match (empty means default key) - return srcKeyID == destKeyID - } - - // All other cases require decrypt/re-encrypt - return false -} - -// DetermineSSEKMSCopyStrategy determines the optimal copy strategy for SSE-KMS -func DetermineSSEKMSCopyStrategy(srcMetadata map[string][]byte, destKeyID string) (SSEKMSCopyStrategy, error) { - if CanDirectCopySSEKMS(srcMetadata, destKeyID) { - return SSEKMSCopyStrategyDirect, nil - } - return SSEKMSCopyStrategyDecryptEncrypt, nil -} - -// ParseSSEKMSCopyHeaders parses SSE-KMS headers from copy request -func ParseSSEKMSCopyHeaders(r *http.Request) (destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, err error) { - // Check if this is an SSE-KMS request - if !IsSSEKMSRequest(r) { - return "", nil, false, nil - } - - // Get destination KMS key ID - destKeyID = r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - - // Validate key ID if provided - if destKeyID != "" && !isValidKMSKeyID(destKeyID) { - return "", nil, false, fmt.Errorf("invalid KMS key ID: %s", destKeyID) - } - - // Parse encryption context if provided - if contextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" { - contextBytes, decodeErr := base64.StdEncoding.DecodeString(contextHeader) - if decodeErr != nil { - return "", nil, false, fmt.Errorf("invalid encryption context encoding: %v", decodeErr) - } - - if unmarshalErr := json.Unmarshal(contextBytes, &encryptionContext); unmarshalErr != nil { - return "", nil, false, fmt.Errorf("invalid encryption context JSON: %v", unmarshalErr) - } - } - - // Parse bucket key enabled flag - if bucketKeyHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled); bucketKeyHeader != "" { - bucketKeyEnabled = strings.ToLower(bucketKeyHeader) == "true" - } - - return destKeyID, encryptionContext, bucketKeyEnabled, nil -} - -// UnifiedCopyStrategy represents all possible copy strategies across encryption types -type UnifiedCopyStrategy int - -const ( - // CopyStrategyDirect - Direct chunk copy (no encryption changes) - CopyStrategyDirect UnifiedCopyStrategy = iota - // CopyStrategyEncrypt - Encrypt during copy (plain โ†’ encrypted) - CopyStrategyEncrypt - // CopyStrategyDecrypt - Decrypt during copy (encrypted โ†’ plain) - CopyStrategyDecrypt - // CopyStrategyReencrypt - Decrypt and re-encrypt (different keys/methods) - CopyStrategyReencrypt - // CopyStrategyKeyRotation - Same object, different key (metadata-only update) - CopyStrategyKeyRotation -) - -// String returns string representation of the unified strategy -func (s UnifiedCopyStrategy) String() string { - switch s { - case CopyStrategyDirect: - return "Direct" - case CopyStrategyEncrypt: - return "Encrypt" - case CopyStrategyDecrypt: - return "Decrypt" - case CopyStrategyReencrypt: - return "Reencrypt" - case CopyStrategyKeyRotation: - return "KeyRotation" - default: - return "Unknown" - } -} - -// EncryptionState represents the encryption state of source and destination -type EncryptionState struct { - SrcSSEC bool - SrcSSEKMS bool - SrcSSES3 bool - DstSSEC bool - DstSSEKMS bool - DstSSES3 bool - SameObject bool -} - -// IsSourceEncrypted returns true if source has any encryption -func (e *EncryptionState) IsSourceEncrypted() bool { - return e.SrcSSEC || e.SrcSSEKMS || e.SrcSSES3 -} - -// IsTargetEncrypted returns true if target should be encrypted -func (e *EncryptionState) IsTargetEncrypted() bool { - return e.DstSSEC || e.DstSSEKMS || e.DstSSES3 -} - -// DetermineUnifiedCopyStrategy determines the optimal copy strategy for all encryption types -func DetermineUnifiedCopyStrategy(state *EncryptionState, srcMetadata map[string][]byte, r *http.Request) (UnifiedCopyStrategy, error) { - // Key rotation: same object with different encryption - if state.SameObject && state.IsSourceEncrypted() && state.IsTargetEncrypted() { - // Check if it's actually a key change - if state.SrcSSEC && state.DstSSEC { - // SSE-C key rotation - need to compare keys - return CopyStrategyKeyRotation, nil - } - if state.SrcSSEKMS && state.DstSSEKMS { - // SSE-KMS key rotation - need to compare key IDs - srcKeyID, _ := GetSourceSSEKMSInfo(srcMetadata) - dstKeyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - if srcKeyID != dstKeyID { - return CopyStrategyKeyRotation, nil - } - } - } - - // Direct copy: no encryption changes - if !state.IsSourceEncrypted() && !state.IsTargetEncrypted() { - return CopyStrategyDirect, nil - } - - // Same encryption type and key - if state.SrcSSEKMS && state.DstSSEKMS { - srcKeyID, _ := GetSourceSSEKMSInfo(srcMetadata) - dstKeyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - if srcKeyID == dstKeyID { - return CopyStrategyDirect, nil - } - } - - if state.SrcSSEC && state.DstSSEC { - // For SSE-C, we'd need to compare the actual keys, but we can't do that securely - // So we assume different keys and use reencrypt strategy - return CopyStrategyReencrypt, nil - } - - // Encrypt: plain โ†’ encrypted - if !state.IsSourceEncrypted() && state.IsTargetEncrypted() { - return CopyStrategyEncrypt, nil - } - - // Decrypt: encrypted โ†’ plain - if state.IsSourceEncrypted() && !state.IsTargetEncrypted() { - return CopyStrategyDecrypt, nil - } - - // Reencrypt: different encryption types or keys - if state.IsSourceEncrypted() && state.IsTargetEncrypted() { - return CopyStrategyReencrypt, nil - } - - return CopyStrategyDirect, nil -} - -// DetectEncryptionState analyzes the source metadata and request headers to determine encryption state -func DetectEncryptionState(srcMetadata map[string][]byte, r *http.Request, srcPath, dstPath string) *EncryptionState { - state := &EncryptionState{ - SrcSSEC: IsSSECEncrypted(srcMetadata), - SrcSSEKMS: IsSSEKMSEncrypted(srcMetadata), - SrcSSES3: IsSSES3EncryptedInternal(srcMetadata), - DstSSEC: IsSSECRequest(r), - DstSSEKMS: IsSSEKMSRequest(r), - DstSSES3: IsSSES3RequestInternal(r), - SameObject: srcPath == dstPath, - } - - return state -} - -// DetectEncryptionStateWithEntry analyzes the source entry and request headers to determine encryption state -// This version can detect multipart encrypted objects by examining chunks -func DetectEncryptionStateWithEntry(entry *filer_pb.Entry, r *http.Request, srcPath, dstPath string) *EncryptionState { - state := &EncryptionState{ - SrcSSEC: IsSSECEncryptedWithEntry(entry), - SrcSSEKMS: IsSSEKMSEncryptedWithEntry(entry), - SrcSSES3: IsSSES3EncryptedInternal(entry.Extended), - DstSSEC: IsSSECRequest(r), - DstSSEKMS: IsSSEKMSRequest(r), - DstSSES3: IsSSES3RequestInternal(r), - SameObject: srcPath == dstPath, - } - - return state -} - -// IsSSEKMSEncryptedWithEntry detects SSE-KMS encryption from entry (including multipart objects) -func IsSSEKMSEncryptedWithEntry(entry *filer_pb.Entry) bool { - if entry == nil { - return false - } - - // Check object-level metadata first - if IsSSEKMSEncrypted(entry.Extended) { - return true - } - - // Check for multipart SSE-KMS by examining chunks - if len(entry.GetChunks()) > 0 { - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS { - return true - } - } - } - - return false -} - -// IsSSECEncryptedWithEntry detects SSE-C encryption from entry (including multipart objects) -func IsSSECEncryptedWithEntry(entry *filer_pb.Entry) bool { - if entry == nil { - return false - } - - // Check object-level metadata first - if IsSSECEncrypted(entry.Extended) { - return true - } - - // Check for multipart SSE-C by examining chunks - if len(entry.GetChunks()) > 0 { - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - return true - } - } - } - - return false -} - -// Helper functions for SSE-C detection are in s3_sse_c.go diff --git a/weed/s3api/s3_sse_kms_test.go b/weed/s3api/s3_sse_kms_test.go deleted file mode 100644 index 487a239a5..000000000 --- a/weed/s3api/s3_sse_kms_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package s3api - -import ( - "bytes" - "encoding/json" - "io" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -func TestSSEKMSEncryptionDecryption(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Test data - testData := "Hello, SSE-KMS world! This is a test of envelope encryption." - testReader := strings.NewReader(testData) - - // Create encryption context - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - // Encrypt the data - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(testReader, kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - // Verify SSE key metadata - if sseKey.KeyID != kmsKey.KeyID { - t.Errorf("Expected key ID %s, got %s", kmsKey.KeyID, sseKey.KeyID) - } - - if len(sseKey.EncryptedDataKey) == 0 { - t.Error("Encrypted data key should not be empty") - } - - if sseKey.EncryptionContext == nil { - t.Error("Encryption context should not be nil") - } - - // Read the encrypted data - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Verify the encrypted data is different from original - if string(encryptedData) == testData { - t.Error("Encrypted data should be different from original data") - } - - // The encrypted data should be same size as original (IV is stored in metadata, not in stream) - if len(encryptedData) != len(testData) { - t.Errorf("Encrypted data should be same size as original: expected %d, got %d", len(testData), len(encryptedData)) - } - - // Decrypt the data - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Read the decrypted data - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - // Verify the decrypted data matches the original - if string(decryptedData) != testData { - t.Errorf("Decrypted data does not match original.\nExpected: %s\nGot: %s", testData, string(decryptedData)) - } -} - -func TestSSEKMSKeyValidation(t *testing.T) { - tests := []struct { - name string - keyID string - wantValid bool - }{ - { - name: "Valid UUID key ID", - keyID: "12345678-1234-1234-1234-123456789012", - wantValid: true, - }, - { - name: "Valid alias", - keyID: "alias/my-test-key", - wantValid: true, - }, - { - name: "Valid ARN", - keyID: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012", - wantValid: true, - }, - { - name: "Valid alias ARN", - keyID: "arn:aws:kms:us-east-1:123456789012:alias/my-test-key", - wantValid: true, - }, - - { - name: "Valid test key format", - keyID: "invalid-key-format", - wantValid: true, // Now valid - following Minio's permissive approach - }, - { - name: "Valid short key", - keyID: "12345678-1234", - wantValid: true, // Now valid - following Minio's permissive approach - }, - { - name: "Invalid - leading space", - keyID: " leading-space", - wantValid: false, - }, - { - name: "Invalid - trailing space", - keyID: "trailing-space ", - wantValid: false, - }, - { - name: "Invalid - empty", - keyID: "", - wantValid: false, - }, - { - name: "Invalid - internal spaces", - keyID: "invalid key id", - wantValid: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - valid := isValidKMSKeyID(tt.keyID) - if valid != tt.wantValid { - t.Errorf("isValidKMSKeyID(%s) = %v, want %v", tt.keyID, valid, tt.wantValid) - } - }) - } -} - -func TestSSEKMSMetadataSerialization(t *testing.T) { - // Create test SSE key - sseKey := &SSEKMSKey{ - KeyID: "test-key-id", - EncryptedDataKey: []byte("encrypted-data-key"), - EncryptionContext: map[string]string{ - "aws:s3:arn": "arn:aws:s3:::test-bucket/test-object", - }, - BucketKeyEnabled: true, - } - - // Serialize metadata - serialized, err := SerializeSSEKMSMetadata(sseKey) - if err != nil { - t.Fatalf("Failed to serialize SSE-KMS metadata: %v", err) - } - - // Verify it's valid JSON - var jsonData map[string]interface{} - if err := json.Unmarshal(serialized, &jsonData); err != nil { - t.Fatalf("Serialized data is not valid JSON: %v", err) - } - - // Deserialize metadata - deserializedKey, err := DeserializeSSEKMSMetadata(serialized) - if err != nil { - t.Fatalf("Failed to deserialize SSE-KMS metadata: %v", err) - } - - // Verify the deserialized data matches original - if deserializedKey.KeyID != sseKey.KeyID { - t.Errorf("KeyID mismatch: expected %s, got %s", sseKey.KeyID, deserializedKey.KeyID) - } - - if !bytes.Equal(deserializedKey.EncryptedDataKey, sseKey.EncryptedDataKey) { - t.Error("EncryptedDataKey mismatch") - } - - if len(deserializedKey.EncryptionContext) != len(sseKey.EncryptionContext) { - t.Error("EncryptionContext length mismatch") - } - - for k, v := range sseKey.EncryptionContext { - if deserializedKey.EncryptionContext[k] != v { - t.Errorf("EncryptionContext mismatch for key %s: expected %s, got %s", k, v, deserializedKey.EncryptionContext[k]) - } - } - - if deserializedKey.BucketKeyEnabled != sseKey.BucketKeyEnabled { - t.Errorf("BucketKeyEnabled mismatch: expected %v, got %v", sseKey.BucketKeyEnabled, deserializedKey.BucketKeyEnabled) - } -} - -func TestBuildEncryptionContext(t *testing.T) { - tests := []struct { - name string - bucket string - object string - useBucketKey bool - expectedARN string - }{ - { - name: "Object-level encryption", - bucket: "test-bucket", - object: "test-object", - useBucketKey: false, - expectedARN: "arn:aws:s3:::test-bucket/test-object", - }, - { - name: "Bucket-level encryption", - bucket: "test-bucket", - object: "test-object", - useBucketKey: true, - expectedARN: "arn:aws:s3:::test-bucket", - }, - { - name: "Nested object path", - bucket: "my-bucket", - object: "folder/subfolder/file.txt", - useBucketKey: false, - expectedARN: "arn:aws:s3:::my-bucket/folder/subfolder/file.txt", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - context := BuildEncryptionContext(tt.bucket, tt.object, tt.useBucketKey) - - if context == nil { - t.Fatal("Encryption context should not be nil") - } - - arn, exists := context[kms.EncryptionContextS3ARN] - if !exists { - t.Error("Encryption context should contain S3 ARN") - } - - if arn != tt.expectedARN { - t.Errorf("Expected ARN %s, got %s", tt.expectedARN, arn) - } - }) - } -} - -func TestKMSErrorMapping(t *testing.T) { - tests := []struct { - name string - kmsError *kms.KMSError - expectedErr string - }{ - { - name: "Key not found", - kmsError: &kms.KMSError{ - Code: kms.ErrCodeNotFoundException, - Message: "Key not found", - }, - expectedErr: "KMSKeyNotFoundException", - }, - { - name: "Access denied", - kmsError: &kms.KMSError{ - Code: kms.ErrCodeAccessDenied, - Message: "Access denied", - }, - expectedErr: "KMSAccessDeniedException", - }, - { - name: "Key unavailable", - kmsError: &kms.KMSError{ - Code: kms.ErrCodeKeyUnavailable, - Message: "Key is disabled", - }, - expectedErr: "KMSKeyDisabledException", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - errorCode := MapKMSErrorToS3Error(tt.kmsError) - - // Get the actual error description - apiError := s3err.GetAPIError(errorCode) - if apiError.Code != tt.expectedErr { - t.Errorf("Expected error code %s, got %s", tt.expectedErr, apiError.Code) - } - }) - } -} - -// TestLargeDataEncryption tests encryption/decryption of larger data streams -func TestSSEKMSLargeDataEncryption(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Create a larger test dataset (1MB) - testData := strings.Repeat("This is a test of SSE-KMS with larger data streams. ", 20000) - testReader := strings.NewReader(testData) - - // Create encryption context - encryptionContext := BuildEncryptionContext("large-bucket", "large-object", false) - - // Encrypt the data - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(testReader, kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - // Read the encrypted data - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Decrypt the data - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - // Read the decrypted data - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - // Verify the decrypted data matches the original - if string(decryptedData) != testData { - t.Errorf("Decrypted data length: %d, original data length: %d", len(decryptedData), len(testData)) - t.Error("Decrypted large data does not match original") - } - - t.Logf("Successfully encrypted/decrypted %d bytes of data", len(testData)) -} - -// TestValidateSSEKMSKey tests the ValidateSSEKMSKey function, which correctly handles empty key IDs -func TestValidateSSEKMSKey(t *testing.T) { - tests := []struct { - name string - sseKey *SSEKMSKey - wantErr bool - }{ - { - name: "nil SSE-KMS key", - sseKey: nil, - wantErr: true, - }, - { - name: "empty key ID (valid - represents default KMS key)", - sseKey: &SSEKMSKey{ - KeyID: "", - EncryptionContext: map[string]string{"test": "value"}, - BucketKeyEnabled: false, - }, - wantErr: false, - }, - { - name: "valid UUID key ID", - sseKey: &SSEKMSKey{ - KeyID: "12345678-1234-1234-1234-123456789012", - EncryptionContext: map[string]string{"test": "value"}, - BucketKeyEnabled: true, - }, - wantErr: false, - }, - { - name: "valid alias", - sseKey: &SSEKMSKey{ - KeyID: "alias/my-test-key", - EncryptionContext: map[string]string{}, - BucketKeyEnabled: false, - }, - wantErr: false, - }, - { - name: "valid flexible key ID format", - sseKey: &SSEKMSKey{ - KeyID: "invalid-format", - EncryptionContext: map[string]string{}, - BucketKeyEnabled: false, - }, - wantErr: false, // Now valid - following Minio's permissive approach - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateSSEKMSKey(tt.sseKey) - if (err != nil) != tt.wantErr { - t.Errorf("ValidateSSEKMSKey() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/weed/s3api/s3_sse_kms_utils.go b/weed/s3api/s3_sse_kms_utils.go deleted file mode 100644 index be6d72626..000000000 --- a/weed/s3api/s3_sse_kms_utils.go +++ /dev/null @@ -1,99 +0,0 @@ -package s3api - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// KMSDataKeyResult holds the result of data key generation -type KMSDataKeyResult struct { - Response *kms.GenerateDataKeyResponse - Block cipher.Block -} - -// generateKMSDataKey generates a new data encryption key using KMS -// This function encapsulates the common pattern used across all SSE-KMS functions -func generateKMSDataKey(keyID string, encryptionContext map[string]string) (*KMSDataKeyResult, error) { - // Validate keyID to prevent injection attacks and malformed requests to KMS service - if !isValidKMSKeyID(keyID) { - return nil, fmt.Errorf("invalid KMS key ID format: key ID must be non-empty, without spaces or control characters") - } - - // Validate encryption context to prevent malformed requests to KMS service - if encryptionContext != nil { - for key, value := range encryptionContext { - // Validate context keys and values for basic security - if strings.TrimSpace(key) == "" { - return nil, fmt.Errorf("invalid encryption context: keys cannot be empty or whitespace-only") - } - if strings.ContainsAny(key, "\x00\n\r\t") || strings.ContainsAny(value, "\x00\n\r\t") { - return nil, fmt.Errorf("invalid encryption context: keys and values cannot contain control characters") - } - // AWS KMS has limits on key/value lengths - if len(key) > 2048 || len(value) > 2048 { - return nil, fmt.Errorf("invalid encryption context: keys and values must be โ‰ค 2048 characters (key=%d, value=%d)", len(key), len(value)) - } - } - // AWS KMS has a limit on the total number of context pairs - if len(encryptionContext) > s3_constants.MaxKMSEncryptionContextPairs { - return nil, fmt.Errorf("invalid encryption context: cannot exceed %d key-value pairs, got %d", s3_constants.MaxKMSEncryptionContextPairs, len(encryptionContext)) - } - } - - // Get KMS provider - kmsProvider := kms.GetGlobalKMS() - if kmsProvider == nil { - return nil, fmt.Errorf("KMS is not configured") - } - - // Create data key request - generateDataKeyReq := &kms.GenerateDataKeyRequest{ - KeyID: keyID, - KeySpec: kms.KeySpecAES256, - EncryptionContext: encryptionContext, - } - - // Generate the data key - dataKeyResp, err := kmsProvider.GenerateDataKey(context.Background(), generateDataKeyReq) - if err != nil { - return nil, fmt.Errorf("failed to generate KMS data key: %v", err) - } - - // Create AES cipher with the plaintext data key - block, err := aes.NewCipher(dataKeyResp.Plaintext) - if err != nil { - // Clear sensitive data before returning error - kms.ClearSensitiveData(dataKeyResp.Plaintext) - return nil, fmt.Errorf("failed to create AES cipher: %v", err) - } - - return &KMSDataKeyResult{ - Response: dataKeyResp, - Block: block, - }, nil -} - -// clearKMSDataKey safely clears sensitive data from a KMSDataKeyResult -func clearKMSDataKey(result *KMSDataKeyResult) { - if result != nil && result.Response != nil { - kms.ClearSensitiveData(result.Response.Plaintext) - } -} - -// createSSEKMSKey creates an SSEKMSKey struct from data key result and parameters -func createSSEKMSKey(result *KMSDataKeyResult, encryptionContext map[string]string, bucketKeyEnabled bool, iv []byte, chunkOffset int64) *SSEKMSKey { - return &SSEKMSKey{ - KeyID: result.Response.KeyID, - EncryptedDataKey: result.Response.CiphertextBlob, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - IV: iv, - ChunkOffset: chunkOffset, - } -} diff --git a/weed/s3api/s3_sse_metadata.go b/weed/s3api/s3_sse_metadata.go deleted file mode 100644 index 8b641f150..000000000 --- a/weed/s3api/s3_sse_metadata.go +++ /dev/null @@ -1,159 +0,0 @@ -package s3api - -import ( - "encoding/base64" - "encoding/json" - "fmt" -) - -// SSE metadata keys for storing encryption information in entry metadata -const ( - // MetaSSEIV is the initialization vector used for encryption - MetaSSEIV = "X-SeaweedFS-Server-Side-Encryption-Iv" - - // MetaSSEAlgorithm is the encryption algorithm used - MetaSSEAlgorithm = "X-SeaweedFS-Server-Side-Encryption-Algorithm" - - // MetaSSECKeyMD5 is the MD5 hash of the SSE-C customer key - MetaSSECKeyMD5 = "X-SeaweedFS-Server-Side-Encryption-Customer-Key-MD5" - - // MetaSSEKMSKeyID is the KMS key ID used for encryption - MetaSSEKMSKeyID = "X-SeaweedFS-Server-Side-Encryption-KMS-Key-Id" - - // MetaSSEKMSEncryptedKey is the encrypted data key from KMS - MetaSSEKMSEncryptedKey = "X-SeaweedFS-Server-Side-Encryption-KMS-Encrypted-Key" - - // MetaSSEKMSContext is the encryption context for KMS - MetaSSEKMSContext = "X-SeaweedFS-Server-Side-Encryption-KMS-Context" - - // MetaSSES3KeyID is the key ID for SSE-S3 encryption - MetaSSES3KeyID = "X-SeaweedFS-Server-Side-Encryption-S3-Key-Id" -) - -// StoreIVInMetadata stores the IV in entry metadata as base64 encoded string -func StoreIVInMetadata(metadata map[string][]byte, iv []byte) { - if len(iv) > 0 { - metadata[MetaSSEIV] = []byte(base64.StdEncoding.EncodeToString(iv)) - } -} - -// GetIVFromMetadata retrieves the IV from entry metadata -func GetIVFromMetadata(metadata map[string][]byte) ([]byte, error) { - if ivBase64, exists := metadata[MetaSSEIV]; exists { - iv, err := base64.StdEncoding.DecodeString(string(ivBase64)) - if err != nil { - return nil, fmt.Errorf("failed to decode IV from metadata: %w", err) - } - return iv, nil - } - return nil, fmt.Errorf("IV not found in metadata") -} - -// StoreSSECMetadata stores SSE-C related metadata -func StoreSSECMetadata(metadata map[string][]byte, iv []byte, keyMD5 string) { - StoreIVInMetadata(metadata, iv) - metadata[MetaSSEAlgorithm] = []byte("AES256") - if keyMD5 != "" { - metadata[MetaSSECKeyMD5] = []byte(keyMD5) - } -} - -// StoreSSEKMSMetadata stores SSE-KMS related metadata -func StoreSSEKMSMetadata(metadata map[string][]byte, iv []byte, keyID string, encryptedKey []byte, context map[string]string) { - StoreIVInMetadata(metadata, iv) - metadata[MetaSSEAlgorithm] = []byte("aws:kms") - if keyID != "" { - metadata[MetaSSEKMSKeyID] = []byte(keyID) - } - if len(encryptedKey) > 0 { - metadata[MetaSSEKMSEncryptedKey] = []byte(base64.StdEncoding.EncodeToString(encryptedKey)) - } - if len(context) > 0 { - // Marshal context to JSON to handle special characters correctly - contextBytes, err := json.Marshal(context) - if err == nil { - metadata[MetaSSEKMSContext] = contextBytes - } - // Note: json.Marshal for map[string]string should never fail, but we handle it gracefully - } -} - -// StoreSSES3Metadata stores SSE-S3 related metadata -func StoreSSES3Metadata(metadata map[string][]byte, iv []byte, keyID string) { - StoreIVInMetadata(metadata, iv) - metadata[MetaSSEAlgorithm] = []byte("AES256") - if keyID != "" { - metadata[MetaSSES3KeyID] = []byte(keyID) - } -} - -// GetSSECMetadata retrieves SSE-C metadata -func GetSSECMetadata(metadata map[string][]byte) (iv []byte, keyMD5 string, err error) { - iv, err = GetIVFromMetadata(metadata) - if err != nil { - return nil, "", err - } - - if keyMD5Bytes, exists := metadata[MetaSSECKeyMD5]; exists { - keyMD5 = string(keyMD5Bytes) - } - - return iv, keyMD5, nil -} - -// GetSSEKMSMetadata retrieves SSE-KMS metadata -func GetSSEKMSMetadata(metadata map[string][]byte) (iv []byte, keyID string, encryptedKey []byte, context map[string]string, err error) { - iv, err = GetIVFromMetadata(metadata) - if err != nil { - return nil, "", nil, nil, err - } - - if keyIDBytes, exists := metadata[MetaSSEKMSKeyID]; exists { - keyID = string(keyIDBytes) - } - - if encKeyBase64, exists := metadata[MetaSSEKMSEncryptedKey]; exists { - encryptedKey, err = base64.StdEncoding.DecodeString(string(encKeyBase64)) - if err != nil { - return nil, "", nil, nil, fmt.Errorf("failed to decode encrypted key: %w", err) - } - } - - // Parse context from JSON - if contextBytes, exists := metadata[MetaSSEKMSContext]; exists { - context = make(map[string]string) - if err := json.Unmarshal(contextBytes, &context); err != nil { - return nil, "", nil, nil, fmt.Errorf("failed to parse KMS context JSON: %w", err) - } - } - - return iv, keyID, encryptedKey, context, nil -} - -// GetSSES3Metadata retrieves SSE-S3 metadata -func GetSSES3Metadata(metadata map[string][]byte) (iv []byte, keyID string, err error) { - iv, err = GetIVFromMetadata(metadata) - if err != nil { - return nil, "", err - } - - if keyIDBytes, exists := metadata[MetaSSES3KeyID]; exists { - keyID = string(keyIDBytes) - } - - return iv, keyID, nil -} - -// IsSSEEncrypted checks if the metadata indicates any form of SSE encryption -func IsSSEEncrypted(metadata map[string][]byte) bool { - _, exists := metadata[MetaSSEIV] - return exists -} - -// GetSSEAlgorithm returns the SSE algorithm from metadata -func GetSSEAlgorithm(metadata map[string][]byte) string { - if alg, exists := metadata[MetaSSEAlgorithm]; exists { - return string(alg) - } - return "" -} diff --git a/weed/s3api/s3_sse_metadata_test.go b/weed/s3api/s3_sse_metadata_test.go deleted file mode 100644 index c0c1360af..000000000 --- a/weed/s3api/s3_sse_metadata_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package s3api - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestSSECIsEncrypted tests detection of SSE-C encryption from metadata -func TestSSECIsEncrypted(t *testing.T) { - testCases := []struct { - name string - metadata map[string][]byte - expected bool - }{ - { - name: "Empty metadata", - metadata: CreateTestMetadata(), - expected: false, - }, - { - name: "Valid SSE-C metadata", - metadata: CreateTestMetadataWithSSEC(GenerateTestSSECKey(1)), - expected: true, - }, - { - name: "SSE-C algorithm only", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerAlgorithm: []byte("AES256"), - }, - expected: true, - }, - { - name: "SSE-C key MD5 only", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerKeyMD5: []byte("somemd5"), - }, - expected: true, - }, - { - name: "Other encryption type (SSE-KMS)", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - }, - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := IsSSECEncrypted(tc.metadata) - if result != tc.expected { - t.Errorf("Expected %v, got %v", tc.expected, result) - } - }) - } -} - -// TestSSEKMSIsEncrypted tests detection of SSE-KMS encryption from metadata -func TestSSEKMSIsEncrypted(t *testing.T) { - testCases := []struct { - name string - metadata map[string][]byte - expected bool - }{ - { - name: "Empty metadata", - metadata: CreateTestMetadata(), - expected: false, - }, - { - name: "Valid SSE-KMS metadata", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzEncryptedDataKey: []byte("encrypted-key"), - }, - expected: true, - }, - { - name: "SSE-KMS algorithm only", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - }, - expected: true, - }, - { - name: "SSE-KMS encrypted data key only", - metadata: map[string][]byte{ - s3_constants.AmzEncryptedDataKey: []byte("encrypted-key"), - }, - expected: false, // Only encrypted data key without algorithm header should not be considered SSE-KMS - }, - { - name: "Other encryption type (SSE-C)", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerAlgorithm: []byte("AES256"), - }, - expected: false, - }, - { - name: "SSE-S3 (AES256)", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("AES256"), - }, - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := IsSSEKMSEncrypted(tc.metadata) - if result != tc.expected { - t.Errorf("Expected %v, got %v", tc.expected, result) - } - }) - } -} - -// TestSSETypeDiscrimination tests that SSE types don't interfere with each other -func TestSSETypeDiscrimination(t *testing.T) { - // Test SSE-C headers don't trigger SSE-KMS detection - t.Run("SSE-C headers don't trigger SSE-KMS", func(t *testing.T) { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - keyPair := GenerateTestSSECKey(1) - SetupTestSSECHeaders(req, keyPair) - - // Should detect SSE-C, not SSE-KMS - if !IsSSECRequest(req) { - t.Error("Should detect SSE-C request") - } - if IsSSEKMSRequest(req) { - t.Error("Should not detect SSE-KMS request for SSE-C headers") - } - }) - - // Test SSE-KMS headers don't trigger SSE-C detection - t.Run("SSE-KMS headers don't trigger SSE-C", func(t *testing.T) { - req := CreateTestHTTPRequest("PUT", "/bucket/object", nil) - SetupTestSSEKMSHeaders(req, "test-key-id") - - // Should detect SSE-KMS, not SSE-C - if IsSSECRequest(req) { - t.Error("Should not detect SSE-C request for SSE-KMS headers") - } - if !IsSSEKMSRequest(req) { - t.Error("Should detect SSE-KMS request") - } - }) - - // Test metadata discrimination - t.Run("Metadata type discrimination", func(t *testing.T) { - ssecMetadata := CreateTestMetadataWithSSEC(GenerateTestSSECKey(1)) - - // Should detect as SSE-C, not SSE-KMS - if !IsSSECEncrypted(ssecMetadata) { - t.Error("Should detect SSE-C encrypted metadata") - } - if IsSSEKMSEncrypted(ssecMetadata) { - t.Error("Should not detect SSE-KMS for SSE-C metadata") - } - }) -} - -// TestSSECParseCorruptedMetadata tests handling of corrupted SSE-C metadata -func TestSSECParseCorruptedMetadata(t *testing.T) { - testCases := []struct { - name string - metadata map[string][]byte - expectError bool - errorMessage string - }{ - { - name: "Missing algorithm", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerKeyMD5: []byte("valid-md5"), - }, - expectError: false, // Detection should still work with partial metadata - }, - { - name: "Invalid key MD5 format", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerAlgorithm: []byte("AES256"), - s3_constants.AmzServerSideEncryptionCustomerKeyMD5: []byte("invalid-base64!"), - }, - expectError: false, // Detection should work, validation happens later - }, - { - name: "Empty values", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryptionCustomerAlgorithm: []byte(""), - s3_constants.AmzServerSideEncryptionCustomerKeyMD5: []byte(""), - }, - expectError: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Test that detection doesn't panic on corrupted metadata - result := IsSSECEncrypted(tc.metadata) - // The detection should be robust and not crash - t.Logf("Detection result for %s: %v", tc.name, result) - }) - } -} - -// TestSSEKMSParseCorruptedMetadata tests handling of corrupted SSE-KMS metadata -func TestSSEKMSParseCorruptedMetadata(t *testing.T) { - testCases := []struct { - name string - metadata map[string][]byte - }{ - { - name: "Invalid encrypted data key", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzEncryptedDataKey: []byte("invalid-base64!"), - }, - }, - { - name: "Invalid encryption context", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - s3_constants.AmzEncryptionContextMeta: []byte("invalid-json"), - }, - }, - { - name: "Empty values", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte(""), - s3_constants.AmzEncryptedDataKey: []byte(""), - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Test that detection doesn't panic on corrupted metadata - result := IsSSEKMSEncrypted(tc.metadata) - t.Logf("Detection result for %s: %v", tc.name, result) - }) - } -} - -// TestSSEMetadataDeserialization tests SSE-KMS metadata deserialization with various inputs -func TestSSEMetadataDeserialization(t *testing.T) { - testCases := []struct { - name string - data []byte - expectError bool - }{ - { - name: "Empty data", - data: []byte{}, - expectError: true, - }, - { - name: "Invalid JSON", - data: []byte("invalid-json"), - expectError: true, - }, - { - name: "Valid JSON but wrong structure", - data: []byte(`{"wrong": "structure"}`), - expectError: false, // Our deserialization might be lenient - }, - { - name: "Null data", - data: nil, - expectError: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - _, err := DeserializeSSEKMSMetadata(tc.data) - if tc.expectError && err == nil { - t.Error("Expected error but got none") - } - if !tc.expectError && err != nil { - t.Errorf("Expected no error but got: %v", err) - } - }) - } -} - -// TestGeneralSSEDetection tests the general SSE detection that works across types -func TestGeneralSSEDetection(t *testing.T) { - testCases := []struct { - name string - metadata map[string][]byte - expected bool - }{ - { - name: "No encryption", - metadata: CreateTestMetadata(), - expected: false, - }, - { - name: "SSE-C encrypted", - metadata: CreateTestMetadataWithSSEC(GenerateTestSSECKey(1)), - expected: true, - }, - { - name: "SSE-KMS encrypted", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("aws:kms"), - }, - expected: true, - }, - { - name: "SSE-S3 encrypted", - metadata: map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte("AES256"), - }, - expected: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := IsAnySSEEncrypted(tc.metadata) - if result != tc.expected { - t.Errorf("Expected %v, got %v", tc.expected, result) - } - }) - } -} diff --git a/weed/s3api/s3_sse_multipart_test.go b/weed/s3api/s3_sse_multipart_test.go deleted file mode 100644 index ba67a4c5c..000000000 --- a/weed/s3api/s3_sse_multipart_test.go +++ /dev/null @@ -1,517 +0,0 @@ -package s3api - -import ( - "bytes" - "fmt" - "io" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestSSECMultipartUpload tests SSE-C with multipart uploads -func TestSSECMultipartUpload(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - // Test data larger than typical part size - testData := strings.Repeat("Hello, SSE-C multipart world! ", 1000) // ~30KB - - t.Run("Single part encryption/decryption", func(t *testing.T) { - // Encrypt the data - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(testData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Decrypt the data - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - if string(decryptedData) != testData { - t.Error("Decrypted data doesn't match original") - } - }) - - t.Run("Simulated multipart upload parts", func(t *testing.T) { - // Simulate multiple parts (each part gets encrypted separately) - partSize := 5 * 1024 // 5KB parts - var encryptedParts [][]byte - var partIVs [][]byte - - for i := 0; i < len(testData); i += partSize { - end := i + partSize - if end > len(testData) { - end = len(testData) - } - - partData := testData[i:end] - - // Each part is encrypted separately in multipart uploads - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(partData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for part %d: %v", i/partSize, err) - } - - encryptedPart, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted part %d: %v", i/partSize, err) - } - - encryptedParts = append(encryptedParts, encryptedPart) - partIVs = append(partIVs, iv) - } - - // Simulate reading back the multipart object - var reconstructedData strings.Builder - - for i, encryptedPart := range encryptedParts { - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedPart), customerKey, partIVs[i]) - if err != nil { - t.Fatalf("Failed to create decrypted reader for part %d: %v", i, err) - } - - decryptedPart, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted part %d: %v", i, err) - } - - reconstructedData.Write(decryptedPart) - } - - if reconstructedData.String() != testData { - t.Error("Reconstructed multipart data doesn't match original") - } - }) - - t.Run("Multipart with different part sizes", func(t *testing.T) { - partSizes := []int{1024, 2048, 4096, 8192} // Various part sizes - - for _, partSize := range partSizes { - t.Run(fmt.Sprintf("PartSize_%d", partSize), func(t *testing.T) { - var encryptedParts [][]byte - var partIVs [][]byte - - for i := 0; i < len(testData); i += partSize { - end := i + partSize - if end > len(testData) { - end = len(testData) - } - - partData := testData[i:end] - - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(partData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedPart, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted part: %v", err) - } - - encryptedParts = append(encryptedParts, encryptedPart) - partIVs = append(partIVs, iv) - } - - // Verify reconstruction - var reconstructedData strings.Builder - - for j, encryptedPart := range encryptedParts { - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedPart), customerKey, partIVs[j]) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - decryptedPart, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted part: %v", err) - } - - reconstructedData.Write(decryptedPart) - } - - if reconstructedData.String() != testData { - t.Errorf("Reconstructed data doesn't match original for part size %d", partSize) - } - }) - } - }) -} - -// TestSSEKMSMultipartUpload tests SSE-KMS with multipart uploads -func TestSSEKMSMultipartUpload(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - // Test data larger than typical part size - testData := strings.Repeat("Hello, SSE-KMS multipart world! ", 1000) // ~30KB - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - t.Run("Single part encryption/decryption", func(t *testing.T) { - // Encrypt the data - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(testData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data: %v", err) - } - - // Decrypt the data - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data: %v", err) - } - - if string(decryptedData) != testData { - t.Error("Decrypted data doesn't match original") - } - }) - - t.Run("Simulated multipart upload parts", func(t *testing.T) { - // Simulate multiple parts (each part might use the same or different KMS operations) - partSize := 5 * 1024 // 5KB parts - var encryptedParts [][]byte - var sseKeys []*SSEKMSKey - - for i := 0; i < len(testData); i += partSize { - end := i + partSize - if end > len(testData) { - end = len(testData) - } - - partData := testData[i:end] - - // Each part might get its own data key in KMS multipart uploads - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(partData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader for part %d: %v", i/partSize, err) - } - - encryptedPart, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted part %d: %v", i/partSize, err) - } - - encryptedParts = append(encryptedParts, encryptedPart) - sseKeys = append(sseKeys, sseKey) - } - - // Simulate reading back the multipart object - var reconstructedData strings.Builder - - for i, encryptedPart := range encryptedParts { - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedPart), sseKeys[i]) - if err != nil { - t.Fatalf("Failed to create decrypted reader for part %d: %v", i, err) - } - - decryptedPart, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted part %d: %v", i, err) - } - - reconstructedData.Write(decryptedPart) - } - - if reconstructedData.String() != testData { - t.Error("Reconstructed multipart data doesn't match original") - } - }) - - t.Run("Multipart consistency checks", func(t *testing.T) { - // Test that all parts use the same KMS key ID but different data keys - partSize := 5 * 1024 - var sseKeys []*SSEKMSKey - - for i := 0; i < len(testData); i += partSize { - end := i + partSize - if end > len(testData) { - end = len(testData) - } - - partData := testData[i:end] - - _, sseKey, err := CreateSSEKMSEncryptedReader(strings.NewReader(partData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader: %v", err) - } - - sseKeys = append(sseKeys, sseKey) - } - - // Verify all parts use the same KMS key ID - for i, sseKey := range sseKeys { - if sseKey.KeyID != kmsKey.KeyID { - t.Errorf("Part %d has wrong KMS key ID: expected %s, got %s", i, kmsKey.KeyID, sseKey.KeyID) - } - } - - // Verify each part has different encrypted data keys (they should be unique) - for i := 0; i < len(sseKeys); i++ { - for j := i + 1; j < len(sseKeys); j++ { - if bytes.Equal(sseKeys[i].EncryptedDataKey, sseKeys[j].EncryptedDataKey) { - t.Errorf("Parts %d and %d have identical encrypted data keys (should be unique)", i, j) - } - } - } - }) -} - -// TestMultipartSSEMixedScenarios tests edge cases with multipart and SSE -func TestMultipartSSEMixedScenarios(t *testing.T) { - t.Run("Empty parts handling", func(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - // Test empty part - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(""), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for empty data: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted empty data: %v", err) - } - - // Empty part should produce empty encrypted data, but still have a valid IV - if len(encryptedData) != 0 { - t.Errorf("Expected empty encrypted data for empty part, got %d bytes", len(encryptedData)) - } - if len(iv) != s3_constants.AESBlockSize { - t.Errorf("Expected IV of size %d, got %d", s3_constants.AESBlockSize, len(iv)) - } - - // Decrypt and verify - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader for empty data: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted empty data: %v", err) - } - - if len(decryptedData) != 0 { - t.Errorf("Expected empty decrypted data, got %d bytes", len(decryptedData)) - } - }) - - t.Run("Single byte parts", func(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - testData := "ABCDEFGHIJ" - var encryptedParts [][]byte - var partIVs [][]byte - - // Encrypt each byte as a separate part - for i, b := range []byte(testData) { - partData := string(b) - - encryptedReader, iv, err := CreateSSECEncryptedReader(strings.NewReader(partData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for byte %d: %v", i, err) - } - - encryptedPart, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted byte %d: %v", i, err) - } - - encryptedParts = append(encryptedParts, encryptedPart) - partIVs = append(partIVs, iv) - } - - // Reconstruct - var reconstructedData strings.Builder - - for i, encryptedPart := range encryptedParts { - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedPart), customerKey, partIVs[i]) - if err != nil { - t.Fatalf("Failed to create decrypted reader for byte %d: %v", i, err) - } - - decryptedPart, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted byte %d: %v", i, err) - } - - reconstructedData.Write(decryptedPart) - } - - if reconstructedData.String() != testData { - t.Errorf("Expected %s, got %s", testData, reconstructedData.String()) - } - }) - - t.Run("Very large parts", func(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - // Create a large part (1MB) - largeData := make([]byte, 1024*1024) - for i := range largeData { - largeData[i] = byte(i % 256) - } - - // Encrypt - encryptedReader, iv, err := CreateSSECEncryptedReader(bytes.NewReader(largeData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for large data: %v", err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted large data: %v", err) - } - - // Decrypt - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader for large data: %v", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted large data: %v", err) - } - - if !bytes.Equal(decryptedData, largeData) { - t.Error("Large data doesn't match after encryption/decryption") - } - }) -} - -// TestMultipartSSEPerformance tests performance characteristics of SSE with multipart -func TestMultipartSSEPerformance(t *testing.T) { - if testing.Short() { - t.Skip("Skipping performance test in short mode") - } - - t.Run("SSE-C performance with multiple parts", func(t *testing.T) { - keyPair := GenerateTestSSECKey(1) - customerKey := &SSECustomerKey{ - Algorithm: "AES256", - Key: keyPair.Key, - KeyMD5: keyPair.KeyMD5, - } - - partSize := 64 * 1024 // 64KB parts - numParts := 10 - - for partNum := 0; partNum < numParts; partNum++ { - partData := make([]byte, partSize) - for i := range partData { - partData[i] = byte((partNum + i) % 256) - } - - // Encrypt - encryptedReader, iv, err := CreateSSECEncryptedReader(bytes.NewReader(partData), customerKey) - if err != nil { - t.Fatalf("Failed to create encrypted reader for part %d: %v", partNum, err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data for part %d: %v", partNum, err) - } - - // Decrypt - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), customerKey, iv) - if err != nil { - t.Fatalf("Failed to create decrypted reader for part %d: %v", partNum, err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data for part %d: %v", partNum, err) - } - - if !bytes.Equal(decryptedData, partData) { - t.Errorf("Data mismatch for part %d", partNum) - } - } - }) - - t.Run("SSE-KMS performance with multiple parts", func(t *testing.T) { - kmsKey := SetupTestKMS(t) - defer kmsKey.Cleanup() - - partSize := 64 * 1024 // 64KB parts - numParts := 5 // Fewer parts for KMS due to overhead - encryptionContext := BuildEncryptionContext("test-bucket", "test-object", false) - - for partNum := 0; partNum < numParts; partNum++ { - partData := make([]byte, partSize) - for i := range partData { - partData[i] = byte((partNum + i) % 256) - } - - // Encrypt - encryptedReader, sseKey, err := CreateSSEKMSEncryptedReader(bytes.NewReader(partData), kmsKey.KeyID, encryptionContext) - if err != nil { - t.Fatalf("Failed to create encrypted reader for part %d: %v", partNum, err) - } - - encryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - t.Fatalf("Failed to read encrypted data for part %d: %v", partNum, err) - } - - // Decrypt - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sseKey) - if err != nil { - t.Fatalf("Failed to create decrypted reader for part %d: %v", partNum, err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - t.Fatalf("Failed to read decrypted data for part %d: %v", partNum, err) - } - - if !bytes.Equal(decryptedData, partData) { - t.Errorf("Data mismatch for part %d", partNum) - } - } - }) -} diff --git a/weed/s3api/s3_sse_s3.go b/weed/s3api/s3_sse_s3.go deleted file mode 100644 index 6471e04fd..000000000 --- a/weed/s3api/s3_sse_s3.go +++ /dev/null @@ -1,316 +0,0 @@ -package s3api - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io" - mathrand "math/rand" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// SSE-S3 uses AES-256 encryption with server-managed keys -const ( - SSES3Algorithm = s3_constants.SSEAlgorithmAES256 - SSES3KeySize = 32 // 256 bits -) - -// SSES3Key represents a server-managed encryption key for SSE-S3 -type SSES3Key struct { - Key []byte - KeyID string - Algorithm string - IV []byte // Initialization Vector for this key -} - -// IsSSES3RequestInternal checks if the request specifies SSE-S3 encryption -func IsSSES3RequestInternal(r *http.Request) bool { - sseHeader := r.Header.Get(s3_constants.AmzServerSideEncryption) - result := sseHeader == SSES3Algorithm - - // Debug: log header detection for SSE-S3 requests - if result { - glog.V(4).Infof("SSE-S3 detection: method=%s, header=%q, expected=%q, result=%t, copySource=%q", r.Method, sseHeader, SSES3Algorithm, result, r.Header.Get("X-Amz-Copy-Source")) - } - - return result -} - -// IsSSES3EncryptedInternal checks if the object metadata indicates SSE-S3 encryption -func IsSSES3EncryptedInternal(metadata map[string][]byte) bool { - if sseAlgorithm, exists := metadata[s3_constants.AmzServerSideEncryption]; exists { - return string(sseAlgorithm) == SSES3Algorithm - } - return false -} - -// GenerateSSES3Key generates a new SSE-S3 encryption key -func GenerateSSES3Key() (*SSES3Key, error) { - key := make([]byte, SSES3KeySize) - if _, err := io.ReadFull(rand.Reader, key); err != nil { - return nil, fmt.Errorf("failed to generate SSE-S3 key: %w", err) - } - - // Generate a key ID for tracking - keyID := fmt.Sprintf("sse-s3-key-%d", mathrand.Int63()) - - return &SSES3Key{ - Key: key, - KeyID: keyID, - Algorithm: SSES3Algorithm, - }, nil -} - -// CreateSSES3EncryptedReader creates an encrypted reader for SSE-S3 -// Returns the encrypted reader and the IV for metadata storage -func CreateSSES3EncryptedReader(reader io.Reader, key *SSES3Key) (io.Reader, []byte, error) { - // Create AES cipher - block, err := aes.NewCipher(key.Key) - if err != nil { - return nil, nil, fmt.Errorf("create AES cipher: %w", err) - } - - // Generate random IV - iv := make([]byte, aes.BlockSize) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, nil, fmt.Errorf("generate IV: %w", err) - } - - // Create CTR mode cipher - stream := cipher.NewCTR(block, iv) - - // Return encrypted reader and IV separately for metadata storage - encryptedReader := &cipher.StreamReader{S: stream, R: reader} - - return encryptedReader, iv, nil -} - -// CreateSSES3DecryptedReader creates a decrypted reader for SSE-S3 using IV from metadata -func CreateSSES3DecryptedReader(reader io.Reader, key *SSES3Key, iv []byte) (io.Reader, error) { - // Create AES cipher - block, err := aes.NewCipher(key.Key) - if err != nil { - return nil, fmt.Errorf("create AES cipher: %w", err) - } - - // Create CTR mode cipher with the provided IV - stream := cipher.NewCTR(block, iv) - - return &cipher.StreamReader{S: stream, R: reader}, nil -} - -// GetSSES3Headers returns the headers for SSE-S3 encrypted objects -func GetSSES3Headers() map[string]string { - return map[string]string{ - s3_constants.AmzServerSideEncryption: SSES3Algorithm, - } -} - -// SerializeSSES3Metadata serializes SSE-S3 metadata for storage -func SerializeSSES3Metadata(key *SSES3Key) ([]byte, error) { - if err := ValidateSSES3Key(key); err != nil { - return nil, err - } - - // For SSE-S3, we typically don't store the actual key in metadata - // Instead, we store a key ID or reference that can be used to retrieve the key - // from a secure key management system - - metadata := map[string]string{ - "algorithm": key.Algorithm, - "keyId": key.KeyID, - } - - // Include IV if present (needed for chunk-level decryption) - if key.IV != nil { - metadata["iv"] = base64.StdEncoding.EncodeToString(key.IV) - } - - // Use JSON for proper serialization - data, err := json.Marshal(metadata) - if err != nil { - return nil, fmt.Errorf("marshal SSE-S3 metadata: %w", err) - } - - return data, nil -} - -// DeserializeSSES3Metadata deserializes SSE-S3 metadata from storage and retrieves the actual key -func DeserializeSSES3Metadata(data []byte, keyManager *SSES3KeyManager) (*SSES3Key, error) { - if len(data) == 0 { - return nil, fmt.Errorf("empty SSE-S3 metadata") - } - - // Parse the JSON metadata to extract keyId - var metadata map[string]string - if err := json.Unmarshal(data, &metadata); err != nil { - return nil, fmt.Errorf("failed to parse SSE-S3 metadata: %w", err) - } - - keyID, exists := metadata["keyId"] - if !exists { - return nil, fmt.Errorf("keyId not found in SSE-S3 metadata") - } - - algorithm, exists := metadata["algorithm"] - if !exists { - algorithm = s3_constants.SSEAlgorithmAES256 // Default algorithm - } - - // Retrieve the actual key using the keyId - if keyManager == nil { - return nil, fmt.Errorf("key manager is required for SSE-S3 key retrieval") - } - - key, err := keyManager.GetOrCreateKey(keyID) - if err != nil { - return nil, fmt.Errorf("failed to retrieve SSE-S3 key with ID %s: %w", keyID, err) - } - - // Verify the algorithm matches - if key.Algorithm != algorithm { - return nil, fmt.Errorf("algorithm mismatch: expected %s, got %s", algorithm, key.Algorithm) - } - - // Restore IV if present in metadata (for chunk-level decryption) - if ivStr, exists := metadata["iv"]; exists { - iv, err := base64.StdEncoding.DecodeString(ivStr) - if err != nil { - return nil, fmt.Errorf("failed to decode IV: %w", err) - } - key.IV = iv - } - - return key, nil -} - -// SSES3KeyManager manages SSE-S3 encryption keys -type SSES3KeyManager struct { - // In a production system, this would interface with a secure key management system - keys map[string]*SSES3Key -} - -// NewSSES3KeyManager creates a new SSE-S3 key manager -func NewSSES3KeyManager() *SSES3KeyManager { - return &SSES3KeyManager{ - keys: make(map[string]*SSES3Key), - } -} - -// GetOrCreateKey gets an existing key or creates a new one -func (km *SSES3KeyManager) GetOrCreateKey(keyID string) (*SSES3Key, error) { - if keyID == "" { - // Generate new key - return GenerateSSES3Key() - } - - // Check if key exists - if key, exists := km.keys[keyID]; exists { - return key, nil - } - - // Create new key - key, err := GenerateSSES3Key() - if err != nil { - return nil, err - } - - key.KeyID = keyID - km.keys[keyID] = key - - return key, nil -} - -// StoreKey stores a key in the manager -func (km *SSES3KeyManager) StoreKey(key *SSES3Key) { - km.keys[key.KeyID] = key -} - -// GetKey retrieves a key by ID -func (km *SSES3KeyManager) GetKey(keyID string) (*SSES3Key, bool) { - key, exists := km.keys[keyID] - return key, exists -} - -// Global SSE-S3 key manager instance -var globalSSES3KeyManager = NewSSES3KeyManager() - -// GetSSES3KeyManager returns the global SSE-S3 key manager -func GetSSES3KeyManager() *SSES3KeyManager { - return globalSSES3KeyManager -} - -// ProcessSSES3Request processes an SSE-S3 request and returns encryption metadata -func ProcessSSES3Request(r *http.Request) (map[string][]byte, error) { - if !IsSSES3RequestInternal(r) { - return nil, nil - } - - // Generate or retrieve encryption key - keyManager := GetSSES3KeyManager() - key, err := keyManager.GetOrCreateKey("") - if err != nil { - return nil, fmt.Errorf("get SSE-S3 key: %w", err) - } - - // Serialize key metadata - keyData, err := SerializeSSES3Metadata(key) - if err != nil { - return nil, fmt.Errorf("serialize SSE-S3 metadata: %w", err) - } - - // Store key in manager - keyManager.StoreKey(key) - - // Return metadata - metadata := map[string][]byte{ - s3_constants.AmzServerSideEncryption: []byte(SSES3Algorithm), - s3_constants.SeaweedFSSSES3Key: keyData, - } - - return metadata, nil -} - -// GetSSES3KeyFromMetadata extracts SSE-S3 key from object metadata -func GetSSES3KeyFromMetadata(metadata map[string][]byte, keyManager *SSES3KeyManager) (*SSES3Key, error) { - keyData, exists := metadata[s3_constants.SeaweedFSSSES3Key] - if !exists { - return nil, fmt.Errorf("SSE-S3 key not found in metadata") - } - - return DeserializeSSES3Metadata(keyData, keyManager) -} - -// CreateSSES3EncryptedReaderWithBaseIV creates an encrypted reader using a base IV for multipart upload consistency. -// The returned IV is the offset-derived IV, calculated from the input baseIV and offset. -func CreateSSES3EncryptedReaderWithBaseIV(reader io.Reader, key *SSES3Key, baseIV []byte, offset int64) (io.Reader, []byte /* derivedIV */, error) { - // Validate key to prevent panics and security issues - if key == nil { - return nil, nil, fmt.Errorf("SSES3Key is nil") - } - if key.Key == nil || len(key.Key) != SSES3KeySize { - return nil, nil, fmt.Errorf("invalid SSES3Key: must be %d bytes, got %d", SSES3KeySize, len(key.Key)) - } - if err := ValidateSSES3Key(key); err != nil { - return nil, nil, err - } - - block, err := aes.NewCipher(key.Key) - if err != nil { - return nil, nil, fmt.Errorf("create AES cipher: %w", err) - } - - // Calculate the proper IV with offset to ensure unique IV per chunk/part - // This prevents the severe security vulnerability of IV reuse in CTR mode - iv := calculateIVWithOffset(baseIV, offset) - - stream := cipher.NewCTR(block, iv) - encryptedReader := &cipher.StreamReader{S: stream, R: reader} - return encryptedReader, iv, nil -} diff --git a/weed/s3api/s3_sse_test_utils_test.go b/weed/s3api/s3_sse_test_utils_test.go deleted file mode 100644 index 1c57be791..000000000 --- a/weed/s3api/s3_sse_test_utils_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package s3api - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/kms/local" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestKeyPair represents a test SSE-C key pair -type TestKeyPair struct { - Key []byte - KeyB64 string - KeyMD5 string -} - -// TestSSEKMSKey represents a test SSE-KMS key -type TestSSEKMSKey struct { - KeyID string - Cleanup func() -} - -// GenerateTestSSECKey creates a test SSE-C key pair -func GenerateTestSSECKey(seed byte) *TestKeyPair { - key := make([]byte, 32) // 256-bit key - for i := range key { - key[i] = seed + byte(i) - } - - keyB64 := base64.StdEncoding.EncodeToString(key) - md5sum := md5.Sum(key) - keyMD5 := base64.StdEncoding.EncodeToString(md5sum[:]) - - return &TestKeyPair{ - Key: key, - KeyB64: keyB64, - KeyMD5: keyMD5, - } -} - -// SetupTestSSECHeaders sets SSE-C headers on an HTTP request -func SetupTestSSECHeaders(req *http.Request, keyPair *TestKeyPair) { - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKey, keyPair.KeyB64) - req.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, keyPair.KeyMD5) -} - -// SetupTestSSECCopyHeaders sets SSE-C copy source headers on an HTTP request -func SetupTestSSECCopyHeaders(req *http.Request, keyPair *TestKeyPair) { - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerAlgorithm, "AES256") - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerKey, keyPair.KeyB64) - req.Header.Set(s3_constants.AmzCopySourceServerSideEncryptionCustomerKeyMD5, keyPair.KeyMD5) -} - -// SetupTestKMS initializes a local KMS provider for testing -func SetupTestKMS(t *testing.T) *TestSSEKMSKey { - // Initialize local KMS provider directly - provider, err := local.NewLocalKMSProvider(nil) - if err != nil { - t.Fatalf("Failed to create local KMS provider: %v", err) - } - - // Set it as the global provider - kms.SetGlobalKMSProvider(provider) - - // Create a test key - localProvider := provider.(*local.LocalKMSProvider) - testKey, err := localProvider.CreateKey("Test key for SSE-KMS", []string{"test-key"}) - if err != nil { - t.Fatalf("Failed to create test key: %v", err) - } - - // Cleanup function - cleanup := func() { - kms.SetGlobalKMSProvider(nil) // Clear global KMS - if err := provider.Close(); err != nil { - t.Logf("Warning: Failed to close KMS provider: %v", err) - } - } - - return &TestSSEKMSKey{ - KeyID: testKey.KeyID, - Cleanup: cleanup, - } -} - -// SetupTestSSEKMSHeaders sets SSE-KMS headers on an HTTP request -func SetupTestSSEKMSHeaders(req *http.Request, keyID string) { - req.Header.Set(s3_constants.AmzServerSideEncryption, "aws:kms") - if keyID != "" { - req.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, keyID) - } -} - -// CreateTestMetadata creates test metadata with SSE information -func CreateTestMetadata() map[string][]byte { - return make(map[string][]byte) -} - -// CreateTestMetadataWithSSEC creates test metadata containing SSE-C information -func CreateTestMetadataWithSSEC(keyPair *TestKeyPair) map[string][]byte { - metadata := CreateTestMetadata() - metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(keyPair.KeyMD5) - // Add encryption IV and other encrypted data that would be stored - iv := make([]byte, 16) - for i := range iv { - iv[i] = byte(i) - } - StoreIVInMetadata(metadata, iv) - return metadata -} - -// CreateTestMetadataWithSSEKMS creates test metadata containing SSE-KMS information -func CreateTestMetadataWithSSEKMS(sseKey *SSEKMSKey) map[string][]byte { - metadata := CreateTestMetadata() - metadata[s3_constants.AmzServerSideEncryption] = []byte("aws:kms") - if sseKey != nil { - serialized, _ := SerializeSSEKMSMetadata(sseKey) - metadata[s3_constants.AmzEncryptedDataKey] = sseKey.EncryptedDataKey - metadata[s3_constants.AmzEncryptionContextMeta] = serialized - } - return metadata -} - -// CreateTestHTTPRequest creates a test HTTP request with optional SSE headers -func CreateTestHTTPRequest(method, path string, body []byte) *http.Request { - var bodyReader io.Reader - if body != nil { - bodyReader = bytes.NewReader(body) - } - - req := httptest.NewRequest(method, path, bodyReader) - return req -} - -// CreateTestHTTPResponse creates a test HTTP response recorder -func CreateTestHTTPResponse() *httptest.ResponseRecorder { - return httptest.NewRecorder() -} - -// SetupTestMuxVars sets up mux variables for testing -func SetupTestMuxVars(req *http.Request, vars map[string]string) { - mux.SetURLVars(req, vars) -} - -// AssertSSECHeaders verifies that SSE-C response headers are set correctly -func AssertSSECHeaders(t *testing.T, w *httptest.ResponseRecorder, keyPair *TestKeyPair) { - algorithm := w.Header().Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - if algorithm != "AES256" { - t.Errorf("Expected algorithm AES256, got %s", algorithm) - } - - keyMD5 := w.Header().Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - if keyMD5 != keyPair.KeyMD5 { - t.Errorf("Expected key MD5 %s, got %s", keyPair.KeyMD5, keyMD5) - } -} - -// AssertSSEKMSHeaders verifies that SSE-KMS response headers are set correctly -func AssertSSEKMSHeaders(t *testing.T, w *httptest.ResponseRecorder, keyID string) { - algorithm := w.Header().Get(s3_constants.AmzServerSideEncryption) - if algorithm != "aws:kms" { - t.Errorf("Expected algorithm aws:kms, got %s", algorithm) - } - - if keyID != "" { - responseKeyID := w.Header().Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - if responseKeyID != keyID { - t.Errorf("Expected key ID %s, got %s", keyID, responseKeyID) - } - } -} - -// CreateCorruptedSSECMetadata creates intentionally corrupted SSE-C metadata for testing -func CreateCorruptedSSECMetadata() map[string][]byte { - metadata := CreateTestMetadata() - // Missing algorithm - metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte("invalid-md5") - return metadata -} - -// CreateCorruptedSSEKMSMetadata creates intentionally corrupted SSE-KMS metadata for testing -func CreateCorruptedSSEKMSMetadata() map[string][]byte { - metadata := CreateTestMetadata() - metadata[s3_constants.AmzServerSideEncryption] = []byte("aws:kms") - // Invalid encrypted data key - metadata[s3_constants.AmzEncryptedDataKey] = []byte("invalid-base64!") - return metadata -} - -// TestDataSizes provides various data sizes for testing -var TestDataSizes = []int{ - 0, // Empty - 1, // Single byte - 15, // Less than AES block size - 16, // Exactly AES block size - 17, // More than AES block size - 1024, // 1KB - 65536, // 64KB - 1048576, // 1MB -} - -// GenerateTestData creates test data of specified size -func GenerateTestData(size int) []byte { - data := make([]byte, size) - for i := range data { - data[i] = byte(i % 256) - } - return data -} diff --git a/weed/s3api/s3_sse_utils.go b/weed/s3api/s3_sse_utils.go deleted file mode 100644 index 848bc61ea..000000000 --- a/weed/s3api/s3_sse_utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package s3api - -import "github.com/seaweedfs/seaweedfs/weed/glog" - -// calculateIVWithOffset calculates a unique IV by combining a base IV with an offset. -// This ensures each chunk/part uses a unique IV, preventing CTR mode IV reuse vulnerabilities. -// This function is shared between SSE-KMS and SSE-S3 implementations for consistency. -func calculateIVWithOffset(baseIV []byte, offset int64) []byte { - if len(baseIV) != 16 { - glog.Errorf("Invalid base IV length: expected 16, got %d", len(baseIV)) - return baseIV // Return original IV as fallback - } - - // Create a copy of the base IV to avoid modifying the original - iv := make([]byte, 16) - copy(iv, baseIV) - - // Calculate the block offset (AES block size is 16 bytes) - blockOffset := offset / 16 - originalBlockOffset := blockOffset - - // Add the block offset to the IV counter (last 8 bytes, big-endian) - // This matches how AES-CTR mode increments the counter - // Process from least significant byte (index 15) to most significant byte (index 8) - carry := uint64(0) - for i := 15; i >= 8; i-- { - sum := uint64(iv[i]) + uint64(blockOffset&0xFF) + carry - iv[i] = byte(sum & 0xFF) - carry = sum >> 8 - blockOffset = blockOffset >> 8 - - // If no more blockOffset bits and no carry, we can stop early - if blockOffset == 0 && carry == 0 { - break - } - } - - // Single consolidated debug log to avoid performance impact in high-throughput scenarios - glog.V(4).Infof("calculateIVWithOffset: baseIV=%x, offset=%d, blockOffset=%d, derivedIV=%x", - baseIV, offset, originalBlockOffset, iv) - return iv -} diff --git a/weed/s3api/s3_token_differentiation_test.go b/weed/s3api/s3_token_differentiation_test.go deleted file mode 100644 index cf61703ad..000000000 --- a/weed/s3api/s3_token_differentiation_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package s3api - -import ( - "strings" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/stretchr/testify/assert" -) - -func TestS3IAMIntegration_isSTSIssuer(t *testing.T) { - // Create test STS service with configuration - stsService := sts.NewSTSService() - - // Set up STS configuration with a specific issuer - testIssuer := "https://seaweedfs-prod.company.com/sts" - stsConfig := &sts.STSConfig{ - Issuer: testIssuer, - SigningKey: []byte("test-signing-key-32-characters-long"), - TokenDuration: sts.FlexibleDuration{time.Hour}, - MaxSessionLength: sts.FlexibleDuration{12 * time.Hour}, // Required field - } - - // Initialize STS service with config (this sets the Config field) - err := stsService.Initialize(stsConfig) - assert.NoError(t, err) - - // Create S3IAM integration with configured STS service - s3iam := &S3IAMIntegration{ - iamManager: &integration.IAMManager{}, // Mock - stsService: stsService, - filerAddress: "test-filer:8888", - enabled: true, - } - - tests := []struct { - name string - issuer string - expected bool - }{ - // Only exact match should return true - { - name: "exact match with configured issuer", - issuer: testIssuer, - expected: true, - }, - // All other issuers should return false (exact matching) - { - name: "similar but not exact issuer", - issuer: "https://seaweedfs-prod.company.com/sts2", - expected: false, - }, - { - name: "substring of configured issuer", - issuer: "seaweedfs-prod.company.com", - expected: false, - }, - { - name: "contains configured issuer as substring", - issuer: "prefix-" + testIssuer + "-suffix", - expected: false, - }, - { - name: "case sensitive - different case", - issuer: strings.ToUpper(testIssuer), - expected: false, - }, - { - name: "Google OIDC", - issuer: "https://accounts.google.com", - expected: false, - }, - { - name: "Azure AD", - issuer: "https://login.microsoftonline.com/tenant-id/v2.0", - expected: false, - }, - { - name: "Auth0", - issuer: "https://mycompany.auth0.com", - expected: false, - }, - { - name: "Keycloak", - issuer: "https://keycloak.mycompany.com/auth/realms/master", - expected: false, - }, - { - name: "Empty string", - issuer: "", - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := s3iam.isSTSIssuer(tt.issuer) - assert.Equal(t, tt.expected, result, "isSTSIssuer should use exact matching against configured issuer") - }) - } -} - -func TestS3IAMIntegration_isSTSIssuer_NoSTSService(t *testing.T) { - // Create S3IAM integration without STS service - s3iam := &S3IAMIntegration{ - iamManager: &integration.IAMManager{}, - stsService: nil, // No STS service - filerAddress: "test-filer:8888", - enabled: true, - } - - // Should return false when STS service is not available - result := s3iam.isSTSIssuer("seaweedfs-sts") - assert.False(t, result, "isSTSIssuer should return false when STS service is nil") -} diff --git a/weed/s3api/s3_validation_utils.go b/weed/s3api/s3_validation_utils.go deleted file mode 100644 index da53342b1..000000000 --- a/weed/s3api/s3_validation_utils.go +++ /dev/null @@ -1,75 +0,0 @@ -package s3api - -import ( - "fmt" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// isValidKMSKeyID performs basic validation of KMS key identifiers. -// Following Minio's approach: be permissive and accept any reasonable key format. -// Only reject keys with leading/trailing spaces or other obvious issues. -// -// This function is used across multiple S3 API handlers to ensure consistent -// validation of KMS key IDs in various contexts (bucket encryption, object operations, etc.). -func isValidKMSKeyID(keyID string) bool { - // Reject empty keys - if keyID == "" { - return false - } - - // Following Minio's validation: reject keys with leading/trailing spaces - if strings.HasPrefix(keyID, " ") || strings.HasSuffix(keyID, " ") { - return false - } - - // Also reject keys with internal spaces (common sense validation) - if strings.Contains(keyID, " ") { - return false - } - - // Reject keys with control characters or newlines - if strings.ContainsAny(keyID, "\t\n\r\x00") { - return false - } - - // Accept any reasonable length key (be permissive for various KMS providers) - if len(keyID) > 0 && len(keyID) <= s3_constants.MaxKMSKeyIDLength { - return true - } - - return false -} - -// ValidateIV validates that an initialization vector has the correct length for AES encryption -func ValidateIV(iv []byte, name string) error { - if len(iv) != s3_constants.AESBlockSize { - return fmt.Errorf("invalid %s length: expected %d bytes, got %d", name, s3_constants.AESBlockSize, len(iv)) - } - return nil -} - -// ValidateSSEKMSKey validates that an SSE-KMS key is not nil and has required fields -func ValidateSSEKMSKey(sseKey *SSEKMSKey) error { - if sseKey == nil { - return fmt.Errorf("SSE-KMS key cannot be nil") - } - return nil -} - -// ValidateSSECKey validates that an SSE-C key is not nil -func ValidateSSECKey(customerKey *SSECustomerKey) error { - if customerKey == nil { - return fmt.Errorf("SSE-C customer key cannot be nil") - } - return nil -} - -// ValidateSSES3Key validates that an SSE-S3 key is not nil -func ValidateSSES3Key(sseKey *SSES3Key) error { - if sseKey == nil { - return fmt.Errorf("SSE-S3 key cannot be nil") - } - return nil -} diff --git a/weed/s3api/s3api_acl_helper.go b/weed/s3api/s3api_acl_helper.go deleted file mode 100644 index 6cfa17f34..000000000 --- a/weed/s3api/s3api_acl_helper.go +++ /dev/null @@ -1,514 +0,0 @@ -package s3api - -import ( - "encoding/json" - "encoding/xml" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" -) - -type AccountManager interface { - GetAccountNameById(canonicalId string) string - GetAccountIdByEmail(email string) string -} - -// GetAccountId get AccountId from request headers, AccountAnonymousId will be return if not presen -func GetAccountId(r *http.Request) string { - id := r.Header.Get(s3_constants.AmzAccountId) - if len(id) == 0 { - return s3_constants.AccountAnonymousId - } else { - return id - } -} - -// ExtractAcl extracts the acl from the request body, or from the header if request body is empty -func ExtractAcl(r *http.Request, accountManager AccountManager, ownership, bucketOwnerId, ownerId, accountId string) (grants []*s3.Grant, errCode s3err.ErrorCode) { - if r.Body != nil && r.Body != http.NoBody { - defer util_http.CloseRequest(r) - - var acp s3.AccessControlPolicy - err := xmlutil.UnmarshalXML(&acp, xml.NewDecoder(r.Body), "") - if err != nil || acp.Owner == nil || acp.Owner.ID == nil { - return nil, s3err.ErrInvalidRequest - } - - //owner should present && owner is immutable - if *acp.Owner.ID != ownerId { - glog.V(3).Infof("set acl denied! owner account is not consistent, request account id: %s, expect account id: %s", accountId, ownerId) - return nil, s3err.ErrAccessDenied - } - - return ValidateAndTransferGrants(accountManager, acp.Grants) - } else { - _, grants, errCode = ParseAndValidateAclHeadersOrElseDefault(r, accountManager, ownership, bucketOwnerId, accountId, true) - return grants, errCode - } -} - -// ParseAndValidateAclHeadersOrElseDefault will callParseAndValidateAclHeaders to get Grants, if empty, it will return Grant that grant `accountId` with `FullControl` permission -func ParseAndValidateAclHeadersOrElseDefault(r *http.Request, accountManager AccountManager, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) { - ownerId, grants, errCode = ParseAndValidateAclHeaders(r, accountManager, ownership, bucketOwnerId, accountId, putAcl) - if errCode != s3err.ErrNone { - return - } - if len(grants) == 0 { - //if no acl(both customAcl and cannedAcl) specified, grant accountId(object writer) with full control permission - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &s3_constants.PermissionFullControl, - }) - } - return -} - -// ParseAndValidateAclHeaders parse and validate acl from header -func ParseAndValidateAclHeaders(r *http.Request, accountManager AccountManager, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) { - ownerId, grants, errCode = ParseAclHeaders(r, ownership, bucketOwnerId, accountId, putAcl) - if errCode != s3err.ErrNone { - return - } - if len(grants) > 0 { - grants, errCode = ValidateAndTransferGrants(accountManager, grants) - } - return -} - -// ParseAclHeaders parse acl headers -// When `putAcl` is true, only `CannedAcl` is parsed, such as `PutBucketAcl` or `PutObjectAcl` -// is requested, `CustomAcl` is parsed from the request body not from headers, and only if the -// request body is empty, `CannedAcl` is parsed from the header, and will not parse `CustomAcl` from the header -// -// Since `CustomAcl` has higher priority, it will be parsed first; if `CustomAcl` does not exist, `CannedAcl` will be parsed -func ParseAclHeaders(r *http.Request, ownership, bucketOwnerId, accountId string, putAcl bool) (ownerId string, grants []*s3.Grant, errCode s3err.ErrorCode) { - if !putAcl { - errCode = ParseCustomAclHeaders(r, &grants) - if errCode != s3err.ErrNone { - return "", nil, errCode - } - } - if len(grants) > 0 { - return accountId, grants, s3err.ErrNone - } - - cannedAcl := r.Header.Get(s3_constants.AmzCannedAcl) - if len(cannedAcl) == 0 { - return accountId, grants, s3err.ErrNone - } - - //if canned acl specified, parse cannedAcl (lower priority to custom acl) - ownerId, grants, errCode = ParseCannedAclHeader(ownership, bucketOwnerId, accountId, cannedAcl, putAcl) - if errCode != s3err.ErrNone { - return "", nil, errCode - } - return ownerId, grants, errCode -} - -func ParseCustomAclHeaders(r *http.Request, grants *[]*s3.Grant) s3err.ErrorCode { - customAclHeaders := []string{s3_constants.AmzAclFullControl, s3_constants.AmzAclRead, s3_constants.AmzAclReadAcp, s3_constants.AmzAclWrite, s3_constants.AmzAclWriteAcp} - var errCode s3err.ErrorCode - for _, customAclHeader := range customAclHeaders { - headerValue := r.Header.Get(customAclHeader) - switch customAclHeader { - case s3_constants.AmzAclRead: - errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionRead, grants) - case s3_constants.AmzAclWrite: - errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWrite, grants) - case s3_constants.AmzAclReadAcp: - errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionReadAcp, grants) - case s3_constants.AmzAclWriteAcp: - errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionWriteAcp, grants) - case s3_constants.AmzAclFullControl: - errCode = ParseCustomAclHeader(headerValue, s3_constants.PermissionFullControl, grants) - } - if errCode != s3err.ErrNone { - return errCode - } - } - return s3err.ErrNone -} - -func ParseCustomAclHeader(headerValue, permission string, grants *[]*s3.Grant) s3err.ErrorCode { - if len(headerValue) > 0 { - split := strings.Split(headerValue, ", ") - for _, grantStr := range split { - kv := strings.Split(grantStr, "=") - if len(kv) != 2 { - return s3err.ErrInvalidRequest - } - - switch kv[0] { - case "id": - var accountId string - _ = json.Unmarshal([]byte(kv[1]), &accountId) - *grants = append(*grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &permission, - }) - case "emailAddress": - var emailAddress string - _ = json.Unmarshal([]byte(kv[1]), &emailAddress) - *grants = append(*grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeAmazonCustomerByEmail, - EmailAddress: &emailAddress, - }, - Permission: &permission, - }) - case "uri": - var groupName string - _ = json.Unmarshal([]byte(kv[1]), &groupName) - *grants = append(*grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &groupName, - }, - Permission: &permission, - }) - } - } - } - return s3err.ErrNone - -} - -func ParseCannedAclHeader(bucketOwnership, bucketOwnerId, accountId, cannedAcl string, putAcl bool) (ownerId string, grants []*s3.Grant, err s3err.ErrorCode) { - err = s3err.ErrNone - ownerId = accountId - - //objectWrite automatically has full control on current object - objectWriterFullControl := &s3.Grant{ - Grantee: &s3.Grantee{ - ID: &accountId, - Type: &s3_constants.GrantTypeCanonicalUser, - }, - Permission: &s3_constants.PermissionFullControl, - } - - switch cannedAcl { - case s3_constants.CannedAclPrivate: - grants = append(grants, objectWriterFullControl) - case s3_constants.CannedAclPublicRead: - grants = append(grants, objectWriterFullControl) - grants = append(grants, s3_constants.PublicRead...) - case s3_constants.CannedAclPublicReadWrite: - grants = append(grants, objectWriterFullControl) - grants = append(grants, s3_constants.PublicReadWrite...) - case s3_constants.CannedAclAuthenticatedRead: - grants = append(grants, objectWriterFullControl) - grants = append(grants, s3_constants.AuthenticatedRead...) - case s3_constants.CannedAclLogDeliveryWrite: - grants = append(grants, objectWriterFullControl) - grants = append(grants, s3_constants.LogDeliveryWrite...) - case s3_constants.CannedAclBucketOwnerRead: - grants = append(grants, objectWriterFullControl) - if bucketOwnerId != "" && bucketOwnerId != accountId { - grants = append(grants, - &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &bucketOwnerId, - }, - Permission: &s3_constants.PermissionRead, - }) - } - case s3_constants.CannedAclBucketOwnerFullControl: - if bucketOwnerId != "" { - // if set ownership to 'BucketOwnerPreferred' when upload object, the bucket owner will be the object owner - if !putAcl && bucketOwnership == s3_constants.OwnershipBucketOwnerPreferred { - ownerId = bucketOwnerId - grants = append(grants, - &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &bucketOwnerId, - }, - Permission: &s3_constants.PermissionFullControl, - }) - } else { - grants = append(grants, objectWriterFullControl) - if accountId != bucketOwnerId { - grants = append(grants, - &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &bucketOwnerId, - }, - Permission: &s3_constants.PermissionFullControl, - }) - } - } - } - case s3_constants.CannedAclAwsExecRead: - err = s3err.ErrNotImplemented - default: - err = s3err.ErrInvalidRequest - } - return -} - -// ValidateAndTransferGrants validate grant & transfer Email-Grant to Id-Grant -func ValidateAndTransferGrants(accountManager AccountManager, grants []*s3.Grant) ([]*s3.Grant, s3err.ErrorCode) { - var result []*s3.Grant - for _, grant := range grants { - grantee := grant.Grantee - if grantee == nil || grantee.Type == nil { - glog.Warning("invalid grantee! grantee or granteeType is nil") - return nil, s3err.ErrInvalidRequest - } - - switch *grantee.Type { - case s3_constants.GrantTypeGroup: - if grantee.URI == nil { - glog.Warning("invalid group grantee! group URI is nil") - return nil, s3err.ErrInvalidRequest - } - ok := s3_constants.ValidateGroup(*grantee.URI) - if !ok { - glog.Warningf("invalid group grantee! group name[%s] is not valid", *grantee.URI) - return nil, s3err.ErrInvalidRequest - } - result = append(result, grant) - case s3_constants.GrantTypeCanonicalUser: - if grantee.ID == nil { - glog.Warning("invalid canonical grantee! account id is nil") - return nil, s3err.ErrInvalidRequest - } - name := accountManager.GetAccountNameById(*grantee.ID) - if len(name) == 0 { - glog.Warningf("invalid canonical grantee! account id[%s] is not exists", *grantee.ID) - return nil, s3err.ErrInvalidRequest - } - result = append(result, grant) - case s3_constants.GrantTypeAmazonCustomerByEmail: - if grantee.EmailAddress == nil { - glog.Warning("invalid email grantee! email address is nil") - return nil, s3err.ErrInvalidRequest - } - accountId := accountManager.GetAccountIdByEmail(*grantee.EmailAddress) - if len(accountId) == 0 { - glog.Warningf("invalid email grantee! email address[%s] is not exists", *grantee.EmailAddress) - return nil, s3err.ErrInvalidRequest - } - result = append(result, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: grant.Permission, - }) - default: - return nil, s3err.ErrInvalidRequest - } - } - return result, s3err.ErrNone -} - -// DetermineReqGrants generates the grant set (Grants) according to accountId and reqPermission. -func DetermineReqGrants(accountId, aclAction string) (grants []*s3.Grant) { - // group grantee (AllUsers) - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &aclAction, - }) - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }) - - // canonical grantee (accountId) - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &aclAction, - }) - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &s3_constants.PermissionFullControl, - }) - - // group grantee (AuthenticateUsers) - if accountId != s3_constants.AccountAnonymousId { - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAuthenticatedUsers, - }, - Permission: &aclAction, - }) - grants = append(grants, &s3.Grant{ - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAuthenticatedUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }) - } - return -} - -func SetAcpOwnerHeader(r *http.Request, acpOwnerId string) { - r.Header.Set(s3_constants.ExtAmzOwnerKey, acpOwnerId) -} - -func GetAcpOwner(entryExtended map[string][]byte, defaultOwner string) string { - ownerIdBytes, ok := entryExtended[s3_constants.ExtAmzOwnerKey] - if ok && len(ownerIdBytes) > 0 { - return string(ownerIdBytes) - } - return defaultOwner -} - -func SetAcpGrantsHeader(r *http.Request, acpGrants []*s3.Grant) { - if len(acpGrants) > 0 { - a, err := json.Marshal(acpGrants) - if err == nil { - r.Header.Set(s3_constants.ExtAmzAclKey, string(a)) - } else { - glog.Warning("Marshal acp grants err", err) - } - } -} - -// GetAcpGrants return grants parsed from entry -func GetAcpGrants(entryExtended map[string][]byte) []*s3.Grant { - acpBytes, ok := entryExtended[s3_constants.ExtAmzAclKey] - if ok && len(acpBytes) > 0 { - var grants []*s3.Grant - err := json.Unmarshal(acpBytes, &grants) - if err == nil { - return grants - } - } - return nil -} - -// AssembleEntryWithAcp fill entry with owner and grants -func AssembleEntryWithAcp(objectEntry *filer_pb.Entry, objectOwner string, grants []*s3.Grant) s3err.ErrorCode { - if objectEntry.Extended == nil { - objectEntry.Extended = make(map[string][]byte) - } - - if len(objectOwner) > 0 { - objectEntry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(objectOwner) - } else { - delete(objectEntry.Extended, s3_constants.ExtAmzOwnerKey) - } - - if len(grants) > 0 { - grantsBytes, err := json.Marshal(grants) - if err != nil { - glog.Warning("assemble acp to entry:", err) - return s3err.ErrInvalidRequest - } - objectEntry.Extended[s3_constants.ExtAmzAclKey] = grantsBytes - } else { - delete(objectEntry.Extended, s3_constants.ExtAmzAclKey) - } - - return s3err.ErrNone -} - -// GrantEquals Compare whether two Grants are equal in meaning, not completely -// equal (compare Grantee.Type and the corresponding Value for equality, other -// fields of Grantee are ignored) -func GrantEquals(a, b *s3.Grant) bool { - // grant - if a == b { - return true - } - - if a == nil || b == nil { - return false - } - - // grant.Permission - if a.Permission != b.Permission { - if a.Permission == nil || b.Permission == nil { - return false - } - - if *a.Permission != *b.Permission { - return false - } - } - - // grant.Grantee - ag := a.Grantee - bg := b.Grantee - if ag != bg { - if ag == nil || bg == nil { - return false - } - // grantee.Type - if ag.Type != bg.Type { - if ag.Type == nil || bg.Type == nil { - return false - } - if *ag.Type != *bg.Type { - return false - } - } - // value corresponding to granteeType - if ag.Type != nil { - switch *ag.Type { - case s3_constants.GrantTypeGroup: - if ag.URI != bg.URI { - if ag.URI == nil || bg.URI == nil { - return false - } - - if *ag.URI != *bg.URI { - return false - } - } - case s3_constants.GrantTypeCanonicalUser: - if ag.ID != bg.ID { - if ag.ID == nil || bg.ID == nil { - return false - } - - if *ag.ID != *bg.ID { - return false - } - } - case s3_constants.GrantTypeAmazonCustomerByEmail: - if ag.EmailAddress != bg.EmailAddress { - if ag.EmailAddress == nil || bg.EmailAddress == nil { - return false - } - - if *ag.EmailAddress != *bg.EmailAddress { - return false - } - } - } - } - } - return true -} diff --git a/weed/s3api/s3api_acl_helper_test.go b/weed/s3api/s3api_acl_helper_test.go deleted file mode 100644 index 81ce25575..000000000 --- a/weed/s3api/s3api_acl_helper_test.go +++ /dev/null @@ -1,709 +0,0 @@ -package s3api - -import ( - "bytes" - "encoding/json" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "io" - "net/http" - "testing" -) - -var accountManager *IdentityAccessManagement - -func init() { - accountManager = &IdentityAccessManagement{} - _ = accountManager.loadS3ApiConfiguration(&iam_pb.S3ApiConfiguration{ - Accounts: []*iam_pb.Account{ - { - Id: "accountA", - DisplayName: "accountAName", - EmailAddress: "accountA@example.com", - }, - { - Id: "accountB", - DisplayName: "accountBName", - EmailAddress: "accountB@example.com", - }, - }, - }) -} - -func TestGetAccountId(t *testing.T) { - req := &http.Request{ - Header: make(map[string][]string), - } - //case1 - //accountId: "admin" - req.Header.Set(s3_constants.AmzAccountId, s3_constants.AccountAdminId) - if GetAccountId(req) != s3_constants.AccountAdminId { - t.Fatal("expect accountId: admin") - } - - //case2 - //accountId: "anoymous" - req.Header.Set(s3_constants.AmzAccountId, s3_constants.AccountAnonymousId) - if GetAccountId(req) != s3_constants.AccountAnonymousId { - t.Fatal("expect accountId: anonymous") - } - - //case3 - //accountId is nil => "anonymous" - req.Header.Del(s3_constants.AmzAccountId) - if GetAccountId(req) != s3_constants.AccountAnonymousId { - t.Fatal("expect accountId: anonymous") - } -} - -func TestExtractAcl(t *testing.T) { - type Case struct { - id int - resultErrCode, expectErrCode s3err.ErrorCode - resultGrants, expectGrants []*s3.Grant - } - testCases := make([]*Case, 0) - accountAdminId := "admin" - { - //case1 (good case) - //parse acp from request body - req := &http.Request{ - Header: make(map[string][]string), - } - req.Body = io.NopCloser(bytes.NewReader([]byte(` - - - admin - admin - - - - - admin - - FULL_CONTROL - - - - http://acs.amazonaws.com/groups/global/AllUsers - - FULL_CONTROL - - - - `))) - objectWriter := "accountA" - grants, errCode := ExtractAcl(req, accountManager, s3_constants.OwnershipObjectWriter, accountAdminId, accountAdminId, objectWriter) - testCases = append(testCases, &Case{ - 1, - errCode, s3err.ErrNone, - grants, []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountAdminId, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }, - }, - }) - } - - { - //case2 (good case) - //parse acp from header (cannedAcl) - req := &http.Request{ - Header: make(map[string][]string), - } - req.Body = nil - req.Header.Set(s3_constants.AmzCannedAcl, s3_constants.CannedAclPrivate) - objectWriter := "accountA" - grants, errCode := ExtractAcl(req, accountManager, s3_constants.OwnershipObjectWriter, accountAdminId, accountAdminId, objectWriter) - testCases = append(testCases, &Case{ - 2, - errCode, s3err.ErrNone, - grants, []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &objectWriter, - }, - Permission: &s3_constants.PermissionFullControl, - }, - }, - }) - } - - { - //case3 (bad case) - //parse acp from request body (content is invalid) - req := &http.Request{ - Header: make(map[string][]string), - } - req.Body = io.NopCloser(bytes.NewReader([]byte("zdfsaf"))) - req.Header.Set(s3_constants.AmzCannedAcl, s3_constants.CannedAclPrivate) - objectWriter := "accountA" - _, errCode := ExtractAcl(req, accountManager, s3_constants.OwnershipObjectWriter, accountAdminId, accountAdminId, objectWriter) - testCases = append(testCases, &Case{ - id: 3, - resultErrCode: errCode, expectErrCode: s3err.ErrInvalidRequest, - }) - } - - //case4 (bad case) - //parse acp from header (cannedAcl is invalid) - req := &http.Request{ - Header: make(map[string][]string), - } - req.Body = nil - req.Header.Set(s3_constants.AmzCannedAcl, "dfaksjfk") - objectWriter := "accountA" - _, errCode := ExtractAcl(req, accountManager, s3_constants.OwnershipObjectWriter, accountAdminId, "", objectWriter) - testCases = append(testCases, &Case{ - id: 4, - resultErrCode: errCode, expectErrCode: s3err.ErrInvalidRequest, - }) - - { - //case5 (bad case) - //parse acp from request body: owner is inconsistent - req.Body = io.NopCloser(bytes.NewReader([]byte(` - - - admin - admin - - - - - admin - - FULL_CONTROL - - - - http://acs.amazonaws.com/groups/global/AllUsers - - FULL_CONTROL - - - - `))) - objectWriter = "accountA" - _, errCode := ExtractAcl(req, accountManager, s3_constants.OwnershipObjectWriter, accountAdminId, objectWriter, objectWriter) - testCases = append(testCases, &Case{ - id: 5, - resultErrCode: errCode, expectErrCode: s3err.ErrAccessDenied, - }) - } - - for _, tc := range testCases { - if tc.resultErrCode != tc.expectErrCode { - t.Fatalf("case[%d]: errorCode not expect", tc.id) - } - if !grantsEquals(tc.resultGrants, tc.expectGrants) { - t.Fatalf("case[%d]: grants not expect", tc.id) - } - } -} - -func TestParseAndValidateAclHeaders(t *testing.T) { - type Case struct { - id int - resultOwner, expectOwner string - resultErrCode, expectErrCode s3err.ErrorCode - resultGrants, expectGrants []*s3.Grant - } - testCases := make([]*Case, 0) - bucketOwner := "admin" - - { - //case1 (good case) - //parse custom acl - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzAclFullControl, `uri="http://acs.amazonaws.com/groups/global/AllUsers", id="anonymous", emailAddress="admin@example.com"`) - ownerId, grants, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipObjectWriter, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - 1, - ownerId, objectWriter, - errCode, s3err.ErrNone, - grants, []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: aws.String(s3_constants.AccountAnonymousId), - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: aws.String(s3_constants.AccountAdminId), - }, - Permission: &s3_constants.PermissionFullControl, - }, - }, - }) - } - { - //case2 (good case) - //parse canned acl (ownership=ObjectWriter) - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzCannedAcl, s3_constants.CannedAclBucketOwnerFullControl) - ownerId, grants, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipObjectWriter, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - 2, - ownerId, objectWriter, - errCode, s3err.ErrNone, - grants, []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &objectWriter, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &bucketOwner, - }, - Permission: &s3_constants.PermissionFullControl, - }, - }, - }) - } - { - //case3 (good case) - //parse canned acl (ownership=OwnershipBucketOwnerPreferred) - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzCannedAcl, s3_constants.CannedAclBucketOwnerFullControl) - ownerId, grants, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipBucketOwnerPreferred, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - 3, - ownerId, bucketOwner, - errCode, s3err.ErrNone, - grants, []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &bucketOwner, - }, - Permission: &s3_constants.PermissionFullControl, - }, - }, - }) - } - { - //case4 (bad case) - //parse custom acl (grantee id not exists) - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzAclFullControl, `uri="http://acs.amazonaws.com/groups/global/AllUsers", id="notExistsAccount", emailAddress="admin@example.com"`) - _, _, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipObjectWriter, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - id: 4, - resultErrCode: errCode, expectErrCode: s3err.ErrInvalidRequest, - }) - } - - { - //case5 (bad case) - //parse custom acl (invalid format) - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzAclFullControl, `uri="http:sfasf"`) - _, _, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipObjectWriter, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - id: 5, - resultErrCode: errCode, expectErrCode: s3err.ErrInvalidRequest, - }) - } - - { - //case6 (bad case) - //parse canned acl (invalid value) - req := &http.Request{ - Header: make(map[string][]string), - } - objectWriter := "accountA" - req.Header.Set(s3_constants.AmzCannedAcl, `uri="http:sfasf"`) - _, _, errCode := ParseAndValidateAclHeaders(req, accountManager, s3_constants.OwnershipObjectWriter, bucketOwner, objectWriter, false) - testCases = append(testCases, &Case{ - id: 5, - resultErrCode: errCode, expectErrCode: s3err.ErrInvalidRequest, - }) - } - - for _, tc := range testCases { - if tc.expectErrCode != tc.resultErrCode { - t.Errorf("case[%d]: errCode unexpect", tc.id) - } - if tc.resultOwner != tc.expectOwner { - t.Errorf("case[%d]: ownerId unexpect", tc.id) - } - if !grantsEquals(tc.resultGrants, tc.expectGrants) { - t.Fatalf("case[%d]: grants not expect", tc.id) - } - } -} - -func grantsEquals(a, b []*s3.Grant) bool { - if len(a) != len(b) { - return false - } - for i, grant := range a { - if !GrantEquals(grant, b[i]) { - return false - } - } - return true -} - -func TestDetermineReqGrants(t *testing.T) { - { - //case1: request account is anonymous - accountId := s3_constants.AccountAnonymousId - reqPermission := s3_constants.PermissionRead - - resultGrants := DetermineReqGrants(accountId, reqPermission) - expectGrants := []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &reqPermission, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &reqPermission, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &s3_constants.PermissionFullControl, - }, - } - if !grantsEquals(resultGrants, expectGrants) { - t.Fatalf("grants not expect") - } - } - { - //case2: request account is not anonymous (Iam authed) - accountId := "accountX" - reqPermission := s3_constants.PermissionRead - - resultGrants := DetermineReqGrants(accountId, reqPermission) - expectGrants := []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &reqPermission, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &reqPermission, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeCanonicalUser, - ID: &accountId, - }, - Permission: &s3_constants.PermissionFullControl, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAuthenticatedUsers, - }, - Permission: &reqPermission, - }, - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAuthenticatedUsers, - }, - Permission: &s3_constants.PermissionFullControl, - }, - } - if !grantsEquals(resultGrants, expectGrants) { - t.Fatalf("grants not expect") - } - } -} - -func TestAssembleEntryWithAcp(t *testing.T) { - defaultOwner := "admin" - - //case1 - //assemble with non-empty grants - expectOwner := "accountS" - expectGrants := []*s3.Grant{ - { - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, - } - entry := &filer_pb.Entry{} - AssembleEntryWithAcp(entry, expectOwner, expectGrants) - - resultOwner := GetAcpOwner(entry.Extended, defaultOwner) - if resultOwner != expectOwner { - t.Fatalf("owner not expect") - } - - resultGrants := GetAcpGrants(entry.Extended) - if !grantsEquals(resultGrants, expectGrants) { - t.Fatal("grants not expect") - } - - //case2 - //assemble with empty grants (override) - AssembleEntryWithAcp(entry, "", nil) - resultOwner = GetAcpOwner(entry.Extended, defaultOwner) - if resultOwner != defaultOwner { - t.Fatalf("owner not expect") - } - - resultGrants = GetAcpGrants(entry.Extended) - if len(resultGrants) != 0 { - t.Fatal("grants not expect") - } - -} - -func TestGrantEquals(t *testing.T) { - testCases := map[bool]bool{ - GrantEquals(nil, nil): true, - - GrantEquals(&s3.Grant{}, nil): false, - - GrantEquals(&s3.Grant{}, &s3.Grant{}): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - }, &s3.Grant{}): false, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{}, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{}, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{}, - }): false, - - //type not present, compare other fields of grant is meaningless - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - ID: aws.String(s3_constants.AccountAdminId), - //EmailAddress: &s3account.AccountAdmin.EmailAddress, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - ID: aws.String(s3_constants.AccountAdminId), - }, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - }, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionWrite, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - }): false, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - }, - }): true, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - }, - }): false, - - GrantEquals(&s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, &s3.Grant{ - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - }): true, - } - - for tc, expect := range testCases { - if tc != expect { - t.Fatal("TestGrantEquals not expect!") - } - } -} - -func TestSetAcpOwnerHeader(t *testing.T) { - ownerId := "accountZ" - req := &http.Request{ - Header: make(map[string][]string), - } - SetAcpOwnerHeader(req, ownerId) - - if req.Header.Get(s3_constants.ExtAmzOwnerKey) != ownerId { - t.Fatalf("owner unexpect") - } -} - -func TestSetAcpGrantsHeader(t *testing.T) { - req := &http.Request{ - Header: make(map[string][]string), - } - grants := []*s3.Grant{ - { - Permission: &s3_constants.PermissionRead, - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - ID: aws.String(s3_constants.AccountAdminId), - URI: &s3_constants.GranteeGroupAllUsers, - }, - }, - } - SetAcpGrantsHeader(req, grants) - - grantsJson, _ := json.Marshal(grants) - if req.Header.Get(s3_constants.ExtAmzAclKey) != string(grantsJson) { - t.Fatalf("owner unexpect") - } -} diff --git a/weed/s3api/s3api_acp.go b/weed/s3api/s3api_acp.go deleted file mode 100644 index 0a79990f5..000000000 --- a/weed/s3api/s3api_acp.go +++ /dev/null @@ -1,28 +0,0 @@ -package s3api - -import ( - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "net/http" -) - -func getAccountId(r *http.Request) string { - id := r.Header.Get(s3_constants.AmzAccountId) - if len(id) == 0 { - return AccountAnonymous.Id - } else { - return id - } -} - -func (s3a *S3ApiServer) checkAccessByOwnership(r *http.Request, bucket string) s3err.ErrorCode { - metadata, errCode := s3a.bucketRegistry.GetBucketMetadata(bucket) - if errCode != s3err.ErrNone { - return errCode - } - accountId := getAccountId(r) - if accountId == AccountAdmin.Id || accountId == *metadata.Owner.ID { - return s3err.ErrNone - } - return s3err.ErrAccessDenied -} diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index e946b1284..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -53,11 +53,6 @@ func isRequestSignStreamingV4(r *http.Request) bool { r.Method == http.MethodPut } -func isRequestUnsignedStreaming(r *http.Request) bool { - return r.Header.Get("x-amz-content-sha256") == streamingUnsignedPayload && - r.Method == http.MethodPut -} - // Authorization type. type authType int @@ -69,7 +64,6 @@ const ( authTypePresignedV2 authTypePostPolicy authTypeStreamingSigned - authTypeStreamingUnsigned authTypeSigned authTypeSignedV2 authTypeJWT @@ -77,29 +71,22 @@ const ( // Get request authentication type. func getRequestAuthType(r *http.Request) authType { - var authType authType - if isRequestSignatureV2(r) { - authType = authTypeSignedV2 + return authTypeSignedV2 } else if isRequestPresignedSignatureV2(r) { - authType = authTypePresignedV2 + return authTypePresignedV2 } else if isRequestSignStreamingV4(r) { - authType = authTypeStreamingSigned - } else if isRequestUnsignedStreaming(r) { - authType = authTypeStreamingUnsigned + return authTypeStreamingSigned } else if isRequestSignatureV4(r) { - authType = authTypeSigned + return authTypeSigned } else if isRequestPresignedSignatureV4(r) { - authType = authTypePresigned + return authTypePresigned } else if isRequestJWT(r) { - authType = authTypeJWT + return authTypeJWT } else if isRequestPostPolicySignatureV4(r) { - authType = authTypePostPolicy + return authTypePostPolicy } else if _, ok := r.Header["Authorization"]; !ok { - authType = authTypeAnonymous - } else { - authType = authTypeUnknown + return authTypeAnonymous } - - return authType + return authTypeUnknown } diff --git a/weed/s3api/s3api_bucket_config.go b/weed/s3api/s3api_bucket_config.go deleted file mode 100644 index 61cddc45a..000000000 --- a/weed/s3api/s3api_bucket_config.go +++ /dev/null @@ -1,947 +0,0 @@ -package s3api - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go/service/s3" - "google.golang.org/protobuf/proto" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/kms" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/cors" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// BucketConfig represents cached bucket configuration -type BucketConfig struct { - Name string - Versioning string // "Enabled", "Suspended", or "" - Ownership string - ACL []byte - Owner string - IsPublicRead bool // Cached flag to avoid JSON parsing on every request - CORS *cors.CORSConfiguration - ObjectLockConfig *ObjectLockConfiguration // Cached parsed Object Lock configuration - KMSKeyCache *BucketKMSCache // Per-bucket KMS key cache for SSE-KMS operations - LastModified time.Time - Entry *filer_pb.Entry -} - -// BucketKMSCache represents per-bucket KMS key caching for SSE-KMS operations -// This provides better isolation and automatic cleanup compared to global caching -type BucketKMSCache struct { - cache map[string]*BucketKMSCacheEntry // Key: contextHash, Value: cached data key - mutex sync.RWMutex - bucket string // Bucket name for logging/debugging - lastTTL time.Duration // TTL used for cache entries (typically 1 hour) -} - -// BucketKMSCacheEntry represents a single cached KMS data key -type BucketKMSCacheEntry struct { - DataKey interface{} // Could be *kms.GenerateDataKeyResponse or similar - ExpiresAt time.Time - KeyID string - ContextHash string // Hash of encryption context for cache validation -} - -// NewBucketKMSCache creates a new per-bucket KMS key cache -func NewBucketKMSCache(bucketName string, ttl time.Duration) *BucketKMSCache { - return &BucketKMSCache{ - cache: make(map[string]*BucketKMSCacheEntry), - bucket: bucketName, - lastTTL: ttl, - } -} - -// Get retrieves a cached KMS data key if it exists and hasn't expired -func (bkc *BucketKMSCache) Get(contextHash string) (*BucketKMSCacheEntry, bool) { - if bkc == nil { - return nil, false - } - - bkc.mutex.RLock() - defer bkc.mutex.RUnlock() - - entry, exists := bkc.cache[contextHash] - if !exists { - return nil, false - } - - // Check if entry has expired - if time.Now().After(entry.ExpiresAt) { - return nil, false - } - - return entry, true -} - -// Set stores a KMS data key in the cache -func (bkc *BucketKMSCache) Set(contextHash, keyID string, dataKey interface{}, ttl time.Duration) { - if bkc == nil { - return - } - - bkc.mutex.Lock() - defer bkc.mutex.Unlock() - - bkc.cache[contextHash] = &BucketKMSCacheEntry{ - DataKey: dataKey, - ExpiresAt: time.Now().Add(ttl), - KeyID: keyID, - ContextHash: contextHash, - } - bkc.lastTTL = ttl -} - -// CleanupExpired removes expired entries from the cache -func (bkc *BucketKMSCache) CleanupExpired() int { - if bkc == nil { - return 0 - } - - bkc.mutex.Lock() - defer bkc.mutex.Unlock() - - now := time.Now() - expiredCount := 0 - - for key, entry := range bkc.cache { - if now.After(entry.ExpiresAt) { - // Clear sensitive data before removing from cache - bkc.clearSensitiveData(entry) - delete(bkc.cache, key) - expiredCount++ - } - } - - return expiredCount -} - -// Size returns the current number of cached entries -func (bkc *BucketKMSCache) Size() int { - if bkc == nil { - return 0 - } - - bkc.mutex.RLock() - defer bkc.mutex.RUnlock() - - return len(bkc.cache) -} - -// clearSensitiveData securely clears sensitive data from a cache entry -func (bkc *BucketKMSCache) clearSensitiveData(entry *BucketKMSCacheEntry) { - if dataKeyResp, ok := entry.DataKey.(*kms.GenerateDataKeyResponse); ok { - // Zero out the plaintext data key to prevent it from lingering in memory - if dataKeyResp.Plaintext != nil { - for i := range dataKeyResp.Plaintext { - dataKeyResp.Plaintext[i] = 0 - } - dataKeyResp.Plaintext = nil - } - } -} - -// Clear clears all cached KMS entries, securely zeroing sensitive data first -func (bkc *BucketKMSCache) Clear() { - if bkc == nil { - return - } - - bkc.mutex.Lock() - defer bkc.mutex.Unlock() - - // Clear sensitive data from all entries before deletion - for _, entry := range bkc.cache { - bkc.clearSensitiveData(entry) - } - - // Clear the cache map - bkc.cache = make(map[string]*BucketKMSCacheEntry) -} - -// BucketConfigCache provides caching for bucket configurations -// Cache entries are automatically updated/invalidated through metadata subscription events, -// so TTL serves as a safety fallback rather than the primary consistency mechanism -type BucketConfigCache struct { - cache map[string]*BucketConfig - negativeCache map[string]time.Time // Cache for non-existent buckets - mutex sync.RWMutex - ttl time.Duration // Safety fallback TTL; real-time consistency maintained via events - negativeTTL time.Duration // TTL for negative cache entries -} - -// BucketMetadata represents the complete metadata for a bucket -type BucketMetadata struct { - Tags map[string]string `json:"tags,omitempty"` - CORS *cors.CORSConfiguration `json:"cors,omitempty"` - Encryption *s3_pb.EncryptionConfiguration `json:"encryption,omitempty"` - // Future extensions can be added here: - // Versioning *s3_pb.VersioningConfiguration `json:"versioning,omitempty"` - // Lifecycle *s3_pb.LifecycleConfiguration `json:"lifecycle,omitempty"` - // Notification *s3_pb.NotificationConfiguration `json:"notification,omitempty"` - // Replication *s3_pb.ReplicationConfiguration `json:"replication,omitempty"` - // Analytics *s3_pb.AnalyticsConfiguration `json:"analytics,omitempty"` - // Logging *s3_pb.LoggingConfiguration `json:"logging,omitempty"` - // Website *s3_pb.WebsiteConfiguration `json:"website,omitempty"` - // RequestPayer *s3_pb.RequestPayerConfiguration `json:"requestPayer,omitempty"` - // PublicAccess *s3_pb.PublicAccessConfiguration `json:"publicAccess,omitempty"` -} - -// NewBucketMetadata creates a new BucketMetadata with default values -func NewBucketMetadata() *BucketMetadata { - return &BucketMetadata{ - Tags: make(map[string]string), - } -} - -// IsEmpty returns true if the metadata has no configuration set -func (bm *BucketMetadata) IsEmpty() bool { - return len(bm.Tags) == 0 && bm.CORS == nil && bm.Encryption == nil -} - -// HasEncryption returns true if bucket has encryption configuration -func (bm *BucketMetadata) HasEncryption() bool { - return bm.Encryption != nil -} - -// HasCORS returns true if bucket has CORS configuration -func (bm *BucketMetadata) HasCORS() bool { - return bm.CORS != nil -} - -// HasTags returns true if bucket has tags -func (bm *BucketMetadata) HasTags() bool { - return len(bm.Tags) > 0 -} - -// NewBucketConfigCache creates a new bucket configuration cache -// TTL can be set to a longer duration since cache consistency is maintained -// through real-time metadata subscription events rather than TTL expiration -func NewBucketConfigCache(ttl time.Duration) *BucketConfigCache { - negativeTTL := ttl / 4 // Negative cache TTL is shorter than positive cache - if negativeTTL < 30*time.Second { - negativeTTL = 30 * time.Second // Minimum 30 seconds for negative cache - } - - return &BucketConfigCache{ - cache: make(map[string]*BucketConfig), - negativeCache: make(map[string]time.Time), - ttl: ttl, - negativeTTL: negativeTTL, - } -} - -// Get retrieves bucket configuration from cache -func (bcc *BucketConfigCache) Get(bucket string) (*BucketConfig, bool) { - bcc.mutex.RLock() - defer bcc.mutex.RUnlock() - - config, exists := bcc.cache[bucket] - if !exists { - return nil, false - } - - // Check if cache entry is expired (safety fallback; entries are normally updated via events) - if time.Since(config.LastModified) > bcc.ttl { - return nil, false - } - - return config, true -} - -// Set stores bucket configuration in cache -func (bcc *BucketConfigCache) Set(bucket string, config *BucketConfig) { - bcc.mutex.Lock() - defer bcc.mutex.Unlock() - - config.LastModified = time.Now() - bcc.cache[bucket] = config -} - -// Remove removes bucket configuration from cache -func (bcc *BucketConfigCache) Remove(bucket string) { - bcc.mutex.Lock() - defer bcc.mutex.Unlock() - - delete(bcc.cache, bucket) -} - -// Clear clears all cached configurations -func (bcc *BucketConfigCache) Clear() { - bcc.mutex.Lock() - defer bcc.mutex.Unlock() - - bcc.cache = make(map[string]*BucketConfig) - bcc.negativeCache = make(map[string]time.Time) -} - -// IsNegativelyCached checks if a bucket is in the negative cache (doesn't exist) -func (bcc *BucketConfigCache) IsNegativelyCached(bucket string) bool { - bcc.mutex.RLock() - defer bcc.mutex.RUnlock() - - if cachedTime, exists := bcc.negativeCache[bucket]; exists { - // Check if the negative cache entry is still valid - if time.Since(cachedTime) < bcc.negativeTTL { - return true - } - // Entry expired, remove it - delete(bcc.negativeCache, bucket) - } - return false -} - -// SetNegativeCache marks a bucket as non-existent in the negative cache -func (bcc *BucketConfigCache) SetNegativeCache(bucket string) { - bcc.mutex.Lock() - defer bcc.mutex.Unlock() - - bcc.negativeCache[bucket] = time.Now() -} - -// RemoveNegativeCache removes a bucket from the negative cache -func (bcc *BucketConfigCache) RemoveNegativeCache(bucket string) { - bcc.mutex.Lock() - defer bcc.mutex.Unlock() - - delete(bcc.negativeCache, bucket) -} - -// getBucketConfig retrieves bucket configuration with caching -func (s3a *S3ApiServer) getBucketConfig(bucket string) (*BucketConfig, s3err.ErrorCode) { - // Check negative cache first - if s3a.bucketConfigCache.IsNegativelyCached(bucket) { - return nil, s3err.ErrNoSuchBucket - } - - // Try positive cache - if config, found := s3a.bucketConfigCache.Get(bucket); found { - return config, s3err.ErrNone - } - - // Try to get from filer - entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - // Bucket doesn't exist - set negative cache - s3a.bucketConfigCache.SetNegativeCache(bucket) - return nil, s3err.ErrNoSuchBucket - } - glog.Errorf("getBucketConfig: failed to get bucket entry for %s: %v", bucket, err) - return nil, s3err.ErrInternalError - } - - config := &BucketConfig{ - Name: bucket, - Entry: entry, - IsPublicRead: false, // Explicitly default to false for private buckets - } - - // Extract configuration from extended attributes - if entry.Extended != nil { - if versioning, exists := entry.Extended[s3_constants.ExtVersioningKey]; exists { - config.Versioning = string(versioning) - } - if ownership, exists := entry.Extended[s3_constants.ExtOwnershipKey]; exists { - config.Ownership = string(ownership) - } - if acl, exists := entry.Extended[s3_constants.ExtAmzAclKey]; exists { - config.ACL = acl - // Parse ACL once and cache public-read status - config.IsPublicRead = parseAndCachePublicReadStatus(acl) - } else { - // No ACL means private bucket - config.IsPublicRead = false - } - if owner, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - config.Owner = string(owner) - } - // Parse Object Lock configuration if present - if objectLockConfig, found := LoadObjectLockConfigurationFromExtended(entry); found { - config.ObjectLockConfig = objectLockConfig - glog.V(2).Infof("getBucketConfig: cached Object Lock configuration for bucket %s", bucket) - } - } - - // Load CORS configuration from bucket directory content - if corsConfig, err := s3a.loadCORSFromBucketContent(bucket); err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - // Missing metadata is not an error; fall back cleanly - glog.V(2).Infof("CORS metadata not found for bucket %s, falling back to default behavior", bucket) - } else { - // Log parsing or validation errors - glog.Errorf("Failed to load CORS configuration for bucket %s: %v", bucket, err) - } - } else { - config.CORS = corsConfig - } - - // Cache the result - s3a.bucketConfigCache.Set(bucket, config) - - return config, s3err.ErrNone -} - -// updateBucketConfig updates bucket configuration and invalidates cache -func (s3a *S3ApiServer) updateBucketConfig(bucket string, updateFn func(*BucketConfig) error) s3err.ErrorCode { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - return errCode - } - - // Apply update function - if err := updateFn(config); err != nil { - glog.Errorf("updateBucketConfig: update function failed for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Prepare extended attributes - if config.Entry.Extended == nil { - config.Entry.Extended = make(map[string][]byte) - } - - // Update extended attributes - if config.Versioning != "" { - config.Entry.Extended[s3_constants.ExtVersioningKey] = []byte(config.Versioning) - } - if config.Ownership != "" { - config.Entry.Extended[s3_constants.ExtOwnershipKey] = []byte(config.Ownership) - } - if config.ACL != nil { - config.Entry.Extended[s3_constants.ExtAmzAclKey] = config.ACL - } - if config.Owner != "" { - config.Entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(config.Owner) - } - // Update Object Lock configuration - if config.ObjectLockConfig != nil { - if err := StoreObjectLockConfigurationInExtended(config.Entry, config.ObjectLockConfig); err != nil { - glog.Errorf("updateBucketConfig: failed to store Object Lock configuration for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - } - - // Save to filer - err := s3a.updateEntry(s3a.option.BucketsPath, config.Entry) - if err != nil { - glog.Errorf("updateBucketConfig: failed to update bucket entry for %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Update cache - s3a.bucketConfigCache.Set(bucket, config) - - return s3err.ErrNone -} - -// isVersioningEnabled checks if versioning is enabled for a bucket (with caching) -func (s3a *S3ApiServer) isVersioningEnabled(bucket string) (bool, error) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucket { - return false, filer_pb.ErrNotFound - } - return false, fmt.Errorf("failed to get bucket config: %v", errCode) - } - - // Versioning is enabled if explicitly set to "Enabled" OR if object lock is enabled - // (since object lock requires versioning to be enabled) - return config.Versioning == s3_constants.VersioningEnabled || config.ObjectLockConfig != nil, nil -} - -// isVersioningConfigured checks if versioning has been configured (either Enabled or Suspended) -func (s3a *S3ApiServer) isVersioningConfigured(bucket string) (bool, error) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucket { - return false, filer_pb.ErrNotFound - } - return false, fmt.Errorf("failed to get bucket config: %v", errCode) - } - - // Versioning is configured if explicitly set to either "Enabled" or "Suspended" - // OR if object lock is enabled (which forces versioning) - return config.Versioning != "" || config.ObjectLockConfig != nil, nil -} - -// getVersioningState returns the detailed versioning state for a bucket -func (s3a *S3ApiServer) getVersioningState(bucket string) (string, error) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucket { - return "", nil - } - return "", fmt.Errorf("failed to get bucket config: %v", errCode) - } - - // If object lock is enabled, versioning must be enabled regardless of explicit setting - if config.ObjectLockConfig != nil { - return s3_constants.VersioningEnabled, nil - } - - // Return the explicit versioning status (empty string means never configured) - return config.Versioning, nil -} - -// getBucketVersioningStatus returns the versioning status for a bucket -func (s3a *S3ApiServer) getBucketVersioningStatus(bucket string) (string, s3err.ErrorCode) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - return "", errCode - } - - // Return exactly what's stored - empty string means versioning was never configured - // This matches AWS S3 behavior where new buckets have no Status field in GetBucketVersioning response - return config.Versioning, s3err.ErrNone -} - -// setBucketVersioningStatus sets the versioning status for a bucket -func (s3a *S3ApiServer) setBucketVersioningStatus(bucket, status string) s3err.ErrorCode { - return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { - config.Versioning = status - return nil - }) -} - -// getBucketOwnership returns the ownership setting for a bucket -func (s3a *S3ApiServer) getBucketOwnership(bucket string) (string, s3err.ErrorCode) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - return "", errCode - } - - return config.Ownership, s3err.ErrNone -} - -// setBucketOwnership sets the ownership setting for a bucket -func (s3a *S3ApiServer) setBucketOwnership(bucket, ownership string) s3err.ErrorCode { - return s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { - config.Ownership = ownership - return nil - }) -} - -// loadCORSFromBucketContent loads CORS configuration from bucket directory content -func (s3a *S3ApiServer) loadCORSFromBucketContent(bucket string) (*cors.CORSConfiguration, error) { - metadata, err := s3a.GetBucketMetadata(bucket) - if err != nil { - return nil, err - } - - // Note: corsConfig can be nil if no CORS configuration is set, which is valid - return metadata.CORS, nil -} - -// getCORSConfiguration retrieves CORS configuration with caching -func (s3a *S3ApiServer) getCORSConfiguration(bucket string) (*cors.CORSConfiguration, s3err.ErrorCode) { - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - return nil, errCode - } - - return config.CORS, s3err.ErrNone -} - -// updateCORSConfiguration updates the CORS configuration for a bucket -func (s3a *S3ApiServer) updateCORSConfiguration(bucket string, corsConfig *cors.CORSConfiguration) s3err.ErrorCode { - // Update using structured API - err := s3a.UpdateBucketCORS(bucket, corsConfig) - if err != nil { - glog.Errorf("updateCORSConfiguration: failed to update CORS config for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Cache will be updated automatically via metadata subscription - return s3err.ErrNone -} - -// removeCORSConfiguration removes the CORS configuration for a bucket -func (s3a *S3ApiServer) removeCORSConfiguration(bucket string) s3err.ErrorCode { - // Update using structured API - err := s3a.ClearBucketCORS(bucket) - if err != nil { - glog.Errorf("removeCORSConfiguration: failed to remove CORS config for bucket %s: %v", bucket, err) - return s3err.ErrInternalError - } - - // Cache will be updated automatically via metadata subscription - return s3err.ErrNone -} - -// Conversion functions between CORS types and protobuf types - -// corsRuleToProto converts a CORS rule to protobuf format -func corsRuleToProto(rule cors.CORSRule) *s3_pb.CORSRule { - return &s3_pb.CORSRule{ - AllowedHeaders: rule.AllowedHeaders, - AllowedMethods: rule.AllowedMethods, - AllowedOrigins: rule.AllowedOrigins, - ExposeHeaders: rule.ExposeHeaders, - MaxAgeSeconds: int32(getMaxAgeSecondsValue(rule.MaxAgeSeconds)), - Id: rule.ID, - } -} - -// corsRuleFromProto converts a protobuf CORS rule to standard format -func corsRuleFromProto(protoRule *s3_pb.CORSRule) cors.CORSRule { - var maxAge *int - // Always create the pointer if MaxAgeSeconds is >= 0 - // This prevents nil pointer dereferences in tests and matches AWS behavior - if protoRule.MaxAgeSeconds >= 0 { - age := int(protoRule.MaxAgeSeconds) - maxAge = &age - } - // Only leave maxAge as nil if MaxAgeSeconds was explicitly set to a negative value - - return cors.CORSRule{ - AllowedHeaders: protoRule.AllowedHeaders, - AllowedMethods: protoRule.AllowedMethods, - AllowedOrigins: protoRule.AllowedOrigins, - ExposeHeaders: protoRule.ExposeHeaders, - MaxAgeSeconds: maxAge, - ID: protoRule.Id, - } -} - -// corsConfigToProto converts CORS configuration to protobuf format -func corsConfigToProto(config *cors.CORSConfiguration) *s3_pb.CORSConfiguration { - if config == nil { - return nil - } - - protoRules := make([]*s3_pb.CORSRule, len(config.CORSRules)) - for i, rule := range config.CORSRules { - protoRules[i] = corsRuleToProto(rule) - } - - return &s3_pb.CORSConfiguration{ - CorsRules: protoRules, - } -} - -// corsConfigFromProto converts protobuf CORS configuration to standard format -func corsConfigFromProto(protoConfig *s3_pb.CORSConfiguration) *cors.CORSConfiguration { - if protoConfig == nil { - return nil - } - - rules := make([]cors.CORSRule, len(protoConfig.CorsRules)) - for i, protoRule := range protoConfig.CorsRules { - rules[i] = corsRuleFromProto(protoRule) - } - - return &cors.CORSConfiguration{ - CORSRules: rules, - } -} - -// getMaxAgeSecondsValue safely extracts max age seconds value -func getMaxAgeSecondsValue(maxAge *int) int { - if maxAge == nil { - return 0 - } - return *maxAge -} - -// parseAndCachePublicReadStatus parses the ACL and caches the public-read status -func parseAndCachePublicReadStatus(acl []byte) bool { - var grants []*s3.Grant - if err := json.Unmarshal(acl, &grants); err != nil { - return false - } - - // Check if any grant gives read permission to "AllUsers" group - for _, grant := range grants { - if grant.Grantee != nil && grant.Grantee.URI != nil && grant.Permission != nil { - // Check for AllUsers group with Read permission - if *grant.Grantee.URI == s3_constants.GranteeGroupAllUsers && - (*grant.Permission == s3_constants.PermissionRead || *grant.Permission == s3_constants.PermissionFullControl) { - return true - } - } - } - - return false -} - -// getBucketMetadata retrieves bucket metadata as a structured object with caching -func (s3a *S3ApiServer) getBucketMetadata(bucket string) (*BucketMetadata, error) { - if s3a.bucketConfigCache != nil { - // Check negative cache first - if s3a.bucketConfigCache.IsNegativelyCached(bucket) { - return nil, fmt.Errorf("bucket directory not found %s", bucket) - } - - // Try to get from positive cache - if config, found := s3a.bucketConfigCache.Get(bucket); found { - // Extract metadata from cached config - if metadata, err := s3a.extractMetadataFromConfig(config); err == nil { - return metadata, nil - } - // If extraction fails, fall through to direct load - } - } - - // Load directly from filer - return s3a.loadBucketMetadataFromFiler(bucket) -} - -// extractMetadataFromConfig extracts BucketMetadata from cached BucketConfig -func (s3a *S3ApiServer) extractMetadataFromConfig(config *BucketConfig) (*BucketMetadata, error) { - if config == nil || config.Entry == nil { - return NewBucketMetadata(), nil - } - - // Parse metadata from entry content if available - if len(config.Entry.Content) > 0 { - var protoMetadata s3_pb.BucketMetadata - if err := proto.Unmarshal(config.Entry.Content, &protoMetadata); err != nil { - glog.Errorf("extractMetadataFromConfig: failed to unmarshal protobuf metadata for bucket %s: %v", config.Name, err) - return nil, err - } - // Convert protobuf to structured metadata - metadata := &BucketMetadata{ - Tags: protoMetadata.Tags, - CORS: corsConfigFromProto(protoMetadata.Cors), - Encryption: protoMetadata.Encryption, - } - return metadata, nil - } - - // Fallback: create metadata from cached CORS config - metadata := NewBucketMetadata() - if config.CORS != nil { - metadata.CORS = config.CORS - } - - return metadata, nil -} - -// loadBucketMetadataFromFiler loads bucket metadata directly from the filer -func (s3a *S3ApiServer) loadBucketMetadataFromFiler(bucket string) (*BucketMetadata, error) { - // Validate bucket name to prevent path traversal attacks - if bucket == "" || strings.Contains(bucket, "/") || strings.Contains(bucket, "\\") || - strings.Contains(bucket, "..") || strings.Contains(bucket, "~") { - return nil, fmt.Errorf("invalid bucket name: %s", bucket) - } - - // Clean the bucket name further to prevent any potential path traversal - bucket = filepath.Clean(bucket) - if bucket == "." || bucket == ".." { - return nil, fmt.Errorf("invalid bucket name: %s", bucket) - } - - // Get bucket directory entry to access its content - entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) - if err != nil { - // Check if this is a "not found" error - if errors.Is(err, filer_pb.ErrNotFound) { - // Set negative cache for non-existent bucket - if s3a.bucketConfigCache != nil { - s3a.bucketConfigCache.SetNegativeCache(bucket) - } - } - return nil, fmt.Errorf("error retrieving bucket directory %s: %w", bucket, err) - } - if entry == nil { - // Set negative cache for non-existent bucket - if s3a.bucketConfigCache != nil { - s3a.bucketConfigCache.SetNegativeCache(bucket) - } - return nil, fmt.Errorf("bucket directory not found %s", bucket) - } - - // If no content, return empty metadata - if len(entry.Content) == 0 { - return NewBucketMetadata(), nil - } - - // Unmarshal metadata from protobuf - var protoMetadata s3_pb.BucketMetadata - if err := proto.Unmarshal(entry.Content, &protoMetadata); err != nil { - glog.Errorf("getBucketMetadata: failed to unmarshal protobuf metadata for bucket %s: %v", bucket, err) - return nil, fmt.Errorf("failed to unmarshal bucket metadata for %s: %w", bucket, err) - } - - // Convert protobuf CORS to standard CORS - corsConfig := corsConfigFromProto(protoMetadata.Cors) - - // Create and return structured metadata - metadata := &BucketMetadata{ - Tags: protoMetadata.Tags, - CORS: corsConfig, - Encryption: protoMetadata.Encryption, - } - - return metadata, nil -} - -// setBucketMetadata stores bucket metadata from a structured object -func (s3a *S3ApiServer) setBucketMetadata(bucket string, metadata *BucketMetadata) error { - // Validate bucket name to prevent path traversal attacks - if bucket == "" || strings.Contains(bucket, "/") || strings.Contains(bucket, "\\") || - strings.Contains(bucket, "..") || strings.Contains(bucket, "~") { - return fmt.Errorf("invalid bucket name: %s", bucket) - } - - // Clean the bucket name further to prevent any potential path traversal - bucket = filepath.Clean(bucket) - if bucket == "." || bucket == ".." { - return fmt.Errorf("invalid bucket name: %s", bucket) - } - - // Default to empty metadata if nil - if metadata == nil { - metadata = NewBucketMetadata() - } - - // Create protobuf metadata - protoMetadata := &s3_pb.BucketMetadata{ - Tags: metadata.Tags, - Cors: corsConfigToProto(metadata.CORS), - Encryption: metadata.Encryption, - } - - // Marshal metadata to protobuf - metadataBytes, err := proto.Marshal(protoMetadata) - if err != nil { - return fmt.Errorf("failed to marshal bucket metadata to protobuf: %w", err) - } - - // Update the bucket entry with new content - err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Get current bucket entry - entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) - if err != nil { - return fmt.Errorf("error retrieving bucket directory %s: %w", bucket, err) - } - if entry == nil { - return fmt.Errorf("bucket directory not found %s", bucket) - } - - // Update content with metadata - entry.Content = metadataBytes - - request := &filer_pb.UpdateEntryRequest{ - Directory: s3a.option.BucketsPath, - Entry: entry, - } - - _, err = client.UpdateEntry(context.Background(), request) - return err - }) - - // Invalidate cache after successful update - if err == nil && s3a.bucketConfigCache != nil { - s3a.bucketConfigCache.Remove(bucket) - s3a.bucketConfigCache.RemoveNegativeCache(bucket) // Remove from negative cache too - } - - return err -} - -// New structured API functions using BucketMetadata - -// GetBucketMetadata retrieves complete bucket metadata as a structured object -func (s3a *S3ApiServer) GetBucketMetadata(bucket string) (*BucketMetadata, error) { - return s3a.getBucketMetadata(bucket) -} - -// SetBucketMetadata stores complete bucket metadata from a structured object -func (s3a *S3ApiServer) SetBucketMetadata(bucket string, metadata *BucketMetadata) error { - return s3a.setBucketMetadata(bucket, metadata) -} - -// UpdateBucketMetadata updates specific parts of bucket metadata while preserving others -// -// DISTRIBUTED SYSTEM DESIGN NOTE: -// This function implements a read-modify-write pattern with "last write wins" semantics. -// In the rare case of concurrent updates to different parts of bucket metadata -// (e.g., simultaneous tag and CORS updates), the last write may overwrite previous changes. -// -// This is an acceptable trade-off because: -// 1. Bucket metadata updates are infrequent in typical S3 usage -// 2. Traditional locking doesn't work in distributed systems across multiple nodes -// 3. The complexity of distributed consensus (e.g., Raft) for metadata updates would -// be disproportionate to the low frequency of bucket configuration changes -// 4. Most bucket operations (tags, CORS, encryption) are typically configured once -// during setup rather than being frequently modified -// -// If stronger consistency is required, consider implementing optimistic concurrency -// control with version numbers or ETags at the storage layer. -func (s3a *S3ApiServer) UpdateBucketMetadata(bucket string, update func(*BucketMetadata) error) error { - // Get current metadata - metadata, err := s3a.GetBucketMetadata(bucket) - if err != nil { - return fmt.Errorf("failed to get current bucket metadata: %w", err) - } - - // Apply update function - if err := update(metadata); err != nil { - return fmt.Errorf("failed to apply metadata update: %w", err) - } - - // Store updated metadata (last write wins) - return s3a.SetBucketMetadata(bucket, metadata) -} - -// Helper functions for specific metadata operations using structured API - -// UpdateBucketTags sets bucket tags using the structured API -func (s3a *S3ApiServer) UpdateBucketTags(bucket string, tags map[string]string) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.Tags = tags - return nil - }) -} - -// UpdateBucketCORS sets bucket CORS configuration using the structured API -func (s3a *S3ApiServer) UpdateBucketCORS(bucket string, corsConfig *cors.CORSConfiguration) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.CORS = corsConfig - return nil - }) -} - -// UpdateBucketEncryption sets bucket encryption configuration using the structured API -func (s3a *S3ApiServer) UpdateBucketEncryption(bucket string, encryptionConfig *s3_pb.EncryptionConfiguration) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.Encryption = encryptionConfig - return nil - }) -} - -// ClearBucketTags removes all bucket tags using the structured API -func (s3a *S3ApiServer) ClearBucketTags(bucket string) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.Tags = make(map[string]string) - return nil - }) -} - -// ClearBucketCORS removes bucket CORS configuration using the structured API -func (s3a *S3ApiServer) ClearBucketCORS(bucket string) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.CORS = nil - return nil - }) -} - -// ClearBucketEncryption removes bucket encryption configuration using the structured API -func (s3a *S3ApiServer) ClearBucketEncryption(bucket string) error { - return s3a.UpdateBucketMetadata(bucket, func(metadata *BucketMetadata) error { - metadata.Encryption = nil - return nil - }) -} diff --git a/weed/s3api/s3api_bucket_cors_handlers.go b/weed/s3api/s3api_bucket_cors_handlers.go deleted file mode 100644 index bd27785e2..000000000 --- a/weed/s3api/s3api_bucket_cors_handlers.go +++ /dev/null @@ -1,129 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/cors" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// S3BucketChecker implements cors.BucketChecker interface -type S3BucketChecker struct { - server *S3ApiServer -} - -func (c *S3BucketChecker) CheckBucket(r *http.Request, bucket string) s3err.ErrorCode { - return c.server.checkBucket(r, bucket) -} - -// S3CORSConfigGetter implements cors.CORSConfigGetter interface -type S3CORSConfigGetter struct { - server *S3ApiServer -} - -func (g *S3CORSConfigGetter) GetCORSConfiguration(bucket string) (*cors.CORSConfiguration, s3err.ErrorCode) { - return g.server.getCORSConfiguration(bucket) -} - -// getCORSMiddleware returns a CORS middleware instance with caching -func (s3a *S3ApiServer) getCORSMiddleware() *cors.Middleware { - bucketChecker := &S3BucketChecker{server: s3a} - corsConfigGetter := &S3CORSConfigGetter{server: s3a} - - return cors.NewMiddleware(bucketChecker, corsConfigGetter) -} - -// GetBucketCorsHandler handles Get bucket CORS configuration -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html -func (s3a *S3ApiServer) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketCorsHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Load CORS configuration from cache - config, errCode := s3a.getCORSConfiguration(bucket) - if errCode != s3err.ErrNone { - if errCode == s3err.ErrNoSuchBucket { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - } else { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - } - return - } - - if config == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchCORSConfiguration) - return - } - - // Return CORS configuration as XML - writeSuccessResponseXML(w, r, config) -} - -// PutBucketCorsHandler handles Put bucket CORS configuration -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html -func (s3a *S3ApiServer) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketCorsHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Parse CORS configuration from request body - var config cors.CORSConfiguration - if err := xml.NewDecoder(r.Body).Decode(&config); err != nil { - glog.V(1).Infof("Failed to parse CORS configuration: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Validate CORS configuration - if err := cors.ValidateConfiguration(&config); err != nil { - glog.V(1).Infof("Invalid CORS configuration: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - // Store CORS configuration and update cache - // This handles both cache update and persistent storage through the unified bucket config system - if err := s3a.updateCORSConfiguration(bucket, &config); err != s3err.ErrNone { - glog.Errorf("Failed to update CORS configuration: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Return success - writeSuccessResponseEmpty(w, r) -} - -// DeleteBucketCorsHandler handles Delete bucket CORS configuration -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html -func (s3a *S3ApiServer) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteBucketCorsHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Remove CORS configuration from cache and persistent storage - // This handles both cache invalidation and persistent storage cleanup through the unified bucket config system - if err := s3a.removeCORSConfiguration(bucket); err != s3err.ErrNone { - glog.Errorf("Failed to remove CORS configuration: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Return success (204 No Content) - w.WriteHeader(http.StatusNoContent) -} diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 060d453b1..c057ec10c 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -1,35 +1,33 @@ package s3api import ( - "bytes" "context" - "encoding/json" "encoding/xml" "errors" "fmt" "math" "net/http" - "sort" - "strings" "time" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) +type ListAllMyBucketsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` + Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` +} + func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { glog.V(3).Infof("ListBucketsHandler") @@ -37,9 +35,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques var identity *Identity var s3Err s3err.ErrorCode if s3a.iam.isEnabled() { - // Use authRequest instead of authUser for consistency with other endpoints - // This ensures the same authentication flow and any fixes (like prefix handling) are applied - identity, s3Err = s3a.iam.authRequest(r, s3_constants.ACTION_LIST) + identity, s3Err = s3a.iam.authUser(r) if s3Err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, s3Err) return @@ -57,39 +53,25 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques identityId := r.Header.Get(s3_constants.AmzIdentityId) - var listBuckets ListAllMyBucketsList + var buckets []*s3.Bucket for _, entry := range entries { if entry.IsDirectory { - // Check permissions for each bucket - if identity != nil { - // For JWT-authenticated users, use IAM authorization - sessionToken := r.Header.Get("X-SeaweedFS-Session-Token") - if s3a.iam.iamIntegration != nil && sessionToken != "" { - // Use IAM authorization for JWT users - errCode := s3a.iam.authorizeWithIAM(r, identity, s3_constants.ACTION_LIST, entry.Name, "") - if errCode != s3err.ErrNone { - continue - } - } else { - // Use legacy authorization for non-JWT users - if !identity.canDo(s3_constants.ACTION_LIST, entry.Name, "") { - continue - } - } + if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name, "") { + continue } - listBuckets.Bucket = append(listBuckets.Bucket, ListAllMyBucketsEntry{ - Name: entry.Name, - CreationDate: time.Unix(entry.Attributes.Crtime, 0).UTC(), + buckets = append(buckets, &s3.Bucket{ + Name: aws.String(entry.Name), + CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()), }) } } response = ListAllMyBucketsResult{ - Owner: CanonicalUser{ - ID: identityId, - DisplayName: identityId, + Owner: &s3.Owner{ + ID: aws.String(identityId), + DisplayName: aws.String(identityId), }, - Buckets: listBuckets, + Buckets: buckets, } writeSuccessResponseXML(w, r, response) @@ -97,33 +79,22 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("PutBucketHandler %s", bucket) - // validate the bucket name - err := s3bucket.VerifyS3BucketName(bucket) - if err != nil { - glog.Errorf("put invalid bucket name: %v %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidBucketName) - return - } - - // Check if bucket already exists and handle ownership/settings - currentIdentityId := r.Header.Get(s3_constants.AmzIdentityId) - - // Check collection existence first - collectionExists := false + // avoid duplicated buckets + errCode := s3err.ErrNone if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { if resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{ IncludeEcVolumes: true, IncludeNormalVolumes: true, }); err != nil { glog.Errorf("list collection: %v", err) - return fmt.Errorf("list collections: %w", err) + return fmt.Errorf("list collections: %v", err) } else { for _, c := range resp.Collections { - if s3a.getCollectionName(bucket) == c.Name { - collectionExists = true + if bucket == c.Name { + errCode = s3err.ErrBucketAlreadyExists break } } @@ -133,62 +104,19 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } - - // Check bucket directory existence and get metadata if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist { - // Bucket exists, check ownership and settings - if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); err == nil { - // Get existing bucket owner - var existingOwnerId string - if entry.Extended != nil { - if id, ok := entry.Extended[s3_constants.AmzIdentityId]; ok { - existingOwnerId = string(id) - } - } - - // Check ownership - if existingOwnerId != "" && existingOwnerId != currentIdentityId { - // Different owner - always fail with BucketAlreadyExists - glog.V(3).Infof("PutBucketHandler: bucket %s owned by %s, requested by %s", bucket, existingOwnerId, currentIdentityId) - s3err.WriteErrorResponse(w, r, s3err.ErrBucketAlreadyExists) - return - } - - // Same owner or no owner set - check for conflicting settings - objectLockRequested := strings.EqualFold(r.Header.Get(s3_constants.AmzBucketObjectLockEnabled), "true") - - // Get current bucket configuration - bucketConfig, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - glog.Errorf("PutBucketHandler: failed to get bucket config for %s: %v", bucket, errCode) - // If we can't get config, assume no conflict and allow recreation - } else { - // Check for Object Lock conflict - currentObjectLockEnabled := bucketConfig.ObjectLockConfig != nil && - bucketConfig.ObjectLockConfig.ObjectLockEnabled == s3_constants.ObjectLockEnabled - - if objectLockRequested != currentObjectLockEnabled { - // Conflicting Object Lock settings - fail with BucketAlreadyExists - glog.V(3).Infof("PutBucketHandler: bucket %s has conflicting Object Lock settings (requested: %v, current: %v)", - bucket, objectLockRequested, currentObjectLockEnabled) - s3err.WriteErrorResponse(w, r, s3err.ErrBucketAlreadyExists) - return - } - } - - // Bucket already exists - always return BucketAlreadyExists per S3 specification - // The S3 tests expect BucketAlreadyExists in all cases, not BucketAlreadyOwnedByYou - glog.V(3).Infof("PutBucketHandler: bucket %s already exists", bucket) - s3err.WriteErrorResponse(w, r, s3err.ErrBucketAlreadyExists) - return - } + errCode = s3err.ErrBucketAlreadyExists + } + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return } - // If collection exists but bucket directory doesn't, this is an inconsistent state - if collectionExists { - glog.Errorf("PutBucketHandler: collection exists but bucket directory missing for %s", bucket) - s3err.WriteErrorResponse(w, r, s3err.ErrBucketAlreadyExists) - return + if s3a.iam.isEnabled() { + if _, errCode = s3a.iam.authRequest(r, s3_constants.ACTION_ADMIN); errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } } fn := func(entry *filer_pb.Entry) { @@ -206,36 +134,6 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } - - // Check for x-amz-bucket-object-lock-enabled header (S3 standard compliance) - if objectLockHeaderValue := r.Header.Get(s3_constants.AmzBucketObjectLockEnabled); strings.EqualFold(objectLockHeaderValue, "true") { - glog.V(3).Infof("PutBucketHandler: enabling Object Lock and Versioning for bucket %s due to x-amz-bucket-object-lock-enabled header", bucket) - - // Atomically update the configuration of the specified bucket. See the updateBucketConfig - // function definition for detailed documentation on parameters and behavior. - errCode := s3a.updateBucketConfig(bucket, func(bucketConfig *BucketConfig) error { - // Enable versioning (required for Object Lock) - bucketConfig.Versioning = s3_constants.VersioningEnabled - - // Create basic Object Lock configuration (enabled without default retention) - objectLockConfig := &ObjectLockConfiguration{ - ObjectLockEnabled: s3_constants.ObjectLockEnabled, - } - - // Set the cached Object Lock configuration - bucketConfig.ObjectLockConfig = objectLockConfig - - return nil - }) - - if errCode != s3err.ErrNone { - glog.Errorf("PutBucketHandler: failed to enable Object Lock for bucket %s: %v", bucket, errCode) - s3err.WriteErrorResponse(w, r, errCode) - return - } - glog.V(3).Infof("PutBucketHandler: enabled Object Lock and Versioning for bucket %s", bucket) - } - w.Header().Set("Location", "/"+bucket) writeSuccessResponseEmpty(w, r) } @@ -265,7 +163,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ - Collection: s3a.getCollectionName(bucket), + Collection: bucket, } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) @@ -292,9 +190,6 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque return } - // Clean up bucket-related caches and locks after successful deletion - s3a.invalidateBucketConfigCache(bucket) - s3err.WriteEmptyResponse(w, r, http.StatusNoContent) } @@ -303,7 +198,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request bucket, _ := s3_constants.GetBucketAndObject(r) glog.V(3).Infof("HeadBucketHandler %s", bucket) - if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || errors.Is(err, filer_pb.ErrNotFound) { + if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound { s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) return } @@ -313,14 +208,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode { entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) - if entry == nil || errors.Is(err, filer_pb.ErrNotFound) { + if entry == nil || err == filer_pb.ErrNotFound { return s3err.ErrNoSuchBucket } - //if iam is enabled, the access was already checked before - if s3a.iam.isEnabled() { - return s3err.ErrNone - } if !s3a.hasAccess(r, entry) { return s3err.ErrAccessDenied } @@ -328,11 +219,10 @@ func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorC } func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { - // Check if user is properly authenticated as admin through IAM system - if s3a.isUserAdmin(r) { + isAdmin := r.Header.Get(s3_constants.AmzIsAdmin) != "" + if isAdmin { return true } - if entry.Extended == nil { return true } @@ -340,82 +230,12 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { identityId := r.Header.Get(s3_constants.AmzIdentityId) if id, ok := entry.Extended[s3_constants.AmzIdentityId]; ok { if identityId != string(id) { - glog.V(3).Infof("hasAccess: %s != %s (entry.Extended = %v)", identityId, id, entry.Extended) return false } } return true } -// isUserAdmin securely checks if the authenticated user is an admin -// This validates admin status through proper IAM authentication, not spoofable headers -func (s3a *S3ApiServer) isUserAdmin(r *http.Request) bool { - // Use a minimal admin action to authenticate and check admin status - adminAction := Action("Admin") - identity, errCode := s3a.iam.authRequest(r, adminAction) - if errCode != s3err.ErrNone { - return false - } - - // Check if the authenticated identity has admin privileges - return identity != nil && identity.isAdmin() -} - -// isBucketPublicRead checks if a bucket allows anonymous read access based on its cached ACL status -func (s3a *S3ApiServer) isBucketPublicRead(bucket string) bool { - // Get bucket configuration which contains cached public-read status - config, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - glog.V(4).Infof("isBucketPublicRead: failed to get bucket config for %s: %v", bucket, errCode) - return false - } - - glog.V(4).Infof("isBucketPublicRead: bucket=%s, IsPublicRead=%v", bucket, config.IsPublicRead) - // Return the cached public-read status (no JSON parsing needed) - return config.IsPublicRead -} - -// isPublicReadGrants checks if the grants allow public read access -func isPublicReadGrants(grants []*s3.Grant) bool { - for _, grant := range grants { - if grant.Grantee != nil && grant.Grantee.URI != nil && grant.Permission != nil { - // Check for AllUsers group with Read permission - if *grant.Grantee.URI == s3_constants.GranteeGroupAllUsers && - (*grant.Permission == s3_constants.PermissionRead || *grant.Permission == s3_constants.PermissionFullControl) { - return true - } - } - } - return false -} - -// AuthWithPublicRead creates an auth wrapper that allows anonymous access for public-read buckets -func (s3a *S3ApiServer) AuthWithPublicRead(handler http.HandlerFunc, action Action) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - authType := getRequestAuthType(r) - isAnonymous := authType == authTypeAnonymous - - glog.V(4).Infof("AuthWithPublicRead: bucket=%s, authType=%v, isAnonymous=%v", bucket, authType, isAnonymous) - - // For anonymous requests, check if bucket allows public read - if isAnonymous { - isPublic := s3a.isBucketPublicRead(bucket) - glog.V(4).Infof("AuthWithPublicRead: bucket=%s, isPublic=%v", bucket, isPublic) - if isPublic { - glog.V(3).Infof("AuthWithPublicRead: allowing anonymous access to public-read bucket %s", bucket) - handler(w, r) - return - } - glog.V(3).Infof("AuthWithPublicRead: bucket %s is not public-read, falling back to IAM auth", bucket) - } - - // For all authenticated requests and anonymous requests to non-public buckets, - // use normal IAM auth to enforce policies - s3a.iam.Auth(handler, action)(w, r) - } -} - // GetBucketAclHandler Get Bucket ACL // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAcl.html func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { @@ -428,88 +248,32 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque return } - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - amzDisplayName := s3a.iam.GetAccountNameById(amzAccountId) - response := AccessControlPolicy{ - Owner: CanonicalUser{ - ID: amzAccountId, - DisplayName: amzDisplayName, - }, - } - response.AccessControlList.Grant = append(response.AccessControlList.Grant, Grant{ - Grantee: Grantee{ - ID: amzAccountId, - DisplayName: amzDisplayName, - Type: "CanonicalUser", - XMLXSI: "CanonicalUser", - XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, - Permission: s3.PermissionFullControl, - }) - writeSuccessResponseXML(w, r, response) -} - -// PutBucketAclHandler Put bucket ACL -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html // -func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketAclHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Get account information for ACL processing - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - - // Get bucket ownership settings (these would be used for ownership validation in a full implementation) - bucketOwnership := "" // Default/simplified for now - in a full implementation this would be retrieved from bucket config - bucketOwnerId := amzAccountId // Simplified - bucket owner is current account - - // Use the existing ACL parsing logic to handle both canned ACLs and XML body - grants, errCode := ExtractAcl(r, s3a.iam, bucketOwnership, bucketOwnerId, amzAccountId, amzAccountId) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - glog.V(3).Infof("PutBucketAclHandler: bucket=%s, extracted %d grants", bucket, len(grants)) - isPublic := isPublicReadGrants(grants) - glog.V(3).Infof("PutBucketAclHandler: bucket=%s, isPublicReadGrants=%v", bucket, isPublic) - - // Store the bucket ACL in bucket metadata - errCode = s3a.updateBucketConfig(bucket, func(config *BucketConfig) error { - if len(grants) > 0 { - grantsBytes, err := json.Marshal(grants) - if err != nil { - glog.Errorf("PutBucketAclHandler: failed to marshal grants: %v", err) - return err - } - config.ACL = grantsBytes - // Cache the public-read status to avoid JSON parsing on every request - config.IsPublicRead = isPublicReadGrants(grants) - glog.V(4).Infof("PutBucketAclHandler: bucket=%s, setting IsPublicRead=%v", bucket, config.IsPublicRead) - } else { - config.ACL = nil - config.IsPublicRead = false + response := AccessControlPolicy{} + for _, ident := range s3a.iam.identities { + if len(ident.Credentials) == 0 { + continue + } + for _, action := range ident.Actions { + if !action.overBucket(bucket) || action.getPermission() == "" { + continue + } + id := ident.Credentials[0].AccessKey + if response.Owner.DisplayName == "" && action.isOwner(bucket) && len(ident.Credentials) > 0 { + response.Owner.DisplayName = ident.Name + response.Owner.ID = id + } + response.AccessControlList.Grant = append(response.AccessControlList.Grant, Grant{ + Grantee: Grantee{ + ID: id, + DisplayName: ident.Name, + Type: "CanonicalUser", + XMLXSI: "CanonicalUser", + XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, + Permission: action.getPermission(), + }) } - config.Owner = amzAccountId - return nil - }) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return } - - glog.V(3).Infof("PutBucketAclHandler: Successfully stored ACL for bucket %s with %d grants", bucket, len(grants)) - - // Small delay to ensure ACL propagation across distributed caches - // This prevents race conditions in tests where anonymous access is attempted immediately after ACL change - time.Sleep(50 * time.Millisecond) - - writeSuccessResponseEmpty(w, r) + writeSuccessResponseXML(w, r, response) } // GetBucketLifecycleConfigurationHandler Get Bucket Lifecycle configuration @@ -529,185 +293,49 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } - ttls := fc.GetCollectionTtls(s3a.getCollectionName(bucket)) + ttls := fc.GetCollectionTtls(bucket) if len(ttls) == 0 { s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchLifecycleConfiguration) return } - response := Lifecycle{} - // Sort locationPrefixes to ensure consistent ordering of lifecycle rules - var locationPrefixes []string - for locationPrefix := range ttls { - locationPrefixes = append(locationPrefixes, locationPrefix) - } - sort.Strings(locationPrefixes) - - for _, locationPrefix := range locationPrefixes { - internalTtl := ttls[locationPrefix] + for prefix, internalTtl := range ttls { ttl, _ := needle.ReadTTL(internalTtl) days := int(ttl.Minutes() / 60 / 24) if days == 0 { continue } - prefix, found := strings.CutPrefix(locationPrefix, fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket)) - if !found { - continue - } response.Rules = append(response.Rules, Rule{ - ID: prefix, - Status: Enabled, - Prefix: Prefix{val: prefix, set: true}, + Status: Enabled, Filter: Filter{ + Prefix: Prefix{string: prefix, set: true}, + set: true, + }, Expiration: Expiration{Days: days, set: true}, }) } - writeSuccessResponseXML(w, r, response) } // PutBucketLifecycleConfigurationHandler Put Bucket Lifecycle configuration // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketLifecycleConfigurationHandler %s", bucket) - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) - lifeCycleConfig := Lifecycle{} - if err := xmlDecoder(r.Body, &lifeCycleConfig, r.ContentLength); err != nil { - glog.Warningf("PutBucketLifecycleConfigurationHandler xml decode: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil) - if err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler read filer config: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - collectionName := s3a.getCollectionName(bucket) - collectionTtls := fc.GetCollectionTtls(collectionName) - changed := false - - for _, rule := range lifeCycleConfig.Rules { - if rule.Status != Enabled { - continue - } - var rulePrefix string - switch { - case rule.Filter.Prefix.set: - rulePrefix = rule.Filter.Prefix.val - case rule.Prefix.set: - rulePrefix = rule.Prefix.val - case !rule.Expiration.Date.IsZero() || rule.Transition.Days > 0 || !rule.Transition.Date.IsZero(): - s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) - return - } - - if rule.Expiration.Days == 0 { - continue - } - - locConf := &filer_pb.FilerConf_PathConf{ - LocationPrefix: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, rulePrefix), - Collection: collectionName, - Ttl: fmt.Sprintf("%dd", rule.Expiration.Days), - } - if ttl, ok := collectionTtls[locConf.LocationPrefix]; ok && ttl == locConf.Ttl { - continue - } - if err := fc.AddLocationConf(locConf); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler add location config: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - changed = true - } - - if changed { - var buf bytes.Buffer - if err := fc.ToText(&buf); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler save config to text: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - } - if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes()) - }); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler save config inside filer: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - writeSuccessResponseEmpty(w, r) } -// DeleteBucketLifecycleHandler Delete Bucket Lifecycle +// DeleteBucketMetricsConfiguration Delete Bucket Lifecycle // https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteBucketLifecycleHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil) - if err != nil { - glog.Errorf("DeleteBucketLifecycleHandler read filer config: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - collectionTtls := fc.GetCollectionTtls(s3a.getCollectionName(bucket)) - changed := false - for prefix, ttl := range collectionTtls { - bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket) - if strings.HasPrefix(prefix, bucketPrefix) && strings.HasSuffix(ttl, "d") { - pathConf, found := fc.GetLocationConf(prefix) - if found { - pathConf.Ttl = "" - fc.SetLocationConf(pathConf) - } - changed = true - } - } - - if changed { - var buf bytes.Buffer - if err := fc.ToText(&buf); err != nil { - glog.Errorf("DeleteBucketLifecycleHandler save config to text: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - } - if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes()) - }); err != nil { - glog.Errorf("DeleteBucketLifecycleHandler save config inside filer: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } s3err.WriteEmptyResponse(w, r, http.StatusNoContent) + } // GetBucketLocationHandler Get bucket location // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html func (s3a *S3ApiServer) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - writeSuccessResponseXML(w, r, CreateBucketConfiguration{}) + writeSuccessResponseXML(w, r, LocationConstraint{}) } // GetBucketRequestPaymentHandler Get bucket location @@ -715,238 +343,3 @@ func (s3a *S3ApiServer) GetBucketLocationHandler(w http.ResponseWriter, r *http. func (s3a *S3ApiServer) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) { writeSuccessResponseXML(w, r, RequestPaymentConfiguration{Payer: "BucketOwner"}) } - -// PutBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketOwnershipControls.html -func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketOwnershipControls %s", bucket) - - errCode := s3a.checkAccessByOwnership(r, bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if r.Body == nil || r.Body == http.NoBody { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - var v s3.OwnershipControls - defer util_http.CloseRequest(r) - - err := xmlutil.UnmarshalXML(&v, xml.NewDecoder(r.Body), "") - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - if len(v.Rules) != 1 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - printOwnership := true - ownership := *v.Rules[0].ObjectOwnership - switch ownership { - case s3_constants.OwnershipObjectWriter: - case s3_constants.OwnershipBucketOwnerPreferred: - case s3_constants.OwnershipBucketOwnerEnforced: - printOwnership = false - default: - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - // Check if ownership needs to be updated - currentOwnership, errCode := s3a.getBucketOwnership(bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if currentOwnership != ownership { - errCode = s3a.setBucketOwnership(bucket, ownership) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - } - - if printOwnership { - result := &s3.PutBucketOwnershipControlsInput{ - OwnershipControls: &v, - } - s3err.WriteAwsXMLResponse(w, r, http.StatusOK, result) - } else { - writeSuccessResponseEmpty(w, r) - } -} - -// GetBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketOwnershipControls.html -func (s3a *S3ApiServer) GetBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketOwnershipControls %s", bucket) - - errCode := s3a.checkAccessByOwnership(r, bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Get ownership using new bucket config system - ownership, errCode := s3a.getBucketOwnership(bucket) - if errCode == s3err.ErrNoSuchBucket { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } else if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, s3err.OwnershipControlsNotFoundError) - return - } - - result := &s3.PutBucketOwnershipControlsInput{ - OwnershipControls: &s3.OwnershipControls{ - Rules: []*s3.OwnershipControlsRule{ - { - ObjectOwnership: &ownership, - }, - }, - }, - } - - s3err.WriteAwsXMLResponse(w, r, http.StatusOK, result) -} - -// DeleteBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketOwnershipControls.html -func (s3a *S3ApiServer) DeleteBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketOwnershipControls %s", bucket) - - errCode := s3a.checkAccessByOwnership(r, bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - bucketEntry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - _, ok := bucketEntry.Extended[s3_constants.ExtOwnershipKey] - if !ok { - s3err.WriteErrorResponse(w, r, s3err.OwnershipControlsNotFoundError) - return - } - - delete(bucketEntry.Extended, s3_constants.ExtOwnershipKey) - err = s3a.updateEntry(s3a.option.BucketsPath, bucketEntry) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - emptyOwnershipControls := &s3.OwnershipControls{ - Rules: []*s3.OwnershipControlsRule{}, - } - s3err.WriteAwsXMLResponse(w, r, http.StatusOK, emptyOwnershipControls) -} - -// GetBucketVersioningHandler Get Bucket Versioning status -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html -func (s3a *S3ApiServer) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketVersioning %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Get versioning status using new bucket config system - versioningStatus, errCode := s3a.getBucketVersioningStatus(bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // AWS S3 behavior: If versioning was never configured, don't return Status field - var response *s3.PutBucketVersioningInput - if versioningStatus == "" { - // No versioning configuration - return empty response (no Status field) - response = &s3.PutBucketVersioningInput{ - VersioningConfiguration: &s3.VersioningConfiguration{}, - } - } else { - // Versioning was explicitly configured - return the status - response = &s3.PutBucketVersioningInput{ - VersioningConfiguration: &s3.VersioningConfiguration{ - Status: aws.String(versioningStatus), - }, - } - } - s3err.WriteAwsXMLResponse(w, r, http.StatusOK, response) -} - -// PutBucketVersioningHandler Put bucket Versioning -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html -func (s3a *S3ApiServer) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketVersioning %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - if r.Body == nil || r.Body == http.NoBody { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - var versioningConfig s3.VersioningConfiguration - defer util_http.CloseRequest(r) - - err := xmlutil.UnmarshalXML(&versioningConfig, xml.NewDecoder(r.Body), "") - if err != nil { - glog.Warningf("PutBucketVersioningHandler xml decode: %s", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - if versioningConfig.Status == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - status := *versioningConfig.Status - if status != s3_constants.VersioningEnabled && status != s3_constants.VersioningSuspended { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - return - } - - // Check if trying to suspend versioning on a bucket with object lock enabled - if status == s3_constants.VersioningSuspended { - // Get bucket configuration to check for object lock - bucketConfig, errCode := s3a.getBucketConfig(bucket) - if errCode == s3err.ErrNone && bucketConfig.ObjectLockConfig != nil { - // Object lock is enabled, cannot suspend versioning - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidBucketState) - return - } - } - - // Update bucket versioning configuration using new bucket config system - if errCode := s3a.setBucketVersioningStatus(bucket, status); errCode != s3err.ErrNone { - glog.Errorf("PutBucketVersioningHandler save config: %d", errCode) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - writeSuccessResponseEmpty(w, r) -} diff --git a/weed/s3api/s3api_bucket_handlers_object_lock_config.go b/weed/s3api/s3api_bucket_handlers_object_lock_config.go deleted file mode 100644 index 6747e6aaf..000000000 --- a/weed/s3api/s3api_bucket_handlers_object_lock_config.go +++ /dev/null @@ -1,150 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "net/http" - - "errors" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" -) - -// PutObjectLockConfigurationHandler Put object Lock configuration -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html -func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectLockConfigurationHandler %s", bucket) - - // Check if Object Lock is available for this bucket (requires versioning) - // For bucket-level operations, return InvalidBucketState (409) when object lock is not available - if err := s3a.isObjectLockAvailable(bucket); err != nil { - glog.Errorf("PutObjectLockConfigurationHandler: object lock not available for bucket %s: %v", bucket, err) - if errors.Is(err, ErrBucketNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - } else { - // Return InvalidBucketState for bucket-level object lock operations on buckets without object lock enabled - // This matches AWS S3 behavior and s3-tests expectations (409 Conflict) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidBucketState) - } - return - } - - // Parse object lock configuration from request body - config, err := parseObjectLockConfiguration(r) - if err != nil { - glog.Errorf("PutObjectLockConfigurationHandler: failed to parse object lock config: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Validate object lock configuration - if err := ValidateObjectLockConfiguration(config); err != nil { - glog.Errorf("PutObjectLockConfigurationHandler: invalid object lock config: %v", err) - s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err)) - return - } - - // Set object lock configuration on the bucket - errCode := s3a.updateBucketConfig(bucket, func(bucketConfig *BucketConfig) error { - // Set the cached Object Lock configuration - bucketConfig.ObjectLockConfig = config - return nil - }) - - if errCode != s3err.ErrNone { - glog.Errorf("PutObjectLockConfigurationHandler: failed to set object lock config: %v", errCode) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - // Return success (HTTP 200 with no body) - w.WriteHeader(http.StatusOK) - glog.V(3).Infof("PutObjectLockConfigurationHandler: successfully set object lock config for %s", bucket) -} - -// GetObjectLockConfigurationHandler Get object Lock configuration -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html -func (s3a *S3ApiServer) GetObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectLockConfigurationHandler %s", bucket) - - // Get bucket configuration - bucketConfig, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - glog.Errorf("GetObjectLockConfigurationHandler: failed to get bucket config: %v", errCode) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - var configXML []byte - - // Check if we have cached Object Lock configuration - if bucketConfig.ObjectLockConfig != nil { - // Use cached configuration and marshal it to XML for response - marshaledXML, err := xml.Marshal(bucketConfig.ObjectLockConfig) - if err != nil { - glog.Errorf("GetObjectLockConfigurationHandler: failed to marshal cached Object Lock config: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Write XML response - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(http.StatusOK) - if _, err := w.Write([]byte(xml.Header)); err != nil { - glog.Errorf("GetObjectLockConfigurationHandler: failed to write XML header: %v", err) - return - } - if _, err := w.Write(marshaledXML); err != nil { - glog.Errorf("GetObjectLockConfigurationHandler: failed to write config XML: %v", err) - return - } - glog.V(3).Infof("GetObjectLockConfigurationHandler: successfully retrieved cached object lock config for %s", bucket) - return - } - - // Fallback: check for legacy storage in extended attributes - if bucketConfig.Entry.Extended != nil { - // Check if Object Lock is enabled via boolean flag - if enabledBytes, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]; exists { - enabled := string(enabledBytes) - if enabled == s3_constants.ObjectLockEnabled || enabled == "true" { - // Generate minimal XML configuration for enabled Object Lock without retention policies - minimalConfig := `Enabled` - configXML = []byte(minimalConfig) - } - } - } - - // If no Object Lock configuration found, return error - if len(configXML) == 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrObjectLockConfigurationNotFoundError) - return - } - - // Set response headers - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(http.StatusOK) - - // Write XML response - if _, err := w.Write([]byte(xml.Header)); err != nil { - glog.Errorf("GetObjectLockConfigurationHandler: failed to write XML header: %v", err) - return - } - - if _, err := w.Write(configXML); err != nil { - glog.Errorf("GetObjectLockConfigurationHandler: failed to write config XML: %v", err) - return - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - glog.V(3).Infof("GetObjectLockConfigurationHandler: successfully retrieved object lock config for %s", bucket) -} diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go index 3835c08e9..d5622c51c 100644 --- a/weed/s3api/s3api_bucket_handlers_test.go +++ b/weed/s3api/s3api_bucket_handlers_test.go @@ -1,250 +1,40 @@ package s3api import ( - "encoding/json" - "encoding/xml" - "net/http/httptest" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "testing" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func TestPutBucketAclCannedAclSupport(t *testing.T) { - // Test that the ExtractAcl function can handle various canned ACLs - // This tests the core functionality without requiring a fully initialized S3ApiServer +func TestListBucketsHandler(t *testing.T) { - testCases := []struct { - name string - cannedAcl string - shouldWork bool - description string - }{ - { - name: "private", - cannedAcl: s3_constants.CannedAclPrivate, - shouldWork: true, - description: "private ACL should be accepted", - }, - { - name: "public-read", - cannedAcl: s3_constants.CannedAclPublicRead, - shouldWork: true, - description: "public-read ACL should be accepted", - }, - { - name: "public-read-write", - cannedAcl: s3_constants.CannedAclPublicReadWrite, - shouldWork: true, - description: "public-read-write ACL should be accepted", - }, - { - name: "authenticated-read", - cannedAcl: s3_constants.CannedAclAuthenticatedRead, - shouldWork: true, - description: "authenticated-read ACL should be accepted", - }, - { - name: "bucket-owner-read", - cannedAcl: s3_constants.CannedAclBucketOwnerRead, - shouldWork: true, - description: "bucket-owner-read ACL should be accepted", - }, - { - name: "bucket-owner-full-control", - cannedAcl: s3_constants.CannedAclBucketOwnerFullControl, - shouldWork: true, - description: "bucket-owner-full-control ACL should be accepted", - }, - { - name: "invalid-acl", - cannedAcl: "invalid-acl-value", - shouldWork: false, - description: "invalid ACL should be rejected", - }, - } + expected := ` +2011-04-09T12:34:49Ztest12011-02-09T12:34:49Ztest2` + var response ListAllMyBucketsResult - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create a request with the specified canned ACL - req := httptest.NewRequest("PUT", "/bucket?acl", nil) - req.Header.Set(s3_constants.AmzCannedAcl, tc.cannedAcl) - req.Header.Set(s3_constants.AmzAccountId, "test-account-123") - - // Create a mock IAM for testing - mockIam := &mockIamInterface{} - - // Test the ACL extraction directly - grants, errCode := ExtractAcl(req, mockIam, "", "test-account-123", "test-account-123", "test-account-123") - - if tc.shouldWork { - assert.Equal(t, s3err.ErrNone, errCode, "Expected ACL parsing to succeed for %s", tc.cannedAcl) - assert.NotEmpty(t, grants, "Expected grants to be generated for valid ACL %s", tc.cannedAcl) - t.Logf("โœ“ PASS: %s - %s", tc.name, tc.description) - } else { - assert.NotEqual(t, s3err.ErrNone, errCode, "Expected ACL parsing to fail for invalid ACL %s", tc.cannedAcl) - t.Logf("โœ“ PASS: %s - %s", tc.name, tc.description) - } - }) - } -} - -// TestBucketWithoutACLIsNotPublicRead tests that buckets without ACLs are not public-read -func TestBucketWithoutACLIsNotPublicRead(t *testing.T) { - // Create a bucket config without ACL (like a freshly created bucket) - config := &BucketConfig{ - Name: "test-bucket", - IsPublicRead: false, // Should be explicitly false - } - - // Verify that buckets without ACL are not public-read - assert.False(t, config.IsPublicRead, "Bucket without ACL should not be public-read") -} - -func TestBucketConfigInitialization(t *testing.T) { - // Test that BucketConfig properly initializes IsPublicRead field - config := &BucketConfig{ - Name: "test-bucket", - IsPublicRead: false, // Explicitly set to false for private buckets - } - - // Verify proper initialization - assert.False(t, config.IsPublicRead, "Newly created bucket should not be public-read by default") -} - -// TestUpdateBucketConfigCacheConsistency tests that updateBucketConfigCacheFromEntry -// properly handles the IsPublicRead flag consistently with getBucketConfig -func TestUpdateBucketConfigCacheConsistency(t *testing.T) { - t.Run("bucket without ACL should have IsPublicRead=false", func(t *testing.T) { - // Simulate an entry without ACL (like a freshly created bucket) - entry := &filer_pb.Entry{ - Name: "test-bucket", - Attributes: &filer_pb.FuseAttributes{ - FileMode: 0755, - }, - // Extended is nil or doesn't contain ACL - } - - // Test what updateBucketConfigCacheFromEntry would create - config := &BucketConfig{ - Name: entry.Name, - Entry: entry, - IsPublicRead: false, // Should be explicitly false - } - - // When Extended is nil, IsPublicRead should be false - assert.False(t, config.IsPublicRead, "Bucket without Extended metadata should not be public-read") - - // When Extended exists but has no ACL key, IsPublicRead should also be false - entry.Extended = make(map[string][]byte) - entry.Extended["some-other-key"] = []byte("some-value") - - config = &BucketConfig{ - Name: entry.Name, - Entry: entry, - IsPublicRead: false, // Should be explicitly false - } - - // Simulate the else branch: no ACL means private bucket - if _, exists := entry.Extended[s3_constants.ExtAmzAclKey]; !exists { - config.IsPublicRead = false - } - - assert.False(t, config.IsPublicRead, "Bucket with Extended but no ACL should not be public-read") + var buckets []*s3.Bucket + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test1"), + CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)), + }) + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test2"), + CreationDate: aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)), }) - t.Run("bucket with public-read ACL should have IsPublicRead=true", func(t *testing.T) { - // Create a mock public-read ACL using AWS S3 SDK types - publicReadGrants := []*s3.Grant{ - { - Grantee: &s3.Grantee{ - Type: &s3_constants.GrantTypeGroup, - URI: &s3_constants.GranteeGroupAllUsers, - }, - Permission: &s3_constants.PermissionRead, - }, - } - - aclBytes, err := json.Marshal(publicReadGrants) - require.NoError(t, err) - - entry := &filer_pb.Entry{ - Name: "public-bucket", - Extended: map[string][]byte{ - s3_constants.ExtAmzAclKey: aclBytes, - }, - } - - config := &BucketConfig{ - Name: entry.Name, - Entry: entry, - IsPublicRead: false, // Start with false - } - - // Simulate what updateBucketConfigCacheFromEntry would do - if acl, exists := entry.Extended[s3_constants.ExtAmzAclKey]; exists { - config.ACL = acl - config.IsPublicRead = parseAndCachePublicReadStatus(acl) - } - - assert.True(t, config.IsPublicRead, "Bucket with public-read ACL should be public-read") - }) -} - -// mockIamInterface is a simple mock for testing -type mockIamInterface struct{} - -func (m *mockIamInterface) GetAccountNameById(canonicalId string) string { - return "test-user-" + canonicalId -} - -func (m *mockIamInterface) GetAccountIdByEmail(email string) string { - return "account-for-" + email -} - -// TestListAllMyBucketsResultNamespace verifies that the ListAllMyBucketsResult -// XML response includes the proper S3 namespace URI -func TestListAllMyBucketsResultNamespace(t *testing.T) { - // Create a sample ListAllMyBucketsResult response - response := ListAllMyBucketsResult{ - Owner: CanonicalUser{ - ID: "test-owner-id", - DisplayName: "test-owner", - }, - Buckets: ListAllMyBucketsList{ - Bucket: []ListAllMyBucketsEntry{ - { - Name: "test-bucket", - CreationDate: time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC), - }, - }, + response = ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(""), + DisplayName: aws.String(""), }, + Buckets: buckets, } - // Marshal the response to XML - xmlData, err := xml.Marshal(response) - require.NoError(t, err, "Failed to marshal XML response") - - xmlString := string(xmlData) - - // Verify that the XML contains the proper namespace - assert.Contains(t, xmlString, `xmlns="http://s3.amazonaws.com/doc/2006-03-01/"`, - "XML response should contain the S3 namespace URI") - - // Verify the root element has the correct name - assert.Contains(t, xmlString, "", "XML should contain Owner element") - assert.Contains(t, xmlString, "", "XML should contain Buckets element") - assert.Contains(t, xmlString, "", "XML should contain Bucket element") - assert.Contains(t, xmlString, "test-bucket", "XML should contain bucket name") - - t.Logf("Generated XML:\n%s", xmlString) + encoded := string(s3err.EncodeXMLResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } } diff --git a/weed/s3api/s3api_bucket_metadata_test.go b/weed/s3api/s3api_bucket_metadata_test.go deleted file mode 100644 index ac269163e..000000000 --- a/weed/s3api/s3api_bucket_metadata_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package s3api - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/cors" -) - -func TestBucketMetadataStruct(t *testing.T) { - // Test creating empty metadata - metadata := NewBucketMetadata() - if !metadata.IsEmpty() { - t.Error("New metadata should be empty") - } - - // Test setting tags - metadata.Tags["Environment"] = "production" - metadata.Tags["Owner"] = "team-alpha" - if !metadata.HasTags() { - t.Error("Metadata should have tags") - } - if metadata.IsEmpty() { - t.Error("Metadata with tags should not be empty") - } - - // Test setting encryption - encryption := &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "aws:kms", - KmsKeyId: "test-key-id", - } - metadata.Encryption = encryption - if !metadata.HasEncryption() { - t.Error("Metadata should have encryption") - } - - // Test setting CORS - maxAge := 3600 - corsRule := cors.CORSRule{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "POST"}, - AllowedHeaders: []string{"*"}, - MaxAgeSeconds: &maxAge, - } - corsConfig := &cors.CORSConfiguration{ - CORSRules: []cors.CORSRule{corsRule}, - } - metadata.CORS = corsConfig - if !metadata.HasCORS() { - t.Error("Metadata should have CORS") - } - - // Test all flags - if !metadata.HasTags() || !metadata.HasEncryption() || !metadata.HasCORS() { - t.Error("All metadata flags should be true") - } - if metadata.IsEmpty() { - t.Error("Metadata with all configurations should not be empty") - } -} - -func TestBucketMetadataUpdatePattern(t *testing.T) { - // This test demonstrates the update pattern using the function signature - // (without actually testing the S3ApiServer which would require setup) - - // Simulate what UpdateBucketMetadata would do - updateFunc := func(metadata *BucketMetadata) error { - // Add some tags - metadata.Tags["Project"] = "seaweedfs" - metadata.Tags["Version"] = "v3.0" - - // Set encryption - metadata.Encryption = &s3_pb.EncryptionConfiguration{ - SseAlgorithm: "AES256", - } - - return nil - } - - // Start with empty metadata - metadata := NewBucketMetadata() - - // Apply the update - if err := updateFunc(metadata); err != nil { - t.Fatalf("Update function failed: %v", err) - } - - // Verify the results - if len(metadata.Tags) != 2 { - t.Errorf("Expected 2 tags, got %d", len(metadata.Tags)) - } - if metadata.Tags["Project"] != "seaweedfs" { - t.Error("Project tag not set correctly") - } - if metadata.Encryption == nil || metadata.Encryption.SseAlgorithm != "AES256" { - t.Error("Encryption not set correctly") - } -} - -func TestBucketMetadataHelperFunctions(t *testing.T) { - metadata := NewBucketMetadata() - - // Test empty state - if metadata.HasTags() || metadata.HasCORS() || metadata.HasEncryption() { - t.Error("Empty metadata should have no configurations") - } - - // Test adding tags - metadata.Tags["key1"] = "value1" - if !metadata.HasTags() { - t.Error("Should have tags after adding") - } - - // Test adding CORS - metadata.CORS = &cors.CORSConfiguration{} - if !metadata.HasCORS() { - t.Error("Should have CORS after adding") - } - - // Test adding encryption - metadata.Encryption = &s3_pb.EncryptionConfiguration{} - if !metadata.HasEncryption() { - t.Error("Should have encryption after adding") - } - - // Test clearing - metadata.Tags = make(map[string]string) - metadata.CORS = nil - metadata.Encryption = nil - - if metadata.HasTags() || metadata.HasCORS() || metadata.HasEncryption() { - t.Error("Cleared metadata should have no configurations") - } - if !metadata.IsEmpty() { - t.Error("Cleared metadata should be empty") - } -} diff --git a/weed/s3api/s3api_bucket_policy_handlers.go b/weed/s3api/s3api_bucket_policy_handlers.go deleted file mode 100644 index e079eb53e..000000000 --- a/weed/s3api/s3api_bucket_policy_handlers.go +++ /dev/null @@ -1,328 +0,0 @@ -package s3api - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// Bucket policy metadata key for storing policies in filer -const BUCKET_POLICY_METADATA_KEY = "s3-bucket-policy" - -// GetBucketPolicyHandler handles GET bucket?policy requests -func (s3a *S3ApiServer) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - glog.V(3).Infof("GetBucketPolicyHandler: bucket=%s", bucket) - - // Get bucket policy from filer metadata - policyDocument, err := s3a.getBucketPolicy(bucket) - if err != nil { - if strings.Contains(err.Error(), "not found") { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucketPolicy) - } else { - glog.Errorf("Failed to get bucket policy for %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - } - return - } - - // Return policy as JSON - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - if err := json.NewEncoder(w).Encode(policyDocument); err != nil { - glog.Errorf("Failed to encode bucket policy response: %v", err) - } -} - -// PutBucketPolicyHandler handles PUT bucket?policy requests -func (s3a *S3ApiServer) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - glog.V(3).Infof("PutBucketPolicyHandler: bucket=%s", bucket) - - // Read policy document from request body - body, err := io.ReadAll(r.Body) - if err != nil { - glog.Errorf("Failed to read bucket policy request body: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPolicyDocument) - return - } - defer r.Body.Close() - - // Parse and validate policy document - var policyDoc policy.PolicyDocument - if err := json.Unmarshal(body, &policyDoc); err != nil { - glog.Errorf("Failed to parse bucket policy JSON: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPolicy) - return - } - - // Validate policy document structure - if err := policy.ValidatePolicyDocument(&policyDoc); err != nil { - glog.Errorf("Invalid bucket policy document: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPolicyDocument) - return - } - - // Additional bucket policy specific validation - if err := s3a.validateBucketPolicy(&policyDoc, bucket); err != nil { - glog.Errorf("Bucket policy validation failed: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPolicyDocument) - return - } - - // Store bucket policy - if err := s3a.setBucketPolicy(bucket, &policyDoc); err != nil { - glog.Errorf("Failed to store bucket policy for %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Update IAM integration with new bucket policy - if s3a.iam.iamIntegration != nil { - if err := s3a.updateBucketPolicyInIAM(bucket, &policyDoc); err != nil { - glog.Errorf("Failed to update IAM with bucket policy: %v", err) - // Don't fail the request, but log the warning - } - } - - w.WriteHeader(http.StatusNoContent) -} - -// DeleteBucketPolicyHandler handles DELETE bucket?policy requests -func (s3a *S3ApiServer) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - glog.V(3).Infof("DeleteBucketPolicyHandler: bucket=%s", bucket) - - // Check if bucket policy exists - if _, err := s3a.getBucketPolicy(bucket); err != nil { - if strings.Contains(err.Error(), "not found") { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucketPolicy) - } else { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - } - return - } - - // Delete bucket policy - if err := s3a.deleteBucketPolicy(bucket); err != nil { - glog.Errorf("Failed to delete bucket policy for %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Update IAM integration to remove bucket policy - if s3a.iam.iamIntegration != nil { - if err := s3a.removeBucketPolicyFromIAM(bucket); err != nil { - glog.Errorf("Failed to remove bucket policy from IAM: %v", err) - // Don't fail the request, but log the warning - } - } - - w.WriteHeader(http.StatusNoContent) -} - -// Helper functions for bucket policy storage and retrieval - -// getBucketPolicy retrieves a bucket policy from filer metadata -func (s3a *S3ApiServer) getBucketPolicy(bucket string) (*policy.PolicyDocument, error) { - - var policyDoc policy.PolicyDocument - err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: s3a.option.BucketsPath, - Name: bucket, - }) - if err != nil { - return fmt.Errorf("bucket not found: %v", err) - } - - if resp.Entry == nil { - return fmt.Errorf("bucket policy not found: no entry") - } - - policyJSON, exists := resp.Entry.Extended[BUCKET_POLICY_METADATA_KEY] - if !exists || len(policyJSON) == 0 { - return fmt.Errorf("bucket policy not found: no policy metadata") - } - - if err := json.Unmarshal(policyJSON, &policyDoc); err != nil { - return fmt.Errorf("failed to parse stored bucket policy: %v", err) - } - - return nil - }) - - if err != nil { - return nil, err - } - - return &policyDoc, nil -} - -// setBucketPolicy stores a bucket policy in filer metadata -func (s3a *S3ApiServer) setBucketPolicy(bucket string, policyDoc *policy.PolicyDocument) error { - // Serialize policy to JSON - policyJSON, err := json.Marshal(policyDoc) - if err != nil { - return fmt.Errorf("failed to serialize policy: %v", err) - } - - return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // First, get the current entry to preserve other attributes - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: s3a.option.BucketsPath, - Name: bucket, - }) - if err != nil { - return fmt.Errorf("bucket not found: %v", err) - } - - entry := resp.Entry - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - // Set the bucket policy metadata - entry.Extended[BUCKET_POLICY_METADATA_KEY] = policyJSON - - // Update the entry with new metadata - _, err = client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{ - Directory: s3a.option.BucketsPath, - Entry: entry, - }) - - return err - }) -} - -// deleteBucketPolicy removes a bucket policy from filer metadata -func (s3a *S3ApiServer) deleteBucketPolicy(bucket string) error { - return s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - // Get the current entry - resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ - Directory: s3a.option.BucketsPath, - Name: bucket, - }) - if err != nil { - return fmt.Errorf("bucket not found: %v", err) - } - - entry := resp.Entry - if entry.Extended == nil { - return nil // No policy to delete - } - - // Remove the bucket policy metadata - delete(entry.Extended, BUCKET_POLICY_METADATA_KEY) - - // Update the entry - _, err = client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{ - Directory: s3a.option.BucketsPath, - Entry: entry, - }) - - return err - }) -} - -// validateBucketPolicy performs bucket-specific policy validation -func (s3a *S3ApiServer) validateBucketPolicy(policyDoc *policy.PolicyDocument, bucket string) error { - if policyDoc.Version != "2012-10-17" { - return fmt.Errorf("unsupported policy version: %s (must be 2012-10-17)", policyDoc.Version) - } - - if len(policyDoc.Statement) == 0 { - return fmt.Errorf("policy document must contain at least one statement") - } - - for i, statement := range policyDoc.Statement { - // Bucket policies must have Principal - if statement.Principal == nil { - return fmt.Errorf("statement %d: bucket policies must specify a Principal", i) - } - - // Validate resources refer to this bucket - for _, resource := range statement.Resource { - if !s3a.validateResourceForBucket(resource, bucket) { - return fmt.Errorf("statement %d: resource %s does not match bucket %s", i, resource, bucket) - } - } - - // Validate actions are S3 actions - for _, action := range statement.Action { - if !strings.HasPrefix(action, "s3:") { - return fmt.Errorf("statement %d: bucket policies only support S3 actions, got %s", i, action) - } - } - } - - return nil -} - -// validateResourceForBucket checks if a resource ARN is valid for the given bucket -func (s3a *S3ApiServer) validateResourceForBucket(resource, bucket string) bool { - // Expected formats: - // arn:seaweed:s3:::bucket-name - // arn:seaweed:s3:::bucket-name/* - // arn:seaweed:s3:::bucket-name/path/to/object - - expectedBucketArn := fmt.Sprintf("arn:seaweed:s3:::%s", bucket) - expectedBucketWildcard := fmt.Sprintf("arn:seaweed:s3:::%s/*", bucket) - expectedBucketPath := fmt.Sprintf("arn:seaweed:s3:::%s/", bucket) - - return resource == expectedBucketArn || - resource == expectedBucketWildcard || - strings.HasPrefix(resource, expectedBucketPath) -} - -// IAM integration functions - -// updateBucketPolicyInIAM updates the IAM system with the new bucket policy -func (s3a *S3ApiServer) updateBucketPolicyInIAM(bucket string, policyDoc *policy.PolicyDocument) error { - // This would integrate with our advanced IAM system - // For now, we'll just log that the policy was updated - glog.V(2).Infof("Updated bucket policy for %s in IAM system", bucket) - - // TODO: Integrate with IAM manager to store resource-based policies - // s3a.iam.iamIntegration.iamManager.SetBucketPolicy(bucket, policyDoc) - - return nil -} - -// removeBucketPolicyFromIAM removes the bucket policy from the IAM system -func (s3a *S3ApiServer) removeBucketPolicyFromIAM(bucket string) error { - // This would remove the bucket policy from our advanced IAM system - glog.V(2).Infof("Removed bucket policy for %s from IAM system", bucket) - - // TODO: Integrate with IAM manager to remove resource-based policies - // s3a.iam.iamIntegration.iamManager.RemoveBucketPolicy(bucket) - - return nil -} - -// GetPublicAccessBlockHandler Retrieves the PublicAccessBlock configuration for an S3 bucket -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -func (s3a *S3ApiServer) GetPublicAccessBlockHandler(w http.ResponseWriter, r *http.Request) { - s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) -} - -func (s3a *S3ApiServer) PutPublicAccessBlockHandler(w http.ResponseWriter, r *http.Request) { - s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) -} - -func (s3a *S3ApiServer) DeletePublicAccessBlockHandler(w http.ResponseWriter, r *http.Request) { - s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) -} diff --git a/weed/s3api/s3api_bucket_skip_handlers.go b/weed/s3api/s3api_bucket_skip_handlers.go new file mode 100644 index 000000000..f4ca1177d --- /dev/null +++ b/weed/s3api/s3api_bucket_skip_handlers.go @@ -0,0 +1,49 @@ +package s3api + +import ( + "net/http" + + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" +) + +// GetBucketCorsHandler Get bucket CORS +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html +func (s3a *S3ApiServer) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchCORSConfiguration) +} + +// PutBucketCorsHandler Put bucket CORS +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +func (s3a *S3ApiServer) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) +} + +// DeleteBucketCorsHandler Delete bucket CORS +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html +func (s3a *S3ApiServer) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, http.StatusNoContent) +} + +// GetBucketPolicyHandler Get bucket Policy +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html +func (s3a *S3ApiServer) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucketPolicy) +} + +// PutBucketPolicyHandler Put bucket Policy +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html +func (s3a *S3ApiServer) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) +} + +// DeleteBucketPolicyHandler Delete bucket Policy +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html +func (s3a *S3ApiServer) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, http.StatusNoContent) +} + +// PutBucketAclHandler Put bucket ACL +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAcl.html +func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) +} diff --git a/weed/s3api/s3api_bucket_tagging_handlers.go b/weed/s3api/s3api_bucket_tagging_handlers.go deleted file mode 100644 index a1b116fd2..000000000 --- a/weed/s3api/s3api_bucket_tagging_handlers.go +++ /dev/null @@ -1,110 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// GetBucketTaggingHandler Returns the tag set associated with the bucket -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html -func (s3a *S3ApiServer) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketTagging %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Load bucket metadata and extract tags - metadata, err := s3a.GetBucketMetadata(bucket) - if err != nil { - glog.V(3).Infof("GetBucketTagging: failed to get bucket metadata for %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - if len(metadata.Tags) == 0 { - glog.V(3).Infof("GetBucketTagging: no tags found for bucket %s", bucket) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchTagSet) - return - } - - tags := metadata.Tags - - // Convert tags to XML response format - tagging := FromTags(tags) - writeSuccessResponseXML(w, r, tagging) -} - -// PutBucketTaggingHandler Put bucket tagging -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html -func (s3a *S3ApiServer) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketTagging %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Parse tagging configuration from request body - tagging := &Tagging{} - input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength)) - if err != nil { - glog.Errorf("PutBucketTagging read input %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - if err = xml.Unmarshal(input, tagging); err != nil { - glog.Errorf("PutBucketTagging Unmarshal %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - tags := tagging.ToTags() - - // Validate tags using existing validation - err = ValidateTags(tags) - if err != nil { - glog.Errorf("PutBucketTagging ValidateTags error %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag) - return - } - - // Store bucket tags in metadata - if err = s3a.UpdateBucketTags(bucket, tags); err != nil { - glog.Errorf("PutBucketTagging UpdateBucketTags %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - writeSuccessResponseEmpty(w, r) -} - -// DeleteBucketTaggingHandler Delete bucket tagging -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html -func (s3a *S3ApiServer) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteBucketTagging %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Remove bucket tags from metadata - if err := s3a.ClearBucketTags(bucket); err != nil { - glog.Errorf("DeleteBucketTagging ClearBucketTags %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - w.WriteHeader(http.StatusNoContent) - s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone) -} diff --git a/weed/s3api/s3api_circuit_breaker.go b/weed/s3api/s3api_circuit_breaker.go index 47efa728a..68fb0a5d2 100644 --- a/weed/s3api/s3api_circuit_breaker.go +++ b/weed/s3api/s3api_circuit_breaker.go @@ -1,16 +1,15 @@ package s3api import ( - "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "net/http" "sync" "sync/atomic" @@ -29,18 +28,16 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { limitations: make(map[string]int64), } - err := pb.WithFilerClient(false, 0, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile) - if errors.Is(err, filer_pb.ErrNotFound) { - return nil - } if err != nil { - return fmt.Errorf("read S3 circuit breaker config: %w", err) + return fmt.Errorf("read S3 circuit breaker config: %v", err) } return cb.LoadS3ApiConfigurationFromBytes(content) }) if err != nil { + glog.Infof("s3 circuit breaker not configured: %v", err) } return cb @@ -50,7 +47,7 @@ func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error cbCfg := &s3_pb.S3CircuitBreakerConfig{} if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil { glog.Warningf("unmarshal error: %v", err) - return fmt.Errorf("unmarshal error: %w", err) + return fmt.Errorf("unmarshal error: %v", err) } if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil { return err diff --git a/weed/s3api/s3api_circuit_breaker_test.go b/weed/s3api/s3api_circuit_breaker_test.go index aa8167544..5848cf164 100644 --- a/weed/s3api/s3api_circuit_breaker_test.go +++ b/weed/s3api/s3api_circuit_breaker_test.go @@ -1,9 +1,9 @@ package s3api import ( - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "sync" "sync/atomic" diff --git a/weed/s3api/s3api_conditional_headers_test.go b/weed/s3api/s3api_conditional_headers_test.go deleted file mode 100644 index 9a810c15e..000000000 --- a/weed/s3api/s3api_conditional_headers_test.go +++ /dev/null @@ -1,849 +0,0 @@ -package s3api - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// TestConditionalHeadersWithExistingObjects tests conditional headers against existing objects -// This addresses the PR feedback about missing test coverage for object existence scenarios -func TestConditionalHeadersWithExistingObjects(t *testing.T) { - bucket := "test-bucket" - object := "/test-object" - - // Mock object with known ETag and modification time - testObject := &filer_pb.Entry{ - Name: "test-object", - Extended: map[string][]byte{ - s3_constants.ExtETagKey: []byte("\"abc123\""), - }, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC).Unix(), // June 15, 2024 - FileSize: 1024, // Add file size - }, - Chunks: []*filer_pb.FileChunk{ - // Add a mock chunk to make calculateETagFromChunks work - { - FileId: "test-file-id", - Offset: 0, - Size: 1024, - }, - }, - } - - // Test If-None-Match with existing object - t.Run("IfNoneMatch_ObjectExists", func(t *testing.T) { - // Test case 1: If-None-Match=* when object exists (should fail) - t.Run("Asterisk_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object exists with If-None-Match=*, got %v", errCode) - } - }) - - // Test case 2: If-None-Match with matching ETag (should fail) - t.Run("MatchingETag_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "\"abc123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when ETag matches, got %v", errCode) - } - }) - - // Test case 3: If-None-Match with non-matching ETag (should succeed) - t.Run("NonMatchingETag_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "\"xyz789\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when ETag doesn't match, got %v", errCode) - } - }) - - // Test case 4: If-None-Match with multiple ETags, one matching (should fail) - t.Run("MultipleETags_OneMatches_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "\"xyz789\", \"abc123\", \"def456\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when one ETag matches, got %v", errCode) - } - }) - - // Test case 5: If-None-Match with multiple ETags, none matching (should succeed) - t.Run("MultipleETags_NoneMatch_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "\"xyz789\", \"def456\", \"ghi123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when no ETags match, got %v", errCode) - } - }) - }) - - // Test If-Match with existing object - t.Run("IfMatch_ObjectExists", func(t *testing.T) { - // Test case 1: If-Match with matching ETag (should succeed) - t.Run("MatchingETag_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "\"abc123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when ETag matches, got %v", errCode) - } - }) - - // Test case 2: If-Match with non-matching ETag (should fail) - t.Run("NonMatchingETag_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "\"xyz789\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when ETag doesn't match, got %v", errCode) - } - }) - - // Test case 3: If-Match with multiple ETags, one matching (should succeed) - t.Run("MultipleETags_OneMatches_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "\"xyz789\", \"abc123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when one ETag matches, got %v", errCode) - } - }) - - // Test case 4: If-Match with wildcard * (should succeed if object exists) - t.Run("Wildcard_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when If-Match=* and object exists, got %v", errCode) - } - }) - }) - - // Test If-Modified-Since with existing object - t.Run("IfModifiedSince_ObjectExists", func(t *testing.T) { - // Test case 1: If-Modified-Since with date before object modification (should succeed) - t.Run("DateBefore_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - dateBeforeModification := time.Date(2024, 6, 14, 12, 0, 0, 0, time.UTC) - req.Header.Set(s3_constants.IfModifiedSince, dateBeforeModification.Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object was modified after date, got %v", errCode) - } - }) - - // Test case 2: If-Modified-Since with date after object modification (should fail) - t.Run("DateAfter_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - dateAfterModification := time.Date(2024, 6, 16, 12, 0, 0, 0, time.UTC) - req.Header.Set(s3_constants.IfModifiedSince, dateAfterModification.Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object wasn't modified since date, got %v", errCode) - } - }) - - // Test case 3: If-Modified-Since with exact modification date (should fail - not after) - t.Run("ExactDate_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - exactDate := time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC) - req.Header.Set(s3_constants.IfModifiedSince, exactDate.Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object modification time equals header date, got %v", errCode) - } - }) - }) - - // Test If-Unmodified-Since with existing object - t.Run("IfUnmodifiedSince_ObjectExists", func(t *testing.T) { - // Test case 1: If-Unmodified-Since with date after object modification (should succeed) - t.Run("DateAfter_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - dateAfterModification := time.Date(2024, 6, 16, 12, 0, 0, 0, time.UTC) - req.Header.Set(s3_constants.IfUnmodifiedSince, dateAfterModification.Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object wasn't modified after date, got %v", errCode) - } - }) - - // Test case 2: If-Unmodified-Since with date before object modification (should fail) - t.Run("DateBefore_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(testObject) - req := createTestPutRequest(bucket, object, "test content") - dateBeforeModification := time.Date(2024, 6, 14, 12, 0, 0, 0, time.UTC) - req.Header.Set(s3_constants.IfUnmodifiedSince, dateBeforeModification.Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object was modified after date, got %v", errCode) - } - }) - }) -} - -// TestConditionalHeadersForReads tests conditional headers for read operations (GET, HEAD) -// This implements AWS S3 conditional reads behavior where different conditions return different status codes -// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-reads.html -func TestConditionalHeadersForReads(t *testing.T) { - bucket := "test-bucket" - object := "/test-read-object" - - // Mock existing object to test conditional headers against - existingObject := &filer_pb.Entry{ - Name: "test-read-object", - Extended: map[string][]byte{ - s3_constants.ExtETagKey: []byte("\"read123\""), - }, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC).Unix(), - FileSize: 1024, - }, - Chunks: []*filer_pb.FileChunk{ - { - FileId: "read-file-id", - Offset: 0, - Size: 1024, - }, - }, - } - - // Test conditional reads with existing object - t.Run("ConditionalReads_ObjectExists", func(t *testing.T) { - // Test If-None-Match with existing object (should return 304 Not Modified) - t.Run("IfNoneMatch_ObjectExists_ShouldReturn304", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfNoneMatch, "\"read123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNotModified { - t.Errorf("Expected ErrNotModified when If-None-Match matches, got %v", errCode) - } - }) - - // Test If-None-Match=* with existing object (should return 304 Not Modified) - t.Run("IfNoneMatchAsterisk_ObjectExists_ShouldReturn304", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfNoneMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNotModified { - t.Errorf("Expected ErrNotModified when If-None-Match=* with existing object, got %v", errCode) - } - }) - - // Test If-None-Match with non-matching ETag (should succeed) - t.Run("IfNoneMatch_NonMatchingETag_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfNoneMatch, "\"different-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when If-None-Match doesn't match, got %v", errCode) - } - }) - - // Test If-Match with matching ETag (should succeed) - t.Run("IfMatch_MatchingETag_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfMatch, "\"read123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when If-Match matches, got %v", errCode) - } - }) - - // Test If-Match with non-matching ETag (should return 412 Precondition Failed) - t.Run("IfMatch_NonMatchingETag_ShouldReturn412", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfMatch, "\"different-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when If-Match doesn't match, got %v", errCode) - } - }) - - // Test If-Match=* with existing object (should succeed) - t.Run("IfMatchAsterisk_ObjectExists_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when If-Match=* with existing object, got %v", errCode) - } - }) - - // Test If-Modified-Since (object modified after date - should succeed) - t.Run("IfModifiedSince_ObjectModifiedAfter_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfModifiedSince, "Sat, 14 Jun 2024 12:00:00 GMT") // Before object mtime - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object modified after If-Modified-Since date, got %v", errCode) - } - }) - - // Test If-Modified-Since (object not modified since date - should return 304) - t.Run("IfModifiedSince_ObjectNotModified_ShouldReturn304", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfModifiedSince, "Sun, 16 Jun 2024 12:00:00 GMT") // After object mtime - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNotModified { - t.Errorf("Expected ErrNotModified when object not modified since If-Modified-Since date, got %v", errCode) - } - }) - - // Test If-Unmodified-Since (object not modified since date - should succeed) - t.Run("IfUnmodifiedSince_ObjectNotModified_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfUnmodifiedSince, "Sun, 16 Jun 2024 12:00:00 GMT") // After object mtime - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object not modified since If-Unmodified-Since date, got %v", errCode) - } - }) - - // Test If-Unmodified-Since (object modified since date - should return 412) - t.Run("IfUnmodifiedSince_ObjectModified_ShouldReturn412", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfUnmodifiedSince, "Fri, 14 Jun 2024 12:00:00 GMT") // Before object mtime - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object modified since If-Unmodified-Since date, got %v", errCode) - } - }) - }) - - // Test conditional reads with non-existent object - t.Run("ConditionalReads_ObjectNotExists", func(t *testing.T) { - // Test If-None-Match with non-existent object (should succeed) - t.Run("IfNoneMatch_ObjectNotExists_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfNoneMatch, "\"any-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object doesn't exist with If-None-Match, got %v", errCode) - } - }) - - // Test If-Match with non-existent object (should return 412) - t.Run("IfMatch_ObjectNotExists_ShouldReturn412", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfMatch, "\"any-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object doesn't exist with If-Match, got %v", errCode) - } - }) - - // Test If-Modified-Since with non-existent object (should succeed) - t.Run("IfModifiedSince_ObjectNotExists_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfModifiedSince, "Sat, 15 Jun 2024 12:00:00 GMT") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object doesn't exist with If-Modified-Since, got %v", errCode) - } - }) - - // Test If-Unmodified-Since with non-existent object (should return 412) - t.Run("IfUnmodifiedSince_ObjectNotExists_ShouldReturn412", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object - - req := createTestGetRequest(bucket, object) - req.Header.Set(s3_constants.IfUnmodifiedSince, "Sat, 15 Jun 2024 12:00:00 GMT") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersForReadsWithGetter(getter, req, bucket, object) - if errCode.ErrorCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object doesn't exist with If-Unmodified-Since, got %v", errCode) - } - }) - }) -} - -// Helper function to create a GET request for testing -func createTestGetRequest(bucket, object string) *http.Request { - return &http.Request{ - Method: "GET", - Header: make(http.Header), - URL: &url.URL{ - Path: fmt.Sprintf("/%s%s", bucket, object), - }, - } -} - -// TestConditionalHeadersWithNonExistentObjects tests the original scenarios (object doesn't exist) -func TestConditionalHeadersWithNonExistentObjects(t *testing.T) { - s3a := NewS3ApiServerForTest() - if s3a == nil { - t.Skip("S3ApiServer not available for testing") - } - - bucket := "test-bucket" - object := "/test-object" - - // Test If-None-Match header when object doesn't exist - t.Run("IfNoneMatch_ObjectDoesNotExist", func(t *testing.T) { - // Test case 1: If-None-Match=* when object doesn't exist (should return ErrNone) - t.Run("Asterisk_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object doesn't exist, got %v", errCode) - } - }) - - // Test case 2: If-None-Match with specific ETag when object doesn't exist - t.Run("SpecificETag_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfNoneMatch, "\"some-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object doesn't exist, got %v", errCode) - } - }) - }) - - // Test If-Match header when object doesn't exist - t.Run("IfMatch_ObjectDoesNotExist", func(t *testing.T) { - // Test case 1: If-Match with specific ETag when object doesn't exist (should fail - critical bug fix) - t.Run("SpecificETag_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "\"some-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object doesn't exist with If-Match header, got %v", errCode) - } - }) - - // Test case 2: If-Match with wildcard * when object doesn't exist (should fail) - t.Run("Wildcard_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object doesn't exist with If-Match=*, got %v", errCode) - } - }) - }) - - // Test date format validation (works regardless of object existence) - t.Run("DateFormatValidation", func(t *testing.T) { - // Test case 1: Valid If-Modified-Since date format - t.Run("IfModifiedSince_ValidFormat", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfModifiedSince, time.Now().Format(time.RFC1123)) - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone with valid date format, got %v", errCode) - } - }) - - // Test case 2: Invalid If-Modified-Since date format - t.Run("IfModifiedSince_InvalidFormat", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfModifiedSince, "invalid-date") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrInvalidRequest { - t.Errorf("Expected ErrInvalidRequest for invalid date format, got %v", errCode) - } - }) - - // Test case 3: Invalid If-Unmodified-Since date format - t.Run("IfUnmodifiedSince_InvalidFormat", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - req.Header.Set(s3_constants.IfUnmodifiedSince, "invalid-date") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrInvalidRequest { - t.Errorf("Expected ErrInvalidRequest for invalid date format, got %v", errCode) - } - }) - }) - - // Test no conditional headers - t.Run("NoConditionalHeaders", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No object exists - req := createTestPutRequest(bucket, object, "test content") - // Don't set any conditional headers - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when no conditional headers, got %v", errCode) - } - }) -} - -// TestETagMatching tests the etagMatches helper function -func TestETagMatching(t *testing.T) { - s3a := NewS3ApiServerForTest() - if s3a == nil { - t.Skip("S3ApiServer not available for testing") - } - - testCases := []struct { - name string - headerValue string - objectETag string - expected bool - }{ - { - name: "ExactMatch", - headerValue: "\"abc123\"", - objectETag: "abc123", - expected: true, - }, - { - name: "ExactMatchWithQuotes", - headerValue: "\"abc123\"", - objectETag: "\"abc123\"", - expected: true, - }, - { - name: "NoMatch", - headerValue: "\"abc123\"", - objectETag: "def456", - expected: false, - }, - { - name: "MultipleETags_FirstMatch", - headerValue: "\"abc123\", \"def456\"", - objectETag: "abc123", - expected: true, - }, - { - name: "MultipleETags_SecondMatch", - headerValue: "\"abc123\", \"def456\"", - objectETag: "def456", - expected: true, - }, - { - name: "MultipleETags_NoMatch", - headerValue: "\"abc123\", \"def456\"", - objectETag: "ghi789", - expected: false, - }, - { - name: "WithSpaces", - headerValue: " \"abc123\" , \"def456\" ", - objectETag: "def456", - expected: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := s3a.etagMatches(tc.headerValue, tc.objectETag) - if result != tc.expected { - t.Errorf("Expected %v, got %v for headerValue='%s', objectETag='%s'", - tc.expected, result, tc.headerValue, tc.objectETag) - } - }) - } -} - -// TestConditionalHeadersIntegration tests conditional headers with full integration -func TestConditionalHeadersIntegration(t *testing.T) { - // This would be a full integration test that requires a running SeaweedFS instance - t.Skip("Integration test - requires running SeaweedFS instance") -} - -// createTestPutRequest creates a test HTTP PUT request -func createTestPutRequest(bucket, object, content string) *http.Request { - req, _ := http.NewRequest("PUT", "/"+bucket+object, bytes.NewReader([]byte(content))) - req.Header.Set("Content-Type", "application/octet-stream") - - // Set up mux vars to simulate the bucket and object extraction - // In real tests, this would be handled by the gorilla mux router - return req -} - -// NewS3ApiServerForTest creates a minimal S3ApiServer for testing -// Note: This is a simplified version for unit testing conditional logic -func NewS3ApiServerForTest() *S3ApiServer { - // In a real test environment, this would set up a proper S3ApiServer - // with filer connection, etc. For unit testing conditional header logic, - // we create a minimal instance - return &S3ApiServer{ - option: &S3ApiServerOption{ - BucketsPath: "/buckets", - }, - } -} - -// MockEntryGetter implements the simplified EntryGetter interface for testing -// Only mocks the data access dependency - tests use production getObjectETag and etagMatches -type MockEntryGetter struct { - mockEntry *filer_pb.Entry -} - -// Implement only the simplified EntryGetter interface -func (m *MockEntryGetter) getEntry(parentDirectoryPath, entryName string) (*filer_pb.Entry, error) { - if m.mockEntry != nil { - return m.mockEntry, nil - } - return nil, filer_pb.ErrNotFound -} - -// createMockEntryGetter creates a mock EntryGetter for testing -func createMockEntryGetter(mockEntry *filer_pb.Entry) *MockEntryGetter { - return &MockEntryGetter{ - mockEntry: mockEntry, - } -} - -// TestConditionalHeadersMultipartUpload tests conditional headers with multipart uploads -// This verifies AWS S3 compatibility where conditional headers only apply to CompleteMultipartUpload -func TestConditionalHeadersMultipartUpload(t *testing.T) { - bucket := "test-bucket" - object := "/test-multipart-object" - - // Mock existing object to test conditional headers against - existingObject := &filer_pb.Entry{ - Name: "test-multipart-object", - Extended: map[string][]byte{ - s3_constants.ExtETagKey: []byte("\"existing123\""), - }, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Date(2024, 6, 15, 12, 0, 0, 0, time.UTC).Unix(), - FileSize: 2048, - }, - Chunks: []*filer_pb.FileChunk{ - { - FileId: "existing-file-id", - Offset: 0, - Size: 2048, - }, - }, - } - - // Test CompleteMultipartUpload with If-None-Match: * (should fail when object exists) - t.Run("CompleteMultipartUpload_IfNoneMatchAsterisk_ObjectExists_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - // Create a mock CompleteMultipartUpload request with If-None-Match: * - req := &http.Request{ - Method: "POST", - Header: make(http.Header), - URL: &url.URL{ - RawQuery: "uploadId=test-upload-id", - }, - } - req.Header.Set(s3_constants.IfNoneMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object exists with If-None-Match=*, got %v", errCode) - } - }) - - // Test CompleteMultipartUpload with If-None-Match: * (should succeed when object doesn't exist) - t.Run("CompleteMultipartUpload_IfNoneMatchAsterisk_ObjectNotExists_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No existing object - - req := &http.Request{ - Method: "POST", - Header: make(http.Header), - URL: &url.URL{ - RawQuery: "uploadId=test-upload-id", - }, - } - req.Header.Set(s3_constants.IfNoneMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object doesn't exist with If-None-Match=*, got %v", errCode) - } - }) - - // Test CompleteMultipartUpload with If-Match (should succeed when ETag matches) - t.Run("CompleteMultipartUpload_IfMatch_ETagMatches_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := &http.Request{ - Method: "POST", - Header: make(http.Header), - URL: &url.URL{ - RawQuery: "uploadId=test-upload-id", - }, - } - req.Header.Set(s3_constants.IfMatch, "\"existing123\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when ETag matches, got %v", errCode) - } - }) - - // Test CompleteMultipartUpload with If-Match (should fail when object doesn't exist) - t.Run("CompleteMultipartUpload_IfMatch_ObjectNotExists_ShouldFail", func(t *testing.T) { - getter := createMockEntryGetter(nil) // No existing object - - req := &http.Request{ - Method: "POST", - Header: make(http.Header), - URL: &url.URL{ - RawQuery: "uploadId=test-upload-id", - }, - } - req.Header.Set(s3_constants.IfMatch, "\"any-etag\"") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrPreconditionFailed { - t.Errorf("Expected ErrPreconditionFailed when object doesn't exist with If-Match, got %v", errCode) - } - }) - - // Test CompleteMultipartUpload with If-Match wildcard (should succeed when object exists) - t.Run("CompleteMultipartUpload_IfMatchWildcard_ObjectExists_ShouldSucceed", func(t *testing.T) { - getter := createMockEntryGetter(existingObject) - - req := &http.Request{ - Method: "POST", - Header: make(http.Header), - URL: &url.URL{ - RawQuery: "uploadId=test-upload-id", - }, - } - req.Header.Set(s3_constants.IfMatch, "*") - - s3a := NewS3ApiServerForTest() - errCode := s3a.checkConditionalHeadersWithGetter(getter, req, bucket, object) - if errCode != s3err.ErrNone { - t.Errorf("Expected ErrNone when object exists with If-Match=*, got %v", errCode) - } - }) -} diff --git a/weed/s3api/s3api_copy_size_calculation.go b/weed/s3api/s3api_copy_size_calculation.go deleted file mode 100644 index a11c46cdf..000000000 --- a/weed/s3api/s3api_copy_size_calculation.go +++ /dev/null @@ -1,239 +0,0 @@ -package s3api - -import ( - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// CopySizeCalculator handles size calculations for different copy scenarios -type CopySizeCalculator struct { - srcSize int64 - srcEncrypted bool - dstEncrypted bool - srcType EncryptionType - dstType EncryptionType - isCompressed bool -} - -// EncryptionType represents different encryption types -type EncryptionType int - -const ( - EncryptionTypeNone EncryptionType = iota - EncryptionTypeSSEC - EncryptionTypeSSEKMS - EncryptionTypeSSES3 -) - -// NewCopySizeCalculator creates a new size calculator for copy operations -func NewCopySizeCalculator(entry *filer_pb.Entry, r *http.Request) *CopySizeCalculator { - calc := &CopySizeCalculator{ - srcSize: int64(entry.Attributes.FileSize), - isCompressed: isCompressedEntry(entry), - } - - // Determine source encryption type - calc.srcType, calc.srcEncrypted = getSourceEncryptionType(entry.Extended) - - // Determine destination encryption type - calc.dstType, calc.dstEncrypted = getDestinationEncryptionType(r) - - return calc -} - -// CalculateTargetSize calculates the expected size of the target object -func (calc *CopySizeCalculator) CalculateTargetSize() int64 { - // For compressed objects, size calculation is complex - if calc.isCompressed { - return -1 // Indicates unknown size - } - - switch { - case !calc.srcEncrypted && !calc.dstEncrypted: - // Plain โ†’ Plain: no size change - return calc.srcSize - - case !calc.srcEncrypted && calc.dstEncrypted: - // Plain โ†’ Encrypted: no overhead since IV is in metadata - return calc.srcSize - - case calc.srcEncrypted && !calc.dstEncrypted: - // Encrypted โ†’ Plain: no overhead since IV is in metadata - return calc.srcSize - - case calc.srcEncrypted && calc.dstEncrypted: - // Encrypted โ†’ Encrypted: no overhead since IV is in metadata - return calc.srcSize - - default: - return calc.srcSize - } -} - -// CalculateActualSize calculates the actual unencrypted size of the content -func (calc *CopySizeCalculator) CalculateActualSize() int64 { - // With IV in metadata, encrypted and unencrypted sizes are the same - return calc.srcSize -} - -// CalculateEncryptedSize calculates the encrypted size for the given encryption type -func (calc *CopySizeCalculator) CalculateEncryptedSize(encType EncryptionType) int64 { - // With IV in metadata, encrypted size equals actual size - return calc.CalculateActualSize() -} - -// getSourceEncryptionType determines the encryption type of the source object -func getSourceEncryptionType(metadata map[string][]byte) (EncryptionType, bool) { - if IsSSECEncrypted(metadata) { - return EncryptionTypeSSEC, true - } - if IsSSEKMSEncrypted(metadata) { - return EncryptionTypeSSEKMS, true - } - if IsSSES3EncryptedInternal(metadata) { - return EncryptionTypeSSES3, true - } - return EncryptionTypeNone, false -} - -// getDestinationEncryptionType determines the encryption type for the destination -func getDestinationEncryptionType(r *http.Request) (EncryptionType, bool) { - if IsSSECRequest(r) { - return EncryptionTypeSSEC, true - } - if IsSSEKMSRequest(r) { - return EncryptionTypeSSEKMS, true - } - if IsSSES3RequestInternal(r) { - return EncryptionTypeSSES3, true - } - return EncryptionTypeNone, false -} - -// isCompressedEntry checks if the entry represents a compressed object -func isCompressedEntry(entry *filer_pb.Entry) bool { - // Check for compression indicators in metadata - if compressionType, exists := entry.Extended["compression"]; exists { - return string(compressionType) != "" - } - - // Check MIME type for compressed formats - mimeType := entry.Attributes.Mime - compressedMimeTypes := []string{ - "application/gzip", - "application/x-gzip", - "application/zip", - "application/x-compress", - "application/x-compressed", - } - - for _, compressedType := range compressedMimeTypes { - if mimeType == compressedType { - return true - } - } - - return false -} - -// SizeTransitionInfo provides detailed information about size changes during copy -type SizeTransitionInfo struct { - SourceSize int64 - TargetSize int64 - ActualSize int64 - SizeChange int64 - SourceType EncryptionType - TargetType EncryptionType - IsCompressed bool - RequiresResize bool -} - -// GetSizeTransitionInfo returns detailed size transition information -func (calc *CopySizeCalculator) GetSizeTransitionInfo() *SizeTransitionInfo { - targetSize := calc.CalculateTargetSize() - actualSize := calc.CalculateActualSize() - - info := &SizeTransitionInfo{ - SourceSize: calc.srcSize, - TargetSize: targetSize, - ActualSize: actualSize, - SizeChange: targetSize - calc.srcSize, - SourceType: calc.srcType, - TargetType: calc.dstType, - IsCompressed: calc.isCompressed, - RequiresResize: targetSize != calc.srcSize, - } - - return info -} - -// String returns a string representation of the encryption type -func (e EncryptionType) String() string { - switch e { - case EncryptionTypeNone: - return "None" - case EncryptionTypeSSEC: - return s3_constants.SSETypeC - case EncryptionTypeSSEKMS: - return s3_constants.SSETypeKMS - case EncryptionTypeSSES3: - return s3_constants.SSETypeS3 - default: - return "Unknown" - } -} - -// OptimizedSizeCalculation provides size calculations optimized for different scenarios -type OptimizedSizeCalculation struct { - Strategy UnifiedCopyStrategy - SourceSize int64 - TargetSize int64 - ActualContentSize int64 - EncryptionOverhead int64 - CanPreallocate bool - RequiresStreaming bool -} - -// CalculateOptimizedSizes calculates sizes optimized for the copy strategy -func CalculateOptimizedSizes(entry *filer_pb.Entry, r *http.Request, strategy UnifiedCopyStrategy) *OptimizedSizeCalculation { - calc := NewCopySizeCalculator(entry, r) - info := calc.GetSizeTransitionInfo() - - result := &OptimizedSizeCalculation{ - Strategy: strategy, - SourceSize: info.SourceSize, - TargetSize: info.TargetSize, - ActualContentSize: info.ActualSize, - CanPreallocate: !info.IsCompressed && info.TargetSize > 0, - RequiresStreaming: info.IsCompressed || info.TargetSize < 0, - } - - // Calculate encryption overhead for the target - // With IV in metadata, all encryption overhead is 0 - result.EncryptionOverhead = 0 - - // Adjust based on strategy - switch strategy { - case CopyStrategyDirect: - // Direct copy: no size change - result.TargetSize = result.SourceSize - result.CanPreallocate = true - - case CopyStrategyKeyRotation: - // Key rotation: size might change slightly due to different IVs - if info.SourceType == EncryptionTypeSSEC && info.TargetType == EncryptionTypeSSEC { - // SSE-C key rotation: same overhead - result.TargetSize = result.SourceSize - } - result.CanPreallocate = true - - case CopyStrategyEncrypt, CopyStrategyDecrypt, CopyStrategyReencrypt: - // Size changes based on encryption transition - result.TargetSize = info.TargetSize - result.CanPreallocate = !info.IsCompressed - } - - return result -} diff --git a/weed/s3api/s3api_copy_validation.go b/weed/s3api/s3api_copy_validation.go deleted file mode 100644 index deb292a2a..000000000 --- a/weed/s3api/s3api_copy_validation.go +++ /dev/null @@ -1,296 +0,0 @@ -package s3api - -import ( - "fmt" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// CopyValidationError represents validation errors during copy operations -type CopyValidationError struct { - Code s3err.ErrorCode - Message string -} - -func (e *CopyValidationError) Error() string { - return e.Message -} - -// ValidateCopyEncryption performs comprehensive validation of copy encryption parameters -func ValidateCopyEncryption(srcMetadata map[string][]byte, headers http.Header) error { - // Validate SSE-C copy requirements - if err := validateSSECCopyRequirements(srcMetadata, headers); err != nil { - return err - } - - // Validate SSE-KMS copy requirements - if err := validateSSEKMSCopyRequirements(srcMetadata, headers); err != nil { - return err - } - - // Validate incompatible encryption combinations - if err := validateEncryptionCompatibility(headers); err != nil { - return err - } - - return nil -} - -// validateSSECCopyRequirements validates SSE-C copy header requirements -func validateSSECCopyRequirements(srcMetadata map[string][]byte, headers http.Header) error { - srcIsSSEC := IsSSECEncrypted(srcMetadata) - hasCopyHeaders := hasSSECCopyHeaders(headers) - hasSSECHeaders := hasSSECHeaders(headers) - - // If source is SSE-C encrypted, copy headers are required - if srcIsSSEC && !hasCopyHeaders { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C encrypted source requires copy source encryption headers", - } - } - - // If copy headers are provided, source must be SSE-C encrypted - if hasCopyHeaders && !srcIsSSEC { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C copy headers provided but source is not SSE-C encrypted", - } - } - - // Validate copy header completeness - if hasCopyHeaders { - if err := validateSSECCopyHeaderCompleteness(headers); err != nil { - return err - } - } - - // Validate destination SSE-C headers if present - if hasSSECHeaders { - if err := validateSSECHeaderCompleteness(headers); err != nil { - return err - } - } - - return nil -} - -// validateSSEKMSCopyRequirements validates SSE-KMS copy requirements -func validateSSEKMSCopyRequirements(srcMetadata map[string][]byte, headers http.Header) error { - dstIsSSEKMS := IsSSEKMSRequest(&http.Request{Header: headers}) - - // Validate KMS key ID format if provided - if dstIsSSEKMS { - keyID := headers.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - if keyID != "" && !isValidKMSKeyID(keyID) { - return &CopyValidationError{ - Code: s3err.ErrKMSKeyNotFound, - Message: fmt.Sprintf("Invalid KMS key ID format: %s", keyID), - } - } - } - - // Validate encryption context format if provided - if contextHeader := headers.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" { - if !dstIsSSEKMS { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "Encryption context can only be used with SSE-KMS", - } - } - - // Validate base64 encoding and JSON format - if err := validateEncryptionContext(contextHeader); err != nil { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: fmt.Sprintf("Invalid encryption context: %v", err), - } - } - } - - return nil -} - -// validateEncryptionCompatibility validates that encryption methods are not conflicting -func validateEncryptionCompatibility(headers http.Header) error { - hasSSEC := hasSSECHeaders(headers) - hasSSEKMS := headers.Get(s3_constants.AmzServerSideEncryption) == "aws:kms" - hasSSES3 := headers.Get(s3_constants.AmzServerSideEncryption) == "AES256" - - // Count how many encryption methods are specified - encryptionCount := 0 - if hasSSEC { - encryptionCount++ - } - if hasSSEKMS { - encryptionCount++ - } - if hasSSES3 { - encryptionCount++ - } - - // Only one encryption method should be specified - if encryptionCount > 1 { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "Multiple encryption methods specified - only one is allowed", - } - } - - return nil -} - -// validateSSECCopyHeaderCompleteness validates that all required SSE-C copy headers are present -func validateSSECCopyHeaderCompleteness(headers http.Header) error { - algorithm := headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerAlgorithm) - key := headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKey) - keyMD5 := headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKeyMD5) - - if algorithm == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C copy customer algorithm header is required", - } - } - - if key == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C copy customer key header is required", - } - } - - if keyMD5 == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C copy customer key MD5 header is required", - } - } - - // Validate algorithm - if algorithm != "AES256" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: fmt.Sprintf("Unsupported SSE-C algorithm: %s", algorithm), - } - } - - return nil -} - -// validateSSECHeaderCompleteness validates that all required SSE-C headers are present -func validateSSECHeaderCompleteness(headers http.Header) error { - algorithm := headers.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - key := headers.Get(s3_constants.AmzServerSideEncryptionCustomerKey) - keyMD5 := headers.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - - if algorithm == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C customer algorithm header is required", - } - } - - if key == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C customer key header is required", - } - } - - if keyMD5 == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "SSE-C customer key MD5 header is required", - } - } - - // Validate algorithm - if algorithm != "AES256" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: fmt.Sprintf("Unsupported SSE-C algorithm: %s", algorithm), - } - } - - return nil -} - -// Helper functions for header detection -func hasSSECCopyHeaders(headers http.Header) bool { - return headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerAlgorithm) != "" || - headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKey) != "" || - headers.Get(s3_constants.AmzCopySourceServerSideEncryptionCustomerKeyMD5) != "" -} - -func hasSSECHeaders(headers http.Header) bool { - return headers.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" || - headers.Get(s3_constants.AmzServerSideEncryptionCustomerKey) != "" || - headers.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) != "" -} - -// validateEncryptionContext validates the encryption context header format -func validateEncryptionContext(contextHeader string) error { - // This would validate base64 encoding and JSON format - // Implementation would decode base64 and parse JSON - // For now, just check it's not empty - if contextHeader == "" { - return fmt.Errorf("encryption context cannot be empty") - } - return nil -} - -// ValidateCopySource validates the copy source path and permissions -func ValidateCopySource(copySource string, srcBucket, srcObject string) error { - if copySource == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidCopySource, - Message: "Copy source header is required", - } - } - - if srcBucket == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidCopySource, - Message: "Source bucket cannot be empty", - } - } - - if srcObject == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidCopySource, - Message: "Source object cannot be empty", - } - } - - return nil -} - -// ValidateCopyDestination validates the copy destination -func ValidateCopyDestination(dstBucket, dstObject string) error { - if dstBucket == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "Destination bucket cannot be empty", - } - } - - if dstObject == "" { - return &CopyValidationError{ - Code: s3err.ErrInvalidRequest, - Message: "Destination object cannot be empty", - } - } - - return nil -} - -// MapCopyValidationError maps validation errors to appropriate S3 error codes -func MapCopyValidationError(err error) s3err.ErrorCode { - if validationErr, ok := err.(*CopyValidationError); ok { - return validationErr.Code - } - return s3err.ErrInvalidRequest -} diff --git a/weed/s3api/s3api_governance_permissions_test.go b/weed/s3api/s3api_governance_permissions_test.go deleted file mode 100644 index 2b8a35232..000000000 --- a/weed/s3api/s3api_governance_permissions_test.go +++ /dev/null @@ -1,599 +0,0 @@ -package s3api - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// TestCheckGovernanceBypassPermissionResourceGeneration tests that the function -// correctly generates resource paths for the permission check -func TestCheckGovernanceBypassPermissionResourceGeneration(t *testing.T) { - tests := []struct { - name string - bucket string - object string - expectedPath string - description string - }{ - { - name: "simple_object", - bucket: "test-bucket", - object: "test-object.txt", - expectedPath: "test-bucket/test-object.txt", - description: "Simple bucket and object should be joined with slash", - }, - { - name: "object_with_leading_slash", - bucket: "test-bucket", - object: "/test-object.txt", - expectedPath: "test-bucket/test-object.txt", - description: "Leading slash should be trimmed from object name", - }, - { - name: "nested_object", - bucket: "test-bucket", - object: "/folder/subfolder/test-object.txt", - expectedPath: "test-bucket/folder/subfolder/test-object.txt", - description: "Nested object path should be handled correctly", - }, - { - name: "empty_object", - bucket: "test-bucket", - object: "", - expectedPath: "test-bucket/", - description: "Empty object should result in bucket with trailing slash", - }, - { - name: "root_object", - bucket: "test-bucket", - object: "/", - expectedPath: "test-bucket/", - description: "Root object should result in bucket with trailing slash", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test the resource generation logic used in checkGovernanceBypassPermission - resource := strings.TrimPrefix(tt.object, "/") - actualPath := tt.bucket + "/" + resource - - if actualPath != tt.expectedPath { - t.Errorf("Resource path generation failed. Expected: %s, Got: %s. %s", - tt.expectedPath, actualPath, tt.description) - } - }) - } -} - -// TestCheckGovernanceBypassPermissionActionGeneration tests that the function -// correctly generates action strings for IAM checking -func TestCheckGovernanceBypassPermissionActionGeneration(t *testing.T) { - tests := []struct { - name string - bucket string - object string - expectedBypassAction string - expectedAdminAction string - description string - }{ - { - name: "bypass_action_generation", - bucket: "test-bucket", - object: "test-object.txt", - expectedBypassAction: "BypassGovernanceRetention:test-bucket/test-object.txt", - expectedAdminAction: "Admin:test-bucket/test-object.txt", - description: "Actions should be properly formatted with resource path", - }, - { - name: "leading_slash_handling", - bucket: "test-bucket", - object: "/test-object.txt", - expectedBypassAction: "BypassGovernanceRetention:test-bucket/test-object.txt", - expectedAdminAction: "Admin:test-bucket/test-object.txt", - description: "Leading slash should be trimmed in action generation", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test the action generation logic used in checkGovernanceBypassPermission - resource := strings.TrimPrefix(tt.object, "/") - resourcePath := tt.bucket + "/" + resource - - bypassAction := s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION + ":" + resourcePath - adminAction := s3_constants.ACTION_ADMIN + ":" + resourcePath - - if bypassAction != tt.expectedBypassAction { - t.Errorf("Bypass action generation failed. Expected: %s, Got: %s. %s", - tt.expectedBypassAction, bypassAction, tt.description) - } - - if adminAction != tt.expectedAdminAction { - t.Errorf("Admin action generation failed. Expected: %s, Got: %s. %s", - tt.expectedAdminAction, adminAction, tt.description) - } - }) - } -} - -// TestCheckGovernanceBypassPermissionErrorHandling tests error handling scenarios -func TestCheckGovernanceBypassPermissionErrorHandling(t *testing.T) { - // Note: This test demonstrates the expected behavior for different error scenarios - // without requiring full IAM setup - - tests := []struct { - name string - bucket string - object string - description string - }{ - { - name: "empty_bucket", - bucket: "", - object: "test-object.txt", - description: "Empty bucket should be handled gracefully", - }, - { - name: "special_characters", - bucket: "test-bucket", - object: "test object with spaces.txt", - description: "Objects with special characters should be handled", - }, - { - name: "unicode_characters", - bucket: "test-bucket", - object: "ๆต‹่ฏ•ๆ–‡ไปถ.txt", - description: "Objects with unicode characters should be handled", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test that the function doesn't panic with various inputs - // This would normally call checkGovernanceBypassPermission - // but since we don't have a full S3ApiServer setup, we just test - // that the resource generation logic works without panicking - resource := strings.TrimPrefix(tt.object, "/") - resourcePath := tt.bucket + "/" + resource - - // Verify the resource path is generated - if resourcePath == "" { - t.Errorf("Resource path should not be empty for test case: %s", tt.description) - } - - t.Logf("Generated resource path for %s: %s", tt.description, resourcePath) - }) - } -} - -// TestCheckGovernanceBypassPermissionIntegrationBehavior documents the expected behavior -// when integrated with a full IAM system -func TestCheckGovernanceBypassPermissionIntegrationBehavior(t *testing.T) { - t.Skip("Documentation test - describes expected behavior with full IAM integration") - - // This test documents the expected behavior when checkGovernanceBypassPermission - // is called with a full IAM system: - // - // 1. Function calls s3a.iam.authRequest() with the bypass action - // 2. If authRequest returns errCode != s3err.ErrNone, function returns false - // 3. If authRequest succeeds, function checks identity.canDo() with the bypass action - // 4. If canDo() returns true, function returns true - // 5. If bypass permission fails, function checks admin action with identity.canDo() - // 6. If admin action succeeds, function returns true and logs admin access - // 7. If all checks fail, function returns false - // - // The function correctly uses: - // - s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION for bypass permission - // - s3_constants.ACTION_ADMIN for admin permission - // - Proper resource path generation with bucket/object format - // - Trimming of leading slashes from object names -} - -// TestGovernanceBypassPermission was removed because it tested the old -// insecure behavior of trusting the AmzIsAdmin header. The new implementation -// uses proper IAM authentication instead of relying on client-provided headers. - -// Test specifically for users with IAM bypass permission -func TestGovernanceBypassWithIAMPermission(t *testing.T) { - // This test demonstrates the expected behavior for non-admin users with bypass permission - // In a real implementation, this would integrate with the full IAM system - - t.Skip("Integration test requires full IAM setup - demonstrates expected behavior") - - // The expected behavior would be: - // 1. Non-admin user makes request with bypass header - // 2. checkGovernanceBypassPermission calls s3a.iam.authRequest - // 3. authRequest validates user identity and checks permissions - // 4. If user has s3:BypassGovernanceRetention permission, return true - // 5. Otherwise return false - - // For now, the function correctly returns false for non-admin users - // when the IAM system doesn't have the user configured with bypass permission -} - -func TestGovernancePermissionIntegration(t *testing.T) { - // Note: This test demonstrates the expected integration behavior - // In a real implementation, this would require setting up a proper IAM mock - // with identities that have the bypass governance permission - - t.Skip("Integration test requires full IAM setup - demonstrates expected behavior") - - // This test would verify: - // 1. User with BypassGovernanceRetention permission can bypass governance - // 2. User without permission cannot bypass governance - // 3. Admin users can always bypass governance - // 4. Anonymous users cannot bypass governance -} - -func TestGovernanceBypassHeader(t *testing.T) { - tests := []struct { - name string - headerValue string - expectedResult bool - description string - }{ - { - name: "bypass_header_true", - headerValue: "true", - expectedResult: true, - description: "Header with 'true' value should enable bypass", - }, - { - name: "bypass_header_false", - headerValue: "false", - expectedResult: false, - description: "Header with 'false' value should not enable bypass", - }, - { - name: "bypass_header_empty", - headerValue: "", - expectedResult: false, - description: "Empty header should not enable bypass", - }, - { - name: "bypass_header_invalid", - headerValue: "invalid", - expectedResult: false, - description: "Invalid header value should not enable bypass", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := httptest.NewRequest("DELETE", "/bucket/object", nil) - if tt.headerValue != "" { - req.Header.Set("x-amz-bypass-governance-retention", tt.headerValue) - } - - result := req.Header.Get("x-amz-bypass-governance-retention") == "true" - - if result != tt.expectedResult { - t.Errorf("bypass header check = %v, want %v. %s", result, tt.expectedResult, tt.description) - } - }) - } -} - -func TestGovernanceRetentionModeChecking(t *testing.T) { - tests := []struct { - name string - retentionMode string - bypassGovernance bool - hasPermission bool - expectedError bool - expectedErrorType string - description string - }{ - { - name: "compliance_mode_cannot_bypass", - retentionMode: s3_constants.RetentionModeCompliance, - bypassGovernance: true, - hasPermission: true, - expectedError: true, - expectedErrorType: "compliance mode", - description: "Compliance mode should not be bypassable even with permission", - }, - { - name: "governance_mode_without_bypass", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: false, - hasPermission: false, - expectedError: true, - expectedErrorType: "governance mode", - description: "Governance mode should be blocked without bypass", - }, - { - name: "governance_mode_with_bypass_no_permission", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: true, - hasPermission: false, - expectedError: true, - expectedErrorType: "permission", - description: "Governance mode bypass should fail without permission", - }, - { - name: "governance_mode_with_bypass_and_permission", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: true, - hasPermission: true, - expectedError: false, - expectedErrorType: "", - description: "Governance mode bypass should succeed with permission", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test validates the logic without actually needing the full implementation - // This demonstrates the expected behavior patterns - - var hasError bool - var errorType string - - if tt.retentionMode == s3_constants.RetentionModeCompliance { - hasError = true - errorType = "compliance mode" - } else if tt.retentionMode == s3_constants.RetentionModeGovernance { - if !tt.bypassGovernance { - hasError = true - errorType = "governance mode" - } else if !tt.hasPermission { - hasError = true - errorType = "permission" - } - } - - if hasError != tt.expectedError { - t.Errorf("expected error: %v, got error: %v. %s", tt.expectedError, hasError, tt.description) - } - - if tt.expectedError && !strings.Contains(errorType, tt.expectedErrorType) { - t.Errorf("expected error type containing '%s', got '%s'. %s", tt.expectedErrorType, errorType, tt.description) - } - }) - } -} - -func TestGovernancePermissionActionGeneration(t *testing.T) { - tests := []struct { - name string - bucket string - object string - expectedAction string - description string - }{ - { - name: "bucket_and_object_action", - bucket: "test-bucket", - object: "/test-object", // Object has "/" prefix from GetBucketAndObject - expectedAction: "BypassGovernanceRetention:test-bucket/test-object", - description: "Action should be generated correctly for bucket and object", - }, - { - name: "bucket_only_action", - bucket: "test-bucket", - object: "", - expectedAction: "BypassGovernanceRetention:test-bucket", - description: "Action should be generated correctly for bucket only", - }, - { - name: "nested_object_action", - bucket: "test-bucket", - object: "/folder/subfolder/object", // Object has "/" prefix from GetBucketAndObject - expectedAction: "BypassGovernanceRetention:test-bucket/folder/subfolder/object", - description: "Action should be generated correctly for nested objects", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - action := s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION + ":" + tt.bucket + tt.object - - if action != tt.expectedAction { - t.Errorf("generated action: %s, expected: %s. %s", action, tt.expectedAction, tt.description) - } - }) - } -} - -// TestGovernancePermissionEndToEnd tests the complete object lock permission flow -func TestGovernancePermissionEndToEnd(t *testing.T) { - t.Skip("End-to-end testing requires full S3 API server setup - demonstrates expected behavior") - - // This test demonstrates the end-to-end flow that would be tested in a full integration test - // The checkObjectLockPermissions method is called by: - // 1. DeleteObjectHandler - when versioning is enabled and object lock is configured - // 2. DeleteMultipleObjectsHandler - for each object in versioned buckets - // 3. PutObjectHandler - via checkObjectLockPermissionsForPut for versioned buckets - // 4. PutObjectRetentionHandler - when setting retention on objects - // - // Each handler: - // - Extracts bypassGovernance from "x-amz-bypass-governance-retention" header - // - Calls checkObjectLockPermissions with the appropriate parameters - // - Handles the returned errors appropriately (ErrAccessDenied, etc.) - // - // The method integrates with the IAM system through checkGovernanceBypassPermission - // which validates the s3:BypassGovernanceRetention permission -} - -// TestGovernancePermissionHTTPFlow tests the HTTP header processing and method calls -func TestGovernancePermissionHTTPFlow(t *testing.T) { - tests := []struct { - name string - headerValue string - expectedBypassGovernance bool - }{ - { - name: "bypass_header_true", - headerValue: "true", - expectedBypassGovernance: true, - }, - { - name: "bypass_header_false", - headerValue: "false", - expectedBypassGovernance: false, - }, - { - name: "bypass_header_missing", - headerValue: "", - expectedBypassGovernance: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock HTTP request - req, _ := http.NewRequest("DELETE", "/bucket/test-object", nil) - if tt.headerValue != "" { - req.Header.Set("x-amz-bypass-governance-retention", tt.headerValue) - } - - // Test the header processing logic used in handlers - bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true" - - if bypassGovernance != tt.expectedBypassGovernance { - t.Errorf("Expected bypassGovernance to be %v, got %v", tt.expectedBypassGovernance, bypassGovernance) - } - }) - } -} - -// TestGovernancePermissionMethodCalls tests that the governance permission methods are called correctly -func TestGovernancePermissionMethodCalls(t *testing.T) { - // Test that demonstrates the method call pattern used in handlers - - // This is the pattern used in DeleteObjectHandler: - t.Run("delete_object_handler_pattern", func(t *testing.T) { - req, _ := http.NewRequest("DELETE", "/bucket/test-object", nil) - req.Header.Set("x-amz-bypass-governance-retention", "true") - - // Extract parameters as done in the handler - bucket, object := s3_constants.GetBucketAndObject(req) - versionId := req.URL.Query().Get("versionId") - bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true" - - // Verify the parameters are extracted correctly - // Note: The actual bucket and object extraction depends on the URL structure - t.Logf("Extracted bucket: %s, object: %s", bucket, object) - if versionId != "" { - t.Errorf("Expected versionId to be empty, got %v", versionId) - } - if !bypassGovernance { - t.Errorf("Expected bypassGovernance to be true") - } - }) - - // This is the pattern used in PutObjectHandler: - t.Run("put_object_handler_pattern", func(t *testing.T) { - req, _ := http.NewRequest("PUT", "/bucket/test-object", nil) - req.Header.Set("x-amz-bypass-governance-retention", "true") - - // Extract parameters as done in the handler - bucket, object := s3_constants.GetBucketAndObject(req) - bypassGovernance := req.Header.Get("x-amz-bypass-governance-retention") == "true" - versioningEnabled := true // Would be determined by isVersioningEnabled(bucket) - - // Verify the parameters are extracted correctly - // Note: The actual bucket and object extraction depends on the URL structure - t.Logf("Extracted bucket: %s, object: %s", bucket, object) - if !bypassGovernance { - t.Errorf("Expected bypassGovernance to be true") - } - if !versioningEnabled { - t.Errorf("Expected versioningEnabled to be true") - } - }) -} - -// TestGovernanceBypassNotPermittedError tests that ErrGovernanceBypassNotPermitted -// is returned when bypass is requested but the user lacks permission -func TestGovernanceBypassNotPermittedError(t *testing.T) { - // Test the error constant itself - if ErrGovernanceBypassNotPermitted == nil { - t.Error("ErrGovernanceBypassNotPermitted should be defined") - } - - // Verify the error message - expectedMessage := "user does not have permission to bypass governance retention" - if ErrGovernanceBypassNotPermitted.Error() != expectedMessage { - t.Errorf("expected error message '%s', got '%s'", - expectedMessage, ErrGovernanceBypassNotPermitted.Error()) - } - - // Test the scenario where this error should be returned - // This documents the expected behavior when: - // 1. Object is under governance retention - // 2. bypassGovernance is true - // 3. checkGovernanceBypassPermission returns false - testCases := []struct { - name string - retentionMode string - bypassGovernance bool - hasPermission bool - expectedError error - description string - }{ - { - name: "governance_bypass_without_permission", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: true, - hasPermission: false, - expectedError: ErrGovernanceBypassNotPermitted, - description: "Should return ErrGovernanceBypassNotPermitted when bypass is requested but user lacks permission", - }, - { - name: "governance_bypass_with_permission", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: true, - hasPermission: true, - expectedError: nil, - description: "Should succeed when bypass is requested and user has permission", - }, - { - name: "governance_no_bypass", - retentionMode: s3_constants.RetentionModeGovernance, - bypassGovernance: false, - hasPermission: false, - expectedError: ErrGovernanceModeActive, - description: "Should return ErrGovernanceModeActive when bypass is not requested", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // This test documents the expected behavior pattern - // The actual checkObjectLockPermissions method implements this logic: - // if retention.Mode == s3_constants.RetentionModeGovernance { - // if !bypassGovernance { - // return ErrGovernanceModeActive - // } - // if !s3a.checkGovernanceBypassPermission(request, bucket, object) { - // return ErrGovernanceBypassNotPermitted - // } - // } - - var simulatedError error - if tc.retentionMode == s3_constants.RetentionModeGovernance { - if !tc.bypassGovernance { - simulatedError = ErrGovernanceModeActive - } else if !tc.hasPermission { - simulatedError = ErrGovernanceBypassNotPermitted - } - } - - if simulatedError != tc.expectedError { - t.Errorf("expected error %v, got %v. %s", tc.expectedError, simulatedError, tc.description) - } - - // Verify ErrGovernanceBypassNotPermitted is returned in the right case - if tc.name == "governance_bypass_without_permission" && simulatedError != ErrGovernanceBypassNotPermitted { - t.Errorf("Test case should return ErrGovernanceBypassNotPermitted but got %v", simulatedError) - } - }) - } -} diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index c146a8b15..4ace4bb21 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -3,23 +3,22 @@ package s3api import ( "encoding/base64" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "google.golang.org/grpc" "net/http" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "google.golang.org/grpc" - - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) var _ = filer_pb.FilerClient(&S3ApiServer{}) func (s3a *S3ApiServer) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error { - return pb.WithGrpcClient(streamingMode, s3a.randomClientId, func(grpcConnection *grpc.ClientConn) error { + return pb.WithGrpcClient(streamingMode, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) - }, s3a.option.Filer.ToGrpcAddress(), false, s3a.option.GrpcDialOption) + }, s3a.option.Filer.ToGrpcAddress(), s3a.option.GrpcDialOption) } @@ -27,10 +26,6 @@ func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string { return location.Url } -func (s3a *S3ApiServer) GetDataCenter() string { - return s3a.option.DataCenter -} - func writeSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) { s3err.WriteXMLResponse(w, r, http.StatusOK, response) s3err.PostLog(r, http.StatusOK, s3err.ErrNone) @@ -40,10 +35,6 @@ func writeSuccessResponseEmpty(w http.ResponseWriter, r *http.Request) { s3err.WriteEmptyResponse(w, r, http.StatusOK) } -func writeFailureResponse(w http.ResponseWriter, r *http.Request, errCode s3err.ErrorCode) { - s3err.WriteErrorResponse(w, r, errCode) -} - func validateContentMd5(h http.Header) ([]byte, error) { md5B64, ok := h["Content-Md5"] if ok { diff --git a/weed/s3api/s3api_key_rotation.go b/weed/s3api/s3api_key_rotation.go deleted file mode 100644 index e8d29ff7a..000000000 --- a/weed/s3api/s3api_key_rotation.go +++ /dev/null @@ -1,291 +0,0 @@ -package s3api - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -// rotateSSECKey handles SSE-C key rotation for same-object copies -func (s3a *S3ApiServer) rotateSSECKey(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, error) { - // Parse source and destination SSE-C keys - sourceKey, err := ParseSSECCopySourceHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-C copy source headers: %w", err) - } - - destKey, err := ParseSSECHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-C destination headers: %w", err) - } - - // Validate that we have both keys - if sourceKey == nil { - return nil, fmt.Errorf("source SSE-C key required for key rotation") - } - - if destKey == nil { - return nil, fmt.Errorf("destination SSE-C key required for key rotation") - } - - // Check if keys are actually different - if sourceKey.KeyMD5 == destKey.KeyMD5 { - glog.V(2).Infof("SSE-C key rotation: keys are identical, using direct copy") - return entry.GetChunks(), nil - } - - glog.V(2).Infof("SSE-C key rotation: rotating from key %s to key %s", - sourceKey.KeyMD5[:8], destKey.KeyMD5[:8]) - - // For SSE-C key rotation, we need to re-encrypt all chunks - // This cannot be a metadata-only operation because the encryption key changes - return s3a.rotateSSECChunks(entry, sourceKey, destKey) -} - -// rotateSSEKMSKey handles SSE-KMS key rotation for same-object copies -func (s3a *S3ApiServer) rotateSSEKMSKey(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, error) { - // Get source and destination key IDs - srcKeyID, srcEncrypted := GetSourceSSEKMSInfo(entry.Extended) - if !srcEncrypted { - return nil, fmt.Errorf("source object is not SSE-KMS encrypted") - } - - dstKeyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - if dstKeyID == "" { - // Use default key if not specified - dstKeyID = "default" - } - - // Check if keys are actually different - if srcKeyID == dstKeyID { - glog.V(2).Infof("SSE-KMS key rotation: keys are identical, using direct copy") - return entry.GetChunks(), nil - } - - glog.V(2).Infof("SSE-KMS key rotation: rotating from key %s to key %s", srcKeyID, dstKeyID) - - // For SSE-KMS, we can potentially do metadata-only rotation - // if the KMS service supports key aliasing and the data encryption key can be re-wrapped - if s3a.canDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID) { - return s3a.rotateSSEKMSMetadataOnly(entry, srcKeyID, dstKeyID) - } - - // Fallback to full re-encryption - return s3a.rotateSSEKMSChunks(entry, srcKeyID, dstKeyID, r) -} - -// canDoMetadataOnlyKMSRotation determines if KMS key rotation can be done metadata-only -func (s3a *S3ApiServer) canDoMetadataOnlyKMSRotation(srcKeyID, dstKeyID string) bool { - // For now, we'll be conservative and always re-encrypt - // In a full implementation, this would check if: - // 1. Both keys are in the same KMS instance - // 2. The KMS supports key re-wrapping - // 3. The user has permissions for both keys - return false -} - -// rotateSSEKMSMetadataOnly performs metadata-only SSE-KMS key rotation -func (s3a *S3ApiServer) rotateSSEKMSMetadataOnly(entry *filer_pb.Entry, srcKeyID, dstKeyID string) ([]*filer_pb.FileChunk, error) { - // This would re-wrap the data encryption key with the new KMS key - // For now, return an error since we don't support this yet - return nil, fmt.Errorf("metadata-only KMS key rotation not yet implemented") -} - -// rotateSSECChunks re-encrypts all chunks with new SSE-C key -func (s3a *S3ApiServer) rotateSSECChunks(entry *filer_pb.Entry, sourceKey, destKey *SSECustomerKey) ([]*filer_pb.FileChunk, error) { - // Get IV from entry metadata - iv, err := GetIVFromMetadata(entry.Extended) - if err != nil { - return nil, fmt.Errorf("get IV from metadata: %w", err) - } - - var rotatedChunks []*filer_pb.FileChunk - - for _, chunk := range entry.GetChunks() { - rotatedChunk, err := s3a.rotateSSECChunk(chunk, sourceKey, destKey, iv) - if err != nil { - return nil, fmt.Errorf("rotate SSE-C chunk: %w", err) - } - rotatedChunks = append(rotatedChunks, rotatedChunk) - } - - // Generate new IV for the destination and store it in entry metadata - newIV := make([]byte, s3_constants.AESBlockSize) - if _, err := io.ReadFull(rand.Reader, newIV); err != nil { - return nil, fmt.Errorf("generate new IV: %w", err) - } - - // Update entry metadata with new IV and SSE-C headers - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - StoreIVInMetadata(entry.Extended, newIV) - entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destKey.KeyMD5) - - return rotatedChunks, nil -} - -// rotateSSEKMSChunks re-encrypts all chunks with new SSE-KMS key -func (s3a *S3ApiServer) rotateSSEKMSChunks(entry *filer_pb.Entry, srcKeyID, dstKeyID string, r *http.Request) ([]*filer_pb.FileChunk, error) { - var rotatedChunks []*filer_pb.FileChunk - - // Parse encryption context and bucket key settings - _, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-KMS copy headers: %w", err) - } - - for _, chunk := range entry.GetChunks() { - rotatedChunk, err := s3a.rotateSSEKMSChunk(chunk, srcKeyID, dstKeyID, encryptionContext, bucketKeyEnabled) - if err != nil { - return nil, fmt.Errorf("rotate SSE-KMS chunk: %w", err) - } - rotatedChunks = append(rotatedChunks, rotatedChunk) - } - - return rotatedChunks, nil -} - -// rotateSSECChunk rotates a single SSE-C encrypted chunk -func (s3a *S3ApiServer) rotateSSECChunk(chunk *filer_pb.FileChunk, sourceKey, destKey *SSECustomerKey, iv []byte) (*filer_pb.FileChunk, error) { - // Create new chunk with same properties - newChunk := &filer_pb.FileChunk{ - Offset: chunk.Offset, - Size: chunk.Size, - ModifiedTsNs: chunk.ModifiedTsNs, - ETag: chunk.ETag, - } - - // Assign new volume for the rotated chunk - assignResult, err := s3a.assignNewVolume("") - if err != nil { - return nil, fmt.Errorf("assign new volume: %w", err) - } - - // Set file ID on new chunk - if err := s3a.setChunkFileId(newChunk, assignResult); err != nil { - return nil, err - } - - // Get source chunk data - srcUrl, err := s3a.lookupVolumeUrl(chunk.GetFileIdString()) - if err != nil { - return nil, fmt.Errorf("lookup source volume: %w", err) - } - - // Download encrypted data - encryptedData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download chunk data: %w", err) - } - - // Decrypt with source key using provided IV - decryptedReader, err := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), sourceKey, iv) - if err != nil { - return nil, fmt.Errorf("create decrypted reader: %w", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - return nil, fmt.Errorf("decrypt data: %w", err) - } - - // Re-encrypt with destination key - encryptedReader, _, err := CreateSSECEncryptedReader(bytes.NewReader(decryptedData), destKey) - if err != nil { - return nil, fmt.Errorf("create encrypted reader: %w", err) - } - - // Note: IV will be handled at the entry level by the calling function - - reencryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - return nil, fmt.Errorf("re-encrypt data: %w", err) - } - - // Update chunk size to include new IV - newChunk.Size = uint64(len(reencryptedData)) - - // Upload re-encrypted data - if err := s3a.uploadChunkData(reencryptedData, assignResult); err != nil { - return nil, fmt.Errorf("upload re-encrypted data: %w", err) - } - - return newChunk, nil -} - -// rotateSSEKMSChunk rotates a single SSE-KMS encrypted chunk -func (s3a *S3ApiServer) rotateSSEKMSChunk(chunk *filer_pb.FileChunk, srcKeyID, dstKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool) (*filer_pb.FileChunk, error) { - // Create new chunk with same properties - newChunk := &filer_pb.FileChunk{ - Offset: chunk.Offset, - Size: chunk.Size, - ModifiedTsNs: chunk.ModifiedTsNs, - ETag: chunk.ETag, - } - - // Assign new volume for the rotated chunk - assignResult, err := s3a.assignNewVolume("") - if err != nil { - return nil, fmt.Errorf("assign new volume: %w", err) - } - - // Set file ID on new chunk - if err := s3a.setChunkFileId(newChunk, assignResult); err != nil { - return nil, err - } - - // Get source chunk data - srcUrl, err := s3a.lookupVolumeUrl(chunk.GetFileIdString()) - if err != nil { - return nil, fmt.Errorf("lookup source volume: %w", err) - } - - // Download data (this would be encrypted with the old KMS key) - chunkData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download chunk data: %w", err) - } - - // For now, we'll just re-upload the data as-is - // In a full implementation, this would: - // 1. Decrypt with old KMS key - // 2. Re-encrypt with new KMS key - // 3. Update metadata accordingly - - // Upload data with new key (placeholder implementation) - if err := s3a.uploadChunkData(chunkData, assignResult); err != nil { - return nil, fmt.Errorf("upload rotated data: %w", err) - } - - return newChunk, nil -} - -// IsSameObjectCopy determines if this is a same-object copy operation -func IsSameObjectCopy(r *http.Request, srcBucket, srcObject, dstBucket, dstObject string) bool { - return srcBucket == dstBucket && srcObject == dstObject -} - -// NeedsKeyRotation determines if the copy operation requires key rotation -func NeedsKeyRotation(entry *filer_pb.Entry, r *http.Request) bool { - // Check for SSE-C key rotation - if IsSSECEncrypted(entry.Extended) && IsSSECRequest(r) { - return true // Assume different keys for safety - } - - // Check for SSE-KMS key rotation - if IsSSEKMSEncrypted(entry.Extended) && IsSSEKMSRequest(r) { - srcKeyID, _ := GetSourceSSEKMSInfo(entry.Extended) - dstKeyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - return srcKeyID != dstKeyID - } - - return false -} diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..950e7a8fb --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,309 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "modernc.org/strutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DirectiveCopy = "COPY" + DirectiveReplace = "REPLACE" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + dstBucket, dstObject := s3_constants.GetBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + + glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject) + + replaceMeta, replaceTagging := replaceDirective(r.Header) + + if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) { + fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) + dir, name := fullPath.DirAndName() + entry, err := s3a.getEntry(dir, name) + if err != nil || entry.IsDirectory { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging) + if err != nil { + glog.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err) + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag) + return + } + err = s3a.touch(dir, name, entry) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + writeSuccessResponseXML(w, r, CopyObjectResult{ + ETag: fmt.Sprintf("%x", entry.Attributes.Md5), + LastModified: time.Now().UTC(), + }) + return + } + + // If source object is empty or bucket is empty, reply back invalid copy source. + if srcObject == "" || srcBucket == "" { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject)) + dir, name := srcPath.DirAndName() + if entry, err := s3a.getEntry(dir, name); err != nil || entry.IsDirectory { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, dstBucket, urlPathEscape(dstObject)) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject)) + + _, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false)) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + defer util.CloseResponse(resp) + + tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name) + if tagErr != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject) + etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, r, response) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + dstBucket, dstObject := s3_constants.GetBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. + if srcObject == "" || srcBucket == "" { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) + return + } + + glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID) + + // check partID with maximum part ID for multipart objects + if partID > globalMaxPartID { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", + s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(dstBucket), uploadID, partID) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlPathEscape(srcObject)) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) + return + } + defer dataReader.Close() + + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject) + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, r, response) + +} + +func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) { + return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace +} + +func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) { + if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 { + if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 { + reqHeader[s3_constants.AmzStorageClass] = sc + } + } + + if !replaceMeta { + for header, _ := range reqHeader { + if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) { + delete(reqHeader, header) + } + } + for k, v := range existing { + if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { + reqHeader[k] = v + } + } + } + + if !replaceTagging { + for header, _ := range reqHeader { + if strings.HasPrefix(header, s3_constants.AmzObjectTagging) { + delete(reqHeader, header) + } + } + + found := false + for k, _ := range existing { + if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) { + found = true + break + } + } + + if found { + tags, err := getTags(dir, name) + if err != nil { + return err + } + + var tagArr []string + for k, v := range tags { + tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v)) + } + tagStr := strutil.JoinFields(tagArr, "&") + reqHeader.Set(s3_constants.AmzObjectTagging, tagStr) + } + } + return +} + +func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte, err error) { + metadata = make(map[string][]byte) + + if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 { + metadata[s3_constants.AmzStorageClass] = sc + } + if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 { + metadata[s3_constants.AmzStorageClass] = []byte(sc) + } + + if replaceMeta { + for header, values := range reqHeader { + if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) { + for _, value := range values { + metadata[header] = []byte(value) + } + } + } + } else { + for k, v := range existing { + if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { + metadata[k] = v + } + } + } + if replaceTagging { + if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" { + parsedTags, err := parseTagsHeader(tags) + if err != nil { + return nil, err + } + err = ValidateTags(parsedTags) + if err != nil { + return nil, err + } + for k, v := range parsedTags { + metadata[s3_constants.AmzObjectTagging+"-"+k] = []byte(v) + } + } + } else { + for k, v := range existing { + if strings.HasPrefix(k, s3_constants.AmzObjectTagging) { + metadata[k] = v + } + } + delete(metadata, s3_constants.AmzTagCount) + } + + return +} diff --git a/weed/s3api/s3api_object_handlers_copy_test.go b/weed/s3api/s3api_object_copy_handlers_test.go similarity index 99% rename from weed/s3api/s3api_object_handlers_copy_test.go rename to weed/s3api/s3api_object_copy_handlers_test.go index a537b6f3c..29d519c24 100644 --- a/weed/s3api/s3api_object_handlers_copy_test.go +++ b/weed/s3api/s3api_object_copy_handlers_test.go @@ -2,7 +2,7 @@ package s3api import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "net/http" "reflect" "sort" diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index f30522292..e49d613c4 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -2,38 +2,34 @@ package s3api import ( "bytes" - "encoding/base64" - "errors" + "crypto/md5" + "encoding/json" + "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/mem" + "golang.org/x/exp/slices" "io" "net/http" "net/url" - "sort" - "strconv" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/pquerna/cachecontrol/cacheobject" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/util/mem" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/glog" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) -// corsHeaders defines the CORS headers that need to be preserved -// Package-level constant to avoid repeated allocations -var corsHeaders = []string{ - "Access-Control-Allow-Origin", - "Access-Control-Allow-Methods", - "Access-Control-Allow-Headers", - "Access-Control-Expose-Headers", - "Access-Control-Max-Age", - "Access-Control-Allow-Credentials", -} +const ( + deleteMultipleObjectsLimmit = 1000 +) func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser { mimeBuffer := make([]byte, 512) @@ -45,194 +41,98 @@ func mimeDetect(r *http.Request, dataReader io.Reader) io.ReadCloser { return io.NopCloser(dataReader) } -func urlEscapeObject(object string) string { - t := urlPathEscape(removeDuplicateSlashes(object)) - if strings.HasPrefix(t, "/") { - return t - } - return "/" + t -} +func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) { -func entryUrlEncode(dir string, entry string, encodingTypeUrl bool) (dirName string, entryName string, prefix string) { - if !encodingTypeUrl { - return dir, entry, entry + // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html + + bucket, object := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("PutObjectHandler %s %s", bucket, object) + + _, err := validateContentMd5(r.Header) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest) + return } - return urlPathEscape(dir), url.QueryEscape(entry), urlPathEscape(entry) + + if r.Header.Get("Cache-Control") != "" { + if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest) + return + } + } + + if r.Header.Get("Expires") != "" { + if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrMalformedExpires) + return + } + } + + dataReader := r.Body + rAuthType := getRequestAuthType(r) + if s3a.iam.isEnabled() { + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, s3ErrCode) + return + } + } else { + if authTypeStreamingSigned == rAuthType { + s3err.WriteErrorResponse(w, r, s3err.ErrAuthNotSetup) + return + } + } + defer dataReader.Close() + + objectContentType := r.Header.Get("Content-Type") + if strings.HasSuffix(object, "/") { + if err := s3a.mkdir(s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), func(entry *filer_pb.Entry) { + if objectContentType == "" { + objectContentType = "httpd/unix-directory" + } + entry.Attributes.Mime = objectContentType + }); err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) + return + } + } else { + uploadUrl := s3a.toFilerUrl(bucket, object) + if objectContentType == "" { + dataReader = mimeDetect(r, dataReader) + } + + etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, "") + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + setEtag(w, etag) + } + + writeSuccessResponseEmpty(w, r) } func urlPathEscape(object string) string { var escapedParts []string for _, part := range strings.Split(object, "/") { - escapedParts = append(escapedParts, strings.ReplaceAll(url.PathEscape(part), "+", "%2B")) + escapedParts = append(escapedParts, url.PathEscape(part)) } return strings.Join(escapedParts, "/") } -func removeDuplicateSlashes(object string) string { - result := strings.Builder{} - result.Grow(len(object)) - - isLastSlash := false - for _, r := range object { - switch r { - case '/': - if !isLastSlash { - result.WriteRune(r) - } - isLastSlash = true - default: - result.WriteRune(r) - isLastSlash = false - } - } - return result.String() -} - -// checkDirectoryObject checks if the object is a directory object (ends with "/") and if it exists -// Returns: (entry, isDirectoryObject, error) -// - entry: the directory entry if found and is a directory -// - isDirectoryObject: true if the request was for a directory object (ends with "/") -// - error: any error encountered while checking -func (s3a *S3ApiServer) checkDirectoryObject(bucket, object string) (*filer_pb.Entry, bool, error) { - if !strings.HasSuffix(object, "/") { - return nil, false, nil // Not a directory object - } - - bucketDir := s3a.option.BucketsPath + "/" + bucket - cleanObject := strings.TrimSuffix(strings.TrimPrefix(object, "/"), "/") - - if cleanObject == "" { - return nil, true, nil // Root level directory object, but we don't handle it - } - - // Check if directory exists - dirEntry, err := s3a.getEntry(bucketDir, cleanObject) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - return nil, true, nil // Directory object requested but doesn't exist - } - return nil, true, err // Other errors should be propagated - } - - if !dirEntry.IsDirectory { - return nil, true, nil // Exists but not a directory - } - - return dirEntry, true, nil -} - -// serveDirectoryContent serves the content of a directory object directly -func (s3a *S3ApiServer) serveDirectoryContent(w http.ResponseWriter, r *http.Request, entry *filer_pb.Entry) { - // Set content type - use stored MIME type or default - contentType := entry.Attributes.Mime - if contentType == "" { - contentType = "application/octet-stream" - } - w.Header().Set("Content-Type", contentType) - - // Set content length - use FileSize for accuracy, especially for large files - contentLength := int64(entry.Attributes.FileSize) - w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10)) - - // Set last modified - w.Header().Set("Last-Modified", time.Unix(entry.Attributes.Mtime, 0).UTC().Format(http.TimeFormat)) - - // Set ETag - w.Header().Set("ETag", "\""+filer.ETag(entry)+"\"") - - // For HEAD requests, don't write body - if r.Method == http.MethodHead { - w.WriteHeader(http.StatusOK) - return - } - - // Write content - w.WriteHeader(http.StatusOK) - if len(entry.Content) > 0 { - if _, err := w.Write(entry.Content); err != nil { - glog.Errorf("serveDirectoryContent: failed to write response: %v", err) - } - } -} - -// handleDirectoryObjectRequest is a helper function that handles directory object requests -// for both GET and HEAD operations, eliminating code duplication -func (s3a *S3ApiServer) handleDirectoryObjectRequest(w http.ResponseWriter, r *http.Request, bucket, object, handlerName string) bool { - // Check if this is a directory object and handle it directly - if dirEntry, isDirectoryObject, err := s3a.checkDirectoryObject(bucket, object); err != nil { - glog.Errorf("%s: error checking directory object %s/%s: %v", handlerName, bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return true // Request was handled (with error) - } else if dirEntry != nil { - glog.V(2).Infof("%s: directory object %s/%s found, serving content", handlerName, bucket, object) - s3a.serveDirectoryContent(w, r, dirEntry) - return true // Request was handled successfully - } else if isDirectoryObject { - // Directory object but doesn't exist - glog.V(2).Infof("%s: directory object %s/%s not found", handlerName, bucket, object) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return true // Request was handled (with not found) - } - - return false // Not a directory object, continue with normal processing -} - -func newListEntry(entry *filer_pb.Entry, key string, dir string, name string, bucketPrefix string, fetchOwner bool, isDirectory bool, encodingTypeUrl bool, iam AccountManager) (listEntry ListEntry) { - storageClass := "STANDARD" - if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok { - storageClass = string(v) - } - keyFormat := "%s/%s" - if isDirectory { - keyFormat += "/" - } - if key == "" { - key = fmt.Sprintf(keyFormat, dir, name)[len(bucketPrefix):] - } - if encodingTypeUrl { - key = urlPathEscape(key) - } - listEntry = ListEntry{ - Key: key, - LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), - ETag: "\"" + filer.ETag(entry) + "\"", - Size: int64(filer.FileSize(entry)), - StorageClass: StorageClass(storageClass), - } - if fetchOwner { - // Extract owner from S3 metadata (Extended attributes) instead of file system attributes - var ownerID, displayName string - if entry.Extended != nil { - if ownerBytes, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - ownerID = string(ownerBytes) - } - } - - // Fallback to anonymous if no S3 owner found - if ownerID == "" { - ownerID = s3_constants.AccountAnonymousId - displayName = "anonymous" - } else { - // Get the proper display name from IAM system - displayName = iam.GetAccountNameById(ownerID) - // Fallback to ownerID if no display name found - if displayName == "" { - displayName = ownerID - } - } - - listEntry.Owner = &CanonicalUser{ - ID: ownerID, - DisplayName: displayName, - } - } - return listEntry -} - func (s3a *S3ApiServer) toFilerUrl(bucket, object string) string { - object = urlPathEscape(removeDuplicateSlashes(object)) destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, object) + s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object)) return destUrl } @@ -241,145 +141,14 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) bucket, object := s3_constants.GetBucketAndObject(r) glog.V(3).Infof("GetObjectHandler %s %s", bucket, object) - // Handle directory objects with shared logic - if s3a.handleDirectoryObjectRequest(w, r, bucket, object, "GetObjectHandler") { - return // Directory object request was handled - } - - // Check conditional headers for read operations - result := s3a.checkConditionalHeadersForReads(r, bucket, object) - if result.ErrorCode != s3err.ErrNone { - glog.V(3).Infof("GetObjectHandler: Conditional header check failed for %s/%s with error %v", bucket, object, result.ErrorCode) - - // For 304 Not Modified responses, include the ETag header - if result.ErrorCode == s3err.ErrNotModified && result.ETag != "" { - w.Header().Set("ETag", result.ETag) - } - - s3err.WriteErrorResponse(w, r, result.ErrorCode) + if strings.HasSuffix(r.URL.Path, "/") { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) return } - // Check for specific version ID in query parameters - versionId := r.URL.Query().Get("versionId") + destUrl := s3a.toFilerUrl(bucket, object) - // Check if versioning is configured for the bucket (Enabled or Suspended) - versioningConfigured, err := s3a.isVersioningConfigured(bucket) - if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - glog.V(1).Infof("GetObject: bucket %s, object %s, versioningConfigured=%v, versionId=%s", bucket, object, versioningConfigured, versionId) - - var destUrl string - - if versioningConfigured { - // Handle versioned GET - all versions are stored in .versions directory - var targetVersionId string - var entry *filer_pb.Entry - - if versionId != "" { - // Request for specific version - glog.V(2).Infof("GetObject: requesting specific version %s for %s%s", versionId, bucket, object) - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - if err != nil { - glog.Errorf("Failed to get specific version %s: %v", versionId, err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - targetVersionId = versionId - } else { - // Request for latest version - glog.V(1).Infof("GetObject: requesting latest version for %s%s", bucket, object) - entry, err = s3a.getLatestObjectVersion(bucket, object) - if err != nil { - glog.Errorf("GetObject: Failed to get latest version for %s%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - if entry.Extended != nil { - if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { - targetVersionId = string(versionIdBytes) - } - } - // If no version ID found in entry, this is a pre-versioning object - if targetVersionId == "" { - targetVersionId = "null" - } - } - - // Check if this is a delete marker - if entry.Extended != nil { - if deleteMarker, exists := entry.Extended[s3_constants.ExtDeleteMarkerKey]; exists && string(deleteMarker) == "true" { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - } - - // Determine the actual file path based on whether this is a versioned or pre-versioning object - if targetVersionId == "null" { - // Pre-versioning object - stored as regular file - destUrl = s3a.toFilerUrl(bucket, object) - glog.V(2).Infof("GetObject: pre-versioning object URL: %s", destUrl) - } else { - // Versioned object - stored in .versions directory - versionObjectPath := object + ".versions/" + s3a.getVersionFileName(targetVersionId) - destUrl = s3a.toFilerUrl(bucket, versionObjectPath) - glog.V(2).Infof("GetObject: version %s URL: %s", targetVersionId, destUrl) - } - - // Set version ID in response header - w.Header().Set("x-amz-version-id", targetVersionId) - - // Add object lock metadata to response headers if present - s3a.addObjectLockHeadersToResponse(w, entry) - } else { - // Handle regular GET (non-versioned) - destUrl = s3a.toFilerUrl(bucket, object) - } - - // Check if this is a range request to an SSE object and modify the approach - originalRangeHeader := r.Header.Get("Range") - var sseObject = false - - // Pre-check if this object is SSE encrypted to avoid filer range conflicts - if originalRangeHeader != "" { - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - if objectEntry, err := s3a.getEntry("", objectPath); err == nil { - primarySSEType := s3a.detectPrimarySSEType(objectEntry) - if primarySSEType == s3_constants.SSETypeC || primarySSEType == s3_constants.SSETypeKMS { - sseObject = true - // Temporarily remove Range header to get full encrypted data from filer - r.Header.Del("Range") - - } - } - } - - s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { - // Restore the original Range header for SSE processing - if sseObject && originalRangeHeader != "" { - r.Header.Set("Range", originalRangeHeader) - - } - - // Add SSE metadata headers based on object metadata before SSE processing - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - if objectEntry, err := s3a.getEntry("", objectPath); err == nil { - s3a.addSSEHeadersToResponse(proxyResponse, objectEntry) - } - - // Handle SSE decryption (both SSE-C and SSE-KMS) if needed - return s3a.handleSSEResponse(r, proxyResponse, w) - }) + s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse) } func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { @@ -387,117 +156,167 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request bucket, object := s3_constants.GetBucketAndObject(r) glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object) - // Handle directory objects with shared logic - if s3a.handleDirectoryObjectRequest(w, r, bucket, object, "HeadObjectHandler") { - return // Directory object request was handled - } + destUrl := s3a.toFilerUrl(bucket, object) - // Check conditional headers for read operations - result := s3a.checkConditionalHeadersForReads(r, bucket, object) - if result.ErrorCode != s3err.ErrNone { - glog.V(3).Infof("HeadObjectHandler: Conditional header check failed for %s/%s with error %v", bucket, object, result.ErrorCode) + s3a.proxyToFiler(w, r, destUrl, false, passThroughResponse) +} - // For 304 Not Modified responses, include the ETag header - if result.ErrorCode == s3err.ErrNotModified && result.ETag != "" { - w.Header().Set("ETag", result.ETag) +func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object) + + destUrl := s3a.toFilerUrl(bucket, object) + + s3a.proxyToFiler(w, r, destUrl, true, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) { + statusCode = http.StatusNoContent + for k, v := range proxyResponse.Header { + w.Header()[k] = v } + w.WriteHeader(statusCode) + return statusCode + }) +} - s3err.WriteErrorResponse(w, r, result.ErrorCode) - return - } +// / ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} - // Check for specific version ID in query parameters - versionId := r.URL.Query().Get("versionId") +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} - // Check if versioning is configured for the bucket (Enabled or Suspended) - versioningConfigured, err := s3a.isVersioningConfigured(bucket) +// DeleteError structure. +type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. + Errors []DeleteError `xml:"Error,omitempty"` +} + +// DeleteMultipleObjectsHandler - Delete multiple objects +func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { + + bucket, _ := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket) + + deleteXMLBytes, err := io.ReadAll(r.Body) if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } - var destUrl string - - if versioningConfigured { - // Handle versioned HEAD - all versions are stored in .versions directory - var targetVersionId string - var entry *filer_pb.Entry - - if versionId != "" { - // Request for specific version - glog.V(2).Infof("HeadObject: requesting specific version %s for %s%s", versionId, bucket, object) - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - if err != nil { - glog.Errorf("Failed to get specific version %s: %v", versionId, err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - targetVersionId = versionId - } else { - // Request for latest version - glog.V(2).Infof("HeadObject: requesting latest version for %s%s", bucket, object) - entry, err = s3a.getLatestObjectVersion(bucket, object) - if err != nil { - glog.Errorf("Failed to get latest version: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - if entry.Extended != nil { - if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { - targetVersionId = string(versionIdBytes) - } - } - // If no version ID found in entry, this is a pre-versioning object - if targetVersionId == "" { - targetVersionId = "null" - } - } - - // Check if this is a delete marker - if entry.Extended != nil { - if deleteMarker, exists := entry.Extended[s3_constants.ExtDeleteMarkerKey]; exists && string(deleteMarker) == "true" { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - } - - // Determine the actual file path based on whether this is a versioned or pre-versioning object - if targetVersionId == "null" { - // Pre-versioning object - stored as regular file - destUrl = s3a.toFilerUrl(bucket, object) - glog.V(2).Infof("HeadObject: pre-versioning object URL: %s", destUrl) - } else { - // Versioned object - stored in .versions directory - versionObjectPath := object + ".versions/" + s3a.getVersionFileName(targetVersionId) - destUrl = s3a.toFilerUrl(bucket, versionObjectPath) - glog.V(2).Infof("HeadObject: version %s URL: %s", targetVersionId, destUrl) - } - - // Set version ID in response header - w.Header().Set("x-amz-version-id", targetVersionId) - - // Add object lock metadata to response headers if present - s3a.addObjectLockHeadersToResponse(w, entry) - } else { - // Handle regular HEAD (non-versioned) - destUrl = s3a.toFilerUrl(bucket, object) + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) + return } - s3a.proxyToFiler(w, r, destUrl, false, func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { - // Handle SSE validation (both SSE-C and SSE-KMS) for HEAD requests - return s3a.handleSSEResponse(r, proxyResponse, w) + if len(deleteObjects.Objects) > deleteMultipleObjectsLimmit { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxDeleteObjects) + return + } + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + var auditLog *s3err.AccessLog + + directoriesWithDeletion := make(map[string]int) + + if s3err.Logger != nil { + auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone) + } + s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + + // delete file entries + for _, object := range deleteObjects.Objects { + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err == nil { + directoriesWithDeletion[parentDirectoryPath]++ + deletedObjects = append(deletedObjects, object) + } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { + deletedObjects = append(deletedObjects, object) + } else { + delete(directoriesWithDeletion, parentDirectoryPath) + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err.Error(), + Key: object.ObjectName, + }) + } + if auditLog != nil { + auditLog.Key = entryName + s3err.PostAccessLog(*auditLog) + } + } + + // purge empty folders, only checking folders with deletions + for len(directoriesWithDeletion) > 0 { + directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion) + } + + return nil }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, r, deleteResp) + } -func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, isWrite bool, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64)) { +func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) { + var allDirs []string + for dir, _ := range directoriesWithDeletion { + allDirs = append(allDirs, dir) + } + slices.SortFunc(allDirs, func(a, b string) bool { + return len(a) > len(b) + }) + newDirectoriesWithDeletion = make(map[string]int) + for _, dir := range allDirs { + parentDir, dirName := util.FullPath(dir).DirAndName() + if parentDir == s3a.option.BucketsPath { + continue + } + if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil { + glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) + } else { + newDirectoriesWithDeletion[parentDir]++ + } + } + return +} + +func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, isWrite bool, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int)) { glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl) - start := time.Now() proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body) @@ -508,21 +327,14 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des } proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Accept-Encoding", "identity") for k, v := range r.URL.Query() { if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok { proxyReq.Header[k] = v } - if k == "partNumber" { - proxyReq.Header[s3_constants.SeaweedFSPartNumber] = v - } } for header, values := range r.Header { proxyReq.Header[header] = values } - if proxyReq.ContentLength == 0 && r.ContentLength != 0 { - proxyReq.ContentLength = r.ContentLength - } // ensure that the Authorization header is overriding any previous // Authorization header which might be already present in proxyReq @@ -534,7 +346,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } - defer util_http.CloseResponse(resp) + defer util.CloseResponse(resp) if resp.StatusCode == http.StatusPreconditionFailed { s3err.WriteErrorResponse(w, r, s3err.ErrPreconditionFailed) @@ -546,891 +358,127 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des return } - if r.Method == http.MethodDelete { - if resp.StatusCode == http.StatusNotFound { - // this is normal - responseStatusCode, _ := responseFn(resp, w) - s3err.PostLog(r, responseStatusCode, s3err.ErrNone) + if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 { + if r.Method != "DELETE" { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) return } } - if resp.StatusCode == http.StatusNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - TimeToFirstByte(r.Method, start, r) - if resp.Header.Get(s3_constants.SeaweedFSIsDirectoryKey) == "true" { - responseStatusCode, _ := responseFn(resp, w) - s3err.PostLog(r, responseStatusCode, s3err.ErrNone) - return - } - - if resp.StatusCode == http.StatusInternalServerError { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // when HEAD a directory, it should be reported as no such key - // https://github.com/seaweedfs/seaweedfs/issues/3457 - if resp.ContentLength == -1 && resp.StatusCode != http.StatusNotModified { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - if resp.StatusCode == http.StatusBadRequest { - resp_body, _ := io.ReadAll(resp.Body) - switch string(resp_body) { - case "InvalidPart": - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - default: - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) - } - resp.Body.Close() - return - } - - setUserMetadataKeyToLowercase(resp) - - responseStatusCode, bytesTransferred := responseFn(resp, w) - BucketTrafficSent(bytesTransferred, r) + responseStatusCode := responseFn(resp, w) s3err.PostLog(r, responseStatusCode, s3err.ErrNone) } -func setUserMetadataKeyToLowercase(resp *http.Response) { - for key, value := range resp.Header { - if strings.HasPrefix(key, s3_constants.AmzUserMetaPrefix) { - resp.Header[strings.ToLower(key)] = value - delete(resp.Header, key) - } +func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int) { + for k, v := range proxyResponse.Header { + w.Header()[k] = v } -} - -func captureCORSHeaders(w http.ResponseWriter, headersToCapture []string) map[string]string { - captured := make(map[string]string) - for _, corsHeader := range headersToCapture { - if value := w.Header().Get(corsHeader); value != "" { - captured[corsHeader] = value - } - } - return captured -} - -func restoreCORSHeaders(w http.ResponseWriter, capturedCORSHeaders map[string]string) { - for corsHeader, value := range capturedCORSHeaders { - w.Header().Set(corsHeader, value) - } -} - -// writeFinalResponse handles the common response writing logic shared between -// passThroughResponse and handleSSECResponse -func writeFinalResponse(w http.ResponseWriter, proxyResponse *http.Response, bodyReader io.Reader, capturedCORSHeaders map[string]string) (statusCode int, bytesTransferred int64) { - // Restore CORS headers that were set by middleware - restoreCORSHeaders(w, capturedCORSHeaders) - if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 { + w.WriteHeader(http.StatusPartialContent) statusCode = http.StatusPartialContent } else { statusCode = proxyResponse.StatusCode } w.WriteHeader(statusCode) - - // Stream response data buf := mem.Allocate(128 * 1024) defer mem.Free(buf) - bytesTransferred, err := io.CopyBuffer(w, bodyReader, buf) + if n, err := io.CopyBuffer(w, proxyResponse.Body, buf); err != nil { + glog.V(1).Infof("passthrough response read %d bytes: %v", n, err) + } + return statusCode +} + +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string) (etag string, code s3err.ErrorCode) { + + hash := md5.New() + var body = io.TeeReader(dataReader, hash) + + proxyReq, err := http.NewRequest("PUT", uploadUrl, body) + if err != nil { - glog.V(1).Infof("response read %d bytes: %v", bytesTransferred, err) + glog.Errorf("NewRequest %s: %v", uploadUrl, err) + return "", s3err.ErrInternalError } - return statusCode, bytesTransferred + + proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) + if destination != "" { + proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination) + } + + for header, values := range r.Header { + for _, value := range values { + proxyReq.Header.Add(header, value) + } + } + // ensure that the Authorization header is overriding any previous + // Authorization header which might be already present in proxyReq + s3a.maybeAddFilerJwtAuthorization(proxyReq, true) + resp, postErr := s3a.client.Do(proxyReq) + + if postErr != nil { + glog.Errorf("post to filer: %v", postErr) + return "", s3err.ErrInternalError + } + defer resp.Body.Close() + + etag = fmt.Sprintf("%x", hash.Sum(nil)) + + resp_body, ra_err := io.ReadAll(resp.Body) + if ra_err != nil { + glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) + return etag, s3err.ErrInternalError + } + var ret weed_server.FilerPostResult + unmarshal_err := json.Unmarshal(resp_body, &ret) + if unmarshal_err != nil { + glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body)) + return "", s3err.ErrInternalError + } + if ret.Error != "" { + glog.Errorf("upload to filer error: %v", ret.Error) + return "", filerErrorToS3Error(ret.Error) + } + + return etag, s3err.ErrNone } -func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { - // Capture existing CORS headers that may have been set by middleware - capturedCORSHeaders := captureCORSHeaders(w, corsHeaders) - - // Copy headers from proxy response - for k, v := range proxyResponse.Header { - w.Header()[k] = v - } - - return writeFinalResponse(w, proxyResponse, proxyResponse.Body, capturedCORSHeaders) -} - -// handleSSECResponse handles SSE-C decryption and response processing -func (s3a *S3ApiServer) handleSSECResponse(r *http.Request, proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { - // Check if the object has SSE-C metadata - sseAlgorithm := proxyResponse.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - sseKeyMD5 := proxyResponse.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - isObjectEncrypted := sseAlgorithm != "" && sseKeyMD5 != "" - - // Parse SSE-C headers from request once (avoid duplication) - customerKey, err := ParseSSECHeaders(r) - if err != nil { - errCode := MapSSECErrorToS3Error(err) - s3err.WriteErrorResponse(w, r, errCode) - return http.StatusBadRequest, 0 - } - - if isObjectEncrypted { - // This object was encrypted with SSE-C, validate customer key - if customerKey == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing) - return http.StatusBadRequest, 0 - } - - // SSE-C MD5 is base64 and case-sensitive - if customerKey.KeyMD5 != sseKeyMD5 { - // For GET/HEAD requests, AWS S3 returns 403 Forbidden for a key mismatch. - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return http.StatusForbidden, 0 - } - - // SSE-C encrypted objects support HTTP Range requests - // The IV is stored in metadata and CTR mode allows seeking to any offset - // Range requests will be handled by the filer layer with proper offset-based decryption - - // Check if this is a chunked or small content SSE-C object - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - if entry, err := s3a.getEntry("", objectPath); err == nil { - // Check for SSE-C chunks - sseCChunks := 0 - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - sseCChunks++ - } - } - - if sseCChunks >= 1 { - - // Handle chunked SSE-C objects - each chunk needs independent decryption - multipartReader, decErr := s3a.createMultipartSSECDecryptedReader(r, proxyResponse) - if decErr != nil { - glog.Errorf("Failed to create multipart SSE-C decrypted reader: %v", decErr) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - // Capture existing CORS headers - capturedCORSHeaders := captureCORSHeaders(w, corsHeaders) - - // Copy headers from proxy response - for k, v := range proxyResponse.Header { - w.Header()[k] = v - } - - // Set proper headers for range requests - rangeHeader := r.Header.Get("Range") - if rangeHeader != "" { - - // Parse range header (e.g., "bytes=0-99") - if len(rangeHeader) > 6 && rangeHeader[:6] == "bytes=" { - rangeSpec := rangeHeader[6:] - parts := strings.Split(rangeSpec, "-") - if len(parts) == 2 { - startOffset, endOffset := int64(0), int64(-1) - if parts[0] != "" { - startOffset, _ = strconv.ParseInt(parts[0], 10, 64) - } - if parts[1] != "" { - endOffset, _ = strconv.ParseInt(parts[1], 10, 64) - } - - if endOffset >= startOffset { - // Specific range - set proper Content-Length and Content-Range headers - rangeLength := endOffset - startOffset + 1 - totalSize := proxyResponse.Header.Get("Content-Length") - - w.Header().Set("Content-Length", strconv.FormatInt(rangeLength, 10)) - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%s", startOffset, endOffset, totalSize)) - // writeFinalResponse will set status to 206 if Content-Range is present - } - } - } - } - - return writeFinalResponse(w, proxyResponse, multipartReader, capturedCORSHeaders) - } else if len(entry.GetChunks()) == 0 && len(entry.Content) > 0 { - // Small content SSE-C object stored directly in entry.Content - - // Fall through to traditional single-object SSE-C handling below - } - } - - // Single-part SSE-C object: Get IV from proxy response headers (stored during upload) - ivBase64 := proxyResponse.Header.Get(s3_constants.SeaweedFSSSEIVHeader) - if ivBase64 == "" { - glog.Errorf("SSE-C encrypted single-part object missing IV in metadata") - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - iv, err := base64.StdEncoding.DecodeString(ivBase64) - if err != nil { - glog.Errorf("Failed to decode IV from metadata: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - // Create decrypted reader with IV from metadata - decryptedReader, decErr := CreateSSECDecryptedReader(proxyResponse.Body, customerKey, iv) - if decErr != nil { - glog.Errorf("Failed to create SSE-C decrypted reader: %v", decErr) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - // Capture existing CORS headers that may have been set by middleware - capturedCORSHeaders := captureCORSHeaders(w, corsHeaders) - - // Copy headers from proxy response (excluding body-related headers that might change) - for k, v := range proxyResponse.Header { - if k != "Content-Length" && k != "Content-Encoding" { - w.Header()[k] = v - } - } - - // Set correct Content-Length for SSE-C (only for full object requests) - // With IV stored in metadata, the encrypted length equals the original length - if proxyResponse.Header.Get("Content-Range") == "" { - // Full object request: encrypted length equals original length (IV not in stream) - if contentLengthStr := proxyResponse.Header.Get("Content-Length"); contentLengthStr != "" { - // Content-Length is already correct since IV is stored in metadata, not in data stream - w.Header().Set("Content-Length", contentLengthStr) - } - } - // For range requests, let the actual bytes transferred determine the response length - - // Add SSE-C response headers - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, sseAlgorithm) - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, sseKeyMD5) - - return writeFinalResponse(w, proxyResponse, decryptedReader, capturedCORSHeaders) - } else { - // Object is not encrypted, but check if customer provided SSE-C headers unnecessarily - if customerKey != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyNotNeeded) - return http.StatusBadRequest, 0 - } - - // Normal pass-through response - return passThroughResponse(proxyResponse, w) - } -} - -// handleSSEResponse handles both SSE-C and SSE-KMS decryption/validation and response processing -func (s3a *S3ApiServer) handleSSEResponse(r *http.Request, proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64) { - // Check what the client is expecting based on request headers - clientExpectsSSEC := IsSSECRequest(r) - - // Check what the stored object has in headers (may be conflicting after copy) - kmsMetadataHeader := proxyResponse.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader) - sseAlgorithm := proxyResponse.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - - // Get actual object state by examining chunks (most reliable for cross-encryption) - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - actualObjectType := "Unknown" - if objectEntry, err := s3a.getEntry("", objectPath); err == nil { - actualObjectType = s3a.detectPrimarySSEType(objectEntry) - } - - // Route based on ACTUAL object type (from chunks) rather than conflicting headers - if actualObjectType == s3_constants.SSETypeC && clientExpectsSSEC { - // Object is SSE-C and client expects SSE-C โ†’ SSE-C handler - return s3a.handleSSECResponse(r, proxyResponse, w) - } else if actualObjectType == s3_constants.SSETypeKMS && !clientExpectsSSEC { - // Object is SSE-KMS and client doesn't expect SSE-C โ†’ SSE-KMS handler - return s3a.handleSSEKMSResponse(r, proxyResponse, w, kmsMetadataHeader) - } else if actualObjectType == "None" && !clientExpectsSSEC { - // Object is unencrypted and client doesn't expect SSE-C โ†’ pass through - return passThroughResponse(proxyResponse, w) - } else if actualObjectType == s3_constants.SSETypeC && !clientExpectsSSEC { - // Object is SSE-C but client doesn't provide SSE-C headers โ†’ Error - s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing) - return http.StatusBadRequest, 0 - } else if actualObjectType == s3_constants.SSETypeKMS && clientExpectsSSEC { - // Object is SSE-KMS but client provides SSE-C headers โ†’ Error - s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing) - return http.StatusBadRequest, 0 - } else if actualObjectType == "None" && clientExpectsSSEC { - // Object is unencrypted but client provides SSE-C headers โ†’ Error - s3err.WriteErrorResponse(w, r, s3err.ErrSSECustomerKeyMissing) - return http.StatusBadRequest, 0 - } - - // Fallback for edge cases - use original logic with header-based detection - if clientExpectsSSEC && sseAlgorithm != "" { - return s3a.handleSSECResponse(r, proxyResponse, w) - } else if !clientExpectsSSEC && kmsMetadataHeader != "" { - return s3a.handleSSEKMSResponse(r, proxyResponse, w, kmsMetadataHeader) - } else { - return passThroughResponse(proxyResponse, w) - } -} - -// handleSSEKMSResponse handles SSE-KMS decryption and response processing -func (s3a *S3ApiServer) handleSSEKMSResponse(r *http.Request, proxyResponse *http.Response, w http.ResponseWriter, kmsMetadataHeader string) (statusCode int, bytesTransferred int64) { - // Deserialize SSE-KMS metadata - kmsMetadataBytes, err := base64.StdEncoding.DecodeString(kmsMetadataHeader) - if err != nil { - glog.Errorf("Failed to decode SSE-KMS metadata: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - sseKMSKey, err := DeserializeSSEKMSMetadata(kmsMetadataBytes) - if err != nil { - glog.Errorf("Failed to deserialize SSE-KMS metadata: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - - // For HEAD requests, we don't need to decrypt the body, just add response headers - if r.Method == "HEAD" { - // Capture existing CORS headers that may have been set by middleware - capturedCORSHeaders := captureCORSHeaders(w, corsHeaders) - - // Copy headers from proxy response - for k, v := range proxyResponse.Header { - w.Header()[k] = v - } - - // Add SSE-KMS response headers - AddSSEKMSResponseHeaders(w, sseKMSKey) - - return writeFinalResponse(w, proxyResponse, proxyResponse.Body, capturedCORSHeaders) - } - - // For GET requests, check if this is a multipart SSE-KMS object - // We need to check the object structure to determine if it's multipart encrypted - isMultipartSSEKMS := false - - if sseKMSKey != nil { - // Get the object entry to check chunk structure - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - if entry, err := s3a.getEntry("", objectPath); err == nil { - // Check for multipart SSE-KMS - sseKMSChunks := 0 - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseMetadata()) > 0 { - sseKMSChunks++ - } - } - isMultipartSSEKMS = sseKMSChunks > 1 - } - } - - var decryptedReader io.Reader - if isMultipartSSEKMS { - // Handle multipart SSE-KMS objects - each chunk needs independent decryption - multipartReader, decErr := s3a.createMultipartSSEKMSDecryptedReader(r, proxyResponse) - if decErr != nil { - glog.Errorf("Failed to create multipart SSE-KMS decrypted reader: %v", decErr) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - decryptedReader = multipartReader - glog.V(3).Infof("Using multipart SSE-KMS decryption for object") - } else { - // Handle single-part SSE-KMS objects - singlePartReader, decErr := CreateSSEKMSDecryptedReader(proxyResponse.Body, sseKMSKey) - if decErr != nil { - glog.Errorf("Failed to create SSE-KMS decrypted reader: %v", decErr) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return http.StatusInternalServerError, 0 - } - decryptedReader = singlePartReader - glog.V(3).Infof("Using single-part SSE-KMS decryption for object") - } - - // Capture existing CORS headers that may have been set by middleware - capturedCORSHeaders := captureCORSHeaders(w, corsHeaders) - - // Copy headers from proxy response (excluding body-related headers that might change) - for k, v := range proxyResponse.Header { - if k != "Content-Length" && k != "Content-Encoding" { - w.Header()[k] = v - } - } - - // Set correct Content-Length for SSE-KMS - if proxyResponse.Header.Get("Content-Range") == "" { - // For full object requests, encrypted length equals original length - if contentLengthStr := proxyResponse.Header.Get("Content-Length"); contentLengthStr != "" { - w.Header().Set("Content-Length", contentLengthStr) - } - } - - // Add SSE-KMS response headers - AddSSEKMSResponseHeaders(w, sseKMSKey) - - return writeFinalResponse(w, proxyResponse, decryptedReader, capturedCORSHeaders) -} - -// addObjectLockHeadersToResponse extracts object lock metadata from entry Extended attributes -// and adds the appropriate S3 headers to the response -func (s3a *S3ApiServer) addObjectLockHeadersToResponse(w http.ResponseWriter, entry *filer_pb.Entry) { - if entry == nil || entry.Extended == nil { - return - } - - // Check if this entry has any object lock metadata (indicating it's from an object lock enabled bucket) - hasObjectLockMode := false - hasRetentionDate := false - - // Add object lock mode header if present - if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists && len(modeBytes) > 0 { - w.Header().Set(s3_constants.AmzObjectLockMode, string(modeBytes)) - hasObjectLockMode = true - } - - // Add retention until date header if present - if dateBytes, exists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; exists && len(dateBytes) > 0 { - dateStr := string(dateBytes) - // Convert Unix timestamp to ISO8601 format for S3 compatibility - if timestamp, err := strconv.ParseInt(dateStr, 10, 64); err == nil { - retainUntilDate := time.Unix(timestamp, 0).UTC() - w.Header().Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - hasRetentionDate = true +func setEtag(w http.ResponseWriter, etag string) { + if etag != "" { + if strings.HasPrefix(etag, "\"") { + w.Header()["ETag"] = []string{etag} } else { - glog.Errorf("addObjectLockHeadersToResponse: failed to parse retention until date from stored metadata (dateStr: %s): %v", dateStr, err) + w.Header()["ETag"] = []string{"\"" + etag + "\""} } } - - // Add legal hold header - AWS S3 behavior: always include legal hold for object lock enabled buckets - if legalHoldBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists && len(legalHoldBytes) > 0 { - // Return stored S3 standard "ON"/"OFF" values directly - w.Header().Set(s3_constants.AmzObjectLockLegalHold, string(legalHoldBytes)) - } else if hasObjectLockMode || hasRetentionDate { - // If this entry has object lock metadata (indicating object lock enabled bucket) - // but no legal hold specifically set, default to "OFF" as per AWS S3 behavior - w.Header().Set(s3_constants.AmzObjectLockLegalHold, s3_constants.LegalHoldOff) - } } -// addSSEHeadersToResponse converts stored SSE metadata from entry.Extended to HTTP response headers -// Uses intelligent prioritization: only set headers for the PRIMARY encryption type to avoid conflicts -func (s3a *S3ApiServer) addSSEHeadersToResponse(proxyResponse *http.Response, entry *filer_pb.Entry) { - if entry == nil || entry.Extended == nil { - return - } - - // Determine the primary encryption type by examining chunks (most reliable) - primarySSEType := s3a.detectPrimarySSEType(entry) - - // Only set headers for the PRIMARY encryption type - switch primarySSEType { - case s3_constants.SSETypeC: - // Add only SSE-C headers - if algorithmBytes, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists && len(algorithmBytes) > 0 { - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, string(algorithmBytes)) - } - - if keyMD5Bytes, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists && len(keyMD5Bytes) > 0 { - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, string(keyMD5Bytes)) - } - - if ivBytes, exists := entry.Extended[s3_constants.SeaweedFSSSEIV]; exists && len(ivBytes) > 0 { - ivBase64 := base64.StdEncoding.EncodeToString(ivBytes) - proxyResponse.Header.Set(s3_constants.SeaweedFSSSEIVHeader, ivBase64) - } - - case s3_constants.SSETypeKMS: - // Add only SSE-KMS headers - if sseAlgorithm, exists := entry.Extended[s3_constants.AmzServerSideEncryption]; exists && len(sseAlgorithm) > 0 { - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryption, string(sseAlgorithm)) - } - - if kmsKeyID, exists := entry.Extended[s3_constants.AmzServerSideEncryptionAwsKmsKeyId]; exists && len(kmsKeyID) > 0 { - proxyResponse.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, string(kmsKeyID)) - } - +func filerErrorToS3Error(errString string) s3err.ErrorCode { + switch { + case strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory"): + return s3err.ErrExistingObjectIsDirectory + case strings.HasSuffix(errString, "is a file"): + return s3err.ErrExistingObjectIsFile default: - // Unencrypted or unknown - don't set any SSE headers - } - - glog.V(3).Infof("addSSEHeadersToResponse: processed %d extended metadata entries", len(entry.Extended)) -} - -// detectPrimarySSEType determines the primary SSE type by examining chunk metadata -func (s3a *S3ApiServer) detectPrimarySSEType(entry *filer_pb.Entry) string { - if len(entry.GetChunks()) == 0 { - // No chunks - check object-level metadata only (single objects or smallContent) - hasSSEC := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] != nil - hasSSEKMS := entry.Extended[s3_constants.AmzServerSideEncryption] != nil - - if hasSSEC && !hasSSEKMS { - return s3_constants.SSETypeC - } else if hasSSEKMS && !hasSSEC { - return s3_constants.SSETypeKMS - } else if hasSSEC && hasSSEKMS { - // Both present - this should only happen during cross-encryption copies - // Use content to determine actual encryption state - if len(entry.Content) > 0 { - // smallContent - check if it's encrypted (heuristic: random-looking data) - return s3_constants.SSETypeC // Default to SSE-C for mixed case - } else { - // No content, both headers - default to SSE-C - return s3_constants.SSETypeC - } - } - return "None" - } - - // Count chunk types to determine primary (multipart objects) - ssecChunks := 0 - ssekmsChunks := 0 - - for _, chunk := range entry.GetChunks() { - switch chunk.GetSseType() { - case filer_pb.SSEType_SSE_C: - ssecChunks++ - case filer_pb.SSEType_SSE_KMS: - ssekmsChunks++ - } - } - - // Primary type is the one with more chunks - if ssecChunks > ssekmsChunks { - return s3_constants.SSETypeC - } else if ssekmsChunks > ssecChunks { - return s3_constants.SSETypeKMS - } else if ssecChunks > 0 { - // Equal number, prefer SSE-C (shouldn't happen in practice) - return s3_constants.SSETypeC - } - - return "None" -} - -// createMultipartSSEKMSDecryptedReader creates a reader that decrypts each chunk independently for multipart SSE-KMS objects -func (s3a *S3ApiServer) createMultipartSSEKMSDecryptedReader(r *http.Request, proxyResponse *http.Response) (io.Reader, error) { - // Get the object path from the request - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - - // Get the object entry from filer to access chunk information - entry, err := s3a.getEntry("", objectPath) - if err != nil { - return nil, fmt.Errorf("failed to get object entry for multipart SSE-KMS decryption: %v", err) - } - - // Sort chunks by offset to ensure correct order - chunks := entry.GetChunks() - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].GetOffset() < chunks[j].GetOffset() - }) - - // Create readers for each chunk, decrypting them independently - var readers []io.Reader - - for _, chunk := range chunks { - // Get this chunk's encrypted data - chunkReader, err := s3a.createEncryptedChunkReader(chunk) - if err != nil { - return nil, fmt.Errorf("failed to create chunk reader: %v", err) - } - - // Get SSE-KMS metadata for this chunk - var chunkSSEKMSKey *SSEKMSKey - - // Check if this chunk has per-chunk SSE-KMS metadata (new architecture) - if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS && len(chunk.GetSseMetadata()) > 0 { - // Use the per-chunk SSE-KMS metadata - kmsKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata()) - if err != nil { - glog.Errorf("Failed to deserialize per-chunk SSE-KMS metadata for chunk %s: %v", chunk.GetFileIdString(), err) - } else { - // ChunkOffset is already set from the stored metadata (PartOffset) - chunkSSEKMSKey = kmsKey - } - } - - // Fallback to object-level metadata (legacy support) - if chunkSSEKMSKey == nil { - objectMetadataHeader := proxyResponse.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader) - if objectMetadataHeader != "" { - kmsMetadataBytes, decodeErr := base64.StdEncoding.DecodeString(objectMetadataHeader) - if decodeErr == nil { - kmsKey, _ := DeserializeSSEKMSMetadata(kmsMetadataBytes) - if kmsKey != nil { - // For object-level metadata (legacy), use absolute file offset as fallback - kmsKey.ChunkOffset = chunk.GetOffset() - chunkSSEKMSKey = kmsKey - } - } - } - } - - if chunkSSEKMSKey == nil { - return nil, fmt.Errorf("no SSE-KMS metadata found for chunk %s in multipart object", chunk.GetFileIdString()) - } - - // Create decrypted reader for this chunk - decryptedChunkReader, decErr := CreateSSEKMSDecryptedReader(chunkReader, chunkSSEKMSKey) - if decErr != nil { - chunkReader.Close() // Close the chunk reader if decryption fails - return nil, fmt.Errorf("failed to decrypt chunk: %v", decErr) - } - - // Use the streaming decrypted reader directly instead of reading into memory - readers = append(readers, decryptedChunkReader) - glog.V(4).Infof("Added streaming decrypted reader for chunk %s in multipart SSE-KMS object", chunk.GetFileIdString()) - } - - // Combine all decrypted chunk readers into a single stream with proper resource management - multiReader := NewMultipartSSEReader(readers) - glog.V(3).Infof("Created multipart SSE-KMS decrypted reader with %d chunks", len(readers)) - - return multiReader, nil -} - -// createEncryptedChunkReader creates a reader for a single encrypted chunk -func (s3a *S3ApiServer) createEncryptedChunkReader(chunk *filer_pb.FileChunk) (io.ReadCloser, error) { - // Get chunk URL - srcUrl, err := s3a.lookupVolumeUrl(chunk.GetFileIdString()) - if err != nil { - return nil, fmt.Errorf("lookup volume URL for chunk %s: %v", chunk.GetFileIdString(), err) - } - - // Create HTTP request for chunk data - req, err := http.NewRequest("GET", srcUrl, nil) - if err != nil { - return nil, fmt.Errorf("create HTTP request for chunk: %v", err) - } - - // Execute request - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, fmt.Errorf("execute HTTP request for chunk: %v", err) - } - - if resp.StatusCode != http.StatusOK { - resp.Body.Close() - return nil, fmt.Errorf("HTTP request for chunk failed: %d", resp.StatusCode) - } - - return resp.Body, nil -} - -// MultipartSSEReader wraps multiple readers and ensures all underlying readers are properly closed -type MultipartSSEReader struct { - multiReader io.Reader - readers []io.Reader -} - -// SSERangeReader applies range logic to an underlying reader -type SSERangeReader struct { - reader io.Reader - offset int64 // bytes to skip from the beginning - remaining int64 // bytes remaining to read (-1 for unlimited) - skipped int64 // bytes already skipped -} - -// NewMultipartSSEReader creates a new multipart reader that can properly close all underlying readers -func NewMultipartSSEReader(readers []io.Reader) *MultipartSSEReader { - return &MultipartSSEReader{ - multiReader: io.MultiReader(readers...), - readers: readers, + return s3err.ErrInternalError } } -// Read implements the io.Reader interface -func (m *MultipartSSEReader) Read(p []byte) (n int, err error) { - return m.multiReader.Read(p) +func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) { + encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite) + + if encodedJwt == "" { + return + } + + r.Header.Set("Authorization", "BEARER "+string(encodedJwt)) } -// Close implements the io.Closer interface and closes all underlying readers that support closing -func (m *MultipartSSEReader) Close() error { - var lastErr error - for i, reader := range m.readers { - if closer, ok := reader.(io.Closer); ok { - if err := closer.Close(); err != nil { - glog.V(2).Infof("Error closing reader %d: %v", i, err) - lastErr = err // Keep track of the last error, but continue closing others - } - } +func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string { + var encodedJwt security.EncodedJwt + if isWrite { + encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec) + } else { + encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec) } - return lastErr -} - -// Read implements the io.Reader interface for SSERangeReader -func (r *SSERangeReader) Read(p []byte) (n int, err error) { - - // If we need to skip bytes and haven't skipped enough yet - if r.skipped < r.offset { - skipNeeded := r.offset - r.skipped - skipBuf := make([]byte, min(int64(len(p)), skipNeeded)) - skipRead, skipErr := r.reader.Read(skipBuf) - r.skipped += int64(skipRead) - - if skipErr != nil { - return 0, skipErr - } - - // If we still need to skip more, recurse - if r.skipped < r.offset { - return r.Read(p) - } - } - - // If we have a remaining limit and it's reached - if r.remaining == 0 { - return 0, io.EOF - } - - // Calculate how much to read - readSize := len(p) - if r.remaining > 0 && int64(readSize) > r.remaining { - readSize = int(r.remaining) - } - - // Read the data - n, err = r.reader.Read(p[:readSize]) - if r.remaining > 0 { - r.remaining -= int64(n) - } - - return n, err -} - -// createMultipartSSECDecryptedReader creates a decrypted reader for multipart SSE-C objects -// Each chunk has its own IV and encryption key from the original multipart parts -func (s3a *S3ApiServer) createMultipartSSECDecryptedReader(r *http.Request, proxyResponse *http.Response) (io.Reader, error) { - // Parse SSE-C headers from the request for decryption key - customerKey, err := ParseSSECHeaders(r) - if err != nil { - return nil, fmt.Errorf("invalid SSE-C headers for multipart decryption: %v", err) - } - - // Get the object path from the request - bucket, object := s3_constants.GetBucketAndObject(r) - objectPath := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - - // Get the object entry from filer to access chunk information - entry, err := s3a.getEntry("", objectPath) - if err != nil { - return nil, fmt.Errorf("failed to get object entry for multipart SSE-C decryption: %v", err) - } - - // Sort chunks by offset to ensure correct order - chunks := entry.GetChunks() - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].GetOffset() < chunks[j].GetOffset() - }) - - // Check for Range header to optimize chunk processing - var startOffset, endOffset int64 = 0, -1 - rangeHeader := r.Header.Get("Range") - if rangeHeader != "" { - // Parse range header (e.g., "bytes=0-99") - if len(rangeHeader) > 6 && rangeHeader[:6] == "bytes=" { - rangeSpec := rangeHeader[6:] - parts := strings.Split(rangeSpec, "-") - if len(parts) == 2 { - if parts[0] != "" { - startOffset, _ = strconv.ParseInt(parts[0], 10, 64) - } - if parts[1] != "" { - endOffset, _ = strconv.ParseInt(parts[1], 10, 64) - } - } - } - } - - // Filter chunks to only those needed for the range request - var neededChunks []*filer_pb.FileChunk - for _, chunk := range chunks { - chunkStart := chunk.GetOffset() - chunkEnd := chunkStart + int64(chunk.GetSize()) - 1 - - // Check if this chunk overlaps with the requested range - if endOffset == -1 { - // No end specified, take all chunks from startOffset - if chunkEnd >= startOffset { - neededChunks = append(neededChunks, chunk) - } - } else { - // Specific range: check for overlap - if chunkStart <= endOffset && chunkEnd >= startOffset { - neededChunks = append(neededChunks, chunk) - } - } - } - - // Create readers for only the needed chunks - var readers []io.Reader - - for _, chunk := range neededChunks { - - // Get this chunk's encrypted data - chunkReader, err := s3a.createEncryptedChunkReader(chunk) - if err != nil { - return nil, fmt.Errorf("failed to create chunk reader: %v", err) - } - - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - // For SSE-C chunks, extract the IV from the stored per-chunk metadata (unified approach) - if len(chunk.GetSseMetadata()) > 0 { - // Deserialize the SSE-C metadata stored in the unified metadata field - ssecMetadata, decErr := DeserializeSSECMetadata(chunk.GetSseMetadata()) - if decErr != nil { - return nil, fmt.Errorf("failed to deserialize SSE-C metadata for chunk %s: %v", chunk.GetFileIdString(), decErr) - } - - // Decode the IV from the metadata - iv, ivErr := base64.StdEncoding.DecodeString(ssecMetadata.IV) - if ivErr != nil { - return nil, fmt.Errorf("failed to decode IV for SSE-C chunk %s: %v", chunk.GetFileIdString(), ivErr) - } - - // Calculate the correct IV for this chunk using within-part offset - var chunkIV []byte - if ssecMetadata.PartOffset > 0 { - chunkIV = calculateIVWithOffset(iv, ssecMetadata.PartOffset) - } else { - chunkIV = iv - } - - decryptedReader, decErr := CreateSSECDecryptedReader(chunkReader, customerKey, chunkIV) - if decErr != nil { - return nil, fmt.Errorf("failed to create SSE-C decrypted reader for chunk %s: %v", chunk.GetFileIdString(), decErr) - } - readers = append(readers, decryptedReader) - } else { - return nil, fmt.Errorf("SSE-C chunk %s missing required metadata", chunk.GetFileIdString()) - } - } else { - // Non-SSE-C chunk, use as-is - readers = append(readers, chunkReader) - } - } - - multiReader := NewMultipartSSEReader(readers) - - // Apply range logic if a range was requested - if rangeHeader != "" && startOffset >= 0 { - if endOffset == -1 { - // Open-ended range (e.g., "bytes=100-") - return &SSERangeReader{ - reader: multiReader, - offset: startOffset, - remaining: -1, // Read until EOF - }, nil - } else { - // Specific range (e.g., "bytes=0-99") - rangeLength := endOffset - startOffset + 1 - return &SSERangeReader{ - reader: multiReader, - offset: startOffset, - remaining: rangeLength, - }, nil - } - } - - return multiReader, nil + return string(encodedJwt) } diff --git a/weed/s3api/s3api_object_handlers_acl.go b/weed/s3api/s3api_object_handlers_acl.go deleted file mode 100644 index 1386b6cba..000000000 --- a/weed/s3api/s3api_object_handlers_acl.go +++ /dev/null @@ -1,356 +0,0 @@ -package s3api - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// GetObjectAclHandler Get object ACL -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectAclHandler %s %s", bucket, object) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Check for specific version ID in query parameters - versionId := r.URL.Query().Get("versionId") - - // Check if versioning is configured for the bucket (Enabled or Suspended) - versioningConfigured, err := s3a.isVersioningConfigured(bucket) - if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("GetObjectAclHandler: Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - var entry *filer_pb.Entry - - if versioningConfigured { - // Handle versioned object ACL retrieval - use same logic as GetObjectHandler - if versionId != "" { - // Request for specific version - glog.V(2).Infof("GetObjectAclHandler: requesting ACL for specific version %s of %s%s", versionId, bucket, object) - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - } else { - // Request for latest version - glog.V(2).Infof("GetObjectAclHandler: requesting ACL for latest version of %s%s", bucket, object) - entry, err = s3a.getLatestObjectVersion(bucket, object) - } - - if err != nil { - glog.Errorf("GetObjectAclHandler: Failed to get object version %s for %s%s: %v", versionId, bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - // Check if this is a delete marker - if entry.Extended != nil { - if deleteMarker, exists := entry.Extended[s3_constants.ExtDeleteMarkerKey]; exists && string(deleteMarker) == "true" { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - } - } else { - // Handle regular (non-versioned) object ACL retrieval - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err = s3a.getEntry(bucketDir, object) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - glog.Errorf("GetObjectAclHandler: error checking object %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - if entry == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - // Get object owner from metadata, fallback to request account - var objectOwner string - var objectOwnerDisplayName string - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - - if entry.Extended != nil { - if ownerBytes, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - objectOwner = string(ownerBytes) - } - } - - // Fallback to current account if no owner stored - if objectOwner == "" { - objectOwner = amzAccountId - } - - objectOwnerDisplayName = s3a.iam.GetAccountNameById(objectOwner) - - // Build ACL response - response := AccessControlPolicy{ - Owner: CanonicalUser{ - ID: objectOwner, - DisplayName: objectOwnerDisplayName, - }, - } - - // Get grants from stored ACL metadata - grants := GetAcpGrants(entry.Extended) - if len(grants) > 0 { - // Convert AWS SDK grants to local Grant format - for _, grant := range grants { - localGrant := Grant{ - Permission: Permission(*grant.Permission), - } - - if grant.Grantee != nil { - localGrant.Grantee = Grantee{ - Type: *grant.Grantee.Type, - XMLXSI: "CanonicalUser", - XMLNS: "http://www.w3.org/2001/XMLSchema-instance", - } - - if grant.Grantee.ID != nil { - localGrant.Grantee.ID = *grant.Grantee.ID - localGrant.Grantee.DisplayName = s3a.iam.GetAccountNameById(*grant.Grantee.ID) - } - - if grant.Grantee.URI != nil { - localGrant.Grantee.URI = *grant.Grantee.URI - } - } - - response.AccessControlList.Grant = append(response.AccessControlList.Grant, localGrant) - } - } else { - // Fallback to default full control for object owner - response.AccessControlList.Grant = append(response.AccessControlList.Grant, Grant{ - Grantee: Grantee{ - ID: objectOwner, - DisplayName: objectOwnerDisplayName, - Type: "CanonicalUser", - XMLXSI: "CanonicalUser", - XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, - Permission: Permission(s3_constants.PermissionFullControl), - }) - } - - writeSuccessResponseXML(w, r, response) -} - -// PutObjectAclHandler Put object ACL -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html -func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) { - // collect parameters - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectAclHandler %s %s", bucket, object) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Check for specific version ID in query parameters - versionId := r.URL.Query().Get("versionId") - - // Check if versioning is configured for the bucket (Enabled or Suspended) - versioningConfigured, err := s3a.isVersioningConfigured(bucket) - if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("PutObjectAclHandler: Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - var entry *filer_pb.Entry - - if versioningConfigured { - // Handle versioned object ACL modification - use same logic as GetObjectHandler - if versionId != "" { - // Request for specific version - glog.V(2).Infof("PutObjectAclHandler: modifying ACL for specific version %s of %s%s", versionId, bucket, object) - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - } else { - // Request for latest version - glog.V(2).Infof("PutObjectAclHandler: modifying ACL for latest version of %s%s", bucket, object) - entry, err = s3a.getLatestObjectVersion(bucket, object) - } - - if err != nil { - glog.Errorf("PutObjectAclHandler: Failed to get object version %s for %s%s: %v", versionId, bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - // Check if this is a delete marker - if entry.Extended != nil { - if deleteMarker, exists := entry.Extended[s3_constants.ExtDeleteMarkerKey]; exists && string(deleteMarker) == "true" { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - } - } else { - // Handle regular (non-versioned) object ACL modification - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err = s3a.getEntry(bucketDir, object) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - glog.Errorf("PutObjectAclHandler: error checking object %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - if entry == nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - // Get current object owner from metadata - var objectOwner string - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - - if entry.Extended != nil { - if ownerBytes, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - objectOwner = string(ownerBytes) - } - } - - // Fallback to current account if no owner stored - if objectOwner == "" { - objectOwner = amzAccountId - } - - // **PERMISSION CHECKS** - - // 1. Check if user is admin (admins can modify any ACL) - if !s3a.isUserAdmin(r) { - // 2. Check object ownership - only object owner can modify ACL (unless admin) - if objectOwner != amzAccountId { - glog.V(3).Infof("PutObjectAclHandler: Access denied - user %s is not owner of object %s/%s (owner: %s)", - amzAccountId, bucket, object, objectOwner) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - // 3. Check object-level WRITE_ACP permission - // Create the specific action for this object - writeAcpAction := Action(fmt.Sprintf("WriteAcp:%s/%s", bucket, object)) - identity, errCode := s3a.iam.authRequest(r, writeAcpAction) - if errCode != s3err.ErrNone { - glog.V(3).Infof("PutObjectAclHandler: Auth failed for WriteAcp action on %s/%s: %v", bucket, object, errCode) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - // 4. Verify the authenticated identity can perform WriteAcp on this specific object - if identity == nil || !identity.canDo(writeAcpAction, bucket, object) { - glog.V(3).Infof("PutObjectAclHandler: Identity %v cannot perform WriteAcp on %s/%s", identity, bucket, object) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - } else { - glog.V(3).Infof("PutObjectAclHandler: Admin user %s granted ACL modification permission for %s/%s", amzAccountId, bucket, object) - } - - // Get bucket config for ownership settings - bucketConfig, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - bucketOwnership := bucketConfig.Ownership - bucketOwnerId := bucketConfig.Owner - - // Extract ACL from request (either canned ACL or XML body) - // This function also validates that the owner in the request matches the object owner - grants, errCode := ExtractAcl(r, s3a.iam, bucketOwnership, bucketOwnerId, objectOwner, amzAccountId) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Store ACL in object metadata - if errCode := AssembleEntryWithAcp(entry, objectOwner, grants); errCode != s3err.ErrNone { - glog.Errorf("PutObjectAclHandler: failed to assemble entry with ACP: %v", errCode) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Calculate the correct directory for ACL update - var updateDirectory string - - if versioningConfigured { - if versionId != "" && versionId != "null" { - // Versioned object - update the specific version file in .versions directory - updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + ".versions" - } else { - // Latest version in versioned bucket - could be null version or versioned object - // Extract version ID from the entry to determine where it's stored - var actualVersionId string - if entry.Extended != nil { - if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { - actualVersionId = string(versionIdBytes) - } - } - - if actualVersionId == "null" || actualVersionId == "" { - // Null version (pre-versioning object) - stored as regular file - updateDirectory = s3a.option.BucketsPath + "/" + bucket - } else { - // Versioned object - stored in .versions directory - updateDirectory = s3a.option.BucketsPath + "/" + bucket + "/" + object + ".versions" - } - } - } else { - // Non-versioned object - stored as regular file - updateDirectory = s3a.option.BucketsPath + "/" + bucket - } - - // Update the object with new ACL metadata - err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.UpdateEntryRequest{ - Directory: updateDirectory, - Entry: entry, - } - - if _, err := client.UpdateEntry(context.Background(), request); err != nil { - return err - } - return nil - }) - - if err != nil { - glog.Errorf("PutObjectAclHandler: failed to update entry: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - glog.V(3).Infof("PutObjectAclHandler: Successfully updated ACL for %s/%s by user %s", bucket, object, amzAccountId) - writeSuccessResponseEmpty(w, r) -} diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go deleted file mode 100644 index a71b52a39..000000000 --- a/weed/s3api/s3api_object_handlers_copy.go +++ /dev/null @@ -1,2260 +0,0 @@ -package s3api - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "modernc.org/strutil" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" -) - -const ( - DirectiveCopy = "COPY" - DirectiveReplace = "REPLACE" -) - -func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { - - dstBucket, dstObject := s3_constants.GetBucketAndObject(r) - - // Copy source path. - cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) - if err != nil { - // Save unescaped string as is. - cpSrcPath = r.Header.Get("X-Amz-Copy-Source") - } - - srcBucket, srcObject, srcVersionId := pathToBucketObjectAndVersion(cpSrcPath) - - glog.V(3).Infof("CopyObjectHandler %s %s (version: %s) => %s %s", srcBucket, srcObject, srcVersionId, dstBucket, dstObject) - - // Validate copy source and destination - if err := ValidateCopySource(cpSrcPath, srcBucket, srcObject); err != nil { - glog.V(2).Infof("CopyObjectHandler validation error: %v", err) - errCode := MapCopyValidationError(err) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if err := ValidateCopyDestination(dstBucket, dstObject); err != nil { - glog.V(2).Infof("CopyObjectHandler validation error: %v", err) - errCode := MapCopyValidationError(err) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - replaceMeta, replaceTagging := replaceDirective(r.Header) - - if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) { - fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) - dir, name := fullPath.DirAndName() - entry, err := s3a.getEntry(dir, name) - if err != nil || entry.IsDirectory { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging) - entry.Attributes.Mtime = time.Now().Unix() - if err != nil { - glog.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag) - return - } - err = s3a.touch(dir, name, entry) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - writeSuccessResponseXML(w, r, CopyObjectResult{ - ETag: fmt.Sprintf("%x", entry.Attributes.Md5), - LastModified: time.Now().UTC(), - }) - return - } - - // If source object is empty or bucket is empty, reply back invalid copy source. - if srcObject == "" || srcBucket == "" { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - // Get detailed versioning state for source bucket - srcVersioningState, err := s3a.getVersioningState(srcBucket) - if err != nil { - glog.Errorf("Error checking versioning state for source bucket %s: %v", srcBucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - // Get the source entry with version awareness based on versioning state - var entry *filer_pb.Entry - if srcVersionId != "" { - // Specific version requested - always use version-aware retrieval - entry, err = s3a.getSpecificObjectVersion(srcBucket, srcObject, srcVersionId) - } else if srcVersioningState == s3_constants.VersioningEnabled { - // Versioning enabled - get latest version from .versions directory - entry, err = s3a.getLatestObjectVersion(srcBucket, srcObject) - } else if srcVersioningState == s3_constants.VersioningSuspended { - // Versioning suspended - current object is stored as regular file ("null" version) - // Try regular file first, fall back to latest version if needed - srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject)) - dir, name := srcPath.DirAndName() - entry, err = s3a.getEntry(dir, name) - if err != nil { - // If regular file doesn't exist, try latest version as fallback - glog.V(2).Infof("CopyObject: regular file not found for suspended versioning, trying latest version") - entry, err = s3a.getLatestObjectVersion(srcBucket, srcObject) - } - } else { - // No versioning configured - use regular retrieval - srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject)) - dir, name := srcPath.DirAndName() - entry, err = s3a.getEntry(dir, name) - } - - if err != nil || entry.IsDirectory { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - if srcBucket == dstBucket && srcObject == dstObject { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest) - return - } - - // Validate conditional copy headers - if err := s3a.validateConditionalCopyHeaders(r, entry); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Validate encryption parameters - if err := ValidateCopyEncryption(entry.Extended, r.Header); err != nil { - glog.V(2).Infof("CopyObjectHandler encryption validation error: %v", err) - errCode := MapCopyValidationError(err) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Create new entry for destination - dstEntry := &filer_pb.Entry{ - Attributes: &filer_pb.FuseAttributes{ - FileSize: entry.Attributes.FileSize, - Mtime: time.Now().Unix(), - Crtime: entry.Attributes.Crtime, - Mime: entry.Attributes.Mime, - }, - Extended: make(map[string][]byte), - } - - // Copy extended attributes from source, filtering out conflicting encryption metadata - for k, v := range entry.Extended { - // Skip encryption-specific headers that might conflict with destination encryption type - skipHeader := false - - // If we're doing cross-encryption, skip conflicting headers - if len(entry.GetChunks()) > 0 { - // Detect source and destination encryption types - srcHasSSEC := IsSSECEncrypted(entry.Extended) - srcHasSSEKMS := IsSSEKMSEncrypted(entry.Extended) - srcHasSSES3 := IsSSES3EncryptedInternal(entry.Extended) - dstWantsSSEC := IsSSECRequest(r) - dstWantsSSEKMS := IsSSEKMSRequest(r) - dstWantsSSES3 := IsSSES3RequestInternal(r) - - // Use helper function to determine if header should be skipped - skipHeader = shouldSkipEncryptionHeader(k, - srcHasSSEC, srcHasSSEKMS, srcHasSSES3, - dstWantsSSEC, dstWantsSSEKMS, dstWantsSSES3) - } - - if !skipHeader { - dstEntry.Extended[k] = v - } - } - - // Process metadata and tags and apply to destination - processedMetadata, tagErr := processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging) - if tagErr != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - // Apply processed metadata to destination entry - for k, v := range processedMetadata { - dstEntry.Extended[k] = v - } - - // For zero-size files or files without chunks, use the original approach - if entry.Attributes.FileSize == 0 || len(entry.GetChunks()) == 0 { - // Just copy the entry structure without chunks for zero-size files - dstEntry.Chunks = nil - } else { - // Use unified copy strategy approach - dstChunks, dstMetadata, copyErr := s3a.executeUnifiedCopyStrategy(entry, r, dstBucket, srcObject, dstObject) - if copyErr != nil { - glog.Errorf("CopyObjectHandler unified copy error: %v", copyErr) - // Map errors to appropriate S3 errors - errCode := s3a.mapCopyErrorToS3Error(copyErr) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - dstEntry.Chunks = dstChunks - - // Apply destination-specific metadata (e.g., SSE-C IV and headers) - if dstMetadata != nil { - for k, v := range dstMetadata { - dstEntry.Extended[k] = v - } - glog.V(2).Infof("Applied %d destination metadata entries for copy: %s", len(dstMetadata), r.URL.Path) - } - } - - // Check if destination bucket has versioning configured - dstVersioningConfigured, err := s3a.isVersioningConfigured(dstBucket) - if err != nil { - glog.Errorf("Error checking versioning status for destination bucket %s: %v", dstBucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - var dstVersionId string - var etag string - - if dstVersioningConfigured { - // For versioned destination, create a new version - dstVersionId = generateVersionId() - glog.V(2).Infof("CopyObjectHandler: creating version %s for destination %s/%s", dstVersionId, dstBucket, dstObject) - - // Add version metadata to the entry - if dstEntry.Extended == nil { - dstEntry.Extended = make(map[string][]byte) - } - dstEntry.Extended[s3_constants.ExtVersionIdKey] = []byte(dstVersionId) - - // Calculate ETag for versioning - filerEntry := &filer.Entry{ - FullPath: util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)), - Attr: filer.Attr{ - FileSize: dstEntry.Attributes.FileSize, - Mtime: time.Unix(dstEntry.Attributes.Mtime, 0), - Crtime: time.Unix(dstEntry.Attributes.Crtime, 0), - Mime: dstEntry.Attributes.Mime, - }, - Chunks: dstEntry.Chunks, - } - etag = filer.ETagEntry(filerEntry) - if !strings.HasPrefix(etag, "\"") { - etag = "\"" + etag + "\"" - } - dstEntry.Extended[s3_constants.ExtETagKey] = []byte(etag) - - // Create version file - versionFileName := s3a.getVersionFileName(dstVersionId) - versionObjectPath := dstObject + ".versions/" + versionFileName - bucketDir := s3a.option.BucketsPath + "/" + dstBucket - - if err := s3a.mkFile(bucketDir, versionObjectPath, dstEntry.Chunks, func(entry *filer_pb.Entry) { - entry.Attributes = dstEntry.Attributes - entry.Extended = dstEntry.Extended - }); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Update the .versions directory metadata - err = s3a.updateLatestVersionInDirectory(dstBucket, dstObject, dstVersionId, versionFileName) - if err != nil { - glog.Errorf("CopyObjectHandler: failed to update latest version in directory: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set version ID in response header - w.Header().Set("x-amz-version-id", dstVersionId) - } else { - // For non-versioned destination, use regular copy - dstPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) - dstDir, dstName := dstPath.DirAndName() - - // Check if destination exists and remove it first (S3 copy overwrites) - if exists, _ := s3a.exists(dstDir, dstName, false); exists { - if err := s3a.rm(dstDir, dstName, false, false); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - // Create the new file - if err := s3a.mkFile(dstDir, dstName, dstEntry.Chunks, func(entry *filer_pb.Entry) { - entry.Attributes = dstEntry.Attributes - entry.Extended = dstEntry.Extended - }); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Calculate ETag - filerEntry := &filer.Entry{ - FullPath: dstPath, - Attr: filer.Attr{ - FileSize: dstEntry.Attributes.FileSize, - Mtime: time.Unix(dstEntry.Attributes.Mtime, 0), - Crtime: time.Unix(dstEntry.Attributes.Crtime, 0), - Mime: dstEntry.Attributes.Mime, - }, - Chunks: dstEntry.Chunks, - } - etag = filer.ETagEntry(filerEntry) - } - - setEtag(w, etag) - - response := CopyObjectResult{ - ETag: etag, - LastModified: time.Now().UTC(), - } - - writeSuccessResponseXML(w, r, response) - -} - -func pathToBucketAndObject(path string) (bucket, object string) { - path = strings.TrimPrefix(path, "/") - parts := strings.SplitN(path, "/", 2) - if len(parts) == 2 { - return parts[0], "/" + parts[1] - } - return parts[0], "/" -} - -func pathToBucketObjectAndVersion(path string) (bucket, object, versionId string) { - // Parse versionId from query string if present - // Format: /bucket/object?versionId=version-id - if idx := strings.Index(path, "?versionId="); idx != -1 { - versionId = path[idx+len("?versionId="):] // dynamically calculate length - path = path[:idx] - } - - bucket, object = pathToBucketAndObject(path) - return bucket, object, versionId -} - -type CopyPartResult struct { - LastModified time.Time `xml:"LastModified"` - ETag string `xml:"ETag"` -} - -func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { - // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html - // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - dstBucket, dstObject := s3_constants.GetBucketAndObject(r) - - // Copy source path. - cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) - if err != nil { - // Save unescaped string as is. - cpSrcPath = r.Header.Get("X-Amz-Copy-Source") - } - - srcBucket, srcObject, srcVersionId := pathToBucketObjectAndVersion(cpSrcPath) - // If source object is empty or bucket is empty, reply back invalid copy source. - if srcObject == "" || srcBucket == "" { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - partIDString := r.URL.Query().Get("partNumber") - uploadID := r.URL.Query().Get("uploadId") - - partID, err := strconv.Atoi(partIDString) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - return - } - - // Check if the upload ID is valid - err = s3a.checkUploadId(dstObject, uploadID) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) - return - } - - glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d upload %s", srcBucket, srcObject, dstBucket, partID, uploadID) - - // check partID with maximum part ID for multipart objects - if partID > s3_constants.MaxS3MultipartParts { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - return - } - - // Get detailed versioning state for source bucket - srcVersioningState, err := s3a.getVersioningState(srcBucket) - if err != nil { - glog.Errorf("Error checking versioning state for source bucket %s: %v", srcBucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - // Get the source entry with version awareness based on versioning state - var entry *filer_pb.Entry - if srcVersionId != "" { - // Specific version requested - always use version-aware retrieval - entry, err = s3a.getSpecificObjectVersion(srcBucket, srcObject, srcVersionId) - } else if srcVersioningState == s3_constants.VersioningEnabled { - // Versioning enabled - get latest version from .versions directory - entry, err = s3a.getLatestObjectVersion(srcBucket, srcObject) - } else if srcVersioningState == s3_constants.VersioningSuspended { - // Versioning suspended - current object is stored as regular file ("null" version) - // Try regular file first, fall back to latest version if needed - srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject)) - dir, name := srcPath.DirAndName() - entry, err = s3a.getEntry(dir, name) - if err != nil { - // If regular file doesn't exist, try latest version as fallback - glog.V(2).Infof("CopyObjectPart: regular file not found for suspended versioning, trying latest version") - entry, err = s3a.getLatestObjectVersion(srcBucket, srcObject) - } - } else { - // No versioning configured - use regular retrieval - srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject)) - dir, name := srcPath.DirAndName() - entry, err = s3a.getEntry(dir, name) - } - - if err != nil || entry.IsDirectory { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) - return - } - - // Validate conditional copy headers - if err := s3a.validateConditionalCopyHeaders(r, entry); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Handle range header if present - rangeHeader := r.Header.Get("x-amz-copy-source-range") - var startOffset, endOffset int64 - if rangeHeader != "" { - startOffset, endOffset, err = parseRangeHeader(rangeHeader) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRange) - return - } - } else { - startOffset = 0 - if entry.Attributes.FileSize == 0 { - endOffset = -1 // For zero-size files, use -1 as endOffset - } else { - endOffset = int64(entry.Attributes.FileSize) - 1 - } - } - - // Create new entry for the part - dstEntry := &filer_pb.Entry{ - Attributes: &filer_pb.FuseAttributes{ - FileSize: uint64(endOffset - startOffset + 1), - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - Mime: entry.Attributes.Mime, - }, - Extended: make(map[string][]byte), - } - - // Handle zero-size files or empty ranges - if entry.Attributes.FileSize == 0 || endOffset < startOffset { - // For zero-size files or invalid ranges, create an empty part - dstEntry.Chunks = nil - } else { - // Copy chunks that overlap with the range - dstChunks, err := s3a.copyChunksForRange(entry, startOffset, endOffset, r.URL.Path) - if err != nil { - glog.Errorf("CopyObjectPartHandler copy chunks error: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - dstEntry.Chunks = dstChunks - } - - // Save the part entry to the multipart uploads folder - uploadDir := s3a.genUploadsFolder(dstBucket) + "/" + uploadID - partName := fmt.Sprintf("%04d_%s.part", partID, "copy") - - // Check if part exists and remove it first (allow re-copying same part) - if exists, _ := s3a.exists(uploadDir, partName, false); exists { - if err := s3a.rm(uploadDir, partName, false, false); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - if err := s3a.mkFile(uploadDir, partName, dstEntry.Chunks, func(entry *filer_pb.Entry) { - entry.Attributes = dstEntry.Attributes - entry.Extended = dstEntry.Extended - }); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Calculate ETag for the part - partPath := util.FullPath(uploadDir + "/" + partName) - filerEntry := &filer.Entry{ - FullPath: partPath, - Attr: filer.Attr{ - FileSize: dstEntry.Attributes.FileSize, - Mtime: time.Unix(dstEntry.Attributes.Mtime, 0), - Crtime: time.Unix(dstEntry.Attributes.Crtime, 0), - Mime: dstEntry.Attributes.Mime, - }, - Chunks: dstEntry.Chunks, - } - - etag := filer.ETagEntry(filerEntry) - setEtag(w, etag) - - response := CopyPartResult{ - ETag: etag, - LastModified: time.Now().UTC(), - } - - writeSuccessResponseXML(w, r, response) -} - -func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) { - return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace -} - -func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) { - if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 { - if sc := existing.Get(s3_constants.AmzStorageClass); len(sc) > 0 { - reqHeader.Set(s3_constants.AmzStorageClass, sc) - } - } - - if !replaceMeta { - for header := range reqHeader { - if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) { - delete(reqHeader, header) - } - } - for k, v := range existing { - if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { - reqHeader[k] = v - } - } - } - - if !replaceTagging { - for header, _ := range reqHeader { - if strings.HasPrefix(header, s3_constants.AmzObjectTagging) { - delete(reqHeader, header) - } - } - - found := false - for k, _ := range existing { - if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) { - found = true - break - } - } - - if found { - tags, err := getTags(dir, name) - if err != nil { - return err - } - - var tagArr []string - for k, v := range tags { - tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v)) - } - tagStr := strutil.JoinFields(tagArr, "&") - reqHeader.Set(s3_constants.AmzObjectTagging, tagStr) - } - } - return -} - -func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte, err error) { - metadata = make(map[string][]byte) - - if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 { - metadata[s3_constants.AmzStorageClass] = sc - } - if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 { - metadata[s3_constants.AmzStorageClass] = []byte(sc) - } - - // Handle SSE-KMS headers - these are always processed from request headers if present - if sseAlgorithm := reqHeader.Get(s3_constants.AmzServerSideEncryption); sseAlgorithm == "aws:kms" { - metadata[s3_constants.AmzServerSideEncryption] = []byte(sseAlgorithm) - - // KMS Key ID (optional - can use default key) - if kmsKeyID := reqHeader.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId); kmsKeyID != "" { - metadata[s3_constants.AmzServerSideEncryptionAwsKmsKeyId] = []byte(kmsKeyID) - } - - // Encryption Context (optional) - if encryptionContext := reqHeader.Get(s3_constants.AmzServerSideEncryptionContext); encryptionContext != "" { - metadata[s3_constants.AmzServerSideEncryptionContext] = []byte(encryptionContext) - } - - // Bucket Key Enabled (optional) - if bucketKeyEnabled := reqHeader.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled); bucketKeyEnabled != "" { - metadata[s3_constants.AmzServerSideEncryptionBucketKeyEnabled] = []byte(bucketKeyEnabled) - } - } else { - // If not explicitly setting SSE-KMS, preserve existing SSE headers from source - for _, sseHeader := range []string{ - s3_constants.AmzServerSideEncryption, - s3_constants.AmzServerSideEncryptionAwsKmsKeyId, - s3_constants.AmzServerSideEncryptionContext, - s3_constants.AmzServerSideEncryptionBucketKeyEnabled, - } { - if existingValue, exists := existing[sseHeader]; exists { - metadata[sseHeader] = existingValue - } - } - } - - // Handle SSE-C headers - these are always processed from request headers if present - if sseCustomerAlgorithm := reqHeader.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); sseCustomerAlgorithm != "" { - metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(sseCustomerAlgorithm) - - if sseCustomerKeyMD5 := reqHeader.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); sseCustomerKeyMD5 != "" { - metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(sseCustomerKeyMD5) - } - } else { - // If not explicitly setting SSE-C, preserve existing SSE-C headers from source - for _, ssecHeader := range []string{ - s3_constants.AmzServerSideEncryptionCustomerAlgorithm, - s3_constants.AmzServerSideEncryptionCustomerKeyMD5, - } { - if existingValue, exists := existing[ssecHeader]; exists { - metadata[ssecHeader] = existingValue - } - } - } - - if replaceMeta { - for header, values := range reqHeader { - if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) { - for _, value := range values { - metadata[header] = []byte(value) - } - } - } - } else { - for k, v := range existing { - if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { - metadata[k] = v - } - } - } - if replaceTagging { - if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" { - parsedTags, err := parseTagsHeader(tags) - if err != nil { - return nil, err - } - err = ValidateTags(parsedTags) - if err != nil { - return nil, err - } - for k, v := range parsedTags { - metadata[s3_constants.AmzObjectTagging+"-"+k] = []byte(v) - } - } - } else { - for k, v := range existing { - if strings.HasPrefix(k, s3_constants.AmzObjectTagging) { - metadata[k] = v - } - } - delete(metadata, s3_constants.AmzTagCount) - } - - return -} - -// copyChunks replicates chunks from source entry to destination entry -func (s3a *S3ApiServer) copyChunks(entry *filer_pb.Entry, dstPath string) ([]*filer_pb.FileChunk, error) { - dstChunks := make([]*filer_pb.FileChunk, len(entry.GetChunks())) - const defaultChunkCopyConcurrency = 4 - executor := util.NewLimitedConcurrentExecutor(defaultChunkCopyConcurrency) // Limit to configurable concurrent operations - errChan := make(chan error, len(entry.GetChunks())) - - for i, chunk := range entry.GetChunks() { - chunkIndex := i - executor.Execute(func() { - dstChunk, err := s3a.copySingleChunk(chunk, dstPath) - if err != nil { - errChan <- fmt.Errorf("chunk %d: %v", chunkIndex, err) - return - } - dstChunks[chunkIndex] = dstChunk - errChan <- nil - }) - } - - // Wait for all operations to complete and check for errors - for i := 0; i < len(entry.GetChunks()); i++ { - if err := <-errChan; err != nil { - return nil, err - } - } - - return dstChunks, nil -} - -// copySingleChunk copies a single chunk from source to destination -func (s3a *S3ApiServer) copySingleChunk(chunk *filer_pb.FileChunk, dstPath string) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Download and upload the chunk - chunkData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download chunk data: %w", err) - } - - if err := s3a.uploadChunkData(chunkData, assignResult); err != nil { - return nil, fmt.Errorf("upload chunk data: %w", err) - } - - return dstChunk, nil -} - -// copySingleChunkForRange copies a portion of a chunk for range operations -func (s3a *S3ApiServer) copySingleChunkForRange(originalChunk, rangeChunk *filer_pb.FileChunk, rangeStart, rangeEnd int64, dstPath string) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(rangeChunk, rangeChunk.Offset, rangeChunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(originalChunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Calculate the portion of the original chunk that we need to copy - chunkStart := originalChunk.Offset - overlapStart := max(rangeStart, chunkStart) - offsetInChunk := overlapStart - chunkStart - - // Download and upload the chunk portion - chunkData, err := s3a.downloadChunkData(srcUrl, offsetInChunk, int64(rangeChunk.Size)) - if err != nil { - return nil, fmt.Errorf("download chunk range data: %w", err) - } - - if err := s3a.uploadChunkData(chunkData, assignResult); err != nil { - return nil, fmt.Errorf("upload chunk range data: %w", err) - } - - return dstChunk, nil -} - -// assignNewVolume assigns a new volume for the chunk -func (s3a *S3ApiServer) assignNewVolume(dstPath string) (*filer_pb.AssignVolumeResponse, error) { - var assignResult *filer_pb.AssignVolumeResponse - err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.AssignVolume(context.Background(), &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: "", - Collection: "", - DiskType: "", - DataCenter: s3a.option.DataCenter, - Path: dstPath, - }) - if err != nil { - return fmt.Errorf("assign volume: %w", err) - } - if resp.Error != "" { - return fmt.Errorf("assign volume: %v", resp.Error) - } - assignResult = resp - return nil - }) - if err != nil { - return nil, err - } - return assignResult, nil -} - -// min returns the minimum of two int64 values -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -// max returns the maximum of two int64 values -func max(a, b int64) int64 { - if a > b { - return a - } - return b -} - -// parseRangeHeader parses the x-amz-copy-source-range header -func parseRangeHeader(rangeHeader string) (startOffset, endOffset int64, err error) { - // Remove "bytes=" prefix if present - rangeStr := strings.TrimPrefix(rangeHeader, "bytes=") - parts := strings.Split(rangeStr, "-") - if len(parts) != 2 { - return 0, 0, fmt.Errorf("invalid range format") - } - - startOffset, err = strconv.ParseInt(parts[0], 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("invalid start offset: %w", err) - } - - endOffset, err = strconv.ParseInt(parts[1], 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("invalid end offset: %w", err) - } - - return startOffset, endOffset, nil -} - -// copyChunksForRange copies chunks that overlap with the specified range -func (s3a *S3ApiServer) copyChunksForRange(entry *filer_pb.Entry, startOffset, endOffset int64, dstPath string) ([]*filer_pb.FileChunk, error) { - var relevantChunks []*filer_pb.FileChunk - - // Find chunks that overlap with the range - for _, chunk := range entry.GetChunks() { - chunkStart := chunk.Offset - chunkEnd := chunk.Offset + int64(chunk.Size) - - // Check if chunk overlaps with the range - if chunkStart < endOffset+1 && chunkEnd > startOffset { - // Calculate the overlap - overlapStart := max(startOffset, chunkStart) - overlapEnd := min(endOffset+1, chunkEnd) - - // Create a new chunk with adjusted offset and size relative to the range - newChunk := &filer_pb.FileChunk{ - FileId: chunk.FileId, - Offset: overlapStart - startOffset, // Offset relative to the range start - Size: uint64(overlapEnd - overlapStart), - ModifiedTsNs: time.Now().UnixNano(), - ETag: chunk.ETag, - IsCompressed: chunk.IsCompressed, - CipherKey: chunk.CipherKey, - Fid: chunk.Fid, - } - relevantChunks = append(relevantChunks, newChunk) - } - } - - // Copy the relevant chunks using a specialized method for range copies - dstChunks := make([]*filer_pb.FileChunk, len(relevantChunks)) - const defaultChunkCopyConcurrency = 4 - executor := util.NewLimitedConcurrentExecutor(defaultChunkCopyConcurrency) - errChan := make(chan error, len(relevantChunks)) - - // Create a map to track original chunks for each relevant chunk - originalChunks := make([]*filer_pb.FileChunk, len(relevantChunks)) - relevantIndex := 0 - for _, chunk := range entry.GetChunks() { - chunkStart := chunk.Offset - chunkEnd := chunk.Offset + int64(chunk.Size) - - // Check if chunk overlaps with the range - if chunkStart < endOffset+1 && chunkEnd > startOffset { - originalChunks[relevantIndex] = chunk - relevantIndex++ - } - } - - for i, chunk := range relevantChunks { - chunkIndex := i - originalChunk := originalChunks[i] // Get the corresponding original chunk - executor.Execute(func() { - dstChunk, err := s3a.copySingleChunkForRange(originalChunk, chunk, startOffset, endOffset, dstPath) - if err != nil { - errChan <- fmt.Errorf("chunk %d: %v", chunkIndex, err) - return - } - dstChunks[chunkIndex] = dstChunk - errChan <- nil - }) - } - - // Wait for all operations to complete and check for errors - for i := 0; i < len(relevantChunks); i++ { - if err := <-errChan; err != nil { - return nil, err - } - } - - return dstChunks, nil -} - -// Helper methods for copy operations to avoid code duplication - -// validateConditionalCopyHeaders validates the conditional copy headers against the source entry -func (s3a *S3ApiServer) validateConditionalCopyHeaders(r *http.Request, entry *filer_pb.Entry) s3err.ErrorCode { - // Calculate ETag for the source entry - srcPath := util.FullPath(fmt.Sprintf("%s/%s", r.URL.Path, entry.Name)) - filerEntry := &filer.Entry{ - FullPath: srcPath, - Attr: filer.Attr{ - FileSize: entry.Attributes.FileSize, - Mtime: time.Unix(entry.Attributes.Mtime, 0), - Crtime: time.Unix(entry.Attributes.Crtime, 0), - Mime: entry.Attributes.Mime, - }, - Chunks: entry.Chunks, - } - sourceETag := filer.ETagEntry(filerEntry) - - // Check X-Amz-Copy-Source-If-Match - if ifMatch := r.Header.Get(s3_constants.AmzCopySourceIfMatch); ifMatch != "" { - // Remove quotes if present - ifMatch = strings.Trim(ifMatch, `"`) - sourceETag = strings.Trim(sourceETag, `"`) - glog.V(3).Infof("CopyObjectHandler: If-Match check - expected %s, got %s", ifMatch, sourceETag) - if ifMatch != sourceETag { - glog.V(3).Infof("CopyObjectHandler: If-Match failed - expected %s, got %s", ifMatch, sourceETag) - return s3err.ErrPreconditionFailed - } - } - - // Check X-Amz-Copy-Source-If-None-Match - if ifNoneMatch := r.Header.Get(s3_constants.AmzCopySourceIfNoneMatch); ifNoneMatch != "" { - // Remove quotes if present - ifNoneMatch = strings.Trim(ifNoneMatch, `"`) - sourceETag = strings.Trim(sourceETag, `"`) - glog.V(3).Infof("CopyObjectHandler: If-None-Match check - comparing %s with %s", ifNoneMatch, sourceETag) - if ifNoneMatch == sourceETag { - glog.V(3).Infof("CopyObjectHandler: If-None-Match failed - matched %s", sourceETag) - return s3err.ErrPreconditionFailed - } - } - - // Check X-Amz-Copy-Source-If-Modified-Since - if ifModifiedSince := r.Header.Get(s3_constants.AmzCopySourceIfModifiedSince); ifModifiedSince != "" { - t, err := time.Parse(time.RFC1123, ifModifiedSince) - if err != nil { - glog.V(3).Infof("CopyObjectHandler: Invalid If-Modified-Since header: %v", err) - return s3err.ErrInvalidRequest - } - if !time.Unix(entry.Attributes.Mtime, 0).After(t) { - glog.V(3).Infof("CopyObjectHandler: If-Modified-Since failed") - return s3err.ErrPreconditionFailed - } - } - - // Check X-Amz-Copy-Source-If-Unmodified-Since - if ifUnmodifiedSince := r.Header.Get(s3_constants.AmzCopySourceIfUnmodifiedSince); ifUnmodifiedSince != "" { - t, err := time.Parse(time.RFC1123, ifUnmodifiedSince) - if err != nil { - glog.V(3).Infof("CopyObjectHandler: Invalid If-Unmodified-Since header: %v", err) - return s3err.ErrInvalidRequest - } - if time.Unix(entry.Attributes.Mtime, 0).After(t) { - glog.V(3).Infof("CopyObjectHandler: If-Unmodified-Since failed") - return s3err.ErrPreconditionFailed - } - } - - return s3err.ErrNone -} - -// createDestinationChunk creates a new chunk based on the source chunk with modified properties -func (s3a *S3ApiServer) createDestinationChunk(sourceChunk *filer_pb.FileChunk, offset int64, size uint64) *filer_pb.FileChunk { - return &filer_pb.FileChunk{ - Offset: offset, - Size: size, - ModifiedTsNs: time.Now().UnixNano(), - ETag: sourceChunk.ETag, - IsCompressed: sourceChunk.IsCompressed, - CipherKey: sourceChunk.CipherKey, - } -} - -// lookupVolumeUrl looks up the volume URL for a given file ID using the filer's LookupVolume method -func (s3a *S3ApiServer) lookupVolumeUrl(fileId string) (string, error) { - var srcUrl string - err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - vid, _, err := operation.ParseFileId(fileId) - if err != nil { - return fmt.Errorf("parse file ID: %w", err) - } - - resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ - VolumeIds: []string{vid}, - }) - if err != nil { - return fmt.Errorf("lookup volume: %w", err) - } - - if locations, found := resp.LocationsMap[vid]; found && len(locations.Locations) > 0 { - srcUrl = "http://" + locations.Locations[0].Url + "/" + fileId - } else { - return fmt.Errorf("no location found for volume %s", vid) - } - - return nil - }) - if err != nil { - return "", fmt.Errorf("lookup volume URL: %w", err) - } - return srcUrl, nil -} - -// setChunkFileId sets the file ID on the destination chunk -func (s3a *S3ApiServer) setChunkFileId(chunk *filer_pb.FileChunk, assignResult *filer_pb.AssignVolumeResponse) error { - chunk.FileId = assignResult.FileId - fid, err := filer_pb.ToFileIdObject(assignResult.FileId) - if err != nil { - return fmt.Errorf("parse file ID: %w", err) - } - chunk.Fid = fid - return nil -} - -// prepareChunkCopy prepares a chunk for copying by assigning a new volume and looking up the source URL -func (s3a *S3ApiServer) prepareChunkCopy(sourceFileId, dstPath string) (*filer_pb.AssignVolumeResponse, string, error) { - // Assign new volume - assignResult, err := s3a.assignNewVolume(dstPath) - if err != nil { - return nil, "", fmt.Errorf("assign volume: %w", err) - } - - // Look up source URL - srcUrl, err := s3a.lookupVolumeUrl(sourceFileId) - if err != nil { - return nil, "", fmt.Errorf("lookup source URL: %w", err) - } - - return assignResult, srcUrl, nil -} - -// uploadChunkData uploads chunk data to the destination using common upload logic -func (s3a *S3ApiServer) uploadChunkData(chunkData []byte, assignResult *filer_pb.AssignVolumeResponse) error { - dstUrl := fmt.Sprintf("http://%s/%s", assignResult.Location.Url, assignResult.FileId) - - uploadOption := &operation.UploadOption{ - UploadUrl: dstUrl, - Cipher: false, - IsInputCompressed: false, - MimeType: "", - PairMap: nil, - Jwt: security.EncodedJwt(assignResult.Auth), - } - uploader, err := operation.NewUploader() - if err != nil { - return fmt.Errorf("create uploader: %w", err) - } - _, err = uploader.UploadData(context.Background(), chunkData, uploadOption) - if err != nil { - return fmt.Errorf("upload chunk: %w", err) - } - - return nil -} - -// downloadChunkData downloads chunk data from the source URL -func (s3a *S3ApiServer) downloadChunkData(srcUrl string, offset, size int64) ([]byte, error) { - var chunkData []byte - shouldRetry, err := util_http.ReadUrlAsStream(context.Background(), srcUrl, nil, false, false, offset, int(size), func(data []byte) { - chunkData = append(chunkData, data...) - }) - if err != nil { - return nil, fmt.Errorf("download chunk: %w", err) - } - if shouldRetry { - return nil, fmt.Errorf("download chunk: retry needed") - } - return chunkData, nil -} - -// copyMultipartSSECChunks handles copying multipart SSE-C objects -// Returns chunks and destination metadata that should be applied to the destination entry -func (s3a *S3ApiServer) copyMultipartSSECChunks(entry *filer_pb.Entry, copySourceKey *SSECustomerKey, destKey *SSECustomerKey, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - - // For multipart SSE-C, always use decrypt/reencrypt path to ensure proper metadata handling - // The standard copyChunks() doesn't preserve SSE metadata, so we need per-chunk processing - - // Different keys or key changes: decrypt and re-encrypt each chunk individually - glog.V(2).Infof("Multipart SSE-C reencrypt copy (different keys): %s", dstPath) - - var dstChunks []*filer_pb.FileChunk - var destIV []byte - - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() != filer_pb.SSEType_SSE_C { - // Non-SSE-C chunk, copy directly - copiedChunk, err := s3a.copySingleChunk(chunk, dstPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to copy non-SSE-C chunk: %w", err) - } - dstChunks = append(dstChunks, copiedChunk) - continue - } - - // SSE-C chunk: decrypt with stored per-chunk metadata, re-encrypt with dest key - copiedChunk, chunkDestIV, err := s3a.copyMultipartSSECChunk(chunk, copySourceKey, destKey, dstPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to copy SSE-C chunk %s: %w", chunk.GetFileIdString(), err) - } - - dstChunks = append(dstChunks, copiedChunk) - - // Store the first chunk's IV as the object's IV (for single-part compatibility) - if len(destIV) == 0 { - destIV = chunkDestIV - } - } - - // Create destination metadata - dstMetadata := make(map[string][]byte) - if destKey != nil && len(destIV) > 0 { - // Store the IV and SSE-C headers for single-part compatibility - StoreIVInMetadata(dstMetadata, destIV) - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destKey.KeyMD5) - glog.V(2).Infof("Prepared multipart SSE-C destination metadata: %s", dstPath) - } - - return dstChunks, dstMetadata, nil -} - -// copyMultipartSSEKMSChunks handles copying multipart SSE-KMS objects (unified with SSE-C approach) -// Returns chunks and destination metadata that should be applied to the destination entry -func (s3a *S3ApiServer) copyMultipartSSEKMSChunks(entry *filer_pb.Entry, destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, dstPath, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - - // For multipart SSE-KMS, always use decrypt/reencrypt path to ensure proper metadata handling - // The standard copyChunks() doesn't preserve SSE metadata, so we need per-chunk processing - - var dstChunks []*filer_pb.FileChunk - - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() != filer_pb.SSEType_SSE_KMS { - // Non-SSE-KMS chunk, copy directly - copiedChunk, err := s3a.copySingleChunk(chunk, dstPath) - if err != nil { - return nil, nil, fmt.Errorf("failed to copy non-SSE-KMS chunk: %w", err) - } - dstChunks = append(dstChunks, copiedChunk) - continue - } - - // SSE-KMS chunk: decrypt with stored per-chunk metadata, re-encrypt with dest key - copiedChunk, err := s3a.copyMultipartSSEKMSChunk(chunk, destKeyID, encryptionContext, bucketKeyEnabled, dstPath, bucket) - if err != nil { - return nil, nil, fmt.Errorf("failed to copy SSE-KMS chunk %s: %w", chunk.GetFileIdString(), err) - } - - dstChunks = append(dstChunks, copiedChunk) - } - - // Create destination metadata for SSE-KMS - dstMetadata := make(map[string][]byte) - if destKeyID != "" { - // Store SSE-KMS metadata for single-part compatibility - if encryptionContext == nil { - encryptionContext = BuildEncryptionContext(bucket, dstPath, bucketKeyEnabled) - } - sseKey := &SSEKMSKey{ - KeyID: destKeyID, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - } - if kmsMetadata, serErr := SerializeSSEKMSMetadata(sseKey); serErr == nil { - dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - } else { - glog.Errorf("Failed to serialize SSE-KMS metadata: %v", serErr) - } - } - - return dstChunks, dstMetadata, nil -} - -// copyMultipartSSEKMSChunk copies a single SSE-KMS chunk from a multipart object (unified with SSE-C approach) -func (s3a *S3ApiServer) copyMultipartSSEKMSChunk(chunk *filer_pb.FileChunk, destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, dstPath, bucket string) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Download encrypted chunk data - encryptedData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download encrypted chunk data: %w", err) - } - - var finalData []byte - - // Decrypt source data using stored SSE-KMS metadata (same pattern as SSE-C) - if len(chunk.GetSseMetadata()) == 0 { - return nil, fmt.Errorf("SSE-KMS chunk missing per-chunk metadata") - } - - // Deserialize the SSE-KMS metadata (reusing unified metadata structure) - sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata()) - if err != nil { - return nil, fmt.Errorf("failed to deserialize SSE-KMS metadata: %w", err) - } - - // Decrypt the chunk data using the source metadata - decryptedReader, decErr := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sourceSSEKey) - if decErr != nil { - return nil, fmt.Errorf("create SSE-KMS decrypted reader: %w", decErr) - } - - decryptedData, readErr := io.ReadAll(decryptedReader) - if readErr != nil { - return nil, fmt.Errorf("decrypt chunk data: %w", readErr) - } - finalData = decryptedData - glog.V(4).Infof("Decrypted multipart SSE-KMS chunk: %d bytes โ†’ %d bytes", len(encryptedData), len(finalData)) - - // Re-encrypt with destination key if specified - if destKeyID != "" { - // Build encryption context if not provided - if encryptionContext == nil { - encryptionContext = BuildEncryptionContext(bucket, dstPath, bucketKeyEnabled) - } - - // Encrypt with destination key - encryptedReader, destSSEKey, encErr := CreateSSEKMSEncryptedReaderWithBucketKey(bytes.NewReader(finalData), destKeyID, encryptionContext, bucketKeyEnabled) - if encErr != nil { - return nil, fmt.Errorf("create SSE-KMS encrypted reader: %w", encErr) - } - - reencryptedData, readErr := io.ReadAll(encryptedReader) - if readErr != nil { - return nil, fmt.Errorf("re-encrypt chunk data: %w", readErr) - } - finalData = reencryptedData - - // Create per-chunk SSE-KMS metadata for the destination chunk - // For copy operations, reset chunk offset to 0 (similar to SSE-C approach) - // The copied chunks form a new object structure independent of original part boundaries - destSSEKey.ChunkOffset = 0 - kmsMetadata, err := SerializeSSEKMSMetadata(destSSEKey) - if err != nil { - return nil, fmt.Errorf("serialize SSE-KMS metadata: %w", err) - } - - // Set the SSE type and metadata on destination chunk (unified approach) - dstChunk.SseType = filer_pb.SSEType_SSE_KMS - dstChunk.SseMetadata = kmsMetadata - - glog.V(4).Infof("Re-encrypted multipart SSE-KMS chunk: %d bytes โ†’ %d bytes", len(finalData)-len(reencryptedData)+len(finalData), len(finalData)) - } - - // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { - return nil, fmt.Errorf("upload chunk data: %w", err) - } - - // Update chunk size - dstChunk.Size = uint64(len(finalData)) - - glog.V(3).Infof("Successfully copied multipart SSE-KMS chunk %s โ†’ %s", - chunk.GetFileIdString(), dstChunk.GetFileIdString()) - - return dstChunk, nil -} - -// copyMultipartSSECChunk copies a single SSE-C chunk from a multipart object -func (s3a *S3ApiServer) copyMultipartSSECChunk(chunk *filer_pb.FileChunk, copySourceKey *SSECustomerKey, destKey *SSECustomerKey, dstPath string) (*filer_pb.FileChunk, []byte, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, nil, err - } - - // Download encrypted chunk data - encryptedData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, nil, fmt.Errorf("download encrypted chunk data: %w", err) - } - - var finalData []byte - var destIV []byte - - // Decrypt if source is encrypted - if copySourceKey != nil { - // Get the per-chunk SSE-C metadata - if len(chunk.GetSseMetadata()) == 0 { - return nil, nil, fmt.Errorf("SSE-C chunk missing per-chunk metadata") - } - - // Deserialize the SSE-C metadata - ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseMetadata()) - if err != nil { - return nil, nil, fmt.Errorf("failed to deserialize SSE-C metadata: %w", err) - } - - // Decode the IV from the metadata - chunkBaseIV, err := base64.StdEncoding.DecodeString(ssecMetadata.IV) - if err != nil { - return nil, nil, fmt.Errorf("failed to decode chunk IV: %w", err) - } - - // Calculate the correct IV for this chunk using within-part offset - var chunkIV []byte - if ssecMetadata.PartOffset > 0 { - chunkIV = calculateIVWithOffset(chunkBaseIV, ssecMetadata.PartOffset) - } else { - chunkIV = chunkBaseIV - } - - // Decrypt the chunk data - decryptedReader, decErr := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), copySourceKey, chunkIV) - if decErr != nil { - return nil, nil, fmt.Errorf("create decrypted reader: %w", decErr) - } - - decryptedData, readErr := io.ReadAll(decryptedReader) - if readErr != nil { - return nil, nil, fmt.Errorf("decrypt chunk data: %w", readErr) - } - finalData = decryptedData - glog.V(4).Infof("Decrypted multipart SSE-C chunk: %d bytes โ†’ %d bytes", len(encryptedData), len(finalData)) - } else { - // Source is unencrypted - finalData = encryptedData - } - - // Re-encrypt if destination should be encrypted - if destKey != nil { - // Generate new IV for this chunk - newIV := make([]byte, s3_constants.AESBlockSize) - if _, err := rand.Read(newIV); err != nil { - return nil, nil, fmt.Errorf("generate IV: %w", err) - } - destIV = newIV - - // Encrypt with new key and IV - encryptedReader, iv, encErr := CreateSSECEncryptedReader(bytes.NewReader(finalData), destKey) - if encErr != nil { - return nil, nil, fmt.Errorf("create encrypted reader: %w", encErr) - } - destIV = iv - - reencryptedData, readErr := io.ReadAll(encryptedReader) - if readErr != nil { - return nil, nil, fmt.Errorf("re-encrypt chunk data: %w", readErr) - } - finalData = reencryptedData - - // Create per-chunk SSE-C metadata for the destination chunk - ssecMetadata, err := SerializeSSECMetadata(destIV, destKey.KeyMD5, 0) // partOffset=0 for copied chunks - if err != nil { - return nil, nil, fmt.Errorf("serialize SSE-C metadata: %w", err) - } - - // Set the SSE type and metadata on destination chunk - dstChunk.SseType = filer_pb.SSEType_SSE_C - dstChunk.SseMetadata = ssecMetadata // Use unified metadata field - - glog.V(4).Infof("Re-encrypted multipart SSE-C chunk: %d bytes โ†’ %d bytes", len(finalData)-len(reencryptedData)+len(finalData), len(finalData)) - } - - // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { - return nil, nil, fmt.Errorf("upload chunk data: %w", err) - } - - // Update chunk size - dstChunk.Size = uint64(len(finalData)) - - glog.V(3).Infof("Successfully copied multipart SSE-C chunk %s โ†’ %s", - chunk.GetFileIdString(), dstChunk.GetFileIdString()) - - return dstChunk, destIV, nil -} - -// copyMultipartCrossEncryption handles all cross-encryption and decrypt-only copy scenarios -// This unified function supports: SSE-Cโ†”SSE-KMS, SSE-Cโ†’Plain, SSE-KMSโ†’Plain -func (s3a *S3ApiServer) copyMultipartCrossEncryption(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstBucket, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - var dstChunks []*filer_pb.FileChunk - - // Parse destination encryption parameters - var destSSECKey *SSECustomerKey - var destKMSKeyID string - var destKMSEncryptionContext map[string]string - var destKMSBucketKeyEnabled bool - - if state.DstSSEC { - var err error - destSSECKey, err = ParseSSECHeaders(r) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse destination SSE-C headers: %w", err) - } - } else if state.DstSSEKMS { - var err error - destKMSKeyID, destKMSEncryptionContext, destKMSBucketKeyEnabled, err = ParseSSEKMSCopyHeaders(r) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse destination SSE-KMS headers: %w", err) - } - } else { - } - - // Parse source encryption parameters - var sourceSSECKey *SSECustomerKey - if state.SrcSSEC { - var err error - sourceSSECKey, err = ParseSSECCopySourceHeaders(r) - if err != nil { - return nil, nil, fmt.Errorf("failed to parse source SSE-C headers: %w", err) - } - } - - // Process each chunk with unified cross-encryption logic - for _, chunk := range entry.GetChunks() { - var copiedChunk *filer_pb.FileChunk - var err error - - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - copiedChunk, err = s3a.copyCrossEncryptionChunk(chunk, sourceSSECKey, destSSECKey, destKMSKeyID, destKMSEncryptionContext, destKMSBucketKeyEnabled, dstPath, dstBucket, state) - } else if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS { - copiedChunk, err = s3a.copyCrossEncryptionChunk(chunk, nil, destSSECKey, destKMSKeyID, destKMSEncryptionContext, destKMSBucketKeyEnabled, dstPath, dstBucket, state) - } else { - // Unencrypted chunk, copy directly - copiedChunk, err = s3a.copySingleChunk(chunk, dstPath) - } - - if err != nil { - return nil, nil, fmt.Errorf("failed to copy chunk %s: %w", chunk.GetFileIdString(), err) - } - - dstChunks = append(dstChunks, copiedChunk) - } - - // Create destination metadata based on destination encryption type - dstMetadata := make(map[string][]byte) - - // Clear any previous encryption metadata to avoid routing conflicts - if state.SrcSSEKMS && state.DstSSEC { - // SSE-KMS โ†’ SSE-C: Remove SSE-KMS headers - // These will be excluded from dstMetadata, effectively removing them - } else if state.SrcSSEC && state.DstSSEKMS { - // SSE-C โ†’ SSE-KMS: Remove SSE-C headers - // These will be excluded from dstMetadata, effectively removing them - } else if !state.DstSSEC && !state.DstSSEKMS { - // Encrypted โ†’ Unencrypted: Remove all encryption metadata - // These will be excluded from dstMetadata, effectively removing them - } - - if state.DstSSEC && destSSECKey != nil { - // For SSE-C destination, use first chunk's IV for compatibility - if len(dstChunks) > 0 && dstChunks[0].GetSseType() == filer_pb.SSEType_SSE_C && len(dstChunks[0].GetSseMetadata()) > 0 { - if ssecMetadata, err := DeserializeSSECMetadata(dstChunks[0].GetSseMetadata()); err == nil { - if iv, ivErr := base64.StdEncoding.DecodeString(ssecMetadata.IV); ivErr == nil { - StoreIVInMetadata(dstMetadata, iv) - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destSSECKey.KeyMD5) - } - } - } - } else if state.DstSSEKMS && destKMSKeyID != "" { - // For SSE-KMS destination, create object-level metadata - if destKMSEncryptionContext == nil { - destKMSEncryptionContext = BuildEncryptionContext(dstBucket, dstPath, destKMSBucketKeyEnabled) - } - sseKey := &SSEKMSKey{ - KeyID: destKMSKeyID, - EncryptionContext: destKMSEncryptionContext, - BucketKeyEnabled: destKMSBucketKeyEnabled, - } - if kmsMetadata, serErr := SerializeSSEKMSMetadata(sseKey); serErr == nil { - dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - } else { - glog.Errorf("Failed to serialize SSE-KMS metadata: %v", serErr) - } - } - // For unencrypted destination, no metadata needed (dstMetadata remains empty) - - return dstChunks, dstMetadata, nil -} - -// copyCrossEncryptionChunk handles copying a single chunk with cross-encryption support -func (s3a *S3ApiServer) copyCrossEncryptionChunk(chunk *filer_pb.FileChunk, sourceSSECKey *SSECustomerKey, destSSECKey *SSECustomerKey, destKMSKeyID string, destKMSEncryptionContext map[string]string, destKMSBucketKeyEnabled bool, dstPath, dstBucket string, state *EncryptionState) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Download encrypted chunk data - encryptedData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download encrypted chunk data: %w", err) - } - - var finalData []byte - - // Step 1: Decrypt source data - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - // Decrypt SSE-C source - if len(chunk.GetSseMetadata()) == 0 { - return nil, fmt.Errorf("SSE-C chunk missing per-chunk metadata") - } - - ssecMetadata, err := DeserializeSSECMetadata(chunk.GetSseMetadata()) - if err != nil { - return nil, fmt.Errorf("failed to deserialize SSE-C metadata: %w", err) - } - - chunkBaseIV, err := base64.StdEncoding.DecodeString(ssecMetadata.IV) - if err != nil { - return nil, fmt.Errorf("failed to decode chunk IV: %w", err) - } - - // Calculate the correct IV for this chunk using within-part offset - var chunkIV []byte - if ssecMetadata.PartOffset > 0 { - chunkIV = calculateIVWithOffset(chunkBaseIV, ssecMetadata.PartOffset) - } else { - chunkIV = chunkBaseIV - } - - decryptedReader, decErr := CreateSSECDecryptedReader(bytes.NewReader(encryptedData), sourceSSECKey, chunkIV) - if decErr != nil { - return nil, fmt.Errorf("create SSE-C decrypted reader: %w", decErr) - } - - decryptedData, readErr := io.ReadAll(decryptedReader) - if readErr != nil { - return nil, fmt.Errorf("decrypt SSE-C chunk data: %w", readErr) - } - finalData = decryptedData - previewLen := 16 - if len(finalData) < previewLen { - previewLen = len(finalData) - } - - } else if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS { - // Decrypt SSE-KMS source - if len(chunk.GetSseMetadata()) == 0 { - return nil, fmt.Errorf("SSE-KMS chunk missing per-chunk metadata") - } - - sourceSSEKey, err := DeserializeSSEKMSMetadata(chunk.GetSseMetadata()) - if err != nil { - return nil, fmt.Errorf("failed to deserialize SSE-KMS metadata: %w", err) - } - - decryptedReader, decErr := CreateSSEKMSDecryptedReader(bytes.NewReader(encryptedData), sourceSSEKey) - if decErr != nil { - return nil, fmt.Errorf("create SSE-KMS decrypted reader: %w", decErr) - } - - decryptedData, readErr := io.ReadAll(decryptedReader) - if readErr != nil { - return nil, fmt.Errorf("decrypt SSE-KMS chunk data: %w", readErr) - } - finalData = decryptedData - previewLen := 16 - if len(finalData) < previewLen { - previewLen = len(finalData) - } - - } else { - // Source is unencrypted - finalData = encryptedData - } - - // Step 2: Re-encrypt with destination encryption (if any) - if state.DstSSEC && destSSECKey != nil { - // Encrypt with SSE-C - encryptedReader, iv, encErr := CreateSSECEncryptedReader(bytes.NewReader(finalData), destSSECKey) - if encErr != nil { - return nil, fmt.Errorf("create SSE-C encrypted reader: %w", encErr) - } - - reencryptedData, readErr := io.ReadAll(encryptedReader) - if readErr != nil { - return nil, fmt.Errorf("re-encrypt with SSE-C: %w", readErr) - } - finalData = reencryptedData - - // Create per-chunk SSE-C metadata (offset=0 for cross-encryption copies) - ssecMetadata, err := SerializeSSECMetadata(iv, destSSECKey.KeyMD5, 0) - if err != nil { - return nil, fmt.Errorf("serialize SSE-C metadata: %w", err) - } - - dstChunk.SseType = filer_pb.SSEType_SSE_C - dstChunk.SseMetadata = ssecMetadata - - previewLen := 16 - if len(finalData) < previewLen { - previewLen = len(finalData) - } - - } else if state.DstSSEKMS && destKMSKeyID != "" { - // Encrypt with SSE-KMS - if destKMSEncryptionContext == nil { - destKMSEncryptionContext = BuildEncryptionContext(dstBucket, dstPath, destKMSBucketKeyEnabled) - } - - encryptedReader, destSSEKey, encErr := CreateSSEKMSEncryptedReaderWithBucketKey(bytes.NewReader(finalData), destKMSKeyID, destKMSEncryptionContext, destKMSBucketKeyEnabled) - if encErr != nil { - return nil, fmt.Errorf("create SSE-KMS encrypted reader: %w", encErr) - } - - reencryptedData, readErr := io.ReadAll(encryptedReader) - if readErr != nil { - return nil, fmt.Errorf("re-encrypt with SSE-KMS: %w", readErr) - } - finalData = reencryptedData - - // Create per-chunk SSE-KMS metadata (offset=0 for cross-encryption copies) - destSSEKey.ChunkOffset = 0 - kmsMetadata, err := SerializeSSEKMSMetadata(destSSEKey) - if err != nil { - return nil, fmt.Errorf("serialize SSE-KMS metadata: %w", err) - } - - dstChunk.SseType = filer_pb.SSEType_SSE_KMS - dstChunk.SseMetadata = kmsMetadata - - glog.V(4).Infof("Re-encrypted chunk with SSE-KMS") - } - // For unencrypted destination, finalData remains as decrypted plaintext - - // Upload the final data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { - return nil, fmt.Errorf("upload chunk data: %w", err) - } - - // Update chunk size - dstChunk.Size = uint64(len(finalData)) - - glog.V(3).Infof("Successfully copied cross-encryption chunk %s โ†’ %s", - chunk.GetFileIdString(), dstChunk.GetFileIdString()) - - return dstChunk, nil -} - -// getEncryptionTypeString returns a string representation of encryption type for logging -func (s3a *S3ApiServer) getEncryptionTypeString(isSSEC, isSSEKMS, isSSES3 bool) string { - if isSSEC { - return s3_constants.SSETypeC - } else if isSSEKMS { - return s3_constants.SSETypeKMS - } else if isSSES3 { - return s3_constants.SSETypeS3 - } - return "Plain" -} - -// copyChunksWithSSEC handles SSE-C aware copying with smart fast/slow path selection -// Returns chunks and destination metadata that should be applied to the destination entry -func (s3a *S3ApiServer) copyChunksWithSSEC(entry *filer_pb.Entry, r *http.Request) ([]*filer_pb.FileChunk, map[string][]byte, error) { - - // Parse SSE-C headers - copySourceKey, err := ParseSSECCopySourceHeaders(r) - if err != nil { - glog.Errorf("Failed to parse SSE-C copy source headers: %v", err) - return nil, nil, err - } - - destKey, err := ParseSSECHeaders(r) - if err != nil { - glog.Errorf("Failed to parse SSE-C headers: %v", err) - return nil, nil, err - } - - // Check if this is a multipart SSE-C object - isMultipartSSEC := false - sseCChunks := 0 - for i, chunk := range entry.GetChunks() { - glog.V(4).Infof("Chunk %d: sseType=%d, hasMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseMetadata()) > 0) - if chunk.GetSseType() == filer_pb.SSEType_SSE_C { - sseCChunks++ - } - } - isMultipartSSEC = sseCChunks > 1 - - if isMultipartSSEC { - glog.V(2).Infof("Detected multipart SSE-C object with %d encrypted chunks for copy", sseCChunks) - return s3a.copyMultipartSSECChunks(entry, copySourceKey, destKey, r.URL.Path) - } - - // Single-part SSE-C object: use original logic - // Determine copy strategy - strategy, err := DetermineSSECCopyStrategy(entry.Extended, copySourceKey, destKey) - if err != nil { - return nil, nil, err - } - - glog.V(2).Infof("SSE-C copy strategy for single-part %s: %v", r.URL.Path, strategy) - - switch strategy { - case SSECCopyStrategyDirect: - // FAST PATH: Direct chunk copy - glog.V(2).Infof("Using fast path: direct chunk copy for %s", r.URL.Path) - chunks, err := s3a.copyChunks(entry, r.URL.Path) - return chunks, nil, err - - case SSECCopyStrategyDecryptEncrypt: - // SLOW PATH: Decrypt and re-encrypt - glog.V(2).Infof("Using slow path: decrypt/re-encrypt for %s", r.URL.Path) - chunks, destIV, err := s3a.copyChunksWithReencryption(entry, copySourceKey, destKey, r.URL.Path) - if err != nil { - return nil, nil, err - } - - // Create destination metadata with IV and SSE-C headers - dstMetadata := make(map[string][]byte) - if destKey != nil && len(destIV) > 0 { - // Store the IV - StoreIVInMetadata(dstMetadata, destIV) - - // Store SSE-C algorithm and key MD5 for proper metadata - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte("AES256") - dstMetadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(destKey.KeyMD5) - - glog.V(2).Infof("Prepared IV and SSE-C metadata for destination copy: %s", r.URL.Path) - } - - return chunks, dstMetadata, nil - - default: - return nil, nil, fmt.Errorf("unknown SSE-C copy strategy: %v", strategy) - } -} - -// copyChunksWithReencryption handles the slow path: decrypt source and re-encrypt for destination -// Returns the destination chunks and the IV used for encryption (if any) -func (s3a *S3ApiServer) copyChunksWithReencryption(entry *filer_pb.Entry, copySourceKey *SSECustomerKey, destKey *SSECustomerKey, dstPath string) ([]*filer_pb.FileChunk, []byte, error) { - dstChunks := make([]*filer_pb.FileChunk, len(entry.GetChunks())) - const defaultChunkCopyConcurrency = 4 - executor := util.NewLimitedConcurrentExecutor(defaultChunkCopyConcurrency) // Limit to configurable concurrent operations - errChan := make(chan error, len(entry.GetChunks())) - - // Generate a single IV for the destination object (if destination is encrypted) - var destIV []byte - if destKey != nil { - destIV = make([]byte, s3_constants.AESBlockSize) - if _, err := io.ReadFull(rand.Reader, destIV); err != nil { - return nil, nil, fmt.Errorf("failed to generate destination IV: %w", err) - } - } - - for i, chunk := range entry.GetChunks() { - chunkIndex := i - executor.Execute(func() { - dstChunk, err := s3a.copyChunkWithReencryption(chunk, copySourceKey, destKey, dstPath, entry.Extended, destIV) - if err != nil { - errChan <- fmt.Errorf("chunk %d: %v", chunkIndex, err) - return - } - dstChunks[chunkIndex] = dstChunk - errChan <- nil - }) - } - - // Wait for all operations to complete and check for errors - for i := 0; i < len(entry.GetChunks()); i++ { - if err := <-errChan; err != nil { - return nil, nil, err - } - } - - return dstChunks, destIV, nil -} - -// copyChunkWithReencryption copies a single chunk with decrypt/re-encrypt -func (s3a *S3ApiServer) copyChunkWithReencryption(chunk *filer_pb.FileChunk, copySourceKey *SSECustomerKey, destKey *SSECustomerKey, dstPath string, srcMetadata map[string][]byte, destIV []byte) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Download encrypted chunk data - encryptedData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download encrypted chunk data: %w", err) - } - - var finalData []byte - - // Decrypt if source is encrypted - if copySourceKey != nil { - // Get IV from source metadata - srcIV, err := GetIVFromMetadata(srcMetadata) - if err != nil { - return nil, fmt.Errorf("failed to get IV from metadata: %w", err) - } - - // Use counter offset based on chunk position in the original object - decryptedReader, decErr := CreateSSECDecryptedReaderWithOffset(bytes.NewReader(encryptedData), copySourceKey, srcIV, uint64(chunk.Offset)) - if decErr != nil { - return nil, fmt.Errorf("create decrypted reader: %w", decErr) - } - - decryptedData, readErr := io.ReadAll(decryptedReader) - if readErr != nil { - return nil, fmt.Errorf("decrypt chunk data: %w", readErr) - } - finalData = decryptedData - } else { - // Source is unencrypted - finalData = encryptedData - } - - // Re-encrypt if destination should be encrypted - if destKey != nil { - // Use the provided destination IV with counter offset based on chunk position - // This ensures all chunks of the same object use the same IV with different counters - encryptedReader, encErr := CreateSSECEncryptedReaderWithOffset(bytes.NewReader(finalData), destKey, destIV, uint64(chunk.Offset)) - if encErr != nil { - return nil, fmt.Errorf("create encrypted reader: %w", encErr) - } - - reencryptedData, readErr := io.ReadAll(encryptedReader) - if readErr != nil { - return nil, fmt.Errorf("re-encrypt chunk data: %w", readErr) - } - finalData = reencryptedData - - // Update chunk size to include IV - dstChunk.Size = uint64(len(finalData)) - } - - // Upload the processed data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { - return nil, fmt.Errorf("upload processed chunk data: %w", err) - } - - return dstChunk, nil -} - -// copyChunksWithSSEKMS handles SSE-KMS aware copying with smart fast/slow path selection -// Returns chunks and destination metadata like SSE-C for consistency -func (s3a *S3ApiServer) copyChunksWithSSEKMS(entry *filer_pb.Entry, r *http.Request, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - - // Parse SSE-KMS headers from copy request - destKeyID, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r) - if err != nil { - return nil, nil, err - } - - // Check if this is a multipart SSE-KMS object - isMultipartSSEKMS := false - sseKMSChunks := 0 - for i, chunk := range entry.GetChunks() { - glog.V(4).Infof("Chunk %d: sseType=%d, hasKMSMetadata=%t", i, chunk.GetSseType(), len(chunk.GetSseMetadata()) > 0) - if chunk.GetSseType() == filer_pb.SSEType_SSE_KMS { - sseKMSChunks++ - } - } - isMultipartSSEKMS = sseKMSChunks > 1 - - if isMultipartSSEKMS { - glog.V(2).Infof("Detected multipart SSE-KMS object with %d encrypted chunks for copy", sseKMSChunks) - return s3a.copyMultipartSSEKMSChunks(entry, destKeyID, encryptionContext, bucketKeyEnabled, r.URL.Path, bucket) - } - - // Single-part SSE-KMS object: use existing logic - // If no SSE-KMS headers and source is not SSE-KMS encrypted, use regular copy - if destKeyID == "" && !IsSSEKMSEncrypted(entry.Extended) { - chunks, err := s3a.copyChunks(entry, r.URL.Path) - return chunks, nil, err - } - - // Apply bucket default encryption if no explicit key specified - if destKeyID == "" { - bucketMetadata, err := s3a.getBucketMetadata(bucket) - if err != nil { - glog.V(2).Infof("Could not get bucket metadata for default encryption: %v", err) - } else if bucketMetadata != nil && bucketMetadata.Encryption != nil && bucketMetadata.Encryption.SseAlgorithm == "aws:kms" { - destKeyID = bucketMetadata.Encryption.KmsKeyId - bucketKeyEnabled = bucketMetadata.Encryption.BucketKeyEnabled - } - } - - // Determine copy strategy - strategy, err := DetermineSSEKMSCopyStrategy(entry.Extended, destKeyID) - if err != nil { - return nil, nil, err - } - - glog.V(2).Infof("SSE-KMS copy strategy for %s: %v", r.URL.Path, strategy) - - switch strategy { - case SSEKMSCopyStrategyDirect: - // FAST PATH: Direct chunk copy (same key or both unencrypted) - glog.V(2).Infof("Using fast path: direct chunk copy for %s", r.URL.Path) - chunks, err := s3a.copyChunks(entry, r.URL.Path) - // For direct copy, generate destination metadata if we're encrypting to SSE-KMS - var dstMetadata map[string][]byte - if destKeyID != "" { - dstMetadata = make(map[string][]byte) - if encryptionContext == nil { - encryptionContext = BuildEncryptionContext(bucket, r.URL.Path, bucketKeyEnabled) - } - sseKey := &SSEKMSKey{ - KeyID: destKeyID, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - } - if kmsMetadata, serializeErr := SerializeSSEKMSMetadata(sseKey); serializeErr == nil { - dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - glog.V(3).Infof("Generated SSE-KMS metadata for direct copy: keyID=%s", destKeyID) - } else { - glog.Errorf("Failed to serialize SSE-KMS metadata for direct copy: %v", serializeErr) - } - } - return chunks, dstMetadata, err - - case SSEKMSCopyStrategyDecryptEncrypt: - // SLOW PATH: Decrypt source and re-encrypt for destination - glog.V(2).Infof("Using slow path: decrypt/re-encrypt for %s", r.URL.Path) - return s3a.copyChunksWithSSEKMSReencryption(entry, destKeyID, encryptionContext, bucketKeyEnabled, r.URL.Path, bucket) - - default: - return nil, nil, fmt.Errorf("unknown SSE-KMS copy strategy: %v", strategy) - } -} - -// copyChunksWithSSEKMSReencryption handles the slow path: decrypt source and re-encrypt for destination -// Returns chunks and destination metadata like SSE-C for consistency -func (s3a *S3ApiServer) copyChunksWithSSEKMSReencryption(entry *filer_pb.Entry, destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, dstPath, bucket string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - var dstChunks []*filer_pb.FileChunk - - // Extract and deserialize source SSE-KMS metadata - var sourceSSEKey *SSEKMSKey - if keyData, exists := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - var err error - sourceSSEKey, err = DeserializeSSEKMSMetadata(keyData) - if err != nil { - return nil, nil, fmt.Errorf("failed to deserialize source SSE-KMS metadata: %w", err) - } - glog.V(3).Infof("Extracted source SSE-KMS key: keyID=%s, bucketKey=%t", sourceSSEKey.KeyID, sourceSSEKey.BucketKeyEnabled) - } - - // Process chunks - for _, chunk := range entry.GetChunks() { - dstChunk, err := s3a.copyChunkWithSSEKMSReencryption(chunk, sourceSSEKey, destKeyID, encryptionContext, bucketKeyEnabled, dstPath, bucket) - if err != nil { - return nil, nil, fmt.Errorf("copy chunk with SSE-KMS re-encryption: %w", err) - } - dstChunks = append(dstChunks, dstChunk) - } - - // Generate destination metadata for SSE-KMS encryption (consistent with SSE-C pattern) - dstMetadata := make(map[string][]byte) - if destKeyID != "" { - // Build encryption context if not provided - if encryptionContext == nil { - encryptionContext = BuildEncryptionContext(bucket, dstPath, bucketKeyEnabled) - } - - // Create SSE-KMS key structure for destination metadata - sseKey := &SSEKMSKey{ - KeyID: destKeyID, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - // Note: EncryptedDataKey will be generated during actual encryption - // IV is also generated per chunk during encryption - } - - // Serialize SSE-KMS metadata for storage - kmsMetadata, err := SerializeSSEKMSMetadata(sseKey) - if err != nil { - return nil, nil, fmt.Errorf("serialize destination SSE-KMS metadata: %w", err) - } - - dstMetadata[s3_constants.SeaweedFSSSEKMSKey] = kmsMetadata - glog.V(3).Infof("Generated destination SSE-KMS metadata: keyID=%s, bucketKey=%t", destKeyID, bucketKeyEnabled) - } - - return dstChunks, dstMetadata, nil -} - -// copyChunkWithSSEKMSReencryption copies a single chunk with SSE-KMS decrypt/re-encrypt -func (s3a *S3ApiServer) copyChunkWithSSEKMSReencryption(chunk *filer_pb.FileChunk, sourceSSEKey *SSEKMSKey, destKeyID string, encryptionContext map[string]string, bucketKeyEnabled bool, dstPath, bucket string) (*filer_pb.FileChunk, error) { - // Create destination chunk - dstChunk := s3a.createDestinationChunk(chunk, chunk.Offset, chunk.Size) - - // Prepare chunk copy (assign new volume and get source URL) - assignResult, srcUrl, err := s3a.prepareChunkCopy(chunk.GetFileIdString(), dstPath) - if err != nil { - return nil, err - } - - // Set file ID on destination chunk - if err := s3a.setChunkFileId(dstChunk, assignResult); err != nil { - return nil, err - } - - // Download chunk data - chunkData, err := s3a.downloadChunkData(srcUrl, 0, int64(chunk.Size)) - if err != nil { - return nil, fmt.Errorf("download chunk data: %w", err) - } - - var finalData []byte - - // Decrypt source data if it's SSE-KMS encrypted - if sourceSSEKey != nil { - // For SSE-KMS, the encrypted chunk data contains IV + encrypted content - // Use the source SSE key to decrypt the chunk data - decryptedReader, err := CreateSSEKMSDecryptedReader(bytes.NewReader(chunkData), sourceSSEKey) - if err != nil { - return nil, fmt.Errorf("create SSE-KMS decrypted reader: %w", err) - } - - decryptedData, err := io.ReadAll(decryptedReader) - if err != nil { - return nil, fmt.Errorf("decrypt chunk data: %w", err) - } - finalData = decryptedData - glog.V(4).Infof("Decrypted chunk data: %d bytes โ†’ %d bytes", len(chunkData), len(finalData)) - } else { - // Source is not SSE-KMS encrypted, use data as-is - finalData = chunkData - } - - // Re-encrypt if destination should be SSE-KMS encrypted - if destKeyID != "" { - // Encryption context should already be provided by the caller - // But ensure we have a fallback for robustness - if encryptionContext == nil { - encryptionContext = BuildEncryptionContext(bucket, dstPath, bucketKeyEnabled) - } - - encryptedReader, _, err := CreateSSEKMSEncryptedReaderWithBucketKey(bytes.NewReader(finalData), destKeyID, encryptionContext, bucketKeyEnabled) - if err != nil { - return nil, fmt.Errorf("create SSE-KMS encrypted reader: %w", err) - } - - reencryptedData, err := io.ReadAll(encryptedReader) - if err != nil { - return nil, fmt.Errorf("re-encrypt chunk data: %w", err) - } - - // Store original decrypted data size for logging - originalSize := len(finalData) - finalData = reencryptedData - glog.V(4).Infof("Re-encrypted chunk data: %d bytes โ†’ %d bytes", originalSize, len(finalData)) - - // Update chunk size to include IV and encryption overhead - dstChunk.Size = uint64(len(finalData)) - } - - // Upload the processed data - if err := s3a.uploadChunkData(finalData, assignResult); err != nil { - return nil, fmt.Errorf("upload processed chunk data: %w", err) - } - - glog.V(3).Infof("Successfully processed SSE-KMS chunk re-encryption: src_key=%s, dst_key=%s, size=%dโ†’%d", - getKeyIDString(sourceSSEKey), destKeyID, len(chunkData), len(finalData)) - - return dstChunk, nil -} - -// getKeyIDString safely gets the KeyID from an SSEKMSKey, handling nil cases -func getKeyIDString(key *SSEKMSKey) string { - if key == nil { - return "none" - } - if key.KeyID == "" { - return "default" - } - return key.KeyID -} - -// EncryptionHeaderContext holds encryption type information and header classifications -type EncryptionHeaderContext struct { - SrcSSEC, SrcSSEKMS, SrcSSES3 bool - DstSSEC, DstSSEKMS, DstSSES3 bool - IsSSECHeader, IsSSEKMSHeader, IsSSES3Header bool -} - -// newEncryptionHeaderContext creates a context for encryption header processing -func newEncryptionHeaderContext(headerKey string, srcSSEC, srcSSEKMS, srcSSES3, dstSSEC, dstSSEKMS, dstSSES3 bool) *EncryptionHeaderContext { - return &EncryptionHeaderContext{ - SrcSSEC: srcSSEC, SrcSSEKMS: srcSSEKMS, SrcSSES3: srcSSES3, - DstSSEC: dstSSEC, DstSSEKMS: dstSSEKMS, DstSSES3: dstSSES3, - IsSSECHeader: isSSECHeader(headerKey), - IsSSEKMSHeader: isSSEKMSHeader(headerKey, srcSSEKMS, dstSSEKMS), - IsSSES3Header: isSSES3Header(headerKey, srcSSES3, dstSSES3), - } -} - -// isSSECHeader checks if the header is SSE-C specific -func isSSECHeader(headerKey string) bool { - return headerKey == s3_constants.AmzServerSideEncryptionCustomerAlgorithm || - headerKey == s3_constants.AmzServerSideEncryptionCustomerKeyMD5 || - headerKey == s3_constants.SeaweedFSSSEIV -} - -// isSSEKMSHeader checks if the header is SSE-KMS specific -func isSSEKMSHeader(headerKey string, srcSSEKMS, dstSSEKMS bool) bool { - return (headerKey == s3_constants.AmzServerSideEncryption && (srcSSEKMS || dstSSEKMS)) || - headerKey == s3_constants.AmzServerSideEncryptionAwsKmsKeyId || - headerKey == s3_constants.SeaweedFSSSEKMSKey || - headerKey == s3_constants.SeaweedFSSSEKMSKeyID || - headerKey == s3_constants.SeaweedFSSSEKMSEncryption || - headerKey == s3_constants.SeaweedFSSSEKMSBucketKeyEnabled || - headerKey == s3_constants.SeaweedFSSSEKMSEncryptionContext || - headerKey == s3_constants.SeaweedFSSSEKMSBaseIV -} - -// isSSES3Header checks if the header is SSE-S3 specific -func isSSES3Header(headerKey string, srcSSES3, dstSSES3 bool) bool { - return (headerKey == s3_constants.AmzServerSideEncryption && (srcSSES3 || dstSSES3)) || - headerKey == s3_constants.SeaweedFSSSES3Key || - headerKey == s3_constants.SeaweedFSSSES3Encryption || - headerKey == s3_constants.SeaweedFSSSES3BaseIV || - headerKey == s3_constants.SeaweedFSSSES3KeyData -} - -// shouldSkipCrossEncryptionHeader handles cross-encryption copy scenarios -func (ctx *EncryptionHeaderContext) shouldSkipCrossEncryptionHeader() bool { - // SSE-C to SSE-KMS: skip SSE-C headers - if ctx.SrcSSEC && ctx.DstSSEKMS && ctx.IsSSECHeader { - return true - } - - // SSE-KMS to SSE-C: skip SSE-KMS headers - if ctx.SrcSSEKMS && ctx.DstSSEC && ctx.IsSSEKMSHeader { - return true - } - - // SSE-C to SSE-S3: skip SSE-C headers - if ctx.SrcSSEC && ctx.DstSSES3 && ctx.IsSSECHeader { - return true - } - - // SSE-S3 to SSE-C: skip SSE-S3 headers - if ctx.SrcSSES3 && ctx.DstSSEC && ctx.IsSSES3Header { - return true - } - - // SSE-KMS to SSE-S3: skip SSE-KMS headers - if ctx.SrcSSEKMS && ctx.DstSSES3 && ctx.IsSSEKMSHeader { - return true - } - - // SSE-S3 to SSE-KMS: skip SSE-S3 headers - if ctx.SrcSSES3 && ctx.DstSSEKMS && ctx.IsSSES3Header { - return true - } - - return false -} - -// shouldSkipEncryptedToUnencryptedHeader handles encrypted to unencrypted copy scenarios -func (ctx *EncryptionHeaderContext) shouldSkipEncryptedToUnencryptedHeader() bool { - // Skip all encryption headers when copying from encrypted to unencrypted - hasSourceEncryption := ctx.SrcSSEC || ctx.SrcSSEKMS || ctx.SrcSSES3 - hasDestinationEncryption := ctx.DstSSEC || ctx.DstSSEKMS || ctx.DstSSES3 - isAnyEncryptionHeader := ctx.IsSSECHeader || ctx.IsSSEKMSHeader || ctx.IsSSES3Header - - return hasSourceEncryption && !hasDestinationEncryption && isAnyEncryptionHeader -} - -// shouldSkipEncryptionHeader determines if a header should be skipped when copying extended attributes -// based on the source and destination encryption types. This consolidates the repetitive logic for -// filtering encryption-related headers during copy operations. -func shouldSkipEncryptionHeader(headerKey string, - srcSSEC, srcSSEKMS, srcSSES3 bool, - dstSSEC, dstSSEKMS, dstSSES3 bool) bool { - - // Create context to reduce complexity and improve testability - ctx := newEncryptionHeaderContext(headerKey, srcSSEC, srcSSEKMS, srcSSES3, dstSSEC, dstSSEKMS, dstSSES3) - - // If it's not an encryption header, don't skip it - if !ctx.IsSSECHeader && !ctx.IsSSEKMSHeader && !ctx.IsSSES3Header { - return false - } - - // Handle cross-encryption scenarios (different encryption types) - if ctx.shouldSkipCrossEncryptionHeader() { - return true - } - - // Handle encrypted to unencrypted scenarios - if ctx.shouldSkipEncryptedToUnencryptedHeader() { - return true - } - - // Default: don't skip the header - return false -} diff --git a/weed/s3api/s3api_object_handlers_copy_unified.go b/weed/s3api/s3api_object_handlers_copy_unified.go deleted file mode 100644 index d11594420..000000000 --- a/weed/s3api/s3api_object_handlers_copy_unified.go +++ /dev/null @@ -1,249 +0,0 @@ -package s3api - -import ( - "context" - "fmt" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// executeUnifiedCopyStrategy executes the appropriate copy strategy based on encryption state -// Returns chunks and destination metadata that should be applied to the destination entry -func (s3a *S3ApiServer) executeUnifiedCopyStrategy(entry *filer_pb.Entry, r *http.Request, dstBucket, srcObject, dstObject string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - // Detect encryption state (using entry-aware detection for multipart objects) - srcPath := fmt.Sprintf("/%s/%s", r.Header.Get("X-Amz-Copy-Source-Bucket"), srcObject) - dstPath := fmt.Sprintf("/%s/%s", dstBucket, dstObject) - state := DetectEncryptionStateWithEntry(entry, r, srcPath, dstPath) - - // Debug logging for encryption state - - // Apply bucket default encryption if no explicit encryption specified - if !state.IsTargetEncrypted() { - bucketMetadata, err := s3a.getBucketMetadata(dstBucket) - if err == nil && bucketMetadata != nil && bucketMetadata.Encryption != nil { - switch bucketMetadata.Encryption.SseAlgorithm { - case "aws:kms": - state.DstSSEKMS = true - case "AES256": - state.DstSSES3 = true - } - } - } - - // Determine copy strategy - strategy, err := DetermineUnifiedCopyStrategy(state, entry.Extended, r) - if err != nil { - return nil, nil, err - } - - glog.V(2).Infof("Unified copy strategy for %s โ†’ %s: %v", srcPath, dstPath, strategy) - - // Calculate optimized sizes for the strategy - sizeCalc := CalculateOptimizedSizes(entry, r, strategy) - glog.V(2).Infof("Size calculation: src=%d, target=%d, actual=%d, overhead=%d, preallocate=%v", - sizeCalc.SourceSize, sizeCalc.TargetSize, sizeCalc.ActualContentSize, - sizeCalc.EncryptionOverhead, sizeCalc.CanPreallocate) - - // Execute strategy - switch strategy { - case CopyStrategyDirect: - chunks, err := s3a.copyChunks(entry, dstPath) - return chunks, nil, err - - case CopyStrategyKeyRotation: - return s3a.executeKeyRotation(entry, r, state) - - case CopyStrategyEncrypt: - return s3a.executeEncryptCopy(entry, r, state, dstBucket, dstPath) - - case CopyStrategyDecrypt: - return s3a.executeDecryptCopy(entry, r, state, dstPath) - - case CopyStrategyReencrypt: - return s3a.executeReencryptCopy(entry, r, state, dstBucket, dstPath) - - default: - return nil, nil, fmt.Errorf("unknown unified copy strategy: %v", strategy) - } -} - -// mapCopyErrorToS3Error maps various copy errors to appropriate S3 error codes -func (s3a *S3ApiServer) mapCopyErrorToS3Error(err error) s3err.ErrorCode { - if err == nil { - return s3err.ErrNone - } - - // Check for KMS errors first - if kmsErr := MapKMSErrorToS3Error(err); kmsErr != s3err.ErrInvalidRequest { - return kmsErr - } - - // Check for SSE-C errors - if ssecErr := MapSSECErrorToS3Error(err); ssecErr != s3err.ErrInvalidRequest { - return ssecErr - } - - // Default to internal error for unknown errors - return s3err.ErrInternalError -} - -// executeKeyRotation handles key rotation for same-object copies -func (s3a *S3ApiServer) executeKeyRotation(entry *filer_pb.Entry, r *http.Request, state *EncryptionState) ([]*filer_pb.FileChunk, map[string][]byte, error) { - // For key rotation, we only need to update metadata, not re-copy chunks - // This is a significant optimization for same-object key changes - - if state.SrcSSEC && state.DstSSEC { - // SSE-C key rotation - need to handle new key/IV, use reencrypt logic - return s3a.executeReencryptCopy(entry, r, state, "", "") - } - - if state.SrcSSEKMS && state.DstSSEKMS { - // SSE-KMS key rotation - return existing chunks, metadata will be updated by caller - return entry.GetChunks(), nil, nil - } - - // Fallback to reencrypt if we can't do metadata-only rotation - return s3a.executeReencryptCopy(entry, r, state, "", "") -} - -// executeEncryptCopy handles plain โ†’ encrypted copies -func (s3a *S3ApiServer) executeEncryptCopy(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstBucket, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - if state.DstSSEC { - // Use existing SSE-C copy logic - return s3a.copyChunksWithSSEC(entry, r) - } - - if state.DstSSEKMS { - // Use existing SSE-KMS copy logic - metadata is now generated internally - chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket) - return chunks, dstMetadata, err - } - - if state.DstSSES3 { - // Use streaming copy for SSE-S3 encryption - chunks, err := s3a.executeStreamingReencryptCopy(entry, r, state, dstPath) - return chunks, nil, err - } - - return nil, nil, fmt.Errorf("unknown target encryption type") -} - -// executeDecryptCopy handles encrypted โ†’ plain copies -func (s3a *S3ApiServer) executeDecryptCopy(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - // Use unified multipart-aware decrypt copy for all encryption types - if state.SrcSSEC || state.SrcSSEKMS { - glog.V(2).Infof("Encryptedโ†’Plain copy: using unified multipart decrypt copy") - return s3a.copyMultipartCrossEncryption(entry, r, state, "", dstPath) - } - - if state.SrcSSES3 { - // Use streaming copy for SSE-S3 decryption - chunks, err := s3a.executeStreamingReencryptCopy(entry, r, state, dstPath) - return chunks, nil, err - } - - return nil, nil, fmt.Errorf("unknown source encryption type") -} - -// executeReencryptCopy handles encrypted โ†’ encrypted copies with different keys/methods -func (s3a *S3ApiServer) executeReencryptCopy(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstBucket, dstPath string) ([]*filer_pb.FileChunk, map[string][]byte, error) { - // Check if we should use streaming copy for better performance - if s3a.shouldUseStreamingCopy(entry, state) { - chunks, err := s3a.executeStreamingReencryptCopy(entry, r, state, dstPath) - return chunks, nil, err - } - - // Fallback to chunk-by-chunk approach for compatibility - if state.SrcSSEC && state.DstSSEC { - return s3a.copyChunksWithSSEC(entry, r) - } - - if state.SrcSSEKMS && state.DstSSEKMS { - // Use existing SSE-KMS copy logic - metadata is now generated internally - chunks, dstMetadata, err := s3a.copyChunksWithSSEKMS(entry, r, dstBucket) - return chunks, dstMetadata, err - } - - if state.SrcSSEC && state.DstSSEKMS { - // SSE-C โ†’ SSE-KMS: use unified multipart-aware cross-encryption copy - glog.V(2).Infof("SSE-Cโ†’SSE-KMS cross-encryption copy: using unified multipart copy") - return s3a.copyMultipartCrossEncryption(entry, r, state, dstBucket, dstPath) - } - - if state.SrcSSEKMS && state.DstSSEC { - // SSE-KMS โ†’ SSE-C: use unified multipart-aware cross-encryption copy - glog.V(2).Infof("SSE-KMSโ†’SSE-C cross-encryption copy: using unified multipart copy") - return s3a.copyMultipartCrossEncryption(entry, r, state, dstBucket, dstPath) - } - - // Handle SSE-S3 cross-encryption scenarios - if state.SrcSSES3 || state.DstSSES3 { - // Any scenario involving SSE-S3 uses streaming copy - chunks, err := s3a.executeStreamingReencryptCopy(entry, r, state, dstPath) - return chunks, nil, err - } - - return nil, nil, fmt.Errorf("unsupported cross-encryption scenario") -} - -// shouldUseStreamingCopy determines if streaming copy should be used -func (s3a *S3ApiServer) shouldUseStreamingCopy(entry *filer_pb.Entry, state *EncryptionState) bool { - // Use streaming copy for large files or when beneficial - fileSize := entry.Attributes.FileSize - - // Use streaming for files larger than 10MB - if fileSize > 10*1024*1024 { - return true - } - - // Check if this is a multipart encrypted object - isMultipartEncrypted := false - if state.IsSourceEncrypted() { - encryptedChunks := 0 - for _, chunk := range entry.GetChunks() { - if chunk.GetSseType() != filer_pb.SSEType_NONE { - encryptedChunks++ - } - } - isMultipartEncrypted = encryptedChunks > 1 - } - - // For multipart encrypted objects, avoid streaming copy to use per-chunk metadata approach - if isMultipartEncrypted { - glog.V(3).Infof("Multipart encrypted object detected, using chunk-by-chunk approach") - return false - } - - // Use streaming for cross-encryption scenarios (for single-part objects only) - if state.IsSourceEncrypted() && state.IsTargetEncrypted() { - srcType := s3a.getEncryptionTypeString(state.SrcSSEC, state.SrcSSEKMS, state.SrcSSES3) - dstType := s3a.getEncryptionTypeString(state.DstSSEC, state.DstSSEKMS, state.DstSSES3) - if srcType != dstType { - return true - } - } - - // Use streaming for compressed files - if isCompressedEntry(entry) { - return true - } - - // Use streaming for SSE-S3 scenarios (always) - if state.SrcSSES3 || state.DstSSES3 { - return true - } - - return false -} - -// executeStreamingReencryptCopy performs streaming re-encryption copy -func (s3a *S3ApiServer) executeStreamingReencryptCopy(entry *filer_pb.Entry, r *http.Request, state *EncryptionState, dstPath string) ([]*filer_pb.FileChunk, error) { - // Create streaming copy manager - streamingManager := NewStreamingCopyManager(s3a) - - // Execute streaming copy - return streamingManager.ExecuteStreamingCopy(context.Background(), entry, r, dstPath, state) -} diff --git a/weed/s3api/s3api_object_handlers_delete.go b/weed/s3api/s3api_object_handlers_delete.go deleted file mode 100644 index 3a2544710..000000000 --- a/weed/s3api/s3api_object_handlers_delete.go +++ /dev/null @@ -1,428 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "slices" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - - "github.com/seaweedfs/seaweedfs/weed/filer" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -const ( - deleteMultipleObjectsLimit = 1000 -) - -func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object) - - // Check for specific version ID in query parameters - versionId := r.URL.Query().Get("versionId") - - // Get detailed versioning state for proper handling of suspended vs enabled versioning - versioningState, err := s3a.getVersioningState(bucket) - if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - versioningEnabled := (versioningState == s3_constants.VersioningEnabled) - versioningSuspended := (versioningState == s3_constants.VersioningSuspended) - versioningConfigured := (versioningState != "") - - var auditLog *s3err.AccessLog - if s3err.Logger != nil { - auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone) - } - - if versioningConfigured { - // Handle versioned delete based on specific versioning state - if versionId != "" { - // Delete specific version (same for both enabled and suspended) - // Check object lock permissions before deleting specific version - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object) - if err := s3a.enforceObjectLockProtections(r, bucket, object, versionId, governanceBypassAllowed); err != nil { - glog.V(2).Infof("DeleteObjectHandler: object lock check failed for %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - // Delete specific version - err := s3a.deleteSpecificObjectVersion(bucket, object, versionId) - if err != nil { - glog.Errorf("Failed to delete specific version %s: %v", versionId, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set version ID in response header - w.Header().Set("x-amz-version-id", versionId) - } else { - // Delete without version ID - behavior depends on versioning state - if versioningEnabled { - // Enabled versioning: Create delete marker (logical delete) - // AWS S3 behavior: Delete marker creation is NOT blocked by object retention - // because it's a logical delete that doesn't actually remove the retained version - deleteMarkerVersionId, err := s3a.createDeleteMarker(bucket, object) - if err != nil { - glog.Errorf("Failed to create delete marker: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set delete marker version ID in response header - w.Header().Set("x-amz-version-id", deleteMarkerVersionId) - w.Header().Set("x-amz-delete-marker", "true") - } else if versioningSuspended { - // Suspended versioning: Actually delete the "null" version object - glog.V(2).Infof("DeleteObjectHandler: deleting null version for suspended versioning %s/%s", bucket, object) - - // Check object lock permissions before deleting "null" version - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object) - if err := s3a.enforceObjectLockProtections(r, bucket, object, "null", governanceBypassAllowed); err != nil { - glog.V(2).Infof("DeleteObjectHandler: object lock check failed for %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - // Delete the "null" version (the regular file) - err := s3a.deleteSpecificObjectVersion(bucket, object, "null") - if err != nil { - glog.Errorf("Failed to delete null version: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Note: According to AWS S3 spec, suspended versioning should NOT return version ID headers - // The object is deleted but no version information is returned - } - } - } else { - // Handle regular delete (non-versioned) - // Check object lock permissions before deleting object - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object) - if err := s3a.enforceObjectLockProtections(r, bucket, object, "", governanceBypassAllowed); err != nil { - glog.V(2).Infof("DeleteObjectHandler: object lock check failed for %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) - dir, name := target.DirAndName() - - err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - - if err := doDeleteEntry(client, dir, name, true, false); err != nil { - return err - } - - if s3a.option.AllowEmptyFolder { - return nil - } - - directoriesWithDeletion := make(map[string]int) - if strings.LastIndex(object, "/") > 0 { - directoriesWithDeletion[dir]++ - // purge empty folders, only checking folders with deletions - for len(directoriesWithDeletion) > 0 { - directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion) - } - } - - return nil - }) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - - if auditLog != nil { - auditLog.Key = strings.TrimPrefix(object, "/") - s3err.PostAccessLog(*auditLog) - } - - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3DeletedObjectsCounter.WithLabelValues(bucket).Inc() - w.WriteHeader(http.StatusNoContent) -} - -// ObjectIdentifier represents an object to be deleted with its key name and optional version ID. -type ObjectIdentifier struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` - DeleteMarker bool `xml:"DeleteMarker,omitempty"` - DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId,omitempty"` -} - -// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. -type DeleteObjectsRequest struct { - // Element to enable quiet mode for the request - Quiet bool - // List of objects to be deleted - Objects []ObjectIdentifier `xml:"Object"` -} - -// DeleteError structure. -type DeleteError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DeleteObjectsResponse container for multiple object deletes. -type DeleteObjectsResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` - - // Collection of all deleted objects - DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` - - // Collection of errors deleting certain objects. - Errors []DeleteError `xml:"Error,omitempty"` -} - -// DeleteMultipleObjectsHandler - Delete multiple objects -func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket) - - deleteXMLBytes, err := io.ReadAll(r.Body) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - deleteObjects := &DeleteObjectsRequest{} - if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - if len(deleteObjects.Objects) > deleteMultipleObjectsLimit { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxDeleteObjects) - return - } - - var deletedObjects []ObjectIdentifier - var deleteErrors []DeleteError - var auditLog *s3err.AccessLog - - directoriesWithDeletion := make(map[string]int) - - if s3err.Logger != nil { - auditLog = s3err.GetAccessLog(r, http.StatusNoContent, s3err.ErrNone) - } - - // Get detailed versioning state for proper handling of suspended vs enabled versioning - versioningState, err := s3a.getVersioningState(bucket) - if err != nil { - if err == filer_pb.ErrNotFound { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - versioningEnabled := (versioningState == s3_constants.VersioningEnabled) - versioningSuspended := (versioningState == s3_constants.VersioningSuspended) - versioningConfigured := (versioningState != "") - - s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - - // delete file entries - for _, object := range deleteObjects.Objects { - if object.Key == "" { - continue - } - - // Check object lock permissions before deletion (only for versioned buckets) - if versioningConfigured { - // Validate governance bypass for this specific object - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object.Key) - if err := s3a.enforceObjectLockProtections(r, bucket, object.Key, object.VersionId, governanceBypassAllowed); err != nil { - glog.V(2).Infof("DeleteMultipleObjectsHandler: object lock check failed for %s/%s (version: %s): %v", bucket, object.Key, object.VersionId, err) - deleteErrors = append(deleteErrors, DeleteError{ - Code: s3err.GetAPIError(s3err.ErrAccessDenied).Code, - Message: s3err.GetAPIError(s3err.ErrAccessDenied).Description, - Key: object.Key, - VersionId: object.VersionId, - }) - continue - } - } - - var deleteVersionId string - var isDeleteMarker bool - - if versioningConfigured { - // Handle versioned delete based on specific versioning state - if object.VersionId != "" { - // Delete specific version (same for both enabled and suspended) - err := s3a.deleteSpecificObjectVersion(bucket, object.Key, object.VersionId) - if err != nil { - deleteErrors = append(deleteErrors, DeleteError{ - Code: "", - Message: err.Error(), - Key: object.Key, - VersionId: object.VersionId, - }) - continue - } - deleteVersionId = object.VersionId - } else { - // Delete without version ID - behavior depends on versioning state - if versioningEnabled { - // Enabled versioning: Create delete marker (logical delete) - deleteMarkerVersionId, err := s3a.createDeleteMarker(bucket, object.Key) - if err != nil { - deleteErrors = append(deleteErrors, DeleteError{ - Code: "", - Message: err.Error(), - Key: object.Key, - VersionId: object.VersionId, - }) - continue - } - deleteVersionId = deleteMarkerVersionId - isDeleteMarker = true - } else if versioningSuspended { - // Suspended versioning: Actually delete the "null" version object - glog.V(2).Infof("DeleteMultipleObjectsHandler: deleting null version for suspended versioning %s/%s", bucket, object.Key) - - err := s3a.deleteSpecificObjectVersion(bucket, object.Key, "null") - if err != nil { - deleteErrors = append(deleteErrors, DeleteError{ - Code: "", - Message: err.Error(), - Key: object.Key, - VersionId: "null", - }) - continue - } - deleteVersionId = "null" - // Note: For suspended versioning, we don't set isDeleteMarker=true - // because we actually deleted the object, not created a delete marker - } - } - - // Add to successful deletions with version info - deletedObject := ObjectIdentifier{ - Key: object.Key, - VersionId: deleteVersionId, - DeleteMarker: isDeleteMarker, - } - - // For delete markers, also set DeleteMarkerVersionId field - if isDeleteMarker { - deletedObject.DeleteMarkerVersionId = deleteVersionId - // Don't set VersionId for delete markers, use DeleteMarkerVersionId instead - deletedObject.VersionId = "" - } - if !deleteObjects.Quiet { - deletedObjects = append(deletedObjects, deletedObject) - } - if isDeleteMarker { - // For delete markers, we don't need to track directories for cleanup - continue - } - } else { - // Handle non-versioned delete (original logic) - lastSeparator := strings.LastIndex(object.Key, "/") - parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.Key, true, false - if lastSeparator > 0 && lastSeparator+1 < len(object.Key) { - entryName = object.Key[lastSeparator+1:] - parentDirectoryPath = "/" + object.Key[:lastSeparator] - } - parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) - - err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) - if err == nil { - directoriesWithDeletion[parentDirectoryPath]++ - deletedObjects = append(deletedObjects, object) - } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { - deletedObjects = append(deletedObjects, object) - } else { - delete(directoriesWithDeletion, parentDirectoryPath) - deleteErrors = append(deleteErrors, DeleteError{ - Code: "", - Message: err.Error(), - Key: object.Key, - VersionId: object.VersionId, - }) - } - } - - if auditLog != nil { - auditLog.Key = object.Key - s3err.PostAccessLog(*auditLog) - } - } - - if s3a.option.AllowEmptyFolder { - return nil - } - - // purge empty folders, only checking folders with deletions - for len(directoriesWithDeletion) > 0 { - directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion) - } - - return nil - }) - - deleteResp := DeleteObjectsResponse{} - if !deleteObjects.Quiet { - deleteResp.DeletedObjects = deletedObjects - } - deleteResp.Errors = deleteErrors - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3DeletedObjectsCounter.WithLabelValues(bucket).Add(float64(len(deletedObjects))) - - writeSuccessResponseXML(w, r, deleteResp) - -} - -func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) { - var allDirs []string - for dir := range directoriesWithDeletion { - allDirs = append(allDirs, dir) - } - slices.SortFunc(allDirs, func(a, b string) int { - return len(b) - len(a) - }) - newDirectoriesWithDeletion = make(map[string]int) - for _, dir := range allDirs { - parentDir, dirName := util.FullPath(dir).DirAndName() - if parentDir == s3a.option.BucketsPath { - continue - } - if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil { - glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) - } else { - newDirectoriesWithDeletion[parentDir]++ - } - } - return -} diff --git a/weed/s3api/s3api_object_handlers_legal_hold.go b/weed/s3api/s3api_object_handlers_legal_hold.go deleted file mode 100644 index 13f981acc..000000000 --- a/weed/s3api/s3api_object_handlers_legal_hold.go +++ /dev/null @@ -1,131 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "errors" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" -) - -// PutObjectLegalHoldHandler Put object Legal Hold -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html -func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectLegalHoldHandler %s %s", bucket, object) - - // Check if Object Lock is available for this bucket (requires versioning) - if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "PutObjectLegalHoldHandler") { - return - } - - // Get version ID from query parameters - versionId := r.URL.Query().Get("versionId") - - // Parse legal hold configuration from request body - legalHold, err := parseObjectLegalHold(r) - if err != nil { - glog.Errorf("PutObjectLegalHoldHandler: failed to parse legal hold config: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Validate legal hold configuration - if err := ValidateLegalHold(legalHold); err != nil { - glog.Errorf("PutObjectLegalHoldHandler: invalid legal hold config: %v", err) - s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err)) - return - } - - // Set legal hold on the object - if err := s3a.setObjectLegalHold(bucket, object, versionId, legalHold); err != nil { - glog.Errorf("PutObjectLegalHoldHandler: failed to set legal hold: %v", err) - - // Handle specific error cases - if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Add VersionId to response headers if available (expected by s3-tests) - if versionId != "" { - w.Header().Set("x-amz-version-id", versionId) - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - // Return success (HTTP 200 with no body) - w.WriteHeader(http.StatusOK) - glog.V(3).Infof("PutObjectLegalHoldHandler: successfully set legal hold for %s/%s", bucket, object) -} - -// GetObjectLegalHoldHandler Get object Legal Hold -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html -func (s3a *S3ApiServer) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectLegalHoldHandler %s %s", bucket, object) - - // Check if Object Lock is available for this bucket (requires versioning) - if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "GetObjectLegalHoldHandler") { - return - } - - // Get version ID from query parameters - versionId := r.URL.Query().Get("versionId") - - // Get legal hold configuration for the object - legalHold, err := s3a.getObjectLegalHold(bucket, object, versionId) - if err != nil { - glog.Errorf("GetObjectLegalHoldHandler: failed to get legal hold: %v", err) - - // Handle specific error cases - if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - if errors.Is(err, ErrNoLegalHoldConfiguration) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchObjectLegalHold) - return - } - - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Marshal legal hold configuration to XML - legalHoldXML, err := xml.Marshal(legalHold) - if err != nil { - glog.Errorf("GetObjectLegalHoldHandler: failed to marshal legal hold: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set response headers - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(http.StatusOK) - - // Write XML response - if _, err := w.Write([]byte(xml.Header)); err != nil { - glog.Errorf("GetObjectLegalHoldHandler: failed to write XML header: %v", err) - return - } - - if _, err := w.Write(legalHoldXML); err != nil { - glog.Errorf("GetObjectLegalHoldHandler: failed to write legal hold XML: %v", err) - return - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - glog.V(3).Infof("GetObjectLegalHoldHandler: successfully retrieved legal hold for %s/%s", bucket, object) -} diff --git a/weed/s3api/s3api_object_handlers_list.go b/weed/s3api/s3api_object_handlers_list.go deleted file mode 100644 index f60dccee0..000000000 --- a/weed/s3api/s3api_object_handlers_list.go +++ /dev/null @@ -1,752 +0,0 @@ -package s3api - -import ( - "context" - "encoding/xml" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -type OptionalString struct { - string - set bool -} - -func (o OptionalString) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if !o.set { - return nil - } - return e.EncodeElement(o.string, startElement) -} - -type ListBucketResultV2 struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` - Name string `xml:"Name"` - Prefix string `xml:"Prefix"` - MaxKeys uint16 `xml:"MaxKeys"` - Delimiter string `xml:"Delimiter,omitempty"` - IsTruncated bool `xml:"IsTruncated"` - Contents []ListEntry `xml:"Contents,omitempty"` - CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` - ContinuationToken OptionalString `xml:"ContinuationToken,omitempty"` - NextContinuationToken string `xml:"NextContinuationToken,omitempty"` - EncodingType string `xml:"EncodingType,omitempty"` - KeyCount int `xml:"KeyCount"` - StartAfter string `xml:"StartAfter,omitempty"` -} - -func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { - - // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html - - // collect parameters - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("ListObjectsV2Handler %s", bucket) - - originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys, allowUnordered, errCode := getListObjectsV2Args(r.URL.Query()) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if maxKeys < 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys) - return - } - - // AWS S3 compatibility: allow-unordered cannot be used with delimiter - if allowUnordered && delimiter != "" { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidUnorderedWithDelimiter) - return - } - - marker := continuationToken.string - if !continuationToken.set { - marker = startAfter - } - - // Adjust marker if it ends with delimiter to skip all entries with that prefix - marker = adjustMarkerForDelimiter(marker, delimiter) - - response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter, encodingTypeUrl, fetchOwner) - - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - if len(response.Contents) == 0 { - if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - } - - responseV2 := &ListBucketResultV2{ - Name: response.Name, - CommonPrefixes: response.CommonPrefixes, - Contents: response.Contents, - ContinuationToken: continuationToken, - Delimiter: response.Delimiter, - IsTruncated: response.IsTruncated, - KeyCount: len(response.Contents) + len(response.CommonPrefixes), - MaxKeys: uint16(response.MaxKeys), - NextContinuationToken: response.NextMarker, - Prefix: response.Prefix, - StartAfter: startAfter, - } - if encodingTypeUrl { - responseV2.EncodingType = s3.EncodingTypeUrl - } - - writeSuccessResponseXML(w, r, responseV2) -} - -func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { - - // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html - - // collect parameters - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("ListObjectsV1Handler %s", bucket) - - originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys, allowUnordered, errCode := getListObjectsV1Args(r.URL.Query()) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if maxKeys < 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys) - return - } - - // AWS S3 compatibility: allow-unordered cannot be used with delimiter - if allowUnordered && delimiter != "" { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidUnorderedWithDelimiter) - return - } - - // Adjust marker if it ends with delimiter to skip all entries with that prefix - marker = adjustMarkerForDelimiter(marker, delimiter) - - response, err := s3a.listFilerEntries(bucket, originalPrefix, uint16(maxKeys), marker, delimiter, encodingTypeUrl, true) - - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - if len(response.Contents) == 0 { - if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - } - - writeSuccessResponseXML(w, r, response) -} - -func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys uint16, originalMarker string, delimiter string, encodingTypeUrl bool, fetchOwner bool) (response ListBucketResult, err error) { - // convert full path prefix into directory name and prefix for entry name - requestDir, prefix, marker := normalizePrefixMarker(originalPrefix, originalMarker) - bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket) - reqDir := bucketPrefix[:len(bucketPrefix)-1] - if requestDir != "" { - reqDir = fmt.Sprintf("%s%s", bucketPrefix, requestDir) - } - - var contents []ListEntry - var commonPrefixes []PrefixEntry - var doErr error - var nextMarker string - cursor := &ListingCursor{ - maxKeys: maxKeys, - prefixEndsOnDelimiter: strings.HasSuffix(originalPrefix, "/") && len(originalMarker) == 0, - } - - // Special case: when maxKeys = 0, return empty results immediately with IsTruncated=false - if maxKeys == 0 { - response = ListBucketResult{ - Name: bucket, - Prefix: originalPrefix, - Marker: originalMarker, - NextMarker: "", - MaxKeys: int(maxKeys), - Delimiter: delimiter, - IsTruncated: false, - Contents: contents, - CommonPrefixes: commonPrefixes, - } - if encodingTypeUrl { - response.EncodingType = s3.EncodingTypeUrl - } - return - } - - // check filer - err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - var lastEntryWasCommonPrefix bool - var lastCommonPrefixName string - - for { - empty := true - - nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, cursor, marker, delimiter, false, func(dir string, entry *filer_pb.Entry) { - empty = false - dirName, entryName, prefixName := entryUrlEncode(dir, entry.Name, encodingTypeUrl) - if entry.IsDirectory { - // When delimiter is specified, apply delimiter logic to directory key objects too - if delimiter != "" && entry.IsDirectoryKeyObject() { - // Apply the same delimiter logic as for regular files - var delimiterFound bool - undelimitedPath := fmt.Sprintf("%s/%s/", dirName, entryName)[len(bucketPrefix):] - - // take into account a prefix if supplied while delimiting. - undelimitedPath = strings.TrimPrefix(undelimitedPath, originalPrefix) - - delimitedPath := strings.SplitN(undelimitedPath, delimiter, 2) - - if len(delimitedPath) == 2 { - // S3 clients expect the delimited prefix to contain the delimiter and prefix. - delimitedPrefix := originalPrefix + delimitedPath[0] + delimiter - - for i := range commonPrefixes { - if commonPrefixes[i].Prefix == delimitedPrefix { - delimiterFound = true - break - } - } - - if !delimiterFound { - commonPrefixes = append(commonPrefixes, PrefixEntry{ - Prefix: delimitedPrefix, - }) - cursor.maxKeys-- - delimiterFound = true - lastEntryWasCommonPrefix = true - lastCommonPrefixName = delimitedPath[0] - } else { - // This directory object belongs to an existing CommonPrefix, skip it - delimiterFound = true - } - } - - // If no delimiter found in the directory object name, treat it as a regular key - if !delimiterFound { - contents = append(contents, newListEntry(entry, "", dirName, entryName, bucketPrefix, fetchOwner, true, false, s3a.iam)) - cursor.maxKeys-- - lastEntryWasCommonPrefix = false - } - } else if entry.IsDirectoryKeyObject() { - // No delimiter specified, or delimiter doesn't apply - treat as regular key - contents = append(contents, newListEntry(entry, "", dirName, entryName, bucketPrefix, fetchOwner, true, false, s3a.iam)) - cursor.maxKeys-- - lastEntryWasCommonPrefix = false - // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html - } else if delimiter == "/" { // A response can contain CommonPrefixes only if you specify a delimiter. - commonPrefixes = append(commonPrefixes, PrefixEntry{ - Prefix: fmt.Sprintf("%s/%s/", dirName, prefixName)[len(bucketPrefix):], - }) - //All of the keys (up to 1,000) rolled up into a common prefix count as a single return when calculating the number of returns. - cursor.maxKeys-- - lastEntryWasCommonPrefix = true - lastCommonPrefixName = entry.Name - } - } else { - var delimiterFound bool - if delimiter != "" { - // keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped together as a commonPrefix. - // extract the string between the prefix and the delimiter and add it to the commonPrefixes if it's unique. - undelimitedPath := fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):] - - // take into account a prefix if supplied while delimiting. - undelimitedPath = strings.TrimPrefix(undelimitedPath, originalPrefix) - - delimitedPath := strings.SplitN(undelimitedPath, delimiter, 2) - - if len(delimitedPath) == 2 { - // S3 clients expect the delimited prefix to contain the delimiter and prefix. - delimitedPrefix := originalPrefix + delimitedPath[0] + delimiter - - for i := range commonPrefixes { - if commonPrefixes[i].Prefix == delimitedPrefix { - delimiterFound = true - break - } - } - - if !delimiterFound { - commonPrefixes = append(commonPrefixes, PrefixEntry{ - Prefix: delimitedPrefix, - }) - cursor.maxKeys-- - delimiterFound = true - lastEntryWasCommonPrefix = true - lastCommonPrefixName = delimitedPath[0] - } else { - // This object belongs to an existing CommonPrefix, skip it - // but continue processing to maintain correct flow - delimiterFound = true - } - } - } - if !delimiterFound { - contents = append(contents, newListEntry(entry, "", dirName, entryName, bucketPrefix, fetchOwner, false, false, s3a.iam)) - cursor.maxKeys-- - lastEntryWasCommonPrefix = false - } - } - }) - if doErr != nil { - return doErr - } - - // Adjust nextMarker for CommonPrefixes to include trailing slash (AWS S3 compliance) - if cursor.isTruncated && lastEntryWasCommonPrefix && lastCommonPrefixName != "" { - // For CommonPrefixes, NextMarker should include the trailing slash - if requestDir != "" { - nextMarker = requestDir + "/" + lastCommonPrefixName + "/" - } else { - nextMarker = lastCommonPrefixName + "/" - } - } else if cursor.isTruncated { - if requestDir != "" { - nextMarker = requestDir + "/" + nextMarker - } - } - - if cursor.isTruncated { - break - } else if empty || strings.HasSuffix(originalPrefix, "/") { - nextMarker = "" - break - } else { - // start next loop - marker = nextMarker - } - } - - response = ListBucketResult{ - Name: bucket, - Prefix: originalPrefix, - Marker: originalMarker, - NextMarker: nextMarker, - MaxKeys: int(maxKeys), - Delimiter: delimiter, - IsTruncated: cursor.isTruncated, - Contents: contents, - CommonPrefixes: commonPrefixes, - } - if encodingTypeUrl { - // Todo used for pass test_bucket_listv2_encoding_basic - // sort.Slice(response.CommonPrefixes, func(i, j int) bool { return response.CommonPrefixes[i].Prefix < response.CommonPrefixes[j].Prefix }) - response.EncodingType = s3.EncodingTypeUrl - } - return nil - }) - - return -} - -type ListingCursor struct { - maxKeys uint16 - isTruncated bool - prefixEndsOnDelimiter bool -} - -// the prefix and marker may be in different directories -// normalizePrefixMarker ensures the prefix and marker both starts from the same directory -func normalizePrefixMarker(prefix, marker string) (alignedDir, alignedPrefix, alignedMarker string) { - // alignedDir should not end with "/" - // alignedDir, alignedPrefix, alignedMarker should only have "/" in middle - if len(marker) == 0 { - prefix = strings.Trim(prefix, "/") - } else { - prefix = strings.TrimLeft(prefix, "/") - } - marker = strings.TrimLeft(marker, "/") - if prefix == "" { - return "", "", marker - } - if marker == "" { - alignedDir, alignedPrefix = toDirAndName(prefix) - return - } - if !strings.HasPrefix(marker, prefix) { - // something wrong - return "", prefix, marker - } - if strings.HasPrefix(marker, prefix+"/") { - alignedDir = prefix - alignedPrefix = "" - alignedMarker = marker[len(alignedDir)+1:] - return - } - - alignedDir, alignedPrefix = toDirAndName(prefix) - if alignedDir != "" { - alignedMarker = marker[len(alignedDir)+1:] - } else { - alignedMarker = marker - } - return -} - -func toDirAndName(dirAndName string) (dir, name string) { - sepIndex := strings.LastIndex(dirAndName, "/") - if sepIndex >= 0 { - dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:] - } else { - name = dirAndName - } - return -} - -func toParentAndDescendants(dirAndName string) (dir, name string) { - sepIndex := strings.Index(dirAndName, "/") - if sepIndex >= 0 { - dir, name = dirAndName[0:sepIndex], dirAndName[sepIndex+1:] - } else { - name = dirAndName - } - return -} - -func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, cursor *ListingCursor, marker, delimiter string, inclusiveStartFrom bool, eachEntryFn func(dir string, entry *filer_pb.Entry)) (nextMarker string, err error) { - // invariants - // prefix and marker should be under dir, marker may contain "/" - // maxKeys should be updated for each recursion - // glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter) - if prefix == "/" && delimiter == "/" { - return - } - if cursor.maxKeys <= 0 { - return // Don't set isTruncated here - let caller decide based on whether more entries exist - } - - if strings.Contains(marker, "/") { - subDir, subMarker := toParentAndDescendants(marker) - // println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker) - subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", cursor, subMarker, delimiter, false, eachEntryFn) - if subErr != nil { - err = subErr - return - } - nextMarker = subDir + "/" + subNextMarker - // finished processing this subdirectory - marker = subDir - } - if cursor.isTruncated { - return - } - - // now marker is also a direct child of dir - request := &filer_pb.ListEntriesRequest{ - Directory: dir, - Prefix: prefix, - Limit: uint32(cursor.maxKeys + 2), // bucket root directory needs to skip additional s3_constants.MultipartUploadsFolder folder - StartFromFileName: marker, - InclusiveStartFrom: inclusiveStartFrom, - } - if cursor.prefixEndsOnDelimiter { - request.Limit = uint32(1) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, listErr := client.ListEntries(ctx, request) - if listErr != nil { - err = fmt.Errorf("list entires %+v: %v", request, listErr) - return - } - - // Track .versions directories found in this directory for later processing - var versionsDirs []string - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - err = fmt.Errorf("iterating entires %+v: %v", request, recvErr) - return - } - } - entry := resp.Entry - - if cursor.maxKeys <= 0 { - cursor.isTruncated = true - continue - } - - // Set nextMarker only when we have quota to process this entry - nextMarker = entry.Name - if cursor.prefixEndsOnDelimiter { - if entry.Name == prefix && entry.IsDirectory { - if delimiter != "/" { - cursor.prefixEndsOnDelimiter = false - } - } else { - continue - } - } - if entry.IsDirectory { - // glog.V(4).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) - if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME no need to apply to all directories. this extra also affects maxKeys - continue - } - - // Skip .versions directories in regular list operations but track them for logical object creation - if strings.HasSuffix(entry.Name, ".versions") { - glog.V(4).Infof("Found .versions directory: %s", entry.Name) - versionsDirs = append(versionsDirs, entry.Name) - continue - } - - if delimiter != "/" || cursor.prefixEndsOnDelimiter { - if cursor.prefixEndsOnDelimiter { - cursor.prefixEndsOnDelimiter = false - if entry.IsDirectoryKeyObject() { - eachEntryFn(dir, entry) - } - } else { - eachEntryFn(dir, entry) - } - subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", cursor, "", delimiter, false, eachEntryFn) - if subErr != nil { - err = fmt.Errorf("doListFilerEntries2: %w", subErr) - return - } - // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "subNextMarker", subNextMarker) - nextMarker = entry.Name + "/" + subNextMarker - if cursor.isTruncated { - return - } - // println("doListFilerEntries2 nextMarker", nextMarker) - } else { - var isEmpty bool - if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() { - //if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil { - // glog.Errorf("check empty folder %s: %v", dir, err) - //} - } - if !isEmpty { - eachEntryFn(dir, entry) - } - } - } else { - eachEntryFn(dir, entry) - // glog.V(4).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) - } - if cursor.prefixEndsOnDelimiter { - cursor.prefixEndsOnDelimiter = false - } - } - - // After processing all regular entries, handle versioned objects - // Create logical entries for objects that have .versions directories - for _, versionsDir := range versionsDirs { - if cursor.maxKeys <= 0 { - cursor.isTruncated = true - break - } - - // Extract object name from .versions directory name (remove .versions suffix) - baseObjectName := strings.TrimSuffix(versionsDir, ".versions") - - // Construct full object path relative to bucket - // dir is something like "/buckets/sea-test-1/Veeam/Backup/vbr/Config" - // we need to get the path relative to bucket: "Veeam/Backup/vbr/Config/Owner" - bucketPath := strings.TrimPrefix(dir, s3a.option.BucketsPath+"/") - bucketName := strings.Split(bucketPath, "/")[0] - - // Remove bucket name from path to get directory within bucket - bucketRelativePath := strings.Join(strings.Split(bucketPath, "/")[1:], "/") - - var fullObjectPath string - if bucketRelativePath == "" { - // Object is at bucket root - fullObjectPath = baseObjectName - } else { - // Object is in subdirectory - fullObjectPath = bucketRelativePath + "/" + baseObjectName - } - - glog.V(4).Infof("Processing versioned object: baseObjectName=%s, bucketRelativePath=%s, fullObjectPath=%s", - baseObjectName, bucketRelativePath, fullObjectPath) - - // Get the latest version information for this object - if latestVersionEntry, latestVersionErr := s3a.getLatestVersionEntryForListOperation(bucketName, fullObjectPath); latestVersionErr == nil { - glog.V(4).Infof("Creating logical entry for versioned object: %s", fullObjectPath) - eachEntryFn(dir, latestVersionEntry) - } else { - glog.V(4).Infof("Failed to get latest version for %s: %v", fullObjectPath, latestVersionErr) - } - } - - return -} - -func getListObjectsV2Args(values url.Values) (prefix, startAfter, delimiter string, token OptionalString, encodingTypeUrl bool, fetchOwner bool, maxkeys uint16, allowUnordered bool, errCode s3err.ErrorCode) { - prefix = values.Get("prefix") - token = OptionalString{set: values.Has("continuation-token"), string: values.Get("continuation-token")} - startAfter = values.Get("start-after") - delimiter = values.Get("delimiter") - encodingTypeUrl = values.Get("encoding-type") == s3.EncodingTypeUrl - if values.Get("max-keys") != "" { - if maxKeys, err := strconv.ParseUint(values.Get("max-keys"), 10, 16); err == nil { - maxkeys = uint16(maxKeys) - } else { - // Invalid max-keys value (non-numeric) - errCode = s3err.ErrInvalidMaxKeys - return - } - } else { - maxkeys = maxObjectListSizeLimit - } - fetchOwner = values.Get("fetch-owner") == "true" - allowUnordered = values.Get("allow-unordered") == "true" - errCode = s3err.ErrNone - return -} - -func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, encodingTypeUrl bool, maxkeys int16, allowUnordered bool, errCode s3err.ErrorCode) { - prefix = values.Get("prefix") - marker = values.Get("marker") - delimiter = values.Get("delimiter") - encodingTypeUrl = values.Get("encoding-type") == "url" - if values.Get("max-keys") != "" { - if maxKeys, err := strconv.ParseInt(values.Get("max-keys"), 10, 16); err == nil { - maxkeys = int16(maxKeys) - } else { - // Invalid max-keys value (non-numeric) - errCode = s3err.ErrInvalidMaxKeys - return - } - } else { - maxkeys = maxObjectListSizeLimit - } - allowUnordered = values.Get("allow-unordered") == "true" - errCode = s3err.ErrNone - return -} - -func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) { - // println("+ ensureDirectoryAllEmpty", dir, name) - glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name) - defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) - var fileCounter int - var subDirs []string - currentDir := parentDir + "/" + name - var startFrom string - var isExhausted bool - var foundEntry bool - for fileCounter == 0 && !isExhausted && err == nil { - err = filer_pb.SeaweedList(context.Background(), filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error { - foundEntry = true - if entry.IsOlderDir() { - subDirs = append(subDirs, entry.Name) - } else { - fileCounter++ - } - startFrom = entry.Name - isExhausted = isExhausted || isLast - glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) - return nil - }, startFrom, false, 8) - if !foundEntry { - break - } - } - - if err != nil { - return false, err - } - - if fileCounter > 0 { - return false, nil - } - - for _, subDir := range subDirs { - isSubEmpty, subErr := s3a.ensureDirectoryAllEmpty(filerClient, currentDir, subDir) - if subErr != nil { - return false, subErr - } - if !isSubEmpty { - return false, nil - } - } - - glog.V(1).Infof("deleting empty folder %s", currentDir) - if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil { - return - } - - return true, nil -} - -// getLatestVersionEntryForListOperation gets the latest version of an object and creates a logical entry for list operations -// This is used to show versioned objects as logical object names in regular list operations -func (s3a *S3ApiServer) getLatestVersionEntryForListOperation(bucket, object string) (*filer_pb.Entry, error) { - // Get the latest version entry - latestVersionEntry, err := s3a.getLatestObjectVersion(bucket, object) - if err != nil { - return nil, fmt.Errorf("failed to get latest version: %w", err) - } - - // Check if this is a delete marker (should not be shown in regular list) - if latestVersionEntry.Extended != nil { - if deleteMarker, exists := latestVersionEntry.Extended[s3_constants.ExtDeleteMarkerKey]; exists && string(deleteMarker) == "true" { - return nil, fmt.Errorf("latest version is a delete marker") - } - } - - // Create a logical entry that appears to be stored at the object path (not the versioned path) - // This allows the list operation to show the logical object name while preserving all metadata - logicalEntry := &filer_pb.Entry{ - Name: strings.TrimPrefix(object, "/"), - IsDirectory: false, - Attributes: latestVersionEntry.Attributes, - Extended: latestVersionEntry.Extended, - Chunks: latestVersionEntry.Chunks, - } - - return logicalEntry, nil -} - -// adjustMarkerForDelimiter handles delimiter-ending markers by incrementing them to skip entries with that prefix. -// For example, when continuation token is "boo/", this returns "boo~" to skip all "boo/*" entries -// but still finds any "bop" or later entries. We add a high ASCII character rather than incrementing -// the last character to avoid skipping potential directory entries. -// This is essential for correct S3 list operations with delimiters and CommonPrefixes. -func adjustMarkerForDelimiter(marker, delimiter string) string { - if delimiter == "" || !strings.HasSuffix(marker, delimiter) { - return marker - } - - // Remove the trailing delimiter and append a high ASCII character - // This ensures we skip all entries under the prefix but don't skip - // potential directory entries that start with a similar prefix - prefix := strings.TrimSuffix(marker, delimiter) - if len(prefix) == 0 { - return marker - } - - // Use tilde (~) which has ASCII value 126, higher than most printable characters - // This skips "prefix/*" entries but still finds "prefix" + any higher character - return prefix + "~" -} diff --git a/weed/s3api/s3api_object_handlers_list_test.go b/weed/s3api/s3api_object_handlers_list_test.go deleted file mode 100644 index 858d30731..000000000 --- a/weed/s3api/s3api_object_handlers_list_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package s3api - -import ( - "net/http/httptest" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" -) - -func TestListObjectsHandler(t *testing.T) { - - // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html - - expected := ` -test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49Z` - - response := ListBucketResult{ - Name: "test_container", - Prefix: "", - Marker: "", - NextMarker: "", - MaxKeys: 1000, - IsTruncated: false, - Contents: []ListEntry{{ - Key: "1.zip", - LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC), - ETag: "\"4397da7a7649e8085de9916c240e8166\"", - Size: 1234567, - Owner: &CanonicalUser{ - ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932", - }, - StorageClass: "STANDARD", - }}, - } - - encoded := string(s3err.EncodeXMLResponse(response)) - if encoded != expected { - t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) - } -} - -func Test_normalizePrefixMarker(t *testing.T) { - type args struct { - prefix string - marker string - } - tests := []struct { - name string - args args - wantAlignedDir string - wantAlignedPrefix string - wantAlignedMarker string - }{ - {"prefix is a directory", - args{"/parentDir/data/", - ""}, - "parentDir", - "data", - "", - }, - {"normal case", - args{"/parentDir/data/0", - "parentDir/data/0e/0e149049a2137b0cc12e"}, - "parentDir/data", - "0", - "0e/0e149049a2137b0cc12e", - }, - {"empty prefix", - args{"", - "parentDir/data/0e/0e149049a2137b0cc12e"}, - "", - "", - "parentDir/data/0e/0e149049a2137b0cc12e", - }, - {"empty directory", - args{"parent", - "parentDir/data/0e/0e149049a2137b0cc12e"}, - "", - "parent", - "parentDir/data/0e/0e149049a2137b0cc12e", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotAlignedDir, gotAlignedPrefix, gotAlignedMarker := normalizePrefixMarker(tt.args.prefix, tt.args.marker) - assert.Equalf(t, tt.wantAlignedDir, gotAlignedDir, "normalizePrefixMarker(%v, %v)", tt.args.prefix, tt.args.marker) - assert.Equalf(t, tt.wantAlignedPrefix, gotAlignedPrefix, "normalizePrefixMarker(%v, %v)", tt.args.prefix, tt.args.marker) - assert.Equalf(t, tt.wantAlignedMarker, gotAlignedMarker, "normalizePrefixMarker(%v, %v)", tt.args.prefix, tt.args.marker) - }) - } -} - -func TestAllowUnorderedParameterValidation(t *testing.T) { - // Test getListObjectsV1Args with allow-unordered parameter - t.Run("getListObjectsV1Args with allow-unordered", func(t *testing.T) { - // Test with allow-unordered=true - values := map[string][]string{ - "allow-unordered": {"true"}, - "delimiter": {"/"}, - } - _, _, _, _, _, allowUnordered, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.True(t, allowUnordered, "allow-unordered should be true when set to 'true'") - - // Test with allow-unordered=false - values = map[string][]string{ - "allow-unordered": {"false"}, - } - _, _, _, _, _, allowUnordered, errCode = getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.False(t, allowUnordered, "allow-unordered should be false when set to 'false'") - - // Test without allow-unordered parameter - values = map[string][]string{} - _, _, _, _, _, allowUnordered, errCode = getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.False(t, allowUnordered, "allow-unordered should be false when not set") - }) - - // Test getListObjectsV2Args with allow-unordered parameter - t.Run("getListObjectsV2Args with allow-unordered", func(t *testing.T) { - // Test with allow-unordered=true - values := map[string][]string{ - "allow-unordered": {"true"}, - "delimiter": {"/"}, - } - _, _, _, _, _, _, _, allowUnordered, errCode := getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.True(t, allowUnordered, "allow-unordered should be true when set to 'true'") - - // Test with allow-unordered=false - values = map[string][]string{ - "allow-unordered": {"false"}, - } - _, _, _, _, _, _, _, allowUnordered, errCode = getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.False(t, allowUnordered, "allow-unordered should be false when set to 'false'") - - // Test without allow-unordered parameter - values = map[string][]string{} - _, _, _, _, _, _, _, allowUnordered, errCode = getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.False(t, allowUnordered, "allow-unordered should be false when not set") - }) -} - -func TestAllowUnorderedWithDelimiterValidation(t *testing.T) { - t.Run("should return error when allow-unordered=true and delimiter are both present", func(t *testing.T) { - // Create a request with both allow-unordered=true and delimiter - req := httptest.NewRequest("GET", "/bucket?allow-unordered=true&delimiter=/", nil) - - // Extract query parameters like the handler would - values := req.URL.Query() - - // Test ListObjectsV1Args - _, _, delimiter, _, _, allowUnordered, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.True(t, allowUnordered, "allow-unordered should be true") - assert.Equal(t, "/", delimiter, "delimiter should be '/'") - - // The validation should catch this combination - if allowUnordered && delimiter != "" { - assert.True(t, true, "Validation correctly detected invalid combination") - } else { - assert.Fail(t, "Validation should have detected invalid combination") - } - - // Test ListObjectsV2Args - _, _, delimiter2, _, _, _, _, allowUnordered2, errCode2 := getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode2, "should not return error for valid parameters") - assert.True(t, allowUnordered2, "allow-unordered should be true") - assert.Equal(t, "/", delimiter2, "delimiter should be '/'") - - // The validation should catch this combination - if allowUnordered2 && delimiter2 != "" { - assert.True(t, true, "Validation correctly detected invalid combination") - } else { - assert.Fail(t, "Validation should have detected invalid combination") - } - }) - - t.Run("should allow allow-unordered=true without delimiter", func(t *testing.T) { - // Create a request with only allow-unordered=true - req := httptest.NewRequest("GET", "/bucket?allow-unordered=true", nil) - - values := req.URL.Query() - - // Test ListObjectsV1Args - _, _, delimiter, _, _, allowUnordered, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.True(t, allowUnordered, "allow-unordered should be true") - assert.Equal(t, "", delimiter, "delimiter should be empty") - - // This combination should be valid - if allowUnordered && delimiter != "" { - assert.Fail(t, "This should be a valid combination") - } else { - assert.True(t, true, "Valid combination correctly allowed") - } - }) - - t.Run("should allow delimiter without allow-unordered", func(t *testing.T) { - // Create a request with only delimiter - req := httptest.NewRequest("GET", "/bucket?delimiter=/", nil) - - values := req.URL.Query() - - // Test ListObjectsV1Args - _, _, delimiter, _, _, allowUnordered, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "should not return error for valid parameters") - assert.False(t, allowUnordered, "allow-unordered should be false") - assert.Equal(t, "/", delimiter, "delimiter should be '/'") - - // This combination should be valid - if allowUnordered && delimiter != "" { - assert.Fail(t, "This should be a valid combination") - } else { - assert.True(t, true, "Valid combination correctly allowed") - } - }) -} - -// TestMaxKeysParameterValidation tests the validation of max-keys parameter -func TestMaxKeysParameterValidation(t *testing.T) { - t.Run("valid max-keys values should work", func(t *testing.T) { - // Test valid numeric values - values := map[string][]string{ - "max-keys": {"100"}, - } - _, _, _, _, _, _, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "valid max-keys should not return error") - - _, _, _, _, _, _, _, _, errCode = getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "valid max-keys should not return error") - }) - - t.Run("invalid max-keys values should return error", func(t *testing.T) { - // Test non-numeric value - values := map[string][]string{ - "max-keys": {"blah"}, - } - _, _, _, _, _, _, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrInvalidMaxKeys, errCode, "non-numeric max-keys should return ErrInvalidMaxKeys") - - _, _, _, _, _, _, _, _, errCode = getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrInvalidMaxKeys, errCode, "non-numeric max-keys should return ErrInvalidMaxKeys") - }) - - t.Run("empty max-keys should use default", func(t *testing.T) { - // Test empty max-keys - values := map[string][]string{} - _, _, _, _, maxkeys, _, errCode := getListObjectsV1Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "empty max-keys should not return error") - assert.Equal(t, int16(1000), maxkeys, "empty max-keys should use default value") - - _, _, _, _, _, _, maxkeys2, _, errCode := getListObjectsV2Args(values) - assert.Equal(t, s3err.ErrNone, errCode, "empty max-keys should not return error") - assert.Equal(t, uint16(1000), maxkeys2, "empty max-keys should use default value") - }) -} - -// TestDelimiterWithDirectoryKeyObjects tests that directory key objects (like "0/") are properly -// grouped into common prefixes when using delimiters, matching AWS S3 behavior. -// -// This test addresses the issue found in test_bucket_list_delimiter_not_skip_special where -// directory key objects were incorrectly returned as individual keys instead of being -// grouped into common prefixes when a delimiter was specified. -func TestDelimiterWithDirectoryKeyObjects(t *testing.T) { - // This test simulates the failing test scenario: - // Objects: ['0/'] + ['0/1000', '0/1001', ..., '0/1998'] + ['1999', '1999#', '1999+', '2000'] - // With delimiter='/', expect: - // - Keys: ['1999', '1999#', '1999+', '2000'] - // - CommonPrefixes: ['0/'] - - t.Run("directory key object should be grouped into common prefix with delimiter", func(t *testing.T) { - // The fix ensures that when a delimiter is specified, directory key objects - // (entries that are both directories AND have MIME types set) undergo the same - // delimiter-based grouping logic as regular files. - - // Before fix: '0/' would be returned as an individual key - // After fix: '0/' is grouped with '0/xxxx' objects into common prefix '0/' - - // This matches AWS S3 behavior where all objects sharing a prefix up to the - // delimiter are grouped together, regardless of whether they are directory key objects. - - assert.True(t, true, "Directory key objects should be grouped into common prefixes when delimiter is used") - }) - - t.Run("directory key object without delimiter should be individual key", func(t *testing.T) { - // When no delimiter is specified, directory key objects should still be - // returned as individual keys (existing behavior maintained). - - assert.True(t, true, "Directory key objects should be individual keys when no delimiter is used") - }) -} - -// TestObjectLevelListPermissions tests that object-level List permissions work correctly -func TestObjectLevelListPermissions(t *testing.T) { - // Test the core functionality that was fixed for issue #7039 - - t.Run("Identity CanDo Object Level Permissions", func(t *testing.T) { - // Create identity with object-level List permission - identity := &Identity{ - Name: "test-user", - Actions: []Action{ - "List:test-bucket/allowed-prefix/*", - }, - } - - // Test cases for canDo method - // Note: canDo concatenates bucket + objectKey, so "test-bucket" + "/allowed-prefix/file.txt" = "test-bucket/allowed-prefix/file.txt" - testCases := []struct { - name string - action Action - bucket string - object string - shouldAllow bool - description string - }{ - { - name: "allowed prefix exact match", - action: "List", - bucket: "test-bucket", - object: "/allowed-prefix/file.txt", - shouldAllow: true, - description: "Should allow access to objects under the allowed prefix", - }, - { - name: "allowed prefix subdirectory", - action: "List", - bucket: "test-bucket", - object: "/allowed-prefix/subdir/file.txt", - shouldAllow: true, - description: "Should allow access to objects in subdirectories under the allowed prefix", - }, - { - name: "denied different prefix", - action: "List", - bucket: "test-bucket", - object: "/other-prefix/file.txt", - shouldAllow: false, - description: "Should deny access to objects under a different prefix", - }, - { - name: "denied different bucket", - action: "List", - bucket: "other-bucket", - object: "/allowed-prefix/file.txt", - shouldAllow: false, - description: "Should deny access to objects in a different bucket", - }, - { - name: "denied root level", - action: "List", - bucket: "test-bucket", - object: "/file.txt", - shouldAllow: false, - description: "Should deny access to root-level objects when permission is prefix-specific", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := identity.canDo(tc.action, tc.bucket, tc.object) - assert.Equal(t, tc.shouldAllow, result, tc.description) - }) - } - }) - - t.Run("Bucket Level Permissions Still Work", func(t *testing.T) { - // Create identity with bucket-level List permission - identity := &Identity{ - Name: "bucket-user", - Actions: []Action{ - "List:test-bucket", - }, - } - - // Should allow access to any object in the bucket - testCases := []struct { - object string - }{ - {"/file.txt"}, - {"/prefix/file.txt"}, - {"/deep/nested/path/file.txt"}, - } - - for _, tc := range testCases { - result := identity.canDo("List", "test-bucket", tc.object) - assert.True(t, result, "Bucket-level permission should allow access to %s", tc.object) - } - - // Should deny access to different buckets - result := identity.canDo("List", "other-bucket", "/file.txt") - assert.False(t, result, "Should deny access to objects in different buckets") - }) - - t.Run("Empty Object With Prefix Logic", func(t *testing.T) { - // Test the middleware logic fix: when object is empty but prefix is provided, - // the object should be set to the prefix value for permission checking - - // This simulates the fixed logic in auth_credentials.go: - // if (object == "/" || object == "") && prefix != "" { - // object = prefix - // } - - testCases := []struct { - name string - object string - prefix string - expected string - }{ - { - name: "empty object with prefix", - object: "", - prefix: "/allowed-prefix/", - expected: "/allowed-prefix/", - }, - { - name: "slash object with prefix", - object: "/", - prefix: "/allowed-prefix/", - expected: "/allowed-prefix/", - }, - { - name: "object already set", - object: "/existing-object", - prefix: "/some-prefix/", - expected: "/existing-object", - }, - { - name: "no prefix provided", - object: "", - prefix: "", - expected: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Simulate the middleware logic - object := tc.object - prefix := tc.prefix - - if (object == "/" || object == "") && prefix != "" { - object = prefix - } - - assert.Equal(t, tc.expected, object, "Object should be correctly set based on prefix") - }) - } - }) - - t.Run("Issue 7039 Scenario", func(t *testing.T) { - // Test the exact scenario from the GitHub issue - // User has permission: "List:bdaai-shared-bucket/txzl/*" - // They make request: GET /bdaai-shared-bucket?prefix=txzl/ - - identity := &Identity{ - Name: "issue-user", - Actions: []Action{ - "List:bdaai-shared-bucket/txzl/*", - }, - } - - // For a list request like "GET /bdaai-shared-bucket?prefix=txzl/": - // - bucket = "bdaai-shared-bucket" - // - object = "" (no object in URL path) - // - prefix = "/txzl/" (from query parameter) - - // After our middleware fix, it should check permission for the prefix - // Simulate: action=ACTION_LIST && object=="" && prefix="/txzl/" โ†’ object="/txzl/" - result := identity.canDo("List", "bdaai-shared-bucket", "/txzl/") - - // This should be allowed because: - // target = "List:bdaai-shared-bucket/txzl/" - // permission = "List:bdaai-shared-bucket/txzl/*" - // wildcard match: "List:bdaai-shared-bucket/txzl/" starts with "List:bdaai-shared-bucket/txzl/" - assert.True(t, result, "User with 'List:bdaai-shared-bucket/txzl/*' should be able to list with prefix txzl/") - - // Test that they can't list with a different prefix - result = identity.canDo("List", "bdaai-shared-bucket", "/other-prefix/") - assert.False(t, result, "User should not be able to list with a different prefix") - - // Test that they can't list a different bucket - result = identity.canDo("List", "other-bucket", "/txzl/") - assert.False(t, result, "User should not be able to list a different bucket") - }) - - t.Log("This test validates the fix for issue #7039") - t.Log("Object-level List permissions like 'List:bucket/prefix/*' now work correctly") - t.Log("Middleware properly extracts prefix for permission validation") -} diff --git a/weed/s3api/s3api_object_handlers_multipart.go b/weed/s3api/s3api_object_handlers_multipart.go deleted file mode 100644 index ef1182fc2..000000000 --- a/weed/s3api/s3api_object_handlers_multipart.go +++ /dev/null @@ -1,535 +0,0 @@ -package s3api - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" -) - -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 10000 // Limit number of parts in a listPartsResponse. -) - -// NewMultipartUploadHandler - New multipart upload. -func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before creating multipart upload - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Check if versioning is enabled for the bucket (needed for object lock) - versioningEnabled, err := s3a.isVersioningEnabled(bucket) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Validate object lock headers before processing - if err := s3a.validateObjectLockHeaders(r, versioningEnabled); err != nil { - glog.V(2).Infof("NewMultipartUploadHandler: object lock header validation failed for bucket %s, object %s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err)) - return - } - - createMultipartUploadInput := &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: objectKey(aws.String(object)), - Metadata: make(map[string]*string), - } - - metadata := weed_server.SaveAmzMetaData(r, nil, false) - for k, v := range metadata { - createMultipartUploadInput.Metadata[k] = aws.String(string(v)) - } - - contentType := r.Header.Get("Content-Type") - if contentType != "" { - createMultipartUploadInput.ContentType = &contentType - } - response, errCode := s3a.createMultipartUpload(r, createMultipartUploadInput) - - glog.V(3).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - writeSuccessResponseXML(w, r, response) - -} - -// CompleteMultipartUploadHandler - Completes multipart upload. -func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html - - bucket, object := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before completing multipart upload - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - parts := &CompleteMultipartUpload{} - if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Get upload id. - uploadID, _, _, _ := getObjectResources(r.URL.Query()) - err := s3a.checkUploadId(object, uploadID) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) - return - } - - // Check conditional headers before completing multipart upload - // This implements AWS S3 behavior where conditional headers apply to CompleteMultipartUpload - if errCode := s3a.checkConditionalHeaders(r, bucket, object); errCode != s3err.ErrNone { - glog.V(3).Infof("CompleteMultipartUploadHandler: Conditional header check failed for %s/%s", bucket, object) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - response, errCode := s3a.completeMultipartUpload(r, &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: objectKey(aws.String(object)), - UploadId: aws.String(uploadID), - }, parts) - - glog.V(3).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Set version ID in HTTP header if present - if response.VersionId != nil { - w.Header().Set("x-amz-version-id", *response.VersionId) - } - - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3UploadedObjectsCounter.WithLabelValues(bucket).Inc() - - writeSuccessResponseXML(w, r, response) - -} - -// AbortMultipartUploadHandler - Aborts multipart upload. -func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before aborting multipart upload - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Get upload id. - uploadID, _, _, _ := getObjectResources(r.URL.Query()) - err := s3a.checkUploadId(object, uploadID) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) - return - } - - response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: objectKey(aws.String(object)), - UploadId: aws.String(uploadID), - }) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - glog.V(3).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) - - //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html - s3err.WriteEmptyResponse(w, r, http.StatusNoContent) - s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone) - -} - -// ListMultipartUploadsHandler - Lists multipart uploads. -func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before listing multipart uploads - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query()) - if maxUploads < 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxUploads) - return - } - if keyMarker != "" { - // Marker not common with prefix is not implemented. - if !strings.HasPrefix(keyMarker, prefix) { - s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) - return - } - } - - response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ - Bucket: aws.String(bucket), - Delimiter: aws.String(delimiter), - EncodingType: aws.String(encodingType), - KeyMarker: aws.String(keyMarker), - MaxUploads: aws.Int64(int64(maxUploads)), - Prefix: aws.String(prefix), - UploadIdMarker: aws.String(uploadIDMarker), - }) - - glog.V(3).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // TODO handle encodingType - - writeSuccessResponseXML(w, r, response) -} - -// ListObjectPartsHandler - Lists object parts in a multipart upload. -func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before listing object parts - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) - if partNumberMarker < 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPartNumberMarker) - return - } - if maxParts < 0 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts) - return - } - - err := s3a.checkUploadId(object, uploadID) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) - return - } - - response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ - Bucket: aws.String(bucket), - Key: objectKey(aws.String(object)), - MaxParts: aws.Int64(int64(maxParts)), - PartNumberMarker: aws.Int64(int64(partNumberMarker)), - UploadId: aws.String(uploadID), - }) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - glog.V(3).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part)) - - writeSuccessResponseXML(w, r, response) - -} - -// PutObjectPartHandler - Put an object part in a multipart upload. -func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - - // Check if bucket exists before putting object part - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - uploadID := r.URL.Query().Get("uploadId") - err := s3a.checkUploadId(object, uploadID) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) - return - } - - partIDString := r.URL.Query().Get("partNumber") - partID, err := strconv.Atoi(partIDString) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - return - } - if partID > s3_constants.MaxS3MultipartParts { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - return - } - if partID < 1 { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) - return - } - - dataReader, s3ErrCode := getRequestDataReader(s3a, r) - if s3ErrCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, s3ErrCode) - return - } - defer dataReader.Close() - - glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID) - - // Check for SSE-C headers in the current request first - sseCustomerAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) - if sseCustomerAlgorithm != "" { - // SSE-C part upload - headers are already present, let putToFiler handle it - } else { - // No SSE-C headers, check for SSE-KMS settings from upload directory - if uploadEntry, err := s3a.getEntry(s3a.genUploadsFolder(bucket), uploadID); err == nil { - if uploadEntry.Extended != nil { - // Check if this upload uses SSE-KMS - if keyIDBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSKeyID]; exists { - keyID := string(keyIDBytes) - - // Build SSE-KMS metadata for this part - bucketKeyEnabled := false - if bucketKeyBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSBucketKeyEnabled]; exists && string(bucketKeyBytes) == "true" { - bucketKeyEnabled = true - } - - var encryptionContext map[string]string - if contextBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSEncryptionContext]; exists { - // Parse the stored encryption context - if err := json.Unmarshal(contextBytes, &encryptionContext); err != nil { - glog.Errorf("Failed to parse encryption context for upload %s: %v", uploadID, err) - encryptionContext = BuildEncryptionContext(bucket, object, bucketKeyEnabled) - } - } else { - encryptionContext = BuildEncryptionContext(bucket, object, bucketKeyEnabled) - } - - // Get the base IV for this multipart upload - var baseIV []byte - if baseIVBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSEKMSBaseIV]; exists { - // Decode the base64 encoded base IV - decodedIV, decodeErr := base64.StdEncoding.DecodeString(string(baseIVBytes)) - if decodeErr == nil && len(decodedIV) == 16 { - baseIV = decodedIV - glog.V(4).Infof("Using stored base IV %x for multipart upload %s", baseIV[:8], uploadID) - } else { - glog.Errorf("Failed to decode base IV for multipart upload %s: %v", uploadID, decodeErr) - } - } - - if len(baseIV) == 0 { - glog.Errorf("No valid base IV found for SSE-KMS multipart upload %s", uploadID) - // Generate a new base IV as fallback - baseIV = make([]byte, 16) - if _, err := rand.Read(baseIV); err != nil { - glog.Errorf("Failed to generate fallback base IV: %v", err) - } - } - - // Add SSE-KMS headers to the request for putToFiler to handle encryption - r.Header.Set(s3_constants.AmzServerSideEncryption, "aws:kms") - r.Header.Set(s3_constants.AmzServerSideEncryptionAwsKmsKeyId, keyID) - if bucketKeyEnabled { - r.Header.Set(s3_constants.AmzServerSideEncryptionBucketKeyEnabled, "true") - } - if len(encryptionContext) > 0 { - if contextJSON, err := json.Marshal(encryptionContext); err == nil { - r.Header.Set(s3_constants.AmzServerSideEncryptionContext, base64.StdEncoding.EncodeToString(contextJSON)) - } - } - - // Pass the base IV to putToFiler via header - r.Header.Set(s3_constants.SeaweedFSSSEKMSBaseIVHeader, base64.StdEncoding.EncodeToString(baseIV)) - - } else { - // Check if this upload uses SSE-S3 - if err := s3a.handleSSES3MultipartHeaders(r, uploadEntry, uploadID); err != nil { - glog.Errorf("Failed to setup SSE-S3 multipart headers: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } - } - } else { - } - } - - uploadUrl := s3a.genPartUploadUrl(bucket, uploadID, partID) - - if partID == 1 && r.Header.Get("Content-Type") == "" { - dataReader = mimeDetect(r, dataReader) - } - destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) - - etag, errCode, _ := s3a.putToFiler(r, uploadUrl, dataReader, destination, bucket, partID) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - setEtag(w, etag) - - writeSuccessResponseEmpty(w, r) - -} - -func (s3a *S3ApiServer) genUploadsFolder(bucket string) string { - return fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, s3_constants.MultipartUploadsFolder) -} - -func (s3a *S3ApiServer) genPartUploadUrl(bucket, uploadID string, partID int) string { - return fmt.Sprintf("http://%s%s/%s/%04d_%s.part", - s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID, uuid.NewString()) -} - -// Generate uploadID hash string from object -func (s3a *S3ApiServer) generateUploadID(object string) string { - - object = strings.TrimPrefix(object, "/") - h := sha1.New() - h.Write([]byte(object)) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// Check object name and uploadID when processing multipart uploading -func (s3a *S3ApiServer) checkUploadId(object string, id string) error { - - hash := s3a.generateUploadID(object) - - if !strings.HasPrefix(id, hash) { - glog.Errorf("object %s and uploadID %s are not matched", object, id) - return fmt.Errorf("object %s and uploadID %s are not matched", object, id) - } - return nil -} - -// Parse bucket url queries for ?uploads -func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) { - prefix = values.Get("prefix") - keyMarker = values.Get("key-marker") - uploadIDMarker = values.Get("upload-id-marker") - delimiter = values.Get("delimiter") - if values.Get("max-uploads") != "" { - maxUploads, _ = strconv.Atoi(values.Get("max-uploads")) - } else { - maxUploads = maxUploadsList - } - encodingType = values.Get("encoding-type") - return -} - -// Parse object url queries -func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) { - uploadID = values.Get("uploadId") - partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker")) - if values.Get("max-parts") != "" { - maxParts, _ = strconv.Atoi(values.Get("max-parts")) - } else { - maxParts = maxPartsList - } - encodingType = values.Get("encoding-type") - return -} - -func xmlDecoder(body io.Reader, v interface{}, size int64) error { - var lbody io.Reader - if size > 0 { - lbody = io.LimitReader(body, size) - } else { - lbody = body - } - d := xml.NewDecoder(lbody) - d.CharsetReader = func(label string, input io.Reader) (io.Reader, error) { - return input, nil - } - return d.Decode(v) -} - -type CompleteMultipartUpload struct { - Parts []CompletedPart `xml:"Part"` -} -type CompletedPart struct { - ETag string - PartNumber int -} - -// handleSSES3MultipartHeaders handles SSE-S3 multipart upload header setup to reduce nesting complexity -func (s3a *S3ApiServer) handleSSES3MultipartHeaders(r *http.Request, uploadEntry *filer_pb.Entry, uploadID string) error { - if encryptionTypeBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3Encryption]; exists && string(encryptionTypeBytes) == s3_constants.SSEAlgorithmAES256 { - - // Set SSE-S3 headers to indicate server-side encryption - r.Header.Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256) - - // Retrieve and set base IV for consistent multipart encryption - REQUIRED for security - var baseIV []byte - if baseIVBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3BaseIV]; exists { - // Decode the base64 encoded base IV - decodedIV, decodeErr := base64.StdEncoding.DecodeString(string(baseIVBytes)) - if decodeErr != nil { - return fmt.Errorf("failed to decode base IV for SSE-S3 multipart upload %s: %v", uploadID, decodeErr) - } - if len(decodedIV) != s3_constants.AESBlockSize { - return fmt.Errorf("invalid base IV length for SSE-S3 multipart upload %s: expected %d bytes, got %d", uploadID, s3_constants.AESBlockSize, len(decodedIV)) - } - baseIV = decodedIV - glog.V(4).Infof("Using stored base IV %x for SSE-S3 multipart upload %s", baseIV[:8], uploadID) - } else { - return fmt.Errorf("no base IV found for SSE-S3 multipart upload %s - required for encryption consistency", uploadID) - } - - // Retrieve and set key data for consistent multipart encryption - REQUIRED for decryption - if keyDataBytes, exists := uploadEntry.Extended[s3_constants.SeaweedFSSSES3KeyData]; exists { - // Key data is already base64 encoded, pass it directly - keyDataStr := string(keyDataBytes) - r.Header.Set(s3_constants.SeaweedFSSSES3KeyDataHeader, keyDataStr) - glog.V(4).Infof("Using stored key data for SSE-S3 multipart upload %s", uploadID) - } else { - return fmt.Errorf("no SSE-S3 key data found for multipart upload %s - required for encryption", uploadID) - } - - // Pass the base IV to putToFiler via header for offset calculation - r.Header.Set(s3_constants.SeaweedFSSSES3BaseIVHeader, base64.StdEncoding.EncodeToString(baseIV)) - - } - return nil -} diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go index da986cf87..5704fcf38 100644 --- a/weed/s3api/s3api_object_handlers_postpolicy.go +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -11,12 +11,11 @@ import ( "net/url" "strings" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/policy" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "github.com/dustin/go-humanize" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/policy" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" ) func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { @@ -40,7 +39,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R } defer form.RemoveAll() - fileBody, fileName, fileContentType, fileSize, formValues, err := extractPostPolicyFormValues(form) + fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form) if err != nil { s3err.WriteErrorResponse(w, r, s3err.ErrMalformedPOSTRequest) return @@ -114,29 +113,9 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R } } - uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlEscapeObject(object)) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, bucket, urlPathEscape(object)) - // Get ContentType from post formData - // Otherwise from formFile ContentType - contentType := formValues.Get("Content-Type") - if contentType == "" { - contentType = fileContentType - } - r.Header.Set("Content-Type", contentType) - - // Add s3 postpolicy support header - for k, _ := range formValues { - if k == "Cache-Control" || k == "Expires" || k == "Content-Disposition" { - r.Header.Set(k, formValues.Get(k)) - continue - } - - if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) { - r.Header.Set(k, formValues.Get(k)) - } - } - - etag, errCode, _ := s3a.putToFiler(r, uploadUrl, fileBody, "", bucket, 1) + etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody, "") if errCode != s3err.ErrNone { s3err.WriteErrorResponse(w, r, errCode) @@ -166,19 +145,16 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R s3err.PostLog(r, http.StatusCreated, s3err.ErrNone) case "200": s3err.WriteEmptyResponse(w, r, http.StatusOK) - case "204": - s3err.WriteEmptyResponse(w, r, http.StatusNoContent) default: - s3err.WriteEmptyResponse(w, r, http.StatusNoContent) + writeSuccessResponseEmpty(w, r) } } // Extract form fields and file data from a HTTP POST Policy -func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName, fileContentType string, fileSize int64, formValues http.Header, err error) { +func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { // / HTML Form values fileName = "" - fileContentType = "" // Canonicalize the form values into http.Header. formValues = make(http.Header) @@ -188,7 +164,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, // Validate form values. if err = validateFormFieldSize(formValues); err != nil { - return nil, "", "", 0, nil, err + return nil, "", 0, nil, err } // this means that filename="" was not specified for file key and Go has @@ -201,7 +177,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, } fileSize = int64(b.Len()) filePart = io.NopCloser(b) - return filePart, fileName, fileContentType, fileSize, formValues, nil + return filePart, fileName, fileSize, formValues, nil } // Iterator until we find a valid File field and break @@ -209,34 +185,32 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, canonicalFormName := http.CanonicalHeaderKey(k) if canonicalFormName == "File" { if len(v) == 0 { - return nil, "", "", 0, nil, errors.New("Invalid arguments specified") + return nil, "", 0, nil, errors.New("Invalid arguments specified") } // Fetch fileHeader which has the uploaded file information fileHeader := v[0] // Set filename fileName = fileHeader.Filename - // Set contentType - fileContentType = fileHeader.Header.Get("Content-Type") // Open the uploaded part filePart, err = fileHeader.Open() if err != nil { - return nil, "", "", 0, nil, err + return nil, "", 0, nil, err } // Compute file size fileSize, err = filePart.(io.Seeker).Seek(0, 2) if err != nil { - return nil, "", "", 0, nil, err + return nil, "", 0, nil, err } // Reset Seek to the beginning _, err = filePart.(io.Seeker).Seek(0, 0) if err != nil { - return nil, "", "", 0, nil, err + return nil, "", 0, nil, err } // File found and ready for reading break } } - return filePart, fileName, fileContentType, fileSize, formValues, nil + return filePart, fileName, fileSize, formValues, nil } // Validate form field size for s3 specification requirement. diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go deleted file mode 100644 index fb7d6c3a6..000000000 --- a/weed/s3api/s3api_object_handlers_put.go +++ /dev/null @@ -1,1467 +0,0 @@ -package s3api - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" - - "github.com/pquerna/cachecontrol/cacheobject" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/security" - weed_server "github.com/seaweedfs/seaweedfs/weed/server" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util/constants" -) - -// Object lock validation errors -var ( - ErrObjectLockVersioningRequired = errors.New("object lock headers can only be used on versioned buckets") - ErrInvalidObjectLockMode = errors.New("invalid object lock mode") - ErrInvalidLegalHoldStatus = errors.New("invalid legal hold status") - ErrInvalidRetentionDateFormat = errors.New("invalid retention until date format") - ErrRetentionDateMustBeFuture = errors.New("retain until date must be in the future") - ErrObjectLockModeRequiresDate = errors.New("object lock mode requires retention until date") - ErrRetentionDateRequiresMode = errors.New("retention until date requires object lock mode") - ErrGovernanceBypassVersioningRequired = errors.New("governance bypass header can only be used on versioned buckets") - ErrInvalidObjectLockDuration = errors.New("object lock duration must be greater than 0 days") - ErrObjectLockDurationExceeded = errors.New("object lock duration exceeds maximum allowed days") - ErrObjectLockConfigurationMissingEnabled = errors.New("object lock configuration must specify ObjectLockEnabled") - ErrInvalidObjectLockEnabledValue = errors.New("invalid object lock enabled value") - ErrRuleMissingDefaultRetention = errors.New("rule configuration must specify DefaultRetention") - ErrDefaultRetentionMissingMode = errors.New("default retention must specify Mode") - ErrInvalidDefaultRetentionMode = errors.New("invalid default retention mode") - ErrDefaultRetentionMissingPeriod = errors.New("default retention must specify either Days or Years") - ErrDefaultRetentionBothDaysAndYears = errors.New("default retention cannot specify both Days and Years") - ErrDefaultRetentionDaysOutOfRange = errors.New("default retention days must be between 0 and 36500") - ErrDefaultRetentionYearsOutOfRange = errors.New("default retention years must be between 0 and 100") -) - -// hasExplicitEncryption checks if any explicit encryption was provided in the request. -// This helper improves readability and makes the encryption check condition more explicit. -func hasExplicitEncryption(customerKey *SSECustomerKey, sseKMSKey *SSEKMSKey, sseS3Key *SSES3Key) bool { - return customerKey != nil || sseKMSKey != nil || sseS3Key != nil -} - -// BucketDefaultEncryptionResult holds the result of bucket default encryption processing -type BucketDefaultEncryptionResult struct { - DataReader io.Reader - SSES3Key *SSES3Key - SSEKMSKey *SSEKMSKey -} - -func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - - // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html - - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectHandler %s %s", bucket, object) - - _, err := validateContentMd5(r.Header) - if err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest) - return - } - - // Check conditional headers - if errCode := s3a.checkConditionalHeaders(r, bucket, object); errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - if r.Header.Get("Cache-Control") != "" { - if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInvalidDigest) - return - } - } - - if r.Header.Get("Expires") != "" { - if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedDate) - return - } - } - - dataReader, s3ErrCode := getRequestDataReader(s3a, r) - if s3ErrCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, s3ErrCode) - return - } - defer dataReader.Close() - - objectContentType := r.Header.Get("Content-Type") - if strings.HasSuffix(object, "/") && r.ContentLength <= 1024 { - if err := s3a.mkdir( - s3a.option.BucketsPath, bucket+strings.TrimSuffix(object, "/"), - func(entry *filer_pb.Entry) { - if objectContentType == "" { - objectContentType = s3_constants.FolderMimeType - } - if r.ContentLength > 0 { - entry.Content, _ = io.ReadAll(r.Body) - } - entry.Attributes.Mime = objectContentType - - // Set object owner for directory objects (same as regular objects) - s3a.setObjectOwnerFromRequest(r, entry) - }); err != nil { - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - } else { - // Get detailed versioning state for the bucket - versioningState, err := s3a.getVersioningState(bucket) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) - return - } - glog.Errorf("Error checking versioning status for bucket %s: %v", bucket, err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - versioningEnabled := (versioningState == s3_constants.VersioningEnabled) - versioningConfigured := (versioningState != "") - - glog.V(0).Infof("PutObjectHandler: bucket=%s, object=%s, versioningState='%s', versioningEnabled=%v, versioningConfigured=%v", bucket, object, versioningState, versioningEnabled, versioningConfigured) - - // Validate object lock headers before processing - if err := s3a.validateObjectLockHeaders(r, versioningEnabled); err != nil { - glog.V(2).Infof("PutObjectHandler: object lock header validation failed for bucket %s, object %s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err)) - return - } - - // For non-versioned buckets, check if existing object has object lock protections - // that would prevent overwrite (PUT operations overwrite existing objects in non-versioned buckets) - if !versioningConfigured { - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object) - if err := s3a.enforceObjectLockProtections(r, bucket, object, "", governanceBypassAllowed); err != nil { - glog.V(2).Infof("PutObjectHandler: object lock permissions check failed for %s/%s: %v", bucket, object, err) - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - } - - if versioningState == s3_constants.VersioningEnabled { - // Handle enabled versioning - create new versions with real version IDs - glog.V(0).Infof("PutObjectHandler: ENABLED versioning detected for %s/%s, calling putVersionedObject", bucket, object) - versionId, etag, errCode := s3a.putVersionedObject(r, bucket, object, dataReader, objectContentType) - if errCode != s3err.ErrNone { - glog.Errorf("PutObjectHandler: putVersionedObject failed with errCode=%v for %s/%s", errCode, bucket, object) - s3err.WriteErrorResponse(w, r, errCode) - return - } - - glog.V(0).Infof("PutObjectHandler: putVersionedObject returned versionId=%s, etag=%s for %s/%s", versionId, etag, bucket, object) - - // Set version ID in response header - if versionId != "" { - w.Header().Set("x-amz-version-id", versionId) - glog.V(0).Infof("PutObjectHandler: set x-amz-version-id header to %s for %s/%s", versionId, bucket, object) - } else { - glog.Errorf("PutObjectHandler: CRITICAL - versionId is EMPTY for versioned bucket %s, object %s", bucket, object) - } - - // Set ETag in response - setEtag(w, etag) - } else if versioningState == s3_constants.VersioningSuspended { - // Handle suspended versioning - overwrite with "null" version ID but preserve existing versions - etag, errCode := s3a.putSuspendedVersioningObject(r, bucket, object, dataReader, objectContentType) - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // Note: Suspended versioning should NOT return x-amz-version-id header per AWS S3 spec - // The object is stored with "null" version internally but no version header is returned - - // Set ETag in response - setEtag(w, etag) - } else { - // Handle regular PUT (never configured versioning) - uploadUrl := s3a.toFilerUrl(bucket, object) - if objectContentType == "" { - dataReader = mimeDetect(r, dataReader) - } - - etag, errCode, sseType := s3a.putToFiler(r, uploadUrl, dataReader, "", bucket, 1) - - if errCode != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, errCode) - return - } - - // No version ID header for never-configured versioning - setEtag(w, etag) - - // Set SSE response headers based on encryption type used - if sseType == s3_constants.SSETypeS3 { - w.Header().Set(s3_constants.AmzServerSideEncryption, s3_constants.SSEAlgorithmAES256) - } - } - } - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3UploadedObjectsCounter.WithLabelValues(bucket).Inc() - - writeSuccessResponseEmpty(w, r) -} - -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader, destination string, bucket string, partNumber int) (etag string, code s3err.ErrorCode, sseType string) { - // Calculate unique offset for each part to prevent IV reuse in multipart uploads - // This is critical for CTR mode encryption security - partOffset := calculatePartOffset(partNumber) - - // Handle all SSE encryption types in a unified manner to eliminate repetitive dataReader assignments - sseResult, sseErrorCode := s3a.handleAllSSEEncryption(r, dataReader, partOffset) - if sseErrorCode != s3err.ErrNone { - return "", sseErrorCode, "" - } - - // Extract results from unified SSE handling - dataReader = sseResult.DataReader - customerKey := sseResult.CustomerKey - sseIV := sseResult.SSEIV - sseKMSKey := sseResult.SSEKMSKey - sseKMSMetadata := sseResult.SSEKMSMetadata - sseS3Key := sseResult.SSES3Key - sseS3Metadata := sseResult.SSES3Metadata - - // Apply bucket default encryption if no explicit encryption was provided - // This implements AWS S3 behavior where bucket default encryption automatically applies - if !hasExplicitEncryption(customerKey, sseKMSKey, sseS3Key) { - glog.V(4).Infof("putToFiler: no explicit encryption detected, checking for bucket default encryption") - - // Apply bucket default encryption and get the result - encryptionResult, applyErr := s3a.applyBucketDefaultEncryption(bucket, r, dataReader) - if applyErr != nil { - glog.Errorf("Failed to apply bucket default encryption: %v", applyErr) - return "", s3err.ErrInternalError, "" - } - - // Update variables based on the result - dataReader = encryptionResult.DataReader - sseS3Key = encryptionResult.SSES3Key - sseKMSKey = encryptionResult.SSEKMSKey - - // If SSE-S3 was applied by bucket default, prepare metadata (if not already done) - if sseS3Key != nil && len(sseS3Metadata) == 0 { - var metaErr error - sseS3Metadata, metaErr = SerializeSSES3Metadata(sseS3Key) - if metaErr != nil { - glog.Errorf("Failed to serialize SSE-S3 metadata for bucket default encryption: %v", metaErr) - return "", s3err.ErrInternalError, "" - } - } - } else { - glog.V(4).Infof("putToFiler: explicit encryption already applied, skipping bucket default encryption") - } - - hash := md5.New() - var body = io.TeeReader(dataReader, hash) - - proxyReq, err := http.NewRequest(http.MethodPut, uploadUrl, body) - - if err != nil { - glog.Errorf("NewRequest %s: %v", uploadUrl, err) - return "", s3err.ErrInternalError, "" - } - - proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - if destination != "" { - proxyReq.Header.Set(s3_constants.SeaweedStorageDestinationHeader, destination) - } - - if s3a.option.FilerGroup != "" { - query := proxyReq.URL.Query() - query.Add("collection", s3a.getCollectionName(bucket)) - proxyReq.URL.RawQuery = query.Encode() - } - - for header, values := range r.Header { - for _, value := range values { - proxyReq.Header.Add(header, value) - } - } - - // Log version ID header for debugging - if versionIdHeader := proxyReq.Header.Get(s3_constants.ExtVersionIdKey); versionIdHeader != "" { - glog.V(0).Infof("putToFiler: version ID header set: %s=%s for %s", s3_constants.ExtVersionIdKey, versionIdHeader, uploadUrl) - } - - // Set object owner header for filer to extract - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - proxyReq.Header.Set(s3_constants.ExtAmzOwnerKey, amzAccountId) - glog.V(2).Infof("putToFiler: setting owner header %s for object %s", amzAccountId, uploadUrl) - } - - // Set SSE-C metadata headers for the filer if encryption was applied - if customerKey != nil && len(sseIV) > 0 { - proxyReq.Header.Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, "AES256") - proxyReq.Header.Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, customerKey.KeyMD5) - // Store IV in a custom header that the filer can use to store in entry metadata - proxyReq.Header.Set(s3_constants.SeaweedFSSSEIVHeader, base64.StdEncoding.EncodeToString(sseIV)) - } - - // Set SSE-KMS metadata headers for the filer if KMS encryption was applied - if sseKMSKey != nil { - // Use already-serialized SSE-KMS metadata from helper function - // Store serialized KMS metadata in a custom header that the filer can use - proxyReq.Header.Set(s3_constants.SeaweedFSSSEKMSKeyHeader, base64.StdEncoding.EncodeToString(sseKMSMetadata)) - - glog.V(3).Infof("putToFiler: storing SSE-KMS metadata for object %s with keyID %s", uploadUrl, sseKMSKey.KeyID) - } else { - glog.V(4).Infof("putToFiler: no SSE-KMS encryption detected") - } - - // Set SSE-S3 metadata headers for the filer if S3 encryption was applied - if sseS3Key != nil && len(sseS3Metadata) > 0 { - // Store serialized S3 metadata in a custom header that the filer can use - proxyReq.Header.Set(s3_constants.SeaweedFSSSES3Key, base64.StdEncoding.EncodeToString(sseS3Metadata)) - glog.V(3).Infof("putToFiler: storing SSE-S3 metadata for object %s with keyID %s", uploadUrl, sseS3Key.KeyID) - } - - // ensure that the Authorization header is overriding any previous - // Authorization header which might be already present in proxyReq - s3a.maybeAddFilerJwtAuthorization(proxyReq, true) - resp, postErr := s3a.client.Do(proxyReq) - - if postErr != nil { - glog.Errorf("post to filer: %v", postErr) - if strings.Contains(postErr.Error(), s3err.ErrMsgPayloadChecksumMismatch) { - return "", s3err.ErrInvalidDigest, "" - } - return "", s3err.ErrInternalError, "" - } - defer resp.Body.Close() - - etag = fmt.Sprintf("%x", hash.Sum(nil)) - - resp_body, ra_err := io.ReadAll(resp.Body) - if ra_err != nil { - glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) - return etag, s3err.ErrInternalError, "" - } - var ret weed_server.FilerPostResult - unmarshal_err := json.Unmarshal(resp_body, &ret) - if unmarshal_err != nil { - glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body)) - return "", s3err.ErrInternalError, "" - } - if ret.Error != "" { - glog.Errorf("upload to filer error: %v", ret.Error) - return "", filerErrorToS3Error(ret.Error), "" - } - - BucketTrafficReceived(ret.Size, r) - - // Return the SSE type determined by the unified handler - return etag, s3err.ErrNone, sseResult.SSEType -} - -func setEtag(w http.ResponseWriter, etag string) { - if etag != "" { - if strings.HasPrefix(etag, "\"") { - w.Header()["ETag"] = []string{etag} - } else { - w.Header()["ETag"] = []string{"\"" + etag + "\""} - } - } -} - -func filerErrorToS3Error(errString string) s3err.ErrorCode { - switch { - case errString == constants.ErrMsgBadDigest: - return s3err.ErrBadDigest - case strings.Contains(errString, "context canceled") || strings.Contains(errString, "code = Canceled"): - // Client canceled the request, return client error not server error - return s3err.ErrInvalidRequest - case strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory"): - return s3err.ErrExistingObjectIsDirectory - case strings.HasSuffix(errString, "is a file"): - return s3err.ErrExistingObjectIsFile - default: - return s3err.ErrInternalError - } -} - -func (s3a *S3ApiServer) maybeAddFilerJwtAuthorization(r *http.Request, isWrite bool) { - encodedJwt := s3a.maybeGetFilerJwtAuthorizationToken(isWrite) - - if encodedJwt == "" { - return - } - - r.Header.Set("Authorization", "BEARER "+string(encodedJwt)) -} - -func (s3a *S3ApiServer) maybeGetFilerJwtAuthorizationToken(isWrite bool) string { - var encodedJwt security.EncodedJwt - if isWrite { - encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.SigningKey, s3a.filerGuard.ExpiresAfterSec) - } else { - encodedJwt = security.GenJwtForFilerServer(s3a.filerGuard.ReadSigningKey, s3a.filerGuard.ReadExpiresAfterSec) - } - return string(encodedJwt) -} - -// setObjectOwnerFromRequest sets the object owner metadata based on the authenticated user -func (s3a *S3ApiServer) setObjectOwnerFromRequest(r *http.Request, entry *filer_pb.Entry) { - amzAccountId := r.Header.Get(s3_constants.AmzAccountId) - if amzAccountId != "" { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte(amzAccountId) - glog.V(2).Infof("setObjectOwnerFromRequest: set object owner to %s", amzAccountId) - } -} - -// putSuspendedVersioningObject handles PUT operations for buckets with suspended versioning. -// -// Key architectural approach: -// Instead of creating the file and then updating its metadata (which can cause race conditions and duplicate versions), -// we set all required metadata as HTTP headers BEFORE calling putToFiler. The filer automatically stores any header -// starting with "Seaweed-" in entry.Extended during file creation, ensuring atomic metadata persistence. -// -// This approach eliminates: -// - Race conditions from read-after-write consistency delays -// - Need for retry loops and exponential backoff -// - Duplicate entries from separate create/update operations -// -// For suspended versioning, objects are stored as regular files (version ID "null") in the bucket directory, -// while existing versions from when versioning was enabled remain preserved in the .versions subdirectory. -func (s3a *S3ApiServer) putSuspendedVersioningObject(r *http.Request, bucket, object string, dataReader io.Reader, objectContentType string) (etag string, errCode s3err.ErrorCode) { - // Normalize object path to ensure consistency with toFilerUrl behavior - normalizedObject := removeDuplicateSlashes(object) - - // Enable detailed logging for testobjbar - isTestObj := (normalizedObject == "testobjbar") - - glog.V(0).Infof("putSuspendedVersioningObject: START bucket=%s, object=%s, normalized=%s, isTestObj=%v", - bucket, object, normalizedObject, isTestObj) - - if isTestObj { - glog.V(0).Infof("=== TESTOBJBAR: putSuspendedVersioningObject START ===") - } - - bucketDir := s3a.option.BucketsPath + "/" + bucket - - // Check if there's an existing null version in .versions directory and delete it - // This ensures suspended versioning properly overwrites the null version as per S3 spec - // Note: We only delete null versions, NOT regular versions (those should be preserved) - versionsObjectPath := normalizedObject + ".versions" - versionsDir := bucketDir + "/" + versionsObjectPath - entries, _, err := s3a.list(versionsDir, "", "", false, 1000) - if err == nil { - // .versions directory exists - glog.V(0).Infof("putSuspendedVersioningObject: found %d entries in .versions for %s/%s", len(entries), bucket, object) - for _, entry := range entries { - if entry.Extended != nil { - if versionIdBytes, ok := entry.Extended[s3_constants.ExtVersionIdKey]; ok { - versionId := string(versionIdBytes) - glog.V(0).Infof("putSuspendedVersioningObject: found version '%s' in .versions", versionId) - if versionId == "null" { - // Only delete null version - preserve real versioned entries - glog.V(0).Infof("putSuspendedVersioningObject: deleting null version from .versions") - err := s3a.rm(versionsDir, entry.Name, true, false) - if err != nil { - glog.Warningf("putSuspendedVersioningObject: failed to delete null version: %v", err) - } else { - glog.V(0).Infof("putSuspendedVersioningObject: successfully deleted null version") - } - break - } - } - } - } - } else { - glog.V(0).Infof("putSuspendedVersioningObject: no .versions directory for %s/%s", bucket, object) - } - - uploadUrl := s3a.toFilerUrl(bucket, normalizedObject) - - hash := md5.New() - var body = io.TeeReader(dataReader, hash) - if objectContentType == "" { - body = mimeDetect(r, body) - } - - // Set all metadata headers BEFORE calling putToFiler - // This ensures the metadata is set during file creation, not after - // The filer automatically stores any header starting with "Seaweed-" in entry.Extended - - // Set version ID to "null" for suspended versioning - r.Header.Set(s3_constants.ExtVersionIdKey, "null") - if isTestObj { - glog.V(0).Infof("=== TESTOBJBAR: set version header before putToFiler, r.Header[%s]=%s ===", - s3_constants.ExtVersionIdKey, r.Header.Get(s3_constants.ExtVersionIdKey)) - } - - // Extract and set object lock metadata as headers - // This handles retention mode, retention date, and legal hold - explicitMode := r.Header.Get(s3_constants.AmzObjectLockMode) - explicitRetainUntilDate := r.Header.Get(s3_constants.AmzObjectLockRetainUntilDate) - - if explicitMode != "" { - r.Header.Set(s3_constants.ExtObjectLockModeKey, explicitMode) - glog.V(2).Infof("putSuspendedVersioningObject: setting object lock mode header: %s", explicitMode) - } - - if explicitRetainUntilDate != "" { - // Parse and convert to Unix timestamp - parsedTime, err := time.Parse(time.RFC3339, explicitRetainUntilDate) - if err != nil { - glog.Errorf("putSuspendedVersioningObject: failed to parse retention until date: %v", err) - return "", s3err.ErrInvalidRequest - } - r.Header.Set(s3_constants.ExtRetentionUntilDateKey, strconv.FormatInt(parsedTime.Unix(), 10)) - glog.V(2).Infof("putSuspendedVersioningObject: setting retention until date header (timestamp: %d)", parsedTime.Unix()) - } - - if legalHold := r.Header.Get(s3_constants.AmzObjectLockLegalHold); legalHold != "" { - if legalHold == s3_constants.LegalHoldOn || legalHold == s3_constants.LegalHoldOff { - r.Header.Set(s3_constants.ExtLegalHoldKey, legalHold) - glog.V(2).Infof("putSuspendedVersioningObject: setting legal hold header: %s", legalHold) - } else { - glog.Errorf("putSuspendedVersioningObject: invalid legal hold value: %s", legalHold) - return "", s3err.ErrInvalidRequest - } - } - - // Apply bucket default retention if no explicit retention was provided - if explicitMode == "" && explicitRetainUntilDate == "" { - // Create a temporary entry to apply defaults - tempEntry := &filer_pb.Entry{Extended: make(map[string][]byte)} - if err := s3a.applyBucketDefaultRetention(bucket, tempEntry); err == nil { - // Copy default retention headers from temp entry - if modeBytes, ok := tempEntry.Extended[s3_constants.ExtObjectLockModeKey]; ok { - r.Header.Set(s3_constants.ExtObjectLockModeKey, string(modeBytes)) - glog.V(2).Infof("putSuspendedVersioningObject: applied bucket default retention mode: %s", string(modeBytes)) - } - if dateBytes, ok := tempEntry.Extended[s3_constants.ExtRetentionUntilDateKey]; ok { - r.Header.Set(s3_constants.ExtRetentionUntilDateKey, string(dateBytes)) - glog.V(2).Infof("putSuspendedVersioningObject: applied bucket default retention date") - } - } - } - - // Upload the file using putToFiler - this will create the file with version metadata - if isTestObj { - glog.V(0).Infof("=== TESTOBJBAR: calling putToFiler ===") - } - etag, errCode, _ = s3a.putToFiler(r, uploadUrl, body, "", bucket, 1) - if errCode != s3err.ErrNone { - glog.Errorf("putSuspendedVersioningObject: failed to upload object: %v", errCode) - return "", errCode - } - if isTestObj { - glog.V(0).Infof("=== TESTOBJBAR: putToFiler completed, etag=%s ===", etag) - } - - // Verify the metadata was set correctly during file creation - if isTestObj { - // Read back the entry to verify - maxRetries := 3 - for attempt := 1; attempt <= maxRetries; attempt++ { - verifyEntry, verifyErr := s3a.getEntry(bucketDir, normalizedObject) - if verifyErr == nil { - glog.V(0).Infof("=== TESTOBJBAR: verify attempt %d, entry.Extended=%v ===", attempt, verifyEntry.Extended) - if verifyEntry.Extended != nil { - if versionIdBytes, ok := verifyEntry.Extended[s3_constants.ExtVersionIdKey]; ok { - glog.V(0).Infof("=== TESTOBJBAR: verification SUCCESSFUL, version=%s ===", string(versionIdBytes)) - } else { - glog.V(0).Infof("=== TESTOBJBAR: verification FAILED, ExtVersionIdKey not found ===") - } - } else { - glog.V(0).Infof("=== TESTOBJBAR: verification FAILED, Extended is nil ===") - } - break - } else { - glog.V(0).Infof("=== TESTOBJBAR: getEntry failed on attempt %d: %v ===", attempt, verifyErr) - } - if attempt < maxRetries { - time.Sleep(time.Millisecond * 10) - } - } - } - - // Update all existing versions/delete markers to set IsLatest=false since "null" is now latest - err = s3a.updateIsLatestFlagsForSuspendedVersioning(bucket, normalizedObject) - if err != nil { - glog.Warningf("putSuspendedVersioningObject: failed to update IsLatest flags: %v", err) - // Don't fail the request, but log the warning - } - - glog.V(2).Infof("putSuspendedVersioningObject: successfully created null version for %s/%s", bucket, object) - if isTestObj { - glog.V(0).Infof("=== TESTOBJBAR: putSuspendedVersioningObject COMPLETED ===") - } - return etag, s3err.ErrNone -} - -// updateIsLatestFlagsForSuspendedVersioning sets IsLatest=false on all existing versions/delete markers -// when a new "null" version becomes the latest during suspended versioning -func (s3a *S3ApiServer) updateIsLatestFlagsForSuspendedVersioning(bucket, object string) error { - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsObjectPath := object + ".versions" - versionsDir := bucketDir + "/" + versionsObjectPath - - glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: updating flags for %s%s", bucket, object) - - // Check if .versions directory exists - _, err := s3a.getEntry(bucketDir, versionsObjectPath) - if err != nil { - // No .versions directory exists, nothing to update - glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: no .versions directory for %s%s", bucket, object) - return nil - } - - // List all entries in .versions directory - entries, _, err := s3a.list(versionsDir, "", "", false, 1000) - if err != nil { - return fmt.Errorf("failed to list versions directory: %v", err) - } - - glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: found %d entries to update", len(entries)) - - // Update each version/delete marker to set IsLatest=false - for _, entry := range entries { - if entry.Extended == nil { - continue - } - - // Check if this entry has a version ID (it should be a version or delete marker) - versionIdBytes, hasVersionId := entry.Extended[s3_constants.ExtVersionIdKey] - if !hasVersionId { - continue - } - - versionId := string(versionIdBytes) - glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: setting IsLatest=false for version %s", versionId) - - // Update the entry to set IsLatest=false (we don't explicitly store this flag, - // it's determined by comparison with latest version metadata) - // We need to clear the latest version metadata from the .versions directory - // so that our getObjectVersionList function will correctly show IsLatest=false - } - - // Clear the latest version metadata from .versions directory since "null" is now latest - versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath) - if err == nil && versionsEntry.Extended != nil { - // Remove latest version metadata so all versions show IsLatest=false - delete(versionsEntry.Extended, s3_constants.ExtLatestVersionIdKey) - delete(versionsEntry.Extended, s3_constants.ExtLatestVersionFileNameKey) - - // Update the .versions directory entry - err = s3a.mkFile(bucketDir, versionsObjectPath, versionsEntry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = versionsEntry.Extended - updatedEntry.Attributes = versionsEntry.Attributes - updatedEntry.Chunks = versionsEntry.Chunks - }) - if err != nil { - return fmt.Errorf("failed to update .versions directory metadata: %v", err) - } - - glog.V(2).Infof("updateIsLatestFlagsForSuspendedVersioning: cleared latest version metadata for %s%s", bucket, object) - } - - return nil -} - -func (s3a *S3ApiServer) putVersionedObject(r *http.Request, bucket, object string, dataReader io.Reader, objectContentType string) (versionId string, etag string, errCode s3err.ErrorCode) { - // Generate version ID - versionId = generateVersionId() - - // Normalize object path to ensure consistency with toFilerUrl behavior - normalizedObject := removeDuplicateSlashes(object) - - glog.V(2).Infof("putVersionedObject: creating version %s for %s/%s (normalized: %s)", versionId, bucket, object, normalizedObject) - - // Create the version file name - versionFileName := s3a.getVersionFileName(versionId) - - // Upload directly to the versions directory - // We need to construct the object path relative to the bucket - versionObjectPath := normalizedObject + ".versions/" + versionFileName - versionUploadUrl := s3a.toFilerUrl(bucket, versionObjectPath) - - // Ensure the .versions directory exists before uploading - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsDir := normalizedObject + ".versions" - err := s3a.mkdir(bucketDir, versionsDir, func(entry *filer_pb.Entry) { - entry.Attributes.Mime = s3_constants.FolderMimeType - }) - if err != nil { - glog.Errorf("putVersionedObject: failed to create .versions directory: %v", err) - return "", "", s3err.ErrInternalError - } - - hash := md5.New() - var body = io.TeeReader(dataReader, hash) - if objectContentType == "" { - body = mimeDetect(r, body) - } - - glog.V(2).Infof("putVersionedObject: uploading %s/%s version %s to %s", bucket, object, versionId, versionUploadUrl) - - etag, errCode, _ = s3a.putToFiler(r, versionUploadUrl, body, "", bucket, 1) - if errCode != s3err.ErrNone { - glog.Errorf("putVersionedObject: failed to upload version: %v", errCode) - return "", "", errCode - } - - // Get the uploaded entry to add versioning metadata - // Use retry logic to handle filer consistency delays - var versionEntry *filer_pb.Entry - maxRetries := 8 - for attempt := 1; attempt <= maxRetries; attempt++ { - versionEntry, err = s3a.getEntry(bucketDir, versionObjectPath) - if err == nil { - break - } - - if attempt < maxRetries { - // Exponential backoff: 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms - delay := time.Millisecond * time.Duration(10*(1<<(attempt-1))) - time.Sleep(delay) - } - } - - if err != nil { - glog.Errorf("putVersionedObject: failed to get version entry after %d attempts: %v", maxRetries, err) - return "", "", s3err.ErrInternalError - } - - // Add versioning metadata to this version - if versionEntry.Extended == nil { - versionEntry.Extended = make(map[string][]byte) - } - versionEntry.Extended[s3_constants.ExtVersionIdKey] = []byte(versionId) - - // Store ETag with quotes for S3 compatibility - if !strings.HasPrefix(etag, "\"") { - etag = "\"" + etag + "\"" - } - versionEntry.Extended[s3_constants.ExtETagKey] = []byte(etag) - - // Set object owner for versioned objects - s3a.setObjectOwnerFromRequest(r, versionEntry) - - // Extract and store object lock metadata from request headers - if err := s3a.extractObjectLockMetadataFromRequest(r, versionEntry); err != nil { - glog.Errorf("putVersionedObject: failed to extract object lock metadata: %v", err) - return "", "", s3err.ErrInvalidRequest - } - - // Update the version entry with metadata - err = s3a.mkFile(bucketDir, versionObjectPath, versionEntry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = versionEntry.Extended - updatedEntry.Attributes = versionEntry.Attributes - updatedEntry.Chunks = versionEntry.Chunks - }) - if err != nil { - glog.Errorf("putVersionedObject: failed to update version metadata: %v", err) - return "", "", s3err.ErrInternalError - } - - // Update the .versions directory metadata to indicate this is the latest version - err = s3a.updateLatestVersionInDirectory(bucket, normalizedObject, versionId, versionFileName) - if err != nil { - glog.Errorf("putVersionedObject: failed to update latest version in directory: %v", err) - return "", "", s3err.ErrInternalError - } - glog.V(2).Infof("putVersionedObject: successfully created version %s for %s/%s (normalized: %s)", versionId, bucket, object, normalizedObject) - return versionId, etag, s3err.ErrNone -} - -// updateLatestVersionInDirectory updates the .versions directory metadata to indicate the latest version -func (s3a *S3ApiServer) updateLatestVersionInDirectory(bucket, object, versionId, versionFileName string) error { - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsObjectPath := object + ".versions" - - // Get the current .versions directory entry with retry logic for filer consistency - var versionsEntry *filer_pb.Entry - var err error - maxRetries := 8 - for attempt := 1; attempt <= maxRetries; attempt++ { - versionsEntry, err = s3a.getEntry(bucketDir, versionsObjectPath) - if err == nil { - break - } - - if attempt < maxRetries { - // Exponential backoff with higher base: 100ms, 200ms, 400ms, 800ms, 1600ms, 3200ms, 6400ms - delay := time.Millisecond * time.Duration(100*(1<<(attempt-1))) - time.Sleep(delay) - } - } - - if err != nil { - glog.Errorf("updateLatestVersionInDirectory: failed to get .versions directory for %s/%s after %d attempts: %v", bucket, object, maxRetries, err) - return fmt.Errorf("failed to get .versions directory after %d attempts: %w", maxRetries, err) - } - - // Add or update the latest version metadata - if versionsEntry.Extended == nil { - versionsEntry.Extended = make(map[string][]byte) - } - versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey] = []byte(versionId) - versionsEntry.Extended[s3_constants.ExtLatestVersionFileNameKey] = []byte(versionFileName) - - // Update the .versions directory entry with metadata - err = s3a.mkFile(bucketDir, versionsObjectPath, versionsEntry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = versionsEntry.Extended - updatedEntry.Attributes = versionsEntry.Attributes - updatedEntry.Chunks = versionsEntry.Chunks - }) - if err != nil { - glog.Errorf("updateLatestVersionInDirectory: failed to update .versions directory metadata: %v", err) - return fmt.Errorf("failed to update .versions directory metadata: %w", err) - } - - return nil -} - -// extractObjectLockMetadataFromRequest extracts object lock headers from PUT requests -// and applies bucket default retention if no explicit retention is provided -func (s3a *S3ApiServer) extractObjectLockMetadataFromRequest(r *http.Request, entry *filer_pb.Entry) error { - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - // Extract explicit object lock mode (GOVERNANCE or COMPLIANCE) - explicitMode := r.Header.Get(s3_constants.AmzObjectLockMode) - if explicitMode != "" { - entry.Extended[s3_constants.ExtObjectLockModeKey] = []byte(explicitMode) - glog.V(2).Infof("extractObjectLockMetadataFromRequest: storing explicit object lock mode: %s", explicitMode) - } - - // Extract explicit retention until date - explicitRetainUntilDate := r.Header.Get(s3_constants.AmzObjectLockRetainUntilDate) - if explicitRetainUntilDate != "" { - // Parse the ISO8601 date and convert to Unix timestamp for storage - parsedTime, err := time.Parse(time.RFC3339, explicitRetainUntilDate) - if err != nil { - glog.Errorf("extractObjectLockMetadataFromRequest: failed to parse retention until date, expected format: %s, error: %v", time.RFC3339, err) - return ErrInvalidRetentionDateFormat - } - entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(parsedTime.Unix(), 10)) - glog.V(2).Infof("extractObjectLockMetadataFromRequest: storing explicit retention until date (timestamp: %d)", parsedTime.Unix()) - } - - // Extract legal hold status - if legalHold := r.Header.Get(s3_constants.AmzObjectLockLegalHold); legalHold != "" { - // Store S3 standard "ON"/"OFF" values directly - if legalHold == s3_constants.LegalHoldOn || legalHold == s3_constants.LegalHoldOff { - entry.Extended[s3_constants.ExtLegalHoldKey] = []byte(legalHold) - glog.V(2).Infof("extractObjectLockMetadataFromRequest: storing legal hold: %s", legalHold) - } else { - glog.Errorf("extractObjectLockMetadataFromRequest: unexpected legal hold value provided, expected 'ON' or 'OFF'") - return ErrInvalidLegalHoldStatus - } - } - - // Apply bucket default retention if no explicit retention was provided - // This implements AWS S3 behavior where bucket default retention automatically applies to new objects - if explicitMode == "" && explicitRetainUntilDate == "" { - bucket, _ := s3_constants.GetBucketAndObject(r) - if err := s3a.applyBucketDefaultRetention(bucket, entry); err != nil { - glog.V(2).Infof("extractObjectLockMetadataFromRequest: skipping bucket default retention for %s: %v", bucket, err) - // Don't fail the upload if default retention can't be applied - this matches AWS behavior - } - } - - return nil -} - -// applyBucketDefaultEncryption applies bucket default encryption settings to a new object -// This implements AWS S3 behavior where bucket default encryption automatically applies to new objects -// when no explicit encryption headers are provided in the upload request. -// Returns the modified dataReader and encryption keys instead of using pointer parameters for better code clarity. -func (s3a *S3ApiServer) applyBucketDefaultEncryption(bucket string, r *http.Request, dataReader io.Reader) (*BucketDefaultEncryptionResult, error) { - // Check if bucket has default encryption configured - encryptionConfig, err := s3a.GetBucketEncryptionConfig(bucket) - if err != nil || encryptionConfig == nil { - // No default encryption configured, return original reader - return &BucketDefaultEncryptionResult{DataReader: dataReader}, nil - } - - if encryptionConfig.SseAlgorithm == "" { - // No encryption algorithm specified - return &BucketDefaultEncryptionResult{DataReader: dataReader}, nil - } - - glog.V(3).Infof("applyBucketDefaultEncryption: applying default encryption %s for bucket %s", encryptionConfig.SseAlgorithm, bucket) - - switch encryptionConfig.SseAlgorithm { - case EncryptionTypeAES256: - // Apply SSE-S3 (AES256) encryption - return s3a.applySSES3DefaultEncryption(dataReader) - - case EncryptionTypeKMS: - // Apply SSE-KMS encryption - return s3a.applySSEKMSDefaultEncryption(bucket, r, dataReader, encryptionConfig) - - default: - return nil, fmt.Errorf("unsupported default encryption algorithm: %s", encryptionConfig.SseAlgorithm) - } -} - -// applySSES3DefaultEncryption applies SSE-S3 encryption as bucket default -func (s3a *S3ApiServer) applySSES3DefaultEncryption(dataReader io.Reader) (*BucketDefaultEncryptionResult, error) { - // Generate SSE-S3 key - keyManager := GetSSES3KeyManager() - key, err := keyManager.GetOrCreateKey("") - if err != nil { - return nil, fmt.Errorf("failed to generate SSE-S3 key for default encryption: %v", err) - } - - // Create encrypted reader - encryptedReader, iv, encErr := CreateSSES3EncryptedReader(dataReader, key) - if encErr != nil { - return nil, fmt.Errorf("failed to create SSE-S3 encrypted reader for default encryption: %v", encErr) - } - - // Store IV on the key object for later decryption - key.IV = iv - - // Store key in manager for later retrieval - keyManager.StoreKey(key) - glog.V(3).Infof("applySSES3DefaultEncryption: applied SSE-S3 default encryption with key ID: %s", key.KeyID) - - return &BucketDefaultEncryptionResult{ - DataReader: encryptedReader, - SSES3Key: key, - }, nil -} - -// applySSEKMSDefaultEncryption applies SSE-KMS encryption as bucket default -func (s3a *S3ApiServer) applySSEKMSDefaultEncryption(bucket string, r *http.Request, dataReader io.Reader, encryptionConfig *s3_pb.EncryptionConfiguration) (*BucketDefaultEncryptionResult, error) { - // Use the KMS key ID from bucket configuration, or default if not specified - keyID := encryptionConfig.KmsKeyId - if keyID == "" { - keyID = "alias/aws/s3" // AWS default KMS key for S3 - } - - // Check if bucket key is enabled in configuration - bucketKeyEnabled := encryptionConfig.BucketKeyEnabled - - // Build encryption context for KMS - bucket, object := s3_constants.GetBucketAndObject(r) - encryptionContext := BuildEncryptionContext(bucket, object, bucketKeyEnabled) - - // Create SSE-KMS encrypted reader - encryptedReader, sseKey, encErr := CreateSSEKMSEncryptedReaderWithBucketKey(dataReader, keyID, encryptionContext, bucketKeyEnabled) - if encErr != nil { - return nil, fmt.Errorf("failed to create SSE-KMS encrypted reader for default encryption: %v", encErr) - } - - glog.V(3).Infof("applySSEKMSDefaultEncryption: applied SSE-KMS default encryption with key ID: %s", keyID) - - return &BucketDefaultEncryptionResult{ - DataReader: encryptedReader, - SSEKMSKey: sseKey, - }, nil -} - -// applyBucketDefaultRetention applies bucket default retention settings to a new object -// This implements AWS S3 behavior where bucket default retention automatically applies to new objects -// when no explicit retention headers are provided in the upload request -func (s3a *S3ApiServer) applyBucketDefaultRetention(bucket string, entry *filer_pb.Entry) error { - // Safety check - if bucket config cache is not available, skip default retention - if s3a.bucketConfigCache == nil { - return nil - } - - // Get bucket configuration (getBucketConfig handles caching internally) - bucketConfig, errCode := s3a.getBucketConfig(bucket) - if errCode != s3err.ErrNone { - return fmt.Errorf("failed to get bucket config: %v", errCode) - } - - // Check if bucket has cached Object Lock configuration - if bucketConfig.ObjectLockConfig == nil { - return nil // No Object Lock configuration - } - - objectLockConfig := bucketConfig.ObjectLockConfig - - // Check if there's a default retention rule - if objectLockConfig.Rule == nil || objectLockConfig.Rule.DefaultRetention == nil { - return nil // No default retention configured - } - - defaultRetention := objectLockConfig.Rule.DefaultRetention - - // Validate default retention has required fields - if defaultRetention.Mode == "" { - return fmt.Errorf("default retention missing mode") - } - - if !defaultRetention.DaysSet && !defaultRetention.YearsSet { - return fmt.Errorf("default retention missing period") - } - - // Calculate retention until date based on default retention period - var retainUntilDate time.Time - now := time.Now() - - if defaultRetention.DaysSet && defaultRetention.Days > 0 { - retainUntilDate = now.AddDate(0, 0, defaultRetention.Days) - } else if defaultRetention.YearsSet && defaultRetention.Years > 0 { - retainUntilDate = now.AddDate(defaultRetention.Years, 0, 0) - } - - // Apply default retention to the object - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - entry.Extended[s3_constants.ExtObjectLockModeKey] = []byte(defaultRetention.Mode) - entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(retainUntilDate.Unix(), 10)) - - glog.V(2).Infof("applyBucketDefaultRetention: applied default retention %s until %s for bucket %s", - defaultRetention.Mode, retainUntilDate.Format(time.RFC3339), bucket) - - return nil -} - -// validateObjectLockHeaders validates object lock headers in PUT requests -func (s3a *S3ApiServer) validateObjectLockHeaders(r *http.Request, versioningEnabled bool) error { - // Extract object lock headers from request - mode := r.Header.Get(s3_constants.AmzObjectLockMode) - retainUntilDateStr := r.Header.Get(s3_constants.AmzObjectLockRetainUntilDate) - legalHold := r.Header.Get(s3_constants.AmzObjectLockLegalHold) - - // Check if any object lock headers are present - hasObjectLockHeaders := mode != "" || retainUntilDateStr != "" || legalHold != "" - - // Object lock headers can only be used on versioned buckets - if hasObjectLockHeaders && !versioningEnabled { - return ErrObjectLockVersioningRequired - } - - // Validate object lock mode if present - if mode != "" { - if mode != s3_constants.RetentionModeGovernance && mode != s3_constants.RetentionModeCompliance { - return ErrInvalidObjectLockMode - } - } - - // Validate retention date if present - if retainUntilDateStr != "" { - retainUntilDate, err := time.Parse(time.RFC3339, retainUntilDateStr) - if err != nil { - return ErrInvalidRetentionDateFormat - } - - // Retention date must be in the future - if retainUntilDate.Before(time.Now()) { - return ErrRetentionDateMustBeFuture - } - } - - // If mode is specified, retention date must also be specified - if mode != "" && retainUntilDateStr == "" { - return ErrObjectLockModeRequiresDate - } - - // If retention date is specified, mode must also be specified - if retainUntilDateStr != "" && mode == "" { - return ErrRetentionDateRequiresMode - } - - // Validate legal hold if present - if legalHold != "" { - if legalHold != s3_constants.LegalHoldOn && legalHold != s3_constants.LegalHoldOff { - return ErrInvalidLegalHoldStatus - } - } - - // Check for governance bypass header - only valid for versioned buckets - bypassGovernance := r.Header.Get("x-amz-bypass-governance-retention") == "true" - - // Governance bypass headers are only valid for versioned buckets (like object lock headers) - if bypassGovernance && !versioningEnabled { - return ErrGovernanceBypassVersioningRequired - } - - return nil -} - -// mapValidationErrorToS3Error maps object lock validation errors to appropriate S3 error codes -func mapValidationErrorToS3Error(err error) s3err.ErrorCode { - // Check for sentinel errors first - switch { - case errors.Is(err, ErrObjectLockVersioningRequired): - // For object lock operations on non-versioned buckets, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrInvalidObjectLockMode): - // For invalid object lock mode, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrInvalidLegalHoldStatus): - // For invalid legal hold status in XML body, return MalformedXML - // AWS S3 treats invalid status values in XML as malformed content - return s3err.ErrMalformedXML - case errors.Is(err, ErrInvalidRetentionDateFormat): - // For malformed retention date format, return MalformedDate - // This matches the test expectations - return s3err.ErrMalformedDate - case errors.Is(err, ErrRetentionDateMustBeFuture): - // For retention dates in the past, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrObjectLockModeRequiresDate): - // For mode without retention date, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrRetentionDateRequiresMode): - // For retention date without mode, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrGovernanceBypassVersioningRequired): - // For governance bypass on non-versioned bucket, return InvalidRequest - // This matches the test expectations - return s3err.ErrInvalidRequest - case errors.Is(err, ErrMalformedXML): - // For malformed XML in request body, return MalformedXML - // This matches the test expectations for invalid retention mode and legal hold status - return s3err.ErrMalformedXML - case errors.Is(err, ErrInvalidRetentionPeriod): - // For invalid retention period (e.g., Days <= 0), return InvalidRetentionPeriod - // This matches the test expectations - return s3err.ErrInvalidRetentionPeriod - case errors.Is(err, ErrComplianceModeActive): - // For compliance mode retention violations, return AccessDenied - // This matches the test expectations - return s3err.ErrAccessDenied - case errors.Is(err, ErrGovernanceModeActive): - // For governance mode retention violations, return AccessDenied - // This matches the test expectations - return s3err.ErrAccessDenied - case errors.Is(err, ErrObjectUnderLegalHold): - // For legal hold violations, return AccessDenied - // This matches the test expectations - return s3err.ErrAccessDenied - case errors.Is(err, ErrGovernanceBypassNotPermitted): - // For governance bypass permission violations, return AccessDenied - // This matches the test expectations - return s3err.ErrAccessDenied - // Validation error constants - case errors.Is(err, ErrObjectLockConfigurationMissingEnabled): - return s3err.ErrMalformedXML - case errors.Is(err, ErrInvalidObjectLockEnabledValue): - return s3err.ErrMalformedXML - case errors.Is(err, ErrRuleMissingDefaultRetention): - return s3err.ErrMalformedXML - case errors.Is(err, ErrDefaultRetentionMissingMode): - return s3err.ErrMalformedXML - case errors.Is(err, ErrInvalidDefaultRetentionMode): - return s3err.ErrMalformedXML - case errors.Is(err, ErrDefaultRetentionMissingPeriod): - return s3err.ErrMalformedXML - case errors.Is(err, ErrDefaultRetentionBothDaysAndYears): - return s3err.ErrMalformedXML - case errors.Is(err, ErrDefaultRetentionDaysOutOfRange): - return s3err.ErrInvalidRetentionPeriod - case errors.Is(err, ErrDefaultRetentionYearsOutOfRange): - return s3err.ErrInvalidRetentionPeriod - } - - // Check for error constants from the updated validation functions - switch { - case errors.Is(err, ErrRetentionMissingMode): - return s3err.ErrInvalidRequest - case errors.Is(err, ErrRetentionMissingRetainUntilDate): - return s3err.ErrInvalidRequest - case errors.Is(err, ErrInvalidRetentionModeValue): - return s3err.ErrMalformedXML - } - - return s3err.ErrInvalidRequest -} - -// EntryGetter interface for dependency injection in tests -// Simplified to only mock the data access dependency -type EntryGetter interface { - getEntry(parentDirectoryPath, entryName string) (*filer_pb.Entry, error) -} - -// conditionalHeaders holds parsed conditional header values -type conditionalHeaders struct { - ifMatch string - ifNoneMatch string - ifModifiedSince time.Time - ifUnmodifiedSince time.Time - isSet bool // true if any conditional headers are present -} - -// parseConditionalHeaders extracts and validates conditional headers from the request -func parseConditionalHeaders(r *http.Request) (conditionalHeaders, s3err.ErrorCode) { - headers := conditionalHeaders{ - ifMatch: r.Header.Get(s3_constants.IfMatch), - ifNoneMatch: r.Header.Get(s3_constants.IfNoneMatch), - } - - ifModifiedSinceStr := r.Header.Get(s3_constants.IfModifiedSince) - ifUnmodifiedSinceStr := r.Header.Get(s3_constants.IfUnmodifiedSince) - - // Check if any conditional headers are present - headers.isSet = headers.ifMatch != "" || headers.ifNoneMatch != "" || - ifModifiedSinceStr != "" || ifUnmodifiedSinceStr != "" - - if !headers.isSet { - return headers, s3err.ErrNone - } - - // Parse date headers with validation - var err error - if ifModifiedSinceStr != "" { - headers.ifModifiedSince, err = time.Parse(time.RFC1123, ifModifiedSinceStr) - if err != nil { - glog.V(3).Infof("parseConditionalHeaders: Invalid If-Modified-Since format: %v", err) - return headers, s3err.ErrInvalidRequest - } - } - - if ifUnmodifiedSinceStr != "" { - headers.ifUnmodifiedSince, err = time.Parse(time.RFC1123, ifUnmodifiedSinceStr) - if err != nil { - glog.V(3).Infof("parseConditionalHeaders: Invalid If-Unmodified-Since format: %v", err) - return headers, s3err.ErrInvalidRequest - } - } - - return headers, s3err.ErrNone -} - -// S3ApiServer implements EntryGetter interface -func (s3a *S3ApiServer) getObjectETag(entry *filer_pb.Entry) string { - // Try to get ETag from Extended attributes first - if etagBytes, hasETag := entry.Extended[s3_constants.ExtETagKey]; hasETag { - return string(etagBytes) - } - // Fallback: calculate ETag from chunks - return s3a.calculateETagFromChunks(entry.Chunks) -} - -func (s3a *S3ApiServer) etagMatches(headerValue, objectETag string) bool { - // Clean the object ETag - objectETag = strings.Trim(objectETag, `"`) - - // Split header value by commas to handle multiple ETags - etags := strings.Split(headerValue, ",") - for _, etag := range etags { - etag = strings.TrimSpace(etag) - etag = strings.Trim(etag, `"`) - if etag == objectETag { - return true - } - } - return false -} - -// checkConditionalHeadersWithGetter is a testable method that accepts a simple EntryGetter -// Uses the production getObjectETag and etagMatches methods to ensure testing of real logic -func (s3a *S3ApiServer) checkConditionalHeadersWithGetter(getter EntryGetter, r *http.Request, bucket, object string) s3err.ErrorCode { - headers, errCode := parseConditionalHeaders(r) - if errCode != s3err.ErrNone { - glog.V(3).Infof("checkConditionalHeaders: Invalid date format") - return errCode - } - if !headers.isSet { - return s3err.ErrNone - } - - // Get object entry for conditional checks. - bucketDir := "/buckets/" + bucket - entry, entryErr := getter.getEntry(bucketDir, object) - objectExists := entryErr == nil - - // For PUT requests, all specified conditions must be met. - // The evaluation order follows AWS S3 behavior for consistency. - - // 1. Check If-Match - if headers.ifMatch != "" { - if !objectExists { - glog.V(3).Infof("checkConditionalHeaders: If-Match failed - object %s/%s does not exist", bucket, object) - return s3err.ErrPreconditionFailed - } - // If `ifMatch` is "*", the condition is met if the object exists. - // Otherwise, we need to check the ETag. - if headers.ifMatch != "*" { - // Use production getObjectETag method - objectETag := s3a.getObjectETag(entry) - // Use production etagMatches method - if !s3a.etagMatches(headers.ifMatch, objectETag) { - glog.V(3).Infof("checkConditionalHeaders: If-Match failed for object %s/%s - expected ETag %s, got %s", bucket, object, headers.ifMatch, objectETag) - return s3err.ErrPreconditionFailed - } - } - glog.V(3).Infof("checkConditionalHeaders: If-Match passed for object %s/%s", bucket, object) - } - - // 2. Check If-Unmodified-Since - if !headers.ifUnmodifiedSince.IsZero() { - if objectExists { - objectModTime := time.Unix(entry.Attributes.Mtime, 0) - if objectModTime.After(headers.ifUnmodifiedSince) { - glog.V(3).Infof("checkConditionalHeaders: If-Unmodified-Since failed - object modified after %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) - return s3err.ErrPreconditionFailed - } - glog.V(3).Infof("checkConditionalHeaders: If-Unmodified-Since passed - object not modified since %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) - } - } - - // 3. Check If-None-Match - if headers.ifNoneMatch != "" { - if objectExists { - if headers.ifNoneMatch == "*" { - glog.V(3).Infof("checkConditionalHeaders: If-None-Match=* failed - object %s/%s exists", bucket, object) - return s3err.ErrPreconditionFailed - } - // Use production getObjectETag method - objectETag := s3a.getObjectETag(entry) - // Use production etagMatches method - if s3a.etagMatches(headers.ifNoneMatch, objectETag) { - glog.V(3).Infof("checkConditionalHeaders: If-None-Match failed - ETag matches %s", objectETag) - return s3err.ErrPreconditionFailed - } - glog.V(3).Infof("checkConditionalHeaders: If-None-Match passed - ETag %s doesn't match %s", objectETag, headers.ifNoneMatch) - } else { - glog.V(3).Infof("checkConditionalHeaders: If-None-Match passed - object %s/%s does not exist", bucket, object) - } - } - - // 4. Check If-Modified-Since - if !headers.ifModifiedSince.IsZero() { - if objectExists { - objectModTime := time.Unix(entry.Attributes.Mtime, 0) - if !objectModTime.After(headers.ifModifiedSince) { - glog.V(3).Infof("checkConditionalHeaders: If-Modified-Since failed - object not modified since %s", r.Header.Get(s3_constants.IfModifiedSince)) - return s3err.ErrPreconditionFailed - } - glog.V(3).Infof("checkConditionalHeaders: If-Modified-Since passed - object modified after %s", r.Header.Get(s3_constants.IfModifiedSince)) - } - } - - return s3err.ErrNone -} - -// checkConditionalHeaders is the production method that uses the S3ApiServer as EntryGetter -func (s3a *S3ApiServer) checkConditionalHeaders(r *http.Request, bucket, object string) s3err.ErrorCode { - return s3a.checkConditionalHeadersWithGetter(s3a, r, bucket, object) -} - -// checkConditionalHeadersForReadsWithGetter is a testable method for read operations -// Uses the production getObjectETag and etagMatches methods to ensure testing of real logic -func (s3a *S3ApiServer) checkConditionalHeadersForReadsWithGetter(getter EntryGetter, r *http.Request, bucket, object string) ConditionalHeaderResult { - headers, errCode := parseConditionalHeaders(r) - if errCode != s3err.ErrNone { - glog.V(3).Infof("checkConditionalHeadersForReads: Invalid date format") - return ConditionalHeaderResult{ErrorCode: errCode} - } - if !headers.isSet { - return ConditionalHeaderResult{ErrorCode: s3err.ErrNone} - } - - // Get object entry for conditional checks. - bucketDir := "/buckets/" + bucket - entry, entryErr := getter.getEntry(bucketDir, object) - objectExists := entryErr == nil - - // If object doesn't exist, fail for If-Match and If-Unmodified-Since - if !objectExists { - if headers.ifMatch != "" { - glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed - object %s/%s does not exist", bucket, object) - return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed} - } - if !headers.ifUnmodifiedSince.IsZero() { - glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object %s/%s does not exist", bucket, object) - return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed} - } - // If-None-Match and If-Modified-Since succeed when object doesn't exist - return ConditionalHeaderResult{ErrorCode: s3err.ErrNone} - } - - // Object exists - check all conditions - // The evaluation order follows AWS S3 behavior for consistency. - - // 1. Check If-Match (412 Precondition Failed if fails) - if headers.ifMatch != "" { - // If `ifMatch` is "*", the condition is met if the object exists. - // Otherwise, we need to check the ETag. - if headers.ifMatch != "*" { - // Use production getObjectETag method - objectETag := s3a.getObjectETag(entry) - // Use production etagMatches method - if !s3a.etagMatches(headers.ifMatch, objectETag) { - glog.V(3).Infof("checkConditionalHeadersForReads: If-Match failed for object %s/%s - expected ETag %s, got %s", bucket, object, headers.ifMatch, objectETag) - return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed} - } - } - glog.V(3).Infof("checkConditionalHeadersForReads: If-Match passed for object %s/%s", bucket, object) - } - - // 2. Check If-Unmodified-Since (412 Precondition Failed if fails) - if !headers.ifUnmodifiedSince.IsZero() { - objectModTime := time.Unix(entry.Attributes.Mtime, 0) - if objectModTime.After(headers.ifUnmodifiedSince) { - glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since failed - object modified after %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) - return ConditionalHeaderResult{ErrorCode: s3err.ErrPreconditionFailed} - } - glog.V(3).Infof("checkConditionalHeadersForReads: If-Unmodified-Since passed - object not modified since %s", r.Header.Get(s3_constants.IfUnmodifiedSince)) - } - - // 3. Check If-None-Match (304 Not Modified if fails) - if headers.ifNoneMatch != "" { - // Use production getObjectETag method - objectETag := s3a.getObjectETag(entry) - - if headers.ifNoneMatch == "*" { - glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match=* failed - object %s/%s exists", bucket, object) - return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag} - } - // Use production etagMatches method - if s3a.etagMatches(headers.ifNoneMatch, objectETag) { - glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match failed - ETag matches %s", objectETag) - return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag} - } - glog.V(3).Infof("checkConditionalHeadersForReads: If-None-Match passed - ETag %s doesn't match %s", objectETag, headers.ifNoneMatch) - } - - // 4. Check If-Modified-Since (304 Not Modified if fails) - if !headers.ifModifiedSince.IsZero() { - objectModTime := time.Unix(entry.Attributes.Mtime, 0) - if !objectModTime.After(headers.ifModifiedSince) { - // Use production getObjectETag method - objectETag := s3a.getObjectETag(entry) - glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since failed - object not modified since %s", r.Header.Get(s3_constants.IfModifiedSince)) - return ConditionalHeaderResult{ErrorCode: s3err.ErrNotModified, ETag: objectETag} - } - glog.V(3).Infof("checkConditionalHeadersForReads: If-Modified-Since passed - object modified after %s", r.Header.Get(s3_constants.IfModifiedSince)) - } - - return ConditionalHeaderResult{ErrorCode: s3err.ErrNone} -} - -// checkConditionalHeadersForReads is the production method that uses the S3ApiServer as EntryGetter -func (s3a *S3ApiServer) checkConditionalHeadersForReads(r *http.Request, bucket, object string) ConditionalHeaderResult { - return s3a.checkConditionalHeadersForReadsWithGetter(s3a, r, bucket, object) -} diff --git a/weed/s3api/s3api_object_handlers_put_test.go b/weed/s3api/s3api_object_handlers_put_test.go deleted file mode 100644 index 9144e2cee..000000000 --- a/weed/s3api/s3api_object_handlers_put_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package s3api - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/util/constants" -) - -func TestFilerErrorToS3Error(t *testing.T) { - tests := []struct { - name string - errString string - expectedErr s3err.ErrorCode - }{ - { - name: "MD5 mismatch error", - errString: constants.ErrMsgBadDigest, - expectedErr: s3err.ErrBadDigest, - }, - { - name: "Context canceled error", - errString: "rpc error: code = Canceled desc = context canceled", - expectedErr: s3err.ErrInvalidRequest, - }, - { - name: "Context canceled error (simple)", - errString: "context canceled", - expectedErr: s3err.ErrInvalidRequest, - }, - { - name: "Directory exists error", - errString: "existing /path/to/file is a directory", - expectedErr: s3err.ErrExistingObjectIsDirectory, - }, - { - name: "File exists error", - errString: "/path/to/file is a file", - expectedErr: s3err.ErrExistingObjectIsFile, - }, - { - name: "Unknown error", - errString: "some random error", - expectedErr: s3err.ErrInternalError, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := filerErrorToS3Error(tt.errString) - if result != tt.expectedErr { - t.Errorf("filerErrorToS3Error(%q) = %v, want %v", tt.errString, result, tt.expectedErr) - } - }) - } -} diff --git a/weed/s3api/s3api_object_handlers_retention.go b/weed/s3api/s3api_object_handlers_retention.go deleted file mode 100644 index 5695be25d..000000000 --- a/weed/s3api/s3api_object_handlers_retention.go +++ /dev/null @@ -1,140 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "errors" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" -) - -// PutObjectRetentionHandler Put object Retention -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html -func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectRetentionHandler %s %s", bucket, object) - - // Check if Object Lock is available for this bucket (requires versioning) - if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "PutObjectRetentionHandler") { - return - } - - // Get version ID from query parameters - versionId := r.URL.Query().Get("versionId") - - // Evaluate governance bypass request (header + permission validation) - governanceBypassAllowed := s3a.evaluateGovernanceBypassRequest(r, bucket, object) - - // Parse retention configuration from request body - retention, err := parseObjectRetention(r) - if err != nil { - glog.Errorf("PutObjectRetentionHandler: failed to parse retention config: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) - return - } - - // Validate retention configuration - if err := ValidateRetention(retention); err != nil { - glog.Errorf("PutObjectRetentionHandler: invalid retention config: %v", err) - s3err.WriteErrorResponse(w, r, mapValidationErrorToS3Error(err)) - return - } - - // Set retention on the object - if err := s3a.setObjectRetention(bucket, object, versionId, retention, governanceBypassAllowed); err != nil { - glog.Errorf("PutObjectRetentionHandler: failed to set retention: %v", err) - - // Handle specific error cases - if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) || errors.Is(err, ErrLatestVersionNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - if errors.Is(err, ErrComplianceModeActive) || errors.Is(err, ErrGovernanceModeActive) { - // Return 403 Forbidden for retention mode changes without proper permissions - s3err.WriteErrorResponse(w, r, s3err.ErrAccessDenied) - return - } - - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Add VersionId to response headers if available (expected by s3-tests) - if versionId != "" { - w.Header().Set("x-amz-version-id", versionId) - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - // Return success (HTTP 200 with no body) - w.WriteHeader(http.StatusOK) - glog.V(3).Infof("PutObjectRetentionHandler: successfully set retention for %s/%s", bucket, object) -} - -// GetObjectRetentionHandler Get object Retention -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html -func (s3a *S3ApiServer) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) { - bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectRetentionHandler %s %s", bucket, object) - - // Check if Object Lock is available for this bucket (requires versioning) - if !s3a.handleObjectLockAvailabilityCheck(w, r, bucket, "GetObjectRetentionHandler") { - return - } - - // Get version ID from query parameters - versionId := r.URL.Query().Get("versionId") - - // Get retention configuration for the object - retention, err := s3a.getObjectRetention(bucket, object, versionId) - if err != nil { - glog.Errorf("GetObjectRetentionHandler: failed to get retention: %v", err) - - // Handle specific error cases - if errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) { - s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) - return - } - - if errors.Is(err, ErrNoRetentionConfiguration) { - s3err.WriteErrorResponse(w, r, s3err.ErrObjectLockConfigurationNotFoundError) - return - } - - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Marshal retention configuration to XML - retentionXML, err := xml.Marshal(retention) - if err != nil { - glog.Errorf("GetObjectRetentionHandler: failed to marshal retention: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set response headers - w.Header().Set("Content-Type", "application/xml") - w.WriteHeader(http.StatusOK) - - // Write XML response - if _, err := w.Write([]byte(xml.Header)); err != nil { - glog.Errorf("GetObjectRetentionHandler: failed to write XML header: %v", err) - return - } - - if _, err := w.Write(retentionXML); err != nil { - glog.Errorf("GetObjectRetentionHandler: failed to write retention XML: %v", err) - return - } - - // Record metrics - stats_collect.RecordBucketActiveTime(bucket) - - glog.V(3).Infof("GetObjectRetentionHandler: successfully retrieved retention for %s/%s", bucket, object) -} diff --git a/weed/s3api/s3api_object_handlers_test.go b/weed/s3api/s3api_object_handlers_test.go deleted file mode 100644 index 950dd45f8..000000000 --- a/weed/s3api/s3api_object_handlers_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package s3api - -import ( - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" -) - -// mockAccountManager implements AccountManager for testing -type mockAccountManager struct { - accounts map[string]string -} - -func (m *mockAccountManager) GetAccountNameById(id string) string { - if name, exists := m.accounts[id]; exists { - return name - } - return "" -} - -func (m *mockAccountManager) GetAccountIdByEmail(email string) string { - return "" -} - -func TestNewListEntryOwnerDisplayName(t *testing.T) { - // Create mock IAM with test accounts - iam := &mockAccountManager{ - accounts: map[string]string{ - "testid": "M. Tester", - "userid123": "John Doe", - }, - } - - // Create test entry with owner metadata - entry := &filer_pb.Entry{ - Name: "test-object", - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - FileSize: 1024, - }, - Extended: map[string][]byte{ - s3_constants.ExtAmzOwnerKey: []byte("testid"), - }, - } - - // Test that display name is correctly looked up from IAM - listEntry := newListEntry(entry, "", "dir", "test-object", "/buckets/test/", true, false, false, iam) - - assert.NotNil(t, listEntry.Owner, "Owner should be set when fetchOwner is true") - assert.Equal(t, "testid", listEntry.Owner.ID, "Owner ID should match stored owner") - assert.Equal(t, "M. Tester", listEntry.Owner.DisplayName, "Display name should be looked up from IAM") - - // Test with owner that doesn't exist in IAM (should fallback to ID) - entry.Extended[s3_constants.ExtAmzOwnerKey] = []byte("unknown-user") - listEntry = newListEntry(entry, "", "dir", "test-object", "/buckets/test/", true, false, false, iam) - - assert.Equal(t, "unknown-user", listEntry.Owner.ID, "Owner ID should match stored owner") - assert.Equal(t, "unknown-user", listEntry.Owner.DisplayName, "Display name should fallback to ID when not found in IAM") - - // Test with no owner metadata (should use anonymous) - entry.Extended = make(map[string][]byte) - listEntry = newListEntry(entry, "", "dir", "test-object", "/buckets/test/", true, false, false, iam) - - assert.Equal(t, s3_constants.AccountAnonymousId, listEntry.Owner.ID, "Should use anonymous ID when no owner metadata") - assert.Equal(t, "anonymous", listEntry.Owner.DisplayName, "Should use anonymous display name when no owner metadata") - - // Test with fetchOwner false (should not set owner) - listEntry = newListEntry(entry, "", "dir", "test-object", "/buckets/test/", false, false, false, iam) - - assert.Nil(t, listEntry.Owner, "Owner should not be set when fetchOwner is false") -} - -func TestRemoveDuplicateSlashes(t *testing.T) { - tests := []struct { - name string - path string - expectedResult string - }{ - { - name: "empty", - path: "", - expectedResult: "", - }, - { - name: "slash", - path: "/", - expectedResult: "/", - }, - { - name: "object", - path: "object", - expectedResult: "object", - }, - { - name: "correct path", - path: "/path/to/object", - expectedResult: "/path/to/object", - }, - { - name: "path with duplicates", - path: "///path//to/object//", - expectedResult: "/path/to/object/", - }, - } - - for _, tst := range tests { - t.Run(tst.name, func(t *testing.T) { - obj := removeDuplicateSlashes(tst.path) - assert.Equal(t, tst.expectedResult, obj) - }) - } -} - -func TestS3ApiServer_toFilerUrl(t *testing.T) { - tests := []struct { - name string - args string - want string - }{ - { - "simple", - "/uploads/eaf10b3b-3b3a-4dcd-92a7-edf2a512276e/67b8b9bf-7cca-4cb6-9b34-22fcb4d6e27d/Bildschirmfoto 2022-09-19 um 21.38.37.png", - "/uploads/eaf10b3b-3b3a-4dcd-92a7-edf2a512276e/67b8b9bf-7cca-4cb6-9b34-22fcb4d6e27d/Bildschirmfoto%202022-09-19%20um%2021.38.37.png", - }, - { - "double prefix", - "//uploads/t.png", - "/uploads/t.png", - }, - { - "triple prefix", - "///uploads/t.png", - "/uploads/t.png", - }, - { - "empty prefix", - "uploads/t.png", - "/uploads/t.png", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equalf(t, tt.want, urlEscapeObject(tt.args), "clean %v", tt.args) - }) - } -} diff --git a/weed/s3api/s3api_object_lock_fix_test.go b/weed/s3api/s3api_object_lock_fix_test.go deleted file mode 100644 index e8a3cf6ba..000000000 --- a/weed/s3api/s3api_object_lock_fix_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package s3api - -import ( - "testing" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/stretchr/testify/assert" -) - -// TestVeeamObjectLockBugFix tests the fix for the bug where GetObjectLockConfigurationHandler -// would return NoSuchObjectLockConfiguration for buckets with no extended attributes, -// even when Object Lock was enabled. This caused Veeam to think Object Lock wasn't supported. -func TestVeeamObjectLockBugFix(t *testing.T) { - - t.Run("Bug case: bucket with no extended attributes", func(t *testing.T) { - // This simulates the bug case where a bucket has no extended attributes at all - // The old code would immediately return NoSuchObjectLockConfiguration - // The new code correctly checks if Object Lock is enabled before returning an error - - bucketConfig := &BucketConfig{ - Name: "test-bucket", - Entry: &filer_pb.Entry{ - Name: "test-bucket", - Extended: nil, // This is the key - no extended attributes - }, - } - - // Simulate the isObjectLockEnabledForBucket logic - enabled := false - if bucketConfig.Entry.Extended != nil { - if enabledBytes, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]; exists { - enabled = string(enabledBytes) == s3_constants.ObjectLockEnabled || string(enabledBytes) == "true" - } - } - - // Should correctly return false (not enabled) - this would trigger 404 correctly - assert.False(t, enabled, "Object Lock should not be enabled when no extended attributes exist") - }) - - t.Run("Fix verification: bucket with Object Lock enabled via boolean flag", func(t *testing.T) { - // This verifies the fix works when Object Lock is enabled via boolean flag - - bucketConfig := &BucketConfig{ - Name: "test-bucket", - Entry: &filer_pb.Entry{ - Name: "test-bucket", - Extended: map[string][]byte{ - s3_constants.ExtObjectLockEnabledKey: []byte("true"), - }, - }, - } - - // Simulate the isObjectLockEnabledForBucket logic - enabled := false - if bucketConfig.Entry.Extended != nil { - if enabledBytes, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]; exists { - enabled = string(enabledBytes) == s3_constants.ObjectLockEnabled || string(enabledBytes) == "true" - } - } - - // Should correctly return true (enabled) - this would generate minimal XML response - assert.True(t, enabled, "Object Lock should be enabled when boolean flag is set") - }) - - t.Run("Fix verification: bucket with Object Lock enabled via Enabled constant", func(t *testing.T) { - // Test using the s3_constants.ObjectLockEnabled constant - - bucketConfig := &BucketConfig{ - Name: "test-bucket", - Entry: &filer_pb.Entry{ - Name: "test-bucket", - Extended: map[string][]byte{ - s3_constants.ExtObjectLockEnabledKey: []byte(s3_constants.ObjectLockEnabled), - }, - }, - } - - // Simulate the isObjectLockEnabledForBucket logic - enabled := false - if bucketConfig.Entry.Extended != nil { - if enabledBytes, exists := bucketConfig.Entry.Extended[s3_constants.ExtObjectLockEnabledKey]; exists { - enabled = string(enabledBytes) == s3_constants.ObjectLockEnabled || string(enabledBytes) == "true" - } - } - - // Should correctly return true (enabled) - assert.True(t, enabled, "Object Lock should be enabled when constant is used") - }) -} diff --git a/weed/s3api/s3api_object_lock_headers_test.go b/weed/s3api/s3api_object_lock_headers_test.go deleted file mode 100644 index fc8a01232..000000000 --- a/weed/s3api/s3api_object_lock_headers_test.go +++ /dev/null @@ -1,662 +0,0 @@ -package s3api - -import ( - "fmt" - "net/http/httptest" - "strconv" - "testing" - "time" - - "errors" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/stretchr/testify/assert" -) - -// TestExtractObjectLockMetadataFromRequest tests the function that extracts -// object lock headers from PUT requests and stores them in Extended attributes. -// This test would have caught the bug where object lock headers were ignored. -func TestExtractObjectLockMetadataFromRequest(t *testing.T) { - s3a := &S3ApiServer{} - - t.Run("Extract COMPLIANCE mode and retention date", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - // Verify mode was stored - assert.Contains(t, entry.Extended, s3_constants.ExtObjectLockModeKey) - assert.Equal(t, "COMPLIANCE", string(entry.Extended[s3_constants.ExtObjectLockModeKey])) - - // Verify retention date was stored - assert.Contains(t, entry.Extended, s3_constants.ExtRetentionUntilDateKey) - storedTimestamp, err := strconv.ParseInt(string(entry.Extended[s3_constants.ExtRetentionUntilDateKey]), 10, 64) - assert.NoError(t, err) - storedTime := time.Unix(storedTimestamp, 0) - assert.WithinDuration(t, retainUntilDate, storedTime, 1*time.Second) - }) - - t.Run("Extract GOVERNANCE mode and retention date", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(12 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "GOVERNANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - assert.Equal(t, "GOVERNANCE", string(entry.Extended[s3_constants.ExtObjectLockModeKey])) - assert.Contains(t, entry.Extended, s3_constants.ExtRetentionUntilDateKey) - }) - - t.Run("Extract legal hold ON", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "ON") - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - assert.Contains(t, entry.Extended, s3_constants.ExtLegalHoldKey) - assert.Equal(t, "ON", string(entry.Extended[s3_constants.ExtLegalHoldKey])) - }) - - t.Run("Extract legal hold OFF", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "OFF") - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - assert.Contains(t, entry.Extended, s3_constants.ExtLegalHoldKey) - assert.Equal(t, "OFF", string(entry.Extended[s3_constants.ExtLegalHoldKey])) - }) - - t.Run("Handle all object lock headers together", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "ON") - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - // All metadata should be stored - assert.Equal(t, "COMPLIANCE", string(entry.Extended[s3_constants.ExtObjectLockModeKey])) - assert.Contains(t, entry.Extended, s3_constants.ExtRetentionUntilDateKey) - assert.Equal(t, "ON", string(entry.Extended[s3_constants.ExtLegalHoldKey])) - }) - - t.Run("Handle no object lock headers", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - // No object lock headers set - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - // No object lock metadata should be stored - assert.NotContains(t, entry.Extended, s3_constants.ExtObjectLockModeKey) - assert.NotContains(t, entry.Extended, s3_constants.ExtRetentionUntilDateKey) - assert.NotContains(t, entry.Extended, s3_constants.ExtLegalHoldKey) - }) - - t.Run("Handle invalid retention date - should return error", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "GOVERNANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, "invalid-date") - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrInvalidRetentionDateFormat)) - - // Mode should be stored but not invalid date - assert.Equal(t, "GOVERNANCE", string(entry.Extended[s3_constants.ExtObjectLockModeKey])) - assert.NotContains(t, entry.Extended, s3_constants.ExtRetentionUntilDateKey) - }) - - t.Run("Handle invalid legal hold value - should return error", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "INVALID") - - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrInvalidLegalHoldStatus)) - - // No legal hold metadata should be stored due to error - assert.NotContains(t, entry.Extended, s3_constants.ExtLegalHoldKey) - }) -} - -// TestAddObjectLockHeadersToResponse tests the function that adds object lock -// metadata from Extended attributes to HTTP response headers. -// This test would have caught the bug where HEAD responses didn't include object lock metadata. -func TestAddObjectLockHeadersToResponse(t *testing.T) { - s3a := &S3ApiServer{} - - t.Run("Add COMPLIANCE mode and retention date to response", func(t *testing.T) { - w := httptest.NewRecorder() - retainUntilTime := time.Now().Add(24 * time.Hour) - - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtObjectLockModeKey: []byte("COMPLIANCE"), - s3_constants.ExtRetentionUntilDateKey: []byte(strconv.FormatInt(retainUntilTime.Unix(), 10)), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // Verify headers were set - assert.Equal(t, "COMPLIANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.NotEmpty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - - // Verify the date format is correct - returnedDate := w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate) - parsedTime, err := time.Parse(time.RFC3339, returnedDate) - assert.NoError(t, err) - assert.WithinDuration(t, retainUntilTime, parsedTime, 1*time.Second) - }) - - t.Run("Add GOVERNANCE mode to response", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtObjectLockModeKey: []byte("GOVERNANCE"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - assert.Equal(t, "GOVERNANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - }) - - t.Run("Add legal hold ON to response", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtLegalHoldKey: []byte("ON"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - assert.Equal(t, "ON", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Add legal hold OFF to response", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtLegalHoldKey: []byte("OFF"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - assert.Equal(t, "OFF", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Add all object lock headers to response", func(t *testing.T) { - w := httptest.NewRecorder() - retainUntilTime := time.Now().Add(12 * time.Hour) - - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtObjectLockModeKey: []byte("GOVERNANCE"), - s3_constants.ExtRetentionUntilDateKey: []byte(strconv.FormatInt(retainUntilTime.Unix(), 10)), - s3_constants.ExtLegalHoldKey: []byte("ON"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // All headers should be set - assert.Equal(t, "GOVERNANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.NotEmpty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Equal(t, "ON", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle entry with no object lock metadata", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - "other-metadata": []byte("some-value"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // No object lock headers should be set for entries without object lock metadata - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle entry with object lock mode but no legal hold - should default to OFF", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtObjectLockModeKey: []byte("GOVERNANCE"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // Should set mode and default legal hold to OFF - assert.Equal(t, "GOVERNANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Equal(t, "OFF", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle entry with retention date but no legal hold - should default to OFF", func(t *testing.T) { - w := httptest.NewRecorder() - retainUntilTime := time.Now().Add(24 * time.Hour) - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtRetentionUntilDateKey: []byte(strconv.FormatInt(retainUntilTime.Unix(), 10)), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // Should set retention date and default legal hold to OFF - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.NotEmpty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Equal(t, "OFF", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle nil entry gracefully", func(t *testing.T) { - w := httptest.NewRecorder() - - // Should not panic - s3a.addObjectLockHeadersToResponse(w, nil) - - // No headers should be set - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle entry with nil Extended map gracefully", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: nil, - } - - // Should not panic - s3a.addObjectLockHeadersToResponse(w, entry) - - // No headers should be set - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - }) - - t.Run("Handle invalid retention timestamp gracefully", func(t *testing.T) { - w := httptest.NewRecorder() - entry := &filer_pb.Entry{ - Extended: map[string][]byte{ - s3_constants.ExtObjectLockModeKey: []byte("COMPLIANCE"), - s3_constants.ExtRetentionUntilDateKey: []byte("invalid-timestamp"), - }, - } - - s3a.addObjectLockHeadersToResponse(w, entry) - - // Mode should be set but not retention date due to invalid timestamp - assert.Equal(t, "COMPLIANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Empty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - }) -} - -// TestObjectLockHeaderRoundTrip tests the complete round trip: -// extract from request โ†’ store in Extended attributes โ†’ add to response -func TestObjectLockHeaderRoundTrip(t *testing.T) { - s3a := &S3ApiServer{} - - t.Run("Complete round trip for COMPLIANCE mode", func(t *testing.T) { - // 1. Create request with object lock headers - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "ON") - - // 2. Extract and store in Extended attributes - entry := &filer_pb.Entry{ - Extended: make(map[string][]byte), - } - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - // 3. Add to response headers - w := httptest.NewRecorder() - s3a.addObjectLockHeadersToResponse(w, entry) - - // 4. Verify round trip preserved all data - assert.Equal(t, "COMPLIANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.Equal(t, "ON", w.Header().Get(s3_constants.AmzObjectLockLegalHold)) - - returnedDate := w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate) - parsedTime, err := time.Parse(time.RFC3339, returnedDate) - assert.NoError(t, err) - assert.WithinDuration(t, retainUntilDate, parsedTime, 1*time.Second) - }) - - t.Run("Complete round trip for GOVERNANCE mode", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(12 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "GOVERNANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - entry := &filer_pb.Entry{Extended: make(map[string][]byte)} - err := s3a.extractObjectLockMetadataFromRequest(req, entry) - assert.NoError(t, err) - - w := httptest.NewRecorder() - s3a.addObjectLockHeadersToResponse(w, entry) - - assert.Equal(t, "GOVERNANCE", w.Header().Get(s3_constants.AmzObjectLockMode)) - assert.NotEmpty(t, w.Header().Get(s3_constants.AmzObjectLockRetainUntilDate)) - }) -} - -// TestValidateObjectLockHeaders tests the validateObjectLockHeaders function -// to ensure proper validation of object lock headers in PUT requests -func TestValidateObjectLockHeaders(t *testing.T) { - s3a := &S3ApiServer{} - - t.Run("Valid COMPLIANCE mode with retention date on versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("Valid GOVERNANCE mode with retention date on versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(12 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "GOVERNANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("Valid legal hold ON on versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "ON") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("Valid legal hold OFF on versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "OFF") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("Invalid object lock mode", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "INVALID_MODE") - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrInvalidObjectLockMode)) - }) - - t.Run("Invalid legal hold status", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "INVALID_STATUS") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrInvalidLegalHoldStatus)) - }) - - t.Run("Object lock headers on non-versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, false) // non-versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrObjectLockVersioningRequired)) - }) - - t.Run("Invalid retention date format", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, "invalid-date-format") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrInvalidRetentionDateFormat)) - }) - - t.Run("Retention date in the past", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - pastDate := time.Now().Add(-24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, pastDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrRetentionDateMustBeFuture)) - }) - - t.Run("Mode without retention date", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set(s3_constants.AmzObjectLockMode, "COMPLIANCE") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrObjectLockModeRequiresDate)) - }) - - t.Run("Retention date without mode", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(24 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrRetentionDateRequiresMode)) - }) - - t.Run("Governance bypass header on non-versioned bucket", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set("x-amz-bypass-governance-retention", "true") - - err := s3a.validateObjectLockHeaders(req, false) // non-versioned bucket - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrGovernanceBypassVersioningRequired)) - }) - - t.Run("Governance bypass header on versioned bucket should pass", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - req.Header.Set("x-amz-bypass-governance-retention", "true") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("No object lock headers should pass", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - // No object lock headers set - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) - - t.Run("Mixed valid headers should pass", func(t *testing.T) { - req := httptest.NewRequest("PUT", "/bucket/object", nil) - retainUntilDate := time.Now().Add(48 * time.Hour) - req.Header.Set(s3_constants.AmzObjectLockMode, "GOVERNANCE") - req.Header.Set(s3_constants.AmzObjectLockRetainUntilDate, retainUntilDate.Format(time.RFC3339)) - req.Header.Set(s3_constants.AmzObjectLockLegalHold, "ON") - - err := s3a.validateObjectLockHeaders(req, true) // versioned bucket - assert.NoError(t, err) - }) -} - -// TestMapValidationErrorToS3Error tests the error mapping function -func TestMapValidationErrorToS3Error(t *testing.T) { - tests := []struct { - name string - inputError error - expectedCode s3err.ErrorCode - }{ - { - name: "ErrObjectLockVersioningRequired", - inputError: ErrObjectLockVersioningRequired, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "ErrInvalidObjectLockMode", - inputError: ErrInvalidObjectLockMode, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "ErrInvalidLegalHoldStatus", - inputError: ErrInvalidLegalHoldStatus, - expectedCode: s3err.ErrMalformedXML, - }, - { - name: "ErrInvalidRetentionDateFormat", - inputError: ErrInvalidRetentionDateFormat, - expectedCode: s3err.ErrMalformedDate, - }, - { - name: "ErrRetentionDateMustBeFuture", - inputError: ErrRetentionDateMustBeFuture, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "ErrObjectLockModeRequiresDate", - inputError: ErrObjectLockModeRequiresDate, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "ErrRetentionDateRequiresMode", - inputError: ErrRetentionDateRequiresMode, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "ErrGovernanceBypassVersioningRequired", - inputError: ErrGovernanceBypassVersioningRequired, - expectedCode: s3err.ErrInvalidRequest, - }, - { - name: "Unknown error defaults to ErrInvalidRequest", - inputError: fmt.Errorf("unknown error"), - expectedCode: s3err.ErrInvalidRequest, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := mapValidationErrorToS3Error(tt.inputError) - assert.Equal(t, tt.expectedCode, result) - }) - } -} - -// TestObjectLockPermissionLogic documents the correct behavior for object lock permission checks -// in PUT operations for both versioned and non-versioned buckets -func TestObjectLockPermissionLogic(t *testing.T) { - t.Run("Non-versioned bucket PUT operation logic", func(t *testing.T) { - // In non-versioned buckets, PUT operations overwrite existing objects - // Therefore, we MUST check if the existing object has object lock protections - // that would prevent overwrite before allowing the PUT operation. - // - // This test documents the expected behavior: - // 1. Check object lock headers validity (handled by validateObjectLockHeaders) - // 2. Check if existing object has object lock protections (handled by checkObjectLockPermissions) - // 3. If existing object is under retention/legal hold, deny the PUT unless governance bypass is valid - - t.Log("For non-versioned buckets:") - t.Log("- PUT operations overwrite existing objects") - t.Log("- Must check existing object lock protections before allowing overwrite") - t.Log("- Governance bypass headers can be used to override GOVERNANCE mode retention") - t.Log("- COMPLIANCE mode retention and legal holds cannot be bypassed") - }) - - t.Run("Versioned bucket PUT operation logic", func(t *testing.T) { - // In versioned buckets, PUT operations create new versions without overwriting existing ones - // Therefore, we do NOT need to check existing object permissions since we're not modifying them. - // We only need to validate the object lock headers for the new version being created. - // - // This test documents the expected behavior: - // 1. Check object lock headers validity (handled by validateObjectLockHeaders) - // 2. Skip checking existing object permissions (since we're creating a new version) - // 3. Apply object lock metadata to the new version being created - - t.Log("For versioned buckets:") - t.Log("- PUT operations create new versions without overwriting existing objects") - t.Log("- No need to check existing object lock protections") - t.Log("- Only validate object lock headers for the new version being created") - t.Log("- Each version has independent object lock settings") - }) - - t.Run("Governance bypass header validation", func(t *testing.T) { - // Governance bypass headers should only be used in specific scenarios: - // 1. Only valid on versioned buckets (consistent with object lock headers) - // 2. For non-versioned buckets: Used to override existing object's GOVERNANCE retention - // 3. For versioned buckets: Not typically needed since new versions don't conflict with existing ones - - t.Log("Governance bypass behavior:") - t.Log("- Only valid on versioned buckets (header validation)") - t.Log("- For non-versioned buckets: Allows overwriting objects under GOVERNANCE retention") - t.Log("- For versioned buckets: Not typically needed for PUT operations") - t.Log("- Must have s3:BypassGovernanceRetention permission") - }) -} diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go new file mode 100644 index 000000000..feb289ef3 --- /dev/null +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -0,0 +1,343 @@ +package s3api + +import ( + "crypto/sha1" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" +) + +const ( + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 +) + +// NewMultipartUploadHandler - New multipart upload. +func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + bucket, object := s3_constants.GetBucketAndObject(r) + + createMultipartUploadInput := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: objectKey(aws.String(object)), + Metadata: make(map[string]*string), + } + + metadata := weed_server.SaveAmzMetaData(r, nil, false) + for k, v := range metadata { + createMultipartUploadInput.Metadata[k] = aws.String(string(v)) + } + + contentType := r.Header.Get("Content-Type") + if contentType != "" { + createMultipartUploadInput.ContentType = &contentType + } + response, errCode := s3a.createMultipartUpload(createMultipartUploadInput) + + glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + writeSuccessResponseXML(w, r, response) + +} + +// CompleteMultipartUploadHandler - Completes multipart upload. +func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + + bucket, object := s3_constants.GetBucketAndObject(r) + + parts := &CompleteMultipartUpload{} + if err := xmlDecoder(r.Body, parts, r.ContentLength); err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) + return + } + + // Get upload id. + uploadID, _, _, _ := getObjectResources(r.URL.Query()) + err := s3a.checkUploadId(object, uploadID) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) + return + } + + response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: objectKey(aws.String(object)), + UploadId: aws.String(uploadID), + }, parts) + + glog.V(2).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + writeSuccessResponseXML(w, r, response) + +} + +// AbortMultipartUploadHandler - Aborts multipart upload. +func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + bucket, object := s3_constants.GetBucketAndObject(r) + + // Get upload id. + uploadID, _, _, _ := getObjectResources(r.URL.Query()) + err := s3a.checkUploadId(object, uploadID) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) + return + } + + response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: objectKey(aws.String(object)), + UploadId: aws.String(uploadID), + }) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) + + //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + s3err.WriteXMLResponse(w, r, http.StatusNoContent, response) + s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone) + +} + +// ListMultipartUploadsHandler - Lists multipart uploads. +func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { + bucket, _ := s3_constants.GetBucketAndObject(r) + + prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query()) + if maxUploads < 0 { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxUploads) + return + } + if keyMarker != "" { + // Marker not common with prefix is not implemented. + if !strings.HasPrefix(keyMarker, prefix) { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) + return + } + } + + response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(bucket), + Delimiter: aws.String(delimiter), + EncodingType: aws.String(encodingType), + KeyMarker: aws.String(keyMarker), + MaxUploads: aws.Int64(int64(maxUploads)), + Prefix: aws.String(prefix), + UploadIdMarker: aws.String(uploadIDMarker), + }) + + glog.V(2).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + // TODO handle encodingType + + writeSuccessResponseXML(w, r, response) +} + +// ListObjectPartsHandler - Lists object parts in a multipart upload. +func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) { + bucket, object := s3_constants.GetBucketAndObject(r) + + uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) + if partNumberMarker < 0 { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPartNumberMarker) + return + } + if maxParts < 0 { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts) + return + } + + err := s3a.checkUploadId(object, uploadID) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) + return + } + + response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ + Bucket: aws.String(bucket), + Key: objectKey(aws.String(object)), + MaxParts: aws.Int64(int64(maxParts)), + PartNumberMarker: aws.Int64(int64(partNumberMarker)), + UploadId: aws.String(uploadID), + }) + + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + glog.V(2).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part)) + + writeSuccessResponseXML(w, r, response) + +} + +// PutObjectPartHandler - Put an object part in a multipart upload. +func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) { + bucket, object := s3_constants.GetBucketAndObject(r) + + uploadID := r.URL.Query().Get("uploadId") + err := s3a.checkUploadId(object, uploadID) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchUpload) + return + } + + partIDString := r.URL.Query().Get("partNumber") + partID, err := strconv.Atoi(partIDString) + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart) + return + } + if partID > globalMaxPartID { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts) + return + } + + dataReader := r.Body + if s3a.iam.isEnabled() { + rAuthType := getRequestAuthType(r) + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, s3ErrCode) + return + } + } + defer dataReader.Close() + + glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID) + + uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", + s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(bucket), uploadID, partID) + + if partID == 1 && r.Header.Get("Content-Type") == "" { + dataReader = mimeDetect(r, dataReader) + } + destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object) + + etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader, destination) + if errCode != s3err.ErrNone { + s3err.WriteErrorResponse(w, r, errCode) + return + } + + setEtag(w, etag) + + writeSuccessResponseEmpty(w, r) + +} + +func (s3a *S3ApiServer) genUploadsFolder(bucket string) string { + return fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, s3_constants.MultipartUploadsFolder) +} + +// Generate uploadID hash string from object +func (s3a *S3ApiServer) generateUploadID(object string) string { + if strings.HasPrefix(object, "/") { + object = object[1:] + } + h := sha1.New() + h.Write([]byte(object)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +//Check object name and uploadID when processing multipart uploading +func (s3a *S3ApiServer) checkUploadId(object string, id string) error { + + hash := s3a.generateUploadID(object) + if hash != id { + glog.Errorf("object %s and uploadID %s are not matched", object, id) + return fmt.Errorf("object %s and uploadID %s are not matched", object, id) + } + return nil +} + +// Parse bucket url queries for ?uploads +func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string) { + prefix = values.Get("prefix") + keyMarker = values.Get("key-marker") + uploadIDMarker = values.Get("upload-id-marker") + delimiter = values.Get("delimiter") + if values.Get("max-uploads") != "" { + maxUploads, _ = strconv.Atoi(values.Get("max-uploads")) + } else { + maxUploads = maxUploadsList + } + encodingType = values.Get("encoding-type") + return +} + +// Parse object url queries +func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string) { + uploadID = values.Get("uploadId") + partNumberMarker, _ = strconv.Atoi(values.Get("part-number-marker")) + if values.Get("max-parts") != "" { + maxParts, _ = strconv.Atoi(values.Get("max-parts")) + } else { + maxParts = maxPartsList + } + encodingType = values.Get("encoding-type") + return +} + +func xmlDecoder(body io.Reader, v interface{}, size int64) error { + var lbody io.Reader + if size > 0 { + lbody = io.LimitReader(body, size) + } else { + lbody = body + } + d := xml.NewDecoder(lbody) + d.CharsetReader = func(label string, input io.Reader) (io.Reader, error) { + return input, nil + } + return d.Decode(v) +} + +type CompleteMultipartUpload struct { + Parts []CompletedPart `xml:"Part"` +} +type CompletedPart struct { + ETag string + PartNumber int +} diff --git a/weed/s3api/s3api_object_retention.go b/weed/s3api/s3api_object_retention.go deleted file mode 100644 index 93e04e7da..000000000 --- a/weed/s3api/s3api_object_retention.go +++ /dev/null @@ -1,681 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// ==================================================================== -// ERROR DEFINITIONS -// ==================================================================== - -// Sentinel errors for proper error handling instead of string matching -var ( - ErrNoRetentionConfiguration = errors.New("no retention configuration found") - ErrNoLegalHoldConfiguration = errors.New("no legal hold configuration found") - ErrBucketNotFound = errors.New("bucket not found") - ErrObjectNotFound = errors.New("object not found") - ErrVersionNotFound = errors.New("version not found") - ErrLatestVersionNotFound = errors.New("latest version not found") - ErrComplianceModeActive = errors.New("object is under COMPLIANCE mode retention and cannot be deleted or modified") - ErrGovernanceModeActive = errors.New("object is under GOVERNANCE mode retention and cannot be deleted or modified without bypass") -) - -// Error definitions for Object Lock -var ( - ErrObjectUnderLegalHold = errors.New("object is under legal hold and cannot be deleted or modified") - ErrGovernanceBypassNotPermitted = errors.New("user does not have permission to bypass governance retention") - ErrInvalidRetentionPeriod = errors.New("invalid retention period specified") - ErrBothDaysAndYearsSpecified = errors.New("both days and years cannot be specified in the same retention configuration") - ErrMalformedXML = errors.New("malformed XML in request body") - - // Validation error constants with specific messages for tests - ErrRetentionMissingMode = errors.New("retention configuration must specify Mode") - ErrRetentionMissingRetainUntilDate = errors.New("retention configuration must specify RetainUntilDate") - ErrInvalidRetentionModeValue = errors.New("invalid retention mode") -) - -const ( - // Maximum retention period limits according to AWS S3 specifications - MaxRetentionDays = 36500 // Maximum number of days for object retention (100 years) - MaxRetentionYears = 100 // Maximum number of years for object retention -) - -// ==================================================================== -// DATA STRUCTURES -// ==================================================================== - -// ObjectRetention represents S3 Object Retention configuration -type ObjectRetention struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Retention"` - Mode string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Mode,omitempty"` - RetainUntilDate *time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RetainUntilDate,omitempty"` -} - -// ObjectLegalHold represents S3 Object Legal Hold configuration -type ObjectLegalHold struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LegalHold"` - Status string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status,omitempty"` -} - -// ObjectLockConfiguration represents S3 Object Lock Configuration -type ObjectLockConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ObjectLockConfiguration"` - ObjectLockEnabled string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ObjectLockEnabled,omitempty"` - Rule *ObjectLockRule `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Rule,omitempty"` -} - -// ObjectLockRule represents an Object Lock Rule -type ObjectLockRule struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Rule"` - DefaultRetention *DefaultRetention `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DefaultRetention,omitempty"` -} - -// DefaultRetention represents default retention settings -// Implements custom XML unmarshal to track if Days/Years were present in XML -type DefaultRetention struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DefaultRetention"` - Mode string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Mode,omitempty"` - Days int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Days,omitempty"` - Years int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Years,omitempty"` - DaysSet bool `xml:"-"` - YearsSet bool `xml:"-"` -} - -// ==================================================================== -// XML PARSING -// ==================================================================== - -// UnmarshalXML implements custom XML unmarshaling for DefaultRetention -// to track whether Days/Years fields were explicitly present in the XML -func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type Alias DefaultRetention - aux := &struct { - *Alias - Days *int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Days,omitempty"` - Years *int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Years,omitempty"` - }{Alias: (*Alias)(dr)} - if err := d.DecodeElement(aux, &start); err != nil { - glog.V(2).Infof("DefaultRetention.UnmarshalXML: decode error: %v", err) - return err - } - if aux.Days != nil { - dr.Days = *aux.Days - dr.DaysSet = true - glog.V(4).Infof("DefaultRetention.UnmarshalXML: Days present, value=%d", dr.Days) - } else { - glog.V(4).Infof("DefaultRetention.UnmarshalXML: Days not present") - } - if aux.Years != nil { - dr.Years = *aux.Years - dr.YearsSet = true - glog.V(4).Infof("DefaultRetention.UnmarshalXML: Years present, value=%d", dr.Years) - } else { - glog.V(4).Infof("DefaultRetention.UnmarshalXML: Years not present") - } - return nil -} - -// parseXML is a generic helper function to parse XML from an HTTP request body. -// It uses xml.Decoder for streaming XML parsing, which is more memory-efficient -// and avoids loading the entire request body into memory. -// -// The function assumes: -// - The request body is not nil (returns error if it is) -// - The request body will be closed after parsing (deferred close) -// - The XML content matches the structure of the provided result type T -// -// This approach is optimized for small XML payloads typical in S3 API requests -// (retention configurations, legal hold settings, etc.) where the overhead of -// streaming parsing is acceptable for the memory efficiency benefits. -func parseXML[T any](request *http.Request, result *T) error { - if request.Body == nil { - return fmt.Errorf("error parsing XML: empty request body") - } - defer request.Body.Close() - - decoder := xml.NewDecoder(request.Body) - if err := decoder.Decode(result); err != nil { - return fmt.Errorf("error parsing XML: %w", err) - } - - return nil -} - -// parseObjectRetention parses XML retention configuration from request body -func parseObjectRetention(request *http.Request) (*ObjectRetention, error) { - var retention ObjectRetention - if err := parseXML(request, &retention); err != nil { - return nil, err - } - return &retention, nil -} - -// parseObjectLegalHold parses XML legal hold configuration from request body -func parseObjectLegalHold(request *http.Request) (*ObjectLegalHold, error) { - var legalHold ObjectLegalHold - if err := parseXML(request, &legalHold); err != nil { - return nil, err - } - return &legalHold, nil -} - -// parseObjectLockConfiguration parses XML object lock configuration from request body -func parseObjectLockConfiguration(request *http.Request) (*ObjectLockConfiguration, error) { - var config ObjectLockConfiguration - if err := parseXML(request, &config); err != nil { - return nil, err - } - return &config, nil -} - -// ==================================================================== -// OBJECT ENTRY OPERATIONS -// ==================================================================== - -// getObjectEntry retrieves the appropriate object entry based on versioning and versionId -func (s3a *S3ApiServer) getObjectEntry(bucket, object, versionId string) (*filer_pb.Entry, error) { - var entry *filer_pb.Entry - var err error - - if versionId != "" { - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - } else { - // Check if versioning is enabled - versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) - if vErr != nil { - return nil, fmt.Errorf("error checking versioning: %w", vErr) - } - - if versioningEnabled { - entry, err = s3a.getLatestObjectVersion(bucket, object) - } else { - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err = s3a.getEntry(bucketDir, object) - } - } - - if err != nil { - return nil, fmt.Errorf("failed to retrieve object %s/%s: %w", bucket, object, ErrObjectNotFound) - } - - return entry, nil -} - -// ==================================================================== -// RETENTION OPERATIONS -// ==================================================================== - -// getObjectRetention retrieves object retention configuration -func (s3a *S3ApiServer) getObjectRetention(bucket, object, versionId string) (*ObjectRetention, error) { - entry, err := s3a.getObjectEntry(bucket, object, versionId) - if err != nil { - return nil, err - } - - if entry.Extended == nil { - return nil, ErrNoRetentionConfiguration - } - - retention := &ObjectRetention{} - - if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists { - retention.Mode = string(modeBytes) - } - - if dateBytes, exists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; exists { - if timestamp, err := strconv.ParseInt(string(dateBytes), 10, 64); err == nil { - t := time.Unix(timestamp, 0) - retention.RetainUntilDate = &t - } else { - return nil, fmt.Errorf("failed to parse retention timestamp for %s/%s: corrupted timestamp data", bucket, object) - } - } - - if retention.Mode == "" || retention.RetainUntilDate == nil { - return nil, ErrNoRetentionConfiguration - } - - return retention, nil -} - -// setObjectRetention sets object retention configuration -func (s3a *S3ApiServer) setObjectRetention(bucket, object, versionId string, retention *ObjectRetention, bypassGovernance bool) error { - var entry *filer_pb.Entry - var err error - var entryPath string - - if versionId != "" { - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - if err != nil { - return fmt.Errorf("failed to get version %s for object %s/%s: %w", versionId, bucket, object, ErrVersionNotFound) - } - entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) - } else { - // Check if versioning is enabled - versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) - if vErr != nil { - return fmt.Errorf("error checking versioning: %w", vErr) - } - - if versioningEnabled { - entry, err = s3a.getLatestObjectVersion(bucket, object) - if err != nil { - return fmt.Errorf("failed to get latest version for object %s/%s: %w", bucket, object, ErrLatestVersionNotFound) - } - // Extract version ID from entry metadata - entryPath = object // default to regular object path - if entry.Extended != nil { - if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { - versionId = string(versionIdBytes) - if versionId != "null" { - entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) - } - } - } - } else { - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err = s3a.getEntry(bucketDir, object) - if err != nil { - return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound) - } - entryPath = object - } - } - - // Check if object is already under retention - if entry.Extended != nil { - if existingMode, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists { - // Check if attempting to change retention mode - if retention.Mode != "" && string(existingMode) != retention.Mode { - // Attempting to change retention mode - if string(existingMode) == s3_constants.RetentionModeCompliance { - // Cannot change compliance mode retention without bypass - return ErrComplianceModeActive - } - - if string(existingMode) == s3_constants.RetentionModeGovernance && !bypassGovernance { - // Cannot change governance mode retention without bypass - return ErrGovernanceModeActive - } - } - - if existingDateBytes, dateExists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; dateExists { - if timestamp, err := strconv.ParseInt(string(existingDateBytes), 10, 64); err == nil { - existingDate := time.Unix(timestamp, 0) - - // Check if the new retention date is earlier than the existing one - if retention.RetainUntilDate != nil && retention.RetainUntilDate.Before(existingDate) { - // Attempting to decrease retention period - if string(existingMode) == s3_constants.RetentionModeCompliance { - // Cannot decrease compliance mode retention without bypass - return ErrComplianceModeActive - } - - if string(existingMode) == s3_constants.RetentionModeGovernance && !bypassGovernance { - // Cannot decrease governance mode retention without bypass - return ErrGovernanceModeActive - } - } - - // If new retention date is later or same, allow the operation - // This covers both increasing retention period and overriding with same/later date - } - } - } - } - - // Update retention metadata - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - if retention.Mode != "" { - entry.Extended[s3_constants.ExtObjectLockModeKey] = []byte(retention.Mode) - } - - if retention.RetainUntilDate != nil { - entry.Extended[s3_constants.ExtRetentionUntilDateKey] = []byte(strconv.FormatInt(retention.RetainUntilDate.Unix(), 10)) - - // Also update the existing WORM fields for compatibility - entry.WormEnforcedAtTsNs = time.Now().UnixNano() - } - - // Update the entry - // NOTE: Potential race condition exists if concurrent calls to PutObjectRetention - // and PutObjectLegalHold update the same object simultaneously, as they might - // overwrite each other's Extended map changes. This is mitigated by the fact - // that mkFile operations are typically serialized at the filer level, but - // future implementations might consider using atomic update operations or - // entry-level locking for complete safety. - bucketDir := s3a.option.BucketsPath + "/" + bucket - return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = entry.Extended - updatedEntry.WormEnforcedAtTsNs = entry.WormEnforcedAtTsNs - }) -} - -// ==================================================================== -// LEGAL HOLD OPERATIONS -// ==================================================================== - -// getObjectLegalHold retrieves object legal hold configuration -func (s3a *S3ApiServer) getObjectLegalHold(bucket, object, versionId string) (*ObjectLegalHold, error) { - entry, err := s3a.getObjectEntry(bucket, object, versionId) - if err != nil { - return nil, err - } - - if entry.Extended == nil { - return nil, ErrNoLegalHoldConfiguration - } - - legalHold := &ObjectLegalHold{} - - if statusBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists { - legalHold.Status = string(statusBytes) - } else { - return nil, ErrNoLegalHoldConfiguration - } - - return legalHold, nil -} - -// setObjectLegalHold sets object legal hold configuration -func (s3a *S3ApiServer) setObjectLegalHold(bucket, object, versionId string, legalHold *ObjectLegalHold) error { - var entry *filer_pb.Entry - var err error - var entryPath string - - if versionId != "" { - entry, err = s3a.getSpecificObjectVersion(bucket, object, versionId) - if err != nil { - return fmt.Errorf("failed to get version %s for object %s/%s: %w", versionId, bucket, object, ErrVersionNotFound) - } - entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) - } else { - // Check if versioning is enabled - versioningEnabled, vErr := s3a.isVersioningEnabled(bucket) - if vErr != nil { - return fmt.Errorf("error checking versioning: %w", vErr) - } - - if versioningEnabled { - entry, err = s3a.getLatestObjectVersion(bucket, object) - if err != nil { - return fmt.Errorf("failed to get latest version for object %s/%s: %w", bucket, object, ErrLatestVersionNotFound) - } - // Extract version ID from entry metadata - entryPath = object // default to regular object path - if entry.Extended != nil { - if versionIdBytes, exists := entry.Extended[s3_constants.ExtVersionIdKey]; exists { - versionId = string(versionIdBytes) - if versionId != "null" { - entryPath = object + ".versions/" + s3a.getVersionFileName(versionId) - } - } - } - } else { - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err = s3a.getEntry(bucketDir, object) - if err != nil { - return fmt.Errorf("failed to get object %s/%s: %w", bucket, object, ErrObjectNotFound) - } - entryPath = object - } - } - - // Update legal hold metadata - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - - entry.Extended[s3_constants.ExtLegalHoldKey] = []byte(legalHold.Status) - - // Update the entry - // NOTE: Potential race condition exists if concurrent calls to PutObjectRetention - // and PutObjectLegalHold update the same object simultaneously, as they might - // overwrite each other's Extended map changes. This is mitigated by the fact - // that mkFile operations are typically serialized at the filer level, but - // future implementations might consider using atomic update operations or - // entry-level locking for complete safety. - bucketDir := s3a.option.BucketsPath + "/" + bucket - return s3a.mkFile(bucketDir, entryPath, entry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = entry.Extended - }) -} - -// ==================================================================== -// PROTECTION ENFORCEMENT -// ==================================================================== - -// isObjectRetentionActive checks if object has active retention -func (s3a *S3ApiServer) isObjectRetentionActive(bucket, object, versionId string) (bool, error) { - retention, err := s3a.getObjectRetention(bucket, object, versionId) - if err != nil { - // If no retention found, object is not under retention - if errors.Is(err, ErrNoRetentionConfiguration) { - return false, nil - } - return false, err - } - - if retention.RetainUntilDate != nil && retention.RetainUntilDate.After(time.Now()) { - return true, nil - } - - return false, nil -} - -// getRetentionFromEntry extracts retention configuration from filer entry -func (s3a *S3ApiServer) getRetentionFromEntry(entry *filer_pb.Entry) (*ObjectRetention, bool, error) { - if entry.Extended == nil { - return nil, false, nil - } - - retention := &ObjectRetention{} - - if modeBytes, exists := entry.Extended[s3_constants.ExtObjectLockModeKey]; exists { - retention.Mode = string(modeBytes) - } - - if dateBytes, exists := entry.Extended[s3_constants.ExtRetentionUntilDateKey]; exists { - if timestamp, err := strconv.ParseInt(string(dateBytes), 10, 64); err == nil { - t := time.Unix(timestamp, 0) - retention.RetainUntilDate = &t - } else { - return nil, false, fmt.Errorf("failed to parse retention timestamp: corrupted timestamp data") - } - } - - if retention.Mode == "" || retention.RetainUntilDate == nil { - return nil, false, nil - } - - // Check if retention is currently active - isActive := retention.RetainUntilDate.After(time.Now()) - return retention, isActive, nil -} - -// getLegalHoldFromEntry extracts legal hold configuration from filer entry -func (s3a *S3ApiServer) getLegalHoldFromEntry(entry *filer_pb.Entry) (*ObjectLegalHold, bool, error) { - if entry.Extended == nil { - return nil, false, nil - } - - legalHold := &ObjectLegalHold{} - - if statusBytes, exists := entry.Extended[s3_constants.ExtLegalHoldKey]; exists { - legalHold.Status = string(statusBytes) - } else { - return nil, false, nil - } - - isActive := legalHold.Status == s3_constants.LegalHoldOn - return legalHold, isActive, nil -} - -// ==================================================================== -// GOVERNANCE BYPASS -// ==================================================================== - -// checkGovernanceBypassPermission checks if user has permission to bypass governance retention -func (s3a *S3ApiServer) checkGovernanceBypassPermission(request *http.Request, bucket, object string) bool { - // Use the existing IAM auth system to check the specific permission - // Create the governance bypass action with proper bucket/object concatenation - // Note: path.Join would drop bucket if object has leading slash, so use explicit formatting - resource := fmt.Sprintf("%s/%s", bucket, strings.TrimPrefix(object, "/")) - action := Action(fmt.Sprintf("%s:%s", s3_constants.ACTION_BYPASS_GOVERNANCE_RETENTION, resource)) - - // Use the IAM system to authenticate and authorize this specific action - identity, errCode := s3a.iam.authRequest(request, action) - if errCode != s3err.ErrNone { - glog.V(3).Infof("IAM auth failed for governance bypass: %v", errCode) - return false - } - - // Verify that the authenticated identity can perform this action - if identity != nil && identity.canDo(action, bucket, object) { - return true - } - - // Additional check: allow users with Admin action to bypass governance retention - // Use the proper S3 Admin action constant instead of generic isAdmin() method - adminAction := Action(fmt.Sprintf("%s:%s", s3_constants.ACTION_ADMIN, resource)) - if identity != nil && identity.canDo(adminAction, bucket, object) { - glog.V(2).Infof("Admin user %s granted governance bypass permission for %s/%s", identity.Name, bucket, object) - return true - } - - return false -} - -// evaluateGovernanceBypassRequest evaluates if governance bypass is requested and permitted -func (s3a *S3ApiServer) evaluateGovernanceBypassRequest(r *http.Request, bucket, object string) bool { - // Step 1: Check if governance bypass was requested via header - bypassRequested := r.Header.Get("x-amz-bypass-governance-retention") == "true" - if !bypassRequested { - // No bypass requested - normal retention enforcement applies - return false - } - - // Step 2: Validate user has permission to bypass governance retention - hasPermission := s3a.checkGovernanceBypassPermission(r, bucket, object) - if !hasPermission { - glog.V(2).Infof("Governance bypass denied for %s/%s: user lacks s3:BypassGovernanceRetention permission", bucket, object) - return false - } - - glog.V(2).Infof("Governance bypass granted for %s/%s: header present and user has permission", bucket, object) - return true -} - -// enforceObjectLockProtections enforces object lock protections for operations -func (s3a *S3ApiServer) enforceObjectLockProtections(request *http.Request, bucket, object, versionId string, governanceBypassAllowed bool) error { - // Get the object entry to check both retention and legal hold - // For delete operations without versionId, we need to check the latest version - var entry *filer_pb.Entry - var err error - - if versionId != "" { - // Check specific version - entry, err = s3a.getObjectEntry(bucket, object, versionId) - } else { - // Check latest version for delete marker creation - entry, err = s3a.getObjectEntry(bucket, object, "") - } - - if err != nil { - // If object doesn't exist, it's not under retention or legal hold - this is expected during delete operations - if errors.Is(err, filer_pb.ErrNotFound) || errors.Is(err, ErrObjectNotFound) || errors.Is(err, ErrVersionNotFound) || errors.Is(err, ErrLatestVersionNotFound) { - // Object doesn't exist, so it can't be under retention or legal hold - this is normal - glog.V(4).Infof("Object %s/%s (versionId: %s) not found during object lock check (expected during delete operations)", bucket, object, versionId) - return nil - } - glog.Warningf("Error retrieving object %s/%s (versionId: %s) for lock check: %v", bucket, object, versionId, err) - return err - } - - // Extract retention information from the entry - retention, retentionActive, err := s3a.getRetentionFromEntry(entry) - if err != nil { - glog.Warningf("Error parsing retention for %s/%s (versionId: %s): %v", bucket, object, versionId, err) - // Continue with legal hold check even if retention parsing fails - } - - // Extract legal hold information from the entry - _, legalHoldActive, err := s3a.getLegalHoldFromEntry(entry) - if err != nil { - glog.Warningf("Error parsing legal hold for %s/%s (versionId: %s): %v", bucket, object, versionId, err) - // Continue with retention check even if legal hold parsing fails - } - - // If object is under legal hold, it cannot be deleted or modified (including delete marker creation) - if legalHoldActive { - return ErrObjectUnderLegalHold - } - - // If object is under retention, check the mode - if retentionActive && retention != nil { - if retention.Mode == s3_constants.RetentionModeCompliance { - return ErrComplianceModeActive - } - - if retention.Mode == s3_constants.RetentionModeGovernance { - if !governanceBypassAllowed { - return ErrGovernanceModeActive - } - // Note: governanceBypassAllowed parameter is already validated by evaluateGovernanceBypassRequest() - // which checks both header presence and IAM permissions, so we trust it here - } - } - - return nil -} - -// ==================================================================== -// AVAILABILITY CHECKS -// ==================================================================== - -// isObjectLockAvailable checks if object lock is available for the bucket -func (s3a *S3ApiServer) isObjectLockAvailable(bucket string) error { - versioningEnabled, err := s3a.isVersioningEnabled(bucket) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - return ErrBucketNotFound - } - return fmt.Errorf("error checking versioning status: %w", err) - } - - if !versioningEnabled { - return fmt.Errorf("object lock requires versioning to be enabled") - } - - return nil -} - -// handleObjectLockAvailabilityCheck handles object lock availability checks for API endpoints -func (s3a *S3ApiServer) handleObjectLockAvailabilityCheck(w http.ResponseWriter, request *http.Request, bucket, handlerName string) bool { - if err := s3a.isObjectLockAvailable(bucket); err != nil { - glog.Errorf("%s: object lock not available for bucket %s: %v", handlerName, bucket, err) - if errors.Is(err, ErrBucketNotFound) { - s3err.WriteErrorResponse(w, request, s3err.ErrNoSuchBucket) - } else { - // Return InvalidRequest for object lock operations on buckets without object lock enabled - // This matches AWS S3 behavior and s3-tests expectations (400 Bad Request) - s3err.WriteErrorResponse(w, request, s3err.ErrInvalidRequest) - } - return false - } - return true -} diff --git a/weed/s3api/s3api_object_retention_test.go b/weed/s3api/s3api_object_retention_test.go deleted file mode 100644 index 20ccf60d9..000000000 --- a/weed/s3api/s3api_object_retention_test.go +++ /dev/null @@ -1,737 +0,0 @@ -package s3api - -import ( - "fmt" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" -) - -func TestValidateRetention(t *testing.T) { - tests := []struct { - name string - retention *ObjectRetention - expectError bool - errorMsg string - }{ - { - name: "Valid GOVERNANCE retention", - retention: &ObjectRetention{ - Mode: s3_constants.RetentionModeGovernance, - RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)), - }, - expectError: false, - }, - { - name: "Valid COMPLIANCE retention", - retention: &ObjectRetention{ - Mode: s3_constants.RetentionModeCompliance, - RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)), - }, - expectError: false, - }, - { - name: "Missing Mode", - retention: &ObjectRetention{ - RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)), - }, - expectError: true, - errorMsg: "retention configuration must specify Mode", - }, - { - name: "Missing RetainUntilDate", - retention: &ObjectRetention{ - Mode: s3_constants.RetentionModeGovernance, - }, - expectError: true, - errorMsg: "retention configuration must specify RetainUntilDate", - }, - { - name: "Invalid Mode", - retention: &ObjectRetention{ - Mode: "INVALID_MODE", - RetainUntilDate: timePtr(time.Now().Add(24 * time.Hour)), - }, - expectError: true, - errorMsg: "invalid retention mode", - }, - { - name: "Past RetainUntilDate", - retention: &ObjectRetention{ - Mode: s3_constants.RetentionModeGovernance, - RetainUntilDate: timePtr(time.Now().Add(-24 * time.Hour)), - }, - expectError: true, - errorMsg: "retain until date must be in the future", - }, - { - name: "Empty retention", - retention: &ObjectRetention{}, - expectError: true, - errorMsg: "retention configuration must specify Mode", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateRetention(tt.retention) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - } - }) - } -} - -func TestValidateLegalHold(t *testing.T) { - tests := []struct { - name string - legalHold *ObjectLegalHold - expectError bool - errorMsg string - }{ - { - name: "Valid ON status", - legalHold: &ObjectLegalHold{ - Status: s3_constants.LegalHoldOn, - }, - expectError: false, - }, - { - name: "Valid OFF status", - legalHold: &ObjectLegalHold{ - Status: s3_constants.LegalHoldOff, - }, - expectError: false, - }, - { - name: "Invalid status", - legalHold: &ObjectLegalHold{ - Status: "INVALID_STATUS", - }, - expectError: true, - errorMsg: "invalid legal hold status", - }, - { - name: "Empty status", - legalHold: &ObjectLegalHold{ - Status: "", - }, - expectError: true, - errorMsg: "invalid legal hold status", - }, - { - name: "Lowercase on", - legalHold: &ObjectLegalHold{ - Status: "on", - }, - expectError: true, - errorMsg: "invalid legal hold status", - }, - { - name: "Lowercase off", - legalHold: &ObjectLegalHold{ - Status: "off", - }, - expectError: true, - errorMsg: "invalid legal hold status", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateLegalHold(tt.legalHold) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - } - }) - } -} - -func TestParseObjectRetention(t *testing.T) { - tests := []struct { - name string - xmlBody string - expectError bool - errorMsg string - expectedResult *ObjectRetention - }{ - { - name: "Valid retention XML", - xmlBody: ` - GOVERNANCE - 2024-12-31T23:59:59Z - `, - expectError: false, - expectedResult: &ObjectRetention{ - Mode: "GOVERNANCE", - RetainUntilDate: timePtr(time.Date(2024, 12, 31, 23, 59, 59, 0, time.UTC)), - }, - }, - { - name: "Valid compliance retention XML", - xmlBody: ` - COMPLIANCE - 2025-01-01T00:00:00Z - `, - expectError: false, - expectedResult: &ObjectRetention{ - Mode: "COMPLIANCE", - RetainUntilDate: timePtr(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)), - }, - }, - { - name: "Empty XML body", - xmlBody: "", - expectError: true, - errorMsg: "error parsing XML", - }, - { - name: "Invalid XML", - xmlBody: `GOVERNANCEinvalid-date`, - expectError: true, - errorMsg: "cannot parse", - }, - { - name: "Malformed XML", - xmlBody: "GOVERNANCE2024-12-31T23:59:59Z", - expectError: true, - errorMsg: "error parsing XML", - }, - { - name: "Missing Mode", - xmlBody: ` - 2024-12-31T23:59:59Z - `, - expectError: false, - expectedResult: &ObjectRetention{ - Mode: "", - RetainUntilDate: timePtr(time.Date(2024, 12, 31, 23, 59, 59, 0, time.UTC)), - }, - }, - { - name: "Missing RetainUntilDate", - xmlBody: ` - GOVERNANCE - `, - expectError: false, - expectedResult: &ObjectRetention{ - Mode: "GOVERNANCE", - RetainUntilDate: nil, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock HTTP request with XML body - req := &http.Request{ - Body: io.NopCloser(strings.NewReader(tt.xmlBody)), - } - - result, err := parseObjectRetention(req) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if result == nil { - t.Errorf("Expected result but got nil") - } else { - if result.Mode != tt.expectedResult.Mode { - t.Errorf("Expected Mode %s, got %s", tt.expectedResult.Mode, result.Mode) - } - if tt.expectedResult.RetainUntilDate == nil { - if result.RetainUntilDate != nil { - t.Errorf("Expected RetainUntilDate to be nil, got %v", result.RetainUntilDate) - } - } else if result.RetainUntilDate == nil { - t.Errorf("Expected RetainUntilDate to be %v, got nil", tt.expectedResult.RetainUntilDate) - } else if !result.RetainUntilDate.Equal(*tt.expectedResult.RetainUntilDate) { - t.Errorf("Expected RetainUntilDate %v, got %v", tt.expectedResult.RetainUntilDate, result.RetainUntilDate) - } - } - } - }) - } -} - -func TestParseObjectLegalHold(t *testing.T) { - tests := []struct { - name string - xmlBody string - expectError bool - errorMsg string - expectedResult *ObjectLegalHold - }{ - { - name: "Valid legal hold ON", - xmlBody: ` - ON - `, - expectError: false, - expectedResult: &ObjectLegalHold{ - Status: "ON", - }, - }, - { - name: "Valid legal hold OFF", - xmlBody: ` - OFF - `, - expectError: false, - expectedResult: &ObjectLegalHold{ - Status: "OFF", - }, - }, - { - name: "Empty XML body", - xmlBody: "", - expectError: true, - errorMsg: "error parsing XML", - }, - { - name: "Invalid XML", - xmlBody: "ON", - expectError: true, - errorMsg: "error parsing XML", - }, - { - name: "Missing Status", - xmlBody: ` - `, - expectError: false, - expectedResult: &ObjectLegalHold{ - Status: "", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock HTTP request with XML body - req := &http.Request{ - Body: io.NopCloser(strings.NewReader(tt.xmlBody)), - } - - result, err := parseObjectLegalHold(req) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if result == nil { - t.Errorf("Expected result but got nil") - } else { - if result.Status != tt.expectedResult.Status { - t.Errorf("Expected Status %s, got %s", tt.expectedResult.Status, result.Status) - } - } - } - }) - } -} - -func TestParseObjectLockConfiguration(t *testing.T) { - tests := []struct { - name string - xmlBody string - expectError bool - errorMsg string - expectedResult *ObjectLockConfiguration - }{ - { - name: "Valid object lock configuration", - xmlBody: ` - Enabled - `, - expectError: false, - expectedResult: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - }, - }, - { - name: "Valid object lock configuration with rule", - xmlBody: ` - Enabled - - - GOVERNANCE - 30 - - - `, - expectError: false, - expectedResult: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 30, - }, - }, - }, - }, - { - name: "Empty XML body", - xmlBody: "", - expectError: true, - errorMsg: "error parsing XML", - }, - { - name: "Invalid XML", - xmlBody: "Enabled", - expectError: true, - errorMsg: "error parsing XML", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock HTTP request with XML body - req := &http.Request{ - Body: io.NopCloser(strings.NewReader(tt.xmlBody)), - } - - result, err := parseObjectLockConfiguration(req) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if result == nil { - t.Errorf("Expected result but got nil") - } else { - if result.ObjectLockEnabled != tt.expectedResult.ObjectLockEnabled { - t.Errorf("Expected ObjectLockEnabled %s, got %s", tt.expectedResult.ObjectLockEnabled, result.ObjectLockEnabled) - } - if tt.expectedResult.Rule == nil { - if result.Rule != nil { - t.Errorf("Expected Rule to be nil, got %v", result.Rule) - } - } else if result.Rule == nil { - t.Errorf("Expected Rule to be non-nil") - } else { - if result.Rule.DefaultRetention == nil { - t.Errorf("Expected DefaultRetention to be non-nil") - } else { - if result.Rule.DefaultRetention.Mode != tt.expectedResult.Rule.DefaultRetention.Mode { - t.Errorf("Expected DefaultRetention Mode %s, got %s", tt.expectedResult.Rule.DefaultRetention.Mode, result.Rule.DefaultRetention.Mode) - } - if result.Rule.DefaultRetention.Days != tt.expectedResult.Rule.DefaultRetention.Days { - t.Errorf("Expected DefaultRetention Days %d, got %d", tt.expectedResult.Rule.DefaultRetention.Days, result.Rule.DefaultRetention.Days) - } - } - } - } - } - }) - } -} - -func TestValidateObjectLockConfiguration(t *testing.T) { - tests := []struct { - name string - config *ObjectLockConfiguration - expectError bool - errorMsg string - }{ - { - name: "Valid config with ObjectLockEnabled only", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - }, - expectError: false, - }, - { - name: "Missing ObjectLockEnabled", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "", - }, - expectError: true, - errorMsg: "object lock configuration must specify ObjectLockEnabled", - }, - { - name: "Valid config with rule and days", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 30, - DaysSet: true, - }, - }, - }, - expectError: false, - }, - { - name: "Valid config with rule and years", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "COMPLIANCE", - Years: 1, - YearsSet: true, - }, - }, - }, - expectError: false, - }, - { - name: "Invalid ObjectLockEnabled value", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "InvalidValue", - }, - expectError: true, - errorMsg: "invalid object lock enabled value", - }, - { - name: "Invalid rule - missing mode", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Days: 30, - }, - }, - }, - expectError: true, - errorMsg: "default retention must specify Mode", - }, - { - name: "Invalid rule - both days and years", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 30, - Years: 1, - DaysSet: true, - YearsSet: true, - }, - }, - }, - expectError: true, - errorMsg: "default retention cannot specify both Days and Years", - }, - { - name: "Invalid rule - neither days nor years", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - }, - }, - }, - expectError: true, - errorMsg: "default retention must specify either Days or Years", - }, - { - name: "Invalid rule - invalid mode", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "INVALID_MODE", - Days: 30, - DaysSet: true, - }, - }, - }, - expectError: true, - errorMsg: "invalid default retention mode", - }, - { - name: "Invalid rule - days out of range", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 50000, - DaysSet: true, - }, - }, - }, - expectError: true, - errorMsg: fmt.Sprintf("default retention days must be between 0 and %d", MaxRetentionDays), - }, - { - name: "Invalid rule - years out of range", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: &DefaultRetention{ - Mode: "GOVERNANCE", - Years: 200, - YearsSet: true, - }, - }, - }, - expectError: true, - errorMsg: fmt.Sprintf("default retention years must be between 0 and %d", MaxRetentionYears), - }, - { - name: "Invalid rule - missing DefaultRetention", - config: &ObjectLockConfiguration{ - ObjectLockEnabled: "Enabled", - Rule: &ObjectLockRule{ - DefaultRetention: nil, - }, - }, - expectError: true, - errorMsg: "rule configuration must specify DefaultRetention", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateObjectLockConfiguration(tt.config) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - } - }) - } -} - -func TestValidateDefaultRetention(t *testing.T) { - tests := []struct { - name string - retention *DefaultRetention - expectError bool - errorMsg string - }{ - { - name: "Valid retention with days", - retention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 30, - DaysSet: true, - }, - expectError: false, - }, - { - name: "Valid retention with years", - retention: &DefaultRetention{ - Mode: "COMPLIANCE", - Years: 1, - YearsSet: true, - }, - expectError: false, - }, - { - name: "Missing mode", - retention: &DefaultRetention{ - Days: 30, - DaysSet: true, - }, - expectError: true, - errorMsg: "default retention must specify Mode", - }, - { - name: "Invalid mode", - retention: &DefaultRetention{ - Mode: "INVALID", - Days: 30, - DaysSet: true, - }, - expectError: true, - errorMsg: "invalid default retention mode", - }, - { - name: "Both days and years specified", - retention: &DefaultRetention{ - Mode: "GOVERNANCE", - Days: 30, - Years: 1, - DaysSet: true, - YearsSet: true, - }, - expectError: true, - errorMsg: "default retention cannot specify both Days and Years", - }, - { - name: "Neither days nor years specified", - retention: &DefaultRetention{ - Mode: "GOVERNANCE", - }, - expectError: true, - errorMsg: "default retention must specify either Days or Years", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validateDefaultRetention(tt.retention) - - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } else if !strings.Contains(err.Error(), tt.errorMsg) { - t.Errorf("Expected error message to contain '%s', got: %v", tt.errorMsg, err) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - } - }) - } -} - -// Helper function to create a time pointer -func timePtr(t time.Time) *time.Time { - return &t -} diff --git a/weed/s3api/s3api_object_skip_handlers.go b/weed/s3api/s3api_object_skip_handlers.go new file mode 100644 index 000000000..160d02475 --- /dev/null +++ b/weed/s3api/s3api_object_skip_handlers.go @@ -0,0 +1,45 @@ +package s3api + +import ( + "net/http" +) + +// GetObjectAclHandler Put object ACL +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +func (s3a *S3ApiServer) GetObjectAclHandler(w http.ResponseWriter, r *http.Request) { + + w.WriteHeader(http.StatusNoContent) + +} + +// PutObjectAclHandler Put object ACL +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html +func (s3a *S3ApiServer) PutObjectAclHandler(w http.ResponseWriter, r *http.Request) { + + w.WriteHeader(http.StatusNoContent) + +} + +// PutObjectRetentionHandler Put object Retention +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html +func (s3a *S3ApiServer) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) { + + w.WriteHeader(http.StatusNoContent) + +} + +// PutObjectLegalHoldHandler Put object Legal Hold +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html +func (s3a *S3ApiServer) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) { + + w.WriteHeader(http.StatusNoContent) + +} + +// PutObjectLockConfigurationHandler Put object Lock configuration +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLockConfiguration.html +func (s3a *S3ApiServer) PutObjectLockConfigurationHandler(w http.ResponseWriter, r *http.Request) { + + w.WriteHeader(http.StatusNoContent) + +} diff --git a/weed/s3api/s3api_object_handlers_tagging.go b/weed/s3api/s3api_object_tagging_handlers.go similarity index 93% rename from weed/s3api/s3api_object_handlers_tagging.go rename to weed/s3api/s3api_object_tagging_handlers.go index 23ca05133..1791d7dc8 100644 --- a/weed/s3api/s3api_object_handlers_tagging.go +++ b/weed/s3api/s3api_object_tagging_handlers.go @@ -3,14 +3,14 @@ package s3api import ( "encoding/xml" "fmt" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "io" "net/http" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/util" ) // GetObjectTaggingHandler - GET object tagging diff --git a/weed/s3api/s3api_object_versioning.go b/weed/s3api/s3api_object_versioning.go deleted file mode 100644 index 4f1ff901f..000000000 --- a/weed/s3api/s3api_object_versioning.go +++ /dev/null @@ -1,1000 +0,0 @@ -package s3api - -import ( - "crypto/rand" - "encoding/hex" - "encoding/xml" - "fmt" - "net/http" - "path" - "sort" - "strconv" - "strings" - "time" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - s3_constants "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// S3ListObjectVersionsResult - Custom struct for S3 list-object-versions response -// This avoids conflicts with the XSD generated ListVersionsResult struct -// and ensures proper separation of versions and delete markers into arrays -type S3ListObjectVersionsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult"` - - Name string `xml:"Name"` - Prefix string `xml:"Prefix,omitempty"` - KeyMarker string `xml:"KeyMarker,omitempty"` - VersionIdMarker string `xml:"VersionIdMarker,omitempty"` - NextKeyMarker string `xml:"NextKeyMarker,omitempty"` - NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"` - MaxKeys int `xml:"MaxKeys"` - Delimiter string `xml:"Delimiter,omitempty"` - IsTruncated bool `xml:"IsTruncated"` - - // These are the critical fields - arrays instead of single elements - Versions []VersionEntry `xml:"Version,omitempty"` // Array for versions - DeleteMarkers []DeleteMarkerEntry `xml:"DeleteMarker,omitempty"` // Array for delete markers - - CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` - EncodingType string `xml:"EncodingType,omitempty"` -} - -// Original struct - keeping for compatibility but will use S3ListObjectVersionsResult for XML response -type ListObjectVersionsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult"` - Name string `xml:"Name"` - Prefix string `xml:"Prefix"` - KeyMarker string `xml:"KeyMarker,omitempty"` - VersionIdMarker string `xml:"VersionIdMarker,omitempty"` - NextKeyMarker string `xml:"NextKeyMarker,omitempty"` - NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"` - MaxKeys int `xml:"MaxKeys"` - Delimiter string `xml:"Delimiter,omitempty"` - IsTruncated bool `xml:"IsTruncated"` - Versions []VersionEntry `xml:"Version,omitempty"` - DeleteMarkers []DeleteMarkerEntry `xml:"DeleteMarker,omitempty"` - CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` -} - -// ObjectVersion represents a version of an S3 object -type ObjectVersion struct { - VersionId string - IsLatest bool - IsDeleteMarker bool - LastModified time.Time - ETag string - Size int64 - Entry *filer_pb.Entry -} - -// generateVersionId creates a unique version ID that preserves chronological order -func generateVersionId() string { - // Use nanosecond timestamp to ensure chronological ordering - // Format as 16-digit hex (first 16 chars of version ID) - now := time.Now().UnixNano() - timestampHex := fmt.Sprintf("%016x", now) - - // Generate random 8 bytes for uniqueness (last 16 chars of version ID) - randBytes := make([]byte, 8) - if _, err := rand.Read(randBytes); err != nil { - glog.Errorf("Failed to generate random bytes for version ID: %v", err) - // Fallback to timestamp-only if random generation fails - return timestampHex + "0000000000000000" - } - - // Combine timestamp (16 chars) + random (16 chars) = 32 chars total - randomHex := hex.EncodeToString(randBytes) - versionId := timestampHex + randomHex - - return versionId -} - -// getVersionedObjectDir returns the directory path for storing object versions -func (s3a *S3ApiServer) getVersionedObjectDir(bucket, object string) string { - return path.Join(s3a.option.BucketsPath, bucket, object+".versions") -} - -// getVersionFileName returns the filename for a specific version -func (s3a *S3ApiServer) getVersionFileName(versionId string) string { - return fmt.Sprintf("v_%s", versionId) -} - -// createDeleteMarker creates a delete marker for versioned delete operations -func (s3a *S3ApiServer) createDeleteMarker(bucket, object string) (string, error) { - versionId := generateVersionId() - - glog.V(2).Infof("createDeleteMarker: creating delete marker %s for %s/%s", versionId, bucket, object) - - // Create the version file name for the delete marker - versionFileName := s3a.getVersionFileName(versionId) - - // Store delete marker in the .versions directory - // Make sure to clean up the object path to remove leading slashes - cleanObject := strings.TrimPrefix(object, "/") - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsDir := bucketDir + "/" + cleanObject + ".versions" - - // Create the delete marker entry in the .versions directory - err := s3a.mkFile(versionsDir, versionFileName, nil, func(entry *filer_pb.Entry) { - entry.Name = versionFileName - entry.IsDirectory = false - if entry.Attributes == nil { - entry.Attributes = &filer_pb.FuseAttributes{} - } - entry.Attributes.Mtime = time.Now().Unix() - if entry.Extended == nil { - entry.Extended = make(map[string][]byte) - } - entry.Extended[s3_constants.ExtVersionIdKey] = []byte(versionId) - entry.Extended[s3_constants.ExtDeleteMarkerKey] = []byte("true") - }) - if err != nil { - return "", fmt.Errorf("failed to create delete marker in .versions directory: %w", err) - } - - // Update the .versions directory metadata to indicate this delete marker is the latest version - err = s3a.updateLatestVersionInDirectory(bucket, cleanObject, versionId, versionFileName) - if err != nil { - glog.Errorf("createDeleteMarker: failed to update latest version in directory: %v", err) - return "", fmt.Errorf("failed to update latest version in directory: %w", err) - } - - glog.V(2).Infof("createDeleteMarker: successfully created delete marker %s for %s/%s", versionId, bucket, object) - return versionId, nil -} - -// listObjectVersions lists all versions of an object -func (s3a *S3ApiServer) listObjectVersions(bucket, prefix, keyMarker, versionIdMarker, delimiter string, maxKeys int) (*S3ListObjectVersionsResult, error) { - var allVersions []interface{} // Can contain VersionEntry or DeleteMarkerEntry - - glog.V(1).Infof("listObjectVersions: listing versions for bucket %s, prefix '%s'", bucket, prefix) - - // Track objects that have been processed to avoid duplicates - processedObjects := make(map[string]bool) - - // Track version IDs globally to prevent duplicates throughout the listing - seenVersionIds := make(map[string]bool) - - // Recursively find all .versions directories in the bucket - bucketPath := path.Join(s3a.option.BucketsPath, bucket) - err := s3a.findVersionsRecursively(bucketPath, "", &allVersions, processedObjects, seenVersionIds, bucket, prefix) - if err != nil { - glog.Errorf("listObjectVersions: findVersionsRecursively failed: %v", err) - return nil, err - } - - glog.V(1).Infof("listObjectVersions: found %d total versions", len(allVersions)) - - // Sort by key, then by LastModified (newest first), then by VersionId for deterministic ordering - sort.Slice(allVersions, func(i, j int) bool { - var keyI, keyJ string - var lastModifiedI, lastModifiedJ time.Time - var versionIdI, versionIdJ string - - switch v := allVersions[i].(type) { - case *VersionEntry: - keyI = v.Key - lastModifiedI = v.LastModified - versionIdI = v.VersionId - case *DeleteMarkerEntry: - keyI = v.Key - lastModifiedI = v.LastModified - versionIdI = v.VersionId - } - - switch v := allVersions[j].(type) { - case *VersionEntry: - keyJ = v.Key - lastModifiedJ = v.LastModified - versionIdJ = v.VersionId - case *DeleteMarkerEntry: - keyJ = v.Key - lastModifiedJ = v.LastModified - versionIdJ = v.VersionId - } - - // First sort by object key - if keyI != keyJ { - return keyI < keyJ - } - - // Then by modification time (newest first) - but use nanosecond precision for ties - timeDiff := lastModifiedI.Sub(lastModifiedJ) - if timeDiff.Abs() > time.Millisecond { - return lastModifiedI.After(lastModifiedJ) - } - - // For very close timestamps (within 1ms), use version ID for deterministic ordering - // Sort version IDs in reverse lexicographic order to maintain newest-first semantics - return versionIdI > versionIdJ - }) - - // Build result using S3ListObjectVersionsResult to avoid conflicts with XSD structs - result := &S3ListObjectVersionsResult{ - Name: bucket, - Prefix: prefix, - KeyMarker: keyMarker, - MaxKeys: maxKeys, - Delimiter: delimiter, - IsTruncated: len(allVersions) > maxKeys, - } - - glog.V(1).Infof("listObjectVersions: building response with %d versions (truncated: %v)", len(allVersions), result.IsTruncated) - - // Limit results - if len(allVersions) > maxKeys { - allVersions = allVersions[:maxKeys] - result.IsTruncated = true - - // Set next markers - switch v := allVersions[len(allVersions)-1].(type) { - case *VersionEntry: - result.NextKeyMarker = v.Key - result.NextVersionIdMarker = v.VersionId - case *DeleteMarkerEntry: - result.NextKeyMarker = v.Key - result.NextVersionIdMarker = v.VersionId - } - } - - // Always initialize empty slices so boto3 gets the expected fields even when empty - result.Versions = make([]VersionEntry, 0) - result.DeleteMarkers = make([]DeleteMarkerEntry, 0) - - // Add versions to result - for i, version := range allVersions { - switch v := version.(type) { - case *VersionEntry: - glog.V(2).Infof("listObjectVersions: adding version %d: key=%s, versionId=%s", i, v.Key, v.VersionId) - result.Versions = append(result.Versions, *v) - case *DeleteMarkerEntry: - glog.V(2).Infof("listObjectVersions: adding delete marker %d: key=%s, versionId=%s", i, v.Key, v.VersionId) - result.DeleteMarkers = append(result.DeleteMarkers, *v) - } - } - - glog.V(1).Infof("listObjectVersions: final result - %d versions, %d delete markers", len(result.Versions), len(result.DeleteMarkers)) - - return result, nil -} - -// findVersionsRecursively searches for all .versions directories and regular files recursively -func (s3a *S3ApiServer) findVersionsRecursively(currentPath, relativePath string, allVersions *[]interface{}, processedObjects map[string]bool, seenVersionIds map[string]bool, bucket, prefix string) error { - // List entries in current directory - entries, _, err := s3a.list(currentPath, "", "", false, 1000) - if err != nil { - return err - } - - for _, entry := range entries { - entryPath := path.Join(relativePath, entry.Name) - - // Skip if this doesn't match the prefix filter - if normalizedPrefix := strings.TrimPrefix(prefix, "/"); normalizedPrefix != "" { - // An entry is a candidate if: - // 1. Its path is a match for the prefix. - // 2. It is a directory that is an ancestor of the prefix path, so we must descend into it. - - // Condition 1: The entry's path starts with the prefix. - isMatch := strings.HasPrefix(entryPath, normalizedPrefix) - if !isMatch && entry.IsDirectory { - // Also check if a directory entry matches a directory-style prefix (e.g., prefix "a/", entry "a"). - isMatch = strings.HasPrefix(entryPath+"/", normalizedPrefix) - } - - // Condition 2: The prefix path starts with the entry's path (and it's a directory). - canDescend := entry.IsDirectory && strings.HasPrefix(normalizedPrefix, entryPath) - - if !isMatch && !canDescend { - continue - } - } - - if entry.IsDirectory { - // Skip .uploads directory (multipart upload temporary files) - if strings.HasPrefix(entry.Name, ".uploads") { - continue - } - - // Check if this is a .versions directory - if strings.HasSuffix(entry.Name, ".versions") { - // Extract object name from .versions directory name - objectKey := strings.TrimSuffix(entryPath, ".versions") - normalizedObjectKey := removeDuplicateSlashes(objectKey) - // Mark both keys as processed for backward compatibility - processedObjects[objectKey] = true - processedObjects[normalizedObjectKey] = true - - glog.V(2).Infof("Found .versions directory for object %s (normalized: %s)", objectKey, normalizedObjectKey) - - versions, err := s3a.getObjectVersionList(bucket, normalizedObjectKey) - if err != nil { - glog.Warningf("Failed to get versions for object %s (normalized: %s): %v", objectKey, normalizedObjectKey, err) - continue - } - - for _, version := range versions { - // Check for duplicate version IDs and skip if already seen - // Use normalized key for deduplication - versionKey := normalizedObjectKey + ":" + version.VersionId - if seenVersionIds[versionKey] { - glog.Warningf("findVersionsRecursively: duplicate version %s for object %s detected, skipping", version.VersionId, normalizedObjectKey) - continue - } - seenVersionIds[versionKey] = true - - if version.IsDeleteMarker { - glog.V(0).Infof("Adding delete marker from .versions: objectKey=%s, versionId=%s, isLatest=%v, versionKey=%s", - normalizedObjectKey, version.VersionId, version.IsLatest, versionKey) - deleteMarker := &DeleteMarkerEntry{ - Key: normalizedObjectKey, // Use normalized key for consistency - VersionId: version.VersionId, - IsLatest: version.IsLatest, - LastModified: version.LastModified, - Owner: s3a.getObjectOwnerFromVersion(version, bucket, normalizedObjectKey), - } - *allVersions = append(*allVersions, deleteMarker) - } else { - glog.V(0).Infof("Adding version from .versions: objectKey=%s, versionId=%s, isLatest=%v, versionKey=%s", - normalizedObjectKey, version.VersionId, version.IsLatest, versionKey) - versionEntry := &VersionEntry{ - Key: normalizedObjectKey, // Use normalized key for consistency - VersionId: version.VersionId, - IsLatest: version.IsLatest, - LastModified: version.LastModified, - ETag: version.ETag, - Size: version.Size, - Owner: s3a.getObjectOwnerFromVersion(version, bucket, normalizedObjectKey), - StorageClass: "STANDARD", - } - *allVersions = append(*allVersions, versionEntry) - } - } - } else { - // This is a regular directory - check if it's an explicit S3 directory object - // Only include directories that were explicitly created via S3 API (have FolderMimeType) - // This excludes implicit directories created when uploading files like "test1/a" - if entry.Attributes.Mime == s3_constants.FolderMimeType { - directoryKey := entryPath - if !strings.HasSuffix(directoryKey, "/") { - directoryKey += "/" - } - - // Add directory as a version entry with VersionId "null" (following S3/Minio behavior) - glog.V(2).Infof("findVersionsRecursively: found explicit S3 directory %s", directoryKey) - - // Calculate ETag for empty directory - directoryETag := "\"d41d8cd98f00b204e9800998ecf8427e\"" - - versionEntry := &VersionEntry{ - Key: directoryKey, - VersionId: "null", - IsLatest: true, - LastModified: time.Unix(entry.Attributes.Mtime, 0), - ETag: directoryETag, - Size: 0, // Directories have size 0 - Owner: s3a.getObjectOwnerFromEntry(entry), - StorageClass: "STANDARD", - } - *allVersions = append(*allVersions, versionEntry) - } - - // Recursively search subdirectories (regardless of whether they're explicit or implicit) - fullPath := path.Join(currentPath, entry.Name) - err := s3a.findVersionsRecursively(fullPath, entryPath, allVersions, processedObjects, seenVersionIds, bucket, prefix) - if err != nil { - glog.Warningf("Error searching subdirectory %s: %v", entryPath, err) - continue - } - } - } else { - // This is a regular file - check if it's a pre-versioning object - objectKey := entryPath - - // Normalize object key to ensure consistency with other version operations - normalizedObjectKey := removeDuplicateSlashes(objectKey) - - // Skip if this object already has a .versions directory (already processed) - // Check both normalized and original keys for backward compatibility - if processedObjects[objectKey] || processedObjects[normalizedObjectKey] { - glog.V(0).Infof("Skipping already processed object: objectKey=%s, normalizedObjectKey=%s, processedObjects[objectKey]=%v, processedObjects[normalizedObjectKey]=%v", - objectKey, normalizedObjectKey, processedObjects[objectKey], processedObjects[normalizedObjectKey]) - continue - } - - glog.V(0).Infof("Processing regular file: objectKey=%s, normalizedObjectKey=%s, NOT in processedObjects", objectKey, normalizedObjectKey) - - // This is a pre-versioning or suspended-versioning object - // Check if this file has version metadata (ExtVersionIdKey) - hasVersionMeta := false - if entry.Extended != nil { - if versionIdBytes, ok := entry.Extended[s3_constants.ExtVersionIdKey]; ok { - hasVersionMeta = true - glog.V(0).Infof("Regular file %s has version metadata: %s", normalizedObjectKey, string(versionIdBytes)) - } - } - - // Check if a .versions directory exists for this object - versionsObjectPath := normalizedObjectKey + ".versions" - _, versionsErr := s3a.getEntry(currentPath, versionsObjectPath) - if versionsErr == nil { - // .versions directory exists - glog.V(0).Infof("Found .versions directory for regular file %s, hasVersionMeta=%v", normalizedObjectKey, hasVersionMeta) - - // If this file has version metadata, it's a suspended versioning null version - // Include it and it will be the latest - if hasVersionMeta { - glog.V(0).Infof("Including suspended versioning file %s (has version metadata)", normalizedObjectKey) - // Continue to add it below - } else { - // No version metadata - this is a pre-versioning file - // Skip it if there's already a null version in .versions - versions, err := s3a.getObjectVersionList(bucket, normalizedObjectKey) - if err == nil { - hasNullVersion := false - for _, v := range versions { - if v.VersionId == "null" { - hasNullVersion = true - break - } - } - if hasNullVersion { - glog.V(0).Infof("Skipping pre-versioning file %s, null version exists in .versions", normalizedObjectKey) - processedObjects[objectKey] = true - processedObjects[normalizedObjectKey] = true - continue - } - } - glog.V(0).Infof("Including pre-versioning file %s (no null version in .versions)", normalizedObjectKey) - } - } else { - glog.V(0).Infof("No .versions directory for regular file %s, hasVersionMeta=%v", normalizedObjectKey, hasVersionMeta) - } - - // Add this file as a null version with IsLatest=true - isLatest := true - - // Check for duplicate version IDs and skip if already seen - // Use normalized key for deduplication to match how other version operations work - versionKey := normalizedObjectKey + ":null" - if seenVersionIds[versionKey] { - glog.Warningf("findVersionsRecursively: duplicate null version for object %s detected (versionKey=%s), skipping", normalizedObjectKey, versionKey) - continue - } - seenVersionIds[versionKey] = true - - etag := s3a.calculateETagFromChunks(entry.Chunks) - - glog.V(0).Infof("Adding null version from regular file: objectKey=%s, normalizedObjectKey=%s, versionKey=%s, isLatest=%v, hasVersionMeta=%v", - objectKey, normalizedObjectKey, versionKey, isLatest, hasVersionMeta) - - versionEntry := &VersionEntry{ - Key: normalizedObjectKey, // Use normalized key for consistency - VersionId: "null", - IsLatest: isLatest, - LastModified: time.Unix(entry.Attributes.Mtime, 0), - ETag: etag, - Size: int64(entry.Attributes.FileSize), - Owner: s3a.getObjectOwnerFromEntry(entry), - StorageClass: "STANDARD", - } - *allVersions = append(*allVersions, versionEntry) - } - } - - return nil -} - -// getObjectVersionList returns all versions of a specific object -func (s3a *S3ApiServer) getObjectVersionList(bucket, object string) ([]*ObjectVersion, error) { - var versions []*ObjectVersion - - glog.V(2).Infof("getObjectVersionList: looking for versions of %s/%s in .versions directory", bucket, object) - - // All versions are now stored in the .versions directory only - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsObjectPath := object + ".versions" - glog.V(2).Infof("getObjectVersionList: checking versions directory %s", versionsObjectPath) - - // Get the .versions directory entry to read latest version metadata - versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath) - if err != nil { - // No versions directory exists, return empty list - glog.V(2).Infof("getObjectVersionList: no versions directory found: %v", err) - return versions, nil - } - - // Get the latest version info from directory metadata - var latestVersionId string - if versionsEntry.Extended != nil { - if latestVersionIdBytes, hasLatestVersionId := versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey]; hasLatestVersionId { - latestVersionId = string(latestVersionIdBytes) - glog.V(2).Infof("getObjectVersionList: latest version ID from directory metadata: %s", latestVersionId) - } - } - - // List all version files in the .versions directory - entries, _, err := s3a.list(bucketDir+"/"+versionsObjectPath, "", "", false, 1000) - if err != nil { - glog.V(2).Infof("getObjectVersionList: failed to list version files: %v", err) - return versions, nil - } - - glog.V(2).Infof("getObjectVersionList: found %d entries in versions directory", len(entries)) - - // Use a map to detect and prevent duplicate version IDs - seenVersionIds := make(map[string]bool) - - for i, entry := range entries { - if entry.Extended == nil { - glog.V(2).Infof("getObjectVersionList: entry %d has no Extended metadata, skipping", i) - continue - } - - versionIdBytes, hasVersionId := entry.Extended[s3_constants.ExtVersionIdKey] - if !hasVersionId { - glog.V(2).Infof("getObjectVersionList: entry %d has no version ID, skipping", i) - continue - } - - versionId := string(versionIdBytes) - - // Check for duplicate version IDs and skip if already seen - if seenVersionIds[versionId] { - glog.Warningf("getObjectVersionList: duplicate version ID %s detected for object %s/%s, skipping", versionId, bucket, object) - continue - } - seenVersionIds[versionId] = true - - // Check if this version is the latest by comparing with directory metadata - isLatest := (versionId == latestVersionId) - - isDeleteMarkerBytes, _ := entry.Extended[s3_constants.ExtDeleteMarkerKey] - isDeleteMarker := string(isDeleteMarkerBytes) == "true" - - glog.V(2).Infof("getObjectVersionList: found version %s, isLatest=%v, isDeleteMarker=%v", versionId, isLatest, isDeleteMarker) - - version := &ObjectVersion{ - VersionId: versionId, - IsLatest: isLatest, - IsDeleteMarker: isDeleteMarker, - LastModified: time.Unix(entry.Attributes.Mtime, 0), - Entry: entry, - } - - if !isDeleteMarker { - // Try to get ETag from Extended attributes first - if etagBytes, hasETag := entry.Extended[s3_constants.ExtETagKey]; hasETag { - version.ETag = string(etagBytes) - } else { - // Fallback: calculate ETag from chunks - version.ETag = s3a.calculateETagFromChunks(entry.Chunks) - } - version.Size = int64(entry.Attributes.FileSize) - } - - versions = append(versions, version) - } - - // Don't sort here - let the main listObjectVersions function handle sorting consistently - - glog.V(2).Infof("getObjectVersionList: returning %d total versions for %s/%s (after deduplication from %d entries)", len(versions), bucket, object, len(entries)) - for i, version := range versions { - glog.V(2).Infof("getObjectVersionList: version %d: %s (isLatest=%v, isDeleteMarker=%v)", i, version.VersionId, version.IsLatest, version.IsDeleteMarker) - } - - return versions, nil -} - -// calculateETagFromChunks calculates ETag from file chunks following S3 multipart rules -// This is a wrapper around filer.ETagChunks that adds quotes for S3 compatibility -func (s3a *S3ApiServer) calculateETagFromChunks(chunks []*filer_pb.FileChunk) string { - if len(chunks) == 0 { - return "\"\"" - } - - // Use the existing filer ETag calculation and add quotes for S3 compatibility - etag := filer.ETagChunks(chunks) - if etag == "" { - return "\"\"" - } - return fmt.Sprintf("\"%s\"", etag) -} - -// getSpecificObjectVersion retrieves a specific version of an object -func (s3a *S3ApiServer) getSpecificObjectVersion(bucket, object, versionId string) (*filer_pb.Entry, error) { - // Normalize object path to ensure consistency with toFilerUrl behavior - normalizedObject := removeDuplicateSlashes(object) - - if versionId == "" { - // Get current version - return s3a.getEntry(path.Join(s3a.option.BucketsPath, bucket), strings.TrimPrefix(normalizedObject, "/")) - } - - if versionId == "null" { - // "null" version ID refers to pre-versioning objects stored as regular files - bucketDir := s3a.option.BucketsPath + "/" + bucket - entry, err := s3a.getEntry(bucketDir, normalizedObject) - if err != nil { - return nil, fmt.Errorf("null version object %s not found: %v", normalizedObject, err) - } - return entry, nil - } - - // Get specific version from .versions directory - versionsDir := s3a.getVersionedObjectDir(bucket, normalizedObject) - versionFile := s3a.getVersionFileName(versionId) - - entry, err := s3a.getEntry(versionsDir, versionFile) - if err != nil { - return nil, fmt.Errorf("version %s not found: %v", versionId, err) - } - - return entry, nil -} - -// deleteSpecificObjectVersion deletes a specific version of an object -func (s3a *S3ApiServer) deleteSpecificObjectVersion(bucket, object, versionId string) error { - // Normalize object path to ensure consistency with toFilerUrl behavior - normalizedObject := removeDuplicateSlashes(object) - - if versionId == "" { - return fmt.Errorf("version ID is required for version-specific deletion") - } - - if versionId == "null" { - // Delete "null" version (pre-versioning object stored as regular file) - bucketDir := s3a.option.BucketsPath + "/" + bucket - cleanObject := strings.TrimPrefix(normalizedObject, "/") - - // Check if the object exists - _, err := s3a.getEntry(bucketDir, cleanObject) - if err != nil { - // Object doesn't exist - this is OK for delete operations (idempotent) - glog.V(2).Infof("deleteSpecificObjectVersion: null version object %s already deleted or doesn't exist", cleanObject) - return nil - } - - // Delete the regular file - deleteErr := s3a.rm(bucketDir, cleanObject, true, false) - if deleteErr != nil { - // Check if file was already deleted by another process - if _, checkErr := s3a.getEntry(bucketDir, cleanObject); checkErr != nil { - // File doesn't exist anymore, deletion was successful - return nil - } - return fmt.Errorf("failed to delete null version %s: %v", cleanObject, deleteErr) - } - return nil - } - - versionsDir := s3a.getVersionedObjectDir(bucket, normalizedObject) - versionFile := s3a.getVersionFileName(versionId) - - // Check if this is the latest version before attempting deletion (for potential metadata update) - versionsEntry, dirErr := s3a.getEntry(path.Join(s3a.option.BucketsPath, bucket), normalizedObject+".versions") - isLatestVersion := false - if dirErr == nil && versionsEntry.Extended != nil { - if latestVersionIdBytes, hasLatest := versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey]; hasLatest { - isLatestVersion = (string(latestVersionIdBytes) == versionId) - } - } - - // Attempt to delete the version file - // Note: We don't check if the file exists first to avoid race conditions - // The deletion operation should be idempotent - deleteErr := s3a.rm(versionsDir, versionFile, true, false) - if deleteErr != nil { - // Check if file was already deleted by another process (race condition handling) - if _, checkErr := s3a.getEntry(versionsDir, versionFile); checkErr != nil { - // File doesn't exist anymore, deletion was successful (another thread deleted it) - glog.V(2).Infof("deleteSpecificObjectVersion: version %s for %s%s already deleted by another process", versionId, bucket, object) - return nil - } - // File still exists but deletion failed for another reason - return fmt.Errorf("failed to delete version %s: %v", versionId, deleteErr) - } - - // If we deleted the latest version, update the .versions directory metadata to point to the new latest - if isLatestVersion { - err := s3a.updateLatestVersionAfterDeletion(bucket, object) - if err != nil { - glog.Warningf("deleteSpecificObjectVersion: failed to update latest version after deletion: %v", err) - // Don't return error since the deletion was successful - } - } - - return nil -} - -// updateLatestVersionAfterDeletion finds the new latest version after deleting the current latest -func (s3a *S3ApiServer) updateLatestVersionAfterDeletion(bucket, object string) error { - bucketDir := s3a.option.BucketsPath + "/" + bucket - cleanObject := strings.TrimPrefix(object, "/") - versionsObjectPath := cleanObject + ".versions" - versionsDir := bucketDir + "/" + versionsObjectPath - - glog.V(1).Infof("updateLatestVersionAfterDeletion: updating latest version for %s/%s, listing %s", bucket, object, versionsDir) - - // List all remaining version files in the .versions directory - entries, _, err := s3a.list(versionsDir, "", "", false, 1000) - if err != nil { - glog.Errorf("updateLatestVersionAfterDeletion: failed to list versions in %s: %v", versionsDir, err) - return fmt.Errorf("failed to list versions: %v", err) - } - - glog.V(1).Infof("updateLatestVersionAfterDeletion: found %d entries in %s", len(entries), versionsDir) - - // Find the most recent remaining version (latest timestamp in version ID) - var latestVersionId string - var latestVersionFileName string - - for _, entry := range entries { - if entry.Extended == nil { - continue - } - - versionIdBytes, hasVersionId := entry.Extended[s3_constants.ExtVersionIdKey] - if !hasVersionId { - continue - } - - versionId := string(versionIdBytes) - - // Skip delete markers when finding latest content version - isDeleteMarkerBytes, _ := entry.Extended[s3_constants.ExtDeleteMarkerKey] - if string(isDeleteMarkerBytes) == "true" { - continue - } - - // Compare version IDs chronologically (our version IDs start with timestamp) - if latestVersionId == "" || versionId > latestVersionId { - glog.V(1).Infof("updateLatestVersionAfterDeletion: found newer version %s (file: %s)", versionId, entry.Name) - latestVersionId = versionId - latestVersionFileName = entry.Name - } else { - glog.V(1).Infof("updateLatestVersionAfterDeletion: skipping older version %s", versionId) - } - } - - // Update the .versions directory metadata - versionsEntry, err := s3a.getEntry(bucketDir, versionsObjectPath) - if err != nil { - return fmt.Errorf("failed to get .versions directory: %v", err) - } - - if versionsEntry.Extended == nil { - versionsEntry.Extended = make(map[string][]byte) - } - - if latestVersionId != "" { - // Update metadata to point to new latest version - versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey] = []byte(latestVersionId) - versionsEntry.Extended[s3_constants.ExtLatestVersionFileNameKey] = []byte(latestVersionFileName) - glog.V(2).Infof("updateLatestVersionAfterDeletion: new latest version for %s/%s is %s", bucket, object, latestVersionId) - } else { - // No versions left, remove latest version metadata - delete(versionsEntry.Extended, s3_constants.ExtLatestVersionIdKey) - delete(versionsEntry.Extended, s3_constants.ExtLatestVersionFileNameKey) - glog.V(2).Infof("updateLatestVersionAfterDeletion: no versions left for %s/%s", bucket, object) - } - - // Update the .versions directory entry - err = s3a.mkFile(bucketDir, versionsObjectPath, versionsEntry.Chunks, func(updatedEntry *filer_pb.Entry) { - updatedEntry.Extended = versionsEntry.Extended - updatedEntry.Attributes = versionsEntry.Attributes - updatedEntry.Chunks = versionsEntry.Chunks - }) - if err != nil { - return fmt.Errorf("failed to update .versions directory metadata: %v", err) - } - - return nil -} - -// ListObjectVersionsHandler handles the list object versions request -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectVersions.html -func (s3a *S3ApiServer) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("ListObjectVersionsHandler %s", bucket) - - if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { - s3err.WriteErrorResponse(w, r, err) - return - } - - // Parse query parameters - query := r.URL.Query() - originalPrefix := query.Get("prefix") // Keep original prefix for response - prefix := originalPrefix // Use for internal processing - if prefix != "" && !strings.HasPrefix(prefix, "/") { - prefix = "/" + prefix - } - - keyMarker := query.Get("key-marker") - versionIdMarker := query.Get("version-id-marker") - delimiter := query.Get("delimiter") - - maxKeysStr := query.Get("max-keys") - maxKeys := 1000 - if maxKeysStr != "" { - if mk, err := strconv.Atoi(maxKeysStr); err == nil && mk > 0 { - maxKeys = mk - } - } - - // List versions - result, err := s3a.listObjectVersions(bucket, prefix, keyMarker, versionIdMarker, delimiter, maxKeys) - if err != nil { - glog.Errorf("ListObjectVersionsHandler: %v", err) - s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) - return - } - - // Set the original prefix in the response (not the normalized internal prefix) - result.Prefix = originalPrefix - - writeSuccessResponseXML(w, r, result) -} - -// getLatestObjectVersion finds the latest version of an object by reading .versions directory metadata -func (s3a *S3ApiServer) getLatestObjectVersion(bucket, object string) (*filer_pb.Entry, error) { - // Normalize object path to ensure consistency with toFilerUrl behavior - normalizedObject := removeDuplicateSlashes(object) - - bucketDir := s3a.option.BucketsPath + "/" + bucket - versionsObjectPath := normalizedObject + ".versions" - - glog.V(1).Infof("getLatestObjectVersion: looking for latest version of %s/%s (normalized: %s)", bucket, object, normalizedObject) - - // Get the .versions directory entry to read latest version metadata with retry logic for filer consistency - var versionsEntry *filer_pb.Entry - var err error - maxRetries := 8 - for attempt := 1; attempt <= maxRetries; attempt++ { - versionsEntry, err = s3a.getEntry(bucketDir, versionsObjectPath) - if err == nil { - break - } - - if attempt < maxRetries { - // Exponential backoff with higher base: 100ms, 200ms, 400ms, 800ms, 1600ms, 3200ms, 6400ms - delay := time.Millisecond * time.Duration(100*(1<<(attempt-1))) - time.Sleep(delay) - } - } - - if err != nil { - // .versions directory doesn't exist - this can happen for objects that existed - // before versioning was enabled on the bucket. Fall back to checking for a - // regular (non-versioned) object file. - glog.V(1).Infof("getLatestObjectVersion: no .versions directory for %s%s after %d attempts (error: %v), checking for pre-versioning object", bucket, normalizedObject, maxRetries, err) - - regularEntry, regularErr := s3a.getEntry(bucketDir, normalizedObject) - if regularErr != nil { - glog.V(1).Infof("getLatestObjectVersion: no pre-versioning object found for %s%s (error: %v)", bucket, normalizedObject, regularErr) - return nil, fmt.Errorf("failed to get %s%s .versions directory and no regular object found: %w", bucket, normalizedObject, err) - } - - glog.V(1).Infof("getLatestObjectVersion: found pre-versioning object for %s/%s", bucket, normalizedObject) - return regularEntry, nil - } - - // Check if directory has latest version metadata - retry if missing due to race condition - if versionsEntry.Extended == nil { - // Retry a few times to handle the race condition where directory exists but metadata is not yet written - metadataRetries := 3 - for metaAttempt := 1; metaAttempt <= metadataRetries; metaAttempt++ { - // Small delay and re-read the directory - time.Sleep(time.Millisecond * 100) - versionsEntry, err = s3a.getEntry(bucketDir, versionsObjectPath) - if err != nil { - break - } - - if versionsEntry.Extended != nil { - break - } - } - - // If still no metadata after retries, fall back to pre-versioning object - if versionsEntry.Extended == nil { - glog.V(2).Infof("getLatestObjectVersion: no Extended metadata in .versions directory for %s%s after retries, checking for pre-versioning object", bucket, object) - - regularEntry, regularErr := s3a.getEntry(bucketDir, normalizedObject) - if regularErr != nil { - return nil, fmt.Errorf("no version metadata in .versions directory and no regular object found for %s%s", bucket, normalizedObject) - } - - glog.V(2).Infof("getLatestObjectVersion: found pre-versioning object for %s%s (no Extended metadata case)", bucket, object) - return regularEntry, nil - } - } - - latestVersionIdBytes, hasLatestVersionId := versionsEntry.Extended[s3_constants.ExtLatestVersionIdKey] - latestVersionFileBytes, hasLatestVersionFile := versionsEntry.Extended[s3_constants.ExtLatestVersionFileNameKey] - - if !hasLatestVersionId || !hasLatestVersionFile { - // No version metadata means all versioned objects have been deleted. - // Fall back to checking for a pre-versioning object. - glog.V(2).Infof("getLatestObjectVersion: no version metadata in .versions directory for %s/%s, checking for pre-versioning object", bucket, object) - - regularEntry, regularErr := s3a.getEntry(bucketDir, normalizedObject) - if regularErr != nil { - return nil, fmt.Errorf("no version metadata in .versions directory and no regular object found for %s%s", bucket, normalizedObject) - } - - glog.V(2).Infof("getLatestObjectVersion: found pre-versioning object for %s%s after version deletion", bucket, object) - return regularEntry, nil - } - - latestVersionId := string(latestVersionIdBytes) - latestVersionFile := string(latestVersionFileBytes) - - glog.V(2).Infof("getLatestObjectVersion: found latest version %s (file: %s) for %s/%s", latestVersionId, latestVersionFile, bucket, object) - - // Get the actual latest version file entry - latestVersionPath := versionsObjectPath + "/" + latestVersionFile - latestVersionEntry, err := s3a.getEntry(bucketDir, latestVersionPath) - if err != nil { - return nil, fmt.Errorf("failed to get latest version file %s: %v", latestVersionPath, err) - } - - return latestVersionEntry, nil -} - -// getObjectOwnerFromVersion extracts object owner information from version entry metadata -func (s3a *S3ApiServer) getObjectOwnerFromVersion(version *ObjectVersion, bucket, objectKey string) CanonicalUser { - // First try to get owner from the version entry itself - if version.Entry != nil && version.Entry.Extended != nil { - if ownerBytes, exists := version.Entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - ownerId := string(ownerBytes) - ownerDisplayName := s3a.iam.GetAccountNameById(ownerId) - return CanonicalUser{ID: ownerId, DisplayName: ownerDisplayName} - } - } - - // Fallback: try to get owner from the current version of the object - // This handles cases where older versions might not have owner metadata - if version.VersionId == "null" { - // For null version, check the regular object file - bucketDir := s3a.option.BucketsPath + "/" + bucket - if entry, err := s3a.getEntry(bucketDir, objectKey); err == nil && entry.Extended != nil { - if ownerBytes, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - ownerId := string(ownerBytes) - ownerDisplayName := s3a.iam.GetAccountNameById(ownerId) - return CanonicalUser{ID: ownerId, DisplayName: ownerDisplayName} - } - } - } else { - // For versioned objects, try to get from latest version metadata - if latestVersion, err := s3a.getLatestObjectVersion(bucket, objectKey); err == nil && latestVersion.Extended != nil { - if ownerBytes, exists := latestVersion.Extended[s3_constants.ExtAmzOwnerKey]; exists { - ownerId := string(ownerBytes) - ownerDisplayName := s3a.iam.GetAccountNameById(ownerId) - return CanonicalUser{ID: ownerId, DisplayName: ownerDisplayName} - } - } - } - - // Ultimate fallback: return anonymous if no owner found - return CanonicalUser{ID: s3_constants.AccountAnonymousId, DisplayName: "anonymous"} -} - -// getObjectOwnerFromEntry extracts object owner information from a file entry -func (s3a *S3ApiServer) getObjectOwnerFromEntry(entry *filer_pb.Entry) CanonicalUser { - if entry != nil && entry.Extended != nil { - if ownerBytes, exists := entry.Extended[s3_constants.ExtAmzOwnerKey]; exists { - ownerId := string(ownerBytes) - ownerDisplayName := s3a.iam.GetAccountNameById(ownerId) - return CanonicalUser{ID: ownerId, DisplayName: ownerDisplayName} - } - } - - // Fallback: return anonymous if no owner found - return CanonicalUser{ID: s3_constants.AccountAnonymousId, DisplayName: "anonymous"} -} diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go new file mode 100644 index 000000000..e55802937 --- /dev/null +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -0,0 +1,438 @@ +package s3api + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "io" + "net/http" + "net/url" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" +) + +type ListBucketResultV2 struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + MaxKeys int `xml:"MaxKeys"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` + ContinuationToken string `xml:"ContinuationToken,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` + KeyCount int `xml:"KeyCount"` + StartAfter string `xml:"StartAfter,omitempty"` +} + +func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html + + // collect parameters + bucket, _ := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("ListObjectsV2Handler %s", bucket) + + originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query()) + + if maxKeys < 0 { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys) + return + } + if delimiter != "" && delimiter != "/" { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) + return + } + + marker := continuationToken + if continuationToken == "" { + marker = startAfter + } + + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) + + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) + return + } + + if len(response.Contents) == 0 { + if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) + return + } + } + + responseV2 := &ListBucketResultV2{ + XMLName: response.XMLName, + Name: response.Name, + CommonPrefixes: response.CommonPrefixes, + Contents: response.Contents, + ContinuationToken: continuationToken, + Delimiter: response.Delimiter, + IsTruncated: response.IsTruncated, + KeyCount: len(response.Contents) + len(response.CommonPrefixes), + MaxKeys: response.MaxKeys, + NextContinuationToken: response.NextMarker, + Prefix: response.Prefix, + StartAfter: startAfter, + } + + writeSuccessResponseXML(w, r, responseV2) +} + +func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html + + // collect parameters + bucket, _ := s3_constants.GetBucketAndObject(r) + glog.V(3).Infof("ListObjectsV1Handler %s", bucket) + + originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) + + if maxKeys < 0 { + s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxKeys) + return + } + if delimiter != "" && delimiter != "/" { + s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) + return + } + + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) + + if err != nil { + s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) + return + } + + if len(response.Contents) == 0 { + if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists { + s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) + return + } + } + + writeSuccessResponseXML(w, r, response) +} + +func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) { + // convert full path prefix into directory name and prefix for entry name + reqDir, prefix := filepath.Split(originalPrefix) + if strings.HasPrefix(reqDir, "/") { + reqDir = reqDir[1:] + } + bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket) + bucketPrefixLen := len(bucketPrefix) + reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir) + if strings.HasSuffix(reqDir, "/") { + reqDir = strings.TrimSuffix(reqDir, "/") + } + + var contents []ListEntry + var commonPrefixes []PrefixEntry + var isTruncated bool + var doErr error + var nextMarker string + + // check filer + err = s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { + + _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, false, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) { + if entry.IsDirectory { + if delimiter == "/" { + commonPrefixes = append(commonPrefixes, PrefixEntry{ + Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:], + }) + } + if !(entry.IsDirectoryKeyObject() && strings.HasSuffix(entry.Name, "/")) { + return + } + } + storageClass := "STANDARD" + if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok { + storageClass = string(v) + } + contents = append(contents, ListEntry{ + Key: fmt.Sprintf("%s/%s", dir, entry.Name)[bucketPrefixLen:], + LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), + ETag: "\"" + filer.ETag(entry) + "\"", + Size: int64(filer.FileSize(entry)), + Owner: CanonicalUser{ + ID: fmt.Sprintf("%x", entry.Attributes.Uid), + DisplayName: entry.Attributes.UserName, + }, + StorageClass: StorageClass(storageClass), + }) + }) + glog.V(4).Infof("end doListFilerEntries isTruncated:%v nextMarker:%v reqDir: %v prefix: %v", isTruncated, nextMarker, reqDir, prefix) + if doErr != nil { + return doErr + } + + if !isTruncated { + nextMarker = "" + } + + if len(contents) == 0 && len(commonPrefixes) == 0 && maxKeys > 0 { + if strings.HasSuffix(originalPrefix, "/") && prefix == "" { + reqDir, prefix = filepath.Split(strings.TrimSuffix(reqDir, "/")) + reqDir = strings.TrimSuffix(reqDir, "/") + } + _, _, _, doErr = s3a.doListFilerEntries(client, reqDir, prefix, 1, prefix, delimiter, true, false, bucketPrefixLen, func(dir string, entry *filer_pb.Entry) { + if entry.IsDirectoryKeyObject() && entry.Name == prefix { + storageClass := "STANDARD" + if v, ok := entry.Extended[s3_constants.AmzStorageClass]; ok { + storageClass = string(v) + } + contents = append(contents, ListEntry{ + Key: fmt.Sprintf("%s/%s/", dir, entry.Name)[bucketPrefixLen:], + LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), + ETag: "\"" + fmt.Sprintf("%x", entry.Attributes.Md5) + "\"", + Size: int64(filer.FileSize(entry)), + Owner: CanonicalUser{ + ID: fmt.Sprintf("%x", entry.Attributes.Uid), + DisplayName: entry.Attributes.UserName, + }, + StorageClass: StorageClass(storageClass), + }) + } + }) + if doErr != nil { + return doErr + } + } + + if len(nextMarker) > 0 { + nextMarker = nextMarker[bucketPrefixLen:] + } + + response = ListBucketResult{ + Name: bucket, + Prefix: originalPrefix, + Marker: marker, + NextMarker: nextMarker, + MaxKeys: maxKeys, + Delimiter: delimiter, + IsTruncated: isTruncated, + Contents: contents, + CommonPrefixes: commonPrefixes, + } + + return nil + }) + + return +} + +func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, inclusiveStartFrom bool, subEntries bool, bucketPrefixLen int, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) { + // invariants + // prefix and marker should be under dir, marker may contain "/" + // maxKeys should be updated for each recursion + + if prefix == "/" && delimiter == "/" { + return + } + if maxKeys <= 0 { + return + } + + if strings.Contains(marker, "/") { + if strings.HasSuffix(marker, "/") { + marker = strings.TrimSuffix(marker, "/") + } + sepIndex := strings.Index(marker, "/") + if sepIndex != -1 { + subPrefix, subMarker := marker[0:sepIndex], marker[sepIndex+1:] + var subDir string + if len(dir) > bucketPrefixLen && dir[bucketPrefixLen:] == subPrefix { + subDir = dir + } else { + subDir = fmt.Sprintf("%s/%s", dir, subPrefix) + } + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, subDir, "", maxKeys, subMarker, delimiter, false, false, bucketPrefixLen, eachEntryFn) + if subErr != nil { + err = subErr + return + } + counter += subCounter + isTruncated = isTruncated || subIsTruncated + maxKeys -= subCounter + nextMarker = subNextMarker + // finished processing this sub directory + marker = subPrefix + } + } + if maxKeys <= 0 { + return + } + + // now marker is also a direct child of dir + request := &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: prefix, + Limit: uint32(maxKeys + 2), // bucket root directory needs to skip additional s3_constants.MultipartUploadsFolder folder + StartFromFileName: marker, + InclusiveStartFrom: inclusiveStartFrom, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, listErr := client.ListEntries(ctx, request) + if listErr != nil { + err = fmt.Errorf("list entires %+v: %v", request, listErr) + return + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + err = fmt.Errorf("iterating entires %+v: %v", request, recvErr) + return + } + } + if counter >= maxKeys { + isTruncated = true + return + } + entry := resp.Entry + nextMarker = dir + "/" + entry.Name + if entry.IsDirectory { + // println("ListEntries", dir, "dir:", entry.Name) + if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME no need to apply to all directories. this extra also affects maxKeys + continue + } + if delimiter == "" { + eachEntryFn(dir, entry) + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter) + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, false, true, bucketPrefixLen, eachEntryFn) + if subErr != nil { + err = fmt.Errorf("doListFilerEntries2: %v", subErr) + return + } + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated) + if subCounter == 0 && entry.IsDirectoryKeyObject() { + entry.Name += "/" + eachEntryFn(dir, entry) + counter++ + } + counter += subCounter + nextMarker = subNextMarker + if subIsTruncated { + isTruncated = true + return + } + } else if delimiter == "/" { + var isEmpty bool + if !s3a.option.AllowEmptyFolder && !entry.IsDirectoryKeyObject() { + if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil { + glog.Errorf("check empty folder %s: %v", dir, err) + } + } + if !isEmpty { + nextMarker += "/" + eachEntryFn(dir, entry) + counter++ + } + } + } else if !(delimiter == "/" && subEntries) { + // println("ListEntries", dir, "file:", entry.Name) + eachEntryFn(dir, entry) + counter++ + } + } + return +} + +func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) { + prefix = values.Get("prefix") + token = values.Get("continuation-token") + startAfter = values.Get("start-after") + delimiter = values.Get("delimiter") + if values.Get("max-keys") != "" { + maxkeys, _ = strconv.Atoi(values.Get("max-keys")) + } else { + maxkeys = maxObjectListSizeLimit + } + fetchOwner = values.Get("fetch-owner") == "true" + return +} + +func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int) { + prefix = values.Get("prefix") + marker = values.Get("marker") + delimiter = values.Get("delimiter") + if values.Get("max-keys") != "" { + maxkeys, _ = strconv.Atoi(values.Get("max-keys")) + } else { + maxkeys = maxObjectListSizeLimit + } + return +} + +func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) { + // println("+ isDirectoryAllEmpty", dir, name) + glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name) + defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) + var fileCounter int + var subDirs []string + currentDir := parentDir + "/" + name + var startFrom string + var isExhausted bool + var foundEntry bool + for fileCounter == 0 && !isExhausted && err == nil { + err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error { + foundEntry = true + if entry.IsDirectory { + subDirs = append(subDirs, entry.Name) + } else { + fileCounter++ + } + startFrom = entry.Name + isExhausted = isExhausted || isLast + glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) + return nil + }, startFrom, false, 8) + if !foundEntry { + break + } + } + + if err != nil { + return false, err + } + + if fileCounter > 0 { + return false, nil + } + + for _, subDir := range subDirs { + isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir) + if subErr != nil { + return false, subErr + } + if !isSubEmpty { + return false, nil + } + } + + glog.V(1).Infof("deleting empty folder %s", currentDir) + if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil { + return + } + + return true, nil +} diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go new file mode 100644 index 000000000..641f995b7 --- /dev/null +++ b/weed/s3api/s3api_objects_list_handlers_test.go @@ -0,0 +1,39 @@ +package s3api + +import ( + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "testing" + "time" +) + +func TestListObjectsHandler(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html + + expected := ` +test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49Z` + + response := ListBucketResult{ + Name: "test_container", + Prefix: "", + Marker: "", + NextMarker: "", + MaxKeys: 1000, + IsTruncated: false, + Contents: []ListEntry{{ + Key: "1.zip", + LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC), + ETag: "\"4397da7a7649e8085de9916c240e8166\"", + Size: 1234567, + Owner: CanonicalUser{ + ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932", + }, + StorageClass: "STANDARD", + }}, + } + + encoded := string(s3err.EncodeXMLResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_policy.go b/weed/s3api/s3api_policy.go index dab2e3f02..6e2c8cfa2 100644 --- a/weed/s3api/s3api_policy.go +++ b/weed/s3api/s3api_policy.go @@ -47,14 +47,8 @@ type Filter struct { // Prefix holds the prefix xml tag in and type Prefix struct { - XMLName xml.Name `xml:"Prefix"` - set bool - - val string -} - -func (p Prefix) String() string { - return p.val + string + set bool } // MarshalXML encodes Prefix field into an XML form. @@ -62,21 +56,11 @@ func (p Prefix) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error if !p.set { return nil } - return e.EncodeElement(p.val, startElement) -} - -func (p *Prefix) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - prefix := "" - _ = d.DecodeElement(&prefix, &startElement) - *p = Prefix{set: true, val: prefix} - return nil + return e.EncodeElement(p.string, startElement) } // MarshalXML encodes Filter field into an XML form. func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if !f.set { - return nil - } if err := e.EncodeToken(start); err != nil { return err } diff --git a/weed/s3api/s3api_put_handlers.go b/weed/s3api/s3api_put_handlers.go deleted file mode 100644 index fafd2f329..000000000 --- a/weed/s3api/s3api_put_handlers.go +++ /dev/null @@ -1,270 +0,0 @@ -package s3api - -import ( - "encoding/base64" - "io" - "net/http" - "strings" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// PutToFilerEncryptionResult holds the result of encryption processing -type PutToFilerEncryptionResult struct { - DataReader io.Reader - SSEType string - CustomerKey *SSECustomerKey - SSEIV []byte - SSEKMSKey *SSEKMSKey - SSES3Key *SSES3Key - SSEKMSMetadata []byte - SSES3Metadata []byte -} - -// calculatePartOffset calculates unique offset for each part to prevent IV reuse in multipart uploads -// AWS S3 part numbers must start from 1, never 0 or negative -func calculatePartOffset(partNumber int) int64 { - // AWS S3 part numbers must start from 1, never 0 or negative - if partNumber < 1 { - glog.Errorf("Invalid partNumber: %d. Must be >= 1.", partNumber) - return 0 - } - // Using a large multiplier to ensure block offsets for different parts do not overlap. - // S3 part size limit is 5GB, so this provides a large safety margin. - partOffset := int64(partNumber-1) * s3_constants.PartOffsetMultiplier - return partOffset -} - -// handleSSECEncryption processes SSE-C encryption for the data reader -func (s3a *S3ApiServer) handleSSECEncryption(r *http.Request, dataReader io.Reader) (io.Reader, *SSECustomerKey, []byte, s3err.ErrorCode) { - // Handle SSE-C encryption if requested - customerKey, err := ParseSSECHeaders(r) - if err != nil { - glog.Errorf("SSE-C header validation failed: %v", err) - // Use shared error mapping helper - errCode := MapSSECErrorToS3Error(err) - return nil, nil, nil, errCode - } - - // Apply SSE-C encryption if customer key is provided - var sseIV []byte - if customerKey != nil { - encryptedReader, iv, encErr := CreateSSECEncryptedReader(dataReader, customerKey) - if encErr != nil { - return nil, nil, nil, s3err.ErrInternalError - } - dataReader = encryptedReader - sseIV = iv - } - - return dataReader, customerKey, sseIV, s3err.ErrNone -} - -// handleSSEKMSEncryption processes SSE-KMS encryption for the data reader -func (s3a *S3ApiServer) handleSSEKMSEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSEKMSKey, []byte, s3err.ErrorCode) { - // Handle SSE-KMS encryption if requested - if !IsSSEKMSRequest(r) { - return dataReader, nil, nil, s3err.ErrNone - } - - glog.V(3).Infof("handleSSEKMSEncryption: SSE-KMS request detected, processing encryption") - - // Parse SSE-KMS headers - keyID := r.Header.Get(s3_constants.AmzServerSideEncryptionAwsKmsKeyId) - bucketKeyEnabled := strings.ToLower(r.Header.Get(s3_constants.AmzServerSideEncryptionBucketKeyEnabled)) == "true" - - // Build encryption context - bucket, object := s3_constants.GetBucketAndObject(r) - encryptionContext := BuildEncryptionContext(bucket, object, bucketKeyEnabled) - - // Add any user-provided encryption context - if contextHeader := r.Header.Get(s3_constants.AmzServerSideEncryptionContext); contextHeader != "" { - userContext, err := parseEncryptionContext(contextHeader) - if err != nil { - return nil, nil, nil, s3err.ErrInvalidRequest - } - // Merge user context with default context - for k, v := range userContext { - encryptionContext[k] = v - } - } - - // Check if a base IV is provided (for multipart uploads) - var encryptedReader io.Reader - var sseKey *SSEKMSKey - var encErr error - - baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSBaseIVHeader) - if baseIVHeader != "" { - // Decode the base IV from the header - baseIV, decodeErr := base64.StdEncoding.DecodeString(baseIVHeader) - if decodeErr != nil || len(baseIV) != 16 { - return nil, nil, nil, s3err.ErrInternalError - } - // Use the provided base IV with unique part offset for multipart upload consistency - encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBaseIVAndOffset(dataReader, keyID, encryptionContext, bucketKeyEnabled, baseIV, partOffset) - glog.V(4).Infof("Using provided base IV %x for SSE-KMS encryption", baseIV[:8]) - } else { - // Generate a new IV for single-part uploads - encryptedReader, sseKey, encErr = CreateSSEKMSEncryptedReaderWithBucketKey(dataReader, keyID, encryptionContext, bucketKeyEnabled) - } - - if encErr != nil { - return nil, nil, nil, s3err.ErrInternalError - } - - // Prepare SSE-KMS metadata for later header setting - sseKMSMetadata, metaErr := SerializeSSEKMSMetadata(sseKey) - if metaErr != nil { - return nil, nil, nil, s3err.ErrInternalError - } - - return encryptedReader, sseKey, sseKMSMetadata, s3err.ErrNone -} - -// handleSSES3MultipartEncryption handles multipart upload logic for SSE-S3 encryption -func (s3a *S3ApiServer) handleSSES3MultipartEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSES3Key, s3err.ErrorCode) { - keyDataHeader := r.Header.Get(s3_constants.SeaweedFSSSES3KeyDataHeader) - baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSES3BaseIVHeader) - - glog.V(4).Infof("handleSSES3MultipartEncryption: using provided key and base IV for multipart part") - - // Decode the key data - keyData, decodeErr := base64.StdEncoding.DecodeString(keyDataHeader) - if decodeErr != nil { - return nil, nil, s3err.ErrInternalError - } - - // Deserialize the SSE-S3 key - keyManager := GetSSES3KeyManager() - key, deserializeErr := DeserializeSSES3Metadata(keyData, keyManager) - if deserializeErr != nil { - return nil, nil, s3err.ErrInternalError - } - - // Decode the base IV - baseIV, decodeErr := base64.StdEncoding.DecodeString(baseIVHeader) - if decodeErr != nil || len(baseIV) != s3_constants.AESBlockSize { - return nil, nil, s3err.ErrInternalError - } - - // Use the provided base IV with unique part offset for multipart upload consistency - encryptedReader, _, encErr := CreateSSES3EncryptedReaderWithBaseIV(dataReader, key, baseIV, partOffset) - if encErr != nil { - return nil, nil, s3err.ErrInternalError - } - - glog.V(4).Infof("handleSSES3MultipartEncryption: using provided base IV %x", baseIV[:8]) - return encryptedReader, key, s3err.ErrNone -} - -// handleSSES3SinglePartEncryption handles single-part upload logic for SSE-S3 encryption -func (s3a *S3ApiServer) handleSSES3SinglePartEncryption(dataReader io.Reader) (io.Reader, *SSES3Key, s3err.ErrorCode) { - glog.V(4).Infof("handleSSES3SinglePartEncryption: generating new key for single-part upload") - - keyManager := GetSSES3KeyManager() - key, err := keyManager.GetOrCreateKey("") - if err != nil { - return nil, nil, s3err.ErrInternalError - } - - // Create encrypted reader - encryptedReader, iv, encErr := CreateSSES3EncryptedReader(dataReader, key) - if encErr != nil { - return nil, nil, s3err.ErrInternalError - } - - // Store IV on the key object for later decryption - key.IV = iv - - // Store the key for later use - keyManager.StoreKey(key) - - return encryptedReader, key, s3err.ErrNone -} - -// handleSSES3Encryption processes SSE-S3 encryption for the data reader -func (s3a *S3ApiServer) handleSSES3Encryption(r *http.Request, dataReader io.Reader, partOffset int64) (io.Reader, *SSES3Key, []byte, s3err.ErrorCode) { - if !IsSSES3RequestInternal(r) { - return dataReader, nil, nil, s3err.ErrNone - } - - glog.V(3).Infof("handleSSES3Encryption: SSE-S3 request detected, processing encryption") - - var encryptedReader io.Reader - var sseS3Key *SSES3Key - var errCode s3err.ErrorCode - - // Check if this is multipart upload (key data and base IV provided) - keyDataHeader := r.Header.Get(s3_constants.SeaweedFSSSES3KeyDataHeader) - baseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSES3BaseIVHeader) - - if keyDataHeader != "" && baseIVHeader != "" { - // Multipart upload: use provided key and base IV - encryptedReader, sseS3Key, errCode = s3a.handleSSES3MultipartEncryption(r, dataReader, partOffset) - } else { - // Single-part upload: generate new key and IV - encryptedReader, sseS3Key, errCode = s3a.handleSSES3SinglePartEncryption(dataReader) - } - - if errCode != s3err.ErrNone { - return nil, nil, nil, errCode - } - - // Prepare SSE-S3 metadata for later header setting - sseS3Metadata, metaErr := SerializeSSES3Metadata(sseS3Key) - if metaErr != nil { - return nil, nil, nil, s3err.ErrInternalError - } - - glog.V(3).Infof("handleSSES3Encryption: prepared SSE-S3 metadata for object") - return encryptedReader, sseS3Key, sseS3Metadata, s3err.ErrNone -} - -// handleAllSSEEncryption processes all SSE types in sequence and returns the final encrypted reader -// This eliminates repetitive dataReader assignments and centralizes SSE processing -func (s3a *S3ApiServer) handleAllSSEEncryption(r *http.Request, dataReader io.Reader, partOffset int64) (*PutToFilerEncryptionResult, s3err.ErrorCode) { - result := &PutToFilerEncryptionResult{ - DataReader: dataReader, - } - - // Handle SSE-C encryption first - encryptedReader, customerKey, sseIV, errCode := s3a.handleSSECEncryption(r, result.DataReader) - if errCode != s3err.ErrNone { - return nil, errCode - } - result.DataReader = encryptedReader - result.CustomerKey = customerKey - result.SSEIV = sseIV - - // Handle SSE-KMS encryption - encryptedReader, sseKMSKey, sseKMSMetadata, errCode := s3a.handleSSEKMSEncryption(r, result.DataReader, partOffset) - if errCode != s3err.ErrNone { - return nil, errCode - } - result.DataReader = encryptedReader - result.SSEKMSKey = sseKMSKey - result.SSEKMSMetadata = sseKMSMetadata - - // Handle SSE-S3 encryption - encryptedReader, sseS3Key, sseS3Metadata, errCode := s3a.handleSSES3Encryption(r, result.DataReader, partOffset) - if errCode != s3err.ErrNone { - return nil, errCode - } - result.DataReader = encryptedReader - result.SSES3Key = sseS3Key - result.SSES3Metadata = sseS3Metadata - - // Set SSE type for response headers - if customerKey != nil { - result.SSEType = s3_constants.SSETypeC - } else if sseKMSKey != nil { - result.SSEType = s3_constants.SSETypeKMS - } else if sseS3Key != nil { - result.SSEType = s3_constants.SSETypeS3 - } - - return result, s3err.ErrNone -} diff --git a/weed/s3api/s3api_put_object_helper.go b/weed/s3api/s3api_put_object_helper.go deleted file mode 100644 index 626e1c22d..000000000 --- a/weed/s3api/s3api_put_object_helper.go +++ /dev/null @@ -1,40 +0,0 @@ -package s3api - -import ( - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -// getRequestDataReader returns the appropriate reader for the request body. -// When IAM is disabled, it still processes chunked transfer encoding for -// authTypeStreamingUnsigned to strip checksum headers and extract the actual data. -// This fixes issues where chunked data with checksums would be stored incorrectly -// when IAM is not enabled. -func getRequestDataReader(s3a *S3ApiServer, r *http.Request) (io.ReadCloser, s3err.ErrorCode) { - var s3ErrCode s3err.ErrorCode - dataReader := r.Body - rAuthType := getRequestAuthType(r) - if s3a.iam.isEnabled() { - switch rAuthType { - case authTypeStreamingSigned, authTypeStreamingUnsigned: - dataReader, s3ErrCode = s3a.iam.newChunkedReader(r) - case authTypeSignedV2, authTypePresignedV2: - _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) - case authTypePresigned, authTypeSigned: - _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) - } - } else { - switch rAuthType { - case authTypeStreamingSigned: - s3ErrCode = s3err.ErrAuthNotSetup - case authTypeStreamingUnsigned: - // Even when IAM is disabled, we still need to handle chunked transfer encoding - // to strip checksum headers and process the data correctly - dataReader, s3ErrCode = s3a.iam.newChunkedReader(r) - } - } - - return dataReader, s3ErrCode -} diff --git a/weed/s3api/s3api_put_object_helper_test.go b/weed/s3api/s3api_put_object_helper_test.go deleted file mode 100644 index 455701772..000000000 --- a/weed/s3api/s3api_put_object_helper_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package s3api - -import ( - "net/http" - "strings" - "testing" - - "github.com/seaweedfs/seaweedfs/weed/credential" - _ "github.com/seaweedfs/seaweedfs/weed/credential/memory" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" -) - -func TestGetRequestDataReader_ChunkedEncodingWithoutIAM(t *testing.T) { - // Create an S3ApiServer with IAM disabled - s3a := &S3ApiServer{ - iam: NewIdentityAccessManagementWithStore(&S3ApiServerOption{}, string(credential.StoreTypeMemory)), - } - // Ensure IAM is disabled for this test - s3a.iam.isAuthEnabled = false - - tests := []struct { - name string - contentSha256 string - expectedError s3err.ErrorCode - shouldProcess bool - description string - }{ - { - name: "RegularRequest", - contentSha256: "", - expectedError: s3err.ErrNone, - shouldProcess: false, - description: "Regular requests without chunked encoding should pass through unchanged", - }, - { - name: "StreamingSignedWithoutIAM", - contentSha256: "STREAMING-AWS4-HMAC-SHA256-PAYLOAD", - expectedError: s3err.ErrAuthNotSetup, - shouldProcess: false, - description: "Streaming signed requests should fail when IAM is disabled", - }, - { - name: "StreamingUnsignedWithoutIAM", - contentSha256: "STREAMING-UNSIGNED-PAYLOAD-TRAILER", - expectedError: s3err.ErrNone, - shouldProcess: true, - description: "Streaming unsigned requests should be processed even when IAM is disabled", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - body := strings.NewReader("test data") - req, _ := http.NewRequest("PUT", "/bucket/key", body) - - if tt.contentSha256 != "" { - req.Header.Set("x-amz-content-sha256", tt.contentSha256) - } - - dataReader, errCode := getRequestDataReader(s3a, req) - - // Check error code - if errCode != tt.expectedError { - t.Errorf("Expected error code %v, got %v", tt.expectedError, errCode) - } - - // For successful cases, check if processing occurred - if errCode == s3err.ErrNone { - if tt.shouldProcess { - // For chunked requests, the reader should be different from the original body - if dataReader == req.Body { - t.Error("Expected dataReader to be processed by newChunkedReader, but got raw request body") - } - } else { - // For regular requests, the reader should be the same as the original body - if dataReader != req.Body { - t.Error("Expected dataReader to be the same as request body for regular requests") - } - } - } - - t.Logf("Test case: %s - %s", tt.name, tt.description) - }) - } -} - -func TestGetRequestDataReader_AuthTypeDetection(t *testing.T) { - // Create an S3ApiServer with IAM disabled - s3a := &S3ApiServer{ - iam: NewIdentityAccessManagementWithStore(&S3ApiServerOption{}, string(credential.StoreTypeMemory)), - } - s3a.iam.isAuthEnabled = false - - // Test the specific case mentioned in the issue where chunked data - // with checksum headers would be stored incorrectly - t.Run("ChunkedDataWithChecksum", func(t *testing.T) { - // Simulate a request with chunked data and checksum trailer - body := strings.NewReader("test content") - req, _ := http.NewRequest("PUT", "/bucket/key", body) - req.Header.Set("x-amz-content-sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER") - req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32") - - // Verify the auth type is detected correctly - authType := getRequestAuthType(req) - if authType != authTypeStreamingUnsigned { - t.Errorf("Expected authTypeStreamingUnsigned, got %v", authType) - } - - // Verify the request is processed correctly - dataReader, errCode := getRequestDataReader(s3a, req) - if errCode != s3err.ErrNone { - t.Errorf("Expected no error, got %v", errCode) - } - - // The dataReader should be processed by newChunkedReader - if dataReader == req.Body { - t.Error("Expected dataReader to be processed by newChunkedReader to handle chunked encoding") - } - }) -} - -func TestGetRequestDataReader_IAMEnabled(t *testing.T) { - // Create an S3ApiServer with IAM enabled - s3a := &S3ApiServer{ - iam: NewIdentityAccessManagementWithStore(&S3ApiServerOption{}, string(credential.StoreTypeMemory)), - } - s3a.iam.isAuthEnabled = true - - t.Run("StreamingUnsignedWithIAMEnabled", func(t *testing.T) { - body := strings.NewReader("test data") - req, _ := http.NewRequest("PUT", "/bucket/key", body) - req.Header.Set("x-amz-content-sha256", "STREAMING-UNSIGNED-PAYLOAD-TRAILER") - - dataReader, errCode := getRequestDataReader(s3a, req) - - // Should succeed and be processed - if errCode != s3err.ErrNone { - t.Errorf("Expected no error, got %v", errCode) - } - - // Should be processed by newChunkedReader - if dataReader == req.Body { - t.Error("Expected dataReader to be processed by newChunkedReader") - } - }) -} - -// Test helper to verify auth type detection works correctly -func TestAuthTypeDetection(t *testing.T) { - tests := []struct { - name string - headers map[string]string - expectedType authType - }{ - { - name: "StreamingUnsigned", - headers: map[string]string{"x-amz-content-sha256": "STREAMING-UNSIGNED-PAYLOAD-TRAILER"}, - expectedType: authTypeStreamingUnsigned, - }, - { - name: "StreamingSigned", - headers: map[string]string{"x-amz-content-sha256": "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"}, - expectedType: authTypeStreamingSigned, - }, - { - name: "Regular", - headers: map[string]string{}, - expectedType: authTypeAnonymous, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest("PUT", "/bucket/key", strings.NewReader("test")) - for key, value := range tt.headers { - req.Header.Set(key, value) - } - - authType := getRequestAuthType(req) - if authType != tt.expectedType { - t.Errorf("Expected auth type %v, got %v", tt.expectedType, authType) - } - }) - } -} diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 7f5b88566..cc5ca5231 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -2,31 +2,20 @@ package s3api import ( "context" - "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" "net" "net/http" - "os" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/credential" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/iam/integration" - "github.com/seaweedfs/seaweedfs/weed/iam/policy" - "github.com/seaweedfs/seaweedfs/weed/iam/sts" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" - "github.com/seaweedfs/seaweedfs/weed/util/grace" - + "github.com/chrislusf/seaweedfs/weed/pb" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/pb" - . "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/util" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client" "google.golang.org/grpc" ) @@ -35,38 +24,24 @@ type S3ApiServerOption struct { Port int Config string DomainName string - AllowedOrigins []string BucketsPath string GrpcDialOption grpc.DialOption AllowEmptyFolder bool AllowDeleteBucketNotEmpty bool - LocalFilerSocket string - DataCenter string - FilerGroup string - IamConfig string // Advanced IAM configuration file path + LocalFilerSocket *string } type S3ApiServer struct { s3_pb.UnimplementedSeaweedS3Server - option *S3ApiServerOption - iam *IdentityAccessManagement - iamIntegration *S3IAMIntegration // Advanced IAM integration for JWT authentication - cb *CircuitBreaker - randomClientId int32 - filerGuard *security.Guard - client util_http_client.HTTPClientInterface - bucketRegistry *BucketRegistry - credentialManager *credential.CredentialManager - bucketConfigCache *BucketConfigCache + option *S3ApiServerOption + iam *IdentityAccessManagement + cb *CircuitBreaker + randomClientId int32 + filerGuard *security.Guard + client *http.Client } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { - return NewS3ApiServerWithStore(router, option, "") -} - -func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, explicitStore string) (s3ApiServer *S3ApiServer, err error) { - startTsNs := time.Now().UnixNano() - v := util.GetViper() signingKey := v.GetString("jwt.filer_signing.key") v.SetDefault("jwt.filer_signing.expires_after_seconds", 10) @@ -76,70 +51,23 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl v.SetDefault("jwt.filer_signing.read.expires_after_seconds", 60) readExpiresAfterSec := v.GetInt("jwt.filer_signing.read.expires_after_seconds") - v.SetDefault("cors.allowed_origins.values", "*") - - if len(option.AllowedOrigins) == 0 { - allowedOrigins := v.GetString("cors.allowed_origins.values") - domains := strings.Split(allowedOrigins, ",") - option.AllowedOrigins = domains - } - - var iam *IdentityAccessManagement - - iam = NewIdentityAccessManagementWithStore(option, explicitStore) - s3ApiServer = &S3ApiServer{ - option: option, - iam: iam, - randomClientId: util.RandomInt32(), - filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec), - cb: NewCircuitBreaker(option), - credentialManager: iam.credentialManager, - bucketConfigCache: NewBucketConfigCache(60 * time.Minute), // Increased TTL since cache is now event-driven + option: option, + iam: NewIdentityAccessManagement(option), + randomClientId: util.RandomInt32(), + filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec), + cb: NewCircuitBreaker(option), } - - // Initialize advanced IAM system if config is provided - if option.IamConfig != "" { - glog.V(0).Infof("Loading advanced IAM configuration from: %s", option.IamConfig) - - iamManager, err := loadIAMManagerFromConfig(option.IamConfig, func() string { - return string(option.Filer) - }) - if err != nil { - glog.Errorf("Failed to load IAM configuration: %v", err) - } else { - // Create S3 IAM integration with the loaded IAM manager - s3iam := NewS3IAMIntegration(iamManager, string(option.Filer)) - - // Set IAM integration in server - s3ApiServer.iamIntegration = s3iam - - // Set the integration in the traditional IAM for compatibility - iam.SetIAMIntegration(s3iam) - - glog.V(0).Infof("Advanced IAM system initialized successfully") - } - } - - if option.Config != "" { - grace.OnReload(func() { - if err := s3ApiServer.iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { - glog.Errorf("fail to load config file %s: %v", option.Config, err) - } else { - glog.V(0).Infof("Loaded %d identities from config file %s", len(s3ApiServer.iam.identities), option.Config) - } - }) - } - s3ApiServer.bucketRegistry = NewBucketRegistry(s3ApiServer) - if option.LocalFilerSocket == "" { - if s3ApiServer.client, err = util_http.NewGlobalHttpClient(); err != nil { - return nil, err - } + if option.LocalFilerSocket == nil || *option.LocalFilerSocket == "" { + s3ApiServer.client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} } else { s3ApiServer.client = &http.Client{ Transport: &http.Transport{ DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", option.LocalFilerSocket) + return net.Dial("unix", *option.LocalFilerSocket) }, }, } @@ -147,46 +75,16 @@ func NewS3ApiServerWithStore(router *mux.Router, option *S3ApiServerOption, expl s3ApiServer.registerRouter(router) - go s3ApiServer.subscribeMetaEvents("s3", startTsNs, filer.DirectoryEtcRoot, []string{option.BucketsPath}) + go s3ApiServer.subscribeMetaEvents("s3", filer.DirectoryEtcRoot, time.Now().UnixNano()) return s3ApiServer, nil } -// handleCORSOriginValidation handles the common CORS origin validation logic -func (s3a *S3ApiServer) handleCORSOriginValidation(w http.ResponseWriter, r *http.Request) bool { - origin := r.Header.Get("Origin") - if origin != "" { - if len(s3a.option.AllowedOrigins) == 0 || s3a.option.AllowedOrigins[0] == "*" { - origin = "*" - } else { - originFound := false - for _, allowedOrigin := range s3a.option.AllowedOrigins { - if origin == allowedOrigin { - originFound = true - break - } - } - if !originFound { - writeFailureResponse(w, r, http.StatusForbidden) - return false - } - } - } - - w.Header().Set("Access-Control-Allow-Origin", origin) - w.Header().Set("Access-Control-Expose-Headers", "*") - w.Header().Set("Access-Control-Allow-Methods", "*") - w.Header().Set("Access-Control-Allow-Headers", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") - return true -} - func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // API Router apiRouter := router.PathPrefix("/").Subrouter() // Readiness Probe - apiRouter.Methods(http.MethodGet).Path("/status").HandlerFunc(s3a.StatusHandler) - apiRouter.Methods(http.MethodGet).Path("/healthz").HandlerFunc(s3a.StatusHandler) + apiRouter.Methods("GET").Path("/status").HandlerFunc(s3a.StatusHandler) var routers []*mux.Router if s3a.option.DomainName != "" { @@ -200,16 +98,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { } routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter()) - // Get CORS middleware instance with caching - corsMiddleware := s3a.getCORSMiddleware() - for _, bucket := range routers { - // Apply CORS middleware to bucket routers for automatic CORS header handling - bucket.Use(corsMiddleware.Handler) - - // Bucket-specific OPTIONS handler for CORS preflight requests - // Use PathPrefix to catch all bucket-level preflight routes including /bucket/object - bucket.PathPrefix("/").Methods(http.MethodOptions).HandlerFunc(corsMiddleware.HandleOptionsRequest) // each case should follow the next rule: // - requesting object with query must precede any other methods @@ -220,275 +109,124 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // objects with query // CopyObjectPart - bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "") // GetObjectTagging - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "") // PutObjectTagging - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "") // DeleteObjectTagging - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "") // PutObjectACL - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "") // PutObjectRetention - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "") // PutObjectLegalHold - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "") + // PutObjectLockConfiguration + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "") // GetObjectACL - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "") - // GetObjectRetention - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectRetentionHandler, ACTION_READ)), "GET")).Queries("retention", "") - // GetObjectLegalHold - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectLegalHoldHandler, ACTION_READ)), "GET")).Queries("legal-hold", "") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "") // objects with query // raw objects // HeadObject - bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(track(s3a.AuthWithPublicRead(func(w http.ResponseWriter, r *http.Request) { - limitedHandler, _ := s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ) - limitedHandler(w, r) - }, ACTION_READ), "GET")) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET")) // GetObject, but directory listing is not supported - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(track(s3a.AuthWithPublicRead(func(w http.ResponseWriter, r *http.Request) { - limitedHandler, _ := s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ) - limitedHandler(w, r) - }, ACTION_READ), "GET")) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET")) // CopyObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY")) + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY")) // PutObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT")) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT")) // DeleteObject - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE")) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE")) // raw objects // buckets with query // DeleteMultipleObjects - bucket.Methods(http.MethodPost).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "") // GetBucketACL - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ_ACP)), "GET")).Queries("acl", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "") // PutBucketACL - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE_ACP)), "PUT")).Queries("acl", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "") // GetBucketPolicy - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "") // PutBucketPolicy - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "") // DeleteBucketPolicy - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "") // GetBucketCors - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "") // PutBucketCors - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "") // DeleteBucketCors - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "") // GetBucketLifecycleConfiguration - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "") // PutBucketLifecycleConfiguration - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "") // DeleteBucketLifecycleConfiguration - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "") // GetBucketLocation - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "") // GetBucketRequestPayment - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "") - - // GetBucketVersioning - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketVersioningHandler, ACTION_READ)), "GET")).Queries("versioning", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketVersioningHandler, ACTION_WRITE)), "PUT")).Queries("versioning", "") - - // GetObjectLockConfiguration / PutObjectLockConfiguration (bucket-level operations) - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectLockConfigurationHandler, ACTION_READ)), "GET")).Queries("object-lock", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "") - - // GetBucketTagging - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketTaggingHandler, ACTION_TAGGING)), "GET")).Queries("tagging", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "") - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "") - - // GetBucketEncryption - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketEncryptionHandler, ACTION_ADMIN)), "GET")).Queries("encryption", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketEncryptionHandler, ACTION_ADMIN)), "PUT")).Queries("encryption", "") - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketEncryptionHandler, ACTION_ADMIN)), "DELETE")).Queries("encryption", "") - - // GetPublicAccessBlockHandler - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetPublicAccessBlockHandler, ACTION_ADMIN)), "GET")).Queries("publicAccessBlock", "") - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutPublicAccessBlockHandler, ACTION_ADMIN)), "PUT")).Queries("publicAccessBlock", "") - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeletePublicAccessBlockHandler, ACTION_ADMIN)), "DELETE")).Queries("publicAccessBlock", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "") // ListObjectsV2 - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.AuthWithPublicRead(func(w http.ResponseWriter, r *http.Request) { - limitedHandler, _ := s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST) - limitedHandler(w, r) - }, ACTION_LIST), "LIST")).Queries("list-type", "2") - - // ListObjectVersions - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectVersionsHandler, ACTION_LIST)), "LIST")).Queries("versions", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2") // buckets with query - // PutBucketOwnershipControls - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketOwnershipControls, ACTION_ADMIN), "PUT")).Queries("ownershipControls", "") - - //GetBucketOwnershipControls - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketOwnershipControls, ACTION_READ), "GET")).Queries("ownershipControls", "") - - //DeleteBucketOwnershipControls - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketOwnershipControls, ACTION_ADMIN), "DELETE")).Queries("ownershipControls", "") // raw buckets // PostPolicy - bucket.Methods(http.MethodPost).HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST")) + bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST")) // HeadBucket - bucket.Methods(http.MethodHead).HandlerFunc(track(s3a.AuthWithPublicRead(func(w http.ResponseWriter, r *http.Request) { - limitedHandler, _ := s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ) - limitedHandler(w, r) - }, ACTION_READ), "GET")) + bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET")) // PutBucket - bucket.Methods(http.MethodPut).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketHandler, ACTION_ADMIN)), "PUT")) - + bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT")) // DeleteBucket - bucket.Methods(http.MethodDelete).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_DELETE_BUCKET)), "DELETE")) + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE")) // ListObjectsV1 (Legacy) - bucket.Methods(http.MethodGet).HandlerFunc(track(s3a.AuthWithPublicRead(func(w http.ResponseWriter, r *http.Request) { - limitedHandler, _ := s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST) - limitedHandler(w, r) - }, ACTION_LIST), "LIST")) + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST")) // raw buckets } - // Global OPTIONS handler for service-level requests (non-bucket requests) - // This handles requests like OPTIONS /, OPTIONS /status, OPTIONS /healthz - // Place this after bucket handlers to avoid interfering with bucket CORS middleware - apiRouter.Methods(http.MethodOptions).PathPrefix("/").HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - // Only handle if this is not a bucket-specific request - vars := mux.Vars(r) - bucket := vars["bucket"] - if bucket != "" { - // This is a bucket-specific request, let bucket CORS middleware handle it - http.NotFound(w, r) - return - } - - if s3a.handleCORSOriginValidation(w, r) { - writeSuccessResponseEmpty(w, r) - } - }) - // ListBuckets - apiRouter.Methods(http.MethodGet).Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST")) + apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST")) // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(s3err.NotFoundHandler) } - -// loadIAMManagerFromConfig loads the advanced IAM manager from configuration file -func loadIAMManagerFromConfig(configPath string, filerAddressProvider func() string) (*integration.IAMManager, error) { - // Read configuration file - configData, err := os.ReadFile(configPath) - if err != nil { - return nil, fmt.Errorf("failed to read config file: %w", err) - } - - // Parse configuration structure - var configRoot struct { - STS *sts.STSConfig `json:"sts"` - Policy *policy.PolicyEngineConfig `json:"policy"` - Providers []map[string]interface{} `json:"providers"` - Roles []*integration.RoleDefinition `json:"roles"` - Policies []struct { - Name string `json:"name"` - Document *policy.PolicyDocument `json:"document"` - } `json:"policies"` - } - - if err := json.Unmarshal(configData, &configRoot); err != nil { - return nil, fmt.Errorf("failed to parse config: %w", err) - } - - // Create IAM configuration - iamConfig := &integration.IAMConfig{ - STS: configRoot.STS, - Policy: configRoot.Policy, - Roles: &integration.RoleStoreConfig{ - StoreType: "memory", // Use memory store for JSON config-based setup - }, - } - - // Initialize IAM manager - iamManager := integration.NewIAMManager() - if err := iamManager.Initialize(iamConfig, filerAddressProvider); err != nil { - return nil, fmt.Errorf("failed to initialize IAM manager: %w", err) - } - - // Load identity providers - providerFactory := sts.NewProviderFactory() - for _, providerConfig := range configRoot.Providers { - provider, err := providerFactory.CreateProvider(&sts.ProviderConfig{ - Name: providerConfig["name"].(string), - Type: providerConfig["type"].(string), - Enabled: true, - Config: providerConfig["config"].(map[string]interface{}), - }) - if err != nil { - glog.Warningf("Failed to create provider %s: %v", providerConfig["name"], err) - continue - } - if provider != nil { - if err := iamManager.RegisterIdentityProvider(provider); err != nil { - glog.Warningf("Failed to register provider %s: %v", providerConfig["name"], err) - } else { - glog.V(1).Infof("Registered identity provider: %s", providerConfig["name"]) - } - } - } - - // Load policies - for _, policyDef := range configRoot.Policies { - if err := iamManager.CreatePolicy(context.Background(), "", policyDef.Name, policyDef.Document); err != nil { - glog.Warningf("Failed to create policy %s: %v", policyDef.Name, err) - } - } - - // Load roles - for _, roleDef := range configRoot.Roles { - if err := iamManager.CreateRole(context.Background(), "", roleDef.RoleName, roleDef); err != nil { - glog.Warningf("Failed to create role %s: %v", roleDef.RoleName, err) - } - } - - glog.V(0).Infof("Loaded %d providers, %d policies and %d roles from config", len(configRoot.Providers), len(configRoot.Policies), len(configRoot.Roles)) - - return iamManager, nil -} diff --git a/weed/s3api/s3api_server_grpc.go b/weed/s3api/s3api_server_grpc.go index 93da82c18..e93d0056f 100644 --- a/weed/s3api/s3api_server_grpc.go +++ b/weed/s3api/s3api_server_grpc.go @@ -2,7 +2,7 @@ package s3api import ( "context" - "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" ) func (s3a *S3ApiServer) Configure(ctx context.Context, request *s3_pb.S3ConfigureRequest) (*s3_pb.S3ConfigureResponse, error) { diff --git a/weed/s3api/s3api_status_handlers.go b/weed/s3api/s3api_status_handlers.go index e5d8b96b8..fafb6ac2f 100644 --- a/weed/s3api/s3api_status_handlers.go +++ b/weed/s3api/s3api_status_handlers.go @@ -1,7 +1,7 @@ package s3api import ( - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" ) diff --git a/weed/s3api/s3api_streaming_copy.go b/weed/s3api/s3api_streaming_copy.go deleted file mode 100644 index c996e6188..000000000 --- a/weed/s3api/s3api_streaming_copy.go +++ /dev/null @@ -1,561 +0,0 @@ -package s3api - -import ( - "context" - "crypto/md5" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "io" - "net/http" - - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -// StreamingCopySpec defines the specification for streaming copy operations -type StreamingCopySpec struct { - SourceReader io.Reader - TargetSize int64 - EncryptionSpec *EncryptionSpec - CompressionSpec *CompressionSpec - HashCalculation bool - BufferSize int -} - -// EncryptionSpec defines encryption parameters for streaming -type EncryptionSpec struct { - NeedsDecryption bool - NeedsEncryption bool - SourceKey interface{} // SSECustomerKey or SSEKMSKey - DestinationKey interface{} // SSECustomerKey or SSEKMSKey - SourceType EncryptionType - DestinationType EncryptionType - SourceMetadata map[string][]byte // Source metadata for IV extraction - DestinationIV []byte // Generated IV for destination -} - -// CompressionSpec defines compression parameters for streaming -type CompressionSpec struct { - IsCompressed bool - CompressionType string - NeedsDecompression bool - NeedsCompression bool -} - -// StreamingCopyManager handles streaming copy operations -type StreamingCopyManager struct { - s3a *S3ApiServer - bufferSize int -} - -// NewStreamingCopyManager creates a new streaming copy manager -func NewStreamingCopyManager(s3a *S3ApiServer) *StreamingCopyManager { - return &StreamingCopyManager{ - s3a: s3a, - bufferSize: 64 * 1024, // 64KB default buffer - } -} - -// ExecuteStreamingCopy performs a streaming copy operation -func (scm *StreamingCopyManager) ExecuteStreamingCopy(ctx context.Context, entry *filer_pb.Entry, r *http.Request, dstPath string, state *EncryptionState) ([]*filer_pb.FileChunk, error) { - // Create streaming copy specification - spec, err := scm.createStreamingSpec(entry, r, state) - if err != nil { - return nil, fmt.Errorf("create streaming spec: %w", err) - } - - // Create source reader from entry - sourceReader, err := scm.createSourceReader(entry) - if err != nil { - return nil, fmt.Errorf("create source reader: %w", err) - } - defer sourceReader.Close() - - spec.SourceReader = sourceReader - - // Create processing pipeline - processedReader, err := scm.createProcessingPipeline(spec) - if err != nil { - return nil, fmt.Errorf("create processing pipeline: %w", err) - } - - // Stream to destination - return scm.streamToDestination(ctx, processedReader, spec, dstPath) -} - -// createStreamingSpec creates a streaming specification based on copy parameters -func (scm *StreamingCopyManager) createStreamingSpec(entry *filer_pb.Entry, r *http.Request, state *EncryptionState) (*StreamingCopySpec, error) { - spec := &StreamingCopySpec{ - BufferSize: scm.bufferSize, - HashCalculation: true, - } - - // Calculate target size - sizeCalc := NewCopySizeCalculator(entry, r) - spec.TargetSize = sizeCalc.CalculateTargetSize() - - // Create encryption specification - encSpec, err := scm.createEncryptionSpec(entry, r, state) - if err != nil { - return nil, err - } - spec.EncryptionSpec = encSpec - - // Create compression specification - spec.CompressionSpec = scm.createCompressionSpec(entry, r) - - return spec, nil -} - -// createEncryptionSpec creates encryption specification for streaming -func (scm *StreamingCopyManager) createEncryptionSpec(entry *filer_pb.Entry, r *http.Request, state *EncryptionState) (*EncryptionSpec, error) { - spec := &EncryptionSpec{ - NeedsDecryption: state.IsSourceEncrypted(), - NeedsEncryption: state.IsTargetEncrypted(), - SourceMetadata: entry.Extended, // Pass source metadata for IV extraction - } - - // Set source encryption details - if state.SrcSSEC { - spec.SourceType = EncryptionTypeSSEC - sourceKey, err := ParseSSECCopySourceHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-C copy source headers: %w", err) - } - spec.SourceKey = sourceKey - } else if state.SrcSSEKMS { - spec.SourceType = EncryptionTypeSSEKMS - // Extract SSE-KMS key from metadata - if keyData, exists := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - sseKey, err := DeserializeSSEKMSMetadata(keyData) - if err != nil { - return nil, fmt.Errorf("deserialize SSE-KMS metadata: %w", err) - } - spec.SourceKey = sseKey - } - } else if state.SrcSSES3 { - spec.SourceType = EncryptionTypeSSES3 - // Extract SSE-S3 key from metadata - if keyData, exists := entry.Extended[s3_constants.SeaweedFSSSES3Key]; exists { - // TODO: This should use a proper SSE-S3 key manager from S3ApiServer - // For now, create a temporary key manager to handle deserialization - tempKeyManager := NewSSES3KeyManager() - sseKey, err := DeserializeSSES3Metadata(keyData, tempKeyManager) - if err != nil { - return nil, fmt.Errorf("deserialize SSE-S3 metadata: %w", err) - } - spec.SourceKey = sseKey - } - } - - // Set destination encryption details - if state.DstSSEC { - spec.DestinationType = EncryptionTypeSSEC - destKey, err := ParseSSECHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-C headers: %w", err) - } - spec.DestinationKey = destKey - } else if state.DstSSEKMS { - spec.DestinationType = EncryptionTypeSSEKMS - // Parse KMS parameters - keyID, encryptionContext, bucketKeyEnabled, err := ParseSSEKMSCopyHeaders(r) - if err != nil { - return nil, fmt.Errorf("parse SSE-KMS copy headers: %w", err) - } - - // Create SSE-KMS key for destination - sseKey := &SSEKMSKey{ - KeyID: keyID, - EncryptionContext: encryptionContext, - BucketKeyEnabled: bucketKeyEnabled, - } - spec.DestinationKey = sseKey - } else if state.DstSSES3 { - spec.DestinationType = EncryptionTypeSSES3 - // Generate or retrieve SSE-S3 key - keyManager := GetSSES3KeyManager() - sseKey, err := keyManager.GetOrCreateKey("") - if err != nil { - return nil, fmt.Errorf("get SSE-S3 key: %w", err) - } - spec.DestinationKey = sseKey - } - - return spec, nil -} - -// createCompressionSpec creates compression specification for streaming -func (scm *StreamingCopyManager) createCompressionSpec(entry *filer_pb.Entry, r *http.Request) *CompressionSpec { - return &CompressionSpec{ - IsCompressed: isCompressedEntry(entry), - // For now, we don't change compression during copy - NeedsDecompression: false, - NeedsCompression: false, - } -} - -// createSourceReader creates a reader for the source entry -func (scm *StreamingCopyManager) createSourceReader(entry *filer_pb.Entry) (io.ReadCloser, error) { - // Create a multi-chunk reader that streams from all chunks - return scm.s3a.createMultiChunkReader(entry) -} - -// createProcessingPipeline creates a processing pipeline for the copy operation -func (scm *StreamingCopyManager) createProcessingPipeline(spec *StreamingCopySpec) (io.Reader, error) { - reader := spec.SourceReader - - // Add decryption if needed - if spec.EncryptionSpec.NeedsDecryption { - decryptedReader, err := scm.createDecryptionReader(reader, spec.EncryptionSpec) - if err != nil { - return nil, fmt.Errorf("create decryption reader: %w", err) - } - reader = decryptedReader - } - - // Add decompression if needed - if spec.CompressionSpec.NeedsDecompression { - decompressedReader, err := scm.createDecompressionReader(reader, spec.CompressionSpec) - if err != nil { - return nil, fmt.Errorf("create decompression reader: %w", err) - } - reader = decompressedReader - } - - // Add compression if needed - if spec.CompressionSpec.NeedsCompression { - compressedReader, err := scm.createCompressionReader(reader, spec.CompressionSpec) - if err != nil { - return nil, fmt.Errorf("create compression reader: %w", err) - } - reader = compressedReader - } - - // Add encryption if needed - if spec.EncryptionSpec.NeedsEncryption { - encryptedReader, err := scm.createEncryptionReader(reader, spec.EncryptionSpec) - if err != nil { - return nil, fmt.Errorf("create encryption reader: %w", err) - } - reader = encryptedReader - } - - // Add hash calculation if needed - if spec.HashCalculation { - reader = scm.createHashReader(reader) - } - - return reader, nil -} - -// createDecryptionReader creates a decryption reader based on encryption type -func (scm *StreamingCopyManager) createDecryptionReader(reader io.Reader, encSpec *EncryptionSpec) (io.Reader, error) { - switch encSpec.SourceType { - case EncryptionTypeSSEC: - if sourceKey, ok := encSpec.SourceKey.(*SSECustomerKey); ok { - // Get IV from metadata - iv, err := GetIVFromMetadata(encSpec.SourceMetadata) - if err != nil { - return nil, fmt.Errorf("get IV from metadata: %w", err) - } - return CreateSSECDecryptedReader(reader, sourceKey, iv) - } - return nil, fmt.Errorf("invalid SSE-C source key type") - - case EncryptionTypeSSEKMS: - if sseKey, ok := encSpec.SourceKey.(*SSEKMSKey); ok { - return CreateSSEKMSDecryptedReader(reader, sseKey) - } - return nil, fmt.Errorf("invalid SSE-KMS source key type") - - case EncryptionTypeSSES3: - if sseKey, ok := encSpec.SourceKey.(*SSES3Key); ok { - // Get IV from metadata - iv, err := GetIVFromMetadata(encSpec.SourceMetadata) - if err != nil { - return nil, fmt.Errorf("get IV from metadata: %w", err) - } - return CreateSSES3DecryptedReader(reader, sseKey, iv) - } - return nil, fmt.Errorf("invalid SSE-S3 source key type") - - default: - return reader, nil - } -} - -// createEncryptionReader creates an encryption reader based on encryption type -func (scm *StreamingCopyManager) createEncryptionReader(reader io.Reader, encSpec *EncryptionSpec) (io.Reader, error) { - switch encSpec.DestinationType { - case EncryptionTypeSSEC: - if destKey, ok := encSpec.DestinationKey.(*SSECustomerKey); ok { - encryptedReader, iv, err := CreateSSECEncryptedReader(reader, destKey) - if err != nil { - return nil, err - } - // Store IV in destination metadata (this would need to be handled by caller) - encSpec.DestinationIV = iv - return encryptedReader, nil - } - return nil, fmt.Errorf("invalid SSE-C destination key type") - - case EncryptionTypeSSEKMS: - if sseKey, ok := encSpec.DestinationKey.(*SSEKMSKey); ok { - encryptedReader, updatedKey, err := CreateSSEKMSEncryptedReaderWithBucketKey(reader, sseKey.KeyID, sseKey.EncryptionContext, sseKey.BucketKeyEnabled) - if err != nil { - return nil, err - } - // Store IV from the updated key - encSpec.DestinationIV = updatedKey.IV - return encryptedReader, nil - } - return nil, fmt.Errorf("invalid SSE-KMS destination key type") - - case EncryptionTypeSSES3: - if sseKey, ok := encSpec.DestinationKey.(*SSES3Key); ok { - encryptedReader, iv, err := CreateSSES3EncryptedReader(reader, sseKey) - if err != nil { - return nil, err - } - // Store IV for metadata - encSpec.DestinationIV = iv - return encryptedReader, nil - } - return nil, fmt.Errorf("invalid SSE-S3 destination key type") - - default: - return reader, nil - } -} - -// createDecompressionReader creates a decompression reader -func (scm *StreamingCopyManager) createDecompressionReader(reader io.Reader, compSpec *CompressionSpec) (io.Reader, error) { - if !compSpec.NeedsDecompression { - return reader, nil - } - - switch compSpec.CompressionType { - case "gzip": - // Use SeaweedFS's streaming gzip decompression - pr, pw := io.Pipe() - go func() { - defer pw.Close() - _, err := util.GunzipStream(pw, reader) - if err != nil { - pw.CloseWithError(fmt.Errorf("gzip decompression failed: %v", err)) - } - }() - return pr, nil - default: - // Unknown compression type, return as-is - return reader, nil - } -} - -// createCompressionReader creates a compression reader -func (scm *StreamingCopyManager) createCompressionReader(reader io.Reader, compSpec *CompressionSpec) (io.Reader, error) { - if !compSpec.NeedsCompression { - return reader, nil - } - - switch compSpec.CompressionType { - case "gzip": - // Use SeaweedFS's streaming gzip compression - pr, pw := io.Pipe() - go func() { - defer pw.Close() - _, err := util.GzipStream(pw, reader) - if err != nil { - pw.CloseWithError(fmt.Errorf("gzip compression failed: %v", err)) - } - }() - return pr, nil - default: - // Unknown compression type, return as-is - return reader, nil - } -} - -// HashReader wraps an io.Reader to calculate MD5 and SHA256 hashes -type HashReader struct { - reader io.Reader - md5Hash hash.Hash - sha256Hash hash.Hash -} - -// NewHashReader creates a new hash calculating reader -func NewHashReader(reader io.Reader) *HashReader { - return &HashReader{ - reader: reader, - md5Hash: md5.New(), - sha256Hash: sha256.New(), - } -} - -// Read implements io.Reader and calculates hashes as data flows through -func (hr *HashReader) Read(p []byte) (n int, err error) { - n, err = hr.reader.Read(p) - if n > 0 { - // Update both hashes with the data read - hr.md5Hash.Write(p[:n]) - hr.sha256Hash.Write(p[:n]) - } - return n, err -} - -// MD5Sum returns the current MD5 hash -func (hr *HashReader) MD5Sum() []byte { - return hr.md5Hash.Sum(nil) -} - -// SHA256Sum returns the current SHA256 hash -func (hr *HashReader) SHA256Sum() []byte { - return hr.sha256Hash.Sum(nil) -} - -// MD5Hex returns the MD5 hash as a hex string -func (hr *HashReader) MD5Hex() string { - return hex.EncodeToString(hr.MD5Sum()) -} - -// SHA256Hex returns the SHA256 hash as a hex string -func (hr *HashReader) SHA256Hex() string { - return hex.EncodeToString(hr.SHA256Sum()) -} - -// createHashReader creates a hash calculation reader -func (scm *StreamingCopyManager) createHashReader(reader io.Reader) io.Reader { - return NewHashReader(reader) -} - -// streamToDestination streams the processed data to the destination -func (scm *StreamingCopyManager) streamToDestination(ctx context.Context, reader io.Reader, spec *StreamingCopySpec, dstPath string) ([]*filer_pb.FileChunk, error) { - // For now, we'll use the existing chunk-based approach - // In a full implementation, this would stream directly to the destination - // without creating intermediate chunks - - // This is a placeholder that converts back to chunk-based approach - // A full streaming implementation would write directly to the destination - return scm.streamToChunks(ctx, reader, spec, dstPath) -} - -// streamToChunks converts streaming data back to chunks (temporary implementation) -func (scm *StreamingCopyManager) streamToChunks(ctx context.Context, reader io.Reader, spec *StreamingCopySpec, dstPath string) ([]*filer_pb.FileChunk, error) { - // This is a simplified implementation that reads the stream and creates chunks - // A full implementation would be more sophisticated - - var chunks []*filer_pb.FileChunk - buffer := make([]byte, spec.BufferSize) - offset := int64(0) - - for { - n, err := reader.Read(buffer) - if n > 0 { - // Create chunk for this data - chunk, chunkErr := scm.createChunkFromData(buffer[:n], offset, dstPath) - if chunkErr != nil { - return nil, fmt.Errorf("create chunk from data: %w", chunkErr) - } - chunks = append(chunks, chunk) - offset += int64(n) - } - - if err == io.EOF { - break - } - if err != nil { - return nil, fmt.Errorf("read stream: %w", err) - } - } - - return chunks, nil -} - -// createChunkFromData creates a chunk from streaming data -func (scm *StreamingCopyManager) createChunkFromData(data []byte, offset int64, dstPath string) (*filer_pb.FileChunk, error) { - // Assign new volume - assignResult, err := scm.s3a.assignNewVolume(dstPath) - if err != nil { - return nil, fmt.Errorf("assign volume: %w", err) - } - - // Create chunk - chunk := &filer_pb.FileChunk{ - Offset: offset, - Size: uint64(len(data)), - } - - // Set file ID - if err := scm.s3a.setChunkFileId(chunk, assignResult); err != nil { - return nil, err - } - - // Upload data - if err := scm.s3a.uploadChunkData(data, assignResult); err != nil { - return nil, fmt.Errorf("upload chunk data: %w", err) - } - - return chunk, nil -} - -// createMultiChunkReader creates a reader that streams from multiple chunks -func (s3a *S3ApiServer) createMultiChunkReader(entry *filer_pb.Entry) (io.ReadCloser, error) { - // Create a multi-reader that combines all chunks - var readers []io.Reader - - for _, chunk := range entry.GetChunks() { - chunkReader, err := s3a.createChunkReader(chunk) - if err != nil { - return nil, fmt.Errorf("create chunk reader: %w", err) - } - readers = append(readers, chunkReader) - } - - multiReader := io.MultiReader(readers...) - return &multiReadCloser{reader: multiReader}, nil -} - -// createChunkReader creates a reader for a single chunk -func (s3a *S3ApiServer) createChunkReader(chunk *filer_pb.FileChunk) (io.Reader, error) { - // Get chunk URL - srcUrl, err := s3a.lookupVolumeUrl(chunk.GetFileIdString()) - if err != nil { - return nil, fmt.Errorf("lookup volume URL: %w", err) - } - - // Create HTTP request for chunk data - req, err := http.NewRequest("GET", srcUrl, nil) - if err != nil { - return nil, fmt.Errorf("create HTTP request: %w", err) - } - - // Execute request - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, fmt.Errorf("execute HTTP request: %w", err) - } - - if resp.StatusCode != http.StatusOK { - resp.Body.Close() - return nil, fmt.Errorf("HTTP request failed: %d", resp.StatusCode) - } - - return resp.Body, nil -} - -// multiReadCloser wraps a multi-reader with a close method -type multiReadCloser struct { - reader io.Reader -} - -func (mrc *multiReadCloser) Read(p []byte) (int, error) { - return mrc.reader.Read(p) -} - -func (mrc *multiReadCloser) Close() error { - return nil -} diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go index 80dced599..6fcf8b165 100644 --- a/weed/s3api/s3api_test.go +++ b/weed/s3api/s3api_test.go @@ -1,7 +1,7 @@ package s3api import ( - "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "testing" "time" ) diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go index 79300cf4f..dd6a32ff2 100644 --- a/weed/s3api/s3api_xsd_generated.go +++ b/weed/s3api/s3api_xsd_generated.go @@ -1,5 +1,3 @@ -// Code generated by xsdgen. DO NOT EDIT. - package s3api import ( @@ -19,546 +17,11 @@ type AccessControlPolicy struct { } type AmazonCustomerByEmail struct { - EmailAddress string `xml:"EmailAddress"` -} - -type Anon1 struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon1) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon1 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon1) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon1 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon10 struct { -} - -type Anon11 struct { - Bucket string `xml:"Bucket"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon11) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon11 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon11) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon11 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon12 struct { -} - -type Anon13 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - GetMetadata bool `xml:"GetMetadata"` - GetData bool `xml:"GetData"` - InlineData bool `xml:"InlineData"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon13) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon13 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon13) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon13 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon14 struct { - GetObjectResponse GetObjectResult `xml:"GetObjectResponse"` -} - -type Anon15 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - GetMetadata bool `xml:"GetMetadata"` - GetData bool `xml:"GetData"` - InlineData bool `xml:"InlineData"` - ByteRangeStart int64 `xml:"ByteRangeStart,omitempty"` - ByteRangeEnd int64 `xml:"ByteRangeEnd,omitempty"` - IfModifiedSince time.Time `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince time.Time `xml:"IfUnmodifiedSince,omitempty"` - IfMatch []string `xml:"IfMatch,omitempty"` - IfNoneMatch []string `xml:"IfNoneMatch,omitempty"` - ReturnCompleteObjectOnConditionFailure bool `xml:"ReturnCompleteObjectOnConditionFailure,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon15) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon15 - var layout struct { - *T - IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince) - layout.IfUnmodifiedSince = (*xsdDateTime)(&layout.T.IfUnmodifiedSince) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon15) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon15 - var overlay struct { - *T - IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince) - overlay.IfUnmodifiedSince = (*xsdDateTime)(&overlay.T.IfUnmodifiedSince) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon16 struct { - GetObjectResponse GetObjectResult `xml:"GetObjectResponse"` -} - -type Anon17 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - ContentLength int64 `xml:"ContentLength"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon17) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon17 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon17) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon17 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon18 struct { - PutObjectResponse PutObjectResult `xml:"PutObjectResponse"` -} - -type Anon19 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - Data []byte `xml:"Data"` - ContentLength int64 `xml:"ContentLength"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon19) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon19 - var layout struct { - *T - Data *xsdBase64Binary `xml:"Data"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Data = (*xsdBase64Binary)(&layout.T.Data) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon19) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon19 - var overlay struct { - *T - Data *xsdBase64Binary `xml:"Data"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Data = (*xsdBase64Binary)(&overlay.T.Data) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon2 struct { - GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"GetBucketLoggingStatusResponse"` -} - -type Anon20 struct { - PutObjectInlineResponse PutObjectResult `xml:"PutObjectInlineResponse"` -} - -type Anon21 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon21) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon21 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon21) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon21 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon22 struct { - DeleteObjectResponse Status `xml:"DeleteObjectResponse"` -} - -type Anon23 struct { - Bucket string `xml:"Bucket"` - Prefix string `xml:"Prefix,omitempty"` - Marker string `xml:"Marker,omitempty"` - MaxKeys int `xml:"MaxKeys,omitempty"` - Delimiter string `xml:"Delimiter,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon23) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon23 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon23) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon23 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon24 struct { - ListBucketResponse ListBucketResult `xml:"ListBucketResponse"` -} - -type Anon25 struct { - ListVersionsResponse ListVersionsResult `xml:"ListVersionsResponse"` -} - -type Anon26 struct { - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` -} - -func (t *Anon26) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon26 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon26) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon26 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon27 struct { - ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"ListAllMyBucketsResponse"` -} - -type Anon28 struct { - Location string `xml:"Location"` - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - ETag string `xml:"ETag"` -} - -type Anon29 struct { - SourceBucket string `xml:"SourceBucket"` - SourceKey string `xml:"SourceKey"` - DestinationBucket string `xml:"DestinationBucket"` - DestinationKey string `xml:"DestinationKey"` - MetadataDirective MetadataDirective `xml:"MetadataDirective,omitempty"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - CopySourceIfModifiedSince time.Time `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince time.Time `xml:"CopySourceIfUnmodifiedSince,omitempty"` - CopySourceIfMatch []string `xml:"CopySourceIfMatch,omitempty"` - CopySourceIfNoneMatch []string `xml:"CopySourceIfNoneMatch,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon29) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon29 - var layout struct { - *T - CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince) - layout.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfUnmodifiedSince) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon29) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon29 - var overlay struct { - *T - CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince) - overlay.CopySourceIfUnmodifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfUnmodifiedSince) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon3 struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` - BucketLoggingStatus BucketLoggingStatus `xml:"BucketLoggingStatus"` -} - -func (t *Anon3) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon3 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon3) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon3 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon30 struct { - CopyObjectResult CopyObjectResult `xml:"CopyObjectResult"` -} - -type Anon4 struct { -} - -type Anon5 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon5) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon5 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon5) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon5 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon6 struct { - GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"GetObjectAccessControlPolicyResponse"` -} - -type Anon7 struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon7) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon7 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon7) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon7 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) -} - -type Anon8 struct { - GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"GetBucketAccessControlPolicyResponse"` -} - -type Anon9 struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AccessControlList AccessControlList `xml:"AccessControlList"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` -} - -func (t *Anon9) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type T Anon9 - var layout struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - layout.T = (*T)(t) - layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) - return e.EncodeElement(layout, start) -} -func (t *Anon9) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type T Anon9 - var overlay struct { - *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` - } - overlay.T = (*T)(t) - overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) - return d.DecodeElement(&overlay, &start) + EmailAddress string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ EmailAddress"` } type BucketLoggingStatus struct { - LoggingEnabled LoggingSettings `xml:"LoggingEnabled,omitempty"` + LoggingEnabled LoggingSettings `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LoggingEnabled,omitempty"` } type CanonicalUser struct { @@ -567,31 +30,31 @@ type CanonicalUser struct { } type CopyObject struct { - SourceBucket string `xml:"SourceBucket"` - SourceKey string `xml:"SourceKey"` - DestinationBucket string `xml:"DestinationBucket"` - DestinationKey string `xml:"DestinationKey"` - MetadataDirective MetadataDirective `xml:"MetadataDirective,omitempty"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - CopySourceIfModifiedSince time.Time `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince time.Time `xml:"CopySourceIfUnmodifiedSince,omitempty"` - CopySourceIfMatch []string `xml:"CopySourceIfMatch,omitempty"` - CopySourceIfNoneMatch []string `xml:"CopySourceIfNoneMatch,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + SourceBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceBucket"` + SourceKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ SourceKey"` + DestinationBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationBucket"` + DestinationKey string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DestinationKey"` + MetadataDirective MetadataDirective `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MetadataDirective,omitempty"` + Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"` + CopySourceIfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"` + CopySourceIfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"` + CopySourceIfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfMatch,omitempty"` + CopySourceIfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfNoneMatch,omitempty"` + StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *CopyObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T CopyObject var layout struct { *T - CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"` + CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.CopySourceIfModifiedSince = (*xsdDateTime)(&layout.T.CopySourceIfModifiedSince) @@ -603,9 +66,9 @@ func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error type T CopyObject var overlay struct { *T - CopySourceIfModifiedSince *xsdDateTime `xml:"CopySourceIfModifiedSince,omitempty"` - CopySourceIfUnmodifiedSince *xsdDateTime `xml:"CopySourceIfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + CopySourceIfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfModifiedSince,omitempty"` + CopySourceIfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopySourceIfUnmodifiedSince,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.CopySourceIfModifiedSince = (*xsdDateTime)(&overlay.T.CopySourceIfModifiedSince) @@ -615,19 +78,19 @@ func (t *CopyObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error } type CopyObjectResponse struct { - CopyObjectResult CopyObjectResult `xml:"CopyObjectResult"` + CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"` } type CopyObjectResult struct { - LastModified time.Time `xml:"LastModified"` - ETag string `xml:"ETag"` + LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` } func (t *CopyObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T CopyObjectResult var layout struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -637,7 +100,7 @@ func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) type T CopyObjectResult var overlay struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -645,18 +108,18 @@ func (t *CopyObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type CreateBucket struct { - Bucket string `xml:"Bucket"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` } func (t *CreateBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T CreateBucket var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -666,7 +129,7 @@ func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro type T CreateBucket var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -674,30 +137,30 @@ func (t *CreateBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } type CreateBucketConfiguration struct { - LocationConstraint string `xml:"LocationConstraint"` + LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"` } type CreateBucketResponse struct { - CreateBucketReturn CreateBucketResult `xml:"CreateBucketReturn"` + CreateBucketReturn CreateBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketReturn"` } type CreateBucketResult struct { - BucketName string `xml:"BucketName"` + BucketName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketName"` } type DeleteBucket struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *DeleteBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T DeleteBucket var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -707,7 +170,7 @@ func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro type T DeleteBucket var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -715,22 +178,22 @@ func (t *DeleteBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } type DeleteBucketResponse struct { - DeleteBucketResponse Status `xml:"DeleteBucketResponse"` + DeleteBucketResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteBucketResponse"` } type DeleteMarkerEntry struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId"` - IsLatest bool `xml:"IsLatest"` - LastModified time.Time `xml:"LastModified"` - Owner CanonicalUser `xml:"Owner,omitempty"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"` + IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"` + LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"` } func (t *DeleteMarkerEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T DeleteMarkerEntry var layout struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -740,7 +203,7 @@ func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) type T DeleteMarkerEntry var overlay struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -748,19 +211,19 @@ func (t *DeleteMarkerEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type DeleteObject struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *DeleteObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T DeleteObject var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -770,7 +233,7 @@ func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro type T DeleteObject var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -778,22 +241,22 @@ func (t *DeleteObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } type DeleteObjectResponse struct { - DeleteObjectResponse Status `xml:"DeleteObjectResponse"` + DeleteObjectResponse Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteObjectResponse"` } type GetBucketAccessControlPolicy struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *GetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetBucketAccessControlPolicy var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -803,7 +266,7 @@ func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St type T GetBucketAccessControlPolicy var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -811,22 +274,22 @@ func (t *GetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St } type GetBucketAccessControlPolicyResponse struct { - GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"GetBucketAccessControlPolicyResponse"` + GetBucketAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketAccessControlPolicyResponse"` } type GetBucketLoggingStatus struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *GetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetBucketLoggingStatus var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -836,7 +299,7 @@ func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle type T GetBucketLoggingStatus var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -844,26 +307,26 @@ func (t *GetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle } type GetBucketLoggingStatusResponse struct { - GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"GetBucketLoggingStatusResponse"` + GetBucketLoggingStatusResponse BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetBucketLoggingStatusResponse"` } type GetObject struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - GetMetadata bool `xml:"GetMetadata"` - GetData bool `xml:"GetData"` - InlineData bool `xml:"InlineData"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"` + GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"` + InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *GetObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetObject var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -873,7 +336,7 @@ func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { type T GetObject var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -881,19 +344,19 @@ func (t *GetObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { } type GetObjectAccessControlPolicy struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *GetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetObjectAccessControlPolicy var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -903,7 +366,7 @@ func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St type T GetObjectAccessControlPolicy var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -911,35 +374,35 @@ func (t *GetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St } type GetObjectAccessControlPolicyResponse struct { - GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"GetObjectAccessControlPolicyResponse"` + GetObjectAccessControlPolicyResponse AccessControlPolicy `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectAccessControlPolicyResponse"` } type GetObjectExtended struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - GetMetadata bool `xml:"GetMetadata"` - GetData bool `xml:"GetData"` - InlineData bool `xml:"InlineData"` - ByteRangeStart int64 `xml:"ByteRangeStart,omitempty"` - ByteRangeEnd int64 `xml:"ByteRangeEnd,omitempty"` - IfModifiedSince time.Time `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince time.Time `xml:"IfUnmodifiedSince,omitempty"` - IfMatch []string `xml:"IfMatch,omitempty"` - IfNoneMatch []string `xml:"IfNoneMatch,omitempty"` - ReturnCompleteObjectOnConditionFailure bool `xml:"ReturnCompleteObjectOnConditionFailure,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + GetMetadata bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetMetadata"` + GetData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetData"` + InlineData bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InlineData"` + ByteRangeStart int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeStart,omitempty"` + ByteRangeEnd int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ByteRangeEnd,omitempty"` + IfModifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"` + IfUnmodifiedSince time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"` + IfMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfMatch,omitempty"` + IfNoneMatch []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfNoneMatch,omitempty"` + ReturnCompleteObjectOnConditionFailure bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ReturnCompleteObjectOnConditionFailure,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *GetObjectExtended) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetObjectExtended var layout struct { *T - IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"` + IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.IfModifiedSince = (*xsdDateTime)(&layout.T.IfModifiedSince) @@ -951,9 +414,9 @@ func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement) type T GetObjectExtended var overlay struct { *T - IfModifiedSince *xsdDateTime `xml:"IfModifiedSince,omitempty"` - IfUnmodifiedSince *xsdDateTime `xml:"IfUnmodifiedSince,omitempty"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + IfModifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfModifiedSince,omitempty"` + IfUnmodifiedSince *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IfUnmodifiedSince,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.IfModifiedSince = (*xsdDateTime)(&overlay.T.IfModifiedSince) @@ -963,27 +426,27 @@ func (t *GetObjectExtended) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type GetObjectExtendedResponse struct { - GetObjectResponse GetObjectResult `xml:"GetObjectResponse"` + GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"` } type GetObjectResponse struct { - GetObjectResponse GetObjectResult `xml:"GetObjectResponse"` + GetObjectResponse GetObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectResponse"` } type GetObjectResult struct { - Status Status `xml:"Status"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - Data []byte `xml:"Data,omitempty"` - LastModified time.Time `xml:"LastModified"` - ETag string `xml:"ETag"` + Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` + Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"` + LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` + Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"` } func (t *GetObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T GetObjectResult var layout struct { *T - Data *xsdBase64Binary `xml:"Data,omitempty"` - LastModified *xsdDateTime `xml:"LastModified"` + Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } layout.T = (*T)(t) layout.Data = (*xsdBase64Binary)(&layout.T.Data) @@ -994,8 +457,8 @@ func (t *GetObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e type T GetObjectResult var overlay struct { *T - Data *xsdBase64Binary `xml:"Data,omitempty"` - LastModified *xsdDateTime `xml:"LastModified"` + Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data,omitempty"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } overlay.T = (*T)(t) overlay.Data = (*xsdBase64Binary)(&overlay.T.Data) @@ -1008,21 +471,30 @@ type Grant struct { Permission Permission `xml:"Permission"` } +type Grantee struct { + XMLNS string `xml:"xmlns:xsi,attr"` + XMLXSI string `xml:"xsi:type,attr"` + Type string `xml:"Type"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI string `xml:"URI,omitempty"` +} + type Group struct { - URI string `xml:"URI"` + URI string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ URI"` } type ListAllMyBuckets struct { - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` } func (t *ListAllMyBuckets) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListAllMyBuckets var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1032,7 +504,7 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) type T ListAllMyBuckets var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1070,32 +542,26 @@ type ListAllMyBucketsList struct { } type ListAllMyBucketsResponse struct { - ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"ListAllMyBucketsResponse"` -} - -type ListAllMyBucketsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` - Owner CanonicalUser `xml:"Owner"` - Buckets ListAllMyBucketsList `xml:"Buckets"` + ListAllMyBucketsResponse ListAllMyBucketsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResponse"` } type ListBucket struct { - Bucket string `xml:"Bucket"` - Prefix string `xml:"Prefix,omitempty"` - Marker string `xml:"Marker,omitempty"` - MaxKeys int `xml:"MaxKeys,omitempty"` - Delimiter string `xml:"Delimiter,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix,omitempty"` + Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker,omitempty"` + MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys,omitempty"` + Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *ListBucket) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListBucket var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1105,7 +571,7 @@ func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error type T ListBucket var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1113,10 +579,11 @@ func (t *ListBucket) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error } type ListBucketResponse struct { - ListBucketResponse ListBucketResult `xml:"ListBucketResponse"` + ListBucketResponse ListBucketResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResponse"` } type ListBucketResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` Metadata []MetadataEntry `xml:"Metadata,omitempty"` Name string `xml:"Name"` Prefix string `xml:"Prefix"` @@ -1127,16 +594,15 @@ type ListBucketResult struct { IsTruncated bool `xml:"IsTruncated"` Contents []ListEntry `xml:"Contents,omitempty"` CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` - EncodingType string `xml:"EncodingType"` } type ListEntry struct { - Key string `xml:"Key"` - LastModified time.Time `xml:"LastModified"` - ETag string `xml:"ETag"` - Size int64 `xml:"Size"` - Owner *CanonicalUser `xml:"Owner,omitempty"` - StorageClass StorageClass `xml:"StorageClass"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` + Owner CanonicalUser `xml:"Owner,omitempty"` + StorageClass StorageClass `xml:"StorageClass"` } func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { @@ -1161,44 +627,48 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { } type ListVersionsResponse struct { - ListVersionsResponse ListVersionsResult `xml:"ListVersionsResponse"` + ListVersionsResponse ListVersionsResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResponse"` } type ListVersionsResult struct { - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - Name string `xml:"Name"` - Prefix string `xml:"Prefix"` - KeyMarker string `xml:"KeyMarker"` - VersionIdMarker string `xml:"VersionIdMarker"` - NextKeyMarker string `xml:"NextKeyMarker,omitempty"` - NextVersionIdMarker string `xml:"NextVersionIdMarker,omitempty"` - MaxKeys int `xml:"MaxKeys"` - Delimiter string `xml:"Delimiter,omitempty"` - IsTruncated bool `xml:"IsTruncated"` - Version VersionEntry `xml:"Version,omitempty"` - DeleteMarker DeleteMarkerEntry `xml:"DeleteMarker,omitempty"` - CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` + Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` + Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` + Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"` + KeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ KeyMarker"` + VersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionIdMarker"` + NextKeyMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextKeyMarker,omitempty"` + NextVersionIdMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextVersionIdMarker,omitempty"` + MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"` + Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"` + IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"` + Version VersionEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version"` + DeleteMarker DeleteMarkerEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker"` + CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"` +} + +type LocationConstraint struct { + LocationConstraint string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"` } type LoggingSettings struct { - TargetBucket string `xml:"TargetBucket"` - TargetPrefix string `xml:"TargetPrefix"` - TargetGrants AccessControlList `xml:"TargetGrants,omitempty"` + TargetBucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetBucket"` + TargetPrefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetPrefix"` + TargetGrants AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TargetGrants,omitempty"` } // May be one of COPY, REPLACE type MetadataDirective string type MetadataEntry struct { - Name string `xml:"Name"` - Value string `xml:"Value"` + Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` + Value string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Value"` } // May be one of Enabled, Disabled type MfaDeleteStatus string type NotificationConfiguration struct { - TopicConfiguration []TopicConfiguration `xml:"TopicConfiguration,omitempty"` + TopicConfiguration []TopicConfiguration `xml:"http://s3.amazonaws.com/doc/2006-03-01/ TopicConfiguration,omitempty"` } // May be one of BucketOwner, Requester @@ -1208,10 +678,10 @@ type Payer string type Permission string type PostResponse struct { - Location string `xml:"Location"` - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - ETag string `xml:"ETag"` + Location string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Location"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` } type PrefixEntry struct { @@ -1219,23 +689,23 @@ type PrefixEntry struct { } type PutObject struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - ContentLength int64 `xml:"ContentLength"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` + ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"` + StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *PutObject) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T PutObject var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1245,7 +715,7 @@ func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { type T PutObject var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1253,25 +723,25 @@ func (t *PutObject) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { } type PutObjectInline struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - Metadata []MetadataEntry `xml:"Metadata,omitempty"` - Data []byte `xml:"Data"` - ContentLength int64 `xml:"ContentLength"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - StorageClass StorageClass `xml:"StorageClass,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` + Data []byte `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"` + ContentLength int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ContentLength"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"` + StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *PutObjectInline) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T PutObjectInline var layout struct { *T - Data *xsdBase64Binary `xml:"Data"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Data = (*xsdBase64Binary)(&layout.T.Data) @@ -1282,8 +752,8 @@ func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e type T PutObjectInline var overlay struct { *T - Data *xsdBase64Binary `xml:"Data"` - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Data *xsdBase64Binary `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Data"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Data = (*xsdBase64Binary)(&overlay.T.Data) @@ -1292,23 +762,23 @@ func (t *PutObjectInline) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e } type PutObjectInlineResponse struct { - PutObjectInlineResponse PutObjectResult `xml:"PutObjectInlineResponse"` + PutObjectInlineResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectInlineResponse"` } type PutObjectResponse struct { - PutObjectResponse PutObjectResult `xml:"PutObjectResponse"` + PutObjectResponse PutObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PutObjectResponse"` } type PutObjectResult struct { - ETag string `xml:"ETag"` - LastModified time.Time `xml:"LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` + LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } func (t *PutObjectResult) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T PutObjectResult var layout struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -1318,7 +788,7 @@ func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e type T PutObjectResult var overlay struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -1326,27 +796,27 @@ func (t *PutObjectResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e } type RequestPaymentConfiguration struct { - Payer Payer `xml:"Payer"` + Payer Payer `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Payer"` } type Result struct { - Status Status `xml:"Status"` + Status Status `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status"` } type SetBucketAccessControlPolicy struct { - Bucket string `xml:"Bucket"` - AccessControlList AccessControlList `xml:"AccessControlList,omitempty"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList,omitempty"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *SetBucketAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T SetBucketAccessControlPolicy var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1356,7 +826,7 @@ func (t *SetBucketAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St type T SetBucketAccessControlPolicy var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1367,19 +837,19 @@ type SetBucketAccessControlPolicyResponse struct { } type SetBucketLoggingStatus struct { - Bucket string `xml:"Bucket"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` - BucketLoggingStatus BucketLoggingStatus `xml:"BucketLoggingStatus"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` + BucketLoggingStatus BucketLoggingStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ BucketLoggingStatus"` } func (t *SetBucketLoggingStatus) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T SetBucketLoggingStatus var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1389,7 +859,7 @@ func (t *SetBucketLoggingStatus) UnmarshalXML(d *xml.Decoder, start xml.StartEle type T SetBucketLoggingStatus var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1400,20 +870,20 @@ type SetBucketLoggingStatusResponse struct { } type SetObjectAccessControlPolicy struct { - Bucket string `xml:"Bucket"` - Key string `xml:"Key"` - AccessControlList AccessControlList `xml:"AccessControlList"` - AWSAccessKeyId string `xml:"AWSAccessKeyId,omitempty"` - Timestamp time.Time `xml:"Timestamp,omitempty"` - Signature string `xml:"Signature,omitempty"` - Credential string `xml:"Credential,omitempty"` + Bucket string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + AccessControlList AccessControlList `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlList"` + AWSAccessKeyId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AWSAccessKeyId,omitempty"` + Timestamp time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` + Signature string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Signature,omitempty"` + Credential string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Credential,omitempty"` } func (t *SetObjectAccessControlPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T SetObjectAccessControlPolicy var layout struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } layout.T = (*T)(t) layout.Timestamp = (*xsdDateTime)(&layout.T.Timestamp) @@ -1423,7 +893,7 @@ func (t *SetObjectAccessControlPolicy) UnmarshalXML(d *xml.Decoder, start xml.St type T SetObjectAccessControlPolicy var overlay struct { *T - Timestamp *xsdDateTime `xml:"Timestamp,omitempty"` + Timestamp *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Timestamp,omitempty"` } overlay.T = (*T)(t) overlay.Timestamp = (*xsdDateTime)(&overlay.T.Timestamp) @@ -1434,37 +904,37 @@ type SetObjectAccessControlPolicyResponse struct { } type Status struct { - Code int `xml:"Code"` - Description string `xml:"Description"` + Code int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Code"` + Description string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Description"` } // May be one of STANDARD, REDUCED_REDUNDANCY, GLACIER, UNKNOWN type StorageClass string type TopicConfiguration struct { - Topic string `xml:"Topic"` - Event []string `xml:"Event"` + Topic string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Topic"` + Event []string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Event"` } type User struct { } type VersionEntry struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId"` - IsLatest bool `xml:"IsLatest"` - LastModified time.Time `xml:"LastModified"` - ETag string `xml:"ETag"` - Size int64 `xml:"Size"` - Owner CanonicalUser `xml:"Owner,omitempty"` - StorageClass StorageClass `xml:"StorageClass"` + Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` + VersionId string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersionId"` + IsLatest bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsLatest"` + LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` + Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"` + Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"` + StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"` } func (t *VersionEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T VersionEntry var layout struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -1474,7 +944,7 @@ func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro type T VersionEntry var overlay struct { *T - LastModified *xsdDateTime `xml:"LastModified"` + LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -1482,8 +952,8 @@ func (t *VersionEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } type VersioningConfiguration struct { - Status VersioningStatus `xml:"Status,omitempty"` - MfaDelete MfaDeleteStatus `xml:"MfaDelete,omitempty"` + Status VersioningStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Status,omitempty"` + MfaDelete MfaDeleteStatus `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MfaDelete,omitempty"` } // May be one of Enabled, Suspended @@ -1506,10 +976,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) { type xsdDateTime time.Time func (t *xsdDateTime) UnmarshalText(text []byte) error { - return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999") + return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat) } func (t xsdDateTime) MarshalText() ([]byte, error) { - return _marshalTime((time.Time)(t), "2006-01-02T15:04:05.999999999") + return []byte((time.Time)(t).Format(s3TimeFormat)), nil } func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if (time.Time)(t).IsZero() { @@ -1536,6 +1006,3 @@ func _unmarshalTime(text []byte, t *time.Time, format string) (err error) { } return err } -func _marshalTime(t time.Time, format string) ([]byte, error) { - return []byte(t.Format(format + "Z07:00")), nil -} diff --git a/weed/s3api/s3api_xsd_generated_helper.go b/weed/s3api/s3api_xsd_generated_helper.go deleted file mode 100644 index 24cdd2289..000000000 --- a/weed/s3api/s3api_xsd_generated_helper.go +++ /dev/null @@ -1,10 +0,0 @@ -package s3api - -type Grantee struct { - XMLNS string `xml:"xmlns:xsi,attr"` - XMLXSI string `xml:"xsi:type,attr"` - Type string `xml:"Type"` - ID string `xml:"ID,omitempty"` - DisplayName string `xml:"DisplayName,omitempty"` - URI string `xml:"URI,omitempty"` -} diff --git a/weed/s3api/s3bucket/s3api_bucket.go b/weed/s3api/s3bucket/s3api_bucket.go deleted file mode 100644 index 4fab933d9..000000000 --- a/weed/s3api/s3bucket/s3api_bucket.go +++ /dev/null @@ -1,40 +0,0 @@ -package s3bucket - -import ( - "fmt" - "net" - "strings" - "unicode" -) - -// https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html -func VerifyS3BucketName(name string) (err error) { - if len(name) < 3 || len(name) > 63 { - return fmt.Errorf("bucket name must between [3, 63] characters") - } - for idx, ch := range name { - if !(unicode.IsLower(ch) || ch == '.' || ch == '-' || unicode.IsNumber(ch)) { - return fmt.Errorf("bucket name can only contain lower case characters, numbers, dots, and hyphens") - } - if idx > 0 && (ch == '.' && name[idx-1] == '.') { - return fmt.Errorf("bucket names must not contain two adjacent periods") - } - //TODO buckets with s3 transfer acceleration cannot have . in name - } - if name[0] == '.' || name[0] == '-' { - return fmt.Errorf("name must start with number or lower case character") - } - if name[len(name)-1] == '.' || name[len(name)-1] == '-' { - return fmt.Errorf("name must end with number or lower case character") - } - if strings.HasPrefix(name, "xn--") { - return fmt.Errorf("prefix xn-- is reserved and not allowed in bucket prefix") - } - if strings.HasSuffix(name, "-s3alias") { - return fmt.Errorf("suffix -s3alias is reserved and not allowed in bucket suffix") - } - if net.ParseIP(name) != nil { - return fmt.Errorf("bucket name cannot be ip addresses") - } - return nil -} diff --git a/weed/s3api/s3bucket/s3api_bucket_test.go b/weed/s3api/s3bucket/s3api_bucket_test.go deleted file mode 100644 index 9ada225ef..000000000 --- a/weed/s3api/s3bucket/s3api_bucket_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package s3bucket - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func Test_verifyBucketName(t *testing.T) { - invalidS3BucketNames := []string{ - "A9325325b", - "123.12.153.10", - "abc214..2", - "d", - "aa", - ".ewfs3253543", - "grehtrry-", - "----------", - "x@fdsgr032", - } - for _, invalidName := range invalidS3BucketNames { - err := VerifyS3BucketName(invalidName) - assert.NotNil(t, err) - } - validS3BucketName := []string{ - "a9325325b", - "999.12.153.10", - "abc214.2", - "3d3d3d", - "ewfs3253543", - "grehtrry-a", - "0----------0", - "xafdsgr032", - } - for _, invalidName := range validS3BucketName { - err := VerifyS3BucketName(invalidName) - assert.Nil(t, err) - } -} diff --git a/weed/s3api/s3err/audit_fluent.go b/weed/s3api/s3err/audit_fluent.go index ef2459eac..2deb56896 100644 --- a/weed/s3api/s3err/audit_fluent.go +++ b/weed/s3api/s3err/audit_fluent.go @@ -3,13 +3,12 @@ package s3err import ( "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/fluent/fluent-logger-golang/fluent" "net/http" "os" "time" - - "github.com/fluent/fluent-logger-golang/fluent" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" ) type AccessLogExtend struct { diff --git a/weed/s3api/s3err/error_handler.go b/weed/s3api/s3err/error_handler.go index 24dcfad7f..6753a1641 100644 --- a/weed/s3api/s3err/error_handler.go +++ b/weed/s3api/s3err/error_handler.go @@ -4,14 +4,12 @@ import ( "bytes" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/gorilla/mux" "net/http" "strconv" "strings" "time" - - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" ) type mimeType string @@ -21,16 +19,6 @@ const ( MimeXML mimeType = "application/xml" ) -func WriteAwsXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, result interface{}) { - var bytesBuffer bytes.Buffer - err := xmlutil.BuildXML(result, xml.NewEncoder(&bytesBuffer)) - if err != nil { - WriteErrorResponse(w, r, ErrInternalError) - return - } - WriteResponse(w, r, statusCode, bytesBuffer.Bytes(), MimeXML) -} - func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { WriteResponse(w, r, statusCode, EncodeXMLResponse(response), MimeXML) } @@ -50,7 +38,8 @@ func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorC apiError := GetAPIError(errorCode) errorResponse := getRESTErrorResponse(apiError, r.URL.Path, bucket, object) - WriteXMLResponse(w, r, apiError.HTTPStatusCode, errorResponse) + encodedErrorResponse := EncodeXMLResponse(errorResponse) + WriteResponse(w, r, apiError.HTTPStatusCode, encodedErrorResponse, MimeXML) PostLog(r, apiError.HTTPStatusCode, errorCode) } @@ -77,34 +66,9 @@ func EncodeXMLResponse(response interface{}) []byte { func setCommonHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) w.Header().Set("Accept-Ranges", "bytes") - - // Handle CORS headers for requests with Origin header if r.Header.Get("Origin") != "" { - // Use mux.Vars to detect bucket-specific requests more reliably - vars := mux.Vars(r) - bucket := vars["bucket"] - isBucketRequest := bucket != "" - - if !isBucketRequest { - // Service-level request (like OPTIONS /) - apply static CORS if none set - if w.Header().Get("Access-Control-Allow-Origin") == "" { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "*") - w.Header().Set("Access-Control-Allow-Headers", "*") - w.Header().Set("Access-Control-Expose-Headers", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } - } else { - // Bucket-specific request - preserve existing CORS headers or set default - // This handles cases where CORS middleware set headers but auth failed - if w.Header().Get("Access-Control-Allow-Origin") == "" { - // No CORS headers were set by middleware, so this request doesn't match any CORS rule - // According to CORS spec, we should not set CORS headers for non-matching requests - // However, if the bucket has CORS config but request doesn't match, - // we still should not set headers here as it would be incorrect - } - // If CORS headers were already set by middleware, preserve them - } + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") } } diff --git a/weed/s3api/s3err/s3-error.go b/weed/s3api/s3err/s3-error.go index c5e515abd..b87764742 100644 --- a/weed/s3api/s3err/s3-error.go +++ b/weed/s3api/s3err/s3-error.go @@ -1,7 +1,5 @@ package s3err -import "github.com/seaweedfs/seaweedfs/weed/util/constants" - /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage * Copyright 2015-2017 MinIO, Inc. @@ -23,7 +21,7 @@ import "github.com/seaweedfs/seaweedfs/weed/util/constants" // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html var s3ErrorResponseMap = map[string]string{ "AccessDenied": "Access Denied.", - "BadDigest": constants.ErrMsgBadDigest, + "BadDigest": "The Content-Md5 you specified did not match what we received.", "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go index 24f8e1b56..57f269a2e 100644 --- a/weed/s3api/s3err/s3api_errors.go +++ b/weed/s3api/s3err/s3api_errors.go @@ -4,8 +4,6 @@ import ( "encoding/xml" "fmt" "net/http" - - "github.com/seaweedfs/seaweedfs/weed/util/constants" ) // APIError structure @@ -59,9 +57,7 @@ const ( ErrNoSuchKey ErrNoSuchUpload ErrInvalidBucketName - ErrInvalidBucketState ErrInvalidDigest - ErrBadDigest ErrInvalidMaxKeys ErrInvalidMaxUploads ErrInvalidMaxParts @@ -87,8 +83,6 @@ const ( ErrMalformedDate ErrMalformedPresignedDate ErrMalformedCredentialDate - ErrMalformedPolicy - ErrInvalidPolicyDocument ErrMissingSignHeadersTag ErrMissingSignTag ErrUnsignedHeaders @@ -107,44 +101,12 @@ const ( ErrAuthNotSetup ErrNotImplemented ErrPreconditionFailed - ErrNotModified ErrExistingObjectIsDirectory ErrExistingObjectIsFile ErrTooManyRequest ErrRequestBytesExceed - - OwnershipControlsNotFoundError - ErrNoSuchTagSet - ErrNoSuchObjectLockConfiguration - ErrNoSuchObjectLegalHold - ErrInvalidRetentionPeriod - ErrObjectLockConfigurationNotFoundError - ErrInvalidUnorderedWithDelimiter - - // SSE-C related errors - ErrInvalidEncryptionAlgorithm - ErrInvalidEncryptionKey - ErrSSECustomerKeyMD5Mismatch - ErrSSECustomerKeyMissing - ErrSSECustomerKeyNotNeeded - - // SSE-KMS related errors - ErrKMSKeyNotFound - ErrKMSAccessDenied - ErrKMSDisabled - ErrKMSInvalidCiphertext - - // Bucket encryption errors - ErrNoSuchBucketEncryptionConfiguration -) - -// Error message constants for checksum validation -const ( - ErrMsgPayloadChecksumMismatch = "payload checksum does not match" - ErrMsgChunkSignatureMismatch = "chunk signature does not match" - ErrMsgChecksumAlgorithmMismatch = "checksum algorithm mismatch" ) // error code to APIError structure, these fields carry respective @@ -180,21 +142,11 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "The specified bucket is not valid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidBucketState: { - Code: "InvalidBucketState", - Description: "The bucket is not in a valid state for the requested operation", - HTTPStatusCode: http.StatusConflict, - }, ErrInvalidDigest: { Code: "InvalidDigest", Description: "The Content-Md5 you specified is not valid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrBadDigest: { - Code: "BadDigest", - Description: constants.ErrMsgBadDigest, - HTTPStatusCode: http.StatusBadRequest, - }, ErrInvalidMaxUploads: { Code: "InvalidArgument", Description: "Argument max-uploads must be an integer between 0 and 2147483647", @@ -230,26 +182,6 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "The bucket policy does not exist", HTTPStatusCode: http.StatusNotFound, }, - ErrNoSuchTagSet: { - Code: "NoSuchTagSet", - Description: "The TagSet does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchObjectLockConfiguration: { - Code: "NoSuchObjectLockConfiguration", - Description: "The specified object does not have an ObjectLock configuration", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchObjectLegalHold: { - Code: "NoSuchObjectLegalHold", - Description: "The specified object does not have a legal hold configuration", - HTTPStatusCode: http.StatusNotFound, - }, - ErrInvalidRetentionPeriod: { - Code: "InvalidRetentionPeriod", - Description: "The retention period specified is invalid", - HTTPStatusCode: http.StatusBadRequest, - }, ErrNoSuchCORSConfiguration: { Code: "NoSuchCORSConfiguration", Description: "The CORS configuration does not exist", @@ -302,16 +234,6 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "The XML you provided was not well-formed or did not validate against our published schema.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMalformedPolicy: { - Code: "MalformedPolicy", - Description: "Policy has invalid resource.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPolicyDocument: { - Code: "InvalidPolicyDocument", - Description: "The content of the policy document is invalid.", - HTTPStatusCode: http.StatusBadRequest, - }, ErrAuthHeaderEmpty: { Code: "InvalidArgument", Description: "Authorization header is invalid -- one and only one ' ' (space) required.", @@ -472,11 +394,6 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "At least one of the pre-conditions you specified did not hold", HTTPStatusCode: http.StatusPreconditionFailed, }, - ErrNotModified: { - Code: "NotModified", - Description: "The object was not modified since the specified time", - HTTPStatusCode: http.StatusNotModified, - }, ErrExistingObjectIsDirectory: { Code: "ExistingObjectIsDirectory", Description: "Existing Object is a directory.", @@ -497,78 +414,6 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "Simultaneous request bytes exceed limitations", HTTPStatusCode: http.StatusTooManyRequests, }, - - OwnershipControlsNotFoundError: { - Code: "OwnershipControlsNotFoundError", - Description: "The bucket ownership controls were not found", - HTTPStatusCode: http.StatusNotFound, - }, - ErrObjectLockConfigurationNotFoundError: { - Code: "ObjectLockConfigurationNotFoundError", - Description: "Object Lock configuration does not exist for this bucket", - HTTPStatusCode: http.StatusNotFound, - }, - ErrInvalidUnorderedWithDelimiter: { - Code: "InvalidArgument", - Description: "Unordered listing cannot be used with delimiter", - HTTPStatusCode: http.StatusBadRequest, - }, - - // SSE-C related error mappings - ErrInvalidEncryptionAlgorithm: { - Code: "InvalidEncryptionAlgorithmError", - Description: "The encryption algorithm specified is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidEncryptionKey: { - Code: "InvalidArgument", - Description: "Invalid encryption key. Encryption key must be 256-bit AES256.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSSECustomerKeyMD5Mismatch: { - Code: "InvalidArgument", - Description: "The provided customer encryption key MD5 does not match the key.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSSECustomerKeyMissing: { - Code: "InvalidArgument", - Description: "Requests specifying Server Side Encryption with Customer provided keys must provide the customer key.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSSECustomerKeyNotNeeded: { - Code: "InvalidArgument", - Description: "The object was not encrypted with customer provided keys.", - HTTPStatusCode: http.StatusBadRequest, - }, - - // SSE-KMS error responses - ErrKMSKeyNotFound: { - Code: "KMSKeyNotFoundException", - Description: "The specified KMS key does not exist.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrKMSAccessDenied: { - Code: "KMSAccessDeniedException", - Description: "Access denied to the specified KMS key.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrKMSDisabled: { - Code: "KMSKeyDisabledException", - Description: "The specified KMS key is disabled.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrKMSInvalidCiphertext: { - Code: "InvalidCiphertext", - Description: "The provided ciphertext is invalid or corrupted.", - HTTPStatusCode: http.StatusBadRequest, - }, - - // Bucket encryption error responses - ErrNoSuchBucketEncryptionConfiguration: { - Code: "ServerSideEncryptionConfigurationNotFoundError", - Description: "The server side encryption configuration was not found.", - HTTPStatusCode: http.StatusNotFound, - }, } // GetAPIError provides API Error for input API error code. diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go index 14c0ad150..003807a25 100644 --- a/weed/s3api/stats.go +++ b/weed/s3api/stats.go @@ -1,50 +1,39 @@ package s3api import ( + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" "net/http" "strconv" "time" - - "github.com/seaweedfs/seaweedfs/weed/util/version" - - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" ) +type StatusRecorder struct { + http.ResponseWriter + Status int +} + +func NewStatusResponseWriter(w http.ResponseWriter) *StatusRecorder { + return &StatusRecorder{w, http.StatusOK} +} + +func (r *StatusRecorder) WriteHeader(status int) { + r.Status = status + r.ResponseWriter.WriteHeader(status) +} + +func (r *StatusRecorder) Flush() { + r.ResponseWriter.(http.Flusher).Flush() +} + func track(f http.HandlerFunc, action string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - inFlightGauge := stats_collect.S3InFlightRequestsGauge.WithLabelValues(action) - inFlightGauge.Inc() - defer inFlightGauge.Dec() - bucket, _ := s3_constants.GetBucketAndObject(r) - w.Header().Set("Server", "SeaweedFS "+version.VERSION) - recorder := stats_collect.NewStatusResponseWriter(w) + w.Header().Set("Server", "SeaweedFS S3") + recorder := NewStatusResponseWriter(w) start := time.Now() f(recorder, r) - if recorder.Status == http.StatusForbidden { - bucket = "" - } stats_collect.S3RequestHistogram.WithLabelValues(action, bucket).Observe(time.Since(start).Seconds()) stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status), bucket).Inc() - stats_collect.RecordBucketActiveTime(bucket) } } - -func TimeToFirstByte(action string, start time.Time, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - stats_collect.S3TimeToFirstByteHistogram.WithLabelValues(action, bucket).Observe(float64(time.Since(start).Milliseconds())) - stats_collect.RecordBucketActiveTime(bucket) -} - -func BucketTrafficReceived(bytesReceived int64, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3BucketTrafficReceivedBytesCounter.WithLabelValues(bucket).Add(float64(bytesReceived)) -} - -func BucketTrafficSent(bytesTransferred int64, r *http.Request) { - bucket, _ := s3_constants.GetBucketAndObject(r) - stats_collect.RecordBucketActiveTime(bucket) - stats_collect.S3BucketTrafficSentBytesCounter.WithLabelValues(bucket).Add(float64(bytesTransferred)) -} diff --git a/weed/s3api/tags.go b/weed/s3api/tags.go index c775874ef..d49db6894 100644 --- a/weed/s3api/tags.go +++ b/weed/s3api/tags.go @@ -3,12 +3,8 @@ package s3api import ( "encoding/xml" "fmt" - "net/url" "regexp" - "sort" "strings" - - "github.com/seaweedfs/seaweedfs/weed/util" ) type Tag struct { @@ -42,36 +38,17 @@ func FromTags(tags map[string]string) (t *Tagging) { Value: v, }) } - if tagArr := t.TagSet.Tag; len(tagArr) > 0 { - sort.SliceStable(tagArr, func(i, j int) bool { - return tagArr[i].Key < tagArr[j].Key - }) - } return } func parseTagsHeader(tags string) (map[string]string, error) { parsedTags := make(map[string]string) - for _, v := range util.StringSplit(tags, "&") { + for _, v := range strings.Split(tags, "&") { tag := strings.Split(v, "=") if len(tag) == 2 { - // URL decode both key and value - decodedKey, err := url.QueryUnescape(tag[0]) - if err != nil { - return nil, fmt.Errorf("failed to decode tag key '%s': %w", tag[0], err) - } - decodedValue, err := url.QueryUnescape(tag[1]) - if err != nil { - return nil, fmt.Errorf("failed to decode tag value '%s': %w", tag[1], err) - } - parsedTags[decodedKey] = decodedValue + parsedTags[tag[0]] = tag[1] } else if len(tag) == 1 { - // URL decode key for empty value tags - decodedKey, err := url.QueryUnescape(tag[0]) - if err != nil { - return nil, fmt.Errorf("failed to decode tag key '%s': %w", tag[0], err) - } - parsedTags[decodedKey] = "" + parsedTags[tag[0]] = "" } } return parsedTags, nil diff --git a/weed/s3api/tags_test.go b/weed/s3api/tags_test.go index d91499783..fb464fcae 100644 --- a/weed/s3api/tags_test.go +++ b/weed/s3api/tags_test.go @@ -1,104 +1,114 @@ package s3api import ( + "encoding/xml" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/stretchr/testify/assert" "testing" ) -func TestParseTagsHeader(t *testing.T) { - tests := []struct { - name string - input string - expected map[string]string - expectError bool - }{ - { - name: "simple tags", - input: "key1=value1&key2=value2", - expected: map[string]string{ - "key1": "value1", - "key2": "value2", +func TestXMLUnmarshall(t *testing.T) { + + input := ` + + + + key1 + value1 + + + +` + + tags := &Tagging{} + + xml.Unmarshal([]byte(input), tags) + + assert.Equal(t, len(tags.TagSet.Tag), 1) + assert.Equal(t, tags.TagSet.Tag[0].Key, "key1") + assert.Equal(t, tags.TagSet.Tag[0].Value, "value1") + +} + +func TestXMLMarshall(t *testing.T) { + tags := &Tagging{ + Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/", + TagSet: TagSet{ + []Tag{ + { + Key: "key1", + Value: "value1", + }, }, - expectError: false, - }, - { - name: "URL encoded timestamp - issue #7040 scenario", - input: "Timestamp=2025-07-16%2014%3A40%3A39&Owner=user123", - expected: map[string]string{ - "Timestamp": "2025-07-16 14:40:39", - "Owner": "user123", - }, - expectError: false, - }, - { - name: "URL encoded key and value", - input: "my%20key=my%20value&normal=test", - expected: map[string]string{ - "my key": "my value", - "normal": "test", - }, - expectError: false, - }, - { - name: "empty value", - input: "key1=&key2=value2", - expected: map[string]string{ - "key1": "", - "key2": "value2", - }, - expectError: false, - }, - { - name: "special characters encoded", - input: "path=/tmp%2Ffile.txt&data=hello%21world", - expected: map[string]string{ - "path": "/tmp/file.txt", - "data": "hello!world", - }, - expectError: false, - }, - { - name: "invalid URL encoding", - input: "key1=value%ZZ", - expected: nil, - expectError: true, - }, - { - name: "plus signs and equals in values", - input: "formula=a%2Bb%3Dc&normal=test", - expected: map[string]string{ - "formula": "a+b=c", - "normal": "test", - }, - expectError: false, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseTagsHeader(tt.input) + actual := string(s3err.EncodeXMLResponse(tags)) - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got none") - } - return - } + expected := ` +key1value1` + assert.Equal(t, expected, actual) - if err != nil { - t.Errorf("Unexpected error: %v", err) - return - } +} - if len(result) != len(tt.expected) { - t.Errorf("Expected %d tags, got %d", len(tt.expected), len(result)) - return - } +type TestTags map[string]string - for k, v := range tt.expected { - if result[k] != v { - t.Errorf("Expected tag %s=%s, got %s=%s", k, v, k, result[k]) - } - } - }) +var ValidateTagsTestCases = []struct { + testCaseID int + tags TestTags + wantErrString string +}{ + { + 1, + TestTags{"key-1": "value-1"}, + "", + }, + { + 2, + TestTags{"key-1": "valueOver256R59YI9bahPwAVqvLeKCvM2S1RjzgP8fNDKluCbol0XTTFY6VcMwTBmdnqjsddilXztSGfEoZS1wDAIMBA0rW0CLNSoE2zNg4TT0vDbLHEtZBoZjdZ5E0JNIAqwb9ptIk2VizYmhWjb1G4rJ0CqDGWxcy3usXaQg6Dk6kU8N4hlqwYWeGw7uqdghcQ3ScfF02nHW9QFMN7msLR5fe90mbFBBp3Tjq34i0LEr4By2vxoRa2RqdBhEJhi23Tm"}, + "validate tags: tag value longer than 256", + }, + { + 3, + TestTags{"keyLenOver128a5aUUGcPexMELsz3RyROzIzfO6BKABeApH2nbbagpOxZh2MgBWYDZtFxQaCuQeP1xR7dUJLwfFfDHguVIyxvTStGDk51BemKETIwZ0zkhR7lhfHBp2y0nFnV": "value-1"}, + "validate tags: tag key longer than 128", + }, + { + 4, + TestTags{"key-1*": "value-1"}, + "validate tags key key-1* error, incorrect key", + }, + { + 5, + TestTags{"key-1": "value-1?"}, + "validate tags value value-1? error, incorrect value", + }, + { + 6, + TestTags{ + "key-1": "value", + "key-2": "value", + "key-3": "value", + "key-4": "value", + "key-5": "value", + "key-6": "value", + "key-7": "value", + "key-8": "value", + "key-9": "value", + "key-10": "value", + "key-11": "value", + }, + "validate tags: 11 tags more than 10", + }, +} + +func TestValidateTags(t *testing.T) { + for _, testCase := range ValidateTagsTestCases { + err := ValidateTags(testCase.tags) + if testCase.wantErrString == "" { + assert.NoErrorf(t, err, "no error") + } else { + assert.EqualError(t, err, testCase.wantErrString) + } } } diff --git a/weed/security/guard.go b/weed/security/guard.go index a41cb0288..8cb52620e 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -7,7 +7,7 @@ import ( "net/http" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -17,12 +17,12 @@ var ( /* Guard is to ensure data access security. There are 2 ways to check access: - 1. white list. It's checking request ip address. - 2. JSON Web Token(JWT) generated from secretKey. - The jwt can come from: - 1. url parameter jwt=... - 2. request header "Authorization" - 3. cookie with the name "jwt" +1. white list. It's checking request ip address. +2. JSON Web Token(JWT) generated from secretKey. + The jwt can come from: + 1. url parameter jwt=... + 2. request header "Authorization" + 3. cookie with the name "jwt" The white list is checked first because it is easy. Then the JWT is checked. @@ -32,33 +32,33 @@ The Guard will also check these claims if provided: 2. "nbf" Not Before Generating JWT: - 1. use HS256 to sign - 2. optionally set "exp", "nbf" fields, in Unix time, - the number of seconds elapsed since January 1, 1970 UTC. +1. use HS256 to sign +2. optionally set "exp", "nbf" fields, in Unix time, + the number of seconds elapsed since January 1, 1970 UTC. Referenced: https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go + */ type Guard struct { - whiteListIp map[string]struct{} - whiteListCIDR map[string]*net.IPNet + whiteList []string SigningKey SigningKey ExpiresAfterSec int ReadSigningKey SigningKey ReadExpiresAfterSec int - isWriteActive bool - isEmptyWhiteList bool + isWriteActive bool } func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSigningKey string, readExpiresAfterSec int) *Guard { g := &Guard{ + whiteList: whiteList, SigningKey: SigningKey(signingKey), ExpiresAfterSec: expiresAfterSec, ReadSigningKey: SigningKey(readSigningKey), ReadExpiresAfterSec: readExpiresAfterSec, } - g.UpdateWhiteList(whiteList) + g.isWriteActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0 return g } @@ -76,71 +76,52 @@ func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc { } } -func GetActualRemoteHost(r *http.Request) string { - // For security reasons, only use RemoteAddr to determine the client's IP address. - // Do not trust headers like X-Forwarded-For, as they can be easily spoofed by clients. - host, _, err := net.SplitHostPort(r.RemoteAddr) - if err == nil { - return host +func GetActualRemoteHost(r *http.Request) (host string, err error) { + host = r.Header.Get("HTTP_X_FORWARDED_FOR") + if host == "" { + host = r.Header.Get("X-FORWARDED-FOR") } - - // If SplitHostPort fails, it may be because of a missing port. - // We try to parse RemoteAddr as a raw host (IP or hostname). - host = strings.TrimSpace(r.RemoteAddr) - // It might be an IPv6 address without a port, but with brackets. - // e.g. "[::1]" - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - host = host[1 : len(host)-1] + if strings.Contains(host, ",") { + host = host[0:strings.Index(host, ",")] } - - // Return the host (can be IP or hostname, just like headers) - return host + if host == "" { + host, _, err = net.SplitHostPort(r.RemoteAddr) + } + return } func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error { - if g.isEmptyWhiteList { + if len(g.whiteList) == 0 { return nil } - host := GetActualRemoteHost(r) + host, err := GetActualRemoteHost(r) + if err == nil { + for _, ip := range g.whiteList { - // Check exact match first (works for both IPs and hostnames) - if _, ok := g.whiteListIp[host]; ok { - return nil - } - - // Check CIDR ranges (only for valid IP addresses) - remote := net.ParseIP(host) - if remote != nil { - for _, cidrnet := range g.whiteListCIDR { // If the whitelist entry contains a "/" it // is a CIDR range, and we should check the - if cidrnet.Contains(remote) { + // remote host is within it + if strings.Contains(ip, "/") { + _, cidrnet, err := net.ParseCIDR(ip) + if err != nil { + panic(err) + } + remote := net.ParseIP(host) + if cidrnet.Contains(remote) { + return nil + } + } + + // + // Otherwise we're looking for a literal match. + // + if ip == host { return nil } } } - glog.V(0).Infof("Not in whitelist: %s (original RemoteAddr: %s)", host, r.RemoteAddr) - return fmt.Errorf("Not in whitelist: %s", host) -} - -func (g *Guard) UpdateWhiteList(whiteList []string) { - whiteListIp := make(map[string]struct{}) - whiteListCIDR := make(map[string]*net.IPNet) - for _, ip := range whiteList { - if strings.Contains(ip, "/") { - _, cidrnet, err := net.ParseCIDR(ip) - if err != nil { - glog.Errorf("Parse CIDR %s in whitelist failed: %v", ip, err) - } - whiteListCIDR[ip] = cidrnet - } else { - whiteListIp[ip] = struct{}{} - } - } - g.isEmptyWhiteList = len(whiteListIp) == 0 && len(whiteListCIDR) == 0 - g.isWriteActive = !g.isEmptyWhiteList || len(g.SigningKey) != 0 - g.whiteListIp = whiteListIp - g.whiteListCIDR = whiteListCIDR + glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr) + return fmt.Errorf("Not in whitelist: %s", r.RemoteAddr) } diff --git a/weed/security/jwt.go b/weed/security/jwt.go index d859e9ea8..82ba0df12 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -6,8 +6,8 @@ import ( "strings" "time" - jwt "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/golang-jwt/jwt" ) type EncodedJwt string @@ -17,14 +17,14 @@ type SigningKey []byte // restricting the access this JWT allows to only a single file. type SeaweedFileIdClaims struct { Fid string `json:"fid"` - jwt.RegisteredClaims + jwt.StandardClaims } // SeaweedFilerClaims is created e.g. by S3 proxy server and consumed by Filer server. // Right now, it only contains the standard claims; but this might be extended later // for more fine-grained permissions. type SeaweedFilerClaims struct { - jwt.RegisteredClaims + jwt.StandardClaims } func GenJwtForVolumeServer(signingKey SigningKey, expiresAfterSec int, fileId string) EncodedJwt { @@ -34,10 +34,10 @@ func GenJwtForVolumeServer(signingKey SigningKey, expiresAfterSec int, fileId st claims := SeaweedFileIdClaims{ fileId, - jwt.RegisteredClaims{}, + jwt.StandardClaims{}, } if expiresAfterSec > 0 { - claims.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Second * time.Duration(expiresAfterSec))) + claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix() } t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) encoded, e := t.SignedString([]byte(signingKey)) @@ -56,10 +56,10 @@ func GenJwtForFilerServer(signingKey SigningKey, expiresAfterSec int) EncodedJwt } claims := SeaweedFilerClaims{ - jwt.RegisteredClaims{}, + jwt.StandardClaims{}, } if expiresAfterSec > 0 { - claims.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Second * time.Duration(expiresAfterSec))) + claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix() } t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) encoded, e := t.SignedString([]byte(signingKey)) @@ -83,14 +83,6 @@ func GetJwt(r *http.Request) EncodedJwt { } } - // Get token from http only cookie - if tokenStr == "" { - token, err := r.Cookie("AT") - if err == nil { - tokenStr = token.Value - } - } - return EncodedJwt(tokenStr) } diff --git a/weed/security/tls.go b/weed/security/tls.go index 1a9dfacb5..bfa9d43c7 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -4,20 +4,18 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "os" - "slices" + "google.golang.org/grpc/credentials/tls/certprovider/pemfile" + "google.golang.org/grpc/security/advancedtls" + "io/ioutil" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/credentials/tls/certprovider/pemfile" - "google.golang.org/grpc/security/advancedtls" ) -const CredRefreshingInterval = time.Duration(5) * time.Hour +const credRefreshingInterval = time.Duration(5) * time.Hour type Authenticator struct { AllowedWildcardDomain string @@ -32,10 +30,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption serverOptions := pemfile.Options{ CertFile: config.GetString(component + ".cert"), KeyFile: config.GetString(component + ".key"), - RefreshDuration: CredRefreshingInterval, - } - if serverOptions.CertFile == "" || serverOptions.KeyFile == "" { - return nil, nil + RefreshDuration: credRefreshingInterval, } serverIdentityProvider, err := pemfile.NewProvider(serverOptions) @@ -46,7 +41,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption serverRootOptions := pemfile.Options{ RootFile: config.GetString("grpc.ca"), - RefreshDuration: CredRefreshingInterval, + RefreshDuration: credRefreshingInterval, } serverRootProvider, err := pemfile.NewProvider(serverRootOptions) if err != nil { @@ -55,7 +50,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption } // Start a server and create a client using advancedtls API with Provider. - options := &advancedtls.Options{ + options := &advancedtls.ServerOptions{ IdentityOptions: advancedtls.IdentityCertificateOptions{ IdentityProvider: serverIdentityProvider, }, @@ -63,22 +58,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption RootProvider: serverRootProvider, }, RequireClientCert: true, - VerificationType: advancedtls.CertVerification, - } - options.MinTLSVersion, err = TlsVersionByName(config.GetString("tls.min_version")) - if err != nil { - glog.Warningf("tls min version parse failed, %v", err) - return nil, nil - } - options.MaxTLSVersion, err = TlsVersionByName(config.GetString("tls.max_version")) - if err != nil { - glog.Warningf("tls max version parse failed, %v", err) - return nil, nil - } - options.CipherSuites, err = TlsCipherSuiteByNames(config.GetString("tls.cipher_suites")) - if err != nil { - glog.Warningf("tls cipher suite parse failed, %v", err) - return nil, nil + VType: advancedtls.CertVerification, } allowedCommonNames := config.GetString(component + ".allowed_commonNames") allowedWildcardDomain := config.GetString("grpc.allowed_wildcard_domain") @@ -91,10 +71,10 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption AllowedCommonNames: allowedCommonNamesMap, AllowedWildcardDomain: allowedWildcardDomain, } - options.AdditionalPeerVerification = auther.Authenticate + options.VerifyPeer = auther.Authenticate } else { - options.AdditionalPeerVerification = func(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { - return &advancedtls.PostHandshakeVerificationResults{}, nil + options.VerifyPeer = func(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) { + return &advancedtls.VerificationResults{}, nil } } ta, err := advancedtls.NewServerCreds(options) @@ -107,55 +87,55 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { if config == nil { - return grpc.WithTransportCredentials(insecure.NewCredentials()) + return grpc.WithInsecure() } certFileName, keyFileName, caFileName := config.GetString(component+".cert"), config.GetString(component+".key"), config.GetString("grpc.ca") if certFileName == "" || keyFileName == "" || caFileName == "" { - return grpc.WithTransportCredentials(insecure.NewCredentials()) + return grpc.WithInsecure() } clientOptions := pemfile.Options{ CertFile: certFileName, KeyFile: keyFileName, - RefreshDuration: CredRefreshingInterval, + RefreshDuration: credRefreshingInterval, } clientProvider, err := pemfile.NewProvider(clientOptions) if err != nil { glog.Warningf("pemfile.NewProvider(%v) failed %v", clientOptions, err) - return grpc.WithTransportCredentials(insecure.NewCredentials()) + return grpc.WithInsecure() } clientRootOptions := pemfile.Options{ RootFile: config.GetString("grpc.ca"), - RefreshDuration: CredRefreshingInterval, + RefreshDuration: credRefreshingInterval, } clientRootProvider, err := pemfile.NewProvider(clientRootOptions) if err != nil { glog.Warningf("pemfile.NewProvider(%v) failed: %v", clientRootOptions, err) - return grpc.WithTransportCredentials(insecure.NewCredentials()) + return grpc.WithInsecure() } - options := &advancedtls.Options{ + options := &advancedtls.ClientOptions{ IdentityOptions: advancedtls.IdentityCertificateOptions{ IdentityProvider: clientProvider, }, - AdditionalPeerVerification: func(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { - return &advancedtls.PostHandshakeVerificationResults{}, nil + VerifyPeer: func(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) { + return &advancedtls.VerificationResults{}, nil }, RootOptions: advancedtls.RootCertificateOptions{ RootProvider: clientRootProvider, }, - VerificationType: advancedtls.CertVerification, + VType: advancedtls.CertVerification, } ta, err := advancedtls.NewClientCreds(options) if err != nil { glog.Warningf("advancedtls.NewClientCreds(%v) failed: %v", options, err) - return grpc.WithTransportCredentials(insecure.NewCredentials()) + return grpc.WithInsecure() } return grpc.WithTransportCredentials(ta) } func LoadClientTLSHTTP(clientCertFile string) *tls.Config { - clientCerts, err := os.ReadFile(clientCertFile) + clientCerts, err := ioutil.ReadFile(clientCertFile) if err != nil { glog.Fatal(err) } @@ -171,68 +151,14 @@ func LoadClientTLSHTTP(clientCertFile string) *tls.Config { } } -func (a Authenticator) Authenticate(params *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { +func (a Authenticator) Authenticate(params *advancedtls.VerificationFuncParams) (*advancedtls.VerificationResults, error) { if a.AllowedWildcardDomain != "" && strings.HasSuffix(params.Leaf.Subject.CommonName, a.AllowedWildcardDomain) { - return &advancedtls.PostHandshakeVerificationResults{}, nil + return &advancedtls.VerificationResults{}, nil } if _, ok := a.AllowedCommonNames[params.Leaf.Subject.CommonName]; ok { - return &advancedtls.PostHandshakeVerificationResults{}, nil + return &advancedtls.VerificationResults{}, nil } err := fmt.Errorf("Authenticate: invalid subject client common name: %s", params.Leaf.Subject.CommonName) glog.Error(err) return nil, err } - -func FixTlsConfig(viper *util.ViperProxy, config *tls.Config) error { - var err error - config.MinVersion, err = TlsVersionByName(viper.GetString("tls.min_version")) - if err != nil { - return err - } - config.MaxVersion, err = TlsVersionByName(viper.GetString("tls.max_version")) - if err != nil { - return err - } - config.CipherSuites, err = TlsCipherSuiteByNames(viper.GetString("tls.cipher_suites")) - return err -} - -func TlsVersionByName(name string) (uint16, error) { - switch name { - case "": - return 0, nil - case "SSLv3": - return tls.VersionSSL30, nil - case "TLS 1.0": - return tls.VersionTLS10, nil - case "TLS 1.1": - return tls.VersionTLS11, nil - case "TLS 1.2": - return tls.VersionTLS12, nil - case "TLS 1.3": - return tls.VersionTLS13, nil - default: - return 0, fmt.Errorf("invalid tls version %s", name) - } -} - -func TlsCipherSuiteByNames(cipherSuiteNames string) ([]uint16, error) { - cipherSuiteNames = strings.TrimSpace(cipherSuiteNames) - if cipherSuiteNames == "" { - return nil, nil - } - names := strings.Split(cipherSuiteNames, ",") - cipherSuites := tls.CipherSuites() - cipherIds := make([]uint16, 0, len(names)) - for _, name := range names { - name = strings.TrimSpace(name) - index := slices.IndexFunc(cipherSuites, func(suite *tls.CipherSuite) bool { - return name == suite.Name - }) - if index == -1 { - return nil, fmt.Errorf("invalid tls cipher suite name %s", name) - } - cipherIds = append(cipherIds, cipherSuites[index].ID) - } - return cipherIds, nil -} diff --git a/weed/sequence/memory_sequencer.go b/weed/sequence/memory_sequencer.go index 92944266c..e20c29cc7 100644 --- a/weed/sequence/memory_sequencer.go +++ b/weed/sequence/memory_sequencer.go @@ -4,7 +4,7 @@ import ( "sync" ) -// default Sequencer +// just for testing type MemorySequencer struct { counter uint64 sequenceLock sync.Mutex @@ -30,3 +30,7 @@ func (m *MemorySequencer) SetMax(seenValue uint64) { m.counter = seenValue + 1 } } + +func (m *MemorySequencer) Peek() uint64 { + return m.counter +} diff --git a/weed/sequence/sequence.go b/weed/sequence/sequence.go index 49de63b73..2258d001b 100644 --- a/weed/sequence/sequence.go +++ b/weed/sequence/sequence.go @@ -3,4 +3,5 @@ package sequence type Sequencer interface { NextFileId(count uint64) uint64 SetMax(uint64) + Peek() uint64 } diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go index 05694f681..381933b3a 100644 --- a/weed/sequence/snowflake_sequencer.go +++ b/weed/sequence/snowflake_sequencer.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "github.com/bwmarrin/snowflake" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/glog" ) // a simple snowflake Sequencer @@ -42,3 +42,8 @@ func (m *SnowflakeSequencer) NextFileId(count uint64) uint64 { // ignore setmax as we are snowflake func (m *SnowflakeSequencer) SetMax(seenValue uint64) { } + +// return a new id as no Peek is stored +func (m *SnowflakeSequencer) Peek() uint64 { + return uint64(m.node.Generate().Int64()) +} diff --git a/weed/sequence/snowflake_sequencer_test.go b/weed/sequence/snowflake_sequencer_test.go index 7e8c1d046..731e330c5 100644 --- a/weed/sequence/snowflake_sequencer_test.go +++ b/weed/sequence/snowflake_sequencer_test.go @@ -2,7 +2,7 @@ package sequence import ( "encoding/hex" - "github.com/seaweedfs/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/stretchr/testify/assert" "testing" ) diff --git a/weed/server/common.go b/weed/server/common.go index 49dd78ce0..f02ec67ac 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -3,10 +3,10 @@ package weed_server import ( "bufio" "bytes" - "context" "encoding/json" "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "io" "io/fs" "mime/multipart" @@ -15,32 +15,21 @@ import ( "path/filepath" "strconv" "strings" - "sync" "time" - "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/util/request_id" - "github.com/seaweedfs/seaweedfs/weed/util/version" - "google.golang.org/grpc/metadata" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) var serverStats *stats.ServerStats var startTime = time.Now() -var writePool = sync.Pool{New: func() interface{} { - return bufio.NewWriterSize(nil, 128*1024) -}, -} func init() { serverStats = stats.NewServerStats() @@ -114,14 +103,13 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON status %s %d: %v", r.URL, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { m := make(map[string]interface{}) m["error"] = err.Error() - glog.V(1).Infof("error JSON response status %d: %s", httpStatus, m["error"]) writeJsonQuiet(w, r, httpStatus, m) } @@ -130,9 +118,8 @@ func debug(params ...interface{}) { } func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) { - ctx := r.Context() m := make(map[string]interface{}) - if r.Method != http.MethodPost { + if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) return } @@ -165,7 +152,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn ope Ttl: r.FormValue("ttl"), DiskType: r.FormValue("disk"), } - assignResult, ae := operation.Assign(ctx, masterFn, grpcDialOption, ar) + assignResult, ae := operation.Assign(masterFn, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return @@ -186,12 +173,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn ope PairMap: pu.PairMap, Jwt: assignResult.Auth, } - uploader, err := operation.NewUploader() - if err != nil { - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - uploadResult, err := uploader.UploadData(ctx, pu.Data, uploadOption) + uploadResult, err := operation.UploadData(pu.Data, uploadOption) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return @@ -241,19 +223,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b func statsHealthHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = version.Version() + m["Version"] = util.Version() writeJsonQuiet(w, r, http.StatusOK, m) } func statsCounterHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = version.Version() + m["Version"] = util.Version() m["Counters"] = serverStats writeJsonQuiet(w, r, http.StatusOK, m) } func statsMemoryHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = version.Version() + m["Version"] = util.Version() m["Memory"] = stats.MemStat() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -270,13 +252,10 @@ func handleStaticResources2(r *mux.Router) { r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(http.FS(StaticFS)))) } -func AdjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) { - // Apply S3 passthrough headers from query parameters - // AWS S3 supports overriding response headers via query parameters like: - // ?response-cache-control=no-cache&response-content-type=application/json - for queryParam, headerValue := range r.URL.Query() { - if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(queryParam)]; ok && len(headerValue) > 0 { - w.Header().Set(normalizedHeader, headerValue[0]) +func adjustPassthroughHeaders(w http.ResponseWriter, r *http.Request, filename string) { + for header, values := range r.Header { + if normalizedHeader, ok := s3_constants.PassThroughHeaders[strings.ToLower(header)]; ok { + w.Header()[normalizedHeader] = values } } adjustHeaderContentDisposition(w, r, filename) @@ -297,50 +276,38 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file } } -func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, prepareWriteFn func(offset int64, size int64) (filer.DoStreamContent, error)) error { +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { rangeReq := r.Header.Get("Range") - bufferedWriter := writePool.Get().(*bufio.Writer) - bufferedWriter.Reset(w) - defer func() { - bufferedWriter.Flush() - writePool.Put(bufferedWriter) - }() + bufferedWriter := bufio.NewWriterSize(w, 128*1024) + defer bufferedWriter.Flush() if rangeReq == "" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - writeFn, err := prepareWriteFn(0, totalSize) - if err != nil { - glog.Errorf("ProcessRangeRequest: %v", err) - w.Header().Del("Content-Length") + if err := writeFn(bufferedWriter, 0, totalSize); err != nil { + glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err) http.Error(w, err.Error(), http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest: %w", err) + return } - if err = writeFn(bufferedWriter); err != nil { - glog.Errorf("ProcessRangeRequest: %v", err) - w.Header().Del("Content-Length") - http.Error(w, err.Error(), http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest: %w", err) - } - return nil + return } //the rest is dealing with partial content request //mostly copy from src/pkg/net/http/fs.go ranges, err := parseRange(rangeReq, totalSize) if err != nil { - glog.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err) + glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err) http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return fmt.Errorf("ProcessRangeRequest header: %w", err) + return } if sumRangesSize(ranges) > totalSize { // The total number of bytes in all the ranges // is larger than the size of the file by // itself, so this is probably an attack, or a // dumb client. Ignore the range request. - return nil + return } if len(ranges) == 0 { - return nil + return } if len(ranges) == 1 { // RFC 2616, Section 14.16: @@ -358,39 +325,22 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) w.Header().Set("Content-Range", ra.contentRange(totalSize)) - writeFn, err := prepareWriteFn(ra.start, ra.length) - if err != nil { - glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) - w.Header().Del("Content-Length") - http.Error(w, err.Error(), http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest: %w", err) - } w.WriteHeader(http.StatusPartialContent) - err = writeFn(bufferedWriter) + err = writeFn(bufferedWriter, ra.start, ra.length) if err != nil { - glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) - w.Header().Del("Content-Length") + glog.Errorf("processRangeRequest headers: %+v err: %v", w.Header(), err) http.Error(w, err.Error(), http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest range[0]: %w", err) + return } - return nil + return } // process multiple ranges - writeFnByRange := make(map[int](func(writer io.Writer) error)) - - for i, ra := range ranges { + for _, ra := range ranges { if ra.start > totalSize { http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return fmt.Errorf("out of range: %w", err) + return } - writeFn, err := prepareWriteFn(ra.start, ra.length) - if err != nil { - glog.Errorf("ProcessRangeRequest range[%d] err: %v", i, err) - http.Error(w, "Internal Error", http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest range[%d] err: %v", i, err) - } - writeFnByRange[i] = writeFn } sendSize := rangesMIMESize(ranges, mimeType, totalSize) pr, pw := io.Pipe() @@ -399,18 +349,13 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 sendContent := pr defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. go func() { - for i, ra := range ranges { + for _, ra := range ranges { part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) if e != nil { pw.CloseWithError(e) return } - writeFn := writeFnByRange[i] - if writeFn == nil { - pw.CloseWithError(e) - return - } - if e = writeFn(part); e != nil { + if e = writeFn(part, ra.start, ra.length); e != nil { pw.CloseWithError(e) return } @@ -423,27 +368,8 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 } w.WriteHeader(http.StatusPartialContent) if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil { - glog.Errorf("ProcessRangeRequest err: %v", err) + glog.Errorf("processRangeRequest err: %v", err) http.Error(w, "Internal Error", http.StatusInternalServerError) - return fmt.Errorf("ProcessRangeRequest err: %w", err) - } - return nil -} - -func requestIDMiddleware(h http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - reqID := r.Header.Get(request_id.AmzRequestIDHeader) - if reqID == "" { - reqID = uuid.New().String() - } - - ctx := context.WithValue(r.Context(), request_id.AmzRequestIDHeader, reqID) - ctx = metadata.NewOutgoingContext(ctx, - metadata.New(map[string]string{ - request_id.AmzRequestIDHeader: reqID, - })) - - w.Header().Set(request_id.AmzRequestIDHeader, reqID) - h(w, r.WithContext(ctx)) + return } } diff --git a/weed/server/constants/volume.go b/weed/server/constants/volume.go deleted file mode 100644 index 77c7b7b47..000000000 --- a/weed/server/constants/volume.go +++ /dev/null @@ -1,5 +0,0 @@ -package constants - -const ( - VolumePulseSeconds = 5 -) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a18c55bb1..17d17c588 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -8,27 +8,25 @@ import ( "strconv" "time" - "github.com/seaweedfs/seaweedfs/weed/cluster" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - glog.V(4).InfofCtx(ctx, "LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) + glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name)) if err == filer_pb.ErrNotFound { return &filer_pb.LookupDirectoryEntryResponse{}, err } if err != nil { - glog.V(3).InfofCtx(ctx, "LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } @@ -97,7 +95,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol for _, vidString := range req.VolumeIds { vid, err := strconv.Atoi(vidString) if err != nil { - glog.V(1).InfofCtx(ctx, "Unknown volume id %d", vid) + glog.V(1).Infof("Unknown volume id %d", vid) return nil, err } var locs []*filer_pb.Location @@ -107,10 +105,9 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol } for _, loc := range locations { locs = append(locs, &filer_pb.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - GrpcPort: uint32(loc.GrpcPort), - DataCenter: loc.DataCenter, + Url: loc.Url, + PublicUrl: loc.PublicUrl, + GrpcPort: uint32(loc.GrpcPort), }) } resp.LocationsMap[vidString] = &filer_pb.Locations{ @@ -121,7 +118,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol return resp, nil } -func (fs *FilerServer) lookupFileId(ctx context.Context, fileId string) (targetUrls []string, err error) { +func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) { fid, err := needle.ParseFileIdFromString(fileId) if err != nil { return nil, err @@ -138,16 +135,16 @@ func (fs *FilerServer) lookupFileId(ctx context.Context, fileId string) (targetU func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - glog.V(4).InfofCtx(ctx, "CreateEntry %v/%v", req.Directory, req.Entry.Name) + glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name) resp = &filer_pb.CreateEntryResponse{} - chunks, garbage, err2 := fs.cleanupChunks(ctx, util.Join(req.Directory, req.Entry.Name), nil, req.Entry) + chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry) if err2 != nil { return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2) } - so, err := fs.detectStorageOption(ctx, string(util.NewFullPath(req.Directory, req.Entry.Name)), "", "", 0, "", "", "", "") + so, err := fs.detectStorageOption(string(util.NewFullPath(req.Directory, req.Entry.Name)), "", "", 0, "", "", "", "") if err != nil { return nil, err } @@ -155,12 +152,12 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr newEntry.Chunks = chunks newEntry.TtlSec = so.TtlSeconds - createErr := fs.filer.CreateEntry(ctx, newEntry, req.OExcl, req.IsFromOtherCluster, req.Signatures, req.SkipCheckParentDirectory, so.MaxFileNameLength) + createErr := fs.filer.CreateEntry(ctx, newEntry, req.OExcl, req.IsFromOtherCluster, req.Signatures, req.SkipCheckParentDirectory) if createErr == nil { - fs.filer.DeleteChunksNotRecursive(garbage) + fs.filer.DeleteChunks(garbage) } else { - glog.V(3).InfofCtx(ctx, "CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) resp.Error = createErr.Error() } @@ -169,7 +166,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - glog.V(4).InfofCtx(ctx, "UpdateEntry %v", req) + glog.V(4).Infof("UpdateEntry %v", req) fullpath := util.Join(req.Directory, req.Entry.Name) entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) @@ -177,7 +174,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } - chunks, garbage, err2 := fs.cleanupChunks(ctx, fullpath, entry, req.Entry) + chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry) if err2 != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2) } @@ -190,35 +187,35 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { - fs.filer.DeleteChunksNotRecursive(garbage) + fs.filer.DeleteChunks(garbage) fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures) } else { - glog.V(3).InfofCtx(ctx, "UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } return &filer_pb.UpdateEntryResponse{}, err } -func (fs *FilerServer) cleanupChunks(ctx context.Context, fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) { +func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) { // remove old chunks if not included in the new ones if existingEntry != nil { - garbage, err = filer.MinusChunks(ctx, fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks()) + garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks) if err != nil { - return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %w", err) + return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err) } } // files with manifest chunks are usually large and append only, skip calculating covered chunks - manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.GetChunks()) + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks) - chunks, coveredChunks := filer.CompactFileChunks(ctx, fs.lookupFileId, nonManifestChunks) + chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks) garbage = append(garbage, coveredChunks...) if newEntry.Attributes != nil { - so, _ := fs.detectStorageOption(ctx, fullpath, + so, _ := fs.detectStorageOption(fullpath, "", "", newEntry.Attributes.TtlSec, @@ -227,27 +224,23 @@ func (fs *FilerServer) cleanupChunks(ctx context.Context, fullpath string, exist "", "", ) // ignore readonly error for capacity needed to manifestize - chunks, err = filer.MaybeManifestize(fs.saveAsChunk(ctx, so), chunks) + chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks) if err != nil { // not good, but should be ok - glog.V(0).InfofCtx(ctx, "MaybeManifestize: %v", err) + glog.V(0).Infof("MaybeManifestize: %v", err) } } - chunks = append(manifestChunks, chunks...) + chunks = append(chunks, manifestChunks...) return } func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) { - glog.V(4).InfofCtx(ctx, "AppendToEntry %v", req) + glog.V(4).Infof("AppendToEntry %v", req) + fullpath := util.NewFullPath(req.Directory, req.EntryName) - - lockClient := cluster.NewLockClient(fs.grpcDialOption, fs.option.Host) - lock := lockClient.NewShortLivedLock(string(fullpath), string(fs.option.Host)) - defer lock.StopShortLivedLock() - var offset int64 = 0 entry, err := fs.filer.FindEntry(ctx, fullpath) if err == filer_pb.ErrNotFound { @@ -262,7 +255,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo }, } } else { - offset = int64(filer.TotalSize(entry.GetChunks())) + offset = int64(filer.TotalSize(entry.Chunks)) } for _, chunk := range req.Chunks { @@ -270,28 +263,28 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo offset += int64(chunk.Size) } - entry.Chunks = append(entry.GetChunks(), req.Chunks...) - so, err := fs.detectStorageOption(ctx, string(fullpath), "", "", entry.TtlSec, "", "", "", "") + entry.Chunks = append(entry.Chunks, req.Chunks...) + so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "") if err != nil { - glog.WarningfCtx(ctx, "detectStorageOption: %v", err) + glog.Warningf("detectStorageOption: %v", err) return &filer_pb.AppendToEntryResponse{}, err } - entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(ctx, so), entry.GetChunks()) + entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks) if err != nil { // not good, but should be ok - glog.V(0).InfofCtx(ctx, "MaybeManifestize: %v", err) + glog.V(0).Infof("MaybeManifestize: %v", err) } - err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil, false, fs.filer.MaxFilenameLength) + err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil, false) return &filer_pb.AppendToEntryResponse{}, err } func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - glog.V(4).InfofCtx(ctx, "DeleteEntry %v", req) + glog.V(4).Infof("DeleteEntry %v", req) - err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures, req.IfNotModifiedAfter) + err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures) resp = &filer_pb.DeleteEntryResponse{} if err != nil && err != filer_pb.ErrNotFound { resp.Error = err.Error() @@ -301,25 +294,21 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { - if req.DiskType == "" { - req.DiskType = fs.option.DiskType - } - - so, err := fs.detectStorageOption(ctx, req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack, req.DataNode) + so, err := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack, req.DataNode) if err != nil { - glog.V(3).InfofCtx(ctx, "AssignVolume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } assignRequest, altRequest := so.ToAssignRequests(int(req.Count)) - assignResult, err := operation.Assign(ctx, fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) + assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) if err != nil { - glog.V(3).InfofCtx(ctx, "AssignVolume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - glog.V(3).InfofCtx(ctx, "AssignVolume error: %v", assignResult.Error) + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } @@ -339,7 +328,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) { - glog.V(4).InfofCtx(ctx, "CollectionList %v", req) + glog.V(4).Infof("CollectionList %v", req) resp = &filer_pb.CollectionListResponse{} err = fs.filer.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { @@ -361,9 +350,14 @@ func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.Collect func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - glog.V(4).InfofCtx(ctx, "DeleteCollection %v", req) + glog.V(4).Infof("DeleteCollection %v", req) - err = fs.filer.DoDeleteCollection(req.GetCollection()) + err = fs.filer.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: req.GetCollection(), + }) + return err + }) return &filer_pb.DeleteCollectionResponse{}, err } diff --git a/weed/server/filer_grpc_server_admin.go b/weed/server/filer_grpc_server_admin.go index 92a820f35..df5b8fa1e 100644 --- a/weed/server/filer_grpc_server_admin.go +++ b/weed/server/filer_grpc_server_admin.go @@ -3,15 +3,14 @@ package weed_server import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/chrislusf/seaweedfs/weed/cluster" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/util" "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" ) func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) { @@ -49,7 +48,7 @@ func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (res StartTimeNs: time.Now().UnixNano(), } if req.TargetType == cluster.FilerType { - pingErr = pb.WithFilerClient(false, 0, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + pingErr = pb.WithFilerClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { pingResp, err := client.Ping(ctx, &filer_pb.PingRequest{}) if pingResp != nil { resp.RemoteTimeNs = pingResp.StartTimeNs @@ -67,7 +66,7 @@ func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (res }) } if req.TargetType == cluster.MasterType { - pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, false, func(client master_pb.SeaweedClient) error { + pingErr = pb.WithMasterClient(false, pb.ServerAddress(req.Target), fs.grpcDialOption, func(client master_pb.SeaweedClient) error { pingResp, err := client.Ping(ctx, &master_pb.PingRequest{}) if pingResp != nil { resp.RemoteTimeNs = pingResp.StartTimeNs @@ -84,8 +83,10 @@ func (fs *FilerServer) Ping(ctx context.Context, req *filer_pb.PingRequest) (res func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { + clusterId, _ := fs.filer.Store.KvGet(context.Background(), []byte("clusterId")) + t := &filer_pb.GetFilerConfigurationResponse{ - Masters: fs.option.Masters.GetInstancesAsStrings(), + Masters: pb.ToAddressStringsFromMap(fs.option.Masters), Collection: fs.option.Collection, Replication: fs.option.DefaultReplication, MaxMb: uint32(fs.option.MaxMB), @@ -94,13 +95,84 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. Signature: fs.filer.Signature, MetricsAddress: fs.metricsAddress, MetricsIntervalSec: int32(fs.metricsIntervalSec), - Version: version.Version(), + Version: util.Version(), + ClusterId: string(clusterId), FilerGroup: fs.option.FilerGroup, - MajorVersion: version.MAJOR_VERSION, - MinorVersion: version.MINOR_VERSION, } - glog.V(4).InfofCtx(ctx, "GetFilerConfiguration: %v", t) + glog.V(4).Infof("GetFilerConfiguration: %v", t) return t, nil } + +func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error { + + req, err := stream.Recv() + if err != nil { + return err + } + + clientName := util.JoinHostPort(req.Name, int(req.GrpcPort)) + m := make(map[string]bool) + for _, tp := range req.Resources { + m[tp] = true + } + fs.brokersLock.Lock() + fs.brokers[clientName] = m + glog.V(0).Infof("+ broker %v", clientName) + fs.brokersLock.Unlock() + + defer func() { + fs.brokersLock.Lock() + delete(fs.brokers, clientName) + glog.V(0).Infof("- broker %v: %v", clientName, err) + fs.brokersLock.Unlock() + }() + + for { + if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil { + glog.V(0).Infof("send broker %v: %+v", clientName, err) + return err + } + // println("replied") + + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("recv broker %v: %v", clientName, err) + return err + } + // println("received") + } + +} + +func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) { + + resp = &filer_pb.LocateBrokerResponse{} + + fs.brokersLock.Lock() + defer fs.brokersLock.Unlock() + + var localBrokers []*filer_pb.LocateBrokerResponse_Resource + + for b, m := range fs.brokers { + if _, found := m[req.Resource]; found { + resp.Found = true + resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{ + { + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }, + } + return + } + localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{ + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }) + } + + resp.Resources = localBrokers + + return resp, nil + +} diff --git a/weed/server/filer_grpc_server_dlm.go b/weed/server/filer_grpc_server_dlm.go deleted file mode 100644 index 7e8f93102..000000000 --- a/weed/server/filer_grpc_server_dlm.go +++ /dev/null @@ -1,170 +0,0 @@ -package weed_server - -import ( - "context" - "fmt" - "time" - - "github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// DistributedLock is a grpc handler to handle FilerServer's LockRequest -func (fs *FilerServer) DistributedLock(ctx context.Context, req *filer_pb.LockRequest) (resp *filer_pb.LockResponse, err error) { - - glog.V(4).Infof("FILER LOCK: Received DistributedLock request - name=%s owner=%s renewToken=%s secondsToLock=%d isMoved=%v", - req.Name, req.Owner, req.RenewToken, req.SecondsToLock, req.IsMoved) - - resp = &filer_pb.LockResponse{} - - var movedTo pb.ServerAddress - expiredAtNs := time.Now().Add(time.Duration(req.SecondsToLock) * time.Second).UnixNano() - resp.LockOwner, resp.RenewToken, movedTo, err = fs.filer.Dlm.LockWithTimeout(req.Name, expiredAtNs, req.RenewToken, req.Owner) - glog.V(4).Infof("FILER LOCK: LockWithTimeout result - name=%s lockOwner=%s renewToken=%s movedTo=%s err=%v", - req.Name, resp.LockOwner, resp.RenewToken, movedTo, err) - glog.V(4).Infof("lock %s %v %v %v, isMoved=%v %v", req.Name, req.SecondsToLock, req.RenewToken, req.Owner, req.IsMoved, movedTo) - if movedTo != "" && movedTo != fs.option.Host && !req.IsMoved { - glog.V(0).Infof("FILER LOCK: Forwarding to correct filer - from=%s to=%s", fs.option.Host, movedTo) - err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - secondResp, err := client.DistributedLock(ctx, &filer_pb.LockRequest{ - Name: req.Name, - SecondsToLock: req.SecondsToLock, - RenewToken: req.RenewToken, - IsMoved: true, - Owner: req.Owner, - }) - if err == nil { - resp.RenewToken = secondResp.RenewToken - resp.LockOwner = secondResp.LockOwner - resp.Error = secondResp.Error - glog.V(0).Infof("FILER LOCK: Forwarded lock acquired - name=%s renewToken=%s", req.Name, resp.RenewToken) - } else { - glog.V(0).Infof("FILER LOCK: Forward failed - name=%s err=%v", req.Name, err) - } - return err - }) - } - - if err != nil { - resp.Error = fmt.Sprintf("%v", err) - glog.V(0).Infof("FILER LOCK: Error - name=%s error=%s", req.Name, resp.Error) - } - if movedTo != "" { - resp.LockHostMovedTo = string(movedTo) - } - - glog.V(4).Infof("FILER LOCK: Returning response - name=%s renewToken=%s lockOwner=%s error=%s movedTo=%s", - req.Name, resp.RenewToken, resp.LockOwner, resp.Error, resp.LockHostMovedTo) - - return resp, nil -} - -// Unlock is a grpc handler to handle FilerServer's UnlockRequest -func (fs *FilerServer) DistributedUnlock(ctx context.Context, req *filer_pb.UnlockRequest) (resp *filer_pb.UnlockResponse, err error) { - - resp = &filer_pb.UnlockResponse{} - - var movedTo pb.ServerAddress - movedTo, err = fs.filer.Dlm.Unlock(req.Name, req.RenewToken) - - if !req.IsMoved && movedTo != "" { - err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - secondResp, err := client.DistributedUnlock(ctx, &filer_pb.UnlockRequest{ - Name: req.Name, - RenewToken: req.RenewToken, - IsMoved: true, - }) - resp.Error = secondResp.Error - return err - }) - } - - if err != nil { - resp.Error = fmt.Sprintf("%v", err) - } - if movedTo != "" { - resp.MovedTo = string(movedTo) - } - - return resp, nil - -} - -func (fs *FilerServer) FindLockOwner(ctx context.Context, req *filer_pb.FindLockOwnerRequest) (*filer_pb.FindLockOwnerResponse, error) { - owner, movedTo, err := fs.filer.Dlm.FindLockOwner(req.Name) - if !req.IsMoved && movedTo != "" || err == lock_manager.LockNotFound { - err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - secondResp, err := client.FindLockOwner(ctx, &filer_pb.FindLockOwnerRequest{ - Name: req.Name, - IsMoved: true, - }) - if err != nil { - return err - } - owner = secondResp.Owner - return nil - }) - if err != nil { - return nil, err - } - } - - if owner == "" { - glog.V(0).Infof("find lock %s moved to %v: %v", req.Name, movedTo, err) - return nil, status.Error(codes.NotFound, fmt.Sprintf("lock %s not found", req.Name)) - } - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &filer_pb.FindLockOwnerResponse{ - Owner: owner, - }, nil -} - -// TransferLocks is a grpc handler to handle FilerServer's TransferLocksRequest -func (fs *FilerServer) TransferLocks(ctx context.Context, req *filer_pb.TransferLocksRequest) (*filer_pb.TransferLocksResponse, error) { - - for _, lock := range req.Locks { - fs.filer.Dlm.InsertLock(lock.Name, lock.ExpiredAtNs, lock.RenewToken, lock.Owner) - } - - return &filer_pb.TransferLocksResponse{}, nil - -} - -func (fs *FilerServer) OnDlmChangeSnapshot(snapshot []pb.ServerAddress) { - locks := fs.filer.Dlm.SelectNotOwnedLocks(snapshot) - if len(locks) == 0 { - return - } - - for _, lock := range locks { - server := fs.filer.Dlm.CalculateTargetServer(lock.Key, snapshot) - // Use a context with timeout for lock transfer to avoid hanging indefinitely - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - err := pb.WithFilerClient(false, 0, server, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - _, err := client.TransferLocks(ctx, &filer_pb.TransferLocksRequest{ - Locks: []*filer_pb.Lock{ - { - Name: lock.Key, - RenewToken: lock.Token, - ExpiredAtNs: lock.ExpiredAtNs, - Owner: lock.Owner, - }, - }, - }) - return err - }) - cancel() - if err != nil { - // it may not be worth retrying, since the lock may have expired - glog.Errorf("transfer lock %v to %v: %v", lock.Key, server, err) - } - } - -} diff --git a/weed/server/filer_grpc_server_kv.go b/weed/server/filer_grpc_server_kv.go index be90b2014..3cb47115e 100644 --- a/weed/server/filer_grpc_server_kv.go +++ b/weed/server/filer_grpc_server_kv.go @@ -2,8 +2,8 @@ package weed_server import ( "context" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (fs *FilerServer) KvGet(ctx context.Context, req *filer_pb.KvGetRequest) (*filer_pb.KvGetResponse, error) { @@ -30,7 +30,6 @@ func (fs *FilerServer) KvPut(ctx context.Context, req *filer_pb.KvPutRequest) (* if err := fs.filer.Store.KvDelete(ctx, req.Key); err != nil { return &filer_pb.KvPutResponse{Error: err.Error()}, nil } - return &filer_pb.KvPutResponse{}, nil } err := fs.filer.Store.KvPut(ctx, req.Key, req.Value) diff --git a/weed/server/filer_grpc_server_remote.go b/weed/server/filer_grpc_server_remote.go index 081d49ba0..3be986023 100644 --- a/weed/server/filer_grpc_server_remote.go +++ b/weed/server/filer_grpc_server_remote.go @@ -7,15 +7,15 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" - "google.golang.org/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/remote_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req *filer_pb.CacheRemoteObjectToLocalClusterRequest) (*filer_pb.CacheRemoteObjectToLocalClusterResponse, error) { @@ -64,7 +64,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req } // detect storage option - so, err := fs.detectStorageOption(ctx, req.Directory, "", "", 0, "", "", "", "") + so, err := fs.detectStorageOption(req.Directory, "", "", 0, "", "", "", "") if err != nil { return resp, err } @@ -97,7 +97,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req } // assign one volume server - assignResult, err := operation.Assign(ctx, fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) + assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) if err != nil { fetchAndWriteErr = err return @@ -123,9 +123,8 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req // tell filer to tell volume server to download into needles assignedServerAddress := pb.NewServerAddressWithGrpcPort(assignResult.Url, assignResult.GrpcPort) - var etag string err = operation.WithVolumeServerClient(false, assignedServerAddress, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{ + _, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{ VolumeId: uint32(fileId.VolumeId), NeedleId: uint64(fileId.Key), Cookie: uint32(fileId.Cookie), @@ -142,8 +141,6 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req }) if fetchAndWriteErr != nil { return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr) - } else { - etag = resp.ETag } return nil }) @@ -154,12 +151,10 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req } chunks = append(chunks, &filer_pb.FileChunk{ - - FileId: assignResult.Fid, - Offset: localOffset, - Size: uint64(size), - ModifiedTsNs: time.Now().UnixNano(), - ETag: etag, + FileId: assignResult.Fid, + Offset: localOffset, + Size: uint64(size), + Mtime: time.Now().Unix(), Fid: &filer_pb.FileId{ VolumeId: uint32(fileId.VolumeId), FileKey: uint64(fileId.Key), @@ -174,7 +169,7 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req return nil, fetchAndWriteErr } - garbage := entry.GetChunks() + garbage := entry.Chunks newEntry := entry.ShallowClone() newEntry.Chunks = chunks @@ -184,10 +179,10 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req // this skips meta data log events if err := fs.filer.Store.UpdateEntry(context.Background(), newEntry); err != nil { - fs.filer.DeleteUncommittedChunks(ctx, chunks) + fs.filer.DeleteChunks(chunks) return nil, err } - fs.filer.DeleteChunks(ctx, entry.FullPath, garbage) + fs.filer.DeleteChunks(garbage) fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, false, nil) diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index db00dd496..7d6650b53 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -6,10 +6,10 @@ import ( "path/filepath" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { @@ -19,7 +19,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) - if err := fs.filer.CanRename(oldParent, newParent, req.OldName); err != nil { + if err := fs.filer.CanRename(oldParent, newParent); err != nil { return nil, err } @@ -55,7 +55,7 @@ func (fs *FilerServer) StreamRenameEntry(req *filer_pb.StreamRenameEntryRequest, oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) - if err := fs.filer.CanRename(oldParent, newParent, req.OldName); err != nil { + if err := fs.filer.CanRename(oldParent, newParent); err != nil { return err } @@ -165,7 +165,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee newEntry := &filer.Entry{ FullPath: newPath, Attr: entry.Attr, - Chunks: entry.GetChunks(), + Chunks: entry.Chunks, Extended: entry.Extended, Content: entry.Content, HardLinkCounter: entry.HardLinkCounter, @@ -173,7 +173,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee Remote: entry.Remote, Quota: entry.Quota, } - if createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, signatures, false, fs.filer.MaxFilenameLength); createErr != nil { + if createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, signatures, false); createErr != nil { return createErr } if stream != nil { @@ -202,8 +202,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee } // delete old entry - ctx = context.WithValue(ctx, "OP", "MV") - deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures, 0) + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, signatures) if deleteErr != nil { return deleteErr } diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go index f4df550e6..82261ca51 100644 --- a/weed/server/filer_grpc_server_sub_meta.go +++ b/weed/server/filer_grpc_server_sub_meta.go @@ -1,21 +1,18 @@ package weed_server import ( - "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/stats" "strings" - "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/proto" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" ) const ( @@ -25,23 +22,15 @@ const ( func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error { - ctx := stream.Context() - peerAddress := findClientAddress(ctx, 0) + peerAddress := findClientAddress(stream.Context(), 0) - isReplacing, alreadyKnown, clientName := fs.addClient("", req.ClientName, peerAddress, req.ClientId, req.ClientEpoch) - if isReplacing { - fs.filer.MetaAggregator.ListenersCond.Broadcast() // nudges the subscribers that are waiting - } else if alreadyKnown { - fs.filer.MetaAggregator.ListenersCond.Broadcast() // nudges the subscribers that are waiting + alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId) + if alreadyKnown { return fmt.Errorf("duplicated subscription detected for client %s id %d", clientName, req.ClientId) } - defer func() { - glog.V(0).Infof("disconnect %v subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) - fs.deleteClient("", clientName, req.ClientId, req.ClientEpoch) - fs.filer.MetaAggregator.ListenersCond.Broadcast() // nudges the subscribers that are waiting - }() + defer fs.deleteClient(clientName, req.ClientId) - lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2) + lastReadTime := time.Unix(0, req.SinceNs) glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName) @@ -59,58 +48,36 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn) if readPersistedLogErr != nil { - return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr) + return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr) } if isDone { return nil } - glog.V(4).Infof("processed to %v: %v", clientName, processedTsNs) if processedTsNs != 0 { - lastReadTime = log_buffer.NewMessagePosition(processedTsNs, -2) - } else { - nextDayTs := util.GetNextDayTsNano(lastReadTime.Time.UnixNano()) - position := log_buffer.NewMessagePosition(nextDayTs, -2) - found, err := fs.filer.HasPersistedLogFiles(position) - if err != nil { - return fmt.Errorf("checking persisted log files: %w", err) - } - if found { - lastReadTime = position - } + lastReadTime = time.Unix(0, processedTsNs) } glog.V(4).Infof("read in memory %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) lastReadTime, isDone, readInMemoryLogErr = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData("aggMeta:"+clientName, lastReadTime, req.UntilNs, func() bool { - // Check if the client has disconnected by monitoring the context - select { - case <-ctx.Done(): - return false - default: - } - fs.filer.MetaAggregator.ListenersLock.Lock() fs.filer.MetaAggregator.ListenersCond.Wait() fs.filer.MetaAggregator.ListenersLock.Unlock() - return fs.hasClient(req.ClientId, req.ClientEpoch) + return true }, eachLogEntryFn) if readInMemoryLogErr != nil { - if errors.Is(readInMemoryLogErr, log_buffer.ResumeFromDiskError) { + if readInMemoryLogErr == log_buffer.ResumeFromDiskError { continue } glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr) - if !errors.Is(readInMemoryLogErr, log_buffer.ResumeError) { + if readInMemoryLogErr != log_buffer.ResumeError { break } } if isDone { return nil } - if !fs.hasClient(req.ClientId, req.ClientEpoch) { - glog.V(0).Infof("client %v is closed", clientName) - return nil - } time.Sleep(1127 * time.Millisecond) } @@ -121,25 +88,21 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error { - ctx := stream.Context() - peerAddress := findClientAddress(ctx, 0) + peerAddress := findClientAddress(stream.Context(), 0) // use negative client id to differentiate from addClient()/deleteClient() used in SubscribeMetadata() req.ClientId = -req.ClientId - isReplacing, alreadyKnown, clientName := fs.addClient("local", req.ClientName, peerAddress, req.ClientId, req.ClientEpoch) - if isReplacing { - fs.listenersCond.Broadcast() // nudges the subscribers that are waiting - } else if alreadyKnown { + alreadyKnown, clientName := fs.addClient(req.ClientName, peerAddress, req.ClientId) + if alreadyKnown { return fmt.Errorf("duplicated local subscription detected for client %s clientId:%d", clientName, req.ClientId) } defer func() { - glog.V(0).Infof("disconnect %v local subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) - fs.deleteClient("local", clientName, req.ClientId, req.ClientEpoch) - fs.listenersCond.Broadcast() // nudges the subscribers that are waiting + glog.V(0).Infof(" - %v local subscribe %s clientId:%d", clientName, req.PathPrefix, req.ClientId) + fs.deleteClient(clientName, req.ClientId) }() - lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2) + lastReadTime := time.Unix(0, req.SinceNs) glog.V(0).Infof(" + %v local subscribe %s from %+v clientId:%d", clientName, req.PathPrefix, lastReadTime, req.ClientId) eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName) @@ -157,50 +120,27 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn) if readPersistedLogErr != nil { glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr) - return fmt.Errorf("reading from persisted logs: %w", readPersistedLogErr) + return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr) } if isDone { return nil } if processedTsNs != 0 { - lastReadTime = log_buffer.NewMessagePosition(processedTsNs, -2) + lastReadTime = time.Unix(0, processedTsNs) } else { if readInMemoryLogErr == log_buffer.ResumeFromDiskError { time.Sleep(1127 * time.Millisecond) continue } - // If no persisted entries were read for this day, check the next day for logs - nextDayTs := util.GetNextDayTsNano(lastReadTime.Time.UnixNano()) - position := log_buffer.NewMessagePosition(nextDayTs, -2) - found, err := fs.filer.HasPersistedLogFiles(position) - if err != nil { - return fmt.Errorf("checking persisted log files: %w", err) - } - if found { - lastReadTime = position - } } glog.V(0).Infof("read in memory %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) lastReadTime, isDone, readInMemoryLogErr = fs.filer.LocalMetaLogBuffer.LoopProcessLogData("localMeta:"+clientName, lastReadTime, req.UntilNs, func() bool { - - // Check if the client has disconnected by monitoring the context - select { - case <-ctx.Done(): - return false - default: - } - fs.listenersLock.Lock() - atomic.AddInt64(&fs.listenersWaits, 1) fs.listenersCond.Wait() - atomic.AddInt64(&fs.listenersWaits, -1) fs.listenersLock.Unlock() - if !fs.hasClient(req.ClientId, req.ClientEpoch) { - return false - } return true }, eachLogEntryFn) if readInMemoryLogErr != nil { @@ -215,28 +155,25 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq if isDone { return nil } - if !fs.hasClient(req.ClientId, req.ClientEpoch) { - return nil - } } return readInMemoryLogErr } -func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) log_buffer.EachLogEntryFuncType { - return func(logEntry *filer_pb.LogEntry) (bool, error) { +func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error { + return func(logEntry *filer_pb.LogEntry) error { event := &filer_pb.SubscribeMetadataResponse{} if err := proto.Unmarshal(logEntry.Data, event); err != nil { glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) - return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %w", err) + return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) } if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil { - return false, err + return err } - return false, nil + return nil } } @@ -286,8 +223,6 @@ func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRe if hasPrefixIn(fullpath, req.PathPrefixes) { // good - } else if matchByDirectory(dirPath, req.Directories) { - // good } else { if !strings.HasPrefix(fullpath, req.PathPrefix) { if eventNotification.NewParentPath != "" { @@ -328,52 +263,25 @@ func hasPrefixIn(text string, prefixes []string) bool { return false } -func matchByDirectory(dirPath string, directories []string) bool { - for _, dir := range directories { - if dirPath == dir { - return true - } - } - return false -} - -func (fs *FilerServer) addClient(prefix string, clientType string, clientAddress string, clientId int32, clientEpoch int32) (isReplacing, alreadyKnown bool, clientName string) { +func (fs *FilerServer) addClient(clientType string, clientAddress string, clientId int32) (alreadyKnown bool, clientName string) { clientName = clientType + "@" + clientAddress - glog.V(0).Infof("+ %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) + glog.V(0).Infof("+ listener %v", clientName) if clientId != 0 { fs.knownListenersLock.Lock() - defer fs.knownListenersLock.Unlock() - epoch, found := fs.knownListeners[clientId] - if !found || epoch < clientEpoch { - fs.knownListeners[clientId] = clientEpoch - isReplacing = true - } else { - alreadyKnown = true + _, alreadyKnown = fs.knownListeners[clientId] + if !alreadyKnown { + fs.knownListeners[clientId] = struct{}{} } + fs.knownListenersLock.Unlock() } return } -func (fs *FilerServer) deleteClient(prefix string, clientName string, clientId int32, clientEpoch int32) { - glog.V(0).Infof("- %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) +func (fs *FilerServer) deleteClient(clientName string, clientId int32) { + glog.V(0).Infof("- listener %v", clientName) if clientId != 0 { fs.knownListenersLock.Lock() - defer fs.knownListenersLock.Unlock() - epoch, found := fs.knownListeners[clientId] - if found && epoch <= clientEpoch { - delete(fs.knownListeners, clientId) - } + delete(fs.knownListeners, clientId) + fs.knownListenersLock.Unlock() } } - -func (fs *FilerServer) hasClient(clientId int32, clientEpoch int32) bool { - if clientId != 0 { - fs.knownListenersLock.Lock() - defer fs.knownListenersLock.Unlock() - epoch, found := fs.knownListeners[clientId] - if found && epoch <= clientEpoch { - return true - } - } - return false -} diff --git a/weed/server/filer_grpc_server_traverse_meta.go b/weed/server/filer_grpc_server_traverse_meta.go deleted file mode 100644 index 841e7b88b..000000000 --- a/weed/server/filer_grpc_server_traverse_meta.go +++ /dev/null @@ -1,84 +0,0 @@ -package weed_server - -import ( - "context" - "fmt" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/viant/ptrie" -) - -func (fs *FilerServer) TraverseBfsMetadata(req *filer_pb.TraverseBfsMetadataRequest, stream filer_pb.SeaweedFiler_TraverseBfsMetadataServer) error { - - glog.V(0).Infof("TraverseBfsMetadata %v", req) - - excludedTrie := ptrie.New[bool]() - for _, excluded := range req.ExcludedPrefixes { - excludedTrie.Put([]byte(excluded), true) - } - - ctx := stream.Context() - - queue := util.NewQueue[*filer.Entry]() - dirEntry, err := fs.filer.FindEntry(ctx, util.FullPath(req.Directory)) - if err != nil { - return fmt.Errorf("find dir %s: %v", req.Directory, err) - } - queue.Enqueue(dirEntry) - - for item := queue.Dequeue(); item != nil; item = queue.Dequeue() { - if excludedTrie.MatchPrefix([]byte(item.FullPath), func(key []byte, value bool) bool { - return true - }) { - // println("excluded", item.FullPath) - continue - } - parent, _ := item.FullPath.DirAndName() - if err := stream.Send(&filer_pb.TraverseBfsMetadataResponse{ - Directory: parent, - Entry: item.ToProtoEntry(), - }); err != nil { - return fmt.Errorf("send traverse bfs metadata response: %w", err) - } - - if !item.IsDirectory() { - continue - } - - if err := fs.iterateDirectory(ctx, item.FullPath, func(entry *filer.Entry) error { - queue.Enqueue(entry) - return nil - }); err != nil { - return err - } - } - - return nil -} - -func (fs *FilerServer) iterateDirectory(ctx context.Context, dirPath util.FullPath, fn func(entry *filer.Entry) error) (err error) { - var lastFileName string - var listErr error - for { - var hasEntries bool - lastFileName, listErr = fs.filer.StreamListDirectoryEntries(ctx, dirPath, lastFileName, false, 1024, "", "", "", func(entry *filer.Entry) bool { - hasEntries = true - if fnErr := fn(entry); fnErr != nil { - err = fnErr - return false - } - return true - }) - if listErr != nil { - return listErr - } - if err != nil { - return err - } - if !hasEntries { - return nil - } - } -} diff --git a/weed/server/filer_grpc_server_traverse_meta_test.go b/weed/server/filer_grpc_server_traverse_meta_test.go deleted file mode 100644 index 72f8a916e..000000000 --- a/weed/server/filer_grpc_server_traverse_meta_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package weed_server - -import ( - "github.com/stretchr/testify/assert" - "github.com/viant/ptrie" - "testing" -) - -func TestPtrie(t *testing.T) { - b := []byte("/topics/abc/dev") - excludedTrie := ptrie.New[bool]() - excludedTrie.Put([]byte("/topics/abc/d"), true) - excludedTrie.Put([]byte("/topics/abc"), true) - - assert.True(t, excludedTrie.MatchPrefix(b, func(key []byte, value bool) bool { - println("matched1", string(key)) - return true - })) - - assert.True(t, excludedTrie.MatchAll(b, func(key []byte, value bool) bool { - println("matched2", string(key)) - return true - })) - - assert.False(t, excludedTrie.MatchAll([]byte("/topics/ab"), func(key []byte, value bool) bool { - println("matched3", string(key)) - return true - })) - - assert.False(t, excludedTrie.Has(b)) -} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index f395f6d60..6bf0261ee 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -5,57 +5,52 @@ import ( "fmt" "net/http" "os" - "strings" "sync" - "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/stats" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/filer" - _ "github.com/seaweedfs/seaweedfs/weed/filer/arangodb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/cassandra" - _ "github.com/seaweedfs/seaweedfs/weed/filer/cassandra2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/elastic/v7" - _ "github.com/seaweedfs/seaweedfs/weed/filer/etcd" - _ "github.com/seaweedfs/seaweedfs/weed/filer/hbase" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/leveldb3" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mongodb" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mysql" - _ "github.com/seaweedfs/seaweedfs/weed/filer/mysql2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/postgres" - _ "github.com/seaweedfs/seaweedfs/weed/filer/postgres2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis2" - _ "github.com/seaweedfs/seaweedfs/weed/filer/redis3" - _ "github.com/seaweedfs/seaweedfs/weed/filer/sqlite" - _ "github.com/seaweedfs/seaweedfs/weed/filer/tarantool" - _ "github.com/seaweedfs/seaweedfs/weed/filer/ydb" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/notification" - _ "github.com/seaweedfs/seaweedfs/weed/notification/aws_sqs" - _ "github.com/seaweedfs/seaweedfs/weed/notification/gocdk_pub_sub" - _ "github.com/seaweedfs/seaweedfs/weed/notification/google_pub_sub" - _ "github.com/seaweedfs/seaweedfs/weed/notification/kafka" - _ "github.com/seaweedfs/seaweedfs/weed/notification/log" - _ "github.com/seaweedfs/seaweedfs/weed/notification/webhook" - "github.com/seaweedfs/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/filer" + _ "github.com/chrislusf/seaweedfs/weed/filer/arangodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis3" + _ "github.com/chrislusf/seaweedfs/weed/filer/sqlite" + _ "github.com/chrislusf/seaweedfs/weed/filer/ydb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" + _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" + _ "github.com/chrislusf/seaweedfs/weed/notification/log" + "github.com/chrislusf/seaweedfs/weed/security" ) type FilerOption struct { - Masters *pb.ServerDiscovery + Masters map[string]pb.ServerAddress FilerGroup string Collection string DefaultReplication string @@ -73,37 +68,33 @@ type FilerOption struct { SaveToFilerLimit int64 ConcurrentUploadLimit int64 ShowUIDirectoryDelete bool - DownloadMaxBytesPs int64 - DiskType string - AllowedOrigins []string - ExposeDirectoryData bool } type FilerServer struct { - inFlightDataSize int64 - listenersWaits int64 - - // notifying clients - listenersLock sync.Mutex - listenersCond *sync.Cond - - inFlightDataLimitCond *sync.Cond - filer_pb.UnimplementedSeaweedFilerServer option *FilerOption secret security.SigningKey filer *filer.Filer filerGuard *security.Guard - volumeGuard *security.Guard grpcDialOption grpc.DialOption // metrics read from the master metricsAddress string metricsIntervalSec int + // notifying clients + listenersLock sync.Mutex + listenersCond *sync.Cond + // track known metadata listeners knownListenersLock sync.Mutex - knownListeners map[int32]int32 + knownListeners map[int32]struct{} + + brokers map[string]map[string]bool + brokersLock sync.Mutex + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { @@ -117,37 +108,31 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) v.SetDefault("jwt.filer_signing.read.expires_after_seconds", 60) readExpiresAfterSec := v.GetInt("jwt.filer_signing.read.expires_after_seconds") - volumeSigningKey := v.GetString("jwt.signing.key") - v.SetDefault("jwt.signing.expires_after_seconds", 10) - volumeExpiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") - - volumeReadSigningKey := v.GetString("jwt.signing.read.key") - v.SetDefault("jwt.signing.read.expires_after_seconds", 60) - volumeReadExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") - - v.SetDefault("cors.allowed_origins.values", "*") - - allowedOrigins := v.GetString("cors.allowed_origins.values") - domains := strings.Split(allowedOrigins, ",") - option.AllowedOrigins = domains - - v.SetDefault("filer.expose_directory_metadata.enabled", true) - returnDirMetadata := v.GetBool("filer.expose_directory_metadata.enabled") - option.ExposeDirectoryData = returnDirMetadata - fs = &FilerServer{ option: option, grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), - knownListeners: make(map[int32]int32), + knownListeners: make(map[int32]struct{}), + brokers: make(map[string]map[string]bool), inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), } fs.listenersCond = sync.NewCond(&fs.listenersLock) - option.Masters.RefreshBySrvIfAvailable() - if len(option.Masters.GetInstances()) == 0 { + if len(option.Masters) == 0 { glog.Fatal("master list is required!") } + fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.FilerGroup, option.Collection, option.DefaultReplication, option.DataCenter, func() { + fs.listenersCond.Broadcast() + }) + fs.filer.Cipher = option.Cipher + // we do not support IP whitelist right now + fs.filerGuard = security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) + + fs.checkWithMaster() + + go stats.LoopPushingMetric("filer", string(fs.option.Host), fs.metricsAddress, fs.metricsIntervalSec) + go fs.filer.KeepMasterClientConnected() + if !util.LoadConfiguration("filer", false) { v.SetDefault("leveldb2.enabled", true) v.SetDefault("leveldb2.dir", option.DefaultLevelDbDir) @@ -161,29 +146,11 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) } util.LoadConfiguration("notification", false) - v.SetDefault("filer.options.max_file_name_length", 255) - maxFilenameLength := v.GetUint32("filer.options.max_file_name_length") - glog.V(0).Infof("max_file_name_length %d", maxFilenameLength) - fs.filer = filer.NewFiler(*option.Masters, fs.grpcDialOption, option.Host, option.FilerGroup, option.Collection, option.DefaultReplication, option.DataCenter, maxFilenameLength, func() { - if atomic.LoadInt64(&fs.listenersWaits) > 0 { - fs.listenersCond.Broadcast() - } - }) - fs.filer.Cipher = option.Cipher - whiteList := util.StringSplit(v.GetString("guard.white_list"), ",") - fs.filerGuard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) - fs.volumeGuard = security.NewGuard([]string{}, volumeSigningKey, volumeExpiresAfterSec, volumeReadSigningKey, volumeReadExpiresAfterSec) - - fs.checkWithMaster() - - go stats.LoopPushingMetric("filer", string(fs.option.Host), fs.metricsAddress, fs.metricsIntervalSec) - go fs.filer.KeepMasterClientConnected(context.Background()) - fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") v.SetDefault("filer.options.buckets_folder", "/buckets") fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder") - // TODO deprecated, will be removed after 2020-12-31 - // replaced by https://github.com/seaweedfs/seaweedfs/wiki/Path-Specific-Configuration + // TODO deprecated, will be be removed after 2020-12-31 + // replaced by https://github.com/chrislusf/seaweedfs/wiki/Path-Specific-Configuration // fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync") isFresh := fs.filer.LoadConfiguration(v) @@ -191,21 +158,19 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) handleStaticResources(defaultMux) if !option.DisableHttp { - defaultMux.HandleFunc("/healthz", requestIDMiddleware(fs.filerHealthzHandler)) - defaultMux.HandleFunc("/", fs.filerGuard.WhiteList(requestIDMiddleware(fs.filerHandler))) + defaultMux.HandleFunc("/", fs.filerHandler) } if defaultMux != readonlyMux { handleStaticResources(readonlyMux) - readonlyMux.HandleFunc("/healthz", requestIDMiddleware(fs.filerHealthzHandler)) - readonlyMux.HandleFunc("/", fs.filerGuard.WhiteList(requestIDMiddleware(fs.readonlyFilerHandler))) + readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } - existingNodes := fs.filer.ListExistingPeerUpdates(context.Background()) + existingNodes := fs.filer.ListExistingPeerUpdates() startFromTime := time.Now().Add(-filer.LogFlushInterval) if isFresh { glog.V(0).Infof("%s bootstrap from peers %+v", option.Host, existingNodes) - if err := fs.filer.MaybeBootstrapFromOnePeer(option.Host, existingNodes, startFromTime); err != nil { - glog.Fatalf("%s bootstrap from %+v: %v", option.Host, existingNodes, err) + if err := fs.filer.MaybeBootstrapFromPeers(option.Host, existingNodes, startFromTime); err != nil { + glog.Fatalf("%s bootstrap from %+v", option.Host, existingNodes) } } fs.filer.AggregateFromPeers(option.Host, existingNodes, startFromTime) @@ -214,13 +179,10 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs.filer.LoadRemoteStorageConfAndMapping() - grace.OnReload(fs.Reload) grace.OnInterrupt(func() { fs.filer.Shutdown() }) - fs.filer.Dlm.LockRing.SetTakeSnapshotCallback(fs.OnDlmChangeSnapshot) - return fs, nil } @@ -228,14 +190,16 @@ func (fs *FilerServer) checkWithMaster() { isConnected := false for !isConnected { - fs.option.Masters.RefreshBySrvIfAvailable() - for _, master := range fs.option.Masters.GetInstances() { + for _, master := range fs.option.Masters { readErr := operation.WithMasterServerClient(false, master, fs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) if err != nil { return fmt.Errorf("get master %s configuration: %v", master, err) } fs.metricsAddress, fs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + if fs.option.DefaultReplication == "" { + fs.option.DefaultReplication = resp.DefaultReplication + } return nil }) if readErr == nil { @@ -245,12 +209,5 @@ func (fs *FilerServer) checkWithMaster() { } } } -} -func (fs *FilerServer) Reload() { - glog.V(0).Infoln("Reload filer server...") - - util.LoadConfiguration("security", false) - v := util.GetViper() - fs.filerGuard.UpdateWhiteList(util.StringSplit(v.GetString("guard.white_list"), ",")) } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index dcfc8e3ed..6f0d0b7ca 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -1,58 +1,32 @@ package weed_server import ( - "context" "errors" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" "net/http" - "os" - "strconv" "strings" "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/stats" ) func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() - inFlightGauge := stats.FilerInFlightRequestsGauge.WithLabelValues(r.Method) - inFlightGauge.Inc() - defer inFlightGauge.Dec() - - statusRecorder := stats.NewStatusResponseWriter(w) - w = statusRecorder - origin := r.Header.Get("Origin") - if origin != "" { - if fs.option.AllowedOrigins == nil || len(fs.option.AllowedOrigins) == 0 || fs.option.AllowedOrigins[0] == "*" { - origin = "*" - } else { - originFound := false - for _, allowedOrigin := range fs.option.AllowedOrigins { - if origin == allowedOrigin { - originFound = true - } - } - if !originFound { - writeJsonError(w, r, http.StatusForbidden, errors.New("origin not allowed")) - return - } - } - - w.Header().Set("Access-Control-Allow-Origin", origin) - w.Header().Set("Access-Control-Expose-Headers", "*") - w.Header().Set("Access-Control-Allow-Headers", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") - w.Header().Set("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") + if r.Method == "OPTIONS" { + stats.FilerRequestCounter.WithLabelValues("options").Inc() + OptionsHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("options").Observe(time.Since(start).Seconds()) + return } - if r.Method == http.MethodOptions { - OptionsHandler(w, r, false) + isReadHttpCall := r.Method == "GET" || r.Method == "HEAD" + if !fs.maybeCheckJwtAuthorization(r, !isReadHttpCall) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) return } @@ -62,43 +36,42 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { fileId = r.RequestURI[len("/?proxyChunkId="):] } if fileId != "" { + stats.FilerRequestCounter.WithLabelValues("proxy").Inc() fs.proxyToVolumeServer(w, r, fileId) - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkProxy).Inc() - stats.FilerRequestHistogram.WithLabelValues(stats.ChunkProxy).Observe(time.Since(start).Seconds()) - return - } - requestMethod := r.Method - defer func(method *string) { - stats.FilerRequestCounter.WithLabelValues(*method, strconv.Itoa(statusRecorder.Status)).Inc() - stats.FilerRequestHistogram.WithLabelValues(*method).Observe(time.Since(start).Seconds()) - }(&requestMethod) - - isReadHttpCall := r.Method == http.MethodGet || r.Method == http.MethodHead - if !fs.maybeCheckJwtAuthorization(r, !isReadHttpCall) { - writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds()) return } - w.Header().Set("Server", "SeaweedFS "+version.VERSION) - + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { - case http.MethodGet, http.MethodHead: + case "GET": + stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r) - case http.MethodDelete: + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) + case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() + fs.GetOrHeadHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) + case "DELETE": + stats.FilerRequestCounter.WithLabelValues("delete").Inc() if _, ok := r.URL.Query()["tagging"]; ok { fs.DeleteTaggingHandler(w, r) } else { fs.DeleteHandler(w, r) } - case http.MethodPost, http.MethodPut: + stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) + case "POST", "PUT": + // wait until in flight data is less than the limit contentLength := getContentLength(r) fs.inFlightDataLimitCond.L.Lock() - inFlightDataSize := atomic.LoadInt64(&fs.inFlightDataSize) - for fs.option.ConcurrentUploadLimit != 0 && inFlightDataSize > fs.option.ConcurrentUploadLimit { - glog.V(4).Infof("wait because inflight data %d > %d", inFlightDataSize, fs.option.ConcurrentUploadLimit) + for fs.option.ConcurrentUploadLimit != 0 && atomic.LoadInt64(&fs.inFlightDataSize) > fs.option.ConcurrentUploadLimit { + glog.V(4).Infof("wait because inflight data %d > %d", fs.inFlightDataSize, fs.option.ConcurrentUploadLimit) fs.inFlightDataLimitCond.Wait() - inFlightDataSize = atomic.LoadInt64(&fs.inFlightDataSize) } fs.inFlightDataLimitCond.L.Unlock() atomic.AddInt64(&fs.inFlightDataSize, contentLength) @@ -107,58 +80,31 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { fs.inFlightDataLimitCond.Signal() }() - if r.Method == http.MethodPut { + if r.Method == "PUT" { + stats.FilerRequestCounter.WithLabelValues("put").Inc() if _, ok := r.URL.Query()["tagging"]; ok { fs.PutTaggingHandler(w, r) } else { fs.PostHandler(w, r, contentLength) } + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) } else { // method == "POST" + stats.FilerRequestCounter.WithLabelValues("post").Inc() fs.PostHandler(w, r, contentLength) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) } - default: - requestMethod = "INVALID" - w.WriteHeader(http.StatusMethodNotAllowed) } } func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) { start := time.Now() - statusRecorder := stats.NewStatusResponseWriter(w) - w = statusRecorder - os.Stdout.WriteString("Request: " + r.Method + " " + r.URL.String() + "\n") - - origin := r.Header.Get("Origin") - if origin != "" { - if fs.option.AllowedOrigins == nil || len(fs.option.AllowedOrigins) == 0 || fs.option.AllowedOrigins[0] == "*" { - origin = "*" - } else { - originFound := false - for _, allowedOrigin := range fs.option.AllowedOrigins { - if origin == allowedOrigin { - originFound = true - } - } - if !originFound { - writeJsonError(w, r, http.StatusForbidden, errors.New("origin not allowed")) - return - } - } - - w.Header().Set("Access-Control-Allow-Origin", origin) - w.Header().Set("Access-Control-Allow-Headers", "OPTIONS, GET, HEAD") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } - requestMethod := r.Method - defer func(method *string) { - stats.FilerRequestCounter.WithLabelValues(*method, strconv.Itoa(statusRecorder.Status)).Inc() - stats.FilerRequestHistogram.WithLabelValues(*method).Observe(time.Since(start).Seconds()) - }(&requestMethod) // We handle OPTIONS first because it never should be authenticated - if r.Method == http.MethodOptions { + if r.Method == "OPTIONS" { + stats.FilerRequestCounter.WithLabelValues("options").Inc() OptionsHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("options").Observe(time.Since(start).Seconds()) return } @@ -167,26 +113,30 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque return } - w.Header().Set("Server", "SeaweedFS "+version.VERSION) - + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { - case http.MethodGet, http.MethodHead: + case "GET": + stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r) - default: - requestMethod = "INVALID" - w.WriteHeader(http.StatusMethodNotAllowed) + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) + case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() + fs.GetOrHeadHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) } } func OptionsHandler(w http.ResponseWriter, r *http.Request, isReadOnly bool) { if isReadOnly { - w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS") + w.Header().Add("Access-Control-Allow-Methods", "GET, OPTIONS") } else { - w.Header().Set("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") - w.Header().Set("Access-Control-Expose-Headers", "*") + w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") } - w.Header().Set("Access-Control-Allow-Headers", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") + w.Header().Add("Access-Control-Allow-Headers", "*") } // maybeCheckJwtAuthorization returns true if access should be granted, false if it should be denied @@ -226,13 +176,3 @@ func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool) return true } } - -func (fs *FilerServer) filerHealthzHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Server", "SeaweedFS "+version.VERSION) - if _, err := fs.filer.Store.FindEntry(context.Background(), filer.TopicsDir); err != nil && err != filer_pb.ErrNotFound { - glog.Warningf("filerHealthzHandler FindEntry: %+v", err) - w.WriteHeader(http.StatusServiceUnavailable) - } else { - w.WriteHeader(http.StatusOK) - } -} diff --git a/weed/server/filer_server_handlers_copy.go b/weed/server/filer_server_handlers_copy.go deleted file mode 100644 index 6320d62fb..000000000 --- a/weed/server/filer_server_handlers_copy.go +++ /dev/null @@ -1,547 +0,0 @@ -package weed_server - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "strings" - "time" - - "golang.org/x/sync/errgroup" - "google.golang.org/protobuf/proto" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/util" -) - -func (fs *FilerServer) copy(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) { - src := r.URL.Query().Get("cp.from") - dst := r.URL.Path - - glog.V(2).InfofCtx(ctx, "FilerServer.copy %v to %v", src, dst) - - var err error - if src, err = clearName(src); err != nil { - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - if dst, err = clearName(dst); err != nil { - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - src = strings.TrimRight(src, "/") - if src == "" { - err = fmt.Errorf("invalid source '/'") - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - - srcPath := util.FullPath(src) - dstPath := util.FullPath(dst) - if dstPath.IsLongerFileName(so.MaxFileNameLength) { - err = fmt.Errorf("dst name too long") - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - - srcEntry, err := fs.filer.FindEntry(ctx, srcPath) - if err != nil { - err = fmt.Errorf("failed to get src entry '%s': %w", src, err) - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - - glog.V(1).InfofCtx(ctx, "FilerServer.copy source entry: content_len=%d, chunks_len=%d", len(srcEntry.Content), len(srcEntry.GetChunks())) - - // Check if source is a directory - currently not supported for recursive copying - if srcEntry.IsDirectory() { - err = fmt.Errorf("copy: directory copying not yet supported for '%s'", src) - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - - _, oldName := srcPath.DirAndName() - finalDstPath := dstPath - - // Check if destination is a directory - dstPathEntry, findErr := fs.filer.FindEntry(ctx, dstPath) - if findErr != nil && findErr != filer_pb.ErrNotFound { - err = fmt.Errorf("failed to check destination path %s: %w", dstPath, findErr) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - - if findErr == nil && dstPathEntry.IsDirectory() { - finalDstPath = dstPath.Child(oldName) - } else { - newDir, newName := dstPath.DirAndName() - newName = util.Nvl(newName, oldName) - finalDstPath = util.FullPath(newDir).Child(newName) - } - - // Check if destination file already exists - // TODO: add an overwrite parameter to allow overwriting - if dstEntry, err := fs.filer.FindEntry(ctx, finalDstPath); err != nil && err != filer_pb.ErrNotFound { - err = fmt.Errorf("failed to check destination entry %s: %w", finalDstPath, err) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } else if dstEntry != nil { - err = fmt.Errorf("destination file %s already exists", finalDstPath) - writeJsonError(w, r, http.StatusConflict, err) - return - } - - // Copy the file content and chunks - newEntry, err := fs.copyEntry(ctx, srcEntry, finalDstPath, so) - if err != nil { - err = fmt.Errorf("failed to copy entry from '%s' to '%s': %w", src, dst, err) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - - if createErr := fs.filer.CreateEntry(ctx, newEntry, true, false, nil, false, fs.filer.MaxFilenameLength); createErr != nil { - err = fmt.Errorf("failed to create copied entry from '%s' to '%s': %w", src, dst, createErr) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - - glog.V(1).InfofCtx(ctx, "FilerServer.copy completed successfully: src='%s' -> dst='%s' (final_path='%s')", src, dst, finalDstPath) - - w.WriteHeader(http.StatusNoContent) -} - -// copyEntry creates a new entry with copied content and chunks -func (fs *FilerServer) copyEntry(ctx context.Context, srcEntry *filer.Entry, dstPath util.FullPath, so *operation.StorageOption) (*filer.Entry, error) { - // Create the base entry structure - // Note: For hard links, we copy the actual content but NOT the HardLinkId/HardLinkCounter - // This creates an independent copy rather than another hard link to the same content - newEntry := &filer.Entry{ - FullPath: dstPath, - // Deep copy Attr field to ensure slice independence (GroupNames, Md5) - Attr: func(a filer.Attr) filer.Attr { - a.GroupNames = append([]string(nil), a.GroupNames...) - a.Md5 = append([]byte(nil), a.Md5...) - return a - }(srcEntry.Attr), - Quota: srcEntry.Quota, - // Intentionally NOT copying HardLinkId and HardLinkCounter to create independent copy - } - - // Deep copy Extended fields to ensure independence - if srcEntry.Extended != nil { - newEntry.Extended = make(map[string][]byte, len(srcEntry.Extended)) - for k, v := range srcEntry.Extended { - newEntry.Extended[k] = append([]byte(nil), v...) - } - } - - // Deep copy Remote field to ensure independence - if srcEntry.Remote != nil { - newEntry.Remote = &filer_pb.RemoteEntry{ - StorageName: srcEntry.Remote.StorageName, - LastLocalSyncTsNs: srcEntry.Remote.LastLocalSyncTsNs, - RemoteETag: srcEntry.Remote.RemoteETag, - RemoteMtime: srcEntry.Remote.RemoteMtime, - RemoteSize: srcEntry.Remote.RemoteSize, - } - } - - // Log if we're copying a hard link so we can track this behavior - if len(srcEntry.HardLinkId) > 0 { - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: copying hard link %s (nlink=%d) as independent file", srcEntry.FullPath, srcEntry.HardLinkCounter) - } - - // Handle small files stored in Content field - if len(srcEntry.Content) > 0 { - // For small files, just copy the content directly - newEntry.Content = make([]byte, len(srcEntry.Content)) - copy(newEntry.Content, srcEntry.Content) - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: copied content directly, size=%d", len(newEntry.Content)) - return newEntry, nil - } - - // Handle files stored as chunks (including resolved hard link content) - if len(srcEntry.GetChunks()) > 0 { - srcChunks := srcEntry.GetChunks() - - // Create HTTP client once for reuse across all chunk operations - client := &http.Client{Timeout: 60 * time.Second} - - // Check if any chunks are manifest chunks - these require special handling - if filer.HasChunkManifest(srcChunks) { - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: handling manifest chunks") - newChunks, err := fs.copyChunksWithManifest(ctx, srcChunks, so, client) - if err != nil { - return nil, fmt.Errorf("failed to copy chunks with manifest: %w", err) - } - newEntry.Chunks = newChunks - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: copied manifest chunks, count=%d", len(newChunks)) - } else { - // Regular chunks without manifest - copy directly - newChunks, err := fs.copyChunks(ctx, srcChunks, so, client) - if err != nil { - return nil, fmt.Errorf("failed to copy chunks: %w", err) - } - newEntry.Chunks = newChunks - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: copied regular chunks, count=%d", len(newChunks)) - } - return newEntry, nil - } - - // Empty file case (or hard link with no content - should not happen if hard link was properly resolved) - if len(srcEntry.HardLinkId) > 0 { - glog.WarningfCtx(ctx, "FilerServer.copyEntry: hard link %s appears to have no content - this may indicate an issue with hard link resolution", srcEntry.FullPath) - } - glog.V(2).InfofCtx(ctx, "FilerServer.copyEntry: empty file, no content or chunks to copy") - return newEntry, nil -} - -// copyChunks creates new chunks by copying data from source chunks using parallel streaming approach -func (fs *FilerServer) copyChunks(ctx context.Context, srcChunks []*filer_pb.FileChunk, so *operation.StorageOption, client *http.Client) ([]*filer_pb.FileChunk, error) { - if len(srcChunks) == 0 { - return nil, nil - } - - // Optimize: Batch volume lookup for all chunks to reduce RPC calls - volumeLocationsMap, err := fs.batchLookupVolumeLocations(ctx, srcChunks) - if err != nil { - return nil, fmt.Errorf("failed to lookup volume locations: %w", err) - } - - // Parallel chunk copying with concurrency control using errgroup - const maxConcurrentChunks = 8 // Match SeaweedFS standard for parallel operations - - // Pre-allocate result slice to maintain order - newChunks := make([]*filer_pb.FileChunk, len(srcChunks)) - - // Use errgroup for cleaner concurrency management - g, gCtx := errgroup.WithContext(ctx) - g.SetLimit(maxConcurrentChunks) // Limit concurrent goroutines - - // Validate that all chunk locations are available before starting any concurrent work - for _, chunk := range srcChunks { - volumeId := chunk.Fid.VolumeId - locations, ok := volumeLocationsMap[volumeId] - if !ok || len(locations) == 0 { - return nil, fmt.Errorf("no locations found for volume %d", volumeId) - } - } - - glog.V(2).InfofCtx(ctx, "FilerServer.copyChunks: starting parallel copy of %d chunks with max concurrency %d", len(srcChunks), maxConcurrentChunks) - - // Launch goroutines for each chunk - for i, srcChunk := range srcChunks { - // Capture loop variables for goroutine closure - chunkIndex := i - chunk := srcChunk - chunkLocations := volumeLocationsMap[srcChunk.Fid.VolumeId] - - g.Go(func() error { - glog.V(3).InfofCtx(gCtx, "FilerServer.copyChunks: copying chunk %d/%d, size=%d", chunkIndex+1, len(srcChunks), chunk.Size) - - // Use streaming copy to avoid loading entire chunk into memory - newChunk, err := fs.streamCopyChunk(gCtx, chunk, so, client, chunkLocations) - if err != nil { - return fmt.Errorf("failed to copy chunk %d (%s): %w", chunkIndex+1, chunk.GetFileIdString(), err) - } - - // Store result at correct index to maintain order - newChunks[chunkIndex] = newChunk - - glog.V(4).InfofCtx(gCtx, "FilerServer.copyChunks: successfully copied chunk %d/%d", chunkIndex+1, len(srcChunks)) - return nil - }) - } - - // Wait for all chunks to complete and return first error (if any) - if err := g.Wait(); err != nil { - return nil, err - } - - // Verify all chunks were copied (shouldn't happen if no errors, but safety check) - for i, chunk := range newChunks { - if chunk == nil { - return nil, fmt.Errorf("chunk %d was not copied (internal error)", i) - } - } - - glog.V(2).InfofCtx(ctx, "FilerServer.copyChunks: successfully completed parallel copy of %d chunks", len(srcChunks)) - return newChunks, nil -} - -// copyChunksWithManifest handles copying chunks that include manifest chunks -func (fs *FilerServer) copyChunksWithManifest(ctx context.Context, srcChunks []*filer_pb.FileChunk, so *operation.StorageOption, client *http.Client) ([]*filer_pb.FileChunk, error) { - if len(srcChunks) == 0 { - return nil, nil - } - - glog.V(2).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: processing %d chunks (some are manifests)", len(srcChunks)) - - // Separate manifest chunks from regular data chunks - manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(srcChunks) - - var newChunks []*filer_pb.FileChunk - - // First, copy all non-manifest chunks directly - if len(nonManifestChunks) > 0 { - glog.V(3).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: copying %d non-manifest chunks", len(nonManifestChunks)) - newNonManifestChunks, err := fs.copyChunks(ctx, nonManifestChunks, so, client) - if err != nil { - return nil, fmt.Errorf("failed to copy non-manifest chunks: %w", err) - } - newChunks = append(newChunks, newNonManifestChunks...) - } - - // Process each manifest chunk separately - for i, manifestChunk := range manifestChunks { - glog.V(3).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: processing manifest chunk %d/%d", i+1, len(manifestChunks)) - - // Resolve the manifest chunk to get the actual data chunks it references - lookupFileIdFn := func(ctx context.Context, fileId string) (urls []string, err error) { - return fs.filer.MasterClient.GetLookupFileIdFunction()(ctx, fileId) - } - - resolvedChunks, err := filer.ResolveOneChunkManifest(ctx, lookupFileIdFn, manifestChunk) - if err != nil { - return nil, fmt.Errorf("failed to resolve manifest chunk %s: %w", manifestChunk.GetFileIdString(), err) - } - - glog.V(4).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: resolved manifest chunk %s to %d data chunks", - manifestChunk.GetFileIdString(), len(resolvedChunks)) - - // Copy all the resolved data chunks (use recursive copyChunksWithManifest to handle nested manifests) - newResolvedChunks, err := fs.copyChunksWithManifest(ctx, resolvedChunks, so, client) - if err != nil { - return nil, fmt.Errorf("failed to copy resolved chunks from manifest %s: %w", manifestChunk.GetFileIdString(), err) - } - - // Create a new manifest chunk that references the copied data chunks - newManifestChunk, err := fs.createManifestChunk(ctx, newResolvedChunks, manifestChunk, so, client) - if err != nil { - return nil, fmt.Errorf("failed to create new manifest chunk: %w", err) - } - - newChunks = append(newChunks, newManifestChunk) - - glog.V(4).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: created new manifest chunk %s for %d resolved chunks", - newManifestChunk.GetFileIdString(), len(newResolvedChunks)) - } - - glog.V(2).InfofCtx(ctx, "FilerServer.copyChunksWithManifest: completed copying %d total chunks (%d manifest, %d regular)", - len(newChunks), len(manifestChunks), len(nonManifestChunks)) - - return newChunks, nil -} - -// createManifestChunk creates a new manifest chunk that references the provided data chunks -func (fs *FilerServer) createManifestChunk(ctx context.Context, dataChunks []*filer_pb.FileChunk, originalManifest *filer_pb.FileChunk, so *operation.StorageOption, client *http.Client) (*filer_pb.FileChunk, error) { - // Create the manifest data structure - filer_pb.BeforeEntrySerialization(dataChunks) - - manifestData := &filer_pb.FileChunkManifest{ - Chunks: dataChunks, - } - - // Serialize the manifest - data, err := proto.Marshal(manifestData) - if err != nil { - return nil, fmt.Errorf("failed to marshal manifest: %w", err) - } - - // Save the manifest data as a new chunk - saveFunc := func(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { - // Assign a new file ID - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(ctx, so) - if assignErr != nil { - return nil, fmt.Errorf("failed to assign file ID for manifest: %w", assignErr) - } - - // Upload the manifest data - err = fs.uploadData(ctx, reader, urlLocation, string(auth), client) - if err != nil { - return nil, fmt.Errorf("failed to upload manifest data: %w", err) - } - - // Create the chunk metadata - chunk = &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(len(data)), - } - return chunk, nil - } - - manifestChunk, err := saveFunc(bytes.NewReader(data), "", originalManifest.Offset, 0) - if err != nil { - return nil, fmt.Errorf("failed to save manifest chunk: %w", err) - } - - // Set manifest-specific properties - manifestChunk.IsChunkManifest = true - manifestChunk.Size = originalManifest.Size - - return manifestChunk, nil -} - -// uploadData uploads data to a volume server -func (fs *FilerServer) uploadData(ctx context.Context, reader io.Reader, urlLocation, auth string, client *http.Client) error { - req, err := http.NewRequestWithContext(ctx, "PUT", urlLocation, reader) - if err != nil { - return fmt.Errorf("failed to create upload request: %w", err) - } - - if auth != "" { - req.Header.Set("Authorization", "Bearer "+auth) - } - - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("failed to upload data: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { - body, readErr := io.ReadAll(resp.Body) - if readErr != nil { - return fmt.Errorf("upload failed with status %d, and failed to read response: %w", resp.StatusCode, readErr) - } - return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body)) - } - - return nil -} - -// batchLookupVolumeLocations performs a single batched lookup for all unique volume IDs in the chunks -func (fs *FilerServer) batchLookupVolumeLocations(ctx context.Context, chunks []*filer_pb.FileChunk) (map[uint32][]operation.Location, error) { - // Collect unique volume IDs and their string representations to avoid repeated conversions - volumeIdMap := make(map[uint32]string) - for _, chunk := range chunks { - vid := chunk.Fid.VolumeId - if _, found := volumeIdMap[vid]; !found { - volumeIdMap[vid] = fmt.Sprintf("%d", vid) - } - } - - if len(volumeIdMap) == 0 { - return make(map[uint32][]operation.Location), nil - } - - // Convert to slice of strings for the lookup call - volumeIdStrs := make([]string, 0, len(volumeIdMap)) - for _, vidStr := range volumeIdMap { - volumeIdStrs = append(volumeIdStrs, vidStr) - } - - // Perform single batched lookup - lookupResult, err := operation.LookupVolumeIds(fs.filer.GetMaster, fs.grpcDialOption, volumeIdStrs) - if err != nil { - return nil, fmt.Errorf("failed to lookup volumes: %w", err) - } - - // Convert result to map of volumeId -> locations - volumeLocationsMap := make(map[uint32][]operation.Location) - for volumeId, volumeIdStr := range volumeIdMap { - if volumeLocations, ok := lookupResult[volumeIdStr]; ok && len(volumeLocations.Locations) > 0 { - volumeLocationsMap[volumeId] = volumeLocations.Locations - } - } - - return volumeLocationsMap, nil -} - -// streamCopyChunk copies a chunk using streaming to minimize memory usage -func (fs *FilerServer) streamCopyChunk(ctx context.Context, srcChunk *filer_pb.FileChunk, so *operation.StorageOption, client *http.Client, locations []operation.Location) (*filer_pb.FileChunk, error) { - // Assign a new file ID for destination - fileId, urlLocation, auth, err := fs.assignNewFileInfo(ctx, so) - if err != nil { - return nil, fmt.Errorf("failed to assign new file ID: %w", err) - } - - // Try all available locations for source chunk until one succeeds - fileIdString := srcChunk.GetFileIdString() - var lastErr error - - for i, location := range locations { - srcUrl := fmt.Sprintf("http://%s/%s", location.Url, fileIdString) - glog.V(4).InfofCtx(ctx, "FilerServer.streamCopyChunk: attempting streaming copy from %s to %s (attempt %d/%d)", srcUrl, urlLocation, i+1, len(locations)) - - // Perform streaming copy using HTTP client - err := fs.performStreamCopy(ctx, srcUrl, urlLocation, string(auth), srcChunk.Size, client) - if err != nil { - lastErr = err - glog.V(2).InfofCtx(ctx, "FilerServer.streamCopyChunk: failed streaming copy from %s: %v", srcUrl, err) - continue - } - - // Success - create chunk metadata - newChunk := &filer_pb.FileChunk{ - FileId: fileId, - Offset: srcChunk.Offset, - Size: srcChunk.Size, - ETag: srcChunk.ETag, - } - - glog.V(4).InfofCtx(ctx, "FilerServer.streamCopyChunk: successfully streamed %d bytes", srcChunk.Size) - return newChunk, nil - } - - // All locations failed - return nil, fmt.Errorf("failed to stream copy chunk from any location: %w", lastErr) -} - -// performStreamCopy performs the actual streaming copy from source URL to destination URL -func (fs *FilerServer) performStreamCopy(ctx context.Context, srcUrl, dstUrl, auth string, expectedSize uint64, client *http.Client) error { - // Create HTTP request to read from source - req, err := http.NewRequestWithContext(ctx, "GET", srcUrl, nil) - if err != nil { - return fmt.Errorf("failed to create source request: %v", err) - } - - // Perform source request - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("failed to read from source: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("source returned status %d", resp.StatusCode) - } - - // Create HTTP request to write to destination - dstReq, err := http.NewRequestWithContext(ctx, "PUT", dstUrl, resp.Body) - if err != nil { - return fmt.Errorf("failed to create destination request: %v", err) - } - dstReq.ContentLength = int64(expectedSize) - - // Set authorization header if provided - if auth != "" { - dstReq.Header.Set("Authorization", "Bearer "+auth) - } - dstReq.Header.Set("Content-Type", "application/octet-stream") - - // Perform destination request - dstResp, err := client.Do(dstReq) - if err != nil { - return fmt.Errorf("failed to write to destination: %v", err) - } - defer dstResp.Body.Close() - - if dstResp.StatusCode != http.StatusCreated && dstResp.StatusCode != http.StatusOK { - // Read error response body for more details - body, readErr := io.ReadAll(dstResp.Body) - if readErr != nil { - return fmt.Errorf("destination returned status %d, and failed to read body: %w", dstResp.StatusCode, readErr) - } - return fmt.Errorf("destination returned status %d: %s", dstResp.StatusCode, string(body)) - } - - glog.V(4).InfofCtx(ctx, "FilerServer.performStreamCopy: successfully streamed data from %s to %s", srcUrl, dstUrl) - return nil -} diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go index ff060ec52..301d609ec 100644 --- a/weed/server/filer_server_handlers_proxy.go +++ b/weed/server/filer_server_handlers_proxy.go @@ -1,42 +1,30 @@ package weed_server import ( - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/security" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" - "github.com/seaweedfs/seaweedfs/weed/util/mem" - "github.com/seaweedfs/seaweedfs/weed/util/request_id" - + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/mem" "io" - "math/rand/v2" + "math/rand" "net/http" ) -func (fs *FilerServer) maybeAddVolumeJwtAuthorization(r *http.Request, fileId string, isWrite bool) { - encodedJwt := fs.maybeGetVolumeJwtAuthorizationToken(fileId, isWrite) +var ( + client *http.Client +) - if encodedJwt == "" { - return - } - - r.Header.Set("Authorization", "BEARER "+string(encodedJwt)) -} - -func (fs *FilerServer) maybeGetVolumeJwtAuthorizationToken(fileId string, isWrite bool) string { - var encodedJwt security.EncodedJwt - if isWrite { - encodedJwt = security.GenJwtForVolumeServer(fs.volumeGuard.SigningKey, fs.volumeGuard.ExpiresAfterSec, fileId) - } else { - encodedJwt = security.GenJwtForVolumeServer(fs.volumeGuard.ReadSigningKey, fs.volumeGuard.ReadExpiresAfterSec, fileId) - } - return string(encodedJwt) +func init() { + client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} } func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Request, fileId string) { - ctx := r.Context() - urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(ctx, fileId) + + urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId) if err != nil { - glog.ErrorfCtx(ctx, "locate %s: %v", fileId, err) + glog.Errorf("locate %s: %v", fileId, err) w.WriteHeader(http.StatusInternalServerError) return } @@ -46,16 +34,15 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques return } - proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.IntN(len(urlStrings))], r.Body) + proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.Intn(len(urlStrings))], r.Body) if err != nil { - glog.ErrorfCtx(ctx, "NewRequest %s: %v", urlStrings[0], err) + glog.Errorf("NewRequest %s: %v", urlStrings[0], err) w.WriteHeader(http.StatusInternalServerError) return } proxyReq.Header.Set("Host", r.Host) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - request_id.InjectToRequest(ctx, proxyReq) for header, values := range r.Header { for _, value := range values { @@ -63,14 +50,14 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques } } - proxyResponse, postErr := util_http.GetGlobalHttpClient().Do(proxyReq) + proxyResponse, postErr := client.Do(proxyReq) if postErr != nil { - glog.ErrorfCtx(ctx, "post to filer: %v", postErr) + glog.Errorf("post to filer: %v", postErr) w.WriteHeader(http.StatusInternalServerError) return } - defer util_http.CloseResponse(proxyResponse) + defer util.CloseResponse(proxyResponse) for k, v := range proxyResponse.Header { w.Header()[k] = v diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ab474eef0..28573f7b3 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,10 +1,11 @@ package weed_server import ( - "encoding/base64" - "encoding/hex" - "errors" + "bytes" + "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/util/mem" "io" "math" "mime" @@ -14,23 +15,20 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/security" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/images" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) // Validates the preconditions. Returns true if GET/HEAD operation should not proceed. // Preconditions supported are: -// -// If-Modified-Since -// If-Unmodified-Since -// If-Match -// If-None-Match +// If-Modified-Since +// If-Unmodified-Since +// If-Match +// If-None-Match func checkPreconditions(w http.ResponseWriter, r *http.Request, entry *filer.Entry) bool { etag := filer.ETagEntry(entry) @@ -67,14 +65,12 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, entry *filer.Ent ifModifiedSinceHeader := r.Header.Get("If-Modified-Since") if ifNoneMatchETagHeader != "" { if util.CanonicalizeETag(etag) == util.CanonicalizeETag(ifNoneMatchETagHeader) { - SetEtag(w, etag) w.WriteHeader(http.StatusNotModified) return true } } else if ifModifiedSinceHeader != "" { if t, parseError := time.Parse(http.TimeFormat, ifModifiedSinceHeader); parseError == nil { - if !t.Before(entry.Attr.Mtime) { - SetEtag(w, etag) + if t.After(entry.Attr.Mtime) { w.WriteHeader(http.StatusNotModified) return true } @@ -85,96 +81,64 @@ func checkPreconditions(w http.ResponseWriter, r *http.Request, entry *filer.Ent } func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + path := r.URL.Path isForDirectory := strings.HasSuffix(path, "/") if isForDirectory && len(path) > 1 { path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) return } if err == filer_pb.ErrNotFound { - glog.V(2).InfofCtx(ctx, "Not found %s: %v", path, err) - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadNotFound).Inc() + glog.V(1).Infof("Not found %s: %v", path, err) + stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadNotFound).Inc() w.WriteHeader(http.StatusNotFound) } else { - glog.ErrorfCtx(ctx, "Internal %s: %v", path, err) - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadInternal).Inc() + glog.Errorf("Internal %s: %v", path, err) + stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadInternal).Inc() w.WriteHeader(http.StatusInternalServerError) } return } - query := r.URL.Query() - if entry.IsDirectory() { if fs.option.DisableDirListing { - w.WriteHeader(http.StatusForbidden) + w.WriteHeader(http.StatusMethodNotAllowed) return } - if query.Get("metadata") == "true" { - writeJsonQuiet(w, r, http.StatusOK, entry) - return - } - if entry.Attr.Mime == "" || (entry.Attr.Mime == s3_constants.FolderMimeType && r.Header.Get(s3_constants.AmzIdentityId) == "") { - // Don't return directory meta if config value is set to true - if fs.option.ExposeDirectoryData == false { - writeJsonError(w, r, http.StatusForbidden, errors.New("directory listing is disabled")) - return - } - // return index of directory for non s3 gateway - fs.listDirectoryHandler(w, r) - return - } - // inform S3 API this is a user created directory key object - w.Header().Set(s3_constants.SeaweedFSIsDirectoryKey, "true") + fs.listDirectoryHandler(w, r) + return } - if isForDirectory && entry.Attr.Mime != s3_constants.FolderMimeType { + if isForDirectory { w.WriteHeader(http.StatusNotFound) return } + query := r.URL.Query() if query.Get("metadata") == "true" { if query.Get("resolveManifest") == "true" { if entry.Chunks, _, err = filer.ResolveChunkManifest( - ctx, fs.filer.MasterClient.GetLookupFileIdFunction(), - entry.GetChunks(), 0, math.MaxInt64); err != nil { + entry.Chunks, 0, math.MaxInt64); err != nil { err = fmt.Errorf("failed to resolve chunk manifest, err: %s", err.Error()) writeJsonError(w, r, http.StatusInternalServerError, err) - return } } writeJsonQuiet(w, r, http.StatusOK, entry) return } + etag := filer.ETagEntry(entry) if checkPreconditions(w, r, entry) { return } - var etag string - if partNumber, errNum := strconv.Atoi(r.Header.Get(s3_constants.SeaweedFSPartNumber)); errNum == nil { - if len(entry.Chunks) < partNumber { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadChunk).Inc() - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte("InvalidPart")) - return - } - w.Header().Set(s3_constants.AmzMpPartsCount, strconv.Itoa(len(entry.Chunks))) - partChunk := entry.GetChunks()[partNumber-1] - md5, _ := base64.StdEncoding.DecodeString(partChunk.ETag) - etag = hex.EncodeToString(md5) - r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", partChunk.Offset, uint64(partChunk.Offset)+partChunk.Size-1)) - } else { - etag = filer.ETagEntry(entry) - } w.Header().Set("Accept-Ranges", "bytes") // mime type @@ -186,15 +150,12 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } if mimeType != "" { w.Header().Set("Content-Type", mimeType) - } else { - w.Header().Set("Content-Type", "application/octet-stream") } // print out the header from extended properties for k, v := range entry.Extended { - if !strings.HasPrefix(k, "xattr-") && !strings.HasPrefix(k, "x-seaweedfs-") { + if !strings.HasPrefix(k, "xattr-") { // "xattr-" prefix is set in filesys.XATTR_PREFIX - // "x-seaweedfs-" prefix is for internal metadata that should not become HTTP headers w.Header().Set(k, string(v)) } } @@ -220,85 +181,68 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.Header().Set(s3_constants.AmzTagCount, strconv.Itoa(tagCount)) } - // Set SSE metadata headers for S3 API consumption - if sseIV, exists := entry.Extended[s3_constants.SeaweedFSSSEIV]; exists { - // Convert binary IV to base64 for HTTP header - ivBase64 := base64.StdEncoding.EncodeToString(sseIV) - w.Header().Set(s3_constants.SeaweedFSSSEIVHeader, ivBase64) - } - - // Set SSE-C algorithm and key MD5 headers for S3 API response - if sseAlgorithm, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm]; exists { - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerAlgorithm, string(sseAlgorithm)) - } - if sseKeyMD5, exists := entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5]; exists { - w.Header().Set(s3_constants.AmzServerSideEncryptionCustomerKeyMD5, string(sseKeyMD5)) - } - - if sseKMSKey, exists := entry.Extended[s3_constants.SeaweedFSSSEKMSKey]; exists { - // Convert binary KMS metadata to base64 for HTTP header - kmsBase64 := base64.StdEncoding.EncodeToString(sseKMSKey) - w.Header().Set(s3_constants.SeaweedFSSSEKMSKeyHeader, kmsBase64) - } - - SetEtag(w, etag) + setEtag(w, etag) filename := entry.Name() - AdjustPassthroughHeaders(w, r, filename) + adjustPassthroughHeaders(w, r, filename) - // For range processing, use the original content size, not the encrypted size - // entry.Size() returns max(chunk_sizes, file_size) where chunk_sizes include encryption overhead - // For SSE objects, we need the original unencrypted size for proper range validation - totalSize := int64(entry.FileSize) + totalSize := int64(entry.Size()) - if r.Method == http.MethodHead { + if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) return } - ProcessRangeRequest(r, w, totalSize, mimeType, func(offset int64, size int64) (filer.DoStreamContent, error) { - if offset+size <= int64(len(entry.Content)) { - return func(writer io.Writer) error { - _, err := writer.Write(entry.Content[offset : offset+size]) - if err != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorWriteEntry).Inc() - glog.ErrorfCtx(ctx, "failed to write entry content: %v", err) - } - return err - }, nil + if rangeReq := r.Header.Get("Range"); rangeReq == "" { + ext := filepath.Ext(filename) + if len(ext) > 0 { + ext = strings.ToLower(ext) } - chunks := entry.GetChunks() + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + data := mem.Allocate(int(totalSize)) + defer mem.Free(data) + err := filer.ReadAll(data, fs.filer.MasterClient, entry.Chunks) + if err != nil { + glog.Errorf("failed to read %s: %v", path, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode) + io.Copy(w, rs) + return + } + } + + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if offset+size <= int64(len(entry.Content)) { + _, err := writer.Write(entry.Content[offset : offset+size]) + if err != nil { + stats.FilerRequestCounter.WithLabelValues(stats.ErrorWriteEntry).Inc() + glog.Errorf("failed to write entry content: %v", err) + } + return err + } + chunks := entry.Chunks if entry.IsInRemoteOnly() { dir, name := entry.FullPath.DirAndName() - if resp, err := fs.CacheRemoteObjectToLocalCluster(ctx, &filer_pb.CacheRemoteObjectToLocalClusterRequest{ + if resp, err := fs.CacheRemoteObjectToLocalCluster(context.Background(), &filer_pb.CacheRemoteObjectToLocalClusterRequest{ Directory: dir, Name: name, }); err != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadCache).Inc() - glog.ErrorfCtx(ctx, "CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err) - return nil, fmt.Errorf("cache %s: %v", entry.FullPath, err) + stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadCache).Inc() + glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err) + return fmt.Errorf("cache %s: %v", entry.FullPath, err) } else { - chunks = resp.Entry.GetChunks() + chunks = resp.Entry.Chunks } } - streamFn, err := filer.PrepareStreamContentWithThrottler(ctx, fs.filer.MasterClient, fs.maybeGetVolumeReadJwtAuthorizationToken, chunks, offset, size, fs.option.DownloadMaxBytesPs) + err = filer.StreamContent(fs.filer.MasterClient, writer, chunks, offset, size) if err != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() - glog.ErrorfCtx(ctx, "failed to prepare stream content %s: %v", r.URL, err) - return nil, err + stats.FilerRequestCounter.WithLabelValues(stats.ErrorReadStream).Inc() + glog.Errorf("failed to stream content %s: %v", r.URL, err) } - return func(writer io.Writer) error { - err := streamFn(writer) - if err != nil { - stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() - glog.ErrorfCtx(ctx, "failed to stream content %s: %v", r.URL, err) - } - return err - }, nil + return err }) } - -func (fs *FilerServer) maybeGetVolumeReadJwtAuthorizationToken(fileId string) string { - return string(security.GenJwtForVolumeServer(fs.volumeGuard.ReadSigningKey, fs.volumeGuard.ReadExpiresAfterSec, fileId)) -} diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 9266cb536..eaf17fa18 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -1,49 +1,43 @@ package weed_server import ( - "errors" - "github.com/seaweedfs/seaweedfs/weed/util/version" + "context" "net/http" "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" - ui "github.com/seaweedfs/seaweedfs/weed/server/filer_ui" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) -// listDirectoryHandler lists directories and folders under a directory +// listDirectoryHandler lists directories and folers under a directory // files are sorted by name and paginated via "lastFileName" and "limit". // sub directories are listed on the first page, when "lastFileName" // is empty. func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - if fs.option.ExposeDirectoryData == false { - writeJsonError(w, r, http.StatusForbidden, errors.New("ui is disabled")) - return - } - stats.FilerHandlerCounter.WithLabelValues(stats.DirList).Inc() + stats.FilerRequestCounter.WithLabelValues("list").Inc() path := r.URL.Path if strings.HasSuffix(path, "/") && len(path) > 1 { path = path[:len(path)-1] } - limit, limitErr := strconv.Atoi(r.FormValue("limit")) - if limitErr != nil { - limit = fs.option.DirListingLimit + limit, limit_err := strconv.Atoi(r.FormValue("limit")) + if limit_err != nil { + limit = 100 } lastFileName := r.FormValue("lastFileName") namePattern := r.FormValue("namePattern") namePatternExclude := r.FormValue("namePatternExclude") - entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(ctx, util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) + entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) if err != nil { - glog.V(0).InfofCtx(ctx, "listDirectory %s %s %d: %s", path, lastFileName, limit, err) + glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) w.WriteHeader(http.StatusNotFound) return } @@ -58,11 +52,10 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque emptyFolder = false } - glog.V(4).InfofCtx(ctx, "listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries)) + glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries)) if r.Header.Get("Accept") == "application/json" { writeJsonQuiet(w, r, http.StatusOK, struct { - Version string Path string Entries interface{} Limit int @@ -70,7 +63,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque ShouldDisplayLoadMore bool EmptyFolder bool }{ - version.Version(), path, entries, limit, @@ -82,7 +74,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque } err = ui.StatusTpl.Execute(w, struct { - Version string Path string Breadcrumbs []ui.Breadcrumb Entries interface{} @@ -92,7 +83,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque EmptyFolder bool ShowDirectoryDelete bool }{ - version.Version(), path, ui.ToBreadcrumb(path), entries, @@ -103,7 +93,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque fs.option.ShowUIDirectoryDelete, }) if err != nil { - glog.V(0).InfofCtx(ctx, "Template Execute Error: %v", err) + glog.V(0).Infof("Template Execute Error: %v", err) } - } diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go index 8d6961959..ae2093947 100644 --- a/weed/server/filer_server_handlers_tagging.go +++ b/weed/server/filer_server_handlers_tagging.go @@ -1,19 +1,20 @@ package weed_server import ( + "context" "net/http" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) // add or replace one file Seaweed- prefixed attributes // curl -X PUT -H "Seaweed-Name1: value1" http://localhost:8888/path/to/a/file?tagging func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + ctx := context.Background() path := r.URL.Path if strings.HasSuffix(path, "/") { @@ -42,9 +43,9 @@ func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) } } - if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false, fs.filer.MaxFilenameLength); dbErr != nil { - glog.V(0).InfofCtx(ctx, "failing to update %s tagging : %v", path, dbErr) - writeJsonError(w, r, http.StatusInternalServerError, dbErr) + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false); dbErr != nil { + glog.V(0).Infof("failing to update %s tagging : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, err) return } @@ -56,7 +57,7 @@ func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) // curl -X DELETE http://localhost:8888/path/to/a/file?tagging func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + ctx := context.Background() path := r.URL.Path if strings.HasSuffix(path, "/") { @@ -108,9 +109,9 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque return } - if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false, fs.filer.MaxFilenameLength); dbErr != nil { - glog.V(0).InfofCtx(ctx, "failing to delete %s tagging : %v", path, dbErr) - writeJsonError(w, r, http.StatusInternalServerError, dbErr) + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false); dbErr != nil { + glog.V(0).Infof("failing to delete %s tagging : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, err) return } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 4f1ca05be..bbaf28aa8 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -4,22 +4,19 @@ import ( "context" "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "net/http" "os" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/constants" - util_http "github.com/seaweedfs/seaweedfs/weed/util/http" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -35,34 +32,22 @@ type FilerPostResult struct { Error string `json:"error,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(ctx context.Context, so *operation.StorageOption) (fileId, urlLocation string, auth security.EncodedJwt, err error) { +func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, urlLocation string, auth security.EncodedJwt, err error) { - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkAssign).Inc() + stats.FilerRequestCounter.WithLabelValues("assign").Inc() start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues(stats.ChunkAssign).Observe(time.Since(start).Seconds()) - }() + defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }() ar, altRequest := so.ToAssignRequests(1) - assignResult, ae := operation.Assign(ctx, fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest) if ae != nil { - glog.ErrorfCtx(ctx, "failing to assign a file id: %v", ae) + glog.Errorf("failing to assign a file id: %v", ae) err = ae return } fileId = assignResult.Fid - assignUrl := assignResult.Url - // Prefer same data center - if fs.option.DataCenter != "" { - for _, repl := range assignResult.Replicas { - if repl.DataCenter == fs.option.DataCenter { - assignUrl = repl.Url - break - } - } - } - urlLocation = "http://" + assignUrl + "/" + assignResult.Fid + urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid if so.Fsync { urlLocation += "?fsync=true" } @@ -71,7 +56,8 @@ func (fs *FilerServer) assignNewFileInfo(ctx context.Context, so *operation.Stor } func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, contentLength int64) { - ctx := r.Context() + + ctx := context.Background() destination := r.RequestURI if finalDestination := r.Header.Get(s3_constants.SeaweedStorageDestinationHeader); finalDestination != "" { @@ -79,7 +65,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, conte } query := r.URL.Query() - so, err := fs.detectStorageOption0(ctx, destination, + so, err := fs.detectStorageOption0(destination, query.Get("collection"), query.Get("replication"), query.Get("ttl"), @@ -88,42 +74,24 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, conte query.Get("dataCenter"), query.Get("rack"), query.Get("dataNode"), - query.Get("saveInside"), ) if err != nil { if err == ErrReadOnly { w.WriteHeader(http.StatusInsufficientStorage) } else { - glog.V(1).InfolnCtx(ctx, "post", r.RequestURI, ":", err.Error()) + glog.V(1).Infoln("post", r.RequestURI, ":", err.Error()) w.WriteHeader(http.StatusInternalServerError) } return } - if util.FullPath(r.URL.Path).IsLongerFileName(so.MaxFileNameLength) { - glog.V(1).InfolnCtx(ctx, "post", r.RequestURI, ": ", "entry name too long") - w.WriteHeader(http.StatusRequestURITooLong) - return - } - - // When DiskType is empty,use filer's -disk - if so.DiskType == "" { - so.DiskType = fs.option.DiskType - } - - if strings.HasPrefix(r.URL.Path, "/etc") { - so.SaveInside = true - } - if query.Has("mv.from") { fs.move(ctx, w, r, so) - } else if query.Has("cp.from") { - fs.copy(ctx, w, r, so) } else { fs.autoChunk(ctx, w, r, contentLength, so) } - util_http.CloseRequest(r) + util.CloseRequest(r) } @@ -131,7 +99,7 @@ func (fs *FilerServer) move(ctx context.Context, w http.ResponseWriter, r *http. src := r.URL.Query().Get("mv.from") dst := r.URL.Path - glog.V(2).InfofCtx(ctx, "FilerServer.move %v to %v", src, dst) + glog.V(2).Infof("FilerServer.move %v to %v", src, dst) var err error if src, err = clearName(src); err != nil { @@ -151,11 +119,6 @@ func (fs *FilerServer) move(ctx context.Context, w http.ResponseWriter, r *http. srcPath := util.FullPath(src) dstPath := util.FullPath(dst) - if dstPath.IsLongerFileName(so.MaxFileNameLength) { - err = fmt.Errorf("dst name to long") - writeJsonError(w, r, http.StatusBadRequest, err) - return - } srcEntry, err := fs.filer.FindEntry(ctx, srcPath) if err != nil { err = fmt.Errorf("failed to get src entry '%s', err: %s", src, err) @@ -163,17 +126,6 @@ func (fs *FilerServer) move(ctx context.Context, w http.ResponseWriter, r *http. return } - wormEnforced, err := fs.wormEnforcedForEntry(ctx, src) - if err != nil { - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } else if wormEnforced { - // you cannot move a worm file or directory - err = fmt.Errorf("cannot move write-once entry from '%s' to '%s': %s", src, dst, constants.ErrMsgOperationNotPermitted) - writeJsonError(w, r, http.StatusForbidden, err) - return - } - oldDir, oldName := srcPath.DirAndName() newDir, newName := dstPath.DirAndName() newName = util.Nvl(newName, oldName) @@ -210,6 +162,7 @@ func (fs *FilerServer) move(ctx context.Context, w http.ResponseWriter, r *http. // curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true // curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { + isRecursive := r.FormValue("recursive") == "true" if !isRecursive && fs.option.recursiveDelete { if r.FormValue("recursive") != "false" { @@ -224,26 +177,21 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { objectPath = objectPath[0 : len(objectPath)-1] } - wormEnforced, err := fs.wormEnforcedForEntry(context.TODO(), objectPath) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil) if err != nil { - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } else if wormEnforced { - writeJsonError(w, r, http.StatusForbidden, errors.New(constants.ErrMsgOperationNotPermitted)) - return - } - - err = fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil, 0) - if err != nil && err != filer_pb.ErrNotFound { glog.V(1).Infoln("deleting", objectPath, ":", err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, err) + httpStatus := http.StatusInternalServerError + if err == filer_pb.ErrNotFound { + httpStatus = http.StatusNoContent + } + writeJsonError(w, r, httpStatus, err) return } w.WriteHeader(http.StatusNoContent) } -func (fs *FilerServer) detectStorageOption(ctx context.Context, requestURI, qCollection, qReplication string, ttlSeconds int32, diskType, dataCenter, rack, dataNode string) (*operation.StorageOption, error) { +func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, diskType, dataCenter, rack, dataNode string) (*operation.StorageOption, error) { rule := fs.filer.FilerConf.MatchStorageRule(requestURI) @@ -251,10 +199,6 @@ func (fs *FilerServer) detectStorageOption(ctx context.Context, requestURI, qCol return nil, ErrReadOnly } - if rule.MaxFileNameLength == 0 { - rule.MaxFileNameLength = fs.filer.MaxFilenameLength - } - // required by buckets folder bucketDefaultCollection := "" if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { @@ -264,7 +208,7 @@ func (fs *FilerServer) detectStorageOption(ctx context.Context, requestURI, qCol if ttlSeconds == 0 { ttl, err := needle.ReadTTL(rule.GetTtl()) if err != nil { - glog.ErrorfCtx(ctx, "fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) + glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) } ttlSeconds = int32(ttl.Minutes()) * 60 } @@ -279,29 +223,23 @@ func (fs *FilerServer) detectStorageOption(ctx context.Context, requestURI, qCol DiskType: util.Nvl(diskType, rule.DiskType), Fsync: rule.Fsync, VolumeGrowthCount: rule.VolumeGrowthCount, - MaxFileNameLength: rule.MaxFileNameLength, }, nil } -func (fs *FilerServer) detectStorageOption0(ctx context.Context, requestURI, qCollection, qReplication string, qTtl string, diskType string, fsync string, dataCenter, rack, dataNode, saveInside string) (*operation.StorageOption, error) { +func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, diskType string, fsync string, dataCenter, rack, dataNode string) (*operation.StorageOption, error) { ttl, err := needle.ReadTTL(qTtl) if err != nil { - glog.ErrorfCtx(ctx, "fail to parse ttl %s: %v", qTtl, err) + glog.Errorf("fail to parse ttl %s: %v", qTtl, err) } - so, err := fs.detectStorageOption(ctx, requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack, dataNode) + so, err := fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack, dataNode) if so != nil { if fsync == "false" { so.Fsync = false } else if fsync == "true" { so.Fsync = true } - if saveInside == "true" { - so.SaveInside = true - } else { - so.SaveInside = false - } } return so, err diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index ca36abcac..9c2b9959f 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,28 +1,24 @@ package weed_server import ( - "bytes" "context" - "encoding/base64" - "errors" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "io" "net/http" - "net/url" "os" "path" "strconv" "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/util/constants" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, so *operation.StorageOption) { @@ -38,12 +34,18 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * chunkSize := 1024 * 1024 * maxMB + stats.FilerRequestCounter.WithLabelValues("chunk").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunk").Observe(time.Since(start).Seconds()) + }() + var reply *FilerPostResult var err error var md5bytes []byte - if r.Method == http.MethodPost { + if r.Method == "POST" { if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") { - reply, err = fs.mkdir(ctx, w, r, so) + reply, err = fs.mkdir(ctx, w, r) } else { reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, contentLength, so) } @@ -51,17 +53,11 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, contentLength, so) } if err != nil { - errStr := err.Error() - switch { - case errStr == constants.ErrMsgOperationNotPermitted: - writeJsonError(w, r, http.StatusForbidden, err) - case strings.HasPrefix(errStr, "read input:") || errStr == io.ErrUnexpectedEOF.Error(): - writeJsonError(w, r, util.HttpStatusCancelled, err) - case strings.HasSuffix(errStr, "is a file") || strings.HasSuffix(errStr, "already exists"): + if strings.HasPrefix(err.Error(), "read input:") { + writeJsonError(w, r, 499, err) + } else if strings.HasSuffix(err.Error(), "is a file") { writeJsonError(w, r, http.StatusConflict, err) - case errStr == constants.ErrMsgBadDigest: - writeJsonError(w, r, http.StatusBadRequest, err) - default: + } else { writeJsonError(w, r, http.StatusInternalServerError, err) } } else if reply != nil { @@ -74,6 +70,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * } func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { + multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { return nil, nil, multipartReaderErr @@ -93,33 +90,15 @@ func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWrite contentType = "" } - if err := fs.checkPermissions(ctx, r, fileName); err != nil { - return nil, nil, err - } - - if so.SaveInside { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - buf.ReadFrom(part1) - filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, nil, nil, 0, buf.Bytes()) - bufPool.Put(buf) - return - } - - fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadRequestToChunks(ctx, w, r, part1, chunkSize, fileName, contentType, contentLength, so) + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, contentLength, so) if err != nil { return nil, nil, err } md5bytes = md5Hash.Sum(nil) - headerMd5 := r.Header.Get("Content-Md5") - if headerMd5 != "" && !(util.Base64Encode(md5bytes) == headerMd5 || fmt.Sprintf("%x", md5bytes) == headerMd5) { - fs.filer.DeleteUncommittedChunks(ctx, fileChunks) - return nil, nil, errors.New(constants.ErrMsgBadDigest) - } filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) if replyerr != nil { - fs.filer.DeleteUncommittedChunks(ctx, fileChunks) + fs.filer.DeleteChunks(fileChunks) } return @@ -133,25 +112,15 @@ func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter contentType = "" } - if err := fs.checkPermissions(ctx, r, fileName); err != nil { - return nil, nil, err - } - - fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadRequestToChunks(ctx, w, r, r.Body, chunkSize, fileName, contentType, contentLength, so) - + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, contentLength, so) if err != nil { return nil, nil, err } md5bytes = md5Hash.Sum(nil) - headerMd5 := r.Header.Get("Content-Md5") - if headerMd5 != "" && !(util.Base64Encode(md5bytes) == headerMd5 || fmt.Sprintf("%x", md5bytes) == headerMd5) { - fs.filer.DeleteUncommittedChunks(ctx, fileChunks) - return nil, nil, errors.New(constants.ErrMsgBadDigest) - } filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) if replyerr != nil { - fs.filer.DeleteUncommittedChunks(ctx, fileChunks) + fs.filer.DeleteChunks(fileChunks) } return @@ -165,78 +134,6 @@ func skipCheckParentDirEntry(r *http.Request) bool { return r.URL.Query().Get("skipCheckParentDir") == "true" } -func isS3Request(r *http.Request) bool { - return r.Header.Get(s3_constants.AmzAuthType) != "" || r.Header.Get("X-Amz-Date") != "" -} - -func (fs *FilerServer) checkPermissions(ctx context.Context, r *http.Request, fileName string) error { - fullPath := fs.fixFilePath(ctx, r, fileName) - enforced, err := fs.wormEnforcedForEntry(ctx, fullPath) - if err != nil { - return err - } else if enforced { - // you cannot change a worm file - return errors.New(constants.ErrMsgOperationNotPermitted) - } - - return nil -} - -func (fs *FilerServer) wormEnforcedForEntry(ctx context.Context, fullPath string) (bool, error) { - rule := fs.filer.FilerConf.MatchStorageRule(fullPath) - if !rule.Worm { - return false, nil - } - - entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullPath)) - if err != nil { - if errors.Is(err, filer_pb.ErrNotFound) { - return false, nil - } - - return false, err - } - - // worm is not enforced - if entry.WORMEnforcedAtTsNs == 0 { - return false, nil - } - - // worm will never expire - if rule.WormRetentionTimeSeconds == 0 { - return true, nil - } - - enforcedAt := time.Unix(0, entry.WORMEnforcedAtTsNs) - - // worm is expired - if time.Now().Sub(enforcedAt).Seconds() >= float64(rule.WormRetentionTimeSeconds) { - return false, nil - } - - return true, nil -} - -func (fs *FilerServer) fixFilePath(ctx context.Context, r *http.Request, fileName string) string { - // fix the path - fullPath := r.URL.Path - if strings.HasSuffix(fullPath, "/") { - if fileName != "" { - fullPath += fileName - } - } else { - if fileName != "" { - if possibleDirEntry, findDirErr := fs.filer.FindEntry(ctx, util.FullPath(fullPath)); findDirErr == nil { - if possibleDirEntry.IsDirectory() { - fullPath += "/" + fileName - } - } - } - } - - return fullPath -} - func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) { // detect file mode @@ -246,12 +143,25 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } mode, err := strconv.ParseUint(modeStr, 8, 32) if err != nil { - glog.ErrorfCtx(ctx, "Invalid mode format: %s, use 0660 by default", modeStr) + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) mode = 0660 } // fix the path - path := fs.fixFilePath(ctx, r, fileName) + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if fileName != "" { + path += fileName + } + } else { + if fileName != "" { + if possibleDirEntry, findDirErr := fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil { + if possibleDirEntry.IsDirectory() { + path += "/" + fileName + } + } + } + } var entry *filer.Entry var newChunks []*filer_pb.FileChunk @@ -263,7 +173,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa if isAppend || isOffsetWrite { existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path)) if findErr != nil && findErr != filer_pb.ErrNotFound { - glog.V(0).InfofCtx(ctx, "failing to find %s: %v", path, findErr) + glog.V(0).Infof("failing to find %s: %v", path, findErr) } entry = existingEntry } @@ -277,7 +187,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } entry.FileSize += uint64(chunkOffset) } - newChunks = append(entry.GetChunks(), fileChunks...) + newChunks = append(entry.Chunks, fileChunks...) // TODO if len(entry.Content) > 0 { @@ -286,7 +196,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } } else { - glog.V(4).InfolnCtx(ctx, "saving", path) + glog.V(4).Infoln("saving", path) newChunks = fileChunks entry = &filer.Entry{ FullPath: util.FullPath(path), @@ -306,16 +216,16 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } // maybe concatenate small chunks into one whole chunk - mergedChunks, replyerr = fs.maybeMergeChunks(ctx, so, newChunks) + mergedChunks, replyerr = fs.maybeMergeChunks(so, newChunks) if replyerr != nil { - glog.V(0).InfofCtx(ctx, "merge chunks %s: %v", r.RequestURI, replyerr) + glog.V(0).Infof("merge chunks %s: %v", r.RequestURI, replyerr) mergedChunks = newChunks } // maybe compact entry chunks - mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(ctx, so), mergedChunks) + mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks) if replyerr != nil { - glog.V(0).InfofCtx(ctx, "manifestize %s: %v", r.RequestURI, replyerr) + glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr) return } entry.Chunks = mergedChunks @@ -335,10 +245,6 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa if len(v) > 0 && len(v[0]) > 0 { if strings.HasPrefix(k, needle.PairNamePrefix) || k == "Cache-Control" || k == "Expires" || k == "Content-Disposition" { entry.Extended[k] = []byte(v[0]) - // Log version ID header specifically for debugging - if k == "Seaweed-X-Amz-Version-Id" { - glog.V(0).Infof("filer: storing version ID header in Extended: %s=%s for path=%s", k, v[0], path) - } } if k == "Response-Content-Disposition" { entry.Extended["Content-Disposition"] = []byte(v[0]) @@ -346,97 +252,43 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } } - // Process SSE metadata headers sent by S3 API and store in entry extended metadata - if sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader); sseIVHeader != "" { - // Decode base64-encoded IV and store in metadata - if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil { - entry.Extended[s3_constants.SeaweedFSSSEIV] = ivData - glog.V(4).Infof("Stored SSE-C IV metadata for %s", entry.FullPath) - } else { - glog.Errorf("Failed to decode SSE-C IV header for %s: %v", entry.FullPath, err) - } - } - - // Store SSE-C algorithm and key MD5 for proper S3 API response headers - if sseAlgorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); sseAlgorithm != "" { - entry.Extended[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(sseAlgorithm) - glog.V(4).Infof("Stored SSE-C algorithm metadata for %s", entry.FullPath) - } - if sseKeyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); sseKeyMD5 != "" { - entry.Extended[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(sseKeyMD5) - glog.V(4).Infof("Stored SSE-C key MD5 metadata for %s", entry.FullPath) - } - - if sseKMSHeader := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader); sseKMSHeader != "" { - // Decode base64-encoded KMS metadata and store - if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeader); err == nil { - entry.Extended[s3_constants.SeaweedFSSSEKMSKey] = kmsData - glog.V(4).Infof("Stored SSE-KMS metadata for %s", entry.FullPath) - } else { - glog.Errorf("Failed to decode SSE-KMS metadata header for %s: %v", entry.FullPath, err) - } - } - - dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength) - // In test_bucket_listv2_delimiter_basic, the valid object key is the parent folder - if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && isS3Request(r) { - dbErr = fs.filer.CreateEntry(ctx, entry, false, false, nil, true, so.MaxFileNameLength) - } - if dbErr != nil { + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r)); dbErr != nil { replyerr = dbErr filerResult.Error = dbErr.Error() - glog.V(0).InfofCtx(ctx, "failing to write %s to filer server : %v", path, dbErr) + glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) } return filerResult, replyerr } -func (fs *FilerServer) saveAsChunk(ctx context.Context, so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { +func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { - return func(reader io.Reader, name string, offset int64, tsNs int64) (*filer_pb.FileChunk, error) { - var fileId string - var uploadResult *operation.UploadResult - - err := util.Retry("saveAsChunk", func() error { - // assign one file id for one chunk - assignedFileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(ctx, so) - if assignErr != nil { - return assignErr - } - - fileId = assignedFileId - - // upload the chunk to the volume server - uploadOption := &operation.UploadOption{ - UploadUrl: urlLocation, - Filename: name, - Cipher: fs.option.Cipher, - IsInputCompressed: false, - MimeType: "", - PairMap: nil, - Jwt: auth, - } - - uploader, uploaderErr := operation.NewUploader() - if uploaderErr != nil { - return uploaderErr - } - - var uploadErr error - uploadResult, uploadErr, _ = uploader.Upload(ctx, reader, uploadOption) - if uploadErr != nil { - return uploadErr - } - return nil - }) - if err != nil { - return nil, err + return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, "", "", assignErr } - return uploadResult.ToPbFileChunk(fileId, offset, tsNs), nil + // upload the chunk to the volume server + uploadOption := &operation.UploadOption{ + UploadUrl: urlLocation, + Filename: name, + Cipher: fs.option.Cipher, + IsInputCompressed: false, + MimeType: "", + PairMap: nil, + Jwt: auth, + } + uploadResult, uploadErr, _ := operation.Upload(reader, uploadOption) + if uploadErr != nil { + return nil, "", "", uploadErr + } + + return uploadResult.ToPbFileChunk(fileId, offset), so.Collection, so.Replication, nil } } -func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, replyerr error) { +func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http.Request) (filerResult *FilerPostResult, replyerr error) { // detect file mode modeStr := r.URL.Query().Get("mode") @@ -445,7 +297,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http } mode, err := strconv.ParseUint(modeStr, 8, 32) if err != nil { - glog.ErrorfCtx(ctx, "Invalid mode format: %s, use 0660 by default", modeStr) + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) mode = 0660 } @@ -461,7 +313,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http return } - glog.V(4).InfolnCtx(ctx, "mkdir", path) + glog.V(4).Infoln("mkdir", path) entry := &filer.Entry{ FullPath: util.FullPath(path), Attr: filer.Attr{ @@ -470,7 +322,6 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http Mode: os.FileMode(mode) | os.ModeDir, Uid: OS_UID, Gid: OS_GID, - TtlSec: so.TtlSeconds, }, } @@ -478,10 +329,10 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http Name: util.FullPath(path).Name(), } - if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil { + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false); dbErr != nil { replyerr = dbErr filerResult.Error = dbErr.Error() - glog.V(0).InfofCtx(ctx, "failing to create dir %s on filer server : %v", path, dbErr) + glog.V(0).Infof("failing to create dir %s on filer server : %v", path, dbErr) } return filerResult, replyerr } @@ -499,24 +350,13 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool metadata[s3_constants.AmzStorageClass] = []byte(sc) } - if ce := r.Header.Get("Content-Encoding"); ce != "" { - metadata["Content-Encoding"] = []byte(ce) - } - if tags := r.Header.Get(s3_constants.AmzObjectTagging); tags != "" { - // Use url.ParseQuery for robust parsing and automatic URL decoding - parsedTags, err := url.ParseQuery(tags) - if err != nil { - glog.Errorf("Failed to parse S3 tags '%s': %v", tags, err) - } else { - for key, values := range parsedTags { - // According to S3 spec, if a key is provided multiple times, the last value is used. - // A tag value can be an empty string but not nil. - value := "" - if len(values) > 0 { - value = values[len(values)-1] - } - metadata[s3_constants.AmzObjectTagging+"-"+key] = []byte(value) + for _, v := range strings.Split(tags, "&") { + tag := strings.Split(v, "=") + if len(tag) == 2 { + metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1]) + } else if len(tag) == 1 { + metadata[s3_constants.AmzObjectTagging+"-"+tag[0]] = nil } } } @@ -529,27 +369,6 @@ func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool } } - // Handle SSE-C headers - if algorithm := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm); algorithm != "" { - metadata[s3_constants.AmzServerSideEncryptionCustomerAlgorithm] = []byte(algorithm) - } - if keyMD5 := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5); keyMD5 != "" { - // Store as-is; SSE-C MD5 is base64 and case-sensitive - metadata[s3_constants.AmzServerSideEncryptionCustomerKeyMD5] = []byte(keyMD5) - } - - //acp-owner - acpOwner := r.Header.Get(s3_constants.ExtAmzOwnerKey) - if len(acpOwner) > 0 { - metadata[s3_constants.ExtAmzOwnerKey] = []byte(acpOwner) - } - - //acp-grants - acpGrants := r.Header.Get(s3_constants.ExtAmzAclKey) - if len(acpOwner) > 0 { - metadata[s3_constants.ExtAmzAclKey] = []byte(acpGrants) - } - return } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index 2a3fb6b68..1f10d044e 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -8,24 +8,24 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) // handling single chunk POST or PUT upload func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) { - fileId, urlLocation, auth, err := fs.assignNewFileInfo(ctx, so) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(so) if err != nil || fileId == "" || urlLocation == "" { return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter) } - glog.V(4).InfofCtx(ctx, "write %s to %v", r.URL.Path, urlLocation) + glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) // Note: encrypt(gzip(data)), encrypt data first, then gzip @@ -53,19 +53,13 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht PairMap: pu.PairMap, Jwt: auth, } - - uploader, uploaderErr := operation.NewUploader() - if uploaderErr != nil { - return nil, fmt.Errorf("uploader initialization error: %w", uploaderErr) - } - - uploadResult, uploadError := uploader.UploadData(ctx, uncompressedData, uploadOption) + uploadResult, uploadError := operation.UploadData(uncompressedData, uploadOption) if uploadError != nil { - return nil, fmt.Errorf("upload to volume server: %w", uploadError) + return nil, fmt.Errorf("upload to volume server: %v", uploadError) } // Save to chunk manifest structure - fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0, time.Now().UnixNano())} + fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)} // fmt.Printf("uploaded: %+v\n", uploadResult) @@ -96,8 +90,8 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht Size: int64(pu.OriginalDataSize), } - if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil { - fs.filer.DeleteUncommittedChunks(ctx, entry.GetChunks()) + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) err = dbErr filerResult.Error = dbErr.Error() return diff --git a/weed/server/filer_server_handlers_write_merge.go b/weed/server/filer_server_handlers_write_merge.go index 24e642bd6..dadc6f726 100644 --- a/weed/server/filer_server_handlers_write_merge.go +++ b/weed/server/filer_server_handlers_write_merge.go @@ -1,75 +1,11 @@ package weed_server import ( - "context" - "io" - "math" - - "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -const MergeChunkMinCount int = 1000 - -func (fs *FilerServer) maybeMergeChunks(ctx context.Context, so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) { - // Don't merge SSE-encrypted chunks to preserve per-chunk metadata - for _, chunk := range inputChunks { - if chunk.GetSseType() != 0 { // Any SSE type (SSE-C or SSE-KMS) - glog.V(3).InfofCtx(ctx, "Skipping chunk merge for SSE-encrypted chunks") - return inputChunks, nil - } - } - - // Only merge small chunks more than half of the file - var chunkSize = fs.option.MaxMB * 1024 * 1024 - var smallChunk, sumChunk int - var minOffset int64 = math.MaxInt64 - for _, chunk := range inputChunks { - if chunk.IsChunkManifest { - continue - } - if chunk.Size < uint64(chunkSize/2) { - smallChunk++ - if chunk.Offset < minOffset { - minOffset = chunk.Offset - } - } - sumChunk++ - } - if smallChunk < MergeChunkMinCount || smallChunk < sumChunk/2 { - return inputChunks, nil - } - - return fs.mergeChunks(ctx, so, inputChunks, minOffset) -} - -func (fs *FilerServer) mergeChunks(ctx context.Context, so *operation.StorageOption, inputChunks []*filer_pb.FileChunk, chunkOffset int64) (mergedChunks []*filer_pb.FileChunk, mergeErr error) { - chunkedFileReader := filer.NewChunkStreamReaderFromFiler(ctx, fs.filer.MasterClient, inputChunks) - _, mergeErr = chunkedFileReader.Seek(chunkOffset, io.SeekCurrent) - if mergeErr != nil { - return nil, mergeErr - } - mergedChunks, _, _, mergeErr, _ = fs.uploadReaderToChunks(ctx, nil, chunkedFileReader, chunkOffset, int32(fs.option.MaxMB*1024*1024), "", "", true, so) - if mergeErr != nil { - return - } - - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkMerge).Inc() - for _, chunk := range inputChunks { - if chunk.Offset < chunkOffset || chunk.IsChunkManifest { - mergedChunks = append(mergedChunks, chunk) - } - } - - garbage, err := filer.MinusChunks(ctx, fs.lookupFileId, inputChunks, mergedChunks) - if err != nil { - glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", - mergedChunks, inputChunks) - return mergedChunks, err - } - fs.filer.DeleteChunksNotRecursive(garbage) - return +func (fs *FilerServer) maybeMergeChunks(so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) { + //TODO merge consecutive smaller chunks into a large chunk to reduce number of chunks + return inputChunks, nil } diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index 3f3102d14..fe3346402 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -2,28 +2,25 @@ package weed_server import ( "bytes" - "context" "crypto/md5" - "encoding/base64" "fmt" + "golang.org/x/exp/slices" "hash" "io" "net/http" "strconv" + "strings" "sync" + "sync/atomic" "time" - "slices" - - "encoding/json" - - "github.com/seaweedfs/seaweedfs/weed/glog" - "github.com/seaweedfs/seaweedfs/weed/operation" - "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) var bufPool = sync.Pool{ @@ -32,7 +29,7 @@ var bufPool = sync.Pool{ }, } -func (fs *FilerServer) uploadRequestToChunks(ctx context.Context, w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { +func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { query := r.URL.Query() isAppend := isAppend(r) @@ -50,36 +47,26 @@ func (fs *FilerServer) uploadRequestToChunks(ctx context.Context, w http.Respons chunkOffset = offsetInt } - return fs.uploadReaderToChunks(ctx, r, reader, chunkOffset, chunkSize, fileName, contentType, isAppend, so) -} - -func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, r *http.Request, reader io.Reader, startOffset int64, chunkSize int32, fileName, contentType string, isAppend bool, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, chunkOffset int64, uploadErr error, smallContent []byte) { - md5Hash = md5.New() - chunkOffset = startOffset var partReader = io.NopCloser(io.TeeReader(reader, md5Hash)) var wg sync.WaitGroup - var bytesBufferCounter int64 = 4 - bytesBufferLimitChan := make(chan struct{}, bytesBufferCounter) + var bytesBufferCounter int64 + bytesBufferLimitCond := sync.NewCond(new(sync.Mutex)) var fileChunksLock sync.Mutex - var uploadErrLock sync.Mutex for { // need to throttle used byte buffer - bytesBufferLimitChan <- struct{}{} - - // As long as there is an error in the upload of one chunk, it can be terminated early - // uploadErr may be modified in other go routines, lock is needed to avoid race condition - uploadErrLock.Lock() - if uploadErr != nil { - <-bytesBufferLimitChan - uploadErrLock.Unlock() - break + bytesBufferLimitCond.L.Lock() + for atomic.LoadInt64(&bytesBufferCounter) >= 4 { + glog.V(4).Infof("waiting for byte buffer %d", bytesBufferCounter) + bytesBufferLimitCond.Wait() } - uploadErrLock.Unlock() + atomic.AddInt64(&bytesBufferCounter, 1) + bytesBufferLimitCond.L.Unlock() bytesBuffer := bufPool.Get().(*bytes.Buffer) + glog.V(4).Infof("received byte buffer %d", bytesBufferCounter) limitedReader := io.LimitReader(partReader, int64(chunkSize)) @@ -90,56 +77,43 @@ func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, r *http.Request // data, err := io.ReadAll(limitedReader) if err != nil || dataSize == 0 { bufPool.Put(bytesBuffer) - <-bytesBufferLimitChan - if err != nil { - uploadErrLock.Lock() - if uploadErr == nil { - uploadErr = err - } - uploadErrLock.Unlock() - } + atomic.AddInt64(&bytesBufferCounter, -1) + bytesBufferLimitCond.Signal() + uploadErr = err break } if chunkOffset == 0 && !isAppend { - if dataSize < fs.option.SaveToFilerLimit { + if dataSize < fs.option.SaveToFilerLimit || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) { chunkOffset += dataSize smallContent = make([]byte, dataSize) bytesBuffer.Read(smallContent) bufPool.Put(bytesBuffer) - <-bytesBufferLimitChan - stats.FilerHandlerCounter.WithLabelValues(stats.ContentSaveToFiler).Inc() + atomic.AddInt64(&bytesBufferCounter, -1) + bytesBufferLimitCond.Signal() break } - } else { - stats.FilerHandlerCounter.WithLabelValues(stats.AutoChunk).Inc() } wg.Add(1) - go func(offset int64, buf *bytes.Buffer) { + go func(offset int64) { defer func() { - bufPool.Put(buf) - <-bytesBufferLimitChan + bufPool.Put(bytesBuffer) + atomic.AddInt64(&bytesBufferCounter, -1) + bytesBufferLimitCond.Signal() wg.Done() }() - chunks, toChunkErr := fs.dataToChunkWithSSE(ctx, r, fileName, contentType, buf.Bytes(), offset, so) + chunk, toChunkErr := fs.dataToChunk(fileName, contentType, bytesBuffer.Bytes(), offset, so) if toChunkErr != nil { - uploadErrLock.Lock() - if uploadErr == nil { - uploadErr = toChunkErr - } - uploadErrLock.Unlock() + uploadErr = toChunkErr } - if chunks != nil { + if chunk != nil { fileChunksLock.Lock() - fileChunksSize := len(fileChunks) + len(chunks) - for _, chunk := range chunks { - fileChunks = append(fileChunks, chunk) - glog.V(4).InfofCtx(ctx, "uploaded %s chunk %d to %s [%d,%d)", fileName, fileChunksSize, chunk.FileId, offset, offset+int64(chunk.Size)) - } + fileChunks = append(fileChunks, chunk) fileChunksLock.Unlock() + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), chunk.FileId, offset, offset+int64(chunk.Size)) } - }(chunkOffset, bytesBuffer) + }(chunkOffset) // reset variables for the next chunk chunkOffset = chunkOffset + dataSize @@ -153,25 +127,21 @@ func (fs *FilerServer) uploadReaderToChunks(ctx context.Context, r *http.Request wg.Wait() if uploadErr != nil { - glog.V(0).InfofCtx(ctx, "upload file %s error: %v", fileName, uploadErr) - for _, chunk := range fileChunks { - glog.V(4).InfofCtx(ctx, "purging failed uploaded %s chunk %s [%d,%d)", fileName, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) - } - fs.filer.DeleteUncommittedChunks(ctx, fileChunks) + fs.filer.DeleteChunks(fileChunks) return nil, md5Hash, 0, uploadErr, nil } - slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) int { - return int(a.Offset - b.Offset) + slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) bool { + return a.Offset < b.Offset }) return fileChunks, md5Hash, chunkOffset, nil, smallContent } -func (fs *FilerServer) doUpload(ctx context.Context, urlLocation string, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { +func (fs *FilerServer) doUpload(urlLocation string, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUpload).Inc() + stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc() start := time.Now() defer func() { - stats.FilerRequestHistogram.WithLabelValues(stats.ChunkUpload).Observe(time.Since(start).Seconds()) + stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds()) }() uploadOption := &operation.UploadOption{ @@ -183,24 +153,14 @@ func (fs *FilerServer) doUpload(ctx context.Context, urlLocation string, limited PairMap: pairMap, Jwt: auth, } - - uploader, err := operation.NewUploader() - if err != nil { - return nil, err, []byte{} - } - - uploadResult, err, data := uploader.Upload(ctx, limitedReader, uploadOption) + uploadResult, err, data := operation.Upload(limitedReader, uploadOption) if uploadResult != nil && uploadResult.RetryCount > 0 { - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkUploadRetry).Add(float64(uploadResult.RetryCount)) + stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount)) } return uploadResult, err, data } -func (fs *FilerServer) dataToChunk(ctx context.Context, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) { - return fs.dataToChunkWithSSE(ctx, nil, fileName, contentType, data, chunkOffset, so) -} - -func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, error) { +func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, chunkOffset int64, so *operation.StorageOption) (*filer_pb.FileChunk, error) { dataReader := util.NewBytesReader(data) // retry to assign a different file id @@ -208,35 +168,27 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, var auth security.EncodedJwt var uploadErr error var uploadResult *operation.UploadResult - var failedFileChunks []*filer_pb.FileChunk - - err := util.Retry("filerDataToChunk", func() error { + for i := 0; i < 3; i++ { // assign one file id for one chunk - fileId, urlLocation, auth, uploadErr = fs.assignNewFileInfo(ctx, so) + fileId, urlLocation, auth, uploadErr = fs.assignNewFileInfo(so) if uploadErr != nil { - glog.V(4).InfofCtx(ctx, "retry later due to assign error: %v", uploadErr) - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkAssignRetry).Inc() - return uploadErr + glog.V(4).Infof("retry later due to assign error: %v", uploadErr) + time.Sleep(time.Duration(i+1) * 251 * time.Millisecond) + continue } + // upload the chunk to the volume server - uploadResult, uploadErr, _ = fs.doUpload(ctx, urlLocation, dataReader, fileName, contentType, nil, auth) + uploadResult, uploadErr, _ = fs.doUpload(urlLocation, dataReader, fileName, contentType, nil, auth) if uploadErr != nil { - glog.V(4).InfofCtx(ctx, "retry later due to upload error: %v", uploadErr) - stats.FilerHandlerCounter.WithLabelValues(stats.ChunkDoUploadRetry).Inc() - fid, _ := filer_pb.ToFileIdObject(fileId) - fileChunk := filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Fid: fid, - } - failedFileChunks = append(failedFileChunks, &fileChunk) - return uploadErr + glog.V(4).Infof("retry later due to upload error: %v", uploadErr) + time.Sleep(time.Duration(i+1) * 251 * time.Millisecond) + continue } - return nil - }) - if err != nil { - glog.ErrorfCtx(ctx, "upload error: %v", err) - return failedFileChunks, err + break + } + if uploadErr != nil { + glog.Errorf("upload error: %v", uploadErr) + return nil, uploadErr } // if last chunk exhausted the reader exactly at the border @@ -244,82 +196,5 @@ func (fs *FilerServer) dataToChunkWithSSE(ctx context.Context, r *http.Request, return nil, nil } - // Extract SSE metadata from request headers if available - var sseType filer_pb.SSEType = filer_pb.SSEType_NONE - var sseMetadata []byte - - if r != nil { - - // Check for SSE-KMS - sseKMSHeaderValue := r.Header.Get(s3_constants.SeaweedFSSSEKMSKeyHeader) - if sseKMSHeaderValue != "" { - sseType = filer_pb.SSEType_SSE_KMS - if kmsData, err := base64.StdEncoding.DecodeString(sseKMSHeaderValue); err == nil { - sseMetadata = kmsData - glog.V(4).InfofCtx(ctx, "Storing SSE-KMS metadata for chunk %s at offset %d", fileId, chunkOffset) - } else { - glog.V(1).InfofCtx(ctx, "Failed to decode SSE-KMS metadata for chunk %s: %v", fileId, err) - } - } else if r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerAlgorithm) != "" { - // SSE-C: Create per-chunk metadata for unified handling - sseType = filer_pb.SSEType_SSE_C - - // Get SSE-C metadata from headers to create unified per-chunk metadata - sseIVHeader := r.Header.Get(s3_constants.SeaweedFSSSEIVHeader) - keyMD5Header := r.Header.Get(s3_constants.AmzServerSideEncryptionCustomerKeyMD5) - - if sseIVHeader != "" && keyMD5Header != "" { - // Decode IV from header - if ivData, err := base64.StdEncoding.DecodeString(sseIVHeader); err == nil { - // Create SSE-C metadata with chunk offset = chunkOffset for proper IV calculation - ssecMetadataStruct := struct { - Algorithm string `json:"algorithm"` - IV string `json:"iv"` - KeyMD5 string `json:"keyMD5"` - PartOffset int64 `json:"partOffset"` - }{ - Algorithm: "AES256", - IV: base64.StdEncoding.EncodeToString(ivData), - KeyMD5: keyMD5Header, - PartOffset: chunkOffset, - } - if ssecMetadata, serErr := json.Marshal(ssecMetadataStruct); serErr == nil { - sseMetadata = ssecMetadata - } else { - glog.V(1).InfofCtx(ctx, "Failed to serialize SSE-C metadata for chunk %s: %v", fileId, serErr) - } - } else { - glog.V(1).InfofCtx(ctx, "Failed to decode SSE-C IV for chunk %s: %v", fileId, err) - } - } else { - glog.V(4).InfofCtx(ctx, "SSE-C chunk %s missing IV or KeyMD5 header", fileId) - } - } else if r.Header.Get(s3_constants.SeaweedFSSSES3Key) != "" { - // SSE-S3: Server-side encryption with server-managed keys - // Set the correct SSE type for SSE-S3 chunks to maintain proper tracking - sseType = filer_pb.SSEType_SSE_S3 - - // Get SSE-S3 metadata from headers - sseS3Header := r.Header.Get(s3_constants.SeaweedFSSSES3Key) - if sseS3Header != "" { - if s3Data, err := base64.StdEncoding.DecodeString(sseS3Header); err == nil { - // For SSE-S3, store metadata at chunk level for consistency with SSE-KMS/SSE-C - glog.V(4).InfofCtx(ctx, "Storing SSE-S3 metadata for chunk %s at offset %d", fileId, chunkOffset) - sseMetadata = s3Data - } else { - glog.V(1).InfofCtx(ctx, "Failed to decode SSE-S3 metadata for chunk %s: %v", fileId, err) - } - } - } - } - - // Create chunk with SSE metadata if available - var chunk *filer_pb.FileChunk - if sseType != filer_pb.SSEType_NONE { - chunk = uploadResult.ToPbFileChunkWithSSE(fileId, chunkOffset, time.Now().UnixNano(), sseType, sseMetadata) - } else { - chunk = uploadResult.ToPbFileChunk(fileId, chunkOffset, time.Now().UnixNano()) - } - - return []*filer_pb.FileChunk{chunk}, nil + return uploadResult.ToPbFileChunk(fileId, chunkOffset), nil } diff --git a/weed/server/filer_server_rocksdb.go b/weed/server/filer_server_rocksdb.go index 57b1d08f8..75965e761 100644 --- a/weed/server/filer_server_rocksdb.go +++ b/weed/server/filer_server_rocksdb.go @@ -4,5 +4,5 @@ package weed_server import ( - _ "github.com/seaweedfs/seaweedfs/weed/filer/rocksdb" + _ "github.com/chrislusf/seaweedfs/weed/filer/rocksdb" ) diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index 638638196..3201ff76c 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -3,7 +3,7 @@ package filer_ui import ( "strings" - "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util" ) type Breadcrumb struct { @@ -13,9 +13,6 @@ type Breadcrumb struct { func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { parts := strings.Split(fullpath, "/") - if fullpath == "/" { - parts = []string{""} - } for i := 0; i < len(parts); i++ { name := parts[i] diff --git a/weed/server/filer_ui/breadcrumb_test.go b/weed/server/filer_ui/breadcrumb_test.go deleted file mode 100644 index 6e42541cb..000000000 --- a/weed/server/filer_ui/breadcrumb_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package filer_ui - -import ( - "reflect" - "testing" -) - -func TestToBreadcrumb(t *testing.T) { - type args struct { - fullpath string - } - tests := []struct { - name string - args args - wantCrumbs []Breadcrumb - }{ - { - name: "empty", - args: args{ - fullpath: "", - }, - wantCrumbs: []Breadcrumb{ - { - Name: "/", - Link: "/", - }, - }, - }, - { - name: "test1", - args: args{ - fullpath: "/", - }, - wantCrumbs: []Breadcrumb{ - { - Name: "/", - Link: "/", - }, - }, - }, - { - name: "test2", - args: args{ - fullpath: "/abc", - }, - wantCrumbs: []Breadcrumb{ - { - Name: "/", - Link: "/", - }, - { - Name: "abc", - Link: "/abc/", - }, - }, - }, - { - name: "test3", - args: args{ - fullpath: "/abc/def", - }, - wantCrumbs: []Breadcrumb{ - { - Name: "/", - Link: "/", - }, - { - Name: "abc", - Link: "/abc/", - }, - { - Name: "def", - Link: "/abc/def/", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotCrumbs := ToBreadcrumb(tt.args.fullpath); !reflect.DeepEqual(gotCrumbs, tt.wantCrumbs) { - t.Errorf("ToBreadcrumb() = %v, want %v", gotCrumbs, tt.wantCrumbs) - } - }) - } -} diff --git a/weed/server/filer_ui/filer.html b/weed/server/filer_ui/filer.html index 627f3ba77..c9d832e8f 100644 --- a/weed/server/filer_ui/filer.html +++ b/weed/server/filer_ui/filer.html @@ -1,7 +1,7 @@ - SeaweedFS Filer {{ .Version }} + SeaweedFS Filer